code
stringlengths 13
6.09M
| order_type
stringclasses 2
values | original_example
dict | step_ids
listlengths 1
5
|
---|---|---|---|
import numpy as np
import pandas as pd
def fetch_data(faultNumber, position):
    """Load a TEP case-study run merged with its LSTM-AE model output.

    Reads ``./data/TEP_CaseStudy_Fault_<faultNumber>_Pos_<position>%.csv``
    and the matching ``..._LSTM-AE_Output.csv``, drops the saved index
    column from each, joins the model's ``Loss_mae`` and ``Threshold``
    columns onto the raw data, and labels every row via ``_label_point``.

    Parameters:
        faultNumber: fault identifier embedded in the CSV file name.
        position: fault position (percent) embedded in the CSV file name.

    Returns:
        pandas.DataFrame: raw data plus ``Loss_mae``, ``Threshold`` and
        ``pointType`` columns, aligned on the default RangeIndex.
    """
    base = "./data/TEP_CaseStudy_Fault_" + str(faultNumber) + "_Pos_" + str(position) + "%"
    df1 = pd.read_csv(base + ".csv")
    # The first CSV column is a saved index; drop it so both frames align on
    # their default RangeIndex. (The original code also called set_index()
    # here but discarded its return value -- set_index() is not in-place, so
    # that call was a no-op and has been removed.)
    df1 = df1.drop(columns=[df1.columns[0]])
    df2 = pd.read_csv(base + "_LSTM-AE_Output.csv")
    df2 = df2.drop(columns=[df2.columns[0]])
    df1 = df1.join(df2["Loss_mae"])
    df1 = df1.join(df2["Threshold"])
    # apply() accepts the function object directly; no lambda wrapper needed.
    df1["pointType"] = df1.apply(_label_point, axis=1)
    # Removed dead code: the original joined "pointType" back onto df2 but
    # discarded the result (join is not in-place) and df2 is never used again.
    return df1
def _label_point(row):
if np.isnan(row.Threshold):
return "TR"
if (row["Loss_mae"] >= row["Threshold"]) and (row["faultNumber"] != 0):
return "TP"
if (row["Loss_mae"] < row["Threshold"]) and (row["faultNumber"] != 0):
return "FN"
if (row["Loss_mae"] >= row["Threshold"]) and (row["faultNumber"] == 0):
return "FP"
if (row["Loss_mae"] < row["Threshold"]) and (row["faultNumber"] == 0):
return "TN"
|
normal
|
{
"blob_id": "d71ec86f68cc81c93a39f15c785c75c2a1023f14",
"index": 2129,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef fetch_data(faultNumber, position):\n df1 = pd.read_csv('./data/TEP_CaseStudy_Fault_' + str(faultNumber) +\n '_Pos_' + str(position) + '%.csv')\n df1.set_index(df1.columns[0])\n df1 = df1.drop(columns=[df1.columns[0]])\n df2 = pd.read_csv('./data/TEP_CaseStudy_Fault_' + str(faultNumber) +\n '_Pos_' + str(position) + '%_LSTM-AE_Output.csv')\n df2.set_index(df2.columns[0])\n df2 = df2.drop(columns=[df2.columns[0]])\n df1 = df1.join(df2['Loss_mae'])\n df1 = df1.join(df2['Threshold'])\n df1['pointType'] = df1.apply(lambda row: _label_point(row), axis=1)\n df2.join(df1['pointType'])\n return df1\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef fetch_data(faultNumber, position):\n df1 = pd.read_csv('./data/TEP_CaseStudy_Fault_' + str(faultNumber) +\n '_Pos_' + str(position) + '%.csv')\n df1.set_index(df1.columns[0])\n df1 = df1.drop(columns=[df1.columns[0]])\n df2 = pd.read_csv('./data/TEP_CaseStudy_Fault_' + str(faultNumber) +\n '_Pos_' + str(position) + '%_LSTM-AE_Output.csv')\n df2.set_index(df2.columns[0])\n df2 = df2.drop(columns=[df2.columns[0]])\n df1 = df1.join(df2['Loss_mae'])\n df1 = df1.join(df2['Threshold'])\n df1['pointType'] = df1.apply(lambda row: _label_point(row), axis=1)\n df2.join(df1['pointType'])\n return df1\n\n\ndef _label_point(row):\n if np.isnan(row.Threshold):\n return 'TR'\n if row['Loss_mae'] >= row['Threshold'] and row['faultNumber'] != 0:\n return 'TP'\n if row['Loss_mae'] < row['Threshold'] and row['faultNumber'] != 0:\n return 'FN'\n if row['Loss_mae'] >= row['Threshold'] and row['faultNumber'] == 0:\n return 'FP'\n if row['Loss_mae'] < row['Threshold'] and row['faultNumber'] == 0:\n return 'TN'\n",
"step-4": "import numpy as np\nimport pandas as pd\n\n\ndef fetch_data(faultNumber, position):\n df1 = pd.read_csv('./data/TEP_CaseStudy_Fault_' + str(faultNumber) +\n '_Pos_' + str(position) + '%.csv')\n df1.set_index(df1.columns[0])\n df1 = df1.drop(columns=[df1.columns[0]])\n df2 = pd.read_csv('./data/TEP_CaseStudy_Fault_' + str(faultNumber) +\n '_Pos_' + str(position) + '%_LSTM-AE_Output.csv')\n df2.set_index(df2.columns[0])\n df2 = df2.drop(columns=[df2.columns[0]])\n df1 = df1.join(df2['Loss_mae'])\n df1 = df1.join(df2['Threshold'])\n df1['pointType'] = df1.apply(lambda row: _label_point(row), axis=1)\n df2.join(df1['pointType'])\n return df1\n\n\ndef _label_point(row):\n if np.isnan(row.Threshold):\n return 'TR'\n if row['Loss_mae'] >= row['Threshold'] and row['faultNumber'] != 0:\n return 'TP'\n if row['Loss_mae'] < row['Threshold'] and row['faultNumber'] != 0:\n return 'FN'\n if row['Loss_mae'] >= row['Threshold'] and row['faultNumber'] == 0:\n return 'FP'\n if row['Loss_mae'] < row['Threshold'] and row['faultNumber'] == 0:\n return 'TN'\n",
"step-5": "import numpy as np\nimport pandas as pd\n\n\ndef fetch_data(faultNumber, position):\n df1 = pd.read_csv(\"./data/TEP_CaseStudy_Fault_\" + str(faultNumber) + \"_Pos_\" + str(position) + \"%.csv\")\n df1.set_index(df1.columns[0])\n df1 = df1.drop(columns=[df1.columns[0]])\n\n df2 = pd.read_csv(\"./data/TEP_CaseStudy_Fault_\" + str(faultNumber) + \"_Pos_\" + str(position) + \"%_LSTM-AE_Output.csv\")\n df2.set_index(df2.columns[0])\n df2 = df2.drop(columns=[df2.columns[0]])\n\n df1 = df1.join(df2[\"Loss_mae\"])\n df1 = df1.join(df2[\"Threshold\"])\n\n df1[\"pointType\"] = df1.apply(lambda row: _label_point(row), axis=1)\n\n df2.join(df1[\"pointType\"])\n\n return df1\n\n\ndef _label_point(row):\n if np.isnan(row.Threshold):\n return \"TR\"\n if (row[\"Loss_mae\"] >= row[\"Threshold\"]) and (row[\"faultNumber\"] != 0):\n return \"TP\"\n if (row[\"Loss_mae\"] < row[\"Threshold\"]) and (row[\"faultNumber\"] != 0):\n return \"FN\"\n if (row[\"Loss_mae\"] >= row[\"Threshold\"]) and (row[\"faultNumber\"] == 0):\n return \"FP\"\n if (row[\"Loss_mae\"] < row[\"Threshold\"]) and (row[\"faultNumber\"] == 0):\n return \"TN\"\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'main.ui'
#
# Created by: PyQt5 UI code generator 5.14.1
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
    """pyuic5-generated UI definition for a video-downloader main window.

    NOTE(review): this class was generated from 'main.ui' -- edit the .ui
    file and regenerate rather than editing this class, or the changes
    will be lost (see the header warning above).
    """

    def setupUi(self, MainWindow):
        """Create and lay out all widgets on *MainWindow* (500x251)."""
        MainWindow.setObjectName("MainWindow")
        MainWindow.resize(500, 251)
        # Neon push-button style sheet by GTRONICK (URL embedded below).
        MainWindow.setStyleSheet("/*\n"
"Neon Style Sheet for QT Applications (QpushButton)\n"
"Author: Jaime A. Quiroga P.\n"
"Company: GTRONICK\n"
"Last updated: 24/10/2020, 15:42.\n"
"Available at: https://github.com/GTRONICK/QSS/blob/master/NeonButtons.qss\n"
"*/\n"
"QPushButton{\n"
"    border-style: solid;\n"
"    border-color: #050a0e;\n"
"    border-width: 1px;\n"
"    border-radius: 5px;\n"
"    color: #d3dae3;\n"
"    padding: 2px;\n"
"    background-color: #100E19;\n"
"}\n"
"QPushButton::default{\n"
"    border-style: solid;\n"
"    border-color: #050a0e;\n"
"    border-width: 1px;\n"
"    border-radius: 5px;\n"
"    color: #FFFFFF;\n"
"    padding: 2px;\n"
"    background-color: #151a1e;\n"
"}\n"
"QPushButton:hover{\n"
"    border-style: solid;\n"
"    border-top-color: qlineargradient(spread:pad, x1:0, y1:1, x2:1, y2:1, stop:0 #C0DB50, stop:0.4 #C0DB50, stop:0.5 #100E19, stop:1 #100E19);\n"
"    border-bottom-color: qlineargradient(spread:pad, x1:0, y1:1, x2:1, y2:1, stop:0 #100E19, stop:0.5 #100E19, stop:0.6 #C0DB50, stop:1 #C0DB50);\n"
"    border-left-color: qlineargradient(spread:pad, x1:0, y1:0, x2:0, y2:1, stop:0 #C0DB50, stop:0.3 #C0DB50, stop:0.7 #100E19, stop:1 #100E19);\n"
"    border-right-color: qlineargradient(spread:pad, x1:0, y1:1, x2:0, y2:0, stop:0 #C0DB50, stop:0.3 #C0DB50, stop:0.7 #100E19, stop:1 #100E19);\n"
"    border-width: 2px;\n"
"    border-radius: 1px;\n"
"    color: #d3dae3;\n"
"    padding: 2px;\n"
"}\n"
"QPushButton:pressed{\n"
"    border-style: solid;\n"
"    border-top-color: qlineargradient(spread:pad, x1:0, y1:1, x2:1, y2:1, stop:0 #d33af1, stop:0.4 #d33af1, stop:0.5 #100E19, stop:1 #100E19);\n"
"    border-bottom-color: qlineargradient(spread:pad, x1:0, y1:1, x2:1, y2:1, stop:0 #100E19, stop:0.5 #100E19, stop:0.6 #d33af1, stop:1 #d33af1);\n"
"    border-left-color: qlineargradient(spread:pad, x1:0, y1:0, x2:0, y2:1, stop:0 #d33af1, stop:0.3 #d33af1, stop:0.7 #100E19, stop:1 #100E19);\n"
"    border-right-color: qlineargradient(spread:pad, x1:0, y1:1, x2:0, y2:0, stop:0 #d33af1, stop:0.3 #d33af1, stop:0.7 #100E19, stop:1 #100E19);\n"
"    border-width: 2px;\n"
"    border-radius: 1px;\n"
"    color: #d3dae3;\n"
"    padding: 2px;\n"
"}")
        self.centralwidget = QtWidgets.QWidget(MainWindow)
        self.centralwidget.setObjectName("centralwidget")
        # "Download" action button (bottom right).
        self.pushButton_3 = QtWidgets.QPushButton(self.centralwidget)
        self.pushButton_3.setGeometry(QtCore.QRect(330, 180, 141, 31))
        self.pushButton_3.setStyleSheet("")
        self.pushButton_3.setObjectName("pushButton_3")
        # Video-URL input field.
        self.lineEdit = QtWidgets.QLineEdit(self.centralwidget)
        self.lineEdit.setGeometry(QtCore.QRect(130, 20, 341, 25))
        self.lineEdit.setObjectName("lineEdit")
        self.label_2 = QtWidgets.QLabel(self.centralwidget)
        self.label_2.setGeometry(QtCore.QRect(20, 70, 91, 20))
        self.label_2.setObjectName("label_2")
        # Save-location input field (next to the "Browse" button).
        self.lineEdit_2 = QtWidgets.QLineEdit(self.centralwidget)
        self.lineEdit_2.setGeometry(QtCore.QRect(130, 70, 261, 25))
        self.lineEdit_2.setObjectName("lineEdit_2")
        # "Search" action button (bottom left).
        self.pushButton_2 = QtWidgets.QPushButton(self.centralwidget)
        self.pushButton_2.setGeometry(QtCore.QRect(130, 180, 141, 31))
        self.pushButton_2.setStyleSheet("")
        self.pushButton_2.setObjectName("pushButton_2")
        self.label_3 = QtWidgets.QLabel(self.centralwidget)
        self.label_3.setGeometry(QtCore.QRect(20, 120, 64, 17))
        self.label_3.setObjectName("label_3")
        # "Browse" button for choosing the save location.
        self.pushButton = QtWidgets.QPushButton(self.centralwidget)
        self.pushButton.setGeometry(QtCore.QRect(400, 70, 71, 25))
        self.pushButton.setStyleSheet("")
        self.pushButton.setObjectName("pushButton")
        self.label = QtWidgets.QLabel(self.centralwidget)
        self.label.setGeometry(QtCore.QRect(20, 20, 71, 21))
        self.label.setObjectName("label")
        # Quality selector; presumably populated elsewhere at runtime --
        # no items are added here.
        self.comboBox = QtWidgets.QComboBox(self.centralwidget)
        self.comboBox.setGeometry(QtCore.QRect(130, 120, 341, 25))
        self.comboBox.setStyleSheet("background-color: rgb(101, 101, 101);")
        self.comboBox.setObjectName("comboBox")
        MainWindow.setCentralWidget(self.centralwidget)
        self.menubar = QtWidgets.QMenuBar(MainWindow)
        self.menubar.setGeometry(QtCore.QRect(0, 0, 500, 22))
        self.menubar.setObjectName("menubar")
        MainWindow.setMenuBar(self.menubar)
        self.statusbar = QtWidgets.QStatusBar(MainWindow)
        self.statusbar.setObjectName("statusbar")
        MainWindow.setStatusBar(self.statusbar)
        self.retranslateUi(MainWindow)
        QtCore.QMetaObject.connectSlotsByName(MainWindow)

    def retranslateUi(self, MainWindow):
        """Assign all user-visible strings (kept separate for Qt translation)."""
        _translate = QtCore.QCoreApplication.translate
        MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow"))
        self.pushButton_3.setText(_translate("MainWindow", "Download"))
        self.label_2.setText(_translate("MainWindow", "Save location"))
        self.pushButton_2.setText(_translate("MainWindow", "Search"))
        # NOTE(review): "Qualiti" looks like a typo for "Quality"; fix it in
        # main.ui so the correction survives regeneration.
        self.label_3.setText(_translate("MainWindow", "Qualiti"))
        self.pushButton.setText(_translate("MainWindow", "Browse"))
        self.label.setText(_translate("MainWindow", "Video URL"))
|
normal
|
{
"blob_id": "2d503c93160b6f44fba2495f0ae0cf9ba0eaf9d6",
"index": 8930,
"step-1": "<mask token>\n\n\nclass Ui_MainWindow(object):\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass Ui_MainWindow(object):\n <mask token>\n\n def retranslateUi(self, MainWindow):\n _translate = QtCore.QCoreApplication.translate\n MainWindow.setWindowTitle(_translate('MainWindow', 'MainWindow'))\n self.pushButton_3.setText(_translate('MainWindow', 'Download'))\n self.label_2.setText(_translate('MainWindow', 'Save location'))\n self.pushButton_2.setText(_translate('MainWindow', 'Search'))\n self.label_3.setText(_translate('MainWindow', 'Qualiti'))\n self.pushButton.setText(_translate('MainWindow', 'Browse'))\n self.label.setText(_translate('MainWindow', 'Video URL'))\n",
"step-3": "<mask token>\n\n\nclass Ui_MainWindow(object):\n\n def setupUi(self, MainWindow):\n MainWindow.setObjectName('MainWindow')\n MainWindow.resize(500, 251)\n MainWindow.setStyleSheet(\n \"\"\"/*\nNeon Style Sheet for QT Applications (QpushButton)\nAuthor: Jaime A. Quiroga P.\nCompany: GTRONICK\nLast updated: 24/10/2020, 15:42.\nAvailable at: https://github.com/GTRONICK/QSS/blob/master/NeonButtons.qss\n*/\nQPushButton{\n border-style: solid;\n border-color: #050a0e;\n border-width: 1px;\n border-radius: 5px;\n color: #d3dae3;\n padding: 2px;\n background-color: #100E19;\n}\nQPushButton::default{\n border-style: solid;\n border-color: #050a0e;\n border-width: 1px;\n border-radius: 5px;\n color: #FFFFFF;\n padding: 2px;\n background-color: #151a1e;\n}\nQPushButton:hover{\n border-style: solid;\n border-top-color: qlineargradient(spread:pad, x1:0, y1:1, x2:1, y2:1, stop:0 #C0DB50, stop:0.4 #C0DB50, stop:0.5 #100E19, stop:1 #100E19);\n border-bottom-color: qlineargradient(spread:pad, x1:0, y1:1, x2:1, y2:1, stop:0 #100E19, stop:0.5 #100E19, stop:0.6 #C0DB50, stop:1 #C0DB50);\n border-left-color: qlineargradient(spread:pad, x1:0, y1:0, x2:0, y2:1, stop:0 #C0DB50, stop:0.3 #C0DB50, stop:0.7 #100E19, stop:1 #100E19);\n border-right-color: qlineargradient(spread:pad, x1:0, y1:1, x2:0, y2:0, stop:0 #C0DB50, stop:0.3 #C0DB50, stop:0.7 #100E19, stop:1 #100E19);\n border-width: 2px;\n border-radius: 1px;\n color: #d3dae3;\n padding: 2px;\n}\nQPushButton:pressed{\n border-style: solid;\n border-top-color: qlineargradient(spread:pad, x1:0, y1:1, x2:1, y2:1, stop:0 #d33af1, stop:0.4 #d33af1, stop:0.5 #100E19, stop:1 #100E19);\n border-bottom-color: qlineargradient(spread:pad, x1:0, y1:1, x2:1, y2:1, stop:0 #100E19, stop:0.5 #100E19, stop:0.6 #d33af1, stop:1 #d33af1);\n border-left-color: qlineargradient(spread:pad, x1:0, y1:0, x2:0, y2:1, stop:0 #d33af1, stop:0.3 #d33af1, stop:0.7 #100E19, stop:1 #100E19);\n border-right-color: qlineargradient(spread:pad, x1:0, y1:1, x2:0, 
y2:0, stop:0 #d33af1, stop:0.3 #d33af1, stop:0.7 #100E19, stop:1 #100E19);\n border-width: 2px;\n border-radius: 1px;\n color: #d3dae3;\n padding: 2px;\n}\"\"\"\n )\n self.centralwidget = QtWidgets.QWidget(MainWindow)\n self.centralwidget.setObjectName('centralwidget')\n self.pushButton_3 = QtWidgets.QPushButton(self.centralwidget)\n self.pushButton_3.setGeometry(QtCore.QRect(330, 180, 141, 31))\n self.pushButton_3.setStyleSheet('')\n self.pushButton_3.setObjectName('pushButton_3')\n self.lineEdit = QtWidgets.QLineEdit(self.centralwidget)\n self.lineEdit.setGeometry(QtCore.QRect(130, 20, 341, 25))\n self.lineEdit.setObjectName('lineEdit')\n self.label_2 = QtWidgets.QLabel(self.centralwidget)\n self.label_2.setGeometry(QtCore.QRect(20, 70, 91, 20))\n self.label_2.setObjectName('label_2')\n self.lineEdit_2 = QtWidgets.QLineEdit(self.centralwidget)\n self.lineEdit_2.setGeometry(QtCore.QRect(130, 70, 261, 25))\n self.lineEdit_2.setObjectName('lineEdit_2')\n self.pushButton_2 = QtWidgets.QPushButton(self.centralwidget)\n self.pushButton_2.setGeometry(QtCore.QRect(130, 180, 141, 31))\n self.pushButton_2.setStyleSheet('')\n self.pushButton_2.setObjectName('pushButton_2')\n self.label_3 = QtWidgets.QLabel(self.centralwidget)\n self.label_3.setGeometry(QtCore.QRect(20, 120, 64, 17))\n self.label_3.setObjectName('label_3')\n self.pushButton = QtWidgets.QPushButton(self.centralwidget)\n self.pushButton.setGeometry(QtCore.QRect(400, 70, 71, 25))\n self.pushButton.setStyleSheet('')\n self.pushButton.setObjectName('pushButton')\n self.label = QtWidgets.QLabel(self.centralwidget)\n self.label.setGeometry(QtCore.QRect(20, 20, 71, 21))\n self.label.setObjectName('label')\n self.comboBox = QtWidgets.QComboBox(self.centralwidget)\n self.comboBox.setGeometry(QtCore.QRect(130, 120, 341, 25))\n self.comboBox.setStyleSheet('background-color: rgb(101, 101, 101);')\n self.comboBox.setObjectName('comboBox')\n MainWindow.setCentralWidget(self.centralwidget)\n self.menubar = 
QtWidgets.QMenuBar(MainWindow)\n self.menubar.setGeometry(QtCore.QRect(0, 0, 500, 22))\n self.menubar.setObjectName('menubar')\n MainWindow.setMenuBar(self.menubar)\n self.statusbar = QtWidgets.QStatusBar(MainWindow)\n self.statusbar.setObjectName('statusbar')\n MainWindow.setStatusBar(self.statusbar)\n self.retranslateUi(MainWindow)\n QtCore.QMetaObject.connectSlotsByName(MainWindow)\n\n def retranslateUi(self, MainWindow):\n _translate = QtCore.QCoreApplication.translate\n MainWindow.setWindowTitle(_translate('MainWindow', 'MainWindow'))\n self.pushButton_3.setText(_translate('MainWindow', 'Download'))\n self.label_2.setText(_translate('MainWindow', 'Save location'))\n self.pushButton_2.setText(_translate('MainWindow', 'Search'))\n self.label_3.setText(_translate('MainWindow', 'Qualiti'))\n self.pushButton.setText(_translate('MainWindow', 'Browse'))\n self.label.setText(_translate('MainWindow', 'Video URL'))\n",
"step-4": "from PyQt5 import QtCore, QtGui, QtWidgets\n\n\nclass Ui_MainWindow(object):\n\n def setupUi(self, MainWindow):\n MainWindow.setObjectName('MainWindow')\n MainWindow.resize(500, 251)\n MainWindow.setStyleSheet(\n \"\"\"/*\nNeon Style Sheet for QT Applications (QpushButton)\nAuthor: Jaime A. Quiroga P.\nCompany: GTRONICK\nLast updated: 24/10/2020, 15:42.\nAvailable at: https://github.com/GTRONICK/QSS/blob/master/NeonButtons.qss\n*/\nQPushButton{\n border-style: solid;\n border-color: #050a0e;\n border-width: 1px;\n border-radius: 5px;\n color: #d3dae3;\n padding: 2px;\n background-color: #100E19;\n}\nQPushButton::default{\n border-style: solid;\n border-color: #050a0e;\n border-width: 1px;\n border-radius: 5px;\n color: #FFFFFF;\n padding: 2px;\n background-color: #151a1e;\n}\nQPushButton:hover{\n border-style: solid;\n border-top-color: qlineargradient(spread:pad, x1:0, y1:1, x2:1, y2:1, stop:0 #C0DB50, stop:0.4 #C0DB50, stop:0.5 #100E19, stop:1 #100E19);\n border-bottom-color: qlineargradient(spread:pad, x1:0, y1:1, x2:1, y2:1, stop:0 #100E19, stop:0.5 #100E19, stop:0.6 #C0DB50, stop:1 #C0DB50);\n border-left-color: qlineargradient(spread:pad, x1:0, y1:0, x2:0, y2:1, stop:0 #C0DB50, stop:0.3 #C0DB50, stop:0.7 #100E19, stop:1 #100E19);\n border-right-color: qlineargradient(spread:pad, x1:0, y1:1, x2:0, y2:0, stop:0 #C0DB50, stop:0.3 #C0DB50, stop:0.7 #100E19, stop:1 #100E19);\n border-width: 2px;\n border-radius: 1px;\n color: #d3dae3;\n padding: 2px;\n}\nQPushButton:pressed{\n border-style: solid;\n border-top-color: qlineargradient(spread:pad, x1:0, y1:1, x2:1, y2:1, stop:0 #d33af1, stop:0.4 #d33af1, stop:0.5 #100E19, stop:1 #100E19);\n border-bottom-color: qlineargradient(spread:pad, x1:0, y1:1, x2:1, y2:1, stop:0 #100E19, stop:0.5 #100E19, stop:0.6 #d33af1, stop:1 #d33af1);\n border-left-color: qlineargradient(spread:pad, x1:0, y1:0, x2:0, y2:1, stop:0 #d33af1, stop:0.3 #d33af1, stop:0.7 #100E19, stop:1 #100E19);\n border-right-color: 
qlineargradient(spread:pad, x1:0, y1:1, x2:0, y2:0, stop:0 #d33af1, stop:0.3 #d33af1, stop:0.7 #100E19, stop:1 #100E19);\n border-width: 2px;\n border-radius: 1px;\n color: #d3dae3;\n padding: 2px;\n}\"\"\"\n )\n self.centralwidget = QtWidgets.QWidget(MainWindow)\n self.centralwidget.setObjectName('centralwidget')\n self.pushButton_3 = QtWidgets.QPushButton(self.centralwidget)\n self.pushButton_3.setGeometry(QtCore.QRect(330, 180, 141, 31))\n self.pushButton_3.setStyleSheet('')\n self.pushButton_3.setObjectName('pushButton_3')\n self.lineEdit = QtWidgets.QLineEdit(self.centralwidget)\n self.lineEdit.setGeometry(QtCore.QRect(130, 20, 341, 25))\n self.lineEdit.setObjectName('lineEdit')\n self.label_2 = QtWidgets.QLabel(self.centralwidget)\n self.label_2.setGeometry(QtCore.QRect(20, 70, 91, 20))\n self.label_2.setObjectName('label_2')\n self.lineEdit_2 = QtWidgets.QLineEdit(self.centralwidget)\n self.lineEdit_2.setGeometry(QtCore.QRect(130, 70, 261, 25))\n self.lineEdit_2.setObjectName('lineEdit_2')\n self.pushButton_2 = QtWidgets.QPushButton(self.centralwidget)\n self.pushButton_2.setGeometry(QtCore.QRect(130, 180, 141, 31))\n self.pushButton_2.setStyleSheet('')\n self.pushButton_2.setObjectName('pushButton_2')\n self.label_3 = QtWidgets.QLabel(self.centralwidget)\n self.label_3.setGeometry(QtCore.QRect(20, 120, 64, 17))\n self.label_3.setObjectName('label_3')\n self.pushButton = QtWidgets.QPushButton(self.centralwidget)\n self.pushButton.setGeometry(QtCore.QRect(400, 70, 71, 25))\n self.pushButton.setStyleSheet('')\n self.pushButton.setObjectName('pushButton')\n self.label = QtWidgets.QLabel(self.centralwidget)\n self.label.setGeometry(QtCore.QRect(20, 20, 71, 21))\n self.label.setObjectName('label')\n self.comboBox = QtWidgets.QComboBox(self.centralwidget)\n self.comboBox.setGeometry(QtCore.QRect(130, 120, 341, 25))\n self.comboBox.setStyleSheet('background-color: rgb(101, 101, 101);')\n self.comboBox.setObjectName('comboBox')\n 
MainWindow.setCentralWidget(self.centralwidget)\n self.menubar = QtWidgets.QMenuBar(MainWindow)\n self.menubar.setGeometry(QtCore.QRect(0, 0, 500, 22))\n self.menubar.setObjectName('menubar')\n MainWindow.setMenuBar(self.menubar)\n self.statusbar = QtWidgets.QStatusBar(MainWindow)\n self.statusbar.setObjectName('statusbar')\n MainWindow.setStatusBar(self.statusbar)\n self.retranslateUi(MainWindow)\n QtCore.QMetaObject.connectSlotsByName(MainWindow)\n\n def retranslateUi(self, MainWindow):\n _translate = QtCore.QCoreApplication.translate\n MainWindow.setWindowTitle(_translate('MainWindow', 'MainWindow'))\n self.pushButton_3.setText(_translate('MainWindow', 'Download'))\n self.label_2.setText(_translate('MainWindow', 'Save location'))\n self.pushButton_2.setText(_translate('MainWindow', 'Search'))\n self.label_3.setText(_translate('MainWindow', 'Qualiti'))\n self.pushButton.setText(_translate('MainWindow', 'Browse'))\n self.label.setText(_translate('MainWindow', 'Video URL'))\n",
"step-5": "# -*- coding: utf-8 -*-\n\n# Form implementation generated from reading ui file 'main.ui'\n#\n# Created by: PyQt5 UI code generator 5.14.1\n#\n# WARNING! All changes made in this file will be lost!\n\n\nfrom PyQt5 import QtCore, QtGui, QtWidgets\n\n\nclass Ui_MainWindow(object):\n def setupUi(self, MainWindow):\n MainWindow.setObjectName(\"MainWindow\")\n MainWindow.resize(500, 251)\n MainWindow.setStyleSheet(\"/*\\n\"\n\"Neon Style Sheet for QT Applications (QpushButton)\\n\"\n\"Author: Jaime A. Quiroga P.\\n\"\n\"Company: GTRONICK\\n\"\n\"Last updated: 24/10/2020, 15:42.\\n\"\n\"Available at: https://github.com/GTRONICK/QSS/blob/master/NeonButtons.qss\\n\"\n\"*/\\n\"\n\"QPushButton{\\n\"\n\" border-style: solid;\\n\"\n\" border-color: #050a0e;\\n\"\n\" border-width: 1px;\\n\"\n\" border-radius: 5px;\\n\"\n\" color: #d3dae3;\\n\"\n\" padding: 2px;\\n\"\n\" background-color: #100E19;\\n\"\n\"}\\n\"\n\"QPushButton::default{\\n\"\n\" border-style: solid;\\n\"\n\" border-color: #050a0e;\\n\"\n\" border-width: 1px;\\n\"\n\" border-radius: 5px;\\n\"\n\" color: #FFFFFF;\\n\"\n\" padding: 2px;\\n\"\n\" background-color: #151a1e;\\n\"\n\"}\\n\"\n\"QPushButton:hover{\\n\"\n\" border-style: solid;\\n\"\n\" border-top-color: qlineargradient(spread:pad, x1:0, y1:1, x2:1, y2:1, stop:0 #C0DB50, stop:0.4 #C0DB50, stop:0.5 #100E19, stop:1 #100E19);\\n\"\n\" border-bottom-color: qlineargradient(spread:pad, x1:0, y1:1, x2:1, y2:1, stop:0 #100E19, stop:0.5 #100E19, stop:0.6 #C0DB50, stop:1 #C0DB50);\\n\"\n\" border-left-color: qlineargradient(spread:pad, x1:0, y1:0, x2:0, y2:1, stop:0 #C0DB50, stop:0.3 #C0DB50, stop:0.7 #100E19, stop:1 #100E19);\\n\"\n\" border-right-color: qlineargradient(spread:pad, x1:0, y1:1, x2:0, y2:0, stop:0 #C0DB50, stop:0.3 #C0DB50, stop:0.7 #100E19, stop:1 #100E19);\\n\"\n\" border-width: 2px;\\n\"\n\" border-radius: 1px;\\n\"\n\" color: #d3dae3;\\n\"\n\" padding: 2px;\\n\"\n\"}\\n\"\n\"QPushButton:pressed{\\n\"\n\" border-style: solid;\\n\"\n\" 
border-top-color: qlineargradient(spread:pad, x1:0, y1:1, x2:1, y2:1, stop:0 #d33af1, stop:0.4 #d33af1, stop:0.5 #100E19, stop:1 #100E19);\\n\"\n\" border-bottom-color: qlineargradient(spread:pad, x1:0, y1:1, x2:1, y2:1, stop:0 #100E19, stop:0.5 #100E19, stop:0.6 #d33af1, stop:1 #d33af1);\\n\"\n\" border-left-color: qlineargradient(spread:pad, x1:0, y1:0, x2:0, y2:1, stop:0 #d33af1, stop:0.3 #d33af1, stop:0.7 #100E19, stop:1 #100E19);\\n\"\n\" border-right-color: qlineargradient(spread:pad, x1:0, y1:1, x2:0, y2:0, stop:0 #d33af1, stop:0.3 #d33af1, stop:0.7 #100E19, stop:1 #100E19);\\n\"\n\" border-width: 2px;\\n\"\n\" border-radius: 1px;\\n\"\n\" color: #d3dae3;\\n\"\n\" padding: 2px;\\n\"\n\"}\")\n self.centralwidget = QtWidgets.QWidget(MainWindow)\n self.centralwidget.setObjectName(\"centralwidget\")\n self.pushButton_3 = QtWidgets.QPushButton(self.centralwidget)\n self.pushButton_3.setGeometry(QtCore.QRect(330, 180, 141, 31))\n self.pushButton_3.setStyleSheet(\"\")\n self.pushButton_3.setObjectName(\"pushButton_3\")\n self.lineEdit = QtWidgets.QLineEdit(self.centralwidget)\n self.lineEdit.setGeometry(QtCore.QRect(130, 20, 341, 25))\n self.lineEdit.setObjectName(\"lineEdit\")\n self.label_2 = QtWidgets.QLabel(self.centralwidget)\n self.label_2.setGeometry(QtCore.QRect(20, 70, 91, 20))\n self.label_2.setObjectName(\"label_2\")\n self.lineEdit_2 = QtWidgets.QLineEdit(self.centralwidget)\n self.lineEdit_2.setGeometry(QtCore.QRect(130, 70, 261, 25))\n self.lineEdit_2.setObjectName(\"lineEdit_2\")\n self.pushButton_2 = QtWidgets.QPushButton(self.centralwidget)\n self.pushButton_2.setGeometry(QtCore.QRect(130, 180, 141, 31))\n self.pushButton_2.setStyleSheet(\"\")\n self.pushButton_2.setObjectName(\"pushButton_2\")\n self.label_3 = QtWidgets.QLabel(self.centralwidget)\n self.label_3.setGeometry(QtCore.QRect(20, 120, 64, 17))\n self.label_3.setObjectName(\"label_3\")\n self.pushButton = QtWidgets.QPushButton(self.centralwidget)\n 
self.pushButton.setGeometry(QtCore.QRect(400, 70, 71, 25))\n self.pushButton.setStyleSheet(\"\")\n self.pushButton.setObjectName(\"pushButton\")\n self.label = QtWidgets.QLabel(self.centralwidget)\n self.label.setGeometry(QtCore.QRect(20, 20, 71, 21))\n self.label.setObjectName(\"label\")\n self.comboBox = QtWidgets.QComboBox(self.centralwidget)\n self.comboBox.setGeometry(QtCore.QRect(130, 120, 341, 25))\n self.comboBox.setStyleSheet(\"background-color: rgb(101, 101, 101);\")\n self.comboBox.setObjectName(\"comboBox\")\n MainWindow.setCentralWidget(self.centralwidget)\n self.menubar = QtWidgets.QMenuBar(MainWindow)\n self.menubar.setGeometry(QtCore.QRect(0, 0, 500, 22))\n self.menubar.setObjectName(\"menubar\")\n MainWindow.setMenuBar(self.menubar)\n self.statusbar = QtWidgets.QStatusBar(MainWindow)\n self.statusbar.setObjectName(\"statusbar\")\n MainWindow.setStatusBar(self.statusbar)\n\n self.retranslateUi(MainWindow)\n QtCore.QMetaObject.connectSlotsByName(MainWindow)\n\n def retranslateUi(self, MainWindow):\n _translate = QtCore.QCoreApplication.translate\n MainWindow.setWindowTitle(_translate(\"MainWindow\", \"MainWindow\"))\n self.pushButton_3.setText(_translate(\"MainWindow\", \"Download\"))\n self.label_2.setText(_translate(\"MainWindow\", \"Save location\"))\n self.pushButton_2.setText(_translate(\"MainWindow\", \"Search\"))\n self.label_3.setText(_translate(\"MainWindow\", \"Qualiti\"))\n self.pushButton.setText(_translate(\"MainWindow\", \"Browse\"))\n self.label.setText(_translate(\"MainWindow\", \"Video URL\"))\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
class Sprite:
def __init__(self, name: str, index: str, xCoord: int, yCoord: int,
heading: int, scale: float, volume: int, pan: int, rotation: int,
draggable: bool, hidden: bool, costumes: str, color: (float, float,
float), pen: str, id: int):
self.name = name
self.index = index
self.coords = xCoord, yCoord
self.heading = heading
self.scale = scale
self.volume = volume
self.pan = pan
self.rotation = rotation
self.draggable = draggable
self.hidden = hidden
self.costumes = costumes
self.color = color
self.pen = pen
self.id = id
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Stage:
<|reserved_special_token_0|>
class Sprite:
def __init__(self, name: str, index: str, xCoord: int, yCoord: int,
heading: int, scale: float, volume: int, pan: int, rotation: int,
draggable: bool, hidden: bool, costumes: str, color: (float, float,
float), pen: str, id: int):
self.name = name
self.index = index
self.coords = xCoord, yCoord
self.heading = heading
self.scale = scale
self.volume = volume
self.pan = pan
self.rotation = rotation
self.draggable = draggable
self.hidden = hidden
self.costumes = costumes
self.color = color
self.pen = pen
self.id = id
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Stage:
def __init__(self, costumes, sounds, variables, blocks, scripts, sprites):
self.costumes = costumes
self.sounds = sounds
self.variables = variables
self.blocks = blocks
self.scripts = scripts
self.sprites = sprites
class Sprite:
def __init__(self, name: str, index: str, xCoord: int, yCoord: int,
heading: int, scale: float, volume: int, pan: int, rotation: int,
draggable: bool, hidden: bool, costumes: str, color: (float, float,
float), pen: str, id: int):
self.name = name
self.index = index
self.coords = xCoord, yCoord
self.heading = heading
self.scale = scale
self.volume = volume
self.pan = pan
self.rotation = rotation
self.draggable = draggable
self.hidden = hidden
self.costumes = costumes
self.color = color
self.pen = pen
self.id = id
<|reserved_special_token_1|>
import xml.etree.ElementTree as ET
class Stage:
    """Aggregates the parsed components of a project stage."""

    def __init__(self, costumes, sounds, variables, blocks, scripts, sprites):
        """Store every stage component exactly as supplied."""
        (self.costumes, self.sounds, self.variables,
         self.blocks, self.scripts, self.sprites) = (
            costumes, sounds, variables, blocks, scripts, sprites)
class Sprite:
    """A single sprite parsed from a project file.

    Attributes mirror the constructor arguments, except that ``xCoord`` and
    ``yCoord`` are combined into a single ``coords`` tuple.
    """

    def __init__(self, name: str, index: str, xCoord: int, yCoord: int,
                 heading: int, scale: float, volume: int, pan: int,
                 rotation: int, draggable: bool, hidden: bool, costumes: str,
                 color: "tuple[float, float, float]", pen: str, id: int):
        """Capture all sprite properties verbatim.

        The original annotation ``(float, float, float)`` was a tuple of
        type objects, not a valid type -- replaced with the string form
        ``"tuple[float, float, float]"`` (lazy, needs no import).

        Note: ``id`` shadows the builtin of the same name; the parameter
        name is kept so keyword callers keep working.
        """
        self.name = name
        self.index = index
        # x and y are collapsed into one (x, y) tuple.
        self.coords = xCoord, yCoord
        self.heading = heading
        self.scale = scale
        self.volume = volume
        self.pan = pan
        self.rotation = rotation
        self.draggable = draggable
        self.hidden = hidden
        self.costumes = costumes
        self.color = color
        self.pen = pen
        self.id = id
|
flexible
|
{
"blob_id": "575768c200ad81f878c132d68569c84f497091f2",
"index": 8137,
"step-1": "<mask token>\n\n\nclass Sprite:\n\n def __init__(self, name: str, index: str, xCoord: int, yCoord: int,\n heading: int, scale: float, volume: int, pan: int, rotation: int,\n draggable: bool, hidden: bool, costumes: str, color: (float, float,\n float), pen: str, id: int):\n self.name = name\n self.index = index\n self.coords = xCoord, yCoord\n self.heading = heading\n self.scale = scale\n self.volume = volume\n self.pan = pan\n self.rotation = rotation\n self.draggable = draggable\n self.hidden = hidden\n self.costumes = costumes\n self.color = color\n self.pen = pen\n self.id = id\n",
"step-2": "<mask token>\n\n\nclass Stage:\n <mask token>\n\n\nclass Sprite:\n\n def __init__(self, name: str, index: str, xCoord: int, yCoord: int,\n heading: int, scale: float, volume: int, pan: int, rotation: int,\n draggable: bool, hidden: bool, costumes: str, color: (float, float,\n float), pen: str, id: int):\n self.name = name\n self.index = index\n self.coords = xCoord, yCoord\n self.heading = heading\n self.scale = scale\n self.volume = volume\n self.pan = pan\n self.rotation = rotation\n self.draggable = draggable\n self.hidden = hidden\n self.costumes = costumes\n self.color = color\n self.pen = pen\n self.id = id\n",
"step-3": "<mask token>\n\n\nclass Stage:\n\n def __init__(self, costumes, sounds, variables, blocks, scripts, sprites):\n self.costumes = costumes\n self.sounds = sounds\n self.variables = variables\n self.blocks = blocks\n self.scripts = scripts\n self.sprites = sprites\n\n\nclass Sprite:\n\n def __init__(self, name: str, index: str, xCoord: int, yCoord: int,\n heading: int, scale: float, volume: int, pan: int, rotation: int,\n draggable: bool, hidden: bool, costumes: str, color: (float, float,\n float), pen: str, id: int):\n self.name = name\n self.index = index\n self.coords = xCoord, yCoord\n self.heading = heading\n self.scale = scale\n self.volume = volume\n self.pan = pan\n self.rotation = rotation\n self.draggable = draggable\n self.hidden = hidden\n self.costumes = costumes\n self.color = color\n self.pen = pen\n self.id = id\n",
"step-4": "import xml.etree.ElementTree as ET\n\n\nclass Stage:\n\n def __init__(self, costumes, sounds, variables, blocks, scripts, sprites):\n self.costumes = costumes\n self.sounds = sounds\n self.variables = variables\n self.blocks = blocks\n self.scripts = scripts\n self.sprites = sprites\n\n\nclass Sprite:\n\n def __init__(self, name: str, index: str, xCoord: int, yCoord: int,\n heading: int, scale: float, volume: int, pan: int, rotation: int,\n draggable: bool, hidden: bool, costumes: str, color: (float, float,\n float), pen: str, id: int):\n self.name = name\n self.index = index\n self.coords = xCoord, yCoord\n self.heading = heading\n self.scale = scale\n self.volume = volume\n self.pan = pan\n self.rotation = rotation\n self.draggable = draggable\n self.hidden = hidden\n self.costumes = costumes\n self.color = color\n self.pen = pen\n self.id = id\n",
"step-5": null,
"step-ids": [
2,
3,
4,
5
]
}
|
[
2,
3,
4,
5
] |
<|reserved_special_token_0|>
@unittest.skipIf(sys.platform.startswith('win'),
'subprocess complications on Windows')
class TestSharedModules(unittest.TestCase):
def setUp(self):
pass
def test_shared_modules(self):
jep_pipe(build_java_process_cmd('jep.test.TestSharedModules'))
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@unittest.skipIf(sys.platform.startswith('win'),
'subprocess complications on Windows')
class TestSharedModules(unittest.TestCase):
def setUp(self):
pass
def test_shared_modules(self):
jep_pipe(build_java_process_cmd('jep.test.TestSharedModules'))
@unittest.skipIf(not jep.JEP_NUMPY_ENABLED,
'Jep library built without numpy support')
def test_numpy_prod_succeeds(self):
jep_pipe(build_java_process_cmd('jep.test.numpy.TestNumpyProdShared'))
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@unittest.skipIf(sys.platform.startswith('win'),
'subprocess complications on Windows')
class TestSharedModules(unittest.TestCase):
def setUp(self):
pass
def test_shared_modules(self):
jep_pipe(build_java_process_cmd('jep.test.TestSharedModules'))
@unittest.skipIf(not jep.JEP_NUMPY_ENABLED,
'Jep library built without numpy support')
def test_numpy_prod_succeeds(self):
jep_pipe(build_java_process_cmd('jep.test.numpy.TestNumpyProdShared'))
@unittest.skipIf(not jep.JEP_NUMPY_ENABLED,
'Jep library built without numpy support')
def test_numpy_array_to_string(self):
jep_pipe(build_java_process_cmd(
'jep.test.numpy.TestNumpyArrayToString'))
<|reserved_special_token_1|>
import unittest
import sys
from tests.jep_pipe import jep_pipe
from tests.jep_pipe import build_java_process_cmd
import jep
@unittest.skipIf(sys.platform.startswith('win'),
'subprocess complications on Windows')
class TestSharedModules(unittest.TestCase):
def setUp(self):
pass
def test_shared_modules(self):
jep_pipe(build_java_process_cmd('jep.test.TestSharedModules'))
@unittest.skipIf(not jep.JEP_NUMPY_ENABLED,
'Jep library built without numpy support')
def test_numpy_prod_succeeds(self):
jep_pipe(build_java_process_cmd('jep.test.numpy.TestNumpyProdShared'))
@unittest.skipIf(not jep.JEP_NUMPY_ENABLED,
'Jep library built without numpy support')
def test_numpy_array_to_string(self):
jep_pipe(build_java_process_cmd(
'jep.test.numpy.TestNumpyArrayToString'))
<|reserved_special_token_1|>
import unittest
import sys
from tests.jep_pipe import jep_pipe
from tests.jep_pipe import build_java_process_cmd
import jep
@unittest.skipIf(sys.platform.startswith("win"), "subprocess complications on Windows")
class TestSharedModules(unittest.TestCase):
def setUp(self):
pass
def test_shared_modules(self):
jep_pipe(build_java_process_cmd('jep.test.TestSharedModules'))
@unittest.skipIf(not jep.JEP_NUMPY_ENABLED, 'Jep library built without numpy support')
def test_numpy_prod_succeeds(self):
jep_pipe(build_java_process_cmd('jep.test.numpy.TestNumpyProdShared'))
@unittest.skipIf(not jep.JEP_NUMPY_ENABLED, 'Jep library built without numpy support')
def test_numpy_array_to_string(self):
jep_pipe(build_java_process_cmd(
'jep.test.numpy.TestNumpyArrayToString'))
|
flexible
|
{
"blob_id": "39bc90f34cccebe9a8b1475e396caa1c14f6b2df",
"index": 9004,
"step-1": "<mask token>\n\n\[email protected](sys.platform.startswith('win'),\n 'subprocess complications on Windows')\nclass TestSharedModules(unittest.TestCase):\n\n def setUp(self):\n pass\n\n def test_shared_modules(self):\n jep_pipe(build_java_process_cmd('jep.test.TestSharedModules'))\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\[email protected](sys.platform.startswith('win'),\n 'subprocess complications on Windows')\nclass TestSharedModules(unittest.TestCase):\n\n def setUp(self):\n pass\n\n def test_shared_modules(self):\n jep_pipe(build_java_process_cmd('jep.test.TestSharedModules'))\n\n @unittest.skipIf(not jep.JEP_NUMPY_ENABLED,\n 'Jep library built without numpy support')\n def test_numpy_prod_succeeds(self):\n jep_pipe(build_java_process_cmd('jep.test.numpy.TestNumpyProdShared'))\n <mask token>\n",
"step-3": "<mask token>\n\n\[email protected](sys.platform.startswith('win'),\n 'subprocess complications on Windows')\nclass TestSharedModules(unittest.TestCase):\n\n def setUp(self):\n pass\n\n def test_shared_modules(self):\n jep_pipe(build_java_process_cmd('jep.test.TestSharedModules'))\n\n @unittest.skipIf(not jep.JEP_NUMPY_ENABLED,\n 'Jep library built without numpy support')\n def test_numpy_prod_succeeds(self):\n jep_pipe(build_java_process_cmd('jep.test.numpy.TestNumpyProdShared'))\n\n @unittest.skipIf(not jep.JEP_NUMPY_ENABLED,\n 'Jep library built without numpy support')\n def test_numpy_array_to_string(self):\n jep_pipe(build_java_process_cmd(\n 'jep.test.numpy.TestNumpyArrayToString'))\n",
"step-4": "import unittest\nimport sys\nfrom tests.jep_pipe import jep_pipe\nfrom tests.jep_pipe import build_java_process_cmd\nimport jep\n\n\[email protected](sys.platform.startswith('win'),\n 'subprocess complications on Windows')\nclass TestSharedModules(unittest.TestCase):\n\n def setUp(self):\n pass\n\n def test_shared_modules(self):\n jep_pipe(build_java_process_cmd('jep.test.TestSharedModules'))\n\n @unittest.skipIf(not jep.JEP_NUMPY_ENABLED,\n 'Jep library built without numpy support')\n def test_numpy_prod_succeeds(self):\n jep_pipe(build_java_process_cmd('jep.test.numpy.TestNumpyProdShared'))\n\n @unittest.skipIf(not jep.JEP_NUMPY_ENABLED,\n 'Jep library built without numpy support')\n def test_numpy_array_to_string(self):\n jep_pipe(build_java_process_cmd(\n 'jep.test.numpy.TestNumpyArrayToString'))\n",
"step-5": "import unittest\nimport sys\nfrom tests.jep_pipe import jep_pipe\nfrom tests.jep_pipe import build_java_process_cmd\nimport jep\n\n\[email protected](sys.platform.startswith(\"win\"), \"subprocess complications on Windows\")\nclass TestSharedModules(unittest.TestCase):\n\n def setUp(self):\n pass\n\n def test_shared_modules(self):\n jep_pipe(build_java_process_cmd('jep.test.TestSharedModules'))\n\n @unittest.skipIf(not jep.JEP_NUMPY_ENABLED, 'Jep library built without numpy support')\n def test_numpy_prod_succeeds(self):\n jep_pipe(build_java_process_cmd('jep.test.numpy.TestNumpyProdShared'))\n\n @unittest.skipIf(not jep.JEP_NUMPY_ENABLED, 'Jep library built without numpy support')\n def test_numpy_array_to_string(self):\n jep_pipe(build_java_process_cmd(\n 'jep.test.numpy.TestNumpyArrayToString'))\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
# Generated by Django 3.2.7 on 2021-09-11 19:38
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('cryptocurrency', '0012_rename_cancel_exists_order_cancel_exist'),
]
operations = [
migrations.AlterField(
model_name='order',
name='created_at',
field=models.IntegerField(blank=True, null=True),
),
]
|
normal
|
{
"blob_id": "de347b41cd88947690cb42e043880a80d81e2c5c",
"index": 436,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('cryptocurrency',\n '0012_rename_cancel_exists_order_cancel_exist')]\n operations = [migrations.AlterField(model_name='order', name=\n 'created_at', field=models.IntegerField(blank=True, null=True))]\n",
"step-4": "from django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n dependencies = [('cryptocurrency',\n '0012_rename_cancel_exists_order_cancel_exist')]\n operations = [migrations.AlterField(model_name='order', name=\n 'created_at', field=models.IntegerField(blank=True, null=True))]\n",
"step-5": "# Generated by Django 3.2.7 on 2021-09-11 19:38\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('cryptocurrency', '0012_rename_cancel_exists_order_cancel_exist'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='order',\n name='created_at',\n field=models.IntegerField(blank=True, null=True),\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
class TestTransliteratePackage(unittest.TestCase):
<|reserved_special_token_0|>
def test_romanize_royin_basic(self):
for word in _BASIC_TESTS:
expect = _BASIC_TESTS[word]
self.assertEqual(romanize(word, engine='royin'), expect)
def test_romanize_royin_consistency(self):
for word, part1, part2 in _CONSISTENCY_TESTS:
self.assertEqual(romanize(word, engine='royin'), romanize(part1,
engine='royin') + romanize(part2, engine='royin'))
def test_romanize_thai2rom(self):
self.assertEqual(romanize('แมว', engine='thai2rom'), 'maeo')
self.assertEqual(romanize('บ้านไร่', engine='thai2rom'), 'banrai')
self.assertEqual(romanize('สุนัข', engine='thai2rom'), 'sunak')
self.assertEqual(romanize('นก', engine='thai2rom'), 'nok')
self.assertEqual(romanize('ความอิ่ม', engine='thai2rom'), 'khwam-im')
self.assertEqual(romanize('กานต์ ณรงค์', engine='thai2rom'),
'kan narong')
self.assertEqual(romanize('สกุนต์', engine='thai2rom'), 'sakun')
self.assertEqual(romanize('ชารินทร์', engine='thai2rom'), 'charin')
def test_thai2rom_prepare_sequence(self):
transliterater = ThaiTransliterator()
UNK_TOKEN = 1
END_TOKEN = 3
self.assertListEqual(transliterater._prepare_sequence_in('A').cpu()
.detach().numpy().tolist(), torch.tensor([UNK_TOKEN, END_TOKEN],
dtype=torch.long).cpu().detach().numpy().tolist())
self.assertListEqual(transliterater._prepare_sequence_in('♥').cpu()
.detach().numpy().tolist(), torch.tensor([UNK_TOKEN, END_TOKEN],
dtype=torch.long).cpu().detach().numpy().tolist())
self.assertNotEqual(transliterater._prepare_sequence_in('ก').cpu().
detach().numpy().tolist(), torch.tensor([UNK_TOKEN, END_TOKEN],
dtype=torch.long).cpu().detach().numpy().tolist())
<|reserved_special_token_0|>
def test_pronunciate(self):
self.assertEqual(pronunciate(''), '')
remove('thai_w2p')
self.assertIsNotNone(pronunciate('คน', engine='w2p'))
self.assertIsNotNone(pronunciate('แมว', engine='w2p'))
self.assertIsNotNone(pronunciate('มข.', engine='w2p'))
self.assertIsNotNone(pronunciate('มช.', engine='w2p'))
self.assertIsNotNone(pronunciate('jks', engine='w2p'))
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class TestTransliteratePackage(unittest.TestCase):
<|reserved_special_token_0|>
def test_romanize_royin_basic(self):
for word in _BASIC_TESTS:
expect = _BASIC_TESTS[word]
self.assertEqual(romanize(word, engine='royin'), expect)
def test_romanize_royin_consistency(self):
for word, part1, part2 in _CONSISTENCY_TESTS:
self.assertEqual(romanize(word, engine='royin'), romanize(part1,
engine='royin') + romanize(part2, engine='royin'))
def test_romanize_thai2rom(self):
self.assertEqual(romanize('แมว', engine='thai2rom'), 'maeo')
self.assertEqual(romanize('บ้านไร่', engine='thai2rom'), 'banrai')
self.assertEqual(romanize('สุนัข', engine='thai2rom'), 'sunak')
self.assertEqual(romanize('นก', engine='thai2rom'), 'nok')
self.assertEqual(romanize('ความอิ่ม', engine='thai2rom'), 'khwam-im')
self.assertEqual(romanize('กานต์ ณรงค์', engine='thai2rom'),
'kan narong')
self.assertEqual(romanize('สกุนต์', engine='thai2rom'), 'sakun')
self.assertEqual(romanize('ชารินทร์', engine='thai2rom'), 'charin')
def test_thai2rom_prepare_sequence(self):
transliterater = ThaiTransliterator()
UNK_TOKEN = 1
END_TOKEN = 3
self.assertListEqual(transliterater._prepare_sequence_in('A').cpu()
.detach().numpy().tolist(), torch.tensor([UNK_TOKEN, END_TOKEN],
dtype=torch.long).cpu().detach().numpy().tolist())
self.assertListEqual(transliterater._prepare_sequence_in('♥').cpu()
.detach().numpy().tolist(), torch.tensor([UNK_TOKEN, END_TOKEN],
dtype=torch.long).cpu().detach().numpy().tolist())
self.assertNotEqual(transliterater._prepare_sequence_in('ก').cpu().
detach().numpy().tolist(), torch.tensor([UNK_TOKEN, END_TOKEN],
dtype=torch.long).cpu().detach().numpy().tolist())
def test_transliterate(self):
self.assertEqual(transliterate(''), '')
self.assertEqual(transliterate('แมว', 'pyicu'), 'mæw')
self.assertEqual(transliterate('คน', engine='ipa'), 'kʰon')
self.assertIsNotNone(transliterate('คน', engine='thaig2p'))
self.assertIsNotNone(transliterate('แมว', engine='thaig2p'))
self.assertIsNotNone(transliterate('คน', engine='tltk_g2p'))
self.assertIsNotNone(transliterate('แมว', engine='tltk_g2p'))
self.assertIsNotNone(transliterate('คน', engine='tltk_ipa'))
self.assertIsNotNone(transliterate('แมว', engine='tltk_ipa'))
self.assertIsNotNone(trans_list('คน'))
self.assertIsNotNone(xsampa_list('คน'))
def test_pronunciate(self):
self.assertEqual(pronunciate(''), '')
remove('thai_w2p')
self.assertIsNotNone(pronunciate('คน', engine='w2p'))
self.assertIsNotNone(pronunciate('แมว', engine='w2p'))
self.assertIsNotNone(pronunciate('มข.', engine='w2p'))
self.assertIsNotNone(pronunciate('มช.', engine='w2p'))
self.assertIsNotNone(pronunciate('jks', engine='w2p'))
def test_puan(self):
self.assertEqual(puan('นาริน'), 'นิน-รา')
self.assertEqual(puan('นาริน', False), 'นินรา')
self.assertEqual(puan('แสงดีนะ'), 'แสง-ดะ-นี')
self.assertEqual(puan('แสงดีนะ', False), 'แสงดะนี')
with self.assertRaises(ValueError):
self.assertEqual(puan('สวัสดีครับ'), 'สวัสดีครับ')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
_BASIC_TESTS = {None: '', '': '', 'abc': 'abc', 'หมอก': 'mok', 'หาย': 'hai',
'แมว': 'maeo', 'เดือน': 'duean', 'ดำ': 'dam', 'ดู': 'du', 'บัว': 'bua',
'กก': 'kok', 'พร': 'phon', 'กร': 'kon', 'กรร': 'kan', 'กรรม': 'kam',
'ฝ้าย': 'fai', 'นพพร': 'nopphon', 'อัก': 'ak'}
_CONSISTENCY_TESTS = [('ตากใบ', 'ตาก', 'ใบ')]
class TestTransliteratePackage(unittest.TestCase):
def test_romanize(self):
self.assertEqual(romanize(None), '')
self.assertEqual(romanize(''), '')
self.assertEqual(romanize('แมว'), 'maeo')
self.assertEqual(romanize('แมว', engine='tltk'), 'maeo')
def test_romanize_royin_basic(self):
for word in _BASIC_TESTS:
expect = _BASIC_TESTS[word]
self.assertEqual(romanize(word, engine='royin'), expect)
def test_romanize_royin_consistency(self):
for word, part1, part2 in _CONSISTENCY_TESTS:
self.assertEqual(romanize(word, engine='royin'), romanize(part1,
engine='royin') + romanize(part2, engine='royin'))
def test_romanize_thai2rom(self):
self.assertEqual(romanize('แมว', engine='thai2rom'), 'maeo')
self.assertEqual(romanize('บ้านไร่', engine='thai2rom'), 'banrai')
self.assertEqual(romanize('สุนัข', engine='thai2rom'), 'sunak')
self.assertEqual(romanize('นก', engine='thai2rom'), 'nok')
self.assertEqual(romanize('ความอิ่ม', engine='thai2rom'), 'khwam-im')
self.assertEqual(romanize('กานต์ ณรงค์', engine='thai2rom'),
'kan narong')
self.assertEqual(romanize('สกุนต์', engine='thai2rom'), 'sakun')
self.assertEqual(romanize('ชารินทร์', engine='thai2rom'), 'charin')
def test_thai2rom_prepare_sequence(self):
transliterater = ThaiTransliterator()
UNK_TOKEN = 1
END_TOKEN = 3
self.assertListEqual(transliterater._prepare_sequence_in('A').cpu()
.detach().numpy().tolist(), torch.tensor([UNK_TOKEN, END_TOKEN],
dtype=torch.long).cpu().detach().numpy().tolist())
self.assertListEqual(transliterater._prepare_sequence_in('♥').cpu()
.detach().numpy().tolist(), torch.tensor([UNK_TOKEN, END_TOKEN],
dtype=torch.long).cpu().detach().numpy().tolist())
self.assertNotEqual(transliterater._prepare_sequence_in('ก').cpu().
detach().numpy().tolist(), torch.tensor([UNK_TOKEN, END_TOKEN],
dtype=torch.long).cpu().detach().numpy().tolist())
def test_transliterate(self):
self.assertEqual(transliterate(''), '')
self.assertEqual(transliterate('แมว', 'pyicu'), 'mæw')
self.assertEqual(transliterate('คน', engine='ipa'), 'kʰon')
self.assertIsNotNone(transliterate('คน', engine='thaig2p'))
self.assertIsNotNone(transliterate('แมว', engine='thaig2p'))
self.assertIsNotNone(transliterate('คน', engine='tltk_g2p'))
self.assertIsNotNone(transliterate('แมว', engine='tltk_g2p'))
self.assertIsNotNone(transliterate('คน', engine='tltk_ipa'))
self.assertIsNotNone(transliterate('แมว', engine='tltk_ipa'))
self.assertIsNotNone(trans_list('คน'))
self.assertIsNotNone(xsampa_list('คน'))
def test_pronunciate(self):
self.assertEqual(pronunciate(''), '')
remove('thai_w2p')
self.assertIsNotNone(pronunciate('คน', engine='w2p'))
self.assertIsNotNone(pronunciate('แมว', engine='w2p'))
self.assertIsNotNone(pronunciate('มข.', engine='w2p'))
self.assertIsNotNone(pronunciate('มช.', engine='w2p'))
self.assertIsNotNone(pronunciate('jks', engine='w2p'))
def test_puan(self):
self.assertEqual(puan('นาริน'), 'นิน-รา')
self.assertEqual(puan('นาริน', False), 'นินรา')
self.assertEqual(puan('แสงดีนะ'), 'แสง-ดะ-นี')
self.assertEqual(puan('แสงดีนะ', False), 'แสงดะนี')
with self.assertRaises(ValueError):
self.assertEqual(puan('สวัสดีครับ'), 'สวัสดีครับ')
<|reserved_special_token_1|>
import unittest
import torch
from pythainlp.transliterate import romanize, transliterate, pronunciate, puan
from pythainlp.transliterate.ipa import trans_list, xsampa_list
from pythainlp.transliterate.thai2rom import ThaiTransliterator
from pythainlp.corpus import remove
_BASIC_TESTS = {None: '', '': '', 'abc': 'abc', 'หมอก': 'mok', 'หาย': 'hai',
'แมว': 'maeo', 'เดือน': 'duean', 'ดำ': 'dam', 'ดู': 'du', 'บัว': 'bua',
'กก': 'kok', 'พร': 'phon', 'กร': 'kon', 'กรร': 'kan', 'กรรม': 'kam',
'ฝ้าย': 'fai', 'นพพร': 'nopphon', 'อัก': 'ak'}
_CONSISTENCY_TESTS = [('ตากใบ', 'ตาก', 'ใบ')]
class TestTransliteratePackage(unittest.TestCase):
def test_romanize(self):
self.assertEqual(romanize(None), '')
self.assertEqual(romanize(''), '')
self.assertEqual(romanize('แมว'), 'maeo')
self.assertEqual(romanize('แมว', engine='tltk'), 'maeo')
def test_romanize_royin_basic(self):
for word in _BASIC_TESTS:
expect = _BASIC_TESTS[word]
self.assertEqual(romanize(word, engine='royin'), expect)
def test_romanize_royin_consistency(self):
for word, part1, part2 in _CONSISTENCY_TESTS:
self.assertEqual(romanize(word, engine='royin'), romanize(part1,
engine='royin') + romanize(part2, engine='royin'))
def test_romanize_thai2rom(self):
self.assertEqual(romanize('แมว', engine='thai2rom'), 'maeo')
self.assertEqual(romanize('บ้านไร่', engine='thai2rom'), 'banrai')
self.assertEqual(romanize('สุนัข', engine='thai2rom'), 'sunak')
self.assertEqual(romanize('นก', engine='thai2rom'), 'nok')
self.assertEqual(romanize('ความอิ่ม', engine='thai2rom'), 'khwam-im')
self.assertEqual(romanize('กานต์ ณรงค์', engine='thai2rom'),
'kan narong')
self.assertEqual(romanize('สกุนต์', engine='thai2rom'), 'sakun')
self.assertEqual(romanize('ชารินทร์', engine='thai2rom'), 'charin')
def test_thai2rom_prepare_sequence(self):
transliterater = ThaiTransliterator()
UNK_TOKEN = 1
END_TOKEN = 3
self.assertListEqual(transliterater._prepare_sequence_in('A').cpu()
.detach().numpy().tolist(), torch.tensor([UNK_TOKEN, END_TOKEN],
dtype=torch.long).cpu().detach().numpy().tolist())
self.assertListEqual(transliterater._prepare_sequence_in('♥').cpu()
.detach().numpy().tolist(), torch.tensor([UNK_TOKEN, END_TOKEN],
dtype=torch.long).cpu().detach().numpy().tolist())
self.assertNotEqual(transliterater._prepare_sequence_in('ก').cpu().
detach().numpy().tolist(), torch.tensor([UNK_TOKEN, END_TOKEN],
dtype=torch.long).cpu().detach().numpy().tolist())
def test_transliterate(self):
self.assertEqual(transliterate(''), '')
self.assertEqual(transliterate('แมว', 'pyicu'), 'mæw')
self.assertEqual(transliterate('คน', engine='ipa'), 'kʰon')
self.assertIsNotNone(transliterate('คน', engine='thaig2p'))
self.assertIsNotNone(transliterate('แมว', engine='thaig2p'))
self.assertIsNotNone(transliterate('คน', engine='tltk_g2p'))
self.assertIsNotNone(transliterate('แมว', engine='tltk_g2p'))
self.assertIsNotNone(transliterate('คน', engine='tltk_ipa'))
self.assertIsNotNone(transliterate('แมว', engine='tltk_ipa'))
self.assertIsNotNone(trans_list('คน'))
self.assertIsNotNone(xsampa_list('คน'))
def test_pronunciate(self):
self.assertEqual(pronunciate(''), '')
remove('thai_w2p')
self.assertIsNotNone(pronunciate('คน', engine='w2p'))
self.assertIsNotNone(pronunciate('แมว', engine='w2p'))
self.assertIsNotNone(pronunciate('มข.', engine='w2p'))
self.assertIsNotNone(pronunciate('มช.', engine='w2p'))
self.assertIsNotNone(pronunciate('jks', engine='w2p'))
def test_puan(self):
self.assertEqual(puan('นาริน'), 'นิน-รา')
self.assertEqual(puan('นาริน', False), 'นินรา')
self.assertEqual(puan('แสงดีนะ'), 'แสง-ดะ-นี')
self.assertEqual(puan('แสงดีนะ', False), 'แสงดะนี')
with self.assertRaises(ValueError):
self.assertEqual(puan('สวัสดีครับ'), 'สวัสดีครับ')
<|reserved_special_token_1|>
# -*- coding: utf-8 -*-
import unittest
import torch
from pythainlp.transliterate import romanize, transliterate, pronunciate, puan
from pythainlp.transliterate.ipa import trans_list, xsampa_list
from pythainlp.transliterate.thai2rom import ThaiTransliterator
from pythainlp.corpus import remove
_BASIC_TESTS = {
None: "",
"": "",
"abc": "abc",
"หมอก": "mok",
"หาย": "hai",
"แมว": "maeo",
"เดือน": "duean",
"ดำ": "dam",
"ดู": "du",
"บัว": "bua",
"กก": "kok",
"พร": "phon",
"กร": "kon",
"กรร": "kan",
"กรรม": "kam",
# "กรม": "krom", # failed
"ฝ้าย": "fai",
"นพพร": "nopphon",
"อัก": "ak",
# "ทีปกร": "thipakon", # failed
# "ธรรพ์": "than", # failed
# "ธรรม": "tham", # failed
# "มหา": "maha", # failed
# "หยาก": "yak", # failed
# "อยาก": "yak", # failed
# "ยมก": "yamok", # failed
# "กลัว": "klua", # failed
# "บ้านไร่": "banrai", # failed
# "ชารินทร์": "charin", # failed
}
# these are set of two-syllable words,
# to test if the transliteration/romanization is consistent, say
# romanize(1+2) = romanize(1) + romanize(2)
_CONSISTENCY_TESTS = [
# ("กระจก", "กระ", "จก"), # failed
# ("ระเบิด", "ระ", "เบิด"), # failed
# ("หยากไย่", "หยาก", "ไย่"), # failed
("ตากใบ", "ตาก", "ใบ"),
# ("จัดสรร", "จัด", "สรร"), # failed
]
class TestTransliteratePackage(unittest.TestCase):
def test_romanize(self):
self.assertEqual(romanize(None), "")
self.assertEqual(romanize(""), "")
self.assertEqual(romanize("แมว"), "maeo")
self.assertEqual(romanize("แมว", engine="tltk"), "maeo")
def test_romanize_royin_basic(self):
for word in _BASIC_TESTS:
expect = _BASIC_TESTS[word]
self.assertEqual(romanize(word, engine="royin"), expect)
def test_romanize_royin_consistency(self):
for word, part1, part2 in _CONSISTENCY_TESTS:
self.assertEqual(
romanize(word, engine="royin"),
(
romanize(part1, engine="royin")
+ romanize(part2, engine="royin")
),
)
def test_romanize_thai2rom(self):
self.assertEqual(romanize("แมว", engine="thai2rom"), "maeo")
self.assertEqual(romanize("บ้านไร่", engine="thai2rom"), "banrai")
self.assertEqual(romanize("สุนัข", engine="thai2rom"), "sunak")
self.assertEqual(romanize("นก", engine="thai2rom"), "nok")
self.assertEqual(romanize("ความอิ่ม", engine="thai2rom"), "khwam-im")
self.assertEqual(
romanize("กานต์ ณรงค์", engine="thai2rom"), "kan narong"
)
self.assertEqual(romanize("สกุนต์", engine="thai2rom"), "sakun")
self.assertEqual(romanize("ชารินทร์", engine="thai2rom"), "charin")
def test_thai2rom_prepare_sequence(self):
transliterater = ThaiTransliterator()
UNK_TOKEN = 1 # UNK_TOKEN or <UNK> is represented by 1
END_TOKEN = 3 # END_TOKEN or <end> is represented by 3
self.assertListEqual(
transliterater._prepare_sequence_in("A")
.cpu()
.detach()
.numpy()
.tolist(),
torch.tensor([UNK_TOKEN, END_TOKEN], dtype=torch.long)
.cpu()
.detach()
.numpy()
.tolist(),
)
self.assertListEqual(
transliterater._prepare_sequence_in("♥")
.cpu()
.detach()
.numpy()
.tolist(),
torch.tensor([UNK_TOKEN, END_TOKEN], dtype=torch.long)
.cpu()
.detach()
.numpy()
.tolist(),
)
self.assertNotEqual(
transliterater._prepare_sequence_in("ก")
.cpu()
.detach()
.numpy()
.tolist(),
torch.tensor([UNK_TOKEN, END_TOKEN], dtype=torch.long)
.cpu()
.detach()
.numpy()
.tolist(),
)
def test_transliterate(self):
self.assertEqual(transliterate(""), "")
self.assertEqual(transliterate("แมว", "pyicu"), "mæw")
self.assertEqual(transliterate("คน", engine="ipa"), "kʰon")
self.assertIsNotNone(transliterate("คน", engine="thaig2p"))
self.assertIsNotNone(transliterate("แมว", engine="thaig2p"))
self.assertIsNotNone(transliterate("คน", engine="tltk_g2p"))
self.assertIsNotNone(transliterate("แมว", engine="tltk_g2p"))
self.assertIsNotNone(transliterate("คน", engine="tltk_ipa"))
self.assertIsNotNone(transliterate("แมว", engine="tltk_ipa"))
self.assertIsNotNone(trans_list("คน"))
self.assertIsNotNone(xsampa_list("คน"))
def test_pronunciate(self):
self.assertEqual(pronunciate(""), "")
remove("thai_w2p")
self.assertIsNotNone(pronunciate("คน", engine="w2p"))
self.assertIsNotNone(pronunciate("แมว", engine="w2p"))
self.assertIsNotNone(pronunciate("มข.", engine="w2p"))
self.assertIsNotNone(pronunciate("มช.", engine="w2p"))
self.assertIsNotNone(pronunciate("jks", engine="w2p"))
def test_puan(self):
self.assertEqual(puan("นาริน"), "นิน-รา")
self.assertEqual(puan("นาริน", False), "นินรา")
self.assertEqual(puan("แสงดีนะ"), "แสง-ดะ-นี")
self.assertEqual(puan("แสงดีนะ", False), "แสงดะนี")
with self.assertRaises(ValueError):
self.assertEqual(puan("สวัสดีครับ"), "สวัสดีครับ")
|
flexible
|
{
"blob_id": "486cfc4bb4b46d78715b11cba44656e8ba077c9b",
"index": 2551,
"step-1": "<mask token>\n\n\nclass TestTransliteratePackage(unittest.TestCase):\n <mask token>\n\n def test_romanize_royin_basic(self):\n for word in _BASIC_TESTS:\n expect = _BASIC_TESTS[word]\n self.assertEqual(romanize(word, engine='royin'), expect)\n\n def test_romanize_royin_consistency(self):\n for word, part1, part2 in _CONSISTENCY_TESTS:\n self.assertEqual(romanize(word, engine='royin'), romanize(part1,\n engine='royin') + romanize(part2, engine='royin'))\n\n def test_romanize_thai2rom(self):\n self.assertEqual(romanize('แมว', engine='thai2rom'), 'maeo')\n self.assertEqual(romanize('บ้านไร่', engine='thai2rom'), 'banrai')\n self.assertEqual(romanize('สุนัข', engine='thai2rom'), 'sunak')\n self.assertEqual(romanize('นก', engine='thai2rom'), 'nok')\n self.assertEqual(romanize('ความอิ่ม', engine='thai2rom'), 'khwam-im')\n self.assertEqual(romanize('กานต์ ณรงค์', engine='thai2rom'),\n 'kan narong')\n self.assertEqual(romanize('สกุนต์', engine='thai2rom'), 'sakun')\n self.assertEqual(romanize('ชารินทร์', engine='thai2rom'), 'charin')\n\n def test_thai2rom_prepare_sequence(self):\n transliterater = ThaiTransliterator()\n UNK_TOKEN = 1\n END_TOKEN = 3\n self.assertListEqual(transliterater._prepare_sequence_in('A').cpu()\n .detach().numpy().tolist(), torch.tensor([UNK_TOKEN, END_TOKEN],\n dtype=torch.long).cpu().detach().numpy().tolist())\n self.assertListEqual(transliterater._prepare_sequence_in('♥').cpu()\n .detach().numpy().tolist(), torch.tensor([UNK_TOKEN, END_TOKEN],\n dtype=torch.long).cpu().detach().numpy().tolist())\n self.assertNotEqual(transliterater._prepare_sequence_in('ก').cpu().\n detach().numpy().tolist(), torch.tensor([UNK_TOKEN, END_TOKEN],\n dtype=torch.long).cpu().detach().numpy().tolist())\n <mask token>\n\n def test_pronunciate(self):\n self.assertEqual(pronunciate(''), '')\n remove('thai_w2p')\n self.assertIsNotNone(pronunciate('คน', engine='w2p'))\n self.assertIsNotNone(pronunciate('แมว', engine='w2p'))\n 
self.assertIsNotNone(pronunciate('มข.', engine='w2p'))\n self.assertIsNotNone(pronunciate('มช.', engine='w2p'))\n self.assertIsNotNone(pronunciate('jks', engine='w2p'))\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass TestTransliteratePackage(unittest.TestCase):\n <mask token>\n\n def test_romanize_royin_basic(self):\n for word in _BASIC_TESTS:\n expect = _BASIC_TESTS[word]\n self.assertEqual(romanize(word, engine='royin'), expect)\n\n def test_romanize_royin_consistency(self):\n for word, part1, part2 in _CONSISTENCY_TESTS:\n self.assertEqual(romanize(word, engine='royin'), romanize(part1,\n engine='royin') + romanize(part2, engine='royin'))\n\n def test_romanize_thai2rom(self):\n self.assertEqual(romanize('แมว', engine='thai2rom'), 'maeo')\n self.assertEqual(romanize('บ้านไร่', engine='thai2rom'), 'banrai')\n self.assertEqual(romanize('สุนัข', engine='thai2rom'), 'sunak')\n self.assertEqual(romanize('นก', engine='thai2rom'), 'nok')\n self.assertEqual(romanize('ความอิ่ม', engine='thai2rom'), 'khwam-im')\n self.assertEqual(romanize('กานต์ ณรงค์', engine='thai2rom'),\n 'kan narong')\n self.assertEqual(romanize('สกุนต์', engine='thai2rom'), 'sakun')\n self.assertEqual(romanize('ชารินทร์', engine='thai2rom'), 'charin')\n\n def test_thai2rom_prepare_sequence(self):\n transliterater = ThaiTransliterator()\n UNK_TOKEN = 1\n END_TOKEN = 3\n self.assertListEqual(transliterater._prepare_sequence_in('A').cpu()\n .detach().numpy().tolist(), torch.tensor([UNK_TOKEN, END_TOKEN],\n dtype=torch.long).cpu().detach().numpy().tolist())\n self.assertListEqual(transliterater._prepare_sequence_in('♥').cpu()\n .detach().numpy().tolist(), torch.tensor([UNK_TOKEN, END_TOKEN],\n dtype=torch.long).cpu().detach().numpy().tolist())\n self.assertNotEqual(transliterater._prepare_sequence_in('ก').cpu().\n detach().numpy().tolist(), torch.tensor([UNK_TOKEN, END_TOKEN],\n dtype=torch.long).cpu().detach().numpy().tolist())\n\n def test_transliterate(self):\n self.assertEqual(transliterate(''), '')\n self.assertEqual(transliterate('แมว', 'pyicu'), 'mæw')\n self.assertEqual(transliterate('คน', engine='ipa'), 'kʰon')\n self.assertIsNotNone(transliterate('คน', engine='thaig2p'))\n 
self.assertIsNotNone(transliterate('แมว', engine='thaig2p'))\n self.assertIsNotNone(transliterate('คน', engine='tltk_g2p'))\n self.assertIsNotNone(transliterate('แมว', engine='tltk_g2p'))\n self.assertIsNotNone(transliterate('คน', engine='tltk_ipa'))\n self.assertIsNotNone(transliterate('แมว', engine='tltk_ipa'))\n self.assertIsNotNone(trans_list('คน'))\n self.assertIsNotNone(xsampa_list('คน'))\n\n def test_pronunciate(self):\n self.assertEqual(pronunciate(''), '')\n remove('thai_w2p')\n self.assertIsNotNone(pronunciate('คน', engine='w2p'))\n self.assertIsNotNone(pronunciate('แมว', engine='w2p'))\n self.assertIsNotNone(pronunciate('มข.', engine='w2p'))\n self.assertIsNotNone(pronunciate('มช.', engine='w2p'))\n self.assertIsNotNone(pronunciate('jks', engine='w2p'))\n\n def test_puan(self):\n self.assertEqual(puan('นาริน'), 'นิน-รา')\n self.assertEqual(puan('นาริน', False), 'นินรา')\n self.assertEqual(puan('แสงดีนะ'), 'แสง-ดะ-นี')\n self.assertEqual(puan('แสงดีนะ', False), 'แสงดะนี')\n with self.assertRaises(ValueError):\n self.assertEqual(puan('สวัสดีครับ'), 'สวัสดีครับ')\n",
"step-3": "<mask token>\n_BASIC_TESTS = {None: '', '': '', 'abc': 'abc', 'หมอก': 'mok', 'หาย': 'hai',\n 'แมว': 'maeo', 'เดือน': 'duean', 'ดำ': 'dam', 'ดู': 'du', 'บัว': 'bua',\n 'กก': 'kok', 'พร': 'phon', 'กร': 'kon', 'กรร': 'kan', 'กรรม': 'kam',\n 'ฝ้าย': 'fai', 'นพพร': 'nopphon', 'อัก': 'ak'}\n_CONSISTENCY_TESTS = [('ตากใบ', 'ตาก', 'ใบ')]\n\n\nclass TestTransliteratePackage(unittest.TestCase):\n\n def test_romanize(self):\n self.assertEqual(romanize(None), '')\n self.assertEqual(romanize(''), '')\n self.assertEqual(romanize('แมว'), 'maeo')\n self.assertEqual(romanize('แมว', engine='tltk'), 'maeo')\n\n def test_romanize_royin_basic(self):\n for word in _BASIC_TESTS:\n expect = _BASIC_TESTS[word]\n self.assertEqual(romanize(word, engine='royin'), expect)\n\n def test_romanize_royin_consistency(self):\n for word, part1, part2 in _CONSISTENCY_TESTS:\n self.assertEqual(romanize(word, engine='royin'), romanize(part1,\n engine='royin') + romanize(part2, engine='royin'))\n\n def test_romanize_thai2rom(self):\n self.assertEqual(romanize('แมว', engine='thai2rom'), 'maeo')\n self.assertEqual(romanize('บ้านไร่', engine='thai2rom'), 'banrai')\n self.assertEqual(romanize('สุนัข', engine='thai2rom'), 'sunak')\n self.assertEqual(romanize('นก', engine='thai2rom'), 'nok')\n self.assertEqual(romanize('ความอิ่ม', engine='thai2rom'), 'khwam-im')\n self.assertEqual(romanize('กานต์ ณรงค์', engine='thai2rom'),\n 'kan narong')\n self.assertEqual(romanize('สกุนต์', engine='thai2rom'), 'sakun')\n self.assertEqual(romanize('ชารินทร์', engine='thai2rom'), 'charin')\n\n def test_thai2rom_prepare_sequence(self):\n transliterater = ThaiTransliterator()\n UNK_TOKEN = 1\n END_TOKEN = 3\n self.assertListEqual(transliterater._prepare_sequence_in('A').cpu()\n .detach().numpy().tolist(), torch.tensor([UNK_TOKEN, END_TOKEN],\n dtype=torch.long).cpu().detach().numpy().tolist())\n self.assertListEqual(transliterater._prepare_sequence_in('♥').cpu()\n .detach().numpy().tolist(), torch.tensor([UNK_TOKEN, 
END_TOKEN],\n dtype=torch.long).cpu().detach().numpy().tolist())\n self.assertNotEqual(transliterater._prepare_sequence_in('ก').cpu().\n detach().numpy().tolist(), torch.tensor([UNK_TOKEN, END_TOKEN],\n dtype=torch.long).cpu().detach().numpy().tolist())\n\n def test_transliterate(self):\n self.assertEqual(transliterate(''), '')\n self.assertEqual(transliterate('แมว', 'pyicu'), 'mæw')\n self.assertEqual(transliterate('คน', engine='ipa'), 'kʰon')\n self.assertIsNotNone(transliterate('คน', engine='thaig2p'))\n self.assertIsNotNone(transliterate('แมว', engine='thaig2p'))\n self.assertIsNotNone(transliterate('คน', engine='tltk_g2p'))\n self.assertIsNotNone(transliterate('แมว', engine='tltk_g2p'))\n self.assertIsNotNone(transliterate('คน', engine='tltk_ipa'))\n self.assertIsNotNone(transliterate('แมว', engine='tltk_ipa'))\n self.assertIsNotNone(trans_list('คน'))\n self.assertIsNotNone(xsampa_list('คน'))\n\n def test_pronunciate(self):\n self.assertEqual(pronunciate(''), '')\n remove('thai_w2p')\n self.assertIsNotNone(pronunciate('คน', engine='w2p'))\n self.assertIsNotNone(pronunciate('แมว', engine='w2p'))\n self.assertIsNotNone(pronunciate('มข.', engine='w2p'))\n self.assertIsNotNone(pronunciate('มช.', engine='w2p'))\n self.assertIsNotNone(pronunciate('jks', engine='w2p'))\n\n def test_puan(self):\n self.assertEqual(puan('นาริน'), 'นิน-รา')\n self.assertEqual(puan('นาริน', False), 'นินรา')\n self.assertEqual(puan('แสงดีนะ'), 'แสง-ดะ-นี')\n self.assertEqual(puan('แสงดีนะ', False), 'แสงดะนี')\n with self.assertRaises(ValueError):\n self.assertEqual(puan('สวัสดีครับ'), 'สวัสดีครับ')\n",
"step-4": "import unittest\nimport torch\nfrom pythainlp.transliterate import romanize, transliterate, pronunciate, puan\nfrom pythainlp.transliterate.ipa import trans_list, xsampa_list\nfrom pythainlp.transliterate.thai2rom import ThaiTransliterator\nfrom pythainlp.corpus import remove\n_BASIC_TESTS = {None: '', '': '', 'abc': 'abc', 'หมอก': 'mok', 'หาย': 'hai',\n 'แมว': 'maeo', 'เดือน': 'duean', 'ดำ': 'dam', 'ดู': 'du', 'บัว': 'bua',\n 'กก': 'kok', 'พร': 'phon', 'กร': 'kon', 'กรร': 'kan', 'กรรม': 'kam',\n 'ฝ้าย': 'fai', 'นพพร': 'nopphon', 'อัก': 'ak'}\n_CONSISTENCY_TESTS = [('ตากใบ', 'ตาก', 'ใบ')]\n\n\nclass TestTransliteratePackage(unittest.TestCase):\n\n def test_romanize(self):\n self.assertEqual(romanize(None), '')\n self.assertEqual(romanize(''), '')\n self.assertEqual(romanize('แมว'), 'maeo')\n self.assertEqual(romanize('แมว', engine='tltk'), 'maeo')\n\n def test_romanize_royin_basic(self):\n for word in _BASIC_TESTS:\n expect = _BASIC_TESTS[word]\n self.assertEqual(romanize(word, engine='royin'), expect)\n\n def test_romanize_royin_consistency(self):\n for word, part1, part2 in _CONSISTENCY_TESTS:\n self.assertEqual(romanize(word, engine='royin'), romanize(part1,\n engine='royin') + romanize(part2, engine='royin'))\n\n def test_romanize_thai2rom(self):\n self.assertEqual(romanize('แมว', engine='thai2rom'), 'maeo')\n self.assertEqual(romanize('บ้านไร่', engine='thai2rom'), 'banrai')\n self.assertEqual(romanize('สุนัข', engine='thai2rom'), 'sunak')\n self.assertEqual(romanize('นก', engine='thai2rom'), 'nok')\n self.assertEqual(romanize('ความอิ่ม', engine='thai2rom'), 'khwam-im')\n self.assertEqual(romanize('กานต์ ณรงค์', engine='thai2rom'),\n 'kan narong')\n self.assertEqual(romanize('สกุนต์', engine='thai2rom'), 'sakun')\n self.assertEqual(romanize('ชารินทร์', engine='thai2rom'), 'charin')\n\n def test_thai2rom_prepare_sequence(self):\n transliterater = ThaiTransliterator()\n UNK_TOKEN = 1\n END_TOKEN = 3\n 
self.assertListEqual(transliterater._prepare_sequence_in('A').cpu()\n .detach().numpy().tolist(), torch.tensor([UNK_TOKEN, END_TOKEN],\n dtype=torch.long).cpu().detach().numpy().tolist())\n self.assertListEqual(transliterater._prepare_sequence_in('♥').cpu()\n .detach().numpy().tolist(), torch.tensor([UNK_TOKEN, END_TOKEN],\n dtype=torch.long).cpu().detach().numpy().tolist())\n self.assertNotEqual(transliterater._prepare_sequence_in('ก').cpu().\n detach().numpy().tolist(), torch.tensor([UNK_TOKEN, END_TOKEN],\n dtype=torch.long).cpu().detach().numpy().tolist())\n\n def test_transliterate(self):\n self.assertEqual(transliterate(''), '')\n self.assertEqual(transliterate('แมว', 'pyicu'), 'mæw')\n self.assertEqual(transliterate('คน', engine='ipa'), 'kʰon')\n self.assertIsNotNone(transliterate('คน', engine='thaig2p'))\n self.assertIsNotNone(transliterate('แมว', engine='thaig2p'))\n self.assertIsNotNone(transliterate('คน', engine='tltk_g2p'))\n self.assertIsNotNone(transliterate('แมว', engine='tltk_g2p'))\n self.assertIsNotNone(transliterate('คน', engine='tltk_ipa'))\n self.assertIsNotNone(transliterate('แมว', engine='tltk_ipa'))\n self.assertIsNotNone(trans_list('คน'))\n self.assertIsNotNone(xsampa_list('คน'))\n\n def test_pronunciate(self):\n self.assertEqual(pronunciate(''), '')\n remove('thai_w2p')\n self.assertIsNotNone(pronunciate('คน', engine='w2p'))\n self.assertIsNotNone(pronunciate('แมว', engine='w2p'))\n self.assertIsNotNone(pronunciate('มข.', engine='w2p'))\n self.assertIsNotNone(pronunciate('มช.', engine='w2p'))\n self.assertIsNotNone(pronunciate('jks', engine='w2p'))\n\n def test_puan(self):\n self.assertEqual(puan('นาริน'), 'นิน-รา')\n self.assertEqual(puan('นาริน', False), 'นินรา')\n self.assertEqual(puan('แสงดีนะ'), 'แสง-ดะ-นี')\n self.assertEqual(puan('แสงดีนะ', False), 'แสงดะนี')\n with self.assertRaises(ValueError):\n self.assertEqual(puan('สวัสดีครับ'), 'สวัสดีครับ')\n",
"step-5": "# -*- coding: utf-8 -*-\n\nimport unittest\n\nimport torch\nfrom pythainlp.transliterate import romanize, transliterate, pronunciate, puan\nfrom pythainlp.transliterate.ipa import trans_list, xsampa_list\nfrom pythainlp.transliterate.thai2rom import ThaiTransliterator\nfrom pythainlp.corpus import remove\n\n_BASIC_TESTS = {\n None: \"\",\n \"\": \"\",\n \"abc\": \"abc\",\n \"หมอก\": \"mok\",\n \"หาย\": \"hai\",\n \"แมว\": \"maeo\",\n \"เดือน\": \"duean\",\n \"ดำ\": \"dam\",\n \"ดู\": \"du\",\n \"บัว\": \"bua\",\n \"กก\": \"kok\",\n \"พร\": \"phon\",\n \"กร\": \"kon\",\n \"กรร\": \"kan\",\n \"กรรม\": \"kam\",\n # \"กรม\": \"krom\", # failed\n \"ฝ้าย\": \"fai\",\n \"นพพร\": \"nopphon\",\n \"อัก\": \"ak\",\n # \"ทีปกร\": \"thipakon\", # failed\n # \"ธรรพ์\": \"than\", # failed\n # \"ธรรม\": \"tham\", # failed\n # \"มหา\": \"maha\", # failed\n # \"หยาก\": \"yak\", # failed\n # \"อยาก\": \"yak\", # failed\n # \"ยมก\": \"yamok\", # failed\n # \"กลัว\": \"klua\", # failed\n # \"บ้านไร่\": \"banrai\", # failed\n # \"ชารินทร์\": \"charin\", # failed\n}\n\n# these are set of two-syllable words,\n# to test if the transliteration/romanization is consistent, say\n# romanize(1+2) = romanize(1) + romanize(2)\n_CONSISTENCY_TESTS = [\n # (\"กระจก\", \"กระ\", \"จก\"), # failed\n # (\"ระเบิด\", \"ระ\", \"เบิด\"), # failed\n # (\"หยากไย่\", \"หยาก\", \"ไย่\"), # failed\n (\"ตากใบ\", \"ตาก\", \"ใบ\"),\n # (\"จัดสรร\", \"จัด\", \"สรร\"), # failed\n]\n\n\nclass TestTransliteratePackage(unittest.TestCase):\n def test_romanize(self):\n self.assertEqual(romanize(None), \"\")\n self.assertEqual(romanize(\"\"), \"\")\n self.assertEqual(romanize(\"แมว\"), \"maeo\")\n self.assertEqual(romanize(\"แมว\", engine=\"tltk\"), \"maeo\")\n\n def test_romanize_royin_basic(self):\n for word in _BASIC_TESTS:\n expect = _BASIC_TESTS[word]\n self.assertEqual(romanize(word, engine=\"royin\"), expect)\n\n def test_romanize_royin_consistency(self):\n for word, part1, part2 in _CONSISTENCY_TESTS:\n 
self.assertEqual(\n romanize(word, engine=\"royin\"),\n (\n romanize(part1, engine=\"royin\")\n + romanize(part2, engine=\"royin\")\n ),\n )\n\n def test_romanize_thai2rom(self):\n self.assertEqual(romanize(\"แมว\", engine=\"thai2rom\"), \"maeo\")\n self.assertEqual(romanize(\"บ้านไร่\", engine=\"thai2rom\"), \"banrai\")\n self.assertEqual(romanize(\"สุนัข\", engine=\"thai2rom\"), \"sunak\")\n self.assertEqual(romanize(\"นก\", engine=\"thai2rom\"), \"nok\")\n self.assertEqual(romanize(\"ความอิ่ม\", engine=\"thai2rom\"), \"khwam-im\")\n self.assertEqual(\n romanize(\"กานต์ ณรงค์\", engine=\"thai2rom\"), \"kan narong\"\n )\n self.assertEqual(romanize(\"สกุนต์\", engine=\"thai2rom\"), \"sakun\")\n self.assertEqual(romanize(\"ชารินทร์\", engine=\"thai2rom\"), \"charin\")\n\n def test_thai2rom_prepare_sequence(self):\n transliterater = ThaiTransliterator()\n\n UNK_TOKEN = 1 # UNK_TOKEN or <UNK> is represented by 1\n END_TOKEN = 3 # END_TOKEN or <end> is represented by 3\n\n self.assertListEqual(\n transliterater._prepare_sequence_in(\"A\")\n .cpu()\n .detach()\n .numpy()\n .tolist(),\n torch.tensor([UNK_TOKEN, END_TOKEN], dtype=torch.long)\n .cpu()\n .detach()\n .numpy()\n .tolist(),\n )\n\n self.assertListEqual(\n transliterater._prepare_sequence_in(\"♥\")\n .cpu()\n .detach()\n .numpy()\n .tolist(),\n torch.tensor([UNK_TOKEN, END_TOKEN], dtype=torch.long)\n .cpu()\n .detach()\n .numpy()\n .tolist(),\n )\n\n self.assertNotEqual(\n transliterater._prepare_sequence_in(\"ก\")\n .cpu()\n .detach()\n .numpy()\n .tolist(),\n torch.tensor([UNK_TOKEN, END_TOKEN], dtype=torch.long)\n .cpu()\n .detach()\n .numpy()\n .tolist(),\n )\n\n def test_transliterate(self):\n self.assertEqual(transliterate(\"\"), \"\")\n self.assertEqual(transliterate(\"แมว\", \"pyicu\"), \"mæw\")\n self.assertEqual(transliterate(\"คน\", engine=\"ipa\"), \"kʰon\")\n self.assertIsNotNone(transliterate(\"คน\", engine=\"thaig2p\"))\n self.assertIsNotNone(transliterate(\"แมว\", engine=\"thaig2p\"))\n 
self.assertIsNotNone(transliterate(\"คน\", engine=\"tltk_g2p\"))\n self.assertIsNotNone(transliterate(\"แมว\", engine=\"tltk_g2p\"))\n self.assertIsNotNone(transliterate(\"คน\", engine=\"tltk_ipa\"))\n self.assertIsNotNone(transliterate(\"แมว\", engine=\"tltk_ipa\"))\n self.assertIsNotNone(trans_list(\"คน\"))\n self.assertIsNotNone(xsampa_list(\"คน\"))\n\n def test_pronunciate(self):\n self.assertEqual(pronunciate(\"\"), \"\")\n remove(\"thai_w2p\")\n self.assertIsNotNone(pronunciate(\"คน\", engine=\"w2p\"))\n self.assertIsNotNone(pronunciate(\"แมว\", engine=\"w2p\"))\n self.assertIsNotNone(pronunciate(\"มข.\", engine=\"w2p\"))\n self.assertIsNotNone(pronunciate(\"มช.\", engine=\"w2p\"))\n self.assertIsNotNone(pronunciate(\"jks\", engine=\"w2p\"))\n\n def test_puan(self):\n self.assertEqual(puan(\"นาริน\"), \"นิน-รา\")\n self.assertEqual(puan(\"นาริน\", False), \"นินรา\")\n self.assertEqual(puan(\"แสงดีนะ\"), \"แสง-ดะ-นี\")\n self.assertEqual(puan(\"แสงดีนะ\", False), \"แสงดะนี\")\n with self.assertRaises(ValueError):\n self.assertEqual(puan(\"สวัสดีครับ\"), \"สวัสดีครับ\")\n",
"step-ids": [
6,
8,
10,
11,
12
]
}
|
[
6,
8,
10,
11,
12
] |
import sys
from random import randint


def make_order_line(orderid):
    """Return one CSV "Add order" record: A,<orderid>,<S|B>,<qty>,<price>.

    Side is an even coin flip, quantity is drawn from [1, 100] and price
    from [100, 200] (randint bounds are inclusive).  The randint call order
    (side, quantity, price) matches the original script so a seeded run
    produces identical output.
    """
    side = 'S' if randint(0, 1) == 0 else 'B'
    quantity = randint(1, 100)
    price = randint(100, 200)
    return 'A,' + str(orderid) + ',' + side + ',' + str(quantity) + ',' + str(price)


def main(argv):
    """Print <n> random order records; return a process exit status.

    argv is sys.argv-shaped: argv[1] must be the number of orders.
    """
    if len(argv) != 2:
        # print() with a single argument behaves identically under
        # Python 2 and 3; the old bare `print` statement was py2-only.
        print("Usage: generate.py <number of orders>")
        return 1
    n = int(argv[1])
    for i in range(n):
        print(make_order_line(i + 1))  # order ids are 1-based
    return 0


if __name__ == '__main__':
    sys.exit(main(sys.argv))
|
normal
|
{
"blob_id": "6267c999d3cec051c33cbcde225ff7acaa6bff74",
"index": 5383,
"step-1": "import sys\nfrom random import randint\n\nif len(sys.argv) != 2:\n print \"Usage: generate.py <number of orders>\"\n sys.exit(1)\n\nn = int(sys.argv[1])\n\nfor i in range(0, n):\n action = 'A'\n orderid = i + 1\n side = 'S' if (randint(0,1) == 0) else 'B'\n quantity = randint(1,100)\n price = randint(100,200)\n\n print action + ',' + str(orderid) + ',' + side + ',' + str(quantity) + ',' + str(price)\n\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
<|reserved_special_token_0|>
class Baidu:
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def __init__(self, count):
cfg = ConfigParser.ConfigParser()
cfg.read('config/setting.conf')
self.baidu_page_size = int(cfg.get('search', 'baidu_page_size'))
self.savefile = cfg.get('global', 'savefile')
self.write_title = cfg.get('log', 'write_title')
self.write_name = cfg.get('log', 'write_name')
self.my_filter = SupFilter()
self.my_data = SupGetData()
self.my_status = Supstatus()
self.count = count
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Baidu:
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def __init__(self, count):
cfg = ConfigParser.ConfigParser()
cfg.read('config/setting.conf')
self.baidu_page_size = int(cfg.get('search', 'baidu_page_size'))
self.savefile = cfg.get('global', 'savefile')
self.write_title = cfg.get('log', 'write_title')
self.write_name = cfg.get('log', 'write_name')
self.my_filter = SupFilter()
self.my_data = SupGetData()
self.my_status = Supstatus()
self.count = count
def search(self, key, page_pn):
page_num = str(page_pn / self.baidu_page_size + 1)
search_url = 'http://www.baidu.com/s?wd=key&rn=' + str(self.
baidu_page_size) + '&pn=' + str(page_pn)
search_url = search_url.replace('key', key)
htmlcontent = self.my_data.get_pagehtml(search_url, 'baidu')
regex_page = '<span class="pc">' + page_num + '</span>'
page_compile = re.compile(regex_page)
page_result = page_compile.findall(htmlcontent)
if page_result:
pass
else:
self.my_status.baidu_search = False
return
regex_titleurl = (
'<div class="result c-container ".*<h3 class=".*"><a(?:[^\\<]*\\n[^\\<]*)href = "(?P<url>.+?)"(?:[^\\<]*\\n[^\\<]*)target="_blank"(?:[^\\<]*\\n[^\\<]*)>(?P<title>.+?)</a></h3>'
)
content = re.compile(regex_titleurl)
find_result = content.findall(htmlcontent)
print(
'\x1b[1;37;40m==========================百度 第%s页采集开始================\n'
% page_num)
if self.savefile == 'True':
logfile = open(key + '.txt', 'a')
for i in range(len(find_result)):
dr = re.compile('<[^>]+>', re.S)
title = dr.sub('', find_result[i][1])
realurl = self.my_data.get_baidu_realurl(find_result[i][0])
self.count.all_totals += 1
realurl = self.my_filter.filter_data(realurl, title)
if realurl != 'filter':
self.count.all_checked_totals += 1
print('[ID]:%d [URL]:%s [TITLE]:%s' % (i, realurl, title))
if self.savefile == 'True':
have_url = 0
with open(key + '.txt', 'r') as foo:
for line in foo.readlines():
if realurl in line:
have_url = 1
if have_url == 0:
if self.write_title:
if self.write_name:
logfile.write(self.search_name +
realurl + ' ' + title + '\n')
else:
logfile.write(realurl + ' ' + title +
'\n')
elif self.write_name:
logfile.write(self.search_name + realurl + '\n'
)
else:
logfile.write(realurl + '\n')
else:
self.count.all_delete_totals += 1
else:
self.count.all_filter_totals += 1
if self.savefile == 'True':
logfile.close()
print('==========================百度 第%s页采集结束================\n' %
page_num)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Baidu:
baidu_page_size = 50
search_name = '[baidu]'
def __init__(self, count):
cfg = ConfigParser.ConfigParser()
cfg.read('config/setting.conf')
self.baidu_page_size = int(cfg.get('search', 'baidu_page_size'))
self.savefile = cfg.get('global', 'savefile')
self.write_title = cfg.get('log', 'write_title')
self.write_name = cfg.get('log', 'write_name')
self.my_filter = SupFilter()
self.my_data = SupGetData()
self.my_status = Supstatus()
self.count = count
def search(self, key, page_pn):
page_num = str(page_pn / self.baidu_page_size + 1)
search_url = 'http://www.baidu.com/s?wd=key&rn=' + str(self.
baidu_page_size) + '&pn=' + str(page_pn)
search_url = search_url.replace('key', key)
htmlcontent = self.my_data.get_pagehtml(search_url, 'baidu')
regex_page = '<span class="pc">' + page_num + '</span>'
page_compile = re.compile(regex_page)
page_result = page_compile.findall(htmlcontent)
if page_result:
pass
else:
self.my_status.baidu_search = False
return
regex_titleurl = (
'<div class="result c-container ".*<h3 class=".*"><a(?:[^\\<]*\\n[^\\<]*)href = "(?P<url>.+?)"(?:[^\\<]*\\n[^\\<]*)target="_blank"(?:[^\\<]*\\n[^\\<]*)>(?P<title>.+?)</a></h3>'
)
content = re.compile(regex_titleurl)
find_result = content.findall(htmlcontent)
print(
'\x1b[1;37;40m==========================百度 第%s页采集开始================\n'
% page_num)
if self.savefile == 'True':
logfile = open(key + '.txt', 'a')
for i in range(len(find_result)):
dr = re.compile('<[^>]+>', re.S)
title = dr.sub('', find_result[i][1])
realurl = self.my_data.get_baidu_realurl(find_result[i][0])
self.count.all_totals += 1
realurl = self.my_filter.filter_data(realurl, title)
if realurl != 'filter':
self.count.all_checked_totals += 1
print('[ID]:%d [URL]:%s [TITLE]:%s' % (i, realurl, title))
if self.savefile == 'True':
have_url = 0
with open(key + '.txt', 'r') as foo:
for line in foo.readlines():
if realurl in line:
have_url = 1
if have_url == 0:
if self.write_title:
if self.write_name:
logfile.write(self.search_name +
realurl + ' ' + title + '\n')
else:
logfile.write(realurl + ' ' + title +
'\n')
elif self.write_name:
logfile.write(self.search_name + realurl + '\n'
)
else:
logfile.write(realurl + '\n')
else:
self.count.all_delete_totals += 1
else:
self.count.all_filter_totals += 1
if self.savefile == 'True':
logfile.close()
print('==========================百度 第%s页采集结束================\n' %
page_num)
<|reserved_special_token_1|>
import urllib2
import re
import ConfigParser
from lib.filter import *
from lib.getdata import *
from lib.count import *
from lib.status import *
class Baidu:
baidu_page_size = 50
search_name = '[baidu]'
def __init__(self, count):
cfg = ConfigParser.ConfigParser()
cfg.read('config/setting.conf')
self.baidu_page_size = int(cfg.get('search', 'baidu_page_size'))
self.savefile = cfg.get('global', 'savefile')
self.write_title = cfg.get('log', 'write_title')
self.write_name = cfg.get('log', 'write_name')
self.my_filter = SupFilter()
self.my_data = SupGetData()
self.my_status = Supstatus()
self.count = count
def search(self, key, page_pn):
page_num = str(page_pn / self.baidu_page_size + 1)
search_url = 'http://www.baidu.com/s?wd=key&rn=' + str(self.
baidu_page_size) + '&pn=' + str(page_pn)
search_url = search_url.replace('key', key)
htmlcontent = self.my_data.get_pagehtml(search_url, 'baidu')
regex_page = '<span class="pc">' + page_num + '</span>'
page_compile = re.compile(regex_page)
page_result = page_compile.findall(htmlcontent)
if page_result:
pass
else:
self.my_status.baidu_search = False
return
regex_titleurl = (
'<div class="result c-container ".*<h3 class=".*"><a(?:[^\\<]*\\n[^\\<]*)href = "(?P<url>.+?)"(?:[^\\<]*\\n[^\\<]*)target="_blank"(?:[^\\<]*\\n[^\\<]*)>(?P<title>.+?)</a></h3>'
)
content = re.compile(regex_titleurl)
find_result = content.findall(htmlcontent)
print(
'\x1b[1;37;40m==========================百度 第%s页采集开始================\n'
% page_num)
if self.savefile == 'True':
logfile = open(key + '.txt', 'a')
for i in range(len(find_result)):
dr = re.compile('<[^>]+>', re.S)
title = dr.sub('', find_result[i][1])
realurl = self.my_data.get_baidu_realurl(find_result[i][0])
self.count.all_totals += 1
realurl = self.my_filter.filter_data(realurl, title)
if realurl != 'filter':
self.count.all_checked_totals += 1
print('[ID]:%d [URL]:%s [TITLE]:%s' % (i, realurl, title))
if self.savefile == 'True':
have_url = 0
with open(key + '.txt', 'r') as foo:
for line in foo.readlines():
if realurl in line:
have_url = 1
if have_url == 0:
if self.write_title:
if self.write_name:
logfile.write(self.search_name +
realurl + ' ' + title + '\n')
else:
logfile.write(realurl + ' ' + title +
'\n')
elif self.write_name:
logfile.write(self.search_name + realurl + '\n'
)
else:
logfile.write(realurl + '\n')
else:
self.count.all_delete_totals += 1
else:
self.count.all_filter_totals += 1
if self.savefile == 'True':
logfile.close()
print('==========================百度 第%s页采集结束================\n' %
page_num)
<|reserved_special_token_1|>
# -*- coding: utf-8 -*-
# Project = https://github.com/super-l/search-url.git
# Author = superl
# Blog = www.superl.org QQ:86717375
# Team = Code Security Team(C.S.T) | 铭剑创鼎
import urllib2
import re
import ConfigParser
from lib.filter import *
from lib.getdata import *
from lib.count import *
from lib.status import *
class Baidu():
    """Scrapes Baidu web-search result pages for a keyword.

    Page size and logging behaviour are read from config/setting.conf.
    Each search() call fetches one result page, extracts (url, title)
    pairs with regexes, runs them through the shared filter, and appends
    previously unseen URLs to <key>.txt.
    """

    # Results requested per page; overwritten from the config in __init__.
    baidu_page_size = 50
    # Tag prepended to logged URLs when log.write_name is enabled.
    search_name = '[baidu]'

    def __init__(self,count) :
        # `count` is a shared counter object; search() increments its
        # all_totals / all_checked_totals / all_filter_totals /
        # all_delete_totals attributes.
        cfg = ConfigParser.ConfigParser()
        cfg.read("config/setting.conf")
        self.baidu_page_size = int(cfg.get("search", "baidu_page_size"))
        self.savefile = cfg.get("global", "savefile")
        # NOTE(review): write_title / write_name stay raw config strings, so
        # any non-empty value (even "False") is truthy in the checks below —
        # confirm the intended setting.conf format.
        self.write_title = cfg.get("log", "write_title")
        self.write_name = cfg.get("log", "write_name")
        self.my_filter = SupFilter()     # URL/title filter helper
        self.my_data = SupGetData()      # HTTP fetching helper
        self.my_status = Supstatus()     # shared engine status flags
        self.count = count

    # Fetch one Baidu result page for `key` and harvest its links.
    # page_pn is the 0-based result offset (page index * page size).
    def search(self,key,page_pn):
        # 1-based page number shown by Baidu; this is integer division —
        # the urllib2/ConfigParser imports imply a Python 2 runtime.
        page_num = str(page_pn/self.baidu_page_size+1)
        # Build the query URL, then substitute the keyword placeholder.
        search_url = 'http://www.baidu.com/s?wd=key&rn='+str(self.baidu_page_size)+'&pn='+str(page_pn)
        search_url = search_url.replace('key',key)
        htmlcontent = self.my_data.get_pagehtml(search_url,'baidu')
        # The current page number is rendered as <span class="pc">N</span>;
        # when it is absent we have walked past the last page — flag the
        # engine as exhausted and stop.
        regex_page = r'<span class="pc">'+page_num+'</span>'
        page_compile = re.compile(regex_page)
        page_result = page_compile.findall(htmlcontent)
        if page_result:
            pass
        else:
            self.my_status.baidu_search = False
            return
        # One match per organic result: group "url" captures the redirect
        # link, group "title" the (possibly markup-laden) title text.
        regex_titleurl = r'<div class="result c-container ".*<h3 class=".*"><a(?:[^\<]*\n[^\<]*)href = "(?P<url>.+?)"(?:[^\<]*\n[^\<]*)target="_blank"(?:[^\<]*\n[^\<]*)>(?P<title>.+?)</a></h3>'
        content = re.compile(regex_titleurl)
        find_result = content.findall(htmlcontent)
        print ("\033[1;37;40m==========================百度 第%s页采集开始================\n"%(page_num))
        if self.savefile == 'True':
            logfile = open(key+'.txt','a')
        for i in range(len(find_result)):
            # Strip residual HTML tags (e.g. <em> highlights) from the title.
            dr = re.compile(r'<[^>]+>',re.S)
            title = dr.sub('',find_result[i][1])
            # Resolve Baidu's redirect URL to the real target URL.
            realurl = self.my_data.get_baidu_realurl(find_result[i][0])
            self.count.all_totals+=1
            # filter_data appears to signal a rejected URL by returning the
            # literal string "filter" (per the comparison below) — verify
            # against lib/filter.py.
            realurl = self.my_filter.filter_data(realurl,title)
            if realurl != "filter":
                self.count.all_checked_totals+=1
                print ("[ID]:%d [URL]:%s [TITLE]:%s"%(i,realurl,title))
                if self.savefile == 'True':
                    # Re-scan the log file so each URL is written only once.
                    have_url = 0
                    with open(key+'.txt','r') as foo:
                        for line in foo.readlines():
                            if realurl in line:
                                have_url = 1
                    if have_url ==0:
                        if self.write_title:
                            if self.write_name:
                                logfile.write(self.search_name+realurl+' '+title+'\n')
                            else:
                                logfile.write(realurl+' '+title+'\n')
                        else:
                            if self.write_name:
                                logfile.write(self.search_name+realurl+'\n')
                            else:
                                logfile.write(realurl+'\n')
                    else:
                        self.count.all_delete_totals+=1
            else:
                self.count.all_filter_totals+=1
        if self.savefile == 'True':
            logfile.close()
        print ("==========================百度 第%s页采集结束================\n"%(page_num))
|
flexible
|
{
"blob_id": "b724b04c6303cc9021539ad7df5a198000491029",
"index": 5436,
"step-1": "<mask token>\n\n\nclass Baidu:\n <mask token>\n <mask token>\n\n def __init__(self, count):\n cfg = ConfigParser.ConfigParser()\n cfg.read('config/setting.conf')\n self.baidu_page_size = int(cfg.get('search', 'baidu_page_size'))\n self.savefile = cfg.get('global', 'savefile')\n self.write_title = cfg.get('log', 'write_title')\n self.write_name = cfg.get('log', 'write_name')\n self.my_filter = SupFilter()\n self.my_data = SupGetData()\n self.my_status = Supstatus()\n self.count = count\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass Baidu:\n <mask token>\n <mask token>\n\n def __init__(self, count):\n cfg = ConfigParser.ConfigParser()\n cfg.read('config/setting.conf')\n self.baidu_page_size = int(cfg.get('search', 'baidu_page_size'))\n self.savefile = cfg.get('global', 'savefile')\n self.write_title = cfg.get('log', 'write_title')\n self.write_name = cfg.get('log', 'write_name')\n self.my_filter = SupFilter()\n self.my_data = SupGetData()\n self.my_status = Supstatus()\n self.count = count\n\n def search(self, key, page_pn):\n page_num = str(page_pn / self.baidu_page_size + 1)\n search_url = 'http://www.baidu.com/s?wd=key&rn=' + str(self.\n baidu_page_size) + '&pn=' + str(page_pn)\n search_url = search_url.replace('key', key)\n htmlcontent = self.my_data.get_pagehtml(search_url, 'baidu')\n regex_page = '<span class=\"pc\">' + page_num + '</span>'\n page_compile = re.compile(regex_page)\n page_result = page_compile.findall(htmlcontent)\n if page_result:\n pass\n else:\n self.my_status.baidu_search = False\n return\n regex_titleurl = (\n '<div class=\"result c-container \".*<h3 class=\".*\"><a(?:[^\\\\<]*\\\\n[^\\\\<]*)href = \"(?P<url>.+?)\"(?:[^\\\\<]*\\\\n[^\\\\<]*)target=\"_blank\"(?:[^\\\\<]*\\\\n[^\\\\<]*)>(?P<title>.+?)</a></h3>'\n )\n content = re.compile(regex_titleurl)\n find_result = content.findall(htmlcontent)\n print(\n '\\x1b[1;37;40m==========================百度 第%s页采集开始================\\n'\n % page_num)\n if self.savefile == 'True':\n logfile = open(key + '.txt', 'a')\n for i in range(len(find_result)):\n dr = re.compile('<[^>]+>', re.S)\n title = dr.sub('', find_result[i][1])\n realurl = self.my_data.get_baidu_realurl(find_result[i][0])\n self.count.all_totals += 1\n realurl = self.my_filter.filter_data(realurl, title)\n if realurl != 'filter':\n self.count.all_checked_totals += 1\n print('[ID]:%d [URL]:%s [TITLE]:%s' % (i, realurl, title))\n if self.savefile == 'True':\n have_url = 0\n with open(key + '.txt', 'r') as foo:\n for line in 
foo.readlines():\n if realurl in line:\n have_url = 1\n if have_url == 0:\n if self.write_title:\n if self.write_name:\n logfile.write(self.search_name +\n realurl + ' ' + title + '\\n')\n else:\n logfile.write(realurl + ' ' + title +\n '\\n')\n elif self.write_name:\n logfile.write(self.search_name + realurl + '\\n'\n )\n else:\n logfile.write(realurl + '\\n')\n else:\n self.count.all_delete_totals += 1\n else:\n self.count.all_filter_totals += 1\n if self.savefile == 'True':\n logfile.close()\n print('==========================百度 第%s页采集结束================\\n' %\n page_num)\n",
"step-3": "<mask token>\n\n\nclass Baidu:\n baidu_page_size = 50\n search_name = '[baidu]'\n\n def __init__(self, count):\n cfg = ConfigParser.ConfigParser()\n cfg.read('config/setting.conf')\n self.baidu_page_size = int(cfg.get('search', 'baidu_page_size'))\n self.savefile = cfg.get('global', 'savefile')\n self.write_title = cfg.get('log', 'write_title')\n self.write_name = cfg.get('log', 'write_name')\n self.my_filter = SupFilter()\n self.my_data = SupGetData()\n self.my_status = Supstatus()\n self.count = count\n\n def search(self, key, page_pn):\n page_num = str(page_pn / self.baidu_page_size + 1)\n search_url = 'http://www.baidu.com/s?wd=key&rn=' + str(self.\n baidu_page_size) + '&pn=' + str(page_pn)\n search_url = search_url.replace('key', key)\n htmlcontent = self.my_data.get_pagehtml(search_url, 'baidu')\n regex_page = '<span class=\"pc\">' + page_num + '</span>'\n page_compile = re.compile(regex_page)\n page_result = page_compile.findall(htmlcontent)\n if page_result:\n pass\n else:\n self.my_status.baidu_search = False\n return\n regex_titleurl = (\n '<div class=\"result c-container \".*<h3 class=\".*\"><a(?:[^\\\\<]*\\\\n[^\\\\<]*)href = \"(?P<url>.+?)\"(?:[^\\\\<]*\\\\n[^\\\\<]*)target=\"_blank\"(?:[^\\\\<]*\\\\n[^\\\\<]*)>(?P<title>.+?)</a></h3>'\n )\n content = re.compile(regex_titleurl)\n find_result = content.findall(htmlcontent)\n print(\n '\\x1b[1;37;40m==========================百度 第%s页采集开始================\\n'\n % page_num)\n if self.savefile == 'True':\n logfile = open(key + '.txt', 'a')\n for i in range(len(find_result)):\n dr = re.compile('<[^>]+>', re.S)\n title = dr.sub('', find_result[i][1])\n realurl = self.my_data.get_baidu_realurl(find_result[i][0])\n self.count.all_totals += 1\n realurl = self.my_filter.filter_data(realurl, title)\n if realurl != 'filter':\n self.count.all_checked_totals += 1\n print('[ID]:%d [URL]:%s [TITLE]:%s' % (i, realurl, title))\n if self.savefile == 'True':\n have_url = 0\n with open(key + '.txt', 'r') as foo:\n 
for line in foo.readlines():\n if realurl in line:\n have_url = 1\n if have_url == 0:\n if self.write_title:\n if self.write_name:\n logfile.write(self.search_name +\n realurl + ' ' + title + '\\n')\n else:\n logfile.write(realurl + ' ' + title +\n '\\n')\n elif self.write_name:\n logfile.write(self.search_name + realurl + '\\n'\n )\n else:\n logfile.write(realurl + '\\n')\n else:\n self.count.all_delete_totals += 1\n else:\n self.count.all_filter_totals += 1\n if self.savefile == 'True':\n logfile.close()\n print('==========================百度 第%s页采集结束================\\n' %\n page_num)\n",
"step-4": "import urllib2\nimport re\nimport ConfigParser\nfrom lib.filter import *\nfrom lib.getdata import *\nfrom lib.count import *\nfrom lib.status import *\n\n\nclass Baidu:\n baidu_page_size = 50\n search_name = '[baidu]'\n\n def __init__(self, count):\n cfg = ConfigParser.ConfigParser()\n cfg.read('config/setting.conf')\n self.baidu_page_size = int(cfg.get('search', 'baidu_page_size'))\n self.savefile = cfg.get('global', 'savefile')\n self.write_title = cfg.get('log', 'write_title')\n self.write_name = cfg.get('log', 'write_name')\n self.my_filter = SupFilter()\n self.my_data = SupGetData()\n self.my_status = Supstatus()\n self.count = count\n\n def search(self, key, page_pn):\n page_num = str(page_pn / self.baidu_page_size + 1)\n search_url = 'http://www.baidu.com/s?wd=key&rn=' + str(self.\n baidu_page_size) + '&pn=' + str(page_pn)\n search_url = search_url.replace('key', key)\n htmlcontent = self.my_data.get_pagehtml(search_url, 'baidu')\n regex_page = '<span class=\"pc\">' + page_num + '</span>'\n page_compile = re.compile(regex_page)\n page_result = page_compile.findall(htmlcontent)\n if page_result:\n pass\n else:\n self.my_status.baidu_search = False\n return\n regex_titleurl = (\n '<div class=\"result c-container \".*<h3 class=\".*\"><a(?:[^\\\\<]*\\\\n[^\\\\<]*)href = \"(?P<url>.+?)\"(?:[^\\\\<]*\\\\n[^\\\\<]*)target=\"_blank\"(?:[^\\\\<]*\\\\n[^\\\\<]*)>(?P<title>.+?)</a></h3>'\n )\n content = re.compile(regex_titleurl)\n find_result = content.findall(htmlcontent)\n print(\n '\\x1b[1;37;40m==========================百度 第%s页采集开始================\\n'\n % page_num)\n if self.savefile == 'True':\n logfile = open(key + '.txt', 'a')\n for i in range(len(find_result)):\n dr = re.compile('<[^>]+>', re.S)\n title = dr.sub('', find_result[i][1])\n realurl = self.my_data.get_baidu_realurl(find_result[i][0])\n self.count.all_totals += 1\n realurl = self.my_filter.filter_data(realurl, title)\n if realurl != 'filter':\n self.count.all_checked_totals += 1\n 
print('[ID]:%d [URL]:%s [TITLE]:%s' % (i, realurl, title))\n if self.savefile == 'True':\n have_url = 0\n with open(key + '.txt', 'r') as foo:\n for line in foo.readlines():\n if realurl in line:\n have_url = 1\n if have_url == 0:\n if self.write_title:\n if self.write_name:\n logfile.write(self.search_name +\n realurl + ' ' + title + '\\n')\n else:\n logfile.write(realurl + ' ' + title +\n '\\n')\n elif self.write_name:\n logfile.write(self.search_name + realurl + '\\n'\n )\n else:\n logfile.write(realurl + '\\n')\n else:\n self.count.all_delete_totals += 1\n else:\n self.count.all_filter_totals += 1\n if self.savefile == 'True':\n logfile.close()\n print('==========================百度 第%s页采集结束================\\n' %\n page_num)\n",
"step-5": "# -*- coding: utf-8 -*-\n# Project = https://github.com/super-l/search-url.git\n# Author = superl\n# Blog = www.superl.org QQ:86717375\n# Team = Code Security Team(C.S.T) | 铭剑创鼎\nimport urllib2\nimport re \nimport ConfigParser\n\nfrom lib.filter import *\nfrom lib.getdata import *\nfrom lib.count import *\nfrom lib.status import *\n\nclass Baidu():\n\n baidu_page_size = 50\n search_name = '[baidu]'\n\n def __init__(self,count) :\n cfg = ConfigParser.ConfigParser()\n cfg.read(\"config/setting.conf\")\n\n self.baidu_page_size = int(cfg.get(\"search\", \"baidu_page_size\"))\n self.savefile = cfg.get(\"global\", \"savefile\")\n self.write_title = cfg.get(\"log\", \"write_title\")\n self.write_name = cfg.get(\"log\", \"write_name\")\n self.my_filter = SupFilter()\n self.my_data = SupGetData()\n self.my_status = Supstatus()\n self.count = count\n\n\n #Get the web page source code\n def search(self,key,page_pn):\n #The number of baidu pages currently viewed\n #page_num = page_pn/baidu_page_size\n page_num = str(page_pn/self.baidu_page_size+1)\n\n search_url = 'http://www.baidu.com/s?wd=key&rn='+str(self.baidu_page_size)+'&pn='+str(page_pn)\n search_url = search_url.replace('key',key)\n #print search_url\n htmlcontent = self.my_data.get_pagehtml(search_url,'baidu')\n\n regex_page = r'<span class=\"pc\">'+page_num+'</span>'\n page_compile = re.compile(regex_page)\n page_result = page_compile.findall(htmlcontent)\n\n if page_result:\n pass\n else:\n self.my_status.baidu_search = False\n return\n\n regex_titleurl = r'<div class=\"result c-container \".*<h3 class=\".*\"><a(?:[^\\<]*\\n[^\\<]*)href = \"(?P<url>.+?)\"(?:[^\\<]*\\n[^\\<]*)target=\"_blank\"(?:[^\\<]*\\n[^\\<]*)>(?P<title>.+?)</a></h3>'\n\n content = re.compile(regex_titleurl)\n find_result = content.findall(htmlcontent)\n\n print (\"\\033[1;37;40m==========================百度 第%s页采集开始================\\n\"%(page_num))\n \n if self.savefile == 'True':\n logfile = open(key+'.txt','a')\n\n for i in 
range(len(find_result)):\n dr = re.compile(r'<[^>]+>',re.S)\n title = dr.sub('',find_result[i][1])\n\n realurl = self.my_data.get_baidu_realurl(find_result[i][0])\n\n self.count.all_totals+=1\n\n \n realurl = self.my_filter.filter_data(realurl,title)\n\n if realurl != \"filter\":\n self.count.all_checked_totals+=1\n\n print (\"[ID]:%d [URL]:%s [TITLE]:%s\"%(i,realurl,title))\n if self.savefile == 'True':\n have_url = 0\n with open(key+'.txt','r') as foo:\n for line in foo.readlines():\n if realurl in line:\n have_url = 1\n if have_url ==0:\n if self.write_title:\n if self.write_name:\n logfile.write(self.search_name+realurl+' '+title+'\\n')\n else:\n logfile.write(realurl+' '+title+'\\n')\n else:\n if self.write_name:\n logfile.write(self.search_name+realurl+'\\n')\n else:\n logfile.write(realurl+'\\n')\n else:\n self.count.all_delete_totals+=1 \n else:\n self.count.all_filter_totals+=1\n if self.savefile == 'True': \n logfile.close() \n print (\"==========================百度 第%s页采集结束================\\n\"%(page_num)) \n \n ",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
def with_metaclass(meta, *bases):
class metaclass(meta):
def __new__(cls, name, this_bases, d):
return meta(name, bases, d)
return type.__new__(metaclass, 'temporary_class', (), {})
<|reserved_special_token_1|>
<|reserved_special_token_0|>
if PY2:
text_type = unicode
string_types = basestring,
else:
text_type = str
string_types = str,
def with_metaclass(meta, *bases):
class metaclass(meta):
def __new__(cls, name, this_bases, d):
return meta(name, bases, d)
return type.__new__(metaclass, 'temporary_class', (), {})
<|reserved_special_token_1|>
<|reserved_special_token_0|>
PY2 = sys.version_info[0] == 2
if PY2:
text_type = unicode
string_types = basestring,
else:
text_type = str
string_types = str,
def with_metaclass(meta, *bases):
class metaclass(meta):
def __new__(cls, name, this_bases, d):
return meta(name, bases, d)
return type.__new__(metaclass, 'temporary_class', (), {})
<|reserved_special_token_1|>
import sys
PY2 = sys.version_info[0] == 2
if PY2:
text_type = unicode
string_types = basestring,
else:
text_type = str
string_types = str,
def with_metaclass(meta, *bases):
class metaclass(meta):
def __new__(cls, name, this_bases, d):
return meta(name, bases, d)
return type.__new__(metaclass, 'temporary_class', (), {})
<|reserved_special_token_1|>
#!/usr/bin/python
# -*- coding: utf-8 -*-
import sys
# True when running under a Python 2 interpreter.
PY2 = sys.version_info[0] == 2
if PY2:
    # Python 2 aliases: `unicode` is the text type, `basestring` covers both
    # str and unicode.
    text_type = unicode
    string_types = basestring,
else:
    # Python 3: `str` is the only text type.
    text_type = str
    string_types = str,
def with_metaclass(meta, *bases):
    """Create a base class with metaclass *meta*, portably across Py2/Py3."""
    # This requires a bit of explanation: the basic idea is to make a dummy
    # metaclass for one level of class instantiation that replaces itself with
    # the actual metaclass.
    class metaclass(meta):
        def __new__(cls, name, this_bases, d):
            # Rebuild the real class with the actual metaclass and the bases
            # originally requested, discarding the temporary placeholder.
            return meta(name, bases, d)
    return type.__new__(metaclass, 'temporary_class', (), {})
|
flexible
|
{
"blob_id": "414cb9a173ac70ad9ad1fc540aec569321fd3f8b",
"index": 9477,
"step-1": "<mask token>\n\n\ndef with_metaclass(meta, *bases):\n\n\n class metaclass(meta):\n\n def __new__(cls, name, this_bases, d):\n return meta(name, bases, d)\n return type.__new__(metaclass, 'temporary_class', (), {})\n",
"step-2": "<mask token>\nif PY2:\n text_type = unicode\n string_types = basestring,\nelse:\n text_type = str\n string_types = str,\n\n\ndef with_metaclass(meta, *bases):\n\n\n class metaclass(meta):\n\n def __new__(cls, name, this_bases, d):\n return meta(name, bases, d)\n return type.__new__(metaclass, 'temporary_class', (), {})\n",
"step-3": "<mask token>\nPY2 = sys.version_info[0] == 2\nif PY2:\n text_type = unicode\n string_types = basestring,\nelse:\n text_type = str\n string_types = str,\n\n\ndef with_metaclass(meta, *bases):\n\n\n class metaclass(meta):\n\n def __new__(cls, name, this_bases, d):\n return meta(name, bases, d)\n return type.__new__(metaclass, 'temporary_class', (), {})\n",
"step-4": "import sys\nPY2 = sys.version_info[0] == 2\nif PY2:\n text_type = unicode\n string_types = basestring,\nelse:\n text_type = str\n string_types = str,\n\n\ndef with_metaclass(meta, *bases):\n\n\n class metaclass(meta):\n\n def __new__(cls, name, this_bases, d):\n return meta(name, bases, d)\n return type.__new__(metaclass, 'temporary_class', (), {})\n",
"step-5": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\nimport sys\n\nPY2 = sys.version_info[0] == 2\n\nif PY2:\n text_type = unicode\n string_types = basestring,\nelse:\n text_type = str\n string_types = str,\n\n\ndef with_metaclass(meta, *bases):\n # This requires a bit of explanation: the basic idea is to make a dummy\n # metaclass for one level of class instantiation that replaces itself with\n # the actual metaclass.\n class metaclass(meta):\n def __new__(cls, name, this_bases, d):\n return meta(name, bases, d)\n return type.__new__(metaclass, 'temporary_class', (), {})\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
# Imports
import os
import time
import math
import random
from lib import *
def MT19937_keystream_generator(seed: int) -> "Iterator[int]":
    """
    Yield an endless MT19937 keystream one byte (as an int) at a time.

    Each 32-bit PRNG output word is split into four big-endian bytes and
    yielded in order, forever.

    Raises:
        ValueError: if *seed* is not in [1, 2**16] (at most a 16-bit key).
    """
    # Explicit validation instead of `assert`: asserts are stripped under -O,
    # and math.log2 would raise a confusing "math domain error" for seed <= 0.
    if not 1 <= seed <= 2 ** 16:
        raise ValueError("seed must be an integer in [1, 2**16]")
    prng = MT19937(seed)
    while True:
        number = prng.extract_number()
        # Split each output word into four big-endian bytes.
        yield from number.to_bytes(4, "big")
def MT19937_CTR(string: str, seed: int) -> bytes:
    """
    XOR *string* (a bytes-like object) against the MT19937 keystream.

    Because the cipher is a plain XOR, the same call both encrypts and
    decrypts.
    """
    # The seed must already be an integer; fail loudly otherwise.
    assert isinstance(seed, int)
    if not string:
        return b""
    # The generator body does not run until first consumption, so creating
    # it after the empty-input check changes nothing observable.
    keystream = MT19937_keystream_generator(seed)
    return bytes(pt ^ ks for pt, ks in zip(string, keystream))
def main():
    """
    Demo: encrypt a randomly padded message under a random 16-bit seed,
    then recover a working seed by brute force over the full key space.
    """
    plaintext = "Hello World!"

    # Prepend 0-10 random printable ASCII characters to the plaintext.
    string = b""
    for _ in range(random.randint(0, 10)):
        string += chr(random.randint(33, 126)).encode()
    string += plaintext.encode()

    seed = random.randint(1, 2 ** 16)
    print("> Seed value coded to be", seed)
    cipher_bytes = MT19937_CTR(string, seed)

    # Round-trip sanity check: decrypting with the same seed must succeed.
    assert string == MT19937_CTR(cipher_bytes, seed)

    # The key space is only 16 bits, so trying every seed is feasible.
    # A plain comparison replaces the original assert/except-AssertionError
    # control flow, which silently stops working under `python -O`.
    for candidate in range(1, 2 ** 16):
        if MT19937_CTR(cipher_bytes, candidate) == string:
            print("> Brute force successful.\nSeed:", candidate)
            break
# Run the demo only when this file is executed as a script.
if __name__=="__main__":
	main()
|
normal
|
{
"blob_id": "66b7d928bc2c98a12f7adb8a375ced21edce8333",
"index": 8492,
"step-1": "<mask token>\n\n\ndef main():\n plaintext = 'Hello World!'\n string = b''\n for _ in range(random.randint(0, 10)):\n i = random.randint(33, 126)\n string += chr(i).encode()\n string += plaintext.encode()\n seed = random.randint(1, 2 ** 16)\n print('> Seed value coded to be', seed)\n cipher_bytes = MT19937_CTR(string, seed)\n deciphered_bytes = MT19937_CTR(cipher_bytes, seed)\n assert string == deciphered_bytes\n for seed in range(1, 2 ** 16):\n deciphered_bytes = MT19937_CTR(cipher_bytes, seed)\n try:\n assert string == deciphered_bytes\n print('> Brute force successful.\\nSeed:', seed)\n break\n except AssertionError:\n continue\n return\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef MT19937_keystream_generator(seed: int) ->bytes:\n \"\"\"\n Generate keystream for MT19937\n \"\"\"\n assert math.log2(seed) <= 16\n prng = MT19937(seed)\n while True:\n number = prng.extract_number()\n yield from number.to_bytes(4, 'big')\n\n\ndef MT19937_CTR(string: str, seed: int) ->bytes:\n \"\"\"\n Encrypts a plaintext with MT19937 CTR Mode.\n \"\"\"\n assert isinstance(seed, int)\n keystream = MT19937_keystream_generator(seed)\n if len(string) == 0:\n return b''\n else:\n return bytes([(b1 ^ b2) for b1, b2 in zip(string, keystream)])\n\n\ndef main():\n plaintext = 'Hello World!'\n string = b''\n for _ in range(random.randint(0, 10)):\n i = random.randint(33, 126)\n string += chr(i).encode()\n string += plaintext.encode()\n seed = random.randint(1, 2 ** 16)\n print('> Seed value coded to be', seed)\n cipher_bytes = MT19937_CTR(string, seed)\n deciphered_bytes = MT19937_CTR(cipher_bytes, seed)\n assert string == deciphered_bytes\n for seed in range(1, 2 ** 16):\n deciphered_bytes = MT19937_CTR(cipher_bytes, seed)\n try:\n assert string == deciphered_bytes\n print('> Brute force successful.\\nSeed:', seed)\n break\n except AssertionError:\n continue\n return\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef MT19937_keystream_generator(seed: int) ->bytes:\n \"\"\"\n Generate keystream for MT19937\n \"\"\"\n assert math.log2(seed) <= 16\n prng = MT19937(seed)\n while True:\n number = prng.extract_number()\n yield from number.to_bytes(4, 'big')\n\n\ndef MT19937_CTR(string: str, seed: int) ->bytes:\n \"\"\"\n Encrypts a plaintext with MT19937 CTR Mode.\n \"\"\"\n assert isinstance(seed, int)\n keystream = MT19937_keystream_generator(seed)\n if len(string) == 0:\n return b''\n else:\n return bytes([(b1 ^ b2) for b1, b2 in zip(string, keystream)])\n\n\ndef main():\n plaintext = 'Hello World!'\n string = b''\n for _ in range(random.randint(0, 10)):\n i = random.randint(33, 126)\n string += chr(i).encode()\n string += plaintext.encode()\n seed = random.randint(1, 2 ** 16)\n print('> Seed value coded to be', seed)\n cipher_bytes = MT19937_CTR(string, seed)\n deciphered_bytes = MT19937_CTR(cipher_bytes, seed)\n assert string == deciphered_bytes\n for seed in range(1, 2 ** 16):\n deciphered_bytes = MT19937_CTR(cipher_bytes, seed)\n try:\n assert string == deciphered_bytes\n print('> Brute force successful.\\nSeed:', seed)\n break\n except AssertionError:\n continue\n return\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "import os\nimport time\nimport math\nimport random\nfrom lib import *\n\n\ndef MT19937_keystream_generator(seed: int) ->bytes:\n \"\"\"\n Generate keystream for MT19937\n \"\"\"\n assert math.log2(seed) <= 16\n prng = MT19937(seed)\n while True:\n number = prng.extract_number()\n yield from number.to_bytes(4, 'big')\n\n\ndef MT19937_CTR(string: str, seed: int) ->bytes:\n \"\"\"\n Encrypts a plaintext with MT19937 CTR Mode.\n \"\"\"\n assert isinstance(seed, int)\n keystream = MT19937_keystream_generator(seed)\n if len(string) == 0:\n return b''\n else:\n return bytes([(b1 ^ b2) for b1, b2 in zip(string, keystream)])\n\n\ndef main():\n plaintext = 'Hello World!'\n string = b''\n for _ in range(random.randint(0, 10)):\n i = random.randint(33, 126)\n string += chr(i).encode()\n string += plaintext.encode()\n seed = random.randint(1, 2 ** 16)\n print('> Seed value coded to be', seed)\n cipher_bytes = MT19937_CTR(string, seed)\n deciphered_bytes = MT19937_CTR(cipher_bytes, seed)\n assert string == deciphered_bytes\n for seed in range(1, 2 ** 16):\n deciphered_bytes = MT19937_CTR(cipher_bytes, seed)\n try:\n assert string == deciphered_bytes\n print('> Brute force successful.\\nSeed:', seed)\n break\n except AssertionError:\n continue\n return\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "# Imports\nimport os\nimport time\nimport math\nimport random\nfrom lib import *\n\ndef MT19937_keystream_generator(seed: int) -> bytes:\n \"\"\"\n Generate keystream for MT19937\n \"\"\"\n # Verify that the seed is atmost 16 bit long.\n assert math.log2(seed) <= 16\n \n prng = MT19937(seed)\n while True:\n number = prng.extract_number()\n yield from number.to_bytes(4, \"big\")\n \ndef MT19937_CTR(string: str, seed: int) -> bytes:\n \"\"\"\n Encrypts a plaintext with MT19937 CTR Mode.\n \"\"\"\n # Verify that the seed is an integer.\n assert isinstance(seed, int)\n \n keystream = MT19937_keystream_generator(seed)\n if len(string) == 0:\n return b\"\"\n else:\n return bytes([(b1 ^ b2) for b1, b2 in zip(string, keystream)])\n \ndef main():\n\n\tplaintext = \"Hello World!\"\n\n\t# append random characters before plainttext\n\tstring = b\"\"\n\tfor _ in range(random.randint(0, 10)):\n\t\ti = random.randint(33, 126)\n\t\tstring += chr(i).encode()\n\tstring += plaintext.encode()\n\n\tseed = random.randint(1, 2**16)\n\tprint(\"> Seed value coded to be\", seed)\n\tcipher_bytes = MT19937_CTR(string, seed)\n\tdeciphered_bytes = MT19937_CTR(cipher_bytes, seed)\n\n\t# verify if it can be decrypted\n\tassert string == deciphered_bytes\n\n\t#The number of possible keys is super small so you can just try them all. They even insist on it in the instructions: the cipher is using a 16-bits seed. It's kind of weird actually because from the specifications of MT19937 the seed seems to be 32 bits. Well even 32 bits should be small enough to crack, it would just take longer.\n\tfor seed in range(1, 2**16):\n\t\tdeciphered_bytes = MT19937_CTR(cipher_bytes, seed)\n\t\ttry:\n\t\t assert string == deciphered_bytes\n\t\t print(\"> Brute force successful.\\nSeed:\", seed)\n\t\t break\n\t\texcept AssertionError:\n\t\t continue\n\t\t \n\treturn\n\t\nif __name__==\"__main__\":\n\tmain()\n",
"step-ids": [
1,
3,
4,
5,
6
]
}
|
[
1,
3,
4,
5,
6
] |
#!/usr/bin/python
# Debug switch: set to 1 to use the hard-coded sample input below instead of
# reading from stdin (Python 2 print statements throughout this script).
debug = 0
if debug == 1:
    # Sample scale and interval list for manual testing.
    limit = [8,20]
    n = 3
    p = [[2,10],[10,12],[8,30],[1,5]]
    #n = 1
    # p = [[8,30]] 
    print limit
    print n
    print p
def isIn(arr):
    """
    Return True when interval *arr* = [start, end] overlaps the global
    `limit` scale and does not end at time 0.
    """
    # De Morgan of the original rejection test: keep an interval only when
    # it starts no later than the scale end AND ends no earlier than the
    # scale start AND has a nonzero end time.
    overlaps_scale = arr[0] <= limit[1] and arr[1] >= limit[0]
    return overlaps_scale and arr[1] != 0
def overlapNum():
    """
    Read a scale and an interval list, then print the minimum and maximum
    number of intervals simultaneously overlapping within the scale
    (Python 2 I/O: raw_input/input and print statements).
    """
    count = 0
    maxN = 0
    # Sentinel above any possible overlap count; lowered while scanning.
    minN = 10001
    global p
    global limit
    if debug !=1: 
        # Interactive input: scale endpoints on one line, then the interval
        # count, then n "start end" pairs.
        limit = []
        p = []
        n = 0
        i = 0
        s = raw_input().split(" ")
        limit = map(int,s) 
        n = input()
        while i<n:
            s = raw_input().split(" ")
            p.append([int(s[0]),int(s[1])])
            i = i + 1
    # NOTE(review): with debug == 1, `n` is never assigned in this scope, so
    # the next line raises UnboundLocalError — confirm the debug path.
    if n == 0:
        print 0
        print 0
        return
    p = filter(isIn,p) #Filtered out those not in limit scale
    #add 0,1 to the start and end time
    # Build an event list: (start, 0) opens an interval, (end, 1) closes it.
    l = []
    for i in range(len(p)):
        l.append((p[i][0],0))
        l.append((p[i][1],1))
    #sort
    # Tuple sort places opens (0) before closes (1) at equal timestamps.
    l = sorted(l)
    #count 0 and 1
    if limit[1] == 0 or len(l) == 0:
        print 0
        print 0
        return
    # Events do not span the whole scale, so the minimum overlap is 0.
    if l[0][0] > limit[0] or l[-1][0] < limit[1]:
        minN = 0
    for k in l:
        if k[1] == 0:
            count = count + 1
            maxN = max(maxN,count)
            # NOTE(review): this overwrites minN with the current count on
            # every open event instead of taking a minimum — verify intended.
            if minN != 0:
                minN = count
        else: #k[1] == 1
            # Only close events strictly inside the scale decrease the count.
            if k[0] < limit[1]:
                count = count -1
                if minN != 0:
                    minN = min(minN,count)
    # An untouched sentinel means no close event ever lowered minN.
    if minN >= 10001:
        print 0
    else:
        print minN
    print maxN
    return
# Script entry point (Python 2).
if __name__ == "__main__":
    overlapNum()
|
normal
|
{
"blob_id": "c8d27965df83eb3e673b3857ee700a8474826335",
"index": 3895,
"step-1": "#!/usr/bin/python\n\n\ndebug = 0\n\nif debug == 1:\n limit = [8,20]\n n = 3\n p = [[2,10],[10,12],[8,30],[1,5]]\n #n = 1\n # p = [[8,30]] \n print limit\n print n\n print p\n\ndef isIn(arr):\n\n if arr[0] > limit[1] or arr[1] < limit[0] or \\\n arr[1] == 0:\n return False\n else:\n return True\n\ndef overlapNum():\n\n count = 0\n maxN = 0\n minN = 10001\n\n global p\n global limit\n if debug !=1: \n limit = []\n p = []\n n = 0\n i = 0\n\n s = raw_input().split(\" \")\n limit = map(int,s) \n n = input()\n\n while i<n:\n s = raw_input().split(\" \")\n p.append([int(s[0]),int(s[1])])\n i = i + 1\n if n == 0:\n print 0\n print 0\n return\n\n p = filter(isIn,p) #Filtered out those not in limit scale\n\n #add 0,1 to the start and end time\n l = []\n for i in range(len(p)):\n l.append((p[i][0],0))\n l.append((p[i][1],1))\n\n #sort\n l = sorted(l)\n\n #count 0 and 1\n if limit[1] == 0 or len(l) == 0:\n print 0\n print 0\n return\n\n if l[0][0] > limit[0] or l[-1][0] < limit[1]:\n minN = 0\n\n for k in l:\n if k[1] == 0:\n count = count + 1\n maxN = max(maxN,count)\n if minN != 0:\n minN = count\n else: #k[1] == 1\n if k[0] < limit[1]:\n count = count -1\n if minN != 0:\n minN = min(minN,count)\n\n if minN >= 10001:\n print 0\n else:\n print minN\n print maxN\n return\n\nif __name__ == \"__main__\":\n overlapNum()\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
# Test phrases: a mix of balanced and unbalanced parenthesis strings.
# (Removed a dead commented-out draft loop that previously followed.)
strings = ['(())())', '(((()())()', '(()())((()))', '((()()(()))(((())))()', '()()()()(()()())()', '(()((())()(']
def isVPS(phrase):
    """
    Classify *phrase* as a valid parenthesis string.

    Every '(' opens a group and every other character closes one. Returns
    'YES' when the string is balanced, 'NO' otherwise.
    """
    depth = 0
    for symbol in phrase:
        if symbol == '(':
            depth += 1
        elif depth == 0:
            # A closer with nothing open can never be matched.
            return 'NO'
        else:
            depth -= 1
    # Balanced only if every opener was eventually closed.
    return 'YES' if depth == 0 else 'NO'
# Print a YES/NO verdict for every test phrase.
for string in strings:
    print(isVPS(string))
#print(isVPS(string[0]))
|
normal
|
{
"blob_id": "d9f055301f050eea4281ce418974546c1245ac7e",
"index": 4621,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef isVPS(phrase):\n testlist = []\n for char in phrase:\n if char == '(':\n testlist.append(char)\n elif len(testlist) == 0:\n return 'NO'\n else:\n testlist.pop()\n if len(testlist) == 0:\n return 'YES'\n else:\n return 'NO'\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef isVPS(phrase):\n testlist = []\n for char in phrase:\n if char == '(':\n testlist.append(char)\n elif len(testlist) == 0:\n return 'NO'\n else:\n testlist.pop()\n if len(testlist) == 0:\n return 'YES'\n else:\n return 'NO'\n\n\nfor string in strings:\n print(isVPS(string))\n",
"step-4": "strings = ['(())())', '(((()())()', '(()())((()))', '((()()(()))(((())))()',\n '()()()()(()()())()', '(()((())()(']\n<mask token>\n\n\ndef isVPS(phrase):\n testlist = []\n for char in phrase:\n if char == '(':\n testlist.append(char)\n elif len(testlist) == 0:\n return 'NO'\n else:\n testlist.pop()\n if len(testlist) == 0:\n return 'YES'\n else:\n return 'NO'\n\n\nfor string in strings:\n print(isVPS(string))\n",
"step-5": "strings = ['(())())', '(((()())()', '(()())((()))', '((()()(()))(((())))()', '()()()()(()()())()', '(()((())()(']\n\n#print(string[0])\n'''\nfor i in string:\n testlist = []\n for j in string[i]:\n if j == ')':\n if \n'''\n\ndef isVPS(phrase):\n testlist = []\n for char in phrase:\n if char == '(':\n testlist.append(char)\n else:\n if len(testlist) == 0:\n #return False\n return 'NO'\n else:\n testlist.pop()\n if len(testlist) == 0:\n #return True\n return 'YES'\n else:\n #return False\n return 'NO'\n\nfor string in strings:\n print(isVPS(string))\n#print(isVPS(string[0]))",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
def home(request):
blogs = Blog.objects
return render(request, 'home.html', {'blogs': blogs})
def detail(request, blog_id):
blog_detail = get_object_or_404(Blog, pk=blog_id)
return render(request, 'detail.html', {'blog': blog_detail})
<|reserved_special_token_0|>
def create(request):
blog = Blog()
blog.title = request.GET['title']
blog.body = request.GET['body']
blog.pub_date = timezone.datetime.now()
blog.save()
return redirect('/blog/' + str(blog.id))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def home(request):
blogs = Blog.objects
return render(request, 'home.html', {'blogs': blogs})
def detail(request, blog_id):
blog_detail = get_object_or_404(Blog, pk=blog_id)
return render(request, 'detail.html', {'blog': blog_detail})
def new(request):
return render(request, 'new.html')
def create(request):
blog = Blog()
blog.title = request.GET['title']
blog.body = request.GET['body']
blog.pub_date = timezone.datetime.now()
blog.save()
return redirect('/blog/' + str(blog.id))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
admin.site.register(Blog)
def home(request):
blogs = Blog.objects
return render(request, 'home.html', {'blogs': blogs})
def detail(request, blog_id):
blog_detail = get_object_or_404(Blog, pk=blog_id)
return render(request, 'detail.html', {'blog': blog_detail})
def new(request):
return render(request, 'new.html')
def create(request):
blog = Blog()
blog.title = request.GET['title']
blog.body = request.GET['body']
blog.pub_date = timezone.datetime.now()
blog.save()
return redirect('/blog/' + str(blog.id))
<|reserved_special_token_1|>
from django.shortcuts import render, get_object_or_404, redirect
from django.contrib import admin
from .models import Blog
from django.utils import timezone
admin.site.register(Blog)
def home(request):
blogs = Blog.objects
return render(request, 'home.html', {'blogs': blogs})
def detail(request, blog_id):
blog_detail = get_object_or_404(Blog, pk=blog_id)
return render(request, 'detail.html', {'blog': blog_detail})
def new(request):
return render(request, 'new.html')
def create(request):
blog = Blog()
blog.title = request.GET['title']
blog.body = request.GET['body']
blog.pub_date = timezone.datetime.now()
blog.save()
return redirect('/blog/' + str(blog.id))
<|reserved_special_token_1|>
from django.shortcuts import render,get_object_or_404, redirect
from django.contrib import admin #어드민 쓸꺼면 써야됨
from .models import Blog #앱을 가지고 오겠다는거
from django.utils import timezone
admin.site.register(Blog)  # Expose the Blog model in the Django admin.
# NOTE(review): admin registration conventionally lives in admin.py, not in
# views — registering here works but confirm the placement is intentional.
# Create your views here.
def home(request):
    """Render the blog index, handing the template the Blog manager."""
    context = {'blogs': Blog.objects}
    return render(request, 'home.html', context)
def detail(request, blog_id):
    """Render one post; respond 404 when *blog_id* matches no Blog."""
    post = get_object_or_404(Blog, pk=blog_id)
    return render(request, 'detail.html', {'blog': post})
def new(request):
    """Show the empty "write a new post" form."""
    return render(request,'new.html')
def create(request):
    """Create a blog post from query-string fields, then redirect to it."""
    # NOTE(review): a state-changing view should normally read request.POST
    # (with CSRF protection); GET is what the current template submits —
    # confirm the form before changing this.
    blog=Blog()
    blog.title=request.GET['title']
    blog.body=request.GET['body']
    # NOTE(review): timezone.datetime.now() yields a naive datetime;
    # Django's timezone.now() is usually intended when USE_TZ is on — verify.
    blog.pub_date=timezone.datetime.now()
    blog.save()
    return redirect('/blog/'+str(blog.id))
|
flexible
|
{
"blob_id": "bc25338612f525f616fb26c64d8b36667d297d40",
"index": 3921,
"step-1": "<mask token>\n\n\ndef home(request):\n blogs = Blog.objects\n return render(request, 'home.html', {'blogs': blogs})\n\n\ndef detail(request, blog_id):\n blog_detail = get_object_or_404(Blog, pk=blog_id)\n return render(request, 'detail.html', {'blog': blog_detail})\n\n\n<mask token>\n\n\ndef create(request):\n blog = Blog()\n blog.title = request.GET['title']\n blog.body = request.GET['body']\n blog.pub_date = timezone.datetime.now()\n blog.save()\n return redirect('/blog/' + str(blog.id))\n",
"step-2": "<mask token>\n\n\ndef home(request):\n blogs = Blog.objects\n return render(request, 'home.html', {'blogs': blogs})\n\n\ndef detail(request, blog_id):\n blog_detail = get_object_or_404(Blog, pk=blog_id)\n return render(request, 'detail.html', {'blog': blog_detail})\n\n\ndef new(request):\n return render(request, 'new.html')\n\n\ndef create(request):\n blog = Blog()\n blog.title = request.GET['title']\n blog.body = request.GET['body']\n blog.pub_date = timezone.datetime.now()\n blog.save()\n return redirect('/blog/' + str(blog.id))\n",
"step-3": "<mask token>\nadmin.site.register(Blog)\n\n\ndef home(request):\n blogs = Blog.objects\n return render(request, 'home.html', {'blogs': blogs})\n\n\ndef detail(request, blog_id):\n blog_detail = get_object_or_404(Blog, pk=blog_id)\n return render(request, 'detail.html', {'blog': blog_detail})\n\n\ndef new(request):\n return render(request, 'new.html')\n\n\ndef create(request):\n blog = Blog()\n blog.title = request.GET['title']\n blog.body = request.GET['body']\n blog.pub_date = timezone.datetime.now()\n blog.save()\n return redirect('/blog/' + str(blog.id))\n",
"step-4": "from django.shortcuts import render, get_object_or_404, redirect\nfrom django.contrib import admin\nfrom .models import Blog\nfrom django.utils import timezone\nadmin.site.register(Blog)\n\n\ndef home(request):\n blogs = Blog.objects\n return render(request, 'home.html', {'blogs': blogs})\n\n\ndef detail(request, blog_id):\n blog_detail = get_object_or_404(Blog, pk=blog_id)\n return render(request, 'detail.html', {'blog': blog_detail})\n\n\ndef new(request):\n return render(request, 'new.html')\n\n\ndef create(request):\n blog = Blog()\n blog.title = request.GET['title']\n blog.body = request.GET['body']\n blog.pub_date = timezone.datetime.now()\n blog.save()\n return redirect('/blog/' + str(blog.id))\n",
"step-5": "from django.shortcuts import render,get_object_or_404, redirect\nfrom django.contrib import admin #어드민 쓸꺼면 써야됨\nfrom .models import Blog #앱을 가지고 오겠다는거\nfrom django.utils import timezone\n\nadmin.site.register(Blog) #블로그 형식을 가져와 등록하겠다.\n# Create your views here.\ndef home(request):\n blogs = Blog.objects\n return render(request,'home.html',{'blogs':blogs})\n\ndef detail(request,blog_id):\n blog_detail= get_object_or_404(Blog,pk=blog_id)\n return render(request,'detail.html',{'blog': blog_detail})\n\ndef new(request):\n return render(request,'new.html')\n\ndef create(request):\n blog=Blog()\n blog.title=request.GET['title']\n blog.body=request.GET['body']\n blog.pub_date=timezone.datetime.now()\n blog.save()\n return redirect('/blog/'+str(blog.id))",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
from getMerriamWebster import searchMerriamWebster
from searchWikipedia import searchWikipedia
from synonyms import searchSynonyms
class Scraping:
def __init__(self, clues, answers, gridIndex):
self.clues = clues
self.domains = {"across": {}, "down":{}}
self.answers = answers
self.gridIndex = gridIndex
def setDomains(self):
for down in self.clues["down"]:
self.domains["down"][down] = self.search(self.clues["down"][down])
for across in self.clues["across"]:
self.domains["across"][across] = self.search(self.clues["across"][across])
#======================== CHEAT =============================
#self.cheat()
def getClueList(self, clue):
clueList = [clue]
return clueList
def search(self, clue):
domain = set()
wiki_set = set()
synonym_set = set()
toSearch = clue
"""
print("Google search for:", toSearch)
try:
domain = domain + self.getGoogle(toSearch)
except:
print("An exception occurred")
"""
print("Wikipedia search for:", toSearch)
try:
wiki_set = wiki_set | self.getWiki(toSearch)
except:
print("An exception occurred")
print("Synonym search from Datamuse and Merriam-Webster for:", toSearch)
try:
synonym_set = synonym_set | self.getSynonyms(toSearch)
except:
print("An exception occurred")
"""
print("Merriam Webster search for:", toSearch)
try:
merriam_set = merriam_set | self.getMerriam(toSearch)
except:
print("An exception occurred")
"""
domain = domain.union(wiki_set, synonym_set)
return ' '.join(str(e) for e in domain) #''.join(str(e) for e in words)
def getGoogle(self, toSearch):
return "toSearch"
def getWiki(self, toSearch):
return searchWikipedia(toSearch)
def getMerriam(self,toSearch):
return searchMerriamWebster(toSearch)
def getSynonyms(self, toSearch):
return searchSynonyms(toSearch, self.clues["across"], self.clues["down"])
def cheat(self):
for across in self.clues["across"]:
for row in range(0,5):
for col in range(0,5):
if self.gridIndex[row][col] == across:
answer = ""
for colIn in range(0,5):
if self.answers[row][colIn] != "-":
answer = answer + self.answers[row][colIn]
self.domains["across"][across] = self.domains["across"][across] + " " + answer
#print(answer)
for down in self.clues["down"]:
for row in range(0,5):
for col in range(0,5):
if self.gridIndex[row][col] == down:
answer = ""
for rowIn in range(0,5):
if self.answers[rowIn][col] != "-":
answer = answer + self.answers[rowIn][col]
self.domains["down"][down] = self.domains["down"][down] + " " + answer
#print(answer)
"""
scraping = Scraping()
scraping.setDomains()
print(scraping.domains)
"""
|
normal
|
{
"blob_id": "138abb40fda0f19b4a74a294d5cd0dd326dc59ce",
"index": 7722,
"step-1": "<mask token>\n\n\nclass Scraping:\n\n def __init__(self, clues, answers, gridIndex):\n self.clues = clues\n self.domains = {'across': {}, 'down': {}}\n self.answers = answers\n self.gridIndex = gridIndex\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def getSynonyms(self, toSearch):\n return searchSynonyms(toSearch, self.clues['across'], self.clues[\n 'down'])\n\n def cheat(self):\n for across in self.clues['across']:\n for row in range(0, 5):\n for col in range(0, 5):\n if self.gridIndex[row][col] == across:\n answer = ''\n for colIn in range(0, 5):\n if self.answers[row][colIn] != '-':\n answer = answer + self.answers[row][colIn]\n self.domains['across'][across] = self.domains['across'\n ][across] + ' ' + answer\n for down in self.clues['down']:\n for row in range(0, 5):\n for col in range(0, 5):\n if self.gridIndex[row][col] == down:\n answer = ''\n for rowIn in range(0, 5):\n if self.answers[rowIn][col] != '-':\n answer = answer + self.answers[rowIn][col]\n self.domains['down'][down] = self.domains['down'][down\n ] + ' ' + answer\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Scraping:\n\n def __init__(self, clues, answers, gridIndex):\n self.clues = clues\n self.domains = {'across': {}, 'down': {}}\n self.answers = answers\n self.gridIndex = gridIndex\n <mask token>\n <mask token>\n\n def search(self, clue):\n domain = set()\n wiki_set = set()\n synonym_set = set()\n toSearch = clue\n \"\"\"\n print(\"Google search for:\", toSearch)\n try:\n domain = domain + self.getGoogle(toSearch)\n except:\n print(\"An exception occurred\")\n \"\"\"\n print('Wikipedia search for:', toSearch)\n try:\n wiki_set = wiki_set | self.getWiki(toSearch)\n except:\n print('An exception occurred')\n print('Synonym search from Datamuse and Merriam-Webster for:', toSearch\n )\n try:\n synonym_set = synonym_set | self.getSynonyms(toSearch)\n except:\n print('An exception occurred')\n \"\"\"\n print(\"Merriam Webster search for:\", toSearch)\n try:\n merriam_set = merriam_set | self.getMerriam(toSearch)\n except:\n print(\"An exception occurred\")\n \"\"\"\n domain = domain.union(wiki_set, synonym_set)\n return ' '.join(str(e) for e in domain)\n <mask token>\n\n def getWiki(self, toSearch):\n return searchWikipedia(toSearch)\n <mask token>\n\n def getSynonyms(self, toSearch):\n return searchSynonyms(toSearch, self.clues['across'], self.clues[\n 'down'])\n\n def cheat(self):\n for across in self.clues['across']:\n for row in range(0, 5):\n for col in range(0, 5):\n if self.gridIndex[row][col] == across:\n answer = ''\n for colIn in range(0, 5):\n if self.answers[row][colIn] != '-':\n answer = answer + self.answers[row][colIn]\n self.domains['across'][across] = self.domains['across'\n ][across] + ' ' + answer\n for down in self.clues['down']:\n for row in range(0, 5):\n for col in range(0, 5):\n if self.gridIndex[row][col] == down:\n answer = ''\n for rowIn in range(0, 5):\n if self.answers[rowIn][col] != '-':\n answer = answer + self.answers[rowIn][col]\n self.domains['down'][down] = self.domains['down'][down\n ] + ' ' + 
answer\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Scraping:\n\n def __init__(self, clues, answers, gridIndex):\n self.clues = clues\n self.domains = {'across': {}, 'down': {}}\n self.answers = answers\n self.gridIndex = gridIndex\n\n def setDomains(self):\n for down in self.clues['down']:\n self.domains['down'][down] = self.search(self.clues['down'][down])\n for across in self.clues['across']:\n self.domains['across'][across] = self.search(self.clues[\n 'across'][across])\n <mask token>\n\n def search(self, clue):\n domain = set()\n wiki_set = set()\n synonym_set = set()\n toSearch = clue\n \"\"\"\n print(\"Google search for:\", toSearch)\n try:\n domain = domain + self.getGoogle(toSearch)\n except:\n print(\"An exception occurred\")\n \"\"\"\n print('Wikipedia search for:', toSearch)\n try:\n wiki_set = wiki_set | self.getWiki(toSearch)\n except:\n print('An exception occurred')\n print('Synonym search from Datamuse and Merriam-Webster for:', toSearch\n )\n try:\n synonym_set = synonym_set | self.getSynonyms(toSearch)\n except:\n print('An exception occurred')\n \"\"\"\n print(\"Merriam Webster search for:\", toSearch)\n try:\n merriam_set = merriam_set | self.getMerriam(toSearch)\n except:\n print(\"An exception occurred\")\n \"\"\"\n domain = domain.union(wiki_set, synonym_set)\n return ' '.join(str(e) for e in domain)\n\n def getGoogle(self, toSearch):\n return 'toSearch'\n\n def getWiki(self, toSearch):\n return searchWikipedia(toSearch)\n\n def getMerriam(self, toSearch):\n return searchMerriamWebster(toSearch)\n\n def getSynonyms(self, toSearch):\n return searchSynonyms(toSearch, self.clues['across'], self.clues[\n 'down'])\n\n def cheat(self):\n for across in self.clues['across']:\n for row in range(0, 5):\n for col in range(0, 5):\n if self.gridIndex[row][col] == across:\n answer = ''\n for colIn in range(0, 5):\n if self.answers[row][colIn] != '-':\n answer = answer + self.answers[row][colIn]\n self.domains['across'][across] = self.domains['across'\n ][across] + ' ' + 
answer\n for down in self.clues['down']:\n for row in range(0, 5):\n for col in range(0, 5):\n if self.gridIndex[row][col] == down:\n answer = ''\n for rowIn in range(0, 5):\n if self.answers[rowIn][col] != '-':\n answer = answer + self.answers[rowIn][col]\n self.domains['down'][down] = self.domains['down'][down\n ] + ' ' + answer\n\n\n<mask token>\n",
"step-4": "from getMerriamWebster import searchMerriamWebster\nfrom searchWikipedia import searchWikipedia\nfrom synonyms import searchSynonyms\n\n\nclass Scraping:\n\n def __init__(self, clues, answers, gridIndex):\n self.clues = clues\n self.domains = {'across': {}, 'down': {}}\n self.answers = answers\n self.gridIndex = gridIndex\n\n def setDomains(self):\n for down in self.clues['down']:\n self.domains['down'][down] = self.search(self.clues['down'][down])\n for across in self.clues['across']:\n self.domains['across'][across] = self.search(self.clues[\n 'across'][across])\n\n def getClueList(self, clue):\n clueList = [clue]\n return clueList\n\n def search(self, clue):\n domain = set()\n wiki_set = set()\n synonym_set = set()\n toSearch = clue\n \"\"\"\n print(\"Google search for:\", toSearch)\n try:\n domain = domain + self.getGoogle(toSearch)\n except:\n print(\"An exception occurred\")\n \"\"\"\n print('Wikipedia search for:', toSearch)\n try:\n wiki_set = wiki_set | self.getWiki(toSearch)\n except:\n print('An exception occurred')\n print('Synonym search from Datamuse and Merriam-Webster for:', toSearch\n )\n try:\n synonym_set = synonym_set | self.getSynonyms(toSearch)\n except:\n print('An exception occurred')\n \"\"\"\n print(\"Merriam Webster search for:\", toSearch)\n try:\n merriam_set = merriam_set | self.getMerriam(toSearch)\n except:\n print(\"An exception occurred\")\n \"\"\"\n domain = domain.union(wiki_set, synonym_set)\n return ' '.join(str(e) for e in domain)\n\n def getGoogle(self, toSearch):\n return 'toSearch'\n\n def getWiki(self, toSearch):\n return searchWikipedia(toSearch)\n\n def getMerriam(self, toSearch):\n return searchMerriamWebster(toSearch)\n\n def getSynonyms(self, toSearch):\n return searchSynonyms(toSearch, self.clues['across'], self.clues[\n 'down'])\n\n def cheat(self):\n for across in self.clues['across']:\n for row in range(0, 5):\n for col in range(0, 5):\n if self.gridIndex[row][col] == across:\n answer = ''\n for colIn 
in range(0, 5):\n if self.answers[row][colIn] != '-':\n answer = answer + self.answers[row][colIn]\n self.domains['across'][across] = self.domains['across'\n ][across] + ' ' + answer\n for down in self.clues['down']:\n for row in range(0, 5):\n for col in range(0, 5):\n if self.gridIndex[row][col] == down:\n answer = ''\n for rowIn in range(0, 5):\n if self.answers[rowIn][col] != '-':\n answer = answer + self.answers[rowIn][col]\n self.domains['down'][down] = self.domains['down'][down\n ] + ' ' + answer\n\n\n<mask token>\n",
"step-5": "from getMerriamWebster import searchMerriamWebster\nfrom searchWikipedia import searchWikipedia\nfrom synonyms import searchSynonyms\n\nclass Scraping:\n def __init__(self, clues, answers, gridIndex):\n self.clues = clues\n self.domains = {\"across\": {}, \"down\":{}}\n self.answers = answers\n self.gridIndex = gridIndex\n\n def setDomains(self):\n for down in self.clues[\"down\"]:\n self.domains[\"down\"][down] = self.search(self.clues[\"down\"][down])\n for across in self.clues[\"across\"]:\n self.domains[\"across\"][across] = self.search(self.clues[\"across\"][across])\n #======================== CHEAT =============================\n #self.cheat()\n\n def getClueList(self, clue):\n clueList = [clue]\n return clueList\n\n def search(self, clue):\n domain = set()\n wiki_set = set()\n synonym_set = set()\n toSearch = clue\n \"\"\"\n print(\"Google search for:\", toSearch)\n try:\n domain = domain + self.getGoogle(toSearch)\n except:\n print(\"An exception occurred\")\n \"\"\"\n print(\"Wikipedia search for:\", toSearch)\n try:\n\n wiki_set = wiki_set | self.getWiki(toSearch)\n except:\n print(\"An exception occurred\")\n \n print(\"Synonym search from Datamuse and Merriam-Webster for:\", toSearch)\n try:\n synonym_set = synonym_set | self.getSynonyms(toSearch)\n except:\n print(\"An exception occurred\")\n \n \"\"\"\n print(\"Merriam Webster search for:\", toSearch)\n try:\n merriam_set = merriam_set | self.getMerriam(toSearch)\n except:\n print(\"An exception occurred\")\n \"\"\" \n domain = domain.union(wiki_set, synonym_set)\n return ' '.join(str(e) for e in domain) #''.join(str(e) for e in words)\n\n def getGoogle(self, toSearch):\n\n return \"toSearch\"\n\n def getWiki(self, toSearch):\n return searchWikipedia(toSearch)\n\n def getMerriam(self,toSearch):\n return searchMerriamWebster(toSearch)\n\n def getSynonyms(self, toSearch):\n return searchSynonyms(toSearch, self.clues[\"across\"], self.clues[\"down\"])\n\n def cheat(self):\n for across in 
self.clues[\"across\"]:\n \n for row in range(0,5):\n for col in range(0,5):\n if self.gridIndex[row][col] == across:\n answer = \"\"\n for colIn in range(0,5):\n if self.answers[row][colIn] != \"-\":\n answer = answer + self.answers[row][colIn]\n self.domains[\"across\"][across] = self.domains[\"across\"][across] + \" \" + answer\n #print(answer)\n\n for down in self.clues[\"down\"]:\n \n for row in range(0,5):\n for col in range(0,5):\n if self.gridIndex[row][col] == down:\n answer = \"\"\n for rowIn in range(0,5):\n if self.answers[rowIn][col] != \"-\":\n answer = answer + self.answers[rowIn][col]\n self.domains[\"down\"][down] = self.domains[\"down\"][down] + \" \" + answer\n #print(answer)\n\n\n\"\"\"\nscraping = Scraping()\nscraping.setDomains()\nprint(scraping.domains)\n\"\"\"",
"step-ids": [
4,
6,
9,
11,
12
]
}
|
[
4,
6,
9,
11,
12
] |
<|reserved_special_token_0|>
class illumination(object):
<|reserved_special_token_0|>
class darkfield(object):
def __init__(self, basePath, darkframePath=None, flip_image_across_axis
=None, show_image=False, save_image=False, save_img_type='.tif',
savePath=None, savename=None, save_plot=False):
"""
details about dark field image
"""
self.basePath = basePath
img, mean, std = calculate_darkfield(self.basePath, darkframePath=
darkframePath, flip_image_axes=flip_image_across_axis,
show_image=show_image, save_image=save_image, save_img_type=
save_img_type, savePath=savePath, savename=savename, save_plot=
save_plot)
self.img = img
self.mean = mean
self.std = std
class microscope(object):
def __init__(self, type, objective, illumination, ccd):
"""
describes the micrscope setup
:param type:
:param objective:
"""
self.type = type
self.objective = objective
self.illumination = illumination
self.ccd = ccd
class ccd(object):
def __init__(self, exposure_time, img_acq_rate, EM_gain, name=
'iXon Ultra 897', img_acq_type='emcdd', darkfield=None, binning=
None, vertical_pixel_shift_speed=5e-07,
horizontal_pixel_shift_speed=1e-07,
horizontal_pixel_shift_rate_bits=14, frame_transfer=True, crop_mode
=False, acquisition_mode='kinetic', triggering='internal',
readout_mode='image', pixels=512, pixel_size=1.6e-05):
"""
describe the CCD class
"""
self.name = name
self.img_acq_type = img_acq_type
self.exposure_time = exposure_time
self.img_acq_rate = img_acq_rate
self.em_gain = EM_gain
self.darkfield = darkfield
self.binning = binning
self.vpss = vertical_pixel_shift_speed
self.hpss = horizontal_pixel_shift_speed
self.hpss_bits = horizontal_pixel_shift_rate_bits
self.frame_transfer = frame_transfer
self.crop_mode = crop_mode
self.acquisition_mode = acquisition_mode
self.triggering = triggering
self.readout_mode = readout_mode
if isinstance(pixels, int):
self.pixels = pixels, pixels
else:
self.pixels = pixels
self.pixel_size = pixel_size
self.image_area = self.pixels[0] * pixel_size, self.pixels[1
] * pixel_size
class objective(object):
def __init__(self, fluoro_particle, name=None, numerical_aperture=None,
magnification=None, basePath=None, channel_height=None,
illumination=None, wavelength=None, microgrid=None,
auto_calc_pix_to_micron_scaling=False, pixel_to_micron=None,
field_number=None, n0=1, show_depth_plot=False, save_depth_plot=False):
"""
Objectives in the Pennathur Lab Dark Room uScope:
20X - LCPlanFL N 20X LCD [LCPLFLN20xLCD]
magnification: 20
numerical_aperture: 0.45
field_number: 26.5
working distance: 7.4 - 8.3 mm
transmittance: 90% @ 425 - 670 nm
correction collar: 0 - 1.2 mm
microns per pixel: 1.55
50X - LCPlanFL N 50x LCD [LCPLFLN50xLCD]
magnification: 50
numerical aperture: 0.7
field number: 26.5
working distance: 2.2 - 3 mm
transmittance: 90% @ 425 - 650 nm
correction collar: 0 - 1.2 mm
microns per pixel: 0.6
Manufacturer website: https://www.olympus-ims.com/en/microscope/lcplfln-lcd/#!cms[focus]=cmsContent11428
"""
self.name = name
if name == 'LCPLFLN20xLCD':
self.magnification = 20
self.numerical_aperture = 0.45
self.field_number = 26.5
self.transmittance = 0.9
self.pixel_to_micron = 1.55
elif name == 'LCPLFLN50xLCD':
self.magnification = 50
self.numerical_aperture = 0.7
self.field_number = 26.5
self.transmittance = 0.9
self.pixel_to_micron = 0.6
else:
self.numerical_aperture = numerical_aperture
self.magnification = magnification
self.field_number = field_number
self._illumination = illumination
if self._illumination is not None:
self._wavelength = self._illumination.emission_wavelength
elif wavelength is not None:
self._wavelength = wavelength
else:
raise ValueError(
'A wavelength is required via the <illumination> class or <wavelength> input parameter'
)
self._pd = fluoro_particle.diameter
self._n0 = n0
self.calculate_depth_of_field()
self.calculate_depth_of_correlation()
if field_number:
self.calculate_field_of_view()
if show_depth_plot or save_depth_plot:
plot_field_depth(depth_of_corr=self.depth_of_correlation,
depth_of_field=self.depth_of_field, show_depth_plot=
show_depth_plot, save_depth_plot=save_depth_plot, basePath=
basePath, savename=None, channel_height=channel_height,
objective=self.magnification)
if auto_calc_pix_to_micron_scaling and self.pixel_to_micron is None:
self.microgrid = microgrid
self.calculate_pixel_to_micron_scaling()
def calculate_field_of_view(self):
self.field_of_view = self.field_number / self.magnification
def calculate_depth_of_field(self, e=1.6e-05, n=1):
"""
e: CCD pixel resolution example: e = 16 um (16 microns is the pixel size)
"""
self.depth_of_field = (self._wavelength * n / self.
numerical_aperture ** 2 + e * n / (self.magnification * self.
numerical_aperture))
def calculate_depth_of_correlation(self, eps=0.01):
n = self._n0
dp = self._pd
NA = self.numerical_aperture
M = self.magnification
lmbda = self._wavelength
depth_of_correlation = calculate_depth_of_correlation(M=M, NA=NA,
dp=dp, n=n, lmbda=lmbda, eps=eps)
self.depth_of_correlation = depth_of_correlation
def calculate_pixel_to_micron_scaling(self):
if self.microgrid is None:
raise ValueError(
'Need objective.microgrid property in order to calculate scaling factor'
)
@property
def NA(self):
return self.numerical_aperture
@property
def M(self):
return self.magnification
class microgrid(object):
def __init__(self, gridPath=None, center_to_center_spacing=None,
feature_width=None, grid_type='grid', show_grid=False):
"""
this class holds images for the microgrid and performs pixel to micron scaling calculations
"""
if gridPath is not None:
self.gridPath = gridPath
self.spacing = center_to_center_spacing
self.width = feature_width
self.grid_type = grid_type
file_list = glob.glob(join(self.gridPath, 'grid*.tif'))
if len(file_list) < 1:
raise ValueError('No grid*.tif files found in {}'.format(
self.gridPath))
img_grid = np.zeros(shape=(512, 512))
for f in file_list:
img = io.imread(f, plugin='tifffile')
if len(np.shape(img)) > 2:
img = np.mean(img, axis=0)
img_grid += img
img_grid = img_grid / len(file_list)
self.img_grid = img_grid
if show_grid is True:
fig, ax = plt.subplots()
ax.imshow(img_grid, cmap='gray')
ax.set_xlabel('pixels')
ax.set_ylabel('pixels')
plt.title('grid: 10 um Lines; 50 um Spacing')
plt.show()
class fluorescent_particles(object):
def __init__(self, name=None, materials=None, diameter=None,
fluorescence_spectra=None, concentration=None,
electrophoretic_mobility=None, zeta=None):
"""
the details of the fluroescent particles used
:param materials:
:param diameter:
:param fluorescence_spectra:
:param concentration:
:param electrophoretic_mobility:
:param zeta:
"""
self.name = name
self.materials = materials
self.concentration = concentration
self.electrophoretic_mobility = electrophoretic_mobility
self.zeta = zeta
self.diameter = diameter
if diameter:
k_b = 1.3806e-23
T = 298
mu = 0.001
self.diffusivity = k_b * T / (6 * np.pi * mu * diameter / 2)
self.fluorescence_spectra = fluorescence_spectra
class reservoir(object):
def __init__(self, diameter, height, height_of_reservoir=None, material
=None):
"""
describes the micrscope setup
:param type:
:param objective:
"""
g = 9.81
self.material = material
self.diameter = diameter
self.height = height
self.volume = np.pi * self.diameter ** 2 / 4
self.height_of_reservoir = height_of_reservoir
if material and height_of_reservoir:
self.hydrostatic_pressure = (material.density * g * self.
height_of_reservoir)
class fluid_handling_system(object):
def __init__(self, fluid_reservoir=None, all_tubing=None,
onchip_reservoir=None):
"""
describes the fluid handling system
"""
self.fluid_reservoir = fluid_reservoir
self.all_tubing = all_tubing
self.onchip_reservoir = onchip_reservoir
class tubing(object):
def __init__(self, inner_diameter=None, length=None, material=None):
"""
describes each segment of tubing
"""
self.inner_diameter = inner_diameter
self.length = length
self.material = material
class optical_element(object):
def __init__(self, passing_wavelengths=None, reflectivity=None):
"""
this class describes the optical characteristics of any material or element
:param wavelength_bandpass:
"""
self.passing_wavelengths = passing_wavelengths
self.reflectivity = reflectivity
class measurable_quantity(object):
def __init__(self, reference_value=None, measured_value=None):
"""
what value was measured and when
"""
self.reference_value = reference_value
self.measured_value = measured_value
class measurement(object):
def __init__(self, value=None, date=None):
"""
Object for storing measurements
:param value:
:param date:
"""
self.value = value
self.date = date
class electrode_configuration(object):
def __init__(self, material=None, length=None, entrance_length=None):
"""
Object for holding electrode configuration details
:param material:
:param length:
:param entrance_length:
"""
self.material = material
self.length = length
self.entrance_length = entrance_length
class material_solid(object):
def __init__(self, name=None, zeta=None, concentration=None,
index_of_refraction=None, transparency=None, fluorescence_spectra=
None, permittivity=None, conductivity=None, thickness=None,
youngs_modulus=None, poissons_ratio=None, density=None,
dielectric_strength=None, reaction_site_density=None, Ka=None, Kb=
None, width=None, length=None):
"""
everything about a material
:param transparency:
:param fluorescence_spectra:
:param zeta:
"""
self.name = name
self.length = length
self.width = width
self.thickness = thickness
self.density = density
self.concentration = concentration
self.youngs_modulus = youngs_modulus
self.poissons_ratio = poissons_ratio
self.index_of_refraction = index_of_refraction
self.fluorescence_spectra = fluorescence_spectra
self.transparency = transparency
if self.transparency:
self.reflectivity = 1 / self.transparency
self.conductivity = conductivity
if permittivity:
self.permittivity = permittivity
self.zeta = zeta
self.dielectric_strength = dielectric_strength
if reaction_site_density:
self.reaction_site_density = reaction_site_density * 1e+18
self.Ka = Ka
self.Kb = Kb
class material_liquid(object):
def __init__(self, name=None, species=None, concentration=None,
conductivity=None, pH=None, density=None, viscosity=None,
permittivity=None, temperature=None, valence=1.0):
"""
everything about a liquid
:param species:
:param concentration:
:param conductivity:
:param pH:
"""
self.name = name
self.species = species
self.concentration = concentration
self.conductivity = conductivity
if permittivity:
self.permittivity = permittivity
if pH:
self.pH = pH
self.c_H = 10 ** -pH * 1000.0
self.valence = valence
self.density = density
self.viscosity = viscosity
self.temperature = temperature
self.diffusivity = 2e-09
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class bpe(object):
<|reserved_special_token_0|>
class optics(object):
def __init__(self, microscope, fluorescent_particles=None,
calibration_grid=None, pixel_to_micron_scaling=None):
self.microscope = microscope
self.fluorescent_particles = fluorescent_particles
self.calibration_grid = calibration_grid
if self.microscope.objective.magnification == 50:
self.pixel_to_micron_scaling = 0.6
elif self.microscope.objective.magnification == 20:
self.pixel_to_micron_scaling = 1.55
else:
raise ValueError(
'Unable to determine microns/pixels scaling because objective magnification not 50X or 20X'
)
if pixel_to_micron_scaling is not None:
print(
'Manual input of pixel_to_micron_scaling is deprecated. A scaling factor of {} um/pix for {} magnification was instantiated.'
.format(self.pixel_to_micron_scaling, self.microscope.
objective.magnification))
"""
--- I THINK THIS SECTION IS DEPRECATED ---
Notes: deprecated because calculating the scaling factor or entering it manually is too confusing. I have
permanently figured out the correct scaling.
if microscope.objective.pixel_to_micron is not None and pixel_to_micron_scaling is None:
self.pixel_to_micron = microscope.objective.pixel_to_micron
elif microscope.objective.pixel_to_micron is not None and pixel_to_micron_scaling is not None and microscope.objective.pixel_to_micron != pixel_to_micron_scaling:
raise ValueError("Conflicting scaling factors: microscope.objective={}, optics={}".format(microscope.objective.pixel_to_micron, pixel_to_micron_scaling))
elif microscope.objective.pixel_to_micron is None and pixel_to_micron_scaling is not None:
self.pixel_to_micron = pixel_to_micron_scaling
"""
class illumination(object):
def __init__(self, basePath=None, source=None, excitation=None,
emission=None, dichroic=None, illumination_distribution=None,
calculate_illumination_distribution=False, illumPath=None,
illumSavePath=None, illumSaveName=None, showIllumPlot=False,
save_txt=False, save_plot=False, save_image=False):
"""
details about the optical setup
:param source:
:param excitation:
:param emission:
:param dichroic:
"""
self.basePath = basePath
self.source = source
self.excitation_wavelength = excitation
self.emission_wavelength = emission
self.dichroic = dichroic
if illumination_distribution is not None:
self.illumination_distribution = illumination_distribution
elif illumPath is not None:
flatfield = io.imread(illumPath, plugin='tifffile')
if len(np.shape(flatfield)) > 2:
flatfield = np.asarray(np.rint(np.mean(flatfield, axis=0)),
dtype='uint16')
self.illumination_distribution = flatfield
elif calculate_illumination_distribution and illumination_distribution is None:
self.illumination_distribution = measureIlluminationDistributionXY(
basePath=self.basePath, illumPath=illumPath, show_image=
showIllumPlot, save_image=save_image, save_img_type='.tif',
save_txt=save_txt, show_plot=showIllumPlot, save_plot=
save_plot, savePath=illumSavePath, savename=illumSaveName)
else:
self.illumination_distribution = illumination_distribution
self.flatfield = self.illumination_distribution
if self.flatfield is not None:
self.flatfield_mean = np.mean(self.flatfield)
self.flatfield_std = np.std(self.flatfield)
class darkfield(object):
def __init__(self, basePath, darkframePath=None, flip_image_across_axis
=None, show_image=False, save_image=False, save_img_type='.tif',
savePath=None, savename=None, save_plot=False):
"""
details about dark field image
"""
self.basePath = basePath
img, mean, std = calculate_darkfield(self.basePath, darkframePath=
darkframePath, flip_image_axes=flip_image_across_axis,
show_image=show_image, save_image=save_image, save_img_type=
save_img_type, savePath=savePath, savename=savename, save_plot=
save_plot)
self.img = img
self.mean = mean
self.std = std
class microscope(object):
def __init__(self, type, objective, illumination, ccd):
"""
describes the micrscope setup
:param type:
:param objective:
"""
self.type = type
self.objective = objective
self.illumination = illumination
self.ccd = ccd
class ccd(object):
def __init__(self, exposure_time, img_acq_rate, EM_gain, name=
'iXon Ultra 897', img_acq_type='emcdd', darkfield=None, binning=
None, vertical_pixel_shift_speed=5e-07,
horizontal_pixel_shift_speed=1e-07,
horizontal_pixel_shift_rate_bits=14, frame_transfer=True, crop_mode
=False, acquisition_mode='kinetic', triggering='internal',
readout_mode='image', pixels=512, pixel_size=1.6e-05):
"""
describe the CCD class
"""
self.name = name
self.img_acq_type = img_acq_type
self.exposure_time = exposure_time
self.img_acq_rate = img_acq_rate
self.em_gain = EM_gain
self.darkfield = darkfield
self.binning = binning
self.vpss = vertical_pixel_shift_speed
self.hpss = horizontal_pixel_shift_speed
self.hpss_bits = horizontal_pixel_shift_rate_bits
self.frame_transfer = frame_transfer
self.crop_mode = crop_mode
self.acquisition_mode = acquisition_mode
self.triggering = triggering
self.readout_mode = readout_mode
if isinstance(pixels, int):
self.pixels = pixels, pixels
else:
self.pixels = pixels
self.pixel_size = pixel_size
self.image_area = self.pixels[0] * pixel_size, self.pixels[1
] * pixel_size
class objective(object):
def __init__(self, fluoro_particle, name=None, numerical_aperture=None,
magnification=None, basePath=None, channel_height=None,
illumination=None, wavelength=None, microgrid=None,
auto_calc_pix_to_micron_scaling=False, pixel_to_micron=None,
field_number=None, n0=1, show_depth_plot=False, save_depth_plot=False):
"""
Objectives in the Pennathur Lab Dark Room uScope:
20X - LCPlanFL N 20X LCD [LCPLFLN20xLCD]
magnification: 20
numerical_aperture: 0.45
field_number: 26.5
working distance: 7.4 - 8.3 mm
transmittance: 90% @ 425 - 670 nm
correction collar: 0 - 1.2 mm
microns per pixel: 1.55
50X - LCPlanFL N 50x LCD [LCPLFLN50xLCD]
magnification: 50
numerical aperture: 0.7
field number: 26.5
working distance: 2.2 - 3 mm
transmittance: 90% @ 425 - 650 nm
correction collar: 0 - 1.2 mm
microns per pixel: 0.6
Manufacturer website: https://www.olympus-ims.com/en/microscope/lcplfln-lcd/#!cms[focus]=cmsContent11428
"""
self.name = name
if name == 'LCPLFLN20xLCD':
self.magnification = 20
self.numerical_aperture = 0.45
self.field_number = 26.5
self.transmittance = 0.9
self.pixel_to_micron = 1.55
elif name == 'LCPLFLN50xLCD':
self.magnification = 50
self.numerical_aperture = 0.7
self.field_number = 26.5
self.transmittance = 0.9
self.pixel_to_micron = 0.6
else:
self.numerical_aperture = numerical_aperture
self.magnification = magnification
self.field_number = field_number
self._illumination = illumination
if self._illumination is not None:
self._wavelength = self._illumination.emission_wavelength
elif wavelength is not None:
self._wavelength = wavelength
else:
raise ValueError(
'A wavelength is required via the <illumination> class or <wavelength> input parameter'
)
self._pd = fluoro_particle.diameter
self._n0 = n0
self.calculate_depth_of_field()
self.calculate_depth_of_correlation()
if field_number:
self.calculate_field_of_view()
if show_depth_plot or save_depth_plot:
plot_field_depth(depth_of_corr=self.depth_of_correlation,
depth_of_field=self.depth_of_field, show_depth_plot=
show_depth_plot, save_depth_plot=save_depth_plot, basePath=
basePath, savename=None, channel_height=channel_height,
objective=self.magnification)
if auto_calc_pix_to_micron_scaling and self.pixel_to_micron is None:
self.microgrid = microgrid
self.calculate_pixel_to_micron_scaling()
def calculate_field_of_view(self):
self.field_of_view = self.field_number / self.magnification
def calculate_depth_of_field(self, e=1.6e-05, n=1):
"""
e: CCD pixel resolution example: e = 16 um (16 microns is the pixel size)
"""
self.depth_of_field = (self._wavelength * n / self.
numerical_aperture ** 2 + e * n / (self.magnification * self.
numerical_aperture))
def calculate_depth_of_correlation(self, eps=0.01):
n = self._n0
dp = self._pd
NA = self.numerical_aperture
M = self.magnification
lmbda = self._wavelength
depth_of_correlation = calculate_depth_of_correlation(M=M, NA=NA,
dp=dp, n=n, lmbda=lmbda, eps=eps)
self.depth_of_correlation = depth_of_correlation
def calculate_pixel_to_micron_scaling(self):
if self.microgrid is None:
raise ValueError(
'Need objective.microgrid property in order to calculate scaling factor'
)
    @property
    def NA(self):
        """Numerical aperture of the objective (alias for numerical_aperture)."""
        return self.numerical_aperture
    @property
    def M(self):
        """Magnification of the objective (alias for magnification)."""
        return self.magnification
class microgrid(object):

    def __init__(self, gridPath=None, center_to_center_spacing=None,
                 feature_width=None, grid_type='grid', show_grid=False):
        """Calibration micro-grid images for pixel-to-micron scaling.

        Averages all 'grid*.tif' images found under *gridPath* into a single
        frame (multi-page tiffs are averaged over pages first).

        :param center_to_center_spacing: grid line spacing (presumably um — confirm).
        :param feature_width: grid line width (presumably um — confirm).
        :raises ValueError: if gridPath contains no grid*.tif files.
        """
        # Robustness fix: attributes are now always defined, even when no grid
        # path is supplied (previously the object was left with no attributes).
        self.gridPath = gridPath
        self.spacing = center_to_center_spacing
        self.width = feature_width
        self.grid_type = grid_type
        self.img_grid = None
        if gridPath is not None:
            file_list = glob.glob(join(self.gridPath, 'grid*.tif'))
            if len(file_list) < 1:
                raise ValueError('No grid*.tif files found in {}'.format(
                    self.gridPath))
            img_grid = np.zeros(shape=(512, 512))
            for f in file_list:
                img = io.imread(f, plugin='tifffile')
                if len(np.shape(img)) > 2:
                    # Collapse an image stack to its per-pixel mean frame.
                    img = np.mean(img, axis=0)
                img_grid += img
            # Average over all grid images found.
            self.img_grid = img_grid / len(file_list)
            if show_grid is True:
                fig, ax = plt.subplots()
                ax.imshow(self.img_grid, cmap='gray')
                ax.set_xlabel('pixels')
                ax.set_ylabel('pixels')
                plt.title('grid: 10 um Lines; 50 um Spacing')
                plt.show()
class fluorescent_particles(object):

    def __init__(self, name=None, materials=None, diameter=None,
                 fluorescence_spectra=None, concentration=None,
                 electrophoretic_mobility=None, zeta=None):
        """Properties of the fluorescent tracer particles.

        :param diameter: particle diameter (m); when given, the Stokes-Einstein
            diffusivity for water at 298 K is computed and stored.
        :param electrophoretic_mobility: particle electrophoretic mobility.
        :param zeta: zeta potential.
        """
        self.name = name
        self.materials = materials
        self.concentration = concentration
        self.electrophoretic_mobility = electrophoretic_mobility
        self.zeta = zeta
        self.diameter = diameter
        if diameter:
            # Stokes-Einstein: D = k_B * T / (6 * pi * mu * r), assuming water
            # viscosity mu = 1 mPa*s at T = 298 K and particle radius d / 2.
            boltzmann = 1.3806e-23
            temperature = 298
            viscosity = 0.001
            self.diffusivity = boltzmann * temperature / (6 * np.pi *
                viscosity * diameter / 2)
        self.fluorescence_spectra = fluorescence_spectra
class reservoir(object):

    def __init__(self, diameter, height, height_of_reservoir=None, material=None):
        """Cylindrical fluid reservoir.

        :param diameter: inner diameter (m).
        :param height: fluid column height inside the reservoir (m).
        :param height_of_reservoir: elevation of the reservoir (m); together
            with the material density this sets the hydrostatic pressure.
        :param material: liquid in the reservoir (must expose .density).
        """
        g = 9.81  # gravitational acceleration (m/s^2)
        self.material = material
        self.diameter = diameter
        self.height = height
        # Fix: the volume previously omitted the height factor and therefore
        # evaluated to the cross-sectional area (pi * d^2 / 4) only.
        self.volume = np.pi * self.diameter ** 2 / 4 * self.height
        self.height_of_reservoir = height_of_reservoir
        if material and height_of_reservoir:
            # Hydrostatic pressure rho * g * h contributed by the reservoir.
            self.hydrostatic_pressure = (material.density * g * self.
                height_of_reservoir)
class fluid_handling_system(object):

    def __init__(self, fluid_reservoir=None, all_tubing=None,
                 onchip_reservoir=None):
        """Everything between the fluid supply and the chip.

        :param fluid_reservoir: off-chip reservoir object.
        :param all_tubing: collection of tubing segments.
        :param onchip_reservoir: reservoir located on the chip itself.
        """
        self.onchip_reservoir = onchip_reservoir
        self.all_tubing = all_tubing
        self.fluid_reservoir = fluid_reservoir
class tubing(object):

    def __init__(self, inner_diameter=None, length=None, material=None):
        """One segment of tubing in the fluid handling system.

        :param inner_diameter: inner diameter (m).
        :param length: segment length (m).
        :param material: tubing material.
        """
        self.material = material
        self.length = length
        self.inner_diameter = inner_diameter
class optical_element(object):

    def __init__(self, passing_wavelengths=None, reflectivity=None):
        """Optical characteristics of a filter/mirror element.

        :param passing_wavelengths: wavelength band(s) transmitted by the element.
        :param reflectivity: fraction of incident light reflected.
        """
        self.reflectivity = reflectivity
        self.passing_wavelengths = passing_wavelengths
class measurable_quantity(object):

    def __init__(self, reference_value=None, measured_value=None):
        """Pairs a quantity's reference (theoretical) value with its measurement."""
        self.measured_value = measured_value
        self.reference_value = reference_value
class measurement(object):

    def __init__(self, value=None, date=None):
        """A single recorded measurement: its value and when it was taken.

        :param value: measured value.
        :param date: date of the measurement.
        """
        self.date = date
        self.value = value
class electrode_configuration(object):

    def __init__(self, material=None, length=None, entrance_length=None):
        """Driving-electrode configuration.

        :param material: electrode material.
        :param length: electrode separation length (m).
        :param entrance_length: distance from electrode to channel entrance (m).
        """
        self.entrance_length = entrance_length
        self.length = length
        self.material = material
class material_solid(object):

    def __init__(self, name=None, zeta=None, concentration=None,
                 index_of_refraction=None, transparency=None,
                 fluorescence_spectra=None, permittivity=None, conductivity=None,
                 thickness=None, youngs_modulus=None, poissons_ratio=None,
                 density=None, dielectric_strength=None,
                 reaction_site_density=None, Ka=None, Kb=None, width=None,
                 length=None):
        """Solid material: geometry, mechanics, optics, and electrochemistry.

        :param transparency: optical transparency fraction; when given, a
            reflectivity is derived from it.
        :param reaction_site_density: surface site density given in sites/nm^2,
            stored internally in sites/m^2.
        """
        self.name = name
        # Geometry.
        self.length = length
        self.width = width
        self.thickness = thickness
        # Bulk / mechanical properties.
        self.density = density
        self.concentration = concentration
        self.youngs_modulus = youngs_modulus
        self.poissons_ratio = poissons_ratio
        # Optical properties.
        self.index_of_refraction = index_of_refraction
        self.fluorescence_spectra = fluorescence_spectra
        self.transparency = transparency
        # Robustness fix: reflectivity is now always defined (None when unknown).
        # NOTE(review): reflectivity = 1 / transparency exceeds 1 for any
        # transparency < 1, which is non-physical; perhaps 1 - transparency was
        # intended. Preserved as-is pending confirmation.
        self.reflectivity = 1 / self.transparency if self.transparency else None
        # Electrical / electrochemical properties.
        self.conductivity = conductivity
        # Robustness fix: permittivity is now always defined.
        self.permittivity = permittivity if permittivity else None
        self.zeta = zeta
        self.dielectric_strength = dielectric_strength
        # Convert sites/nm^2 -> sites/m^2; always defined (None when not given).
        self.reaction_site_density = (reaction_site_density * 1e+18 if
            reaction_site_density else None)
        self.Ka = Ka
        self.Kb = Kb
class material_liquid(object):

    def __init__(self, name=None, species=None, concentration=None,
                 conductivity=None, pH=None, density=None, viscosity=None,
                 permittivity=None, temperature=None, valence=1.0):
        """Liquid (electrolyte) properties.

        :param pH: solution pH; when given, the proton concentration c_H is
            derived in mol/m^3 (10**-pH mol/L * 1000 L/m^3).
        :param valence: ion valence (default 1.0).
        """
        self.name = name
        self.species = species
        self.concentration = concentration
        self.conductivity = conductivity
        # Robustness fix: these attributes are now always defined (None when
        # the corresponding argument is not supplied).
        self.permittivity = permittivity if permittivity else None
        self.pH = pH if pH else None
        self.c_H = 10 ** -pH * 1000.0 if pH else None
        self.valence = valence
        self.density = density
        self.viscosity = viscosity
        self.temperature = temperature
        # Default small-ion diffusivity (m^2/s); not configurable in the
        # original interface.
        self.diffusivity = 2e-09
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class chip(object):
<|reserved_special_token_0|>
class channel(object):

    def __init__(self, length=None, width=None, height=None,
                 material_bottom_wall_surface=None, material_top_wall_surface=None,
                 material_fluid=None):
        """Microchannel geometry and its bounding/filling materials.

        :param length: channel length (m).
        :param width: channel width (m).
        :param height: channel height (m).
        :param material_bottom_wall_surface: solid material of the bottom wall.
        :param material_top_wall_surface: solid material of the top wall.
        :param material_fluid: liquid filling the channel.
        """
        # Geometric dimensions.
        self.length = length
        self.width = width
        self.height = height
        # Bounding materials and working fluid.
        self.material_fluid = material_fluid
        self.material_top_wall_surface = material_top_wall_surface
        self.material_bottom_wall_surface = material_bottom_wall_surface
class bpe(object):

    def __init__(self, length=None, width=None, height=None, material=None,
                 adhesion_material=None, dielectric_coating=None):
        """Bipolar electrode (BPE) geometry, material, and dielectric coating.

        :param length: BPE length (m); also used to build an x-coordinate axis.
        :param material: electrode material (its thickness, if set, must match
            the BPE height).
        :param dielectric_coating: coating material; defaults to an effectively
            absent 1-pm coating with permittivity 1.
        :raises ValueError: if the material's thickness disagrees with height.
        """
        self.length = length
        # x-coordinates spanning the electrode (robustness fix: previously a
        # default length of None crashed np.linspace).
        self.linspace_x = (np.linspace(-length / 2, length / 2, num=100) if
            length is not None else None)
        self.width = width
        self.height = height
        self.material = material
        # Robustness fix: previously a default material of None raised
        # AttributeError on the thickness check.
        if self.material is not None and self.material.thickness:
            if self.material.thickness != self.height:
                raise ValueError('BPE height must equal BPE material thickness'
                    )
        self.adhesion_material = adhesion_material
        if dielectric_coating:
            self.dielectric_coating = dielectric_coating
        else:
            # Default: an (effectively absent) 1-pm coating with permittivity 1.
            self.dielectric_coating = material_solid(name='no_dielectric',
                permittivity=1, thickness=1e-12, Ka=6, Kb=2,
                reaction_site_density=5)
class optics(object):

    def __init__(self, microscope, fluorescent_particles=None,
                 calibration_grid=None, pixel_to_micron_scaling=None):
        """Bundle of the optical train: microscope, tracers, and calibration.

        The microns-per-pixel scaling is fixed by the objective magnification
        (50X -> 0.6 um/pix, 20X -> 1.55 um/pix). For any other magnification an
        explicit pixel_to_micron_scaling must be supplied.

        :raises ValueError: if no scaling can be determined.
        """
        self.microscope = microscope
        self.fluorescent_particles = fluorescent_particles
        self.calibration_grid = calibration_grid
        magnification = self.microscope.objective.magnification
        if magnification == 50:
            self.pixel_to_micron_scaling = 0.6
        elif magnification == 20:
            self.pixel_to_micron_scaling = 1.55
        elif pixel_to_micron_scaling is not None:
            # Fix: the pixel_to_micron_scaling argument was previously accepted
            # but never used, so any non-20X/50X objective raised unconditionally.
            self.pixel_to_micron_scaling = pixel_to_micron_scaling
        else:
            raise ValueError(
                'Unable to determine microns/pixels scaling because objective magnification not 50X or 20X'
                )
        if pixel_to_micron_scaling is not None and magnification in (20, 50):
            print(
                'Manual input of pixel_to_micron_scaling is deprecated. A scaling factor of {} um/pix for {} magnification was instantiated.'
                .format(self.pixel_to_micron_scaling, self.microscope.
                objective.magnification))
class illumination(object):

    def __init__(self, basePath=None, source=None, excitation=None,
                 emission=None, dichroic=None, illumination_distribution=None,
                 calculate_illumination_distribution=False, illumPath=None,
                 illumSavePath=None, illumSaveName=None, showIllumPlot=False,
                 save_txt=False, save_plot=False, save_image=False):
        """Illumination source, filter wavelengths, and flat-field distribution.

        The flat-field (illumination distribution) is resolved, in order of
        precedence, from: an array passed in directly, an image on disk at
        *illumPath*, or an on-the-fly measurement; otherwise it stays None.
        """
        self.basePath = basePath
        self.source = source
        self.excitation_wavelength = excitation
        self.emission_wavelength = emission
        self.dichroic = dichroic
        if illumination_distribution is not None:
            dist = illumination_distribution
        elif illumPath is not None:
            # Load the flat-field image; average a stack down to one frame.
            dist = io.imread(illumPath, plugin='tifffile')
            if len(np.shape(dist)) > 2:
                dist = np.asarray(np.rint(np.mean(dist, axis=0)), dtype='uint16')
        elif calculate_illumination_distribution:
            dist = measureIlluminationDistributionXY(basePath=self.basePath,
                illumPath=illumPath, show_image=showIllumPlot,
                save_image=save_image, save_img_type='.tif', save_txt=save_txt,
                show_plot=showIllumPlot, save_plot=save_plot,
                savePath=illumSavePath, savename=illumSaveName)
        else:
            dist = illumination_distribution
        self.illumination_distribution = dist
        # The flat-field is an alias of the illumination distribution.
        self.flatfield = dist
        if self.flatfield is not None:
            self.flatfield_mean = np.mean(self.flatfield)
            self.flatfield_std = np.std(self.flatfield)
class darkfield(object):

    def __init__(self, basePath, darkframePath=None, flip_image_across_axis=None,
                 show_image=False, save_image=False, save_img_type='.tif',
                 savePath=None, savename=None, save_plot=False):
        """Dark-field (camera dark frame) image and its summary statistics."""
        self.basePath = basePath
        # Delegate loading/averaging of the dark frames to the module helper,
        # which returns the dark image together with its mean and std intensity.
        dark_img, dark_mean, dark_std = calculate_darkfield(self.basePath,
            darkframePath=darkframePath,
            flip_image_axes=flip_image_across_axis, show_image=show_image,
            save_image=save_image, save_img_type=save_img_type,
            savePath=savePath, savename=savename, save_plot=save_plot)
        self.img = dark_img
        self.mean = dark_mean
        self.std = dark_std
class microscope(object):

    def __init__(self, type, objective, illumination, ccd):
        """Top-level microscope assembly.

        :param type: microscope model/descriptor (e.g. 'Olympus iX73').
        :param objective: objective-lens object.
        :param illumination: illumination/filter-cube object.
        :param ccd: camera object.
        """
        # NOTE: 'type' shadows the builtin but is kept for interface compatibility.
        self.type = type
        self.ccd = ccd
        self.illumination = illumination
        self.objective = objective
class ccd(object):

    def __init__(self, exposure_time, img_acq_rate, EM_gain, name=
        'iXon Ultra 897', img_acq_type='emcdd', darkfield=None, binning=None,
        vertical_pixel_shift_speed=5e-07, horizontal_pixel_shift_speed=1e-07,
        horizontal_pixel_shift_rate_bits=14, frame_transfer=True,
        crop_mode=False, acquisition_mode='kinetic', triggering='internal',
        readout_mode='image', pixels=512, pixel_size=1.6e-05):
        """EMCCD camera settings: acquisition, readout, and sensor geometry.

        :param pixels: sensor size; a single int means a square (n, n) sensor.
        :param pixel_size: physical pixel pitch in meters.
        """
        self.name = name
        self.img_acq_type = img_acq_type
        # Acquisition settings.
        self.exposure_time = exposure_time
        self.img_acq_rate = img_acq_rate
        self.em_gain = EM_gain
        self.darkfield = darkfield
        self.binning = binning
        # Readout / pixel-shift settings.
        self.vpss = vertical_pixel_shift_speed
        self.hpss = horizontal_pixel_shift_speed
        self.hpss_bits = horizontal_pixel_shift_rate_bits
        self.frame_transfer = frame_transfer
        self.crop_mode = crop_mode
        self.acquisition_mode = acquisition_mode
        self.triggering = triggering
        self.readout_mode = readout_mode
        # Normalize a square sensor given as a single int into an (n, n) tuple.
        self.pixels = (pixels, pixels) if isinstance(pixels, int) else pixels
        self.pixel_size = pixel_size
        # Physical sensor area (width, height) in meters.
        self.image_area = (self.pixels[0] * pixel_size,
                           self.pixels[1] * pixel_size)
class objective(object):

    def __init__(self, fluoro_particle, name=None, numerical_aperture=None,
                 magnification=None, basePath=None, channel_height=None,
                 illumination=None, wavelength=None, microgrid=None,
                 auto_calc_pix_to_micron_scaling=False, pixel_to_micron=None,
                 field_number=None, n0=1, show_depth_plot=False, save_depth_plot=False):
        """Microscope objective and derived optical quantities.

        Known objectives (Pennathur Lab dark-room uScope) selected by name:
          * 'LCPLFLN20xLCD': 20X, NA 0.45, field number 26.5, 1.55 um/pixel.
          * 'LCPLFLN50xLCD': 50X, NA 0.7, field number 26.5, 0.6 um/pixel.
        Any other name requires numerical_aperture/magnification (and optionally
        pixel_to_micron) to be given explicitly.

        :param fluoro_particle: fluorescent-particle object; its diameter feeds
            the depth-of-correlation calculation.
        :param illumination: if given, its emission wavelength is used;
            otherwise `wavelength` must be supplied.
        :param n0: refractive index of the immersion medium.
        :raises ValueError: if no wavelength can be determined.
        """
        self.name = name
        if name == 'LCPLFLN20xLCD':
            self.magnification = 20
            self.numerical_aperture = 0.45
            self.field_number = 26.5
            self.transmittance = 0.9
            self.pixel_to_micron = 1.55
        elif name == 'LCPLFLN50xLCD':
            self.magnification = 50
            self.numerical_aperture = 0.7
            self.field_number = 26.5
            self.transmittance = 0.9
            self.pixel_to_micron = 0.6
        else:
            self.numerical_aperture = numerical_aperture
            self.magnification = magnification
            self.field_number = field_number
            # Fix: the custom branch previously never set pixel_to_micron, so
            # the auto-scaling check below raised AttributeError for custom
            # objectives (and the pixel_to_micron argument was ignored).
            self.pixel_to_micron = pixel_to_micron
        self._illumination = illumination
        if self._illumination is not None:
            self._wavelength = self._illumination.emission_wavelength
        elif wavelength is not None:
            self._wavelength = wavelength
        else:
            raise ValueError(
                'A wavelength is required via the <illumination> class or <wavelength> input parameter'
                )
        self._pd = fluoro_particle.diameter
        self._n0 = n0
        self.calculate_depth_of_field()
        self.calculate_depth_of_correlation()
        # Fix: test the resolved attribute rather than the raw parameter, so
        # named objectives (whose field_number parameter defaults to None)
        # also get a field of view.
        if self.field_number:
            self.calculate_field_of_view()
        if show_depth_plot or save_depth_plot:
            plot_field_depth(depth_of_corr=self.depth_of_correlation,
                depth_of_field=self.depth_of_field, show_depth_plot=
                show_depth_plot, save_depth_plot=save_depth_plot, basePath=
                basePath, savename=None, channel_height=channel_height,
                objective=self.magnification)
        if auto_calc_pix_to_micron_scaling and self.pixel_to_micron is None:
            self.microgrid = microgrid
            self.calculate_pixel_to_micron_scaling()

    def calculate_field_of_view(self):
        """Store the field of view at the sample plane (field number / magnification)."""
        self.field_of_view = self.field_number / self.magnification

    def calculate_depth_of_field(self, e=1.6e-05, n=1):
        """Store the depth of field: diffraction-limited plus geometric terms.

        :param e: CCD pixel size in meters (e.g. 16 um).
        :param n: refractive index of the imaging medium.
        """
        self.depth_of_field = (self._wavelength * n / self.
            numerical_aperture ** 2 + e * n / (self.magnification * self.
            numerical_aperture))

    def calculate_depth_of_correlation(self, eps=0.01):
        """Store the depth of correlation via the module-level helper."""
        self.depth_of_correlation = calculate_depth_of_correlation(
            M=self.magnification, NA=self.numerical_aperture, dp=self._pd,
            n=self._n0, lmbda=self._wavelength, eps=eps)

    def calculate_pixel_to_micron_scaling(self):
        """Validate that a calibration microgrid is attached before scaling."""
        if self.microgrid is None:
            raise ValueError(
                'Need objective.microgrid property in order to calculate scaling factor'
                )

    @property
    def NA(self):
        """Numerical aperture (alias)."""
        return self.numerical_aperture

    @property
    def M(self):
        """Magnification (alias)."""
        return self.magnification
class microgrid(object):

    def __init__(self, gridPath=None, center_to_center_spacing=None,
                 feature_width=None, grid_type='grid', show_grid=False):
        """Calibration micro-grid images for pixel-to-micron scaling.

        Averages all 'grid*.tif' images found under *gridPath* into a single
        frame (multi-page tiffs are averaged over pages first).

        :param center_to_center_spacing: grid line spacing (presumably um — confirm).
        :param feature_width: grid line width (presumably um — confirm).
        :raises ValueError: if gridPath contains no grid*.tif files.
        """
        # Robustness fix: attributes are now always defined, even when no grid
        # path is supplied (previously the object was left with no attributes).
        self.gridPath = gridPath
        self.spacing = center_to_center_spacing
        self.width = feature_width
        self.grid_type = grid_type
        self.img_grid = None
        if gridPath is not None:
            file_list = glob.glob(join(self.gridPath, 'grid*.tif'))
            if len(file_list) < 1:
                raise ValueError('No grid*.tif files found in {}'.format(
                    self.gridPath))
            img_grid = np.zeros(shape=(512, 512))
            for f in file_list:
                img = io.imread(f, plugin='tifffile')
                if len(np.shape(img)) > 2:
                    # Collapse an image stack to its per-pixel mean frame.
                    img = np.mean(img, axis=0)
                img_grid += img
            # Average over all grid images found.
            self.img_grid = img_grid / len(file_list)
            if show_grid is True:
                fig, ax = plt.subplots()
                ax.imshow(self.img_grid, cmap='gray')
                ax.set_xlabel('pixels')
                ax.set_ylabel('pixels')
                plt.title('grid: 10 um Lines; 50 um Spacing')
                plt.show()
class fluorescent_particles(object):

    def __init__(self, name=None, materials=None, diameter=None,
                 fluorescence_spectra=None, concentration=None,
                 electrophoretic_mobility=None, zeta=None):
        """Properties of the fluorescent tracer particles.

        :param diameter: particle diameter (m); when given, the Stokes-Einstein
            diffusivity for water at 298 K is computed and stored.
        :param electrophoretic_mobility: particle electrophoretic mobility.
        :param zeta: zeta potential.
        """
        self.name = name
        self.materials = materials
        self.concentration = concentration
        self.electrophoretic_mobility = electrophoretic_mobility
        self.zeta = zeta
        self.diameter = diameter
        if diameter:
            # Stokes-Einstein: D = k_B * T / (6 * pi * mu * r), assuming water
            # viscosity mu = 1 mPa*s at T = 298 K and particle radius d / 2.
            boltzmann = 1.3806e-23
            temperature = 298
            viscosity = 0.001
            self.diffusivity = boltzmann * temperature / (6 * np.pi *
                viscosity * diameter / 2)
        self.fluorescence_spectra = fluorescence_spectra
class reservoir(object):

    def __init__(self, diameter, height, height_of_reservoir=None, material=None):
        """Cylindrical fluid reservoir.

        :param diameter: inner diameter (m).
        :param height: fluid column height inside the reservoir (m).
        :param height_of_reservoir: elevation of the reservoir (m); together
            with the material density this sets the hydrostatic pressure.
        :param material: liquid in the reservoir (must expose .density).
        """
        g = 9.81  # gravitational acceleration (m/s^2)
        self.material = material
        self.diameter = diameter
        self.height = height
        # Fix: the volume previously omitted the height factor and therefore
        # evaluated to the cross-sectional area (pi * d^2 / 4) only.
        self.volume = np.pi * self.diameter ** 2 / 4 * self.height
        self.height_of_reservoir = height_of_reservoir
        if material and height_of_reservoir:
            # Hydrostatic pressure rho * g * h contributed by the reservoir.
            self.hydrostatic_pressure = (material.density * g * self.
                height_of_reservoir)
class fluid_handling_system(object):

    def __init__(self, fluid_reservoir=None, all_tubing=None,
                 onchip_reservoir=None):
        """Everything between the fluid supply and the chip.

        :param fluid_reservoir: off-chip reservoir object.
        :param all_tubing: collection of tubing segments.
        :param onchip_reservoir: reservoir located on the chip itself.
        """
        self.onchip_reservoir = onchip_reservoir
        self.all_tubing = all_tubing
        self.fluid_reservoir = fluid_reservoir
class tubing(object):

    def __init__(self, inner_diameter=None, length=None, material=None):
        """One segment of tubing in the fluid handling system.

        :param inner_diameter: inner diameter (m).
        :param length: segment length (m).
        :param material: tubing material.
        """
        self.material = material
        self.length = length
        self.inner_diameter = inner_diameter
class optical_element(object):

    def __init__(self, passing_wavelengths=None, reflectivity=None):
        """Optical characteristics of a filter/mirror element.

        :param passing_wavelengths: wavelength band(s) transmitted by the element.
        :param reflectivity: fraction of incident light reflected.
        """
        self.reflectivity = reflectivity
        self.passing_wavelengths = passing_wavelengths
class measurable_quantity(object):

    def __init__(self, reference_value=None, measured_value=None):
        """Pairs a quantity's reference (theoretical) value with its measurement."""
        self.measured_value = measured_value
        self.reference_value = reference_value
class measurement(object):

    def __init__(self, value=None, date=None):
        """A single recorded measurement: its value and when it was taken.

        :param value: measured value.
        :param date: date of the measurement.
        """
        self.date = date
        self.value = value
class electrode_configuration(object):

    def __init__(self, material=None, length=None, entrance_length=None):
        """Driving-electrode configuration.

        :param material: electrode material.
        :param length: electrode separation length (m).
        :param entrance_length: distance from electrode to channel entrance (m).
        """
        self.entrance_length = entrance_length
        self.length = length
        self.material = material
class material_solid(object):

    def __init__(self, name=None, zeta=None, concentration=None,
                 index_of_refraction=None, transparency=None,
                 fluorescence_spectra=None, permittivity=None, conductivity=None,
                 thickness=None, youngs_modulus=None, poissons_ratio=None,
                 density=None, dielectric_strength=None,
                 reaction_site_density=None, Ka=None, Kb=None, width=None,
                 length=None):
        """Solid material: geometry, mechanics, optics, and electrochemistry.

        :param transparency: optical transparency fraction; when given, a
            reflectivity is derived from it.
        :param reaction_site_density: surface site density given in sites/nm^2,
            stored internally in sites/m^2.
        """
        self.name = name
        # Geometry.
        self.length = length
        self.width = width
        self.thickness = thickness
        # Bulk / mechanical properties.
        self.density = density
        self.concentration = concentration
        self.youngs_modulus = youngs_modulus
        self.poissons_ratio = poissons_ratio
        # Optical properties.
        self.index_of_refraction = index_of_refraction
        self.fluorescence_spectra = fluorescence_spectra
        self.transparency = transparency
        # Robustness fix: reflectivity is now always defined (None when unknown).
        # NOTE(review): reflectivity = 1 / transparency exceeds 1 for any
        # transparency < 1, which is non-physical; perhaps 1 - transparency was
        # intended. Preserved as-is pending confirmation.
        self.reflectivity = 1 / self.transparency if self.transparency else None
        # Electrical / electrochemical properties.
        self.conductivity = conductivity
        # Robustness fix: permittivity is now always defined.
        self.permittivity = permittivity if permittivity else None
        self.zeta = zeta
        self.dielectric_strength = dielectric_strength
        # Convert sites/nm^2 -> sites/m^2; always defined (None when not given).
        self.reaction_site_density = (reaction_site_density * 1e+18 if
            reaction_site_density else None)
        self.Ka = Ka
        self.Kb = Kb
class material_liquid(object):

    def __init__(self, name=None, species=None, concentration=None,
                 conductivity=None, pH=None, density=None, viscosity=None,
                 permittivity=None, temperature=None, valence=1.0):
        """Liquid (electrolyte) properties.

        :param pH: solution pH; when given, the proton concentration c_H is
            derived in mol/m^3 (10**-pH mol/L * 1000 L/m^3).
        :param valence: ion valence (default 1.0).
        """
        self.name = name
        self.species = species
        self.concentration = concentration
        self.conductivity = conductivity
        # Robustness fix: these attributes are now always defined (None when
        # the corresponding argument is not supplied).
        self.permittivity = permittivity if permittivity else None
        self.pH = pH if pH else None
        self.c_H = 10 ** -pH * 1000.0 if pH else None
        self.valence = valence
        self.density = density
        self.viscosity = viscosity
        self.temperature = temperature
        # Default small-ion diffusivity (m^2/s); not configurable in the
        # original interface.
        self.diffusivity = 2e-09
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class CurlypivTestSetup(object):

    def __init__(self, name, chip, optics, fluid_handling_system):
        """Complete description of an experimental setup.

        Aggregates the four top-level pieces of a Curlypiv experiment:

        :param name: identifier for this setup.
        :param chip: chip object — channel/BPE geometry, wall and fluid
            materials, reservoirs, and electrode configuration.
        :param optics: optics object — microscope (objective, illumination,
            camera), fluorescent particles, and calibration.
        :param fluid_handling_system: tubing and reservoirs connecting the
            fluid supply to the chip.
        """
        self.name = name
        self.chip = chip
        self.optics = optics
        self.fluid_handling_system = fluid_handling_system
class chip(object):

    def __init__(self, channel=None, bpe=None, reservoir=None, electrodes=None,
                 fluid_handling_system=None, material_in_optical_path=None,
                 thickness_in_optical_path=None):
        """Microfluidic chip: channel, BPE, reservoir, electrodes, optics path.

        :param material_in_optical_path: solid material the imaging path crosses.
        :param thickness_in_optical_path: its thickness (m).
        """
        self.channel = channel
        self.bpe = bpe
        # Fix: the reservoir argument was previously accepted but never stored.
        self.reservoir = reservoir
        self.electrodes = electrodes
        self.fluid_handling_system = fluid_handling_system
        self.material_in_optical_path = material_in_optical_path
        self.thickness_in_optical_path = thickness_in_optical_path
class channel(object):

    def __init__(self, length=None, width=None, height=None,
                 material_bottom_wall_surface=None, material_top_wall_surface=None,
                 material_fluid=None):
        """Microchannel geometry and its bounding/filling materials.

        :param length: channel length (m).
        :param width: channel width (m).
        :param height: channel height (m).
        :param material_bottom_wall_surface: solid material of the bottom wall.
        :param material_top_wall_surface: solid material of the top wall.
        :param material_fluid: liquid filling the channel.
        """
        # Geometric dimensions.
        self.length = length
        self.width = width
        self.height = height
        # Bounding materials and working fluid.
        self.material_fluid = material_fluid
        self.material_top_wall_surface = material_top_wall_surface
        self.material_bottom_wall_surface = material_bottom_wall_surface
class bpe(object):

    def __init__(self, length=None, width=None, height=None, material=None,
                 adhesion_material=None, dielectric_coating=None):
        """Bipolar electrode (BPE) geometry, material, and dielectric coating.

        :param length: BPE length (m); also used to build an x-coordinate axis.
        :param material: electrode material (its thickness, if set, must match
            the BPE height).
        :param dielectric_coating: coating material; defaults to an effectively
            absent 1-pm coating with permittivity 1.
        :raises ValueError: if the material's thickness disagrees with height.
        """
        self.length = length
        # x-coordinates spanning the electrode (robustness fix: previously a
        # default length of None crashed np.linspace).
        self.linspace_x = (np.linspace(-length / 2, length / 2, num=100) if
            length is not None else None)
        self.width = width
        self.height = height
        self.material = material
        # Robustness fix: previously a default material of None raised
        # AttributeError on the thickness check.
        if self.material is not None and self.material.thickness:
            if self.material.thickness != self.height:
                raise ValueError('BPE height must equal BPE material thickness'
                    )
        self.adhesion_material = adhesion_material
        if dielectric_coating:
            self.dielectric_coating = dielectric_coating
        else:
            # Default: an (effectively absent) 1-pm coating with permittivity 1.
            self.dielectric_coating = material_solid(name='no_dielectric',
                permittivity=1, thickness=1e-12, Ka=6, Kb=2,
                reaction_site_density=5)
class optics(object):

    def __init__(self, microscope, fluorescent_particles=None,
                 calibration_grid=None, pixel_to_micron_scaling=None):
        """Bundle of the optical train: microscope, tracers, and calibration.

        The microns-per-pixel scaling is fixed by the objective magnification
        (50X -> 0.6 um/pix, 20X -> 1.55 um/pix). For any other magnification an
        explicit pixel_to_micron_scaling must be supplied.

        :raises ValueError: if no scaling can be determined.
        """
        self.microscope = microscope
        self.fluorescent_particles = fluorescent_particles
        self.calibration_grid = calibration_grid
        magnification = self.microscope.objective.magnification
        if magnification == 50:
            self.pixel_to_micron_scaling = 0.6
        elif magnification == 20:
            self.pixel_to_micron_scaling = 1.55
        elif pixel_to_micron_scaling is not None:
            # Fix: the pixel_to_micron_scaling argument was previously accepted
            # but never used, so any non-20X/50X objective raised unconditionally.
            self.pixel_to_micron_scaling = pixel_to_micron_scaling
        else:
            raise ValueError(
                'Unable to determine microns/pixels scaling because objective magnification not 50X or 20X'
                )
        if pixel_to_micron_scaling is not None and magnification in (20, 50):
            print(
                'Manual input of pixel_to_micron_scaling is deprecated. A scaling factor of {} um/pix for {} magnification was instantiated.'
                .format(self.pixel_to_micron_scaling, self.microscope.
                objective.magnification))
class illumination(object):

    def __init__(self, basePath=None, source=None, excitation=None,
                 emission=None, dichroic=None, illumination_distribution=None,
                 calculate_illumination_distribution=False, illumPath=None,
                 illumSavePath=None, illumSaveName=None, showIllumPlot=False,
                 save_txt=False, save_plot=False, save_image=False):
        """Illumination source, filter wavelengths, and flat-field distribution.

        The flat-field (illumination distribution) is resolved, in order of
        precedence, from: an array passed in directly, an image on disk at
        *illumPath*, or an on-the-fly measurement; otherwise it stays None.
        """
        self.basePath = basePath
        self.source = source
        self.excitation_wavelength = excitation
        self.emission_wavelength = emission
        self.dichroic = dichroic
        if illumination_distribution is not None:
            dist = illumination_distribution
        elif illumPath is not None:
            # Load the flat-field image; average a stack down to one frame.
            dist = io.imread(illumPath, plugin='tifffile')
            if len(np.shape(dist)) > 2:
                dist = np.asarray(np.rint(np.mean(dist, axis=0)), dtype='uint16')
        elif calculate_illumination_distribution:
            dist = measureIlluminationDistributionXY(basePath=self.basePath,
                illumPath=illumPath, show_image=showIllumPlot,
                save_image=save_image, save_img_type='.tif', save_txt=save_txt,
                show_plot=showIllumPlot, save_plot=save_plot,
                savePath=illumSavePath, savename=illumSaveName)
        else:
            dist = illumination_distribution
        self.illumination_distribution = dist
        # The flat-field is an alias of the illumination distribution.
        self.flatfield = dist
        if self.flatfield is not None:
            self.flatfield_mean = np.mean(self.flatfield)
            self.flatfield_std = np.std(self.flatfield)
class darkfield(object):

    def __init__(self, basePath, darkframePath=None, flip_image_across_axis=None,
                 show_image=False, save_image=False, save_img_type='.tif',
                 savePath=None, savename=None, save_plot=False):
        """Dark-field (camera dark frame) image and its summary statistics."""
        self.basePath = basePath
        # Delegate loading/averaging of the dark frames to the module helper,
        # which returns the dark image together with its mean and std intensity.
        dark_img, dark_mean, dark_std = calculate_darkfield(self.basePath,
            darkframePath=darkframePath,
            flip_image_axes=flip_image_across_axis, show_image=show_image,
            save_image=save_image, save_img_type=save_img_type,
            savePath=savePath, savename=savename, save_plot=save_plot)
        self.img = dark_img
        self.mean = dark_mean
        self.std = dark_std
class microscope(object):

    def __init__(self, type, objective, illumination, ccd):
        """Top-level microscope assembly.

        :param type: microscope model/descriptor (e.g. 'Olympus iX73').
        :param objective: objective-lens object.
        :param illumination: illumination/filter-cube object.
        :param ccd: camera object.
        """
        # NOTE: 'type' shadows the builtin but is kept for interface compatibility.
        self.type = type
        self.ccd = ccd
        self.illumination = illumination
        self.objective = objective
class ccd(object):

    def __init__(self, exposure_time, img_acq_rate, EM_gain, name=
        'iXon Ultra 897', img_acq_type='emcdd', darkfield=None, binning=None,
        vertical_pixel_shift_speed=5e-07, horizontal_pixel_shift_speed=1e-07,
        horizontal_pixel_shift_rate_bits=14, frame_transfer=True,
        crop_mode=False, acquisition_mode='kinetic', triggering='internal',
        readout_mode='image', pixels=512, pixel_size=1.6e-05):
        """EMCCD camera settings: acquisition, readout, and sensor geometry.

        :param pixels: sensor size; a single int means a square (n, n) sensor.
        :param pixel_size: physical pixel pitch in meters.
        """
        self.name = name
        self.img_acq_type = img_acq_type
        # Acquisition settings.
        self.exposure_time = exposure_time
        self.img_acq_rate = img_acq_rate
        self.em_gain = EM_gain
        self.darkfield = darkfield
        self.binning = binning
        # Readout / pixel-shift settings.
        self.vpss = vertical_pixel_shift_speed
        self.hpss = horizontal_pixel_shift_speed
        self.hpss_bits = horizontal_pixel_shift_rate_bits
        self.frame_transfer = frame_transfer
        self.crop_mode = crop_mode
        self.acquisition_mode = acquisition_mode
        self.triggering = triggering
        self.readout_mode = readout_mode
        # Normalize a square sensor given as a single int into an (n, n) tuple.
        self.pixels = (pixels, pixels) if isinstance(pixels, int) else pixels
        self.pixel_size = pixel_size
        # Physical sensor area (width, height) in meters.
        self.image_area = (self.pixels[0] * pixel_size,
                           self.pixels[1] * pixel_size)
class objective(object):
def __init__(self, fluoro_particle, name=None, numerical_aperture=None,
magnification=None, basePath=None, channel_height=None,
illumination=None, wavelength=None, microgrid=None,
auto_calc_pix_to_micron_scaling=False, pixel_to_micron=None,
field_number=None, n0=1, show_depth_plot=False, save_depth_plot=False):
"""
Objectives in the Pennathur Lab Dark Room uScope:
20X - LCPlanFL N 20X LCD [LCPLFLN20xLCD]
magnification: 20
numerical_aperture: 0.45
field_number: 26.5
working distance: 7.4 - 8.3 mm
transmittance: 90% @ 425 - 670 nm
correction collar: 0 - 1.2 mm
microns per pixel: 1.55
50X - LCPlanFL N 50x LCD [LCPLFLN50xLCD]
magnification: 50
numerical aperture: 0.7
field number: 26.5
working distance: 2.2 - 3 mm
transmittance: 90% @ 425 - 650 nm
correction collar: 0 - 1.2 mm
microns per pixel: 0.6
Manufacturer website: https://www.olympus-ims.com/en/microscope/lcplfln-lcd/#!cms[focus]=cmsContent11428
"""
self.name = name
if name == 'LCPLFLN20xLCD':
self.magnification = 20
self.numerical_aperture = 0.45
self.field_number = 26.5
self.transmittance = 0.9
self.pixel_to_micron = 1.55
elif name == 'LCPLFLN50xLCD':
self.magnification = 50
self.numerical_aperture = 0.7
self.field_number = 26.5
self.transmittance = 0.9
self.pixel_to_micron = 0.6
else:
self.numerical_aperture = numerical_aperture
self.magnification = magnification
self.field_number = field_number
self._illumination = illumination
if self._illumination is not None:
self._wavelength = self._illumination.emission_wavelength
elif wavelength is not None:
self._wavelength = wavelength
else:
raise ValueError(
'A wavelength is required via the <illumination> class or <wavelength> input parameter'
)
self._pd = fluoro_particle.diameter
self._n0 = n0
self.calculate_depth_of_field()
self.calculate_depth_of_correlation()
if field_number:
self.calculate_field_of_view()
if show_depth_plot or save_depth_plot:
plot_field_depth(depth_of_corr=self.depth_of_correlation,
depth_of_field=self.depth_of_field, show_depth_plot=
show_depth_plot, save_depth_plot=save_depth_plot, basePath=
basePath, savename=None, channel_height=channel_height,
objective=self.magnification)
if auto_calc_pix_to_micron_scaling and self.pixel_to_micron is None:
self.microgrid = microgrid
self.calculate_pixel_to_micron_scaling()
def calculate_field_of_view(self):
self.field_of_view = self.field_number / self.magnification
def calculate_depth_of_field(self, e=1.6e-05, n=1):
"""
e: CCD pixel resolution example: e = 16 um (16 microns is the pixel size)
"""
self.depth_of_field = (self._wavelength * n / self.
numerical_aperture ** 2 + e * n / (self.magnification * self.
numerical_aperture))
def calculate_depth_of_correlation(self, eps=0.01):
n = self._n0
dp = self._pd
NA = self.numerical_aperture
M = self.magnification
lmbda = self._wavelength
depth_of_correlation = calculate_depth_of_correlation(M=M, NA=NA,
dp=dp, n=n, lmbda=lmbda, eps=eps)
self.depth_of_correlation = depth_of_correlation
def calculate_pixel_to_micron_scaling(self):
if self.microgrid is None:
raise ValueError(
'Need objective.microgrid property in order to calculate scaling factor'
)
    @property
    def NA(self):
        # shorthand alias for the objective's numerical aperture
        return self.numerical_aperture

    @property
    def M(self):
        # shorthand alias for the objective's magnification
        return self.magnification
class microgrid(object):
    """Calibration-grid image set used for pixel-to-micron scaling.

    :param gridPath: directory holding grid*.tif calibration images
    :param center_to_center_spacing: grid line spacing (um)
    :param feature_width: grid line width (um)
    :param grid_type: grid style, e.g. 'grid'
    :param show_grid: display the averaged grid image
    """

    def __init__(self, gridPath=None, center_to_center_spacing=None,
        feature_width=None, grid_type='grid', show_grid=False):
        # Robustness fix: always define the public attributes so an "empty"
        # microgrid (gridPath=None) does not raise AttributeError later.
        self.gridPath = gridPath
        self.spacing = center_to_center_spacing
        self.width = feature_width
        self.grid_type = grid_type
        self.img_grid = None
        if gridPath is not None:
            # average all grid*.tif images into one calibration frame
            file_list = glob.glob(join(self.gridPath, 'grid*.tif'))
            if len(file_list) < 1:
                raise ValueError('No grid*.tif files found in {}'.format(
                    self.gridPath))
            img_grid = np.zeros(shape=(512, 512))
            for f in file_list:
                img = io.imread(f, plugin='tifffile')
                if len(np.shape(img)) > 2:
                    # collapse an image stack to its mean frame
                    img = np.mean(img, axis=0)
                img_grid += img
            img_grid = img_grid / len(file_list)
            self.img_grid = img_grid
            if show_grid is True:
                fig, ax = plt.subplots()
                ax.imshow(img_grid, cmap='gray')
                ax.set_xlabel('pixels')
                ax.set_ylabel('pixels')
                plt.title('grid: 10 um Lines; 50 um Spacing')
                plt.show()
class fluorescent_particles(object):
    """Tracer-particle properties: size, spectra and electrokinetics.

    When a diameter is given, the Brownian diffusivity is computed from
    the Stokes-Einstein relation for water at 25 C.
    """

    def __init__(self, name=None, materials=None, diameter=None,
        fluorescence_spectra=None, concentration=None,
        electrophoretic_mobility=None, zeta=None):
        self.name = name
        self.materials = materials
        self.concentration = concentration
        self.electrophoretic_mobility = electrophoretic_mobility
        self.zeta = zeta
        self.diameter = diameter
        if diameter:
            # Stokes-Einstein: D = k_B*T / (6*pi*mu*r)
            boltzmann = 1.3806e-23  # (J/K)
            temperature = 298       # (K)
            viscosity = 0.001       # (Pa s), water
            radius = diameter / 2
            self.diffusivity = boltzmann * temperature / (6 * np.pi *
                viscosity * radius)
        self.fluorescence_spectra = fluorescence_spectra
class reservoir(object):
    """A cylindrical fluid reservoir.

    :param diameter: inner diameter (m)
    :param height: fluid column height inside the reservoir (m)
    :param height_of_reservoir: elevation used for the hydrostatic head (m)
    :param material: the held liquid (needs a .density attribute)
    """

    def __init__(self, diameter, height, height_of_reservoir=None, material
        =None):
        g = 9.81  # gravitational acceleration (m/s**2)
        self.material = material
        self.diameter = diameter
        self.height = height
        # Fix: cylinder volume = cross-sectional area * height; the previous
        # expression (pi*d**2/4) was only the cross-sectional area.
        self.volume = np.pi * self.diameter ** 2 / 4 * self.height
        self.height_of_reservoir = height_of_reservoir
        if material and height_of_reservoir:
            # hydrostatic pressure rho*g*h (Pa)
            self.hydrostatic_pressure = (material.density * g * self.
                height_of_reservoir)
class fluid_handling_system(object):
    """Container for everything fluid-related: reservoirs and tubing."""

    def __init__(self, fluid_reservoir=None, all_tubing=None,
        onchip_reservoir=None):
        self.onchip_reservoir = onchip_reservoir
        self.all_tubing = all_tubing
        self.fluid_reservoir = fluid_reservoir
class tubing(object):
    """One segment of tubing in the fluid-handling path."""

    def __init__(self, inner_diameter=None, length=None, material=None):
        # geometry of the segment (m)
        self.length = length
        self.inner_diameter = inner_diameter
        # what the tubing is made of
        self.material = material
class optical_element(object):
    """Optical characteristics of a filter/mirror: passband + reflectivity."""

    def __init__(self, passing_wavelengths=None, reflectivity=None):
        self.reflectivity = reflectivity
        self.passing_wavelengths = passing_wavelengths
class measurable_quantity(object):
    """Pairs a reference (theoretical) value with its measured counterpart."""

    def __init__(self, reference_value=None, measured_value=None):
        self.measured_value = measured_value
        self.reference_value = reference_value
class measurement(object):
    """A single measured value and the date it was taken."""

    def __init__(self, value=None, date=None):
        self.date = date
        self.value = value
class electrode_configuration(object):
    """Electrode layout: material, separation length, entrance length."""

    def __init__(self, material=None, length=None, entrance_length=None):
        self.entrance_length = entrance_length
        self.length = length
        self.material = material
class material_solid(object):
    """Material properties of a solid: geometry, mechanics, optics and
    electrochemistry.

    Note: ``permittivity`` and ``reaction_site_density`` attributes only
    exist when the corresponding argument is truthy (original behavior
    preserved).
    """

    def __init__(self, name=None, zeta=None, concentration=None,
        index_of_refraction=None, transparency=None, fluorescence_spectra=
        None, permittivity=None, conductivity=None, thickness=None,
        youngs_modulus=None, poissons_ratio=None, density=None,
        dielectric_strength=None, reaction_site_density=None, Ka=None, Kb=
        None, width=None, length=None):
        # identity
        self.name = name
        # geometry
        self.length = length
        self.width = width
        self.thickness = thickness
        # mechanical
        self.density = density
        self.concentration = concentration  # for a solid: % by volume
        self.youngs_modulus = youngs_modulus
        self.poissons_ratio = poissons_ratio
        # optical
        self.index_of_refraction = index_of_refraction
        self.fluorescence_spectra = fluorescence_spectra
        self.transparency = transparency
        if self.transparency:
            # NOTE(review): reflectivity modeled as 1/transparency here;
            # physically 1 - transparency would be expected -- confirm.
            self.reflectivity = 1 / self.transparency
        # electrochemical
        self.conductivity = conductivity
        if permittivity:
            self.permittivity = permittivity
        self.zeta = zeta
        self.dielectric_strength = dielectric_strength
        if reaction_site_density:
            # (#/nm^2) converted to (#/m^2): 1 nm^-2 == 1e18 m^-2
            self.reaction_site_density = reaction_site_density * 1e+18
        self.Ka = Ka  # reaction equilibrium constant - upper bound
        self.Kb = Kb  # reaction equilibrium constant - lower bound
class material_liquid(object):
    """Material properties of a working liquid (electrolyte).

    Note: ``permittivity``, ``pH`` and ``c_H`` attributes only exist when
    the corresponding argument is truthy (original behavior preserved).
    """

    def __init__(self, name=None, species=None, concentration=None,
        conductivity=None, pH=None, density=None, viscosity=None,
        permittivity=None, temperature=None, valence=1.0):
        # identity
        self.name = name
        # electro/chemical
        self.species = species
        self.concentration = concentration  # (mmol) = (mmol/L) = (mol/m3)
        self.conductivity = conductivity
        if permittivity:
            self.permittivity = permittivity
        if pH:
            self.pH = pH
            # hydrogen-ion concentration (mol/m3): 10**-pH mol/L * 1e3
            self.c_H = 10 ** -pH * 1000.0
        self.valence = valence
        # mechanical
        self.density = density
        self.viscosity = viscosity
        self.temperature = temperature
        self.diffusivity = 2e-09  # (m^2/s) KCl in DI water [Soni]
<|reserved_special_token_1|>
# test CurlypivSetup
"""
Notes about program
"""
# 1.0 import modules
import numpy as np
from skimage import io
import glob
from os.path import join
import matplotlib.pyplot as plt
from curlypiv.utils.calibrateCamera import measureIlluminationDistributionXY, calculate_depth_of_correlation, calculate_darkfield, plot_field_depth
# 2.0 define class
class CurlypivTestSetup(object):
    """Top-level description of one experimental setup.

    Bundles the main pieces of hardware metadata:
      * chip - the microfluidic chip: solid materials, channel geometry,
        reservoir volume and electrode configuration
      * optics - microscope, objective, illumination (source + filters),
        fluorescent particles and test solution characteristics
      * fluid_handling_system - reservoirs and tubing
    plus a human-readable name for the setup.
    """

    def __init__(self, name, chip, optics, fluid_handling_system):
        self.name = name
        self.chip = chip
        self.optics = optics
        self.fluid_handling_system = fluid_handling_system
class chip(object):
    """The microfluidic chip and everything mounted on it."""

    def __init__(self, channel=None, bpe=None, reservoir=None, electrodes=None, fluid_handling_system=None,
                 material_in_optical_path=None, thickness_in_optical_path=None):
        # NOTE(review): the <reservoir> argument is accepted but never
        # stored -- confirm whether that is intentional.
        self.channel = channel
        self.bpe = bpe
        self.electrodes = electrodes
        self.fluid_handling_system = fluid_handling_system
        # what the imaging light passes through on its way to the detector
        self.material_in_optical_path = material_in_optical_path
        self.thickness_in_optical_path = thickness_in_optical_path
class channel(object):
    """Microchannel geometry and wall/fluid materials."""

    def __init__(self, length=None, width=None, height=None,
                 material_bottom_wall_surface=None, material_top_wall_surface=None, material_fluid=None):
        # geometry (m)
        self.length = length
        self.width = width
        self.height = height
        # wall materials: only electrokinetically relevant data expected
        self.material_bottom_wall_surface = material_bottom_wall_surface
        self.material_top_wall_surface = material_top_wall_surface
        # the working fluid (possibly liquids + fluorescent particles)
        self.material_fluid = material_fluid
class bpe(object):
    """A bipolar electrode (BPE) patterned in the channel.

    :param length/width/height: BPE geometry (m)
    :param material: electrode material (material_solid); when it carries a
        thickness, the thickness must equal the BPE height
    :param adhesion_material: adhesion layer under a thin-film BPE
    :param dielectric_coating: optional dielectric on top of the BPE; when
        absent a 'no_dielectric' stand-in is created
    :raises ValueError: if material thickness and BPE height disagree
    """

    def __init__(self, length=None, width=None, height=None, material=None, adhesion_material=None,
                 dielectric_coating=None):
        self.length = length
        # x-coordinates along the BPE, centered at 0 (for field profiles)
        self.linspace_x = np.linspace(-length / 2, length / 2, num=100)
        self.width = width
        self.height = height
        self.material = material
        # Robustness fix: guard against material=None (the default) before
        # dereferencing material.thickness.
        if self.material is not None and self.material.thickness:
            if self.material.thickness != self.height:
                raise ValueError('BPE height must equal BPE material thickness')
        # adhesion layer used for thin metal film BPE
        self.adhesion_material = adhesion_material
        # dielectric coating on top of BPE
        if dielectric_coating:
            self.dielectric_coating = dielectric_coating
        else:
            self.dielectric_coating = material_solid(name='no_dielectric', permittivity=1, thickness=1e-12, Ka=6, Kb=2, reaction_site_density=5)
class optics(object):
    """Optical train: microscope + tracer particles + calibration grid.

    The pixel-to-micron scaling is fixed by the objective magnification
    (50X -> 0.60 um/pix, 20X -> 1.55 um/pix); any other magnification is
    rejected. Manually supplying a scaling factor is deprecated and only
    triggers an informational message.
    """

    def __init__(self, microscope, fluorescent_particles=None, calibration_grid=None, pixel_to_micron_scaling=None):
        self.microscope = microscope
        self.fluorescent_particles = fluorescent_particles
        self.calibration_grid = calibration_grid
        mag = self.microscope.objective.magnification
        if mag == 50:
            self.pixel_to_micron_scaling = 0.60  # (microns/pixel)
        elif mag == 20:
            self.pixel_to_micron_scaling = 1.55  # (microns/pixel)
        else:
            raise ValueError("Unable to determine microns/pixels scaling because objective magnification not 50X or 20X")
        if pixel_to_micron_scaling is not None:
            print("Manual input of pixel_to_micron_scaling is deprecated. A scaling factor of {} um/pix for {} magnification was instantiated.".format(self.pixel_to_micron_scaling, self.microscope.objective.magnification))
    # --- deprecated ---
    # Manual / cross-checked scaling-factor bookkeeping was removed; the
    # scaling is now fixed by the objective magnification (see __init__).
class illumination(object):
    """Illumination characteristics of the optical setup.

    The flat-field (illumination) distribution can be (1) passed in
    directly, (2) loaded from a tiff at <illumPath>, (3) measured via
    measureIlluminationDistributionXY, or (4) left as None.

    :param source: light source (e.g. Hg lamp)
    :param excitation: excitation wavelength (m)
    :param emission: emission wavelength (m)
    :param dichroic: dichroic mirror element
    """

    def __init__(self, basePath=None, source=None, excitation=None, emission=None, dichroic=None,
                 illumination_distribution=None, calculate_illumination_distribution=False,
                 illumPath=None, illumSavePath=None, illumSaveName=None, showIllumPlot=False,
                 save_txt=False, save_plot=False, save_image=False):
        self.basePath = basePath  # provided by CurlypivTestCollection
        self.source = source
        self.excitation_wavelength = excitation
        self.emission_wavelength = emission
        self.dichroic = dichroic
        if illumination_distribution is not None:
            # (1) distribution supplied directly
            self.illumination_distribution = illumination_distribution
        elif illumPath is not None:
            # (2) load a flat-field image from disk
            flatfield = io.imread(illumPath, plugin='tifffile')
            if len(np.shape(flatfield)) > 2:
                # collapse an image stack to its rounded mean frame
                flatfield = np.asarray(np.rint(np.mean(flatfield, axis=0)), dtype='uint16')
            self.illumination_distribution = flatfield
        elif calculate_illumination_distribution and illumination_distribution is None:
            # (3) measure the distribution from calibration images
            self.illumination_distribution = measureIlluminationDistributionXY(
                basePath=self.basePath, illumPath=illumPath,
                show_image=showIllumPlot, save_image=save_image, save_img_type='.tif',
                save_txt=save_txt, show_plot=showIllumPlot, save_plot=save_plot,
                savePath=illumSavePath, savename=illumSaveName)
        else:
            # (4) nothing known about the illumination
            self.illumination_distribution = illumination_distribution
        self.flatfield = self.illumination_distribution
        if self.flatfield is not None:
            # summary statistics of the flat-field frame
            self.flatfield_mean = np.mean(self.flatfield)
            self.flatfield_std = np.std(self.flatfield)
class darkfield(object):
    """Camera dark-frame image and its summary statistics.

    Delegates to calculate_darkfield, which loads (and optionally saves or
    displays) the dark frame, then stores the image together with its mean
    and standard deviation for background correction.
    """

    def __init__(self, basePath, darkframePath=None, flip_image_across_axis=None, show_image=False,
                 save_image=False, save_img_type='.tif', savePath=None, savename=None, save_plot=False):
        self.basePath = basePath
        img, mean, std = calculate_darkfield(
            self.basePath, darkframePath=darkframePath, flip_image_axes=flip_image_across_axis,
            show_image=show_image, save_image=save_image, save_img_type=save_img_type,
            savePath=savePath, savename=savename, save_plot=save_plot)
        self.img = img
        self.mean = mean
        self.std = std
class microscope(object):
    """The microscope: body type, objective, illumination and camera."""

    def __init__(self, type, objective, illumination, ccd):
        self.type = type  # e.g. 'Olympus iX73'
        self.objective = objective
        self.illumination = illumination
        self.ccd = ccd
class ccd(object):
    """EMCCD camera and its acquisition settings (defaults: iXon Ultra 897)."""

    def __init__(self, exposure_time, img_acq_rate, EM_gain, name='iXon Ultra 897', img_acq_type='emcdd',
                 darkfield=None, binning=None, vertical_pixel_shift_speed=0.5e-6,
                 horizontal_pixel_shift_speed=0.1e-6, horizontal_pixel_shift_rate_bits=14,
                 frame_transfer=True, crop_mode=False, acquisition_mode='kinetic', triggering='internal',
                 readout_mode='image', pixels=512, pixel_size=16e-6):
        # identity and acquisition basics
        self.name = name
        self.img_acq_type = img_acq_type
        self.exposure_time = exposure_time
        self.img_acq_rate = img_acq_rate
        self.em_gain = EM_gain
        self.darkfield = darkfield
        self.binning = binning
        # supporting camera acquisition settings
        self.vpss = vertical_pixel_shift_speed
        self.hpss = horizontal_pixel_shift_speed
        self.hpss_bits = horizontal_pixel_shift_rate_bits
        self.frame_transfer = frame_transfer
        self.crop_mode = crop_mode
        self.acquisition_mode = acquisition_mode
        self.triggering = triggering
        self.readout_mode = readout_mode
        # sensor geometry: a single int means a square sensor
        self.pixels = (pixels, pixels) if isinstance(pixels, int) else pixels
        self.pixel_size = pixel_size
        self.image_area = (self.pixels[0] * pixel_size, self.pixels[1] * pixel_size)
class objective(object):
    """Microscope objective and derived optical quantities.

    Known objectives (Pennathur Lab dark room uScope):
        LCPLFLN20xLCD: 20X, NA 0.45, field number 26.5, 1.55 um/pix
        LCPLFLN50xLCD: 50X, NA 0.70, field number 26.5, 0.60 um/pix
    Manufacturer: https://www.olympus-ims.com/en/microscope/lcplfln-lcd/#!cms[focus]=cmsContent11428

    On construction the field of view, depth of field and depth of
    correlation are computed; the working wavelength comes from the
    <illumination> object's emission wavelength or the explicit
    <wavelength> argument.
    """

    def __init__(self, fluoro_particle, name=None, numerical_aperture=None, magnification=None,
                 basePath=None, channel_height=None, illumination=None, wavelength=None, microgrid=None,
                 auto_calc_pix_to_micron_scaling=False, pixel_to_micron=None, field_number=None, n0=1,
                 show_depth_plot=False, save_depth_plot=False):
        # if a known objective name is entered, pull its specs directly
        self.name = name
        if name == 'LCPLFLN20xLCD':
            self.magnification = 20
            self.numerical_aperture = 0.45
            self.field_number = 26.5
            self.transmittance = 0.90
            self.pixel_to_micron = 1.55
        elif name == 'LCPLFLN50xLCD':
            self.magnification = 50
            self.numerical_aperture = 0.70
            self.field_number = 26.5
            self.transmittance = 0.90
            self.pixel_to_micron = 0.60
        else:
            self.numerical_aperture = numerical_aperture
            self.magnification = magnification
            self.field_number = field_number
            # Fix: previously left unset for unnamed objectives, which made
            # the auto-scaling check at the bottom raise AttributeError.
            self.pixel_to_micron = pixel_to_micron
        # general terms: prefer the illumination source's emission wavelength
        self._illumination = illumination
        if self._illumination is not None:
            self._wavelength = self._illumination.emission_wavelength
        elif wavelength is not None:
            self._wavelength = wavelength
        else:
            raise ValueError("A wavelength is required via the <illumination> class or <wavelength> input parameter")
        self._pd = fluoro_particle.diameter
        self._n0 = n0  # refractive index of the immersion medium
        self.calculate_depth_of_field()
        self.calculate_depth_of_correlation()
        if field_number:
            self.calculate_field_of_view()
        if show_depth_plot or save_depth_plot:
            plot_field_depth(depth_of_corr=self.depth_of_correlation, depth_of_field=self.depth_of_field,
                             show_depth_plot=show_depth_plot, save_depth_plot=save_depth_plot,
                             basePath=basePath, savename=None, channel_height=channel_height,
                             objective=self.magnification)
        # grids and scaling factors
        if auto_calc_pix_to_micron_scaling and self.pixel_to_micron is None:
            self.microgrid = microgrid
            self.calculate_pixel_to_micron_scaling()

    def calculate_field_of_view(self):
        """Field of view (mm): field number / magnification."""
        self.field_of_view = self.field_number / self.magnification

    def calculate_depth_of_field(self, e=16e-6, n=1):
        """Depth of field (m).

        e: CCD pixel resolution, e.g. 16 um pixel size
        n: refractive index of the immersion medium
        """
        self.depth_of_field = (self._wavelength * n / self.numerical_aperture ** 2 +
                               e * n / (self.magnification * self.numerical_aperture))

    def calculate_depth_of_correlation(self, eps=0.01):
        """Depth of correlation (m) via the calibrateCamera helper."""
        n = self._n0
        dp = self._pd
        NA = self.numerical_aperture
        M = self.magnification
        lmbda = self._wavelength
        # delegate to the shared helper from curlypiv.utils.calibrateCamera
        self.depth_of_correlation = calculate_depth_of_correlation(M=M, NA=NA, dp=dp, n=n,
                                                                   lmbda=lmbda, eps=eps)

    def calculate_pixel_to_micron_scaling(self):
        """Placeholder: derive um/pix scaling from a microgrid image set."""
        if self.microgrid is None:
            raise ValueError("Need objective.microgrid property in order to calculate scaling factor")
        # script to calculate scaling factor from grid would go here

    @property
    def NA(self):
        """Numerical aperture."""
        return self.numerical_aperture

    @property
    def M(self):
        """Magnification."""
        return self.magnification
class microgrid(object):
    """Calibration-grid image set used for pixel-to-micron scaling.

    :param gridPath: directory holding grid*.tif calibration images
    :param center_to_center_spacing: grid line spacing (um)
    :param feature_width: grid line width (um)
    :param grid_type: grid style, e.g. 'grid'
    :param show_grid: display the averaged grid image
    """

    def __init__(self, gridPath=None, center_to_center_spacing=None, feature_width=None, grid_type='grid', show_grid=False):
        # Robustness fix: always define the public attributes so an "empty"
        # microgrid (gridPath=None) does not raise AttributeError later.
        self.gridPath = gridPath
        self.spacing = center_to_center_spacing
        self.width = feature_width
        self.grid_type = grid_type
        self.img_grid = None
        if gridPath is not None:
            # average all grid*.tif images into one calibration frame
            file_list = glob.glob(join(self.gridPath, 'grid*.tif'))
            if len(file_list) < 1:
                raise ValueError("No grid*.tif files found in {}".format(self.gridPath))
            img_grid = np.zeros(shape=(512, 512))
            for f in file_list:
                img = io.imread(f, plugin='tifffile')
                if len(np.shape(img)) > 2:
                    # collapse an image stack to its mean frame
                    img = np.mean(img, axis=0)
                img_grid += img
            img_grid = img_grid / len(file_list)
            self.img_grid = img_grid
            if show_grid is True:
                fig, ax = plt.subplots()
                ax.imshow(img_grid, cmap='gray')
                ax.set_xlabel('pixels')
                ax.set_ylabel('pixels')
                plt.title('grid: 10 um Lines; 50 um Spacing')
                plt.show()
class fluorescent_particles(object):
    """Tracer-particle properties: size, spectra and electrokinetics.

    When a diameter is given, the Brownian diffusivity is computed from
    the Stokes-Einstein relation for water at 25 C.
    """

    def __init__(self, name=None, materials=None, diameter=None, fluorescence_spectra=None,
                 concentration=None, electrophoretic_mobility=None, zeta=None):
        self.name = name
        self.materials = materials
        self.concentration = concentration
        self.electrophoretic_mobility = electrophoretic_mobility
        self.zeta = zeta
        self.diameter = diameter
        if diameter:
            # Stokes-Einstein: D = k_B*T / (6*pi*mu*r)
            boltzmann = 1.3806e-23  # (J/K)
            temperature = 298       # (K)
            viscosity = 0.001       # (Pa s), water
            radius = diameter / 2
            self.diffusivity = boltzmann * temperature / (6 * np.pi * viscosity * radius)
        self.fluorescence_spectra = fluorescence_spectra
class reservoir(object):
    """A cylindrical fluid reservoir.

    :param diameter: inner diameter (m)
    :param height: fluid column height inside the reservoir (m)
    :param height_of_reservoir: elevation used for the hydrostatic head (m)
    :param material: the held liquid (needs a .density attribute)
    """

    def __init__(self, diameter, height, height_of_reservoir=None, material=None):
        g = 9.81  # gravitational acceleration (m/s**2)
        self.material = material
        self.diameter = diameter
        self.height = height
        # Fix: cylinder volume = cross-sectional area * height; the previous
        # expression (pi*d**2/4) was only the cross-sectional area.
        self.volume = np.pi * self.diameter**2 / 4 * self.height
        self.height_of_reservoir = height_of_reservoir
        if material and height_of_reservoir:
            # hydrostatic pressure rho*g*h (Pa)
            self.hydrostatic_pressure = material.density * g * self.height_of_reservoir
class fluid_handling_system(object):
    """Container for everything fluid-related: reservoirs and tubing."""

    def __init__(self, fluid_reservoir=None, all_tubing=None, onchip_reservoir=None):
        self.onchip_reservoir = onchip_reservoir
        self.all_tubing = all_tubing
        self.fluid_reservoir = fluid_reservoir
class tubing(object):
    """One segment of tubing in the fluid-handling path."""

    def __init__(self, inner_diameter=None, length=None, material=None):
        # geometry of the segment (m)
        self.length = length
        self.inner_diameter = inner_diameter
        # what the tubing is made of
        self.material = material
class optical_element(object):
    """Optical characteristics of a filter/mirror: passband + reflectivity."""

    def __init__(self, passing_wavelengths=None, reflectivity=None):
        self.reflectivity = reflectivity
        self.passing_wavelengths = passing_wavelengths
class measurable_quantity(object):
    """Pairs a reference (theoretical) value with its measured counterpart."""

    def __init__(self, reference_value=None, measured_value=None):
        self.measured_value = measured_value
        self.reference_value = reference_value
class measurement(object):
    """A single measured value and the date it was taken."""

    def __init__(self, value=None, date=None):
        self.date = date
        self.value = value
class electrode_configuration(object):
    """Electrode layout: material, separation length, entrance length."""

    def __init__(self, material=None, length=None, entrance_length=None):
        self.entrance_length = entrance_length
        self.length = length
        self.material = material
class material_solid(object):
    """Material properties of a solid: geometry, mechanics, optics and
    electrochemistry.

    Note: ``permittivity`` and ``reaction_site_density`` attributes only
    exist when the corresponding argument is truthy (original behavior
    preserved).
    """

    def __init__(self, name=None, zeta=None, concentration=None, index_of_refraction=None,
                 transparency=None, fluorescence_spectra=None, permittivity=None, conductivity=None,
                 thickness=None, youngs_modulus=None, poissons_ratio=None, density=None,
                 dielectric_strength=None, reaction_site_density=None, Ka=None, Kb=None,
                 width=None, length=None):
        # identity
        self.name = name
        # geometry
        self.length = length
        self.width = width
        self.thickness = thickness
        # mechanical
        self.density = density
        self.concentration = concentration  # for a solid: % by volume
        self.youngs_modulus = youngs_modulus
        self.poissons_ratio = poissons_ratio
        # optical
        self.index_of_refraction = index_of_refraction
        self.fluorescence_spectra = fluorescence_spectra
        self.transparency = transparency
        if self.transparency:
            # NOTE(review): reflectivity modeled as 1/transparency here;
            # physically 1 - transparency would be expected -- confirm.
            self.reflectivity = 1 / self.transparency
        # electrochemical
        self.conductivity = conductivity
        if permittivity:
            self.permittivity = permittivity
        self.zeta = zeta
        self.dielectric_strength = dielectric_strength
        if reaction_site_density:
            # (#/nm^2) converted to (#/m^2): 1 nm^-2 == 1e18 m^-2 (see Squires)
            self.reaction_site_density = reaction_site_density * 1e18
        self.Ka = Ka  # reaction equilibrium constant - upper bound
        self.Kb = Kb  # reaction equilibrium constant - lower bound
class material_liquid(object):
    """Material properties of a working liquid (electrolyte).

    Note: ``permittivity``, ``pH`` and ``c_H`` attributes only exist when
    the corresponding argument is truthy (original behavior preserved).
    """

    def __init__(self, name=None, species=None, concentration=None, conductivity=None, pH=None,
                 density=None, viscosity=None, permittivity=None, temperature=None, valence=1.0):
        # identity
        self.name = name
        # electro/chemical
        self.species = species
        self.concentration = concentration  # (mmol) = (mmol/L) = (mol/m3)
        self.conductivity = conductivity
        if permittivity:
            self.permittivity = permittivity
        if pH:
            self.pH = pH
            # hydrogen-ion concentration (mol/m3): 10**-pH mol/L * 1e3
            self.c_H = 10**-pH * 1e3
        self.valence = valence
        # mechanical
        self.density = density
        self.viscosity = viscosity
        self.temperature = temperature
        self.diffusivity = 2e-9  # (m^2/s) diffusivity of KCl in DI water [Soni]
|
flexible
|
{
"blob_id": "6ca7b896cc20220f790c06d4ba08fef7bda8400f",
"index": 3301,
"step-1": "<mask token>\n\n\nclass illumination(object):\n <mask token>\n\n\nclass darkfield(object):\n\n def __init__(self, basePath, darkframePath=None, flip_image_across_axis\n =None, show_image=False, save_image=False, save_img_type='.tif',\n savePath=None, savename=None, save_plot=False):\n \"\"\"\n details about dark field image\n\n \"\"\"\n self.basePath = basePath\n img, mean, std = calculate_darkfield(self.basePath, darkframePath=\n darkframePath, flip_image_axes=flip_image_across_axis,\n show_image=show_image, save_image=save_image, save_img_type=\n save_img_type, savePath=savePath, savename=savename, save_plot=\n save_plot)\n self.img = img\n self.mean = mean\n self.std = std\n\n\nclass microscope(object):\n\n def __init__(self, type, objective, illumination, ccd):\n \"\"\"\n describes the micrscope setup\n :param type:\n :param objective:\n \"\"\"\n self.type = type\n self.objective = objective\n self.illumination = illumination\n self.ccd = ccd\n\n\nclass ccd(object):\n\n def __init__(self, exposure_time, img_acq_rate, EM_gain, name=\n 'iXon Ultra 897', img_acq_type='emcdd', darkfield=None, binning=\n None, vertical_pixel_shift_speed=5e-07,\n horizontal_pixel_shift_speed=1e-07,\n horizontal_pixel_shift_rate_bits=14, frame_transfer=True, crop_mode\n =False, acquisition_mode='kinetic', triggering='internal',\n readout_mode='image', pixels=512, pixel_size=1.6e-05):\n \"\"\"\n describe the CCD class\n \"\"\"\n self.name = name\n self.img_acq_type = img_acq_type\n self.exposure_time = exposure_time\n self.img_acq_rate = img_acq_rate\n self.em_gain = EM_gain\n self.darkfield = darkfield\n self.binning = binning\n self.vpss = vertical_pixel_shift_speed\n self.hpss = horizontal_pixel_shift_speed\n self.hpss_bits = horizontal_pixel_shift_rate_bits\n self.frame_transfer = frame_transfer\n self.crop_mode = crop_mode\n self.acquisition_mode = acquisition_mode\n self.triggering = triggering\n self.readout_mode = readout_mode\n if isinstance(pixels, int):\n 
self.pixels = pixels, pixels\n else:\n self.pixels = pixels\n self.pixel_size = pixel_size\n self.image_area = self.pixels[0] * pixel_size, self.pixels[1\n ] * pixel_size\n\n\nclass objective(object):\n\n def __init__(self, fluoro_particle, name=None, numerical_aperture=None,\n magnification=None, basePath=None, channel_height=None,\n illumination=None, wavelength=None, microgrid=None,\n auto_calc_pix_to_micron_scaling=False, pixel_to_micron=None,\n field_number=None, n0=1, show_depth_plot=False, save_depth_plot=False):\n \"\"\"\n\n Objectives in the Pennathur Lab Dark Room uScope:\n\n 20X - LCPlanFL N 20X LCD [LCPLFLN20xLCD]\n magnification: 20\n numerical_aperture: 0.45\n field_number: 26.5\n working distance: 7.4 - 8.3 mm\n transmittance: 90% @ 425 - 670 nm\n correction collar: 0 - 1.2 mm\n microns per pixel: 1.55\n 50X - LCPlanFL N 50x LCD [LCPLFLN50xLCD]\n magnification: 50\n numerical aperture: 0.7\n field number: 26.5\n working distance: 2.2 - 3 mm\n transmittance: 90% @ 425 - 650 nm\n correction collar: 0 - 1.2 mm\n microns per pixel: 0.6\n\n Manufacturer website: https://www.olympus-ims.com/en/microscope/lcplfln-lcd/#!cms[focus]=cmsContent11428\n \"\"\"\n self.name = name\n if name == 'LCPLFLN20xLCD':\n self.magnification = 20\n self.numerical_aperture = 0.45\n self.field_number = 26.5\n self.transmittance = 0.9\n self.pixel_to_micron = 1.55\n elif name == 'LCPLFLN50xLCD':\n self.magnification = 50\n self.numerical_aperture = 0.7\n self.field_number = 26.5\n self.transmittance = 0.9\n self.pixel_to_micron = 0.6\n else:\n self.numerical_aperture = numerical_aperture\n self.magnification = magnification\n self.field_number = field_number\n self._illumination = illumination\n if self._illumination is not None:\n self._wavelength = self._illumination.emission_wavelength\n elif wavelength is not None:\n self._wavelength = wavelength\n else:\n raise ValueError(\n 'A wavelength is required via the <illumination> class or <wavelength> input parameter'\n )\n 
self._pd = fluoro_particle.diameter\n self._n0 = n0\n self.calculate_depth_of_field()\n self.calculate_depth_of_correlation()\n if field_number:\n self.calculate_field_of_view()\n if show_depth_plot or save_depth_plot:\n plot_field_depth(depth_of_corr=self.depth_of_correlation,\n depth_of_field=self.depth_of_field, show_depth_plot=\n show_depth_plot, save_depth_plot=save_depth_plot, basePath=\n basePath, savename=None, channel_height=channel_height,\n objective=self.magnification)\n if auto_calc_pix_to_micron_scaling and self.pixel_to_micron is None:\n self.microgrid = microgrid\n self.calculate_pixel_to_micron_scaling()\n\n def calculate_field_of_view(self):\n self.field_of_view = self.field_number / self.magnification\n\n def calculate_depth_of_field(self, e=1.6e-05, n=1):\n \"\"\"\n e: CCD pixel resolution example: e = 16 um (16 microns is the pixel size)\n \"\"\"\n self.depth_of_field = (self._wavelength * n / self.\n numerical_aperture ** 2 + e * n / (self.magnification * self.\n numerical_aperture))\n\n def calculate_depth_of_correlation(self, eps=0.01):\n n = self._n0\n dp = self._pd\n NA = self.numerical_aperture\n M = self.magnification\n lmbda = self._wavelength\n depth_of_correlation = calculate_depth_of_correlation(M=M, NA=NA,\n dp=dp, n=n, lmbda=lmbda, eps=eps)\n self.depth_of_correlation = depth_of_correlation\n\n def calculate_pixel_to_micron_scaling(self):\n if self.microgrid is None:\n raise ValueError(\n 'Need objective.microgrid property in order to calculate scaling factor'\n )\n\n @property\n def NA(self):\n return self.numerical_aperture\n\n @property\n def M(self):\n return self.magnification\n\n\nclass microgrid(object):\n\n def __init__(self, gridPath=None, center_to_center_spacing=None,\n feature_width=None, grid_type='grid', show_grid=False):\n \"\"\"\n this class holds images for the microgrid and performs pixel to micron scaling calculations\n \"\"\"\n if gridPath is not None:\n self.gridPath = gridPath\n self.spacing = 
center_to_center_spacing\n self.width = feature_width\n self.grid_type = grid_type\n file_list = glob.glob(join(self.gridPath, 'grid*.tif'))\n if len(file_list) < 1:\n raise ValueError('No grid*.tif files found in {}'.format(\n self.gridPath))\n img_grid = np.zeros(shape=(512, 512))\n for f in file_list:\n img = io.imread(f, plugin='tifffile')\n if len(np.shape(img)) > 2:\n img = np.mean(img, axis=0)\n img_grid += img\n img_grid = img_grid / len(file_list)\n self.img_grid = img_grid\n if show_grid is True:\n fig, ax = plt.subplots()\n ax.imshow(img_grid, cmap='gray')\n ax.set_xlabel('pixels')\n ax.set_ylabel('pixels')\n plt.title('grid: 10 um Lines; 50 um Spacing')\n plt.show()\n\n\nclass fluorescent_particles(object):\n\n def __init__(self, name=None, materials=None, diameter=None,\n fluorescence_spectra=None, concentration=None,\n electrophoretic_mobility=None, zeta=None):\n \"\"\"\n the details of the fluroescent particles used\n :param materials:\n :param diameter:\n :param fluorescence_spectra:\n :param concentration:\n :param electrophoretic_mobility:\n :param zeta:\n \"\"\"\n self.name = name\n self.materials = materials\n self.concentration = concentration\n self.electrophoretic_mobility = electrophoretic_mobility\n self.zeta = zeta\n self.diameter = diameter\n if diameter:\n k_b = 1.3806e-23\n T = 298\n mu = 0.001\n self.diffusivity = k_b * T / (6 * np.pi * mu * diameter / 2)\n self.fluorescence_spectra = fluorescence_spectra\n\n\nclass reservoir(object):\n\n def __init__(self, diameter, height, height_of_reservoir=None, material\n =None):\n \"\"\"\n describes the micrscope setup\n :param type:\n :param objective:\n \"\"\"\n g = 9.81\n self.material = material\n self.diameter = diameter\n self.height = height\n self.volume = np.pi * self.diameter ** 2 / 4\n self.height_of_reservoir = height_of_reservoir\n if material and height_of_reservoir:\n self.hydrostatic_pressure = (material.density * g * self.\n height_of_reservoir)\n\n\nclass 
fluid_handling_system(object):\n\n def __init__(self, fluid_reservoir=None, all_tubing=None,\n onchip_reservoir=None):\n \"\"\"\n describes the fluid handling system\n \"\"\"\n self.fluid_reservoir = fluid_reservoir\n self.all_tubing = all_tubing\n self.onchip_reservoir = onchip_reservoir\n\n\nclass tubing(object):\n\n def __init__(self, inner_diameter=None, length=None, material=None):\n \"\"\"\n describes each segment of tubing\n\n \"\"\"\n self.inner_diameter = inner_diameter\n self.length = length\n self.material = material\n\n\nclass optical_element(object):\n\n def __init__(self, passing_wavelengths=None, reflectivity=None):\n \"\"\"\n this class describes the optical characteristics of any material or element\n :param wavelength_bandpass:\n \"\"\"\n self.passing_wavelengths = passing_wavelengths\n self.reflectivity = reflectivity\n\n\nclass measurable_quantity(object):\n\n def __init__(self, reference_value=None, measured_value=None):\n \"\"\"\n what value was measured and when\n \"\"\"\n self.reference_value = reference_value\n self.measured_value = measured_value\n\n\nclass measurement(object):\n\n def __init__(self, value=None, date=None):\n \"\"\"\n Object for storing measurements\n :param value:\n :param date:\n \"\"\"\n self.value = value\n self.date = date\n\n\nclass electrode_configuration(object):\n\n def __init__(self, material=None, length=None, entrance_length=None):\n \"\"\"\n Object for holding electrode configuration details\n :param material:\n :param length:\n :param entrance_length:\n \"\"\"\n self.material = material\n self.length = length\n self.entrance_length = entrance_length\n\n\nclass material_solid(object):\n\n def __init__(self, name=None, zeta=None, concentration=None,\n index_of_refraction=None, transparency=None, fluorescence_spectra=\n None, permittivity=None, conductivity=None, thickness=None,\n youngs_modulus=None, poissons_ratio=None, density=None,\n dielectric_strength=None, reaction_site_density=None, Ka=None, Kb=\n None, 
width=None, length=None):\n \"\"\"\n everything about a material\n :param transparency:\n :param fluorescence_spectra:\n :param zeta:\n \"\"\"\n self.name = name\n self.length = length\n self.width = width\n self.thickness = thickness\n self.density = density\n self.concentration = concentration\n self.youngs_modulus = youngs_modulus\n self.poissons_ratio = poissons_ratio\n self.index_of_refraction = index_of_refraction\n self.fluorescence_spectra = fluorescence_spectra\n self.transparency = transparency\n if self.transparency:\n self.reflectivity = 1 / self.transparency\n self.conductivity = conductivity\n if permittivity:\n self.permittivity = permittivity\n self.zeta = zeta\n self.dielectric_strength = dielectric_strength\n if reaction_site_density:\n self.reaction_site_density = reaction_site_density * 1e+18\n self.Ka = Ka\n self.Kb = Kb\n\n\nclass material_liquid(object):\n\n def __init__(self, name=None, species=None, concentration=None,\n conductivity=None, pH=None, density=None, viscosity=None,\n permittivity=None, temperature=None, valence=1.0):\n \"\"\"\n everything about a liquid\n :param species:\n :param concentration:\n :param conductivity:\n :param pH:\n \"\"\"\n self.name = name\n self.species = species\n self.concentration = concentration\n self.conductivity = conductivity\n if permittivity:\n self.permittivity = permittivity\n if pH:\n self.pH = pH\n self.c_H = 10 ** -pH * 1000.0\n self.valence = valence\n self.density = density\n self.viscosity = viscosity\n self.temperature = temperature\n self.diffusivity = 2e-09\n",
"step-2": "<mask token>\n\n\nclass bpe(object):\n <mask token>\n\n\nclass optics(object):\n\n def __init__(self, microscope, fluorescent_particles=None,\n calibration_grid=None, pixel_to_micron_scaling=None):\n self.microscope = microscope\n self.fluorescent_particles = fluorescent_particles\n self.calibration_grid = calibration_grid\n if self.microscope.objective.magnification == 50:\n self.pixel_to_micron_scaling = 0.6\n elif self.microscope.objective.magnification == 20:\n self.pixel_to_micron_scaling = 1.55\n else:\n raise ValueError(\n 'Unable to determine microns/pixels scaling because objective magnification not 50X or 20X'\n )\n if pixel_to_micron_scaling is not None:\n print(\n 'Manual input of pixel_to_micron_scaling is deprecated. A scaling factor of {} um/pix for {} magnification was instantiated.'\n .format(self.pixel_to_micron_scaling, self.microscope.\n objective.magnification))\n \"\"\"\n --- I THINK THIS SECTION IS DEPRECATED ---\n Notes: deprecated because calculating the scaling factor or entering it manually is too confusing. 
I have\n permanently figured out the correct scaling.\n \n if microscope.objective.pixel_to_micron is not None and pixel_to_micron_scaling is None:\n self.pixel_to_micron = microscope.objective.pixel_to_micron\n elif microscope.objective.pixel_to_micron is not None and pixel_to_micron_scaling is not None and microscope.objective.pixel_to_micron != pixel_to_micron_scaling:\n raise ValueError(\"Conflicting scaling factors: microscope.objective={}, optics={}\".format(microscope.objective.pixel_to_micron, pixel_to_micron_scaling))\n elif microscope.objective.pixel_to_micron is None and pixel_to_micron_scaling is not None:\n self.pixel_to_micron = pixel_to_micron_scaling\n \"\"\"\n\n\nclass illumination(object):\n\n def __init__(self, basePath=None, source=None, excitation=None,\n emission=None, dichroic=None, illumination_distribution=None,\n calculate_illumination_distribution=False, illumPath=None,\n illumSavePath=None, illumSaveName=None, showIllumPlot=False,\n save_txt=False, save_plot=False, save_image=False):\n \"\"\"\n details about the optical setup\n :param source:\n :param excitation:\n :param emission:\n :param dichroic:\n \"\"\"\n self.basePath = basePath\n self.source = source\n self.excitation_wavelength = excitation\n self.emission_wavelength = emission\n self.dichroic = dichroic\n if illumination_distribution is not None:\n self.illumination_distribution = illumination_distribution\n elif illumPath is not None:\n flatfield = io.imread(illumPath, plugin='tifffile')\n if len(np.shape(flatfield)) > 2:\n flatfield = np.asarray(np.rint(np.mean(flatfield, axis=0)),\n dtype='uint16')\n self.illumination_distribution = flatfield\n elif calculate_illumination_distribution and illumination_distribution is None:\n self.illumination_distribution = measureIlluminationDistributionXY(\n basePath=self.basePath, illumPath=illumPath, show_image=\n showIllumPlot, save_image=save_image, save_img_type='.tif',\n save_txt=save_txt, show_plot=showIllumPlot, save_plot=\n 
save_plot, savePath=illumSavePath, savename=illumSaveName)\n else:\n self.illumination_distribution = illumination_distribution\n self.flatfield = self.illumination_distribution\n if self.flatfield is not None:\n self.flatfield_mean = np.mean(self.flatfield)\n self.flatfield_std = np.std(self.flatfield)\n\n\nclass darkfield(object):\n\n def __init__(self, basePath, darkframePath=None, flip_image_across_axis\n =None, show_image=False, save_image=False, save_img_type='.tif',\n savePath=None, savename=None, save_plot=False):\n \"\"\"\n details about dark field image\n\n \"\"\"\n self.basePath = basePath\n img, mean, std = calculate_darkfield(self.basePath, darkframePath=\n darkframePath, flip_image_axes=flip_image_across_axis,\n show_image=show_image, save_image=save_image, save_img_type=\n save_img_type, savePath=savePath, savename=savename, save_plot=\n save_plot)\n self.img = img\n self.mean = mean\n self.std = std\n\n\nclass microscope(object):\n\n def __init__(self, type, objective, illumination, ccd):\n \"\"\"\n describes the micrscope setup\n :param type:\n :param objective:\n \"\"\"\n self.type = type\n self.objective = objective\n self.illumination = illumination\n self.ccd = ccd\n\n\nclass ccd(object):\n\n def __init__(self, exposure_time, img_acq_rate, EM_gain, name=\n 'iXon Ultra 897', img_acq_type='emcdd', darkfield=None, binning=\n None, vertical_pixel_shift_speed=5e-07,\n horizontal_pixel_shift_speed=1e-07,\n horizontal_pixel_shift_rate_bits=14, frame_transfer=True, crop_mode\n =False, acquisition_mode='kinetic', triggering='internal',\n readout_mode='image', pixels=512, pixel_size=1.6e-05):\n \"\"\"\n describe the CCD class\n \"\"\"\n self.name = name\n self.img_acq_type = img_acq_type\n self.exposure_time = exposure_time\n self.img_acq_rate = img_acq_rate\n self.em_gain = EM_gain\n self.darkfield = darkfield\n self.binning = binning\n self.vpss = vertical_pixel_shift_speed\n self.hpss = horizontal_pixel_shift_speed\n self.hpss_bits = 
horizontal_pixel_shift_rate_bits\n self.frame_transfer = frame_transfer\n self.crop_mode = crop_mode\n self.acquisition_mode = acquisition_mode\n self.triggering = triggering\n self.readout_mode = readout_mode\n if isinstance(pixels, int):\n self.pixels = pixels, pixels\n else:\n self.pixels = pixels\n self.pixel_size = pixel_size\n self.image_area = self.pixels[0] * pixel_size, self.pixels[1\n ] * pixel_size\n\n\nclass objective(object):\n\n def __init__(self, fluoro_particle, name=None, numerical_aperture=None,\n magnification=None, basePath=None, channel_height=None,\n illumination=None, wavelength=None, microgrid=None,\n auto_calc_pix_to_micron_scaling=False, pixel_to_micron=None,\n field_number=None, n0=1, show_depth_plot=False, save_depth_plot=False):\n \"\"\"\n\n Objectives in the Pennathur Lab Dark Room uScope:\n\n 20X - LCPlanFL N 20X LCD [LCPLFLN20xLCD]\n magnification: 20\n numerical_aperture: 0.45\n field_number: 26.5\n working distance: 7.4 - 8.3 mm\n transmittance: 90% @ 425 - 670 nm\n correction collar: 0 - 1.2 mm\n microns per pixel: 1.55\n 50X - LCPlanFL N 50x LCD [LCPLFLN50xLCD]\n magnification: 50\n numerical aperture: 0.7\n field number: 26.5\n working distance: 2.2 - 3 mm\n transmittance: 90% @ 425 - 650 nm\n correction collar: 0 - 1.2 mm\n microns per pixel: 0.6\n\n Manufacturer website: https://www.olympus-ims.com/en/microscope/lcplfln-lcd/#!cms[focus]=cmsContent11428\n \"\"\"\n self.name = name\n if name == 'LCPLFLN20xLCD':\n self.magnification = 20\n self.numerical_aperture = 0.45\n self.field_number = 26.5\n self.transmittance = 0.9\n self.pixel_to_micron = 1.55\n elif name == 'LCPLFLN50xLCD':\n self.magnification = 50\n self.numerical_aperture = 0.7\n self.field_number = 26.5\n self.transmittance = 0.9\n self.pixel_to_micron = 0.6\n else:\n self.numerical_aperture = numerical_aperture\n self.magnification = magnification\n self.field_number = field_number\n self._illumination = illumination\n if self._illumination is not None:\n 
self._wavelength = self._illumination.emission_wavelength\n elif wavelength is not None:\n self._wavelength = wavelength\n else:\n raise ValueError(\n 'A wavelength is required via the <illumination> class or <wavelength> input parameter'\n )\n self._pd = fluoro_particle.diameter\n self._n0 = n0\n self.calculate_depth_of_field()\n self.calculate_depth_of_correlation()\n if field_number:\n self.calculate_field_of_view()\n if show_depth_plot or save_depth_plot:\n plot_field_depth(depth_of_corr=self.depth_of_correlation,\n depth_of_field=self.depth_of_field, show_depth_plot=\n show_depth_plot, save_depth_plot=save_depth_plot, basePath=\n basePath, savename=None, channel_height=channel_height,\n objective=self.magnification)\n if auto_calc_pix_to_micron_scaling and self.pixel_to_micron is None:\n self.microgrid = microgrid\n self.calculate_pixel_to_micron_scaling()\n\n def calculate_field_of_view(self):\n self.field_of_view = self.field_number / self.magnification\n\n def calculate_depth_of_field(self, e=1.6e-05, n=1):\n \"\"\"\n e: CCD pixel resolution example: e = 16 um (16 microns is the pixel size)\n \"\"\"\n self.depth_of_field = (self._wavelength * n / self.\n numerical_aperture ** 2 + e * n / (self.magnification * self.\n numerical_aperture))\n\n def calculate_depth_of_correlation(self, eps=0.01):\n n = self._n0\n dp = self._pd\n NA = self.numerical_aperture\n M = self.magnification\n lmbda = self._wavelength\n depth_of_correlation = calculate_depth_of_correlation(M=M, NA=NA,\n dp=dp, n=n, lmbda=lmbda, eps=eps)\n self.depth_of_correlation = depth_of_correlation\n\n def calculate_pixel_to_micron_scaling(self):\n if self.microgrid is None:\n raise ValueError(\n 'Need objective.microgrid property in order to calculate scaling factor'\n )\n\n @property\n def NA(self):\n return self.numerical_aperture\n\n @property\n def M(self):\n return self.magnification\n\n\nclass microgrid(object):\n\n def __init__(self, gridPath=None, center_to_center_spacing=None,\n 
feature_width=None, grid_type='grid', show_grid=False):\n \"\"\"\n this class holds images for the microgrid and performs pixel to micron scaling calculations\n \"\"\"\n if gridPath is not None:\n self.gridPath = gridPath\n self.spacing = center_to_center_spacing\n self.width = feature_width\n self.grid_type = grid_type\n file_list = glob.glob(join(self.gridPath, 'grid*.tif'))\n if len(file_list) < 1:\n raise ValueError('No grid*.tif files found in {}'.format(\n self.gridPath))\n img_grid = np.zeros(shape=(512, 512))\n for f in file_list:\n img = io.imread(f, plugin='tifffile')\n if len(np.shape(img)) > 2:\n img = np.mean(img, axis=0)\n img_grid += img\n img_grid = img_grid / len(file_list)\n self.img_grid = img_grid\n if show_grid is True:\n fig, ax = plt.subplots()\n ax.imshow(img_grid, cmap='gray')\n ax.set_xlabel('pixels')\n ax.set_ylabel('pixels')\n plt.title('grid: 10 um Lines; 50 um Spacing')\n plt.show()\n\n\nclass fluorescent_particles(object):\n\n def __init__(self, name=None, materials=None, diameter=None,\n fluorescence_spectra=None, concentration=None,\n electrophoretic_mobility=None, zeta=None):\n \"\"\"\n the details of the fluroescent particles used\n :param materials:\n :param diameter:\n :param fluorescence_spectra:\n :param concentration:\n :param electrophoretic_mobility:\n :param zeta:\n \"\"\"\n self.name = name\n self.materials = materials\n self.concentration = concentration\n self.electrophoretic_mobility = electrophoretic_mobility\n self.zeta = zeta\n self.diameter = diameter\n if diameter:\n k_b = 1.3806e-23\n T = 298\n mu = 0.001\n self.diffusivity = k_b * T / (6 * np.pi * mu * diameter / 2)\n self.fluorescence_spectra = fluorescence_spectra\n\n\nclass reservoir(object):\n\n def __init__(self, diameter, height, height_of_reservoir=None, material\n =None):\n \"\"\"\n describes the micrscope setup\n :param type:\n :param objective:\n \"\"\"\n g = 9.81\n self.material = material\n self.diameter = diameter\n self.height = height\n 
self.volume = np.pi * self.diameter ** 2 / 4\n self.height_of_reservoir = height_of_reservoir\n if material and height_of_reservoir:\n self.hydrostatic_pressure = (material.density * g * self.\n height_of_reservoir)\n\n\nclass fluid_handling_system(object):\n\n def __init__(self, fluid_reservoir=None, all_tubing=None,\n onchip_reservoir=None):\n \"\"\"\n describes the fluid handling system\n \"\"\"\n self.fluid_reservoir = fluid_reservoir\n self.all_tubing = all_tubing\n self.onchip_reservoir = onchip_reservoir\n\n\nclass tubing(object):\n\n def __init__(self, inner_diameter=None, length=None, material=None):\n \"\"\"\n describes each segment of tubing\n\n \"\"\"\n self.inner_diameter = inner_diameter\n self.length = length\n self.material = material\n\n\nclass optical_element(object):\n\n def __init__(self, passing_wavelengths=None, reflectivity=None):\n \"\"\"\n this class describes the optical characteristics of any material or element\n :param wavelength_bandpass:\n \"\"\"\n self.passing_wavelengths = passing_wavelengths\n self.reflectivity = reflectivity\n\n\nclass measurable_quantity(object):\n\n def __init__(self, reference_value=None, measured_value=None):\n \"\"\"\n what value was measured and when\n \"\"\"\n self.reference_value = reference_value\n self.measured_value = measured_value\n\n\nclass measurement(object):\n\n def __init__(self, value=None, date=None):\n \"\"\"\n Object for storing measurements\n :param value:\n :param date:\n \"\"\"\n self.value = value\n self.date = date\n\n\nclass electrode_configuration(object):\n\n def __init__(self, material=None, length=None, entrance_length=None):\n \"\"\"\n Object for holding electrode configuration details\n :param material:\n :param length:\n :param entrance_length:\n \"\"\"\n self.material = material\n self.length = length\n self.entrance_length = entrance_length\n\n\nclass material_solid(object):\n\n def __init__(self, name=None, zeta=None, concentration=None,\n index_of_refraction=None, 
transparency=None, fluorescence_spectra=\n None, permittivity=None, conductivity=None, thickness=None,\n youngs_modulus=None, poissons_ratio=None, density=None,\n dielectric_strength=None, reaction_site_density=None, Ka=None, Kb=\n None, width=None, length=None):\n \"\"\"\n everything about a material\n :param transparency:\n :param fluorescence_spectra:\n :param zeta:\n \"\"\"\n self.name = name\n self.length = length\n self.width = width\n self.thickness = thickness\n self.density = density\n self.concentration = concentration\n self.youngs_modulus = youngs_modulus\n self.poissons_ratio = poissons_ratio\n self.index_of_refraction = index_of_refraction\n self.fluorescence_spectra = fluorescence_spectra\n self.transparency = transparency\n if self.transparency:\n self.reflectivity = 1 / self.transparency\n self.conductivity = conductivity\n if permittivity:\n self.permittivity = permittivity\n self.zeta = zeta\n self.dielectric_strength = dielectric_strength\n if reaction_site_density:\n self.reaction_site_density = reaction_site_density * 1e+18\n self.Ka = Ka\n self.Kb = Kb\n\n\nclass material_liquid(object):\n\n def __init__(self, name=None, species=None, concentration=None,\n conductivity=None, pH=None, density=None, viscosity=None,\n permittivity=None, temperature=None, valence=1.0):\n \"\"\"\n everything about a liquid\n :param species:\n :param concentration:\n :param conductivity:\n :param pH:\n \"\"\"\n self.name = name\n self.species = species\n self.concentration = concentration\n self.conductivity = conductivity\n if permittivity:\n self.permittivity = permittivity\n if pH:\n self.pH = pH\n self.c_H = 10 ** -pH * 1000.0\n self.valence = valence\n self.density = density\n self.viscosity = viscosity\n self.temperature = temperature\n self.diffusivity = 2e-09\n",
"step-3": "<mask token>\n\n\nclass chip(object):\n <mask token>\n\n\nclass channel(object):\n\n def __init__(self, length=None, width=None, height=None,\n material_bottom_wall_surface=None, material_top_wall_surface=None,\n material_fluid=None):\n \"\"\"\n Everything important about the chip\n \"\"\"\n self.length = length\n self.width = width\n self.height = height\n self.material_bottom_wall_surface = material_bottom_wall_surface\n self.material_top_wall_surface = material_top_wall_surface\n self.material_fluid = material_fluid\n\n\nclass bpe(object):\n\n def __init__(self, length=None, width=None, height=None, material=None,\n adhesion_material=None, dielectric_coating=None):\n \"\"\"\n Everything important about the chip\n \"\"\"\n self.length = length\n self.linspace_x = np.linspace(-length / 2, length / 2, num=100)\n self.width = width\n self.height = height\n self.material = material\n if self.material.thickness:\n if self.material.thickness != self.height:\n raise ValueError('BPE height must equal BPE material thickness'\n )\n self.adhesion_material = adhesion_material\n if dielectric_coating:\n self.dielectric_coating = dielectric_coating\n else:\n self.dielectric_coating = material_solid(name='no_dielectric',\n permittivity=1, thickness=1e-12, Ka=6, Kb=2,\n reaction_site_density=5)\n\n\nclass optics(object):\n\n def __init__(self, microscope, fluorescent_particles=None,\n calibration_grid=None, pixel_to_micron_scaling=None):\n self.microscope = microscope\n self.fluorescent_particles = fluorescent_particles\n self.calibration_grid = calibration_grid\n if self.microscope.objective.magnification == 50:\n self.pixel_to_micron_scaling = 0.6\n elif self.microscope.objective.magnification == 20:\n self.pixel_to_micron_scaling = 1.55\n else:\n raise ValueError(\n 'Unable to determine microns/pixels scaling because objective magnification not 50X or 20X'\n )\n if pixel_to_micron_scaling is not None:\n print(\n 'Manual input of pixel_to_micron_scaling is 
deprecated. A scaling factor of {} um/pix for {} magnification was instantiated.'\n .format(self.pixel_to_micron_scaling, self.microscope.\n objective.magnification))\n \"\"\"\n --- I THINK THIS SECTION IS DEPRECATED ---\n Notes: deprecated because calculating the scaling factor or entering it manually is too confusing. I have\n permanently figured out the correct scaling.\n \n if microscope.objective.pixel_to_micron is not None and pixel_to_micron_scaling is None:\n self.pixel_to_micron = microscope.objective.pixel_to_micron\n elif microscope.objective.pixel_to_micron is not None and pixel_to_micron_scaling is not None and microscope.objective.pixel_to_micron != pixel_to_micron_scaling:\n raise ValueError(\"Conflicting scaling factors: microscope.objective={}, optics={}\".format(microscope.objective.pixel_to_micron, pixel_to_micron_scaling))\n elif microscope.objective.pixel_to_micron is None and pixel_to_micron_scaling is not None:\n self.pixel_to_micron = pixel_to_micron_scaling\n \"\"\"\n\n\nclass illumination(object):\n\n def __init__(self, basePath=None, source=None, excitation=None,\n emission=None, dichroic=None, illumination_distribution=None,\n calculate_illumination_distribution=False, illumPath=None,\n illumSavePath=None, illumSaveName=None, showIllumPlot=False,\n save_txt=False, save_plot=False, save_image=False):\n \"\"\"\n details about the optical setup\n :param source:\n :param excitation:\n :param emission:\n :param dichroic:\n \"\"\"\n self.basePath = basePath\n self.source = source\n self.excitation_wavelength = excitation\n self.emission_wavelength = emission\n self.dichroic = dichroic\n if illumination_distribution is not None:\n self.illumination_distribution = illumination_distribution\n elif illumPath is not None:\n flatfield = io.imread(illumPath, plugin='tifffile')\n if len(np.shape(flatfield)) > 2:\n flatfield = np.asarray(np.rint(np.mean(flatfield, axis=0)),\n dtype='uint16')\n self.illumination_distribution = flatfield\n elif 
calculate_illumination_distribution and illumination_distribution is None:\n self.illumination_distribution = measureIlluminationDistributionXY(\n basePath=self.basePath, illumPath=illumPath, show_image=\n showIllumPlot, save_image=save_image, save_img_type='.tif',\n save_txt=save_txt, show_plot=showIllumPlot, save_plot=\n save_plot, savePath=illumSavePath, savename=illumSaveName)\n else:\n self.illumination_distribution = illumination_distribution\n self.flatfield = self.illumination_distribution\n if self.flatfield is not None:\n self.flatfield_mean = np.mean(self.flatfield)\n self.flatfield_std = np.std(self.flatfield)\n\n\nclass darkfield(object):\n\n def __init__(self, basePath, darkframePath=None, flip_image_across_axis\n =None, show_image=False, save_image=False, save_img_type='.tif',\n savePath=None, savename=None, save_plot=False):\n \"\"\"\n details about dark field image\n\n \"\"\"\n self.basePath = basePath\n img, mean, std = calculate_darkfield(self.basePath, darkframePath=\n darkframePath, flip_image_axes=flip_image_across_axis,\n show_image=show_image, save_image=save_image, save_img_type=\n save_img_type, savePath=savePath, savename=savename, save_plot=\n save_plot)\n self.img = img\n self.mean = mean\n self.std = std\n\n\nclass microscope(object):\n\n def __init__(self, type, objective, illumination, ccd):\n \"\"\"\n describes the micrscope setup\n :param type:\n :param objective:\n \"\"\"\n self.type = type\n self.objective = objective\n self.illumination = illumination\n self.ccd = ccd\n\n\nclass ccd(object):\n\n def __init__(self, exposure_time, img_acq_rate, EM_gain, name=\n 'iXon Ultra 897', img_acq_type='emcdd', darkfield=None, binning=\n None, vertical_pixel_shift_speed=5e-07,\n horizontal_pixel_shift_speed=1e-07,\n horizontal_pixel_shift_rate_bits=14, frame_transfer=True, crop_mode\n =False, acquisition_mode='kinetic', triggering='internal',\n readout_mode='image', pixels=512, pixel_size=1.6e-05):\n \"\"\"\n describe the CCD class\n 
\"\"\"\n self.name = name\n self.img_acq_type = img_acq_type\n self.exposure_time = exposure_time\n self.img_acq_rate = img_acq_rate\n self.em_gain = EM_gain\n self.darkfield = darkfield\n self.binning = binning\n self.vpss = vertical_pixel_shift_speed\n self.hpss = horizontal_pixel_shift_speed\n self.hpss_bits = horizontal_pixel_shift_rate_bits\n self.frame_transfer = frame_transfer\n self.crop_mode = crop_mode\n self.acquisition_mode = acquisition_mode\n self.triggering = triggering\n self.readout_mode = readout_mode\n if isinstance(pixels, int):\n self.pixels = pixels, pixels\n else:\n self.pixels = pixels\n self.pixel_size = pixel_size\n self.image_area = self.pixels[0] * pixel_size, self.pixels[1\n ] * pixel_size\n\n\nclass objective(object):\n\n def __init__(self, fluoro_particle, name=None, numerical_aperture=None,\n magnification=None, basePath=None, channel_height=None,\n illumination=None, wavelength=None, microgrid=None,\n auto_calc_pix_to_micron_scaling=False, pixel_to_micron=None,\n field_number=None, n0=1, show_depth_plot=False, save_depth_plot=False):\n \"\"\"\n\n Objectives in the Pennathur Lab Dark Room uScope:\n\n 20X - LCPlanFL N 20X LCD [LCPLFLN20xLCD]\n magnification: 20\n numerical_aperture: 0.45\n field_number: 26.5\n working distance: 7.4 - 8.3 mm\n transmittance: 90% @ 425 - 670 nm\n correction collar: 0 - 1.2 mm\n microns per pixel: 1.55\n 50X - LCPlanFL N 50x LCD [LCPLFLN50xLCD]\n magnification: 50\n numerical aperture: 0.7\n field number: 26.5\n working distance: 2.2 - 3 mm\n transmittance: 90% @ 425 - 650 nm\n correction collar: 0 - 1.2 mm\n microns per pixel: 0.6\n\n Manufacturer website: https://www.olympus-ims.com/en/microscope/lcplfln-lcd/#!cms[focus]=cmsContent11428\n \"\"\"\n self.name = name\n if name == 'LCPLFLN20xLCD':\n self.magnification = 20\n self.numerical_aperture = 0.45\n self.field_number = 26.5\n self.transmittance = 0.9\n self.pixel_to_micron = 1.55\n elif name == 'LCPLFLN50xLCD':\n self.magnification = 50\n 
self.numerical_aperture = 0.7\n self.field_number = 26.5\n self.transmittance = 0.9\n self.pixel_to_micron = 0.6\n else:\n self.numerical_aperture = numerical_aperture\n self.magnification = magnification\n self.field_number = field_number\n self._illumination = illumination\n if self._illumination is not None:\n self._wavelength = self._illumination.emission_wavelength\n elif wavelength is not None:\n self._wavelength = wavelength\n else:\n raise ValueError(\n 'A wavelength is required via the <illumination> class or <wavelength> input parameter'\n )\n self._pd = fluoro_particle.diameter\n self._n0 = n0\n self.calculate_depth_of_field()\n self.calculate_depth_of_correlation()\n if field_number:\n self.calculate_field_of_view()\n if show_depth_plot or save_depth_plot:\n plot_field_depth(depth_of_corr=self.depth_of_correlation,\n depth_of_field=self.depth_of_field, show_depth_plot=\n show_depth_plot, save_depth_plot=save_depth_plot, basePath=\n basePath, savename=None, channel_height=channel_height,\n objective=self.magnification)\n if auto_calc_pix_to_micron_scaling and self.pixel_to_micron is None:\n self.microgrid = microgrid\n self.calculate_pixel_to_micron_scaling()\n\n def calculate_field_of_view(self):\n self.field_of_view = self.field_number / self.magnification\n\n def calculate_depth_of_field(self, e=1.6e-05, n=1):\n \"\"\"\n e: CCD pixel resolution example: e = 16 um (16 microns is the pixel size)\n \"\"\"\n self.depth_of_field = (self._wavelength * n / self.\n numerical_aperture ** 2 + e * n / (self.magnification * self.\n numerical_aperture))\n\n def calculate_depth_of_correlation(self, eps=0.01):\n n = self._n0\n dp = self._pd\n NA = self.numerical_aperture\n M = self.magnification\n lmbda = self._wavelength\n depth_of_correlation = calculate_depth_of_correlation(M=M, NA=NA,\n dp=dp, n=n, lmbda=lmbda, eps=eps)\n self.depth_of_correlation = depth_of_correlation\n\n def calculate_pixel_to_micron_scaling(self):\n if self.microgrid is None:\n raise 
ValueError(\n 'Need objective.microgrid property in order to calculate scaling factor'\n )\n\n @property\n def NA(self):\n return self.numerical_aperture\n\n @property\n def M(self):\n return self.magnification\n\n\nclass microgrid(object):\n\n def __init__(self, gridPath=None, center_to_center_spacing=None,\n feature_width=None, grid_type='grid', show_grid=False):\n \"\"\"\n this class holds images for the microgrid and performs pixel to micron scaling calculations\n \"\"\"\n if gridPath is not None:\n self.gridPath = gridPath\n self.spacing = center_to_center_spacing\n self.width = feature_width\n self.grid_type = grid_type\n file_list = glob.glob(join(self.gridPath, 'grid*.tif'))\n if len(file_list) < 1:\n raise ValueError('No grid*.tif files found in {}'.format(\n self.gridPath))\n img_grid = np.zeros(shape=(512, 512))\n for f in file_list:\n img = io.imread(f, plugin='tifffile')\n if len(np.shape(img)) > 2:\n img = np.mean(img, axis=0)\n img_grid += img\n img_grid = img_grid / len(file_list)\n self.img_grid = img_grid\n if show_grid is True:\n fig, ax = plt.subplots()\n ax.imshow(img_grid, cmap='gray')\n ax.set_xlabel('pixels')\n ax.set_ylabel('pixels')\n plt.title('grid: 10 um Lines; 50 um Spacing')\n plt.show()\n\n\nclass fluorescent_particles(object):\n\n def __init__(self, name=None, materials=None, diameter=None,\n fluorescence_spectra=None, concentration=None,\n electrophoretic_mobility=None, zeta=None):\n \"\"\"\n the details of the fluroescent particles used\n :param materials:\n :param diameter:\n :param fluorescence_spectra:\n :param concentration:\n :param electrophoretic_mobility:\n :param zeta:\n \"\"\"\n self.name = name\n self.materials = materials\n self.concentration = concentration\n self.electrophoretic_mobility = electrophoretic_mobility\n self.zeta = zeta\n self.diameter = diameter\n if diameter:\n k_b = 1.3806e-23\n T = 298\n mu = 0.001\n self.diffusivity = k_b * T / (6 * np.pi * mu * diameter / 2)\n self.fluorescence_spectra = 
fluorescence_spectra\n\n\nclass reservoir(object):\n\n def __init__(self, diameter, height, height_of_reservoir=None, material\n =None):\n \"\"\"\n describes the micrscope setup\n :param type:\n :param objective:\n \"\"\"\n g = 9.81\n self.material = material\n self.diameter = diameter\n self.height = height\n self.volume = np.pi * self.diameter ** 2 / 4\n self.height_of_reservoir = height_of_reservoir\n if material and height_of_reservoir:\n self.hydrostatic_pressure = (material.density * g * self.\n height_of_reservoir)\n\n\nclass fluid_handling_system(object):\n\n def __init__(self, fluid_reservoir=None, all_tubing=None,\n onchip_reservoir=None):\n \"\"\"\n describes the fluid handling system\n \"\"\"\n self.fluid_reservoir = fluid_reservoir\n self.all_tubing = all_tubing\n self.onchip_reservoir = onchip_reservoir\n\n\nclass tubing(object):\n\n def __init__(self, inner_diameter=None, length=None, material=None):\n \"\"\"\n describes each segment of tubing\n\n \"\"\"\n self.inner_diameter = inner_diameter\n self.length = length\n self.material = material\n\n\nclass optical_element(object):\n\n def __init__(self, passing_wavelengths=None, reflectivity=None):\n \"\"\"\n this class describes the optical characteristics of any material or element\n :param wavelength_bandpass:\n \"\"\"\n self.passing_wavelengths = passing_wavelengths\n self.reflectivity = reflectivity\n\n\nclass measurable_quantity(object):\n\n def __init__(self, reference_value=None, measured_value=None):\n \"\"\"\n what value was measured and when\n \"\"\"\n self.reference_value = reference_value\n self.measured_value = measured_value\n\n\nclass measurement(object):\n\n def __init__(self, value=None, date=None):\n \"\"\"\n Object for storing measurements\n :param value:\n :param date:\n \"\"\"\n self.value = value\n self.date = date\n\n\nclass electrode_configuration(object):\n\n def __init__(self, material=None, length=None, entrance_length=None):\n \"\"\"\n Object for holding electrode 
configuration details\n :param material:\n :param length:\n :param entrance_length:\n \"\"\"\n self.material = material\n self.length = length\n self.entrance_length = entrance_length\n\n\nclass material_solid(object):\n\n def __init__(self, name=None, zeta=None, concentration=None,\n index_of_refraction=None, transparency=None, fluorescence_spectra=\n None, permittivity=None, conductivity=None, thickness=None,\n youngs_modulus=None, poissons_ratio=None, density=None,\n dielectric_strength=None, reaction_site_density=None, Ka=None, Kb=\n None, width=None, length=None):\n \"\"\"\n everything about a material\n :param transparency:\n :param fluorescence_spectra:\n :param zeta:\n \"\"\"\n self.name = name\n self.length = length\n self.width = width\n self.thickness = thickness\n self.density = density\n self.concentration = concentration\n self.youngs_modulus = youngs_modulus\n self.poissons_ratio = poissons_ratio\n self.index_of_refraction = index_of_refraction\n self.fluorescence_spectra = fluorescence_spectra\n self.transparency = transparency\n if self.transparency:\n self.reflectivity = 1 / self.transparency\n self.conductivity = conductivity\n if permittivity:\n self.permittivity = permittivity\n self.zeta = zeta\n self.dielectric_strength = dielectric_strength\n if reaction_site_density:\n self.reaction_site_density = reaction_site_density * 1e+18\n self.Ka = Ka\n self.Kb = Kb\n\n\nclass material_liquid(object):\n\n def __init__(self, name=None, species=None, concentration=None,\n conductivity=None, pH=None, density=None, viscosity=None,\n permittivity=None, temperature=None, valence=1.0):\n \"\"\"\n everything about a liquid\n :param species:\n :param concentration:\n :param conductivity:\n :param pH:\n \"\"\"\n self.name = name\n self.species = species\n self.concentration = concentration\n self.conductivity = conductivity\n if permittivity:\n self.permittivity = permittivity\n if pH:\n self.pH = pH\n self.c_H = 10 ** -pH * 1000.0\n self.valence = valence\n 
self.density = density\n self.viscosity = viscosity\n self.temperature = temperature\n self.diffusivity = 2e-09\n",
"step-4": "<mask token>\n\n\nclass CurlypivTestSetup(object):\n\n def __init__(self, name, chip, optics, fluid_handling_system):\n \"\"\"\n All the \"settings\" used in the experimental setup:\n 1. chip (class)\n 1.1 solid material (class) (e.g. SiO2)\n 1.1.1 transparency\n 1.1.2 fluorescence spectral characteristics\n 1.1.3 surface charge density\n 1.1.4 %/vol (here would be 100%)\n 1.2 channel (class)\n 1.2.1 height\n 1.2.2 width\n 1.2.3 length\n 1.3 reservoir volume\n 1.4 electrode configuration (class)\n 1.4.1 material\n 1.4.2 separation distance\n 1.4.3 distance to channel entrance\n 2. test solution (class)\n 2.1 liquid material (class) (e.g. electrolyte)\n 2.1.1 chemical species (e.g. KCl)\n 2.1.2 concentration\n 2.1.3 measurable quantity (class) (e.g. conductivity)\n 2.1.3.1 theoretical\n 2.1.3.2 measured\n 2.1.3.2.1 measured conductivity\n 2.1.3.2.1 measured date\n 2.1.4 measurable quantity (class) (e.g. pH)\n 2.1.4.1 theoretical\n 2.1.4.2 measured\n 2.1.4.2.1 measured conductivity\n 2.1.4.2.1 measured date\n 2.2 fluorescent particles (class)\n 2.2.0 diameter\n 2.2.. measurable quantity (class) (e.g. zeta)\n 2.2.. measurable quantity (class) (e.g electrophoretic mobility)\n 2.2.. spectral characteristics\n 2.2.1 solid materials (class) (e.g. polystyrene)\n 2.2.1.1 %/vol\n 2.2.2 liquid materials (class) (e.g. DI water)\n 2.2.3 liquid materials (Class) (e.g. sodium azide)\n 2.2.3.1 conductivity\n 2.2.3.2 concentration\n 3. illumination (class)\n 3.1 source (class)\n 3.1.1 type (e.g. Hg lamp)\n 3.1.2 intensity\n 3.1.3 emission spectra\n 3.2 optical element (class) (e.g. excitation filter)\n 3.3 optical element (class) (e.g. emission filter)\n 3.4 optical element (class) (e.g. dichroic mirror)\n 4. microscope\n 4.1 type (Olympus iX 73)\n 4.2 objective (class)\n 4.2.1 numerical aperature (e.g. 0.3)\n 4.2.2 magnification (e.g. 20X)\n 4.2.3 field of view (e.g. 
500 x 500 um)\n 4.2.4 depth of focus (e.g 4.1 microns)\n \"\"\"\n self.name = name\n self.chip = chip\n self.optics = optics\n self.fluid_handling_system = fluid_handling_system\n\n\nclass chip(object):\n\n def __init__(self, channel=None, bpe=None, reservoir=None, electrodes=\n None, fluid_handling_system=None, material_in_optical_path=None,\n thickness_in_optical_path=None):\n \"\"\"\n Everything important about the chip\n \"\"\"\n self.channel = channel\n self.bpe = bpe\n self.electrodes = electrodes\n self.fluid_handling_system = fluid_handling_system\n self.material_in_optical_path = material_in_optical_path\n self.thickness_in_optical_path = thickness_in_optical_path\n\n\nclass channel(object):\n\n def __init__(self, length=None, width=None, height=None,\n material_bottom_wall_surface=None, material_top_wall_surface=None,\n material_fluid=None):\n \"\"\"\n Everything important about the chip\n \"\"\"\n self.length = length\n self.width = width\n self.height = height\n self.material_bottom_wall_surface = material_bottom_wall_surface\n self.material_top_wall_surface = material_top_wall_surface\n self.material_fluid = material_fluid\n\n\nclass bpe(object):\n\n def __init__(self, length=None, width=None, height=None, material=None,\n adhesion_material=None, dielectric_coating=None):\n \"\"\"\n Everything important about the chip\n \"\"\"\n self.length = length\n self.linspace_x = np.linspace(-length / 2, length / 2, num=100)\n self.width = width\n self.height = height\n self.material = material\n if self.material.thickness:\n if self.material.thickness != self.height:\n raise ValueError('BPE height must equal BPE material thickness'\n )\n self.adhesion_material = adhesion_material\n if dielectric_coating:\n self.dielectric_coating = dielectric_coating\n else:\n self.dielectric_coating = material_solid(name='no_dielectric',\n permittivity=1, thickness=1e-12, Ka=6, Kb=2,\n reaction_site_density=5)\n\n\nclass optics(object):\n\n def __init__(self, microscope, 
fluorescent_particles=None,\n calibration_grid=None, pixel_to_micron_scaling=None):\n self.microscope = microscope\n self.fluorescent_particles = fluorescent_particles\n self.calibration_grid = calibration_grid\n if self.microscope.objective.magnification == 50:\n self.pixel_to_micron_scaling = 0.6\n elif self.microscope.objective.magnification == 20:\n self.pixel_to_micron_scaling = 1.55\n else:\n raise ValueError(\n 'Unable to determine microns/pixels scaling because objective magnification not 50X or 20X'\n )\n if pixel_to_micron_scaling is not None:\n print(\n 'Manual input of pixel_to_micron_scaling is deprecated. A scaling factor of {} um/pix for {} magnification was instantiated.'\n .format(self.pixel_to_micron_scaling, self.microscope.\n objective.magnification))\n \"\"\"\n --- I THINK THIS SECTION IS DEPRECATED ---\n Notes: deprecated because calculating the scaling factor or entering it manually is too confusing. I have\n permanently figured out the correct scaling.\n \n if microscope.objective.pixel_to_micron is not None and pixel_to_micron_scaling is None:\n self.pixel_to_micron = microscope.objective.pixel_to_micron\n elif microscope.objective.pixel_to_micron is not None and pixel_to_micron_scaling is not None and microscope.objective.pixel_to_micron != pixel_to_micron_scaling:\n raise ValueError(\"Conflicting scaling factors: microscope.objective={}, optics={}\".format(microscope.objective.pixel_to_micron, pixel_to_micron_scaling))\n elif microscope.objective.pixel_to_micron is None and pixel_to_micron_scaling is not None:\n self.pixel_to_micron = pixel_to_micron_scaling\n \"\"\"\n\n\nclass illumination(object):\n\n def __init__(self, basePath=None, source=None, excitation=None,\n emission=None, dichroic=None, illumination_distribution=None,\n calculate_illumination_distribution=False, illumPath=None,\n illumSavePath=None, illumSaveName=None, showIllumPlot=False,\n save_txt=False, save_plot=False, save_image=False):\n \"\"\"\n details about the 
optical setup\n :param source:\n :param excitation:\n :param emission:\n :param dichroic:\n \"\"\"\n self.basePath = basePath\n self.source = source\n self.excitation_wavelength = excitation\n self.emission_wavelength = emission\n self.dichroic = dichroic\n if illumination_distribution is not None:\n self.illumination_distribution = illumination_distribution\n elif illumPath is not None:\n flatfield = io.imread(illumPath, plugin='tifffile')\n if len(np.shape(flatfield)) > 2:\n flatfield = np.asarray(np.rint(np.mean(flatfield, axis=0)),\n dtype='uint16')\n self.illumination_distribution = flatfield\n elif calculate_illumination_distribution and illumination_distribution is None:\n self.illumination_distribution = measureIlluminationDistributionXY(\n basePath=self.basePath, illumPath=illumPath, show_image=\n showIllumPlot, save_image=save_image, save_img_type='.tif',\n save_txt=save_txt, show_plot=showIllumPlot, save_plot=\n save_plot, savePath=illumSavePath, savename=illumSaveName)\n else:\n self.illumination_distribution = illumination_distribution\n self.flatfield = self.illumination_distribution\n if self.flatfield is not None:\n self.flatfield_mean = np.mean(self.flatfield)\n self.flatfield_std = np.std(self.flatfield)\n\n\nclass darkfield(object):\n\n def __init__(self, basePath, darkframePath=None, flip_image_across_axis\n =None, show_image=False, save_image=False, save_img_type='.tif',\n savePath=None, savename=None, save_plot=False):\n \"\"\"\n details about dark field image\n\n \"\"\"\n self.basePath = basePath\n img, mean, std = calculate_darkfield(self.basePath, darkframePath=\n darkframePath, flip_image_axes=flip_image_across_axis,\n show_image=show_image, save_image=save_image, save_img_type=\n save_img_type, savePath=savePath, savename=savename, save_plot=\n save_plot)\n self.img = img\n self.mean = mean\n self.std = std\n\n\nclass microscope(object):\n\n def __init__(self, type, objective, illumination, ccd):\n \"\"\"\n describes the micrscope setup\n 
:param type:\n :param objective:\n \"\"\"\n self.type = type\n self.objective = objective\n self.illumination = illumination\n self.ccd = ccd\n\n\nclass ccd(object):\n\n def __init__(self, exposure_time, img_acq_rate, EM_gain, name=\n 'iXon Ultra 897', img_acq_type='emcdd', darkfield=None, binning=\n None, vertical_pixel_shift_speed=5e-07,\n horizontal_pixel_shift_speed=1e-07,\n horizontal_pixel_shift_rate_bits=14, frame_transfer=True, crop_mode\n =False, acquisition_mode='kinetic', triggering='internal',\n readout_mode='image', pixels=512, pixel_size=1.6e-05):\n \"\"\"\n describe the CCD class\n \"\"\"\n self.name = name\n self.img_acq_type = img_acq_type\n self.exposure_time = exposure_time\n self.img_acq_rate = img_acq_rate\n self.em_gain = EM_gain\n self.darkfield = darkfield\n self.binning = binning\n self.vpss = vertical_pixel_shift_speed\n self.hpss = horizontal_pixel_shift_speed\n self.hpss_bits = horizontal_pixel_shift_rate_bits\n self.frame_transfer = frame_transfer\n self.crop_mode = crop_mode\n self.acquisition_mode = acquisition_mode\n self.triggering = triggering\n self.readout_mode = readout_mode\n if isinstance(pixels, int):\n self.pixels = pixels, pixels\n else:\n self.pixels = pixels\n self.pixel_size = pixel_size\n self.image_area = self.pixels[0] * pixel_size, self.pixels[1\n ] * pixel_size\n\n\nclass objective(object):\n\n def __init__(self, fluoro_particle, name=None, numerical_aperture=None,\n magnification=None, basePath=None, channel_height=None,\n illumination=None, wavelength=None, microgrid=None,\n auto_calc_pix_to_micron_scaling=False, pixel_to_micron=None,\n field_number=None, n0=1, show_depth_plot=False, save_depth_plot=False):\n \"\"\"\n\n Objectives in the Pennathur Lab Dark Room uScope:\n\n 20X - LCPlanFL N 20X LCD [LCPLFLN20xLCD]\n magnification: 20\n numerical_aperture: 0.45\n field_number: 26.5\n working distance: 7.4 - 8.3 mm\n transmittance: 90% @ 425 - 670 nm\n correction collar: 0 - 1.2 mm\n microns per pixel: 1.55\n 50X - 
LCPlanFL N 50x LCD [LCPLFLN50xLCD]\n magnification: 50\n numerical aperture: 0.7\n field number: 26.5\n working distance: 2.2 - 3 mm\n transmittance: 90% @ 425 - 650 nm\n correction collar: 0 - 1.2 mm\n microns per pixel: 0.6\n\n Manufacturer website: https://www.olympus-ims.com/en/microscope/lcplfln-lcd/#!cms[focus]=cmsContent11428\n \"\"\"\n self.name = name\n if name == 'LCPLFLN20xLCD':\n self.magnification = 20\n self.numerical_aperture = 0.45\n self.field_number = 26.5\n self.transmittance = 0.9\n self.pixel_to_micron = 1.55\n elif name == 'LCPLFLN50xLCD':\n self.magnification = 50\n self.numerical_aperture = 0.7\n self.field_number = 26.5\n self.transmittance = 0.9\n self.pixel_to_micron = 0.6\n else:\n self.numerical_aperture = numerical_aperture\n self.magnification = magnification\n self.field_number = field_number\n self._illumination = illumination\n if self._illumination is not None:\n self._wavelength = self._illumination.emission_wavelength\n elif wavelength is not None:\n self._wavelength = wavelength\n else:\n raise ValueError(\n 'A wavelength is required via the <illumination> class or <wavelength> input parameter'\n )\n self._pd = fluoro_particle.diameter\n self._n0 = n0\n self.calculate_depth_of_field()\n self.calculate_depth_of_correlation()\n if field_number:\n self.calculate_field_of_view()\n if show_depth_plot or save_depth_plot:\n plot_field_depth(depth_of_corr=self.depth_of_correlation,\n depth_of_field=self.depth_of_field, show_depth_plot=\n show_depth_plot, save_depth_plot=save_depth_plot, basePath=\n basePath, savename=None, channel_height=channel_height,\n objective=self.magnification)\n if auto_calc_pix_to_micron_scaling and self.pixel_to_micron is None:\n self.microgrid = microgrid\n self.calculate_pixel_to_micron_scaling()\n\n def calculate_field_of_view(self):\n self.field_of_view = self.field_number / self.magnification\n\n def calculate_depth_of_field(self, e=1.6e-05, n=1):\n \"\"\"\n e: CCD pixel resolution example: e = 16 um (16 
microns is the pixel size)\n \"\"\"\n self.depth_of_field = (self._wavelength * n / self.\n numerical_aperture ** 2 + e * n / (self.magnification * self.\n numerical_aperture))\n\n def calculate_depth_of_correlation(self, eps=0.01):\n n = self._n0\n dp = self._pd\n NA = self.numerical_aperture\n M = self.magnification\n lmbda = self._wavelength\n depth_of_correlation = calculate_depth_of_correlation(M=M, NA=NA,\n dp=dp, n=n, lmbda=lmbda, eps=eps)\n self.depth_of_correlation = depth_of_correlation\n\n def calculate_pixel_to_micron_scaling(self):\n if self.microgrid is None:\n raise ValueError(\n 'Need objective.microgrid property in order to calculate scaling factor'\n )\n\n @property\n def NA(self):\n return self.numerical_aperture\n\n @property\n def M(self):\n return self.magnification\n\n\nclass microgrid(object):\n\n def __init__(self, gridPath=None, center_to_center_spacing=None,\n feature_width=None, grid_type='grid', show_grid=False):\n \"\"\"\n this class holds images for the microgrid and performs pixel to micron scaling calculations\n \"\"\"\n if gridPath is not None:\n self.gridPath = gridPath\n self.spacing = center_to_center_spacing\n self.width = feature_width\n self.grid_type = grid_type\n file_list = glob.glob(join(self.gridPath, 'grid*.tif'))\n if len(file_list) < 1:\n raise ValueError('No grid*.tif files found in {}'.format(\n self.gridPath))\n img_grid = np.zeros(shape=(512, 512))\n for f in file_list:\n img = io.imread(f, plugin='tifffile')\n if len(np.shape(img)) > 2:\n img = np.mean(img, axis=0)\n img_grid += img\n img_grid = img_grid / len(file_list)\n self.img_grid = img_grid\n if show_grid is True:\n fig, ax = plt.subplots()\n ax.imshow(img_grid, cmap='gray')\n ax.set_xlabel('pixels')\n ax.set_ylabel('pixels')\n plt.title('grid: 10 um Lines; 50 um Spacing')\n plt.show()\n\n\nclass fluorescent_particles(object):\n\n def __init__(self, name=None, materials=None, diameter=None,\n fluorescence_spectra=None, concentration=None,\n 
electrophoretic_mobility=None, zeta=None):\n \"\"\"\n the details of the fluroescent particles used\n :param materials:\n :param diameter:\n :param fluorescence_spectra:\n :param concentration:\n :param electrophoretic_mobility:\n :param zeta:\n \"\"\"\n self.name = name\n self.materials = materials\n self.concentration = concentration\n self.electrophoretic_mobility = electrophoretic_mobility\n self.zeta = zeta\n self.diameter = diameter\n if diameter:\n k_b = 1.3806e-23\n T = 298\n mu = 0.001\n self.diffusivity = k_b * T / (6 * np.pi * mu * diameter / 2)\n self.fluorescence_spectra = fluorescence_spectra\n\n\nclass reservoir(object):\n\n def __init__(self, diameter, height, height_of_reservoir=None, material\n =None):\n \"\"\"\n describes the micrscope setup\n :param type:\n :param objective:\n \"\"\"\n g = 9.81\n self.material = material\n self.diameter = diameter\n self.height = height\n self.volume = np.pi * self.diameter ** 2 / 4\n self.height_of_reservoir = height_of_reservoir\n if material and height_of_reservoir:\n self.hydrostatic_pressure = (material.density * g * self.\n height_of_reservoir)\n\n\nclass fluid_handling_system(object):\n\n def __init__(self, fluid_reservoir=None, all_tubing=None,\n onchip_reservoir=None):\n \"\"\"\n describes the fluid handling system\n \"\"\"\n self.fluid_reservoir = fluid_reservoir\n self.all_tubing = all_tubing\n self.onchip_reservoir = onchip_reservoir\n\n\nclass tubing(object):\n\n def __init__(self, inner_diameter=None, length=None, material=None):\n \"\"\"\n describes each segment of tubing\n\n \"\"\"\n self.inner_diameter = inner_diameter\n self.length = length\n self.material = material\n\n\nclass optical_element(object):\n\n def __init__(self, passing_wavelengths=None, reflectivity=None):\n \"\"\"\n this class describes the optical characteristics of any material or element\n :param wavelength_bandpass:\n \"\"\"\n self.passing_wavelengths = passing_wavelengths\n self.reflectivity = reflectivity\n\n\nclass 
measurable_quantity(object):\n\n def __init__(self, reference_value=None, measured_value=None):\n \"\"\"\n what value was measured and when\n \"\"\"\n self.reference_value = reference_value\n self.measured_value = measured_value\n\n\nclass measurement(object):\n\n def __init__(self, value=None, date=None):\n \"\"\"\n Object for storing measurements\n :param value:\n :param date:\n \"\"\"\n self.value = value\n self.date = date\n\n\nclass electrode_configuration(object):\n\n def __init__(self, material=None, length=None, entrance_length=None):\n \"\"\"\n Object for holding electrode configuration details\n :param material:\n :param length:\n :param entrance_length:\n \"\"\"\n self.material = material\n self.length = length\n self.entrance_length = entrance_length\n\n\nclass material_solid(object):\n\n def __init__(self, name=None, zeta=None, concentration=None,\n index_of_refraction=None, transparency=None, fluorescence_spectra=\n None, permittivity=None, conductivity=None, thickness=None,\n youngs_modulus=None, poissons_ratio=None, density=None,\n dielectric_strength=None, reaction_site_density=None, Ka=None, Kb=\n None, width=None, length=None):\n \"\"\"\n everything about a material\n :param transparency:\n :param fluorescence_spectra:\n :param zeta:\n \"\"\"\n self.name = name\n self.length = length\n self.width = width\n self.thickness = thickness\n self.density = density\n self.concentration = concentration\n self.youngs_modulus = youngs_modulus\n self.poissons_ratio = poissons_ratio\n self.index_of_refraction = index_of_refraction\n self.fluorescence_spectra = fluorescence_spectra\n self.transparency = transparency\n if self.transparency:\n self.reflectivity = 1 / self.transparency\n self.conductivity = conductivity\n if permittivity:\n self.permittivity = permittivity\n self.zeta = zeta\n self.dielectric_strength = dielectric_strength\n if reaction_site_density:\n self.reaction_site_density = reaction_site_density * 1e+18\n self.Ka = Ka\n self.Kb = 
Kb\n\n\nclass material_liquid(object):\n\n def __init__(self, name=None, species=None, concentration=None,\n conductivity=None, pH=None, density=None, viscosity=None,\n permittivity=None, temperature=None, valence=1.0):\n \"\"\"\n everything about a liquid\n :param species:\n :param concentration:\n :param conductivity:\n :param pH:\n \"\"\"\n self.name = name\n self.species = species\n self.concentration = concentration\n self.conductivity = conductivity\n if permittivity:\n self.permittivity = permittivity\n if pH:\n self.pH = pH\n self.c_H = 10 ** -pH * 1000.0\n self.valence = valence\n self.density = density\n self.viscosity = viscosity\n self.temperature = temperature\n self.diffusivity = 2e-09\n",
"step-5": "# test CurlypivSetup\n\"\"\"\nNotes about program\n\"\"\"\n\n# 1.0 import modules\nimport numpy as np\nfrom skimage import io\nimport glob\nfrom os.path import join\nimport matplotlib.pyplot as plt\nfrom curlypiv.utils.calibrateCamera import measureIlluminationDistributionXY, calculate_depth_of_correlation, calculate_darkfield, plot_field_depth\n\n# 2.0 define class\n\nclass CurlypivTestSetup(object):\n\n def __init__(self, name, chip, optics, fluid_handling_system):\n \"\"\"\n All the \"settings\" used in the experimental setup:\n 1. chip (class)\n 1.1 solid material (class) (e.g. SiO2)\n 1.1.1 transparency\n 1.1.2 fluorescence spectral characteristics\n 1.1.3 surface charge density\n 1.1.4 %/vol (here would be 100%)\n 1.2 channel (class)\n 1.2.1 height\n 1.2.2 width\n 1.2.3 length\n 1.3 reservoir volume\n 1.4 electrode configuration (class)\n 1.4.1 material\n 1.4.2 separation distance\n 1.4.3 distance to channel entrance\n 2. test solution (class)\n 2.1 liquid material (class) (e.g. electrolyte)\n 2.1.1 chemical species (e.g. KCl)\n 2.1.2 concentration\n 2.1.3 measurable quantity (class) (e.g. conductivity)\n 2.1.3.1 theoretical\n 2.1.3.2 measured\n 2.1.3.2.1 measured conductivity\n 2.1.3.2.1 measured date\n 2.1.4 measurable quantity (class) (e.g. pH)\n 2.1.4.1 theoretical\n 2.1.4.2 measured\n 2.1.4.2.1 measured conductivity\n 2.1.4.2.1 measured date\n 2.2 fluorescent particles (class)\n 2.2.0 diameter\n 2.2.. measurable quantity (class) (e.g. zeta)\n 2.2.. measurable quantity (class) (e.g electrophoretic mobility)\n 2.2.. spectral characteristics\n 2.2.1 solid materials (class) (e.g. polystyrene)\n 2.2.1.1 %/vol\n 2.2.2 liquid materials (class) (e.g. DI water)\n 2.2.3 liquid materials (Class) (e.g. sodium azide)\n 2.2.3.1 conductivity\n 2.2.3.2 concentration\n 3. illumination (class)\n 3.1 source (class)\n 3.1.1 type (e.g. Hg lamp)\n 3.1.2 intensity\n 3.1.3 emission spectra\n 3.2 optical element (class) (e.g. 
excitation filter)\n 3.3 optical element (class) (e.g. emission filter)\n 3.4 optical element (class) (e.g. dichroic mirror)\n 4. microscope\n 4.1 type (Olympus iX 73)\n 4.2 objective (class)\n 4.2.1 numerical aperature (e.g. 0.3)\n 4.2.2 magnification (e.g. 20X)\n 4.2.3 field of view (e.g. 500 x 500 um)\n 4.2.4 depth of focus (e.g 4.1 microns)\n \"\"\"\n self.name = name\n self.chip = chip\n self.optics = optics\n self.fluid_handling_system = fluid_handling_system\n\nclass chip(object):\n\n def __init__(self, channel=None, bpe=None, reservoir=None, electrodes=None, fluid_handling_system=None,\n material_in_optical_path=None, thickness_in_optical_path=None):\n \"\"\"\n Everything important about the chip\n \"\"\"\n #self.material = material # deprecated so the channel class can hold this information\n self.channel = channel\n self.bpe = bpe\n self.electrodes = electrodes\n self.fluid_handling_system = fluid_handling_system\n self.material_in_optical_path = material_in_optical_path\n self.thickness_in_optical_path = thickness_in_optical_path\n\nclass channel(object):\n\n def __init__(self, length=None, width=None, height=None,\n material_bottom_wall_surface=None, material_top_wall_surface=None, material_fluid=None):\n \"\"\"\n Everything important about the chip\n \"\"\"\n self.length = length\n self.width = width\n self.height = height\n self.material_bottom_wall_surface = material_bottom_wall_surface # material should only hold relevant electrokinetic data\n self.material_top_wall_surface = material_top_wall_surface # material should only hold relevant elect\n self.material_fluid = material_fluid # could be a mixture of liquid materials + fluorescent particles\n\nclass bpe(object):\n\n def __init__(self, length=None, width=None, height=None, material=None, adhesion_material=None,\n dielectric_coating=None):\n \"\"\"\n Everything important about the chip\n \"\"\"\n self.length = length\n self.linspace_x = np.linspace(-length/2, length/2, num=100)\n self.width = 
width\n self.height = height\n self.material = material\n\n if self.material.thickness:\n if self.material.thickness != self.height:\n raise ValueError(\"BPE height must equal BPE material thickness\")\n\n # adhesion layer used for thin metal film BPE\n self.adhesion_material = adhesion_material\n\n # dielectric coating on top of BPE\n if dielectric_coating:\n self.dielectric_coating = dielectric_coating\n else:\n self.dielectric_coating = material_solid(name='no_dielectric', permittivity=1, thickness=1e-12, Ka=6, Kb=2, reaction_site_density=5)\n\nclass optics(object):\n def __init__(self, microscope, fluorescent_particles=None, calibration_grid=None, pixel_to_micron_scaling=None):\n\n self.microscope = microscope\n self.fluorescent_particles = fluorescent_particles\n self.calibration_grid = calibration_grid\n\n if self.microscope.objective.magnification == 50:\n self.pixel_to_micron_scaling = 0.60 # (microns/pixels)\n elif self.microscope.objective.magnification == 20:\n self.pixel_to_micron_scaling = 1.55 # (microns/pixels)\n else:\n raise ValueError(\"Unable to determine microns/pixels scaling because objective magnification not 50X or 20X\")\n\n if pixel_to_micron_scaling is not None:\n print(\"Manual input of pixel_to_micron_scaling is deprecated. A scaling factor of {} um/pix for {} magnification was instantiated.\".format(self.pixel_to_micron_scaling, self.microscope.objective.magnification))\n \"\"\"\n --- I THINK THIS SECTION IS DEPRECATED ---\n Notes: deprecated because calculating the scaling factor or entering it manually is too confusing. 
I have\n permanently figured out the correct scaling.\n \n if microscope.objective.pixel_to_micron is not None and pixel_to_micron_scaling is None:\n self.pixel_to_micron = microscope.objective.pixel_to_micron\n elif microscope.objective.pixel_to_micron is not None and pixel_to_micron_scaling is not None and microscope.objective.pixel_to_micron != pixel_to_micron_scaling:\n raise ValueError(\"Conflicting scaling factors: microscope.objective={}, optics={}\".format(microscope.objective.pixel_to_micron, pixel_to_micron_scaling))\n elif microscope.objective.pixel_to_micron is None and pixel_to_micron_scaling is not None:\n self.pixel_to_micron = pixel_to_micron_scaling\n \"\"\"\n\nclass illumination(object):\n\n def __init__(self, basePath=None, source=None, excitation=None, emission=None, dichroic=None, illumination_distribution=None,\n calculate_illumination_distribution=False,\n illumPath=None, illumSavePath=None, illumSaveName=None, showIllumPlot=False, save_txt=False, save_plot=False, save_image=False):\n \"\"\"\n details about the optical setup\n :param source:\n :param excitation:\n :param emission:\n :param dichroic:\n \"\"\"\n self.basePath = basePath # this should come from CurlypivTestCollection\n self.source = source\n self.excitation_wavelength = excitation\n self.emission_wavelength = emission\n self.dichroic = dichroic\n\n if illumination_distribution is not None:\n self.illumination_distribution = illumination_distribution\n elif illumPath is not None:\n flatfield = io.imread(illumPath, plugin='tifffile')\n if len(np.shape(flatfield)) > 2:\n flatfield = np.asarray(np.rint(np.mean(flatfield, axis=0)), dtype='uint16')\n self.illumination_distribution = flatfield\n elif calculate_illumination_distribution and illumination_distribution is None:\n self.illumination_distribution = measureIlluminationDistributionXY(basePath=self.basePath, illumPath=illumPath,\n show_image=showIllumPlot, save_image=save_image, save_img_type='.tif',\n save_txt=save_txt, 
show_plot=showIllumPlot, save_plot=save_plot,\n savePath=illumSavePath, savename=illumSaveName)\n else:\n self.illumination_distribution = illumination_distribution\n\n self.flatfield = self.illumination_distribution\n\n if self.flatfield is not None:\n self.flatfield_mean = np.mean(self.flatfield)\n self.flatfield_std = np.std(self.flatfield)\n\nclass darkfield(object):\n\n def __init__(self, basePath, darkframePath=None, flip_image_across_axis=None, show_image=False, save_image=False, save_img_type='.tif',\n savePath=None, savename=None, save_plot=False):\n \"\"\"\n details about dark field image\n\n \"\"\"\n self.basePath = basePath\n\n img, mean, std = calculate_darkfield(self.basePath, darkframePath=darkframePath, flip_image_axes=flip_image_across_axis, show_image=show_image, save_image=save_image, save_img_type=save_img_type,\n savePath=savePath, savename=savename, save_plot=save_plot)\n\n self.img = img\n self.mean = mean\n self.std = std\n\nclass microscope(object):\n\n def __init__(self, type, objective, illumination, ccd):\n \"\"\"\n describes the micrscope setup\n :param type:\n :param objective:\n \"\"\"\n self.type = type # e.g. 
Olympus iX73\n self.objective = objective\n self.illumination = illumination\n self.ccd = ccd\n\nclass ccd(object):\n\n def __init__(self, exposure_time, img_acq_rate, EM_gain, name='iXon Ultra 897', img_acq_type='emcdd', darkfield=None, binning=None,\n vertical_pixel_shift_speed=0.5e-6, horizontal_pixel_shift_speed=0.1e-6, horizontal_pixel_shift_rate_bits=14,\n frame_transfer=True, crop_mode=False, acquisition_mode='kinetic', triggering='internal', readout_mode='image',\n pixels=512, pixel_size=16e-6):\n \"\"\"\n describe the CCD class\n \"\"\"\n self.name = name\n self.img_acq_type = img_acq_type\n\n self.exposure_time = exposure_time\n self.img_acq_rate = img_acq_rate\n self.em_gain = EM_gain\n self.darkfield = darkfield\n self.binning = binning\n\n # supporting camera acquisition settings\n self.vpss = vertical_pixel_shift_speed\n self.hpss = horizontal_pixel_shift_speed\n self.hpss_bits = horizontal_pixel_shift_rate_bits\n self.frame_transfer = frame_transfer\n self.crop_mode = crop_mode\n self.acquisition_mode = acquisition_mode\n self.triggering = triggering\n self.readout_mode = readout_mode\n\n if isinstance(pixels, int):\n self.pixels = (pixels, pixels)\n else:\n self.pixels = pixels\n self.pixel_size = pixel_size\n self.image_area = (self.pixels[0]*pixel_size, self.pixels[1]*pixel_size)\n\n\nclass objective(object):\n\n def __init__(self, fluoro_particle, name=None, numerical_aperture=None, magnification=None, basePath=None, channel_height=None, illumination=None, wavelength=None, microgrid=None, auto_calc_pix_to_micron_scaling=False, pixel_to_micron=None, field_number=None, n0=1, show_depth_plot=False, save_depth_plot=False):\n \"\"\"\n\n Objectives in the Pennathur Lab Dark Room uScope:\n\n 20X - LCPlanFL N 20X LCD [LCPLFLN20xLCD]\n magnification: 20\n numerical_aperture: 0.45\n field_number: 26.5\n working distance: 7.4 - 8.3 mm\n transmittance: 90% @ 425 - 670 nm\n correction collar: 0 - 1.2 mm\n microns per pixel: 1.55\n 50X - LCPlanFL N 50x LCD 
[LCPLFLN50xLCD]\n magnification: 50\n numerical aperture: 0.7\n field number: 26.5\n working distance: 2.2 - 3 mm\n transmittance: 90% @ 425 - 650 nm\n correction collar: 0 - 1.2 mm\n microns per pixel: 0.6\n\n Manufacturer website: https://www.olympus-ims.com/en/microscope/lcplfln-lcd/#!cms[focus]=cmsContent11428\n \"\"\"\n\n # if name is entered, then pull all the terms directly\n self.name = name\n\n if name == 'LCPLFLN20xLCD':\n self.magnification = 20\n self.numerical_aperture = 0.45\n self.field_number = 26.5\n self.transmittance = 0.9\n self.pixel_to_micron = 1.55\n elif name == 'LCPLFLN50xLCD':\n self.magnification = 50\n self.numerical_aperture = 0.7\n self.field_number = 26.5\n self.transmittance = 0.9\n self.pixel_to_micron = 0.6\n else:\n self.numerical_aperture = numerical_aperture\n self.magnification = magnification\n self.field_number = field_number\n\n # general terms\n self._illumination = illumination\n if self._illumination is not None:\n self._wavelength = self._illumination.emission_wavelength\n elif wavelength is not None:\n self._wavelength = wavelength\n else:\n raise ValueError(\"A wavelength is required via the <illumination> class or <wavelength> input parameter\")\n self._pd = fluoro_particle.diameter\n self._n0 = n0\n self.calculate_depth_of_field()\n self.calculate_depth_of_correlation()\n\n if field_number:\n self.calculate_field_of_view()\n\n if show_depth_plot or save_depth_plot:\n plot_field_depth(depth_of_corr=self.depth_of_correlation, depth_of_field=self.depth_of_field, show_depth_plot=show_depth_plot, save_depth_plot=save_depth_plot,\n basePath=basePath, savename=None, channel_height=channel_height, objective=self.magnification)\n\n # grids and scaling factors\n if auto_calc_pix_to_micron_scaling and self.pixel_to_micron is None:\n self.microgrid = microgrid\n self.calculate_pixel_to_micron_scaling()\n\n\n def calculate_field_of_view(self):\n self.field_of_view = self.field_number / self.magnification\n\n def 
calculate_depth_of_field(self, e=16e-6, n=1):\n \"\"\"\n e: CCD pixel resolution example: e = 16 um (16 microns is the pixel size)\n \"\"\"\n self.depth_of_field = self._wavelength*n/self.numerical_aperture**2+e*n/(self.magnification*self.numerical_aperture)\n\n def calculate_depth_of_correlation(self, eps=0.01):\n # step 0: define\n n = self._n0\n dp = self._pd\n NA = self.numerical_aperture\n M = self.magnification\n lmbda = self._wavelength\n\n # step 1: calculate the depth of correlation for the optical setup\n depth_of_correlation = calculate_depth_of_correlation(M=M, NA=NA, dp=dp, n=n, lmbda=lmbda, eps=eps)\n\n self.depth_of_correlation = depth_of_correlation\n\n def calculate_pixel_to_micron_scaling(self):\n if self.microgrid is None:\n raise ValueError(\"Need objective.microgrid property in order to calculate scaling factor\")\n # script to calculate scaling factor from grid\n # would go here\n\n @property\n def NA(self):\n return self.numerical_aperture\n\n @property\n def M(self):\n return self.magnification\n\nclass microgrid(object):\n\n def __init__(self, gridPath=None, center_to_center_spacing=None, feature_width=None, grid_type='grid', show_grid=False):\n \"\"\"\n this class holds images for the microgrid and performs pixel to micron scaling calculations\n \"\"\"\n if gridPath is not None:\n self.gridPath = gridPath\n self.spacing = center_to_center_spacing\n self.width = feature_width\n self.grid_type = grid_type\n\n # find files in directory\n file_list = glob.glob(join(self.gridPath, 'grid*.tif'))\n\n if len(file_list) < 1:\n raise ValueError(\"No grid*.tif files found in {}\".format(self.gridPath))\n\n img_grid = np.zeros(shape=(512,512))\n for f in file_list:\n img = io.imread(f, plugin='tifffile')\n if len(np.shape(img)) > 2:\n img = np.mean(img, axis=0)\n img_grid += img\n\n img_grid = img_grid / len(file_list)\n\n self.img_grid = img_grid\n\n if show_grid is True:\n fig, ax = plt.subplots()\n ax.imshow(img_grid, cmap='gray')\n\n 
ax.set_xlabel('pixels')\n ax.set_ylabel('pixels')\n plt.title('grid: 10 um Lines; 50 um Spacing')\n plt.show()\n\n\nclass fluorescent_particles(object):\n\n def __init__(self, name=None, materials=None,diameter=None,fluorescence_spectra=None, concentration=None,\n electrophoretic_mobility=None, zeta=None):\n \"\"\"\n the details of the fluroescent particles used\n :param materials:\n :param diameter:\n :param fluorescence_spectra:\n :param concentration:\n :param electrophoretic_mobility:\n :param zeta:\n \"\"\"\n\n self.name = name\n self.materials=materials\n self.concentration=concentration\n self.electrophoretic_mobility=electrophoretic_mobility\n self.zeta=zeta\n self.diameter=diameter\n if diameter:\n k_b = 1.3806e-23\n T=298\n mu=0.001\n self.diffusivity = k_b*T/(6*np.pi*mu*diameter/2)\n\n self.fluorescence_spectra=fluorescence_spectra\n\n\nclass reservoir(object):\n\n def __init__(self, diameter, height, height_of_reservoir=None, material=None):\n \"\"\"\n describes the micrscope setup\n :param type:\n :param objective:\n \"\"\"\n g = 9.81 # m/s**2\n\n self.material = material\n self.diameter = diameter\n self.height = height\n self.volume = np.pi*self.diameter**2/4\n self.height_of_reservoir = height_of_reservoir\n if material and height_of_reservoir:\n self.hydrostatic_pressure = material.density*g*self.height_of_reservoir\n\nclass fluid_handling_system(object):\n\n def __init__(self, fluid_reservoir=None, all_tubing=None, onchip_reservoir=None):\n \"\"\"\n describes the fluid handling system\n \"\"\"\n self.fluid_reservoir=fluid_reservoir\n self.all_tubing = all_tubing\n self.onchip_reservoir = onchip_reservoir\n\nclass tubing(object):\n\n def __init__(self, inner_diameter=None, length=None, material=None):\n \"\"\"\n describes each segment of tubing\n\n \"\"\"\n self.inner_diameter = inner_diameter\n self.length = length\n self.material = material\n\nclass optical_element(object):\n\n def __init__(self, passing_wavelengths=None, reflectivity=None):\n 
\"\"\"\n this class describes the optical characteristics of any material or element\n :param wavelength_bandpass:\n \"\"\"\n self.passing_wavelengths=passing_wavelengths\n self.reflectivity=reflectivity\n\nclass measurable_quantity(object):\n\n def __init__(self, reference_value=None, measured_value=None):\n \"\"\"\n what value was measured and when\n \"\"\"\n self.reference_value = reference_value\n self.measured_value = measured_value\n\nclass measurement(object):\n\n def __init__(self, value=None, date=None):\n \"\"\"\n Object for storing measurements\n :param value:\n :param date:\n \"\"\"\n self.value = value\n self.date = date\n\nclass electrode_configuration(object):\n\n def __init__(self, material=None, length=None, entrance_length=None):\n \"\"\"\n Object for holding electrode configuration details\n :param material:\n :param length:\n :param entrance_length:\n \"\"\"\n self.material = material\n self.length = length\n self.entrance_length = entrance_length\n\nclass material_solid(object):\n\n def __init__(self, name=None, zeta=None, concentration=None, index_of_refraction=None, transparency=None, fluorescence_spectra=None,\n permittivity=None, conductivity=None, thickness=None, youngs_modulus=None, poissons_ratio=None,\n density=None, dielectric_strength=None, reaction_site_density=None, Ka=None, Kb=None, width=None, length=None):\n \"\"\"\n everything about a material\n :param transparency:\n :param fluorescence_spectra:\n :param zeta:\n \"\"\"\n # identity\n self.name = name\n\n # geometry\n self.length = length\n self.width = width\n self.thickness = thickness\n\n # mechanical\n self.density = density\n self.concentration = concentration # For a solid, this is % by volume.\n self.youngs_modulus = youngs_modulus\n self.poissons_ratio = poissons_ratio\n\n # optical\n self.index_of_refraction = index_of_refraction\n self.fluorescence_spectra = fluorescence_spectra\n self.transparency = transparency\n if self.transparency:\n self.reflectivity = 1 / 
self.transparency\n\n # electrochemical\n self.conductivity = conductivity\n if permittivity:\n self.permittivity = permittivity\n self.zeta = zeta\n self.dielectric_strength = dielectric_strength\n if reaction_site_density:\n self.reaction_site_density = reaction_site_density*1e18 # (#/nm2) surface density of reaction sites: accepts nm2 and converts to m2 (see Squires)\n self.Ka = Ka # reaction equilibrium constant - upper bound\n self.Kb = Kb # reaction equilibrium constant - lower bound\n\nclass material_liquid(object):\n\n def __init__(self, name=None, species=None, concentration=None, conductivity=None, pH=None, density=None, viscosity=None,\n permittivity=None, temperature=None, valence=1.0):\n \"\"\"\n everything about a liquid\n :param species:\n :param concentration:\n :param conductivity:\n :param pH:\n \"\"\"\n # identity\n self.name = name\n\n # electro/chemical\n self.species = species\n self.concentration = concentration # (mmol) = (mmol/L) = (mol/m3)\n self.conductivity = conductivity\n if permittivity:\n self.permittivity = permittivity\n if pH:\n self.pH = pH\n self.c_H = 10**-pH * 1e3 # (mmol) = (mmol/L) = (mol/m3); (concentration of Hydrogen ions (H+)\n self.valence = valence\n\n # mechanical\n self.density = density\n self.viscosity = viscosity\n self.temperature = temperature\n self.diffusivity = 2e-9 # (m^2/s) Diffusivity of KCl in DI water [Soni]",
"step-ids": [
37,
41,
45,
48,
50
]
}
|
[
37,
41,
45,
48,
50
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
a.sort()
<|reserved_special_token_0|>
for l, h in a:
if h0 < h:
s += (l - l0) * h0
l0, h0 = l, h
<|reserved_special_token_0|>
for l, h in a[::-1]:
if h > h1:
s += (l1 - l) * h1
l1, h1 = l, h
s += (l1 - l0 + 1) * h1
print(s)
<|reserved_special_token_1|>
a = [[*map(int, input().split())] for _ in range(int(input()))]
a.sort()
s = 0
l0, h0 = a[0]
for l, h in a:
if h0 < h:
s += (l - l0) * h0
l0, h0 = l, h
l1, h1 = a[-1]
for l, h in a[::-1]:
if h > h1:
s += (l1 - l) * h1
l1, h1 = l, h
s += (l1 - l0 + 1) * h1
print(s)
<|reserved_special_token_1|>
a=[[*map(int,input().split())]for _ in range(int(input()))]
a.sort()
s=0
l0,h0=a[0]
for l,h in a:
if h0<h:s+=(l-l0)*h0;l0,h0=l,h
l1,h1=a[-1]
for l,h in a[::-1]:
if h>h1:s+=(l1-l)*h1;l1,h1=l,h
s+=(l1-l0+1)*h1
print(s)
|
flexible
|
{
"blob_id": "62dab85b7ab5fdae8117827b2f56bccf99615cb7",
"index": 7341,
"step-1": "<mask token>\n",
"step-2": "<mask token>\na.sort()\n<mask token>\nfor l, h in a:\n if h0 < h:\n s += (l - l0) * h0\n l0, h0 = l, h\n<mask token>\nfor l, h in a[::-1]:\n if h > h1:\n s += (l1 - l) * h1\n l1, h1 = l, h\ns += (l1 - l0 + 1) * h1\nprint(s)\n",
"step-3": "a = [[*map(int, input().split())] for _ in range(int(input()))]\na.sort()\ns = 0\nl0, h0 = a[0]\nfor l, h in a:\n if h0 < h:\n s += (l - l0) * h0\n l0, h0 = l, h\nl1, h1 = a[-1]\nfor l, h in a[::-1]:\n if h > h1:\n s += (l1 - l) * h1\n l1, h1 = l, h\ns += (l1 - l0 + 1) * h1\nprint(s)\n",
"step-4": "a=[[*map(int,input().split())]for _ in range(int(input()))]\na.sort()\ns=0\nl0,h0=a[0]\nfor l,h in a:\n if h0<h:s+=(l-l0)*h0;l0,h0=l,h\nl1,h1=a[-1]\nfor l,h in a[::-1]:\n if h>h1:s+=(l1-l)*h1;l1,h1=l,h\ns+=(l1-l0+1)*h1\nprint(s)",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import xl2dict
myxlobject= XlToDict()
myxlobject.convert_sheet_to_dict(file_path="Soul Breaks.xlsx", sheet="First Sheet",
filter_variables_dict={"User Type" : "Admin", "Environment" : "Dev"})
|
normal
|
{
"blob_id": "8ec981bf8746e09d3865bc20dcfbf2fbd797c145",
"index": 7511,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nmyxlobject.convert_sheet_to_dict(file_path='Soul Breaks.xlsx', sheet=\n 'First Sheet', filter_variables_dict={'User Type': 'Admin',\n 'Environment': 'Dev'})\n",
"step-3": "<mask token>\nmyxlobject = XlToDict()\nmyxlobject.convert_sheet_to_dict(file_path='Soul Breaks.xlsx', sheet=\n 'First Sheet', filter_variables_dict={'User Type': 'Admin',\n 'Environment': 'Dev'})\n",
"step-4": "import xl2dict\nmyxlobject = XlToDict()\nmyxlobject.convert_sheet_to_dict(file_path='Soul Breaks.xlsx', sheet=\n 'First Sheet', filter_variables_dict={'User Type': 'Admin',\n 'Environment': 'Dev'})\n",
"step-5": "import xl2dict\n\nmyxlobject= XlToDict()\nmyxlobject.convert_sheet_to_dict(file_path=\"Soul Breaks.xlsx\", sheet=\"First Sheet\",\n filter_variables_dict={\"User Type\" : \"Admin\", \"Environment\" : \"Dev\"})",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
## Import modules
import matplotlib, sys, datetime, time
matplotlib.use('TkAgg')
from math import *
from numpy import *
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2TkAgg
from matplotlib.figure import Figure
from matplotlib import dates
import matplotlib.pyplot as plt
from Tkinter import *
## Load the data
data = loadtxt("data/data011c.txt", unpack = True, skiprows=1, comments = '#')
temperature = data[7]
humidity = data[6]
light = data[8]
timer = data[9]
year, month, day, hour, minute, second = data[0], data[1], data[2], data[3], data[4], data[5]
## Make empty are to append the formatted dates
date_times = []
## Format the dates to dd.mm.yyyy hh:mm:ss
for i in range(len(year)): # can be the length of any arbitrary data set
# this makes a nice long string of the "day.month.year hour:min:sec"
date_times.append(str(int(day[i])).zfill(2) + "." + str(int(month[i])).zfill(2) + "." + str(int(year[i])) +
" " + str(int(hour[i])).zfill(2) + ":" + str(int(minute[i])).zfill(2) + ":" + str(int(second[i])).zfill(2) )
## String format of the date
pattern = '%d.%m.%Y %H:%M:%S'
## Convert the list of date_times to epoch time in seconds
epoch = []
for datetimes in date_times:
epoch.append(int(time.mktime(time.strptime(datetimes, pattern))))
## Convert epoch time to list of dateformatter objects
dts = map(datetime.datetime.fromtimestamp, epoch)
fds = dates.date2num(dts)
hfmt = dates.DateFormatter('%m/%d %H:%M')
## Create interface object
master = Tk()
## Set the title and size
master.title("Room Sensor")
master.geometry("1200x600")
## Create figure to add onto interface window
f = Figure(figsize=(9,5), dpi=100,)# facecolor='black')
## Not sure what zorder does
f.zorder
## within the figure create subplot called a
a = f.add_subplot(111)
## Add figure onto interface window
dataPlot = FigureCanvasTkAgg(f, master)
dataPlot.draw()
## Turn figure into a widget
dataPlot.get_tk_widget().place(x = 240, y = 40)
## Add plot toolbar widget
toolbar = NavigationToolbar2TkAgg(dataPlot, master)
toolbar.update()
toolbar.place(x = 240, y = 560)
## Functions to switch between plots
def show_temp():
## Clear the figure
a.clear()
## Plot the temperature
## a.plot(timer,temperature, "r.--")
a.plot(fds,temperature, "r.--")
a.set_ylabel("Temperature (Degrees Celsius)", color = "r")
a.xaxis.set_major_formatter(hfmt)
a.grid(color = "r")
## a.set_ylim([20.0,30.0])
for tick in a.xaxis.get_major_ticks():
tick.label.set_fontsize(7)
tick.label.set_rotation(15)
tick.label.set_color("r")
for tick in a.yaxis.get_major_ticks():
tick.label.set_color("r")
## Reset the toolbar
toolbar.update()
f.canvas.draw()
def show_humidity():
a.clear()
a.plot(fds,humidity, "b.--")
a.set_ylabel("Humidity %", color = "b")
a.xaxis.set_major_formatter(hfmt)
a.grid(color = "blue")
for tick in a.xaxis.get_major_ticks():
tick.label.set_fontsize(7)
tick.label.set_rotation(15)
tick.label.set_color("b")
for tick in a.yaxis.get_major_ticks():
tick.label.set_color("b")
toolbar.update()
f.canvas.draw()
def show_light():
a.clear()
a.plot(fds,light, "g.--")
a.set_ylabel("Ambient Light", color = "g")
a.xaxis.set_major_formatter(hfmt)
a.grid(color = "g")
for tick in a.xaxis.get_major_ticks():
tick.label.set_fontsize(7)
tick.label.set_rotation(15)
tick.label.set_color("g")
for tick in a.yaxis.get_major_ticks():
tick.label.set_color("g")
toolbar.update()
f.canvas.draw()
## Load icon and button images
tempButton = PhotoImage(file="images/temp_button.gif")
hmdButton = PhotoImage(file="images/hmd_button.gif")
lightButton = PhotoImage(file="images/light_button.gif")
tempIcon = PhotoImage(file="images/temp_icon.gif")
hmdIcon = PhotoImage(file="images/hmd_icon.gif")
lightIcon = PhotoImage(file="images/light_icon.gif")
## Create button widgets
Button1 = Button(master, image = tempButton, command = show_temp, height = 50, width = 109)
Button2 = Button(master, image = hmdButton, command = show_humidity, height = 50, width = 109)
Button3 = Button(master, image = lightButton, command = show_light, height = 50, width = 109)
## Create labels
Label1 = Label(master, image = tempIcon, height = 50, width = 50)
Label2 = Label(master, image = hmdIcon, height = 50, width = 50)
Label3 = Label(master, image = lightIcon, height = 50, width = 50)
## Place the buttons and labels to specific location
Button1.place(x=60,y=110)
Button2.place(x=60,y=260)
Button3.place(x=60,y=410)
Label1.place(x=180, y=111)
Label2.place(x=180, y=261)
Label3.place(x=180, y=411)
## Start with the temperature graph showing
show_temp()
## Run the main interface loop
master.mainloop()
|
normal
|
{
"blob_id": "2de12085ddc73fed85dda8ce3d6908b42fdc4bcc",
"index": 3046,
"step-1": "<mask token>\n\n\ndef show_humidity():\n a.clear()\n a.plot(fds, humidity, 'b.--')\n a.set_ylabel('Humidity %', color='b')\n a.xaxis.set_major_formatter(hfmt)\n a.grid(color='blue')\n for tick in a.xaxis.get_major_ticks():\n tick.label.set_fontsize(7)\n tick.label.set_rotation(15)\n tick.label.set_color('b')\n for tick in a.yaxis.get_major_ticks():\n tick.label.set_color('b')\n toolbar.update()\n f.canvas.draw()\n\n\ndef show_light():\n a.clear()\n a.plot(fds, light, 'g.--')\n a.set_ylabel('Ambient Light', color='g')\n a.xaxis.set_major_formatter(hfmt)\n a.grid(color='g')\n for tick in a.xaxis.get_major_ticks():\n tick.label.set_fontsize(7)\n tick.label.set_rotation(15)\n tick.label.set_color('g')\n for tick in a.yaxis.get_major_ticks():\n tick.label.set_color('g')\n toolbar.update()\n f.canvas.draw()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef show_temp():\n a.clear()\n a.plot(fds, temperature, 'r.--')\n a.set_ylabel('Temperature (Degrees Celsius)', color='r')\n a.xaxis.set_major_formatter(hfmt)\n a.grid(color='r')\n for tick in a.xaxis.get_major_ticks():\n tick.label.set_fontsize(7)\n tick.label.set_rotation(15)\n tick.label.set_color('r')\n for tick in a.yaxis.get_major_ticks():\n tick.label.set_color('r')\n toolbar.update()\n f.canvas.draw()\n\n\ndef show_humidity():\n a.clear()\n a.plot(fds, humidity, 'b.--')\n a.set_ylabel('Humidity %', color='b')\n a.xaxis.set_major_formatter(hfmt)\n a.grid(color='blue')\n for tick in a.xaxis.get_major_ticks():\n tick.label.set_fontsize(7)\n tick.label.set_rotation(15)\n tick.label.set_color('b')\n for tick in a.yaxis.get_major_ticks():\n tick.label.set_color('b')\n toolbar.update()\n f.canvas.draw()\n\n\ndef show_light():\n a.clear()\n a.plot(fds, light, 'g.--')\n a.set_ylabel('Ambient Light', color='g')\n a.xaxis.set_major_formatter(hfmt)\n a.grid(color='g')\n for tick in a.xaxis.get_major_ticks():\n tick.label.set_fontsize(7)\n tick.label.set_rotation(15)\n tick.label.set_color('g')\n for tick in a.yaxis.get_major_ticks():\n tick.label.set_color('g')\n toolbar.update()\n f.canvas.draw()\n\n\n<mask token>\n",
"step-3": "<mask token>\nmatplotlib.use('TkAgg')\n<mask token>\ndata = loadtxt('data/data011c.txt', unpack=True, skiprows=1, comments='#')\ntemperature = data[7]\nhumidity = data[6]\nlight = data[8]\ntimer = data[9]\nyear, month, day, hour, minute, second = data[0], data[1], data[2], data[3\n ], data[4], data[5]\ndate_times = []\nfor i in range(len(year)):\n date_times.append(str(int(day[i])).zfill(2) + '.' + str(int(month[i])).\n zfill(2) + '.' + str(int(year[i])) + ' ' + str(int(hour[i])).zfill(\n 2) + ':' + str(int(minute[i])).zfill(2) + ':' + str(int(second[i]))\n .zfill(2))\npattern = '%d.%m.%Y %H:%M:%S'\nepoch = []\nfor datetimes in date_times:\n epoch.append(int(time.mktime(time.strptime(datetimes, pattern))))\ndts = map(datetime.datetime.fromtimestamp, epoch)\nfds = dates.date2num(dts)\nhfmt = dates.DateFormatter('%m/%d %H:%M')\nmaster = Tk()\nmaster.title('Room Sensor')\nmaster.geometry('1200x600')\nf = Figure(figsize=(9, 5), dpi=100)\nf.zorder\na = f.add_subplot(111)\ndataPlot = FigureCanvasTkAgg(f, master)\ndataPlot.draw()\ndataPlot.get_tk_widget().place(x=240, y=40)\ntoolbar = NavigationToolbar2TkAgg(dataPlot, master)\ntoolbar.update()\ntoolbar.place(x=240, y=560)\n\n\ndef show_temp():\n a.clear()\n a.plot(fds, temperature, 'r.--')\n a.set_ylabel('Temperature (Degrees Celsius)', color='r')\n a.xaxis.set_major_formatter(hfmt)\n a.grid(color='r')\n for tick in a.xaxis.get_major_ticks():\n tick.label.set_fontsize(7)\n tick.label.set_rotation(15)\n tick.label.set_color('r')\n for tick in a.yaxis.get_major_ticks():\n tick.label.set_color('r')\n toolbar.update()\n f.canvas.draw()\n\n\ndef show_humidity():\n a.clear()\n a.plot(fds, humidity, 'b.--')\n a.set_ylabel('Humidity %', color='b')\n a.xaxis.set_major_formatter(hfmt)\n a.grid(color='blue')\n for tick in a.xaxis.get_major_ticks():\n tick.label.set_fontsize(7)\n tick.label.set_rotation(15)\n tick.label.set_color('b')\n for tick in a.yaxis.get_major_ticks():\n tick.label.set_color('b')\n toolbar.update()\n 
f.canvas.draw()\n\n\ndef show_light():\n a.clear()\n a.plot(fds, light, 'g.--')\n a.set_ylabel('Ambient Light', color='g')\n a.xaxis.set_major_formatter(hfmt)\n a.grid(color='g')\n for tick in a.xaxis.get_major_ticks():\n tick.label.set_fontsize(7)\n tick.label.set_rotation(15)\n tick.label.set_color('g')\n for tick in a.yaxis.get_major_ticks():\n tick.label.set_color('g')\n toolbar.update()\n f.canvas.draw()\n\n\ntempButton = PhotoImage(file='images/temp_button.gif')\nhmdButton = PhotoImage(file='images/hmd_button.gif')\nlightButton = PhotoImage(file='images/light_button.gif')\ntempIcon = PhotoImage(file='images/temp_icon.gif')\nhmdIcon = PhotoImage(file='images/hmd_icon.gif')\nlightIcon = PhotoImage(file='images/light_icon.gif')\nButton1 = Button(master, image=tempButton, command=show_temp, height=50,\n width=109)\nButton2 = Button(master, image=hmdButton, command=show_humidity, height=50,\n width=109)\nButton3 = Button(master, image=lightButton, command=show_light, height=50,\n width=109)\nLabel1 = Label(master, image=tempIcon, height=50, width=50)\nLabel2 = Label(master, image=hmdIcon, height=50, width=50)\nLabel3 = Label(master, image=lightIcon, height=50, width=50)\nButton1.place(x=60, y=110)\nButton2.place(x=60, y=260)\nButton3.place(x=60, y=410)\nLabel1.place(x=180, y=111)\nLabel2.place(x=180, y=261)\nLabel3.place(x=180, y=411)\nshow_temp()\nmaster.mainloop()\n",
"step-4": "import matplotlib, sys, datetime, time\nmatplotlib.use('TkAgg')\nfrom math import *\nfrom numpy import *\nfrom matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2TkAgg\nfrom matplotlib.figure import Figure\nfrom matplotlib import dates\nimport matplotlib.pyplot as plt\nfrom Tkinter import *\ndata = loadtxt('data/data011c.txt', unpack=True, skiprows=1, comments='#')\ntemperature = data[7]\nhumidity = data[6]\nlight = data[8]\ntimer = data[9]\nyear, month, day, hour, minute, second = data[0], data[1], data[2], data[3\n ], data[4], data[5]\ndate_times = []\nfor i in range(len(year)):\n date_times.append(str(int(day[i])).zfill(2) + '.' + str(int(month[i])).\n zfill(2) + '.' + str(int(year[i])) + ' ' + str(int(hour[i])).zfill(\n 2) + ':' + str(int(minute[i])).zfill(2) + ':' + str(int(second[i]))\n .zfill(2))\npattern = '%d.%m.%Y %H:%M:%S'\nepoch = []\nfor datetimes in date_times:\n epoch.append(int(time.mktime(time.strptime(datetimes, pattern))))\ndts = map(datetime.datetime.fromtimestamp, epoch)\nfds = dates.date2num(dts)\nhfmt = dates.DateFormatter('%m/%d %H:%M')\nmaster = Tk()\nmaster.title('Room Sensor')\nmaster.geometry('1200x600')\nf = Figure(figsize=(9, 5), dpi=100)\nf.zorder\na = f.add_subplot(111)\ndataPlot = FigureCanvasTkAgg(f, master)\ndataPlot.draw()\ndataPlot.get_tk_widget().place(x=240, y=40)\ntoolbar = NavigationToolbar2TkAgg(dataPlot, master)\ntoolbar.update()\ntoolbar.place(x=240, y=560)\n\n\ndef show_temp():\n a.clear()\n a.plot(fds, temperature, 'r.--')\n a.set_ylabel('Temperature (Degrees Celsius)', color='r')\n a.xaxis.set_major_formatter(hfmt)\n a.grid(color='r')\n for tick in a.xaxis.get_major_ticks():\n tick.label.set_fontsize(7)\n tick.label.set_rotation(15)\n tick.label.set_color('r')\n for tick in a.yaxis.get_major_ticks():\n tick.label.set_color('r')\n toolbar.update()\n f.canvas.draw()\n\n\ndef show_humidity():\n a.clear()\n a.plot(fds, humidity, 'b.--')\n a.set_ylabel('Humidity %', color='b')\n 
a.xaxis.set_major_formatter(hfmt)\n a.grid(color='blue')\n for tick in a.xaxis.get_major_ticks():\n tick.label.set_fontsize(7)\n tick.label.set_rotation(15)\n tick.label.set_color('b')\n for tick in a.yaxis.get_major_ticks():\n tick.label.set_color('b')\n toolbar.update()\n f.canvas.draw()\n\n\ndef show_light():\n a.clear()\n a.plot(fds, light, 'g.--')\n a.set_ylabel('Ambient Light', color='g')\n a.xaxis.set_major_formatter(hfmt)\n a.grid(color='g')\n for tick in a.xaxis.get_major_ticks():\n tick.label.set_fontsize(7)\n tick.label.set_rotation(15)\n tick.label.set_color('g')\n for tick in a.yaxis.get_major_ticks():\n tick.label.set_color('g')\n toolbar.update()\n f.canvas.draw()\n\n\ntempButton = PhotoImage(file='images/temp_button.gif')\nhmdButton = PhotoImage(file='images/hmd_button.gif')\nlightButton = PhotoImage(file='images/light_button.gif')\ntempIcon = PhotoImage(file='images/temp_icon.gif')\nhmdIcon = PhotoImage(file='images/hmd_icon.gif')\nlightIcon = PhotoImage(file='images/light_icon.gif')\nButton1 = Button(master, image=tempButton, command=show_temp, height=50,\n width=109)\nButton2 = Button(master, image=hmdButton, command=show_humidity, height=50,\n width=109)\nButton3 = Button(master, image=lightButton, command=show_light, height=50,\n width=109)\nLabel1 = Label(master, image=tempIcon, height=50, width=50)\nLabel2 = Label(master, image=hmdIcon, height=50, width=50)\nLabel3 = Label(master, image=lightIcon, height=50, width=50)\nButton1.place(x=60, y=110)\nButton2.place(x=60, y=260)\nButton3.place(x=60, y=410)\nLabel1.place(x=180, y=111)\nLabel2.place(x=180, y=261)\nLabel3.place(x=180, y=411)\nshow_temp()\nmaster.mainloop()\n",
"step-5": "## Import modules\nimport matplotlib, sys, datetime, time\nmatplotlib.use('TkAgg')\nfrom math import *\nfrom numpy import *\nfrom matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2TkAgg\nfrom matplotlib.figure import Figure\nfrom matplotlib import dates\nimport matplotlib.pyplot as plt\nfrom Tkinter import *\n\n## Load the data\ndata = loadtxt(\"data/data011c.txt\", unpack = True, skiprows=1, comments = '#')\ntemperature = data[7]\nhumidity = data[6]\nlight = data[8]\ntimer = data[9]\nyear, month, day, hour, minute, second = data[0], data[1], data[2], data[3], data[4], data[5]\n\n## Make empty are to append the formatted dates\ndate_times = [] \n\n## Format the dates to dd.mm.yyyy hh:mm:ss\nfor i in range(len(year)): # can be the length of any arbitrary data set\n # this makes a nice long string of the \"day.month.year hour:min:sec\"\n date_times.append(str(int(day[i])).zfill(2) + \".\" + str(int(month[i])).zfill(2) + \".\" + str(int(year[i])) +\n \" \" + str(int(hour[i])).zfill(2) + \":\" + str(int(minute[i])).zfill(2) + \":\" + str(int(second[i])).zfill(2) )\n\n## String format of the date\npattern = '%d.%m.%Y %H:%M:%S'\n\n## Convert the list of date_times to epoch time in seconds\nepoch = []\nfor datetimes in date_times:\n epoch.append(int(time.mktime(time.strptime(datetimes, pattern))))\n\n## Convert epoch time to list of dateformatter objects\ndts = map(datetime.datetime.fromtimestamp, epoch)\nfds = dates.date2num(dts)\nhfmt = dates.DateFormatter('%m/%d %H:%M')\n\n## Create interface object\nmaster = Tk()\n## Set the title and size\nmaster.title(\"Room Sensor\")\nmaster.geometry(\"1200x600\")\n\n## Create figure to add onto interface window\nf = Figure(figsize=(9,5), dpi=100,)# facecolor='black')\n## Not sure what zorder does\nf.zorder\n## within the figure create subplot called a\na = f.add_subplot(111)\n\n## Add figure onto interface window\ndataPlot = FigureCanvasTkAgg(f, master)\ndataPlot.draw()\n## Turn figure into a 
widget\ndataPlot.get_tk_widget().place(x = 240, y = 40)\n## Add plot toolbar widget\ntoolbar = NavigationToolbar2TkAgg(dataPlot, master)\ntoolbar.update()\ntoolbar.place(x = 240, y = 560)\n\n## Functions to switch between plots \n\ndef show_temp():\n ## Clear the figure\n a.clear()\n ## Plot the temperature\n## a.plot(timer,temperature, \"r.--\")\n a.plot(fds,temperature, \"r.--\")\n a.set_ylabel(\"Temperature (Degrees Celsius)\", color = \"r\")\n a.xaxis.set_major_formatter(hfmt)\n a.grid(color = \"r\")\n## a.set_ylim([20.0,30.0])\n for tick in a.xaxis.get_major_ticks():\n tick.label.set_fontsize(7) \n tick.label.set_rotation(15)\n tick.label.set_color(\"r\")\n for tick in a.yaxis.get_major_ticks():\n tick.label.set_color(\"r\")\n ## Reset the toolbar\n toolbar.update()\n f.canvas.draw()\n \ndef show_humidity():\n a.clear()\n a.plot(fds,humidity, \"b.--\")\n a.set_ylabel(\"Humidity %\", color = \"b\")\n a.xaxis.set_major_formatter(hfmt)\n a.grid(color = \"blue\")\n for tick in a.xaxis.get_major_ticks():\n tick.label.set_fontsize(7) \n tick.label.set_rotation(15)\n tick.label.set_color(\"b\")\n for tick in a.yaxis.get_major_ticks():\n tick.label.set_color(\"b\")\n toolbar.update()\n f.canvas.draw()\n \ndef show_light():\n a.clear()\n a.plot(fds,light, \"g.--\")\n a.set_ylabel(\"Ambient Light\", color = \"g\")\n a.xaxis.set_major_formatter(hfmt)\n a.grid(color = \"g\")\n for tick in a.xaxis.get_major_ticks():\n tick.label.set_fontsize(7) \n tick.label.set_rotation(15)\n tick.label.set_color(\"g\")\n for tick in a.yaxis.get_major_ticks():\n tick.label.set_color(\"g\")\n toolbar.update()\n f.canvas.draw()\n\n## Load icon and button images\ntempButton = PhotoImage(file=\"images/temp_button.gif\")\nhmdButton = PhotoImage(file=\"images/hmd_button.gif\")\nlightButton = PhotoImage(file=\"images/light_button.gif\")\ntempIcon = PhotoImage(file=\"images/temp_icon.gif\")\nhmdIcon = PhotoImage(file=\"images/hmd_icon.gif\")\nlightIcon = 
PhotoImage(file=\"images/light_icon.gif\")\n\n## Create button widgets\nButton1 = Button(master, image = tempButton, command = show_temp, height = 50, width = 109)\nButton2 = Button(master, image = hmdButton, command = show_humidity, height = 50, width = 109)\nButton3 = Button(master, image = lightButton, command = show_light, height = 50, width = 109)\n## Create labels\nLabel1 = Label(master, image = tempIcon, height = 50, width = 50)\nLabel2 = Label(master, image = hmdIcon, height = 50, width = 50)\nLabel3 = Label(master, image = lightIcon, height = 50, width = 50)\n## Place the buttons and labels to specific location\nButton1.place(x=60,y=110)\nButton2.place(x=60,y=260)\nButton3.place(x=60,y=410)\nLabel1.place(x=180, y=111)\nLabel2.place(x=180, y=261)\nLabel3.place(x=180, y=411)\n## Start with the temperature graph showing\nshow_temp()\n## Run the main interface loop\nmaster.mainloop()\n",
"step-ids": [
2,
3,
5,
6,
7
]
}
|
[
2,
3,
5,
6,
7
] |
"""Exercise 7.2. Encapsulate this loop in a function called square_root that takes a as a parameter,
chooses a reasonable value of x, and returns an estimate of the square root of a."""
def my_square_root(a,x) :
e = 0.0001
while True :
y=(x+a/x)/2
if abs(y-x) < e :
return y
break
x = y
a = input("Find square root of which number? ",)
x = input("What is your first guess?")
result = round(my_square_root(float(a),float(x)),3)
print("The square root of ",a,"is ",result)
|
normal
|
{
"blob_id": "c9f4ae94dc901d34a3c0fb4371c8d35a7fe94507",
"index": 5095,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef my_square_root(a, x):\n e = 0.0001\n while True:\n y = (x + a / x) / 2\n if abs(y - x) < e:\n return y\n break\n x = y\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef my_square_root(a, x):\n e = 0.0001\n while True:\n y = (x + a / x) / 2\n if abs(y - x) < e:\n return y\n break\n x = y\n\n\n<mask token>\nprint('The square root of ', a, 'is ', result)\n",
"step-4": "<mask token>\n\n\ndef my_square_root(a, x):\n e = 0.0001\n while True:\n y = (x + a / x) / 2\n if abs(y - x) < e:\n return y\n break\n x = y\n\n\na = input('Find square root of which number? ')\nx = input('What is your first guess?')\nresult = round(my_square_root(float(a), float(x)), 3)\nprint('The square root of ', a, 'is ', result)\n",
"step-5": "\"\"\"Exercise 7.2. Encapsulate this loop in a function called square_root that takes a as a parameter,\nchooses a reasonable value of x, and returns an estimate of the square root of a.\"\"\"\n\ndef my_square_root(a,x) :\n e = 0.0001\n while True :\n y=(x+a/x)/2\n if abs(y-x) < e :\n return y\n break\n x = y\n\na = input(\"Find square root of which number? \",)\nx = input(\"What is your first guess?\") \nresult = round(my_square_root(float(a),float(x)),3)\nprint(\"The square root of \",a,\"is \",result)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
"""
Base cache mechanism
"""
import time
import string
import codecs
import pickle
from functools import wraps
from abc import ABCMeta, abstractmethod
from asyncio import iscoroutinefunction
class BaseCache(metaclass=ABCMeta):
"""Base cache class."""
@abstractmethod
def __init__(self, kvstore, makekey, lifetime, fail_silent):
self._kvstore = kvstore
self._makekey = makekey
self._lifetime = lifetime
self._fail_silent = fail_silent
def __call__(self, func):
@wraps(func)
def wrapper(*args, **kwargs):
"""decorator."""
key = self._makekey(func, args, kwargs)
if self._kvstore.exists(key):
value_str = self._kvstore.get(key)
try:
value = pickle.loads(codecs.decode(value_str.encode(), "base64"))
if self._lifetime is None or time.time() - value['time'] < self._lifetime:
result = value['data']
return result
except: # pylint: disable=W0702
if not self._fail_silent:
raise
result = func(*args, **kwargs)
value = {'time': time.time(), 'data': result}
value_str = codecs.encode(pickle.dumps(value), "base64").decode()
self._kvstore.set(key, value_str)
return result
@wraps(func)
async def async_wrapper(*args, **kwargs):
"""async decorator."""
key = self._makekey(func, args, kwargs)
if self._kvstore.exists(key):
value_str = self._kvstore.get(key)
try:
value = pickle.loads(codecs.decode(value_str.encode(), "base64"))
if self._lifetime is None or time.time() - value['time'] < self._lifetime:
result = value['data']
return result
except: # pylint: disable=W0702
if not self._fail_silent:
raise
result = await func(*args, **kwargs)
value = {'time': time.time(), 'data': result}
value_str = codecs.encode(pickle.dumps(value), "base64").decode()
self._kvstore.set(key, value_str)
return result
if iscoroutinefunction(func):
return async_wrapper
return wrapper
@staticmethod
def makekey(function, *args, **kwargs) -> str:
"""creates a unique key based to be used when storing the cache.
:param function: function
:param *args: positional args of the function
:param **kwargs: keyword arguments of the function
:return: string base64 key
"""
arguments = str((function.__name__, args, kwargs)).strip()
arguments = arguments.translate(
str.maketrans('', '', string.punctuation+string.whitespace)
)
key = codecs.encode(pickle.dumps(arguments, protocol=0), "base64").decode().strip()
return key
|
normal
|
{
"blob_id": "e810cde7f77d36c6a43f8c277b66d038b143aae6",
"index": 6746,
"step-1": "<mask token>\n\n\nclass BaseCache(metaclass=ABCMeta):\n <mask token>\n\n @abstractmethod\n def __init__(self, kvstore, makekey, lifetime, fail_silent):\n self._kvstore = kvstore\n self._makekey = makekey\n self._lifetime = lifetime\n self._fail_silent = fail_silent\n <mask token>\n\n @staticmethod\n def makekey(function, *args, **kwargs) ->str:\n \"\"\"creates a unique key based to be used when storing the cache.\n :param function: function\n :param *args: positional args of the function\n :param **kwargs: keyword arguments of the function\n :return: string base64 key\n \"\"\"\n arguments = str((function.__name__, args, kwargs)).strip()\n arguments = arguments.translate(str.maketrans('', '', string.\n punctuation + string.whitespace))\n key = codecs.encode(pickle.dumps(arguments, protocol=0), 'base64'\n ).decode().strip()\n return key\n",
"step-2": "<mask token>\n\n\nclass BaseCache(metaclass=ABCMeta):\n <mask token>\n\n @abstractmethod\n def __init__(self, kvstore, makekey, lifetime, fail_silent):\n self._kvstore = kvstore\n self._makekey = makekey\n self._lifetime = lifetime\n self._fail_silent = fail_silent\n\n def __call__(self, func):\n\n @wraps(func)\n def wrapper(*args, **kwargs):\n \"\"\"decorator.\"\"\"\n key = self._makekey(func, args, kwargs)\n if self._kvstore.exists(key):\n value_str = self._kvstore.get(key)\n try:\n value = pickle.loads(codecs.decode(value_str.encode(),\n 'base64'))\n if self._lifetime is None or time.time() - value['time'\n ] < self._lifetime:\n result = value['data']\n return result\n except:\n if not self._fail_silent:\n raise\n result = func(*args, **kwargs)\n value = {'time': time.time(), 'data': result}\n value_str = codecs.encode(pickle.dumps(value), 'base64').decode()\n self._kvstore.set(key, value_str)\n return result\n\n @wraps(func)\n async def async_wrapper(*args, **kwargs):\n \"\"\"async decorator.\"\"\"\n key = self._makekey(func, args, kwargs)\n if self._kvstore.exists(key):\n value_str = self._kvstore.get(key)\n try:\n value = pickle.loads(codecs.decode(value_str.encode(),\n 'base64'))\n if self._lifetime is None or time.time() - value['time'\n ] < self._lifetime:\n result = value['data']\n return result\n except:\n if not self._fail_silent:\n raise\n result = await func(*args, **kwargs)\n value = {'time': time.time(), 'data': result}\n value_str = codecs.encode(pickle.dumps(value), 'base64').decode()\n self._kvstore.set(key, value_str)\n return result\n if iscoroutinefunction(func):\n return async_wrapper\n return wrapper\n\n @staticmethod\n def makekey(function, *args, **kwargs) ->str:\n \"\"\"creates a unique key based to be used when storing the cache.\n :param function: function\n :param *args: positional args of the function\n :param **kwargs: keyword arguments of the function\n :return: string base64 key\n \"\"\"\n arguments = 
str((function.__name__, args, kwargs)).strip()\n arguments = arguments.translate(str.maketrans('', '', string.\n punctuation + string.whitespace))\n key = codecs.encode(pickle.dumps(arguments, protocol=0), 'base64'\n ).decode().strip()\n return key\n",
"step-3": "<mask token>\n\n\nclass BaseCache(metaclass=ABCMeta):\n \"\"\"Base cache class.\"\"\"\n\n @abstractmethod\n def __init__(self, kvstore, makekey, lifetime, fail_silent):\n self._kvstore = kvstore\n self._makekey = makekey\n self._lifetime = lifetime\n self._fail_silent = fail_silent\n\n def __call__(self, func):\n\n @wraps(func)\n def wrapper(*args, **kwargs):\n \"\"\"decorator.\"\"\"\n key = self._makekey(func, args, kwargs)\n if self._kvstore.exists(key):\n value_str = self._kvstore.get(key)\n try:\n value = pickle.loads(codecs.decode(value_str.encode(),\n 'base64'))\n if self._lifetime is None or time.time() - value['time'\n ] < self._lifetime:\n result = value['data']\n return result\n except:\n if not self._fail_silent:\n raise\n result = func(*args, **kwargs)\n value = {'time': time.time(), 'data': result}\n value_str = codecs.encode(pickle.dumps(value), 'base64').decode()\n self._kvstore.set(key, value_str)\n return result\n\n @wraps(func)\n async def async_wrapper(*args, **kwargs):\n \"\"\"async decorator.\"\"\"\n key = self._makekey(func, args, kwargs)\n if self._kvstore.exists(key):\n value_str = self._kvstore.get(key)\n try:\n value = pickle.loads(codecs.decode(value_str.encode(),\n 'base64'))\n if self._lifetime is None or time.time() - value['time'\n ] < self._lifetime:\n result = value['data']\n return result\n except:\n if not self._fail_silent:\n raise\n result = await func(*args, **kwargs)\n value = {'time': time.time(), 'data': result}\n value_str = codecs.encode(pickle.dumps(value), 'base64').decode()\n self._kvstore.set(key, value_str)\n return result\n if iscoroutinefunction(func):\n return async_wrapper\n return wrapper\n\n @staticmethod\n def makekey(function, *args, **kwargs) ->str:\n \"\"\"creates a unique key based to be used when storing the cache.\n :param function: function\n :param *args: positional args of the function\n :param **kwargs: keyword arguments of the function\n :return: string base64 key\n \"\"\"\n arguments = 
str((function.__name__, args, kwargs)).strip()\n arguments = arguments.translate(str.maketrans('', '', string.\n punctuation + string.whitespace))\n key = codecs.encode(pickle.dumps(arguments, protocol=0), 'base64'\n ).decode().strip()\n return key\n",
"step-4": "<mask token>\nimport time\nimport string\nimport codecs\nimport pickle\nfrom functools import wraps\nfrom abc import ABCMeta, abstractmethod\nfrom asyncio import iscoroutinefunction\n\n\nclass BaseCache(metaclass=ABCMeta):\n \"\"\"Base cache class.\"\"\"\n\n @abstractmethod\n def __init__(self, kvstore, makekey, lifetime, fail_silent):\n self._kvstore = kvstore\n self._makekey = makekey\n self._lifetime = lifetime\n self._fail_silent = fail_silent\n\n def __call__(self, func):\n\n @wraps(func)\n def wrapper(*args, **kwargs):\n \"\"\"decorator.\"\"\"\n key = self._makekey(func, args, kwargs)\n if self._kvstore.exists(key):\n value_str = self._kvstore.get(key)\n try:\n value = pickle.loads(codecs.decode(value_str.encode(),\n 'base64'))\n if self._lifetime is None or time.time() - value['time'\n ] < self._lifetime:\n result = value['data']\n return result\n except:\n if not self._fail_silent:\n raise\n result = func(*args, **kwargs)\n value = {'time': time.time(), 'data': result}\n value_str = codecs.encode(pickle.dumps(value), 'base64').decode()\n self._kvstore.set(key, value_str)\n return result\n\n @wraps(func)\n async def async_wrapper(*args, **kwargs):\n \"\"\"async decorator.\"\"\"\n key = self._makekey(func, args, kwargs)\n if self._kvstore.exists(key):\n value_str = self._kvstore.get(key)\n try:\n value = pickle.loads(codecs.decode(value_str.encode(),\n 'base64'))\n if self._lifetime is None or time.time() - value['time'\n ] < self._lifetime:\n result = value['data']\n return result\n except:\n if not self._fail_silent:\n raise\n result = await func(*args, **kwargs)\n value = {'time': time.time(), 'data': result}\n value_str = codecs.encode(pickle.dumps(value), 'base64').decode()\n self._kvstore.set(key, value_str)\n return result\n if iscoroutinefunction(func):\n return async_wrapper\n return wrapper\n\n @staticmethod\n def makekey(function, *args, **kwargs) ->str:\n \"\"\"creates a unique key based to be used when storing the cache.\n :param 
function: function\n :param *args: positional args of the function\n :param **kwargs: keyword arguments of the function\n :return: string base64 key\n \"\"\"\n arguments = str((function.__name__, args, kwargs)).strip()\n arguments = arguments.translate(str.maketrans('', '', string.\n punctuation + string.whitespace))\n key = codecs.encode(pickle.dumps(arguments, protocol=0), 'base64'\n ).decode().strip()\n return key\n",
"step-5": "\"\"\"\nBase cache mechanism\n\"\"\"\nimport time\nimport string\nimport codecs\nimport pickle\nfrom functools import wraps\nfrom abc import ABCMeta, abstractmethod\nfrom asyncio import iscoroutinefunction\n\n\nclass BaseCache(metaclass=ABCMeta):\n \"\"\"Base cache class.\"\"\"\n @abstractmethod\n def __init__(self, kvstore, makekey, lifetime, fail_silent):\n self._kvstore = kvstore\n self._makekey = makekey\n self._lifetime = lifetime\n self._fail_silent = fail_silent\n\n def __call__(self, func):\n @wraps(func)\n def wrapper(*args, **kwargs):\n \"\"\"decorator.\"\"\"\n key = self._makekey(func, args, kwargs)\n if self._kvstore.exists(key):\n value_str = self._kvstore.get(key)\n try:\n value = pickle.loads(codecs.decode(value_str.encode(), \"base64\"))\n if self._lifetime is None or time.time() - value['time'] < self._lifetime:\n result = value['data']\n return result\n except: # pylint: disable=W0702\n if not self._fail_silent:\n raise\n\n result = func(*args, **kwargs)\n value = {'time': time.time(), 'data': result}\n value_str = codecs.encode(pickle.dumps(value), \"base64\").decode()\n self._kvstore.set(key, value_str)\n\n return result\n\n @wraps(func)\n async def async_wrapper(*args, **kwargs):\n \"\"\"async decorator.\"\"\"\n key = self._makekey(func, args, kwargs)\n if self._kvstore.exists(key):\n value_str = self._kvstore.get(key)\n try:\n value = pickle.loads(codecs.decode(value_str.encode(), \"base64\"))\n if self._lifetime is None or time.time() - value['time'] < self._lifetime:\n result = value['data']\n return result\n except: # pylint: disable=W0702\n if not self._fail_silent:\n raise\n\n result = await func(*args, **kwargs)\n value = {'time': time.time(), 'data': result}\n value_str = codecs.encode(pickle.dumps(value), \"base64\").decode()\n self._kvstore.set(key, value_str)\n\n return result\n\n if iscoroutinefunction(func):\n return async_wrapper\n return wrapper\n\n @staticmethod\n def makekey(function, *args, **kwargs) -> str:\n 
\"\"\"creates a unique key based to be used when storing the cache.\n :param function: function\n :param *args: positional args of the function\n :param **kwargs: keyword arguments of the function\n :return: string base64 key\n \"\"\"\n arguments = str((function.__name__, args, kwargs)).strip()\n arguments = arguments.translate(\n str.maketrans('', '', string.punctuation+string.whitespace)\n )\n key = codecs.encode(pickle.dumps(arguments, protocol=0), \"base64\").decode().strip()\n return key\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
import chars2vec
import sklearn.decomposition
import matplotlib.pyplot as plt
import csv
# Load Inutition Engineering pretrained model
# Models names: 'eng_50', 'eng_100', 'eng_150' 'eng_200', 'eng_300'
from sklearn.cluster import KMeans
c2v_model = chars2vec.load_model('eng_50')
words=[]
etichette=[]
with open('datasetParsing2DEF.csv') as csv_file:
csv_reader = csv.reader(csv_file, delimiter=',')
line_count = 0
for row in csv_reader:
if line_count == 0:
print(f'Column names are {", ".join(row)}')
line_count += 1
else:
print(row[1],row[2])
words.append(row[2])
etichette.append(row[1])
line_count += 1
print(f'Processed {line_count} lines.')
# Create word embeddings
word_embeddings = c2v_model.vectorize_words(words)
print(word_embeddings)
kmeans = KMeans(
init="random",
n_clusters=4,
n_init=10,
max_iter=200,
random_state=30)
kmeans.fit(word_embeddings),
y_kmeans = kmeans.predict(word_embeddings)
print(y_kmeans)
i=0;
for j in range(0,len(y_kmeans)):
print(etichette[i])
print(word_embeddings[j,0])
print(word_embeddings[j,1])
print()
#plt.scatter(word_embeddings[:, 0], word_embeddings[:, 1],marker=('$' + etichette[i] + '$'),c=y_kmeans, s=1800)
plt.scatter(word_embeddings[j, 0], word_embeddings[j, 1],
marker=('$' + 'O'+ '$'),
s=30, label=j)
i=i+1
centers = kmeans.cluster_centers_
plt.scatter(centers[:, 0], centers[:, 1], c='black', s=200, alpha=0.5)
plt.show()
|
normal
|
{
"blob_id": "084579152a2cc7feb2c31e0209ce1e32f4905d81",
"index": 5316,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwith open('datasetParsing2DEF.csv') as csv_file:\n csv_reader = csv.reader(csv_file, delimiter=',')\n line_count = 0\n for row in csv_reader:\n if line_count == 0:\n print(f\"Column names are {', '.join(row)}\")\n line_count += 1\n else:\n print(row[1], row[2])\n words.append(row[2])\n etichette.append(row[1])\n line_count += 1\n print(f'Processed {line_count} lines.')\n<mask token>\nprint(word_embeddings)\n<mask token>\nkmeans.fit(word_embeddings),\n<mask token>\nprint(y_kmeans)\n<mask token>\nfor j in range(0, len(y_kmeans)):\n print(etichette[i])\n print(word_embeddings[j, 0])\n print(word_embeddings[j, 1])\n print()\n plt.scatter(word_embeddings[j, 0], word_embeddings[j, 1], marker='$' +\n 'O' + '$', s=30, label=j)\n i = i + 1\n<mask token>\nplt.scatter(centers[:, 0], centers[:, 1], c='black', s=200, alpha=0.5)\nplt.show()\n",
"step-3": "<mask token>\nc2v_model = chars2vec.load_model('eng_50')\nwords = []\netichette = []\nwith open('datasetParsing2DEF.csv') as csv_file:\n csv_reader = csv.reader(csv_file, delimiter=',')\n line_count = 0\n for row in csv_reader:\n if line_count == 0:\n print(f\"Column names are {', '.join(row)}\")\n line_count += 1\n else:\n print(row[1], row[2])\n words.append(row[2])\n etichette.append(row[1])\n line_count += 1\n print(f'Processed {line_count} lines.')\nword_embeddings = c2v_model.vectorize_words(words)\nprint(word_embeddings)\nkmeans = KMeans(init='random', n_clusters=4, n_init=10, max_iter=200,\n random_state=30)\nkmeans.fit(word_embeddings),\ny_kmeans = kmeans.predict(word_embeddings)\nprint(y_kmeans)\ni = 0\nfor j in range(0, len(y_kmeans)):\n print(etichette[i])\n print(word_embeddings[j, 0])\n print(word_embeddings[j, 1])\n print()\n plt.scatter(word_embeddings[j, 0], word_embeddings[j, 1], marker='$' +\n 'O' + '$', s=30, label=j)\n i = i + 1\ncenters = kmeans.cluster_centers_\nplt.scatter(centers[:, 0], centers[:, 1], c='black', s=200, alpha=0.5)\nplt.show()\n",
"step-4": "import chars2vec\nimport sklearn.decomposition\nimport matplotlib.pyplot as plt\nimport csv\nfrom sklearn.cluster import KMeans\nc2v_model = chars2vec.load_model('eng_50')\nwords = []\netichette = []\nwith open('datasetParsing2DEF.csv') as csv_file:\n csv_reader = csv.reader(csv_file, delimiter=',')\n line_count = 0\n for row in csv_reader:\n if line_count == 0:\n print(f\"Column names are {', '.join(row)}\")\n line_count += 1\n else:\n print(row[1], row[2])\n words.append(row[2])\n etichette.append(row[1])\n line_count += 1\n print(f'Processed {line_count} lines.')\nword_embeddings = c2v_model.vectorize_words(words)\nprint(word_embeddings)\nkmeans = KMeans(init='random', n_clusters=4, n_init=10, max_iter=200,\n random_state=30)\nkmeans.fit(word_embeddings),\ny_kmeans = kmeans.predict(word_embeddings)\nprint(y_kmeans)\ni = 0\nfor j in range(0, len(y_kmeans)):\n print(etichette[i])\n print(word_embeddings[j, 0])\n print(word_embeddings[j, 1])\n print()\n plt.scatter(word_embeddings[j, 0], word_embeddings[j, 1], marker='$' +\n 'O' + '$', s=30, label=j)\n i = i + 1\ncenters = kmeans.cluster_centers_\nplt.scatter(centers[:, 0], centers[:, 1], c='black', s=200, alpha=0.5)\nplt.show()\n",
"step-5": "import chars2vec\nimport sklearn.decomposition\nimport matplotlib.pyplot as plt\nimport csv\n\n# Load Inutition Engineering pretrained model\n# Models names: 'eng_50', 'eng_100', 'eng_150' 'eng_200', 'eng_300'\nfrom sklearn.cluster import KMeans\n\nc2v_model = chars2vec.load_model('eng_50')\n\nwords=[]\netichette=[]\n\nwith open('datasetParsing2DEF.csv') as csv_file:\n csv_reader = csv.reader(csv_file, delimiter=',')\n line_count = 0\n for row in csv_reader:\n if line_count == 0:\n print(f'Column names are {\", \".join(row)}')\n line_count += 1\n else:\n print(row[1],row[2])\n words.append(row[2])\n etichette.append(row[1])\n line_count += 1\n\n\n print(f'Processed {line_count} lines.')\n\n\n\n# Create word embeddings\nword_embeddings = c2v_model.vectorize_words(words)\nprint(word_embeddings)\n\n\nkmeans = KMeans(\n init=\"random\",\n n_clusters=4,\n n_init=10,\n max_iter=200,\n random_state=30)\n\nkmeans.fit(word_embeddings),\n\ny_kmeans = kmeans.predict(word_embeddings)\nprint(y_kmeans)\ni=0;\nfor j in range(0,len(y_kmeans)):\n print(etichette[i])\n print(word_embeddings[j,0])\n print(word_embeddings[j,1])\n print()\n #plt.scatter(word_embeddings[:, 0], word_embeddings[:, 1],marker=('$' + etichette[i] + '$'),c=y_kmeans, s=1800)\n plt.scatter(word_embeddings[j, 0], word_embeddings[j, 1],\n marker=('$' + 'O'+ '$'),\n s=30, label=j)\n i=i+1\n\ncenters = kmeans.cluster_centers_\n\nplt.scatter(centers[:, 0], centers[:, 1], c='black', s=200, alpha=0.5)\n\nplt.show()\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print('total ball:', a * 6)
<|reserved_special_token_0|>
print("computer's run:", comp_runs)
<|reserved_special_token_0|>
print('runs need to win:', comp_runs)
<|reserved_special_token_0|>
print("""-----------------------------------------------
Your Batting
""")
while no_of_chances_1 < chances_1:
runs = int(input('Enter Runs for Your Batting Turn: '))
comp_bowl = random.randint(1, 6)
if runs == comp_bowl:
print('Computer Guess: ', comp_bowl)
print('You are Out. Your Total Runs= ', your_runs, '\n')
break
elif runs > 10:
print('ALERT!! Support No only till 10\n')
continue
else:
your_runs = your_runs + runs
print('Computer Guess: ', comp_bowl)
print('Your runs Now are: ', your_runs, '\n')
if comp_runs < your_runs:
break
no_of_chances_1 = no_of_chances_1 + 1
print("""
-----------------------------------------------
RESULTS: """)
if comp_runs < your_runs:
print('You won the Game.')
elif comp_runs == your_runs:
print('The Game is a Tie')
else:
print('Computer won the Game.')
<|reserved_special_token_1|>
a = int(input('Enter no. of over: '))
print('total ball:', a * 6)
<|reserved_special_token_0|>
comp_runs = random.randint(0, 36)
print("computer's run:", comp_runs)
comp_runs = comp_runs + 1
print('runs need to win:', comp_runs)
chances_1 = a * 6
no_of_chances_1 = 0
your_runs = 0
print("""-----------------------------------------------
Your Batting
""")
while no_of_chances_1 < chances_1:
runs = int(input('Enter Runs for Your Batting Turn: '))
comp_bowl = random.randint(1, 6)
if runs == comp_bowl:
print('Computer Guess: ', comp_bowl)
print('You are Out. Your Total Runs= ', your_runs, '\n')
break
elif runs > 10:
print('ALERT!! Support No only till 10\n')
continue
else:
your_runs = your_runs + runs
print('Computer Guess: ', comp_bowl)
print('Your runs Now are: ', your_runs, '\n')
if comp_runs < your_runs:
break
no_of_chances_1 = no_of_chances_1 + 1
print("""
-----------------------------------------------
RESULTS: """)
if comp_runs < your_runs:
print('You won the Game.')
elif comp_runs == your_runs:
print('The Game is a Tie')
else:
print('Computer won the Game.')
<|reserved_special_token_1|>
a = int(input('Enter no. of over: '))
print('total ball:', a * 6)
import random
comp_runs = random.randint(0, 36)
print("computer's run:", comp_runs)
comp_runs = comp_runs + 1
print('runs need to win:', comp_runs)
chances_1 = a * 6
no_of_chances_1 = 0
your_runs = 0
print("""-----------------------------------------------
Your Batting
""")
while no_of_chances_1 < chances_1:
runs = int(input('Enter Runs for Your Batting Turn: '))
comp_bowl = random.randint(1, 6)
if runs == comp_bowl:
print('Computer Guess: ', comp_bowl)
print('You are Out. Your Total Runs= ', your_runs, '\n')
break
elif runs > 10:
print('ALERT!! Support No only till 10\n')
continue
else:
your_runs = your_runs + runs
print('Computer Guess: ', comp_bowl)
print('Your runs Now are: ', your_runs, '\n')
if comp_runs < your_runs:
break
no_of_chances_1 = no_of_chances_1 + 1
print("""
-----------------------------------------------
RESULTS: """)
if comp_runs < your_runs:
print('You won the Game.')
elif comp_runs == your_runs:
print('The Game is a Tie')
else:
print('Computer won the Game.')
<|reserved_special_token_1|>
# Hand-cricket chase: the computer "bats" first with a random score and the
# player then bats for a fixed number of balls, trying to reach the target.
import random

a = int(input("Enter no. of over: "))
print("total ball:", a*6)

comp_runs = random.randint(0, 36)
print("computer's run:", comp_runs)
comp_runs = comp_runs + 1  # target: one run more than the computer's score
print("runs need to win:", comp_runs)
chances_1 = a*6  # balls the player may face
no_of_chances_1 = 0
your_runs = 0

print("-----------------------------------------------\nYour Batting\n")
while no_of_chances_1 < chances_1:

    runs = int(input("Enter Runs for Your Batting Turn: "))
    comp_bowl = random.randint(1, 6)

    if runs == comp_bowl:
        # Matching the bowler's guess dismisses the batter.
        print("Computer Guess: ", comp_bowl)
        print("You are Out. Your Total Runs= ", your_runs, "\n")
        break
    elif runs > 10 or runs < 0:
        # Reject out-of-range input (negatives previously slipped through).
        print("ALERT!! Support No only till 10\n")
        continue
    else:
        your_runs = your_runs + runs
        print("Computer Guess: ", comp_bowl)
        print("Your runs Now are: ", your_runs, "\n")
        # Fixed off-by-one: reaching the target already wins the chase,
        # so break on >= rather than only when the target is exceeded.
        if your_runs >= comp_runs:
            break

    no_of_chances_1 = no_of_chances_1 + 1

# After the innings, decide the result.  comp_runs is the target
# (computer's score + 1); scores are level when your_runs == comp_runs - 1.
# (The original declared a tie at your_runs == comp_runs, which is a win.)
print("\n-----------------------------------------------\nRESULTS: ")

if your_runs >= comp_runs:
    print("You won the Game.")

elif your_runs == comp_runs - 1:
    print("The Game is a Tie")

else:
    print("Computer won the Game.")
|
flexible
|
{
"blob_id": "00312f57e8a78444937f46cecb62a2b684b4fc91",
"index": 8779,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint('total ball:', a * 6)\n<mask token>\nprint(\"computer's run:\", comp_runs)\n<mask token>\nprint('runs need to win:', comp_runs)\n<mask token>\nprint(\"\"\"-----------------------------------------------\nYour Batting\n\"\"\")\nwhile no_of_chances_1 < chances_1:\n runs = int(input('Enter Runs for Your Batting Turn: '))\n comp_bowl = random.randint(1, 6)\n if runs == comp_bowl:\n print('Computer Guess: ', comp_bowl)\n print('You are Out. Your Total Runs= ', your_runs, '\\n')\n break\n elif runs > 10:\n print('ALERT!! Support No only till 10\\n')\n continue\n else:\n your_runs = your_runs + runs\n print('Computer Guess: ', comp_bowl)\n print('Your runs Now are: ', your_runs, '\\n')\n if comp_runs < your_runs:\n break\n no_of_chances_1 = no_of_chances_1 + 1\nprint(\"\"\"\n-----------------------------------------------\nRESULTS: \"\"\")\nif comp_runs < your_runs:\n print('You won the Game.')\nelif comp_runs == your_runs:\n print('The Game is a Tie')\nelse:\n print('Computer won the Game.')\n",
"step-3": "a = int(input('Enter no. of over: '))\nprint('total ball:', a * 6)\n<mask token>\ncomp_runs = random.randint(0, 36)\nprint(\"computer's run:\", comp_runs)\ncomp_runs = comp_runs + 1\nprint('runs need to win:', comp_runs)\nchances_1 = a * 6\nno_of_chances_1 = 0\nyour_runs = 0\nprint(\"\"\"-----------------------------------------------\nYour Batting\n\"\"\")\nwhile no_of_chances_1 < chances_1:\n runs = int(input('Enter Runs for Your Batting Turn: '))\n comp_bowl = random.randint(1, 6)\n if runs == comp_bowl:\n print('Computer Guess: ', comp_bowl)\n print('You are Out. Your Total Runs= ', your_runs, '\\n')\n break\n elif runs > 10:\n print('ALERT!! Support No only till 10\\n')\n continue\n else:\n your_runs = your_runs + runs\n print('Computer Guess: ', comp_bowl)\n print('Your runs Now are: ', your_runs, '\\n')\n if comp_runs < your_runs:\n break\n no_of_chances_1 = no_of_chances_1 + 1\nprint(\"\"\"\n-----------------------------------------------\nRESULTS: \"\"\")\nif comp_runs < your_runs:\n print('You won the Game.')\nelif comp_runs == your_runs:\n print('The Game is a Tie')\nelse:\n print('Computer won the Game.')\n",
"step-4": "a = int(input('Enter no. of over: '))\nprint('total ball:', a * 6)\nimport random\ncomp_runs = random.randint(0, 36)\nprint(\"computer's run:\", comp_runs)\ncomp_runs = comp_runs + 1\nprint('runs need to win:', comp_runs)\nchances_1 = a * 6\nno_of_chances_1 = 0\nyour_runs = 0\nprint(\"\"\"-----------------------------------------------\nYour Batting\n\"\"\")\nwhile no_of_chances_1 < chances_1:\n runs = int(input('Enter Runs for Your Batting Turn: '))\n comp_bowl = random.randint(1, 6)\n if runs == comp_bowl:\n print('Computer Guess: ', comp_bowl)\n print('You are Out. Your Total Runs= ', your_runs, '\\n')\n break\n elif runs > 10:\n print('ALERT!! Support No only till 10\\n')\n continue\n else:\n your_runs = your_runs + runs\n print('Computer Guess: ', comp_bowl)\n print('Your runs Now are: ', your_runs, '\\n')\n if comp_runs < your_runs:\n break\n no_of_chances_1 = no_of_chances_1 + 1\nprint(\"\"\"\n-----------------------------------------------\nRESULTS: \"\"\")\nif comp_runs < your_runs:\n print('You won the Game.')\nelif comp_runs == your_runs:\n print('The Game is a Tie')\nelse:\n print('Computer won the Game.')\n",
"step-5": "a = int(input(\"Enter no. of over: \"))\r\nprint(\"total ball:\",a*6 )\r\nimport random\r\n\r\ncomp_runs = random.randint(0,36)\r\nprint(\"computer's run:\" ,comp_runs)\r\ncomp_runs = comp_runs+1\r\nprint(\"runs need to win:\",comp_runs)\r\nchances_1 = a*6\r\nno_of_chances_1 = 0\r\nyour_runs = 0\r\n\r\nprint(\"-----------------------------------------------\\nYour Batting\\n\")\r\nwhile no_of_chances_1 < chances_1:\r\n\r\n runs = int(input(\"Enter Runs for Your Batting Turn: \"))\r\n comp_bowl = random.randint(1,6)\r\n\r\n if runs == comp_bowl:\r\n print(\"Computer Guess: \", comp_bowl)\r\n print(\"You are Out. Your Total Runs= \", your_runs, \"\\n\")\r\n break\r\n elif runs > 10:\r\n print(\"ALERT!! Support No only till 10\\n\")\r\n continue\r\n else:\r\n your_runs = your_runs + runs\r\n print(\"Computer Guess: \", comp_bowl)\r\n print(\"Your runs Now are: \", your_runs, \"\\n\")\r\n if comp_runs < your_runs:\r\n break\r\n\r\n no_of_chances_1 = no_of_chances_1 + 1\r\n\r\n#after the over ends now result time\r\n\r\nprint(\"\\n-----------------------------------------------\\nRESULTS: \")\r\n\r\nif comp_runs < your_runs:\r\n print(\"You won the Game.\")\r\n\r\nelif comp_runs == your_runs:\r\n print(\"The Game is a Tie\")\r\n\r\nelse:\r\n print(\"Computer won the Game.\")\r\n\r\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from distributions.zero_inflated_poisson import ZeroInflatedPoisson
from distributions.negative_binomial import NegativeBinomial
from distributions.zero_inflated_negative_binomial import ZeroInflatedNegativeBinomial
from distributions.zero_inflated import ZeroInflated
from distributions.categorized import Categorized
from distributions.pareto import Pareto
|
normal
|
{
"blob_id": "dfae1007adc557a15d03b78f2bf790fb5b06141a",
"index": 4442,
"step-1": "<mask token>\n",
"step-2": "from distributions.zero_inflated_poisson import ZeroInflatedPoisson\nfrom distributions.negative_binomial import NegativeBinomial\nfrom distributions.zero_inflated_negative_binomial import ZeroInflatedNegativeBinomial\nfrom distributions.zero_inflated import ZeroInflated\nfrom distributions.categorized import Categorized\nfrom distributions.pareto import Pareto\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class Solution:
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class Solution:

    def longestConsecutive(self, num):
        """Return the length of the longest run of consecutive integers in num.

        Boundary-dict technique: for each streak, the entries at its two
        ends point at the opposite end, so adjacent streaks merge in O(1)
        and the whole pass runs in O(n) expected time.
        """
        if not num:
            # Empty input has no streak; the original fell through to
            # ``res + 1`` and wrongly returned 1 here.
            return 0
        sted = {}
        for item in num:
            if item in sted:
                # Duplicates must be skipped or they corrupt the links.
                continue
            sted[item] = item
            if item - 1 in sted:
                # Attach to the streak that ends at item - 1.
                sted[item] = sted[item - 1]
                sted[sted[item - 1]] = item
            if item + 1 in sted:
                # Merge with the streak that starts at item + 1.
                tmp = sted[item + 1]
                sted[tmp] = sted[item]
                sted[sted[item]] = tmp
        # Only boundary entries are up to date; stale interior values can
        # never exceed the true span, so a plain max is safe.
        res = 0
        for item in sted:
            res = max(res, sted[item] - item)
        return res + 1
<|reserved_special_token_1|>
class Solution:
    # @param num, a list of integer
    # @return an integer
    def longestConsecutive(self, num):
        """Length of the longest consecutive-integer streak in num.

        Each streak's two boundary entries in ``sted`` point at the
        opposite end, giving O(1) merges and O(n) expected total time.
        """
        if not num:
            # No elements -> no streak (the original returned 1 here).
            return 0
        sted = {}
        for item in num:
            if item in sted:
                continue  # ignore duplicates; they would break the links
            sted[item] = item
            if item - 1 in sted:
                # Splice onto the streak ending at item - 1.
                sted[item] = sted[item - 1]
                sted[sted[item - 1]] = item
            if item + 1 in sted:
                # Merge with the streak beginning at item + 1.
                tmp = sted[item + 1]
                sted[tmp] = sted[item]
                sted[sted[item]] = tmp

        # Stale interior entries are always <= the true span, so taking the
        # max over everything still yields the longest streak.
        res = 0
        for item in sted:
            res = max(res, sted[item] - item)
        return res + 1
|
flexible
|
{
"blob_id": "d7c4bee7245dab1cbb90ee68b8e99994ce7dd219",
"index": 3295,
"step-1": "<mask token>\n",
"step-2": "class Solution:\n <mask token>\n",
"step-3": "class Solution:\n\n def longestConsecutive(self, num):\n sted = {}\n n = len(num)\n for item in num:\n if item in sted:\n continue\n sted[item] = item\n if item - 1 in sted:\n sted[item] = sted[item - 1]\n sted[sted[item - 1]] = item\n if item + 1 in sted:\n tmp = sted[item + 1]\n sted[tmp] = sted[item]\n sted[sted[item]] = tmp\n res = 0\n for item in sted:\n res = max(res, sted[item] - item)\n return res + 1\n",
"step-4": "class Solution:\n # @param num, a list of integer\n # @return an integer\n def longestConsecutive(self, num):\n sted = {}\n n = len(num)\n for item in num:\n if item in sted:\n continue\n sted[item] = item\n if item-1 in sted:\n sted[item] = sted[item-1]\n sted[sted[item-1]] = item\n if item+1 in sted:\n tmp = sted[item+1]\n sted[tmp] = sted[item]\n sted[sted[item]] = tmp\n \n res = 0\n for item in sted:\n res = max(res, sted[item] - item)\n return res + 1",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
class Test_Ranges_Case(unittest.TestCase, mixins.CategoricalMixins):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Test_Ranges_Case(unittest.TestCase, mixins.CategoricalMixins):
<|reserved_special_token_0|>
def test_that_all_ranges_are_present(self):
df = get_clean_data()
RANGOS = ['cancelled', '0-1.5', '1.5-3.5', '3.5-']
self.assertCategoricalLevelsEqual(list(df.toPandas()[
'rangoatrasohoras'].unique()), RANGOS)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Test_Ranges_Case(unittest.TestCase, mixins.CategoricalMixins):
    """Verify that the cleaned ``rangoatrasohoras`` column carries exactly
    the expected delay-range categories (including ``cancelled``).
    """
    def test_that_all_ranges_are_present(self):
        """The cleaned data must expose the four known range labels."""
        # get_clean_data() yields a Spark DataFrame; toPandas() collects it
        # so pandas' unique() can list the observed category levels.
        df = get_clean_data()
        RANGOS = ['cancelled', '0-1.5', '1.5-3.5', '3.5-']
        # NOTE(review): assumes assertCategoricalLevelsEqual compares the
        # two level collections as sets -- confirm, since unique() order is
        # not deterministic.
        self.assertCategoricalLevelsEqual(list(df.toPandas()[
            'rangoatrasohoras'].unique()), RANGOS)
<|reserved_special_token_1|>
import unittest
from marbles.mixins import mixins
import pandas as pd
import requests
from pyspark.sql import SparkSession
import psycopg2 as pg
import pandas as pd
from pyspark.sql.types import StructType, StructField, StringType
from src.features.build_features import get_clean_data
class Test_Ranges_Case(unittest.TestCase, mixins.CategoricalMixins):
    """Check that the ``rangoatrasohoras`` column holds exactly the
    expected delay-range categories."""

    def test_that_all_ranges_are_present(self):
        """Cleaned data should expose the four known delay-range labels."""
        expected_ranges = ['cancelled', '0-1.5', '1.5-3.5', '3.5-']
        cleaned = get_clean_data()
        observed = list(cleaned.toPandas()['rangoatrasohoras'].unique())
        self.assertCategoricalLevelsEqual(observed, expected_ranges)
<|reserved_special_token_1|>
#python -m marbles test_clean_rangos.py
import unittest
from marbles.mixins import mixins
import pandas as pd
import requests
from pyspark.sql import SparkSession
import psycopg2 as pg
import pandas as pd
from pyspark.sql.types import StructType, StructField, StringType
from src.features.build_features import get_clean_data
class Test_Ranges_Case(unittest.TestCase, mixins.CategoricalMixins):
    """Ensure the ``rangoatrasohoras`` column carries only the expected
    delay-range labels."""

    def test_that_all_ranges_are_present(self):
        """All four known range labels must be present, and nothing else."""
        df = get_clean_data()
        unique_levels = df.toPandas()['rangoatrasohoras'].unique()
        self.assertCategoricalLevelsEqual(
            list(unique_levels),
            ['cancelled', '0-1.5', '1.5-3.5', '3.5-'],
        )
|
flexible
|
{
"blob_id": "f7c6990b4ddbe5ef9d79ef2326e60cdf1f761db3",
"index": 4542,
"step-1": "<mask token>\n\n\nclass Test_Ranges_Case(unittest.TestCase, mixins.CategoricalMixins):\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass Test_Ranges_Case(unittest.TestCase, mixins.CategoricalMixins):\n <mask token>\n\n def test_that_all_ranges_are_present(self):\n df = get_clean_data()\n RANGOS = ['cancelled', '0-1.5', '1.5-3.5', '3.5-']\n self.assertCategoricalLevelsEqual(list(df.toPandas()[\n 'rangoatrasohoras'].unique()), RANGOS)\n",
"step-3": "<mask token>\n\n\nclass Test_Ranges_Case(unittest.TestCase, mixins.CategoricalMixins):\n \"\"\"\n Verifica que los valores de la columna rangoatrasohoras \n sean los indicados\n\n \"\"\"\n\n def test_that_all_ranges_are_present(self):\n df = get_clean_data()\n RANGOS = ['cancelled', '0-1.5', '1.5-3.5', '3.5-']\n self.assertCategoricalLevelsEqual(list(df.toPandas()[\n 'rangoatrasohoras'].unique()), RANGOS)\n",
"step-4": "import unittest\nfrom marbles.mixins import mixins\nimport pandas as pd\nimport requests\nfrom pyspark.sql import SparkSession\nimport psycopg2 as pg\nimport pandas as pd\nfrom pyspark.sql.types import StructType, StructField, StringType\nfrom src.features.build_features import get_clean_data\n\n\nclass Test_Ranges_Case(unittest.TestCase, mixins.CategoricalMixins):\n \"\"\"\n Verifica que los valores de la columna rangoatrasohoras \n sean los indicados\n\n \"\"\"\n\n def test_that_all_ranges_are_present(self):\n df = get_clean_data()\n RANGOS = ['cancelled', '0-1.5', '1.5-3.5', '3.5-']\n self.assertCategoricalLevelsEqual(list(df.toPandas()[\n 'rangoatrasohoras'].unique()), RANGOS)\n",
"step-5": "#python -m marbles test_clean_rangos.py\n\nimport unittest\nfrom marbles.mixins import mixins\nimport pandas as pd\nimport requests\nfrom pyspark.sql import SparkSession\nimport psycopg2 as pg\nimport pandas as pd\nfrom pyspark.sql.types import StructType, StructField, StringType\nfrom src.features.build_features import get_clean_data\n\nclass Test_Ranges_Case(unittest.TestCase, mixins.CategoricalMixins):\n\t'''\n Verifica que los valores de la columna rangoatrasohoras \n sean los indicados\n\n '''\n\n\tdef test_that_all_ranges_are_present(self):\n\n\n\t\tdf = get_clean_data()\n\t\tRANGOS=['cancelled', '0-1.5', '1.5-3.5' ,'3.5-']\n\t\tself.assertCategoricalLevelsEqual(list(df.toPandas()[\"rangoatrasohoras\"].unique()), RANGOS)\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
    """Migrate every model's implicit primary key to ``BigAutoField``.

    Django generated one ``AlterField`` per model when the project's
    default auto field changed; all fifteen operations are identical apart
    from the model name, so they are produced by a comprehension.
    """

    dependencies = [('settings', '0003_auto_20210814_2246')]

    operations = [
        migrations.AlterField(
            model_name=model_name,
            name='id',
            field=models.BigAutoField(
                auto_created=True,
                primary_key=True,
                serialize=False,
                verbose_name='ID',
            ),
        )
        for model_name in (
            'building', 'group', 'lessontype', 'other', 'patterns',
            'room', 'roomtype', 'salary', 'staff', 'student', 'subjects',
            'teacherrole', 'teachertypes', 'timetable', 'userprofile',
        )
    ]
<|reserved_special_token_1|>
from django.db import migrations, models
class Migration(migrations.Migration):
    """Widen each model's ``id`` column to a 64-bit ``BigAutoField``.

    The fifteen ``AlterField`` operations differ only in the model name,
    so the list is generated rather than spelled out.
    """

    dependencies = [('settings', '0003_auto_20210814_2246')]

    operations = [
        migrations.AlterField(model_name=name, name='id',
                              field=models.BigAutoField(auto_created=True,
                                                        primary_key=True,
                                                        serialize=False,
                                                        verbose_name='ID'))
        for name in ('building', 'group', 'lessontype', 'other', 'patterns',
                     'room', 'roomtype', 'salary', 'staff', 'student',
                     'subjects', 'teacherrole', 'teachertypes', 'timetable',
                     'userprofile')
    ]
<|reserved_special_token_1|>
# Generated by Django 3.2.9 on 2021-11-10 13:36
from django.db import migrations, models
class Migration(migrations.Migration):
    """Convert the auto primary key of every app model to ``BigAutoField``.

    Auto-generated migration; since each model receives exactly the same
    alteration, the operations list is assembled in a loop.
    """

    dependencies = [
        ('settings', '0003_auto_20210814_2246'),
    ]

    operations = []
    for _model in ('building', 'group', 'lessontype', 'other', 'patterns',
                   'room', 'roomtype', 'salary', 'staff', 'student',
                   'subjects', 'teacherrole', 'teachertypes', 'timetable',
                   'userprofile'):
        operations.append(
            migrations.AlterField(
                model_name=_model,
                name='id',
                field=models.BigAutoField(auto_created=True,
                                          primary_key=True,
                                          serialize=False,
                                          verbose_name='ID'),
            )
        )
    del _model  # keep the class namespace clean; only operations matter
|
flexible
|
{
"blob_id": "9dfbf14a2005aad87be82e5e482c6b0347f32f2c",
"index": 8007,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('settings', '0003_auto_20210814_2246')]\n operations = [migrations.AlterField(model_name='building', name='id',\n field=models.BigAutoField(auto_created=True, primary_key=True,\n serialize=False, verbose_name='ID')), migrations.AlterField(\n model_name='group', name='id', field=models.BigAutoField(\n auto_created=True, primary_key=True, serialize=False, verbose_name=\n 'ID')), migrations.AlterField(model_name='lessontype', name='id',\n field=models.BigAutoField(auto_created=True, primary_key=True,\n serialize=False, verbose_name='ID')), migrations.AlterField(\n model_name='other', name='id', field=models.BigAutoField(\n auto_created=True, primary_key=True, serialize=False, verbose_name=\n 'ID')), migrations.AlterField(model_name='patterns', name='id',\n field=models.BigAutoField(auto_created=True, primary_key=True,\n serialize=False, verbose_name='ID')), migrations.AlterField(\n model_name='room', name='id', field=models.BigAutoField(\n auto_created=True, primary_key=True, serialize=False, verbose_name=\n 'ID')), migrations.AlterField(model_name='roomtype', name='id',\n field=models.BigAutoField(auto_created=True, primary_key=True,\n serialize=False, verbose_name='ID')), migrations.AlterField(\n model_name='salary', name='id', field=models.BigAutoField(\n auto_created=True, primary_key=True, serialize=False, verbose_name=\n 'ID')), migrations.AlterField(model_name='staff', name='id', field=\n models.BigAutoField(auto_created=True, primary_key=True, serialize=\n False, verbose_name='ID')), migrations.AlterField(model_name=\n 'student', name='id', field=models.BigAutoField(auto_created=True,\n primary_key=True, serialize=False, verbose_name='ID')), migrations.\n AlterField(model_name='subjects', name='id', field=models.\n BigAutoField(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')), migrations.AlterField(model_name='teacherrole',\n name='id', 
field=models.BigAutoField(auto_created=True, primary_key\n =True, serialize=False, verbose_name='ID')), migrations.AlterField(\n model_name='teachertypes', name='id', field=models.BigAutoField(\n auto_created=True, primary_key=True, serialize=False, verbose_name=\n 'ID')), migrations.AlterField(model_name='timetable', name='id',\n field=models.BigAutoField(auto_created=True, primary_key=True,\n serialize=False, verbose_name='ID')), migrations.AlterField(\n model_name='userprofile', name='id', field=models.BigAutoField(\n auto_created=True, primary_key=True, serialize=False, verbose_name=\n 'ID'))]\n",
"step-4": "from django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n dependencies = [('settings', '0003_auto_20210814_2246')]\n operations = [migrations.AlterField(model_name='building', name='id',\n field=models.BigAutoField(auto_created=True, primary_key=True,\n serialize=False, verbose_name='ID')), migrations.AlterField(\n model_name='group', name='id', field=models.BigAutoField(\n auto_created=True, primary_key=True, serialize=False, verbose_name=\n 'ID')), migrations.AlterField(model_name='lessontype', name='id',\n field=models.BigAutoField(auto_created=True, primary_key=True,\n serialize=False, verbose_name='ID')), migrations.AlterField(\n model_name='other', name='id', field=models.BigAutoField(\n auto_created=True, primary_key=True, serialize=False, verbose_name=\n 'ID')), migrations.AlterField(model_name='patterns', name='id',\n field=models.BigAutoField(auto_created=True, primary_key=True,\n serialize=False, verbose_name='ID')), migrations.AlterField(\n model_name='room', name='id', field=models.BigAutoField(\n auto_created=True, primary_key=True, serialize=False, verbose_name=\n 'ID')), migrations.AlterField(model_name='roomtype', name='id',\n field=models.BigAutoField(auto_created=True, primary_key=True,\n serialize=False, verbose_name='ID')), migrations.AlterField(\n model_name='salary', name='id', field=models.BigAutoField(\n auto_created=True, primary_key=True, serialize=False, verbose_name=\n 'ID')), migrations.AlterField(model_name='staff', name='id', field=\n models.BigAutoField(auto_created=True, primary_key=True, serialize=\n False, verbose_name='ID')), migrations.AlterField(model_name=\n 'student', name='id', field=models.BigAutoField(auto_created=True,\n primary_key=True, serialize=False, verbose_name='ID')), migrations.\n AlterField(model_name='subjects', name='id', field=models.\n BigAutoField(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')), 
migrations.AlterField(model_name='teacherrole',\n name='id', field=models.BigAutoField(auto_created=True, primary_key\n =True, serialize=False, verbose_name='ID')), migrations.AlterField(\n model_name='teachertypes', name='id', field=models.BigAutoField(\n auto_created=True, primary_key=True, serialize=False, verbose_name=\n 'ID')), migrations.AlterField(model_name='timetable', name='id',\n field=models.BigAutoField(auto_created=True, primary_key=True,\n serialize=False, verbose_name='ID')), migrations.AlterField(\n model_name='userprofile', name='id', field=models.BigAutoField(\n auto_created=True, primary_key=True, serialize=False, verbose_name=\n 'ID'))]\n",
"step-5": "# Generated by Django 3.2.9 on 2021-11-10 13:36\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('settings', '0003_auto_20210814_2246'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='building',\n name='id',\n field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),\n ),\n migrations.AlterField(\n model_name='group',\n name='id',\n field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),\n ),\n migrations.AlterField(\n model_name='lessontype',\n name='id',\n field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),\n ),\n migrations.AlterField(\n model_name='other',\n name='id',\n field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),\n ),\n migrations.AlterField(\n model_name='patterns',\n name='id',\n field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),\n ),\n migrations.AlterField(\n model_name='room',\n name='id',\n field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),\n ),\n migrations.AlterField(\n model_name='roomtype',\n name='id',\n field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),\n ),\n migrations.AlterField(\n model_name='salary',\n name='id',\n field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),\n ),\n migrations.AlterField(\n model_name='staff',\n name='id',\n field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),\n ),\n migrations.AlterField(\n model_name='student',\n name='id',\n field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),\n ),\n migrations.AlterField(\n model_name='subjects',\n name='id',\n 
field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),\n ),\n migrations.AlterField(\n model_name='teacherrole',\n name='id',\n field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),\n ),\n migrations.AlterField(\n model_name='teachertypes',\n name='id',\n field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),\n ),\n migrations.AlterField(\n model_name='timetable',\n name='id',\n field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),\n ),\n migrations.AlterField(\n model_name='userprofile',\n name='id',\n field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# -*- coding: utf-8 -*-
from math import acos, pi, sqrt
from decimal import Decimal, getcontext
getcontext().prec = 30
class Vector(object):
    """An n-dimensional vector backed by a tuple of ``Decimal`` coordinates.

    Most operations return a new ``Vector``; equality compares the raw
    coordinate tuples.
    """

    NO_NONZERO_ELTS_FOUND_MSG = 'No nonzero elements found'

    def __init__(self, coordinates):
        """Create a vector from a non-empty iterable of numbers.

        Raises:
            ValueError: if *coordinates* is empty.
            TypeError: if *coordinates* is not iterable.
        """
        try:
            if not coordinates:
                raise ValueError
            self.coordinates = tuple([Decimal(x) for x in coordinates])
            self.dimension = len(coordinates)
        except ValueError:
            raise ValueError('The coordinates must be nonempty')
        except TypeError:
            raise TypeError('The coordinates must be an iterable')

    def __str__(self):
        return 'Vector: {}'.format(self.coordinates)

    def __eq__(self, v):
        return self.coordinates == v.coordinates

    def iszero(self, tolerance=1e-10):
        """True when the vector's magnitude is below *tolerance*."""
        return self.magnitude() < tolerance

    def plus(self, v):
        """Return the component-wise sum ``self + v``."""
        if isinstance(v, Vector):
            if self.dimension == v.dimension:
                return Vector([x + y for x, y in zip(self.coordinates, v.coordinates)])
            else:
                raise ValueError('dimension not match.')
        else:
            raise TypeError('not a Vector.')

    def minus(self, v):
        """Return the component-wise difference ``self - v``."""
        if isinstance(v, Vector):
            if self.dimension == v.dimension:
                return Vector([x - y for x, y in zip(self.coordinates, v.coordinates)])
            else:
                raise ValueError('dimension not match.')
        else:
            raise TypeError('not a Vector.')

    def time_scalar(self, scalar):
        """Return a copy of the vector scaled by *scalar*."""
        try:
            return Vector([Decimal(scalar) * x for x in self.coordinates])
        except Exception:
            raise TypeError('{0} is not a number'.format(scalar))

    def magnitude(self):
        """Return the Euclidean norm as a Decimal."""
        return Decimal(sqrt(sum([x ** 2 for x in self.coordinates])))

    def normalize(self):
        """Return the unit vector pointing the same way as self."""
        if self.iszero():
            raise ValueError("Can't normalize a zero vector.")
        else:
            return self.time_scalar(Decimal(1.0) / self.magnitude())

    def dot(self, v):
        """Return the dot (inner) product with *v*."""
        if not isinstance(v, Vector):
            raise TypeError('not a Vector')
        else:
            if self.dimension != v.dimension:
                raise ValueError('dimension not match.')
            else:
                return sum([x * y for x, y in zip(self.coordinates, v.coordinates)])

    def angle_with(self, v, in_degree=False, tolerance=1e-10):
        """Return the angle between self and *v* (radians by default).

        The cosine is snapped to -1, 0 or 1 when within *tolerance*, so
        floating point noise cannot push ``acos`` out of its domain.
        """
        if not isinstance(v, Vector):
            raise TypeError('not a Vector')
        if self.dimension != v.dimension:
            raise ValueError('dimension not match.')
        d = self.dot(v) / (self.magnitude() * v.magnitude())
        if abs(abs(d) - 1) < tolerance:
            d = 1 if d > 0 else -1
        elif abs(d) < tolerance:
            d = 0
        if in_degree:
            return acos(d) / pi * 180
        else:
            return acos(d)

    def is_parallel_to(self, v):
        """Parallel test via normalization (zero vectors count as parallel)."""
        if not isinstance(v, Vector):
            raise TypeError('not a Vector')
        if self.iszero() or v.iszero():
            return True
        v1 = self.normalize()
        v2 = v.normalize()
        # Parallel vectors normalize to the same or the opposite unit vector.
        return (v1.minus(v2).iszero() or
                v1.plus(v2).iszero())

    def is_parallel_to2(self, v):
        """Parallel test by scaling one vector onto the other."""
        if not isinstance(v, Vector):
            raise TypeError('not a Vector')
        if self.iszero() or v.iszero():
            return True
        n = Vector.first_nonzero_index(self.coordinates)
        if v.coordinates[n] == 0:
            return False
        # Scale the vector with the smaller coordinate to limit round-off.
        if abs(self.coordinates[n]) <= abs(v.coordinates[n]):
            return self.time_scalar(v.coordinates[n] / self.coordinates[n]).minus(v).iszero()
        else:
            return v.time_scalar(self.coordinates[n] / v.coordinates[n]).minus(self).iszero()

    def is_parallel_to3(self, v):
        """Parallel test via the angle (0 or pi means parallel)."""
        if not isinstance(v, Vector):
            raise TypeError('not a Vector')
        return (self.iszero() or
                v.iszero() or
                self.angle_with(v) == 0 or
                self.angle_with(v) == pi)

    def is_orthogonal_to(self, v, tolerance=1e-10):
        """True when the dot product vanishes within *tolerance*."""
        if not isinstance(v, Vector):
            raise TypeError('not a Vector')
        return abs(self.dot(v)) < tolerance

    def component_project_to(self, v):
        """Return the projection of self onto *v*."""
        if not isinstance(v, Vector):
            raise TypeError('not a Vector')
        return v.normalize().time_scalar(self.dot(v.normalize()))

    def component_orthogonal_to(self, v):
        """Return the component of self orthogonal to *v*.

        Bug fix: the original called ``self.project(v)``, a method that
        does not exist; the projection helper is ``component_project_to``.
        """
        if not isinstance(v, Vector):
            raise TypeError('not a Vector')
        return self.minus(self.component_project_to(v))

    def cross(self, v):
        """Return the cross product; 2-D inputs are embedded at z == 0.

        Bug fix: dimensions other than 2 or 3 previously fell through to an
        undefined local (NameError); they now raise ValueError like every
        other dimension mismatch.
        """
        if not isinstance(v, Vector):
            raise TypeError('not a Vector')
        if self.dimension != v.dimension or self.dimension not in (2, 3):
            raise ValueError('dimensions not match')
        r = []
        if self.dimension == 2:
            z1 = z2 = Decimal(0.0)  # treat 2-D vectors as lying in z == 0
        else:
            z1 = self.coordinates[2]
            z2 = v.coordinates[2]
        r.append(self.coordinates[1] * z2 - v.coordinates[1] * z1)
        r.append(v.coordinates[0] * z1 - self.coordinates[0] * z2)
        r.append(self.coordinates[0] * v.coordinates[1] - v.coordinates[0] * self.coordinates[1])
        return Vector(r)

    def parallelogram_area(self, v):
        """Area of the parallelogram spanned by self and *v*."""
        if not isinstance(v, Vector):
            raise TypeError('not a Vector')
        return self.cross(v).magnitude()

    @staticmethod
    def first_nonzero_index(iterable):
        """Index of the first element that is not near zero."""
        for k, item in enumerate(iterable):
            if not MyDecimal(item).is_near_zero():
                return k
        raise Exception(Vector.NO_NONZERO_ELTS_FOUND_MSG)

    def __getitem__(self, i):
        return self.coordinates[i]

    def __setitem__(self, i, x):
        """Replace coordinate *i* with *x* (stored as Decimal).

        Bug fix: ``self.coordinates`` is a tuple, so the original in-place
        assignment raised TypeError; the tuple is rebuilt instead.
        """
        coords = list(self.coordinates)
        coords[i] = Decimal(x)
        self.coordinates = tuple(coords)
class MyDecimal(Decimal):
    """Decimal subclass adding a tolerance-based zero test."""

    def is_near_zero(self, eps=1e-10):
        """True when the absolute value of self is below *eps*."""
        return eps > abs(self)
|
normal
|
{
"blob_id": "1253e052865860a6895f91204a70152745b04652",
"index": 8498,
"step-1": "<mask token>\n\n\nclass Vector(object):\n <mask token>\n\n def __init__(self, coordinates):\n try:\n if not coordinates:\n raise ValueError\n self.coordinates = tuple([Decimal(x) for x in coordinates])\n self.dimension = len(coordinates)\n except ValueError:\n raise ValueError('The coordinates must be nonempty')\n except TypeError:\n raise TypeError('The coordinates must be an iterable')\n\n def __str__(self):\n return 'Vector: {}'.format(self.coordinates)\n <mask token>\n\n def iszero(self, tolerance=1e-10):\n return self.magnitude() < tolerance\n\n def plus(self, v):\n if isinstance(v, Vector):\n if self.dimension == v.dimension:\n return Vector([(x + y) for x, y in zip(self.coordinates, v.\n coordinates)])\n else:\n raise ValueError('dimension not match.')\n else:\n raise TypeError('not a Vector.')\n <mask token>\n <mask token>\n\n def magnitude(self):\n return Decimal(sqrt(sum([(x ** 2) for x in self.coordinates])))\n <mask token>\n <mask token>\n <mask token>\n\n def is_parallel_to(self, v):\n if not isinstance(v, Vector):\n raise TypeError('not a Vector')\n if self.iszero() or v.iszero():\n return True\n v1 = self.normalize()\n v2 = v.normalize()\n return v1.minus(v2).iszero() or v1.plus(v2).iszero()\n\n def is_parallel_to2(self, v):\n if not isinstance(v, Vector):\n raise TypeError('not a Vector')\n if self.iszero() or v.iszero():\n return True\n n = Vector.first_nonzero_index(self.coordinates)\n if v.coordinates[n] == 0:\n return False\n if abs(self.coordinates[n]) <= abs(v.coordinates[n]):\n return self.time_scalar(v.coordinates[n] / self.coordinates[n]\n ).minus(v).iszero()\n else:\n return v.time_scalar(self.coordinates[n] / v.coordinates[n]).minus(\n self).iszero()\n\n def is_parallel_to3(self, v):\n if not isinstance(v, Vector):\n raise TypeError('not a Vector')\n return self.iszero() or v.iszero() or self.angle_with(v\n ) == 0 or self.angle_with(v) == pi\n\n def is_orthogonal_to(self, v, tolerance=1e-10):\n if not isinstance(v, Vector):\n 
raise TypeError('not a Vector')\n return abs(self.dot(v)) < tolerance\n <mask token>\n\n def component_orthogonal_to(self, v):\n if not isinstance(v, Vector):\n raise TypeError('not a Vector')\n return self.minus(self.project(v))\n\n def cross(self, v):\n if not isinstance(v, Vector):\n raise TypeError('not a Vector')\n r = []\n if (self.dimension != v.dimension or self.dimension == 1 or v.\n dimension == 1):\n raise ValueError('dimensions not match')\n if self.dimension == v.dimension == 2:\n z1 = z2 = Decimal(0.0)\n if self.dimension == v.dimension == 3:\n z1 = self.coordinates[2]\n z2 = v.coordinates[2]\n r.append(self.coordinates[1] * z2 - v.coordinates[1] * z1)\n r.append(v.coordinates[0] * z1 - self.coordinates[0] * z2)\n r.append(self.coordinates[0] * v.coordinates[1] - v.coordinates[0] *\n self.coordinates[1])\n return Vector(r)\n\n def parallelogram_area(self, v):\n if not isinstance(v, Vector):\n raise TypeError('not a Vector')\n return self.cross(v).magnitude()\n <mask token>\n <mask token>\n <mask token>\n\n\nclass MyDecimal(Decimal):\n\n def is_near_zero(self, eps=1e-10):\n return abs(self) < eps\n",
"step-2": "<mask token>\n\n\nclass Vector(object):\n <mask token>\n\n def __init__(self, coordinates):\n try:\n if not coordinates:\n raise ValueError\n self.coordinates = tuple([Decimal(x) for x in coordinates])\n self.dimension = len(coordinates)\n except ValueError:\n raise ValueError('The coordinates must be nonempty')\n except TypeError:\n raise TypeError('The coordinates must be an iterable')\n\n def __str__(self):\n return 'Vector: {}'.format(self.coordinates)\n\n def __eq__(self, v):\n return self.coordinates == v.coordinates\n\n def iszero(self, tolerance=1e-10):\n return self.magnitude() < tolerance\n\n def plus(self, v):\n if isinstance(v, Vector):\n if self.dimension == v.dimension:\n return Vector([(x + y) for x, y in zip(self.coordinates, v.\n coordinates)])\n else:\n raise ValueError('dimension not match.')\n else:\n raise TypeError('not a Vector.')\n <mask token>\n <mask token>\n\n def magnitude(self):\n return Decimal(sqrt(sum([(x ** 2) for x in self.coordinates])))\n <mask token>\n <mask token>\n\n def angle_with(self, v, in_degree=False, tolerance=1e-10):\n if not isinstance(v, Vector):\n raise TypeError('not a Vector')\n if self.dimension != v.dimension:\n raise ValueError('dimension not match.')\n d = self.dot(v) / (self.magnitude() * v.magnitude())\n if abs(abs(d) - 1) < tolerance:\n d = 1 if d > 0 else -1\n elif abs(d) < tolerance:\n d = 0\n if in_degree:\n return acos(d) / pi * 180\n else:\n return acos(d)\n\n def is_parallel_to(self, v):\n if not isinstance(v, Vector):\n raise TypeError('not a Vector')\n if self.iszero() or v.iszero():\n return True\n v1 = self.normalize()\n v2 = v.normalize()\n return v1.minus(v2).iszero() or v1.plus(v2).iszero()\n\n def is_parallel_to2(self, v):\n if not isinstance(v, Vector):\n raise TypeError('not a Vector')\n if self.iszero() or v.iszero():\n return True\n n = Vector.first_nonzero_index(self.coordinates)\n if v.coordinates[n] == 0:\n return False\n if abs(self.coordinates[n]) <= 
abs(v.coordinates[n]):\n return self.time_scalar(v.coordinates[n] / self.coordinates[n]\n ).minus(v).iszero()\n else:\n return v.time_scalar(self.coordinates[n] / v.coordinates[n]).minus(\n self).iszero()\n\n def is_parallel_to3(self, v):\n if not isinstance(v, Vector):\n raise TypeError('not a Vector')\n return self.iszero() or v.iszero() or self.angle_with(v\n ) == 0 or self.angle_with(v) == pi\n\n def is_orthogonal_to(self, v, tolerance=1e-10):\n if not isinstance(v, Vector):\n raise TypeError('not a Vector')\n return abs(self.dot(v)) < tolerance\n <mask token>\n\n def component_orthogonal_to(self, v):\n if not isinstance(v, Vector):\n raise TypeError('not a Vector')\n return self.minus(self.project(v))\n\n def cross(self, v):\n if not isinstance(v, Vector):\n raise TypeError('not a Vector')\n r = []\n if (self.dimension != v.dimension or self.dimension == 1 or v.\n dimension == 1):\n raise ValueError('dimensions not match')\n if self.dimension == v.dimension == 2:\n z1 = z2 = Decimal(0.0)\n if self.dimension == v.dimension == 3:\n z1 = self.coordinates[2]\n z2 = v.coordinates[2]\n r.append(self.coordinates[1] * z2 - v.coordinates[1] * z1)\n r.append(v.coordinates[0] * z1 - self.coordinates[0] * z2)\n r.append(self.coordinates[0] * v.coordinates[1] - v.coordinates[0] *\n self.coordinates[1])\n return Vector(r)\n\n def parallelogram_area(self, v):\n if not isinstance(v, Vector):\n raise TypeError('not a Vector')\n return self.cross(v).magnitude()\n <mask token>\n <mask token>\n <mask token>\n\n\nclass MyDecimal(Decimal):\n\n def is_near_zero(self, eps=1e-10):\n return abs(self) < eps\n",
"step-3": "<mask token>\n\n\nclass Vector(object):\n <mask token>\n\n def __init__(self, coordinates):\n try:\n if not coordinates:\n raise ValueError\n self.coordinates = tuple([Decimal(x) for x in coordinates])\n self.dimension = len(coordinates)\n except ValueError:\n raise ValueError('The coordinates must be nonempty')\n except TypeError:\n raise TypeError('The coordinates must be an iterable')\n\n def __str__(self):\n return 'Vector: {}'.format(self.coordinates)\n\n def __eq__(self, v):\n return self.coordinates == v.coordinates\n\n def iszero(self, tolerance=1e-10):\n return self.magnitude() < tolerance\n\n def plus(self, v):\n if isinstance(v, Vector):\n if self.dimension == v.dimension:\n return Vector([(x + y) for x, y in zip(self.coordinates, v.\n coordinates)])\n else:\n raise ValueError('dimension not match.')\n else:\n raise TypeError('not a Vector.')\n <mask token>\n\n def time_scalar(self, scalar):\n try:\n return Vector([(Decimal(scalar) * x) for x in self.coordinates])\n except Exception:\n raise TypeError('{0} is not a number'.format(scalar))\n\n def magnitude(self):\n return Decimal(sqrt(sum([(x ** 2) for x in self.coordinates])))\n\n def normalize(self):\n if self.iszero():\n raise ValueError(\"Can't normalize a zero vector.\")\n else:\n return self.time_scalar(Decimal(1.0) / self.magnitude())\n\n def dot(self, v):\n if not isinstance(v, Vector):\n raise TypeError('not a Vector')\n elif self.dimension != v.dimension:\n raise ValueError('dimension not match.')\n else:\n return sum([(x * y) for x, y in zip(self.coordinates, v.\n coordinates)])\n\n def angle_with(self, v, in_degree=False, tolerance=1e-10):\n if not isinstance(v, Vector):\n raise TypeError('not a Vector')\n if self.dimension != v.dimension:\n raise ValueError('dimension not match.')\n d = self.dot(v) / (self.magnitude() * v.magnitude())\n if abs(abs(d) - 1) < tolerance:\n d = 1 if d > 0 else -1\n elif abs(d) < tolerance:\n d = 0\n if in_degree:\n return acos(d) / pi * 180\n else:\n 
return acos(d)\n\n def is_parallel_to(self, v):\n if not isinstance(v, Vector):\n raise TypeError('not a Vector')\n if self.iszero() or v.iszero():\n return True\n v1 = self.normalize()\n v2 = v.normalize()\n return v1.minus(v2).iszero() or v1.plus(v2).iszero()\n\n def is_parallel_to2(self, v):\n if not isinstance(v, Vector):\n raise TypeError('not a Vector')\n if self.iszero() or v.iszero():\n return True\n n = Vector.first_nonzero_index(self.coordinates)\n if v.coordinates[n] == 0:\n return False\n if abs(self.coordinates[n]) <= abs(v.coordinates[n]):\n return self.time_scalar(v.coordinates[n] / self.coordinates[n]\n ).minus(v).iszero()\n else:\n return v.time_scalar(self.coordinates[n] / v.coordinates[n]).minus(\n self).iszero()\n\n def is_parallel_to3(self, v):\n if not isinstance(v, Vector):\n raise TypeError('not a Vector')\n return self.iszero() or v.iszero() or self.angle_with(v\n ) == 0 or self.angle_with(v) == pi\n\n def is_orthogonal_to(self, v, tolerance=1e-10):\n if not isinstance(v, Vector):\n raise TypeError('not a Vector')\n return abs(self.dot(v)) < tolerance\n\n def component_project_to(self, v):\n if not isinstance(v, Vector):\n raise TypeError('not a Vector')\n return v.normalize().time_scalar(self.dot(v.normalize()))\n\n def component_orthogonal_to(self, v):\n if not isinstance(v, Vector):\n raise TypeError('not a Vector')\n return self.minus(self.project(v))\n\n def cross(self, v):\n if not isinstance(v, Vector):\n raise TypeError('not a Vector')\n r = []\n if (self.dimension != v.dimension or self.dimension == 1 or v.\n dimension == 1):\n raise ValueError('dimensions not match')\n if self.dimension == v.dimension == 2:\n z1 = z2 = Decimal(0.0)\n if self.dimension == v.dimension == 3:\n z1 = self.coordinates[2]\n z2 = v.coordinates[2]\n r.append(self.coordinates[1] * z2 - v.coordinates[1] * z1)\n r.append(v.coordinates[0] * z1 - self.coordinates[0] * z2)\n r.append(self.coordinates[0] * v.coordinates[1] - v.coordinates[0] *\n 
self.coordinates[1])\n return Vector(r)\n\n def parallelogram_area(self, v):\n if not isinstance(v, Vector):\n raise TypeError('not a Vector')\n return self.cross(v).magnitude()\n\n @staticmethod\n def first_nonzero_index(iterable):\n for k, item in enumerate(iterable):\n if not MyDecimal(item).is_near_zero():\n return k\n raise Exception(Vector.NO_NONZERO_ELTS_FOUND_MSG)\n\n def __getitem__(self, i):\n return self.coordinates[i]\n <mask token>\n\n\nclass MyDecimal(Decimal):\n\n def is_near_zero(self, eps=1e-10):\n return abs(self) < eps\n",
"step-4": "<mask token>\ngetcontext().prec = 30\n\n\nclass Vector(object):\n NO_NONZERO_ELTS_FOUND_MSG = 'No nonzero elements found'\n\n def __init__(self, coordinates):\n try:\n if not coordinates:\n raise ValueError\n self.coordinates = tuple([Decimal(x) for x in coordinates])\n self.dimension = len(coordinates)\n except ValueError:\n raise ValueError('The coordinates must be nonempty')\n except TypeError:\n raise TypeError('The coordinates must be an iterable')\n\n def __str__(self):\n return 'Vector: {}'.format(self.coordinates)\n\n def __eq__(self, v):\n return self.coordinates == v.coordinates\n\n def iszero(self, tolerance=1e-10):\n return self.magnitude() < tolerance\n\n def plus(self, v):\n if isinstance(v, Vector):\n if self.dimension == v.dimension:\n return Vector([(x + y) for x, y in zip(self.coordinates, v.\n coordinates)])\n else:\n raise ValueError('dimension not match.')\n else:\n raise TypeError('not a Vector.')\n\n def minus(self, v):\n if isinstance(v, Vector):\n if self.dimension == v.dimension:\n return Vector([(x - y) for x, y in zip(self.coordinates, v.\n coordinates)])\n else:\n raise ValueError('dimension not match.')\n else:\n raise TypeError('not a Vector.')\n\n def time_scalar(self, scalar):\n try:\n return Vector([(Decimal(scalar) * x) for x in self.coordinates])\n except Exception:\n raise TypeError('{0} is not a number'.format(scalar))\n\n def magnitude(self):\n return Decimal(sqrt(sum([(x ** 2) for x in self.coordinates])))\n\n def normalize(self):\n if self.iszero():\n raise ValueError(\"Can't normalize a zero vector.\")\n else:\n return self.time_scalar(Decimal(1.0) / self.magnitude())\n\n def dot(self, v):\n if not isinstance(v, Vector):\n raise TypeError('not a Vector')\n elif self.dimension != v.dimension:\n raise ValueError('dimension not match.')\n else:\n return sum([(x * y) for x, y in zip(self.coordinates, v.\n coordinates)])\n\n def angle_with(self, v, in_degree=False, tolerance=1e-10):\n if not isinstance(v, Vector):\n 
raise TypeError('not a Vector')\n if self.dimension != v.dimension:\n raise ValueError('dimension not match.')\n d = self.dot(v) / (self.magnitude() * v.magnitude())\n if abs(abs(d) - 1) < tolerance:\n d = 1 if d > 0 else -1\n elif abs(d) < tolerance:\n d = 0\n if in_degree:\n return acos(d) / pi * 180\n else:\n return acos(d)\n\n def is_parallel_to(self, v):\n if not isinstance(v, Vector):\n raise TypeError('not a Vector')\n if self.iszero() or v.iszero():\n return True\n v1 = self.normalize()\n v2 = v.normalize()\n return v1.minus(v2).iszero() or v1.plus(v2).iszero()\n\n def is_parallel_to2(self, v):\n if not isinstance(v, Vector):\n raise TypeError('not a Vector')\n if self.iszero() or v.iszero():\n return True\n n = Vector.first_nonzero_index(self.coordinates)\n if v.coordinates[n] == 0:\n return False\n if abs(self.coordinates[n]) <= abs(v.coordinates[n]):\n return self.time_scalar(v.coordinates[n] / self.coordinates[n]\n ).minus(v).iszero()\n else:\n return v.time_scalar(self.coordinates[n] / v.coordinates[n]).minus(\n self).iszero()\n\n def is_parallel_to3(self, v):\n if not isinstance(v, Vector):\n raise TypeError('not a Vector')\n return self.iszero() or v.iszero() or self.angle_with(v\n ) == 0 or self.angle_with(v) == pi\n\n def is_orthogonal_to(self, v, tolerance=1e-10):\n if not isinstance(v, Vector):\n raise TypeError('not a Vector')\n return abs(self.dot(v)) < tolerance\n\n def component_project_to(self, v):\n if not isinstance(v, Vector):\n raise TypeError('not a Vector')\n return v.normalize().time_scalar(self.dot(v.normalize()))\n\n def component_orthogonal_to(self, v):\n if not isinstance(v, Vector):\n raise TypeError('not a Vector')\n return self.minus(self.project(v))\n\n def cross(self, v):\n if not isinstance(v, Vector):\n raise TypeError('not a Vector')\n r = []\n if (self.dimension != v.dimension or self.dimension == 1 or v.\n dimension == 1):\n raise ValueError('dimensions not match')\n if self.dimension == v.dimension == 2:\n z1 = z2 = 
Decimal(0.0)\n if self.dimension == v.dimension == 3:\n z1 = self.coordinates[2]\n z2 = v.coordinates[2]\n r.append(self.coordinates[1] * z2 - v.coordinates[1] * z1)\n r.append(v.coordinates[0] * z1 - self.coordinates[0] * z2)\n r.append(self.coordinates[0] * v.coordinates[1] - v.coordinates[0] *\n self.coordinates[1])\n return Vector(r)\n\n def parallelogram_area(self, v):\n if not isinstance(v, Vector):\n raise TypeError('not a Vector')\n return self.cross(v).magnitude()\n\n @staticmethod\n def first_nonzero_index(iterable):\n for k, item in enumerate(iterable):\n if not MyDecimal(item).is_near_zero():\n return k\n raise Exception(Vector.NO_NONZERO_ELTS_FOUND_MSG)\n\n def __getitem__(self, i):\n return self.coordinates[i]\n\n def __setitem__(self, i, x):\n self.coordinates[i] = x\n\n\nclass MyDecimal(Decimal):\n\n def is_near_zero(self, eps=1e-10):\n return abs(self) < eps\n",
"step-5": "# -*- coding: utf-8 -*-\n\nfrom math import acos, pi, sqrt\nfrom decimal import Decimal, getcontext\n\ngetcontext().prec = 30\n\nclass Vector(object):\n NO_NONZERO_ELTS_FOUND_MSG = 'No nonzero elements found'\n \n def __init__(self, coordinates):\n try:\n if not coordinates:\n raise ValueError\n self.coordinates = tuple([Decimal(x) for x in coordinates])\n self.dimension = len(coordinates)\n\n except ValueError:\n raise ValueError('The coordinates must be nonempty')\n\n except TypeError:\n raise TypeError('The coordinates must be an iterable')\n\n def __str__(self):\n return 'Vector: {}'.format(self.coordinates)\n\n def __eq__(self, v):\n return self.coordinates == v.coordinates\n \n def iszero(self, tolerance=1e-10):\n return self.magnitude()<tolerance\n \n def plus(self, v):\n if isinstance(v, Vector):\n if self.dimension == v.dimension :\n return Vector([x+y for x, y in zip(self.coordinates, v.coordinates)])\n else:\n raise ValueError('dimension not match.')\n else:\n raise TypeError('not a Vector.')\n\n def minus(self, v):\n if isinstance(v, Vector):\n if self.dimension == v.dimension :\n return Vector([x-y for x, y in zip(self.coordinates, v.coordinates)])\n else:\n raise ValueError('dimension not match.')\n else:\n raise TypeError('not a Vector.')\n\n def time_scalar(self, scalar):\n try:\n return Vector([Decimal(scalar) * x for x in self.coordinates])\n except Exception:\n raise TypeError('{0} is not a number'.format(scalar))\n \n def magnitude(self):\n return Decimal(sqrt(sum([x**2 for x in self.coordinates])))\n \n def normalize(self):\n if self.iszero():\n raise ValueError(\"Can't normalize a zero vector.\")\n else:\n return self.time_scalar(Decimal(1.0)/self.magnitude())\n \n def dot(self, v):\n if not isinstance(v, Vector):\n raise TypeError('not a Vector')\n else:\n if self.dimension != v.dimension:\n raise ValueError('dimension not match.')\n else:\n return sum([x*y for x,y in zip(self.coordinates,v.coordinates)]) \n \n def angle_with(self, 
v, in_degree=False, tolerance=1e-10):\n if not isinstance(v, Vector):\n raise TypeError('not a Vector')\n if self.dimension != v.dimension:\n raise ValueError('dimension not match.')\n d = self.dot(v)/(self.magnitude()*v.magnitude())\n if abs(abs(d)-1) < tolerance:\n d = 1 if d>0 else -1\n elif abs(d)<tolerance:\n d = 0\n if in_degree:\n return acos(d)/pi*180\n else:\n return acos(d)\n \n def is_parallel_to(self, v):\n if not isinstance(v, Vector):\n raise TypeError('not a Vector')\n if self.iszero() or v.iszero():\n return True\n v1 = self.normalize()\n v2 = v.normalize()\n return (v1.minus(v2).iszero() or \n v1.plus(v2).iszero())\n \n def is_parallel_to2(self, v):\n if not isinstance(v, Vector):\n raise TypeError('not a Vector')\n if self.iszero() or v.iszero():\n return True\n n = Vector.first_nonzero_index(self.coordinates)\n if (v.coordinates[n] == 0):\n return False\n if abs(self.coordinates[n])<=abs(v.coordinates[n]):\n return self.time_scalar(v.coordinates[n] / self.coordinates[n]).minus(v).iszero()\n else:\n return v.time_scalar(self.coordinates[n] / v.coordinates[n]).minus(self).iszero()\n \n def is_parallel_to3(self, v):\n if not isinstance(v, Vector):\n raise TypeError('not a Vector')\n return (self.iszero() or \n v.iszero() or\n self.angle_with(v) == 0 or\n self.angle_with(v) == pi)\n \n def is_orthogonal_to(self, v, tolerance=1e-10):\n if not isinstance(v, Vector):\n raise TypeError('not a Vector')\n return abs(self.dot(v)) < tolerance\n\n def component_project_to(self, v):\n if not isinstance(v, Vector):\n raise TypeError('not a Vector')\n return v.normalize().time_scalar(self.dot(v.normalize()))\n\n def component_orthogonal_to(self, v):\n if not isinstance(v, Vector):\n raise TypeError('not a Vector')\n return self.minus(self.project(v))\n \n def cross(self, v):\n if not isinstance(v, Vector):\n raise TypeError('not a Vector')\n r = []\n if ((self.dimension != v.dimension) or\n (self.dimension == 1) or\n (v.dimension == 1)):\n raise 
ValueError('dimensions not match')\n if (self.dimension == v.dimension == 2):\n z1 = z2 = Decimal(0.0)\n if (self.dimension == v.dimension == 3):\n z1 = self.coordinates[2]\n z2 = v.coordinates[2]\n r.append(self.coordinates[1]*z2 - v.coordinates[1]*z1)\n r.append(v.coordinates[0]*z1 - self.coordinates[0]*z2)\n r.append(self.coordinates[0]*v.coordinates[1] - v.coordinates[0]*self.coordinates[1])\n return Vector(r)\n \n \n def parallelogram_area(self, v):\n if not isinstance(v, Vector):\n raise TypeError('not a Vector')\n return self.cross(v).magnitude()\n \n @staticmethod\n def first_nonzero_index(iterable):\n for k, item in enumerate(iterable):\n if not MyDecimal(item).is_near_zero():\n return k\n raise Exception(Vector.NO_NONZERO_ELTS_FOUND_MSG)\n \n def __getitem__(self, i):\n return self.coordinates[i]\n \n def __setitem__(self, i, x):\n self.coordinates[i] = x\n\n \nclass MyDecimal(Decimal):\n def is_near_zero(self, eps=1e-10):\n return abs(self) < eps",
"step-ids": [
15,
17,
23,
27,
29
]
}
|
[
15,
17,
23,
27,
29
] |
#! /usr/bin/env python3
"""Publishes joint trajectory to move robot to given pose"""
import rospy
from trajectory_msgs.msg import JointTrajectory
from trajectory_msgs.msg import JointTrajectoryPoint
from std_srvs.srv import Empty
import argparse
import time
def argumentParser(argument):
    """Parse the kinova robot type string (e.g. 'j2n6a300') from argv.

    The ``argument`` parameter is kept for interface compatibility; values
    are actually read from rospy.myargv() so ROS remapping arguments are
    filtered out before parsing.

    Returns (prefix, number_of_joints, number_of_fingers).
    """
    parser = argparse.ArgumentParser(description='Drive robot joint to command position')
    # Bug fix: without nargs='?' a positional argument is mandatory and its
    # declared default is dead code; nargs='?' makes the default apply when
    # the argument is omitted, which is backward-compatible for callers
    # that do pass it.
    parser.add_argument('kinova_robotType', metavar='kinova_robotType', type=str,
                        nargs='?', default='j2n6a300',
                        help='kinova_RobotType is in format of: [{j|m|r|c}{1|2}{s|n}{4|6|7}{s|a}{2|3}{0}{0}]. eg: j2n6a300 refers to jaco v2 6DOF assistive 3fingers. Please be noted that not all options are valided for different robot types.')
    argv = rospy.myargv()
    args_ = parser.parse_args(argv[1:])
    prefix = args_.kinova_robotType
    # The robot-type string encodes the joint count at index 3 and the
    # finger count at index 5 (e.g. 'j2n6a300' -> 6 joints, 3 fingers).
    nbJoints = int(prefix[3])
    nbfingers = int(prefix[5])
    return prefix, nbJoints, nbfingers
def moveJoint(jointcmds, prefix, nbJoints):
    """Publish a 5-second joint trajectory driving the arm to jointcmds."""
    topic_name = '/' + prefix + '/effort_joint_trajectory_controller/command'
    pub = rospy.Publisher(topic_name, JointTrajectory, queue_size=1)
    trajectory = JointTrajectory()
    trajectory.header.stamp = rospy.Time.now() + rospy.Duration.from_sec(0.0)
    point = JointTrajectoryPoint()
    point.time_from_start = rospy.Duration.from_sec(5.0)
    for idx in range(nbJoints):
        trajectory.joint_names.append(prefix + '_joint_' + str(idx + 1))
        point.positions.append(jointcmds[idx])
        point.velocities.append(0)
        point.accelerations.append(0)
        point.effort.append(0)
    trajectory.points.append(point)
    # Publish the same command repeatedly (50 times at 100 Hz) so that
    # subscribers that connect late still receive it.
    rate = rospy.Rate(100)
    for _ in range(50):
        pub.publish(trajectory)
        rate.sleep()
def moveFingers(jointcmds, prefix, nbJoints):
    """Publish a 5-second finger trajectory driving the fingers to jointcmds."""
    topic_name = '/' + prefix + '/effort_finger_trajectory_controller/command'
    pub = rospy.Publisher(topic_name, JointTrajectory, queue_size=1)
    command = JointTrajectory()
    command.header.stamp = rospy.Time.now() + rospy.Duration.from_sec(0.0)
    target = JointTrajectoryPoint()
    target.time_from_start = rospy.Duration.from_sec(5.0)
    for finger in range(nbJoints):
        command.joint_names.append(prefix + '_joint_finger_' + str(finger + 1))
        target.positions.append(jointcmds[finger])
        target.velocities.append(0)
        target.accelerations.append(0)
        target.effort.append(0)
    command.points.append(target)
    # Re-publish 500 times at 100 Hz (~5 s) so the controller latches on.
    rate = rospy.Rate(100)
    published = 0
    while published < 500:
        pub.publish(command)
        published += 1
        rate.sleep()
if __name__ == '__main__':
  try:
    # Startup order matters: init the ROS node, wait for Gazebo, unpause
    # physics, and only then command motion.
    rospy.init_node('move_robot_using_trajectory_msg')
    # Robot prefix plus joint/finger counts parsed from the robot-type string.
    prefix, nbJoints, nbfingers = argumentParser(None)
    #allow gazebo to launch
    time.sleep(5)

    # Unpause the physics
    rospy.wait_for_service('/gazebo/unpause_physics')
    unpause_gazebo = rospy.ServiceProxy('/gazebo/unpause_physics', Empty)
    resp = unpause_gazebo()

    if (nbJoints==6):
      #home robots
      moveJoint ([0.0,2.9,1.3,4.2,1.4,0.0],prefix,nbJoints)
    else:
      # 7-DOF home pose (extra wrist joint at index 2).
      moveJoint ([0.0,2.9,0.0,1.3,4.2,1.4,0.0],prefix,nbJoints)
    # Close all three fingers after reaching the home pose.
    moveFingers ([1,1,1],prefix,nbfingers)
  except rospy.ROSInterruptException:
    print("program interrupted before completion")
|
normal
|
{
"blob_id": "ee7c63f36b4720566389826680b90c6f68de85b2",
"index": 5200,
"step-1": "<mask token>\n\n\ndef moveFingers(jointcmds, prefix, nbJoints):\n topic_name = '/' + prefix + '/effort_finger_trajectory_controller/command'\n pub = rospy.Publisher(topic_name, JointTrajectory, queue_size=1)\n jointCmd = JointTrajectory()\n point = JointTrajectoryPoint()\n jointCmd.header.stamp = rospy.Time.now() + rospy.Duration.from_sec(0.0)\n point.time_from_start = rospy.Duration.from_sec(5.0)\n for i in range(0, nbJoints):\n jointCmd.joint_names.append(prefix + '_joint_finger_' + str(i + 1))\n point.positions.append(jointcmds[i])\n point.velocities.append(0)\n point.accelerations.append(0)\n point.effort.append(0)\n jointCmd.points.append(point)\n rate = rospy.Rate(100)\n count = 0\n while count < 500:\n pub.publish(jointCmd)\n count = count + 1\n rate.sleep()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef argumentParser(argument):\n \"\"\" Argument parser \"\"\"\n parser = argparse.ArgumentParser(description=\n 'Drive robot joint to command position')\n parser.add_argument('kinova_robotType', metavar='kinova_robotType',\n type=str, default='j2n6a300', help=\n 'kinova_RobotType is in format of: [{j|m|r|c}{1|2}{s|n}{4|6|7}{s|a}{2|3}{0}{0}]. eg: j2n6a300 refers to jaco v2 6DOF assistive 3fingers. Please be noted that not all options are valided for different robot types.'\n )\n argv = rospy.myargv()\n args_ = parser.parse_args(argv[1:])\n prefix = args_.kinova_robotType\n nbJoints = int(args_.kinova_robotType[3])\n nbfingers = int(args_.kinova_robotType[5])\n return prefix, nbJoints, nbfingers\n\n\n<mask token>\n\n\ndef moveFingers(jointcmds, prefix, nbJoints):\n topic_name = '/' + prefix + '/effort_finger_trajectory_controller/command'\n pub = rospy.Publisher(topic_name, JointTrajectory, queue_size=1)\n jointCmd = JointTrajectory()\n point = JointTrajectoryPoint()\n jointCmd.header.stamp = rospy.Time.now() + rospy.Duration.from_sec(0.0)\n point.time_from_start = rospy.Duration.from_sec(5.0)\n for i in range(0, nbJoints):\n jointCmd.joint_names.append(prefix + '_joint_finger_' + str(i + 1))\n point.positions.append(jointcmds[i])\n point.velocities.append(0)\n point.accelerations.append(0)\n point.effort.append(0)\n jointCmd.points.append(point)\n rate = rospy.Rate(100)\n count = 0\n while count < 500:\n pub.publish(jointCmd)\n count = count + 1\n rate.sleep()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef argumentParser(argument):\n \"\"\" Argument parser \"\"\"\n parser = argparse.ArgumentParser(description=\n 'Drive robot joint to command position')\n parser.add_argument('kinova_robotType', metavar='kinova_robotType',\n type=str, default='j2n6a300', help=\n 'kinova_RobotType is in format of: [{j|m|r|c}{1|2}{s|n}{4|6|7}{s|a}{2|3}{0}{0}]. eg: j2n6a300 refers to jaco v2 6DOF assistive 3fingers. Please be noted that not all options are valided for different robot types.'\n )\n argv = rospy.myargv()\n args_ = parser.parse_args(argv[1:])\n prefix = args_.kinova_robotType\n nbJoints = int(args_.kinova_robotType[3])\n nbfingers = int(args_.kinova_robotType[5])\n return prefix, nbJoints, nbfingers\n\n\ndef moveJoint(jointcmds, prefix, nbJoints):\n topic_name = '/' + prefix + '/effort_joint_trajectory_controller/command'\n pub = rospy.Publisher(topic_name, JointTrajectory, queue_size=1)\n jointCmd = JointTrajectory()\n point = JointTrajectoryPoint()\n jointCmd.header.stamp = rospy.Time.now() + rospy.Duration.from_sec(0.0)\n point.time_from_start = rospy.Duration.from_sec(5.0)\n for i in range(0, nbJoints):\n jointCmd.joint_names.append(prefix + '_joint_' + str(i + 1))\n point.positions.append(jointcmds[i])\n point.velocities.append(0)\n point.accelerations.append(0)\n point.effort.append(0)\n jointCmd.points.append(point)\n rate = rospy.Rate(100)\n count = 0\n while count < 50:\n pub.publish(jointCmd)\n count = count + 1\n rate.sleep()\n\n\ndef moveFingers(jointcmds, prefix, nbJoints):\n topic_name = '/' + prefix + '/effort_finger_trajectory_controller/command'\n pub = rospy.Publisher(topic_name, JointTrajectory, queue_size=1)\n jointCmd = JointTrajectory()\n point = JointTrajectoryPoint()\n jointCmd.header.stamp = rospy.Time.now() + rospy.Duration.from_sec(0.0)\n point.time_from_start = rospy.Duration.from_sec(5.0)\n for i in range(0, nbJoints):\n jointCmd.joint_names.append(prefix + '_joint_finger_' + str(i + 1))\n 
point.positions.append(jointcmds[i])\n point.velocities.append(0)\n point.accelerations.append(0)\n point.effort.append(0)\n jointCmd.points.append(point)\n rate = rospy.Rate(100)\n count = 0\n while count < 500:\n pub.publish(jointCmd)\n count = count + 1\n rate.sleep()\n\n\nif __name__ == '__main__':\n try:\n rospy.init_node('move_robot_using_trajectory_msg')\n prefix, nbJoints, nbfingers = argumentParser(None)\n time.sleep(5)\n rospy.wait_for_service('/gazebo/unpause_physics')\n unpause_gazebo = rospy.ServiceProxy('/gazebo/unpause_physics', Empty)\n resp = unpause_gazebo()\n if nbJoints == 6:\n moveJoint([0.0, 2.9, 1.3, 4.2, 1.4, 0.0], prefix, nbJoints)\n else:\n moveJoint([0.0, 2.9, 0.0, 1.3, 4.2, 1.4, 0.0], prefix, nbJoints)\n moveFingers([1, 1, 1], prefix, nbfingers)\n except rospy.ROSInterruptException:\n print('program interrupted before completion')\n",
"step-4": "<mask token>\nimport rospy\nfrom trajectory_msgs.msg import JointTrajectory\nfrom trajectory_msgs.msg import JointTrajectoryPoint\nfrom std_srvs.srv import Empty\nimport argparse\nimport time\n\n\ndef argumentParser(argument):\n \"\"\" Argument parser \"\"\"\n parser = argparse.ArgumentParser(description=\n 'Drive robot joint to command position')\n parser.add_argument('kinova_robotType', metavar='kinova_robotType',\n type=str, default='j2n6a300', help=\n 'kinova_RobotType is in format of: [{j|m|r|c}{1|2}{s|n}{4|6|7}{s|a}{2|3}{0}{0}]. eg: j2n6a300 refers to jaco v2 6DOF assistive 3fingers. Please be noted that not all options are valided for different robot types.'\n )\n argv = rospy.myargv()\n args_ = parser.parse_args(argv[1:])\n prefix = args_.kinova_robotType\n nbJoints = int(args_.kinova_robotType[3])\n nbfingers = int(args_.kinova_robotType[5])\n return prefix, nbJoints, nbfingers\n\n\ndef moveJoint(jointcmds, prefix, nbJoints):\n topic_name = '/' + prefix + '/effort_joint_trajectory_controller/command'\n pub = rospy.Publisher(topic_name, JointTrajectory, queue_size=1)\n jointCmd = JointTrajectory()\n point = JointTrajectoryPoint()\n jointCmd.header.stamp = rospy.Time.now() + rospy.Duration.from_sec(0.0)\n point.time_from_start = rospy.Duration.from_sec(5.0)\n for i in range(0, nbJoints):\n jointCmd.joint_names.append(prefix + '_joint_' + str(i + 1))\n point.positions.append(jointcmds[i])\n point.velocities.append(0)\n point.accelerations.append(0)\n point.effort.append(0)\n jointCmd.points.append(point)\n rate = rospy.Rate(100)\n count = 0\n while count < 50:\n pub.publish(jointCmd)\n count = count + 1\n rate.sleep()\n\n\ndef moveFingers(jointcmds, prefix, nbJoints):\n topic_name = '/' + prefix + '/effort_finger_trajectory_controller/command'\n pub = rospy.Publisher(topic_name, JointTrajectory, queue_size=1)\n jointCmd = JointTrajectory()\n point = JointTrajectoryPoint()\n jointCmd.header.stamp = rospy.Time.now() + rospy.Duration.from_sec(0.0)\n 
point.time_from_start = rospy.Duration.from_sec(5.0)\n for i in range(0, nbJoints):\n jointCmd.joint_names.append(prefix + '_joint_finger_' + str(i + 1))\n point.positions.append(jointcmds[i])\n point.velocities.append(0)\n point.accelerations.append(0)\n point.effort.append(0)\n jointCmd.points.append(point)\n rate = rospy.Rate(100)\n count = 0\n while count < 500:\n pub.publish(jointCmd)\n count = count + 1\n rate.sleep()\n\n\nif __name__ == '__main__':\n try:\n rospy.init_node('move_robot_using_trajectory_msg')\n prefix, nbJoints, nbfingers = argumentParser(None)\n time.sleep(5)\n rospy.wait_for_service('/gazebo/unpause_physics')\n unpause_gazebo = rospy.ServiceProxy('/gazebo/unpause_physics', Empty)\n resp = unpause_gazebo()\n if nbJoints == 6:\n moveJoint([0.0, 2.9, 1.3, 4.2, 1.4, 0.0], prefix, nbJoints)\n else:\n moveJoint([0.0, 2.9, 0.0, 1.3, 4.2, 1.4, 0.0], prefix, nbJoints)\n moveFingers([1, 1, 1], prefix, nbfingers)\n except rospy.ROSInterruptException:\n print('program interrupted before completion')\n",
"step-5": "#! /usr/bin/env python3\n\"\"\"Publishes joint trajectory to move robot to given pose\"\"\"\n\nimport rospy\nfrom trajectory_msgs.msg import JointTrajectory\nfrom trajectory_msgs.msg import JointTrajectoryPoint\nfrom std_srvs.srv import Empty\nimport argparse\nimport time\n\ndef argumentParser(argument):\n \"\"\" Argument parser \"\"\"\n parser = argparse.ArgumentParser(description='Drive robot joint to command position')\n parser.add_argument('kinova_robotType', metavar='kinova_robotType', type=str, default='j2n6a300',\n help='kinova_RobotType is in format of: [{j|m|r|c}{1|2}{s|n}{4|6|7}{s|a}{2|3}{0}{0}]. eg: j2n6a300 refers to jaco v2 6DOF assistive 3fingers. Please be noted that not all options are valided for different robot types.')\n #args_ = parser.parse_args(argument)\n argv = rospy.myargv()\n args_ = parser.parse_args(argv[1:])\n prefix = args_.kinova_robotType\n nbJoints = int(args_.kinova_robotType[3])\t\n nbfingers = int(args_.kinova_robotType[5])\t\n return prefix, nbJoints, nbfingers\n\ndef moveJoint (jointcmds,prefix,nbJoints):\n topic_name = '/' + prefix + '/effort_joint_trajectory_controller/command'\n pub = rospy.Publisher(topic_name, JointTrajectory, queue_size=1)\n jointCmd = JointTrajectory() \n point = JointTrajectoryPoint()\n jointCmd.header.stamp = rospy.Time.now() + rospy.Duration.from_sec(0.0); \n point.time_from_start = rospy.Duration.from_sec(5.0)\n for i in range(0, nbJoints):\n jointCmd.joint_names.append(prefix +'_joint_'+str(i+1))\n point.positions.append(jointcmds[i])\n point.velocities.append(0)\n point.accelerations.append(0)\n point.effort.append(0) \n jointCmd.points.append(point)\n rate = rospy.Rate(100)\n count = 0\n while (count < 50):\n pub.publish(jointCmd)\n count = count + 1\n rate.sleep() \n\ndef moveFingers (jointcmds,prefix,nbJoints):\n topic_name = '/' + prefix + '/effort_finger_trajectory_controller/command'\n pub = rospy.Publisher(topic_name, JointTrajectory, queue_size=1) \n jointCmd = JointTrajectory() 
\n point = JointTrajectoryPoint()\n jointCmd.header.stamp = rospy.Time.now() + rospy.Duration.from_sec(0.0); \n point.time_from_start = rospy.Duration.from_sec(5.0)\n for i in range(0, nbJoints):\n jointCmd.joint_names.append(prefix +'_joint_finger_'+str(i+1))\n point.positions.append(jointcmds[i])\n point.velocities.append(0)\n point.accelerations.append(0)\n point.effort.append(0) \n jointCmd.points.append(point)\n rate = rospy.Rate(100)\n count = 0\n while (count < 500):\n pub.publish(jointCmd)\n count = count + 1\n rate.sleep() \n\nif __name__ == '__main__':\n try: \n rospy.init_node('move_robot_using_trajectory_msg')\t\t\n prefix, nbJoints, nbfingers = argumentParser(None) \n #allow gazebo to launch\n time.sleep(5)\n\n # Unpause the physics\n rospy.wait_for_service('/gazebo/unpause_physics')\n unpause_gazebo = rospy.ServiceProxy('/gazebo/unpause_physics', Empty)\n resp = unpause_gazebo()\n\n if (nbJoints==6):\n #home robots\n moveJoint ([0.0,2.9,1.3,4.2,1.4,0.0],prefix,nbJoints)\n else:\n moveJoint ([0.0,2.9,0.0,1.3,4.2,1.4,0.0],prefix,nbJoints)\n\n moveFingers ([1,1,1],prefix,nbfingers)\n except rospy.ROSInterruptException:\n print(\"program interrupted before completion\")\n",
"step-ids": [
1,
2,
4,
5,
6
]
}
|
[
1,
2,
4,
5,
6
] |
'''
This file creates the model of Post, which maps to the post table in the mysql database.
The model Provider contains four attributes: author, title, content, and created time.
'''
from django.db import models
class Post(models.Model):
    """An education post authored by a provider.

    Maps to the post table; rows are removed when the owning Provider
    is deleted (CASCADE).
    """

    # Owning provider; deleting the provider deletes their posts.
    author = models.ForeignKey('Provider', on_delete=models.CASCADE)
    # Required post headline, up to 255 characters.
    title = models.CharField(max_length=255, null=False)
    # Required post body text.
    content = models.TextField(null=False)
    # Set once when the row is first inserted.
    created_at = models.DateTimeField(auto_now_add=True)

    def __str__(self):
        # Posts are displayed by title in admin and listings.
        return f"{self.title}"
|
normal
|
{
"blob_id": "4fa9c00a07c8263a6a3afd460b84f21637a771ec",
"index": 3081,
"step-1": "<mask token>\n\n\nclass Post(models.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def __str__(self):\n return '{}'.format(self.title)\n",
"step-2": "<mask token>\n\n\nclass Post(models.Model):\n <mask token>\n author = models.ForeignKey('Provider', on_delete=models.CASCADE)\n title = models.CharField(max_length=255, null=False)\n content = models.TextField(null=False)\n created_at = models.DateTimeField(auto_now_add=True)\n\n def __str__(self):\n return '{}'.format(self.title)\n",
"step-3": "<mask token>\n\n\nclass Post(models.Model):\n \"\"\"\n The education post by provider database model\n \"\"\"\n author = models.ForeignKey('Provider', on_delete=models.CASCADE)\n title = models.CharField(max_length=255, null=False)\n content = models.TextField(null=False)\n created_at = models.DateTimeField(auto_now_add=True)\n\n def __str__(self):\n return '{}'.format(self.title)\n",
"step-4": "<mask token>\nfrom django.db import models\n\n\nclass Post(models.Model):\n \"\"\"\n The education post by provider database model\n \"\"\"\n author = models.ForeignKey('Provider', on_delete=models.CASCADE)\n title = models.CharField(max_length=255, null=False)\n content = models.TextField(null=False)\n created_at = models.DateTimeField(auto_now_add=True)\n\n def __str__(self):\n return '{}'.format(self.title)\n",
"step-5": "\n'''\nThis file creates the model of Post, which maps to the post table in the mysql database. \nThe model Provider contains four attributes: author, title, content, and created time. \n'''\nfrom django.db import models\n\nclass Post(models.Model):\n '''\n The education post by provider database model\n '''\n author = models.ForeignKey('Provider', on_delete=models.CASCADE)\n title = models.CharField(max_length=255, null=False)\n content = models.TextField(null=False)\n created_at = models.DateTimeField(auto_now_add=True)\n\n def __str__(self):\n return \"{}\".format(self.title)\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
def checkSides():
rightC, frontC, leftC = True, True, True
drivetrain.turn_for(RIGHT, 90, DEGREES)
if front_eye.near_object() and distance.get_distance(MM) < 3000:
rightC = False
drivetrain.turn_for(LEFT, 90, DEGREES)
if front_eye.near_object() and distance.get_distance(MM) < 3000:
frontC = False
drivetrain.turn_for(LEFT, 90, DEGREES)
if front_eye.near_object() and distance.get_distance(MM) < 3000:
leftC = False
drivetrain.turn_for(RIGHT, 90, DEGREES)
return rightC, frontC, leftC
def run():
while True:
drivetrain.drive_for(FORWARD, 250, MM)
rightClear, frontClear, leftClear = checkSides()
if frontClear and not rightClear:
print('')
elif rightClear:
drivetrain.turn_for(RIGHT, 90, DEGREES)
elif not (rightClear and frontClear) and leftClear:
drivetrain.turn_for(LEFT, 90, DEGREES)
elif not (rightClear and leftClear and frontClear):
drivetrain.turn_for(RIGHT, 180, DEGREES)
if down_eye.detect(RED):
break
wait(1, MSEC)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def main():
pen.set_pen_color(BLUE)
pen.move(DOWN)
drivetrain.set_drive_velocity(50, PERCENT)
drivetrain.set_turn_velocity(50, PERCENT)
drivetrain.turn_for(RIGHT, 90, DEGREES)
run()
def checkSides():
rightC, frontC, leftC = True, True, True
drivetrain.turn_for(RIGHT, 90, DEGREES)
if front_eye.near_object() and distance.get_distance(MM) < 3000:
rightC = False
drivetrain.turn_for(LEFT, 90, DEGREES)
if front_eye.near_object() and distance.get_distance(MM) < 3000:
frontC = False
drivetrain.turn_for(LEFT, 90, DEGREES)
if front_eye.near_object() and distance.get_distance(MM) < 3000:
leftC = False
drivetrain.turn_for(RIGHT, 90, DEGREES)
return rightC, frontC, leftC
def run():
while True:
drivetrain.drive_for(FORWARD, 250, MM)
rightClear, frontClear, leftClear = checkSides()
if frontClear and not rightClear:
print('')
elif rightClear:
drivetrain.turn_for(RIGHT, 90, DEGREES)
elif not (rightClear and frontClear) and leftClear:
drivetrain.turn_for(LEFT, 90, DEGREES)
elif not (rightClear and leftClear and frontClear):
drivetrain.turn_for(RIGHT, 180, DEGREES)
if down_eye.detect(RED):
break
wait(1, MSEC)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def main():
pen.set_pen_color(BLUE)
pen.move(DOWN)
drivetrain.set_drive_velocity(50, PERCENT)
drivetrain.set_turn_velocity(50, PERCENT)
drivetrain.turn_for(RIGHT, 90, DEGREES)
run()
def checkSides():
rightC, frontC, leftC = True, True, True
drivetrain.turn_for(RIGHT, 90, DEGREES)
if front_eye.near_object() and distance.get_distance(MM) < 3000:
rightC = False
drivetrain.turn_for(LEFT, 90, DEGREES)
if front_eye.near_object() and distance.get_distance(MM) < 3000:
frontC = False
drivetrain.turn_for(LEFT, 90, DEGREES)
if front_eye.near_object() and distance.get_distance(MM) < 3000:
leftC = False
drivetrain.turn_for(RIGHT, 90, DEGREES)
return rightC, frontC, leftC
def run():
while True:
drivetrain.drive_for(FORWARD, 250, MM)
rightClear, frontClear, leftClear = checkSides()
if frontClear and not rightClear:
print('')
elif rightClear:
drivetrain.turn_for(RIGHT, 90, DEGREES)
elif not (rightClear and frontClear) and leftClear:
drivetrain.turn_for(LEFT, 90, DEGREES)
elif not (rightClear and leftClear and frontClear):
drivetrain.turn_for(RIGHT, 180, DEGREES)
if down_eye.detect(RED):
break
wait(1, MSEC)
vr_thread(main())
<|reserved_special_token_1|>
from vexcode import *
def main():
    """Set up pen tracing and drivetrain speeds, then hand off to the solver loop."""
    # Trace the robot's path in blue.
    pen.set_pen_color(BLUE)
    pen.move(DOWN)
    # Same speed for driving and turning.
    for configure in (drivetrain.set_drive_velocity, drivetrain.set_turn_velocity):
        configure(50, PERCENT)
    # Right-hand-rule solver starts facing right.
    drivetrain.turn_for(RIGHT, 90, DEGREES)
    run()
def checkSides():
    """Scan right, front, and left for walls.

    Turns in place to face each direction, samples the front eye sensor,
    then faces forward again. Returns (right_clear, front_clear,
    left_clear) booleans, True meaning no wall in that direction.
    """
    def blocked():
        # A wall counts only when an object is seen within 3 m.
        return front_eye.near_object() and distance.get_distance(MM) < 3000

    drivetrain.turn_for(RIGHT, 90, DEGREES)
    right_clear = not blocked()
    drivetrain.turn_for(LEFT, 90, DEGREES)
    front_clear = not blocked()
    drivetrain.turn_for(LEFT, 90, DEGREES)
    left_clear = not blocked()
    # Face forward again before returning.
    drivetrain.turn_for(RIGHT, 90, DEGREES)
    return right_clear, front_clear, left_clear
def run():
    """Right-hand-rule solver loop.

    Drives one cell at a time, re-orients based on which sides are
    clear, and stops once the red exit tile is detected underneath.
    """
    while True:
        drivetrain.drive_for(FORWARD, 250, MM)
        right_clear, front_clear, left_clear = checkSides()
        # Preference order: keep right-hand wall, else turn right,
        # else turn left, else dead end -> about-face.
        if front_clear and not right_clear:
            print('')
        elif right_clear:
            drivetrain.turn_for(RIGHT, 90, DEGREES)
        elif not (right_clear and front_clear) and left_clear:
            drivetrain.turn_for(LEFT, 90, DEGREES)
        elif not (right_clear and left_clear and front_clear):
            drivetrain.turn_for(RIGHT, 180, DEGREES)
        # Red floor tile marks the maze exit.
        if down_eye.detect(RED):
            break
        wait(1, MSEC)
# VR thread entry point — launches the solver; do not delete.
vr_thread(main())
<|reserved_special_token_1|>
# ------------------------------------------
#
# Project: VEXcode VR Maze Solver
# Author: Hyunwoo Choi
# Created: January 12 2021
# Description: Solves a VEXcode VR maze using the right hand rule
#
# ------------------------------------------
# Library imports
from vexcode import *
#main
def main():
#putting down the pen to show the path of the robot
pen.set_pen_color(BLUE)
pen.move(DOWN)
drivetrain.set_drive_velocity(50, PERCENT)
drivetrain.set_turn_velocity(50, PERCENT)
#start with 90 deg turned right since we are using a right hand rule to solve this maze
drivetrain.turn_for(RIGHT, 90, DEGREES)
#run
run()
#this method checks all three sides and returns a boolean for each side if it is blocked or not
def checkSides():
rightC, frontC, leftC = True, True, True
drivetrain.turn_for(RIGHT, 90, DEGREES)
if front_eye.near_object() and distance.get_distance(MM) < 3000:
rightC = False
drivetrain.turn_for(LEFT, 90, DEGREES)
if front_eye.near_object() and distance.get_distance(MM) < 3000:
frontC = False
drivetrain.turn_for(LEFT, 90, DEGREES)
if front_eye.near_object() and distance.get_distance(MM) < 3000:
leftC = False
drivetrain.turn_for(RIGHT, 90, DEGREES)
return rightC, frontC, leftC
#main run function
def run():
#program loop
while True:
#drive
drivetrain.drive_for(FORWARD, 250, MM)
#checks if the robot's surroundings are clear by using the method above
rightClear, frontClear, leftClear = checkSides()
#uses the 3 boolean values above to determine the which direction to turn
if frontClear and not rightClear:
print("")
elif rightClear:
drivetrain.turn_for(RIGHT, 90, DEGREES)
elif (not (rightClear and frontClear)) and leftClear:
drivetrain.turn_for(LEFT, 90, DEGREES)
elif not (rightClear and leftClear and frontClear):
drivetrain.turn_for(RIGHT, 180, DEGREES)
#if found an exit, stop
if(down_eye.detect(RED)):
break
wait(1,MSEC)
# VR threads — Do not delete
vr_thread(main())
|
flexible
|
{
"blob_id": "e560f2f202e477822729d1361b8d7ef7831a00e6",
"index": 8339,
"step-1": "<mask token>\n\n\ndef checkSides():\n rightC, frontC, leftC = True, True, True\n drivetrain.turn_for(RIGHT, 90, DEGREES)\n if front_eye.near_object() and distance.get_distance(MM) < 3000:\n rightC = False\n drivetrain.turn_for(LEFT, 90, DEGREES)\n if front_eye.near_object() and distance.get_distance(MM) < 3000:\n frontC = False\n drivetrain.turn_for(LEFT, 90, DEGREES)\n if front_eye.near_object() and distance.get_distance(MM) < 3000:\n leftC = False\n drivetrain.turn_for(RIGHT, 90, DEGREES)\n return rightC, frontC, leftC\n\n\ndef run():\n while True:\n drivetrain.drive_for(FORWARD, 250, MM)\n rightClear, frontClear, leftClear = checkSides()\n if frontClear and not rightClear:\n print('')\n elif rightClear:\n drivetrain.turn_for(RIGHT, 90, DEGREES)\n elif not (rightClear and frontClear) and leftClear:\n drivetrain.turn_for(LEFT, 90, DEGREES)\n elif not (rightClear and leftClear and frontClear):\n drivetrain.turn_for(RIGHT, 180, DEGREES)\n if down_eye.detect(RED):\n break\n wait(1, MSEC)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef main():\n pen.set_pen_color(BLUE)\n pen.move(DOWN)\n drivetrain.set_drive_velocity(50, PERCENT)\n drivetrain.set_turn_velocity(50, PERCENT)\n drivetrain.turn_for(RIGHT, 90, DEGREES)\n run()\n\n\ndef checkSides():\n rightC, frontC, leftC = True, True, True\n drivetrain.turn_for(RIGHT, 90, DEGREES)\n if front_eye.near_object() and distance.get_distance(MM) < 3000:\n rightC = False\n drivetrain.turn_for(LEFT, 90, DEGREES)\n if front_eye.near_object() and distance.get_distance(MM) < 3000:\n frontC = False\n drivetrain.turn_for(LEFT, 90, DEGREES)\n if front_eye.near_object() and distance.get_distance(MM) < 3000:\n leftC = False\n drivetrain.turn_for(RIGHT, 90, DEGREES)\n return rightC, frontC, leftC\n\n\ndef run():\n while True:\n drivetrain.drive_for(FORWARD, 250, MM)\n rightClear, frontClear, leftClear = checkSides()\n if frontClear and not rightClear:\n print('')\n elif rightClear:\n drivetrain.turn_for(RIGHT, 90, DEGREES)\n elif not (rightClear and frontClear) and leftClear:\n drivetrain.turn_for(LEFT, 90, DEGREES)\n elif not (rightClear and leftClear and frontClear):\n drivetrain.turn_for(RIGHT, 180, DEGREES)\n if down_eye.detect(RED):\n break\n wait(1, MSEC)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef main():\n pen.set_pen_color(BLUE)\n pen.move(DOWN)\n drivetrain.set_drive_velocity(50, PERCENT)\n drivetrain.set_turn_velocity(50, PERCENT)\n drivetrain.turn_for(RIGHT, 90, DEGREES)\n run()\n\n\ndef checkSides():\n rightC, frontC, leftC = True, True, True\n drivetrain.turn_for(RIGHT, 90, DEGREES)\n if front_eye.near_object() and distance.get_distance(MM) < 3000:\n rightC = False\n drivetrain.turn_for(LEFT, 90, DEGREES)\n if front_eye.near_object() and distance.get_distance(MM) < 3000:\n frontC = False\n drivetrain.turn_for(LEFT, 90, DEGREES)\n if front_eye.near_object() and distance.get_distance(MM) < 3000:\n leftC = False\n drivetrain.turn_for(RIGHT, 90, DEGREES)\n return rightC, frontC, leftC\n\n\ndef run():\n while True:\n drivetrain.drive_for(FORWARD, 250, MM)\n rightClear, frontClear, leftClear = checkSides()\n if frontClear and not rightClear:\n print('')\n elif rightClear:\n drivetrain.turn_for(RIGHT, 90, DEGREES)\n elif not (rightClear and frontClear) and leftClear:\n drivetrain.turn_for(LEFT, 90, DEGREES)\n elif not (rightClear and leftClear and frontClear):\n drivetrain.turn_for(RIGHT, 180, DEGREES)\n if down_eye.detect(RED):\n break\n wait(1, MSEC)\n\n\nvr_thread(main())\n",
"step-4": "from vexcode import *\n\n\ndef main():\n pen.set_pen_color(BLUE)\n pen.move(DOWN)\n drivetrain.set_drive_velocity(50, PERCENT)\n drivetrain.set_turn_velocity(50, PERCENT)\n drivetrain.turn_for(RIGHT, 90, DEGREES)\n run()\n\n\ndef checkSides():\n rightC, frontC, leftC = True, True, True\n drivetrain.turn_for(RIGHT, 90, DEGREES)\n if front_eye.near_object() and distance.get_distance(MM) < 3000:\n rightC = False\n drivetrain.turn_for(LEFT, 90, DEGREES)\n if front_eye.near_object() and distance.get_distance(MM) < 3000:\n frontC = False\n drivetrain.turn_for(LEFT, 90, DEGREES)\n if front_eye.near_object() and distance.get_distance(MM) < 3000:\n leftC = False\n drivetrain.turn_for(RIGHT, 90, DEGREES)\n return rightC, frontC, leftC\n\n\ndef run():\n while True:\n drivetrain.drive_for(FORWARD, 250, MM)\n rightClear, frontClear, leftClear = checkSides()\n if frontClear and not rightClear:\n print('')\n elif rightClear:\n drivetrain.turn_for(RIGHT, 90, DEGREES)\n elif not (rightClear and frontClear) and leftClear:\n drivetrain.turn_for(LEFT, 90, DEGREES)\n elif not (rightClear and leftClear and frontClear):\n drivetrain.turn_for(RIGHT, 180, DEGREES)\n if down_eye.detect(RED):\n break\n wait(1, MSEC)\n\n\nvr_thread(main())\n",
"step-5": "# ------------------------------------------\n# \n# \tProject: VEXcode VR Maze Solver\n#\tAuthor: Hyunwoo Choi\n#\tCreated: January 12 2021\n#\tDescription: Solves a VEXcode VR maze using the right hand rule\n# \n# ------------------------------------------\n\n# Library imports\nfrom vexcode import *\n\n#main\ndef main():\n #putting down the pen to show the path of the robot\n pen.set_pen_color(BLUE)\n pen.move(DOWN)\n\n drivetrain.set_drive_velocity(50, PERCENT)\n drivetrain.set_turn_velocity(50, PERCENT)\n\n \n #start with 90 deg turned right since we are using a right hand rule to solve this maze\n drivetrain.turn_for(RIGHT, 90, DEGREES)\n \n #run\n run()\n\n#this method checks all three sides and returns a boolean for each side if it is blocked or not\ndef checkSides():\n \n rightC, frontC, leftC = True, True, True\n drivetrain.turn_for(RIGHT, 90, DEGREES)\n if front_eye.near_object() and distance.get_distance(MM) < 3000:\n rightC = False\n drivetrain.turn_for(LEFT, 90, DEGREES)\n if front_eye.near_object() and distance.get_distance(MM) < 3000:\n frontC = False\n drivetrain.turn_for(LEFT, 90, DEGREES)\n if front_eye.near_object() and distance.get_distance(MM) < 3000:\n leftC = False\n \n drivetrain.turn_for(RIGHT, 90, DEGREES)\n\n return rightC, frontC, leftC\n\n#main run function\ndef run():\n #program loop\n while True:\n\n #drive\n drivetrain.drive_for(FORWARD, 250, MM)\n\n #checks if the robot's surroundings are clear by using the method above\n rightClear, frontClear, leftClear = checkSides()\n\n #uses the 3 boolean values above to determine the which direction to turn\n if frontClear and not rightClear:\n print(\"\")\n elif rightClear:\n drivetrain.turn_for(RIGHT, 90, DEGREES)\n elif (not (rightClear and frontClear)) and leftClear:\n drivetrain.turn_for(LEFT, 90, DEGREES)\n elif not (rightClear and leftClear and frontClear):\n drivetrain.turn_for(RIGHT, 180, DEGREES)\n\n #if found an exit, stop\n if(down_eye.detect(RED)):\n break\n\n 
wait(1,MSEC)\n\n \n \n# VR threads — Do not delete\nvr_thread(main())\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print('Start simulation!')
<|reserved_special_token_0|>
if os.path.exists(figurefolderName):
shutil.rmtree(figurefolderName)
os.makedirs(figurefolderName)
<|reserved_special_token_0|>
print('Common parameters were set.')
<|reserved_special_token_0|>
print('Plant model was set.')
<|reserved_special_token_0|>
print('PID controller was designed.')
<|reserved_special_token_0|>
print('Phase lead filters were desinged.')
print('Frequency respose alanysis is running...')
<|reserved_special_token_0|>
print('Plotting figures...')
<|reserved_special_token_0|>
plot.plot_tffrd(ax_mag, ax_phase, Pnz1_frd, '-', 'b', 1.5, 1.0, title=
'Frequency response of plant')
plot.plot_tffrd(ax_mag, ax_phase, Pnz2_frd, '-', 'r', 1.5, 1.0, freqrange,
legend=['Motor side', 'Load side'])
plot.savefig(figurefolderName + '/freq_P.png')
<|reserved_special_token_0|>
plot.plot_tffrd(ax_mag, ax_phase, Cz_frd, '-', 'b', 1.5, 1.0, freqrange,
title='Frequency response of PID controller')
plot.savefig(figurefolderName + '/freq_C.png')
<|reserved_special_token_0|>
plot.plot_tffrd(ax_mag, ax_phase, PLz1_frd, '-', 'b', 1.5, 1.0, title=
'Frequency response of filters')
plot.plot_tffrd(ax_mag, ax_phase, PLz2_frd, '-', 'r', 1.5, 1.0, freqrange,
[-10, 10], legend=['PL for motor side', 'PL for load side'])
plot.savefig(figurefolderName + '/freq_PL.png')
<|reserved_special_token_0|>
plot.plot_tffrd(ax_mag, ax_phase, Gn1_frd, '-', 'b', 1.5, 1.0, title=
'Frequency response of open loop transfer function')
plot.plot_tffrd(ax_mag, ax_phase, Gn2_frd, '-', 'r', 1.5, 1.0, freqrange,
legend=['Motor side', 'Load side'])
plot.savefig(figurefolderName + '/freq_G.png')
<|reserved_special_token_0|>
plot.plot_tffrd(ax_mag, ax_phase, Sn1_frd, '-', 'b', 1.5, 1.0, title=
'Frequency response of sensitivity function')
plot.plot_tffrd(ax_mag, ax_phase, Sn2_frd, '-', 'r', 1.5, 1.0)
plot.plot_tffrd(ax_mag, ax_phase, Sn1_pl_frd, '-', 'c', 1.5, 1.0)
plot.plot_tffrd(ax_mag, ax_phase, Sn2_pl_frd, '-', 'm', 1.5, 1.0, freqrange,
[-60, 20], legend=['Motor side', 'Load side', 'Motor side with NF',
'Load side with NF'])
plot.savefig(figurefolderName + '/freq_S.png')
<|reserved_special_token_0|>
plot.plot_tffrd(ax_mag, ax_phase, Tn1_frd, '-', 'b', 1.5, 1.0, title=
'Frequency response of complementary sensitivity function')
plot.plot_tffrd(ax_mag, ax_phase, Tn2_frd, '-', 'r', 1.5, 1.0)
plot.plot_tffrd(ax_mag, ax_phase, Tn1_pl_frd, '-', 'c', 1.5, 1.0)
plot.plot_tffrd(ax_mag, ax_phase, Tn2_pl_frd, '-', 'm', 1.5, 1.0, freqrange,
[-60, 20], legend=['Motor side', 'Load side', 'Motor side with NF',
'Load side with NF'])
plot.savefig(figurefolderName + '/freq_T.png')
<|reserved_special_token_0|>
plot.plot_nyquist(ax, Gn1_frd, '-', 'b', 1.5, 1.0, title='Nyquist Diagram')
plot.plot_nyquist(ax, Gn2_frd, '-', 'r', 1.5, 1.0)
plot.plot_nyquist(ax, Gn1_pl_frd, '-', 'c', 1.5, 1.0)
plot.plot_nyquist(ax, Gn2_pl_frd, '-', 'm', 1.5, 1.0, legend=['Motor side',
'Load side', 'Motor side with NF', 'Load side with NF'])
plot.plot_nyquist_assistline(ax)
plot.savefig(figurefolderName + '/nyquist.png')
<|reserved_special_token_0|>
plot.plot_nyquist(ax, Gn1_frd, '-', 'b', 1.5, 1.0, title='Nyquist Diagram')
plot.plot_nyquist(ax, Gn2_frd, '-', 'r', 1.5, 1.0)
plot.plot_nyquist(ax, Gn1_pl_frd, '-', 'c', 1.5, 1.0)
plot.plot_nyquist(ax, Gn2_pl_frd, '-', 'm', 1.5, 1.0, xrange=[-5, 5],
yrange=[-5, 5], legend=['Motor side', 'Load side', 'Motor side with NF',
'Load side with NF'])
plot.plot_nyquist_assistline(ax)
plot.savefig(figurefolderName + '/nyquist_.png')
print('Finished.')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print('Start simulation!')

# --- Common parameters -------------------------------------------------
# Wipe any previous figure output and recreate the folder.
figurefolderName = 'figure_2mass_pl'
if os.path.exists(figurefolderName):
    shutil.rmtree(figurefolderName)
os.makedirs(figurefolderName)
Ts = 1 / 4000        # sampling period [s] (4 kHz)
dataNum = 10000      # number of frequency points
freqrange = [1, 1000]  # analysis range [Hz]
freq = np.logspace(np.log10(freqrange[0]), np.log10(freqrange[1]), dataNum,
    base=10)
s = ctrl.tf([1, 0], [1])       # Laplace operator s
z = ctrl.tf([1, 0], [1], Ts)   # discrete-time shift operator z
print('Common parameters were set.')

# --- Plant model: two-mass system (motor side + load side) -------------
M1 = 1.0         # mass 1 (motor side)
M2 = 1.0         # mass 2 (load side)
M = M1 + M2
C = 10.0         # rigid-body damping
K = 0.0          # rigid-body stiffness
Creso = 10.0     # resonance damping
Kreso = 50000.0  # resonance stiffness
k1 = M2 / (M1 * (M1 + M2))   # resonance-mode gain seen from the motor side
k2 = -1.0 / (M1 + M2)        # resonance-mode gain seen from the load side
omegaPreso = np.sqrt(Kreso * (M1 + M2) / (M1 * M2))
zetaPreso = 0.5 * Creso * np.sqrt((M1 + M2) / (Kreso * M1 * M2))
Pmechs1 = ctrl.tf([1], [M, C, K]) + k1 * ctrl.tf([1], [1, 2 * zetaPreso *
    omegaPreso, omegaPreso ** 2])
Pmechs2 = ctrl.tf([1], [M, C, K]) + k2 * ctrl.tf([1], [1, 2 * zetaPreso *
    omegaPreso, omegaPreso ** 2])
# 4-sample input delay: Pade approximation in continuous time, exact z**-4
# in discrete time.
numDelay, denDelay = matlab.pade(Ts * 4, n=4)
Ds = ctrl.tf(numDelay, denDelay)
Dz = z ** -4
Pns1 = Pmechs1 * Ds
Pns2 = Pmechs2 * Ds
Pnz1 = ctrl.c2d(Pmechs1, Ts, method='zoh') * Dz
Pnz2 = ctrl.c2d(Pmechs2, Ts, method='zoh') * Dz
Pnz1_frd = ctrl.sys2frd(Pnz1, freq)
Pnz2_frd = ctrl.sys2frd(Pnz2, freq)
print('Plant model was set.')

# --- PID controller design ---------------------------------------------
freq1 = 10.0
zeta1 = 1.0
freq2 = 10.0
zeta2 = 1.0
Cz = ctrl.pid(freq1, zeta1, freq2, zeta2, M, C, K, Ts)
Cz_frd = ctrl.sys2frd(Cz, freq)
print('PID controller was designed.')

# --- Phase lead (PL) filter design -------------------------------------
# PLz2 uses the same corner frequencies as PLz1 with their roles swapped.
zeta1 = 0.7
freq1 = 40
zeta2 = 0.7
freq2 = 60
PLz1 = ctrl.pl2nd(freq1, zeta1, freq2, zeta2, Ts)
PLz1_frd = ctrl.sys2frd(PLz1, freq)
PLz2 = ctrl.pl2nd(freq2, zeta2, freq1, zeta1, Ts)
PLz2_frd = ctrl.sys2frd(PLz2, freq)
print('Phase lead filters were designed.')  # fixed typo: 'desinged'

print('Frequency response analysis is running...')  # fixed typos in message
# Motor side: open loop G, sensitivity S, complementary sensitivity T,
# with and without the phase lead filter.
Gn1_frd = Pnz1_frd * Cz_frd
Sn1_frd = 1 / (1 + Gn1_frd)
Tn1_frd = 1 - Sn1_frd
Gn1_pl_frd = Pnz1_frd * Cz_frd * PLz1_frd
Sn1_pl_frd = 1 / (1 + Gn1_pl_frd)
Tn1_pl_frd = 1 - Sn1_pl_frd
# Load side: same quantities.
Gn2_frd = Pnz2_frd * Cz_frd
Sn2_frd = 1 / (1 + Gn2_frd)
Tn2_frd = 1 - Sn2_frd
Gn2_pl_frd = Pnz2_frd * Cz_frd * PLz2_frd
Sn2_pl_frd = 1 / (1 + Gn2_pl_frd)
Tn2_pl_frd = 1 - Sn2_pl_frd
print('Plotting figures...')

# Plant frequency responses (magnitude + phase).
fig = plot.makefig()
ax_mag = fig.add_subplot(211)
ax_phase = fig.add_subplot(212)
plot.plot_tffrd(ax_mag, ax_phase, Pnz1_frd, '-', 'b', 1.5, 1.0, title=
    'Frequency response of plant')
plot.plot_tffrd(ax_mag, ax_phase, Pnz2_frd, '-', 'r', 1.5, 1.0, freqrange,
    legend=['Motor side', 'Load side'])
plot.savefig(figurefolderName + '/freq_P.png')

# PID controller.
fig = plot.makefig()
ax_mag = fig.add_subplot(211)
ax_phase = fig.add_subplot(212)
plot.plot_tffrd(ax_mag, ax_phase, Cz_frd, '-', 'b', 1.5, 1.0, freqrange,
    title='Frequency response of PID controller')
plot.savefig(figurefolderName + '/freq_C.png')

# Phase lead filters.
fig = plot.makefig()
ax_mag = fig.add_subplot(211)
ax_phase = fig.add_subplot(212)
plot.plot_tffrd(ax_mag, ax_phase, PLz1_frd, '-', 'b', 1.5, 1.0, title=
    'Frequency response of filters')
plot.plot_tffrd(ax_mag, ax_phase, PLz2_frd, '-', 'r', 1.5, 1.0, freqrange,
    [-10, 10], legend=['PL for motor side', 'PL for load side'])
plot.savefig(figurefolderName + '/freq_PL.png')

# Open loop transfer functions.
fig = plot.makefig()
ax_mag = fig.add_subplot(211)
ax_phase = fig.add_subplot(212)
plot.plot_tffrd(ax_mag, ax_phase, Gn1_frd, '-', 'b', 1.5, 1.0, title=
    'Frequency response of open loop transfer function')
plot.plot_tffrd(ax_mag, ax_phase, Gn2_frd, '-', 'r', 1.5, 1.0, freqrange,
    legend=['Motor side', 'Load side'])
plot.savefig(figurefolderName + '/freq_G.png')

# Sensitivity functions (magnitude only).
# Legend fix: the designed filters are phase lead (PL), not notch (NF).
fig = plot.makefig()
ax_mag = fig.add_subplot(111)
ax_phase = None
plot.plot_tffrd(ax_mag, ax_phase, Sn1_frd, '-', 'b', 1.5, 1.0, title=
    'Frequency response of sensitivity function')
plot.plot_tffrd(ax_mag, ax_phase, Sn2_frd, '-', 'r', 1.5, 1.0)
plot.plot_tffrd(ax_mag, ax_phase, Sn1_pl_frd, '-', 'c', 1.5, 1.0)
plot.plot_tffrd(ax_mag, ax_phase, Sn2_pl_frd, '-', 'm', 1.5, 1.0, freqrange,
    [-60, 20], legend=['Motor side', 'Load side', 'Motor side with PL',
    'Load side with PL'])
plot.savefig(figurefolderName + '/freq_S.png')

# Complementary sensitivity functions.
fig = plot.makefig()
ax_mag = fig.add_subplot(211)
ax_phase = fig.add_subplot(212)
plot.plot_tffrd(ax_mag, ax_phase, Tn1_frd, '-', 'b', 1.5, 1.0, title=
    'Frequency response of complementary sensitivity function')
plot.plot_tffrd(ax_mag, ax_phase, Tn2_frd, '-', 'r', 1.5, 1.0)
plot.plot_tffrd(ax_mag, ax_phase, Tn1_pl_frd, '-', 'c', 1.5, 1.0)
plot.plot_tffrd(ax_mag, ax_phase, Tn2_pl_frd, '-', 'm', 1.5, 1.0, freqrange,
    [-60, 20], legend=['Motor side', 'Load side', 'Motor side with PL',
    'Load side with PL'])
plot.savefig(figurefolderName + '/freq_T.png')

# Nyquist diagram (full view).
fig = plot.makefig()
ax = fig.add_subplot(111)
plot.plot_nyquist(ax, Gn1_frd, '-', 'b', 1.5, 1.0, title='Nyquist Diagram')
plot.plot_nyquist(ax, Gn2_frd, '-', 'r', 1.5, 1.0)
plot.plot_nyquist(ax, Gn1_pl_frd, '-', 'c', 1.5, 1.0)
plot.plot_nyquist(ax, Gn2_pl_frd, '-', 'm', 1.5, 1.0, legend=['Motor side',
    'Load side', 'Motor side with PL', 'Load side with PL'])
plot.plot_nyquist_assistline(ax)
plot.savefig(figurefolderName + '/nyquist.png')

# Nyquist diagram (zoomed view around the critical point).
fig = plot.makefig()
ax = fig.add_subplot(111)
plot.plot_nyquist(ax, Gn1_frd, '-', 'b', 1.5, 1.0, title='Nyquist Diagram')
plot.plot_nyquist(ax, Gn2_frd, '-', 'r', 1.5, 1.0)
plot.plot_nyquist(ax, Gn1_pl_frd, '-', 'c', 1.5, 1.0)
plot.plot_nyquist(ax, Gn2_pl_frd, '-', 'm', 1.5, 1.0, xrange=[-5, 5],
    yrange=[-5, 5], legend=['Motor side', 'Load side', 'Motor side with PL',
    'Load side with PL'])
plot.plot_nyquist_assistline(ax)
plot.savefig(figurefolderName + '/nyquist_.png')
print('Finished.')
<|reserved_special_token_1|>
from pylib_sakata import init as init
import os
import shutil
import numpy as np
from control import matlab
from pylib_sakata import ctrl
from pylib_sakata import plot
print('Start simulation!')

# --- Common parameters -------------------------------------------------
# Wipe any previous figure output and recreate the folder.
figurefolderName = 'figure_2mass_pl'
if os.path.exists(figurefolderName):
    shutil.rmtree(figurefolderName)
os.makedirs(figurefolderName)
Ts = 1 / 4000        # sampling period [s] (4 kHz)
dataNum = 10000      # number of frequency points
freqrange = [1, 1000]  # analysis range [Hz]
freq = np.logspace(np.log10(freqrange[0]), np.log10(freqrange[1]), dataNum,
    base=10)
s = ctrl.tf([1, 0], [1])       # Laplace operator s
z = ctrl.tf([1, 0], [1], Ts)   # discrete-time shift operator z
print('Common parameters were set.')

# --- Plant model: two-mass system (motor side + load side) -------------
M1 = 1.0         # mass 1 (motor side)
M2 = 1.0         # mass 2 (load side)
M = M1 + M2
C = 10.0         # rigid-body damping
K = 0.0          # rigid-body stiffness
Creso = 10.0     # resonance damping
Kreso = 50000.0  # resonance stiffness
k1 = M2 / (M1 * (M1 + M2))   # resonance-mode gain seen from the motor side
k2 = -1.0 / (M1 + M2)        # resonance-mode gain seen from the load side
omegaPreso = np.sqrt(Kreso * (M1 + M2) / (M1 * M2))
zetaPreso = 0.5 * Creso * np.sqrt((M1 + M2) / (Kreso * M1 * M2))
Pmechs1 = ctrl.tf([1], [M, C, K]) + k1 * ctrl.tf([1], [1, 2 * zetaPreso *
    omegaPreso, omegaPreso ** 2])
Pmechs2 = ctrl.tf([1], [M, C, K]) + k2 * ctrl.tf([1], [1, 2 * zetaPreso *
    omegaPreso, omegaPreso ** 2])
# 4-sample input delay: Pade approximation in continuous time, exact z**-4
# in discrete time.
numDelay, denDelay = matlab.pade(Ts * 4, n=4)
Ds = ctrl.tf(numDelay, denDelay)
Dz = z ** -4
Pns1 = Pmechs1 * Ds
Pns2 = Pmechs2 * Ds
Pnz1 = ctrl.c2d(Pmechs1, Ts, method='zoh') * Dz
Pnz2 = ctrl.c2d(Pmechs2, Ts, method='zoh') * Dz
Pnz1_frd = ctrl.sys2frd(Pnz1, freq)
Pnz2_frd = ctrl.sys2frd(Pnz2, freq)
print('Plant model was set.')

# --- PID controller design ---------------------------------------------
freq1 = 10.0
zeta1 = 1.0
freq2 = 10.0
zeta2 = 1.0
Cz = ctrl.pid(freq1, zeta1, freq2, zeta2, M, C, K, Ts)
Cz_frd = ctrl.sys2frd(Cz, freq)
print('PID controller was designed.')

# --- Phase lead (PL) filter design -------------------------------------
# PLz2 uses the same corner frequencies as PLz1 with their roles swapped.
zeta1 = 0.7
freq1 = 40
zeta2 = 0.7
freq2 = 60
PLz1 = ctrl.pl2nd(freq1, zeta1, freq2, zeta2, Ts)
PLz1_frd = ctrl.sys2frd(PLz1, freq)
PLz2 = ctrl.pl2nd(freq2, zeta2, freq1, zeta1, Ts)
PLz2_frd = ctrl.sys2frd(PLz2, freq)
print('Phase lead filters were designed.')  # fixed typo: 'desinged'

print('Frequency response analysis is running...')  # fixed typos in message
# Motor side: open loop G, sensitivity S, complementary sensitivity T,
# with and without the phase lead filter.
Gn1_frd = Pnz1_frd * Cz_frd
Sn1_frd = 1 / (1 + Gn1_frd)
Tn1_frd = 1 - Sn1_frd
Gn1_pl_frd = Pnz1_frd * Cz_frd * PLz1_frd
Sn1_pl_frd = 1 / (1 + Gn1_pl_frd)
Tn1_pl_frd = 1 - Sn1_pl_frd
# Load side: same quantities.
Gn2_frd = Pnz2_frd * Cz_frd
Sn2_frd = 1 / (1 + Gn2_frd)
Tn2_frd = 1 - Sn2_frd
Gn2_pl_frd = Pnz2_frd * Cz_frd * PLz2_frd
Sn2_pl_frd = 1 / (1 + Gn2_pl_frd)
Tn2_pl_frd = 1 - Sn2_pl_frd
print('Plotting figures...')

# Plant frequency responses (magnitude + phase).
fig = plot.makefig()
ax_mag = fig.add_subplot(211)
ax_phase = fig.add_subplot(212)
plot.plot_tffrd(ax_mag, ax_phase, Pnz1_frd, '-', 'b', 1.5, 1.0, title=
    'Frequency response of plant')
plot.plot_tffrd(ax_mag, ax_phase, Pnz2_frd, '-', 'r', 1.5, 1.0, freqrange,
    legend=['Motor side', 'Load side'])
plot.savefig(figurefolderName + '/freq_P.png')

# PID controller.
fig = plot.makefig()
ax_mag = fig.add_subplot(211)
ax_phase = fig.add_subplot(212)
plot.plot_tffrd(ax_mag, ax_phase, Cz_frd, '-', 'b', 1.5, 1.0, freqrange,
    title='Frequency response of PID controller')
plot.savefig(figurefolderName + '/freq_C.png')

# Phase lead filters.
fig = plot.makefig()
ax_mag = fig.add_subplot(211)
ax_phase = fig.add_subplot(212)
plot.plot_tffrd(ax_mag, ax_phase, PLz1_frd, '-', 'b', 1.5, 1.0, title=
    'Frequency response of filters')
plot.plot_tffrd(ax_mag, ax_phase, PLz2_frd, '-', 'r', 1.5, 1.0, freqrange,
    [-10, 10], legend=['PL for motor side', 'PL for load side'])
plot.savefig(figurefolderName + '/freq_PL.png')

# Open loop transfer functions.
fig = plot.makefig()
ax_mag = fig.add_subplot(211)
ax_phase = fig.add_subplot(212)
plot.plot_tffrd(ax_mag, ax_phase, Gn1_frd, '-', 'b', 1.5, 1.0, title=
    'Frequency response of open loop transfer function')
plot.plot_tffrd(ax_mag, ax_phase, Gn2_frd, '-', 'r', 1.5, 1.0, freqrange,
    legend=['Motor side', 'Load side'])
plot.savefig(figurefolderName + '/freq_G.png')

# Sensitivity functions (magnitude only).
# Legend fix: the designed filters are phase lead (PL), not notch (NF).
fig = plot.makefig()
ax_mag = fig.add_subplot(111)
ax_phase = None
plot.plot_tffrd(ax_mag, ax_phase, Sn1_frd, '-', 'b', 1.5, 1.0, title=
    'Frequency response of sensitivity function')
plot.plot_tffrd(ax_mag, ax_phase, Sn2_frd, '-', 'r', 1.5, 1.0)
plot.plot_tffrd(ax_mag, ax_phase, Sn1_pl_frd, '-', 'c', 1.5, 1.0)
plot.plot_tffrd(ax_mag, ax_phase, Sn2_pl_frd, '-', 'm', 1.5, 1.0, freqrange,
    [-60, 20], legend=['Motor side', 'Load side', 'Motor side with PL',
    'Load side with PL'])
plot.savefig(figurefolderName + '/freq_S.png')

# Complementary sensitivity functions.
fig = plot.makefig()
ax_mag = fig.add_subplot(211)
ax_phase = fig.add_subplot(212)
plot.plot_tffrd(ax_mag, ax_phase, Tn1_frd, '-', 'b', 1.5, 1.0, title=
    'Frequency response of complementary sensitivity function')
plot.plot_tffrd(ax_mag, ax_phase, Tn2_frd, '-', 'r', 1.5, 1.0)
plot.plot_tffrd(ax_mag, ax_phase, Tn1_pl_frd, '-', 'c', 1.5, 1.0)
plot.plot_tffrd(ax_mag, ax_phase, Tn2_pl_frd, '-', 'm', 1.5, 1.0, freqrange,
    [-60, 20], legend=['Motor side', 'Load side', 'Motor side with PL',
    'Load side with PL'])
plot.savefig(figurefolderName + '/freq_T.png')

# Nyquist diagram (full view).
fig = plot.makefig()
ax = fig.add_subplot(111)
plot.plot_nyquist(ax, Gn1_frd, '-', 'b', 1.5, 1.0, title='Nyquist Diagram')
plot.plot_nyquist(ax, Gn2_frd, '-', 'r', 1.5, 1.0)
plot.plot_nyquist(ax, Gn1_pl_frd, '-', 'c', 1.5, 1.0)
plot.plot_nyquist(ax, Gn2_pl_frd, '-', 'm', 1.5, 1.0, legend=['Motor side',
    'Load side', 'Motor side with PL', 'Load side with PL'])
plot.plot_nyquist_assistline(ax)
plot.savefig(figurefolderName + '/nyquist.png')

# Nyquist diagram (zoomed view around the critical point).
fig = plot.makefig()
ax = fig.add_subplot(111)
plot.plot_nyquist(ax, Gn1_frd, '-', 'b', 1.5, 1.0, title='Nyquist Diagram')
plot.plot_nyquist(ax, Gn2_frd, '-', 'r', 1.5, 1.0)
plot.plot_nyquist(ax, Gn1_pl_frd, '-', 'c', 1.5, 1.0)
plot.plot_nyquist(ax, Gn2_pl_frd, '-', 'm', 1.5, 1.0, xrange=[-5, 5],
    yrange=[-5, 5], legend=['Motor side', 'Load side', 'Motor side with PL',
    'Load side with PL'])
plot.plot_nyquist_assistline(ax)
plot.savefig(figurefolderName + '/nyquist_.png')
print('Finished.')
<|reserved_special_token_1|>
# Copyright (c) 2021 Koichi Sakata
from pylib_sakata import init as init
# uncomment the follows when the file is executed in a Python console.
# init.close_all()
# init.clear_all()
import os
import shutil
import numpy as np
from control import matlab
from pylib_sakata import ctrl
from pylib_sakata import plot
print('Start simulation!')

# Common parameters
# Wipe any previous figure output and recreate the folder.
figurefolderName = 'figure_2mass_pl'
if os.path.exists(figurefolderName):
    shutil.rmtree(figurefolderName)
os.makedirs(figurefolderName)
Ts = 1/4000          # sampling period [s] (4 kHz)
dataNum = 10000      # number of frequency points
freqrange = [1, 1000]  # analysis range [Hz]
freq = np.logspace(np.log10(freqrange[0]), np.log10(freqrange[1]), dataNum, base=10)
s = ctrl.tf([1, 0], [1])       # Laplace operator s
z = ctrl.tf([1, 0], [1], Ts)   # discrete-time shift operator z
print('Common parameters were set.')

# Plant model: two-mass system (motor side + load side) with one resonance mode.
M1 = 1.0         # mass 1 (motor side)
M2 = 1.0         # mass 2 (load side)
M = M1 + M2
C = 10.0         # rigid-body damping
K = 0.0          # rigid-body stiffness
Creso = 10.0     # resonance damping
Kreso = 50000.0  # resonance stiffness
k1 = M2/(M1 * (M1 + M2))   # resonance-mode gain seen from the motor side
k2 = -1.0/(M1 + M2)        # resonance-mode gain seen from the load side
omegaPreso = np.sqrt(Kreso * (M1 + M2)/(M1 * M2))
zetaPreso = 0.5 * Creso*np.sqrt((M1 + M2)/(Kreso * M1 * M2))
Pmechs1 = ctrl.tf([1], [M, C, K]) + k1 * ctrl.tf([1], [1, 2*zetaPreso*omegaPreso, omegaPreso**2])
Pmechs2 = ctrl.tf([1], [M, C, K]) + k2 * ctrl.tf([1], [1, 2*zetaPreso*omegaPreso, omegaPreso**2])
# 4-sample input delay: Pade approximation in continuous time, exact z**-4 in
# discrete time.
numDelay, denDelay = matlab.pade(Ts*4, n=4)
Ds = ctrl.tf(numDelay, denDelay)
Dz = z**-4
Pns1 = Pmechs1 * Ds
Pns2 = Pmechs2 * Ds
Pnz1 = ctrl.c2d(Pmechs1, Ts, method='zoh') * Dz
Pnz2 = ctrl.c2d(Pmechs2, Ts, method='zoh') * Dz
Pnz1_frd = ctrl.sys2frd(Pnz1, freq)
Pnz2_frd = ctrl.sys2frd(Pnz2, freq)
print('Plant model was set.')

# Design PID controller
freq1 = 10.0
zeta1 = 1.0
freq2 = 10.0
zeta2 = 1.0
Cz = ctrl.pid(freq1, zeta1, freq2, zeta2, M, C, K, Ts)
Cz_frd = ctrl.sys2frd(Cz, freq)
print('PID controller was designed.')

# Design phase lead (PL) filters; PLz2 swaps the corner frequencies of PLz1.
zeta1 = 0.7
freq1 = 40
zeta2 = 0.7
freq2 = 60
PLz1 = ctrl.pl2nd(freq1, zeta1, freq2, zeta2, Ts)
PLz1_frd = ctrl.sys2frd(PLz1, freq)
PLz2 = ctrl.pl2nd(freq2, zeta2, freq1, zeta1, Ts)
PLz2_frd = ctrl.sys2frd(PLz2, freq)
print('Phase lead filters were designed.')  # fixed typo: 'desinged'

print('Frequency response analysis is running...')  # fixed typos in message
# Motor side: open loop G, sensitivity S, complementary sensitivity T,
# with and without the phase lead filter.
Gn1_frd = Pnz1_frd * Cz_frd
Sn1_frd = 1/(1 + Gn1_frd)
Tn1_frd = 1 - Sn1_frd

Gn1_pl_frd = Pnz1_frd * Cz_frd * PLz1_frd
Sn1_pl_frd = 1/(1 + Gn1_pl_frd)
Tn1_pl_frd = 1 - Sn1_pl_frd

# Load side: same quantities.
Gn2_frd = Pnz2_frd * Cz_frd
Sn2_frd = 1/(1 + Gn2_frd)
Tn2_frd = 1 - Sn2_frd

Gn2_pl_frd = Pnz2_frd * Cz_frd * PLz2_frd
Sn2_pl_frd = 1/(1 + Gn2_pl_frd)
Tn2_pl_frd = 1 - Sn2_pl_frd
print('Plotting figures...')

# Plant frequency responses (magnitude + phase).
fig = plot.makefig()
ax_mag = fig.add_subplot(211)
ax_phase = fig.add_subplot(212)
plot.plot_tffrd(ax_mag, ax_phase, Pnz1_frd, '-', 'b', 1.5, 1.0, title='Frequency response of plant')
plot.plot_tffrd(ax_mag, ax_phase, Pnz2_frd, '-', 'r', 1.5, 1.0, freqrange, legend=['Motor side', 'Load side'])
plot.savefig(figurefolderName+'/freq_P.png')

# PID controller.
fig = plot.makefig()
ax_mag = fig.add_subplot(211)
ax_phase = fig.add_subplot(212)
plot.plot_tffrd(ax_mag, ax_phase, Cz_frd, '-', 'b', 1.5, 1.0, freqrange, title='Frequency response of PID controller')
plot.savefig(figurefolderName+'/freq_C.png')

# Phase lead filters.
fig = plot.makefig()
ax_mag = fig.add_subplot(211)
ax_phase = fig.add_subplot(212)
plot.plot_tffrd(ax_mag, ax_phase, PLz1_frd, '-', 'b', 1.5, 1.0, title='Frequency response of filters')
plot.plot_tffrd(ax_mag, ax_phase, PLz2_frd, '-', 'r', 1.5, 1.0, freqrange, [-10, 10], legend=['PL for motor side', 'PL for load side'])
plot.savefig(figurefolderName+'/freq_PL.png')

# Open loop transfer functions.
fig = plot.makefig()
ax_mag = fig.add_subplot(211)
ax_phase = fig.add_subplot(212)
plot.plot_tffrd(ax_mag, ax_phase, Gn1_frd, '-', 'b', 1.5, 1.0, title='Frequency response of open loop transfer function')
plot.plot_tffrd(ax_mag, ax_phase, Gn2_frd, '-', 'r', 1.5, 1.0, freqrange, legend=['Motor side', 'Load side'])
plot.savefig(figurefolderName+'/freq_G.png')

# Sensitivity functions (magnitude only).
# Legend fix: the designed filters are phase lead (PL), not notch (NF).
fig = plot.makefig()
ax_mag = fig.add_subplot(111)
ax_phase = None
plot.plot_tffrd(ax_mag, ax_phase, Sn1_frd, '-', 'b', 1.5, 1.0, title='Frequency response of sensitivity function')
plot.plot_tffrd(ax_mag, ax_phase, Sn2_frd, '-', 'r', 1.5, 1.0)
plot.plot_tffrd(ax_mag, ax_phase, Sn1_pl_frd, '-', 'c', 1.5, 1.0)
plot.plot_tffrd(ax_mag, ax_phase, Sn2_pl_frd, '-', 'm', 1.5, 1.0, freqrange, [-60, 20], legend=['Motor side', 'Load side', 'Motor side with PL', 'Load side with PL'])
plot.savefig(figurefolderName+'/freq_S.png')

# Complementary sensitivity functions.
fig = plot.makefig()
ax_mag = fig.add_subplot(211)
ax_phase = fig.add_subplot(212)
plot.plot_tffrd(ax_mag, ax_phase, Tn1_frd, '-', 'b', 1.5, 1.0, title='Frequency response of complementary sensitivity function')
plot.plot_tffrd(ax_mag, ax_phase, Tn2_frd, '-', 'r', 1.5, 1.0)
plot.plot_tffrd(ax_mag, ax_phase, Tn1_pl_frd, '-', 'c', 1.5, 1.0)
plot.plot_tffrd(ax_mag, ax_phase, Tn2_pl_frd, '-', 'm', 1.5, 1.0, freqrange, [-60, 20], legend=['Motor side', 'Load side', 'Motor side with PL', 'Load side with PL'])
plot.savefig(figurefolderName+'/freq_T.png')

# Nyquist diagram (full view).
fig = plot.makefig()
ax = fig.add_subplot(111)
plot.plot_nyquist(ax, Gn1_frd, '-', 'b', 1.5, 1.0, title='Nyquist Diagram')
plot.plot_nyquist(ax, Gn2_frd, '-', 'r', 1.5, 1.0)
plot.plot_nyquist(ax, Gn1_pl_frd, '-', 'c', 1.5, 1.0)
plot.plot_nyquist(ax, Gn2_pl_frd, '-', 'm', 1.5, 1.0, legend=['Motor side', 'Load side', 'Motor side with PL', 'Load side with PL'])
plot.plot_nyquist_assistline(ax)
plot.savefig(figurefolderName+'/nyquist.png')

# Nyquist diagram (zoomed view around the critical point).
fig = plot.makefig()
ax = fig.add_subplot(111)
plot.plot_nyquist(ax, Gn1_frd, '-', 'b', 1.5, 1.0, title='Nyquist Diagram')
plot.plot_nyquist(ax, Gn2_frd, '-', 'r', 1.5, 1.0)
plot.plot_nyquist(ax, Gn1_pl_frd, '-', 'c', 1.5, 1.0)
plot.plot_nyquist(ax, Gn2_pl_frd, '-', 'm', 1.5, 1.0, xrange=[-5, 5], yrange=[-5, 5], legend=['Motor side', 'Load side', 'Motor side with PL', 'Load side with PL'])
plot.plot_nyquist_assistline(ax)
plot.savefig(figurefolderName+'/nyquist_.png')
print('Finished.')
|
flexible
|
{
"blob_id": "ad1aa69f92f104ac8b82aca3c0a64ce3de48b36d",
"index": 3847,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint('Start simulation!')\n<mask token>\nif os.path.exists(figurefolderName):\n shutil.rmtree(figurefolderName)\nos.makedirs(figurefolderName)\n<mask token>\nprint('Common parameters were set.')\n<mask token>\nprint('Plant model was set.')\n<mask token>\nprint('PID controller was designed.')\n<mask token>\nprint('Phase lead filters were desinged.')\nprint('Frequency respose alanysis is running...')\n<mask token>\nprint('Plotting figures...')\n<mask token>\nplot.plot_tffrd(ax_mag, ax_phase, Pnz1_frd, '-', 'b', 1.5, 1.0, title=\n 'Frequency response of plant')\nplot.plot_tffrd(ax_mag, ax_phase, Pnz2_frd, '-', 'r', 1.5, 1.0, freqrange,\n legend=['Motor side', 'Load side'])\nplot.savefig(figurefolderName + '/freq_P.png')\n<mask token>\nplot.plot_tffrd(ax_mag, ax_phase, Cz_frd, '-', 'b', 1.5, 1.0, freqrange,\n title='Frequency response of PID controller')\nplot.savefig(figurefolderName + '/freq_C.png')\n<mask token>\nplot.plot_tffrd(ax_mag, ax_phase, PLz1_frd, '-', 'b', 1.5, 1.0, title=\n 'Frequency response of filters')\nplot.plot_tffrd(ax_mag, ax_phase, PLz2_frd, '-', 'r', 1.5, 1.0, freqrange,\n [-10, 10], legend=['PL for motor side', 'PL for load side'])\nplot.savefig(figurefolderName + '/freq_PL.png')\n<mask token>\nplot.plot_tffrd(ax_mag, ax_phase, Gn1_frd, '-', 'b', 1.5, 1.0, title=\n 'Frequency response of open loop transfer function')\nplot.plot_tffrd(ax_mag, ax_phase, Gn2_frd, '-', 'r', 1.5, 1.0, freqrange,\n legend=['Motor side', 'Load side'])\nplot.savefig(figurefolderName + '/freq_G.png')\n<mask token>\nplot.plot_tffrd(ax_mag, ax_phase, Sn1_frd, '-', 'b', 1.5, 1.0, title=\n 'Frequency response of sensitivity function')\nplot.plot_tffrd(ax_mag, ax_phase, Sn2_frd, '-', 'r', 1.5, 1.0)\nplot.plot_tffrd(ax_mag, ax_phase, Sn1_pl_frd, '-', 'c', 1.5, 1.0)\nplot.plot_tffrd(ax_mag, ax_phase, Sn2_pl_frd, '-', 'm', 1.5, 1.0, freqrange,\n [-60, 20], legend=['Motor side', 'Load side', 'Motor side with NF',\n 'Load side with 
NF'])\nplot.savefig(figurefolderName + '/freq_S.png')\n<mask token>\nplot.plot_tffrd(ax_mag, ax_phase, Tn1_frd, '-', 'b', 1.5, 1.0, title=\n 'Frequency response of complementary sensitivity function')\nplot.plot_tffrd(ax_mag, ax_phase, Tn2_frd, '-', 'r', 1.5, 1.0)\nplot.plot_tffrd(ax_mag, ax_phase, Tn1_pl_frd, '-', 'c', 1.5, 1.0)\nplot.plot_tffrd(ax_mag, ax_phase, Tn2_pl_frd, '-', 'm', 1.5, 1.0, freqrange,\n [-60, 20], legend=['Motor side', 'Load side', 'Motor side with NF',\n 'Load side with NF'])\nplot.savefig(figurefolderName + '/freq_T.png')\n<mask token>\nplot.plot_nyquist(ax, Gn1_frd, '-', 'b', 1.5, 1.0, title='Nyquist Diagram')\nplot.plot_nyquist(ax, Gn2_frd, '-', 'r', 1.5, 1.0)\nplot.plot_nyquist(ax, Gn1_pl_frd, '-', 'c', 1.5, 1.0)\nplot.plot_nyquist(ax, Gn2_pl_frd, '-', 'm', 1.5, 1.0, legend=['Motor side',\n 'Load side', 'Motor side with NF', 'Load side with NF'])\nplot.plot_nyquist_assistline(ax)\nplot.savefig(figurefolderName + '/nyquist.png')\n<mask token>\nplot.plot_nyquist(ax, Gn1_frd, '-', 'b', 1.5, 1.0, title='Nyquist Diagram')\nplot.plot_nyquist(ax, Gn2_frd, '-', 'r', 1.5, 1.0)\nplot.plot_nyquist(ax, Gn1_pl_frd, '-', 'c', 1.5, 1.0)\nplot.plot_nyquist(ax, Gn2_pl_frd, '-', 'm', 1.5, 1.0, xrange=[-5, 5],\n yrange=[-5, 5], legend=['Motor side', 'Load side', 'Motor side with NF',\n 'Load side with NF'])\nplot.plot_nyquist_assistline(ax)\nplot.savefig(figurefolderName + '/nyquist_.png')\nprint('Finished.')\n",
"step-3": "<mask token>\nprint('Start simulation!')\nfigurefolderName = 'figure_2mass_pl'\nif os.path.exists(figurefolderName):\n shutil.rmtree(figurefolderName)\nos.makedirs(figurefolderName)\nTs = 1 / 4000\ndataNum = 10000\nfreqrange = [1, 1000]\nfreq = np.logspace(np.log10(freqrange[0]), np.log10(freqrange[1]), dataNum,\n base=10)\ns = ctrl.tf([1, 0], [1])\nz = ctrl.tf([1, 0], [1], Ts)\nprint('Common parameters were set.')\nM1 = 1.0\nM2 = 1.0\nM = M1 + M2\nC = 10.0\nK = 0.0\nCreso = 10.0\nKreso = 50000.0\nk1 = M2 / (M1 * (M1 + M2))\nk2 = -1.0 / (M1 + M2)\nomegaPreso = np.sqrt(Kreso * (M1 + M2) / (M1 * M2))\nzetaPreso = 0.5 * Creso * np.sqrt((M1 + M2) / (Kreso * M1 * M2))\nPmechs1 = ctrl.tf([1], [M, C, K]) + k1 * ctrl.tf([1], [1, 2 * zetaPreso *\n omegaPreso, omegaPreso ** 2])\nPmechs2 = ctrl.tf([1], [M, C, K]) + k2 * ctrl.tf([1], [1, 2 * zetaPreso *\n omegaPreso, omegaPreso ** 2])\nnumDelay, denDelay = matlab.pade(Ts * 4, n=4)\nDs = ctrl.tf(numDelay, denDelay)\nDz = z ** -4\nPns1 = Pmechs1 * Ds\nPns2 = Pmechs2 * Ds\nPnz1 = ctrl.c2d(Pmechs1, Ts, method='zoh') * Dz\nPnz2 = ctrl.c2d(Pmechs2, Ts, method='zoh') * Dz\nPnz1_frd = ctrl.sys2frd(Pnz1, freq)\nPnz2_frd = ctrl.sys2frd(Pnz2, freq)\nprint('Plant model was set.')\nfreq1 = 10.0\nzeta1 = 1.0\nfreq2 = 10.0\nzeta2 = 1.0\nCz = ctrl.pid(freq1, zeta1, freq2, zeta2, M, C, K, Ts)\nCz_frd = ctrl.sys2frd(Cz, freq)\nprint('PID controller was designed.')\nzeta1 = 0.7\nfreq1 = 40\nzeta2 = 0.7\nfreq2 = 60\nPLz1 = ctrl.pl2nd(freq1, zeta1, freq2, zeta2, Ts)\nPLz1_frd = ctrl.sys2frd(PLz1, freq)\nPLz2 = ctrl.pl2nd(freq2, zeta2, freq1, zeta1, Ts)\nPLz2_frd = ctrl.sys2frd(PLz2, freq)\nprint('Phase lead filters were desinged.')\nprint('Frequency respose alanysis is running...')\nGn1_frd = Pnz1_frd * Cz_frd\nSn1_frd = 1 / (1 + Gn1_frd)\nTn1_frd = 1 - Sn1_frd\nGn1_pl_frd = Pnz1_frd * Cz_frd * PLz1_frd\nSn1_pl_frd = 1 / (1 + Gn1_pl_frd)\nTn1_pl_frd = 1 - Sn1_pl_frd\nGn2_frd = Pnz2_frd * Cz_frd\nSn2_frd = 1 / (1 + Gn2_frd)\nTn2_frd = 1 
- Sn2_frd\nGn2_pl_frd = Pnz2_frd * Cz_frd * PLz2_frd\nSn2_pl_frd = 1 / (1 + Gn2_pl_frd)\nTn2_pl_frd = 1 - Sn2_pl_frd\nprint('Plotting figures...')\nfig = plot.makefig()\nax_mag = fig.add_subplot(211)\nax_phase = fig.add_subplot(212)\nplot.plot_tffrd(ax_mag, ax_phase, Pnz1_frd, '-', 'b', 1.5, 1.0, title=\n 'Frequency response of plant')\nplot.plot_tffrd(ax_mag, ax_phase, Pnz2_frd, '-', 'r', 1.5, 1.0, freqrange,\n legend=['Motor side', 'Load side'])\nplot.savefig(figurefolderName + '/freq_P.png')\nfig = plot.makefig()\nax_mag = fig.add_subplot(211)\nax_phase = fig.add_subplot(212)\nplot.plot_tffrd(ax_mag, ax_phase, Cz_frd, '-', 'b', 1.5, 1.0, freqrange,\n title='Frequency response of PID controller')\nplot.savefig(figurefolderName + '/freq_C.png')\nfig = plot.makefig()\nax_mag = fig.add_subplot(211)\nax_phase = fig.add_subplot(212)\nplot.plot_tffrd(ax_mag, ax_phase, PLz1_frd, '-', 'b', 1.5, 1.0, title=\n 'Frequency response of filters')\nplot.plot_tffrd(ax_mag, ax_phase, PLz2_frd, '-', 'r', 1.5, 1.0, freqrange,\n [-10, 10], legend=['PL for motor side', 'PL for load side'])\nplot.savefig(figurefolderName + '/freq_PL.png')\nfig = plot.makefig()\nax_mag = fig.add_subplot(211)\nax_phase = fig.add_subplot(212)\nplot.plot_tffrd(ax_mag, ax_phase, Gn1_frd, '-', 'b', 1.5, 1.0, title=\n 'Frequency response of open loop transfer function')\nplot.plot_tffrd(ax_mag, ax_phase, Gn2_frd, '-', 'r', 1.5, 1.0, freqrange,\n legend=['Motor side', 'Load side'])\nplot.savefig(figurefolderName + '/freq_G.png')\nfig = plot.makefig()\nax_mag = fig.add_subplot(111)\nax_phase = None\nplot.plot_tffrd(ax_mag, ax_phase, Sn1_frd, '-', 'b', 1.5, 1.0, title=\n 'Frequency response of sensitivity function')\nplot.plot_tffrd(ax_mag, ax_phase, Sn2_frd, '-', 'r', 1.5, 1.0)\nplot.plot_tffrd(ax_mag, ax_phase, Sn1_pl_frd, '-', 'c', 1.5, 1.0)\nplot.plot_tffrd(ax_mag, ax_phase, Sn2_pl_frd, '-', 'm', 1.5, 1.0, freqrange,\n [-60, 20], legend=['Motor side', 'Load side', 'Motor side with NF',\n 'Load side with 
NF'])\nplot.savefig(figurefolderName + '/freq_S.png')\nfig = plot.makefig()\nax_mag = fig.add_subplot(211)\nax_phase = fig.add_subplot(212)\nplot.plot_tffrd(ax_mag, ax_phase, Tn1_frd, '-', 'b', 1.5, 1.0, title=\n 'Frequency response of complementary sensitivity function')\nplot.plot_tffrd(ax_mag, ax_phase, Tn2_frd, '-', 'r', 1.5, 1.0)\nplot.plot_tffrd(ax_mag, ax_phase, Tn1_pl_frd, '-', 'c', 1.5, 1.0)\nplot.plot_tffrd(ax_mag, ax_phase, Tn2_pl_frd, '-', 'm', 1.5, 1.0, freqrange,\n [-60, 20], legend=['Motor side', 'Load side', 'Motor side with NF',\n 'Load side with NF'])\nplot.savefig(figurefolderName + '/freq_T.png')\nfig = plot.makefig()\nax = fig.add_subplot(111)\nplot.plot_nyquist(ax, Gn1_frd, '-', 'b', 1.5, 1.0, title='Nyquist Diagram')\nplot.plot_nyquist(ax, Gn2_frd, '-', 'r', 1.5, 1.0)\nplot.plot_nyquist(ax, Gn1_pl_frd, '-', 'c', 1.5, 1.0)\nplot.plot_nyquist(ax, Gn2_pl_frd, '-', 'm', 1.5, 1.0, legend=['Motor side',\n 'Load side', 'Motor side with NF', 'Load side with NF'])\nplot.plot_nyquist_assistline(ax)\nplot.savefig(figurefolderName + '/nyquist.png')\nfig = plot.makefig()\nax = fig.add_subplot(111)\nplot.plot_nyquist(ax, Gn1_frd, '-', 'b', 1.5, 1.0, title='Nyquist Diagram')\nplot.plot_nyquist(ax, Gn2_frd, '-', 'r', 1.5, 1.0)\nplot.plot_nyquist(ax, Gn1_pl_frd, '-', 'c', 1.5, 1.0)\nplot.plot_nyquist(ax, Gn2_pl_frd, '-', 'm', 1.5, 1.0, xrange=[-5, 5],\n yrange=[-5, 5], legend=['Motor side', 'Load side', 'Motor side with NF',\n 'Load side with NF'])\nplot.plot_nyquist_assistline(ax)\nplot.savefig(figurefolderName + '/nyquist_.png')\nprint('Finished.')\n",
"step-4": "from pylib_sakata import init as init\nimport os\nimport shutil\nimport numpy as np\nfrom control import matlab\nfrom pylib_sakata import ctrl\nfrom pylib_sakata import plot\nprint('Start simulation!')\nfigurefolderName = 'figure_2mass_pl'\nif os.path.exists(figurefolderName):\n shutil.rmtree(figurefolderName)\nos.makedirs(figurefolderName)\nTs = 1 / 4000\ndataNum = 10000\nfreqrange = [1, 1000]\nfreq = np.logspace(np.log10(freqrange[0]), np.log10(freqrange[1]), dataNum,\n base=10)\ns = ctrl.tf([1, 0], [1])\nz = ctrl.tf([1, 0], [1], Ts)\nprint('Common parameters were set.')\nM1 = 1.0\nM2 = 1.0\nM = M1 + M2\nC = 10.0\nK = 0.0\nCreso = 10.0\nKreso = 50000.0\nk1 = M2 / (M1 * (M1 + M2))\nk2 = -1.0 / (M1 + M2)\nomegaPreso = np.sqrt(Kreso * (M1 + M2) / (M1 * M2))\nzetaPreso = 0.5 * Creso * np.sqrt((M1 + M2) / (Kreso * M1 * M2))\nPmechs1 = ctrl.tf([1], [M, C, K]) + k1 * ctrl.tf([1], [1, 2 * zetaPreso *\n omegaPreso, omegaPreso ** 2])\nPmechs2 = ctrl.tf([1], [M, C, K]) + k2 * ctrl.tf([1], [1, 2 * zetaPreso *\n omegaPreso, omegaPreso ** 2])\nnumDelay, denDelay = matlab.pade(Ts * 4, n=4)\nDs = ctrl.tf(numDelay, denDelay)\nDz = z ** -4\nPns1 = Pmechs1 * Ds\nPns2 = Pmechs2 * Ds\nPnz1 = ctrl.c2d(Pmechs1, Ts, method='zoh') * Dz\nPnz2 = ctrl.c2d(Pmechs2, Ts, method='zoh') * Dz\nPnz1_frd = ctrl.sys2frd(Pnz1, freq)\nPnz2_frd = ctrl.sys2frd(Pnz2, freq)\nprint('Plant model was set.')\nfreq1 = 10.0\nzeta1 = 1.0\nfreq2 = 10.0\nzeta2 = 1.0\nCz = ctrl.pid(freq1, zeta1, freq2, zeta2, M, C, K, Ts)\nCz_frd = ctrl.sys2frd(Cz, freq)\nprint('PID controller was designed.')\nzeta1 = 0.7\nfreq1 = 40\nzeta2 = 0.7\nfreq2 = 60\nPLz1 = ctrl.pl2nd(freq1, zeta1, freq2, zeta2, Ts)\nPLz1_frd = ctrl.sys2frd(PLz1, freq)\nPLz2 = ctrl.pl2nd(freq2, zeta2, freq1, zeta1, Ts)\nPLz2_frd = ctrl.sys2frd(PLz2, freq)\nprint('Phase lead filters were desinged.')\nprint('Frequency respose alanysis is running...')\nGn1_frd = Pnz1_frd * Cz_frd\nSn1_frd = 1 / (1 + Gn1_frd)\nTn1_frd = 1 - Sn1_frd\nGn1_pl_frd = 
Pnz1_frd * Cz_frd * PLz1_frd\nSn1_pl_frd = 1 / (1 + Gn1_pl_frd)\nTn1_pl_frd = 1 - Sn1_pl_frd\nGn2_frd = Pnz2_frd * Cz_frd\nSn2_frd = 1 / (1 + Gn2_frd)\nTn2_frd = 1 - Sn2_frd\nGn2_pl_frd = Pnz2_frd * Cz_frd * PLz2_frd\nSn2_pl_frd = 1 / (1 + Gn2_pl_frd)\nTn2_pl_frd = 1 - Sn2_pl_frd\nprint('Plotting figures...')\nfig = plot.makefig()\nax_mag = fig.add_subplot(211)\nax_phase = fig.add_subplot(212)\nplot.plot_tffrd(ax_mag, ax_phase, Pnz1_frd, '-', 'b', 1.5, 1.0, title=\n 'Frequency response of plant')\nplot.plot_tffrd(ax_mag, ax_phase, Pnz2_frd, '-', 'r', 1.5, 1.0, freqrange,\n legend=['Motor side', 'Load side'])\nplot.savefig(figurefolderName + '/freq_P.png')\nfig = plot.makefig()\nax_mag = fig.add_subplot(211)\nax_phase = fig.add_subplot(212)\nplot.plot_tffrd(ax_mag, ax_phase, Cz_frd, '-', 'b', 1.5, 1.0, freqrange,\n title='Frequency response of PID controller')\nplot.savefig(figurefolderName + '/freq_C.png')\nfig = plot.makefig()\nax_mag = fig.add_subplot(211)\nax_phase = fig.add_subplot(212)\nplot.plot_tffrd(ax_mag, ax_phase, PLz1_frd, '-', 'b', 1.5, 1.0, title=\n 'Frequency response of filters')\nplot.plot_tffrd(ax_mag, ax_phase, PLz2_frd, '-', 'r', 1.5, 1.0, freqrange,\n [-10, 10], legend=['PL for motor side', 'PL for load side'])\nplot.savefig(figurefolderName + '/freq_PL.png')\nfig = plot.makefig()\nax_mag = fig.add_subplot(211)\nax_phase = fig.add_subplot(212)\nplot.plot_tffrd(ax_mag, ax_phase, Gn1_frd, '-', 'b', 1.5, 1.0, title=\n 'Frequency response of open loop transfer function')\nplot.plot_tffrd(ax_mag, ax_phase, Gn2_frd, '-', 'r', 1.5, 1.0, freqrange,\n legend=['Motor side', 'Load side'])\nplot.savefig(figurefolderName + '/freq_G.png')\nfig = plot.makefig()\nax_mag = fig.add_subplot(111)\nax_phase = None\nplot.plot_tffrd(ax_mag, ax_phase, Sn1_frd, '-', 'b', 1.5, 1.0, title=\n 'Frequency response of sensitivity function')\nplot.plot_tffrd(ax_mag, ax_phase, Sn2_frd, '-', 'r', 1.5, 1.0)\nplot.plot_tffrd(ax_mag, ax_phase, Sn1_pl_frd, '-', 'c', 1.5, 
1.0)\nplot.plot_tffrd(ax_mag, ax_phase, Sn2_pl_frd, '-', 'm', 1.5, 1.0, freqrange,\n [-60, 20], legend=['Motor side', 'Load side', 'Motor side with NF',\n 'Load side with NF'])\nplot.savefig(figurefolderName + '/freq_S.png')\nfig = plot.makefig()\nax_mag = fig.add_subplot(211)\nax_phase = fig.add_subplot(212)\nplot.plot_tffrd(ax_mag, ax_phase, Tn1_frd, '-', 'b', 1.5, 1.0, title=\n 'Frequency response of complementary sensitivity function')\nplot.plot_tffrd(ax_mag, ax_phase, Tn2_frd, '-', 'r', 1.5, 1.0)\nplot.plot_tffrd(ax_mag, ax_phase, Tn1_pl_frd, '-', 'c', 1.5, 1.0)\nplot.plot_tffrd(ax_mag, ax_phase, Tn2_pl_frd, '-', 'm', 1.5, 1.0, freqrange,\n [-60, 20], legend=['Motor side', 'Load side', 'Motor side with NF',\n 'Load side with NF'])\nplot.savefig(figurefolderName + '/freq_T.png')\nfig = plot.makefig()\nax = fig.add_subplot(111)\nplot.plot_nyquist(ax, Gn1_frd, '-', 'b', 1.5, 1.0, title='Nyquist Diagram')\nplot.plot_nyquist(ax, Gn2_frd, '-', 'r', 1.5, 1.0)\nplot.plot_nyquist(ax, Gn1_pl_frd, '-', 'c', 1.5, 1.0)\nplot.plot_nyquist(ax, Gn2_pl_frd, '-', 'm', 1.5, 1.0, legend=['Motor side',\n 'Load side', 'Motor side with NF', 'Load side with NF'])\nplot.plot_nyquist_assistline(ax)\nplot.savefig(figurefolderName + '/nyquist.png')\nfig = plot.makefig()\nax = fig.add_subplot(111)\nplot.plot_nyquist(ax, Gn1_frd, '-', 'b', 1.5, 1.0, title='Nyquist Diagram')\nplot.plot_nyquist(ax, Gn2_frd, '-', 'r', 1.5, 1.0)\nplot.plot_nyquist(ax, Gn1_pl_frd, '-', 'c', 1.5, 1.0)\nplot.plot_nyquist(ax, Gn2_pl_frd, '-', 'm', 1.5, 1.0, xrange=[-5, 5],\n yrange=[-5, 5], legend=['Motor side', 'Load side', 'Motor side with NF',\n 'Load side with NF'])\nplot.plot_nyquist_assistline(ax)\nplot.savefig(figurefolderName + '/nyquist_.png')\nprint('Finished.')\n",
"step-5": "# Copyright (c) 2021 Koichi Sakata\n\n\nfrom pylib_sakata import init as init\n# uncomment the follows when the file is executed in a Python console.\n# init.close_all()\n# init.clear_all()\n\nimport os\nimport shutil\nimport numpy as np\nfrom control import matlab\nfrom pylib_sakata import ctrl\nfrom pylib_sakata import plot\n\nprint('Start simulation!')\n\n# Common parameters\nfigurefolderName = 'figure_2mass_pl'\nif os.path.exists(figurefolderName):\n shutil.rmtree(figurefolderName)\nos.makedirs(figurefolderName)\nTs = 1/4000\ndataNum = 10000\nfreqrange = [1, 1000]\nfreq = np.logspace(np.log10(freqrange[0]), np.log10(freqrange[1]), dataNum, base=10)\ns = ctrl.tf([1, 0], [1])\nz = ctrl.tf([1, 0], [1], Ts)\nprint('Common parameters were set.')\n\n# Plant model\nM1 = 1.0\nM2 = 1.0\nM = M1 + M2\nC = 10.0\nK = 0.0\nCreso = 10.0\nKreso = 50000.0\nk1 = M2/(M1 * (M1 + M2))\nk2 = -1.0/(M1 + M2)\nomegaPreso = np.sqrt(Kreso * (M1 + M2)/(M1 * M2))\nzetaPreso = 0.5 * Creso*np.sqrt((M1 + M2)/(Kreso * M1 * M2))\nPmechs1 = ctrl.tf([1], [M, C, K]) + k1 * ctrl.tf([1], [1, 2*zetaPreso*omegaPreso, omegaPreso**2])\nPmechs2 = ctrl.tf([1], [M, C, K]) + k2 * ctrl.tf([1], [1, 2*zetaPreso*omegaPreso, omegaPreso**2])\nnumDelay, denDelay = matlab.pade(Ts*4, n=4)\nDs = ctrl.tf(numDelay, denDelay)\nDz = z**-4\nPns1 = Pmechs1 * Ds\nPns2 = Pmechs2 * Ds\nPnz1 = ctrl.c2d(Pmechs1, Ts, method='zoh') * Dz\nPnz2 = ctrl.c2d(Pmechs2, Ts, method='zoh') * Dz\nPnz1_frd = ctrl.sys2frd(Pnz1, freq)\nPnz2_frd = ctrl.sys2frd(Pnz2, freq)\nprint('Plant model was set.')\n\n# Design PID controller\nfreq1 = 10.0\nzeta1 = 1.0\nfreq2 = 10.0\nzeta2 = 1.0\nCz = ctrl.pid(freq1, zeta1, freq2, zeta2, M, C, K, Ts)\nCz_frd = ctrl.sys2frd(Cz, freq)\nprint('PID controller was designed.')\n\n# Design phase lead filter\nzeta1 = 0.7\nfreq1 = 40\nzeta2 = 0.7\nfreq2 = 60\nPLz1 = ctrl.pl2nd(freq1, zeta1, freq2, zeta2, Ts)\nPLz1_frd = ctrl.sys2frd(PLz1, freq)\nPLz2 = ctrl.pl2nd(freq2, zeta2, freq1, zeta1, Ts)\nPLz2_frd = 
ctrl.sys2frd(PLz2, freq)\nprint('Phase lead filters were desinged.')\n\nprint('Frequency respose alanysis is running...')\n# Motor side\nGn1_frd = Pnz1_frd * Cz_frd\nSn1_frd = 1/(1 + Gn1_frd)\nTn1_frd = 1 - Sn1_frd\n\nGn1_pl_frd = Pnz1_frd * Cz_frd * PLz1_frd\nSn1_pl_frd = 1/(1 + Gn1_pl_frd)\nTn1_pl_frd = 1 - Sn1_pl_frd\n\n# Load side\nGn2_frd = Pnz2_frd * Cz_frd\nSn2_frd = 1/(1 + Gn2_frd)\nTn2_frd = 1 - Sn2_frd\n\nGn2_pl_frd = Pnz2_frd * Cz_frd * PLz2_frd\nSn2_pl_frd = 1/(1 + Gn2_pl_frd)\nTn2_pl_frd = 1 - Sn2_pl_frd\n\nprint('Plotting figures...')\n# Plant\nfig = plot.makefig()\nax_mag = fig.add_subplot(211)\nax_phase = fig.add_subplot(212)\nplot.plot_tffrd(ax_mag, ax_phase, Pnz1_frd, '-', 'b', 1.5, 1.0, title='Frequency response of plant')\nplot.plot_tffrd(ax_mag, ax_phase, Pnz2_frd, '-', 'r', 1.5, 1.0, freqrange, legend=['Motor side', 'Load side'])\nplot.savefig(figurefolderName+'/freq_P.png')\n\n# PID controller\nfig = plot.makefig()\nax_mag = fig.add_subplot(211)\nax_phase = fig.add_subplot(212)\nplot.plot_tffrd(ax_mag, ax_phase, Cz_frd, '-', 'b', 1.5, 1.0, freqrange, title='Frequency response of PID controller')\nplot.savefig(figurefolderName+'/freq_C.png')\n\n# Phase lead filters\nfig = plot.makefig()\nax_mag = fig.add_subplot(211)\nax_phase = fig.add_subplot(212)\nplot.plot_tffrd(ax_mag, ax_phase, PLz1_frd, '-', 'b', 1.5, 1.0, title='Frequency response of filters')\nplot.plot_tffrd(ax_mag, ax_phase, PLz2_frd, '-', 'r', 1.5, 1.0, freqrange, [-10, 10], legend=['PL for motor side', 'PL for load side'])\nplot.savefig(figurefolderName+'/freq_PL.png')\n\n# Open loop function\nfig = plot.makefig()\nax_mag = fig.add_subplot(211)\nax_phase = fig.add_subplot(212)\nplot.plot_tffrd(ax_mag, ax_phase, Gn1_frd, '-', 'b', 1.5, 1.0, title='Frequency response of open loop transfer function')\nplot.plot_tffrd(ax_mag, ax_phase, Gn2_frd, '-', 'r', 1.5, 1.0, freqrange, legend=['Motor side', 'Load side'])\nplot.savefig(figurefolderName+'/freq_G.png')\n\n# Sensitivity 
function\nfig = plot.makefig()\nax_mag = fig.add_subplot(111)\nax_phase = None\nplot.plot_tffrd(ax_mag, ax_phase, Sn1_frd, '-', 'b', 1.5, 1.0, title='Frequency response of sensitivity function')\nplot.plot_tffrd(ax_mag, ax_phase, Sn2_frd, '-', 'r', 1.5, 1.0)\nplot.plot_tffrd(ax_mag, ax_phase, Sn1_pl_frd, '-', 'c', 1.5, 1.0)\nplot.plot_tffrd(ax_mag, ax_phase, Sn2_pl_frd, '-', 'm', 1.5, 1.0, freqrange, [-60, 20], legend=['Motor side', 'Load side', 'Motor side with NF', 'Load side with NF'])\nplot.savefig(figurefolderName+'/freq_S.png')\n\n# Complementary sensitivity function\nfig = plot.makefig()\nax_mag = fig.add_subplot(211)\nax_phase = fig.add_subplot(212)\nplot.plot_tffrd(ax_mag, ax_phase, Tn1_frd, '-', 'b', 1.5, 1.0, title='Frequency response of complementary sensitivity function')\nplot.plot_tffrd(ax_mag, ax_phase, Tn2_frd, '-', 'r', 1.5, 1.0)\nplot.plot_tffrd(ax_mag, ax_phase, Tn1_pl_frd, '-', 'c', 1.5, 1.0)\nplot.plot_tffrd(ax_mag, ax_phase, Tn2_pl_frd, '-', 'm', 1.5, 1.0, freqrange, [-60, 20], legend=['Motor side', 'Load side', 'Motor side with NF', 'Load side with NF'])\nplot.savefig(figurefolderName+'/freq_T.png')\n\n# Nyquist\nfig = plot.makefig()\nax = fig.add_subplot(111)\nplot.plot_nyquist(ax, Gn1_frd, '-', 'b', 1.5, 1.0, title='Nyquist Diagram')\nplot.plot_nyquist(ax, Gn2_frd, '-', 'r', 1.5, 1.0)\nplot.plot_nyquist(ax, Gn1_pl_frd, '-', 'c', 1.5, 1.0)\nplot.plot_nyquist(ax, Gn2_pl_frd, '-', 'm', 1.5, 1.0, legend=['Motor side', 'Load side', 'Motor side with NF', 'Load side with NF'])\nplot.plot_nyquist_assistline(ax)\nplot.savefig(figurefolderName+'/nyquist.png')\n\nfig = plot.makefig()\nax = fig.add_subplot(111)\nplot.plot_nyquist(ax, Gn1_frd, '-', 'b', 1.5, 1.0, title='Nyquist Diagram')\nplot.plot_nyquist(ax, Gn2_frd, '-', 'r', 1.5, 1.0)\nplot.plot_nyquist(ax, Gn1_pl_frd, '-', 'c', 1.5, 1.0)\nplot.plot_nyquist(ax, Gn2_pl_frd, '-', 'm', 1.5, 1.0, xrange=[-5, 5], yrange=[-5, 5], legend=['Motor side', 'Load side', 'Motor side with NF', 'Load side with 
NF'])\nplot.plot_nyquist_assistline(ax)\nplot.savefig(figurefolderName+'/nyquist_.png')\n\nprint('Finished.')\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
app_name = 'trees'
urlpatterns = [path('list/', TreeListView.as_view(), name='list'), path(
'create/', TreeCreateView.as_view(), name='create'), path(
'<int:pk>/update/', TreeCreateView.as_view(), name='update')]
<|reserved_special_token_1|>
from django.urls import path
from .views import TreeCreateView, TreeListView, TreeUpdateView
app_name = 'trees'
urlpatterns = [path('list/', TreeListView.as_view(), name='list'), path(
'create/', TreeCreateView.as_view(), name='create'), path(
'<int:pk>/update/', TreeCreateView.as_view(), name='update')]
<|reserved_special_token_1|>
from django.urls import path
from .views import (
TreeCreateView,
TreeListView,
TreeUpdateView,
)
app_name = 'trees'
urlpatterns = [
path('list/', TreeListView.as_view(),
name='list'),
path('create/', TreeCreateView.as_view(),
name='create'),
path('<int:pk>/update/', TreeCreateView.as_view(),
name='update'),
]
|
flexible
|
{
"blob_id": "0c1de2c1eb5a4de7aeb14ad6b27aa61e07bc4c51",
"index": 602,
"step-1": "<mask token>\n",
"step-2": "<mask token>\napp_name = 'trees'\nurlpatterns = [path('list/', TreeListView.as_view(), name='list'), path(\n 'create/', TreeCreateView.as_view(), name='create'), path(\n '<int:pk>/update/', TreeCreateView.as_view(), name='update')]\n",
"step-3": "from django.urls import path\nfrom .views import TreeCreateView, TreeListView, TreeUpdateView\napp_name = 'trees'\nurlpatterns = [path('list/', TreeListView.as_view(), name='list'), path(\n 'create/', TreeCreateView.as_view(), name='create'), path(\n '<int:pk>/update/', TreeCreateView.as_view(), name='update')]\n",
"step-4": "from django.urls import path\nfrom .views import (\n TreeCreateView,\n TreeListView,\n TreeUpdateView,\n)\n\n\napp_name = 'trees'\n\nurlpatterns = [\n path('list/', TreeListView.as_view(),\n name='list'),\n path('create/', TreeCreateView.as_view(),\n name='create'),\n path('<int:pk>/update/', TreeCreateView.as_view(),\n name='update'),\n]\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
def test_get_from_hell():
try:
url = (
'http://127.0.0.1:5000/api/lahman2017/people?children=appearances%2Cbatting&people.nameLast=Williams&batting.yearID=1960&appearances.yearID=1960&fields=people.playerID%2Cpeople.nameLast%2Cpeople.nameFirst%2Cbatting.AB%2Cbatting.H%2Cappearances.G_all'
)
print('\n test 1, ', url)
result = requests.get(url)
display_response(result)
except Exception as e:
print('POST got exception = ', e)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def display_response(rsp):
try:
print('Printing a response.')
print('HTTP status code: ', rsp.status_code)
h = dict(rsp.headers)
print('Response headers: \n', json.dumps(h, indent=2, default=str))
try:
body = rsp.json()
print('JSON body: \n', json.dumps(body, indent=2, default=str))
except Exception as e:
body = rsp.text
print('Text body: \n', body)
except Exception as e:
print('display_response got exception e = ', e)
def test_get_from_hell():
try:
url = (
'http://127.0.0.1:5000/api/lahman2017/people?children=appearances%2Cbatting&people.nameLast=Williams&batting.yearID=1960&appearances.yearID=1960&fields=people.playerID%2Cpeople.nameLast%2Cpeople.nameFirst%2Cbatting.AB%2Cbatting.H%2Cappearances.G_all'
)
print('\n test 1, ', url)
result = requests.get(url)
display_response(result)
except Exception as e:
print('POST got exception = ', e)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def display_response(rsp):
try:
print('Printing a response.')
print('HTTP status code: ', rsp.status_code)
h = dict(rsp.headers)
print('Response headers: \n', json.dumps(h, indent=2, default=str))
try:
body = rsp.json()
print('JSON body: \n', json.dumps(body, indent=2, default=str))
except Exception as e:
body = rsp.text
print('Text body: \n', body)
except Exception as e:
print('display_response got exception e = ', e)
def test_get_from_hell():
try:
url = (
'http://127.0.0.1:5000/api/lahman2017/people?children=appearances%2Cbatting&people.nameLast=Williams&batting.yearID=1960&appearances.yearID=1960&fields=people.playerID%2Cpeople.nameLast%2Cpeople.nameFirst%2Cbatting.AB%2Cbatting.H%2Cappearances.G_all'
)
print('\n test 1, ', url)
result = requests.get(url)
display_response(result)
except Exception as e:
print('POST got exception = ', e)
test_get_from_hell()
<|reserved_special_token_1|>
import requests
import json
def display_response(rsp):
try:
print('Printing a response.')
print('HTTP status code: ', rsp.status_code)
h = dict(rsp.headers)
print('Response headers: \n', json.dumps(h, indent=2, default=str))
try:
body = rsp.json()
print('JSON body: \n', json.dumps(body, indent=2, default=str))
except Exception as e:
body = rsp.text
print('Text body: \n', body)
except Exception as e:
print('display_response got exception e = ', e)
def test_get_from_hell():
try:
url = (
'http://127.0.0.1:5000/api/lahman2017/people?children=appearances%2Cbatting&people.nameLast=Williams&batting.yearID=1960&appearances.yearID=1960&fields=people.playerID%2Cpeople.nameLast%2Cpeople.nameFirst%2Cbatting.AB%2Cbatting.H%2Cappearances.G_all'
)
print('\n test 1, ', url)
result = requests.get(url)
display_response(result)
except Exception as e:
print('POST got exception = ', e)
test_get_from_hell()
<|reserved_special_token_1|>
import requests
import json
def display_response(rsp):
try:
print("Printing a response.")
print("HTTP status code: ", rsp.status_code)
h = dict(rsp.headers)
print("Response headers: \n", json.dumps(h, indent=2, default=str))
try:
body = rsp.json()
print("JSON body: \n", json.dumps(body, indent=2, default=str))
except Exception as e:
body = rsp.text
print("Text body: \n", body)
except Exception as e:
print("display_response got exception e = ", e)
def test_get_from_hell():
try:
url = "http://127.0.0.1:5000/api/lahman2017/people?children=appearances%2Cbatting&people.nameLast=Williams&batting.yearID=1960&appearances.yearID=1960&fields=people.playerID%2Cpeople.nameLast%2Cpeople.nameFirst%2Cbatting.AB%2Cbatting.H%2Cappearances.G_all"
print("\n test 1, ", url)
result = requests.get(url)
display_response(result)
except Exception as e:
print("POST got exception = ", e)
test_get_from_hell()
|
flexible
|
{
"blob_id": "31761b9469cc579c209e070fbe7b71943404a1ff",
"index": 3992,
"step-1": "<mask token>\n\n\ndef test_get_from_hell():\n try:\n url = (\n 'http://127.0.0.1:5000/api/lahman2017/people?children=appearances%2Cbatting&people.nameLast=Williams&batting.yearID=1960&appearances.yearID=1960&fields=people.playerID%2Cpeople.nameLast%2Cpeople.nameFirst%2Cbatting.AB%2Cbatting.H%2Cappearances.G_all'\n )\n print('\\n test 1, ', url)\n result = requests.get(url)\n display_response(result)\n except Exception as e:\n print('POST got exception = ', e)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef display_response(rsp):\n try:\n print('Printing a response.')\n print('HTTP status code: ', rsp.status_code)\n h = dict(rsp.headers)\n print('Response headers: \\n', json.dumps(h, indent=2, default=str))\n try:\n body = rsp.json()\n print('JSON body: \\n', json.dumps(body, indent=2, default=str))\n except Exception as e:\n body = rsp.text\n print('Text body: \\n', body)\n except Exception as e:\n print('display_response got exception e = ', e)\n\n\ndef test_get_from_hell():\n try:\n url = (\n 'http://127.0.0.1:5000/api/lahman2017/people?children=appearances%2Cbatting&people.nameLast=Williams&batting.yearID=1960&appearances.yearID=1960&fields=people.playerID%2Cpeople.nameLast%2Cpeople.nameFirst%2Cbatting.AB%2Cbatting.H%2Cappearances.G_all'\n )\n print('\\n test 1, ', url)\n result = requests.get(url)\n display_response(result)\n except Exception as e:\n print('POST got exception = ', e)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef display_response(rsp):\n try:\n print('Printing a response.')\n print('HTTP status code: ', rsp.status_code)\n h = dict(rsp.headers)\n print('Response headers: \\n', json.dumps(h, indent=2, default=str))\n try:\n body = rsp.json()\n print('JSON body: \\n', json.dumps(body, indent=2, default=str))\n except Exception as e:\n body = rsp.text\n print('Text body: \\n', body)\n except Exception as e:\n print('display_response got exception e = ', e)\n\n\ndef test_get_from_hell():\n try:\n url = (\n 'http://127.0.0.1:5000/api/lahman2017/people?children=appearances%2Cbatting&people.nameLast=Williams&batting.yearID=1960&appearances.yearID=1960&fields=people.playerID%2Cpeople.nameLast%2Cpeople.nameFirst%2Cbatting.AB%2Cbatting.H%2Cappearances.G_all'\n )\n print('\\n test 1, ', url)\n result = requests.get(url)\n display_response(result)\n except Exception as e:\n print('POST got exception = ', e)\n\n\ntest_get_from_hell()\n",
"step-4": "import requests\nimport json\n\n\ndef display_response(rsp):\n try:\n print('Printing a response.')\n print('HTTP status code: ', rsp.status_code)\n h = dict(rsp.headers)\n print('Response headers: \\n', json.dumps(h, indent=2, default=str))\n try:\n body = rsp.json()\n print('JSON body: \\n', json.dumps(body, indent=2, default=str))\n except Exception as e:\n body = rsp.text\n print('Text body: \\n', body)\n except Exception as e:\n print('display_response got exception e = ', e)\n\n\ndef test_get_from_hell():\n try:\n url = (\n 'http://127.0.0.1:5000/api/lahman2017/people?children=appearances%2Cbatting&people.nameLast=Williams&batting.yearID=1960&appearances.yearID=1960&fields=people.playerID%2Cpeople.nameLast%2Cpeople.nameFirst%2Cbatting.AB%2Cbatting.H%2Cappearances.G_all'\n )\n print('\\n test 1, ', url)\n result = requests.get(url)\n display_response(result)\n except Exception as e:\n print('POST got exception = ', e)\n\n\ntest_get_from_hell()\n",
"step-5": "import requests\nimport json\n\ndef display_response(rsp):\n\n try:\n print(\"Printing a response.\")\n print(\"HTTP status code: \", rsp.status_code)\n h = dict(rsp.headers)\n print(\"Response headers: \\n\", json.dumps(h, indent=2, default=str))\n\n try:\n body = rsp.json()\n print(\"JSON body: \\n\", json.dumps(body, indent=2, default=str))\n except Exception as e:\n body = rsp.text\n print(\"Text body: \\n\", body)\n\n except Exception as e:\n print(\"display_response got exception e = \", e)\n\n\ndef test_get_from_hell():\n\n\n try:\n\n\n url = \"http://127.0.0.1:5000/api/lahman2017/people?children=appearances%2Cbatting&people.nameLast=Williams&batting.yearID=1960&appearances.yearID=1960&fields=people.playerID%2Cpeople.nameLast%2Cpeople.nameFirst%2Cbatting.AB%2Cbatting.H%2Cappearances.G_all\"\n print(\"\\n test 1, \", url)\n result = requests.get(url)\n display_response(result)\n\n\n except Exception as e:\n print(\"POST got exception = \", e)\n\n\ntest_get_from_hell()",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
from math import *
def eval_loop():
line = input('Please enter a sting')
while True:
if line == 'done':
break
else:
output = eval(line)
print(output)
line = input('Please enter a sting')
eval_loop()
|
normal
|
{
"blob_id": "b0062dde448c450131f578a2afe130ca663f0902",
"index": 2041,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef eval_loop():\n line = input('Please enter a sting')\n while True:\n if line == 'done':\n break\n else:\n output = eval(line)\n print(output)\n line = input('Please enter a sting')\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef eval_loop():\n line = input('Please enter a sting')\n while True:\n if line == 'done':\n break\n else:\n output = eval(line)\n print(output)\n line = input('Please enter a sting')\n\n\neval_loop()\n",
"step-4": "from math import *\n\n\ndef eval_loop():\n line = input('Please enter a sting')\n while True:\n if line == 'done':\n break\n else:\n output = eval(line)\n print(output)\n line = input('Please enter a sting')\n\n\neval_loop()\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
def tort(n, a, b):
return min(n * a, b)
def main():
n, a, b = map(int, input().split())
print(tort(n, a, b))
if __name__ == '__main__':
main()
|
normal
|
{
"blob_id": "7c06bd52c924d3e401f50625109c5b8b489df157",
"index": 7434,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef main():\n n, a, b = map(int, input().split())\n print(tort(n, a, b))\n\n\n<mask token>\n",
"step-3": "def tort(n, a, b):\n return min(n * a, b)\n\n\ndef main():\n n, a, b = map(int, input().split())\n print(tort(n, a, b))\n\n\n<mask token>\n",
"step-4": "def tort(n, a, b):\n return min(n * a, b)\n\n\ndef main():\n n, a, b = map(int, input().split())\n print(tort(n, a, b))\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import argparse, os, joblib, json, torch
import pandas as pd
from utils import regression, dataset, lstm
PREDICT_X_SKIP_COLS = ["date", "weight", "ts_id", "resp", "resp_1", "resp_2", "resp_3", "resp_4"]
X_COLS = ["resp_1", "resp_2", "resp_3", "resp_4"]
Y_OUTPUT_COLS = ["date", "ts_id"]
Y_COL = ["resp"]
METRICS_INFO = ["mse", "r2", "mape"]
DROPOUT = 0.25
HIDDEN_SIZE = 20
def get_prediction_data(data, model_path):
x = data.drop(PREDICT_X_SKIP_COLS, axis=1)
y = data[X_COLS]
model = joblib.load(model_path)
(y_pred, metrics) = regression.evaluate(model, x, y, METRICS_INFO)
y_pred = pd.DataFrame(data=y_pred, columns=X_COLS)
return (y_pred, metrics)
def prepare_data(data_folder, model_path):
(train, test, na_value) = dataset.read_data(data_folder)
x_train = train[X_COLS]
y_train = train[Y_COL]
x_test = test[X_COLS]
y_test = test[Y_COL]
out_train = train[Y_OUTPUT_COLS]
out_test = test[Y_OUTPUT_COLS]
(x_pred_train , metrics_train) = get_prediction_data(train, model_path)
(x_pred_test, metrics_test) = get_prediction_data(test, model_path)
train = { "x": x_train, "y": y_train, "x_pred": x_pred_train, "out": out_train}
test = { "x": x_test, "y": y_test, "x_pred": x_pred_test, "out": out_test}
metrics = {
"reg_train_pred": metrics_train,
"reg_test_pred": metrics_test
}
return (train, test, metrics, na_value)
def postprocess_data(out_data, y_pred):
y_output = out_data.copy()
y_output[Y_COL] = y_pred
return y_output
def train_evaluate(data_folder, output_folder, model_path):
model = lstm.get_model(DROPOUT, len(X_COLS), HIDDEN_SIZE)
print("Preparing data...")
(train, test, metrics, na_value) = prepare_data(data_folder, model_path)
print("Training...")
model = lstm.train(model, train["x"], train["y"])
model = lstm.train(model, train["x_pred"], train["y"])
print("Evaluating...")
(y_pred, metrics_lstm) = lstm.evaluate(model, test["x"],
test["y"], METRICS_INFO)
(y_pred_reg, metrics_reg_lstm) = lstm.evaluate(model,
test["x_pred"], test["y"], METRICS_INFO)
metrics["lstm_pred"] = metrics_lstm
metrics["reg_lstm_pred"] = metrics_reg_lstm
print("Postprocessing data...")
y_output = postprocess_data(test["out"], y_pred)
y_output_reg = postprocess_data(test["out"], y_pred_reg)
output_path = os.path.join(output_folder, "pred.csv")
y_output.to_csv(output_path, index=False)
output_path = os.path.join(output_folder, "pred_reg.csv")
y_output_reg.to_csv(output_path, index=False)
result = { "metrics": metrics, "na_value": na_value }
result_path = os.path.join(output_folder, "result.json")
json_config = json.dumps(result, indent=4)
with open(result_path, "w") as result_file:
result_file.write(json_config)
model_path = os.path.join(output_folder, "lstm.mdl")
torch.save(model, model_path)
print("Output files (model, result, prediction) saved to {}".format(
output_folder))
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument(
"--data_path", type=str, help="specifies the data folder path",
required=True)
parser.add_argument(
"--output_path", type=str, help="specifies the output folder path",
required=True)
parser.add_argument(
"--regression_model_path", type=str, required = True,
help="specifies the regression model path")
return vars(parser.parse_args())
def main():
args = parse_args()
print("Args: {}".format(args))
data_path = os.path.abspath(args["data_path"])
output_path = os.path.abspath(args["output_path"])
model_path = os.path.abspath(args["regression_model_path"])
train_evaluate(data_path, output_path, model_path)
main()
|
normal
|
{
"blob_id": "4bdff51a4e277889f4d54d4ace7a0f5384e74f1e",
"index": 9017,
"step-1": "<mask token>\n\n\ndef get_prediction_data(data, model_path):\n x = data.drop(PREDICT_X_SKIP_COLS, axis=1)\n y = data[X_COLS]\n model = joblib.load(model_path)\n y_pred, metrics = regression.evaluate(model, x, y, METRICS_INFO)\n y_pred = pd.DataFrame(data=y_pred, columns=X_COLS)\n return y_pred, metrics\n\n\ndef prepare_data(data_folder, model_path):\n train, test, na_value = dataset.read_data(data_folder)\n x_train = train[X_COLS]\n y_train = train[Y_COL]\n x_test = test[X_COLS]\n y_test = test[Y_COL]\n out_train = train[Y_OUTPUT_COLS]\n out_test = test[Y_OUTPUT_COLS]\n x_pred_train, metrics_train = get_prediction_data(train, model_path)\n x_pred_test, metrics_test = get_prediction_data(test, model_path)\n train = {'x': x_train, 'y': y_train, 'x_pred': x_pred_train, 'out':\n out_train}\n test = {'x': x_test, 'y': y_test, 'x_pred': x_pred_test, 'out': out_test}\n metrics = {'reg_train_pred': metrics_train, 'reg_test_pred': metrics_test}\n return train, test, metrics, na_value\n\n\ndef postprocess_data(out_data, y_pred):\n y_output = out_data.copy()\n y_output[Y_COL] = y_pred\n return y_output\n\n\ndef train_evaluate(data_folder, output_folder, model_path):\n model = lstm.get_model(DROPOUT, len(X_COLS), HIDDEN_SIZE)\n print('Preparing data...')\n train, test, metrics, na_value = prepare_data(data_folder, model_path)\n print('Training...')\n model = lstm.train(model, train['x'], train['y'])\n model = lstm.train(model, train['x_pred'], train['y'])\n print('Evaluating...')\n y_pred, metrics_lstm = lstm.evaluate(model, test['x'], test['y'],\n METRICS_INFO)\n y_pred_reg, metrics_reg_lstm = lstm.evaluate(model, test['x_pred'],\n test['y'], METRICS_INFO)\n metrics['lstm_pred'] = metrics_lstm\n metrics['reg_lstm_pred'] = metrics_reg_lstm\n print('Postprocessing data...')\n y_output = postprocess_data(test['out'], y_pred)\n y_output_reg = postprocess_data(test['out'], y_pred_reg)\n output_path = os.path.join(output_folder, 'pred.csv')\n y_output.to_csv(output_path, 
index=False)\n output_path = os.path.join(output_folder, 'pred_reg.csv')\n y_output_reg.to_csv(output_path, index=False)\n result = {'metrics': metrics, 'na_value': na_value}\n result_path = os.path.join(output_folder, 'result.json')\n json_config = json.dumps(result, indent=4)\n with open(result_path, 'w') as result_file:\n result_file.write(json_config)\n model_path = os.path.join(output_folder, 'lstm.mdl')\n torch.save(model, model_path)\n print('Output files (model, result, prediction) saved to {}'.format(\n output_folder))\n\n\ndef parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument('--data_path', type=str, help=\n 'specifies the data folder path', required=True)\n parser.add_argument('--output_path', type=str, help=\n 'specifies the output folder path', required=True)\n parser.add_argument('--regression_model_path', type=str, required=True,\n help='specifies the regression model path')\n return vars(parser.parse_args())\n\n\ndef main():\n args = parse_args()\n print('Args: {}'.format(args))\n data_path = os.path.abspath(args['data_path'])\n output_path = os.path.abspath(args['output_path'])\n model_path = os.path.abspath(args['regression_model_path'])\n train_evaluate(data_path, output_path, model_path)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef get_prediction_data(data, model_path):\n x = data.drop(PREDICT_X_SKIP_COLS, axis=1)\n y = data[X_COLS]\n model = joblib.load(model_path)\n y_pred, metrics = regression.evaluate(model, x, y, METRICS_INFO)\n y_pred = pd.DataFrame(data=y_pred, columns=X_COLS)\n return y_pred, metrics\n\n\ndef prepare_data(data_folder, model_path):\n train, test, na_value = dataset.read_data(data_folder)\n x_train = train[X_COLS]\n y_train = train[Y_COL]\n x_test = test[X_COLS]\n y_test = test[Y_COL]\n out_train = train[Y_OUTPUT_COLS]\n out_test = test[Y_OUTPUT_COLS]\n x_pred_train, metrics_train = get_prediction_data(train, model_path)\n x_pred_test, metrics_test = get_prediction_data(test, model_path)\n train = {'x': x_train, 'y': y_train, 'x_pred': x_pred_train, 'out':\n out_train}\n test = {'x': x_test, 'y': y_test, 'x_pred': x_pred_test, 'out': out_test}\n metrics = {'reg_train_pred': metrics_train, 'reg_test_pred': metrics_test}\n return train, test, metrics, na_value\n\n\ndef postprocess_data(out_data, y_pred):\n y_output = out_data.copy()\n y_output[Y_COL] = y_pred\n return y_output\n\n\ndef train_evaluate(data_folder, output_folder, model_path):\n model = lstm.get_model(DROPOUT, len(X_COLS), HIDDEN_SIZE)\n print('Preparing data...')\n train, test, metrics, na_value = prepare_data(data_folder, model_path)\n print('Training...')\n model = lstm.train(model, train['x'], train['y'])\n model = lstm.train(model, train['x_pred'], train['y'])\n print('Evaluating...')\n y_pred, metrics_lstm = lstm.evaluate(model, test['x'], test['y'],\n METRICS_INFO)\n y_pred_reg, metrics_reg_lstm = lstm.evaluate(model, test['x_pred'],\n test['y'], METRICS_INFO)\n metrics['lstm_pred'] = metrics_lstm\n metrics['reg_lstm_pred'] = metrics_reg_lstm\n print('Postprocessing data...')\n y_output = postprocess_data(test['out'], y_pred)\n y_output_reg = postprocess_data(test['out'], y_pred_reg)\n output_path = os.path.join(output_folder, 'pred.csv')\n y_output.to_csv(output_path, 
index=False)\n output_path = os.path.join(output_folder, 'pred_reg.csv')\n y_output_reg.to_csv(output_path, index=False)\n result = {'metrics': metrics, 'na_value': na_value}\n result_path = os.path.join(output_folder, 'result.json')\n json_config = json.dumps(result, indent=4)\n with open(result_path, 'w') as result_file:\n result_file.write(json_config)\n model_path = os.path.join(output_folder, 'lstm.mdl')\n torch.save(model, model_path)\n print('Output files (model, result, prediction) saved to {}'.format(\n output_folder))\n\n\ndef parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument('--data_path', type=str, help=\n 'specifies the data folder path', required=True)\n parser.add_argument('--output_path', type=str, help=\n 'specifies the output folder path', required=True)\n parser.add_argument('--regression_model_path', type=str, required=True,\n help='specifies the regression model path')\n return vars(parser.parse_args())\n\n\ndef main():\n args = parse_args()\n print('Args: {}'.format(args))\n data_path = os.path.abspath(args['data_path'])\n output_path = os.path.abspath(args['output_path'])\n model_path = os.path.abspath(args['regression_model_path'])\n train_evaluate(data_path, output_path, model_path)\n\n\nmain()\n",
"step-3": "<mask token>\nPREDICT_X_SKIP_COLS = ['date', 'weight', 'ts_id', 'resp', 'resp_1',\n 'resp_2', 'resp_3', 'resp_4']\nX_COLS = ['resp_1', 'resp_2', 'resp_3', 'resp_4']\nY_OUTPUT_COLS = ['date', 'ts_id']\nY_COL = ['resp']\nMETRICS_INFO = ['mse', 'r2', 'mape']\nDROPOUT = 0.25\nHIDDEN_SIZE = 20\n\n\ndef get_prediction_data(data, model_path):\n x = data.drop(PREDICT_X_SKIP_COLS, axis=1)\n y = data[X_COLS]\n model = joblib.load(model_path)\n y_pred, metrics = regression.evaluate(model, x, y, METRICS_INFO)\n y_pred = pd.DataFrame(data=y_pred, columns=X_COLS)\n return y_pred, metrics\n\n\ndef prepare_data(data_folder, model_path):\n train, test, na_value = dataset.read_data(data_folder)\n x_train = train[X_COLS]\n y_train = train[Y_COL]\n x_test = test[X_COLS]\n y_test = test[Y_COL]\n out_train = train[Y_OUTPUT_COLS]\n out_test = test[Y_OUTPUT_COLS]\n x_pred_train, metrics_train = get_prediction_data(train, model_path)\n x_pred_test, metrics_test = get_prediction_data(test, model_path)\n train = {'x': x_train, 'y': y_train, 'x_pred': x_pred_train, 'out':\n out_train}\n test = {'x': x_test, 'y': y_test, 'x_pred': x_pred_test, 'out': out_test}\n metrics = {'reg_train_pred': metrics_train, 'reg_test_pred': metrics_test}\n return train, test, metrics, na_value\n\n\ndef postprocess_data(out_data, y_pred):\n y_output = out_data.copy()\n y_output[Y_COL] = y_pred\n return y_output\n\n\ndef train_evaluate(data_folder, output_folder, model_path):\n model = lstm.get_model(DROPOUT, len(X_COLS), HIDDEN_SIZE)\n print('Preparing data...')\n train, test, metrics, na_value = prepare_data(data_folder, model_path)\n print('Training...')\n model = lstm.train(model, train['x'], train['y'])\n model = lstm.train(model, train['x_pred'], train['y'])\n print('Evaluating...')\n y_pred, metrics_lstm = lstm.evaluate(model, test['x'], test['y'],\n METRICS_INFO)\n y_pred_reg, metrics_reg_lstm = lstm.evaluate(model, test['x_pred'],\n test['y'], METRICS_INFO)\n metrics['lstm_pred'] = 
metrics_lstm\n metrics['reg_lstm_pred'] = metrics_reg_lstm\n print('Postprocessing data...')\n y_output = postprocess_data(test['out'], y_pred)\n y_output_reg = postprocess_data(test['out'], y_pred_reg)\n output_path = os.path.join(output_folder, 'pred.csv')\n y_output.to_csv(output_path, index=False)\n output_path = os.path.join(output_folder, 'pred_reg.csv')\n y_output_reg.to_csv(output_path, index=False)\n result = {'metrics': metrics, 'na_value': na_value}\n result_path = os.path.join(output_folder, 'result.json')\n json_config = json.dumps(result, indent=4)\n with open(result_path, 'w') as result_file:\n result_file.write(json_config)\n model_path = os.path.join(output_folder, 'lstm.mdl')\n torch.save(model, model_path)\n print('Output files (model, result, prediction) saved to {}'.format(\n output_folder))\n\n\ndef parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument('--data_path', type=str, help=\n 'specifies the data folder path', required=True)\n parser.add_argument('--output_path', type=str, help=\n 'specifies the output folder path', required=True)\n parser.add_argument('--regression_model_path', type=str, required=True,\n help='specifies the regression model path')\n return vars(parser.parse_args())\n\n\ndef main():\n args = parse_args()\n print('Args: {}'.format(args))\n data_path = os.path.abspath(args['data_path'])\n output_path = os.path.abspath(args['output_path'])\n model_path = os.path.abspath(args['regression_model_path'])\n train_evaluate(data_path, output_path, model_path)\n\n\nmain()\n",
"step-4": "import argparse, os, joblib, json, torch\nimport pandas as pd\nfrom utils import regression, dataset, lstm\nPREDICT_X_SKIP_COLS = ['date', 'weight', 'ts_id', 'resp', 'resp_1',\n 'resp_2', 'resp_3', 'resp_4']\nX_COLS = ['resp_1', 'resp_2', 'resp_3', 'resp_4']\nY_OUTPUT_COLS = ['date', 'ts_id']\nY_COL = ['resp']\nMETRICS_INFO = ['mse', 'r2', 'mape']\nDROPOUT = 0.25\nHIDDEN_SIZE = 20\n\n\ndef get_prediction_data(data, model_path):\n x = data.drop(PREDICT_X_SKIP_COLS, axis=1)\n y = data[X_COLS]\n model = joblib.load(model_path)\n y_pred, metrics = regression.evaluate(model, x, y, METRICS_INFO)\n y_pred = pd.DataFrame(data=y_pred, columns=X_COLS)\n return y_pred, metrics\n\n\ndef prepare_data(data_folder, model_path):\n train, test, na_value = dataset.read_data(data_folder)\n x_train = train[X_COLS]\n y_train = train[Y_COL]\n x_test = test[X_COLS]\n y_test = test[Y_COL]\n out_train = train[Y_OUTPUT_COLS]\n out_test = test[Y_OUTPUT_COLS]\n x_pred_train, metrics_train = get_prediction_data(train, model_path)\n x_pred_test, metrics_test = get_prediction_data(test, model_path)\n train = {'x': x_train, 'y': y_train, 'x_pred': x_pred_train, 'out':\n out_train}\n test = {'x': x_test, 'y': y_test, 'x_pred': x_pred_test, 'out': out_test}\n metrics = {'reg_train_pred': metrics_train, 'reg_test_pred': metrics_test}\n return train, test, metrics, na_value\n\n\ndef postprocess_data(out_data, y_pred):\n y_output = out_data.copy()\n y_output[Y_COL] = y_pred\n return y_output\n\n\ndef train_evaluate(data_folder, output_folder, model_path):\n model = lstm.get_model(DROPOUT, len(X_COLS), HIDDEN_SIZE)\n print('Preparing data...')\n train, test, metrics, na_value = prepare_data(data_folder, model_path)\n print('Training...')\n model = lstm.train(model, train['x'], train['y'])\n model = lstm.train(model, train['x_pred'], train['y'])\n print('Evaluating...')\n y_pred, metrics_lstm = lstm.evaluate(model, test['x'], test['y'],\n METRICS_INFO)\n y_pred_reg, metrics_reg_lstm = 
lstm.evaluate(model, test['x_pred'],\n test['y'], METRICS_INFO)\n metrics['lstm_pred'] = metrics_lstm\n metrics['reg_lstm_pred'] = metrics_reg_lstm\n print('Postprocessing data...')\n y_output = postprocess_data(test['out'], y_pred)\n y_output_reg = postprocess_data(test['out'], y_pred_reg)\n output_path = os.path.join(output_folder, 'pred.csv')\n y_output.to_csv(output_path, index=False)\n output_path = os.path.join(output_folder, 'pred_reg.csv')\n y_output_reg.to_csv(output_path, index=False)\n result = {'metrics': metrics, 'na_value': na_value}\n result_path = os.path.join(output_folder, 'result.json')\n json_config = json.dumps(result, indent=4)\n with open(result_path, 'w') as result_file:\n result_file.write(json_config)\n model_path = os.path.join(output_folder, 'lstm.mdl')\n torch.save(model, model_path)\n print('Output files (model, result, prediction) saved to {}'.format(\n output_folder))\n\n\ndef parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument('--data_path', type=str, help=\n 'specifies the data folder path', required=True)\n parser.add_argument('--output_path', type=str, help=\n 'specifies the output folder path', required=True)\n parser.add_argument('--regression_model_path', type=str, required=True,\n help='specifies the regression model path')\n return vars(parser.parse_args())\n\n\ndef main():\n args = parse_args()\n print('Args: {}'.format(args))\n data_path = os.path.abspath(args['data_path'])\n output_path = os.path.abspath(args['output_path'])\n model_path = os.path.abspath(args['regression_model_path'])\n train_evaluate(data_path, output_path, model_path)\n\n\nmain()\n",
"step-5": "import argparse, os, joblib, json, torch\nimport pandas as pd\nfrom utils import regression, dataset, lstm\n\nPREDICT_X_SKIP_COLS = [\"date\", \"weight\", \"ts_id\", \"resp\", \"resp_1\", \"resp_2\", \"resp_3\", \"resp_4\"]\nX_COLS = [\"resp_1\", \"resp_2\", \"resp_3\", \"resp_4\"]\nY_OUTPUT_COLS = [\"date\", \"ts_id\"]\nY_COL = [\"resp\"]\nMETRICS_INFO = [\"mse\", \"r2\", \"mape\"]\nDROPOUT = 0.25\nHIDDEN_SIZE = 20\n\ndef get_prediction_data(data, model_path):\n\tx = data.drop(PREDICT_X_SKIP_COLS, axis=1)\n\ty = data[X_COLS]\n\tmodel = joblib.load(model_path)\n\t(y_pred, metrics) = regression.evaluate(model, x, y, METRICS_INFO)\n\ty_pred = pd.DataFrame(data=y_pred, columns=X_COLS)\n\treturn (y_pred, metrics)\n\ndef prepare_data(data_folder, model_path):\n\t(train, test, na_value) = dataset.read_data(data_folder)\n\tx_train = train[X_COLS]\n\ty_train = train[Y_COL]\n\tx_test = test[X_COLS]\n\ty_test = test[Y_COL]\n\tout_train = train[Y_OUTPUT_COLS]\n\tout_test = test[Y_OUTPUT_COLS]\n\t(x_pred_train , metrics_train) = get_prediction_data(train, model_path)\n\t(x_pred_test, metrics_test) = get_prediction_data(test, model_path)\n\ttrain = { \"x\": x_train, \"y\": y_train, \"x_pred\": x_pred_train, \"out\": out_train}\n\ttest = { \"x\": x_test, \"y\": y_test, \"x_pred\": x_pred_test, \"out\": out_test}\n\tmetrics = {\n\t\t\"reg_train_pred\": metrics_train,\n\t\t\"reg_test_pred\": metrics_test\n\t}\n\treturn (train, test, metrics, na_value)\n\ndef postprocess_data(out_data, y_pred):\n\ty_output = out_data.copy()\n\ty_output[Y_COL] = y_pred\n\treturn y_output\n\ndef train_evaluate(data_folder, output_folder, model_path):\n\tmodel = lstm.get_model(DROPOUT, len(X_COLS), HIDDEN_SIZE)\n\n\tprint(\"Preparing data...\")\n\t(train, test, metrics, na_value) = prepare_data(data_folder, model_path)\n\n\tprint(\"Training...\")\n\tmodel = lstm.train(model, train[\"x\"], train[\"y\"])\n\tmodel = lstm.train(model, train[\"x_pred\"], 
train[\"y\"])\n\n\tprint(\"Evaluating...\")\n\t(y_pred, metrics_lstm) = lstm.evaluate(model, test[\"x\"],\n\t\ttest[\"y\"], METRICS_INFO)\n\t(y_pred_reg, metrics_reg_lstm) = lstm.evaluate(model,\n\t\ttest[\"x_pred\"], test[\"y\"], METRICS_INFO)\n\tmetrics[\"lstm_pred\"] = metrics_lstm\n\tmetrics[\"reg_lstm_pred\"] = metrics_reg_lstm\n\n\tprint(\"Postprocessing data...\")\n\ty_output = postprocess_data(test[\"out\"], y_pred)\n\ty_output_reg = postprocess_data(test[\"out\"], y_pred_reg)\n\n\toutput_path = os.path.join(output_folder, \"pred.csv\")\n\ty_output.to_csv(output_path, index=False)\n\n\toutput_path = os.path.join(output_folder, \"pred_reg.csv\")\n\ty_output_reg.to_csv(output_path, index=False)\n\n\tresult = { \"metrics\": metrics, \"na_value\": na_value }\n\tresult_path = os.path.join(output_folder, \"result.json\")\n\tjson_config = json.dumps(result, indent=4)\n\twith open(result_path, \"w\") as result_file:\n\t\tresult_file.write(json_config)\n\n\tmodel_path = os.path.join(output_folder, \"lstm.mdl\")\n\ttorch.save(model, model_path)\n\tprint(\"Output files (model, result, prediction) saved to {}\".format(\n\t\toutput_folder))\n\ndef parse_args():\n\tparser = argparse.ArgumentParser()\n\tparser.add_argument(\n\t\t\"--data_path\", type=str, help=\"specifies the data folder path\",\n\t\trequired=True)\n\tparser.add_argument(\n\t\t\"--output_path\", type=str, help=\"specifies the output folder path\",\n\t\trequired=True)\n\tparser.add_argument(\n\t\t\"--regression_model_path\", type=str, required = True,\n\t\thelp=\"specifies the regression model path\")\n\treturn vars(parser.parse_args())\n\ndef main():\n\targs = parse_args()\n\tprint(\"Args: {}\".format(args))\n\tdata_path = os.path.abspath(args[\"data_path\"])\n\toutput_path = os.path.abspath(args[\"output_path\"])\n\tmodel_path = os.path.abspath(args[\"regression_model_path\"])\n\ttrain_evaluate(data_path, output_path, model_path)\n\nmain()\n",
"step-ids": [
6,
7,
8,
9,
10
]
}
|
[
6,
7,
8,
9,
10
] |
# Copyright (c) OpenMMLab. All rights reserved.
import torch
from mmagic.registry import MODELS
def test_colorization_net():
    """Smoke-test a ColorizationNet forward pass on CPU (and GPU if present)."""
    cfg = dict(
        type='ColorizationNet', input_nc=4, output_nc=2, norm_type='batch')

    # Instantiate through the registry and sanity-check the class.
    net = MODELS.build(cfg)
    assert net.__class__.__name__ == 'ColorizationNet'

    # Random inputs: single-channel image, two-channel hints, one-channel mask.
    img_a = torch.rand(1, 1, 256, 256)
    img_b = torch.rand(1, 2, 256, 256)
    hint_mask = torch.rand(1, 1, 256, 256)
    expected_reg_shape = (1, 2, 256, 256)

    # CPU forward: returns class logits, regression output and a feature dict.
    out_class, out_reg, feats = net(img_a, img_b, hint_mask)
    assert isinstance(feats, dict)
    assert feats['conv1_2'].shape == (1, 64, 256, 256)
    assert feats['out_reg'].shape == expected_reg_shape

    # GPU forward, when CUDA is available: every feature entry is a tensor.
    if torch.cuda.is_available():
        net = net.cuda()
        out_class, out_reg, feats = net(
            img_a.cuda(), img_b.cuda(), hint_mask.cuda())
        assert isinstance(feats, dict)
        for value in feats.values():
            assert torch.is_tensor(value)
|
normal
|
{
"blob_id": "94be205e516c1f1248b6028419c04c927236596e",
"index": 618,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef test_colorization_net():\n model_cfg = dict(type='ColorizationNet', input_nc=4, output_nc=2,\n norm_type='batch')\n model = MODELS.build(model_cfg)\n assert model.__class__.__name__ == 'ColorizationNet'\n input_A = torch.rand(1, 1, 256, 256)\n input_B = torch.rand(1, 2, 256, 256)\n mask_B = torch.rand(1, 1, 256, 256)\n target_shape = 1, 2, 256, 256\n out_class, out_reg, feature_map = model(input_A, input_B, mask_B)\n assert isinstance(feature_map, dict)\n assert feature_map['conv1_2'].shape == (1, 64, 256, 256) and feature_map[\n 'out_reg'].shape == target_shape\n if torch.cuda.is_available():\n model = model.cuda()\n input_A = input_A.cuda()\n input_B = input_B.cuda()\n mask_B = mask_B.cuda()\n out_class, out_reg, feature_map = model(input_A, input_B, mask_B)\n assert isinstance(feature_map, dict)\n for item in feature_map.keys():\n assert torch.is_tensor(feature_map[item])\n",
"step-3": "import torch\nfrom mmagic.registry import MODELS\n\n\ndef test_colorization_net():\n model_cfg = dict(type='ColorizationNet', input_nc=4, output_nc=2,\n norm_type='batch')\n model = MODELS.build(model_cfg)\n assert model.__class__.__name__ == 'ColorizationNet'\n input_A = torch.rand(1, 1, 256, 256)\n input_B = torch.rand(1, 2, 256, 256)\n mask_B = torch.rand(1, 1, 256, 256)\n target_shape = 1, 2, 256, 256\n out_class, out_reg, feature_map = model(input_A, input_B, mask_B)\n assert isinstance(feature_map, dict)\n assert feature_map['conv1_2'].shape == (1, 64, 256, 256) and feature_map[\n 'out_reg'].shape == target_shape\n if torch.cuda.is_available():\n model = model.cuda()\n input_A = input_A.cuda()\n input_B = input_B.cuda()\n mask_B = mask_B.cuda()\n out_class, out_reg, feature_map = model(input_A, input_B, mask_B)\n assert isinstance(feature_map, dict)\n for item in feature_map.keys():\n assert torch.is_tensor(feature_map[item])\n",
"step-4": "# Copyright (c) OpenMMLab. All rights reserved.\nimport torch\n\nfrom mmagic.registry import MODELS\n\n\ndef test_colorization_net():\n\n model_cfg = dict(\n type='ColorizationNet', input_nc=4, output_nc=2, norm_type='batch')\n\n # build model\n model = MODELS.build(model_cfg)\n\n # test attributes\n assert model.__class__.__name__ == 'ColorizationNet'\n\n # prepare data\n input_A = torch.rand(1, 1, 256, 256)\n input_B = torch.rand(1, 2, 256, 256)\n mask_B = torch.rand(1, 1, 256, 256)\n\n target_shape = (1, 2, 256, 256)\n\n # test on cpu\n (out_class, out_reg, feature_map) = model(input_A, input_B, mask_B)\n assert isinstance(feature_map, dict)\n assert feature_map['conv1_2'].shape == (1, 64, 256, 256) \\\n and feature_map['out_reg'].shape == target_shape\n\n # test on gpu\n if torch.cuda.is_available():\n model = model.cuda()\n input_A = input_A.cuda()\n input_B = input_B.cuda()\n mask_B = mask_B.cuda()\n (out_class, out_reg, feature_map) = \\\n model(input_A, input_B, mask_B)\n\n assert isinstance(feature_map, dict)\n for item in feature_map.keys():\n assert torch.is_tensor(feature_map[item])\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
q.append((1, 0, 0))
while q:
e, clip, t = q.popleft()
if e == s:
print(t)
exit(0)
if 0 < e < 1001:
if visited[e][e] is False:
visited[e][e] = True
q.append((e, e, t + 1))
if e + clip < 1001 and visited[e + clip][clip] is False:
visited[e + clip][clip] = True
q.append((e + clip, clip, t + 1))
if visited[e - 1][clip] is False:
visited[e - 1][clip] = True
q.append((e - 1, clip, t + 1))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
s = int(input())
q = deque()
visited = [([False] * 1001) for _ in range(1001)]
visited[1][0] = True
q.append((1, 0, 0))
while q:
e, clip, t = q.popleft()
if e == s:
print(t)
exit(0)
if 0 < e < 1001:
if visited[e][e] is False:
visited[e][e] = True
q.append((e, e, t + 1))
if e + clip < 1001 and visited[e + clip][clip] is False:
visited[e + clip][clip] = True
q.append((e + clip, clip, t + 1))
if visited[e - 1][clip] is False:
visited[e - 1][clip] = True
q.append((e - 1, clip, t + 1))
<|reserved_special_token_1|>
from collections import deque

# BFS over (screen count, clipboard count) states to reach exactly `target`
# emoticons in the minimum number of seconds (BOJ 14226).
target = int(input())

# seen[screen][clipboard] marks states already enqueued.
seen = [[False] * 1001 for _ in range(1001)]
seen[1][0] = True

# Each queue entry is (emoticons on screen, clipboard count, elapsed seconds).
queue = deque([(1, 0, 0)])
while queue:
    screen, clipboard, elapsed = queue.popleft()
    if screen == target:
        print(elapsed)
        exit(0)
    if 0 < screen < 1001:
        # Copy everything on screen into the clipboard.
        if not seen[screen][screen]:
            seen[screen][screen] = True
            queue.append((screen, screen, elapsed + 1))
        # Paste the clipboard onto the screen.
        if screen + clipboard < 1001 and not seen[screen + clipboard][clipboard]:
            seen[screen + clipboard][clipboard] = True
            queue.append((screen + clipboard, clipboard, elapsed + 1))
        # Delete one emoticon from the screen.
        if not seen[screen - 1][clipboard]:
            seen[screen - 1][clipboard] = True
            queue.append((screen - 1, clipboard, elapsed + 1))
<|reserved_special_token_1|>
# https://kyu9341.github.io/algorithm/2020/03/11/algorithm14226/
# https://developingbear.tistory.com/138
# https://devbelly.tistory.com/108
# Goal: produce exactly s emoticons on the screen.
# Three operations are available (copy-all, paste, delete-one).
# Solved with BFS: visited is a 2-D table of visited states keyed by the
# number of emoticons on screen and the number stored in the clipboard.
from collections import deque
s = int(input())
q = deque()
# visited[emoticons on screen][emoticons in clipboard]
visited = [[False] * 1001 for _ in range(1001)]
visited[1][0] = True
# state tuple: (emoticons on screen, clipboard count, elapsed seconds)
q.append((1, 0, 0))
while q:
    e, clip, t = q.popleft()
    if e == s:
        print(t)
        exit(0)

    if 0 < e < 1001:
        if visited[e][e] is False:
            visited[e][e] = True
            q.append((e, e, t + 1))
        # no separate clip >= 0 condition is needed: e > 0 was already ensured above
        if e + clip < 1001 and visited[e + clip][clip] is False:
            visited[e + clip][clip] = True
            q.append((e + clip, clip, t + 1))
        # no upper-bound condition on e here, so the delete move is explored in
        # every case rather than only when e would exceed 1000
        if visited[e - 1][clip] is False:
            visited[e - 1][clip] = True
            q.append((e - 1, clip, t + 1))
|
flexible
|
{
"blob_id": "0c14a6fa8b25e1791a6eb9c71290db8bb316819a",
"index": 5684,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nq.append((1, 0, 0))\nwhile q:\n e, clip, t = q.popleft()\n if e == s:\n print(t)\n exit(0)\n if 0 < e < 1001:\n if visited[e][e] is False:\n visited[e][e] = True\n q.append((e, e, t + 1))\n if e + clip < 1001 and visited[e + clip][clip] is False:\n visited[e + clip][clip] = True\n q.append((e + clip, clip, t + 1))\n if visited[e - 1][clip] is False:\n visited[e - 1][clip] = True\n q.append((e - 1, clip, t + 1))\n",
"step-3": "<mask token>\ns = int(input())\nq = deque()\nvisited = [([False] * 1001) for _ in range(1001)]\nvisited[1][0] = True\nq.append((1, 0, 0))\nwhile q:\n e, clip, t = q.popleft()\n if e == s:\n print(t)\n exit(0)\n if 0 < e < 1001:\n if visited[e][e] is False:\n visited[e][e] = True\n q.append((e, e, t + 1))\n if e + clip < 1001 and visited[e + clip][clip] is False:\n visited[e + clip][clip] = True\n q.append((e + clip, clip, t + 1))\n if visited[e - 1][clip] is False:\n visited[e - 1][clip] = True\n q.append((e - 1, clip, t + 1))\n",
"step-4": "from collections import deque\ns = int(input())\nq = deque()\nvisited = [([False] * 1001) for _ in range(1001)]\nvisited[1][0] = True\nq.append((1, 0, 0))\nwhile q:\n e, clip, t = q.popleft()\n if e == s:\n print(t)\n exit(0)\n if 0 < e < 1001:\n if visited[e][e] is False:\n visited[e][e] = True\n q.append((e, e, t + 1))\n if e + clip < 1001 and visited[e + clip][clip] is False:\n visited[e + clip][clip] = True\n q.append((e + clip, clip, t + 1))\n if visited[e - 1][clip] is False:\n visited[e - 1][clip] = True\n q.append((e - 1, clip, t + 1))\n",
"step-5": "# https://kyu9341.github.io/algorithm/2020/03/11/algorithm14226/\n# https://developingbear.tistory.com/138\n# https://devbelly.tistory.com/108\n# 이모티콘 s개 생성\n# 3가지 연산 이용\n# bfs 이용 => visited를 이모티콘 방문 여부 2차원 배열 => 이모티콘의 수 와 클립보드에 저장된 이모티콘의 갯수를 이용\nfrom collections import deque\ns = int(input())\nq = deque()\n# visited[이모티콘의 수][클리보드의 이모티콘 수]\nvisited = [[False] * 1001 for _ in range(1001)]\nvisited[1][0] = True\n# 이모티콘의 수, 클립보드의 수, 시간\nq.append((1, 0, 0))\nwhile q:\n e, clip, t = q.popleft()\n if e == s:\n print(t)\n exit(0)\n\n if 0 < e < 1001:\n if visited[e][e] is False:\n visited[e][e] = True\n q.append((e, e, t + 1))\n # clip이 0 이상 조건이 필요없음 어차피 위에서 e가 0보다 큰걸로 조건 수행했으므로\n if e + clip < 1001 and visited[e + clip][clip] is False:\n visited[e + clip][clip] = True\n q.append((e + clip, clip, t + 1))\n # e가 1000을 넘을때만 수행하는 것이 아닌 모든 경우에 대해서 탐색을 하기 위해서 e에 대한 조건을 걸지 않음\n if visited[e - 1][clip] is False:\n visited[e - 1][clip] = True\n q.append((e - 1, clip, t + 1))\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
class MemberClient(models.Model):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class MemberClient(models.Model):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def __str__(self):
return '{0}'.format(self.client)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class MemberClient(models.Model):
created = models.DateTimeField(auto_now_add=timezone.now())
client = models.ForeignKey(AllUser, related_name='client', default=None,
on_delete=models.CASCADE)
member = models.ForeignKey(AllUser, related_name='member', default=None,
on_delete=models.CASCADE)
profile = models.ForeignKey(Profile, related_name='profile', default=
None, on_delete=models.CASCADE, blank=True, null=True)
def __str__(self):
return '{0}'.format(self.client)
<|reserved_special_token_1|>
from django.db import models
from django.utils import timezone
from accounts.models import AllUser
from profiles.models import Profile
class MemberClient(models.Model):
created = models.DateTimeField(auto_now_add=timezone.now())
client = models.ForeignKey(AllUser, related_name='client', default=None,
on_delete=models.CASCADE)
member = models.ForeignKey(AllUser, related_name='member', default=None,
on_delete=models.CASCADE)
profile = models.ForeignKey(Profile, related_name='profile', default=
None, on_delete=models.CASCADE, blank=True, null=True)
def __str__(self):
return '{0}'.format(self.client)
<|reserved_special_token_1|>
from django.db import models
from django.utils import timezone
from accounts.models import AllUser
from profiles.models import Profile
### MODEL HOLDING MEMBER TO CLIENT RELATIONSHIPS. ###
class MemberClient(models.Model):
    """Model holding a member-to-client relationship.

    Links a ``member`` user to a ``client`` user and optionally references
    the ``profile`` the relationship concerns.
    """

    # ``auto_now_add`` is a boolean flag; the previous ``timezone.now()``
    # argument was a misuse (any truthy value enables the flag, and the call
    # was needlessly evaluated once at import time).
    created = models.DateTimeField(auto_now_add=True)
    client = models.ForeignKey(AllUser,
                               related_name='client',
                               default=None,
                               on_delete=models.CASCADE)
    member = models.ForeignKey(AllUser,
                               related_name='member',
                               default=None,
                               on_delete=models.CASCADE)
    profile = models.ForeignKey(Profile,
                                related_name='profile',
                                default=None,
                                on_delete=models.CASCADE,
                                blank=True,
                                null=True)

    def __str__(self):
        """Return the client user's string representation."""
        return "{0}".format(self.client)
|
flexible
|
{
"blob_id": "b419e26cbf5bbb746f897367ddaa829773a6860c",
"index": 7742,
"step-1": "<mask token>\n\n\nclass MemberClient(models.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass MemberClient(models.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def __str__(self):\n return '{0}'.format(self.client)\n",
"step-3": "<mask token>\n\n\nclass MemberClient(models.Model):\n created = models.DateTimeField(auto_now_add=timezone.now())\n client = models.ForeignKey(AllUser, related_name='client', default=None,\n on_delete=models.CASCADE)\n member = models.ForeignKey(AllUser, related_name='member', default=None,\n on_delete=models.CASCADE)\n profile = models.ForeignKey(Profile, related_name='profile', default=\n None, on_delete=models.CASCADE, blank=True, null=True)\n\n def __str__(self):\n return '{0}'.format(self.client)\n",
"step-4": "from django.db import models\nfrom django.utils import timezone\nfrom accounts.models import AllUser\nfrom profiles.models import Profile\n\n\nclass MemberClient(models.Model):\n created = models.DateTimeField(auto_now_add=timezone.now())\n client = models.ForeignKey(AllUser, related_name='client', default=None,\n on_delete=models.CASCADE)\n member = models.ForeignKey(AllUser, related_name='member', default=None,\n on_delete=models.CASCADE)\n profile = models.ForeignKey(Profile, related_name='profile', default=\n None, on_delete=models.CASCADE, blank=True, null=True)\n\n def __str__(self):\n return '{0}'.format(self.client)\n",
"step-5": "from django.db import models\nfrom django.utils import timezone\nfrom accounts.models import AllUser\nfrom profiles.models import Profile\n\n### MODEL HOLDING MEMBER TO CLIENT RELATIONSHIPS. ###\n\nclass MemberClient(models.Model):\n created = models.DateTimeField(auto_now_add=timezone.now())\n client = models.ForeignKey(AllUser, \n related_name='client', \n default=None, \n on_delete=models.CASCADE)\n member = models.ForeignKey(AllUser,\n related_name='member', \n default=None, \n on_delete=models.CASCADE)\n profile = models.ForeignKey(Profile,\n related_name='profile', \n default=None, \n on_delete=models.CASCADE,\n blank=True,\n null=True)\n \n def __str__(self):\n return \"{0}\".format(self.client)",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
#!/usr/local/bin/python3
"""
Copyright (c) 2015-2019 Ad Schellevis <[email protected]>
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
--------------------------------------------------------------------------------------
returns system activity (top)
"""
import collections
import tempfile
import subprocess
import os
import sys
import ujson
if __name__ == '__main__':
    # fieldnames:      column names taken from top's process-listing header row
    # field_max_width: widest value seen per column (used to align plain output)
    # result:          payload holding top's banner lines and per-thread records
    fieldnames = None
    field_max_width = dict()
    result = {'headers': [], 'details': []}
    is_header = True
    tidpid = dict()
    # top -H lists thread ids in its PID column; build a TID -> PID map from
    # procstat output so each record can report the owning process id instead.
    for line in subprocess.run(['/usr/bin/procstat','-ath'], capture_output=True, text=True).stdout.split('\n'):
        parts = line.split(maxsplit=2)
        if len(parts) > 1:
            tidpid[parts[1]] = parts[0]
    # grab second display so that CPU time data appears
    sp = subprocess.run(['/usr/bin/top','-aHSTn','-d2','999999'], capture_output=True, text=True)
    topData = sp.stdout.strip().split('\n\n',2)[-1]
    for line in topData.split('\n'):
        # end of header, start of top detection
        if line.find('USERNAME') > -1 and line.find('COMMAND') > -1:
            is_header = False
        if is_header:
            # parse headers from top command, add to result
            if len(line.strip()) > 0:
                result['headers'].append(line)
        else:
            # parse details including fieldnames (leave original)
            if fieldnames is None:
                # first listing line is the column header row; prepend 'PID'
                # since the leading TID value is translated to a process id
                fieldnames = ['PID'] + line.split()
            else:
                # limit splits so the trailing remainder -- presumably the
                # command with its arguments -- stays in a single field
                tmp = line.split(maxsplit=10)
                # pre-seed 'C'; assumption: the CPU-number column can be
                # absent from top's output (e.g. single-CPU) -- TODO confirm
                record = {'C': '0'}
                for field_id in range(len(fieldnames)):
                    fieldname = fieldnames[field_id]
                    if field_id == 0: # PID
                        # empty string when the TID is unknown to procstat
                        record[fieldname] = tidpid[tmp[0]] if tmp[0] in tidpid else ''
                    else:
                        record[fieldname] = tmp[field_id - 1]

                    # track the widest value per column for plain-text alignment
                    if fieldname not in field_max_width or field_max_width[fieldname] < len(record[fieldname]):
                        field_max_width[fieldname] = len(record[fieldname])
                result['details'].append(record)

    if len(sys.argv) > 1 and sys.argv[1] == 'json':
        # output as json
        print(ujson.dumps(result))
    else:
        # output plain (reconstruct data)
        for header_line in result['headers']:
            print (header_line)
        print ("\n")
        if fieldnames is not None:
            # build a %-style format string with one left-aligned,
            # width-padded slot per column
            format_str = ""
            header_fields = {}
            for fieldname in fieldnames:
                format_str = '%s %%(%s)-%ds'%(format_str,fieldname, field_max_width[fieldname]+1)
                header_fields[fieldname] = fieldname

            print (format_str % header_fields)
            for detail_line in result['details']:
                print (format_str % detail_line)
|
normal
|
{
"blob_id": "f4ae34be2be2b47b3394e6da751c53c51a1c3174",
"index": 6678,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif __name__ == '__main__':\n fieldnames = None\n field_max_width = dict()\n result = {'headers': [], 'details': []}\n is_header = True\n tidpid = dict()\n for line in subprocess.run(['/usr/bin/procstat', '-ath'],\n capture_output=True, text=True).stdout.split('\\n'):\n parts = line.split(maxsplit=2)\n if len(parts) > 1:\n tidpid[parts[1]] = parts[0]\n sp = subprocess.run(['/usr/bin/top', '-aHSTn', '-d2', '999999'],\n capture_output=True, text=True)\n topData = sp.stdout.strip().split('\\n\\n', 2)[-1]\n for line in topData.split('\\n'):\n if line.find('USERNAME') > -1 and line.find('COMMAND') > -1:\n is_header = False\n if is_header:\n if len(line.strip()) > 0:\n result['headers'].append(line)\n elif fieldnames is None:\n fieldnames = ['PID'] + line.split()\n else:\n tmp = line.split(maxsplit=10)\n record = {'C': '0'}\n for field_id in range(len(fieldnames)):\n fieldname = fieldnames[field_id]\n if field_id == 0:\n record[fieldname] = tidpid[tmp[0]] if tmp[0\n ] in tidpid else ''\n else:\n record[fieldname] = tmp[field_id - 1]\n if fieldname not in field_max_width or field_max_width[\n fieldname] < len(record[fieldname]):\n field_max_width[fieldname] = len(record[fieldname])\n result['details'].append(record)\n if len(sys.argv) > 1 and sys.argv[1] == 'json':\n print(ujson.dumps(result))\n else:\n for header_line in result['headers']:\n print(header_line)\n print('\\n')\n if fieldnames is not None:\n format_str = ''\n header_fields = {}\n for fieldname in fieldnames:\n format_str = '%s %%(%s)-%ds' % (format_str, fieldname, \n field_max_width[fieldname] + 1)\n header_fields[fieldname] = fieldname\n print(format_str % header_fields)\n for detail_line in result['details']:\n print(format_str % detail_line)\n",
"step-3": "<mask token>\nimport collections\nimport tempfile\nimport subprocess\nimport os\nimport sys\nimport ujson\nif __name__ == '__main__':\n fieldnames = None\n field_max_width = dict()\n result = {'headers': [], 'details': []}\n is_header = True\n tidpid = dict()\n for line in subprocess.run(['/usr/bin/procstat', '-ath'],\n capture_output=True, text=True).stdout.split('\\n'):\n parts = line.split(maxsplit=2)\n if len(parts) > 1:\n tidpid[parts[1]] = parts[0]\n sp = subprocess.run(['/usr/bin/top', '-aHSTn', '-d2', '999999'],\n capture_output=True, text=True)\n topData = sp.stdout.strip().split('\\n\\n', 2)[-1]\n for line in topData.split('\\n'):\n if line.find('USERNAME') > -1 and line.find('COMMAND') > -1:\n is_header = False\n if is_header:\n if len(line.strip()) > 0:\n result['headers'].append(line)\n elif fieldnames is None:\n fieldnames = ['PID'] + line.split()\n else:\n tmp = line.split(maxsplit=10)\n record = {'C': '0'}\n for field_id in range(len(fieldnames)):\n fieldname = fieldnames[field_id]\n if field_id == 0:\n record[fieldname] = tidpid[tmp[0]] if tmp[0\n ] in tidpid else ''\n else:\n record[fieldname] = tmp[field_id - 1]\n if fieldname not in field_max_width or field_max_width[\n fieldname] < len(record[fieldname]):\n field_max_width[fieldname] = len(record[fieldname])\n result['details'].append(record)\n if len(sys.argv) > 1 and sys.argv[1] == 'json':\n print(ujson.dumps(result))\n else:\n for header_line in result['headers']:\n print(header_line)\n print('\\n')\n if fieldnames is not None:\n format_str = ''\n header_fields = {}\n for fieldname in fieldnames:\n format_str = '%s %%(%s)-%ds' % (format_str, fieldname, \n field_max_width[fieldname] + 1)\n header_fields[fieldname] = fieldname\n print(format_str % header_fields)\n for detail_line in result['details']:\n print(format_str % detail_line)\n",
"step-4": "#!/usr/local/bin/python3\n\n\"\"\"\n Copyright (c) 2015-2019 Ad Schellevis <[email protected]>\n All rights reserved.\n\n Redistribution and use in source and binary forms, with or without\n modification, are permitted provided that the following conditions are met:\n\n 1. Redistributions of source code must retain the above copyright notice,\n this list of conditions and the following disclaimer.\n\n 2. Redistributions in binary form must reproduce the above copyright\n notice, this list of conditions and the following disclaimer in the\n documentation and/or other materials provided with the distribution.\n\n THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES,\n INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY\n AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE\n AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,\n OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n POSSIBILITY OF SUCH DAMAGE.\n\n --------------------------------------------------------------------------------------\n returns system activity (top)\n\"\"\"\nimport collections\nimport tempfile\nimport subprocess\nimport os\nimport sys\nimport ujson\n\nif __name__ == '__main__':\n fieldnames = None\n field_max_width = dict()\n result = {'headers': [], 'details': []}\n is_header = True\n tidpid = dict()\n for line in subprocess.run(['/usr/bin/procstat','-ath'], capture_output=True, text=True).stdout.split('\\n'):\n parts = line.split(maxsplit=2)\n if len(parts) > 1:\n tidpid[parts[1]] = parts[0]\n # grab second display so that CPU time data appears\n sp = 
subprocess.run(['/usr/bin/top','-aHSTn','-d2','999999'], capture_output=True, text=True)\n topData = sp.stdout.strip().split('\\n\\n',2)[-1]\n for line in topData.split('\\n'):\n # end of header, start of top detection\n if line.find('USERNAME') > -1 and line.find('COMMAND') > -1:\n is_header = False\n if is_header:\n # parse headers from top command, add to result\n if len(line.strip()) > 0:\n result['headers'].append(line)\n else:\n # parse details including fieldnames (leave original)\n if fieldnames is None:\n fieldnames = ['PID'] + line.split()\n else:\n tmp = line.split(maxsplit=10)\n record = {'C': '0'}\n for field_id in range(len(fieldnames)):\n fieldname = fieldnames[field_id]\n if field_id == 0: # PID\n record[fieldname] = tidpid[tmp[0]] if tmp[0] in tidpid else ''\n else:\n record[fieldname] = tmp[field_id - 1]\n\n if fieldname not in field_max_width or field_max_width[fieldname] < len(record[fieldname]):\n field_max_width[fieldname] = len(record[fieldname])\n result['details'].append(record)\n\n if len(sys.argv) > 1 and sys.argv[1] == 'json':\n # output as json\n print(ujson.dumps(result))\n else:\n # output plain (reconstruct data)\n for header_line in result['headers']:\n print (header_line)\n print (\"\\n\")\n if fieldnames is not None:\n format_str = \"\"\n header_fields = {}\n for fieldname in fieldnames:\n format_str = '%s %%(%s)-%ds'%(format_str,fieldname, field_max_width[fieldname]+1)\n header_fields[fieldname] = fieldname\n\n print (format_str % header_fields)\n for detail_line in result['details']:\n print (format_str % detail_line)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, absolute_import
from datetime import datetime
try:
from unittest.mock import patch
except ImportError:
from mock import patch
import pytest
from django.test import TestCase
try:
from django.test import override_settings
except ImportError:
from django.test.utils import override_settings
from django.utils import timezone
from custom_email_user.models import EmailUser
from custom_email_user.managers import EmailUserManager
fake_now = datetime(2015, 9, 10)
@override_settings(USE_TZ=False)
class TestEmailUserManager(TestCase):
    """Unit tests for the user-creation helpers on ``EmailUserManager``."""

    def setUp(self):
        # Shared credentials used by every test in this class.
        self.email = '[email protected]'
        self.password = 'default'

    def test_private_create_user_without_email(self):
        """
        Test that EmailUser.objects._create_user without email raises a
        ValueError exception
        """
        with pytest.raises(ValueError) as exinfo:
            EmailUser.objects._create_user(None, None, False, False)
        self.assertIn('email must be set', str(exinfo.value))

    @patch.object(timezone, 'now', return_value=fake_now)
    def test_private_create_user_its_ok(self, mock_now):
        """_create_user persists a staff (non-super) user with all fields set."""
        user = EmailUser.objects._create_user(self.email, self.password,
                                              True, False)
        self.assertTrue(isinstance(user, EmailUser))
        self.assertIsNotNone(user.pk)
        self.assertEqual(user.email, self.email)
        # timezone.now is patched above, so both timestamps must be fake_now.
        self.assertEqual(user.date_joined, fake_now)
        self.assertEqual(user.last_login, fake_now)
        self.assertTrue(user.is_staff)
        self.assertTrue(user.is_active)
        self.assertFalse(user.is_superuser)
        self.assertTrue(user.check_password(self.password))

    def test_private_create_user_with_wrong_email(self):
        """_create_user rejects a syntactically invalid email address."""
        with pytest.raises(ValueError) as exinfo:
            EmailUser.objects._create_user('wrong@example', None, False, False)
        self.assertIn('email must be a valid email', str(exinfo.value))

    @patch.object(EmailUserManager, '_create_user')
    def test_create_user_call_private_create_user_without_staff(
            self, mock_create_user):
        """create_user delegates with is_staff=False, is_superuser=False."""
        EmailUser.objects.create_user(self.email, self.password)
        mock_create_user.assert_called_once_with(
            self.email, self.password, False, False)

    @patch.object(EmailUserManager, '_create_user')
    def test_create_user_call_private_create_user_with_staff(
            self, mock_create_user):
        """create_user forwards is_staff=True when requested."""
        EmailUser.objects.create_user(self.email, self.password, True)
        mock_create_user.assert_called_once_with(
            self.email, self.password, True, False)

    @patch.object(EmailUserManager, '_create_user')
    def test_create_superuser_call_private_create_user(self, mock_create_user):
        """create_superuser delegates with is_staff=True, is_superuser=True."""
        EmailUser.objects.create_superuser(self.email, self.password)
        mock_create_user.assert_called_once_with(
            self.email, self.password, True, True)
|
normal
|
{
"blob_id": "71f9d9d7973809654db3ea613073f2d431f2d65f",
"index": 1510,
"step-1": "<mask token>\n\n\n@override_settings(USE_TZ=False)\nclass TestEmailUserManager(TestCase):\n\n def setUp(self):\n self.email = '[email protected]'\n self.password = 'default'\n\n def test_private_create_user_without_email(self):\n \"\"\"\n Test that EmailUser.objects._create_user without email raise an\n ValueError exception\n \"\"\"\n with pytest.raises(ValueError) as exinfo:\n EmailUser.objects._create_user(None, None, False, False)\n self.assertIn('email must be set', str(exinfo.value))\n\n @patch.object(timezone, 'now', return_value=fake_now)\n def test_private_create_user_its_ok(self, mock_now):\n user = EmailUser.objects._create_user(self.email, self.password, \n True, False)\n self.assertTrue(isinstance(user, EmailUser))\n self.assertIsNotNone(user.pk)\n self.assertEqual(user.email, self.email)\n self.assertEqual(user.date_joined, fake_now)\n self.assertEqual(user.last_login, fake_now)\n self.assertTrue(user.is_staff)\n self.assertTrue(user.is_active)\n self.assertFalse(user.is_superuser)\n self.assertTrue(user.check_password(self.password))\n <mask token>\n\n @patch.object(EmailUserManager, '_create_user')\n def test_create_user_call_private_create_user_without_staff(self,\n mock_create_user):\n EmailUser.objects.create_user(self.email, self.password)\n mock_create_user.assert_called_once_with(self.email, self.password,\n False, False)\n\n @patch.object(EmailUserManager, '_create_user')\n def test_create_user_call_private_create_user_with_staff(self,\n mock_create_user):\n EmailUser.objects.create_user(self.email, self.password, True)\n mock_create_user.assert_called_once_with(self.email, self.password,\n True, False)\n\n @patch.object(EmailUserManager, '_create_user')\n def test_create_superuser_call_private_create_user(self, mock_create_user):\n EmailUser.objects.create_superuser(self.email, self.password)\n mock_create_user.assert_called_once_with(self.email, self.password,\n True, True)\n",
"step-2": "<mask token>\ntry:\n from unittest.mock import patch\nexcept ImportError:\n from mock import patch\n<mask token>\ntry:\n from django.test import override_settings\nexcept ImportError:\n from django.test.utils import override_settings\n<mask token>\n\n\n@override_settings(USE_TZ=False)\nclass TestEmailUserManager(TestCase):\n\n def setUp(self):\n self.email = '[email protected]'\n self.password = 'default'\n\n def test_private_create_user_without_email(self):\n \"\"\"\n Test that EmailUser.objects._create_user without email raise an\n ValueError exception\n \"\"\"\n with pytest.raises(ValueError) as exinfo:\n EmailUser.objects._create_user(None, None, False, False)\n self.assertIn('email must be set', str(exinfo.value))\n\n @patch.object(timezone, 'now', return_value=fake_now)\n def test_private_create_user_its_ok(self, mock_now):\n user = EmailUser.objects._create_user(self.email, self.password, \n True, False)\n self.assertTrue(isinstance(user, EmailUser))\n self.assertIsNotNone(user.pk)\n self.assertEqual(user.email, self.email)\n self.assertEqual(user.date_joined, fake_now)\n self.assertEqual(user.last_login, fake_now)\n self.assertTrue(user.is_staff)\n self.assertTrue(user.is_active)\n self.assertFalse(user.is_superuser)\n self.assertTrue(user.check_password(self.password))\n\n def test_private_create_user_with_wrong_email(self):\n with pytest.raises(ValueError) as exinfo:\n EmailUser.objects._create_user('wrong@example', None, False, False)\n self.assertIn('email must be a valid email', str(exinfo.value))\n\n @patch.object(EmailUserManager, '_create_user')\n def test_create_user_call_private_create_user_without_staff(self,\n mock_create_user):\n EmailUser.objects.create_user(self.email, self.password)\n mock_create_user.assert_called_once_with(self.email, self.password,\n False, False)\n\n @patch.object(EmailUserManager, '_create_user')\n def test_create_user_call_private_create_user_with_staff(self,\n mock_create_user):\n 
EmailUser.objects.create_user(self.email, self.password, True)\n mock_create_user.assert_called_once_with(self.email, self.password,\n True, False)\n\n @patch.object(EmailUserManager, '_create_user')\n def test_create_superuser_call_private_create_user(self, mock_create_user):\n EmailUser.objects.create_superuser(self.email, self.password)\n mock_create_user.assert_called_once_with(self.email, self.password,\n True, True)\n",
"step-3": "<mask token>\ntry:\n from unittest.mock import patch\nexcept ImportError:\n from mock import patch\n<mask token>\ntry:\n from django.test import override_settings\nexcept ImportError:\n from django.test.utils import override_settings\n<mask token>\nfake_now = datetime(2015, 9, 10)\n\n\n@override_settings(USE_TZ=False)\nclass TestEmailUserManager(TestCase):\n\n def setUp(self):\n self.email = '[email protected]'\n self.password = 'default'\n\n def test_private_create_user_without_email(self):\n \"\"\"\n Test that EmailUser.objects._create_user without email raise an\n ValueError exception\n \"\"\"\n with pytest.raises(ValueError) as exinfo:\n EmailUser.objects._create_user(None, None, False, False)\n self.assertIn('email must be set', str(exinfo.value))\n\n @patch.object(timezone, 'now', return_value=fake_now)\n def test_private_create_user_its_ok(self, mock_now):\n user = EmailUser.objects._create_user(self.email, self.password, \n True, False)\n self.assertTrue(isinstance(user, EmailUser))\n self.assertIsNotNone(user.pk)\n self.assertEqual(user.email, self.email)\n self.assertEqual(user.date_joined, fake_now)\n self.assertEqual(user.last_login, fake_now)\n self.assertTrue(user.is_staff)\n self.assertTrue(user.is_active)\n self.assertFalse(user.is_superuser)\n self.assertTrue(user.check_password(self.password))\n\n def test_private_create_user_with_wrong_email(self):\n with pytest.raises(ValueError) as exinfo:\n EmailUser.objects._create_user('wrong@example', None, False, False)\n self.assertIn('email must be a valid email', str(exinfo.value))\n\n @patch.object(EmailUserManager, '_create_user')\n def test_create_user_call_private_create_user_without_staff(self,\n mock_create_user):\n EmailUser.objects.create_user(self.email, self.password)\n mock_create_user.assert_called_once_with(self.email, self.password,\n False, False)\n\n @patch.object(EmailUserManager, '_create_user')\n def test_create_user_call_private_create_user_with_staff(self,\n 
mock_create_user):\n EmailUser.objects.create_user(self.email, self.password, True)\n mock_create_user.assert_called_once_with(self.email, self.password,\n True, False)\n\n @patch.object(EmailUserManager, '_create_user')\n def test_create_superuser_call_private_create_user(self, mock_create_user):\n EmailUser.objects.create_superuser(self.email, self.password)\n mock_create_user.assert_called_once_with(self.email, self.password,\n True, True)\n",
"step-4": "from __future__ import unicode_literals, absolute_import\nfrom datetime import datetime\ntry:\n from unittest.mock import patch\nexcept ImportError:\n from mock import patch\nimport pytest\nfrom django.test import TestCase\ntry:\n from django.test import override_settings\nexcept ImportError:\n from django.test.utils import override_settings\nfrom django.utils import timezone\nfrom custom_email_user.models import EmailUser\nfrom custom_email_user.managers import EmailUserManager\nfake_now = datetime(2015, 9, 10)\n\n\n@override_settings(USE_TZ=False)\nclass TestEmailUserManager(TestCase):\n\n def setUp(self):\n self.email = '[email protected]'\n self.password = 'default'\n\n def test_private_create_user_without_email(self):\n \"\"\"\n Test that EmailUser.objects._create_user without email raise an\n ValueError exception\n \"\"\"\n with pytest.raises(ValueError) as exinfo:\n EmailUser.objects._create_user(None, None, False, False)\n self.assertIn('email must be set', str(exinfo.value))\n\n @patch.object(timezone, 'now', return_value=fake_now)\n def test_private_create_user_its_ok(self, mock_now):\n user = EmailUser.objects._create_user(self.email, self.password, \n True, False)\n self.assertTrue(isinstance(user, EmailUser))\n self.assertIsNotNone(user.pk)\n self.assertEqual(user.email, self.email)\n self.assertEqual(user.date_joined, fake_now)\n self.assertEqual(user.last_login, fake_now)\n self.assertTrue(user.is_staff)\n self.assertTrue(user.is_active)\n self.assertFalse(user.is_superuser)\n self.assertTrue(user.check_password(self.password))\n\n def test_private_create_user_with_wrong_email(self):\n with pytest.raises(ValueError) as exinfo:\n EmailUser.objects._create_user('wrong@example', None, False, False)\n self.assertIn('email must be a valid email', str(exinfo.value))\n\n @patch.object(EmailUserManager, '_create_user')\n def test_create_user_call_private_create_user_without_staff(self,\n mock_create_user):\n 
EmailUser.objects.create_user(self.email, self.password)\n mock_create_user.assert_called_once_with(self.email, self.password,\n False, False)\n\n @patch.object(EmailUserManager, '_create_user')\n def test_create_user_call_private_create_user_with_staff(self,\n mock_create_user):\n EmailUser.objects.create_user(self.email, self.password, True)\n mock_create_user.assert_called_once_with(self.email, self.password,\n True, False)\n\n @patch.object(EmailUserManager, '_create_user')\n def test_create_superuser_call_private_create_user(self, mock_create_user):\n EmailUser.objects.create_superuser(self.email, self.password)\n mock_create_user.assert_called_once_with(self.email, self.password,\n True, True)\n",
"step-5": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals, absolute_import\n\nfrom datetime import datetime\ntry:\n from unittest.mock import patch\nexcept ImportError:\n from mock import patch\n\nimport pytest\n\nfrom django.test import TestCase\ntry:\n from django.test import override_settings\nexcept ImportError:\n from django.test.utils import override_settings\nfrom django.utils import timezone\n\nfrom custom_email_user.models import EmailUser\nfrom custom_email_user.managers import EmailUserManager\n\nfake_now = datetime(2015, 9, 10)\n\n\n@override_settings(USE_TZ=False)\nclass TestEmailUserManager(TestCase):\n\n def setUp(self):\n self.email = '[email protected]'\n self.password = 'default'\n\n def test_private_create_user_without_email(self):\n \"\"\"\n Test that EmailUser.objects._create_user without email raise an\n ValueError exception\n \"\"\"\n with pytest.raises(ValueError) as exinfo:\n EmailUser.objects._create_user(None, None, False, False)\n self.assertIn('email must be set', str(exinfo.value))\n\n @patch.object(timezone, 'now', return_value=fake_now)\n def test_private_create_user_its_ok(self, mock_now):\n user = EmailUser.objects._create_user(self.email, self.password,\n True, False)\n self.assertTrue(isinstance(user, EmailUser))\n self.assertIsNotNone(user.pk)\n self.assertEqual(user.email, self.email)\n self.assertEqual(user.date_joined, fake_now)\n self.assertEqual(user.last_login, fake_now)\n self.assertTrue(user.is_staff)\n self.assertTrue(user.is_active)\n self.assertFalse(user.is_superuser)\n self.assertTrue(user.check_password(self.password))\n\n def test_private_create_user_with_wrong_email(self):\n with pytest.raises(ValueError) as exinfo:\n EmailUser.objects._create_user('wrong@example', None, False, False)\n self.assertIn('email must be a valid email', str(exinfo.value))\n\n @patch.object(EmailUserManager, '_create_user')\n def test_create_user_call_private_create_user_without_staff(\n self, mock_create_user):\n 
EmailUser.objects.create_user(self.email, self.password)\n mock_create_user.assert_called_once_with(\n self.email, self.password, False, False)\n\n @patch.object(EmailUserManager, '_create_user')\n def test_create_user_call_private_create_user_with_staff(\n self, mock_create_user):\n EmailUser.objects.create_user(self.email, self.password, True)\n mock_create_user.assert_called_once_with(\n self.email, self.password, True, False)\n\n @patch.object(EmailUserManager, '_create_user')\n def test_create_superuser_call_private_create_user(self, mock_create_user):\n EmailUser.objects.create_superuser(self.email, self.password)\n mock_create_user.assert_called_once_with(\n self.email, self.password, True, True)\n\n\n",
"step-ids": [
7,
9,
10,
11,
12
]
}
|
[
7,
9,
10,
11,
12
] |
import pandas
from sklearn.externals import joblib
import TrainTestProcesser
from sklearn.ensemble import RandomForestClassifier
from Select_OF_File import get_subdir
import matplotlib.pyplot as mp
import sklearn.model_selection as ms
from sklearn.metrics import confusion_matrix
from sklearn.metrics import classification_report
import numpy as np
import itertools
def main():
#获取数据集
#不使用第一列作为行索引
data_set = pandas.read_csv("dataset.csv",index_col=False,encoding='gbk')
print("数据集的shape:",data_set.shape)
#将数据集分为特征x和标签y
dnumpy_x,dnumpy_y = TrainTestProcesser.split_dframe_x_y(data_set)
#使用StratifiedKFold将数据集分为训练集和测试集
folds= TrainTestProcesser.split_dnumpy_train_test(dnumpy_x, dnumpy_y)
#创建模型
model=RandomForestClassifier(n_estimators=23)
#使用kfol交叉验证
TrainTestProcesser.apply_SKfold(model, folds)
#训练模型
TrainTestProcesser.train_model(model, dnumpy_x, dnumpy_y)
#保存模型以备将来使用
joblib.dump(model,"RFC_model.plk")
def getconfusion_matrix():
mp.rcParams['font.family'] = ['sans-serif']
mp.rcParams['font.sans-serif'] = ['SimHei']
classes=get_subdir("音频文件")
data_set = pandas.read_csv("dataset.csv",index_col=False,encoding='gbk')
dnumpy_x, dnumpy_y = TrainTestProcesser.split_dframe_x_y(data_set)
train_x, test_x, train_y, test_y = ms.train_test_split(dnumpy_x, dnumpy_y, test_size=0.25, random_state=7)
model=joblib.load("RFC_model.plk")
pred_test_y = model.predict(test_x)
#混淆矩阵
cm=confusion_matrix(test_y, pred_test_y)
# 获取分类报告
r = classification_report(test_y, pred_test_y)
print('分类报告为:', r, sep='\n')
mp.figure()
plot_confusion_matrix(cm, classes=classes, normalize=True,
title='随机森林分类器混淆矩阵')
def plot_confusion_matrix(cm, classes,normalize=False,title='Confusion matrix',
cmap=mp.cm.Blues):
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
print("混淆矩阵归一化")
else:
print('混淆矩阵未归一化')
print("混淆矩阵为:",cm)
mp.imshow(cm, interpolation='nearest', cmap=cmap)
mp.title(title)
mp.colorbar()
tick_marks = np.arange(len(classes))
mp.xticks(tick_marks, classes, rotation=45)
mp.yticks(tick_marks, classes)
fmt = '.2f' if normalize else 'd'
thresh = cm.max() / 2.
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
mp.text(j, i, format(cm[i, j], fmt),
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black")
mp.tight_layout()
mp.ylabel('True label')
mp.xlabel('Predicted label')
mp.savefig('confusion_matrix_RFC.png', format='png')
mp.show()
if __name__ == "__main__":
main()
getconfusion_matrix()
|
normal
|
{
"blob_id": "b0bc55ab05d49605e2f42ea036f8405727c468d2",
"index": 3504,
"step-1": "<mask token>\n\n\ndef main():\n data_set = pandas.read_csv('dataset.csv', index_col=False, encoding='gbk')\n print('数据集的shape:', data_set.shape)\n dnumpy_x, dnumpy_y = TrainTestProcesser.split_dframe_x_y(data_set)\n folds = TrainTestProcesser.split_dnumpy_train_test(dnumpy_x, dnumpy_y)\n model = RandomForestClassifier(n_estimators=23)\n TrainTestProcesser.apply_SKfold(model, folds)\n TrainTestProcesser.train_model(model, dnumpy_x, dnumpy_y)\n joblib.dump(model, 'RFC_model.plk')\n\n\ndef getconfusion_matrix():\n mp.rcParams['font.family'] = ['sans-serif']\n mp.rcParams['font.sans-serif'] = ['SimHei']\n classes = get_subdir('音频文件')\n data_set = pandas.read_csv('dataset.csv', index_col=False, encoding='gbk')\n dnumpy_x, dnumpy_y = TrainTestProcesser.split_dframe_x_y(data_set)\n train_x, test_x, train_y, test_y = ms.train_test_split(dnumpy_x,\n dnumpy_y, test_size=0.25, random_state=7)\n model = joblib.load('RFC_model.plk')\n pred_test_y = model.predict(test_x)\n cm = confusion_matrix(test_y, pred_test_y)\n r = classification_report(test_y, pred_test_y)\n print('分类报告为:', r, sep='\\n')\n mp.figure()\n plot_confusion_matrix(cm, classes=classes, normalize=True, title=\n '随机森林分类器混淆矩阵')\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef main():\n data_set = pandas.read_csv('dataset.csv', index_col=False, encoding='gbk')\n print('数据集的shape:', data_set.shape)\n dnumpy_x, dnumpy_y = TrainTestProcesser.split_dframe_x_y(data_set)\n folds = TrainTestProcesser.split_dnumpy_train_test(dnumpy_x, dnumpy_y)\n model = RandomForestClassifier(n_estimators=23)\n TrainTestProcesser.apply_SKfold(model, folds)\n TrainTestProcesser.train_model(model, dnumpy_x, dnumpy_y)\n joblib.dump(model, 'RFC_model.plk')\n\n\ndef getconfusion_matrix():\n mp.rcParams['font.family'] = ['sans-serif']\n mp.rcParams['font.sans-serif'] = ['SimHei']\n classes = get_subdir('音频文件')\n data_set = pandas.read_csv('dataset.csv', index_col=False, encoding='gbk')\n dnumpy_x, dnumpy_y = TrainTestProcesser.split_dframe_x_y(data_set)\n train_x, test_x, train_y, test_y = ms.train_test_split(dnumpy_x,\n dnumpy_y, test_size=0.25, random_state=7)\n model = joblib.load('RFC_model.plk')\n pred_test_y = model.predict(test_x)\n cm = confusion_matrix(test_y, pred_test_y)\n r = classification_report(test_y, pred_test_y)\n print('分类报告为:', r, sep='\\n')\n mp.figure()\n plot_confusion_matrix(cm, classes=classes, normalize=True, title=\n '随机森林分类器混淆矩阵')\n\n\ndef plot_confusion_matrix(cm, classes, normalize=False, title=\n 'Confusion matrix', cmap=mp.cm.Blues):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print('混淆矩阵归一化')\n else:\n print('混淆矩阵未归一化')\n print('混淆矩阵为:', cm)\n mp.imshow(cm, interpolation='nearest', cmap=cmap)\n mp.title(title)\n mp.colorbar()\n tick_marks = np.arange(len(classes))\n mp.xticks(tick_marks, classes, rotation=45)\n mp.yticks(tick_marks, classes)\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.0\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n mp.text(j, i, format(cm[i, j], fmt), horizontalalignment='center',\n color='white' if cm[i, j] > thresh else 'black')\n mp.tight_layout()\n mp.ylabel('True label')\n mp.xlabel('Predicted label')\n 
mp.savefig('confusion_matrix_RFC.png', format='png')\n mp.show()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef main():\n data_set = pandas.read_csv('dataset.csv', index_col=False, encoding='gbk')\n print('数据集的shape:', data_set.shape)\n dnumpy_x, dnumpy_y = TrainTestProcesser.split_dframe_x_y(data_set)\n folds = TrainTestProcesser.split_dnumpy_train_test(dnumpy_x, dnumpy_y)\n model = RandomForestClassifier(n_estimators=23)\n TrainTestProcesser.apply_SKfold(model, folds)\n TrainTestProcesser.train_model(model, dnumpy_x, dnumpy_y)\n joblib.dump(model, 'RFC_model.plk')\n\n\ndef getconfusion_matrix():\n mp.rcParams['font.family'] = ['sans-serif']\n mp.rcParams['font.sans-serif'] = ['SimHei']\n classes = get_subdir('音频文件')\n data_set = pandas.read_csv('dataset.csv', index_col=False, encoding='gbk')\n dnumpy_x, dnumpy_y = TrainTestProcesser.split_dframe_x_y(data_set)\n train_x, test_x, train_y, test_y = ms.train_test_split(dnumpy_x,\n dnumpy_y, test_size=0.25, random_state=7)\n model = joblib.load('RFC_model.plk')\n pred_test_y = model.predict(test_x)\n cm = confusion_matrix(test_y, pred_test_y)\n r = classification_report(test_y, pred_test_y)\n print('分类报告为:', r, sep='\\n')\n mp.figure()\n plot_confusion_matrix(cm, classes=classes, normalize=True, title=\n '随机森林分类器混淆矩阵')\n\n\ndef plot_confusion_matrix(cm, classes, normalize=False, title=\n 'Confusion matrix', cmap=mp.cm.Blues):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print('混淆矩阵归一化')\n else:\n print('混淆矩阵未归一化')\n print('混淆矩阵为:', cm)\n mp.imshow(cm, interpolation='nearest', cmap=cmap)\n mp.title(title)\n mp.colorbar()\n tick_marks = np.arange(len(classes))\n mp.xticks(tick_marks, classes, rotation=45)\n mp.yticks(tick_marks, classes)\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.0\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n mp.text(j, i, format(cm[i, j], fmt), horizontalalignment='center',\n color='white' if cm[i, j] > thresh else 'black')\n mp.tight_layout()\n mp.ylabel('True label')\n mp.xlabel('Predicted label')\n 
mp.savefig('confusion_matrix_RFC.png', format='png')\n mp.show()\n\n\nif __name__ == '__main__':\n main()\n getconfusion_matrix()\n",
"step-4": "import pandas\nfrom sklearn.externals import joblib\nimport TrainTestProcesser\nfrom sklearn.ensemble import RandomForestClassifier\nfrom Select_OF_File import get_subdir\nimport matplotlib.pyplot as mp\nimport sklearn.model_selection as ms\nfrom sklearn.metrics import confusion_matrix\nfrom sklearn.metrics import classification_report\nimport numpy as np\nimport itertools\n\n\ndef main():\n data_set = pandas.read_csv('dataset.csv', index_col=False, encoding='gbk')\n print('数据集的shape:', data_set.shape)\n dnumpy_x, dnumpy_y = TrainTestProcesser.split_dframe_x_y(data_set)\n folds = TrainTestProcesser.split_dnumpy_train_test(dnumpy_x, dnumpy_y)\n model = RandomForestClassifier(n_estimators=23)\n TrainTestProcesser.apply_SKfold(model, folds)\n TrainTestProcesser.train_model(model, dnumpy_x, dnumpy_y)\n joblib.dump(model, 'RFC_model.plk')\n\n\ndef getconfusion_matrix():\n mp.rcParams['font.family'] = ['sans-serif']\n mp.rcParams['font.sans-serif'] = ['SimHei']\n classes = get_subdir('音频文件')\n data_set = pandas.read_csv('dataset.csv', index_col=False, encoding='gbk')\n dnumpy_x, dnumpy_y = TrainTestProcesser.split_dframe_x_y(data_set)\n train_x, test_x, train_y, test_y = ms.train_test_split(dnumpy_x,\n dnumpy_y, test_size=0.25, random_state=7)\n model = joblib.load('RFC_model.plk')\n pred_test_y = model.predict(test_x)\n cm = confusion_matrix(test_y, pred_test_y)\n r = classification_report(test_y, pred_test_y)\n print('分类报告为:', r, sep='\\n')\n mp.figure()\n plot_confusion_matrix(cm, classes=classes, normalize=True, title=\n '随机森林分类器混淆矩阵')\n\n\ndef plot_confusion_matrix(cm, classes, normalize=False, title=\n 'Confusion matrix', cmap=mp.cm.Blues):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print('混淆矩阵归一化')\n else:\n print('混淆矩阵未归一化')\n print('混淆矩阵为:', cm)\n mp.imshow(cm, interpolation='nearest', cmap=cmap)\n mp.title(title)\n mp.colorbar()\n tick_marks = np.arange(len(classes))\n mp.xticks(tick_marks, classes, rotation=45)\n 
mp.yticks(tick_marks, classes)\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.0\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n mp.text(j, i, format(cm[i, j], fmt), horizontalalignment='center',\n color='white' if cm[i, j] > thresh else 'black')\n mp.tight_layout()\n mp.ylabel('True label')\n mp.xlabel('Predicted label')\n mp.savefig('confusion_matrix_RFC.png', format='png')\n mp.show()\n\n\nif __name__ == '__main__':\n main()\n getconfusion_matrix()\n",
"step-5": "import pandas\nfrom sklearn.externals import joblib\nimport TrainTestProcesser\nfrom sklearn.ensemble import RandomForestClassifier\nfrom Select_OF_File import get_subdir\nimport matplotlib.pyplot as mp\nimport sklearn.model_selection as ms\nfrom sklearn.metrics import confusion_matrix\nfrom sklearn.metrics import classification_report\nimport numpy as np\nimport itertools\ndef main():\n #获取数据集\n #不使用第一列作为行索引\n data_set = pandas.read_csv(\"dataset.csv\",index_col=False,encoding='gbk')\n print(\"数据集的shape:\",data_set.shape)\n #将数据集分为特征x和标签y\n dnumpy_x,dnumpy_y = TrainTestProcesser.split_dframe_x_y(data_set)\n #使用StratifiedKFold将数据集分为训练集和测试集\n folds= TrainTestProcesser.split_dnumpy_train_test(dnumpy_x, dnumpy_y)\n #创建模型\n model=RandomForestClassifier(n_estimators=23)\n #使用kfol交叉验证\n TrainTestProcesser.apply_SKfold(model, folds)\n #训练模型\n TrainTestProcesser.train_model(model, dnumpy_x, dnumpy_y)\n #保存模型以备将来使用\n joblib.dump(model,\"RFC_model.plk\")\n\n\n\ndef getconfusion_matrix():\n mp.rcParams['font.family'] = ['sans-serif']\n mp.rcParams['font.sans-serif'] = ['SimHei']\n classes=get_subdir(\"音频文件\")\n data_set = pandas.read_csv(\"dataset.csv\",index_col=False,encoding='gbk')\n dnumpy_x, dnumpy_y = TrainTestProcesser.split_dframe_x_y(data_set)\n train_x, test_x, train_y, test_y = ms.train_test_split(dnumpy_x, dnumpy_y, test_size=0.25, random_state=7)\n model=joblib.load(\"RFC_model.plk\")\n pred_test_y = model.predict(test_x)\n #混淆矩阵\n cm=confusion_matrix(test_y, pred_test_y)\n # 获取分类报告\n r = classification_report(test_y, pred_test_y)\n print('分类报告为:', r, sep='\\n')\n\n mp.figure()\n plot_confusion_matrix(cm, classes=classes, normalize=True,\n title='随机森林分类器混淆矩阵')\n\ndef plot_confusion_matrix(cm, classes,normalize=False,title='Confusion matrix',\n cmap=mp.cm.Blues):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"混淆矩阵归一化\")\n else:\n print('混淆矩阵未归一化')\n\n print(\"混淆矩阵为:\",cm)\n\n mp.imshow(cm, interpolation='nearest', 
cmap=cmap)\n mp.title(title)\n mp.colorbar()\n tick_marks = np.arange(len(classes))\n mp.xticks(tick_marks, classes, rotation=45)\n mp.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n mp.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n mp.tight_layout()\n mp.ylabel('True label')\n mp.xlabel('Predicted label')\n mp.savefig('confusion_matrix_RFC.png', format='png')\n mp.show()\n\n\n\n\n\n\n\nif __name__ == \"__main__\":\n main()\n getconfusion_matrix()",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
train_station.add_platform(platform)
<|reserved_special_token_0|>
platform.accept_train(train_1)
<|reserved_special_token_0|>
train_1.dock_section(train_section_1)
train_1.dock_section(train_section_2)
train_1.dock_section(train_section_3)
train_1.print_sections()
<|reserved_special_token_0|>
train_section_1.get_on_train(person_1)
train_section_1.get_on_train(person_2)
train_section_2.get_on_train(person_3)
train_section_3.get_on_train(person_4)
train_section_2.get_off_train(person_3)
train_1.show_current_passengers()
train_1.count_passengers()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
platform = Platform('platform 1')
train_station = TrainStation('Linz')
train_station.add_platform(platform)
train_1 = ICE('ICE 1')
platform.accept_train(train_1)
train_section_1 = TrainSection('First section')
train_section_2 = TrainSection('Second section')
train_section_3 = TrainSection('Third section')
train_1.dock_section(train_section_1)
train_1.dock_section(train_section_2)
train_1.dock_section(train_section_3)
train_1.print_sections()
person_1 = Person('Franz', 'Mair')
person_2 = Person('Michael', 'Schuh')
person_3 = Person('Herbert', 'Sailer')
person_4 = Person('Michaela', 'Mader')
train_section_1.get_on_train(person_1)
train_section_1.get_on_train(person_2)
train_section_2.get_on_train(person_3)
train_section_3.get_on_train(person_4)
train_section_2.get_off_train(person_3)
train_1.show_current_passengers()
train_1.count_passengers()
<|reserved_special_token_1|>
from draft import *
platform = Platform('platform 1')
train_station = TrainStation('Linz')
train_station.add_platform(platform)
train_1 = ICE('ICE 1')
platform.accept_train(train_1)
train_section_1 = TrainSection('First section')
train_section_2 = TrainSection('Second section')
train_section_3 = TrainSection('Third section')
train_1.dock_section(train_section_1)
train_1.dock_section(train_section_2)
train_1.dock_section(train_section_3)
train_1.print_sections()
person_1 = Person('Franz', 'Mair')
person_2 = Person('Michael', 'Schuh')
person_3 = Person('Herbert', 'Sailer')
person_4 = Person('Michaela', 'Mader')
train_section_1.get_on_train(person_1)
train_section_1.get_on_train(person_2)
train_section_2.get_on_train(person_3)
train_section_3.get_on_train(person_4)
train_section_2.get_off_train(person_3)
train_1.show_current_passengers()
train_1.count_passengers()
<|reserved_special_token_1|>
from draft import *
# create a train station
platform = Platform('platform 1')
train_station = TrainStation('Linz')
train_station.add_platform(platform)
# create a train
train_1 = ICE('ICE 1')
platform.accept_train(train_1)
train_section_1 = TrainSection('First section')
train_section_2 = TrainSection('Second section')
train_section_3 = TrainSection('Third section')
train_1.dock_section(train_section_1)
train_1.dock_section(train_section_2)
train_1.dock_section(train_section_3)
train_1.print_sections()
# Expected output: First section - Second section - Third section
# create persons
person_1 = Person('Franz', 'Mair')
person_2 = Person('Michael', 'Schuh')
person_3 = Person('Herbert', 'Sailer')
person_4 = Person('Michaela', 'Mader')
train_section_1.get_on_train(person_1)
# Expected output: Franz Mair is on the train now
train_section_1.get_on_train(person_2)
# Expected output: Michael Schuh is on the train now
train_section_2.get_on_train(person_3)
# Expected output: Herbert Sailer is on the train now
train_section_3.get_on_train(person_4)
# Expected output: Michaela Mader is on the train now
train_section_2.get_off_train(person_3)
# Expected output: Herbert Sailer has left the train
# query passengers
train_1.show_current_passengers()
# Expected output: Franz Mair, Michel Schuh, Michaela Mader
train_1.count_passengers()
# Expected output: 3
|
flexible
|
{
"blob_id": "5900dc0acde45ac9a31dc9d489aa8dae304d626b",
"index": 1791,
"step-1": "<mask token>\n",
"step-2": "<mask token>\ntrain_station.add_platform(platform)\n<mask token>\nplatform.accept_train(train_1)\n<mask token>\ntrain_1.dock_section(train_section_1)\ntrain_1.dock_section(train_section_2)\ntrain_1.dock_section(train_section_3)\ntrain_1.print_sections()\n<mask token>\ntrain_section_1.get_on_train(person_1)\ntrain_section_1.get_on_train(person_2)\ntrain_section_2.get_on_train(person_3)\ntrain_section_3.get_on_train(person_4)\ntrain_section_2.get_off_train(person_3)\ntrain_1.show_current_passengers()\ntrain_1.count_passengers()\n",
"step-3": "<mask token>\nplatform = Platform('platform 1')\ntrain_station = TrainStation('Linz')\ntrain_station.add_platform(platform)\ntrain_1 = ICE('ICE 1')\nplatform.accept_train(train_1)\ntrain_section_1 = TrainSection('First section')\ntrain_section_2 = TrainSection('Second section')\ntrain_section_3 = TrainSection('Third section')\ntrain_1.dock_section(train_section_1)\ntrain_1.dock_section(train_section_2)\ntrain_1.dock_section(train_section_3)\ntrain_1.print_sections()\nperson_1 = Person('Franz', 'Mair')\nperson_2 = Person('Michael', 'Schuh')\nperson_3 = Person('Herbert', 'Sailer')\nperson_4 = Person('Michaela', 'Mader')\ntrain_section_1.get_on_train(person_1)\ntrain_section_1.get_on_train(person_2)\ntrain_section_2.get_on_train(person_3)\ntrain_section_3.get_on_train(person_4)\ntrain_section_2.get_off_train(person_3)\ntrain_1.show_current_passengers()\ntrain_1.count_passengers()\n",
"step-4": "from draft import *\nplatform = Platform('platform 1')\ntrain_station = TrainStation('Linz')\ntrain_station.add_platform(platform)\ntrain_1 = ICE('ICE 1')\nplatform.accept_train(train_1)\ntrain_section_1 = TrainSection('First section')\ntrain_section_2 = TrainSection('Second section')\ntrain_section_3 = TrainSection('Third section')\ntrain_1.dock_section(train_section_1)\ntrain_1.dock_section(train_section_2)\ntrain_1.dock_section(train_section_3)\ntrain_1.print_sections()\nperson_1 = Person('Franz', 'Mair')\nperson_2 = Person('Michael', 'Schuh')\nperson_3 = Person('Herbert', 'Sailer')\nperson_4 = Person('Michaela', 'Mader')\ntrain_section_1.get_on_train(person_1)\ntrain_section_1.get_on_train(person_2)\ntrain_section_2.get_on_train(person_3)\ntrain_section_3.get_on_train(person_4)\ntrain_section_2.get_off_train(person_3)\ntrain_1.show_current_passengers()\ntrain_1.count_passengers()\n",
"step-5": "from draft import *\n# create a train station\nplatform = Platform('platform 1')\ntrain_station = TrainStation('Linz')\ntrain_station.add_platform(platform)\n# create a train\ntrain_1 = ICE('ICE 1')\nplatform.accept_train(train_1)\ntrain_section_1 = TrainSection('First section')\ntrain_section_2 = TrainSection('Second section')\ntrain_section_3 = TrainSection('Third section')\ntrain_1.dock_section(train_section_1)\ntrain_1.dock_section(train_section_2)\ntrain_1.dock_section(train_section_3)\ntrain_1.print_sections()\n# Expected output: First section - Second section - Third section\n# create persons\nperson_1 = Person('Franz', 'Mair')\nperson_2 = Person('Michael', 'Schuh')\nperson_3 = Person('Herbert', 'Sailer')\nperson_4 = Person('Michaela', 'Mader')\ntrain_section_1.get_on_train(person_1)\n# Expected output: Franz Mair is on the train now\ntrain_section_1.get_on_train(person_2)\n# Expected output: Michael Schuh is on the train now\ntrain_section_2.get_on_train(person_3)\n# Expected output: Herbert Sailer is on the train now\ntrain_section_3.get_on_train(person_4)\n# Expected output: Michaela Mader is on the train now\ntrain_section_2.get_off_train(person_3)\n# Expected output: Herbert Sailer has left the train\n# query passengers\ntrain_1.show_current_passengers()\n# Expected output: Franz Mair, Michel Schuh, Michaela Mader\ntrain_1.count_passengers()\n# Expected output: 3\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
class Solution(object):
def countSmaller(self, nums):
"""
:type nums: List[int]
:rtype: List[int]
naive -- o(n^2)
"""
## StefanPochmann solution #2
def countSmaller(self, nums):
def sort(enum):
half = len(enum) / 2
if half:
left = sort(enum[:half])
right = sort(enum[half:])
for i in range(len(enum))[::-1]:
if not right or left and left[-1][1] > right[-1][1]:
smaller[left[-1][0]] += len(right)
enum[i] = left.pop()
else:
enum[i] = right.pop()
return enum
smaller = [0] * len(nums)
sort(list(enumerate(nums)))
return smaller
## StefanPochmann solution #1
def countSmaller(self, nums):
def sort(enum):
half = len(enum) / 2
if half:
left, right = sort(enum[:half]), sort(enum[half:])
m, n = len(left), len(right)
i = j = 0
while i < m or j < n:
if j == n or i < m and left[i][1] <= right[j][1]:
enum[i+j] = left[i]
smaller[left[i][0]] += j
i += 1
else:
enum[i+j] = right[j]
j += 1
return enum
smaller = [0] * len(nums)
sort(list(enumerate(nums)))
return smaller
"""
a = [2,4,6]
b = [1,3,5]
"""
def mergesort(x):
if len(x)==0:
return x, []
if len(x)==1:
return x, [0]
mid = len(x)/2
a, A = mergesort(x[:mid])
b, B = mergesort(x[mid:])
y, Y = merge(a, b, A, B)
return y,Y
def merge(a, b):
res = []
i,j = 0,0
while i < len(a) and j < len(b):
if a[i] <= b[j]:
res.append(a[i])
i += 1
else:
res.append(b[j])
###
j += 1
if i < len(a):
res += a[i:]
elif j < len(b):
res += b[j:]
return res
merge([2,4,6], [1,3,5], [0,0,0], [0,0,0])
mergesort([5, 2, 6, 1])
"""
base merge sort:
"""
def mergesort(x):
if len(x)==0 or len(x)==1:
return x
else:
mid = len(x)/2
a = mergesort(x[:mid])
b = mergesort(x[mid:])
return merge(a,b)
def merge(a,b, left, right): # left, right will keep track of inversions so far
res = []
while len(a)>0 and len(b)>0:
if a[0] < b[0]:
res += [a[0]]
a = a[1:]
else:
res += [b[0]]
b = b[1:]
if len(a) > 0:
res += a
if len(b) > 0:
res += b
return res
|
normal
|
{
"blob_id": "42021b762737a2eb21866ba029ece4ac120152cd",
"index": 5902,
"step-1": "class Solution(object):\n\n def countSmaller(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: List[int]\n naive -- o(n^2)\n \"\"\"\n\n\n<mask token>\n\n\ndef mergesort(x):\n if len(x) == 0:\n return x, []\n if len(x) == 1:\n return x, [0]\n mid = len(x) / 2\n a, A = mergesort(x[:mid])\n b, B = mergesort(x[mid:])\n y, Y = merge(a, b, A, B)\n return y, Y\n\n\ndef merge(a, b):\n res = []\n i, j = 0, 0\n while i < len(a) and j < len(b):\n if a[i] <= b[j]:\n res.append(a[i])\n i += 1\n else:\n res.append(b[j])\n j += 1\n if i < len(a):\n res += a[i:]\n elif j < len(b):\n res += b[j:]\n return res\n\n\n<mask token>\n\n\ndef mergesort(x):\n if len(x) == 0 or len(x) == 1:\n return x\n else:\n mid = len(x) / 2\n a = mergesort(x[:mid])\n b = mergesort(x[mid:])\n return merge(a, b)\n\n\ndef merge(a, b, left, right):\n res = []\n while len(a) > 0 and len(b) > 0:\n if a[0] < b[0]:\n res += [a[0]]\n a = a[1:]\n else:\n res += [b[0]]\n b = b[1:]\n if len(a) > 0:\n res += a\n if len(b) > 0:\n res += b\n return res\n",
"step-2": "class Solution(object):\n\n def countSmaller(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: List[int]\n naive -- o(n^2)\n \"\"\"\n\n\n<mask token>\n\n\ndef countSmaller(self, nums):\n\n def sort(enum):\n half = len(enum) / 2\n if half:\n left, right = sort(enum[:half]), sort(enum[half:])\n m, n = len(left), len(right)\n i = j = 0\n while i < m or j < n:\n if j == n or i < m and left[i][1] <= right[j][1]:\n enum[i + j] = left[i]\n smaller[left[i][0]] += j\n i += 1\n else:\n enum[i + j] = right[j]\n j += 1\n return enum\n smaller = [0] * len(nums)\n sort(list(enumerate(nums)))\n return smaller\n\n\n<mask token>\n\n\ndef mergesort(x):\n if len(x) == 0:\n return x, []\n if len(x) == 1:\n return x, [0]\n mid = len(x) / 2\n a, A = mergesort(x[:mid])\n b, B = mergesort(x[mid:])\n y, Y = merge(a, b, A, B)\n return y, Y\n\n\ndef merge(a, b):\n res = []\n i, j = 0, 0\n while i < len(a) and j < len(b):\n if a[i] <= b[j]:\n res.append(a[i])\n i += 1\n else:\n res.append(b[j])\n j += 1\n if i < len(a):\n res += a[i:]\n elif j < len(b):\n res += b[j:]\n return res\n\n\n<mask token>\n\n\ndef mergesort(x):\n if len(x) == 0 or len(x) == 1:\n return x\n else:\n mid = len(x) / 2\n a = mergesort(x[:mid])\n b = mergesort(x[mid:])\n return merge(a, b)\n\n\ndef merge(a, b, left, right):\n res = []\n while len(a) > 0 and len(b) > 0:\n if a[0] < b[0]:\n res += [a[0]]\n a = a[1:]\n else:\n res += [b[0]]\n b = b[1:]\n if len(a) > 0:\n res += a\n if len(b) > 0:\n res += b\n return res\n",
"step-3": "class Solution(object):\n\n def countSmaller(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: List[int]\n naive -- o(n^2)\n \"\"\"\n\n\ndef countSmaller(self, nums):\n\n def sort(enum):\n half = len(enum) / 2\n if half:\n left = sort(enum[:half])\n right = sort(enum[half:])\n for i in range(len(enum))[::-1]:\n if not right or left and left[-1][1] > right[-1][1]:\n smaller[left[-1][0]] += len(right)\n enum[i] = left.pop()\n else:\n enum[i] = right.pop()\n return enum\n smaller = [0] * len(nums)\n sort(list(enumerate(nums)))\n return smaller\n\n\ndef countSmaller(self, nums):\n\n def sort(enum):\n half = len(enum) / 2\n if half:\n left, right = sort(enum[:half]), sort(enum[half:])\n m, n = len(left), len(right)\n i = j = 0\n while i < m or j < n:\n if j == n or i < m and left[i][1] <= right[j][1]:\n enum[i + j] = left[i]\n smaller[left[i][0]] += j\n i += 1\n else:\n enum[i + j] = right[j]\n j += 1\n return enum\n smaller = [0] * len(nums)\n sort(list(enumerate(nums)))\n return smaller\n\n\n<mask token>\n\n\ndef mergesort(x):\n if len(x) == 0:\n return x, []\n if len(x) == 1:\n return x, [0]\n mid = len(x) / 2\n a, A = mergesort(x[:mid])\n b, B = mergesort(x[mid:])\n y, Y = merge(a, b, A, B)\n return y, Y\n\n\ndef merge(a, b):\n res = []\n i, j = 0, 0\n while i < len(a) and j < len(b):\n if a[i] <= b[j]:\n res.append(a[i])\n i += 1\n else:\n res.append(b[j])\n j += 1\n if i < len(a):\n res += a[i:]\n elif j < len(b):\n res += b[j:]\n return res\n\n\n<mask token>\n\n\ndef mergesort(x):\n if len(x) == 0 or len(x) == 1:\n return x\n else:\n mid = len(x) / 2\n a = mergesort(x[:mid])\n b = mergesort(x[mid:])\n return merge(a, b)\n\n\ndef merge(a, b, left, right):\n res = []\n while len(a) > 0 and len(b) > 0:\n if a[0] < b[0]:\n res += [a[0]]\n a = a[1:]\n else:\n res += [b[0]]\n b = b[1:]\n if len(a) > 0:\n res += a\n if len(b) > 0:\n res += b\n return res\n",
"step-4": "class Solution(object):\n\n def countSmaller(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: List[int]\n naive -- o(n^2)\n \"\"\"\n\n\ndef countSmaller(self, nums):\n\n def sort(enum):\n half = len(enum) / 2\n if half:\n left = sort(enum[:half])\n right = sort(enum[half:])\n for i in range(len(enum))[::-1]:\n if not right or left and left[-1][1] > right[-1][1]:\n smaller[left[-1][0]] += len(right)\n enum[i] = left.pop()\n else:\n enum[i] = right.pop()\n return enum\n smaller = [0] * len(nums)\n sort(list(enumerate(nums)))\n return smaller\n\n\ndef countSmaller(self, nums):\n\n def sort(enum):\n half = len(enum) / 2\n if half:\n left, right = sort(enum[:half]), sort(enum[half:])\n m, n = len(left), len(right)\n i = j = 0\n while i < m or j < n:\n if j == n or i < m and left[i][1] <= right[j][1]:\n enum[i + j] = left[i]\n smaller[left[i][0]] += j\n i += 1\n else:\n enum[i + j] = right[j]\n j += 1\n return enum\n smaller = [0] * len(nums)\n sort(list(enumerate(nums)))\n return smaller\n\n\n<mask token>\n\n\ndef mergesort(x):\n if len(x) == 0:\n return x, []\n if len(x) == 1:\n return x, [0]\n mid = len(x) / 2\n a, A = mergesort(x[:mid])\n b, B = mergesort(x[mid:])\n y, Y = merge(a, b, A, B)\n return y, Y\n\n\ndef merge(a, b):\n res = []\n i, j = 0, 0\n while i < len(a) and j < len(b):\n if a[i] <= b[j]:\n res.append(a[i])\n i += 1\n else:\n res.append(b[j])\n j += 1\n if i < len(a):\n res += a[i:]\n elif j < len(b):\n res += b[j:]\n return res\n\n\nmerge([2, 4, 6], [1, 3, 5], [0, 0, 0], [0, 0, 0])\nmergesort([5, 2, 6, 1])\n<mask token>\n\n\ndef mergesort(x):\n if len(x) == 0 or len(x) == 1:\n return x\n else:\n mid = len(x) / 2\n a = mergesort(x[:mid])\n b = mergesort(x[mid:])\n return merge(a, b)\n\n\ndef merge(a, b, left, right):\n res = []\n while len(a) > 0 and len(b) > 0:\n if a[0] < b[0]:\n res += [a[0]]\n a = a[1:]\n else:\n res += [b[0]]\n b = b[1:]\n if len(a) > 0:\n res += a\n if len(b) > 0:\n res += b\n return res\n",
"step-5": "class Solution(object):\n def countSmaller(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: List[int]\n naive -- o(n^2)\n \"\"\"\n\n## StefanPochmann solution #2\ndef countSmaller(self, nums):\n def sort(enum):\n half = len(enum) / 2\n if half:\n left = sort(enum[:half])\n right = sort(enum[half:])\n for i in range(len(enum))[::-1]:\n if not right or left and left[-1][1] > right[-1][1]:\n smaller[left[-1][0]] += len(right)\n enum[i] = left.pop()\n else:\n enum[i] = right.pop()\n return enum\n smaller = [0] * len(nums)\n sort(list(enumerate(nums)))\n return smaller\n\n## StefanPochmann solution #1\ndef countSmaller(self, nums):\n def sort(enum):\n half = len(enum) / 2\n if half:\n left, right = sort(enum[:half]), sort(enum[half:])\n m, n = len(left), len(right)\n i = j = 0\n while i < m or j < n:\n if j == n or i < m and left[i][1] <= right[j][1]:\n enum[i+j] = left[i]\n smaller[left[i][0]] += j\n i += 1\n else:\n enum[i+j] = right[j]\n j += 1\n return enum\n smaller = [0] * len(nums)\n sort(list(enumerate(nums)))\n return smaller\n\n\"\"\"\na = [2,4,6] \nb = [1,3,5]\n\"\"\"\n\n\n\ndef mergesort(x):\n if len(x)==0: \n return x, []\n if len(x)==1:\n return x, [0]\n mid = len(x)/2\n a, A = mergesort(x[:mid])\n b, B = mergesort(x[mid:])\n y, Y = merge(a, b, A, B)\n return y,Y\n\ndef merge(a, b):\n res = []\n i,j = 0,0\n while i < len(a) and j < len(b):\n if a[i] <= b[j]:\n res.append(a[i])\n i += 1\n else: \n res.append(b[j])\n ###\n j += 1\n if i < len(a):\n res += a[i:]\n elif j < len(b):\n res += b[j:]\n return res\n\n\n\nmerge([2,4,6], [1,3,5], [0,0,0], [0,0,0])\n\nmergesort([5, 2, 6, 1])\n\n\"\"\"\n\nbase merge sort:\n\"\"\"\n\ndef mergesort(x):\n if len(x)==0 or len(x)==1:\n return x\n else:\n mid = len(x)/2\n a = mergesort(x[:mid])\n b = mergesort(x[mid:])\n return merge(a,b)\n\ndef merge(a,b, left, right): # left, right will keep track of inversions so far\n res = []\n while len(a)>0 and len(b)>0:\n if a[0] < b[0]:\n res += [a[0]]\n a = 
a[1:]\n else:\n res += [b[0]]\n b = b[1:]\n if len(a) > 0:\n res += a\n if len(b) > 0:\n res += b\n return res\n",
"step-ids": [
6,
7,
8,
9,
10
]
}
|
[
6,
7,
8,
9,
10
] |
import random
def createRandomPhoneNumber():
phoneNumberFront = ['130', '131', '132', '133', '134', '135', '136',
'137', '138', '139', '150', '151', '152', '153', '158', '159',
'177', '180', '181', '182', '183', '186', '188', '189']
phoneNumberBack = []
for i in range(8):
phoneNumberBack.append(str(random.randint(0, 9)))
return random.choice(phoneNumberFront) + ''.join(phoneNumberBack)
|
normal
|
{
"blob_id": "5e8f9a222fb2c35b4720e48f0277481e410aee47",
"index": 2791,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef createRandomPhoneNumber():\n phoneNumberFront = ['130', '131', '132', '133', '134', '135', '136',\n '137', '138', '139', '150', '151', '152', '153', '158', '159',\n '177', '180', '181', '182', '183', '186', '188', '189']\n phoneNumberBack = []\n for i in range(8):\n phoneNumberBack.append(str(random.randint(0, 9)))\n return random.choice(phoneNumberFront) + ''.join(phoneNumberBack)\n",
"step-3": "import random\n\n\ndef createRandomPhoneNumber():\n phoneNumberFront = ['130', '131', '132', '133', '134', '135', '136',\n '137', '138', '139', '150', '151', '152', '153', '158', '159',\n '177', '180', '181', '182', '183', '186', '188', '189']\n phoneNumberBack = []\n for i in range(8):\n phoneNumberBack.append(str(random.randint(0, 9)))\n return random.choice(phoneNumberFront) + ''.join(phoneNumberBack)\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
import sys, getopt
import sys, locale
import httplib
import json
#sys.argv = [sys.argv[0], '--id=275', '--ofile=275.json']
def getRouteId(routeName, out_filename):
conn = httplib.HTTPConnection("data.ntpc.gov.tw")
qryString = "/od/data/api/67BB3C2B-E7D1-43A7-B872-61B2F082E11B?$format=json&$filter=nameZh%20eq%20" + routeName
conn.request("GET",qryString.encode('utf8'))
response = conn.getresponse()
print response.status, response.reason
data = response.read()
print len(data)
ofile = open(out_filename, "w")
ofile.write(data)
ofile.close()
def main(argv):
route_id = ''
outputfile = ''
try:
opts, args = getopt.getopt(argv,"hi:o:",["id=","ofile="])
except getopt.GetoptError:
print 'cliGetRouteID.py -i <route id> -o <outputfile>'
sys.exit(2)
for opt, arg in opts:
if opt == '-h':
print 'cliGetRouteID.py -i <route id> -o <outputfile>'
sys.exit()
elif opt in ("-i", "--id"):
route_id = arg
elif opt in ("-o", "--ofile"):
outputfile = arg
getRouteId(route_id, outputfile)
print 'Route ID is', route_id
print 'Output file is', outputfile
if __name__ == "__main__":
main(sys.argv[1:])
|
normal
|
{
"blob_id": "87c413051ed38b52fbcc0b0cf84ecd75cd1e3f0c",
"index": 3139,
"step-1": "import sys, getopt\nimport sys, locale\nimport httplib\nimport json\n\n#sys.argv = [sys.argv[0], '--id=275', '--ofile=275.json']\n\ndef getRouteId(routeName, out_filename):\n conn = httplib.HTTPConnection(\"data.ntpc.gov.tw\")\n qryString = \"/od/data/api/67BB3C2B-E7D1-43A7-B872-61B2F082E11B?$format=json&$filter=nameZh%20eq%20\" + routeName\n conn.request(\"GET\",qryString.encode('utf8'))\n response = conn.getresponse()\n print response.status, response.reason\n\n data = response.read()\n print len(data)\n\n ofile = open(out_filename, \"w\")\n ofile.write(data)\n ofile.close()\n \ndef main(argv):\n route_id = ''\n outputfile = ''\n try:\n opts, args = getopt.getopt(argv,\"hi:o:\",[\"id=\",\"ofile=\"])\n except getopt.GetoptError:\n print 'cliGetRouteID.py -i <route id> -o <outputfile>'\n sys.exit(2)\n for opt, arg in opts:\n if opt == '-h':\n print 'cliGetRouteID.py -i <route id> -o <outputfile>'\n sys.exit()\n elif opt in (\"-i\", \"--id\"):\n route_id = arg \n elif opt in (\"-o\", \"--ofile\"):\n outputfile = arg\n\n getRouteId(route_id, outputfile)\n print 'Route ID is', route_id\n print 'Output file is', outputfile\n\nif __name__ == \"__main__\":\n main(sys.argv[1:])\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
#!/usr/bin/python
from cagd.polyline import polyline
from cagd.spline import spline, knots
from cagd.vec import vec2
import cagd.scene_2d as scene_2d
from math import sin,cos,pi, sqrt
#returns a list of num_samples points that are uniformly distributed on the unit circle
def unit_circle_points(num_samples):
a = 2*pi/num_samples
return [vec2(cos(a*i), sin(a*i)) for i in range(num_samples)]
#calculates the deviation between the given spline and a unit circle
#the Manhattan Metrics is chosen
def calculate_circle_deviation(spline):
ideal_d = 1.0
center_x = 0.0
center_y = 0.0
deviation = 0.0
for p in spline.control_points:
deviation += sqrt((p.x - center_x)**2 + (p.y - center_y)**2)
deviation /= len(spline.control_points)
deviation -= ideal_d
return deviation
#interpolate 6 points with a periodic spline to create the number "8"
pts = [vec2( 0, 2.5), vec2(-1, 1), vec2( 1,-1), vec2( 0,-2.5), vec2(-1,-1), vec2(1,1)]
s = spline.interpolate_cubic_periodic(pts)
p = s.get_polyline_from_control_points()
p.set_color("blue")
sc = scene_2d.scene()
sc.set_resolution(900)
sc.add_element(s)
sc.add_element(p)
#generate a spline that approximates the unit circle
n = 100
circle_pts = unit_circle_points(n)
circle = spline.interpolate_cubic_periodic(circle_pts)
p_circle = circle.get_polyline_from_control_points()
#sc.add_element(circle)
#sc.add_element(p_circle)
p_circle.set_color("blue")
error = calculate_circle_deviation(circle)
print("The error is: " + str(error))
sc.write_image()
sc.show()
|
normal
|
{
"blob_id": "35e61add90b5c12f94d5f8071f00d98316461dd6",
"index": 8497,
"step-1": "<mask token>\n\n\ndef unit_circle_points(num_samples):\n a = 2 * pi / num_samples\n return [vec2(cos(a * i), sin(a * i)) for i in range(num_samples)]\n\n\ndef calculate_circle_deviation(spline):\n ideal_d = 1.0\n center_x = 0.0\n center_y = 0.0\n deviation = 0.0\n for p in spline.control_points:\n deviation += sqrt((p.x - center_x) ** 2 + (p.y - center_y) ** 2)\n deviation /= len(spline.control_points)\n deviation -= ideal_d\n return deviation\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef unit_circle_points(num_samples):\n a = 2 * pi / num_samples\n return [vec2(cos(a * i), sin(a * i)) for i in range(num_samples)]\n\n\ndef calculate_circle_deviation(spline):\n ideal_d = 1.0\n center_x = 0.0\n center_y = 0.0\n deviation = 0.0\n for p in spline.control_points:\n deviation += sqrt((p.x - center_x) ** 2 + (p.y - center_y) ** 2)\n deviation /= len(spline.control_points)\n deviation -= ideal_d\n return deviation\n\n\n<mask token>\np.set_color('blue')\n<mask token>\nsc.set_resolution(900)\nsc.add_element(s)\nsc.add_element(p)\n<mask token>\np_circle.set_color('blue')\n<mask token>\nprint('The error is: ' + str(error))\nsc.write_image()\nsc.show()\n",
"step-3": "<mask token>\n\n\ndef unit_circle_points(num_samples):\n a = 2 * pi / num_samples\n return [vec2(cos(a * i), sin(a * i)) for i in range(num_samples)]\n\n\ndef calculate_circle_deviation(spline):\n ideal_d = 1.0\n center_x = 0.0\n center_y = 0.0\n deviation = 0.0\n for p in spline.control_points:\n deviation += sqrt((p.x - center_x) ** 2 + (p.y - center_y) ** 2)\n deviation /= len(spline.control_points)\n deviation -= ideal_d\n return deviation\n\n\npts = [vec2(0, 2.5), vec2(-1, 1), vec2(1, -1), vec2(0, -2.5), vec2(-1, -1),\n vec2(1, 1)]\ns = spline.interpolate_cubic_periodic(pts)\np = s.get_polyline_from_control_points()\np.set_color('blue')\nsc = scene_2d.scene()\nsc.set_resolution(900)\nsc.add_element(s)\nsc.add_element(p)\nn = 100\ncircle_pts = unit_circle_points(n)\ncircle = spline.interpolate_cubic_periodic(circle_pts)\np_circle = circle.get_polyline_from_control_points()\np_circle.set_color('blue')\nerror = calculate_circle_deviation(circle)\nprint('The error is: ' + str(error))\nsc.write_image()\nsc.show()\n",
"step-4": "from cagd.polyline import polyline\nfrom cagd.spline import spline, knots\nfrom cagd.vec import vec2\nimport cagd.scene_2d as scene_2d\nfrom math import sin, cos, pi, sqrt\n\n\ndef unit_circle_points(num_samples):\n a = 2 * pi / num_samples\n return [vec2(cos(a * i), sin(a * i)) for i in range(num_samples)]\n\n\ndef calculate_circle_deviation(spline):\n ideal_d = 1.0\n center_x = 0.0\n center_y = 0.0\n deviation = 0.0\n for p in spline.control_points:\n deviation += sqrt((p.x - center_x) ** 2 + (p.y - center_y) ** 2)\n deviation /= len(spline.control_points)\n deviation -= ideal_d\n return deviation\n\n\npts = [vec2(0, 2.5), vec2(-1, 1), vec2(1, -1), vec2(0, -2.5), vec2(-1, -1),\n vec2(1, 1)]\ns = spline.interpolate_cubic_periodic(pts)\np = s.get_polyline_from_control_points()\np.set_color('blue')\nsc = scene_2d.scene()\nsc.set_resolution(900)\nsc.add_element(s)\nsc.add_element(p)\nn = 100\ncircle_pts = unit_circle_points(n)\ncircle = spline.interpolate_cubic_periodic(circle_pts)\np_circle = circle.get_polyline_from_control_points()\np_circle.set_color('blue')\nerror = calculate_circle_deviation(circle)\nprint('The error is: ' + str(error))\nsc.write_image()\nsc.show()\n",
"step-5": "#!/usr/bin/python\n\nfrom cagd.polyline import polyline\nfrom cagd.spline import spline, knots\nfrom cagd.vec import vec2\nimport cagd.scene_2d as scene_2d\nfrom math import sin,cos,pi, sqrt\n\n#returns a list of num_samples points that are uniformly distributed on the unit circle\ndef unit_circle_points(num_samples):\n a = 2*pi/num_samples\n return [vec2(cos(a*i), sin(a*i)) for i in range(num_samples)]\n\n#calculates the deviation between the given spline and a unit circle\n#the Manhattan Metrics is chosen\ndef calculate_circle_deviation(spline):\n ideal_d = 1.0\n center_x = 0.0\n center_y = 0.0\n deviation = 0.0\n for p in spline.control_points:\n deviation += sqrt((p.x - center_x)**2 + (p.y - center_y)**2)\n deviation /= len(spline.control_points)\n deviation -= ideal_d\n return deviation\n\n\n#interpolate 6 points with a periodic spline to create the number \"8\"\npts = [vec2( 0, 2.5), vec2(-1, 1), vec2( 1,-1), vec2( 0,-2.5), vec2(-1,-1), vec2(1,1)]\ns = spline.interpolate_cubic_periodic(pts)\np = s.get_polyline_from_control_points()\np.set_color(\"blue\")\nsc = scene_2d.scene()\nsc.set_resolution(900)\nsc.add_element(s)\nsc.add_element(p)\n\n#generate a spline that approximates the unit circle\nn = 100\ncircle_pts = unit_circle_points(n)\ncircle = spline.interpolate_cubic_periodic(circle_pts)\np_circle = circle.get_polyline_from_control_points()\n#sc.add_element(circle)\n#sc.add_element(p_circle)\np_circle.set_color(\"blue\")\nerror = calculate_circle_deviation(circle)\nprint(\"The error is: \" + str(error))\n\nsc.write_image()\nsc.show()\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
# -*- coding: utf-8 -*-
c = int(input())
t = input()
m = []
for i in range(12):
aux = []
for j in range(12):
aux.append(float(input()))
m.append(aux)
aux = []
soma = 0
for i in range(12):
soma += m[i][c]
resultado = soma / (t == 'S' and 1 or 12)
print('%.1f' % resultado)
|
normal
|
{
"blob_id": "6edb1f99ca9af01f28322cbaf13f278e79b94e92",
"index": 5882,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor i in range(12):\n aux = []\n for j in range(12):\n aux.append(float(input()))\n m.append(aux)\n aux = []\n<mask token>\nfor i in range(12):\n soma += m[i][c]\n<mask token>\nprint('%.1f' % resultado)\n",
"step-3": "c = int(input())\nt = input()\nm = []\nfor i in range(12):\n aux = []\n for j in range(12):\n aux.append(float(input()))\n m.append(aux)\n aux = []\nsoma = 0\nfor i in range(12):\n soma += m[i][c]\nresultado = soma / (t == 'S' and 1 or 12)\nprint('%.1f' % resultado)\n",
"step-4": "# -*- coding: utf-8 -*-\n\nc = int(input())\nt = input()\nm = []\n\nfor i in range(12):\n aux = []\n for j in range(12):\n aux.append(float(input()))\n m.append(aux)\n aux = []\n\nsoma = 0\nfor i in range(12):\n soma += m[i][c]\n\nresultado = soma / (t == 'S' and 1 or 12)\nprint('%.1f' % resultado)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
# Generated by Django 2.2.2 on 2019-10-19 14:09
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('account', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='account',
name='phone_number',
field=models.CharField(max_length=15, verbose_name='phone number'),
),
]
|
normal
|
{
"blob_id": "7d25a8eb61b6fb9069616745c2b68fd3ceeca9fb",
"index": 6600,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('account', '0001_initial')]\n operations = [migrations.AlterField(model_name='account', name=\n 'phone_number', field=models.CharField(max_length=15, verbose_name=\n 'phone number'))]\n",
"step-4": "from django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n dependencies = [('account', '0001_initial')]\n operations = [migrations.AlterField(model_name='account', name=\n 'phone_number', field=models.CharField(max_length=15, verbose_name=\n 'phone number'))]\n",
"step-5": "# Generated by Django 2.2.2 on 2019-10-19 14:09\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('account', '0001_initial'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='account',\n name='phone_number',\n field=models.CharField(max_length=15, verbose_name='phone number'),\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
class PBAR(object):
<|reserved_special_token_0|>
def __init__(self, model):
"""
Defines the PCOMP object.
:param self: the PCOMP object
:param model: the BDF object
:param cards: the list of PCOMP cards
"""
self.model = model
self.n = 0
self._cards = []
self._comments = []
<|reserved_special_token_0|>
def build(self):
cards = self._cards
ncards = len(cards)
self.n = ncards
if ncards:
self.property_id = zeros(ncards, 'int32')
self.material_id = zeros(ncards, 'int32')
self.area = zeros(ncards, 'float64')
self.I1 = zeros(ncards, 'float64')
self.I2 = zeros(ncards, 'float64')
self.J = zeros(ncards, 'float64')
self.nsm = zeros(ncards, 'float64')
for i, card in enumerate(cards):
self.property_id[i] = integer(card, 1, 'property_id')
self.material_id[i] = integer(card, 2, 'material_id')
self.area[i] = double_or_blank(card, 3, 'area', 0.0)
self.I1[i] = double_or_blank(card, 4, 'I1', 0.0)
self.I2[i] = double_or_blank(card, 5, 'I2', 0.0)
Jdefault = 0.5 * (self.I1[i] + self.I2[i])
self.J[i] = double_or_blank(card, 6, 'J', Jdefault)
self.nsm[i] = double_or_blank(card, 7,
'non-structural_mass', 0.0)
if 0:
self.C1 = double_or_blank(card, 9, 'C1', 0.0)
self.C2 = double_or_blank(card, 10, 'C2', 0.0)
self.D1 = double_or_blank(card, 11, 'D1', 0.0)
self.D2 = double_or_blank(card, 12, 'D2', 0.0)
self.E1 = double_or_blank(card, 13, 'E1', 0.0)
self.E2 = double_or_blank(card, 14, 'E2', 0.0)
self.F1 = double_or_blank(card, 15, 'F1', 0.0)
self.F2 = double_or_blank(card, 16, 'F2', 0.0)
self.K1 = double_or_blank(card, 17, 'K1', 100000000.0)
self.K2 = double_or_blank(card, 18, 'K2', 100000000.0)
self.i12 = double_or_blank(card, 19, 'I12', 0.0)
if self.A == 0.0 and self.i12 == 0.0:
assert self.K1 is None, 'K1 must be blank if A=0.0 and I12=0.0; A=%r I12=%r K1=%r' % (
self.A, self.i12, self.K1)
assert self.K2 is None, 'K2 must be blank if A=0.0 and I12=0.0; A=%r I12=%r K2=%r' % (
self.A, self.i12, self.K2)
assert len(card) <= 20, 'len(PBAR card) = %i' % len(card)
i = self.property_id.argsort()
self.property_id = self.property_id[i]
self.material_id = self.material_id[i]
self.area = self.area[i]
self.I1 = self.I1[i]
self.I2 = self.I2[i]
self.J = self.J[i]
self.nsm = self.nsm[i]
unique_pids = unique(self.property_id)
if len(unique_pids) != len(self.property_id):
raise RuntimeError('There are duplicate PCOMP IDs...')
self._cards = []
self._comments = []
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class PBAR(object):
<|reserved_special_token_0|>
def __init__(self, model):
"""
Defines the PCOMP object.
:param self: the PCOMP object
:param model: the BDF object
:param cards: the list of PCOMP cards
"""
self.model = model
self.n = 0
self._cards = []
self._comments = []
def add(self, card, comment):
self._cards.append(card)
self._comments.append(comment)
def build(self):
cards = self._cards
ncards = len(cards)
self.n = ncards
if ncards:
self.property_id = zeros(ncards, 'int32')
self.material_id = zeros(ncards, 'int32')
self.area = zeros(ncards, 'float64')
self.I1 = zeros(ncards, 'float64')
self.I2 = zeros(ncards, 'float64')
self.J = zeros(ncards, 'float64')
self.nsm = zeros(ncards, 'float64')
for i, card in enumerate(cards):
self.property_id[i] = integer(card, 1, 'property_id')
self.material_id[i] = integer(card, 2, 'material_id')
self.area[i] = double_or_blank(card, 3, 'area', 0.0)
self.I1[i] = double_or_blank(card, 4, 'I1', 0.0)
self.I2[i] = double_or_blank(card, 5, 'I2', 0.0)
Jdefault = 0.5 * (self.I1[i] + self.I2[i])
self.J[i] = double_or_blank(card, 6, 'J', Jdefault)
self.nsm[i] = double_or_blank(card, 7,
'non-structural_mass', 0.0)
if 0:
self.C1 = double_or_blank(card, 9, 'C1', 0.0)
self.C2 = double_or_blank(card, 10, 'C2', 0.0)
self.D1 = double_or_blank(card, 11, 'D1', 0.0)
self.D2 = double_or_blank(card, 12, 'D2', 0.0)
self.E1 = double_or_blank(card, 13, 'E1', 0.0)
self.E2 = double_or_blank(card, 14, 'E2', 0.0)
self.F1 = double_or_blank(card, 15, 'F1', 0.0)
self.F2 = double_or_blank(card, 16, 'F2', 0.0)
self.K1 = double_or_blank(card, 17, 'K1', 100000000.0)
self.K2 = double_or_blank(card, 18, 'K2', 100000000.0)
self.i12 = double_or_blank(card, 19, 'I12', 0.0)
if self.A == 0.0 and self.i12 == 0.0:
assert self.K1 is None, 'K1 must be blank if A=0.0 and I12=0.0; A=%r I12=%r K1=%r' % (
self.A, self.i12, self.K1)
assert self.K2 is None, 'K2 must be blank if A=0.0 and I12=0.0; A=%r I12=%r K2=%r' % (
self.A, self.i12, self.K2)
assert len(card) <= 20, 'len(PBAR card) = %i' % len(card)
i = self.property_id.argsort()
self.property_id = self.property_id[i]
self.material_id = self.material_id[i]
self.area = self.area[i]
self.I1 = self.I1[i]
self.I2 = self.I2[i]
self.J = self.J[i]
self.nsm = self.nsm[i]
unique_pids = unique(self.property_id)
if len(unique_pids) != len(self.property_id):
raise RuntimeError('There are duplicate PCOMP IDs...')
self._cards = []
self._comments = []
def get_index(self, property_ids):
if isinstance(property_ids, int):
property_ids = array([property_ids])
if property_ids is None:
return arange(self.n)
indexs = searchsorted(self.property_id, property_ids)
assert len(indexs) == len(property_ids), 'indexs=%s pids=%s' % (indexs,
property_ids)
return indexs
def write_bdf(self, f, size=8, property_ids=None):
if self.n:
if property_ids is None:
i = arange(self.n)
else:
i = searchsorted(self.property_id, property_ids)
for pid, mid, area, I1, I2, J in zip(self.property_id[i], self.
material_id[i], self.area[i], self.I1[i], self.I2[i], self.J[i]
):
card = ['PBAR', pid, mid, area, I1, I2, J]
f.write(print_card_8(card))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class PBAR(object):
type = 'PBAR'
def __init__(self, model):
"""
Defines the PCOMP object.
:param self: the PCOMP object
:param model: the BDF object
:param cards: the list of PCOMP cards
"""
self.model = model
self.n = 0
self._cards = []
self._comments = []
def add(self, card, comment):
self._cards.append(card)
self._comments.append(comment)
def build(self):
cards = self._cards
ncards = len(cards)
self.n = ncards
if ncards:
self.property_id = zeros(ncards, 'int32')
self.material_id = zeros(ncards, 'int32')
self.area = zeros(ncards, 'float64')
self.I1 = zeros(ncards, 'float64')
self.I2 = zeros(ncards, 'float64')
self.J = zeros(ncards, 'float64')
self.nsm = zeros(ncards, 'float64')
for i, card in enumerate(cards):
self.property_id[i] = integer(card, 1, 'property_id')
self.material_id[i] = integer(card, 2, 'material_id')
self.area[i] = double_or_blank(card, 3, 'area', 0.0)
self.I1[i] = double_or_blank(card, 4, 'I1', 0.0)
self.I2[i] = double_or_blank(card, 5, 'I2', 0.0)
Jdefault = 0.5 * (self.I1[i] + self.I2[i])
self.J[i] = double_or_blank(card, 6, 'J', Jdefault)
self.nsm[i] = double_or_blank(card, 7,
'non-structural_mass', 0.0)
if 0:
self.C1 = double_or_blank(card, 9, 'C1', 0.0)
self.C2 = double_or_blank(card, 10, 'C2', 0.0)
self.D1 = double_or_blank(card, 11, 'D1', 0.0)
self.D2 = double_or_blank(card, 12, 'D2', 0.0)
self.E1 = double_or_blank(card, 13, 'E1', 0.0)
self.E2 = double_or_blank(card, 14, 'E2', 0.0)
self.F1 = double_or_blank(card, 15, 'F1', 0.0)
self.F2 = double_or_blank(card, 16, 'F2', 0.0)
self.K1 = double_or_blank(card, 17, 'K1', 100000000.0)
self.K2 = double_or_blank(card, 18, 'K2', 100000000.0)
self.i12 = double_or_blank(card, 19, 'I12', 0.0)
if self.A == 0.0 and self.i12 == 0.0:
assert self.K1 is None, 'K1 must be blank if A=0.0 and I12=0.0; A=%r I12=%r K1=%r' % (
self.A, self.i12, self.K1)
assert self.K2 is None, 'K2 must be blank if A=0.0 and I12=0.0; A=%r I12=%r K2=%r' % (
self.A, self.i12, self.K2)
assert len(card) <= 20, 'len(PBAR card) = %i' % len(card)
i = self.property_id.argsort()
self.property_id = self.property_id[i]
self.material_id = self.material_id[i]
self.area = self.area[i]
self.I1 = self.I1[i]
self.I2 = self.I2[i]
self.J = self.J[i]
self.nsm = self.nsm[i]
unique_pids = unique(self.property_id)
if len(unique_pids) != len(self.property_id):
raise RuntimeError('There are duplicate PCOMP IDs...')
self._cards = []
self._comments = []
def get_index(self, property_ids):
if isinstance(property_ids, int):
property_ids = array([property_ids])
if property_ids is None:
return arange(self.n)
indexs = searchsorted(self.property_id, property_ids)
assert len(indexs) == len(property_ids), 'indexs=%s pids=%s' % (indexs,
property_ids)
return indexs
def write_bdf(self, f, size=8, property_ids=None):
if self.n:
if property_ids is None:
i = arange(self.n)
else:
i = searchsorted(self.property_id, property_ids)
for pid, mid, area, I1, I2, J in zip(self.property_id[i], self.
material_id[i], self.area[i], self.I1[i], self.I2[i], self.J[i]
):
card = ['PBAR', pid, mid, area, I1, I2, J]
f.write(print_card_8(card))
<|reserved_special_token_1|>
from numpy import array, zeros, arange, concatenate, searchsorted, where, unique
from pyNastran.bdf.fieldWriter import print_card_8
from pyNastran.bdf.bdfInterface.assign_type import integer, integer_or_blank, double_or_blank, integer_double_or_blank, blank
class PBAR(object):
    """Vectorized storage for PBAR (simple beam property) cards.

    All queued cards are parsed by build() into parallel numpy arrays
    sorted by ``property_id`` so lookups can use binary search.
    """
    type = 'PBAR'

    def __init__(self, model):
        """
        Defines the PBAR object.

        :param self: the PBAR object
        :param model: the BDF object
        """
        self.model = model
        # number of parsed cards; stays 0 until build() runs
        self.n = 0
        self._cards = []
        self._comments = []

    def add(self, card, comment):
        """Queue one raw PBAR card (parsed later by build())."""
        self._cards.append(card)
        self._comments.append(comment)

    def build(self):
        """Parse every queued card into sorted parallel arrays."""
        cards = self._cards
        ncards = len(cards)
        self.n = ncards
        if ncards:
            self.property_id = zeros(ncards, 'int32')
            self.material_id = zeros(ncards, 'int32')
            self.area = zeros(ncards, 'float64')
            self.I1 = zeros(ncards, 'float64')
            self.I2 = zeros(ncards, 'float64')
            self.J = zeros(ncards, 'float64')
            self.nsm = zeros(ncards, 'float64')
            for i, card in enumerate(cards):
                self.property_id[i] = integer(card, 1, 'property_id')
                self.material_id[i] = integer(card, 2, 'material_id')
                self.area[i] = double_or_blank(card, 3, 'area', 0.0)
                self.I1[i] = double_or_blank(card, 4, 'I1', 0.0)
                self.I2[i] = double_or_blank(card, 5, 'I2', 0.0)
                # J defaults to (I1 + I2) / 2 -- presumably the SOL 600
                # convention; confirm against the Nastran QRG
                Jdefault = 0.5 * (self.I1[i] + self.I2[i])
                self.J[i] = double_or_blank(card, 6, 'J', Jdefault)
                self.nsm[i] = double_or_blank(card, 7,
                    'non-structural_mass', 0.0)
                # TODO: fields 9-19 (C1..F2, K1, K2, I12) are not parsed.
                # The old disabled (`if 0:`) block that read them was
                # unreachable and referenced the undefined attribute
                # self.A; it has been removed.
                assert len(card) <= 20, 'len(PBAR card) = %i' % len(card)
            # sort every array by property_id so get_index() can use
            # binary search
            i = self.property_id.argsort()
            self.property_id = self.property_id[i]
            self.material_id = self.material_id[i]
            self.area = self.area[i]
            self.I1 = self.I1[i]
            self.I2 = self.I2[i]
            self.J = self.J[i]
            self.nsm = self.nsm[i]
            unique_pids = unique(self.property_id)
            if len(unique_pids) != len(self.property_id):
                # BUGFIX: the message said PCOMP but this class holds PBARs
                raise RuntimeError('There are duplicate PBAR IDs...')
            self._cards = []
            self._comments = []

    def get_index(self, property_ids):
        """Return positions of *property_ids* in the sorted id array.

        ``None`` selects all rows; a bare int is treated as a single ID.
        """
        if isinstance(property_ids, int):
            property_ids = array([property_ids])
        if property_ids is None:
            return arange(self.n)
        indexs = searchsorted(self.property_id, property_ids)
        assert len(indexs) == len(property_ids), 'indexs=%s pids=%s' % (indexs, property_ids)
        return indexs

    def write_bdf(self, f, size=8, property_ids=None):
        """Write the stored PBAR cards to an open file-like object.

        :param f: writable file object
        :param size: field size (unused; print_card_8 is 8-character)
        :param property_ids: IDs to write, or None for all
        """
        if self.n:
            if property_ids is None:
                i = arange(self.n)
            else:
                i = searchsorted(self.property_id, property_ids)
            for (pid, mid, area, I1, I2, J, nsm) in zip(
                    self.property_id[i], self.material_id[i], self.area[i],
                    self.I1[i], self.I2[i], self.J[i], self.nsm[i]):
                # BUGFIX: nsm (field 7) was parsed but never written,
                # silently dropping non-structural mass on export
                card = ['PBAR', pid, mid, area, I1, I2, J, nsm]
                f.write(print_card_8(card))
<|reserved_special_token_1|>
from numpy import array, zeros, arange, concatenate, searchsorted, where, unique
from pyNastran.bdf.fieldWriter import print_card_8
from pyNastran.bdf.bdfInterface.assign_type import (integer, integer_or_blank,
double_or_blank, integer_double_or_blank, blank)
class PBAR(object):
    """Vectorized container for PBAR (simple beam property) cards.

    Cards queued via add() are parsed by build() into parallel numpy
    arrays sorted by ``property_id``, enabling searchsorted lookups.
    """
    type = 'PBAR'

    def __init__(self, model):
        """
        Defines the PBAR object.

        :param self: the PBAR object
        :param model: the BDF object
        """
        self.model = model
        # card count; 0 until build() has parsed the queue
        self.n = 0
        self._cards = []
        self._comments = []

    def add(self, card, comment):
        """Queue one raw PBAR card and its comment for build()."""
        self._cards.append(card)
        self._comments.append(comment)

    def build(self):
        """Parse the queued cards into sorted parallel arrays."""
        cards = self._cards
        ncards = len(cards)
        self.n = ncards
        if ncards:
            #: Property ID / Material ID / section properties
            self.property_id = zeros(ncards, 'int32')
            self.material_id = zeros(ncards, 'int32')
            self.area = zeros(ncards, 'float64')
            self.I1 = zeros(ncards, 'float64')
            self.I2 = zeros(ncards, 'float64')
            self.J = zeros(ncards, 'float64')
            self.nsm = zeros(ncards, 'float64')
            for i, card in enumerate(cards):
                self.property_id[i] = integer(card, 1, 'property_id')
                self.material_id[i] = integer(card, 2, 'material_id')
                self.area[i] = double_or_blank(card, 3, 'area', 0.0)
                self.I1[i] = double_or_blank(card, 4, 'I1', 0.0)
                self.I2[i] = double_or_blank(card, 5, 'I2', 0.0)
                #: Polar moment of inertia J
                #: default = 1/2 (I1 + I2) for SOL 600, otherwise 0.0
                #: .. todo:: support the non-SOL-600 default
                Jdefault = 0.5 * (self.I1[i] + self.I2[i])
                self.J[i] = double_or_blank(card, 6, 'J', Jdefault)
                self.nsm[i] = double_or_blank(card, 7,
                    'non-structural_mass', 0.0)
                # TODO: fields 9-19 (C1..F2, K1, K2, I12) are unsupported;
                # the unreachable `if 0:` block that read them (and
                # referenced the undefined self.A) was removed.
                assert len(card) <= 20, 'len(PBAR card) = %i' % len(card)
            # sort by property_id so get_index() can binary-search
            i = self.property_id.argsort()
            self.property_id = self.property_id[i]
            self.material_id = self.material_id[i]
            self.area = self.area[i]
            self.I1 = self.I1[i]
            self.I2 = self.I2[i]
            self.J = self.J[i]
            self.nsm = self.nsm[i]
            unique_pids = unique(self.property_id)
            if len(unique_pids) != len(self.property_id):
                # BUGFIX: message previously said "PCOMP" in this PBAR class
                raise RuntimeError('There are duplicate PBAR IDs...')
            self._cards = []
            self._comments = []

    #=========================================================================
    def get_index(self, property_ids):
        """Return row positions of *property_ids*; None selects all rows."""
        if isinstance(property_ids, int):
            property_ids = array([property_ids])
        if property_ids is None:
            return arange(self.n)
        indexs = searchsorted(self.property_id, property_ids)
        assert len(indexs) == len(property_ids), 'indexs=%s pids=%s' % (indexs, property_ids)
        return indexs

    #=========================================================================
    def write_bdf(self, f, size=8, property_ids=None):
        """Write the selected (or all) PBAR cards to *f*."""
        if self.n:
            if property_ids is None:
                i = arange(self.n)
            else:
                i = searchsorted(self.property_id, property_ids)
            for (pid, mid, area, I1, I2, J, nsm) in zip(
                    self.property_id[i], self.material_id[i], self.area[i],
                    self.I1[i], self.I2[i], self.J[i], self.nsm[i]):
                # BUGFIX: include nsm (field 7); it was stored but never
                # written, losing non-structural mass on export
                card = ['PBAR', pid, mid, area, I1, I2, J, nsm]
                f.write(print_card_8(card))
|
flexible
|
{
"blob_id": "8f960ad465d0a7bf48752db35c73169be6da27d8",
"index": 9092,
"step-1": "<mask token>\n\n\nclass PBAR(object):\n <mask token>\n\n def __init__(self, model):\n \"\"\"\n Defines the PCOMP object.\n\n :param self: the PCOMP object\n :param model: the BDF object\n :param cards: the list of PCOMP cards\n \"\"\"\n self.model = model\n self.n = 0\n self._cards = []\n self._comments = []\n <mask token>\n\n def build(self):\n cards = self._cards\n ncards = len(cards)\n self.n = ncards\n if ncards:\n self.property_id = zeros(ncards, 'int32')\n self.material_id = zeros(ncards, 'int32')\n self.area = zeros(ncards, 'float64')\n self.I1 = zeros(ncards, 'float64')\n self.I2 = zeros(ncards, 'float64')\n self.J = zeros(ncards, 'float64')\n self.nsm = zeros(ncards, 'float64')\n for i, card in enumerate(cards):\n self.property_id[i] = integer(card, 1, 'property_id')\n self.material_id[i] = integer(card, 2, 'material_id')\n self.area[i] = double_or_blank(card, 3, 'area', 0.0)\n self.I1[i] = double_or_blank(card, 4, 'I1', 0.0)\n self.I2[i] = double_or_blank(card, 5, 'I2', 0.0)\n Jdefault = 0.5 * (self.I1[i] + self.I2[i])\n self.J[i] = double_or_blank(card, 6, 'J', Jdefault)\n self.nsm[i] = double_or_blank(card, 7,\n 'non-structural_mass', 0.0)\n if 0:\n self.C1 = double_or_blank(card, 9, 'C1', 0.0)\n self.C2 = double_or_blank(card, 10, 'C2', 0.0)\n self.D1 = double_or_blank(card, 11, 'D1', 0.0)\n self.D2 = double_or_blank(card, 12, 'D2', 0.0)\n self.E1 = double_or_blank(card, 13, 'E1', 0.0)\n self.E2 = double_or_blank(card, 14, 'E2', 0.0)\n self.F1 = double_or_blank(card, 15, 'F1', 0.0)\n self.F2 = double_or_blank(card, 16, 'F2', 0.0)\n self.K1 = double_or_blank(card, 17, 'K1', 100000000.0)\n self.K2 = double_or_blank(card, 18, 'K2', 100000000.0)\n self.i12 = double_or_blank(card, 19, 'I12', 0.0)\n if self.A == 0.0 and self.i12 == 0.0:\n assert self.K1 is None, 'K1 must be blank if A=0.0 and I12=0.0; A=%r I12=%r K1=%r' % (\n self.A, self.i12, self.K1)\n assert self.K2 is None, 'K2 must be blank if A=0.0 and I12=0.0; A=%r I12=%r K2=%r' % (\n 
self.A, self.i12, self.K2)\n assert len(card) <= 20, 'len(PBAR card) = %i' % len(card)\n i = self.property_id.argsort()\n self.property_id = self.property_id[i]\n self.material_id = self.material_id[i]\n self.area = self.area[i]\n self.I1 = self.I1[i]\n self.I2 = self.I2[i]\n self.J = self.J[i]\n self.nsm = self.nsm[i]\n unique_pids = unique(self.property_id)\n if len(unique_pids) != len(self.property_id):\n raise RuntimeError('There are duplicate PCOMP IDs...')\n self._cards = []\n self._comments = []\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass PBAR(object):\n <mask token>\n\n def __init__(self, model):\n \"\"\"\n Defines the PCOMP object.\n\n :param self: the PCOMP object\n :param model: the BDF object\n :param cards: the list of PCOMP cards\n \"\"\"\n self.model = model\n self.n = 0\n self._cards = []\n self._comments = []\n\n def add(self, card, comment):\n self._cards.append(card)\n self._comments.append(comment)\n\n def build(self):\n cards = self._cards\n ncards = len(cards)\n self.n = ncards\n if ncards:\n self.property_id = zeros(ncards, 'int32')\n self.material_id = zeros(ncards, 'int32')\n self.area = zeros(ncards, 'float64')\n self.I1 = zeros(ncards, 'float64')\n self.I2 = zeros(ncards, 'float64')\n self.J = zeros(ncards, 'float64')\n self.nsm = zeros(ncards, 'float64')\n for i, card in enumerate(cards):\n self.property_id[i] = integer(card, 1, 'property_id')\n self.material_id[i] = integer(card, 2, 'material_id')\n self.area[i] = double_or_blank(card, 3, 'area', 0.0)\n self.I1[i] = double_or_blank(card, 4, 'I1', 0.0)\n self.I2[i] = double_or_blank(card, 5, 'I2', 0.0)\n Jdefault = 0.5 * (self.I1[i] + self.I2[i])\n self.J[i] = double_or_blank(card, 6, 'J', Jdefault)\n self.nsm[i] = double_or_blank(card, 7,\n 'non-structural_mass', 0.0)\n if 0:\n self.C1 = double_or_blank(card, 9, 'C1', 0.0)\n self.C2 = double_or_blank(card, 10, 'C2', 0.0)\n self.D1 = double_or_blank(card, 11, 'D1', 0.0)\n self.D2 = double_or_blank(card, 12, 'D2', 0.0)\n self.E1 = double_or_blank(card, 13, 'E1', 0.0)\n self.E2 = double_or_blank(card, 14, 'E2', 0.0)\n self.F1 = double_or_blank(card, 15, 'F1', 0.0)\n self.F2 = double_or_blank(card, 16, 'F2', 0.0)\n self.K1 = double_or_blank(card, 17, 'K1', 100000000.0)\n self.K2 = double_or_blank(card, 18, 'K2', 100000000.0)\n self.i12 = double_or_blank(card, 19, 'I12', 0.0)\n if self.A == 0.0 and self.i12 == 0.0:\n assert self.K1 is None, 'K1 must be blank if A=0.0 and I12=0.0; A=%r I12=%r K1=%r' % (\n self.A, self.i12, self.K1)\n assert self.K2 
is None, 'K2 must be blank if A=0.0 and I12=0.0; A=%r I12=%r K2=%r' % (\n self.A, self.i12, self.K2)\n assert len(card) <= 20, 'len(PBAR card) = %i' % len(card)\n i = self.property_id.argsort()\n self.property_id = self.property_id[i]\n self.material_id = self.material_id[i]\n self.area = self.area[i]\n self.I1 = self.I1[i]\n self.I2 = self.I2[i]\n self.J = self.J[i]\n self.nsm = self.nsm[i]\n unique_pids = unique(self.property_id)\n if len(unique_pids) != len(self.property_id):\n raise RuntimeError('There are duplicate PCOMP IDs...')\n self._cards = []\n self._comments = []\n\n def get_index(self, property_ids):\n if isinstance(property_ids, int):\n property_ids = array([property_ids])\n if property_ids is None:\n return arange(self.n)\n indexs = searchsorted(self.property_id, property_ids)\n assert len(indexs) == len(property_ids), 'indexs=%s pids=%s' % (indexs,\n property_ids)\n return indexs\n\n def write_bdf(self, f, size=8, property_ids=None):\n if self.n:\n if property_ids is None:\n i = arange(self.n)\n else:\n i = searchsorted(self.property_id, property_ids)\n for pid, mid, area, I1, I2, J in zip(self.property_id[i], self.\n material_id[i], self.area[i], self.I1[i], self.I2[i], self.J[i]\n ):\n card = ['PBAR', pid, mid, area, I1, I2, J]\n f.write(print_card_8(card))\n",
"step-3": "<mask token>\n\n\nclass PBAR(object):\n type = 'PBAR'\n\n def __init__(self, model):\n \"\"\"\n Defines the PCOMP object.\n\n :param self: the PCOMP object\n :param model: the BDF object\n :param cards: the list of PCOMP cards\n \"\"\"\n self.model = model\n self.n = 0\n self._cards = []\n self._comments = []\n\n def add(self, card, comment):\n self._cards.append(card)\n self._comments.append(comment)\n\n def build(self):\n cards = self._cards\n ncards = len(cards)\n self.n = ncards\n if ncards:\n self.property_id = zeros(ncards, 'int32')\n self.material_id = zeros(ncards, 'int32')\n self.area = zeros(ncards, 'float64')\n self.I1 = zeros(ncards, 'float64')\n self.I2 = zeros(ncards, 'float64')\n self.J = zeros(ncards, 'float64')\n self.nsm = zeros(ncards, 'float64')\n for i, card in enumerate(cards):\n self.property_id[i] = integer(card, 1, 'property_id')\n self.material_id[i] = integer(card, 2, 'material_id')\n self.area[i] = double_or_blank(card, 3, 'area', 0.0)\n self.I1[i] = double_or_blank(card, 4, 'I1', 0.0)\n self.I2[i] = double_or_blank(card, 5, 'I2', 0.0)\n Jdefault = 0.5 * (self.I1[i] + self.I2[i])\n self.J[i] = double_or_blank(card, 6, 'J', Jdefault)\n self.nsm[i] = double_or_blank(card, 7,\n 'non-structural_mass', 0.0)\n if 0:\n self.C1 = double_or_blank(card, 9, 'C1', 0.0)\n self.C2 = double_or_blank(card, 10, 'C2', 0.0)\n self.D1 = double_or_blank(card, 11, 'D1', 0.0)\n self.D2 = double_or_blank(card, 12, 'D2', 0.0)\n self.E1 = double_or_blank(card, 13, 'E1', 0.0)\n self.E2 = double_or_blank(card, 14, 'E2', 0.0)\n self.F1 = double_or_blank(card, 15, 'F1', 0.0)\n self.F2 = double_or_blank(card, 16, 'F2', 0.0)\n self.K1 = double_or_blank(card, 17, 'K1', 100000000.0)\n self.K2 = double_or_blank(card, 18, 'K2', 100000000.0)\n self.i12 = double_or_blank(card, 19, 'I12', 0.0)\n if self.A == 0.0 and self.i12 == 0.0:\n assert self.K1 is None, 'K1 must be blank if A=0.0 and I12=0.0; A=%r I12=%r K1=%r' % (\n self.A, self.i12, self.K1)\n assert self.K2 
is None, 'K2 must be blank if A=0.0 and I12=0.0; A=%r I12=%r K2=%r' % (\n self.A, self.i12, self.K2)\n assert len(card) <= 20, 'len(PBAR card) = %i' % len(card)\n i = self.property_id.argsort()\n self.property_id = self.property_id[i]\n self.material_id = self.material_id[i]\n self.area = self.area[i]\n self.I1 = self.I1[i]\n self.I2 = self.I2[i]\n self.J = self.J[i]\n self.nsm = self.nsm[i]\n unique_pids = unique(self.property_id)\n if len(unique_pids) != len(self.property_id):\n raise RuntimeError('There are duplicate PCOMP IDs...')\n self._cards = []\n self._comments = []\n\n def get_index(self, property_ids):\n if isinstance(property_ids, int):\n property_ids = array([property_ids])\n if property_ids is None:\n return arange(self.n)\n indexs = searchsorted(self.property_id, property_ids)\n assert len(indexs) == len(property_ids), 'indexs=%s pids=%s' % (indexs,\n property_ids)\n return indexs\n\n def write_bdf(self, f, size=8, property_ids=None):\n if self.n:\n if property_ids is None:\n i = arange(self.n)\n else:\n i = searchsorted(self.property_id, property_ids)\n for pid, mid, area, I1, I2, J in zip(self.property_id[i], self.\n material_id[i], self.area[i], self.I1[i], self.I2[i], self.J[i]\n ):\n card = ['PBAR', pid, mid, area, I1, I2, J]\n f.write(print_card_8(card))\n",
"step-4": "from numpy import array, zeros, arange, concatenate, searchsorted, where, unique\nfrom pyNastran.bdf.fieldWriter import print_card_8\nfrom pyNastran.bdf.bdfInterface.assign_type import integer, integer_or_blank, double_or_blank, integer_double_or_blank, blank\n\n\nclass PBAR(object):\n type = 'PBAR'\n\n def __init__(self, model):\n \"\"\"\n Defines the PCOMP object.\n\n :param self: the PCOMP object\n :param model: the BDF object\n :param cards: the list of PCOMP cards\n \"\"\"\n self.model = model\n self.n = 0\n self._cards = []\n self._comments = []\n\n def add(self, card, comment):\n self._cards.append(card)\n self._comments.append(comment)\n\n def build(self):\n cards = self._cards\n ncards = len(cards)\n self.n = ncards\n if ncards:\n self.property_id = zeros(ncards, 'int32')\n self.material_id = zeros(ncards, 'int32')\n self.area = zeros(ncards, 'float64')\n self.I1 = zeros(ncards, 'float64')\n self.I2 = zeros(ncards, 'float64')\n self.J = zeros(ncards, 'float64')\n self.nsm = zeros(ncards, 'float64')\n for i, card in enumerate(cards):\n self.property_id[i] = integer(card, 1, 'property_id')\n self.material_id[i] = integer(card, 2, 'material_id')\n self.area[i] = double_or_blank(card, 3, 'area', 0.0)\n self.I1[i] = double_or_blank(card, 4, 'I1', 0.0)\n self.I2[i] = double_or_blank(card, 5, 'I2', 0.0)\n Jdefault = 0.5 * (self.I1[i] + self.I2[i])\n self.J[i] = double_or_blank(card, 6, 'J', Jdefault)\n self.nsm[i] = double_or_blank(card, 7,\n 'non-structural_mass', 0.0)\n if 0:\n self.C1 = double_or_blank(card, 9, 'C1', 0.0)\n self.C2 = double_or_blank(card, 10, 'C2', 0.0)\n self.D1 = double_or_blank(card, 11, 'D1', 0.0)\n self.D2 = double_or_blank(card, 12, 'D2', 0.0)\n self.E1 = double_or_blank(card, 13, 'E1', 0.0)\n self.E2 = double_or_blank(card, 14, 'E2', 0.0)\n self.F1 = double_or_blank(card, 15, 'F1', 0.0)\n self.F2 = double_or_blank(card, 16, 'F2', 0.0)\n self.K1 = double_or_blank(card, 17, 'K1', 100000000.0)\n self.K2 = double_or_blank(card, 
18, 'K2', 100000000.0)\n self.i12 = double_or_blank(card, 19, 'I12', 0.0)\n if self.A == 0.0 and self.i12 == 0.0:\n assert self.K1 is None, 'K1 must be blank if A=0.0 and I12=0.0; A=%r I12=%r K1=%r' % (\n self.A, self.i12, self.K1)\n assert self.K2 is None, 'K2 must be blank if A=0.0 and I12=0.0; A=%r I12=%r K2=%r' % (\n self.A, self.i12, self.K2)\n assert len(card) <= 20, 'len(PBAR card) = %i' % len(card)\n i = self.property_id.argsort()\n self.property_id = self.property_id[i]\n self.material_id = self.material_id[i]\n self.area = self.area[i]\n self.I1 = self.I1[i]\n self.I2 = self.I2[i]\n self.J = self.J[i]\n self.nsm = self.nsm[i]\n unique_pids = unique(self.property_id)\n if len(unique_pids) != len(self.property_id):\n raise RuntimeError('There are duplicate PCOMP IDs...')\n self._cards = []\n self._comments = []\n\n def get_index(self, property_ids):\n if isinstance(property_ids, int):\n property_ids = array([property_ids])\n if property_ids is None:\n return arange(self.n)\n indexs = searchsorted(self.property_id, property_ids)\n assert len(indexs) == len(property_ids), 'indexs=%s pids=%s' % (indexs,\n property_ids)\n return indexs\n\n def write_bdf(self, f, size=8, property_ids=None):\n if self.n:\n if property_ids is None:\n i = arange(self.n)\n else:\n i = searchsorted(self.property_id, property_ids)\n for pid, mid, area, I1, I2, J in zip(self.property_id[i], self.\n material_id[i], self.area[i], self.I1[i], self.I2[i], self.J[i]\n ):\n card = ['PBAR', pid, mid, area, I1, I2, J]\n f.write(print_card_8(card))\n",
"step-5": "from numpy import array, zeros, arange, concatenate, searchsorted, where, unique\n\nfrom pyNastran.bdf.fieldWriter import print_card_8\nfrom pyNastran.bdf.bdfInterface.assign_type import (integer, integer_or_blank,\n double_or_blank, integer_double_or_blank, blank)\n\n\nclass PBAR(object):\n type = 'PBAR'\n def __init__(self, model):\n \"\"\"\n Defines the PCOMP object.\n\n :param self: the PCOMP object\n :param model: the BDF object\n :param cards: the list of PCOMP cards\n \"\"\"\n self.model = model\n self.n = 0\n self._cards = []\n self._comments = []\n\n def add(self, card, comment):\n self._cards.append(card)\n self._comments.append(comment)\n\n def build(self):\n cards = self._cards\n ncards = len(cards)\n self.n = ncards\n\n if ncards:\n #: Property ID\n self.property_id = zeros(ncards, 'int32')\n self.material_id = zeros(ncards, 'int32')\n self.area = zeros(ncards, 'float64')\n self.I1 = zeros(ncards, 'float64')\n self.I2 = zeros(ncards, 'float64')\n self.J = zeros(ncards, 'float64')\n self.nsm = zeros(ncards, 'float64')\n\n for i, card in enumerate(cards):\n #: property ID\n self.property_id[i] = integer(card, 1, 'property_id')\n\n #: material ID\n self.material_id[i] = integer(card, 2, 'material_id')\n\n\n #: material ID\n self.area[i] = double_or_blank(card, 3, 'area', 0.0)\n\n #: I1\n self.I1[i] = double_or_blank(card, 4, 'I1', 0.0)\n\n #: I2\n self.I2[i] = double_or_blank(card, 5, 'I2', 0.0)\n\n #: Polar Moment of Inertia J -> use J()\n #: default=1/2(I1+I2) for SOL=600, otherwise 0.0\n #: .. 
todo:: support SOL 600 default\n\n Jdefault = 0.5 * (self.I1[i] + self.I2[i])\n self.J[i] = double_or_blank(card, 6, 'J', Jdefault)\n self.nsm[i] = double_or_blank(card, 7, 'non-structural_mass', 0.0)\n\n if 0:\n self.C1 = double_or_blank(card, 9, 'C1', 0.0)\n self.C2 = double_or_blank(card, 10, 'C2', 0.0)\n self.D1 = double_or_blank(card, 11, 'D1', 0.0)\n self.D2 = double_or_blank(card, 12, 'D2', 0.0)\n self.E1 = double_or_blank(card, 13, 'E1', 0.0)\n self.E2 = double_or_blank(card, 14, 'E2', 0.0)\n self.F1 = double_or_blank(card, 15, 'F1', 0.0)\n self.F2 = double_or_blank(card, 16, 'F2', 0.0)\n\n #: default=infinite; assume 1e8\n self.K1 = double_or_blank(card, 17, 'K1', 1e8)\n #: default=infinite; assume 1e8\n self.K2 = double_or_blank(card, 18, 'K2', 1e8)\n #: I12 -> use I12()\n self.i12 = double_or_blank(card, 19, 'I12', 0.0)\n if self.A == 0.0 and self.i12 == 0.0:\n assert self.K1 is None, 'K1 must be blank if A=0.0 and I12=0.0; A=%r I12=%r K1=%r' % (self.A, self.i12, self.K1)\n assert self.K2 is None, 'K2 must be blank if A=0.0 and I12=0.0; A=%r I12=%r K2=%r' % (self.A, self.i12, self.K2)\n assert len(card) <= 20, 'len(PBAR card) = %i' % len(card)\n\n i = self.property_id.argsort()\n self.property_id = self.property_id[i]\n self.material_id = self.material_id[i]\n\n self.area = self.area[i]\n self.I1 = self.I1[i]\n self.I2 = self.I2[i]\n self.J = self.J[i]\n self.nsm = self.nsm[i]\n\n unique_pids = unique(self.property_id)\n\n if len(unique_pids) != len(self.property_id):\n raise RuntimeError('There are duplicate PCOMP IDs...')\n self._cards = []\n self._comments = []\n\n #=========================================================================\n def get_index(self, property_ids):\n if isinstance(property_ids, int):\n property_ids = array([property_ids])\n if property_ids is None:\n return arange(self.n)\n\n indexs = searchsorted(self.property_id, property_ids)\n assert len(indexs) == len(property_ids), 'indexs=%s pids=%s' % (indexs, property_ids)\n return 
indexs\n\n #=========================================================================\n def write_bdf(self, f, size=8, property_ids=None):\n if self.n:\n if property_ids is None:\n i = arange(self.n)\n else:\n i = searchsorted(self.property_id, property_ids)\n\n for (pid, mid, area, I1, I2, J) in zip(self.property_id[i], self.material_id[i],\n self.area[i], self.I1[i], self.I2[i], self.J[i]):\n card = ['PBAR', pid, mid, area, I1, I2, J]\n f.write(print_card_8(card))\n",
"step-ids": [
3,
6,
7,
8,
9
]
}
|
[
3,
6,
7,
8,
9
] |
<|reserved_special_token_0|>
def main(_):
    """Convert the XML annotation set into train/test TFRecord files.

    Shuffles the annotation files, then routes every 5th example
    (~20%) to test.record and the rest to train.record.
    Uses TF1-era APIs (tf.Session, tf.python_io) throughout.
    """
    writer_train = tf.python_io.TFRecordWriter('./data/train.record')
    writer_test = tf.python_io.TFRecordWriter('./data/test.record')
    filename_list = tf.train.match_filenames_once('./data/annotations/*.xml')
    init = (tf.global_variables_initializer(), tf.local_variables_initializer())
    sess = tf.Session()
    sess.run(init)
    # BUGFIX: the local holding the filenames was named `list`,
    # shadowing the builtin
    xml_files = sess.run(filename_list)
    random.shuffle(xml_files)
    n_test = 0
    n_train = 0
    for i, xml_file in enumerate(xml_files, start=1):
        example = create_example(xml_file)
        if i % 5 == 0:  # every 5th record -> ~20% held out for testing
            writer_test.write(example.SerializeToString())
            n_test += 1
        else:
            writer_train.write(example.SerializeToString())
            n_train += 1
        print(xml_file)
    writer_test.close()
    writer_train.close()
    print('Successfully converted dataset to TFRecord.')
    print('training dataset: # ')
    print(n_train)
    print('test dataset: # ')
    print(n_test)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def create_example(xml_file):
    """Build a tf.train.Example from one Pascal-VOC style annotation file.

    Parses the XML, loads the referenced JPEG from ./data/images, and
    packs the image bytes plus size-normalized bounding boxes into a
    TF Example proto.

    :param xml_file: path to the annotation XML
    :raises ValueError: if the referenced image is not a JPEG
    """
    # label name -> integer class id; defined once instead of re-creating
    # a nested function on every loop iteration (original defect)
    label_map = {'car-red': 1, 'car-blue': 2, 'phone': 3}
    tree = ET.parse(xml_file)
    root = tree.getroot()
    image_name = root.find('filename').text
    file_name = image_name.encode('utf8')
    size = root.find('size')
    width = int(size[0].text)
    height = int(size[1].text)
    xmin = []
    ymin = []
    xmax = []
    ymax = []
    classes = []
    classes_text = []
    truncated = []
    poses = []
    difficult_obj = []
    for member in root.findall('object'):
        classes_text.append(member[0].text)
        # NOTE(review): unknown labels map to None, matching the original
        # behavior; int64_list_feature would fail downstream -- confirm
        # the label set is closed.
        classes.append(label_map.get(member[0].text))
        # box coordinates are normalized to [0, 1] by the image size
        xmin.append(float(member[4][0].text) / width)
        ymin.append(float(member[4][1].text) / height)
        xmax.append(float(member[4][2].text) / width)
        ymax.append(float(member[4][3].text) / height)
        difficult_obj.append(0)
        truncated.append(0)
        poses.append('Unspecified'.encode('utf8'))
    full_path = os.path.join('./data/images', '{}'.format(image_name))
    with tf.gfile.GFile(full_path, 'rb') as fid:
        encoded_jpg = fid.read()
    encoded_jpg_io = io.BytesIO(encoded_jpg)
    image = Image.open(encoded_jpg_io)
    if image.format != 'JPEG':
        raise ValueError('Image format not JPEG')
    key = hashlib.sha256(encoded_jpg).hexdigest()
    example = tf.train.Example(features=tf.train.Features(feature={
        'image/height': dataset_util.int64_feature(height),
        'image/width': dataset_util.int64_feature(width),
        'image/filename': dataset_util.bytes_feature(file_name),
        'image/source_id': dataset_util.bytes_feature(file_name),
        'image/key/sha256': dataset_util.bytes_feature(key.encode('utf8')),
        'image/encoded': dataset_util.bytes_feature(encoded_jpg),
        'image/format': dataset_util.bytes_feature('jpeg'.encode('utf8')),
        'image/object/bbox/xmin': dataset_util.float_list_feature(xmin),
        'image/object/bbox/xmax': dataset_util.float_list_feature(xmax),
        'image/object/bbox/ymin': dataset_util.float_list_feature(ymin),
        'image/object/bbox/ymax': dataset_util.float_list_feature(ymax),
        'image/object/class/text': dataset_util.bytes_list_feature(classes_text),
        'image/object/class/label': dataset_util.int64_list_feature(classes),
        'image/object/difficult': dataset_util.int64_list_feature(difficult_obj),
        'image/object/truncated': dataset_util.int64_list_feature(truncated),
        'image/object/view': dataset_util.bytes_list_feature(poses)}))
    return example
def main(_):
    """Convert the XML annotation set into train/test TFRecord files.

    Shuffles the annotation files, then routes every 5th example
    (~20%) to test.record and the rest to train.record.
    Uses TF1-era APIs (tf.Session, tf.python_io) throughout.
    """
    writer_train = tf.python_io.TFRecordWriter('./data/train.record')
    writer_test = tf.python_io.TFRecordWriter('./data/test.record')
    filename_list = tf.train.match_filenames_once('./data/annotations/*.xml')
    init = (tf.global_variables_initializer(), tf.local_variables_initializer())
    sess = tf.Session()
    sess.run(init)
    # BUGFIX: the local holding the filenames was named `list`,
    # shadowing the builtin
    xml_files = sess.run(filename_list)
    random.shuffle(xml_files)
    n_test = 0
    n_train = 0
    for i, xml_file in enumerate(xml_files, start=1):
        example = create_example(xml_file)
        if i % 5 == 0:  # every 5th record -> ~20% held out for testing
            writer_test.write(example.SerializeToString())
            n_test += 1
        else:
            writer_train.write(example.SerializeToString())
            n_train += 1
        print(xml_file)
    writer_test.close()
    writer_train.close()
    print('Successfully converted dataset to TFRecord.')
    print('training dataset: # ')
    print(n_train)
    print('test dataset: # ')
    print(n_test)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def create_example(xml_file):
    """Build a tf.train.Example from one Pascal-VOC style annotation file.

    Parses the XML, loads the referenced JPEG from ./data/images, and
    packs the image bytes plus size-normalized bounding boxes into a
    TF Example proto.

    :param xml_file: path to the annotation XML
    :raises ValueError: if the referenced image is not a JPEG
    """
    # label name -> integer class id; defined once instead of re-creating
    # a nested function on every loop iteration (original defect)
    label_map = {'car-red': 1, 'car-blue': 2, 'phone': 3}
    tree = ET.parse(xml_file)
    root = tree.getroot()
    image_name = root.find('filename').text
    file_name = image_name.encode('utf8')
    size = root.find('size')
    width = int(size[0].text)
    height = int(size[1].text)
    xmin = []
    ymin = []
    xmax = []
    ymax = []
    classes = []
    classes_text = []
    truncated = []
    poses = []
    difficult_obj = []
    for member in root.findall('object'):
        classes_text.append(member[0].text)
        # NOTE(review): unknown labels map to None, matching the original
        # behavior; int64_list_feature would fail downstream -- confirm
        # the label set is closed.
        classes.append(label_map.get(member[0].text))
        # box coordinates are normalized to [0, 1] by the image size
        xmin.append(float(member[4][0].text) / width)
        ymin.append(float(member[4][1].text) / height)
        xmax.append(float(member[4][2].text) / width)
        ymax.append(float(member[4][3].text) / height)
        difficult_obj.append(0)
        truncated.append(0)
        poses.append('Unspecified'.encode('utf8'))
    full_path = os.path.join('./data/images', '{}'.format(image_name))
    with tf.gfile.GFile(full_path, 'rb') as fid:
        encoded_jpg = fid.read()
    encoded_jpg_io = io.BytesIO(encoded_jpg)
    image = Image.open(encoded_jpg_io)
    if image.format != 'JPEG':
        raise ValueError('Image format not JPEG')
    key = hashlib.sha256(encoded_jpg).hexdigest()
    example = tf.train.Example(features=tf.train.Features(feature={
        'image/height': dataset_util.int64_feature(height),
        'image/width': dataset_util.int64_feature(width),
        'image/filename': dataset_util.bytes_feature(file_name),
        'image/source_id': dataset_util.bytes_feature(file_name),
        'image/key/sha256': dataset_util.bytes_feature(key.encode('utf8')),
        'image/encoded': dataset_util.bytes_feature(encoded_jpg),
        'image/format': dataset_util.bytes_feature('jpeg'.encode('utf8')),
        'image/object/bbox/xmin': dataset_util.float_list_feature(xmin),
        'image/object/bbox/xmax': dataset_util.float_list_feature(xmax),
        'image/object/bbox/ymin': dataset_util.float_list_feature(ymin),
        'image/object/bbox/ymax': dataset_util.float_list_feature(ymax),
        'image/object/class/text': dataset_util.bytes_list_feature(classes_text),
        'image/object/class/label': dataset_util.int64_list_feature(classes),
        'image/object/difficult': dataset_util.int64_list_feature(difficult_obj),
        'image/object/truncated': dataset_util.int64_list_feature(truncated),
        'image/object/view': dataset_util.bytes_list_feature(poses)}))
    return example
def main(_):
    """Convert the XML annotation set into train/test TFRecord files.

    Shuffles the annotation files, then routes every 5th example
    (~20%) to test.record and the rest to train.record.
    Uses TF1-era APIs (tf.Session, tf.python_io) throughout.
    """
    writer_train = tf.python_io.TFRecordWriter('./data/train.record')
    writer_test = tf.python_io.TFRecordWriter('./data/test.record')
    filename_list = tf.train.match_filenames_once('./data/annotations/*.xml')
    init = (tf.global_variables_initializer(), tf.local_variables_initializer())
    sess = tf.Session()
    sess.run(init)
    # BUGFIX: the local holding the filenames was named `list`,
    # shadowing the builtin
    xml_files = sess.run(filename_list)
    random.shuffle(xml_files)
    n_test = 0
    n_train = 0
    for i, xml_file in enumerate(xml_files, start=1):
        example = create_example(xml_file)
        if i % 5 == 0:  # every 5th record -> ~20% held out for testing
            writer_test.write(example.SerializeToString())
            n_test += 1
        else:
            writer_train.write(example.SerializeToString())
            n_train += 1
        print(xml_file)
    writer_test.close()
    writer_train.close()
    print('Successfully converted dataset to TFRecord.')
    print('training dataset: # ')
    print(n_train)
    print('test dataset: # ')
    print(n_test)
if __name__ == '__main__':
    # TF1-style entry point: tf.app.run parses CLI flags, then calls main(_)
    tf.app.run()
<|reserved_special_token_1|>
import tensorflow as tf
from object_detection.utils import dataset_util
import os
import io
import hashlib
import xml.etree.ElementTree as ET
import random
from PIL import Image
def create_example(xml_file):
    """Convert one Pascal-VOC style XML annotation file into a tf.train.Example.

    The matching JPEG is read from ./data/images/<filename> (the filename is
    taken from the XML), verified to really be a JPEG, and packed together
    with the normalized bounding boxes and class labels into one Example.

    Args:
        xml_file: path to the annotation XML file.

    Returns:
        A populated tf.train.Example proto.

    Raises:
        ValueError: if the image is not JPEG or an object class is unknown.
    """
    # Class-name -> integer label id.  Built once, instead of re-defining a
    # helper function on every loop iteration as the original code did; an
    # unknown label now fails loudly instead of silently producing None.
    label_map = {'car-red': 1, 'car-blue': 2, 'phone': 3}
    tree = ET.parse(xml_file)
    root = tree.getroot()
    image_name = root.find('filename').text
    file_name = image_name.encode('utf8')
    size = root.find('size')
    width = int(size[0].text)
    height = int(size[1].text)
    # Per-object annotation lists; one entry per <object> element.
    xmin = []
    ymin = []
    xmax = []
    ymax = []
    classes = []
    classes_text = []
    truncated = []
    poses = []
    difficult_obj = []
    for member in root.findall('object'):
        label = member[0].text
        classes_text.append(label)
        if label not in label_map:
            raise ValueError('Unknown object class %r in %s' % (label, xml_file))
        classes.append(label_map[label])
        # Bounding-box coordinates are normalized to [0, 1] by the image size.
        xmin.append(float(member[4][0].text) / width)
        ymin.append(float(member[4][1].text) / height)
        xmax.append(float(member[4][2].text) / width)
        ymax.append(float(member[4][3].text) / height)
        # Fixed metadata: nothing is marked difficult or truncated.
        difficult_obj.append(0)
        truncated.append(0)
        poses.append('Unspecified'.encode('utf8'))
    full_path = os.path.join('./data/images', '{}'.format(image_name))
    with tf.gfile.GFile(full_path, 'rb') as fid:
        encoded_jpg = fid.read()
    encoded_jpg_io = io.BytesIO(encoded_jpg)
    image = Image.open(encoded_jpg_io)
    if image.format != 'JPEG':
        raise ValueError('Image format not JPEG')
    # SHA-256 of the raw JPEG bytes, used as a stable record key.
    key = hashlib.sha256(encoded_jpg).hexdigest()
    example = tf.train.Example(features=tf.train.Features(feature={
        'image/height': dataset_util.int64_feature(height),
        'image/width': dataset_util.int64_feature(width),
        'image/filename': dataset_util.bytes_feature(file_name),
        'image/source_id': dataset_util.bytes_feature(file_name),
        'image/key/sha256': dataset_util.bytes_feature(key.encode('utf8')),
        'image/encoded': dataset_util.bytes_feature(encoded_jpg),
        'image/format': dataset_util.bytes_feature('jpeg'.encode('utf8')),
        'image/object/bbox/xmin': dataset_util.float_list_feature(xmin),
        'image/object/bbox/xmax': dataset_util.float_list_feature(xmax),
        'image/object/bbox/ymin': dataset_util.float_list_feature(ymin),
        'image/object/bbox/ymax': dataset_util.float_list_feature(ymax),
        'image/object/class/text': dataset_util.bytes_list_feature(classes_text),
        'image/object/class/label': dataset_util.int64_list_feature(classes),
        'image/object/difficult': dataset_util.int64_list_feature(difficult_obj),
        'image/object/truncated': dataset_util.int64_list_feature(truncated),
        'image/object/view': dataset_util.bytes_list_feature(poses),
    }))
    return example
def main(_):
    """Shuffle all annotation XMLs and write them out as TFRecords.

    Every fifth example goes to ./data/test.record, the rest to
    ./data/train.record (an 80/20 split).
    """
    train_writer = tf.python_io.TFRecordWriter('./data/train.record')
    test_writer = tf.python_io.TFRecordWriter('./data/test.record')
    pattern = tf.train.match_filenames_once('./data/annotations/*.xml')
    # Evaluate the file-name pattern after running both initializers.
    initializers = (tf.global_variables_initializer(),
                    tf.local_variables_initializer())
    session = tf.Session()
    session.run(initializers)
    xml_files = session.run(pattern)
    random.shuffle(xml_files)
    train_count = 0
    test_count = 0
    for position, xml_file in enumerate(xml_files, start=1):
        record = create_example(xml_file)
        if position % 5 == 0:
            # Every fifth record lands in the test split.
            test_writer.write(record.SerializeToString())
            test_count += 1
        else:
            train_writer.write(record.SerializeToString())
            train_count += 1
        print(xml_file)
    test_writer.close()
    train_writer.close()
    print('Successfully converted dataset to TFRecord.')
    print('training dataset: # ')
    print(train_count)
    print('test dataset: # ')
    print(test_count)
if __name__ == '__main__':
tf.app.run()
<|reserved_special_token_1|>
# from https://github.com/tensorflow/models/tree/master/research/object_detection/dataset_tools
# and https://gist.github.com/saghiralfasly/ee642af0616461145a9a82d7317fb1d6
import tensorflow as tf
from object_detection.utils import dataset_util
import os
import io
import hashlib
import xml.etree.ElementTree as ET
import random
from PIL import Image
def create_example(xml_file):
    """Convert one Pascal-VOC style XML annotation file into a tf.train.Example.

    Reads the matching JPEG from ./data/images/<filename> (filename taken from
    the XML), verifies it really is JPEG, and packs it together with the
    normalized bounding boxes and class labels into a single Example proto.

    :param xml_file: path to the annotation XML file.
    :return: a populated tf.train.Example.
    :raises ValueError: if the referenced image is not a JPEG.
    """
    tree = ET.parse(xml_file)
    root = tree.getroot()
    image_name = root.find('filename').text
    file_name = image_name.encode('utf8')
    size=root.find('size')
    width = int(size[0].text)
    height = int(size[1].text)
    # Per-object annotation lists; one entry per <object> element.
    xmin = []
    ymin = []
    xmax = []
    ymax = []
    classes = []
    classes_text = []
    truncated = []
    poses = []
    difficult_obj = []
    for member in root.findall('object'):
        classes_text.append(member[0].text)

        # NOTE(review): this helper is re-defined on every loop iteration and
        # returns None for an unknown label, which would break
        # int64_list_feature downstream — worth hoisting and hardening.
        def class_text_to_int(row_label):
            if row_label == 'car-red':
                return 1
            if row_label == 'car-blue':
                return 2
            if row_label == 'phone':
                return 3
        classes.append(class_text_to_int(member[0].text))
        # Box coordinates are normalized to [0, 1] by the image size.
        xmin.append(float(member[4][0].text) / width)
        ymin.append(float(member[4][1].text) / height)
        xmax.append(float(member[4][2].text) / width)
        ymax.append(float(member[4][3].text) / height)
        # Fixed metadata: nothing is marked difficult or truncated.
        difficult_obj.append(0)
        truncated.append(0)
        poses.append('Unspecified'.encode('utf8'))
    full_path = os.path.join('./data/images', '{}'.format(image_name))
    with tf.gfile.GFile(full_path, 'rb') as fid:
        encoded_jpg = fid.read()
    encoded_jpg_io = io.BytesIO(encoded_jpg)
    image = Image.open(encoded_jpg_io)
    if image.format != 'JPEG':
        raise ValueError('Image format not JPEG')
    # SHA-256 of the raw JPEG bytes, used as a stable record key.
    key = hashlib.sha256(encoded_jpg).hexdigest()
    example = tf.train.Example(features=tf.train.Features(feature={
        'image/height': dataset_util.int64_feature(height),
        'image/width': dataset_util.int64_feature(width),
        'image/filename': dataset_util.bytes_feature(file_name),
        'image/source_id': dataset_util.bytes_feature(file_name),
        'image/key/sha256': dataset_util.bytes_feature(key.encode('utf8')),
        'image/encoded': dataset_util.bytes_feature(encoded_jpg),
        'image/format': dataset_util.bytes_feature('jpeg'.encode('utf8')),
        'image/object/bbox/xmin': dataset_util.float_list_feature(xmin),
        'image/object/bbox/xmax': dataset_util.float_list_feature(xmax),
        'image/object/bbox/ymin': dataset_util.float_list_feature(ymin),
        'image/object/bbox/ymax': dataset_util.float_list_feature(ymax),
        'image/object/class/text': dataset_util.bytes_list_feature(classes_text),
        'image/object/class/label': dataset_util.int64_list_feature(classes),
        'image/object/difficult': dataset_util.int64_list_feature(difficult_obj),
        'image/object/truncated': dataset_util.int64_list_feature(truncated),
        'image/object/view': dataset_util.bytes_list_feature(poses),
    }))
    return example
def main(_):
    """Convert all annotation XMLs under ./data/annotations into TFRecords.

    Files are shuffled and split roughly 80/20: every fifth example goes to
    ./data/test.record, the rest to ./data/train.record.
    """
    writer_train = tf.python_io.TFRecordWriter('./data/train.record')
    writer_test = tf.python_io.TFRecordWriter('./data/test.record')
    filename_list=tf.train.match_filenames_once("./data/annotations/*.xml")
    # Run both global and local variable initializers before evaluating
    # the matched file list.
    init = (tf.global_variables_initializer(), tf.local_variables_initializer())
    sess=tf.Session()
    sess.run(init)
    # NOTE(review): `list` shadows the builtin name; it holds the file paths.
    list=sess.run(filename_list)
    random.shuffle(list)
    i=1  # 1-based position in the shuffled file list
    tst=0  # records written to the test set
    trn=0  # records written to the training set
    for xml_file in list:
        example = create_example(xml_file)
        if (i%5)==0:
            # Every fifth record goes to the test split.
            writer_test.write(example.SerializeToString())
            tst=tst+1
        else:
            writer_train.write(example.SerializeToString())
            trn=trn+1
        i=i+1
        print(xml_file)
    writer_test.close()
    writer_train.close()
    print('Successfully converted dataset to TFRecord.')
    print('training dataset: # ')
    print(trn)
    print('test dataset: # ')
    print(tst)
if __name__ == '__main__':
tf.app.run()
|
flexible
|
{
"blob_id": "8142585827590f6d951f0fcc375e8511aa75e9c8",
"index": 7320,
"step-1": "<mask token>\n\n\ndef main(_):\n writer_train = tf.python_io.TFRecordWriter('./data/train.record')\n writer_test = tf.python_io.TFRecordWriter('./data/test.record')\n filename_list = tf.train.match_filenames_once('./data/annotations/*.xml')\n init = tf.global_variables_initializer(), tf.local_variables_initializer()\n sess = tf.Session()\n sess.run(init)\n list = sess.run(filename_list)\n random.shuffle(list)\n i = 1\n tst = 0\n trn = 0\n for xml_file in list:\n example = create_example(xml_file)\n if i % 5 == 0:\n writer_test.write(example.SerializeToString())\n tst = tst + 1\n else:\n writer_train.write(example.SerializeToString())\n trn = trn + 1\n i = i + 1\n print(xml_file)\n writer_test.close()\n writer_train.close()\n print('Successfully converted dataset to TFRecord.')\n print('training dataset: # ')\n print(trn)\n print('test dataset: # ')\n print(tst)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef create_example(xml_file):\n tree = ET.parse(xml_file)\n root = tree.getroot()\n image_name = root.find('filename').text\n file_name = image_name.encode('utf8')\n size = root.find('size')\n width = int(size[0].text)\n height = int(size[1].text)\n xmin = []\n ymin = []\n xmax = []\n ymax = []\n classes = []\n classes_text = []\n truncated = []\n poses = []\n difficult_obj = []\n for member in root.findall('object'):\n classes_text.append(member[0].text)\n\n def class_text_to_int(row_label):\n if row_label == 'car-red':\n return 1\n if row_label == 'car-blue':\n return 2\n if row_label == 'phone':\n return 3\n classes.append(class_text_to_int(member[0].text))\n xmin.append(float(member[4][0].text) / width)\n ymin.append(float(member[4][1].text) / height)\n xmax.append(float(member[4][2].text) / width)\n ymax.append(float(member[4][3].text) / height)\n difficult_obj.append(0)\n truncated.append(0)\n poses.append('Unspecified'.encode('utf8'))\n full_path = os.path.join('./data/images', '{}'.format(image_name))\n with tf.gfile.GFile(full_path, 'rb') as fid:\n encoded_jpg = fid.read()\n encoded_jpg_io = io.BytesIO(encoded_jpg)\n image = Image.open(encoded_jpg_io)\n if image.format != 'JPEG':\n raise ValueError('Image format not JPEG')\n key = hashlib.sha256(encoded_jpg).hexdigest()\n example = tf.train.Example(features=tf.train.Features(feature={\n 'image/height': dataset_util.int64_feature(height), 'image/width':\n dataset_util.int64_feature(width), 'image/filename': dataset_util.\n bytes_feature(file_name), 'image/source_id': dataset_util.\n bytes_feature(file_name), 'image/key/sha256': dataset_util.\n bytes_feature(key.encode('utf8')), 'image/encoded': dataset_util.\n bytes_feature(encoded_jpg), 'image/format': dataset_util.\n bytes_feature('jpeg'.encode('utf8')), 'image/object/bbox/xmin':\n dataset_util.float_list_feature(xmin), 'image/object/bbox/xmax':\n dataset_util.float_list_feature(xmax), 'image/object/bbox/ymin':\n 
dataset_util.float_list_feature(ymin), 'image/object/bbox/ymax':\n dataset_util.float_list_feature(ymax), 'image/object/class/text':\n dataset_util.bytes_list_feature(classes_text),\n 'image/object/class/label': dataset_util.int64_list_feature(classes\n ), 'image/object/difficult': dataset_util.int64_list_feature(\n difficult_obj), 'image/object/truncated': dataset_util.\n int64_list_feature(truncated), 'image/object/view': dataset_util.\n bytes_list_feature(poses)}))\n return example\n\n\ndef main(_):\n writer_train = tf.python_io.TFRecordWriter('./data/train.record')\n writer_test = tf.python_io.TFRecordWriter('./data/test.record')\n filename_list = tf.train.match_filenames_once('./data/annotations/*.xml')\n init = tf.global_variables_initializer(), tf.local_variables_initializer()\n sess = tf.Session()\n sess.run(init)\n list = sess.run(filename_list)\n random.shuffle(list)\n i = 1\n tst = 0\n trn = 0\n for xml_file in list:\n example = create_example(xml_file)\n if i % 5 == 0:\n writer_test.write(example.SerializeToString())\n tst = tst + 1\n else:\n writer_train.write(example.SerializeToString())\n trn = trn + 1\n i = i + 1\n print(xml_file)\n writer_test.close()\n writer_train.close()\n print('Successfully converted dataset to TFRecord.')\n print('training dataset: # ')\n print(trn)\n print('test dataset: # ')\n print(tst)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef create_example(xml_file):\n tree = ET.parse(xml_file)\n root = tree.getroot()\n image_name = root.find('filename').text\n file_name = image_name.encode('utf8')\n size = root.find('size')\n width = int(size[0].text)\n height = int(size[1].text)\n xmin = []\n ymin = []\n xmax = []\n ymax = []\n classes = []\n classes_text = []\n truncated = []\n poses = []\n difficult_obj = []\n for member in root.findall('object'):\n classes_text.append(member[0].text)\n\n def class_text_to_int(row_label):\n if row_label == 'car-red':\n return 1\n if row_label == 'car-blue':\n return 2\n if row_label == 'phone':\n return 3\n classes.append(class_text_to_int(member[0].text))\n xmin.append(float(member[4][0].text) / width)\n ymin.append(float(member[4][1].text) / height)\n xmax.append(float(member[4][2].text) / width)\n ymax.append(float(member[4][3].text) / height)\n difficult_obj.append(0)\n truncated.append(0)\n poses.append('Unspecified'.encode('utf8'))\n full_path = os.path.join('./data/images', '{}'.format(image_name))\n with tf.gfile.GFile(full_path, 'rb') as fid:\n encoded_jpg = fid.read()\n encoded_jpg_io = io.BytesIO(encoded_jpg)\n image = Image.open(encoded_jpg_io)\n if image.format != 'JPEG':\n raise ValueError('Image format not JPEG')\n key = hashlib.sha256(encoded_jpg).hexdigest()\n example = tf.train.Example(features=tf.train.Features(feature={\n 'image/height': dataset_util.int64_feature(height), 'image/width':\n dataset_util.int64_feature(width), 'image/filename': dataset_util.\n bytes_feature(file_name), 'image/source_id': dataset_util.\n bytes_feature(file_name), 'image/key/sha256': dataset_util.\n bytes_feature(key.encode('utf8')), 'image/encoded': dataset_util.\n bytes_feature(encoded_jpg), 'image/format': dataset_util.\n bytes_feature('jpeg'.encode('utf8')), 'image/object/bbox/xmin':\n dataset_util.float_list_feature(xmin), 'image/object/bbox/xmax':\n dataset_util.float_list_feature(xmax), 'image/object/bbox/ymin':\n 
dataset_util.float_list_feature(ymin), 'image/object/bbox/ymax':\n dataset_util.float_list_feature(ymax), 'image/object/class/text':\n dataset_util.bytes_list_feature(classes_text),\n 'image/object/class/label': dataset_util.int64_list_feature(classes\n ), 'image/object/difficult': dataset_util.int64_list_feature(\n difficult_obj), 'image/object/truncated': dataset_util.\n int64_list_feature(truncated), 'image/object/view': dataset_util.\n bytes_list_feature(poses)}))\n return example\n\n\ndef main(_):\n writer_train = tf.python_io.TFRecordWriter('./data/train.record')\n writer_test = tf.python_io.TFRecordWriter('./data/test.record')\n filename_list = tf.train.match_filenames_once('./data/annotations/*.xml')\n init = tf.global_variables_initializer(), tf.local_variables_initializer()\n sess = tf.Session()\n sess.run(init)\n list = sess.run(filename_list)\n random.shuffle(list)\n i = 1\n tst = 0\n trn = 0\n for xml_file in list:\n example = create_example(xml_file)\n if i % 5 == 0:\n writer_test.write(example.SerializeToString())\n tst = tst + 1\n else:\n writer_train.write(example.SerializeToString())\n trn = trn + 1\n i = i + 1\n print(xml_file)\n writer_test.close()\n writer_train.close()\n print('Successfully converted dataset to TFRecord.')\n print('training dataset: # ')\n print(trn)\n print('test dataset: # ')\n print(tst)\n\n\nif __name__ == '__main__':\n tf.app.run()\n",
"step-4": "import tensorflow as tf\nfrom object_detection.utils import dataset_util\nimport os\nimport io\nimport hashlib\nimport xml.etree.ElementTree as ET\nimport random\nfrom PIL import Image\n\n\ndef create_example(xml_file):\n tree = ET.parse(xml_file)\n root = tree.getroot()\n image_name = root.find('filename').text\n file_name = image_name.encode('utf8')\n size = root.find('size')\n width = int(size[0].text)\n height = int(size[1].text)\n xmin = []\n ymin = []\n xmax = []\n ymax = []\n classes = []\n classes_text = []\n truncated = []\n poses = []\n difficult_obj = []\n for member in root.findall('object'):\n classes_text.append(member[0].text)\n\n def class_text_to_int(row_label):\n if row_label == 'car-red':\n return 1\n if row_label == 'car-blue':\n return 2\n if row_label == 'phone':\n return 3\n classes.append(class_text_to_int(member[0].text))\n xmin.append(float(member[4][0].text) / width)\n ymin.append(float(member[4][1].text) / height)\n xmax.append(float(member[4][2].text) / width)\n ymax.append(float(member[4][3].text) / height)\n difficult_obj.append(0)\n truncated.append(0)\n poses.append('Unspecified'.encode('utf8'))\n full_path = os.path.join('./data/images', '{}'.format(image_name))\n with tf.gfile.GFile(full_path, 'rb') as fid:\n encoded_jpg = fid.read()\n encoded_jpg_io = io.BytesIO(encoded_jpg)\n image = Image.open(encoded_jpg_io)\n if image.format != 'JPEG':\n raise ValueError('Image format not JPEG')\n key = hashlib.sha256(encoded_jpg).hexdigest()\n example = tf.train.Example(features=tf.train.Features(feature={\n 'image/height': dataset_util.int64_feature(height), 'image/width':\n dataset_util.int64_feature(width), 'image/filename': dataset_util.\n bytes_feature(file_name), 'image/source_id': dataset_util.\n bytes_feature(file_name), 'image/key/sha256': dataset_util.\n bytes_feature(key.encode('utf8')), 'image/encoded': dataset_util.\n bytes_feature(encoded_jpg), 'image/format': dataset_util.\n bytes_feature('jpeg'.encode('utf8')), 
'image/object/bbox/xmin':\n dataset_util.float_list_feature(xmin), 'image/object/bbox/xmax':\n dataset_util.float_list_feature(xmax), 'image/object/bbox/ymin':\n dataset_util.float_list_feature(ymin), 'image/object/bbox/ymax':\n dataset_util.float_list_feature(ymax), 'image/object/class/text':\n dataset_util.bytes_list_feature(classes_text),\n 'image/object/class/label': dataset_util.int64_list_feature(classes\n ), 'image/object/difficult': dataset_util.int64_list_feature(\n difficult_obj), 'image/object/truncated': dataset_util.\n int64_list_feature(truncated), 'image/object/view': dataset_util.\n bytes_list_feature(poses)}))\n return example\n\n\ndef main(_):\n writer_train = tf.python_io.TFRecordWriter('./data/train.record')\n writer_test = tf.python_io.TFRecordWriter('./data/test.record')\n filename_list = tf.train.match_filenames_once('./data/annotations/*.xml')\n init = tf.global_variables_initializer(), tf.local_variables_initializer()\n sess = tf.Session()\n sess.run(init)\n list = sess.run(filename_list)\n random.shuffle(list)\n i = 1\n tst = 0\n trn = 0\n for xml_file in list:\n example = create_example(xml_file)\n if i % 5 == 0:\n writer_test.write(example.SerializeToString())\n tst = tst + 1\n else:\n writer_train.write(example.SerializeToString())\n trn = trn + 1\n i = i + 1\n print(xml_file)\n writer_test.close()\n writer_train.close()\n print('Successfully converted dataset to TFRecord.')\n print('training dataset: # ')\n print(trn)\n print('test dataset: # ')\n print(tst)\n\n\nif __name__ == '__main__':\n tf.app.run()\n",
"step-5": "# from https://github.com/tensorflow/models/tree/master/research/object_detection/dataset_tools\n# and https://gist.github.com/saghiralfasly/ee642af0616461145a9a82d7317fb1d6\n \nimport tensorflow as tf\nfrom object_detection.utils import dataset_util\nimport os\nimport io\nimport hashlib\nimport xml.etree.ElementTree as ET\nimport random\nfrom PIL import Image\n\ndef create_example(xml_file):\n tree = ET.parse(xml_file)\n root = tree.getroot()\n image_name = root.find('filename').text\n file_name = image_name.encode('utf8')\n size=root.find('size')\n width = int(size[0].text)\n height = int(size[1].text)\n xmin = []\n ymin = []\n xmax = []\n ymax = []\n classes = []\n classes_text = []\n truncated = []\n poses = []\n difficult_obj = []\n for member in root.findall('object'):\n classes_text.append(member[0].text)\n\n def class_text_to_int(row_label):\n if row_label == 'car-red':\n return 1\n if row_label == 'car-blue':\n return 2\n if row_label == 'phone':\n return 3\n\n classes.append(class_text_to_int(member[0].text))\n\n xmin.append(float(member[4][0].text) / width)\n ymin.append(float(member[4][1].text) / height)\n xmax.append(float(member[4][2].text) / width)\n ymax.append(float(member[4][3].text) / height)\n difficult_obj.append(0)\n truncated.append(0)\n poses.append('Unspecified'.encode('utf8'))\n\n full_path = os.path.join('./data/images', '{}'.format(image_name))\n with tf.gfile.GFile(full_path, 'rb') as fid:\n encoded_jpg = fid.read()\n encoded_jpg_io = io.BytesIO(encoded_jpg)\n image = Image.open(encoded_jpg_io)\n if image.format != 'JPEG':\n raise ValueError('Image format not JPEG')\n key = hashlib.sha256(encoded_jpg).hexdigest()\n\t\t\n example = tf.train.Example(features=tf.train.Features(feature={\n 'image/height': dataset_util.int64_feature(height),\n 'image/width': dataset_util.int64_feature(width),\n 'image/filename': dataset_util.bytes_feature(file_name),\n 'image/source_id': dataset_util.bytes_feature(file_name),\n 'image/key/sha256': 
dataset_util.bytes_feature(key.encode('utf8')),\n 'image/encoded': dataset_util.bytes_feature(encoded_jpg),\n 'image/format': dataset_util.bytes_feature('jpeg'.encode('utf8')),\n 'image/object/bbox/xmin': dataset_util.float_list_feature(xmin),\n 'image/object/bbox/xmax': dataset_util.float_list_feature(xmax),\n 'image/object/bbox/ymin': dataset_util.float_list_feature(ymin),\n 'image/object/bbox/ymax': dataset_util.float_list_feature(ymax),\n 'image/object/class/text': dataset_util.bytes_list_feature(classes_text),\n 'image/object/class/label': dataset_util.int64_list_feature(classes),\n 'image/object/difficult': dataset_util.int64_list_feature(difficult_obj),\n 'image/object/truncated': dataset_util.int64_list_feature(truncated),\n 'image/object/view': dataset_util.bytes_list_feature(poses),\n }))\t\n return example\t\n\t\t\ndef main(_):\n writer_train = tf.python_io.TFRecordWriter('./data/train.record') \n writer_test = tf.python_io.TFRecordWriter('./data/test.record')\n filename_list=tf.train.match_filenames_once(\"./data/annotations/*.xml\")\n init = (tf.global_variables_initializer(), tf.local_variables_initializer())\n sess=tf.Session()\n sess.run(init)\n list=sess.run(filename_list)\n random.shuffle(list) \n i=1 \n tst=0\n trn=0 \n for xml_file in list:\n example = create_example(xml_file)\n if (i%5)==0: \n writer_test.write(example.SerializeToString())\n tst=tst+1\n else: \n writer_train.write(example.SerializeToString())\n trn=trn+1\n i=i+1\n print(xml_file)\n writer_test.close()\n writer_train.close()\n print('Successfully converted dataset to TFRecord.')\n print('training dataset: # ')\n print(trn)\n print('test dataset: # ')\n print(tst)\t\n\t\nif __name__ == '__main__':\n tf.app.run()",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
class Mood(object):
    """Mood codes derived from sensor readings."""

    # Integer codes for each mood category.
    GENERIC = 1
    HIGH_TEMP = 2
    LOW_TEMP = 3
    HIGH_DUST = 4
    LOW_DUST = 5

    def decision(self, data):
        """Classify a temperature reading (string or number) into a mood code.

        Readings of 10 or below map to LOW_TEMP, readings above 30 to
        HIGH_TEMP, and anything in between to GENERIC.  A value that is not
        ordered (e.g. NaN) falls through every check and yields None.
        """
        reading = float(data)
        if reading > 30:
            return self.HIGH_TEMP
        if reading <= 10:
            return self.LOW_TEMP
        if 10 < reading <= 30:
            return self.GENERIC
|
normal
|
{
"blob_id": "511016b9cd54f6824360d609ede233b9cc3e4447",
"index": 7564,
"step-1": "<mask token>\n",
"step-2": "class Mood(object):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n",
"step-3": "class Mood(object):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def decision(self, data):\n temp = float(data)\n if temp <= 10:\n return self.LOW_TEMP\n if temp > 30:\n return self.HIGH_TEMP\n if 10 < temp <= 30:\n return self.GENERIC\n",
"step-4": "class Mood(object):\n GENERIC = 1\n HIGH_TEMP = 2\n LOW_TEMP = 3\n HIGH_DUST = 4\n LOW_DUST = 5\n\n def decision(self, data):\n temp = float(data)\n if temp <= 10:\n return self.LOW_TEMP\n if temp > 30:\n return self.HIGH_TEMP\n if 10 < temp <= 30:\n return self.GENERIC\n",
"step-5": "class Mood(object):\n\n GENERIC = 1\n HIGH_TEMP = 2\n LOW_TEMP = 3\n HIGH_DUST = 4\n LOW_DUST = 5\n\n def decision(self, data):\n temp = float(data)\n\n if temp <= 10:\n return self.LOW_TEMP\n\n if temp > 30:\n return self.HIGH_TEMP\n\n if (10 < temp <=30):\n return self.GENERIC\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
class Category(Document):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Category(Document):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
@classmethod
def get_category_by_text(cls, category_text: str) ->'Category':
try:
category = cls.objects.get(Q(name=category_text) | Q(aliases=
category_text.lower()))
except Category.DoesNotExist:
raise exceptions.InvalidCategory(
'Нет такой категории по имени или алиасам')
return category
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Category(Document):
id = StringField(primary_key=True)
name = StringField()
is_base_expenses = BooleanField(default=False)
aliases = ListField(StringField())
@classmethod
def get_category_by_text(cls, category_text: str) ->'Category':
try:
category = cls.objects.get(Q(name=category_text) | Q(aliases=
category_text.lower()))
except Category.DoesNotExist:
raise exceptions.InvalidCategory(
'Нет такой категории по имени или алиасам')
return category
<|reserved_special_token_1|>
from mongoengine import Document, StringField, BooleanField, ListField, Q
import exceptions
class Category(Document):
    """Expense category persisted via mongoengine."""

    # Primary key is the category id string.
    id = StringField(primary_key=True)
    name = StringField()
    is_base_expenses = BooleanField(default=False)
    aliases = ListField(StringField())

    @classmethod
    def get_category_by_text(cls, category_text: str) ->'Category':
        """Look up a category by exact name or by lower-cased alias.

        Raises exceptions.InvalidCategory when no document matches.
        """
        query = Q(name=category_text) | Q(aliases=category_text.lower())
        try:
            return cls.objects.get(query)
        except Category.DoesNotExist:
            raise exceptions.InvalidCategory(
                'Нет такой категории по имени или алиасам')
|
flexible
|
{
"blob_id": "63d9a0fa0d0747762e65f6f1e85e53090035454c",
"index": 583,
"step-1": "<mask token>\n\n\nclass Category(Document):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass Category(Document):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n @classmethod\n def get_category_by_text(cls, category_text: str) ->'Category':\n try:\n category = cls.objects.get(Q(name=category_text) | Q(aliases=\n category_text.lower()))\n except Category.DoesNotExist:\n raise exceptions.InvalidCategory(\n 'Нет такой категории по имени или алиасам')\n return category\n",
"step-3": "<mask token>\n\n\nclass Category(Document):\n id = StringField(primary_key=True)\n name = StringField()\n is_base_expenses = BooleanField(default=False)\n aliases = ListField(StringField())\n\n @classmethod\n def get_category_by_text(cls, category_text: str) ->'Category':\n try:\n category = cls.objects.get(Q(name=category_text) | Q(aliases=\n category_text.lower()))\n except Category.DoesNotExist:\n raise exceptions.InvalidCategory(\n 'Нет такой категории по имени или алиасам')\n return category\n",
"step-4": "from mongoengine import Document, StringField, BooleanField, ListField, Q\nimport exceptions\n\n\nclass Category(Document):\n id = StringField(primary_key=True)\n name = StringField()\n is_base_expenses = BooleanField(default=False)\n aliases = ListField(StringField())\n\n @classmethod\n def get_category_by_text(cls, category_text: str) ->'Category':\n try:\n category = cls.objects.get(Q(name=category_text) | Q(aliases=\n category_text.lower()))\n except Category.DoesNotExist:\n raise exceptions.InvalidCategory(\n 'Нет такой категории по имени или алиасам')\n return category\n",
"step-5": null,
"step-ids": [
1,
2,
3,
4
]
}
|
[
1,
2,
3,
4
] |
/home/pushkar/anaconda3/lib/python3.6/_bootlocale.py
|
normal
|
{
"blob_id": "ea4e4c8067d9e910b8d4c6a1c4c01f1ef70d7341",
"index": 7410,
"step-1": "/home/pushkar/anaconda3/lib/python3.6/_bootlocale.py",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
<|reserved_special_token_0|>
def downgrade():
    """Drop the account indexes created by upgrade(), in reverse order."""
    op.drop_index(op.f('ix_account_sub_int'), table_name='account')
    op.drop_index(op.f('ix_account_mac'), table_name='account')
    op.drop_index(op.f('ix_account_interface'), table_name='account')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def upgrade():
    """Add non-unique indexes on account.interface, .mac and .sub_int."""
    op.create_index(op.f('ix_account_interface'), 'account', ['interface'],
        unique=False)
    op.create_index(op.f('ix_account_mac'), 'account', ['mac'], unique=False)
    op.create_index(op.f('ix_account_sub_int'), 'account', ['sub_int'],
        unique=False)
def downgrade():
    """Drop the account indexes created by upgrade(), in reverse order."""
    op.drop_index(op.f('ix_account_sub_int'), table_name='account')
    op.drop_index(op.f('ix_account_mac'), table_name='account')
    op.drop_index(op.f('ix_account_interface'), table_name='account')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
revision = '3e4ee9eaaeaa'
down_revision = '6d58871d74a0'
<|reserved_special_token_0|>
def upgrade():
    """Create non-unique indexes on account.interface, .mac and .sub_int."""
    for column in ('interface', 'mac', 'sub_int'):
        op.create_index(op.f('ix_account_' + column), 'account', [column],
            unique=False)
def downgrade():
    """Drop the account indexes added by upgrade(), newest first."""
    for column in ('sub_int', 'mac', 'interface'):
        op.drop_index(op.f('ix_account_' + column), table_name='account')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
revision = '3e4ee9eaaeaa'
down_revision = '6d58871d74a0'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Create non-unique indexes on account.interface, .mac and .sub_int."""
    for column in ('interface', 'mac', 'sub_int'):
        op.create_index(op.f('ix_account_' + column), 'account', [column],
            unique=False)
def downgrade():
    """Drop the account indexes added by upgrade(), newest first."""
    for column in ('sub_int', 'mac', 'interface'):
        op.drop_index(op.f('ix_account_' + column), table_name='account')
<|reserved_special_token_1|>
"""empty message
Revision ID: 3e4ee9eaaeaa
Revises: 6d58871d74a0
Create Date: 2016-07-25 15:30:38.008238
"""
# revision identifiers, used by Alembic.
revision = '3e4ee9eaaeaa'
down_revision = '6d58871d74a0'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Add non-unique indexes on account.interface, .mac and .sub_int."""
    ### commands auto generated by Alembic - please adjust! ###
    op.create_index(op.f('ix_account_interface'), 'account', ['interface'], unique=False)
    op.create_index(op.f('ix_account_mac'), 'account', ['mac'], unique=False)
    op.create_index(op.f('ix_account_sub_int'), 'account', ['sub_int'], unique=False)
    ### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_index(op.f('ix_account_sub_int'), table_name='account')
op.drop_index(op.f('ix_account_mac'), table_name='account')
op.drop_index(op.f('ix_account_interface'), table_name='account')
### end Alembic commands ###
|
flexible
|
{
"blob_id": "db49313d2bc8b9f0be0dfd48c6065ea0ab3294cb",
"index": 4032,
"step-1": "<mask token>\n\n\ndef downgrade():\n op.drop_index(op.f('ix_account_sub_int'), table_name='account')\n op.drop_index(op.f('ix_account_mac'), table_name='account')\n op.drop_index(op.f('ix_account_interface'), table_name='account')\n",
"step-2": "<mask token>\n\n\ndef upgrade():\n op.create_index(op.f('ix_account_interface'), 'account', ['interface'],\n unique=False)\n op.create_index(op.f('ix_account_mac'), 'account', ['mac'], unique=False)\n op.create_index(op.f('ix_account_sub_int'), 'account', ['sub_int'],\n unique=False)\n\n\ndef downgrade():\n op.drop_index(op.f('ix_account_sub_int'), table_name='account')\n op.drop_index(op.f('ix_account_mac'), table_name='account')\n op.drop_index(op.f('ix_account_interface'), table_name='account')\n",
"step-3": "<mask token>\nrevision = '3e4ee9eaaeaa'\ndown_revision = '6d58871d74a0'\n<mask token>\n\n\ndef upgrade():\n op.create_index(op.f('ix_account_interface'), 'account', ['interface'],\n unique=False)\n op.create_index(op.f('ix_account_mac'), 'account', ['mac'], unique=False)\n op.create_index(op.f('ix_account_sub_int'), 'account', ['sub_int'],\n unique=False)\n\n\ndef downgrade():\n op.drop_index(op.f('ix_account_sub_int'), table_name='account')\n op.drop_index(op.f('ix_account_mac'), table_name='account')\n op.drop_index(op.f('ix_account_interface'), table_name='account')\n",
"step-4": "<mask token>\nrevision = '3e4ee9eaaeaa'\ndown_revision = '6d58871d74a0'\nfrom alembic import op\nimport sqlalchemy as sa\n\n\ndef upgrade():\n op.create_index(op.f('ix_account_interface'), 'account', ['interface'],\n unique=False)\n op.create_index(op.f('ix_account_mac'), 'account', ['mac'], unique=False)\n op.create_index(op.f('ix_account_sub_int'), 'account', ['sub_int'],\n unique=False)\n\n\ndef downgrade():\n op.drop_index(op.f('ix_account_sub_int'), table_name='account')\n op.drop_index(op.f('ix_account_mac'), table_name='account')\n op.drop_index(op.f('ix_account_interface'), table_name='account')\n",
"step-5": "\"\"\"empty message\n\nRevision ID: 3e4ee9eaaeaa\nRevises: 6d58871d74a0\nCreate Date: 2016-07-25 15:30:38.008238\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '3e4ee9eaaeaa'\ndown_revision = '6d58871d74a0'\n\nfrom alembic import op\nimport sqlalchemy as sa\n\n\ndef upgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.create_index(op.f('ix_account_interface'), 'account', ['interface'], unique=False)\n op.create_index(op.f('ix_account_mac'), 'account', ['mac'], unique=False)\n op.create_index(op.f('ix_account_sub_int'), 'account', ['sub_int'], unique=False)\n ### end Alembic commands ###\n\n\ndef downgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.drop_index(op.f('ix_account_sub_int'), table_name='account')\n op.drop_index(op.f('ix_account_mac'), table_name='account')\n op.drop_index(op.f('ix_account_interface'), table_name='account')\n ### end Alembic commands ###\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
import sys
import os
from pyparsing import *
import csv
def parse_cave_details(details):
    """Parse one Bretz-style cave description line into structured fields.

    Returns a pyparsing ParseResults with named results:
    ``name``, ``aliquot``, ``section``, ``township`` (number, direction),
    ``range`` (number, direction), ``county``, ``quad`` (is_on_map, alias,
    name) and ``description``.

    Raises pyparsing.ParseException when the line does not match the grammar.
    """
    ##########################################################################
    # Define the Bretz Grammar.
    # Sample cave description:
    #    Boring Caverns SE1/4 NW1/4 sec. 16, T. 37 N., R. 10 W., Pulaski County Not shown on Waynesville Quadrangle map The mouth of this cave ...\n
    #    Another Cave S1/2 sec. 15, T. 36 N., R. 12 W., Pulaski County Not shown on Waynesville Quadrangle map There are two large caves...\n
    #    Something Bridge Sec. 15 or 22, T. 36 N., R. 13 W., Pulaski County Not shown on Richland Quadrangle map This cave is near Ozark...\n
    #
    # CAVE ::= CAVE_NAME [ALIQUOT_PART] SECTION, TOWNSHIP, RANGE, COUNTY QUAD_MAP DESCRIPTION
    # ALIQUOT_PART ::= (((NE|SE|SW|NW)1/4)|((N|E|S|W)1/2))*
    # SECTION ::= (S|s)ec. num+
    # TOWNSHIP ::= T. num+ TOWNSHIP_DIR.
    # TOWNSHIP_DIR ::= N|S
    # RANGE ::= R. num+ RANGE_DIR.
    # RANGE_DIR ::= E|W
    # COUNTY = WORD+ County
    # QUAD_MAP = (Not s|S)hown on QUAD Quadrangle map
    # QUAD = WORD+
    # DESCRIPTION = WORD+
    # Aliquot part: quarter ("NE1/4") and half ("N1/2") subdivision keywords.
    aliquotQuadrantID = Literal("NE") |\
                        Literal("SE") |\
                        Literal("SW") |\
                        Literal("NW")
    aliquotQuadrantString = aliquotQuadrantID + Suppress("1/4")
    aliquotHalfString = oneOf("N E S W") + Suppress("1/2")
    aliquotPart = Group(ZeroOrMore(aliquotQuadrantString | aliquotHalfString))\
        .setResultsName("aliquot")\
        .setParseAction(lambda kwd: " ".join(kwd[0]))
    # Section: "Sec."/"sec." followed by one or more numbers joined by "or".
    sectionToken = Suppress(oneOf("S s") + Literal("ec") + Optional("."))
    sectionNumber = Word(nums)
    section = Group(
        sectionToken \
        + sectionNumber \
        + ZeroOrMore(Suppress("or") + sectionNumber)
    ).setResultsName("section")
    # Cave name: every word up to (but excluding) the first aliquot/section token.
    afterEndOfCaveName = aliquotHalfString | aliquotQuadrantString | sectionToken
    caveName = Group(OneOrMore(~afterEndOfCaveName + Word(printables)))\
        .setResultsName('name')\
        .setParseAction(lambda name: " ".join(name[0]))
    # Township: "T. <num> <N|S>."
    townshipDirection = oneOf("N S").setResultsName("direction")
    townshipNumber = Word(nums).setResultsName("number")
    township = Suppress("T.") \
        + Group(townshipNumber + townshipDirection).setResultsName("township")\
        + Suppress('.')
    # Range: "R. <num> <E|W>."
    rangeDirection = oneOf("E W").setResultsName("direction")
    rangeNumber = Word(nums).setResultsName("number")
    range_info = Suppress("R.") \
        + Group(rangeNumber + rangeDirection).setResultsName("range")\
        + Suppress('.')
    # County: all words before the literal keyword "County".
    countyKeyword = Literal("County")
    countyName = Group(OneOrMore(~countyKeyword + Word(alphas+"-'.")))\
        .setResultsName("county")\
        .setParseAction(lambda c: " ".join(c[0]))
    county = countyName + Suppress("County")
    # Quadrangle: "(Not s|S)hown [as <alias>] on <name> Quadrangle map".
    # The parse actions turn the leading token into a boolean is_on_map flag.
    notShownOnQuad = (Literal("Not") + Suppress("s"))\
        .setParseAction(lambda x: False)
    shownOnQuad = Literal("S").setParseAction(lambda x: True)
    onKeyword = Literal("on")
    mapAlias = Group(OneOrMore(~onKeyword + Word(printables)))\
        .setParseAction(lambda alias: " ".join(alias[0]))\
        .setResultsName("alias")
    quadrangleStatus = (shownOnQuad | notShownOnQuad).setResultsName("is_on_map")\
        + Suppress("hown") \
        + Optional(Suppress('as') + mapAlias)\
        + Suppress(onKeyword)
    quadrangleKeyword = Literal("Quadrangle") + Literal("map")
    quadrangleName = Group(OneOrMore(~quadrangleKeyword + Word(alphas+"-'.")))\
        .setResultsName("name")\
        .setParseAction(lambda name: " ".join(name[0]))
    quadrangle = Group(quadrangleStatus + quadrangleName).setResultsName("quad") \
        + Suppress(quadrangleKeyword)
    # Free-text description: everything after the quadrangle clause.
    description = Group(ZeroOrMore(Word(alphanums + printables)))\
        .setResultsName("description")\
        .setParseAction(lambda desc: " ".join(desc[0]))
    # Full line grammar, assembled left to right.
    location = caveName \
        + aliquotPart \
        + section + Suppress(',') \
        + township + Suppress(',') \
        + range_info + Suppress(',')\
        + county \
        + quadrangle \
        + description

    return location.parseString(details)
if __name__ == "__main__":
    # Command-line entry point: read a text file of Bretz cave descriptions
    # (one per line), parse each line, and save the results to a CSV file.
    if len(sys.argv) < 2:
        print("ERROR: pass in the filename as the second argument.")
        print("   $ python {0} /path/to/file.txt".format(sys.argv[0]))
        exit()

    filepath = sys.argv[1]
    with open(filepath) as f:
        raw_text = f.read()

    raw_caves = raw_text.split("\n")
    caves = []
    for raw_cave_text in raw_caves:
        raw_cave_text = raw_cave_text.strip()
        if not raw_cave_text:
            continue  # skip blank lines
        try:
            cave = parse_cave_details(raw_cave_text)
            caves.append({
                'Cave name': cave.name,
                'Alias': cave.quad.alias,
                'On map': cave.quad.is_on_map,
                'Quad': cave.quad.name,
                'County': cave.county,
                'State': 'MO',
                'Principal Meridian Code': 5,
                'Township Number': cave.township.number,
                'Township Fraction': 0,
                'Township Direction': cave.township.direction,
                'Range Number': cave.range.number,
                'Range Fraction': 0,
                'Range Direction': cave.range.direction,
                'Section': cave.section[0],
                'Section Division': "".join(cave.aliquot),
                'Township Duplicate': 0,
                'Description': raw_cave_text,
            })
        except Exception:
            # Fix: the original bare `except:` also caught SystemExit and
            # KeyboardInterrupt, and its message referenced `cave.name`
            # even though `cave` is unbound when the very first parse fails
            # (a NameError then masked the real error).  Report the raw
            # line instead.
            print("="*80)
            print("ERROR: unexpected format for the following entry:")
            print(raw_cave_text)
            import traceback
            print(traceback.format_exc())
            print("\t" + "\n\t".join([str(x) for x in sys.exc_info()]))
            print("Skipping this cave for the next one")

    if not caves:
        # Guard: caves[0].keys() below would raise IndexError on empty input.
        print("No caves could be parsed; nothing to save.")
        exit()

    output_path = os.path.basename(filepath).split(".")[0] + ".csv"
    print("#"*80)
    print("{0} caves processed! Saving to '{1}'.".format(len(caves), output_path))
    # NOTE(review): 'wb' is the Python 2 csv idiom; on Python 3 this should
    # be open(output_path, 'w', newline='') -- confirm the target interpreter.
    with open(output_path, 'wb') as f:
        cave_csv = csv.DictWriter(f, fieldnames=caves[0].keys())
        try:
            cave_csv.writeheader()
        except AttributeError:
            # csv.DictWriter gained writeheader() in Python 2.7; on older
            # interpreters emit the header row manually.
            header = {}
            for k in caves[0].keys():
                header[k] = k
            cave_csv.writerow(header)
        cave_csv.writerows(caves)
|
normal
|
{
"blob_id": "1fc1d2e1a7d18b1ef8ee6396210afe47a63ab09f",
"index": 3267,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef parse_cave_details(details):\n aliquotQuadrantID = Literal('NE') | Literal('SE') | Literal('SW'\n ) | Literal('NW')\n aliquotQuadrantString = aliquotQuadrantID + Suppress('1/4')\n aliquotHalfString = oneOf('N E S W') + Suppress('1/2')\n aliquotPart = Group(ZeroOrMore(aliquotQuadrantString | aliquotHalfString)\n ).setResultsName('aliquot').setParseAction(lambda kwd: ' '.join(kwd[0])\n )\n sectionToken = Suppress(oneOf('S s') + Literal('ec') + Optional('.'))\n sectionNumber = Word(nums)\n section = Group(sectionToken + sectionNumber + ZeroOrMore(Suppress('or'\n ) + sectionNumber)).setResultsName('section')\n afterEndOfCaveName = (aliquotHalfString | aliquotQuadrantString |\n sectionToken)\n caveName = Group(OneOrMore(~afterEndOfCaveName + Word(printables))\n ).setResultsName('name').setParseAction(lambda name: ' '.join(name[0]))\n townshipDirection = oneOf('N S').setResultsName('direction')\n townshipNumber = Word(nums).setResultsName('number')\n township = Suppress('T.') + Group(townshipNumber + townshipDirection\n ).setResultsName('township') + Suppress('.')\n rangeDirection = oneOf('E W').setResultsName('direction')\n rangeNumber = Word(nums).setResultsName('number')\n range_info = Suppress('R.') + Group(rangeNumber + rangeDirection\n ).setResultsName('range') + Suppress('.')\n countyKeyword = Literal('County')\n countyName = Group(OneOrMore(~countyKeyword + Word(alphas + \"-'.\"))\n ).setResultsName('county').setParseAction(lambda c: ' '.join(c[0]))\n county = countyName + Suppress('County')\n notShownOnQuad = (Literal('Not') + Suppress('s')).setParseAction(lambda\n x: False)\n shownOnQuad = Literal('S').setParseAction(lambda x: True)\n onKeyword = Literal('on')\n mapAlias = Group(OneOrMore(~onKeyword + Word(printables))).setParseAction(\n lambda alias: ' '.join(alias[0])).setResultsName('alias')\n quadrangleStatus = (shownOnQuad | notShownOnQuad).setResultsName(\n 'is_on_map') + Suppress('hown') + Optional(Suppress('as') + 
mapAlias\n ) + Suppress(onKeyword)\n quadrangleKeyword = Literal('Quadrangle') + Literal('map')\n quadrangleName = Group(OneOrMore(~quadrangleKeyword + Word(alphas + \"-'.\"))\n ).setResultsName('name').setParseAction(lambda name: ' '.join(name[0]))\n quadrangle = Group(quadrangleStatus + quadrangleName).setResultsName('quad'\n ) + Suppress(quadrangleKeyword)\n description = Group(ZeroOrMore(Word(alphanums + printables))\n ).setResultsName('description').setParseAction(lambda desc: ' '.\n join(desc[0]))\n location = caveName + aliquotPart + section + Suppress(','\n ) + township + Suppress(',') + range_info + Suppress(','\n ) + county + quadrangle + description\n return location.parseString(details)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef parse_cave_details(details):\n aliquotQuadrantID = Literal('NE') | Literal('SE') | Literal('SW'\n ) | Literal('NW')\n aliquotQuadrantString = aliquotQuadrantID + Suppress('1/4')\n aliquotHalfString = oneOf('N E S W') + Suppress('1/2')\n aliquotPart = Group(ZeroOrMore(aliquotQuadrantString | aliquotHalfString)\n ).setResultsName('aliquot').setParseAction(lambda kwd: ' '.join(kwd[0])\n )\n sectionToken = Suppress(oneOf('S s') + Literal('ec') + Optional('.'))\n sectionNumber = Word(nums)\n section = Group(sectionToken + sectionNumber + ZeroOrMore(Suppress('or'\n ) + sectionNumber)).setResultsName('section')\n afterEndOfCaveName = (aliquotHalfString | aliquotQuadrantString |\n sectionToken)\n caveName = Group(OneOrMore(~afterEndOfCaveName + Word(printables))\n ).setResultsName('name').setParseAction(lambda name: ' '.join(name[0]))\n townshipDirection = oneOf('N S').setResultsName('direction')\n townshipNumber = Word(nums).setResultsName('number')\n township = Suppress('T.') + Group(townshipNumber + townshipDirection\n ).setResultsName('township') + Suppress('.')\n rangeDirection = oneOf('E W').setResultsName('direction')\n rangeNumber = Word(nums).setResultsName('number')\n range_info = Suppress('R.') + Group(rangeNumber + rangeDirection\n ).setResultsName('range') + Suppress('.')\n countyKeyword = Literal('County')\n countyName = Group(OneOrMore(~countyKeyword + Word(alphas + \"-'.\"))\n ).setResultsName('county').setParseAction(lambda c: ' '.join(c[0]))\n county = countyName + Suppress('County')\n notShownOnQuad = (Literal('Not') + Suppress('s')).setParseAction(lambda\n x: False)\n shownOnQuad = Literal('S').setParseAction(lambda x: True)\n onKeyword = Literal('on')\n mapAlias = Group(OneOrMore(~onKeyword + Word(printables))).setParseAction(\n lambda alias: ' '.join(alias[0])).setResultsName('alias')\n quadrangleStatus = (shownOnQuad | notShownOnQuad).setResultsName(\n 'is_on_map') + Suppress('hown') + Optional(Suppress('as') + 
mapAlias\n ) + Suppress(onKeyword)\n quadrangleKeyword = Literal('Quadrangle') + Literal('map')\n quadrangleName = Group(OneOrMore(~quadrangleKeyword + Word(alphas + \"-'.\"))\n ).setResultsName('name').setParseAction(lambda name: ' '.join(name[0]))\n quadrangle = Group(quadrangleStatus + quadrangleName).setResultsName('quad'\n ) + Suppress(quadrangleKeyword)\n description = Group(ZeroOrMore(Word(alphanums + printables))\n ).setResultsName('description').setParseAction(lambda desc: ' '.\n join(desc[0]))\n location = caveName + aliquotPart + section + Suppress(','\n ) + township + Suppress(',') + range_info + Suppress(','\n ) + county + quadrangle + description\n return location.parseString(details)\n\n\nif __name__ == '__main__':\n if len(sys.argv) < 2:\n print('ERROR: pass in the filename as the second argument.')\n print(' $ python {0} /path/to/file.txt'.format(sys.argv[0]))\n exit()\n filepath = sys.argv[1]\n with open(filepath) as f:\n raw_text = f.read()\n raw_caves = raw_text.split('\\n')\n caves = []\n for raw_cave_text in raw_caves:\n raw_cave_text = raw_cave_text.strip()\n if raw_cave_text:\n try:\n cave = parse_cave_details(raw_cave_text)\n caves.append({'Cave name': cave.name, 'Alias': cave.quad.\n alias, 'On map': cave.quad.is_on_map, 'Quad': cave.quad\n .name, 'County': cave.county, 'State': 'MO',\n 'Principal Meridian Code': 5, 'Township Number': cave.\n township.number, 'Township Fraction': 0,\n 'Township Direction': cave.township.direction,\n 'Range Number': cave.range.number, 'Range Fraction': 0,\n 'Range Direction': cave.range.direction, 'Section':\n cave.section[0], 'Section Division': ''.join(cave.\n aliquot), 'Township Duplicate': 0, 'Description':\n raw_cave_text})\n except:\n print('=' * 80)\n print('ERROR: unexpected format for {0}'.format(cave.name))\n print(raw_cave_text)\n import traceback\n print(traceback.format_exc())\n print('\\t' + '\\n\\t'.join([str(x) for x in sys.exc_info()]))\n print('Skipping this cave for the next one')\n 
else:\n sections = ' or '.join(cave.section)\n output_path = os.path.basename(filepath).split('.')[0] + '.csv'\n print('#' * 80)\n print(\"{0} caves processed! Saving to '{1}'.\".format(len(caves),\n output_path))\n with open(output_path, 'wb') as f:\n cave_csv = csv.DictWriter(f, fieldnames=caves[0].keys())\n try:\n cave_csv.writeheader()\n except:\n header = {}\n for k in caves[0].keys():\n header[k] = k\n cave_csv.writerow(header)\n cave_csv.writerows(caves)\n",
"step-4": "import sys\nimport os\nfrom pyparsing import *\nimport csv\n\n\ndef parse_cave_details(details):\n aliquotQuadrantID = Literal('NE') | Literal('SE') | Literal('SW'\n ) | Literal('NW')\n aliquotQuadrantString = aliquotQuadrantID + Suppress('1/4')\n aliquotHalfString = oneOf('N E S W') + Suppress('1/2')\n aliquotPart = Group(ZeroOrMore(aliquotQuadrantString | aliquotHalfString)\n ).setResultsName('aliquot').setParseAction(lambda kwd: ' '.join(kwd[0])\n )\n sectionToken = Suppress(oneOf('S s') + Literal('ec') + Optional('.'))\n sectionNumber = Word(nums)\n section = Group(sectionToken + sectionNumber + ZeroOrMore(Suppress('or'\n ) + sectionNumber)).setResultsName('section')\n afterEndOfCaveName = (aliquotHalfString | aliquotQuadrantString |\n sectionToken)\n caveName = Group(OneOrMore(~afterEndOfCaveName + Word(printables))\n ).setResultsName('name').setParseAction(lambda name: ' '.join(name[0]))\n townshipDirection = oneOf('N S').setResultsName('direction')\n townshipNumber = Word(nums).setResultsName('number')\n township = Suppress('T.') + Group(townshipNumber + townshipDirection\n ).setResultsName('township') + Suppress('.')\n rangeDirection = oneOf('E W').setResultsName('direction')\n rangeNumber = Word(nums).setResultsName('number')\n range_info = Suppress('R.') + Group(rangeNumber + rangeDirection\n ).setResultsName('range') + Suppress('.')\n countyKeyword = Literal('County')\n countyName = Group(OneOrMore(~countyKeyword + Word(alphas + \"-'.\"))\n ).setResultsName('county').setParseAction(lambda c: ' '.join(c[0]))\n county = countyName + Suppress('County')\n notShownOnQuad = (Literal('Not') + Suppress('s')).setParseAction(lambda\n x: False)\n shownOnQuad = Literal('S').setParseAction(lambda x: True)\n onKeyword = Literal('on')\n mapAlias = Group(OneOrMore(~onKeyword + Word(printables))).setParseAction(\n lambda alias: ' '.join(alias[0])).setResultsName('alias')\n quadrangleStatus = (shownOnQuad | notShownOnQuad).setResultsName(\n 'is_on_map') + 
Suppress('hown') + Optional(Suppress('as') + mapAlias\n ) + Suppress(onKeyword)\n quadrangleKeyword = Literal('Quadrangle') + Literal('map')\n quadrangleName = Group(OneOrMore(~quadrangleKeyword + Word(alphas + \"-'.\"))\n ).setResultsName('name').setParseAction(lambda name: ' '.join(name[0]))\n quadrangle = Group(quadrangleStatus + quadrangleName).setResultsName('quad'\n ) + Suppress(quadrangleKeyword)\n description = Group(ZeroOrMore(Word(alphanums + printables))\n ).setResultsName('description').setParseAction(lambda desc: ' '.\n join(desc[0]))\n location = caveName + aliquotPart + section + Suppress(','\n ) + township + Suppress(',') + range_info + Suppress(','\n ) + county + quadrangle + description\n return location.parseString(details)\n\n\nif __name__ == '__main__':\n if len(sys.argv) < 2:\n print('ERROR: pass in the filename as the second argument.')\n print(' $ python {0} /path/to/file.txt'.format(sys.argv[0]))\n exit()\n filepath = sys.argv[1]\n with open(filepath) as f:\n raw_text = f.read()\n raw_caves = raw_text.split('\\n')\n caves = []\n for raw_cave_text in raw_caves:\n raw_cave_text = raw_cave_text.strip()\n if raw_cave_text:\n try:\n cave = parse_cave_details(raw_cave_text)\n caves.append({'Cave name': cave.name, 'Alias': cave.quad.\n alias, 'On map': cave.quad.is_on_map, 'Quad': cave.quad\n .name, 'County': cave.county, 'State': 'MO',\n 'Principal Meridian Code': 5, 'Township Number': cave.\n township.number, 'Township Fraction': 0,\n 'Township Direction': cave.township.direction,\n 'Range Number': cave.range.number, 'Range Fraction': 0,\n 'Range Direction': cave.range.direction, 'Section':\n cave.section[0], 'Section Division': ''.join(cave.\n aliquot), 'Township Duplicate': 0, 'Description':\n raw_cave_text})\n except:\n print('=' * 80)\n print('ERROR: unexpected format for {0}'.format(cave.name))\n print(raw_cave_text)\n import traceback\n print(traceback.format_exc())\n print('\\t' + '\\n\\t'.join([str(x) for x in sys.exc_info()]))\n 
print('Skipping this cave for the next one')\n else:\n sections = ' or '.join(cave.section)\n output_path = os.path.basename(filepath).split('.')[0] + '.csv'\n print('#' * 80)\n print(\"{0} caves processed! Saving to '{1}'.\".format(len(caves),\n output_path))\n with open(output_path, 'wb') as f:\n cave_csv = csv.DictWriter(f, fieldnames=caves[0].keys())\n try:\n cave_csv.writeheader()\n except:\n header = {}\n for k in caves[0].keys():\n header[k] = k\n cave_csv.writerow(header)\n cave_csv.writerows(caves)\n",
"step-5": "import sys\r\nimport os\r\nfrom pyparsing import *\r\nimport csv\r\n\r\n\r\ndef parse_cave_details(details):\r\n ##########################################################################\r\n # Define the Bretz Grammar.\r\n # Sample cave description:\r\n # Boring Caverns SE1/4 NW1/4 sec. 16, T. 37 N., R. 10 W., Pulaski County Not shown on Waynesville Quadrangle map The mouth of this cave ...\\n\r\n # Another Cave S1/2 sec. 15, T. 36 N., R. 12 W., Pulaski County Not shown on Waynesville Quadrangle map There are two large caves...\\n\r\n # Something Bridge Sec. 15 or 22, T. 36 N., R. 13 W., Pulaski County Not shown on Richland Quadrangle map This cave is near Ozark...\\n\r\n #\r\n # CAVE ::= CAVE_NAME [ALIQUOT_PART] SECTION, TOWNSHIP, RANGE, COUNTY QUAD_MAP DESCRIPTION\r\n # ALIQUOT_PART ::= (((NE|SE|SW|NW)1/4)|((N|E|S|W)1/2))*\r\n # SECTION ::= (S|s)ec. num+\r\n # TOWNSHIP ::= T. num+ TOWNSHIP_DIR.\r\n # TOWNSHIP_DIR ::= N|S\r\n # RANGE ::= R. num+ RANGE_DIR.\r\n # RANGE_DIR ::= E|W\r\n # COUNTY = WORD+ County\r\n # QUAD_MAP = (Not s|S)hown on QUAD Quadrangle map\r\n # QUAD = WORD+\r\n # DESCRIPTION = WORD+\r\n aliquotQuadrantID = Literal(\"NE\") |\\\r\n Literal(\"SE\") |\\\r\n Literal(\"SW\") |\\\r\n Literal(\"NW\")\r\n aliquotQuadrantString = aliquotQuadrantID + Suppress(\"1/4\")\r\n aliquotHalfString = oneOf(\"N E S W\") + Suppress(\"1/2\")\r\n aliquotPart = Group(ZeroOrMore(aliquotQuadrantString | aliquotHalfString))\\\r\n .setResultsName(\"aliquot\")\\\r\n .setParseAction(lambda kwd: \" \".join(kwd[0]))\r\n\r\n sectionToken = Suppress(oneOf(\"S s\") + Literal(\"ec\") + Optional(\".\"))\r\n sectionNumber = Word(nums)\r\n section = Group(\r\n sectionToken \\\r\n + sectionNumber \\\r\n + ZeroOrMore(Suppress(\"or\") + sectionNumber)\r\n ).setResultsName(\"section\")\r\n\r\n afterEndOfCaveName = aliquotHalfString | aliquotQuadrantString | sectionToken\r\n caveName = Group(OneOrMore(~afterEndOfCaveName + Word(printables)))\\\r\n 
.setResultsName('name')\\\r\n .setParseAction(lambda name: \" \".join(name[0]))\r\n\r\n townshipDirection = oneOf(\"N S\").setResultsName(\"direction\")\r\n townshipNumber = Word(nums).setResultsName(\"number\")\r\n township = Suppress(\"T.\") \\\r\n + Group(townshipNumber + townshipDirection).setResultsName(\"township\")\\\r\n + Suppress('.')\r\n\r\n rangeDirection = oneOf(\"E W\").setResultsName(\"direction\")\r\n rangeNumber = Word(nums).setResultsName(\"number\")\r\n range_info = Suppress(\"R.\") \\\r\n + Group(rangeNumber + rangeDirection).setResultsName(\"range\")\\\r\n + Suppress('.')\r\n\r\n countyKeyword = Literal(\"County\")\r\n countyName = Group(OneOrMore(~countyKeyword + Word(alphas+\"-'.\")))\\\r\n .setResultsName(\"county\")\\\r\n .setParseAction(lambda c: \" \".join(c[0]))\r\n county = countyName + Suppress(\"County\")\r\n\r\n notShownOnQuad = (Literal(\"Not\") + Suppress(\"s\"))\\\r\n .setParseAction(lambda x: False)\r\n shownOnQuad = Literal(\"S\").setParseAction(lambda x: True)\r\n onKeyword = Literal(\"on\")\r\n mapAlias = Group(OneOrMore(~onKeyword + Word(printables)))\\\r\n .setParseAction(lambda alias: \" \".join(alias[0]))\\\r\n .setResultsName(\"alias\")\r\n quadrangleStatus = (shownOnQuad | notShownOnQuad).setResultsName(\"is_on_map\")\\\r\n + Suppress(\"hown\") \\\r\n + Optional(Suppress('as') + mapAlias)\\\r\n + Suppress(onKeyword)\r\n quadrangleKeyword = Literal(\"Quadrangle\") + Literal(\"map\")\r\n quadrangleName = Group(OneOrMore(~quadrangleKeyword + Word(alphas+\"-'.\")))\\\r\n .setResultsName(\"name\")\\\r\n .setParseAction(lambda name: \" \".join(name[0]))\r\n quadrangle = Group(quadrangleStatus + quadrangleName).setResultsName(\"quad\") \\\r\n + Suppress(quadrangleKeyword)\r\n\r\n description = Group(ZeroOrMore(Word(alphanums + printables)))\\\r\n .setResultsName(\"description\")\\\r\n .setParseAction(lambda desc: \" \".join(desc[0]))\r\n\r\n location = caveName \\\r\n + aliquotPart \\\r\n + section + Suppress(',') \\\r\n + 
township + Suppress(',') \\\r\n + range_info + Suppress(',')\\\r\n + county \\\r\n + quadrangle \\\r\n + description\r\n\r\n return location.parseString(details)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n if len(sys.argv) < 2:\r\n print(\"ERROR: pass in the filename as the second argument.\")\r\n print(\" $ python {0} /path/to/file.txt\".format(sys.argv[0]))\r\n exit()\r\n\r\n filepath = sys.argv[1]\r\n with open(filepath) as f:\r\n raw_text = f.read()\r\n\r\n raw_caves = raw_text.split(\"\\n\")\r\n caves = []\r\n for raw_cave_text in raw_caves:\r\n raw_cave_text = raw_cave_text.strip()\r\n if raw_cave_text:\r\n try:\r\n cave = parse_cave_details(raw_cave_text)\r\n caves.append({\r\n 'Cave name': cave.name,\r\n 'Alias': cave.quad.alias,\r\n 'On map': cave.quad.is_on_map,\r\n 'Quad': cave.quad.name,\r\n 'County': cave.county,\r\n 'State': 'MO',\r\n 'Principal Meridian Code': 5,\r\n 'Township Number': cave.township.number,\r\n 'Township Fraction': 0,\r\n 'Township Direction': cave.township.direction,\r\n 'Range Number': cave.range.number,\r\n 'Range Fraction': 0,\r\n 'Range Direction': cave.range.direction,\r\n 'Section': cave.section[0],\r\n 'Section Division': \"\".join(cave.aliquot),\r\n 'Township Duplicate': 0,\r\n 'Description': raw_cave_text,\r\n })\r\n\r\n except:\r\n print(\"=\"*80)\r\n print(\"ERROR: unexpected format for {0}\".format(cave.name))\r\n print(raw_cave_text)\r\n import traceback\r\n print(traceback.format_exc())\r\n print(\"\\t\" + \"\\n\\t\".join([str(x) for x in sys.exc_info()]))\r\n print(\"Skipping this cave for the next one\")\r\n else:\r\n sections = \" or \".join(cave.section)\r\n #print(\"=\"*80)\r\n #print(\"{1} := {0.aliquot} Sect. {2}, T. {0.township.number} {0.township.direction}., R. 
{0.range.number} {0.range.direction}., in {0.county} County on the {0.quad.name} quad map.\".format(cave, cave.name, sections))\r\n #print(\" Marked on map as {0}\".format(cave.quad.alias if cave.quad.alias else cave.name) if cave.quad.is_on_map else \" Not on map\")\r\n\r\n output_path = os.path.basename(filepath).split(\".\")[0] + \".csv\"\r\n print(\"#\"*80)\r\n print(\"{0} caves processed! Saving to '{1}'.\".format(len(caves), output_path))\r\n with open(output_path, 'wb') as f:\r\n cave_csv = csv.DictWriter(f, fieldnames=caves[0].keys())\r\n try:\r\n cave_csv.writeheader()\r\n \r\n except: # Versions before 2.7 of Python do not have csv with writeheader().\r\n header = {}\r\n for k in caves[0].keys():\r\n header[k] = k\r\n \r\n cave_csv.writerow(header)\r\n\r\n cave_csv.writerows(caves)\r\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class _HINDERED(_HINDER):
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class _HINDERED(_HINDER):
    """Vocabulary entry for 'hindered', an inflected form of 'hinder'."""

    def __init__(self):
        # Delegate base setup to the parent verb entry, then record this
        # form's own metadata.
        _HINDER.__init__(self)
        self.name = 'HINDERED'  # surface form of this word
        self.specie = 'verbs'   # word-class bucket
        self.basic = 'hinder'   # base (dictionary) form
        self.jsondata = {}      # auxiliary data; empty by default
<|reserved_special_token_1|>
from xai.brain.wordbase.verbs._hinder import _HINDER
class _HINDERED(_HINDER):
    """Vocabulary entry for 'hindered', an inflected form of 'hinder'."""

    def __init__(self):
        # Initialise the base-verb entry before setting this form's fields.
        _HINDER.__init__(self)
        self.name = 'HINDERED'  # surface form of this word
        self.specie = 'verbs'   # word-class bucket
        self.basic = 'hinder'   # base (dictionary) form
        self.jsondata = {}      # auxiliary data; empty by default
<|reserved_special_token_1|>
from xai.brain.wordbase.verbs._hinder import _HINDER
# class header
class _HINDERED(_HINDER):
    """Word entry for 'hindered', derived from the base verb 'hinder'."""

    def __init__(self):
        # Run the parent entry's initialisation first.
        _HINDER.__init__(self)
        self.name = "HINDERED"
        self.specie = "verbs"
        self.basic = "hinder"
        self.jsondata = {}
|
flexible
|
{
"blob_id": "420beba5b6fd575ab9be0c907ae0698ba7be5220",
"index": 4622,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass _HINDERED(_HINDER):\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass _HINDERED(_HINDER):\n\n def __init__(self):\n _HINDER.__init__(self)\n self.name = 'HINDERED'\n self.specie = 'verbs'\n self.basic = 'hinder'\n self.jsondata = {}\n",
"step-4": "from xai.brain.wordbase.verbs._hinder import _HINDER\n\n\nclass _HINDERED(_HINDER):\n\n def __init__(self):\n _HINDER.__init__(self)\n self.name = 'HINDERED'\n self.specie = 'verbs'\n self.basic = 'hinder'\n self.jsondata = {}\n",
"step-5": "\n\nfrom xai.brain.wordbase.verbs._hinder import _HINDER\n\n#calss header\nclass _HINDERED(_HINDER, ):\n\tdef __init__(self,): \n\t\t_HINDER.__init__(self)\n\t\tself.name = \"HINDERED\"\n\t\tself.specie = 'verbs'\n\t\tself.basic = \"hinder\"\n\t\tself.jsondata = {}\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
"""
クリップボードのamazonのURLから不要な部分を削除する
"""
# -*- coding: utf-8 -*-
import re
import pyperclip as clip
from urllib.parse import urlparse
#print(clip.paste())
def urlShortner():
    """Shorten an Amazon product URL found on the clipboard.

    Reads the clipboard, validates that it holds an http(s) URL, strips the
    tracking tail between '/dp/<ASIN>' and '/ref', and copies the shortened
    URL back to the clipboard.

    Returns:
        1 when the clipboard does not contain a URL, otherwise None.
    """
    # Fix: the original read the clipboard twice and left `text` unbound
    # (NameError) when the clipboard was empty.  Read once and bail out early.
    text = clip.paste()
    if not text:
        print("This is not url.")
        return 1

    o = urlparse(text)
    if o.scheme not in ('http', 'https'):
        print("This is not url.")
        return 1

    matchObjDp = re.search(r'/dp/', text)
    matchObjRef = re.search(r'/ref', text)
    if matchObjDp and matchObjRef:
        # Keep host + '/dp/<ASIN>/'; slice replaces the original
        # character-by-character while loop.
        newUrl = "https://www.amazon.co.jp" + text[matchObjDp.start():matchObjRef.start()]
        # Preserved quirk of the original: this also strips 'www' from the
        # host, yielding 'https://.amazon.co.jp/...'.
        shortUrl = newUrl.replace("www", "")
        print("shortUrl:" + shortUrl)
        clip.copy(shortUrl)
    else:
        print("This url is not an introduction page of books on the amazon website.")


urlShortner()
|
normal
|
{
"blob_id": "c3c82b9ba198b7818cc8e63710140bbb6e28a9ea",
"index": 6628,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef urlShortner():\n if clip.paste():\n text = clip.paste()\n o = urlparse(text)\n if not (o.scheme == 'http' or o.scheme == 'https'):\n print('This is not url.')\n return 1\n newUrl = 'https://www.amazon.co.jp'\n urlLen = len(text)\n matchObj = re.search('https://www.amazon.co.jp', text)\n matchObjDp = re.search('/dp/', text)\n matchObjRef = re.search('/ref', text)\n \"\"\"\"\n if matchObjRef:\n print (matchObjDp.start()) # マッチした文字列の開始位置: 3\n\n print(type(matchObj.start()))\n print(type(matchObj.end()))\n\n \"\"\"\n if matchObjDp and matchObjRef:\n i: int = matchObjDp.start()\n while i < matchObjRef.start():\n newUrl = newUrl + text[i]\n i = i + 1\n shortUrl = newUrl.replace('www', '')\n print('shortUrl:' + shortUrl)\n clip.copy(shortUrl)\n else:\n print(\n 'This url is not an introduction page of books on the amazon website.'\n )\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef urlShortner():\n if clip.paste():\n text = clip.paste()\n o = urlparse(text)\n if not (o.scheme == 'http' or o.scheme == 'https'):\n print('This is not url.')\n return 1\n newUrl = 'https://www.amazon.co.jp'\n urlLen = len(text)\n matchObj = re.search('https://www.amazon.co.jp', text)\n matchObjDp = re.search('/dp/', text)\n matchObjRef = re.search('/ref', text)\n \"\"\"\"\n if matchObjRef:\n print (matchObjDp.start()) # マッチした文字列の開始位置: 3\n\n print(type(matchObj.start()))\n print(type(matchObj.end()))\n\n \"\"\"\n if matchObjDp and matchObjRef:\n i: int = matchObjDp.start()\n while i < matchObjRef.start():\n newUrl = newUrl + text[i]\n i = i + 1\n shortUrl = newUrl.replace('www', '')\n print('shortUrl:' + shortUrl)\n clip.copy(shortUrl)\n else:\n print(\n 'This url is not an introduction page of books on the amazon website.'\n )\n\n\nurlShortner()\n",
"step-4": "<mask token>\nimport re\nimport pyperclip as clip\nfrom urllib.parse import urlparse\n\n\ndef urlShortner():\n if clip.paste():\n text = clip.paste()\n o = urlparse(text)\n if not (o.scheme == 'http' or o.scheme == 'https'):\n print('This is not url.')\n return 1\n newUrl = 'https://www.amazon.co.jp'\n urlLen = len(text)\n matchObj = re.search('https://www.amazon.co.jp', text)\n matchObjDp = re.search('/dp/', text)\n matchObjRef = re.search('/ref', text)\n \"\"\"\"\n if matchObjRef:\n print (matchObjDp.start()) # マッチした文字列の開始位置: 3\n\n print(type(matchObj.start()))\n print(type(matchObj.end()))\n\n \"\"\"\n if matchObjDp and matchObjRef:\n i: int = matchObjDp.start()\n while i < matchObjRef.start():\n newUrl = newUrl + text[i]\n i = i + 1\n shortUrl = newUrl.replace('www', '')\n print('shortUrl:' + shortUrl)\n clip.copy(shortUrl)\n else:\n print(\n 'This url is not an introduction page of books on the amazon website.'\n )\n\n\nurlShortner()\n",
"step-5": "\n\"\"\"\nクリップボードのamazonのURLから不要な部分を削除する\n\"\"\"\n# -*- coding: utf-8 -*-\n\nimport re\nimport pyperclip as clip\nfrom urllib.parse import urlparse\n\n#print(clip.paste())\n\ndef urlShortner():\n# text = \"https://www.amazon.co.jp/Jupyter-Cookbook-Dan-Toomey/dp/1788839447/ref=sr_1_5?s=books&ie=UTF8&qid=1535164277&sr=1-5&keywords=Jupyter\"\n\n if clip.paste():\n text = clip.paste()\n o = urlparse(text)\n# print(o.scheme)\n\n if not (o.scheme == 'http' or o.scheme == 'https') :\n print(\"This is not url.\")\n return 1\n\n newUrl = \"https://www.amazon.co.jp\"\n\n urlLen = len(text)\n #print(urlLen)\n\n matchObj = re.search(r'https://www.amazon.co.jp', text)\n matchObjDp = re.search(r'/dp/', text)\n matchObjRef = re.search(r'/ref', text)\n\n \"\"\"\"\n if matchObjRef:\n print (matchObjDp.start()) # マッチした文字列の開始位置: 3\n\n print(type(matchObj.start()))\n print(type(matchObj.end()))\n\n \"\"\"\n\n if matchObjDp and matchObjRef:\n i:int = matchObjDp.start()\n #print(\"2ndStart:\" + str(i) )\n while i < matchObjRef.start():\n newUrl = newUrl + text[i]\n i= i+1\n\n shortUrl = newUrl.replace(\"www\",\"\")\n\n print (\"shortUrl:\" + shortUrl)\n\n clip.copy(shortUrl)\n\n else:\n print (\"This url is not an introduction page of books on the amazon website.\")\n\n\nurlShortner()\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
app_name = 'user'
urlpatterns = [path('detalhes/', user_views.painel, name='painel'), path(
'produto/ajax/delete_prod/', prod_views.deleteProd, name='deleteProd'),
path('produto/', user_views.painelProdutos, name='painel_produtos'),
path('<int:id_produto>', prod_views.detalheProduto, name='detalhe_prod'
), path('ajax/delete_prod/', prod_views.deleteProd, name='deleteProd')]
<|reserved_special_token_1|>
from django.urls import path
from . import views as user_views
from produtos import views as prod_views
from django.contrib.auth import views as auth_views
app_name = 'user'
urlpatterns = [path('detalhes/', user_views.painel, name='painel'), path(
'produto/ajax/delete_prod/', prod_views.deleteProd, name='deleteProd'),
path('produto/', user_views.painelProdutos, name='painel_produtos'),
path('<int:id_produto>', prod_views.detalheProduto, name='detalhe_prod'
), path('ajax/delete_prod/', prod_views.deleteProd, name='deleteProd')]
<|reserved_special_token_1|>
from django.urls import path
from . import views as user_views
from produtos import views as prod_views
from django.contrib.auth import views as auth_views
app_name = 'user'
urlpatterns = [
path('detalhes/', user_views.painel, name="painel"),
path('produto/ajax/delete_prod/', prod_views.deleteProd, name="deleteProd"),
path('produto/', user_views.painelProdutos, name="painel_produtos"),
path('<int:id_produto>', prod_views.detalheProduto, name="detalhe_prod"),
path('ajax/delete_prod/', prod_views.deleteProd, name='deleteProd'),
]
|
flexible
|
{
"blob_id": "a7f2791e359b848a217beadc77fc983d971ef8b0",
"index": 8436,
"step-1": "<mask token>\n",
"step-2": "<mask token>\napp_name = 'user'\nurlpatterns = [path('detalhes/', user_views.painel, name='painel'), path(\n 'produto/ajax/delete_prod/', prod_views.deleteProd, name='deleteProd'),\n path('produto/', user_views.painelProdutos, name='painel_produtos'),\n path('<int:id_produto>', prod_views.detalheProduto, name='detalhe_prod'\n ), path('ajax/delete_prod/', prod_views.deleteProd, name='deleteProd')]\n",
"step-3": "from django.urls import path\nfrom . import views as user_views\nfrom produtos import views as prod_views\nfrom django.contrib.auth import views as auth_views\napp_name = 'user'\nurlpatterns = [path('detalhes/', user_views.painel, name='painel'), path(\n 'produto/ajax/delete_prod/', prod_views.deleteProd, name='deleteProd'),\n path('produto/', user_views.painelProdutos, name='painel_produtos'),\n path('<int:id_produto>', prod_views.detalheProduto, name='detalhe_prod'\n ), path('ajax/delete_prod/', prod_views.deleteProd, name='deleteProd')]\n",
"step-4": "from django.urls import path\nfrom . import views as user_views\nfrom produtos import views as prod_views\nfrom django.contrib.auth import views as auth_views\n\napp_name = 'user'\n\nurlpatterns = [\n path('detalhes/', user_views.painel, name=\"painel\"),\n path('produto/ajax/delete_prod/', prod_views.deleteProd, name=\"deleteProd\"),\n path('produto/', user_views.painelProdutos, name=\"painel_produtos\"),\n path('<int:id_produto>', prod_views.detalheProduto, name=\"detalhe_prod\"),\n path('ajax/delete_prod/', prod_views.deleteProd, name='deleteProd'),\n]",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
#-*- coding: utf-8 -*-
import django
if django.get_version() <= '1.3.1':
import apps.settings as settings
from django.core.management import setup_environ
setup_environ(settings)
elif django.get_version() >= '1.7.0':
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "apps.settings")
django.setup()
elif django.get_version() >= '1.6.0': #ubuntu 14.04 used 1.6.?
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "apps.settings")
from django.conf import settings
import os
import os.path
import traceback
cur_dir = os.path.dirname(os.path.abspath(__file__))
LOGFILE = os.path.join(cur_dir,"logs","oneclick.log")
file_list = ['import_test', 'import_test_dev', 'import_test_local','settings', 'manage', 'settings_dev', 'manage_dev', 'settings_stg','manage_stg', 'settings_local','manage_local']
exclude_dir = ['.svn', 'realtime_pvp']
def run_dir(py_dir):
log_f = open(LOGFILE, 'a+')
try:
for root, dirs, files in os.walk(py_dir):
if os.path.basename(root) not in exclude_dir:
for f in files:
name, ext = os.path.splitext(f)
if ext == '.py' and name not in file_list:
root = root.replace(py_dir, '').replace('/', '.').replace('\\', '.')
print root, name
log_f.write(str(root) + str(name) + '\n')
if root:
__import__('apps.' + root, globals(), locals(), [name], -1)
else:
__import__('apps.' + name, globals(), locals(), [], -1)
log_f.close()
except:
err_info = traceback.format_exc()
print err_info
log_f.write(err_info+ '\n')
log_f.close()
if __name__ == '__main__':
run_dir(settings.BASE_ROOT+'/apps/')
|
normal
|
{
"blob_id": "8894b73829978cec29aab6ee8bf09700da7fb59f",
"index": 5659,
"step-1": "#-*- coding: utf-8 -*-\n\nimport django\n\nif django.get_version() <= '1.3.1':\n import apps.settings as settings\n from django.core.management import setup_environ\n setup_environ(settings)\nelif django.get_version() >= '1.7.0': \n import os\n os.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"apps.settings\")\n django.setup()\nelif django.get_version() >= '1.6.0': #ubuntu 14.04 used 1.6.?\n import os\n os.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"apps.settings\")\n from django.conf import settings\n\n\nimport os\nimport os.path\nimport traceback\n\ncur_dir = os.path.dirname(os.path.abspath(__file__))\nLOGFILE = os.path.join(cur_dir,\"logs\",\"oneclick.log\")\nfile_list = ['import_test', 'import_test_dev', 'import_test_local','settings', 'manage', 'settings_dev', 'manage_dev', 'settings_stg','manage_stg', 'settings_local','manage_local']\nexclude_dir = ['.svn', 'realtime_pvp']\n\ndef run_dir(py_dir):\n log_f = open(LOGFILE, 'a+')\n try:\n for root, dirs, files in os.walk(py_dir):\n if os.path.basename(root) not in exclude_dir:\n for f in files:\n name, ext = os.path.splitext(f)\n if ext == '.py' and name not in file_list:\n root = root.replace(py_dir, '').replace('/', '.').replace('\\\\', '.')\n print root, name\n log_f.write(str(root) + str(name) + '\\n')\n if root:\n __import__('apps.' + root, globals(), locals(), [name], -1)\n else:\n __import__('apps.' + name, globals(), locals(), [], -1)\n log_f.close()\n except:\n err_info = traceback.format_exc()\n print err_info\n log_f.write(err_info+ '\\n')\n log_f.close()\n\nif __name__ == '__main__':\n run_dir(settings.BASE_ROOT+'/apps/')\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
from django.shortcuts import render
from django.template import loader
# Create your views here.
from django.http import HttpResponse
from .models import Student
def index(request):
student_objects = Student.objects.all()
context = {"students": student_objects}
return render(request, 'student_list.html', context)
def addstudent(request):
context = {}
return render(request, 'add_student.html', context)
def newstudent(request):
student_entered_name = request.GET.get('name')
Student.objects.create(name=student_entered_name)
print(student_entered_name)
context = {}
return render(request, 'student_list.html', context)
|
normal
|
{
"blob_id": "00e8e0b5aeccd2a67f6cfdad63012a0d8b066e6f",
"index": 9551,
"step-1": "<mask token>\n\n\ndef addstudent(request):\n context = {}\n return render(request, 'add_student.html', context)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef index(request):\n student_objects = Student.objects.all()\n context = {'students': student_objects}\n return render(request, 'student_list.html', context)\n\n\ndef addstudent(request):\n context = {}\n return render(request, 'add_student.html', context)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef index(request):\n student_objects = Student.objects.all()\n context = {'students': student_objects}\n return render(request, 'student_list.html', context)\n\n\ndef addstudent(request):\n context = {}\n return render(request, 'add_student.html', context)\n\n\ndef newstudent(request):\n student_entered_name = request.GET.get('name')\n Student.objects.create(name=student_entered_name)\n print(student_entered_name)\n context = {}\n return render(request, 'student_list.html', context)\n",
"step-4": "from django.shortcuts import render\nfrom django.template import loader\nfrom django.http import HttpResponse\nfrom .models import Student\n\n\ndef index(request):\n student_objects = Student.objects.all()\n context = {'students': student_objects}\n return render(request, 'student_list.html', context)\n\n\ndef addstudent(request):\n context = {}\n return render(request, 'add_student.html', context)\n\n\ndef newstudent(request):\n student_entered_name = request.GET.get('name')\n Student.objects.create(name=student_entered_name)\n print(student_entered_name)\n context = {}\n return render(request, 'student_list.html', context)\n",
"step-5": "from django.shortcuts import render\nfrom django.template import loader\n\n# Create your views here.\n\nfrom django.http import HttpResponse\n\nfrom .models import Student\n\ndef index(request):\n\tstudent_objects = Student.objects.all()\n\tcontext = {\"students\": student_objects}\n\treturn render(request, 'student_list.html', context)\n\ndef addstudent(request):\n\tcontext = {}\n\treturn render(request, 'add_student.html', context)\n\ndef newstudent(request):\n\tstudent_entered_name = request.GET.get('name')\n\tStudent.objects.create(name=student_entered_name)\n\tprint(student_entered_name)\n\tcontext = {}\n\treturn render(request, 'student_list.html', context)\n\n\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
while income_number != 0:
num_exp = 10 ** (len(str(income_number)) - 1)
deleted_number = int(income_number / num_exp)
if max_number < deleted_number:
max_number = deleted_number
income_number = income_number - deleted_number * num_exp
print(f'Самая большая цифра в числе {max_number}')
<|reserved_special_token_1|>
income_number = int(input('Введите, пожалуйста, целое положительное число '))
max_number = 0
while income_number != 0:
num_exp = 10 ** (len(str(income_number)) - 1)
deleted_number = int(income_number / num_exp)
if max_number < deleted_number:
max_number = deleted_number
income_number = income_number - deleted_number * num_exp
print(f'Самая большая цифра в числе {max_number}')
<|reserved_special_token_1|>
# 4. Пользователь вводит целое положительное число.
# Найдите самую большую цифру в числе. Для решения используйте цикл while и арифметические операции.
income_number = int(input('Введите, пожалуйста, целое положительное число '))
max_number = 0
# в другую сторону решение, не так как Вы на вебинаре советовали, но тоже работает, и не сказать чтобы сильно длинее...
while income_number != 0: # продолжаю цикл вплоть до уничтожения числа
num_exp = 10 ** (len(str(income_number)) - 1) # устанавливаю размерность числа
deleted_number = int(income_number / num_exp) # узнаю крайнюю левую цифру
if max_number < deleted_number: # перезапись максимальной, если есть такая необходимость
max_number = deleted_number
income_number = income_number - deleted_number * num_exp # "откусываю" крайнюю левую цифру
print(f'Самая большая цифра в числе {max_number}')
|
flexible
|
{
"blob_id": "18e0ece7c38169d2de91a07dddd4f40b7427848f",
"index": 3759,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwhile income_number != 0:\n num_exp = 10 ** (len(str(income_number)) - 1)\n deleted_number = int(income_number / num_exp)\n if max_number < deleted_number:\n max_number = deleted_number\n income_number = income_number - deleted_number * num_exp\nprint(f'Самая большая цифра в числе {max_number}')\n",
"step-3": "income_number = int(input('Введите, пожалуйста, целое положительное число '))\nmax_number = 0\nwhile income_number != 0:\n num_exp = 10 ** (len(str(income_number)) - 1)\n deleted_number = int(income_number / num_exp)\n if max_number < deleted_number:\n max_number = deleted_number\n income_number = income_number - deleted_number * num_exp\nprint(f'Самая большая цифра в числе {max_number}')\n",
"step-4": "# 4. Пользователь вводит целое положительное число.\n# Найдите самую большую цифру в числе. Для решения используйте цикл while и арифметические операции.\n\nincome_number = int(input('Введите, пожалуйста, целое положительное число '))\n\nmax_number = 0\n# в другую сторону решение, не так как Вы на вебинаре советовали, но тоже работает, и не сказать чтобы сильно длинее...\nwhile income_number != 0: # продолжаю цикл вплоть до уничтожения числа\n num_exp = 10 ** (len(str(income_number)) - 1) # устанавливаю размерность числа\n deleted_number = int(income_number / num_exp) # узнаю крайнюю левую цифру\n if max_number < deleted_number: # перезапись максимальной, если есть такая необходимость\n max_number = deleted_number\n income_number = income_number - deleted_number * num_exp # \"откусываю\" крайнюю левую цифру\n\nprint(f'Самая большая цифра в числе {max_number}')\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
def add_data(g):
g.add_node(1)
g.add_node(2, x=5)
g.add_edge(1, 2, y=6)
g.add_edge(2, 3, z=[])
def assert_graph_data(g1, g2):
assert g1 is not g2
assert g2.nodes[1] == {}
assert g2.nodes[2] == {'x': 5}
assert g2.edges[1, 2] == {'y': 6}
assert g2.edges[2, 3] == {'z': []}
assert g2.nodes[2] is not g1.nodes[2]
assert g2.edges[2, 3] is not g1.edges[2, 3]
@pytest.mark.parametrize('do_deepcopy', [True, False])
def test_nx_copy_with_deepcopy(do_deepcopy):
g = nx.Graph()
g2 = nx.DiGraph()
add_data(g)
nx_copy(g, g2, deepcopy=do_deepcopy)
assert_graph_data(g, g2)
assert (g2.edges[2, 3]['z'] is g.edges[2, 3]['z']) != do_deepcopy
def test_nx_copy_with_none():
g = nx.Graph()
add_data(g)
g2 = nx_copy(g, None)
assert_graph_data(g, g2)
def test_nx_copy_with_class():
g = nx.Graph()
add_data(g)
g2 = nx_copy(g, nx.OrderedDiGraph)
assert isinstance(nx.OrderedDiGraph, type) and issubclass(nx.
OrderedDiGraph, nx.Graph)
assert isinstance(g2, nx.OrderedDiGraph)
assert_graph_data(g, g2)
def test_nx_copy_node_transform():
g = nx.Graph()
g.add_node(1)
g.add_node(2)
g.add_edge(1, 2, f=4)
g.add_edge(2, 3, f=5)
def node_transform(nodes):
for n, ndata in nodes:
yield str(n), ndata
g2 = nx_copy(g, None, node_transform=node_transform)
assert g2.number_of_nodes() == 3
assert g2.number_of_edges() == 2
assert '1' in g2
assert '2' in g2
assert 1 not in g2
assert 2 not in g2
assert g2.edges['1', '2'] == {'f': 4}
assert g2.edges['2', '3'] == {'f': 5}
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def add_data(g):
g.add_node(1)
g.add_node(2, x=5)
g.add_edge(1, 2, y=6)
g.add_edge(2, 3, z=[])
def assert_graph_data(g1, g2):
assert g1 is not g2
assert g2.nodes[1] == {}
assert g2.nodes[2] == {'x': 5}
assert g2.edges[1, 2] == {'y': 6}
assert g2.edges[2, 3] == {'z': []}
assert g2.nodes[2] is not g1.nodes[2]
assert g2.edges[2, 3] is not g1.edges[2, 3]
@pytest.mark.parametrize('do_deepcopy', [True, False])
def test_nx_copy_with_deepcopy(do_deepcopy):
g = nx.Graph()
g2 = nx.DiGraph()
add_data(g)
nx_copy(g, g2, deepcopy=do_deepcopy)
assert_graph_data(g, g2)
assert (g2.edges[2, 3]['z'] is g.edges[2, 3]['z']) != do_deepcopy
def test_nx_copy_with_none():
g = nx.Graph()
add_data(g)
g2 = nx_copy(g, None)
assert_graph_data(g, g2)
def test_nx_copy_with_class():
g = nx.Graph()
add_data(g)
g2 = nx_copy(g, nx.OrderedDiGraph)
assert isinstance(nx.OrderedDiGraph, type) and issubclass(nx.
OrderedDiGraph, nx.Graph)
assert isinstance(g2, nx.OrderedDiGraph)
assert_graph_data(g, g2)
def test_nx_copy_node_transform():
g = nx.Graph()
g.add_node(1)
g.add_node(2)
g.add_edge(1, 2, f=4)
g.add_edge(2, 3, f=5)
def node_transform(nodes):
for n, ndata in nodes:
yield str(n), ndata
g2 = nx_copy(g, None, node_transform=node_transform)
assert g2.number_of_nodes() == 3
assert g2.number_of_edges() == 2
assert '1' in g2
assert '2' in g2
assert 1 not in g2
assert 2 not in g2
assert g2.edges['1', '2'] == {'f': 4}
assert g2.edges['2', '3'] == {'f': 5}
def test_nx_copy_edge_transform():
g = nx.Graph()
g.add_node(1)
g.add_node(2)
g.add_edge(1, 2, f=4)
g.add_edge(2, 3, f=5)
g.add_edge(4, 5)
assert g.number_of_edges() == 3
assert g.number_of_nodes() == 5
def edge_transform(edges):
for n1, n2, edata in edges:
if n1 != 4:
yield n1, n2, {'f': 8}
g2 = nx_copy(g, None, edge_transform=edge_transform)
assert g2.number_of_nodes() == 5
assert g2.number_of_edges() == 2
assert g2.edges[1, 2] == {'f': 8}
assert g2.edges[2, 3] == {'f': 8}
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def add_data(g):
g.add_node(1)
g.add_node(2, x=5)
g.add_edge(1, 2, y=6)
g.add_edge(2, 3, z=[])
def assert_graph_data(g1, g2):
assert g1 is not g2
assert g2.nodes[1] == {}
assert g2.nodes[2] == {'x': 5}
assert g2.edges[1, 2] == {'y': 6}
assert g2.edges[2, 3] == {'z': []}
assert g2.nodes[2] is not g1.nodes[2]
assert g2.edges[2, 3] is not g1.edges[2, 3]
@pytest.mark.parametrize('do_deepcopy', [True, False])
def test_nx_copy_with_deepcopy(do_deepcopy):
g = nx.Graph()
g2 = nx.DiGraph()
add_data(g)
nx_copy(g, g2, deepcopy=do_deepcopy)
assert_graph_data(g, g2)
assert (g2.edges[2, 3]['z'] is g.edges[2, 3]['z']) != do_deepcopy
def test_nx_copy_with_none():
g = nx.Graph()
add_data(g)
g2 = nx_copy(g, None)
assert_graph_data(g, g2)
def test_nx_copy_with_class():
g = nx.Graph()
add_data(g)
g2 = nx_copy(g, nx.OrderedDiGraph)
assert isinstance(nx.OrderedDiGraph, type) and issubclass(nx.
OrderedDiGraph, nx.Graph)
assert isinstance(g2, nx.OrderedDiGraph)
assert_graph_data(g, g2)
def test_nx_copy_node_transform():
g = nx.Graph()
g.add_node(1)
g.add_node(2)
g.add_edge(1, 2, f=4)
g.add_edge(2, 3, f=5)
def node_transform(nodes):
for n, ndata in nodes:
yield str(n), ndata
g2 = nx_copy(g, None, node_transform=node_transform)
assert g2.number_of_nodes() == 3
assert g2.number_of_edges() == 2
assert '1' in g2
assert '2' in g2
assert 1 not in g2
assert 2 not in g2
assert g2.edges['1', '2'] == {'f': 4}
assert g2.edges['2', '3'] == {'f': 5}
def test_nx_copy_edge_transform():
g = nx.Graph()
g.add_node(1)
g.add_node(2)
g.add_edge(1, 2, f=4)
g.add_edge(2, 3, f=5)
g.add_edge(4, 5)
assert g.number_of_edges() == 3
assert g.number_of_nodes() == 5
def edge_transform(edges):
for n1, n2, edata in edges:
if n1 != 4:
yield n1, n2, {'f': 8}
g2 = nx_copy(g, None, edge_transform=edge_transform)
assert g2.number_of_nodes() == 5
assert g2.number_of_edges() == 2
assert g2.edges[1, 2] == {'f': 8}
assert g2.edges[2, 3] == {'f': 8}
def test_nx_copy_global_transform():
g = nx.Graph()
g.add_node(1)
g.add_node(2)
g.add_edge(1, 2, f=4)
g.add_edge(2, 3, f=5)
g.add_edge(4, 5)
g.get_global()['f'] = 8
assert g.number_of_edges() == 3
assert g.number_of_nodes() == 5
def global_transform(g):
for _, gdata in g:
gdata['x'] = 4
yield _, gdata
g2 = nx_copy(g, None, global_transform=global_transform)
assert g2.get_global() == {'x': 4, 'f': 8}
<|reserved_special_token_1|>
import networkx as nx
import pytest
from caldera.utils.nx import nx_copy
def add_data(g):
g.add_node(1)
g.add_node(2, x=5)
g.add_edge(1, 2, y=6)
g.add_edge(2, 3, z=[])
def assert_graph_data(g1, g2):
assert g1 is not g2
assert g2.nodes[1] == {}
assert g2.nodes[2] == {'x': 5}
assert g2.edges[1, 2] == {'y': 6}
assert g2.edges[2, 3] == {'z': []}
assert g2.nodes[2] is not g1.nodes[2]
assert g2.edges[2, 3] is not g1.edges[2, 3]
@pytest.mark.parametrize('do_deepcopy', [True, False])
def test_nx_copy_with_deepcopy(do_deepcopy):
g = nx.Graph()
g2 = nx.DiGraph()
add_data(g)
nx_copy(g, g2, deepcopy=do_deepcopy)
assert_graph_data(g, g2)
assert (g2.edges[2, 3]['z'] is g.edges[2, 3]['z']) != do_deepcopy
def test_nx_copy_with_none():
g = nx.Graph()
add_data(g)
g2 = nx_copy(g, None)
assert_graph_data(g, g2)
def test_nx_copy_with_class():
g = nx.Graph()
add_data(g)
g2 = nx_copy(g, nx.OrderedDiGraph)
assert isinstance(nx.OrderedDiGraph, type) and issubclass(nx.
OrderedDiGraph, nx.Graph)
assert isinstance(g2, nx.OrderedDiGraph)
assert_graph_data(g, g2)
def test_nx_copy_node_transform():
g = nx.Graph()
g.add_node(1)
g.add_node(2)
g.add_edge(1, 2, f=4)
g.add_edge(2, 3, f=5)
def node_transform(nodes):
for n, ndata in nodes:
yield str(n), ndata
g2 = nx_copy(g, None, node_transform=node_transform)
assert g2.number_of_nodes() == 3
assert g2.number_of_edges() == 2
assert '1' in g2
assert '2' in g2
assert 1 not in g2
assert 2 not in g2
assert g2.edges['1', '2'] == {'f': 4}
assert g2.edges['2', '3'] == {'f': 5}
def test_nx_copy_edge_transform():
g = nx.Graph()
g.add_node(1)
g.add_node(2)
g.add_edge(1, 2, f=4)
g.add_edge(2, 3, f=5)
g.add_edge(4, 5)
assert g.number_of_edges() == 3
assert g.number_of_nodes() == 5
def edge_transform(edges):
for n1, n2, edata in edges:
if n1 != 4:
yield n1, n2, {'f': 8}
g2 = nx_copy(g, None, edge_transform=edge_transform)
assert g2.number_of_nodes() == 5
assert g2.number_of_edges() == 2
assert g2.edges[1, 2] == {'f': 8}
assert g2.edges[2, 3] == {'f': 8}
def test_nx_copy_global_transform():
g = nx.Graph()
g.add_node(1)
g.add_node(2)
g.add_edge(1, 2, f=4)
g.add_edge(2, 3, f=5)
g.add_edge(4, 5)
g.get_global()['f'] = 8
assert g.number_of_edges() == 3
assert g.number_of_nodes() == 5
def global_transform(g):
for _, gdata in g:
gdata['x'] = 4
yield _, gdata
g2 = nx_copy(g, None, global_transform=global_transform)
assert g2.get_global() == {'x': 4, 'f': 8}
<|reserved_special_token_1|>
import networkx as nx
import pytest
from caldera.utils.nx import nx_copy
def add_data(g):
g.add_node(1)
g.add_node(2, x=5)
g.add_edge(1, 2, y=6)
g.add_edge(2, 3, z=[])
def assert_graph_data(g1, g2):
assert g1 is not g2
assert g2.nodes[1] == {}
assert g2.nodes[2] == {"x": 5}
assert g2.edges[(1, 2)] == {"y": 6}
assert g2.edges[(2, 3)] == {"z": []}
assert g2.nodes[2] is not g1.nodes[2]
assert g2.edges[(2, 3)] is not g1.edges[(2, 3)]
@pytest.mark.parametrize("do_deepcopy", [True, False])
def test_nx_copy_with_deepcopy(do_deepcopy):
g = nx.Graph()
g2 = nx.DiGraph()
add_data(g)
nx_copy(g, g2, deepcopy=do_deepcopy)
assert_graph_data(g, g2)
assert (g2.edges[(2, 3)]["z"] is g.edges[(2, 3)]["z"]) != do_deepcopy
def test_nx_copy_with_none():
g = nx.Graph()
add_data(g)
g2 = nx_copy(g, None)
assert_graph_data(g, g2)
def test_nx_copy_with_class():
g = nx.Graph()
add_data(g)
g2 = nx_copy(g, nx.OrderedDiGraph)
assert isinstance(nx.OrderedDiGraph, type) and issubclass(
nx.OrderedDiGraph, nx.Graph
)
assert isinstance(g2, nx.OrderedDiGraph)
assert_graph_data(g, g2)
def test_nx_copy_node_transform():
g = nx.Graph()
g.add_node(1)
g.add_node(2)
g.add_edge(1, 2, f=4)
g.add_edge(2, 3, f=5)
def node_transform(nodes):
for n, ndata in nodes:
yield str(n), ndata
g2 = nx_copy(g, None, node_transform=node_transform)
assert g2.number_of_nodes() == 3
assert g2.number_of_edges() == 2
assert "1" in g2
assert "2" in g2
assert 1 not in g2
assert 2 not in g2
assert g2.edges[("1", "2")] == {"f": 4}
assert g2.edges[("2", "3")] == {"f": 5}
def test_nx_copy_edge_transform():
g = nx.Graph()
g.add_node(1)
g.add_node(2)
g.add_edge(1, 2, f=4)
g.add_edge(2, 3, f=5)
g.add_edge(4, 5)
assert g.number_of_edges() == 3
assert g.number_of_nodes() == 5
def edge_transform(edges):
for n1, n2, edata in edges:
if n1 != 4:
yield n1, n2, {"f": 8}
g2 = nx_copy(g, None, edge_transform=edge_transform)
assert g2.number_of_nodes() == 5
assert g2.number_of_edges() == 2
assert g2.edges[(1, 2)] == {"f": 8}
assert g2.edges[(2, 3)] == {"f": 8}
def test_nx_copy_global_transform():
    """global_transform can mutate the graph-level attribute dict."""
    # NOTE(review): get_global() is not a stock networkx API; presumably it is
    # patched in by the caldera package -- confirm.
    source = nx.Graph()
    source.add_node(1)
    source.add_node(2)
    source.add_edge(1, 2, f=4)
    source.add_edge(2, 3, f=5)
    source.add_edge(4, 5)
    source.get_global()["f"] = 8
    assert source.number_of_edges() == 3
    assert source.number_of_nodes() == 5

    def annotate_global(global_items):
        for key, global_data in global_items:
            global_data["x"] = 4
            yield key, global_data

    copied = nx_copy(source, None, global_transform=annotate_global)
    # Existing global data is kept and the transform's addition appears.
    assert copied.get_global() == {"x": 4, "f": 8}
|
flexible
|
{
"blob_id": "7fe7ea89908f9d233dbdb9e46bf2d677406ab324",
"index": 1050,
"step-1": "<mask token>\n\n\ndef add_data(g):\n g.add_node(1)\n g.add_node(2, x=5)\n g.add_edge(1, 2, y=6)\n g.add_edge(2, 3, z=[])\n\n\ndef assert_graph_data(g1, g2):\n assert g1 is not g2\n assert g2.nodes[1] == {}\n assert g2.nodes[2] == {'x': 5}\n assert g2.edges[1, 2] == {'y': 6}\n assert g2.edges[2, 3] == {'z': []}\n assert g2.nodes[2] is not g1.nodes[2]\n assert g2.edges[2, 3] is not g1.edges[2, 3]\n\n\[email protected]('do_deepcopy', [True, False])\ndef test_nx_copy_with_deepcopy(do_deepcopy):\n g = nx.Graph()\n g2 = nx.DiGraph()\n add_data(g)\n nx_copy(g, g2, deepcopy=do_deepcopy)\n assert_graph_data(g, g2)\n assert (g2.edges[2, 3]['z'] is g.edges[2, 3]['z']) != do_deepcopy\n\n\ndef test_nx_copy_with_none():\n g = nx.Graph()\n add_data(g)\n g2 = nx_copy(g, None)\n assert_graph_data(g, g2)\n\n\ndef test_nx_copy_with_class():\n g = nx.Graph()\n add_data(g)\n g2 = nx_copy(g, nx.OrderedDiGraph)\n assert isinstance(nx.OrderedDiGraph, type) and issubclass(nx.\n OrderedDiGraph, nx.Graph)\n assert isinstance(g2, nx.OrderedDiGraph)\n assert_graph_data(g, g2)\n\n\ndef test_nx_copy_node_transform():\n g = nx.Graph()\n g.add_node(1)\n g.add_node(2)\n g.add_edge(1, 2, f=4)\n g.add_edge(2, 3, f=5)\n\n def node_transform(nodes):\n for n, ndata in nodes:\n yield str(n), ndata\n g2 = nx_copy(g, None, node_transform=node_transform)\n assert g2.number_of_nodes() == 3\n assert g2.number_of_edges() == 2\n assert '1' in g2\n assert '2' in g2\n assert 1 not in g2\n assert 2 not in g2\n assert g2.edges['1', '2'] == {'f': 4}\n assert g2.edges['2', '3'] == {'f': 5}\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef add_data(g):\n g.add_node(1)\n g.add_node(2, x=5)\n g.add_edge(1, 2, y=6)\n g.add_edge(2, 3, z=[])\n\n\ndef assert_graph_data(g1, g2):\n assert g1 is not g2\n assert g2.nodes[1] == {}\n assert g2.nodes[2] == {'x': 5}\n assert g2.edges[1, 2] == {'y': 6}\n assert g2.edges[2, 3] == {'z': []}\n assert g2.nodes[2] is not g1.nodes[2]\n assert g2.edges[2, 3] is not g1.edges[2, 3]\n\n\[email protected]('do_deepcopy', [True, False])\ndef test_nx_copy_with_deepcopy(do_deepcopy):\n g = nx.Graph()\n g2 = nx.DiGraph()\n add_data(g)\n nx_copy(g, g2, deepcopy=do_deepcopy)\n assert_graph_data(g, g2)\n assert (g2.edges[2, 3]['z'] is g.edges[2, 3]['z']) != do_deepcopy\n\n\ndef test_nx_copy_with_none():\n g = nx.Graph()\n add_data(g)\n g2 = nx_copy(g, None)\n assert_graph_data(g, g2)\n\n\ndef test_nx_copy_with_class():\n g = nx.Graph()\n add_data(g)\n g2 = nx_copy(g, nx.OrderedDiGraph)\n assert isinstance(nx.OrderedDiGraph, type) and issubclass(nx.\n OrderedDiGraph, nx.Graph)\n assert isinstance(g2, nx.OrderedDiGraph)\n assert_graph_data(g, g2)\n\n\ndef test_nx_copy_node_transform():\n g = nx.Graph()\n g.add_node(1)\n g.add_node(2)\n g.add_edge(1, 2, f=4)\n g.add_edge(2, 3, f=5)\n\n def node_transform(nodes):\n for n, ndata in nodes:\n yield str(n), ndata\n g2 = nx_copy(g, None, node_transform=node_transform)\n assert g2.number_of_nodes() == 3\n assert g2.number_of_edges() == 2\n assert '1' in g2\n assert '2' in g2\n assert 1 not in g2\n assert 2 not in g2\n assert g2.edges['1', '2'] == {'f': 4}\n assert g2.edges['2', '3'] == {'f': 5}\n\n\ndef test_nx_copy_edge_transform():\n g = nx.Graph()\n g.add_node(1)\n g.add_node(2)\n g.add_edge(1, 2, f=4)\n g.add_edge(2, 3, f=5)\n g.add_edge(4, 5)\n assert g.number_of_edges() == 3\n assert g.number_of_nodes() == 5\n\n def edge_transform(edges):\n for n1, n2, edata in edges:\n if n1 != 4:\n yield n1, n2, {'f': 8}\n g2 = nx_copy(g, None, edge_transform=edge_transform)\n assert g2.number_of_nodes() == 5\n assert 
g2.number_of_edges() == 2\n assert g2.edges[1, 2] == {'f': 8}\n assert g2.edges[2, 3] == {'f': 8}\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef add_data(g):\n g.add_node(1)\n g.add_node(2, x=5)\n g.add_edge(1, 2, y=6)\n g.add_edge(2, 3, z=[])\n\n\ndef assert_graph_data(g1, g2):\n assert g1 is not g2\n assert g2.nodes[1] == {}\n assert g2.nodes[2] == {'x': 5}\n assert g2.edges[1, 2] == {'y': 6}\n assert g2.edges[2, 3] == {'z': []}\n assert g2.nodes[2] is not g1.nodes[2]\n assert g2.edges[2, 3] is not g1.edges[2, 3]\n\n\[email protected]('do_deepcopy', [True, False])\ndef test_nx_copy_with_deepcopy(do_deepcopy):\n g = nx.Graph()\n g2 = nx.DiGraph()\n add_data(g)\n nx_copy(g, g2, deepcopy=do_deepcopy)\n assert_graph_data(g, g2)\n assert (g2.edges[2, 3]['z'] is g.edges[2, 3]['z']) != do_deepcopy\n\n\ndef test_nx_copy_with_none():\n g = nx.Graph()\n add_data(g)\n g2 = nx_copy(g, None)\n assert_graph_data(g, g2)\n\n\ndef test_nx_copy_with_class():\n g = nx.Graph()\n add_data(g)\n g2 = nx_copy(g, nx.OrderedDiGraph)\n assert isinstance(nx.OrderedDiGraph, type) and issubclass(nx.\n OrderedDiGraph, nx.Graph)\n assert isinstance(g2, nx.OrderedDiGraph)\n assert_graph_data(g, g2)\n\n\ndef test_nx_copy_node_transform():\n g = nx.Graph()\n g.add_node(1)\n g.add_node(2)\n g.add_edge(1, 2, f=4)\n g.add_edge(2, 3, f=5)\n\n def node_transform(nodes):\n for n, ndata in nodes:\n yield str(n), ndata\n g2 = nx_copy(g, None, node_transform=node_transform)\n assert g2.number_of_nodes() == 3\n assert g2.number_of_edges() == 2\n assert '1' in g2\n assert '2' in g2\n assert 1 not in g2\n assert 2 not in g2\n assert g2.edges['1', '2'] == {'f': 4}\n assert g2.edges['2', '3'] == {'f': 5}\n\n\ndef test_nx_copy_edge_transform():\n g = nx.Graph()\n g.add_node(1)\n g.add_node(2)\n g.add_edge(1, 2, f=4)\n g.add_edge(2, 3, f=5)\n g.add_edge(4, 5)\n assert g.number_of_edges() == 3\n assert g.number_of_nodes() == 5\n\n def edge_transform(edges):\n for n1, n2, edata in edges:\n if n1 != 4:\n yield n1, n2, {'f': 8}\n g2 = nx_copy(g, None, edge_transform=edge_transform)\n assert g2.number_of_nodes() == 5\n assert 
g2.number_of_edges() == 2\n assert g2.edges[1, 2] == {'f': 8}\n assert g2.edges[2, 3] == {'f': 8}\n\n\ndef test_nx_copy_global_transform():\n g = nx.Graph()\n g.add_node(1)\n g.add_node(2)\n g.add_edge(1, 2, f=4)\n g.add_edge(2, 3, f=5)\n g.add_edge(4, 5)\n g.get_global()['f'] = 8\n assert g.number_of_edges() == 3\n assert g.number_of_nodes() == 5\n\n def global_transform(g):\n for _, gdata in g:\n gdata['x'] = 4\n yield _, gdata\n g2 = nx_copy(g, None, global_transform=global_transform)\n assert g2.get_global() == {'x': 4, 'f': 8}\n",
"step-4": "import networkx as nx\nimport pytest\nfrom caldera.utils.nx import nx_copy\n\n\ndef add_data(g):\n g.add_node(1)\n g.add_node(2, x=5)\n g.add_edge(1, 2, y=6)\n g.add_edge(2, 3, z=[])\n\n\ndef assert_graph_data(g1, g2):\n assert g1 is not g2\n assert g2.nodes[1] == {}\n assert g2.nodes[2] == {'x': 5}\n assert g2.edges[1, 2] == {'y': 6}\n assert g2.edges[2, 3] == {'z': []}\n assert g2.nodes[2] is not g1.nodes[2]\n assert g2.edges[2, 3] is not g1.edges[2, 3]\n\n\[email protected]('do_deepcopy', [True, False])\ndef test_nx_copy_with_deepcopy(do_deepcopy):\n g = nx.Graph()\n g2 = nx.DiGraph()\n add_data(g)\n nx_copy(g, g2, deepcopy=do_deepcopy)\n assert_graph_data(g, g2)\n assert (g2.edges[2, 3]['z'] is g.edges[2, 3]['z']) != do_deepcopy\n\n\ndef test_nx_copy_with_none():\n g = nx.Graph()\n add_data(g)\n g2 = nx_copy(g, None)\n assert_graph_data(g, g2)\n\n\ndef test_nx_copy_with_class():\n g = nx.Graph()\n add_data(g)\n g2 = nx_copy(g, nx.OrderedDiGraph)\n assert isinstance(nx.OrderedDiGraph, type) and issubclass(nx.\n OrderedDiGraph, nx.Graph)\n assert isinstance(g2, nx.OrderedDiGraph)\n assert_graph_data(g, g2)\n\n\ndef test_nx_copy_node_transform():\n g = nx.Graph()\n g.add_node(1)\n g.add_node(2)\n g.add_edge(1, 2, f=4)\n g.add_edge(2, 3, f=5)\n\n def node_transform(nodes):\n for n, ndata in nodes:\n yield str(n), ndata\n g2 = nx_copy(g, None, node_transform=node_transform)\n assert g2.number_of_nodes() == 3\n assert g2.number_of_edges() == 2\n assert '1' in g2\n assert '2' in g2\n assert 1 not in g2\n assert 2 not in g2\n assert g2.edges['1', '2'] == {'f': 4}\n assert g2.edges['2', '3'] == {'f': 5}\n\n\ndef test_nx_copy_edge_transform():\n g = nx.Graph()\n g.add_node(1)\n g.add_node(2)\n g.add_edge(1, 2, f=4)\n g.add_edge(2, 3, f=5)\n g.add_edge(4, 5)\n assert g.number_of_edges() == 3\n assert g.number_of_nodes() == 5\n\n def edge_transform(edges):\n for n1, n2, edata in edges:\n if n1 != 4:\n yield n1, n2, {'f': 8}\n g2 = nx_copy(g, None, 
edge_transform=edge_transform)\n assert g2.number_of_nodes() == 5\n assert g2.number_of_edges() == 2\n assert g2.edges[1, 2] == {'f': 8}\n assert g2.edges[2, 3] == {'f': 8}\n\n\ndef test_nx_copy_global_transform():\n g = nx.Graph()\n g.add_node(1)\n g.add_node(2)\n g.add_edge(1, 2, f=4)\n g.add_edge(2, 3, f=5)\n g.add_edge(4, 5)\n g.get_global()['f'] = 8\n assert g.number_of_edges() == 3\n assert g.number_of_nodes() == 5\n\n def global_transform(g):\n for _, gdata in g:\n gdata['x'] = 4\n yield _, gdata\n g2 = nx_copy(g, None, global_transform=global_transform)\n assert g2.get_global() == {'x': 4, 'f': 8}\n",
"step-5": "import networkx as nx\nimport pytest\n\nfrom caldera.utils.nx import nx_copy\n\n\ndef add_data(g):\n g.add_node(1)\n g.add_node(2, x=5)\n g.add_edge(1, 2, y=6)\n g.add_edge(2, 3, z=[])\n\n\ndef assert_graph_data(g1, g2):\n assert g1 is not g2\n assert g2.nodes[1] == {}\n assert g2.nodes[2] == {\"x\": 5}\n assert g2.edges[(1, 2)] == {\"y\": 6}\n assert g2.edges[(2, 3)] == {\"z\": []}\n assert g2.nodes[2] is not g1.nodes[2]\n assert g2.edges[(2, 3)] is not g1.edges[(2, 3)]\n\n\[email protected](\"do_deepcopy\", [True, False])\ndef test_nx_copy_with_deepcopy(do_deepcopy):\n g = nx.Graph()\n g2 = nx.DiGraph()\n add_data(g)\n nx_copy(g, g2, deepcopy=do_deepcopy)\n assert_graph_data(g, g2)\n assert (g2.edges[(2, 3)][\"z\"] is g.edges[(2, 3)][\"z\"]) != do_deepcopy\n\n\ndef test_nx_copy_with_none():\n g = nx.Graph()\n add_data(g)\n g2 = nx_copy(g, None)\n assert_graph_data(g, g2)\n\n\ndef test_nx_copy_with_class():\n g = nx.Graph()\n add_data(g)\n g2 = nx_copy(g, nx.OrderedDiGraph)\n assert isinstance(nx.OrderedDiGraph, type) and issubclass(\n nx.OrderedDiGraph, nx.Graph\n )\n assert isinstance(g2, nx.OrderedDiGraph)\n assert_graph_data(g, g2)\n\n\ndef test_nx_copy_node_transform():\n g = nx.Graph()\n g.add_node(1)\n g.add_node(2)\n g.add_edge(1, 2, f=4)\n g.add_edge(2, 3, f=5)\n\n def node_transform(nodes):\n for n, ndata in nodes:\n yield str(n), ndata\n\n g2 = nx_copy(g, None, node_transform=node_transform)\n assert g2.number_of_nodes() == 3\n assert g2.number_of_edges() == 2\n assert \"1\" in g2\n assert \"2\" in g2\n assert 1 not in g2\n assert 2 not in g2\n assert g2.edges[(\"1\", \"2\")] == {\"f\": 4}\n assert g2.edges[(\"2\", \"3\")] == {\"f\": 5}\n\n\ndef test_nx_copy_edge_transform():\n g = nx.Graph()\n g.add_node(1)\n g.add_node(2)\n g.add_edge(1, 2, f=4)\n g.add_edge(2, 3, f=5)\n g.add_edge(4, 5)\n\n assert g.number_of_edges() == 3\n assert g.number_of_nodes() == 5\n\n def edge_transform(edges):\n for n1, n2, edata in edges:\n if n1 != 4:\n yield 
n1, n2, {\"f\": 8}\n\n g2 = nx_copy(g, None, edge_transform=edge_transform)\n assert g2.number_of_nodes() == 5\n assert g2.number_of_edges() == 2\n assert g2.edges[(1, 2)] == {\"f\": 8}\n assert g2.edges[(2, 3)] == {\"f\": 8}\n\n\ndef test_nx_copy_global_transform():\n g = nx.Graph()\n g.add_node(1)\n g.add_node(2)\n g.add_edge(1, 2, f=4)\n g.add_edge(2, 3, f=5)\n g.add_edge(4, 5)\n g.get_global()[\"f\"] = 8\n assert g.number_of_edges() == 3\n assert g.number_of_nodes() == 5\n\n def global_transform(g):\n for _, gdata in g:\n gdata[\"x\"] = 4\n yield _, gdata\n\n g2 = nx_copy(g, None, global_transform=global_transform)\n assert g2.get_global() == {\"x\": 4, \"f\": 8}\n",
"step-ids": [
6,
7,
8,
9,
10
]
}
|
[
6,
7,
8,
9,
10
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@Client.on_callback_query(filters.regex('^change_lg_'))
async def on_change_language(_, callback):
    """Open the language-selection menu for a chat's settings.

    Triggered by callback data of the form ``change_lg_<settings_id>``.
    Users failing ``settings.can_edit`` get a "not admin" alert instead.
    """
    # callback.data == "change_lg_<id>"; the third '_'-token is the id.
    settings_id = int(callback.data.split('_')[2])
    with db_session:
        settings = SettingsInstance.get(id=settings_id)
        if not settings or not settings.can_edit(callback.db_user):
            await callback.answer(callback.db_user.get_message('not_admin'),
                show_alert=True)
            return
        await callback.answer()
        # NOTE(review): `languages` is not imported in this fragment;
        # presumably it arrives via a star import -- confirm.
        await callback.edit_message_text(**languages.create_message_data(
            callback.db_user, settings.chat, settings))
@Client.on_callback_query(filters.regex('^language_g_'))
async def on_language_selected(_, callback):
    """Apply the language chosen from the menu to the chat's settings.

    Callback data is ``language_g_<settings_id>_<language>``.
    """
    data = callback.data.split('_')[2:]
    settings_id = int(data[0])
    # The tail is re-joined so language codes containing '_' survive.
    language = '_'.join(data[1:])
    with db_session:
        settings = SettingsInstance.get(id=settings_id)
        if not settings or not settings.can_edit(callback.db_user):
            await callback.answer(callback.db_user.get_message('not_admin'),
                show_alert=True)
            return
        settings.chat.language = language
        await callback.answer(settings.chat.get_message('language_selected',
            flag=settings.chat.get_message('flag')), show_alert=True)
        try:
            await callback.edit_message_text(**languages.
                create_message_data(callback.db_user, settings.chat, settings))
        # Re-selecting the already-active language leaves the message
        # unchanged; Telegram then raises MessageNotModified -- ignore it.
        except MessageNotModified:
            pass
<|reserved_special_token_1|>
from pyrogram import Client, filters
from pyrogram.errors import MessageNotModified
from db.models import *
@Client.on_callback_query(filters.regex('^change_lg_'))
async def on_change_language(_, callback):
    """Open the language-selection menu for a chat's settings.

    Triggered by callback data of the form ``change_lg_<settings_id>``.
    Users failing ``settings.can_edit`` get a "not admin" alert instead.
    """
    # callback.data == "change_lg_<id>"; the third '_'-token is the id.
    settings_id = int(callback.data.split('_')[2])
    with db_session:
        settings = SettingsInstance.get(id=settings_id)
        if not settings or not settings.can_edit(callback.db_user):
            await callback.answer(callback.db_user.get_message('not_admin'),
                show_alert=True)
            return
        await callback.answer()
        # NOTE(review): `languages` is not imported by name; presumably it is
        # re-exported by the star import from db.models -- confirm.
        await callback.edit_message_text(**languages.create_message_data(
            callback.db_user, settings.chat, settings))
@Client.on_callback_query(filters.regex('^language_g_'))
async def on_language_selected(_, callback):
    """Apply the language chosen from the menu to the chat's settings.

    Callback data is ``language_g_<settings_id>_<language>``.
    """
    data = callback.data.split('_')[2:]
    settings_id = int(data[0])
    # The tail is re-joined so language codes containing '_' survive.
    language = '_'.join(data[1:])
    with db_session:
        settings = SettingsInstance.get(id=settings_id)
        if not settings or not settings.can_edit(callback.db_user):
            await callback.answer(callback.db_user.get_message('not_admin'),
                show_alert=True)
            return
        settings.chat.language = language
        await callback.answer(settings.chat.get_message('language_selected',
            flag=settings.chat.get_message('flag')), show_alert=True)
        try:
            await callback.edit_message_text(**languages.
                create_message_data(callback.db_user, settings.chat, settings))
        # Re-selecting the already-active language leaves the message
        # unchanged; Telegram then raises MessageNotModified -- ignore it.
        except MessageNotModified:
            pass
<|reserved_special_token_1|>
from pyrogram import Client, filters
from pyrogram.errors import MessageNotModified
from db.models import *
@Client.on_callback_query(filters.regex('^change_lg_'))
async def on_change_language(_, callback):
    """Open the language-selection menu for a chat's settings.

    Triggered by callback data of the form ``change_lg_<settings_id>``.
    Users failing ``settings.can_edit`` get a "not admin" alert instead.
    """
    # callback.data == "change_lg_<id>"; the third '_'-token is the id.
    settings_id = int(callback.data.split('_')[2])
    with db_session:
        settings = SettingsInstance.get(id=settings_id)
        if not settings or not settings.can_edit(callback.db_user):
            await callback.answer(callback.db_user.get_message('not_admin'), show_alert=True)
            return
        await callback.answer()
        # NOTE(review): `languages` is not imported by name; presumably it is
        # re-exported by the star import from db.models -- confirm.
        await callback.edit_message_text(**languages.create_message_data(callback.db_user, settings.chat, settings))
@Client.on_callback_query(filters.regex('^language_g_'))
async def on_language_selected(_, callback):
    """Apply the language chosen from the menu to the chat's settings.

    Callback data is ``language_g_<settings_id>_<language>``.
    """
    data = callback.data.split('_')[2:]
    settings_id = int(data[0])
    # The tail is re-joined so language codes containing '_' survive.
    language = '_'.join(data[1:])
    with db_session:
        settings = SettingsInstance.get(id=settings_id)
        if not settings or not settings.can_edit(callback.db_user):
            await callback.answer(callback.db_user.get_message('not_admin'), show_alert=True)
            return
        settings.chat.language = language
        await callback.answer(settings.chat.get_message('language_selected', flag=settings.chat.get_message('flag')),
                              show_alert=True)
        try:
            await callback.edit_message_text(**languages.create_message_data(callback.db_user, settings.chat, settings))
        except MessageNotModified:  # If the user selects the same language he already had
            pass
|
flexible
|
{
"blob_id": "dd053da45d2577772414b1373ba324b0bfdc0d94",
"index": 6605,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\[email protected]_callback_query(filters.regex('^change_lg_'))\nasync def on_change_language(_, callback):\n settings_id = int(callback.data.split('_')[2])\n with db_session:\n settings = SettingsInstance.get(id=settings_id)\n if not settings or not settings.can_edit(callback.db_user):\n await callback.answer(callback.db_user.get_message('not_admin'),\n show_alert=True)\n return\n await callback.answer()\n await callback.edit_message_text(**languages.create_message_data(\n callback.db_user, settings.chat, settings))\n\n\[email protected]_callback_query(filters.regex('^language_g_'))\nasync def on_language_selected(_, callback):\n data = callback.data.split('_')[2:]\n settings_id = int(data[0])\n language = '_'.join(data[1:])\n with db_session:\n settings = SettingsInstance.get(id=settings_id)\n if not settings or not settings.can_edit(callback.db_user):\n await callback.answer(callback.db_user.get_message('not_admin'),\n show_alert=True)\n return\n settings.chat.language = language\n await callback.answer(settings.chat.get_message('language_selected',\n flag=settings.chat.get_message('flag')), show_alert=True)\n try:\n await callback.edit_message_text(**languages.\n create_message_data(callback.db_user, settings.chat, settings))\n except MessageNotModified:\n pass\n",
"step-3": "from pyrogram import Client, filters\nfrom pyrogram.errors import MessageNotModified\nfrom db.models import *\n\n\[email protected]_callback_query(filters.regex('^change_lg_'))\nasync def on_change_language(_, callback):\n settings_id = int(callback.data.split('_')[2])\n with db_session:\n settings = SettingsInstance.get(id=settings_id)\n if not settings or not settings.can_edit(callback.db_user):\n await callback.answer(callback.db_user.get_message('not_admin'),\n show_alert=True)\n return\n await callback.answer()\n await callback.edit_message_text(**languages.create_message_data(\n callback.db_user, settings.chat, settings))\n\n\[email protected]_callback_query(filters.regex('^language_g_'))\nasync def on_language_selected(_, callback):\n data = callback.data.split('_')[2:]\n settings_id = int(data[0])\n language = '_'.join(data[1:])\n with db_session:\n settings = SettingsInstance.get(id=settings_id)\n if not settings or not settings.can_edit(callback.db_user):\n await callback.answer(callback.db_user.get_message('not_admin'),\n show_alert=True)\n return\n settings.chat.language = language\n await callback.answer(settings.chat.get_message('language_selected',\n flag=settings.chat.get_message('flag')), show_alert=True)\n try:\n await callback.edit_message_text(**languages.\n create_message_data(callback.db_user, settings.chat, settings))\n except MessageNotModified:\n pass\n",
"step-4": "from pyrogram import Client, filters\nfrom pyrogram.errors import MessageNotModified\n\nfrom db.models import *\n\n\[email protected]_callback_query(filters.regex('^change_lg_'))\nasync def on_change_language(_, callback):\n settings_id = int(callback.data.split('_')[2])\n\n with db_session:\n settings = SettingsInstance.get(id=settings_id)\n\n if not settings or not settings.can_edit(callback.db_user):\n await callback.answer(callback.db_user.get_message('not_admin'), show_alert=True)\n return\n\n await callback.answer()\n await callback.edit_message_text(**languages.create_message_data(callback.db_user, settings.chat, settings))\n\n\[email protected]_callback_query(filters.regex('^language_g_'))\nasync def on_language_selected(_, callback):\n data = callback.data.split('_')[2:]\n settings_id = int(data[0])\n language = '_'.join(data[1:])\n\n with db_session:\n settings = SettingsInstance.get(id=settings_id)\n\n if not settings or not settings.can_edit(callback.db_user):\n await callback.answer(callback.db_user.get_message('not_admin'), show_alert=True)\n return\n\n settings.chat.language = language\n await callback.answer(settings.chat.get_message('language_selected', flag=settings.chat.get_message('flag')),\n show_alert=True)\n\n try:\n await callback.edit_message_text(**languages.create_message_data(callback.db_user, settings.chat, settings))\n except MessageNotModified: # If the user selects the same language he already had\n pass\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
class BuildCommand(ServerCommand):
def __init__(self, tile: tuple, building_name: str):
ServerCommand.__init__(self)
self._tile = tile
self._building_name = building_name
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class CollectResourceCommand(ServerCommand):
    """Collect one item stack from the resource node on a given tile."""
    # Flat energy price per collect action.
    ENERGY_COST = 30
    def __init__(self, tile: tuple, item: Item):
        ServerCommand.__init__(self)
        self._tile = tile
        self._item = item
    def _check(self):
        # Validate: player available, a resource exists on the tile, the
        # resource can hand over the item, and the player has the energy.
        player = self.town.get_player(self.client_id)
        AvailableCheck(player).check(self.check_result)
        if self._tile not in self.town.resources:
            self.check_result += 'No resource in {}'.format(self._tile)
            return
        resource = self.town.resources[self._tile]
        TransactionCheck(resource, player, self._item).check(self.check_result)
        EnergyCheck(player, CollectResourceCommand.ENERGY_COST).check(self.
            check_result)
    def _do(self):
        # Move the item from the resource inventory to the player's and
        # charge the energy cost.  Assumes _check() already passed.
        player = self.town.get_player(self.client_id)
        player.inventory.add_item(self._item)
        resource = self.town.resources[self._tile]
        resource.inventory.remove_item(self._item)
        player.energy.value -= CollectResourceCommand.ENERGY_COST
    def __repr__(self):
        msg = 'Collect Resource ServerCommand : {}'.format(self._item)
        # CheckResult is falsy when some check failed; show the reason.
        if not self.check_result:
            msg += '\n{}'.format(self.check_result)
        return msg
    @classmethod
    def from_json_dict(cls, json_dict: dict) ->CollectResourceCommand:
        # Inverse of to_json_dict(): 'tile' stored as-is, 'item' nested.
        return cls(json_dict['tile'], Item.from_json_dict(json_dict['item']))
    def to_json_dict(self) ->dict:
        json_dict = super().to_json_dict()
        json_dict['command'] = 'collect'
        json_dict['tile'] = self._tile
        json_dict['item'] = self._item.to_json_dict()
        return json_dict
class BuildingProcessCommand(ServerCommand):
    """Run a building's production process.

    The process removes ``item_required`` from the building's inventory,
    adds ``item_result`` to it, and charges the player the process's
    energy cost.
    """
    def __init__(self, tile: tuple, building_process: BuildingProcess):
        ServerCommand.__init__(self)
        self._tile = tile
        self._building_process = building_process
    def _check(self):
        # Single player lookup (the original fetched it twice).
        player = self.town.get_player(self.client_id)
        AvailableCheck(player).check(self.check_result)
        if self._tile not in self.town.buildings:
            self.check_result += 'No building on {}'.format(self._tile)
            return
        building = self.town.buildings[self._tile]
        # Input must be removable, output must fit, player needs energy.
        InventoryRemoveCheck(building.inventory,
            self._building_process.item_required).check(self.check_result)
        InventoryAddCheck(building.inventory,
            self._building_process.item_result).check(self.check_result)
        EnergyCheck(player,
            self._building_process.energy_required).check(self.check_result)
    def _do(self):
        # Assumes _check() already passed.
        building = self.town.buildings[self._tile]
        building.inventory.remove_item(self._building_process.item_required)
        building.inventory.add_item(self._building_process.item_result)
        player = self.town.get_player(self.client_id)
        player.energy.value -= self._building_process.energy_required
    def __repr__(self):
        msg = 'BuildingProcessCommand ServerCommand {}'.format(
            self._building_process)
        # CheckResult is falsy when some check failed; show the reason.
        if not self.check_result:
            msg += '\n{}'.format(self.check_result)
        return msg
    @classmethod
    def from_json_dict(cls, json_dict):
        """Rebuild the command from its wire/JSON representation."""
        return cls(json_dict['tile'], BuildingProcess.from_json_dict(
            json_dict['building_process']))
    def to_json_dict(self):
        """Serialize for the network layer; inverse of from_json_dict()."""
        json_dict = super().to_json_dict()
        json_dict['command'] = 'building_process'
        json_dict['tile'] = self._tile
        json_dict['building_process'] = self._building_process.to_json_dict()
        return json_dict
class BuyCommand(ServerCommand):
    """Buy one unit of an item from the building standing on a tile.

    On success the item moves from the building's inventory into the
    player's.  The transaction only carries an item name; the quantity
    is fixed to one per command.
    """
    def __init__(self, tile: tuple, transaction: BuildingTransaction):
        ServerCommand.__init__(self)
        self._tile = tile
        self._transaction = transaction
    def _check(self):
        player = self.town.get_player(self.client_id)
        AvailableCheck(player).check(self.check_result)
        # Report a missing building as a check failure instead of raising
        # KeyError (same guard as BuildingProcessCommand).
        if self._tile not in self.town.buildings:
            self.check_result += 'No building on {}'.format(self._tile)
            return
        building = self.town.buildings[self._tile]
        item = Item(self._transaction.item_name, 1)
        # Seller is the building, buyer is the player.
        TransactionCheck(building, player, item).check(self.check_result)
    def _do(self):
        # Assumes _check() already passed.
        item = Item(self._transaction.item_name, 1)
        building = self.town.buildings[self._tile]
        player = self.town.get_player(self.client_id)
        building.inventory.remove_item(item)
        player.inventory.add_item(item)
    def __repr__(self):
        msg = 'BuyCommand ServerCommand {}'.format(self._transaction.item_name)
        # CheckResult is falsy when some check failed; show the reason.
        if not self.check_result:
            msg += '\n{}'.format(self.check_result)
        return msg
    @classmethod
    def from_json_dict(cls, json_dict):
        """Rebuild the command from its wire/JSON representation."""
        return cls(json_dict['tile'], BuildingTransaction.from_json_dict(
            json_dict['transaction']))
    def to_json_dict(self):
        """Serialize for the network layer; inverse of from_json_dict()."""
        json_dict = super().to_json_dict()
        json_dict['command'] = 'buy'
        json_dict['tile'] = self._tile
        json_dict['transaction'] = self._transaction.to_json_dict()
        return json_dict
class SellCommand(ServerCommand):
    """Sell one unit of an item to the building standing on a tile.

    Mirror of BuyCommand: the item moves from the player's inventory
    into the building's; quantity is fixed to one per command.
    """
    def __init__(self, tile: tuple, transaction: BuildingTransaction):
        ServerCommand.__init__(self)
        self._tile = tile
        self._transaction = transaction
    def _check(self):
        player = self.town.get_player(self.client_id)
        AvailableCheck(player).check(self.check_result)
        # Report a missing building as a check failure instead of raising
        # KeyError (same guard as BuildingProcessCommand).
        if self._tile not in self.town.buildings:
            self.check_result += 'No building on {}'.format(self._tile)
            return
        building = self.town.buildings[self._tile]
        item = Item(self._transaction.item_name, 1)
        # Seller is the player, buyer is the building.
        TransactionCheck(player, building, item).check(self.check_result)
    def _do(self):
        # Assumes _check() already passed.
        item = Item(self._transaction.item_name, 1)
        building = self.town.buildings[self._tile]
        player = self.town.get_player(self.client_id)
        building.inventory.add_item(item)
        player.inventory.remove_item(item)
    def __repr__(self):
        msg = 'SellCommand ServerCommand {}'.format(self._transaction.item_name
            )
        # CheckResult is falsy when some check failed; show the reason.
        if not self.check_result:
            msg += '\n{}'.format(self.check_result)
        return msg
    @classmethod
    def from_json_dict(cls, json_dict):
        """Rebuild the command from its wire/JSON representation."""
        return cls(json_dict['tile'], BuildingTransaction.from_json_dict(
            json_dict['transaction']))
    def to_json_dict(self):
        """Serialize for the network layer; inverse of from_json_dict()."""
        json_dict = super().to_json_dict()
        json_dict['command'] = 'sell'
        json_dict['tile'] = self._tile
        json_dict['transaction'] = self._transaction.to_json_dict()
        return json_dict
class BuildBuildingCommand(ServerCommand):
    """Contribute one item from a building's inventory to its construction.

    The item moves from the building's regular inventory into its
    construction inventory and the player pays a flat energy cost.
    """
    # Flat energy price per contributed item.
    ENERGY_COST = 20
    def __init__(self, tile: tuple, item: Item):
        ServerCommand.__init__(self)
        self._item = item
        self._tile = tile
    def _check(self):
        player = self.town.get_player(self.client_id)
        AvailableCheck(player).check(self.check_result)
        # Report a missing building as a check failure instead of raising
        # KeyError (same guard as BuildingProcessCommand).
        if self._tile not in self.town.buildings:
            self.check_result += 'No building on {}'.format(self._tile)
            return
        building = self.town.buildings[self._tile]
        EnergyCheck(player, BuildBuildingCommand.ENERGY_COST).check(
            self.check_result)
        # The building is both source and destination of the item here
        # (inventory -> construction inventory); confirm against
        # TransactionCheck semantics.
        TransactionCheck(building, building, self._item).check(
            self.check_result)
    def _do(self):
        # Assumes _check() already passed.
        building = self.town.buildings[self._tile]
        player = self.town.get_player(self.client_id)
        player.energy.value -= BuildBuildingCommand.ENERGY_COST
        building.inventory.remove_item(self._item)
        building.construction_inventory.add_item(self._item)
    def __repr__(self):
        msg = 'Build Building ServerCommand {}'.format(self._item)
        # CheckResult is falsy when some check failed; show the reason.
        if not self.check_result:
            msg += '\n{}'.format(self.check_result)
        return msg
    @classmethod
    def from_json_dict(cls, json_dict):
        """Rebuild the command from its wire/JSON representation."""
        return cls(json_dict['tile'], Item.from_json_dict(json_dict['item']))
    def to_json_dict(self):
        """Serialize for the network layer; inverse of from_json_dict()."""
        json_dict = super().to_json_dict()
        json_dict['command'] = 'build_building'
        json_dict['tile'] = self._tile
        json_dict['item'] = self._item.to_json_dict()
        return json_dict
class UpgradeBuildingCommand(ServerCommand):
    """Upgrade the building on a tile once its construction is complete."""
    def __init__(self, tile: tuple):
        ServerCommand.__init__(self)
        self._tile = tile
    def _check(self):
        player = self.town.get_player(self.client_id)
        AvailableCheck(player).check(self.check_result)
        # Report a missing building as a check failure instead of raising
        # KeyError (same guard as BuildingProcessCommand).
        if self._tile not in self.town.buildings:
            self.check_result += 'No building on {}'.format(self._tile)
            return
        building = self.town.buildings[self._tile]
        # The construction inventory must be completely filled first.
        if not building.construction_inventory.is_full():
            self.check_result += 'construction not finished'
    def _do(self):
        # Assumes _check() already passed.
        building = self.town.buildings[self._tile]
        building.upgrade()
    def __repr__(self):
        msg = 'Upgrade Building ServerCommand {}'.format(self._tile)
        # CheckResult is falsy when some check failed; show the reason.
        if not self.check_result:
            msg += '\n{}'.format(self.check_result)
        return msg
    @classmethod
    def from_json_dict(cls, json_dict):
        """Rebuild the command from its wire/JSON representation."""
        return cls(json_dict['tile'])
    def to_json_dict(self):
        """Serialize for the network layer; inverse of from_json_dict()."""
        json_dict = super().to_json_dict()
        json_dict['command'] = 'upgrade_building'
        json_dict['tile'] = self._tile
        return json_dict
class SleepCommand(ServerCommand):
    """Put the player to sleep so their energy regenerates over time."""
    # Regen rate when sleeping in a 'cabane' (hut) vs. on open ground.
    ENERGY_REGEN_IN_HOUSE = 4
    ENERGY_REGEN_IN_GROUND = 2
    def __init__(self):
        ServerCommand.__init__(self)
    def _check(self):
        # Sleeping is allowed outdoors or in a 'cabane', but not inside
        # any other building.
        tile = self.town.get_player_tile(self.client_id)
        if tile in self.town.buildings and self.town.buildings[tile
            ].name != 'cabane':
            self.check_result += "Can't sleep in building"
    def _do(self):
        player = self.town.get_player(self.client_id)
        tile = self.town.get_player_tile(self.client_id)
        player.status = 'sleep'
        # Default to the slow outdoor regen, upgrade if inside a 'cabane'.
        player.energy.regen = SleepCommand.ENERGY_REGEN_IN_GROUND
        if tile in self.town.buildings and self.town.buildings[tile
            ].name == 'cabane':
            player.energy.regen = SleepCommand.ENERGY_REGEN_IN_HOUSE
    def __repr__(self):
        msg = 'Sleep command. Player id: {}'.format(self.client_id)
        # CheckResult is falsy when some check failed; show the reason.
        if not self.check_result:
            msg += '\n{}'.format(self.check_result)
        return msg
    @classmethod
    def from_json_dict(cls, json_dict: dict) ->SleepCommand:
        # The command carries no payload beyond its name.
        return cls()
    def to_json_dict(self):
        json_dict = super().to_json_dict()
        json_dict['command'] = 'sleep'
        return json_dict
class WakeUpCommand(ServerCommand):
    """Wake a sleeping player and restore the normal energy regen rate."""
    def __init__(self):
        ServerCommand.__init__(self)
    def _check(self):
        player = self.town.get_player(self.client_id)
        # Run AwakenCheck into a scratch CheckResult: it *passing* (truthy)
        # means the player is already awake, which is the failure case here.
        is_awaken_check = CheckResult()
        AwakenCheck(player).check(is_awaken_check)
        if is_awaken_check:
            self.check_result += '{} is already awake'.format(player.name)
    def _do(self):
        player = self.town.get_player(self.client_id)
        player.status = 'idle'
        player.energy.reset_regen()
    def __repr__(self):
        msg = 'Wake up command. Player id: {}'.format(self.client_id)
        # CheckResult is falsy when some check failed; show the reason.
        if not self.check_result:
            msg += '\n{}'.format(self.check_result)
        return msg
    @classmethod
    def from_json_dict(cls, json_dict: dict) ->WakeUpCommand:
        # The command carries no payload beyond its name.
        return cls()
    def to_json_dict(self):
        json_dict = super().to_json_dict()
        json_dict['command'] = 'wakeup'
        return json_dict
class HelpPlayerCommand(ServerCommand):
    """Spend energy to give one health point to a player on the same tile.

    Only useful for a target who is NOT currently 'available' (helping an
    available player is rejected in _check).
    """
    # Energy the helper pays / health the helped player receives.
    ENERGY_TO_HELP = 20
    HEALTH_TO_GIVE = 1
    def __init__(self, player_to_help_id):
        ServerCommand.__init__(self)
        self._player_to_help_id = player_to_help_id
    def _check(self):
        player = self.town.get_player(self.client_id)
        AvailableCheck(player).check(self.check_result)
        # Both players must exist ...
        if self.client_id not in self.town.players.keys():
            self.check_result += 'Player {} does not exist'.format(self.
                client_id)
            return
        if self._player_to_help_id not in self.town.players.keys():
            self.check_result += 'Player {} does not exist'.format(self.
                _player_to_help_id)
            return
        # ... and stand on the same tile.
        if self.town.get_player_tile(self.client_id
            ) != self.town.get_player_tile(self._player_to_help_id):
            self.check_result += ('Players {} and {} are not in the same tile'
                .format(self.client_id, self._player_to_help_id))
            return
        EnergyCheck(self.town.get_player(self.client_id), HelpPlayerCommand
            .ENERGY_TO_HELP).check(self.check_result)
        # The target passing AvailableCheck (truthy) is the failure case:
        # an available player does not need help.
        is_alive_check = CheckResult()
        AvailableCheck(self.town.get_player(self._player_to_help_id)).check(
            is_alive_check)
        if is_alive_check:
            self.check_result += '{} has enough health to keep moving'.format(
                self._player_to_help_id)
    def _do(self):
        # Assumes _check() already passed: transfer energy for health.
        player_helper = self.town.get_player(self.client_id)
        player_helper.energy.value -= HelpPlayerCommand.ENERGY_TO_HELP
        player_to_help = self.town.get_player(self._player_to_help_id)
        player_to_help.health.value += HelpPlayerCommand.HEALTH_TO_GIVE
    def __repr__(self):
        msg = 'HelpPlayerCommand: try to help {}'.format(self.
            _player_to_help_id)
        # CheckResult is falsy when some check failed; show the reason.
        if not self.check_result:
            msg += '\n{}'.format(self.check_result)
        return msg
    @classmethod
    def from_json_dict(cls, json_dict: dict) ->HelpPlayerCommand:
        return cls(json_dict['player_to_help_id'])
    def to_json_dict(self):
        json_dict = super().to_json_dict()
        json_dict['command'] = 'help'
        json_dict['player_to_help_id'] = self._player_to_help_id
        return json_dict
class CommandsFactory:
    """Builds concrete ServerCommand instances from podsixnet payloads."""
    # Maps the wire-level 'command' string to its ServerCommand class.
    COMMANDS_DICT = {
        'move': MovePlayerCommand,
        'build': BuildCommand,
        'collect': CollectResourceCommand,
        'building_process': BuildingProcessCommand,
        'buy': BuyCommand,
        'sell': SellCommand,
        'build_building': BuildBuildingCommand,
        'upgrade_building': UpgradeBuildingCommand,
        'help': HelpPlayerCommand,
        'sleep': SleepCommand,
        'wakeup': WakeUpCommand,
    }

    @staticmethod
    def from_podsixnet(podsixnet_dict):
        """Deserialize a network dict into a ready-to-run command."""
        command_name = podsixnet_dict['command']
        if command_name not in CommandsFactory.COMMANDS_DICT:
            raise NotImplementedError
        command_cls = CommandsFactory.COMMANDS_DICT[command_name]
        command = command_cls.from_json_dict(podsixnet_dict)
        # Attach the transport-level fields shared by every command.
        command.client_id = podsixnet_dict['client_id']
        command.check_result = CheckResult.from_json_dict(
            podsixnet_dict['check_result'])
        return command
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class BuildCommand(ServerCommand):
    def __init__(self, tile: tuple, building_name: str):
        """Store the target tile and the name of the building to create."""
        ServerCommand.__init__(self)
        self._tile = tile
        self._building_name = building_name
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
    @classmethod
    def from_json_dict(cls, json_dict: dict) ->BuildCommand:
        """Deserialize: rebuild the command from its JSON-dict form."""
        return cls(json_dict['tile'], json_dict['building_name'])
    def to_json_dict(self):
        """Serialize to a JSON-compatible dict (adds the wire command name)."""
        json_dict = super().to_json_dict()
        json_dict['command'] = 'build'
        json_dict['building_name'] = self._building_name
        json_dict['tile'] = self._tile
        return json_dict
class CollectResourceCommand(ServerCommand):
    """Harvest one item stack from the resource node standing on a tile."""
    ENERGY_COST = 30

    def __init__(self, tile: tuple, item: Item):
        ServerCommand.__init__(self)
        self._tile = tile
        self._item = item

    def _check(self):
        collector = self.town.get_player(self.client_id)
        AvailableCheck(collector).check(self.check_result)
        if self._tile not in self.town.resources:
            self.check_result += 'No resource in {}'.format(self._tile)
            return
        node = self.town.resources[self._tile]
        # The transfer must be possible and the player must have the energy.
        TransactionCheck(node, collector, self._item).check(self.check_result)
        EnergyCheck(collector, CollectResourceCommand.ENERGY_COST).check(
            self.check_result)

    def _do(self):
        collector = self.town.get_player(self.client_id)
        collector.inventory.add_item(self._item)
        node = self.town.resources[self._tile]
        node.inventory.remove_item(self._item)
        collector.energy.value -= CollectResourceCommand.ENERGY_COST

    def __repr__(self):
        base = 'Collect Resource ServerCommand : {}'.format(self._item)
        if self.check_result:
            return base
        return base + '\n{}'.format(self.check_result)

    @classmethod
    def from_json_dict(cls, json_dict: dict) ->CollectResourceCommand:
        """Deserialize: rebuild the command from its JSON-dict form."""
        return cls(json_dict['tile'], Item.from_json_dict(json_dict['item']))

    def to_json_dict(self) ->dict:
        """Serialize to a JSON-compatible dict."""
        json_dict = super().to_json_dict()
        json_dict['command'] = 'collect'
        json_dict['tile'] = self._tile
        json_dict['item'] = self._item.to_json_dict()
        return json_dict


class BuildingProcessCommand(ServerCommand):
    """Run one production step of a building: consume inputs, emit outputs."""

    def __init__(self, tile: tuple, building_process: BuildingProcess):
        ServerCommand.__init__(self)
        self._tile = tile
        self._building_process = building_process

    def _check(self):
        worker = self.town.get_player(self.client_id)
        AvailableCheck(worker).check(self.check_result)
        if self._tile not in self.town.buildings:
            self.check_result += 'No building on {}'.format(self._tile)
            return
        workshop = self.town.buildings[self._tile]
        process = self._building_process
        # The building must hold the input, have room for the output, and
        # the worker must have the energy the process requires.
        InventoryRemoveCheck(workshop.inventory, process.item_required).check(
            self.check_result)
        InventoryAddCheck(workshop.inventory, process.item_result).check(
            self.check_result)
        EnergyCheck(worker, process.energy_required).check(self.check_result)

    def _do(self):
        workshop = self.town.buildings[self._tile]
        process = self._building_process
        workshop.inventory.remove_item(process.item_required)
        workshop.inventory.add_item(process.item_result)
        worker = self.town.get_player(self.client_id)
        worker.energy.value -= process.energy_required

    def __repr__(self):
        base = 'BuildingProcessCommand ServerCommand {}'.format(
            self._building_process)
        if self.check_result:
            return base
        return base + '\n{}'.format(self.check_result)

    @classmethod
    def from_json_dict(cls, json_dict):
        """Deserialize: rebuild the command from its JSON-dict form."""
        return cls(json_dict['tile'], BuildingProcess.from_json_dict(
            json_dict['building_process']))

    def to_json_dict(self):
        """Serialize to a JSON-compatible dict."""
        json_dict = super().to_json_dict()
        json_dict['command'] = 'building_process'
        json_dict['tile'] = self._tile
        json_dict['building_process'] = self._building_process.to_json_dict()
        return json_dict
class BuyCommand(ServerCommand):
    """Buy one unit of an item from the building standing on a tile.

    Moves a single Item from the building inventory to the player inventory.
    """

    def __init__(self, tile: tuple, transaction: BuildingTransaction):
        ServerCommand.__init__(self)
        self._tile = tile
        self._transaction = transaction

    def _check(self):
        # Fix: guard against a tile with no building (consistent with
        # BuildingProcessCommand); direct indexing raised KeyError before.
        if self._tile not in self.town.buildings:
            self.check_result += 'No building on {}'.format(self._tile)
            return
        building = self.town.buildings[self._tile]
        player = self.town.get_player(self.client_id)
        item = Item(self._transaction.item_name, 1)
        AvailableCheck(player).check(self.check_result)
        # Seller is the building, buyer is the player.
        TransactionCheck(building, player, item).check(self.check_result)

    def _do(self):
        item = Item(self._transaction.item_name, 1)
        building = self.town.buildings[self._tile]
        player = self.town.get_player(self.client_id)
        building.inventory.remove_item(item)
        player.inventory.add_item(item)

    def __repr__(self):
        msg = 'BuyCommand ServerCommand {}'.format(self._transaction.item_name)
        if not self.check_result:
            msg += '\n{}'.format(self.check_result)
        return msg

    @classmethod
    def from_json_dict(cls, json_dict):
        """Deserialize: rebuild the command from its JSON-dict form."""
        return cls(json_dict['tile'], BuildingTransaction.from_json_dict(
            json_dict['transaction']))

    def to_json_dict(self):
        """Serialize to a JSON-compatible dict."""
        json_dict = super().to_json_dict()
        json_dict['command'] = 'buy'
        json_dict['tile'] = self._tile
        json_dict['transaction'] = self._transaction.to_json_dict()
        return json_dict


class SellCommand(ServerCommand):
    """Sell one unit of an item from the player to the building on a tile.

    Moves a single Item from the player inventory to the building inventory.
    """

    def __init__(self, tile: tuple, transaction: BuildingTransaction):
        ServerCommand.__init__(self)
        self._tile = tile
        self._transaction = transaction

    def _check(self):
        # Fix: guard against a tile with no building (consistent with
        # BuildingProcessCommand); direct indexing raised KeyError before.
        if self._tile not in self.town.buildings:
            self.check_result += 'No building on {}'.format(self._tile)
            return
        building = self.town.buildings[self._tile]
        player = self.town.get_player(self.client_id)
        item = Item(self._transaction.item_name, 1)
        AvailableCheck(player).check(self.check_result)
        # Seller is the player, buyer is the building.
        TransactionCheck(player, building, item).check(self.check_result)

    def _do(self):
        item = Item(self._transaction.item_name, 1)
        building = self.town.buildings[self._tile]
        player = self.town.get_player(self.client_id)
        building.inventory.add_item(item)
        player.inventory.remove_item(item)

    def __repr__(self):
        msg = 'SellCommand ServerCommand {}'.format(self._transaction.item_name
            )
        if not self.check_result:
            msg += '\n{}'.format(self.check_result)
        return msg

    @classmethod
    def from_json_dict(cls, json_dict):
        """Deserialize: rebuild the command from its JSON-dict form."""
        return cls(json_dict['tile'], BuildingTransaction.from_json_dict(
            json_dict['transaction']))

    def to_json_dict(self):
        """Serialize to a JSON-compatible dict."""
        json_dict = super().to_json_dict()
        json_dict['command'] = 'sell'
        json_dict['tile'] = self._tile
        json_dict['transaction'] = self._transaction.to_json_dict()
        return json_dict


class BuildBuildingCommand(ServerCommand):
    """Invest one item from a building's stock into its own construction."""
    ENERGY_COST = 20

    def __init__(self, tile: tuple, item: Item):
        ServerCommand.__init__(self)
        self._item = item
        self._tile = tile

    def _check(self):
        # Fix: guard against a tile with no building (consistent with
        # BuildingProcessCommand); direct indexing raised KeyError before.
        if self._tile not in self.town.buildings:
            self.check_result += 'No building on {}'.format(self._tile)
            return
        building = self.town.buildings[self._tile]
        player = self.town.get_player(self.client_id)
        AvailableCheck(player).check(self.check_result)
        EnergyCheck(player, BuildBuildingCommand.ENERGY_COST).check(
            self.check_result)
        # Source and destination are the same building: the item moves from
        # its stock inventory into its construction inventory.
        TransactionCheck(building, building, self._item).check(
            self.check_result)

    def _do(self):
        building = self.town.buildings[self._tile]
        player = self.town.get_player(self.client_id)
        player.energy.value -= BuildBuildingCommand.ENERGY_COST
        building.inventory.remove_item(self._item)
        building.construction_inventory.add_item(self._item)

    def __repr__(self):
        msg = 'Build Building ServerCommand {}'.format(self._item)
        if not self.check_result:
            msg += '\n{}'.format(self.check_result)
        return msg

    @classmethod
    def from_json_dict(cls, json_dict):
        """Deserialize: rebuild the command from its JSON-dict form."""
        return cls(json_dict['tile'], Item.from_json_dict(json_dict['item']))

    def to_json_dict(self):
        """Serialize to a JSON-compatible dict."""
        json_dict = super().to_json_dict()
        json_dict['command'] = 'build_building'
        json_dict['tile'] = self._tile
        json_dict['item'] = self._item.to_json_dict()
        return json_dict


class UpgradeBuildingCommand(ServerCommand):
    """Upgrade a building once its construction inventory is complete."""

    def __init__(self, tile: tuple):
        ServerCommand.__init__(self)
        self._tile = tile

    def _check(self):
        # Fix: guard against a tile with no building (consistent with
        # BuildingProcessCommand); direct indexing raised KeyError before.
        if self._tile not in self.town.buildings:
            self.check_result += 'No building on {}'.format(self._tile)
            return
        building = self.town.buildings[self._tile]
        player = self.town.get_player(self.client_id)
        AvailableCheck(player).check(self.check_result)
        if not building.construction_inventory.is_full():
            self.check_result += 'construction not finished'

    def _do(self):
        building = self.town.buildings[self._tile]
        building.upgrade()

    def __repr__(self):
        msg = 'Upgrade Building ServerCommand {}'.format(self._tile)
        if not self.check_result:
            msg += '\n{}'.format(self.check_result)
        return msg

    @classmethod
    def from_json_dict(cls, json_dict):
        """Deserialize: rebuild the command from its JSON-dict form."""
        return cls(json_dict['tile'])

    def to_json_dict(self):
        """Serialize to a JSON-compatible dict."""
        json_dict = super().to_json_dict()
        json_dict['command'] = 'upgrade_building'
        json_dict['tile'] = self._tile
        return json_dict
class SleepCommand(ServerCommand):
    """Put the player to sleep so that energy regenerates over time."""
    ENERGY_REGEN_IN_HOUSE = 4
    ENERGY_REGEN_IN_GROUND = 2

    def __init__(self):
        ServerCommand.__init__(self)

    def _check(self):
        tile = self.town.get_player_tile(self.client_id)
        # Sleeping is only allowed outdoors or inside a 'cabane'.
        occupied = tile in self.town.buildings
        if occupied and self.town.buildings[tile].name != 'cabane':
            self.check_result += "Can't sleep in building"

    def _do(self):
        sleeper = self.town.get_player(self.client_id)
        tile = self.town.get_player_tile(self.client_id)
        sleeper.status = 'sleep'
        in_cabin = (tile in self.town.buildings and
            self.town.buildings[tile].name == 'cabane')
        sleeper.energy.regen = (SleepCommand.ENERGY_REGEN_IN_HOUSE if
            in_cabin else SleepCommand.ENERGY_REGEN_IN_GROUND)

    def __repr__(self):
        base = 'Sleep command. Player id: {}'.format(self.client_id)
        if self.check_result:
            return base
        return base + '\n{}'.format(self.check_result)

    @classmethod
    def from_json_dict(cls, json_dict: dict) ->SleepCommand:
        """Deserialize: the command carries no extra payload."""
        return cls()

    def to_json_dict(self):
        """Serialize to a JSON-compatible dict."""
        json_dict = super().to_json_dict()
        json_dict['command'] = 'sleep'
        return json_dict


class WakeUpCommand(ServerCommand):
    """Wake a sleeping player up and restore normal energy regeneration."""

    def __init__(self):
        ServerCommand.__init__(self)

    def _check(self):
        sleeper = self.town.get_player(self.client_id)
        awake_result = CheckResult()
        AwakenCheck(sleeper).check(awake_result)
        if awake_result:
            self.check_result += '{} is already awake'.format(sleeper.name)

    def _do(self):
        sleeper = self.town.get_player(self.client_id)
        sleeper.status = 'idle'
        sleeper.energy.reset_regen()

    def __repr__(self):
        base = 'Wake up command. Player id: {}'.format(self.client_id)
        if self.check_result:
            return base
        return base + '\n{}'.format(self.check_result)

    @classmethod
    def from_json_dict(cls, json_dict: dict) ->WakeUpCommand:
        """Deserialize: the command carries no extra payload."""
        return cls()

    def to_json_dict(self):
        """Serialize to a JSON-compatible dict."""
        json_dict = super().to_json_dict()
        json_dict['command'] = 'wakeup'
        return json_dict
class HelpPlayerCommand(ServerCommand):
    """Spend some of the helper's energy to heal another player.

    Both players must be on the same tile, and the target must *fail* the
    availability check (i.e. be immobilized) for help to make sense.
    """
    ENERGY_TO_HELP = 20
    HEALTH_TO_GIVE = 1

    def __init__(self, player_to_help_id):
        ServerCommand.__init__(self)
        self._player_to_help_id = player_to_help_id

    def _check(self):
        player = self.town.get_player(self.client_id)
        AvailableCheck(player).check(self.check_result)
        # Idiom fix: membership tests on the mapping itself, not on .keys().
        if self.client_id not in self.town.players:
            self.check_result += 'Player {} does not exist'.format(self.
                client_id)
            return
        if self._player_to_help_id not in self.town.players:
            self.check_result += 'Player {} does not exist'.format(self.
                _player_to_help_id)
            return
        if self.town.get_player_tile(self.client_id
            ) != self.town.get_player_tile(self._player_to_help_id):
            self.check_result += ('Players {} and {} are not in the same tile'
                .format(self.client_id, self._player_to_help_id))
            return
        EnergyCheck(self.town.get_player(self.client_id), HelpPlayerCommand
            .ENERGY_TO_HELP).check(self.check_result)
        # If the target still passes the availability check, it does not
        # actually need help.
        is_alive_check = CheckResult()
        AvailableCheck(self.town.get_player(self._player_to_help_id)).check(
            is_alive_check)
        if is_alive_check:
            self.check_result += '{} has enough health to keep moving'.format(
                self._player_to_help_id)

    def _do(self):
        player_helper = self.town.get_player(self.client_id)
        player_helper.energy.value -= HelpPlayerCommand.ENERGY_TO_HELP
        player_to_help = self.town.get_player(self._player_to_help_id)
        player_to_help.health.value += HelpPlayerCommand.HEALTH_TO_GIVE

    def __repr__(self):
        msg = 'HelpPlayerCommand: try to help {}'.format(self.
            _player_to_help_id)
        if not self.check_result:
            msg += '\n{}'.format(self.check_result)
        return msg

    @classmethod
    def from_json_dict(cls, json_dict: dict) ->HelpPlayerCommand:
        """Deserialize: rebuild the command from its JSON-dict form."""
        return cls(json_dict['player_to_help_id'])

    def to_json_dict(self):
        """Serialize to a JSON-compatible dict."""
        json_dict = super().to_json_dict()
        json_dict['command'] = 'help'
        json_dict['player_to_help_id'] = self._player_to_help_id
        return json_dict
class CommandsFactory:
    """Registry mapping wire command names to ServerCommand subclasses.

    Used by the server to rebuild a concrete command object from the raw
    PodSixNet message dict sent by a client.
    """

    # Idiom fix: one dict literal instead of eleven item assignments
    # (same mapping, same keys).
    COMMANDS_DICT = {
        'move': MovePlayerCommand,
        'build': BuildCommand,
        'collect': CollectResourceCommand,
        'building_process': BuildingProcessCommand,
        'buy': BuyCommand,
        'sell': SellCommand,
        'build_building': BuildBuildingCommand,
        'upgrade_building': UpgradeBuildingCommand,
        'help': HelpPlayerCommand,
        'sleep': SleepCommand,
        'wakeup': WakeUpCommand,
    }

    @staticmethod
    def from_podsixnet(podsixnet_dict):
        """Build a command object from a PodSixNet message dict.

        Raises NotImplementedError for an unknown command name, then
        restores the sender's client id and the serialized check result.
        """
        command_name = podsixnet_dict['command']
        if command_name not in CommandsFactory.COMMANDS_DICT:
            raise NotImplementedError
        command = CommandsFactory.COMMANDS_DICT[command_name].from_json_dict(
            podsixnet_dict)
        command.client_id = podsixnet_dict['client_id']
        command.check_result = CheckResult.from_json_dict(
            podsixnet_dict['check_result'])
        return command
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class MovePlayerCommand(ServerCommand):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
    def __repr__(self):
        """Summary line; appends accumulated check messages when falsy."""
        msg = 'Move ServerCommand : {}'.format(self._direction)
        if not self.check_result:
            msg += '\n{}'.format(self.check_result)
        return msg
def _check(self):
player = self.town.get_player(self.client_id)
EnergyCheck(player, MovePlayerCommand.ENERGY_COST).check(self.
check_result)
AvailableCheck(player).check(self.check_result)
for tile in self._get_tiles_coordinates_dict().values():
if tile not in self.town.backgrounds.keys():
self.check_result += 'tile {} not in town'.format(tile)
return
BackgroundMovementCheck(self.town.backgrounds[tile], player).check(
self.check_result)
    def _do(self):
        """Apply the move: spend energy and update position/direction."""
        x_dest, y_dest = self.tile_dest
        player = self.town.get_player(self.client_id)
        player.status = 'move'
        player.direction = self._direction
        player.energy.value -= MovePlayerCommand.ENERGY_COST
        player.x = x_dest
        player.y = y_dest
    @property
    def tile_dest(self) ->tuple:
        """Destination (x, y) after one step in the requested direction.

        The step is the player's velocity scaled by the movement
        multiplicator of the background currently under the player.
        """
        movement_matrix = {}
        movement_matrix['left'] = -1, 0
        movement_matrix['right'] = +1, 0
        movement_matrix['up'] = 0, -1
        movement_matrix['down'] = 0, +1
        player = self.town.get_player(self.client_id)
        tile = self.town.get_player_tile(self.client_id)
        background = self.town.backgrounds[tile]
        bg_multiplicator = background.move_multiplicator
        x_dest = player.x + movement_matrix[self._direction][0
            ] * bg_multiplicator * player.velocity
        y_dest = player.y + movement_matrix[self._direction][1
            ] * bg_multiplicator * player.velocity
        return x_dest, y_dest
    def _get_tiles_coordinates_dict(self):
        """Tile coordinates of the four corners of the destination footprint.

        The +0.99 offsets pick the far corners -- presumably the player
        sprite occupies roughly one tile (TODO confirm sprite size).
        """
        x_dest, y_dest = self.tile_dest
        tiles_coordinates_dict = {'topleft': (math.floor(x_dest), math.
            floor(y_dest)), 'topright': (math.floor(x_dest + 0.99), math.
            floor(y_dest)), 'bottomleft': (math.floor(x_dest), math.floor(
            y_dest + 0.99)), 'bottomright': (math.floor(x_dest + 0.99),
            math.floor(y_dest + 0.99))}
        return tiles_coordinates_dict
    @classmethod
    def from_json_dict(cls, json_dict) ->MovePlayerCommand:
        """Deserialize: rebuild the command from its JSON-dict form."""
        return cls(json_dict['direction'])
<|reserved_special_token_0|>
class BuildCommand(ServerCommand):
    """Place a new building (by name) on a given tile of the town."""

    def __init__(self, tile: tuple, building_name: str):
        ServerCommand.__init__(self)
        self._tile = tile
        self._building_name = building_name

    def _check(self):
        builder = self.town.get_player(self.client_id)
        AvailableCheck(builder).check(self.check_result)
        if self._tile not in self.town.backgrounds:
            self.check_result += 'tile {} not in town'.format(self._tile)
            return
        ground = self.town.backgrounds[self._tile]
        BackgroundBuildCheck(ground, self._building_name).check(
            self.check_result)
        if self._tile in self.town.buildings:
            occupant = self.town.buildings[self._tile]
            self.check_result += "Can't build {} : {} already built on {}".format(
                self._building_name, occupant.name, self._tile)

    def _do(self):
        new_building = BuildingFactory.create_building_by_name(
            self._building_name)
        self.town.set_building(new_building, self._tile)

    def __repr__(self):
        base = 'Build ServerCommand : {} in {}'.format(self._building_name,
            self._tile)
        if self.check_result:
            return base
        return base + '\n{}'.format(self.check_result)

    @classmethod
    def from_json_dict(cls, json_dict: dict) ->BuildCommand:
        """Deserialize: rebuild the command from its JSON-dict form."""
        return cls(json_dict['tile'], json_dict['building_name'])

    def to_json_dict(self):
        """Serialize to a JSON-compatible dict."""
        json_dict = super().to_json_dict()
        json_dict['command'] = 'build'
        json_dict['building_name'] = self._building_name
        json_dict['tile'] = self._tile
        return json_dict


class CollectResourceCommand(ServerCommand):
    """Harvest one item stack from the resource node standing on a tile."""
    ENERGY_COST = 30

    def __init__(self, tile: tuple, item: Item):
        ServerCommand.__init__(self)
        self._tile = tile
        self._item = item

    def _check(self):
        collector = self.town.get_player(self.client_id)
        AvailableCheck(collector).check(self.check_result)
        if self._tile not in self.town.resources:
            self.check_result += 'No resource in {}'.format(self._tile)
            return
        node = self.town.resources[self._tile]
        # The transfer must be possible and the player must have the energy.
        TransactionCheck(node, collector, self._item).check(self.check_result)
        EnergyCheck(collector, CollectResourceCommand.ENERGY_COST).check(
            self.check_result)

    def _do(self):
        collector = self.town.get_player(self.client_id)
        collector.inventory.add_item(self._item)
        node = self.town.resources[self._tile]
        node.inventory.remove_item(self._item)
        collector.energy.value -= CollectResourceCommand.ENERGY_COST

    def __repr__(self):
        base = 'Collect Resource ServerCommand : {}'.format(self._item)
        if self.check_result:
            return base
        return base + '\n{}'.format(self.check_result)

    @classmethod
    def from_json_dict(cls, json_dict: dict) ->CollectResourceCommand:
        """Deserialize: rebuild the command from its JSON-dict form."""
        return cls(json_dict['tile'], Item.from_json_dict(json_dict['item']))

    def to_json_dict(self) ->dict:
        """Serialize to a JSON-compatible dict."""
        json_dict = super().to_json_dict()
        json_dict['command'] = 'collect'
        json_dict['tile'] = self._tile
        json_dict['item'] = self._item.to_json_dict()
        return json_dict


class BuildingProcessCommand(ServerCommand):
    """Run one production step of a building: consume inputs, emit outputs."""

    def __init__(self, tile: tuple, building_process: BuildingProcess):
        ServerCommand.__init__(self)
        self._tile = tile
        self._building_process = building_process

    def _check(self):
        worker = self.town.get_player(self.client_id)
        AvailableCheck(worker).check(self.check_result)
        if self._tile not in self.town.buildings:
            self.check_result += 'No building on {}'.format(self._tile)
            return
        workshop = self.town.buildings[self._tile]
        process = self._building_process
        # The building must hold the input, have room for the output, and
        # the worker must have the energy the process requires.
        InventoryRemoveCheck(workshop.inventory, process.item_required).check(
            self.check_result)
        InventoryAddCheck(workshop.inventory, process.item_result).check(
            self.check_result)
        EnergyCheck(worker, process.energy_required).check(self.check_result)

    def _do(self):
        workshop = self.town.buildings[self._tile]
        process = self._building_process
        workshop.inventory.remove_item(process.item_required)
        workshop.inventory.add_item(process.item_result)
        worker = self.town.get_player(self.client_id)
        worker.energy.value -= process.energy_required

    def __repr__(self):
        base = 'BuildingProcessCommand ServerCommand {}'.format(
            self._building_process)
        if self.check_result:
            return base
        return base + '\n{}'.format(self.check_result)

    @classmethod
    def from_json_dict(cls, json_dict):
        """Deserialize: rebuild the command from its JSON-dict form."""
        return cls(json_dict['tile'], BuildingProcess.from_json_dict(
            json_dict['building_process']))

    def to_json_dict(self):
        """Serialize to a JSON-compatible dict."""
        json_dict = super().to_json_dict()
        json_dict['command'] = 'building_process'
        json_dict['tile'] = self._tile
        json_dict['building_process'] = self._building_process.to_json_dict()
        return json_dict
class BuyCommand(ServerCommand):
    """Buy one unit of an item from the building standing on a tile.

    Moves a single Item from the building inventory to the player inventory.
    """

    def __init__(self, tile: tuple, transaction: BuildingTransaction):
        ServerCommand.__init__(self)
        self._tile = tile
        self._transaction = transaction

    def _check(self):
        # Fix: guard against a tile with no building (consistent with
        # BuildingProcessCommand); direct indexing raised KeyError before.
        if self._tile not in self.town.buildings:
            self.check_result += 'No building on {}'.format(self._tile)
            return
        building = self.town.buildings[self._tile]
        player = self.town.get_player(self.client_id)
        item = Item(self._transaction.item_name, 1)
        AvailableCheck(player).check(self.check_result)
        # Seller is the building, buyer is the player.
        TransactionCheck(building, player, item).check(self.check_result)

    def _do(self):
        item = Item(self._transaction.item_name, 1)
        building = self.town.buildings[self._tile]
        player = self.town.get_player(self.client_id)
        building.inventory.remove_item(item)
        player.inventory.add_item(item)

    def __repr__(self):
        msg = 'BuyCommand ServerCommand {}'.format(self._transaction.item_name)
        if not self.check_result:
            msg += '\n{}'.format(self.check_result)
        return msg

    @classmethod
    def from_json_dict(cls, json_dict):
        """Deserialize: rebuild the command from its JSON-dict form."""
        return cls(json_dict['tile'], BuildingTransaction.from_json_dict(
            json_dict['transaction']))

    def to_json_dict(self):
        """Serialize to a JSON-compatible dict."""
        json_dict = super().to_json_dict()
        json_dict['command'] = 'buy'
        json_dict['tile'] = self._tile
        json_dict['transaction'] = self._transaction.to_json_dict()
        return json_dict


class SellCommand(ServerCommand):
    """Sell one unit of an item from the player to the building on a tile.

    Moves a single Item from the player inventory to the building inventory.
    """

    def __init__(self, tile: tuple, transaction: BuildingTransaction):
        ServerCommand.__init__(self)
        self._tile = tile
        self._transaction = transaction

    def _check(self):
        # Fix: guard against a tile with no building (consistent with
        # BuildingProcessCommand); direct indexing raised KeyError before.
        if self._tile not in self.town.buildings:
            self.check_result += 'No building on {}'.format(self._tile)
            return
        building = self.town.buildings[self._tile]
        player = self.town.get_player(self.client_id)
        item = Item(self._transaction.item_name, 1)
        AvailableCheck(player).check(self.check_result)
        # Seller is the player, buyer is the building.
        TransactionCheck(player, building, item).check(self.check_result)

    def _do(self):
        item = Item(self._transaction.item_name, 1)
        building = self.town.buildings[self._tile]
        player = self.town.get_player(self.client_id)
        building.inventory.add_item(item)
        player.inventory.remove_item(item)

    def __repr__(self):
        msg = 'SellCommand ServerCommand {}'.format(self._transaction.item_name
            )
        if not self.check_result:
            msg += '\n{}'.format(self.check_result)
        return msg

    @classmethod
    def from_json_dict(cls, json_dict):
        """Deserialize: rebuild the command from its JSON-dict form."""
        return cls(json_dict['tile'], BuildingTransaction.from_json_dict(
            json_dict['transaction']))

    def to_json_dict(self):
        """Serialize to a JSON-compatible dict."""
        json_dict = super().to_json_dict()
        json_dict['command'] = 'sell'
        json_dict['tile'] = self._tile
        json_dict['transaction'] = self._transaction.to_json_dict()
        return json_dict


class BuildBuildingCommand(ServerCommand):
    """Invest one item from a building's stock into its own construction."""
    ENERGY_COST = 20

    def __init__(self, tile: tuple, item: Item):
        ServerCommand.__init__(self)
        self._item = item
        self._tile = tile

    def _check(self):
        # Fix: guard against a tile with no building (consistent with
        # BuildingProcessCommand); direct indexing raised KeyError before.
        if self._tile not in self.town.buildings:
            self.check_result += 'No building on {}'.format(self._tile)
            return
        building = self.town.buildings[self._tile]
        player = self.town.get_player(self.client_id)
        AvailableCheck(player).check(self.check_result)
        EnergyCheck(player, BuildBuildingCommand.ENERGY_COST).check(
            self.check_result)
        # Source and destination are the same building: the item moves from
        # its stock inventory into its construction inventory.
        TransactionCheck(building, building, self._item).check(
            self.check_result)

    def _do(self):
        building = self.town.buildings[self._tile]
        player = self.town.get_player(self.client_id)
        player.energy.value -= BuildBuildingCommand.ENERGY_COST
        building.inventory.remove_item(self._item)
        building.construction_inventory.add_item(self._item)

    def __repr__(self):
        msg = 'Build Building ServerCommand {}'.format(self._item)
        if not self.check_result:
            msg += '\n{}'.format(self.check_result)
        return msg

    @classmethod
    def from_json_dict(cls, json_dict):
        """Deserialize: rebuild the command from its JSON-dict form."""
        return cls(json_dict['tile'], Item.from_json_dict(json_dict['item']))

    def to_json_dict(self):
        """Serialize to a JSON-compatible dict."""
        json_dict = super().to_json_dict()
        json_dict['command'] = 'build_building'
        json_dict['tile'] = self._tile
        json_dict['item'] = self._item.to_json_dict()
        return json_dict


class UpgradeBuildingCommand(ServerCommand):
    """Upgrade a building once its construction inventory is complete."""

    def __init__(self, tile: tuple):
        ServerCommand.__init__(self)
        self._tile = tile

    def _check(self):
        # Fix: guard against a tile with no building (consistent with
        # BuildingProcessCommand); direct indexing raised KeyError before.
        if self._tile not in self.town.buildings:
            self.check_result += 'No building on {}'.format(self._tile)
            return
        building = self.town.buildings[self._tile]
        player = self.town.get_player(self.client_id)
        AvailableCheck(player).check(self.check_result)
        if not building.construction_inventory.is_full():
            self.check_result += 'construction not finished'

    def _do(self):
        building = self.town.buildings[self._tile]
        building.upgrade()

    def __repr__(self):
        msg = 'Upgrade Building ServerCommand {}'.format(self._tile)
        if not self.check_result:
            msg += '\n{}'.format(self.check_result)
        return msg

    @classmethod
    def from_json_dict(cls, json_dict):
        """Deserialize: rebuild the command from its JSON-dict form."""
        return cls(json_dict['tile'])

    def to_json_dict(self):
        """Serialize to a JSON-compatible dict."""
        json_dict = super().to_json_dict()
        json_dict['command'] = 'upgrade_building'
        json_dict['tile'] = self._tile
        return json_dict
class SleepCommand(ServerCommand):
    """Put the player to sleep so that energy regenerates over time."""
    ENERGY_REGEN_IN_HOUSE = 4
    ENERGY_REGEN_IN_GROUND = 2

    def __init__(self):
        ServerCommand.__init__(self)

    def _check(self):
        tile = self.town.get_player_tile(self.client_id)
        # Sleeping is only allowed outdoors or inside a 'cabane'.
        occupied = tile in self.town.buildings
        if occupied and self.town.buildings[tile].name != 'cabane':
            self.check_result += "Can't sleep in building"

    def _do(self):
        sleeper = self.town.get_player(self.client_id)
        tile = self.town.get_player_tile(self.client_id)
        sleeper.status = 'sleep'
        in_cabin = (tile in self.town.buildings and
            self.town.buildings[tile].name == 'cabane')
        sleeper.energy.regen = (SleepCommand.ENERGY_REGEN_IN_HOUSE if
            in_cabin else SleepCommand.ENERGY_REGEN_IN_GROUND)

    def __repr__(self):
        base = 'Sleep command. Player id: {}'.format(self.client_id)
        if self.check_result:
            return base
        return base + '\n{}'.format(self.check_result)

    @classmethod
    def from_json_dict(cls, json_dict: dict) ->SleepCommand:
        """Deserialize: the command carries no extra payload."""
        return cls()

    def to_json_dict(self):
        """Serialize to a JSON-compatible dict."""
        json_dict = super().to_json_dict()
        json_dict['command'] = 'sleep'
        return json_dict


class WakeUpCommand(ServerCommand):
    """Wake a sleeping player up and restore normal energy regeneration."""

    def __init__(self):
        ServerCommand.__init__(self)

    def _check(self):
        sleeper = self.town.get_player(self.client_id)
        awake_result = CheckResult()
        AwakenCheck(sleeper).check(awake_result)
        if awake_result:
            self.check_result += '{} is already awake'.format(sleeper.name)

    def _do(self):
        sleeper = self.town.get_player(self.client_id)
        sleeper.status = 'idle'
        sleeper.energy.reset_regen()

    def __repr__(self):
        base = 'Wake up command. Player id: {}'.format(self.client_id)
        if self.check_result:
            return base
        return base + '\n{}'.format(self.check_result)

    @classmethod
    def from_json_dict(cls, json_dict: dict) ->WakeUpCommand:
        """Deserialize: the command carries no extra payload."""
        return cls()

    def to_json_dict(self):
        """Serialize to a JSON-compatible dict."""
        json_dict = super().to_json_dict()
        json_dict['command'] = 'wakeup'
        return json_dict
class HelpPlayerCommand(ServerCommand):
    """Spend some of the helper's energy to heal another player.

    Both players must be on the same tile, and the target must *fail* the
    availability check (i.e. be immobilized) for help to make sense.
    """
    ENERGY_TO_HELP = 20
    HEALTH_TO_GIVE = 1

    def __init__(self, player_to_help_id):
        ServerCommand.__init__(self)
        self._player_to_help_id = player_to_help_id

    def _check(self):
        player = self.town.get_player(self.client_id)
        AvailableCheck(player).check(self.check_result)
        # Idiom fix: membership tests on the mapping itself, not on .keys().
        if self.client_id not in self.town.players:
            self.check_result += 'Player {} does not exist'.format(self.
                client_id)
            return
        if self._player_to_help_id not in self.town.players:
            self.check_result += 'Player {} does not exist'.format(self.
                _player_to_help_id)
            return
        if self.town.get_player_tile(self.client_id
            ) != self.town.get_player_tile(self._player_to_help_id):
            self.check_result += ('Players {} and {} are not in the same tile'
                .format(self.client_id, self._player_to_help_id))
            return
        EnergyCheck(self.town.get_player(self.client_id), HelpPlayerCommand
            .ENERGY_TO_HELP).check(self.check_result)
        # If the target still passes the availability check, it does not
        # actually need help.
        is_alive_check = CheckResult()
        AvailableCheck(self.town.get_player(self._player_to_help_id)).check(
            is_alive_check)
        if is_alive_check:
            self.check_result += '{} has enough health to keep moving'.format(
                self._player_to_help_id)

    def _do(self):
        player_helper = self.town.get_player(self.client_id)
        player_helper.energy.value -= HelpPlayerCommand.ENERGY_TO_HELP
        player_to_help = self.town.get_player(self._player_to_help_id)
        player_to_help.health.value += HelpPlayerCommand.HEALTH_TO_GIVE

    def __repr__(self):
        msg = 'HelpPlayerCommand: try to help {}'.format(self.
            _player_to_help_id)
        if not self.check_result:
            msg += '\n{}'.format(self.check_result)
        return msg

    @classmethod
    def from_json_dict(cls, json_dict: dict) ->HelpPlayerCommand:
        """Deserialize: rebuild the command from its JSON-dict form."""
        return cls(json_dict['player_to_help_id'])

    def to_json_dict(self):
        """Serialize to a JSON-compatible dict."""
        json_dict = super().to_json_dict()
        json_dict['command'] = 'help'
        json_dict['player_to_help_id'] = self._player_to_help_id
        return json_dict
class CommandsFactory:
    """Registry mapping wire command names to ServerCommand subclasses.

    Used by the server to rebuild a concrete command object from the raw
    PodSixNet message dict sent by a client.
    """

    # Idiom fix: one dict literal instead of eleven item assignments
    # (same mapping, same keys).
    COMMANDS_DICT = {
        'move': MovePlayerCommand,
        'build': BuildCommand,
        'collect': CollectResourceCommand,
        'building_process': BuildingProcessCommand,
        'buy': BuyCommand,
        'sell': SellCommand,
        'build_building': BuildBuildingCommand,
        'upgrade_building': UpgradeBuildingCommand,
        'help': HelpPlayerCommand,
        'sleep': SleepCommand,
        'wakeup': WakeUpCommand,
    }

    @staticmethod
    def from_podsixnet(podsixnet_dict):
        """Build a command object from a PodSixNet message dict.

        Raises NotImplementedError for an unknown command name, then
        restores the sender's client id and the serialized check result.
        """
        command_name = podsixnet_dict['command']
        if command_name not in CommandsFactory.COMMANDS_DICT:
            raise NotImplementedError
        command = CommandsFactory.COMMANDS_DICT[command_name].from_json_dict(
            podsixnet_dict)
        command.client_id = podsixnet_dict['client_id']
        command.check_result = CheckResult.from_json_dict(
            podsixnet_dict['check_result'])
        return command
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class MovePlayerCommand(ServerCommand):
<|reserved_special_token_0|>
    def __init__(self, direction: str):
        """Store the requested direction ('left'/'right'/'up'/'down')."""
        ServerCommand.__init__(self)
        self._direction = direction
    def __repr__(self):
        """Summary line; appends accumulated check messages when falsy."""
        msg = 'Move ServerCommand : {}'.format(self._direction)
        if not self.check_result:
            msg += '\n{}'.format(self.check_result)
        return msg
def _check(self):
player = self.town.get_player(self.client_id)
EnergyCheck(player, MovePlayerCommand.ENERGY_COST).check(self.
check_result)
AvailableCheck(player).check(self.check_result)
for tile in self._get_tiles_coordinates_dict().values():
if tile not in self.town.backgrounds.keys():
self.check_result += 'tile {} not in town'.format(tile)
return
BackgroundMovementCheck(self.town.backgrounds[tile], player).check(
self.check_result)
    def _do(self):
        """Apply the move: spend energy and update position/direction."""
        x_dest, y_dest = self.tile_dest
        player = self.town.get_player(self.client_id)
        player.status = 'move'
        player.direction = self._direction
        player.energy.value -= MovePlayerCommand.ENERGY_COST
        player.x = x_dest
        player.y = y_dest
    @property
    def tile_dest(self) ->tuple:
        """Destination (x, y) after one step in the requested direction.

        The step is the player's velocity scaled by the movement
        multiplicator of the background currently under the player.
        """
        movement_matrix = {}
        movement_matrix['left'] = -1, 0
        movement_matrix['right'] = +1, 0
        movement_matrix['up'] = 0, -1
        movement_matrix['down'] = 0, +1
        player = self.town.get_player(self.client_id)
        tile = self.town.get_player_tile(self.client_id)
        background = self.town.backgrounds[tile]
        bg_multiplicator = background.move_multiplicator
        x_dest = player.x + movement_matrix[self._direction][0
            ] * bg_multiplicator * player.velocity
        y_dest = player.y + movement_matrix[self._direction][1
            ] * bg_multiplicator * player.velocity
        return x_dest, y_dest
    def _get_tiles_coordinates_dict(self):
        """Tile coordinates of the four corners of the destination footprint.

        The +0.99 offsets pick the far corners -- presumably the player
        sprite occupies roughly one tile (TODO confirm sprite size).
        """
        x_dest, y_dest = self.tile_dest
        tiles_coordinates_dict = {'topleft': (math.floor(x_dest), math.
            floor(y_dest)), 'topright': (math.floor(x_dest + 0.99), math.
            floor(y_dest)), 'bottomleft': (math.floor(x_dest), math.floor(
            y_dest + 0.99)), 'bottomright': (math.floor(x_dest + 0.99),
            math.floor(y_dest + 0.99))}
        return tiles_coordinates_dict
    @classmethod
    def from_json_dict(cls, json_dict) ->MovePlayerCommand:
        """Deserialize: rebuild the command from its JSON-dict form."""
        return cls(json_dict['direction'])
    def to_json_dict(self):
        """Serialize to a JSON-compatible dict (adds the wire command name)."""
        json_dict = super().to_json_dict()
        json_dict['command'] = 'move'
        json_dict['direction'] = self._direction
        return json_dict
class BuildCommand(ServerCommand):
    """Place a new building (by name) on a given tile of the town."""

    def __init__(self, tile: tuple, building_name: str):
        ServerCommand.__init__(self)
        self._tile = tile
        self._building_name = building_name

    def _check(self):
        builder = self.town.get_player(self.client_id)
        AvailableCheck(builder).check(self.check_result)
        if self._tile not in self.town.backgrounds:
            self.check_result += 'tile {} not in town'.format(self._tile)
            return
        ground = self.town.backgrounds[self._tile]
        BackgroundBuildCheck(ground, self._building_name).check(
            self.check_result)
        if self._tile in self.town.buildings:
            occupant = self.town.buildings[self._tile]
            self.check_result += "Can't build {} : {} already built on {}".format(
                self._building_name, occupant.name, self._tile)

    def _do(self):
        new_building = BuildingFactory.create_building_by_name(
            self._building_name)
        self.town.set_building(new_building, self._tile)

    def __repr__(self):
        base = 'Build ServerCommand : {} in {}'.format(self._building_name,
            self._tile)
        if self.check_result:
            return base
        return base + '\n{}'.format(self.check_result)

    @classmethod
    def from_json_dict(cls, json_dict: dict) ->BuildCommand:
        """Deserialize: rebuild the command from its JSON-dict form."""
        return cls(json_dict['tile'], json_dict['building_name'])

    def to_json_dict(self):
        """Serialize to a JSON-compatible dict."""
        json_dict = super().to_json_dict()
        json_dict['command'] = 'build'
        json_dict['building_name'] = self._building_name
        json_dict['tile'] = self._tile
        return json_dict


class CollectResourceCommand(ServerCommand):
    """Harvest one item stack from the resource node standing on a tile."""
    ENERGY_COST = 30

    def __init__(self, tile: tuple, item: Item):
        ServerCommand.__init__(self)
        self._tile = tile
        self._item = item

    def _check(self):
        collector = self.town.get_player(self.client_id)
        AvailableCheck(collector).check(self.check_result)
        if self._tile not in self.town.resources:
            self.check_result += 'No resource in {}'.format(self._tile)
            return
        node = self.town.resources[self._tile]
        # The transfer must be possible and the player must have the energy.
        TransactionCheck(node, collector, self._item).check(self.check_result)
        EnergyCheck(collector, CollectResourceCommand.ENERGY_COST).check(
            self.check_result)

    def _do(self):
        collector = self.town.get_player(self.client_id)
        collector.inventory.add_item(self._item)
        node = self.town.resources[self._tile]
        node.inventory.remove_item(self._item)
        collector.energy.value -= CollectResourceCommand.ENERGY_COST

    def __repr__(self):
        base = 'Collect Resource ServerCommand : {}'.format(self._item)
        if self.check_result:
            return base
        return base + '\n{}'.format(self.check_result)

    @classmethod
    def from_json_dict(cls, json_dict: dict) ->CollectResourceCommand:
        """Deserialize: rebuild the command from its JSON-dict form."""
        return cls(json_dict['tile'], Item.from_json_dict(json_dict['item']))

    def to_json_dict(self) ->dict:
        """Serialize to a JSON-compatible dict."""
        json_dict = super().to_json_dict()
        json_dict['command'] = 'collect'
        json_dict['tile'] = self._tile
        json_dict['item'] = self._item.to_json_dict()
        return json_dict
class BuildingProcessCommand(ServerCommand):
    """Run a building's production process: consume inputs, produce outputs.

    Costs the player the energy amount declared by the process.
    """
    def __init__(self, tile: tuple, building_process: BuildingProcess):
        ServerCommand.__init__(self)
        self._tile = tile
        self._building_process = building_process
    def _check(self):
        """Fail when the tile has no building or inventory/energy checks fail."""
        player = self.town.get_player(self.client_id)
        AvailableCheck(player).check(self.check_result)
        if self._tile not in self.town.buildings:
            self.check_result += "No building on {}".format(self._tile)
            return
        building = self.town.buildings[self._tile]
        # Inputs must be removable, outputs storable, the player energetic enough.
        # (The redundant second get_player() fetch was removed.)
        InventoryRemoveCheck(
            building.inventory, self._building_process.item_required
        ).check(self.check_result)
        InventoryAddCheck(
            building.inventory, self._building_process.item_result
        ).check(self.check_result)
        EnergyCheck(player, self._building_process.energy_required).check(
            self.check_result
        )
    def _do(self):
        building = self.town.buildings[self._tile]
        building.inventory.remove_item(self._building_process.item_required)
        building.inventory.add_item(self._building_process.item_result)
        player = self.town.get_player(self.client_id)
        player.energy.value -= self._building_process.energy_required
    def __repr__(self):
        msg = "BuildingProcessCommand ServerCommand {}".format(self._building_process)
        if not self.check_result:
            msg += "\n{}".format(self.check_result)
        return msg
    @classmethod
    def from_json_dict(cls, json_dict):
        return cls(
            json_dict["tile"],
            BuildingProcess.from_json_dict(json_dict["building_process"]),
        )
    def to_json_dict(self):
        json_dict = super().to_json_dict()
        json_dict["command"] = "building_process"
        json_dict["tile"] = self._tile
        json_dict["building_process"] = self._building_process.to_json_dict()
        return json_dict
class BuyCommand(ServerCommand):
    """Buy one item of the transaction's type from the building on a tile."""
    def __init__(self, tile: tuple, transaction: BuildingTransaction):
        ServerCommand.__init__(self)
        self._tile = tile
        self._transaction = transaction
    def _check(self):
        """Fail when the tile has no building or the transfer is not possible."""
        # Guard against a KeyError: the tile may hold no building at all
        # (same pattern as BuildingProcessCommand._check).
        if self._tile not in self.town.buildings:
            self.check_result += "No building on {}".format(self._tile)
            return
        building = self.town.buildings[self._tile]
        player = self.town.get_player(self.client_id)
        item = Item(self._transaction.item_name, 1)
        AvailableCheck(player).check(self.check_result)
        TransactionCheck(building, player, item).check(self.check_result)
    def _do(self):
        item = Item(self._transaction.item_name, 1)
        building = self.town.buildings[self._tile]
        player = self.town.get_player(self.client_id)
        building.inventory.remove_item(item)
        player.inventory.add_item(item)
    def __repr__(self):
        msg = "BuyCommand ServerCommand {}".format(self._transaction.item_name)
        if not self.check_result:
            msg += "\n{}".format(self.check_result)
        return msg
    @classmethod
    def from_json_dict(cls, json_dict):
        return cls(
            json_dict["tile"],
            BuildingTransaction.from_json_dict(json_dict["transaction"]),
        )
    def to_json_dict(self):
        json_dict = super().to_json_dict()
        json_dict["command"] = "buy"
        json_dict["tile"] = self._tile
        json_dict["transaction"] = self._transaction.to_json_dict()
        return json_dict
class SellCommand(ServerCommand):
    """Sell one item of the transaction's type to the building on a tile."""
    def __init__(self, tile: tuple, transaction: BuildingTransaction):
        ServerCommand.__init__(self)
        self._tile = tile
        self._transaction = transaction
    def _check(self):
        """Fail when the tile has no building or the transfer is not possible."""
        # Guard against a KeyError: the tile may hold no building at all
        # (same pattern as BuildingProcessCommand._check).
        if self._tile not in self.town.buildings:
            self.check_result += "No building on {}".format(self._tile)
            return
        building = self.town.buildings[self._tile]
        player = self.town.get_player(self.client_id)
        item = Item(self._transaction.item_name, 1)
        AvailableCheck(player).check(self.check_result)
        # Selling moves the item from the player to the building.
        TransactionCheck(player, building, item).check(self.check_result)
    def _do(self):
        item = Item(self._transaction.item_name, 1)
        building = self.town.buildings[self._tile]
        player = self.town.get_player(self.client_id)
        building.inventory.add_item(item)
        player.inventory.remove_item(item)
    def __repr__(self):
        msg = "SellCommand ServerCommand {}".format(self._transaction.item_name)
        if not self.check_result:
            msg += "\n{}".format(self.check_result)
        return msg
    @classmethod
    def from_json_dict(cls, json_dict):
        return cls(
            json_dict["tile"],
            BuildingTransaction.from_json_dict(json_dict["transaction"]),
        )
    def to_json_dict(self):
        json_dict = super().to_json_dict()
        json_dict["command"] = "sell"
        json_dict["tile"] = self._tile
        json_dict["transaction"] = self._transaction.to_json_dict()
        return json_dict
class BuildBuildingCommand(ServerCommand):
    """Contribute one item to a building's construction, spending energy."""
    ENERGY_COST = 20
    def __init__(self, tile: tuple, item: Item):
        ServerCommand.__init__(self)
        self._item = item
        self._tile = tile
    def _check(self):
        """Fail when the tile has no building or energy/transfer checks fail."""
        # Guard against a KeyError when no building occupies the tile
        # (same pattern as BuildingProcessCommand._check).
        if self._tile not in self.town.buildings:
            self.check_result += "No building on {}".format(self._tile)
            return
        building = self.town.buildings[self._tile]
        player = self.town.get_player(self.client_id)
        AvailableCheck(player).check(self.check_result)
        EnergyCheck(player, BuildBuildingCommand.ENERGY_COST).check(self.check_result)
        TransactionCheck(building, building, self._item).check(self.check_result)
    def _do(self):
        building = self.town.buildings[self._tile]
        player = self.town.get_player(self.client_id)
        player.energy.value -= BuildBuildingCommand.ENERGY_COST
        # Move the material from storage into the construction stock.
        building.inventory.remove_item(self._item)
        building.construction_inventory.add_item(self._item)
    def __repr__(self):
        msg = "Build Building ServerCommand {}".format(self._item)
        if not self.check_result:
            msg += "\n{}".format(self.check_result)
        return msg
    @classmethod
    def from_json_dict(cls, json_dict):
        return cls(json_dict["tile"], Item.from_json_dict(json_dict["item"]))
    def to_json_dict(self):
        json_dict = super().to_json_dict()
        json_dict["command"] = "build_building"
        json_dict["tile"] = self._tile
        json_dict["item"] = self._item.to_json_dict()
        return json_dict
class UpgradeBuildingCommand(ServerCommand):
    """Upgrade a building once its construction inventory is complete."""
    def __init__(self, tile: tuple):
        ServerCommand.__init__(self)
        self._tile = tile
    def _check(self):
        """Fail when the tile has no building or construction is unfinished."""
        # Guard against a KeyError when no building occupies the tile
        # (same pattern as BuildingProcessCommand._check).
        if self._tile not in self.town.buildings:
            self.check_result += "No building on {}".format(self._tile)
            return
        building = self.town.buildings[self._tile]
        player = self.town.get_player(self.client_id)
        AvailableCheck(player).check(self.check_result)
        if not building.construction_inventory.is_full():
            self.check_result += "construction not finished"
    def _do(self):
        self.town.buildings[self._tile].upgrade()
    def __repr__(self):
        msg = "Upgrade Building ServerCommand {}".format(self._tile)
        if not self.check_result:
            msg += "\n{}".format(self.check_result)
        return msg
    @classmethod
    def from_json_dict(cls, json_dict):
        return cls(json_dict["tile"])
    def to_json_dict(self):
        json_dict = super().to_json_dict()
        json_dict["command"] = "upgrade_building"
        json_dict["tile"] = self._tile
        return json_dict
class SleepCommand(ServerCommand):
    """Put the player to sleep; regen rate depends on where he sleeps."""
    ENERGY_REGEN_IN_HOUSE = 4
    ENERGY_REGEN_IN_GROUND = 2
    def __init__(self):
        ServerCommand.__init__(self)
    def _check(self):
        """Refuse sleeping inside any building except a 'cabane'."""
        tile = self.town.get_player_tile(self.client_id)
        occupied = tile in self.town.buildings
        if occupied and self.town.buildings[tile].name != "cabane":
            self.check_result += "Can't sleep in building"
    def _do(self):
        player = self.town.get_player(self.client_id)
        tile = self.town.get_player_tile(self.client_id)
        player.status = "sleep"
        # Default: slow regen on open ground; faster inside a cabane.
        player.energy.regen = SleepCommand.ENERGY_REGEN_IN_GROUND
        if tile in self.town.buildings and self.town.buildings[tile].name == "cabane":
            player.energy.regen = SleepCommand.ENERGY_REGEN_IN_HOUSE
    def __repr__(self):
        msg = "Sleep command. Player id: {}".format(self.client_id)
        if not self.check_result:
            msg += "\n{}".format(self.check_result)
        return msg
    @classmethod
    def from_json_dict(cls, json_dict: dict) -> SleepCommand:
        return cls()
    def to_json_dict(self):
        json_dict = super().to_json_dict()
        json_dict["command"] = "sleep"
        return json_dict
class WakeUpCommand(ServerCommand):
    """Wake a sleeping player and restore normal energy regeneration."""
    def __init__(self):
        ServerCommand.__init__(self)
    def _check(self):
        """Fail when the player is already awake."""
        player = self.town.get_player(self.client_id)
        awake_probe = CheckResult()
        AwakenCheck(player).check(awake_probe)
        # A truthy probe means no failure was recorded, i.e. already awake.
        if awake_probe:
            self.check_result += "{} is already awake".format(player.name)
    def _do(self):
        player = self.town.get_player(self.client_id)
        player.status = "idle"
        player.energy.reset_regen()
    def __repr__(self):
        msg = "Wake up command. Player id: {}".format(self.client_id)
        if not self.check_result:
            msg += "\n{}".format(self.check_result)
        return msg
    @classmethod
    def from_json_dict(cls, json_dict: dict) -> WakeUpCommand:
        return cls()
    def to_json_dict(self):
        json_dict = super().to_json_dict()
        json_dict["command"] = "wakeup"
        return json_dict
class HelpPlayerCommand(ServerCommand):
    """Spend energy to give one health point to a downed player on the same tile."""
    ENERGY_TO_HELP = 20
    HEALTH_TO_GIVE = 1
    def __init__(self, player_to_help_id):
        ServerCommand.__init__(self)
        self._player_to_help_id = player_to_help_id
    def _check(self):
        """Fail when either player is missing or apart, the helper lacks
        energy, or the patient does not actually need help."""
        # Existence checks must run BEFORE any get_player() call, otherwise
        # an unknown client id would raise instead of being reported.
        if self.client_id not in self.town.players.keys():
            self.check_result += "Player {} does not exist".format(self.client_id)
            return
        if self._player_to_help_id not in self.town.players.keys():
            self.check_result += "Player {} does not exist".format(
                self._player_to_help_id
            )
            return
        player = self.town.get_player(self.client_id)
        AvailableCheck(player).check(self.check_result)
        # Both players must stand on the same tile.
        if self.town.get_player_tile(self.client_id) != self.town.get_player_tile(
            self._player_to_help_id
        ):
            self.check_result += "Players {} and {} are not in the same tile".format(
                self.client_id, self._player_to_help_id
            )
            return
        # Helper needs enough energy to pay the cost.
        EnergyCheck(player, HelpPlayerCommand.ENERGY_TO_HELP).check(self.check_result)
        # The patient must actually be down (AvailableCheck passing means healthy).
        is_alive_check = CheckResult()
        AvailableCheck(self.town.get_player(self._player_to_help_id)).check(
            is_alive_check
        )
        if is_alive_check:
            self.check_result += "{} has enough health to keep moving".format(
                self._player_to_help_id
            )
    def _do(self):
        player_helper = self.town.get_player(self.client_id)
        player_helper.energy.value -= HelpPlayerCommand.ENERGY_TO_HELP
        player_to_help = self.town.get_player(self._player_to_help_id)
        player_to_help.health.value += HelpPlayerCommand.HEALTH_TO_GIVE
    def __repr__(self):
        msg = "HelpPlayerCommand: try to help {}".format(self._player_to_help_id)
        if not self.check_result:
            msg += "\n{}".format(self.check_result)
        return msg
    @classmethod
    def from_json_dict(cls, json_dict: dict) -> HelpPlayerCommand:
        return cls(json_dict["player_to_help_id"])
    def to_json_dict(self):
        json_dict = super().to_json_dict()
        json_dict["command"] = "help"
        json_dict["player_to_help_id"] = self._player_to_help_id
        return json_dict
class CommandsFactory:
    """Maps wire command names to ServerCommand subclasses."""
    COMMANDS_DICT = {
        "move": MovePlayerCommand,
        "build": BuildCommand,
        "collect": CollectResourceCommand,
        "building_process": BuildingProcessCommand,
        "buy": BuyCommand,
        "sell": SellCommand,
        "build_building": BuildBuildingCommand,
        "upgrade_building": UpgradeBuildingCommand,
        "help": HelpPlayerCommand,
        "sleep": SleepCommand,
        "wakeup": WakeUpCommand,
    }
    @staticmethod
    def from_podsixnet(podsixnet_dict):
        """Rebuild a command from a PodSixNet payload; raise on unknown names."""
        command_name = podsixnet_dict["command"]
        if command_name not in CommandsFactory.COMMANDS_DICT:
            raise NotImplementedError
        command = CommandsFactory.COMMANDS_DICT[command_name].from_json_dict(
            podsixnet_dict
        )
        command.client_id = podsixnet_dict["client_id"]
        command.check_result = CheckResult.from_json_dict(
            podsixnet_dict["check_result"]
        )
        return command
<|reserved_special_token_1|>
from __future__ import annotations
import math
from abc import abstractmethod
from pytown_core.patterns.behavioral import Command
from pytown_core.serializers import IJSONSerializable
from .buildings import BuildingProcess, BuildingTransaction
from .buildings.factory import BuildingFactory
from .check import (
AvailableCheck,
AwakenCheck,
BackgroundBuildCheck,
BackgroundMovementCheck,
CheckResult,
EnergyCheck,
InventoryAddCheck,
InventoryRemoveCheck,
TransactionCheck,
)
from .inventory import Item
class ServerCommand(IJSONSerializable, Command):
    """Base class for all server-side game commands.

    Subclasses implement _check (validate against the town state) and
    _do (apply the effect); execute() runs them in sequence and applies
    the effect only when every check passed.
    """
    def __init__(self):
        # client_id and town are injected later (town by the town manager)
        # before execute() is called.
        self.client_id = None
        self.town = None  # TODO: will be set by townmanager
        self.check_result = CheckResult()
    def execute(self):
        """Validate the command, then apply it only if all checks passed."""
        self._check()
        # CheckResult is truthy when no failure message was recorded.
        if self.check_result:
            self._do()
    @abstractmethod
    def _check(self):
        """Record any validation failures into self.check_result."""
        raise NotImplementedError
    @abstractmethod
    def _do(self):
        """Apply the command's effect. Called only when all checks passed."""
        raise NotImplementedError
    @abstractmethod
    def __repr__(self):
        pass
    @classmethod
    @abstractmethod
    def from_json_dict(cls, json_dict) -> ServerCommand:
        """Build a command instance from its JSON dict representation."""
        raise NotImplementedError
    def to_json_dict(self) -> dict:
        """Serialize the common fields shared by every command."""
        json_dict = {}
        json_dict["client_id"] = self.client_id
        json_dict["check_result"] = self.check_result.to_json_dict()
        return json_dict
    def to_podsixnet(self):
        """Serialize for the PodSixNet transport (adds the 'action' key)."""
        podsixnet_dict = self.to_json_dict()
        podsixnet_dict["action"] = "command"
        return podsixnet_dict
class MovePlayerCommand(ServerCommand):
    """Move the issuing player one step in a cardinal direction."""
    ENERGY_COST = 1
    # Direction name -> (dx, dy) unit vector.
    _MOVEMENT_VECTORS = {
        "left": (-1, 0),
        "right": (+1, 0),
        "up": (0, -1),
        "down": (0, +1),
    }
    def __init__(self, direction: str):
        ServerCommand.__init__(self)
        self._direction = direction
    def __repr__(self):
        msg = "Move ServerCommand : {}".format(self._direction)
        if not self.check_result:
            msg += "\n{}".format(self.check_result)
        return msg
    def _check(self):
        """Fail when energy/availability checks fail or a corner tile is blocked."""
        player = self.town.get_player(self.client_id)
        EnergyCheck(player, MovePlayerCommand.ENERGY_COST).check(self.check_result)
        AvailableCheck(player).check(self.check_result)
        for corner in self._get_tiles_coordinates_dict().values():
            if corner not in self.town.backgrounds.keys():
                self.check_result += "tile {} not in town".format(corner)
                return
            BackgroundMovementCheck(self.town.backgrounds[corner], player).check(
                self.check_result
            )
    def _do(self):
        x_dest, y_dest = self.tile_dest
        player = self.town.get_player(self.client_id)
        player.status = "move"
        player.direction = self._direction
        player.energy.value -= MovePlayerCommand.ENERGY_COST
        player.x = x_dest
        player.y = y_dest
    @property
    def tile_dest(self) -> tuple:
        """Destination (x, y), scaled by terrain multiplier and player velocity."""
        player = self.town.get_player(self.client_id)
        tile = self.town.get_player_tile(self.client_id)
        bg_multiplicator = self.town.backgrounds[tile].move_multiplicator
        dx, dy = MovePlayerCommand._MOVEMENT_VECTORS[self._direction]
        x_dest = player.x + dx * bg_multiplicator * player.velocity
        y_dest = player.y + dy * bg_multiplicator * player.velocity
        return (x_dest, y_dest)
    def _get_tiles_coordinates_dict(self):
        """Tiles covered by the four corners of the destination square."""
        x_dest, y_dest = self.tile_dest
        left, top = math.floor(x_dest), math.floor(y_dest)
        right, bottom = math.floor(x_dest + 0.99), math.floor(y_dest + 0.99)
        return {
            "topleft": (left, top),
            "topright": (right, top),
            "bottomleft": (left, bottom),
            "bottomright": (right, bottom),
        }
    @classmethod
    def from_json_dict(cls, json_dict) -> MovePlayerCommand:
        return cls(json_dict["direction"])
    def to_json_dict(self):
        json_dict = super().to_json_dict()
        json_dict["command"] = "move"
        json_dict["direction"] = self._direction
        return json_dict
class BuildCommand(ServerCommand):
    """Place a new building of the given type on a tile."""
    def __init__(self, tile: tuple, building_name: str):
        ServerCommand.__init__(self)
        self._tile = tile
        self._building_name = building_name
    def _check(self):
        """Fail when the tile is missing, unbuildable, or already occupied."""
        player = self.town.get_player(self.client_id)
        AvailableCheck(player).check(self.check_result)
        if self._tile not in self.town.backgrounds:
            self.check_result += "tile {} not in town".format(self._tile)
            return
        ground = self.town.backgrounds[self._tile]
        BackgroundBuildCheck(ground, self._building_name).check(self.check_result)
        if self._tile in self.town.buildings:
            occupant = self.town.buildings[self._tile]
            self.check_result += "Can't build {} : {} already built on {}".format(
                self._building_name, occupant.name, self._tile
            )
    def _do(self):
        new_building = BuildingFactory.create_building_by_name(self._building_name)
        self.town.set_building(new_building, self._tile)
    def __repr__(self):
        msg = "Build ServerCommand : {} in {}".format(self._building_name, self._tile)
        if not self.check_result:
            msg += "\n{}".format(self.check_result)
        return msg
    @classmethod
    def from_json_dict(cls, json_dict: dict) -> BuildCommand:
        return cls(json_dict["tile"], json_dict["building_name"])
    def to_json_dict(self):
        json_dict = super().to_json_dict()
        json_dict["command"] = "build"
        json_dict["building_name"] = self._building_name
        json_dict["tile"] = self._tile
        return json_dict
class CollectResourceCommand(ServerCommand):
    """Harvest one item from a resource tile, spending player energy."""
    ENERGY_COST = 30
    def __init__(self, tile: tuple, item: Item):
        ServerCommand.__init__(self)
        self._tile = tile
        self._item = item
    def _check(self):
        """Fail when no resource is here or the transfer/energy checks fail."""
        player = self.town.get_player(self.client_id)
        AvailableCheck(player).check(self.check_result)
        if self._tile not in self.town.resources:
            self.check_result += "No resource in {}".format(self._tile)
            return
        resource = self.town.resources[self._tile]
        TransactionCheck(resource, player, self._item).check(self.check_result)
        EnergyCheck(player, CollectResourceCommand.ENERGY_COST).check(
            self.check_result
        )
    def _do(self):
        player = self.town.get_player(self.client_id)
        resource = self.town.resources[self._tile]
        player.inventory.add_item(self._item)
        resource.inventory.remove_item(self._item)
        player.energy.value -= CollectResourceCommand.ENERGY_COST
    def __repr__(self):
        msg = "Collect Resource ServerCommand : {}".format(self._item)
        if not self.check_result:
            msg += "\n{}".format(self.check_result)
        return msg
    @classmethod
    def from_json_dict(cls, json_dict: dict) -> CollectResourceCommand:
        return cls(json_dict["tile"], Item.from_json_dict(json_dict["item"]))
    def to_json_dict(self) -> dict:
        json_dict = super().to_json_dict()
        json_dict["command"] = "collect"
        json_dict["tile"] = self._tile
        json_dict["item"] = self._item.to_json_dict()
        return json_dict
class BuildingProcessCommand(ServerCommand):
    """Run a building's production process: consume inputs, produce outputs.

    Costs the player the energy amount declared by the process.
    """
    def __init__(self, tile: tuple, building_process: BuildingProcess):
        ServerCommand.__init__(self)
        self._tile = tile
        self._building_process = building_process
    def _check(self):
        """Fail when the tile has no building or inventory/energy checks fail."""
        player = self.town.get_player(self.client_id)
        AvailableCheck(player).check(self.check_result)
        if self._tile not in self.town.buildings:
            self.check_result += "No building on {}".format(self._tile)
            return
        building = self.town.buildings[self._tile]
        # Inputs must be removable, outputs storable, the player energetic enough.
        # (The redundant second get_player() fetch was removed.)
        InventoryRemoveCheck(
            building.inventory, self._building_process.item_required
        ).check(self.check_result)
        InventoryAddCheck(
            building.inventory, self._building_process.item_result
        ).check(self.check_result)
        EnergyCheck(player, self._building_process.energy_required).check(
            self.check_result
        )
    def _do(self):
        building = self.town.buildings[self._tile]
        building.inventory.remove_item(self._building_process.item_required)
        building.inventory.add_item(self._building_process.item_result)
        player = self.town.get_player(self.client_id)
        player.energy.value -= self._building_process.energy_required
    def __repr__(self):
        msg = "BuildingProcessCommand ServerCommand {}".format(self._building_process)
        if not self.check_result:
            msg += "\n{}".format(self.check_result)
        return msg
    @classmethod
    def from_json_dict(cls, json_dict):
        return cls(
            json_dict["tile"],
            BuildingProcess.from_json_dict(json_dict["building_process"]),
        )
    def to_json_dict(self):
        json_dict = super().to_json_dict()
        json_dict["command"] = "building_process"
        json_dict["tile"] = self._tile
        json_dict["building_process"] = self._building_process.to_json_dict()
        return json_dict
class BuyCommand(ServerCommand):
    """Buy one item of the transaction's type from the building on a tile."""
    def __init__(self, tile: tuple, transaction: BuildingTransaction):
        ServerCommand.__init__(self)
        self._tile = tile
        self._transaction = transaction
    def _check(self):
        """Fail when the tile has no building or the transfer is not possible."""
        # Guard against a KeyError: the tile may hold no building at all
        # (same pattern as BuildingProcessCommand._check).
        if self._tile not in self.town.buildings:
            self.check_result += "No building on {}".format(self._tile)
            return
        building = self.town.buildings[self._tile]
        player = self.town.get_player(self.client_id)
        item = Item(self._transaction.item_name, 1)
        AvailableCheck(player).check(self.check_result)
        TransactionCheck(building, player, item).check(self.check_result)
    def _do(self):
        item = Item(self._transaction.item_name, 1)
        building = self.town.buildings[self._tile]
        player = self.town.get_player(self.client_id)
        building.inventory.remove_item(item)
        player.inventory.add_item(item)
    def __repr__(self):
        msg = "BuyCommand ServerCommand {}".format(self._transaction.item_name)
        if not self.check_result:
            msg += "\n{}".format(self.check_result)
        return msg
    @classmethod
    def from_json_dict(cls, json_dict):
        return cls(
            json_dict["tile"],
            BuildingTransaction.from_json_dict(json_dict["transaction"]),
        )
    def to_json_dict(self):
        json_dict = super().to_json_dict()
        json_dict["command"] = "buy"
        json_dict["tile"] = self._tile
        json_dict["transaction"] = self._transaction.to_json_dict()
        return json_dict
class SellCommand(ServerCommand):
    """Sell one item of the transaction's type to the building on a tile."""
    def __init__(self, tile: tuple, transaction: BuildingTransaction):
        ServerCommand.__init__(self)
        self._tile = tile
        self._transaction = transaction
    def _check(self):
        """Fail when the tile has no building or the transfer is not possible."""
        # Guard against a KeyError: the tile may hold no building at all
        # (same pattern as BuildingProcessCommand._check).
        if self._tile not in self.town.buildings:
            self.check_result += "No building on {}".format(self._tile)
            return
        building = self.town.buildings[self._tile]
        player = self.town.get_player(self.client_id)
        item = Item(self._transaction.item_name, 1)
        AvailableCheck(player).check(self.check_result)
        # Selling moves the item from the player to the building.
        TransactionCheck(player, building, item).check(self.check_result)
    def _do(self):
        item = Item(self._transaction.item_name, 1)
        building = self.town.buildings[self._tile]
        player = self.town.get_player(self.client_id)
        building.inventory.add_item(item)
        player.inventory.remove_item(item)
    def __repr__(self):
        msg = "SellCommand ServerCommand {}".format(self._transaction.item_name)
        if not self.check_result:
            msg += "\n{}".format(self.check_result)
        return msg
    @classmethod
    def from_json_dict(cls, json_dict):
        return cls(
            json_dict["tile"],
            BuildingTransaction.from_json_dict(json_dict["transaction"]),
        )
    def to_json_dict(self):
        json_dict = super().to_json_dict()
        json_dict["command"] = "sell"
        json_dict["tile"] = self._tile
        json_dict["transaction"] = self._transaction.to_json_dict()
        return json_dict
class BuildBuildingCommand(ServerCommand):
    """Contribute one item to a building's construction, spending energy."""
    ENERGY_COST = 20
    def __init__(self, tile: tuple, item: Item):
        ServerCommand.__init__(self)
        self._item = item
        self._tile = tile
    def _check(self):
        """Fail when the tile has no building or energy/transfer checks fail."""
        # Guard against a KeyError when no building occupies the tile
        # (same pattern as BuildingProcessCommand._check).
        if self._tile not in self.town.buildings:
            self.check_result += "No building on {}".format(self._tile)
            return
        building = self.town.buildings[self._tile]
        player = self.town.get_player(self.client_id)
        AvailableCheck(player).check(self.check_result)
        EnergyCheck(player, BuildBuildingCommand.ENERGY_COST).check(self.check_result)
        TransactionCheck(building, building, self._item).check(self.check_result)
    def _do(self):
        building = self.town.buildings[self._tile]
        player = self.town.get_player(self.client_id)
        player.energy.value -= BuildBuildingCommand.ENERGY_COST
        # Move the material from storage into the construction stock.
        building.inventory.remove_item(self._item)
        building.construction_inventory.add_item(self._item)
    def __repr__(self):
        msg = "Build Building ServerCommand {}".format(self._item)
        if not self.check_result:
            msg += "\n{}".format(self.check_result)
        return msg
    @classmethod
    def from_json_dict(cls, json_dict):
        return cls(json_dict["tile"], Item.from_json_dict(json_dict["item"]))
    def to_json_dict(self):
        json_dict = super().to_json_dict()
        json_dict["command"] = "build_building"
        json_dict["tile"] = self._tile
        json_dict["item"] = self._item.to_json_dict()
        return json_dict
class UpgradeBuildingCommand(ServerCommand):
    """Upgrade a building once its construction inventory is complete."""
    def __init__(self, tile: tuple):
        ServerCommand.__init__(self)
        self._tile = tile
    def _check(self):
        """Fail when the tile has no building or construction is unfinished."""
        # Guard against a KeyError when no building occupies the tile
        # (same pattern as BuildingProcessCommand._check).
        if self._tile not in self.town.buildings:
            self.check_result += "No building on {}".format(self._tile)
            return
        building = self.town.buildings[self._tile]
        player = self.town.get_player(self.client_id)
        AvailableCheck(player).check(self.check_result)
        if not building.construction_inventory.is_full():
            self.check_result += "construction not finished"
    def _do(self):
        self.town.buildings[self._tile].upgrade()
    def __repr__(self):
        msg = "Upgrade Building ServerCommand {}".format(self._tile)
        if not self.check_result:
            msg += "\n{}".format(self.check_result)
        return msg
    @classmethod
    def from_json_dict(cls, json_dict):
        return cls(json_dict["tile"])
    def to_json_dict(self):
        json_dict = super().to_json_dict()
        json_dict["command"] = "upgrade_building"
        json_dict["tile"] = self._tile
        return json_dict
class SleepCommand(ServerCommand):
    """Put the player to sleep; regen rate depends on where he sleeps."""
    ENERGY_REGEN_IN_HOUSE = 4
    ENERGY_REGEN_IN_GROUND = 2
    def __init__(self):
        ServerCommand.__init__(self)
    def _check(self):
        """Refuse sleeping inside any building except a 'cabane'."""
        tile = self.town.get_player_tile(self.client_id)
        occupied = tile in self.town.buildings
        if occupied and self.town.buildings[tile].name != "cabane":
            self.check_result += "Can't sleep in building"
    def _do(self):
        player = self.town.get_player(self.client_id)
        tile = self.town.get_player_tile(self.client_id)
        player.status = "sleep"
        # Default: slow regen on open ground; faster inside a cabane.
        player.energy.regen = SleepCommand.ENERGY_REGEN_IN_GROUND
        if tile in self.town.buildings and self.town.buildings[tile].name == "cabane":
            player.energy.regen = SleepCommand.ENERGY_REGEN_IN_HOUSE
    def __repr__(self):
        msg = "Sleep command. Player id: {}".format(self.client_id)
        if not self.check_result:
            msg += "\n{}".format(self.check_result)
        return msg
    @classmethod
    def from_json_dict(cls, json_dict: dict) -> SleepCommand:
        return cls()
    def to_json_dict(self):
        json_dict = super().to_json_dict()
        json_dict["command"] = "sleep"
        return json_dict
class WakeUpCommand(ServerCommand):
    """Wake a sleeping player and restore normal energy regeneration."""
    def __init__(self):
        ServerCommand.__init__(self)
    def _check(self):
        """Fail when the player is already awake."""
        player = self.town.get_player(self.client_id)
        awake_probe = CheckResult()
        AwakenCheck(player).check(awake_probe)
        # A truthy probe means no failure was recorded, i.e. already awake.
        if awake_probe:
            self.check_result += "{} is already awake".format(player.name)
    def _do(self):
        player = self.town.get_player(self.client_id)
        player.status = "idle"
        player.energy.reset_regen()
    def __repr__(self):
        msg = "Wake up command. Player id: {}".format(self.client_id)
        if not self.check_result:
            msg += "\n{}".format(self.check_result)
        return msg
    @classmethod
    def from_json_dict(cls, json_dict: dict) -> WakeUpCommand:
        return cls()
    def to_json_dict(self):
        json_dict = super().to_json_dict()
        json_dict["command"] = "wakeup"
        return json_dict
class HelpPlayerCommand(ServerCommand):
    """Spend energy to give one health point to a downed player on the same tile."""
    ENERGY_TO_HELP = 20
    HEALTH_TO_GIVE = 1
    def __init__(self, player_to_help_id):
        ServerCommand.__init__(self)
        self._player_to_help_id = player_to_help_id
    def _check(self):
        """Fail when either player is missing or apart, the helper lacks
        energy, or the patient does not actually need help."""
        # Existence checks must run BEFORE any get_player() call, otherwise
        # an unknown client id would raise instead of being reported.
        if self.client_id not in self.town.players.keys():
            self.check_result += "Player {} does not exist".format(self.client_id)
            return
        if self._player_to_help_id not in self.town.players.keys():
            self.check_result += "Player {} does not exist".format(
                self._player_to_help_id
            )
            return
        player = self.town.get_player(self.client_id)
        AvailableCheck(player).check(self.check_result)
        # Both players must stand on the same tile.
        if self.town.get_player_tile(self.client_id) != self.town.get_player_tile(
            self._player_to_help_id
        ):
            self.check_result += "Players {} and {} are not in the same tile".format(
                self.client_id, self._player_to_help_id
            )
            return
        # Helper needs enough energy to pay the cost.
        EnergyCheck(player, HelpPlayerCommand.ENERGY_TO_HELP).check(self.check_result)
        # The patient must actually be down (AvailableCheck passing means healthy).
        is_alive_check = CheckResult()
        AvailableCheck(self.town.get_player(self._player_to_help_id)).check(
            is_alive_check
        )
        if is_alive_check:
            self.check_result += "{} has enough health to keep moving".format(
                self._player_to_help_id
            )
    def _do(self):
        player_helper = self.town.get_player(self.client_id)
        player_helper.energy.value -= HelpPlayerCommand.ENERGY_TO_HELP
        player_to_help = self.town.get_player(self._player_to_help_id)
        player_to_help.health.value += HelpPlayerCommand.HEALTH_TO_GIVE
    def __repr__(self):
        msg = "HelpPlayerCommand: try to help {}".format(self._player_to_help_id)
        if not self.check_result:
            msg += "\n{}".format(self.check_result)
        return msg
    @classmethod
    def from_json_dict(cls, json_dict: dict) -> HelpPlayerCommand:
        return cls(json_dict["player_to_help_id"])
    def to_json_dict(self):
        json_dict = super().to_json_dict()
        json_dict["command"] = "help"
        json_dict["player_to_help_id"] = self._player_to_help_id
        return json_dict
class CommandsFactory:
    """Maps wire command names to ServerCommand subclasses."""
    COMMANDS_DICT = {
        "move": MovePlayerCommand,
        "build": BuildCommand,
        "collect": CollectResourceCommand,
        "building_process": BuildingProcessCommand,
        "buy": BuyCommand,
        "sell": SellCommand,
        "build_building": BuildBuildingCommand,
        "upgrade_building": UpgradeBuildingCommand,
        "help": HelpPlayerCommand,
        "sleep": SleepCommand,
        "wakeup": WakeUpCommand,
    }
    @staticmethod
    def from_podsixnet(podsixnet_dict):
        """Rebuild a command from a PodSixNet payload; raise on unknown names."""
        command_name = podsixnet_dict["command"]
        if command_name not in CommandsFactory.COMMANDS_DICT:
            raise NotImplementedError
        command = CommandsFactory.COMMANDS_DICT[command_name].from_json_dict(
            podsixnet_dict
        )
        command.client_id = podsixnet_dict["client_id"]
        command.check_result = CheckResult.from_json_dict(
            podsixnet_dict["check_result"]
        )
        return command
|
flexible
|
{
"blob_id": "22b9868063d6c5fc3f8b08a6e725fff40f4a1a03",
"index": 3886,
"step-1": "<mask token>\n\n\nclass BuildCommand(ServerCommand):\n\n def __init__(self, tile: tuple, building_name: str):\n ServerCommand.__init__(self)\n self._tile = tile\n self._building_name = building_name\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\nclass CollectResourceCommand(ServerCommand):\n ENERGY_COST = 30\n\n def __init__(self, tile: tuple, item: Item):\n ServerCommand.__init__(self)\n self._tile = tile\n self._item = item\n\n def _check(self):\n player = self.town.get_player(self.client_id)\n AvailableCheck(player).check(self.check_result)\n if self._tile not in self.town.resources:\n self.check_result += 'No resource in {}'.format(self._tile)\n return\n resource = self.town.resources[self._tile]\n TransactionCheck(resource, player, self._item).check(self.check_result)\n EnergyCheck(player, CollectResourceCommand.ENERGY_COST).check(self.\n check_result)\n\n def _do(self):\n player = self.town.get_player(self.client_id)\n player.inventory.add_item(self._item)\n resource = self.town.resources[self._tile]\n resource.inventory.remove_item(self._item)\n player.energy.value -= CollectResourceCommand.ENERGY_COST\n\n def __repr__(self):\n msg = 'Collect Resource ServerCommand : {}'.format(self._item)\n if not self.check_result:\n msg += '\\n{}'.format(self.check_result)\n return msg\n\n @classmethod\n def from_json_dict(cls, json_dict: dict) ->CollectResourceCommand:\n return cls(json_dict['tile'], Item.from_json_dict(json_dict['item']))\n\n def to_json_dict(self) ->dict:\n json_dict = super().to_json_dict()\n json_dict['command'] = 'collect'\n json_dict['tile'] = self._tile\n json_dict['item'] = self._item.to_json_dict()\n return json_dict\n\n\nclass BuildingProcessCommand(ServerCommand):\n\n def __init__(self, tile: tuple, building_process: BuildingProcess):\n ServerCommand.__init__(self)\n self._tile = tile\n self._building_process = building_process\n\n def _check(self):\n player = self.town.get_player(self.client_id)\n 
AvailableCheck(player).check(self.check_result)\n if self._tile not in self.town.buildings:\n self.check_result += 'No building on {}'.format(self._tile)\n return\n building = self.town.buildings[self._tile]\n player = self.town.get_player(self.client_id)\n InventoryRemoveCheck(building.inventory, self._building_process.\n item_required).check(self.check_result)\n InventoryAddCheck(building.inventory, self._building_process.\n item_result).check(self.check_result)\n EnergyCheck(player, self._building_process.energy_required).check(self\n .check_result)\n\n def _do(self):\n building = self.town.buildings[self._tile]\n building.inventory.remove_item(self._building_process.item_required)\n building.inventory.add_item(self._building_process.item_result)\n player = self.town.get_player(self.client_id)\n player.energy.value -= self._building_process.energy_required\n\n def __repr__(self):\n msg = 'BuildingProcessCommand ServerCommand {}'.format(self.\n _building_process)\n if not self.check_result:\n msg += '\\n{}'.format(self.check_result)\n return msg\n\n @classmethod\n def from_json_dict(cls, json_dict):\n return cls(json_dict['tile'], BuildingProcess.from_json_dict(\n json_dict['building_process']))\n\n def to_json_dict(self):\n json_dict = super().to_json_dict()\n json_dict['command'] = 'building_process'\n json_dict['tile'] = self._tile\n json_dict['building_process'] = self._building_process.to_json_dict()\n return json_dict\n\n\nclass BuyCommand(ServerCommand):\n\n def __init__(self, tile: tuple, transaction: BuildingTransaction):\n ServerCommand.__init__(self)\n self._tile = tile\n self._transaction = transaction\n\n def _check(self):\n building = self.town.buildings[self._tile]\n player = self.town.get_player(self.client_id)\n item = Item(self._transaction.item_name, 1)\n AvailableCheck(player).check(self.check_result)\n TransactionCheck(building, player, item).check(self.check_result)\n\n def _do(self):\n item = Item(self._transaction.item_name, 1)\n building 
= self.town.buildings[self._tile]\n player = self.town.get_player(self.client_id)\n building.inventory.remove_item(item)\n player.inventory.add_item(item)\n\n def __repr__(self):\n msg = 'BuyCommand ServerCommand {}'.format(self._transaction.item_name)\n if not self.check_result:\n msg += '\\n{}'.format(self.check_result)\n return msg\n\n @classmethod\n def from_json_dict(cls, json_dict):\n return cls(json_dict['tile'], BuildingTransaction.from_json_dict(\n json_dict['transaction']))\n\n def to_json_dict(self):\n json_dict = super().to_json_dict()\n json_dict['command'] = 'buy'\n json_dict['tile'] = self._tile\n json_dict['transaction'] = self._transaction.to_json_dict()\n return json_dict\n\n\nclass SellCommand(ServerCommand):\n\n def __init__(self, tile: tuple, transaction: BuildingTransaction):\n ServerCommand.__init__(self)\n self._tile = tile\n self._transaction = transaction\n\n def _check(self):\n building = self.town.buildings[self._tile]\n player = self.town.get_player(self.client_id)\n item = Item(self._transaction.item_name, 1)\n AvailableCheck(player).check(self.check_result)\n TransactionCheck(player, building, item).check(self.check_result)\n\n def _do(self):\n item = Item(self._transaction.item_name, 1)\n building = self.town.buildings[self._tile]\n player = self.town.get_player(self.client_id)\n building.inventory.add_item(item)\n player.inventory.remove_item(item)\n\n def __repr__(self):\n msg = 'SellCommand ServerCommand {}'.format(self._transaction.item_name\n )\n if not self.check_result:\n msg += '\\n{}'.format(self.check_result)\n return msg\n\n @classmethod\n def from_json_dict(cls, json_dict):\n return cls(json_dict['tile'], BuildingTransaction.from_json_dict(\n json_dict['transaction']))\n\n def to_json_dict(self):\n json_dict = super().to_json_dict()\n json_dict['command'] = 'sell'\n json_dict['tile'] = self._tile\n json_dict['transaction'] = self._transaction.to_json_dict()\n return json_dict\n\n\nclass 
BuildBuildingCommand(ServerCommand):\n ENERGY_COST = 20\n\n def __init__(self, tile: tuple, item: Item):\n ServerCommand.__init__(self)\n self._item = item\n self._tile = tile\n\n def _check(self):\n building = self.town.buildings[self._tile]\n player = self.town.get_player(self.client_id)\n AvailableCheck(player).check(self.check_result)\n EnergyCheck(player, BuildBuildingCommand.ENERGY_COST).check(self.\n check_result)\n TransactionCheck(building, building, self._item).check(self.\n check_result)\n\n def _do(self):\n building = self.town.buildings[self._tile]\n player = self.town.get_player(self.client_id)\n player.energy.value -= BuildBuildingCommand.ENERGY_COST\n building.inventory.remove_item(self._item)\n building.construction_inventory.add_item(self._item)\n\n def __repr__(self):\n msg = 'Build Building ServerCommand {}'.format(self._item)\n if not self.check_result:\n msg += '\\n{}'.format(self.check_result)\n return msg\n\n @classmethod\n def from_json_dict(cls, json_dict):\n return cls(json_dict['tile'], Item.from_json_dict(json_dict['item']))\n\n def to_json_dict(self):\n json_dict = super().to_json_dict()\n json_dict['command'] = 'build_building'\n json_dict['tile'] = self._tile\n json_dict['item'] = self._item.to_json_dict()\n return json_dict\n\n\nclass UpgradeBuildingCommand(ServerCommand):\n\n def __init__(self, tile: tuple):\n ServerCommand.__init__(self)\n self._tile = tile\n\n def _check(self):\n building = self.town.buildings[self._tile]\n player = self.town.get_player(self.client_id)\n AvailableCheck(player).check(self.check_result)\n if not building.construction_inventory.is_full():\n self.check_result += 'construction not finished'\n\n def _do(self):\n building = self.town.buildings[self._tile]\n building.upgrade()\n\n def __repr__(self):\n msg = 'Upgrade Building ServerCommand {}'.format(self._tile)\n if not self.check_result:\n msg += '\\n{}'.format(self.check_result)\n return msg\n\n @classmethod\n def from_json_dict(cls, json_dict):\n 
return cls(json_dict['tile'])\n\n def to_json_dict(self):\n json_dict = super().to_json_dict()\n json_dict['command'] = 'upgrade_building'\n json_dict['tile'] = self._tile\n return json_dict\n\n\nclass SleepCommand(ServerCommand):\n ENERGY_REGEN_IN_HOUSE = 4\n ENERGY_REGEN_IN_GROUND = 2\n\n def __init__(self):\n ServerCommand.__init__(self)\n\n def _check(self):\n tile = self.town.get_player_tile(self.client_id)\n if tile in self.town.buildings and self.town.buildings[tile\n ].name != 'cabane':\n self.check_result += \"Can't sleep in building\"\n\n def _do(self):\n player = self.town.get_player(self.client_id)\n tile = self.town.get_player_tile(self.client_id)\n player.status = 'sleep'\n player.energy.regen = SleepCommand.ENERGY_REGEN_IN_GROUND\n if tile in self.town.buildings and self.town.buildings[tile\n ].name == 'cabane':\n player.energy.regen = SleepCommand.ENERGY_REGEN_IN_HOUSE\n\n def __repr__(self):\n msg = 'Sleep command. Player id: {}'.format(self.client_id)\n if not self.check_result:\n msg += '\\n{}'.format(self.check_result)\n return msg\n\n @classmethod\n def from_json_dict(cls, json_dict: dict) ->SleepCommand:\n return cls()\n\n def to_json_dict(self):\n json_dict = super().to_json_dict()\n json_dict['command'] = 'sleep'\n return json_dict\n\n\nclass WakeUpCommand(ServerCommand):\n\n def __init__(self):\n ServerCommand.__init__(self)\n\n def _check(self):\n player = self.town.get_player(self.client_id)\n is_awaken_check = CheckResult()\n AwakenCheck(player).check(is_awaken_check)\n if is_awaken_check:\n self.check_result += '{} is already awake'.format(player.name)\n\n def _do(self):\n player = self.town.get_player(self.client_id)\n player.status = 'idle'\n player.energy.reset_regen()\n\n def __repr__(self):\n msg = 'Wake up command. 
Player id: {}'.format(self.client_id)\n if not self.check_result:\n msg += '\\n{}'.format(self.check_result)\n return msg\n\n @classmethod\n def from_json_dict(cls, json_dict: dict) ->WakeUpCommand:\n return cls()\n\n def to_json_dict(self):\n json_dict = super().to_json_dict()\n json_dict['command'] = 'wakeup'\n return json_dict\n\n\nclass HelpPlayerCommand(ServerCommand):\n ENERGY_TO_HELP = 20\n HEALTH_TO_GIVE = 1\n\n def __init__(self, player_to_help_id):\n ServerCommand.__init__(self)\n self._player_to_help_id = player_to_help_id\n\n def _check(self):\n player = self.town.get_player(self.client_id)\n AvailableCheck(player).check(self.check_result)\n if self.client_id not in self.town.players.keys():\n self.check_result += 'Player {} does not exist'.format(self.\n client_id)\n return\n if self._player_to_help_id not in self.town.players.keys():\n self.check_result += 'Player {} does not exist'.format(self.\n _player_to_help_id)\n return\n if self.town.get_player_tile(self.client_id\n ) != self.town.get_player_tile(self._player_to_help_id):\n self.check_result += ('Players {} and {} are not in the same tile'\n .format(self.client_id, self._player_to_help_id))\n return\n EnergyCheck(self.town.get_player(self.client_id), HelpPlayerCommand\n .ENERGY_TO_HELP).check(self.check_result)\n is_alive_check = CheckResult()\n AvailableCheck(self.town.get_player(self._player_to_help_id)).check(\n is_alive_check)\n if is_alive_check:\n self.check_result += '{} has enough health to keep moving'.format(\n self._player_to_help_id)\n\n def _do(self):\n player_helper = self.town.get_player(self.client_id)\n player_helper.energy.value -= HelpPlayerCommand.ENERGY_TO_HELP\n player_to_help = self.town.get_player(self._player_to_help_id)\n player_to_help.health.value += HelpPlayerCommand.HEALTH_TO_GIVE\n\n def __repr__(self):\n msg = 'HelpPlayerCommand: try to help {}'.format(self.\n _player_to_help_id)\n if not self.check_result:\n msg += '\\n{}'.format(self.check_result)\n return 
msg\n\n @classmethod\n def from_json_dict(cls, json_dict: dict) ->HelpPlayerCommand:\n return cls(json_dict['player_to_help_id'])\n\n def to_json_dict(self):\n json_dict = super().to_json_dict()\n json_dict['command'] = 'help'\n json_dict['player_to_help_id'] = self._player_to_help_id\n return json_dict\n\n\nclass CommandsFactory:\n COMMANDS_DICT = {}\n COMMANDS_DICT['move'] = MovePlayerCommand\n COMMANDS_DICT['build'] = BuildCommand\n COMMANDS_DICT['collect'] = CollectResourceCommand\n COMMANDS_DICT['building_process'] = BuildingProcessCommand\n COMMANDS_DICT['buy'] = BuyCommand\n COMMANDS_DICT['sell'] = SellCommand\n COMMANDS_DICT['build_building'] = BuildBuildingCommand\n COMMANDS_DICT['upgrade_building'] = UpgradeBuildingCommand\n COMMANDS_DICT['help'] = HelpPlayerCommand\n COMMANDS_DICT['sleep'] = SleepCommand\n COMMANDS_DICT['wakeup'] = WakeUpCommand\n\n @staticmethod\n def from_podsixnet(podsixnet_dict):\n if podsixnet_dict['command'] in CommandsFactory.COMMANDS_DICT:\n command = CommandsFactory.COMMANDS_DICT[podsixnet_dict['command']\n ].from_json_dict(podsixnet_dict)\n else:\n raise NotImplementedError\n command.client_id = podsixnet_dict['client_id']\n command.check_result = CheckResult.from_json_dict(podsixnet_dict[\n 'check_result'])\n return command\n",
"step-2": "<mask token>\n\n\nclass BuildCommand(ServerCommand):\n\n def __init__(self, tile: tuple, building_name: str):\n ServerCommand.__init__(self)\n self._tile = tile\n self._building_name = building_name\n <mask token>\n <mask token>\n <mask token>\n\n @classmethod\n def from_json_dict(cls, json_dict: dict) ->BuildCommand:\n return cls(json_dict['tile'], json_dict['building_name'])\n\n def to_json_dict(self):\n json_dict = super().to_json_dict()\n json_dict['command'] = 'build'\n json_dict['building_name'] = self._building_name\n json_dict['tile'] = self._tile\n return json_dict\n\n\nclass CollectResourceCommand(ServerCommand):\n ENERGY_COST = 30\n\n def __init__(self, tile: tuple, item: Item):\n ServerCommand.__init__(self)\n self._tile = tile\n self._item = item\n\n def _check(self):\n player = self.town.get_player(self.client_id)\n AvailableCheck(player).check(self.check_result)\n if self._tile not in self.town.resources:\n self.check_result += 'No resource in {}'.format(self._tile)\n return\n resource = self.town.resources[self._tile]\n TransactionCheck(resource, player, self._item).check(self.check_result)\n EnergyCheck(player, CollectResourceCommand.ENERGY_COST).check(self.\n check_result)\n\n def _do(self):\n player = self.town.get_player(self.client_id)\n player.inventory.add_item(self._item)\n resource = self.town.resources[self._tile]\n resource.inventory.remove_item(self._item)\n player.energy.value -= CollectResourceCommand.ENERGY_COST\n\n def __repr__(self):\n msg = 'Collect Resource ServerCommand : {}'.format(self._item)\n if not self.check_result:\n msg += '\\n{}'.format(self.check_result)\n return msg\n\n @classmethod\n def from_json_dict(cls, json_dict: dict) ->CollectResourceCommand:\n return cls(json_dict['tile'], Item.from_json_dict(json_dict['item']))\n\n def to_json_dict(self) ->dict:\n json_dict = super().to_json_dict()\n json_dict['command'] = 'collect'\n json_dict['tile'] = self._tile\n json_dict['item'] = self._item.to_json_dict()\n 
return json_dict\n\n\nclass BuildingProcessCommand(ServerCommand):\n\n def __init__(self, tile: tuple, building_process: BuildingProcess):\n ServerCommand.__init__(self)\n self._tile = tile\n self._building_process = building_process\n\n def _check(self):\n player = self.town.get_player(self.client_id)\n AvailableCheck(player).check(self.check_result)\n if self._tile not in self.town.buildings:\n self.check_result += 'No building on {}'.format(self._tile)\n return\n building = self.town.buildings[self._tile]\n player = self.town.get_player(self.client_id)\n InventoryRemoveCheck(building.inventory, self._building_process.\n item_required).check(self.check_result)\n InventoryAddCheck(building.inventory, self._building_process.\n item_result).check(self.check_result)\n EnergyCheck(player, self._building_process.energy_required).check(self\n .check_result)\n\n def _do(self):\n building = self.town.buildings[self._tile]\n building.inventory.remove_item(self._building_process.item_required)\n building.inventory.add_item(self._building_process.item_result)\n player = self.town.get_player(self.client_id)\n player.energy.value -= self._building_process.energy_required\n\n def __repr__(self):\n msg = 'BuildingProcessCommand ServerCommand {}'.format(self.\n _building_process)\n if not self.check_result:\n msg += '\\n{}'.format(self.check_result)\n return msg\n\n @classmethod\n def from_json_dict(cls, json_dict):\n return cls(json_dict['tile'], BuildingProcess.from_json_dict(\n json_dict['building_process']))\n\n def to_json_dict(self):\n json_dict = super().to_json_dict()\n json_dict['command'] = 'building_process'\n json_dict['tile'] = self._tile\n json_dict['building_process'] = self._building_process.to_json_dict()\n return json_dict\n\n\nclass BuyCommand(ServerCommand):\n\n def __init__(self, tile: tuple, transaction: BuildingTransaction):\n ServerCommand.__init__(self)\n self._tile = tile\n self._transaction = transaction\n\n def _check(self):\n building = 
self.town.buildings[self._tile]\n player = self.town.get_player(self.client_id)\n item = Item(self._transaction.item_name, 1)\n AvailableCheck(player).check(self.check_result)\n TransactionCheck(building, player, item).check(self.check_result)\n\n def _do(self):\n item = Item(self._transaction.item_name, 1)\n building = self.town.buildings[self._tile]\n player = self.town.get_player(self.client_id)\n building.inventory.remove_item(item)\n player.inventory.add_item(item)\n\n def __repr__(self):\n msg = 'BuyCommand ServerCommand {}'.format(self._transaction.item_name)\n if not self.check_result:\n msg += '\\n{}'.format(self.check_result)\n return msg\n\n @classmethod\n def from_json_dict(cls, json_dict):\n return cls(json_dict['tile'], BuildingTransaction.from_json_dict(\n json_dict['transaction']))\n\n def to_json_dict(self):\n json_dict = super().to_json_dict()\n json_dict['command'] = 'buy'\n json_dict['tile'] = self._tile\n json_dict['transaction'] = self._transaction.to_json_dict()\n return json_dict\n\n\nclass SellCommand(ServerCommand):\n\n def __init__(self, tile: tuple, transaction: BuildingTransaction):\n ServerCommand.__init__(self)\n self._tile = tile\n self._transaction = transaction\n\n def _check(self):\n building = self.town.buildings[self._tile]\n player = self.town.get_player(self.client_id)\n item = Item(self._transaction.item_name, 1)\n AvailableCheck(player).check(self.check_result)\n TransactionCheck(player, building, item).check(self.check_result)\n\n def _do(self):\n item = Item(self._transaction.item_name, 1)\n building = self.town.buildings[self._tile]\n player = self.town.get_player(self.client_id)\n building.inventory.add_item(item)\n player.inventory.remove_item(item)\n\n def __repr__(self):\n msg = 'SellCommand ServerCommand {}'.format(self._transaction.item_name\n )\n if not self.check_result:\n msg += '\\n{}'.format(self.check_result)\n return msg\n\n @classmethod\n def from_json_dict(cls, json_dict):\n return cls(json_dict['tile'], 
BuildingTransaction.from_json_dict(\n json_dict['transaction']))\n\n def to_json_dict(self):\n json_dict = super().to_json_dict()\n json_dict['command'] = 'sell'\n json_dict['tile'] = self._tile\n json_dict['transaction'] = self._transaction.to_json_dict()\n return json_dict\n\n\nclass BuildBuildingCommand(ServerCommand):\n ENERGY_COST = 20\n\n def __init__(self, tile: tuple, item: Item):\n ServerCommand.__init__(self)\n self._item = item\n self._tile = tile\n\n def _check(self):\n building = self.town.buildings[self._tile]\n player = self.town.get_player(self.client_id)\n AvailableCheck(player).check(self.check_result)\n EnergyCheck(player, BuildBuildingCommand.ENERGY_COST).check(self.\n check_result)\n TransactionCheck(building, building, self._item).check(self.\n check_result)\n\n def _do(self):\n building = self.town.buildings[self._tile]\n player = self.town.get_player(self.client_id)\n player.energy.value -= BuildBuildingCommand.ENERGY_COST\n building.inventory.remove_item(self._item)\n building.construction_inventory.add_item(self._item)\n\n def __repr__(self):\n msg = 'Build Building ServerCommand {}'.format(self._item)\n if not self.check_result:\n msg += '\\n{}'.format(self.check_result)\n return msg\n\n @classmethod\n def from_json_dict(cls, json_dict):\n return cls(json_dict['tile'], Item.from_json_dict(json_dict['item']))\n\n def to_json_dict(self):\n json_dict = super().to_json_dict()\n json_dict['command'] = 'build_building'\n json_dict['tile'] = self._tile\n json_dict['item'] = self._item.to_json_dict()\n return json_dict\n\n\nclass UpgradeBuildingCommand(ServerCommand):\n\n def __init__(self, tile: tuple):\n ServerCommand.__init__(self)\n self._tile = tile\n\n def _check(self):\n building = self.town.buildings[self._tile]\n player = self.town.get_player(self.client_id)\n AvailableCheck(player).check(self.check_result)\n if not building.construction_inventory.is_full():\n self.check_result += 'construction not finished'\n\n def _do(self):\n building 
= self.town.buildings[self._tile]\n building.upgrade()\n\n def __repr__(self):\n msg = 'Upgrade Building ServerCommand {}'.format(self._tile)\n if not self.check_result:\n msg += '\\n{}'.format(self.check_result)\n return msg\n\n @classmethod\n def from_json_dict(cls, json_dict):\n return cls(json_dict['tile'])\n\n def to_json_dict(self):\n json_dict = super().to_json_dict()\n json_dict['command'] = 'upgrade_building'\n json_dict['tile'] = self._tile\n return json_dict\n\n\nclass SleepCommand(ServerCommand):\n ENERGY_REGEN_IN_HOUSE = 4\n ENERGY_REGEN_IN_GROUND = 2\n\n def __init__(self):\n ServerCommand.__init__(self)\n\n def _check(self):\n tile = self.town.get_player_tile(self.client_id)\n if tile in self.town.buildings and self.town.buildings[tile\n ].name != 'cabane':\n self.check_result += \"Can't sleep in building\"\n\n def _do(self):\n player = self.town.get_player(self.client_id)\n tile = self.town.get_player_tile(self.client_id)\n player.status = 'sleep'\n player.energy.regen = SleepCommand.ENERGY_REGEN_IN_GROUND\n if tile in self.town.buildings and self.town.buildings[tile\n ].name == 'cabane':\n player.energy.regen = SleepCommand.ENERGY_REGEN_IN_HOUSE\n\n def __repr__(self):\n msg = 'Sleep command. 
Player id: {}'.format(self.client_id)\n if not self.check_result:\n msg += '\\n{}'.format(self.check_result)\n return msg\n\n @classmethod\n def from_json_dict(cls, json_dict: dict) ->SleepCommand:\n return cls()\n\n def to_json_dict(self):\n json_dict = super().to_json_dict()\n json_dict['command'] = 'sleep'\n return json_dict\n\n\nclass WakeUpCommand(ServerCommand):\n\n def __init__(self):\n ServerCommand.__init__(self)\n\n def _check(self):\n player = self.town.get_player(self.client_id)\n is_awaken_check = CheckResult()\n AwakenCheck(player).check(is_awaken_check)\n if is_awaken_check:\n self.check_result += '{} is already awake'.format(player.name)\n\n def _do(self):\n player = self.town.get_player(self.client_id)\n player.status = 'idle'\n player.energy.reset_regen()\n\n def __repr__(self):\n msg = 'Wake up command. Player id: {}'.format(self.client_id)\n if not self.check_result:\n msg += '\\n{}'.format(self.check_result)\n return msg\n\n @classmethod\n def from_json_dict(cls, json_dict: dict) ->WakeUpCommand:\n return cls()\n\n def to_json_dict(self):\n json_dict = super().to_json_dict()\n json_dict['command'] = 'wakeup'\n return json_dict\n\n\nclass HelpPlayerCommand(ServerCommand):\n ENERGY_TO_HELP = 20\n HEALTH_TO_GIVE = 1\n\n def __init__(self, player_to_help_id):\n ServerCommand.__init__(self)\n self._player_to_help_id = player_to_help_id\n\n def _check(self):\n player = self.town.get_player(self.client_id)\n AvailableCheck(player).check(self.check_result)\n if self.client_id not in self.town.players.keys():\n self.check_result += 'Player {} does not exist'.format(self.\n client_id)\n return\n if self._player_to_help_id not in self.town.players.keys():\n self.check_result += 'Player {} does not exist'.format(self.\n _player_to_help_id)\n return\n if self.town.get_player_tile(self.client_id\n ) != self.town.get_player_tile(self._player_to_help_id):\n self.check_result += ('Players {} and {} are not in the same tile'\n .format(self.client_id, 
self._player_to_help_id))\n return\n EnergyCheck(self.town.get_player(self.client_id), HelpPlayerCommand\n .ENERGY_TO_HELP).check(self.check_result)\n is_alive_check = CheckResult()\n AvailableCheck(self.town.get_player(self._player_to_help_id)).check(\n is_alive_check)\n if is_alive_check:\n self.check_result += '{} has enough health to keep moving'.format(\n self._player_to_help_id)\n\n def _do(self):\n player_helper = self.town.get_player(self.client_id)\n player_helper.energy.value -= HelpPlayerCommand.ENERGY_TO_HELP\n player_to_help = self.town.get_player(self._player_to_help_id)\n player_to_help.health.value += HelpPlayerCommand.HEALTH_TO_GIVE\n\n def __repr__(self):\n msg = 'HelpPlayerCommand: try to help {}'.format(self.\n _player_to_help_id)\n if not self.check_result:\n msg += '\\n{}'.format(self.check_result)\n return msg\n\n @classmethod\n def from_json_dict(cls, json_dict: dict) ->HelpPlayerCommand:\n return cls(json_dict['player_to_help_id'])\n\n def to_json_dict(self):\n json_dict = super().to_json_dict()\n json_dict['command'] = 'help'\n json_dict['player_to_help_id'] = self._player_to_help_id\n return json_dict\n\n\nclass CommandsFactory:\n COMMANDS_DICT = {}\n COMMANDS_DICT['move'] = MovePlayerCommand\n COMMANDS_DICT['build'] = BuildCommand\n COMMANDS_DICT['collect'] = CollectResourceCommand\n COMMANDS_DICT['building_process'] = BuildingProcessCommand\n COMMANDS_DICT['buy'] = BuyCommand\n COMMANDS_DICT['sell'] = SellCommand\n COMMANDS_DICT['build_building'] = BuildBuildingCommand\n COMMANDS_DICT['upgrade_building'] = UpgradeBuildingCommand\n COMMANDS_DICT['help'] = HelpPlayerCommand\n COMMANDS_DICT['sleep'] = SleepCommand\n COMMANDS_DICT['wakeup'] = WakeUpCommand\n\n @staticmethod\n def from_podsixnet(podsixnet_dict):\n if podsixnet_dict['command'] in CommandsFactory.COMMANDS_DICT:\n command = CommandsFactory.COMMANDS_DICT[podsixnet_dict['command']\n ].from_json_dict(podsixnet_dict)\n else:\n raise NotImplementedError\n command.client_id = 
podsixnet_dict['client_id']\n command.check_result = CheckResult.from_json_dict(podsixnet_dict[\n 'check_result'])\n return command\n",
"step-3": "<mask token>\n\n\nclass MovePlayerCommand(ServerCommand):\n <mask token>\n <mask token>\n\n def __repr__(self):\n msg = 'Move ServerCommand : {}'.format(self._direction)\n if not self.check_result:\n msg += '\\n{}'.format(self.check_result)\n return msg\n\n def _check(self):\n player = self.town.get_player(self.client_id)\n EnergyCheck(player, MovePlayerCommand.ENERGY_COST).check(self.\n check_result)\n AvailableCheck(player).check(self.check_result)\n for tile in self._get_tiles_coordinates_dict().values():\n if tile not in self.town.backgrounds.keys():\n self.check_result += 'tile {} not in town'.format(tile)\n return\n BackgroundMovementCheck(self.town.backgrounds[tile], player).check(\n self.check_result)\n\n def _do(self):\n x_dest, y_dest = self.tile_dest\n player = self.town.get_player(self.client_id)\n player.status = 'move'\n player.direction = self._direction\n player.energy.value -= MovePlayerCommand.ENERGY_COST\n player.x = x_dest\n player.y = y_dest\n\n @property\n def tile_dest(self) ->tuple:\n movement_matrix = {}\n movement_matrix['left'] = -1, 0\n movement_matrix['right'] = +1, 0\n movement_matrix['up'] = 0, -1\n movement_matrix['down'] = 0, +1\n player = self.town.get_player(self.client_id)\n tile = self.town.get_player_tile(self.client_id)\n background = self.town.backgrounds[tile]\n bg_multiplicator = background.move_multiplicator\n x_dest = player.x + movement_matrix[self._direction][0\n ] * bg_multiplicator * player.velocity\n y_dest = player.y + movement_matrix[self._direction][1\n ] * bg_multiplicator * player.velocity\n return x_dest, y_dest\n\n def _get_tiles_coordinates_dict(self):\n x_dest, y_dest = self.tile_dest\n tiles_coordinates_dict = {'topleft': (math.floor(x_dest), math.\n floor(y_dest)), 'topright': (math.floor(x_dest + 0.99), math.\n floor(y_dest)), 'bottomleft': (math.floor(x_dest), math.floor(\n y_dest + 0.99)), 'bottomright': (math.floor(x_dest + 0.99),\n math.floor(y_dest + 0.99))}\n return 
tiles_coordinates_dict\n\n @classmethod\n def from_json_dict(cls, json_dict) ->MovePlayerCommand:\n return cls(json_dict['direction'])\n <mask token>\n\n\nclass BuildCommand(ServerCommand):\n\n def __init__(self, tile: tuple, building_name: str):\n ServerCommand.__init__(self)\n self._tile = tile\n self._building_name = building_name\n\n def _check(self):\n player = self.town.get_player(self.client_id)\n AvailableCheck(player).check(self.check_result)\n if self._tile not in self.town.backgrounds:\n self.check_result += 'tile {} not in town'.format(self._tile)\n return\n background = self.town.backgrounds[self._tile]\n BackgroundBuildCheck(background, self._building_name).check(self.\n check_result)\n if self._tile in self.town.buildings:\n self.check_result += (\"Can't build {} : {} already built on {}\"\n .format(self._building_name, self.town.buildings[self._tile\n ].name, self._tile))\n\n def _do(self):\n self.town.set_building(BuildingFactory.create_building_by_name(self\n ._building_name), self._tile)\n\n def __repr__(self):\n msg = 'Build ServerCommand : {} in {}'.format(self._building_name,\n self._tile)\n if not self.check_result:\n msg += '\\n{}'.format(self.check_result)\n return msg\n\n @classmethod\n def from_json_dict(cls, json_dict: dict) ->BuildCommand:\n return cls(json_dict['tile'], json_dict['building_name'])\n\n def to_json_dict(self):\n json_dict = super().to_json_dict()\n json_dict['command'] = 'build'\n json_dict['building_name'] = self._building_name\n json_dict['tile'] = self._tile\n return json_dict\n\n\nclass CollectResourceCommand(ServerCommand):\n ENERGY_COST = 30\n\n def __init__(self, tile: tuple, item: Item):\n ServerCommand.__init__(self)\n self._tile = tile\n self._item = item\n\n def _check(self):\n player = self.town.get_player(self.client_id)\n AvailableCheck(player).check(self.check_result)\n if self._tile not in self.town.resources:\n self.check_result += 'No resource in {}'.format(self._tile)\n return\n resource = 
self.town.resources[self._tile]\n TransactionCheck(resource, player, self._item).check(self.check_result)\n EnergyCheck(player, CollectResourceCommand.ENERGY_COST).check(self.\n check_result)\n\n def _do(self):\n player = self.town.get_player(self.client_id)\n player.inventory.add_item(self._item)\n resource = self.town.resources[self._tile]\n resource.inventory.remove_item(self._item)\n player.energy.value -= CollectResourceCommand.ENERGY_COST\n\n def __repr__(self):\n msg = 'Collect Resource ServerCommand : {}'.format(self._item)\n if not self.check_result:\n msg += '\\n{}'.format(self.check_result)\n return msg\n\n @classmethod\n def from_json_dict(cls, json_dict: dict) ->CollectResourceCommand:\n return cls(json_dict['tile'], Item.from_json_dict(json_dict['item']))\n\n def to_json_dict(self) ->dict:\n json_dict = super().to_json_dict()\n json_dict['command'] = 'collect'\n json_dict['tile'] = self._tile\n json_dict['item'] = self._item.to_json_dict()\n return json_dict\n\n\nclass BuildingProcessCommand(ServerCommand):\n\n def __init__(self, tile: tuple, building_process: BuildingProcess):\n ServerCommand.__init__(self)\n self._tile = tile\n self._building_process = building_process\n\n def _check(self):\n player = self.town.get_player(self.client_id)\n AvailableCheck(player).check(self.check_result)\n if self._tile not in self.town.buildings:\n self.check_result += 'No building on {}'.format(self._tile)\n return\n building = self.town.buildings[self._tile]\n player = self.town.get_player(self.client_id)\n InventoryRemoveCheck(building.inventory, self._building_process.\n item_required).check(self.check_result)\n InventoryAddCheck(building.inventory, self._building_process.\n item_result).check(self.check_result)\n EnergyCheck(player, self._building_process.energy_required).check(self\n .check_result)\n\n def _do(self):\n building = self.town.buildings[self._tile]\n building.inventory.remove_item(self._building_process.item_required)\n 
building.inventory.add_item(self._building_process.item_result)\n player = self.town.get_player(self.client_id)\n player.energy.value -= self._building_process.energy_required\n\n def __repr__(self):\n msg = 'BuildingProcessCommand ServerCommand {}'.format(self.\n _building_process)\n if not self.check_result:\n msg += '\\n{}'.format(self.check_result)\n return msg\n\n @classmethod\n def from_json_dict(cls, json_dict):\n return cls(json_dict['tile'], BuildingProcess.from_json_dict(\n json_dict['building_process']))\n\n def to_json_dict(self):\n json_dict = super().to_json_dict()\n json_dict['command'] = 'building_process'\n json_dict['tile'] = self._tile\n json_dict['building_process'] = self._building_process.to_json_dict()\n return json_dict\n\n\nclass BuyCommand(ServerCommand):\n\n def __init__(self, tile: tuple, transaction: BuildingTransaction):\n ServerCommand.__init__(self)\n self._tile = tile\n self._transaction = transaction\n\n def _check(self):\n building = self.town.buildings[self._tile]\n player = self.town.get_player(self.client_id)\n item = Item(self._transaction.item_name, 1)\n AvailableCheck(player).check(self.check_result)\n TransactionCheck(building, player, item).check(self.check_result)\n\n def _do(self):\n item = Item(self._transaction.item_name, 1)\n building = self.town.buildings[self._tile]\n player = self.town.get_player(self.client_id)\n building.inventory.remove_item(item)\n player.inventory.add_item(item)\n\n def __repr__(self):\n msg = 'BuyCommand ServerCommand {}'.format(self._transaction.item_name)\n if not self.check_result:\n msg += '\\n{}'.format(self.check_result)\n return msg\n\n @classmethod\n def from_json_dict(cls, json_dict):\n return cls(json_dict['tile'], BuildingTransaction.from_json_dict(\n json_dict['transaction']))\n\n def to_json_dict(self):\n json_dict = super().to_json_dict()\n json_dict['command'] = 'buy'\n json_dict['tile'] = self._tile\n json_dict['transaction'] = self._transaction.to_json_dict()\n return 
json_dict\n\n\nclass SellCommand(ServerCommand):\n\n def __init__(self, tile: tuple, transaction: BuildingTransaction):\n ServerCommand.__init__(self)\n self._tile = tile\n self._transaction = transaction\n\n def _check(self):\n building = self.town.buildings[self._tile]\n player = self.town.get_player(self.client_id)\n item = Item(self._transaction.item_name, 1)\n AvailableCheck(player).check(self.check_result)\n TransactionCheck(player, building, item).check(self.check_result)\n\n def _do(self):\n item = Item(self._transaction.item_name, 1)\n building = self.town.buildings[self._tile]\n player = self.town.get_player(self.client_id)\n building.inventory.add_item(item)\n player.inventory.remove_item(item)\n\n def __repr__(self):\n msg = 'SellCommand ServerCommand {}'.format(self._transaction.item_name\n )\n if not self.check_result:\n msg += '\\n{}'.format(self.check_result)\n return msg\n\n @classmethod\n def from_json_dict(cls, json_dict):\n return cls(json_dict['tile'], BuildingTransaction.from_json_dict(\n json_dict['transaction']))\n\n def to_json_dict(self):\n json_dict = super().to_json_dict()\n json_dict['command'] = 'sell'\n json_dict['tile'] = self._tile\n json_dict['transaction'] = self._transaction.to_json_dict()\n return json_dict\n\n\nclass BuildBuildingCommand(ServerCommand):\n ENERGY_COST = 20\n\n def __init__(self, tile: tuple, item: Item):\n ServerCommand.__init__(self)\n self._item = item\n self._tile = tile\n\n def _check(self):\n building = self.town.buildings[self._tile]\n player = self.town.get_player(self.client_id)\n AvailableCheck(player).check(self.check_result)\n EnergyCheck(player, BuildBuildingCommand.ENERGY_COST).check(self.\n check_result)\n TransactionCheck(building, building, self._item).check(self.\n check_result)\n\n def _do(self):\n building = self.town.buildings[self._tile]\n player = self.town.get_player(self.client_id)\n player.energy.value -= BuildBuildingCommand.ENERGY_COST\n building.inventory.remove_item(self._item)\n 
building.construction_inventory.add_item(self._item)\n\n def __repr__(self):\n msg = 'Build Building ServerCommand {}'.format(self._item)\n if not self.check_result:\n msg += '\\n{}'.format(self.check_result)\n return msg\n\n @classmethod\n def from_json_dict(cls, json_dict):\n return cls(json_dict['tile'], Item.from_json_dict(json_dict['item']))\n\n def to_json_dict(self):\n json_dict = super().to_json_dict()\n json_dict['command'] = 'build_building'\n json_dict['tile'] = self._tile\n json_dict['item'] = self._item.to_json_dict()\n return json_dict\n\n\nclass UpgradeBuildingCommand(ServerCommand):\n\n def __init__(self, tile: tuple):\n ServerCommand.__init__(self)\n self._tile = tile\n\n def _check(self):\n building = self.town.buildings[self._tile]\n player = self.town.get_player(self.client_id)\n AvailableCheck(player).check(self.check_result)\n if not building.construction_inventory.is_full():\n self.check_result += 'construction not finished'\n\n def _do(self):\n building = self.town.buildings[self._tile]\n building.upgrade()\n\n def __repr__(self):\n msg = 'Upgrade Building ServerCommand {}'.format(self._tile)\n if not self.check_result:\n msg += '\\n{}'.format(self.check_result)\n return msg\n\n @classmethod\n def from_json_dict(cls, json_dict):\n return cls(json_dict['tile'])\n\n def to_json_dict(self):\n json_dict = super().to_json_dict()\n json_dict['command'] = 'upgrade_building'\n json_dict['tile'] = self._tile\n return json_dict\n\n\nclass SleepCommand(ServerCommand):\n ENERGY_REGEN_IN_HOUSE = 4\n ENERGY_REGEN_IN_GROUND = 2\n\n def __init__(self):\n ServerCommand.__init__(self)\n\n def _check(self):\n tile = self.town.get_player_tile(self.client_id)\n if tile in self.town.buildings and self.town.buildings[tile\n ].name != 'cabane':\n self.check_result += \"Can't sleep in building\"\n\n def _do(self):\n player = self.town.get_player(self.client_id)\n tile = self.town.get_player_tile(self.client_id)\n player.status = 'sleep'\n player.energy.regen = 
SleepCommand.ENERGY_REGEN_IN_GROUND\n if tile in self.town.buildings and self.town.buildings[tile\n ].name == 'cabane':\n player.energy.regen = SleepCommand.ENERGY_REGEN_IN_HOUSE\n\n def __repr__(self):\n msg = 'Sleep command. Player id: {}'.format(self.client_id)\n if not self.check_result:\n msg += '\\n{}'.format(self.check_result)\n return msg\n\n @classmethod\n def from_json_dict(cls, json_dict: dict) ->SleepCommand:\n return cls()\n\n def to_json_dict(self):\n json_dict = super().to_json_dict()\n json_dict['command'] = 'sleep'\n return json_dict\n\n\nclass WakeUpCommand(ServerCommand):\n\n def __init__(self):\n ServerCommand.__init__(self)\n\n def _check(self):\n player = self.town.get_player(self.client_id)\n is_awaken_check = CheckResult()\n AwakenCheck(player).check(is_awaken_check)\n if is_awaken_check:\n self.check_result += '{} is already awake'.format(player.name)\n\n def _do(self):\n player = self.town.get_player(self.client_id)\n player.status = 'idle'\n player.energy.reset_regen()\n\n def __repr__(self):\n msg = 'Wake up command. 
Player id: {}'.format(self.client_id)\n if not self.check_result:\n msg += '\\n{}'.format(self.check_result)\n return msg\n\n @classmethod\n def from_json_dict(cls, json_dict: dict) ->WakeUpCommand:\n return cls()\n\n def to_json_dict(self):\n json_dict = super().to_json_dict()\n json_dict['command'] = 'wakeup'\n return json_dict\n\n\nclass HelpPlayerCommand(ServerCommand):\n ENERGY_TO_HELP = 20\n HEALTH_TO_GIVE = 1\n\n def __init__(self, player_to_help_id):\n ServerCommand.__init__(self)\n self._player_to_help_id = player_to_help_id\n\n def _check(self):\n player = self.town.get_player(self.client_id)\n AvailableCheck(player).check(self.check_result)\n if self.client_id not in self.town.players.keys():\n self.check_result += 'Player {} does not exist'.format(self.\n client_id)\n return\n if self._player_to_help_id not in self.town.players.keys():\n self.check_result += 'Player {} does not exist'.format(self.\n _player_to_help_id)\n return\n if self.town.get_player_tile(self.client_id\n ) != self.town.get_player_tile(self._player_to_help_id):\n self.check_result += ('Players {} and {} are not in the same tile'\n .format(self.client_id, self._player_to_help_id))\n return\n EnergyCheck(self.town.get_player(self.client_id), HelpPlayerCommand\n .ENERGY_TO_HELP).check(self.check_result)\n is_alive_check = CheckResult()\n AvailableCheck(self.town.get_player(self._player_to_help_id)).check(\n is_alive_check)\n if is_alive_check:\n self.check_result += '{} has enough health to keep moving'.format(\n self._player_to_help_id)\n\n def _do(self):\n player_helper = self.town.get_player(self.client_id)\n player_helper.energy.value -= HelpPlayerCommand.ENERGY_TO_HELP\n player_to_help = self.town.get_player(self._player_to_help_id)\n player_to_help.health.value += HelpPlayerCommand.HEALTH_TO_GIVE\n\n def __repr__(self):\n msg = 'HelpPlayerCommand: try to help {}'.format(self.\n _player_to_help_id)\n if not self.check_result:\n msg += '\\n{}'.format(self.check_result)\n return 
msg\n\n @classmethod\n def from_json_dict(cls, json_dict: dict) ->HelpPlayerCommand:\n return cls(json_dict['player_to_help_id'])\n\n def to_json_dict(self):\n json_dict = super().to_json_dict()\n json_dict['command'] = 'help'\n json_dict['player_to_help_id'] = self._player_to_help_id\n return json_dict\n\n\nclass CommandsFactory:\n COMMANDS_DICT = {}\n COMMANDS_DICT['move'] = MovePlayerCommand\n COMMANDS_DICT['build'] = BuildCommand\n COMMANDS_DICT['collect'] = CollectResourceCommand\n COMMANDS_DICT['building_process'] = BuildingProcessCommand\n COMMANDS_DICT['buy'] = BuyCommand\n COMMANDS_DICT['sell'] = SellCommand\n COMMANDS_DICT['build_building'] = BuildBuildingCommand\n COMMANDS_DICT['upgrade_building'] = UpgradeBuildingCommand\n COMMANDS_DICT['help'] = HelpPlayerCommand\n COMMANDS_DICT['sleep'] = SleepCommand\n COMMANDS_DICT['wakeup'] = WakeUpCommand\n\n @staticmethod\n def from_podsixnet(podsixnet_dict):\n if podsixnet_dict['command'] in CommandsFactory.COMMANDS_DICT:\n command = CommandsFactory.COMMANDS_DICT[podsixnet_dict['command']\n ].from_json_dict(podsixnet_dict)\n else:\n raise NotImplementedError\n command.client_id = podsixnet_dict['client_id']\n command.check_result = CheckResult.from_json_dict(podsixnet_dict[\n 'check_result'])\n return command\n",
"step-4": "<mask token>\n\n\nclass MovePlayerCommand(ServerCommand):\n <mask token>\n\n def __init__(self, direction: str):\n ServerCommand.__init__(self)\n self._direction = direction\n\n def __repr__(self):\n msg = 'Move ServerCommand : {}'.format(self._direction)\n if not self.check_result:\n msg += '\\n{}'.format(self.check_result)\n return msg\n\n def _check(self):\n player = self.town.get_player(self.client_id)\n EnergyCheck(player, MovePlayerCommand.ENERGY_COST).check(self.\n check_result)\n AvailableCheck(player).check(self.check_result)\n for tile in self._get_tiles_coordinates_dict().values():\n if tile not in self.town.backgrounds.keys():\n self.check_result += 'tile {} not in town'.format(tile)\n return\n BackgroundMovementCheck(self.town.backgrounds[tile], player).check(\n self.check_result)\n\n def _do(self):\n x_dest, y_dest = self.tile_dest\n player = self.town.get_player(self.client_id)\n player.status = 'move'\n player.direction = self._direction\n player.energy.value -= MovePlayerCommand.ENERGY_COST\n player.x = x_dest\n player.y = y_dest\n\n @property\n def tile_dest(self) ->tuple:\n movement_matrix = {}\n movement_matrix['left'] = -1, 0\n movement_matrix['right'] = +1, 0\n movement_matrix['up'] = 0, -1\n movement_matrix['down'] = 0, +1\n player = self.town.get_player(self.client_id)\n tile = self.town.get_player_tile(self.client_id)\n background = self.town.backgrounds[tile]\n bg_multiplicator = background.move_multiplicator\n x_dest = player.x + movement_matrix[self._direction][0\n ] * bg_multiplicator * player.velocity\n y_dest = player.y + movement_matrix[self._direction][1\n ] * bg_multiplicator * player.velocity\n return x_dest, y_dest\n\n def _get_tiles_coordinates_dict(self):\n x_dest, y_dest = self.tile_dest\n tiles_coordinates_dict = {'topleft': (math.floor(x_dest), math.\n floor(y_dest)), 'topright': (math.floor(x_dest + 0.99), math.\n floor(y_dest)), 'bottomleft': (math.floor(x_dest), math.floor(\n y_dest + 0.99)), 'bottomright': 
(math.floor(x_dest + 0.99),\n math.floor(y_dest + 0.99))}\n return tiles_coordinates_dict\n\n @classmethod\n def from_json_dict(cls, json_dict) ->MovePlayerCommand:\n return cls(json_dict['direction'])\n\n def to_json_dict(self):\n json_dict = super().to_json_dict()\n json_dict['command'] = 'move'\n json_dict['direction'] = self._direction\n return json_dict\n\n\nclass BuildCommand(ServerCommand):\n\n def __init__(self, tile: tuple, building_name: str):\n ServerCommand.__init__(self)\n self._tile = tile\n self._building_name = building_name\n\n def _check(self):\n player = self.town.get_player(self.client_id)\n AvailableCheck(player).check(self.check_result)\n if self._tile not in self.town.backgrounds:\n self.check_result += 'tile {} not in town'.format(self._tile)\n return\n background = self.town.backgrounds[self._tile]\n BackgroundBuildCheck(background, self._building_name).check(self.\n check_result)\n if self._tile in self.town.buildings:\n self.check_result += (\"Can't build {} : {} already built on {}\"\n .format(self._building_name, self.town.buildings[self._tile\n ].name, self._tile))\n\n def _do(self):\n self.town.set_building(BuildingFactory.create_building_by_name(self\n ._building_name), self._tile)\n\n def __repr__(self):\n msg = 'Build ServerCommand : {} in {}'.format(self._building_name,\n self._tile)\n if not self.check_result:\n msg += '\\n{}'.format(self.check_result)\n return msg\n\n @classmethod\n def from_json_dict(cls, json_dict: dict) ->BuildCommand:\n return cls(json_dict['tile'], json_dict['building_name'])\n\n def to_json_dict(self):\n json_dict = super().to_json_dict()\n json_dict['command'] = 'build'\n json_dict['building_name'] = self._building_name\n json_dict['tile'] = self._tile\n return json_dict\n\n\nclass CollectResourceCommand(ServerCommand):\n ENERGY_COST = 30\n\n def __init__(self, tile: tuple, item: Item):\n ServerCommand.__init__(self)\n self._tile = tile\n self._item = item\n\n def _check(self):\n player = 
self.town.get_player(self.client_id)\n AvailableCheck(player).check(self.check_result)\n if self._tile not in self.town.resources:\n self.check_result += 'No resource in {}'.format(self._tile)\n return\n resource = self.town.resources[self._tile]\n TransactionCheck(resource, player, self._item).check(self.check_result)\n EnergyCheck(player, CollectResourceCommand.ENERGY_COST).check(self.\n check_result)\n\n def _do(self):\n player = self.town.get_player(self.client_id)\n player.inventory.add_item(self._item)\n resource = self.town.resources[self._tile]\n resource.inventory.remove_item(self._item)\n player.energy.value -= CollectResourceCommand.ENERGY_COST\n\n def __repr__(self):\n msg = 'Collect Resource ServerCommand : {}'.format(self._item)\n if not self.check_result:\n msg += '\\n{}'.format(self.check_result)\n return msg\n\n @classmethod\n def from_json_dict(cls, json_dict: dict) ->CollectResourceCommand:\n return cls(json_dict['tile'], Item.from_json_dict(json_dict['item']))\n\n def to_json_dict(self) ->dict:\n json_dict = super().to_json_dict()\n json_dict['command'] = 'collect'\n json_dict['tile'] = self._tile\n json_dict['item'] = self._item.to_json_dict()\n return json_dict\n\n\nclass BuildingProcessCommand(ServerCommand):\n\n def __init__(self, tile: tuple, building_process: BuildingProcess):\n ServerCommand.__init__(self)\n self._tile = tile\n self._building_process = building_process\n\n def _check(self):\n player = self.town.get_player(self.client_id)\n AvailableCheck(player).check(self.check_result)\n if self._tile not in self.town.buildings:\n self.check_result += 'No building on {}'.format(self._tile)\n return\n building = self.town.buildings[self._tile]\n player = self.town.get_player(self.client_id)\n InventoryRemoveCheck(building.inventory, self._building_process.\n item_required).check(self.check_result)\n InventoryAddCheck(building.inventory, self._building_process.\n item_result).check(self.check_result)\n EnergyCheck(player, 
self._building_process.energy_required).check(self\n .check_result)\n\n def _do(self):\n building = self.town.buildings[self._tile]\n building.inventory.remove_item(self._building_process.item_required)\n building.inventory.add_item(self._building_process.item_result)\n player = self.town.get_player(self.client_id)\n player.energy.value -= self._building_process.energy_required\n\n def __repr__(self):\n msg = 'BuildingProcessCommand ServerCommand {}'.format(self.\n _building_process)\n if not self.check_result:\n msg += '\\n{}'.format(self.check_result)\n return msg\n\n @classmethod\n def from_json_dict(cls, json_dict):\n return cls(json_dict['tile'], BuildingProcess.from_json_dict(\n json_dict['building_process']))\n\n def to_json_dict(self):\n json_dict = super().to_json_dict()\n json_dict['command'] = 'building_process'\n json_dict['tile'] = self._tile\n json_dict['building_process'] = self._building_process.to_json_dict()\n return json_dict\n\n\nclass BuyCommand(ServerCommand):\n\n def __init__(self, tile: tuple, transaction: BuildingTransaction):\n ServerCommand.__init__(self)\n self._tile = tile\n self._transaction = transaction\n\n def _check(self):\n building = self.town.buildings[self._tile]\n player = self.town.get_player(self.client_id)\n item = Item(self._transaction.item_name, 1)\n AvailableCheck(player).check(self.check_result)\n TransactionCheck(building, player, item).check(self.check_result)\n\n def _do(self):\n item = Item(self._transaction.item_name, 1)\n building = self.town.buildings[self._tile]\n player = self.town.get_player(self.client_id)\n building.inventory.remove_item(item)\n player.inventory.add_item(item)\n\n def __repr__(self):\n msg = 'BuyCommand ServerCommand {}'.format(self._transaction.item_name)\n if not self.check_result:\n msg += '\\n{}'.format(self.check_result)\n return msg\n\n @classmethod\n def from_json_dict(cls, json_dict):\n return cls(json_dict['tile'], BuildingTransaction.from_json_dict(\n 
json_dict['transaction']))\n\n def to_json_dict(self):\n json_dict = super().to_json_dict()\n json_dict['command'] = 'buy'\n json_dict['tile'] = self._tile\n json_dict['transaction'] = self._transaction.to_json_dict()\n return json_dict\n\n\nclass SellCommand(ServerCommand):\n\n def __init__(self, tile: tuple, transaction: BuildingTransaction):\n ServerCommand.__init__(self)\n self._tile = tile\n self._transaction = transaction\n\n def _check(self):\n building = self.town.buildings[self._tile]\n player = self.town.get_player(self.client_id)\n item = Item(self._transaction.item_name, 1)\n AvailableCheck(player).check(self.check_result)\n TransactionCheck(player, building, item).check(self.check_result)\n\n def _do(self):\n item = Item(self._transaction.item_name, 1)\n building = self.town.buildings[self._tile]\n player = self.town.get_player(self.client_id)\n building.inventory.add_item(item)\n player.inventory.remove_item(item)\n\n def __repr__(self):\n msg = 'SellCommand ServerCommand {}'.format(self._transaction.item_name\n )\n if not self.check_result:\n msg += '\\n{}'.format(self.check_result)\n return msg\n\n @classmethod\n def from_json_dict(cls, json_dict):\n return cls(json_dict['tile'], BuildingTransaction.from_json_dict(\n json_dict['transaction']))\n\n def to_json_dict(self):\n json_dict = super().to_json_dict()\n json_dict['command'] = 'sell'\n json_dict['tile'] = self._tile\n json_dict['transaction'] = self._transaction.to_json_dict()\n return json_dict\n\n\nclass BuildBuildingCommand(ServerCommand):\n ENERGY_COST = 20\n\n def __init__(self, tile: tuple, item: Item):\n ServerCommand.__init__(self)\n self._item = item\n self._tile = tile\n\n def _check(self):\n building = self.town.buildings[self._tile]\n player = self.town.get_player(self.client_id)\n AvailableCheck(player).check(self.check_result)\n EnergyCheck(player, BuildBuildingCommand.ENERGY_COST).check(self.\n check_result)\n TransactionCheck(building, building, self._item).check(self.\n 
check_result)\n\n def _do(self):\n building = self.town.buildings[self._tile]\n player = self.town.get_player(self.client_id)\n player.energy.value -= BuildBuildingCommand.ENERGY_COST\n building.inventory.remove_item(self._item)\n building.construction_inventory.add_item(self._item)\n\n def __repr__(self):\n msg = 'Build Building ServerCommand {}'.format(self._item)\n if not self.check_result:\n msg += '\\n{}'.format(self.check_result)\n return msg\n\n @classmethod\n def from_json_dict(cls, json_dict):\n return cls(json_dict['tile'], Item.from_json_dict(json_dict['item']))\n\n def to_json_dict(self):\n json_dict = super().to_json_dict()\n json_dict['command'] = 'build_building'\n json_dict['tile'] = self._tile\n json_dict['item'] = self._item.to_json_dict()\n return json_dict\n\n\nclass UpgradeBuildingCommand(ServerCommand):\n\n def __init__(self, tile: tuple):\n ServerCommand.__init__(self)\n self._tile = tile\n\n def _check(self):\n building = self.town.buildings[self._tile]\n player = self.town.get_player(self.client_id)\n AvailableCheck(player).check(self.check_result)\n if not building.construction_inventory.is_full():\n self.check_result += 'construction not finished'\n\n def _do(self):\n building = self.town.buildings[self._tile]\n building.upgrade()\n\n def __repr__(self):\n msg = 'Upgrade Building ServerCommand {}'.format(self._tile)\n if not self.check_result:\n msg += '\\n{}'.format(self.check_result)\n return msg\n\n @classmethod\n def from_json_dict(cls, json_dict):\n return cls(json_dict['tile'])\n\n def to_json_dict(self):\n json_dict = super().to_json_dict()\n json_dict['command'] = 'upgrade_building'\n json_dict['tile'] = self._tile\n return json_dict\n\n\nclass SleepCommand(ServerCommand):\n ENERGY_REGEN_IN_HOUSE = 4\n ENERGY_REGEN_IN_GROUND = 2\n\n def __init__(self):\n ServerCommand.__init__(self)\n\n def _check(self):\n tile = self.town.get_player_tile(self.client_id)\n if tile in self.town.buildings and self.town.buildings[tile\n ].name != 
'cabane':\n self.check_result += \"Can't sleep in building\"\n\n def _do(self):\n player = self.town.get_player(self.client_id)\n tile = self.town.get_player_tile(self.client_id)\n player.status = 'sleep'\n player.energy.regen = SleepCommand.ENERGY_REGEN_IN_GROUND\n if tile in self.town.buildings and self.town.buildings[tile\n ].name == 'cabane':\n player.energy.regen = SleepCommand.ENERGY_REGEN_IN_HOUSE\n\n def __repr__(self):\n msg = 'Sleep command. Player id: {}'.format(self.client_id)\n if not self.check_result:\n msg += '\\n{}'.format(self.check_result)\n return msg\n\n @classmethod\n def from_json_dict(cls, json_dict: dict) ->SleepCommand:\n return cls()\n\n def to_json_dict(self):\n json_dict = super().to_json_dict()\n json_dict['command'] = 'sleep'\n return json_dict\n\n\nclass WakeUpCommand(ServerCommand):\n\n def __init__(self):\n ServerCommand.__init__(self)\n\n def _check(self):\n player = self.town.get_player(self.client_id)\n is_awaken_check = CheckResult()\n AwakenCheck(player).check(is_awaken_check)\n if is_awaken_check:\n self.check_result += '{} is already awake'.format(player.name)\n\n def _do(self):\n player = self.town.get_player(self.client_id)\n player.status = 'idle'\n player.energy.reset_regen()\n\n def __repr__(self):\n msg = 'Wake up command. 
Player id: {}'.format(self.client_id)\n if not self.check_result:\n msg += '\\n{}'.format(self.check_result)\n return msg\n\n @classmethod\n def from_json_dict(cls, json_dict: dict) ->WakeUpCommand:\n return cls()\n\n def to_json_dict(self):\n json_dict = super().to_json_dict()\n json_dict['command'] = 'wakeup'\n return json_dict\n\n\nclass HelpPlayerCommand(ServerCommand):\n ENERGY_TO_HELP = 20\n HEALTH_TO_GIVE = 1\n\n def __init__(self, player_to_help_id):\n ServerCommand.__init__(self)\n self._player_to_help_id = player_to_help_id\n\n def _check(self):\n player = self.town.get_player(self.client_id)\n AvailableCheck(player).check(self.check_result)\n if self.client_id not in self.town.players.keys():\n self.check_result += 'Player {} does not exist'.format(self.\n client_id)\n return\n if self._player_to_help_id not in self.town.players.keys():\n self.check_result += 'Player {} does not exist'.format(self.\n _player_to_help_id)\n return\n if self.town.get_player_tile(self.client_id\n ) != self.town.get_player_tile(self._player_to_help_id):\n self.check_result += ('Players {} and {} are not in the same tile'\n .format(self.client_id, self._player_to_help_id))\n return\n EnergyCheck(self.town.get_player(self.client_id), HelpPlayerCommand\n .ENERGY_TO_HELP).check(self.check_result)\n is_alive_check = CheckResult()\n AvailableCheck(self.town.get_player(self._player_to_help_id)).check(\n is_alive_check)\n if is_alive_check:\n self.check_result += '{} has enough health to keep moving'.format(\n self._player_to_help_id)\n\n def _do(self):\n player_helper = self.town.get_player(self.client_id)\n player_helper.energy.value -= HelpPlayerCommand.ENERGY_TO_HELP\n player_to_help = self.town.get_player(self._player_to_help_id)\n player_to_help.health.value += HelpPlayerCommand.HEALTH_TO_GIVE\n\n def __repr__(self):\n msg = 'HelpPlayerCommand: try to help {}'.format(self.\n _player_to_help_id)\n if not self.check_result:\n msg += '\\n{}'.format(self.check_result)\n return 
msg\n\n @classmethod\n def from_json_dict(cls, json_dict: dict) ->HelpPlayerCommand:\n return cls(json_dict['player_to_help_id'])\n\n def to_json_dict(self):\n json_dict = super().to_json_dict()\n json_dict['command'] = 'help'\n json_dict['player_to_help_id'] = self._player_to_help_id\n return json_dict\n\n\nclass CommandsFactory:\n COMMANDS_DICT = {}\n COMMANDS_DICT['move'] = MovePlayerCommand\n COMMANDS_DICT['build'] = BuildCommand\n COMMANDS_DICT['collect'] = CollectResourceCommand\n COMMANDS_DICT['building_process'] = BuildingProcessCommand\n COMMANDS_DICT['buy'] = BuyCommand\n COMMANDS_DICT['sell'] = SellCommand\n COMMANDS_DICT['build_building'] = BuildBuildingCommand\n COMMANDS_DICT['upgrade_building'] = UpgradeBuildingCommand\n COMMANDS_DICT['help'] = HelpPlayerCommand\n COMMANDS_DICT['sleep'] = SleepCommand\n COMMANDS_DICT['wakeup'] = WakeUpCommand\n\n @staticmethod\n def from_podsixnet(podsixnet_dict):\n if podsixnet_dict['command'] in CommandsFactory.COMMANDS_DICT:\n command = CommandsFactory.COMMANDS_DICT[podsixnet_dict['command']\n ].from_json_dict(podsixnet_dict)\n else:\n raise NotImplementedError\n command.client_id = podsixnet_dict['client_id']\n command.check_result = CheckResult.from_json_dict(podsixnet_dict[\n 'check_result'])\n return command\n",
"step-5": "from __future__ import annotations\n\nimport math\nfrom abc import abstractmethod\n\nfrom pytown_core.patterns.behavioral import Command\nfrom pytown_core.serializers import IJSONSerializable\n\nfrom .buildings import BuildingProcess, BuildingTransaction\nfrom .buildings.factory import BuildingFactory\nfrom .check import (\n AvailableCheck,\n AwakenCheck,\n BackgroundBuildCheck,\n BackgroundMovementCheck,\n CheckResult,\n EnergyCheck,\n InventoryAddCheck,\n InventoryRemoveCheck,\n TransactionCheck,\n)\nfrom .inventory import Item\n\n\nclass ServerCommand(IJSONSerializable, Command):\n def __init__(self):\n\n self.client_id = None\n self.town = None # TODO: will be set by townmanager\n self.check_result = CheckResult()\n\n def execute(self):\n self._check()\n\n if self.check_result:\n self._do()\n\n @abstractmethod\n def _check(self):\n raise NotImplementedError\n\n @abstractmethod\n def _do(self):\n raise NotImplementedError\n\n @abstractmethod\n def __repr__(self):\n pass\n\n @classmethod\n @abstractmethod\n def from_json_dict(cls, json_dict) -> ServerCommand:\n raise NotImplementedError\n\n def to_json_dict(self) -> dict:\n json_dict = {}\n json_dict[\"client_id\"] = self.client_id\n json_dict[\"check_result\"] = self.check_result.to_json_dict()\n return json_dict\n\n def to_podsixnet(self):\n podsixnet_dict = self.to_json_dict()\n podsixnet_dict[\"action\"] = \"command\"\n return podsixnet_dict\n\n\nclass MovePlayerCommand(ServerCommand):\n\n ENERGY_COST = 1\n\n def __init__(self, direction: str):\n ServerCommand.__init__(self)\n\n self._direction = direction\n\n def __repr__(self):\n msg = \"Move ServerCommand : {}\".format(self._direction)\n if not self.check_result:\n msg += \"\\n{}\".format(self.check_result)\n return msg\n\n def _check(self):\n player = self.town.get_player(self.client_id)\n EnergyCheck(player, MovePlayerCommand.ENERGY_COST).check(self.check_result)\n\n AvailableCheck(player).check(self.check_result)\n\n for tile in 
self._get_tiles_coordinates_dict().values():\n if tile not in self.town.backgrounds.keys():\n self.check_result += \"tile {} not in town\".format(tile)\n return\n\n BackgroundMovementCheck(self.town.backgrounds[tile], player).check(\n self.check_result\n )\n\n def _do(self):\n\n (x_dest, y_dest) = self.tile_dest\n player = self.town.get_player(self.client_id)\n player.status = \"move\"\n player.direction = self._direction\n player.energy.value -= MovePlayerCommand.ENERGY_COST\n\n player.x = x_dest\n player.y = y_dest\n\n @property\n def tile_dest(self) -> tuple:\n movement_matrix = {}\n movement_matrix[\"left\"] = (-1, 0)\n movement_matrix[\"right\"] = (+1, 0)\n movement_matrix[\"up\"] = (0, -1)\n movement_matrix[\"down\"] = (0, +1)\n\n player = self.town.get_player(self.client_id)\n tile = self.town.get_player_tile(self.client_id)\n background = self.town.backgrounds[tile]\n\n bg_multiplicator = background.move_multiplicator\n x_dest = (\n player.x\n + movement_matrix[self._direction][0] * bg_multiplicator * player.velocity\n )\n y_dest = (\n player.y\n + movement_matrix[self._direction][1] * bg_multiplicator * player.velocity\n )\n\n return (x_dest, y_dest)\n\n def _get_tiles_coordinates_dict(self):\n\n (x_dest, y_dest) = self.tile_dest\n\n tiles_coordinates_dict = {\n \"topleft\": (math.floor(x_dest), math.floor(y_dest)),\n \"topright\": (math.floor(x_dest + 0.99), math.floor(y_dest)),\n \"bottomleft\": (math.floor(x_dest), math.floor(y_dest + 0.99)),\n \"bottomright\": (math.floor(x_dest + 0.99), math.floor(y_dest + 0.99)),\n }\n return tiles_coordinates_dict\n\n @classmethod\n def from_json_dict(cls, json_dict) -> MovePlayerCommand:\n return cls(json_dict[\"direction\"])\n\n def to_json_dict(self):\n json_dict = super().to_json_dict()\n json_dict[\"command\"] = \"move\"\n json_dict[\"direction\"] = self._direction\n return json_dict\n\n\nclass BuildCommand(ServerCommand):\n def __init__(self, tile: tuple, building_name: str):\n ServerCommand.__init__(self)\n\n 
self._tile = tile\n self._building_name = building_name\n\n def _check(self):\n\n player = self.town.get_player(self.client_id)\n AvailableCheck(player).check(self.check_result)\n\n if self._tile not in self.town.backgrounds:\n self.check_result += \"tile {} not in town\".format(self._tile)\n return\n\n background = self.town.backgrounds[self._tile]\n BackgroundBuildCheck(background, self._building_name).check(self.check_result)\n\n if self._tile in self.town.buildings:\n self.check_result += \"Can't build {} : {} already built on {}\".format(\n self._building_name, self.town.buildings[self._tile].name, self._tile\n )\n\n def _do(self):\n self.town.set_building(\n BuildingFactory.create_building_by_name(self._building_name), self._tile\n )\n\n def __repr__(self):\n msg = \"Build ServerCommand : {} in {}\".format(self._building_name, self._tile)\n if not self.check_result:\n msg += \"\\n{}\".format(self.check_result)\n return msg\n\n @classmethod\n def from_json_dict(cls, json_dict: dict) -> BuildCommand:\n return cls(json_dict[\"tile\"], json_dict[\"building_name\"])\n\n def to_json_dict(self):\n json_dict = super().to_json_dict()\n json_dict[\"command\"] = \"build\"\n json_dict[\"building_name\"] = self._building_name\n json_dict[\"tile\"] = self._tile\n return json_dict\n\n\nclass CollectResourceCommand(ServerCommand):\n\n ENERGY_COST = 30\n\n def __init__(self, tile: tuple, item: Item):\n ServerCommand.__init__(self)\n\n self._tile = tile\n self._item = item\n\n def _check(self):\n player = self.town.get_player(self.client_id)\n\n AvailableCheck(player).check(self.check_result)\n\n if self._tile not in self.town.resources:\n self.check_result += \"No resource in {}\".format(self._tile)\n return\n\n resource = self.town.resources[self._tile]\n\n TransactionCheck(resource, player, self._item).check(self.check_result)\n\n EnergyCheck(player, CollectResourceCommand.ENERGY_COST).check(self.check_result)\n\n def _do(self):\n player = 
self.town.get_player(self.client_id)\n player.inventory.add_item(self._item)\n resource = self.town.resources[self._tile]\n resource.inventory.remove_item(self._item)\n player.energy.value -= CollectResourceCommand.ENERGY_COST\n\n def __repr__(self):\n msg = \"Collect Resource ServerCommand : {}\".format(self._item)\n if not self.check_result:\n msg += \"\\n{}\".format(self.check_result)\n return msg\n\n @classmethod\n def from_json_dict(cls, json_dict: dict) -> CollectResourceCommand:\n return cls(json_dict[\"tile\"], Item.from_json_dict(json_dict[\"item\"]))\n\n def to_json_dict(self) -> dict:\n json_dict = super().to_json_dict()\n json_dict[\"command\"] = \"collect\"\n json_dict[\"tile\"] = self._tile\n json_dict[\"item\"] = self._item.to_json_dict()\n return json_dict\n\n\nclass BuildingProcessCommand(ServerCommand):\n def __init__(self, tile: tuple, building_process: BuildingProcess):\n ServerCommand.__init__(self)\n\n self._tile = tile\n self._building_process = building_process\n\n def _check(self):\n\n player = self.town.get_player(self.client_id)\n AvailableCheck(player).check(self.check_result)\n\n if self._tile not in self.town.buildings:\n self.check_result += \"No building on {}\".format(self._tile)\n return\n\n building = self.town.buildings[self._tile]\n player = self.town.get_player(self.client_id)\n\n InventoryRemoveCheck(\n building.inventory, self._building_process.item_required\n ).check(self.check_result)\n InventoryAddCheck(building.inventory, self._building_process.item_result).check(\n self.check_result\n )\n EnergyCheck(player, self._building_process.energy_required).check(\n self.check_result\n )\n\n def _do(self):\n building = self.town.buildings[self._tile]\n building.inventory.remove_item(self._building_process.item_required)\n building.inventory.add_item(self._building_process.item_result)\n player = self.town.get_player(self.client_id)\n player.energy.value -= self._building_process.energy_required\n\n def __repr__(self):\n msg = 
\"BuildingProcessCommand ServerCommand {}\".format(self._building_process)\n if not self.check_result:\n msg += \"\\n{}\".format(self.check_result)\n return msg\n\n @classmethod\n def from_json_dict(cls, json_dict):\n return cls(\n json_dict[\"tile\"],\n BuildingProcess.from_json_dict(json_dict[\"building_process\"]),\n )\n\n def to_json_dict(self):\n json_dict = super().to_json_dict()\n json_dict[\"command\"] = \"building_process\"\n json_dict[\"tile\"] = self._tile\n json_dict[\"building_process\"] = self._building_process.to_json_dict()\n return json_dict\n\n\nclass BuyCommand(ServerCommand):\n def __init__(self, tile: tuple, transaction: BuildingTransaction):\n ServerCommand.__init__(self)\n\n self._tile = tile\n self._transaction = transaction\n\n def _check(self):\n building = self.town.buildings[self._tile]\n player = self.town.get_player(self.client_id)\n\n item = Item(self._transaction.item_name, 1)\n\n AvailableCheck(player).check(self.check_result)\n\n TransactionCheck(building, player, item).check(self.check_result)\n\n def _do(self):\n item = Item(self._transaction.item_name, 1)\n building = self.town.buildings[self._tile]\n player = self.town.get_player(self.client_id)\n building.inventory.remove_item(item)\n player.inventory.add_item(item)\n\n def __repr__(self):\n msg = \"BuyCommand ServerCommand {}\".format(self._transaction.item_name)\n if not self.check_result:\n msg += \"\\n{}\".format(self.check_result)\n return msg\n\n @classmethod\n def from_json_dict(cls, json_dict):\n return cls(\n json_dict[\"tile\"],\n BuildingTransaction.from_json_dict(json_dict[\"transaction\"]),\n )\n\n def to_json_dict(self):\n json_dict = super().to_json_dict()\n json_dict[\"command\"] = \"buy\"\n json_dict[\"tile\"] = self._tile\n json_dict[\"transaction\"] = self._transaction.to_json_dict()\n return json_dict\n\n\nclass SellCommand(ServerCommand):\n def __init__(self, tile: tuple, transaction: BuildingTransaction):\n ServerCommand.__init__(self)\n\n self._tile = 
tile\n self._transaction = transaction\n\n def _check(self):\n building = self.town.buildings[self._tile]\n player = self.town.get_player(self.client_id)\n\n item = Item(self._transaction.item_name, 1)\n\n AvailableCheck(player).check(self.check_result)\n TransactionCheck(player, building, item).check(self.check_result)\n\n def _do(self):\n item = Item(self._transaction.item_name, 1)\n building = self.town.buildings[self._tile]\n player = self.town.get_player(self.client_id)\n building.inventory.add_item(item)\n player.inventory.remove_item(item)\n\n def __repr__(self):\n msg = \"SellCommand ServerCommand {}\".format(self._transaction.item_name)\n if not self.check_result:\n msg += \"\\n{}\".format(self.check_result)\n return msg\n\n @classmethod\n def from_json_dict(cls, json_dict):\n return cls(\n json_dict[\"tile\"],\n BuildingTransaction.from_json_dict(json_dict[\"transaction\"]),\n )\n\n def to_json_dict(self):\n json_dict = super().to_json_dict()\n json_dict[\"command\"] = \"sell\"\n json_dict[\"tile\"] = self._tile\n json_dict[\"transaction\"] = self._transaction.to_json_dict()\n return json_dict\n\n\nclass BuildBuildingCommand(ServerCommand):\n\n ENERGY_COST = 20\n\n def __init__(self, tile: tuple, item: Item):\n ServerCommand.__init__(self)\n\n self._item = item\n self._tile = tile\n\n def _check(self):\n building = self.town.buildings[self._tile]\n player = self.town.get_player(self.client_id)\n\n AvailableCheck(player).check(self.check_result)\n\n EnergyCheck(player, BuildBuildingCommand.ENERGY_COST).check(self.check_result)\n TransactionCheck(building, building, self._item).check(self.check_result)\n\n def _do(self):\n building = self.town.buildings[self._tile]\n player = self.town.get_player(self.client_id)\n\n player.energy.value -= BuildBuildingCommand.ENERGY_COST\n building.inventory.remove_item(self._item)\n building.construction_inventory.add_item(self._item)\n\n def __repr__(self):\n msg = \"Build Building ServerCommand {}\".format(self._item)\n 
if not self.check_result:\n msg += \"\\n{}\".format(self.check_result)\n return msg\n\n @classmethod\n def from_json_dict(cls, json_dict):\n return cls(json_dict[\"tile\"], Item.from_json_dict(json_dict[\"item\"]))\n\n def to_json_dict(self):\n json_dict = super().to_json_dict()\n json_dict[\"command\"] = \"build_building\"\n json_dict[\"tile\"] = self._tile\n json_dict[\"item\"] = self._item.to_json_dict()\n return json_dict\n\n\nclass UpgradeBuildingCommand(ServerCommand):\n def __init__(self, tile: tuple):\n ServerCommand.__init__(self)\n\n self._tile = tile\n\n def _check(self):\n building = self.town.buildings[self._tile]\n player = self.town.get_player(self.client_id)\n AvailableCheck(player).check(self.check_result)\n\n if not building.construction_inventory.is_full():\n self.check_result += \"construction not finished\"\n\n def _do(self):\n building = self.town.buildings[self._tile]\n building.upgrade()\n\n def __repr__(self):\n msg = \"Upgrade Building ServerCommand {}\".format(self._tile)\n if not self.check_result:\n msg += \"\\n{}\".format(self.check_result)\n return msg\n\n @classmethod\n def from_json_dict(cls, json_dict):\n return cls(json_dict[\"tile\"])\n\n def to_json_dict(self):\n json_dict = super().to_json_dict()\n json_dict[\"command\"] = \"upgrade_building\"\n json_dict[\"tile\"] = self._tile\n return json_dict\n\n\nclass SleepCommand(ServerCommand):\n\n ENERGY_REGEN_IN_HOUSE = 4\n ENERGY_REGEN_IN_GROUND = 2\n\n def __init__(self):\n ServerCommand.__init__(self)\n\n def _check(self):\n\n tile = self.town.get_player_tile(self.client_id)\n\n # Player not in building\n if tile in self.town.buildings and self.town.buildings[tile].name != \"cabane\":\n self.check_result += \"Can't sleep in building\"\n\n def _do(self):\n\n player = self.town.get_player(self.client_id)\n tile = self.town.get_player_tile(self.client_id)\n\n # Change player sprite\n player.status = \"sleep\"\n player.energy.regen = SleepCommand.ENERGY_REGEN_IN_GROUND\n\n # Change 
energy regeneration depending on where he sleeps\n if tile in self.town.buildings and self.town.buildings[tile].name == \"cabane\":\n player.energy.regen = SleepCommand.ENERGY_REGEN_IN_HOUSE\n\n def __repr__(self):\n msg = \"Sleep command. Player id: {}\".format(self.client_id)\n if not self.check_result:\n msg += \"\\n{}\".format(self.check_result)\n return msg\n\n @classmethod\n def from_json_dict(cls, json_dict: dict) -> SleepCommand:\n return cls()\n\n def to_json_dict(self):\n json_dict = super().to_json_dict()\n json_dict[\"command\"] = \"sleep\"\n return json_dict\n\n\nclass WakeUpCommand(ServerCommand):\n def __init__(self):\n ServerCommand.__init__(self)\n\n def _check(self):\n\n player = self.town.get_player(self.client_id)\n\n is_awaken_check = CheckResult()\n AwakenCheck(player).check(is_awaken_check)\n\n if is_awaken_check:\n self.check_result += \"{} is already awake\".format(player.name)\n\n def _do(self):\n\n player = self.town.get_player(self.client_id)\n player.status = \"idle\"\n\n player.energy.reset_regen()\n\n def __repr__(self):\n msg = \"Wake up command. 
Player id: {}\".format(self.client_id)\n if not self.check_result:\n msg += \"\\n{}\".format(self.check_result)\n return msg\n\n @classmethod\n def from_json_dict(cls, json_dict: dict) -> WakeUpCommand:\n return cls()\n\n def to_json_dict(self):\n json_dict = super().to_json_dict()\n json_dict[\"command\"] = \"wakeup\"\n return json_dict\n\n\nclass HelpPlayerCommand(ServerCommand):\n\n ENERGY_TO_HELP = 20\n HEALTH_TO_GIVE = 1\n\n def __init__(self, player_to_help_id):\n ServerCommand.__init__(self)\n\n self._player_to_help_id = player_to_help_id\n\n def _check(self):\n player = self.town.get_player(self.client_id)\n AvailableCheck(player).check(self.check_result)\n\n # The two players id exists in the town ?\n if self.client_id not in self.town.players.keys():\n self.check_result += \"Player {} does not exist\".format(self.client_id)\n return\n\n if self._player_to_help_id not in self.town.players.keys():\n self.check_result += \"Player {} does not exist\".format(\n self._player_to_help_id\n )\n return\n\n # Check if the two players are in the same tile\n if self.town.get_player_tile(self.client_id) != self.town.get_player_tile(\n self._player_to_help_id\n ):\n self.check_result += \"Players {} and {} are not in the same tile\".format(\n self.client_id, self._player_to_help_id\n )\n return\n\n # Check if I have enough energy to help\n EnergyCheck(\n self.town.get_player(self.client_id), HelpPlayerCommand.ENERGY_TO_HELP\n ).check(self.check_result)\n\n # Check if patient doesn't have health\n is_alive_check = CheckResult()\n AvailableCheck(self.town.get_player(self._player_to_help_id)).check(\n is_alive_check\n )\n\n if is_alive_check:\n self.check_result += \"{} has enough health to keep moving\".format(\n self._player_to_help_id\n )\n\n def _do(self):\n\n player_helper = self.town.get_player(self.client_id)\n player_helper.energy.value -= HelpPlayerCommand.ENERGY_TO_HELP\n\n player_to_help = self.town.get_player(self._player_to_help_id)\n 
player_to_help.health.value += HelpPlayerCommand.HEALTH_TO_GIVE\n\n def __repr__(self):\n msg = \"HelpPlayerCommand: try to help {}\".format(self._player_to_help_id)\n if not self.check_result:\n msg += \"\\n{}\".format(self.check_result)\n return msg\n\n @classmethod\n def from_json_dict(cls, json_dict: dict) -> HelpPlayerCommand:\n return cls(json_dict[\"player_to_help_id\"])\n\n def to_json_dict(self):\n json_dict = super().to_json_dict()\n json_dict[\"command\"] = \"help\"\n json_dict[\"player_to_help_id\"] = self._player_to_help_id\n return json_dict\n\n\nclass CommandsFactory:\n\n COMMANDS_DICT = {}\n COMMANDS_DICT[\"move\"] = MovePlayerCommand\n COMMANDS_DICT[\"build\"] = BuildCommand\n COMMANDS_DICT[\"collect\"] = CollectResourceCommand\n COMMANDS_DICT[\"building_process\"] = BuildingProcessCommand\n COMMANDS_DICT[\"buy\"] = BuyCommand\n COMMANDS_DICT[\"sell\"] = SellCommand\n COMMANDS_DICT[\"build_building\"] = BuildBuildingCommand\n COMMANDS_DICT[\"upgrade_building\"] = UpgradeBuildingCommand\n COMMANDS_DICT[\"help\"] = HelpPlayerCommand\n COMMANDS_DICT[\"sleep\"] = SleepCommand\n COMMANDS_DICT[\"wakeup\"] = WakeUpCommand\n\n @staticmethod\n def from_podsixnet(podsixnet_dict):\n\n if podsixnet_dict[\"command\"] in CommandsFactory.COMMANDS_DICT:\n command = CommandsFactory.COMMANDS_DICT[\n podsixnet_dict[\"command\"]\n ].from_json_dict(podsixnet_dict)\n else:\n raise NotImplementedError\n\n command.client_id = podsixnet_dict[\"client_id\"]\n command.check_result = CheckResult.from_json_dict(\n podsixnet_dict[\"check_result\"]\n )\n return command\n",
"step-ids": [
72,
74,
84,
86,
98
]
}
|
[
72,
74,
84,
86,
98
] |
import numpy
numpy.random.seed(1)
M = 20
N = 100
import numpy as np
x = np.random.randn(N, 2)
w = np.random.randn(M, 2)
f = np.einsum('ik,jk->ij', w, x)
y = f + 0.1*np.random.randn(M, N)
D = 10
from bayespy.nodes import GaussianARD, Gamma, SumMultiply
X = GaussianARD(0, 1, plates=(1,N), shape=(D,))
alpha = Gamma(1e-5, 1e-5, plates=(D,))
C = GaussianARD(0, alpha, plates=(M,1), shape=(D,))
F = SumMultiply('d,d->', X, C)
tau = Gamma(1e-5, 1e-5)
Y = GaussianARD(F, tau)
Y.observe(y)
from bayespy.inference import VB
Q = VB(Y, X, C, alpha, tau)
C.initialize_from_random()
from bayespy.inference.vmp.transformations import RotateGaussianARD
rot_X = RotateGaussianARD(X)
rot_C = RotateGaussianARD(C, alpha)
from bayespy.inference.vmp.transformations import RotationOptimizer
R = RotationOptimizer(rot_X, rot_C, D)
Q.set_callback(R.rotate)
Q.update(repeat=1000)
import bayespy.plot as bpplt
bpplt.hinton(C)
|
normal
|
{
"blob_id": "9af2b94c6eef47dad0348a5437593cc8561a7deb",
"index": 3593,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nnumpy.random.seed(1)\n<mask token>\nY.observe(y)\n<mask token>\nC.initialize_from_random()\n<mask token>\nQ.set_callback(R.rotate)\nQ.update(repeat=1000)\n<mask token>\nbpplt.hinton(C)\n",
"step-3": "<mask token>\nnumpy.random.seed(1)\nM = 20\nN = 100\n<mask token>\nx = np.random.randn(N, 2)\nw = np.random.randn(M, 2)\nf = np.einsum('ik,jk->ij', w, x)\ny = f + 0.1 * np.random.randn(M, N)\nD = 10\n<mask token>\nX = GaussianARD(0, 1, plates=(1, N), shape=(D,))\nalpha = Gamma(1e-05, 1e-05, plates=(D,))\nC = GaussianARD(0, alpha, plates=(M, 1), shape=(D,))\nF = SumMultiply('d,d->', X, C)\ntau = Gamma(1e-05, 1e-05)\nY = GaussianARD(F, tau)\nY.observe(y)\n<mask token>\nQ = VB(Y, X, C, alpha, tau)\nC.initialize_from_random()\n<mask token>\nrot_X = RotateGaussianARD(X)\nrot_C = RotateGaussianARD(C, alpha)\n<mask token>\nR = RotationOptimizer(rot_X, rot_C, D)\nQ.set_callback(R.rotate)\nQ.update(repeat=1000)\n<mask token>\nbpplt.hinton(C)\n",
"step-4": "import numpy\nnumpy.random.seed(1)\nM = 20\nN = 100\nimport numpy as np\nx = np.random.randn(N, 2)\nw = np.random.randn(M, 2)\nf = np.einsum('ik,jk->ij', w, x)\ny = f + 0.1 * np.random.randn(M, N)\nD = 10\nfrom bayespy.nodes import GaussianARD, Gamma, SumMultiply\nX = GaussianARD(0, 1, plates=(1, N), shape=(D,))\nalpha = Gamma(1e-05, 1e-05, plates=(D,))\nC = GaussianARD(0, alpha, plates=(M, 1), shape=(D,))\nF = SumMultiply('d,d->', X, C)\ntau = Gamma(1e-05, 1e-05)\nY = GaussianARD(F, tau)\nY.observe(y)\nfrom bayespy.inference import VB\nQ = VB(Y, X, C, alpha, tau)\nC.initialize_from_random()\nfrom bayespy.inference.vmp.transformations import RotateGaussianARD\nrot_X = RotateGaussianARD(X)\nrot_C = RotateGaussianARD(C, alpha)\nfrom bayespy.inference.vmp.transformations import RotationOptimizer\nR = RotationOptimizer(rot_X, rot_C, D)\nQ.set_callback(R.rotate)\nQ.update(repeat=1000)\nimport bayespy.plot as bpplt\nbpplt.hinton(C)\n",
"step-5": "import numpy\nnumpy.random.seed(1)\nM = 20\nN = 100\nimport numpy as np\nx = np.random.randn(N, 2)\nw = np.random.randn(M, 2)\nf = np.einsum('ik,jk->ij', w, x)\ny = f + 0.1*np.random.randn(M, N)\nD = 10\nfrom bayespy.nodes import GaussianARD, Gamma, SumMultiply\nX = GaussianARD(0, 1, plates=(1,N), shape=(D,))\nalpha = Gamma(1e-5, 1e-5, plates=(D,))\nC = GaussianARD(0, alpha, plates=(M,1), shape=(D,))\nF = SumMultiply('d,d->', X, C)\ntau = Gamma(1e-5, 1e-5)\nY = GaussianARD(F, tau)\nY.observe(y)\nfrom bayespy.inference import VB\nQ = VB(Y, X, C, alpha, tau)\nC.initialize_from_random()\nfrom bayespy.inference.vmp.transformations import RotateGaussianARD\nrot_X = RotateGaussianARD(X)\nrot_C = RotateGaussianARD(C, alpha)\nfrom bayespy.inference.vmp.transformations import RotationOptimizer\nR = RotationOptimizer(rot_X, rot_C, D)\nQ.set_callback(R.rotate)\nQ.update(repeat=1000)\nimport bayespy.plot as bpplt\nbpplt.hinton(C)",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def tile_number(lon_deg, lat_deg, zoom):
n = 2.0 ** zoom
xtile = int((lon_deg + 180.0) / 360.0 * n)
ytile = int((lat_deg + 90.0) / 180.0 * n)
return xtile, ytile
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def Distance(t1, t2):
RADIUS = 6371000.0
p1 = [0, 0]
p2 = [0, 0]
p1[0] = t1[0] * math.pi / 180.0
p1[1] = t1[1] * math.pi / 180.0
p2[0] = t2[0] * math.pi / 180.0
p2[1] = t2[1] * math.pi / 180.0
d_lat = p2[0] - p1[0]
d_lon = p2[1] - p1[1]
a = math.sin(d_lat / 2) * math.sin(d_lat / 2) + math.cos(p1[0]) * math.cos(
p2[0]) * math.sin(d_lon / 2) * math.sin(d_lon / 2)
c = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))
d = RADIUS * c
return d
def tile_number(lon_deg, lat_deg, zoom):
n = 2.0 ** zoom
xtile = int((lon_deg + 180.0) / 360.0 * n)
ytile = int((lat_deg + 90.0) / 180.0 * n)
return xtile, ytile
<|reserved_special_token_1|>
import math
def Distance(t1, t2):
RADIUS = 6371000.0
p1 = [0, 0]
p2 = [0, 0]
p1[0] = t1[0] * math.pi / 180.0
p1[1] = t1[1] * math.pi / 180.0
p2[0] = t2[0] * math.pi / 180.0
p2[1] = t2[1] * math.pi / 180.0
d_lat = p2[0] - p1[0]
d_lon = p2[1] - p1[1]
a = math.sin(d_lat / 2) * math.sin(d_lat / 2) + math.cos(p1[0]) * math.cos(
p2[0]) * math.sin(d_lon / 2) * math.sin(d_lon / 2)
c = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))
d = RADIUS * c
return d
def tile_number(lon_deg, lat_deg, zoom):
n = 2.0 ** zoom
xtile = int((lon_deg + 180.0) / 360.0 * n)
ytile = int((lat_deg + 90.0) / 180.0 * n)
return xtile, ytile
<|reserved_special_token_1|>
import math
def Distance(t1, t2):
RADIUS = 6371000. # earth's mean radius in km
p1 = [0, 0]
p2 = [0, 0]
p1[0] = t1[0] * math.pi / 180.
p1[1] = t1[1] * math.pi / 180.
p2[0] = t2[0] * math.pi / 180.
p2[1] = t2[1] * math.pi / 180.
d_lat = (p2[0] - p1[0])
d_lon = (p2[1] - p1[1])
a = math.sin(d_lat / 2) * math.sin(d_lat / 2) + math.cos(
p1[0]) * math.cos(p2[0]) * math.sin(d_lon / 2) * math.sin(d_lon / 2)
c = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))
d = RADIUS * c
return d
def tile_number(lon_deg, lat_deg, zoom):
n = 2.0 ** zoom
xtile = int((lon_deg + 180.0) / 360.0 * n)
ytile = int((lat_deg + 90.0) / 180.0 * n)
return (xtile, ytile)
|
flexible
|
{
"blob_id": "f3f5b14917c89c5bc2866dd56e212bd3ec8af1cd",
"index": 4841,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef tile_number(lon_deg, lat_deg, zoom):\n n = 2.0 ** zoom\n xtile = int((lon_deg + 180.0) / 360.0 * n)\n ytile = int((lat_deg + 90.0) / 180.0 * n)\n return xtile, ytile\n",
"step-3": "<mask token>\n\n\ndef Distance(t1, t2):\n RADIUS = 6371000.0\n p1 = [0, 0]\n p2 = [0, 0]\n p1[0] = t1[0] * math.pi / 180.0\n p1[1] = t1[1] * math.pi / 180.0\n p2[0] = t2[0] * math.pi / 180.0\n p2[1] = t2[1] * math.pi / 180.0\n d_lat = p2[0] - p1[0]\n d_lon = p2[1] - p1[1]\n a = math.sin(d_lat / 2) * math.sin(d_lat / 2) + math.cos(p1[0]) * math.cos(\n p2[0]) * math.sin(d_lon / 2) * math.sin(d_lon / 2)\n c = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))\n d = RADIUS * c\n return d\n\n\ndef tile_number(lon_deg, lat_deg, zoom):\n n = 2.0 ** zoom\n xtile = int((lon_deg + 180.0) / 360.0 * n)\n ytile = int((lat_deg + 90.0) / 180.0 * n)\n return xtile, ytile\n",
"step-4": "import math\n\n\ndef Distance(t1, t2):\n RADIUS = 6371000.0\n p1 = [0, 0]\n p2 = [0, 0]\n p1[0] = t1[0] * math.pi / 180.0\n p1[1] = t1[1] * math.pi / 180.0\n p2[0] = t2[0] * math.pi / 180.0\n p2[1] = t2[1] * math.pi / 180.0\n d_lat = p2[0] - p1[0]\n d_lon = p2[1] - p1[1]\n a = math.sin(d_lat / 2) * math.sin(d_lat / 2) + math.cos(p1[0]) * math.cos(\n p2[0]) * math.sin(d_lon / 2) * math.sin(d_lon / 2)\n c = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))\n d = RADIUS * c\n return d\n\n\ndef tile_number(lon_deg, lat_deg, zoom):\n n = 2.0 ** zoom\n xtile = int((lon_deg + 180.0) / 360.0 * n)\n ytile = int((lat_deg + 90.0) / 180.0 * n)\n return xtile, ytile\n",
"step-5": "import math\n\ndef Distance(t1, t2):\n RADIUS = 6371000. # earth's mean radius in km\n p1 = [0, 0]\n p2 = [0, 0]\n p1[0] = t1[0] * math.pi / 180.\n p1[1] = t1[1] * math.pi / 180.\n p2[0] = t2[0] * math.pi / 180.\n p2[1] = t2[1] * math.pi / 180.\n\n d_lat = (p2[0] - p1[0])\n d_lon = (p2[1] - p1[1])\n\n a = math.sin(d_lat / 2) * math.sin(d_lat / 2) + math.cos(\n p1[0]) * math.cos(p2[0]) * math.sin(d_lon / 2) * math.sin(d_lon / 2)\n c = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))\n d = RADIUS * c\n return d\n\ndef tile_number(lon_deg, lat_deg, zoom):\n n = 2.0 ** zoom\n xtile = int((lon_deg + 180.0) / 360.0 * n)\n ytile = int((lat_deg + 90.0) / 180.0 * n)\n return (xtile, ytile)",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
def print_json(obj, err=False):
if isinstance(obj, Iterator):
obj = list(obj)
click.echo(json.dumps(obj, sort_keys=True, indent=4, ensure_ascii=False
), err=err)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def print_json(obj, err=False):
if isinstance(obj, Iterator):
obj = list(obj)
click.echo(json.dumps(obj, sort_keys=True, indent=4, ensure_ascii=False
), err=err)
def show_fields(*fields):
def show(obj, verbose=False):
if verbose:
return obj
about = {}
for entry in fields:
if isinstance(entry, str):
entry = entry,
name, *subpath = entry
try:
value = obj[name]
except KeyError:
continue
for sp in subpath:
if value is None:
break
elif callable(sp):
value = sp(value)
elif isinstance(value, list):
value = [(v and v[sp]) for v in value]
else:
value = value[sp]
about[name] = value
return about
return show
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def print_json(obj, err=False):
if isinstance(obj, Iterator):
obj = list(obj)
click.echo(json.dumps(obj, sort_keys=True, indent=4, ensure_ascii=False
), err=err)
def show_fields(*fields):
def show(obj, verbose=False):
if verbose:
return obj
about = {}
for entry in fields:
if isinstance(entry, str):
entry = entry,
name, *subpath = entry
try:
value = obj[name]
except KeyError:
continue
for sp in subpath:
if value is None:
break
elif callable(sp):
value = sp(value)
elif isinstance(value, list):
value = [(v and v[sp]) for v in value]
else:
value = value[sp]
about[name] = value
return about
return show
repo_info = show_fields(('owner', 'login'), 'name', 'url', 'html_url',
'clone_url', 'git_url', 'ssh_url', 'full_name', 'description',
'homepage', 'private', 'default_branch', 'created_at', 'updated_at',
'pushed_at', 'fork', 'forks_count', 'watchers_count', 'size',
'subscribers_count', 'stargazers_count', 'id', 'language',
'network_count', 'open_issues_count', ('parent', 'full_name'), (
'source', 'full_name'))
gist_info = show_fields('id', 'url', 'git_push_url', ('files', lambda files:
{fname: {k: v for k, v in about.items() if k != 'content'} for fname,
about in files.items()}), 'public', 'html_url', ('owner', 'login'),
'description', 'created_at', 'updated_at', 'comments', ('fork_of', 'id'
), ('forks', 'id'))
issue_info = show_fields(('assignees', 'login'), 'closed_at', ('closed_by',
'login'), 'comments', 'created_at', 'html_url', 'id', ('labels', 'name'
), 'locked', ('milestone', 'title'), 'number', 'state', 'title',
'updated_at', 'url', ('user', 'login'), 'repository_url')
<|reserved_special_token_1|>
from collections.abc import Iterator
import json
import click
def print_json(obj, err=False):
if isinstance(obj, Iterator):
obj = list(obj)
click.echo(json.dumps(obj, sort_keys=True, indent=4, ensure_ascii=False
), err=err)
def show_fields(*fields):
def show(obj, verbose=False):
if verbose:
return obj
about = {}
for entry in fields:
if isinstance(entry, str):
entry = entry,
name, *subpath = entry
try:
value = obj[name]
except KeyError:
continue
for sp in subpath:
if value is None:
break
elif callable(sp):
value = sp(value)
elif isinstance(value, list):
value = [(v and v[sp]) for v in value]
else:
value = value[sp]
about[name] = value
return about
return show
repo_info = show_fields(('owner', 'login'), 'name', 'url', 'html_url',
'clone_url', 'git_url', 'ssh_url', 'full_name', 'description',
'homepage', 'private', 'default_branch', 'created_at', 'updated_at',
'pushed_at', 'fork', 'forks_count', 'watchers_count', 'size',
'subscribers_count', 'stargazers_count', 'id', 'language',
'network_count', 'open_issues_count', ('parent', 'full_name'), (
'source', 'full_name'))
gist_info = show_fields('id', 'url', 'git_push_url', ('files', lambda files:
{fname: {k: v for k, v in about.items() if k != 'content'} for fname,
about in files.items()}), 'public', 'html_url', ('owner', 'login'),
'description', 'created_at', 'updated_at', 'comments', ('fork_of', 'id'
), ('forks', 'id'))
issue_info = show_fields(('assignees', 'login'), 'closed_at', ('closed_by',
'login'), 'comments', 'created_at', 'html_url', 'id', ('labels', 'name'
), 'locked', ('milestone', 'title'), 'number', 'state', 'title',
'updated_at', 'url', ('user', 'login'), 'repository_url')
<|reserved_special_token_1|>
from collections.abc import Iterator
import json
import click
def print_json(obj, err=False):
if isinstance(obj, Iterator):
obj = list(obj)
click.echo(json.dumps(obj, sort_keys=True, indent=4, ensure_ascii=False),
err=err)
def show_fields(*fields):
def show(obj, verbose=False):
if verbose:
return obj
about = {}
for entry in fields:
if isinstance(entry, str):
entry = (entry,)
name, *subpath = entry
try:
value = obj[name]
except KeyError:
continue
for sp in subpath:
if value is None:
break
elif callable(sp):
value = sp(value)
elif isinstance(value, list):
value = [v and v[sp] for v in value]
else:
value = value[sp]
about[name] = value
return about
return show
repo_info = show_fields(
("owner", "login"),
"name",
"url",
"html_url",
"clone_url",
"git_url",
"ssh_url",
"full_name",
"description",
"homepage",
"private",
"default_branch",
"created_at",
"updated_at",
"pushed_at",
"fork",
"forks_count",
"watchers_count",
"size",
"subscribers_count",
"stargazers_count",
"id",
"language",
"network_count",
"open_issues_count",
("parent", "full_name"),
("source", "full_name"),
)
gist_info = show_fields(
"id",
"url",
"git_push_url",
("files", lambda files: {
fname: {k:v for k,v in about.items() if k != 'content'}
for fname, about in files.items()
}),
"public",
"html_url",
("owner", "login"),
"description",
"created_at",
"updated_at",
"comments",
("fork_of", "id"),
("forks", "id"),
)
issue_info = show_fields(
("assignees", "login"),
"closed_at",
("closed_by", "login"),
"comments",
"created_at",
"html_url",
"id",
("labels", "name"),
"locked",
("milestone", "title"),
"number",
"state",
"title",
"updated_at",
"url",
("user", "login"),
"repository_url",
### pull_request
)
|
flexible
|
{
"blob_id": "d340ac979f57cf4650131665e4fa5b9923f22a3e",
"index": 6691,
"step-1": "<mask token>\n\n\ndef print_json(obj, err=False):\n if isinstance(obj, Iterator):\n obj = list(obj)\n click.echo(json.dumps(obj, sort_keys=True, indent=4, ensure_ascii=False\n ), err=err)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef print_json(obj, err=False):\n if isinstance(obj, Iterator):\n obj = list(obj)\n click.echo(json.dumps(obj, sort_keys=True, indent=4, ensure_ascii=False\n ), err=err)\n\n\ndef show_fields(*fields):\n\n def show(obj, verbose=False):\n if verbose:\n return obj\n about = {}\n for entry in fields:\n if isinstance(entry, str):\n entry = entry,\n name, *subpath = entry\n try:\n value = obj[name]\n except KeyError:\n continue\n for sp in subpath:\n if value is None:\n break\n elif callable(sp):\n value = sp(value)\n elif isinstance(value, list):\n value = [(v and v[sp]) for v in value]\n else:\n value = value[sp]\n about[name] = value\n return about\n return show\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef print_json(obj, err=False):\n if isinstance(obj, Iterator):\n obj = list(obj)\n click.echo(json.dumps(obj, sort_keys=True, indent=4, ensure_ascii=False\n ), err=err)\n\n\ndef show_fields(*fields):\n\n def show(obj, verbose=False):\n if verbose:\n return obj\n about = {}\n for entry in fields:\n if isinstance(entry, str):\n entry = entry,\n name, *subpath = entry\n try:\n value = obj[name]\n except KeyError:\n continue\n for sp in subpath:\n if value is None:\n break\n elif callable(sp):\n value = sp(value)\n elif isinstance(value, list):\n value = [(v and v[sp]) for v in value]\n else:\n value = value[sp]\n about[name] = value\n return about\n return show\n\n\nrepo_info = show_fields(('owner', 'login'), 'name', 'url', 'html_url',\n 'clone_url', 'git_url', 'ssh_url', 'full_name', 'description',\n 'homepage', 'private', 'default_branch', 'created_at', 'updated_at',\n 'pushed_at', 'fork', 'forks_count', 'watchers_count', 'size',\n 'subscribers_count', 'stargazers_count', 'id', 'language',\n 'network_count', 'open_issues_count', ('parent', 'full_name'), (\n 'source', 'full_name'))\ngist_info = show_fields('id', 'url', 'git_push_url', ('files', lambda files:\n {fname: {k: v for k, v in about.items() if k != 'content'} for fname,\n about in files.items()}), 'public', 'html_url', ('owner', 'login'),\n 'description', 'created_at', 'updated_at', 'comments', ('fork_of', 'id'\n ), ('forks', 'id'))\nissue_info = show_fields(('assignees', 'login'), 'closed_at', ('closed_by',\n 'login'), 'comments', 'created_at', 'html_url', 'id', ('labels', 'name'\n ), 'locked', ('milestone', 'title'), 'number', 'state', 'title',\n 'updated_at', 'url', ('user', 'login'), 'repository_url')\n",
"step-4": "from collections.abc import Iterator\nimport json\nimport click\n\n\ndef print_json(obj, err=False):\n if isinstance(obj, Iterator):\n obj = list(obj)\n click.echo(json.dumps(obj, sort_keys=True, indent=4, ensure_ascii=False\n ), err=err)\n\n\ndef show_fields(*fields):\n\n def show(obj, verbose=False):\n if verbose:\n return obj\n about = {}\n for entry in fields:\n if isinstance(entry, str):\n entry = entry,\n name, *subpath = entry\n try:\n value = obj[name]\n except KeyError:\n continue\n for sp in subpath:\n if value is None:\n break\n elif callable(sp):\n value = sp(value)\n elif isinstance(value, list):\n value = [(v and v[sp]) for v in value]\n else:\n value = value[sp]\n about[name] = value\n return about\n return show\n\n\nrepo_info = show_fields(('owner', 'login'), 'name', 'url', 'html_url',\n 'clone_url', 'git_url', 'ssh_url', 'full_name', 'description',\n 'homepage', 'private', 'default_branch', 'created_at', 'updated_at',\n 'pushed_at', 'fork', 'forks_count', 'watchers_count', 'size',\n 'subscribers_count', 'stargazers_count', 'id', 'language',\n 'network_count', 'open_issues_count', ('parent', 'full_name'), (\n 'source', 'full_name'))\ngist_info = show_fields('id', 'url', 'git_push_url', ('files', lambda files:\n {fname: {k: v for k, v in about.items() if k != 'content'} for fname,\n about in files.items()}), 'public', 'html_url', ('owner', 'login'),\n 'description', 'created_at', 'updated_at', 'comments', ('fork_of', 'id'\n ), ('forks', 'id'))\nissue_info = show_fields(('assignees', 'login'), 'closed_at', ('closed_by',\n 'login'), 'comments', 'created_at', 'html_url', 'id', ('labels', 'name'\n ), 'locked', ('milestone', 'title'), 'number', 'state', 'title',\n 'updated_at', 'url', ('user', 'login'), 'repository_url')\n",
"step-5": "from collections.abc import Iterator\nimport json\nimport click\n\ndef print_json(obj, err=False):\n if isinstance(obj, Iterator):\n obj = list(obj)\n click.echo(json.dumps(obj, sort_keys=True, indent=4, ensure_ascii=False),\n err=err)\n\ndef show_fields(*fields):\n def show(obj, verbose=False):\n if verbose:\n return obj\n about = {}\n for entry in fields:\n if isinstance(entry, str):\n entry = (entry,)\n name, *subpath = entry\n try:\n value = obj[name]\n except KeyError:\n continue\n for sp in subpath:\n if value is None:\n break\n elif callable(sp):\n value = sp(value)\n elif isinstance(value, list):\n value = [v and v[sp] for v in value]\n else:\n value = value[sp]\n about[name] = value\n return about\n return show\n\nrepo_info = show_fields(\n (\"owner\", \"login\"),\n \"name\",\n \"url\",\n \"html_url\",\n \"clone_url\",\n \"git_url\",\n \"ssh_url\",\n \"full_name\",\n \"description\",\n \"homepage\",\n \"private\",\n \"default_branch\",\n \"created_at\",\n \"updated_at\",\n \"pushed_at\",\n \"fork\",\n \"forks_count\",\n \"watchers_count\",\n \"size\",\n \"subscribers_count\",\n \"stargazers_count\",\n \"id\",\n \"language\",\n \"network_count\",\n \"open_issues_count\",\n (\"parent\", \"full_name\"),\n (\"source\", \"full_name\"),\n)\n\ngist_info = show_fields(\n \"id\",\n \"url\",\n \"git_push_url\",\n (\"files\", lambda files: {\n fname: {k:v for k,v in about.items() if k != 'content'}\n for fname, about in files.items()\n }),\n \"public\",\n \"html_url\",\n (\"owner\", \"login\"),\n \"description\",\n \"created_at\",\n \"updated_at\",\n \"comments\",\n (\"fork_of\", \"id\"),\n (\"forks\", \"id\"),\n)\n\nissue_info = show_fields(\n (\"assignees\", \"login\"),\n \"closed_at\",\n (\"closed_by\", \"login\"),\n \"comments\",\n \"created_at\",\n \"html_url\",\n \"id\",\n (\"labels\", \"name\"),\n \"locked\",\n (\"milestone\", \"title\"),\n \"number\",\n \"state\",\n \"title\",\n \"updated_at\",\n \"url\",\n (\"user\", \"login\"),\n 
\"repository_url\",\n ### pull_request\n)\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
#CALCULATE NUMBER OF UPPER AND LOWER CASES
def cnt():
s1=input("enter a string :").strip()
count=0
countu=0
for i in s1:
if(i.islower()):
count+=1
elif(i.isupper()):
countu+=1
else:
pass
print("THE NUMBER OF UPPER CASES ARE :",countu)
print("THE NUMBER OF LOWER CASSES ARE: ",count)
cnt()
|
normal
|
{
"blob_id": "6cfda09f360aaa560011b91db8316e5e3889eea1",
"index": 2017,
"step-1": "<mask token>\n",
"step-2": "def cnt():\n s1 = input('enter a string :').strip()\n count = 0\n countu = 0\n for i in s1:\n if i.islower():\n count += 1\n elif i.isupper():\n countu += 1\n else:\n pass\n print('THE NUMBER OF UPPER CASES ARE :', countu)\n print('THE NUMBER OF LOWER CASSES ARE: ', count)\n cnt()\n",
"step-3": "#CALCULATE NUMBER OF UPPER AND LOWER CASES\r\ndef cnt():\r\n \r\n s1=input(\"enter a string :\").strip()\r\n count=0\r\n countu=0\r\n for i in s1:\r\n if(i.islower()):\r\n count+=1\r\n \r\n elif(i.isupper()):\r\n countu+=1\r\n \r\n else:\r\n pass\r\n print(\"THE NUMBER OF UPPER CASES ARE :\",countu)\r\n print(\"THE NUMBER OF LOWER CASSES ARE: \",count)\r\n cnt()\r\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
# 체크는 오른쪽+아래로만 체크합니다.
def check22(y, x, board) :
dirs = [[0,1], [1,0], [1,1]]
ret = [(y,x)]
for d in dirs :
dy, dx = y+d[0], x+d[1]
if not ( (0<=dy<len(board)) and (0<=dx<len(board[0])) and board[dy][dx]!='0' and board[y][x]==board[dy][dx] ) :
return False
else :
ret.append((dy,dx))
return ret # 나중에 한 번에 삭제될 거임
def dropdown(board) :
for x in range(len(board[0])) :
cnt = 0
movable = False
for y in range(len(board)-1, -1, -1) :
# if y == len(board)-1 :
# if board[y][x] == '0' : break
if board[y][x] == '0' :
cnt += 1
movable = True
if board[y][x] != '0' and movable :
# 위에 떠있는 블록임. cnt만큼 내리면 됨
board[y+cnt][x] = board[y][x]
board[y][x] = '0'
return board
def deleteBoard(delete, board) :
for delNode in delete :
board[delNode[0]][delNode[1]] = '0'
return board
def solution(m, n, board):
answer = 0
for i in range(len(board)) :
board[i] = list(board[i])
while True :
delete = set([])
for y in range(len(board)) :
for x in range(len(board[0])) :
tmp = check22(y, x, board)
if tmp :
delete |= set(tmp)
delete = list(delete)
if not delete : break
answer += len(delete)
board = deleteBoard(delete, board)
# print(board)
board = dropdown(board)
# print(board)
return answer
|
normal
|
{
"blob_id": "938c4325480608b904bfbe0b11c081166aad694b",
"index": 7291,
"step-1": "def check22(y, x, board):\n dirs = [[0, 1], [1, 0], [1, 1]]\n ret = [(y, x)]\n for d in dirs:\n dy, dx = y + d[0], x + d[1]\n if not (0 <= dy < len(board) and 0 <= dx < len(board[0]) and board[\n dy][dx] != '0' and board[y][x] == board[dy][dx]):\n return False\n else:\n ret.append((dy, dx))\n return ret\n\n\n<mask token>\n",
"step-2": "def check22(y, x, board):\n dirs = [[0, 1], [1, 0], [1, 1]]\n ret = [(y, x)]\n for d in dirs:\n dy, dx = y + d[0], x + d[1]\n if not (0 <= dy < len(board) and 0 <= dx < len(board[0]) and board[\n dy][dx] != '0' and board[y][x] == board[dy][dx]):\n return False\n else:\n ret.append((dy, dx))\n return ret\n\n\n<mask token>\n\n\ndef deleteBoard(delete, board):\n for delNode in delete:\n board[delNode[0]][delNode[1]] = '0'\n return board\n\n\n<mask token>\n",
"step-3": "def check22(y, x, board):\n dirs = [[0, 1], [1, 0], [1, 1]]\n ret = [(y, x)]\n for d in dirs:\n dy, dx = y + d[0], x + d[1]\n if not (0 <= dy < len(board) and 0 <= dx < len(board[0]) and board[\n dy][dx] != '0' and board[y][x] == board[dy][dx]):\n return False\n else:\n ret.append((dy, dx))\n return ret\n\n\n<mask token>\n\n\ndef deleteBoard(delete, board):\n for delNode in delete:\n board[delNode[0]][delNode[1]] = '0'\n return board\n\n\ndef solution(m, n, board):\n answer = 0\n for i in range(len(board)):\n board[i] = list(board[i])\n while True:\n delete = set([])\n for y in range(len(board)):\n for x in range(len(board[0])):\n tmp = check22(y, x, board)\n if tmp:\n delete |= set(tmp)\n delete = list(delete)\n if not delete:\n break\n answer += len(delete)\n board = deleteBoard(delete, board)\n board = dropdown(board)\n return answer\n",
"step-4": "def check22(y, x, board):\n dirs = [[0, 1], [1, 0], [1, 1]]\n ret = [(y, x)]\n for d in dirs:\n dy, dx = y + d[0], x + d[1]\n if not (0 <= dy < len(board) and 0 <= dx < len(board[0]) and board[\n dy][dx] != '0' and board[y][x] == board[dy][dx]):\n return False\n else:\n ret.append((dy, dx))\n return ret\n\n\ndef dropdown(board):\n for x in range(len(board[0])):\n cnt = 0\n movable = False\n for y in range(len(board) - 1, -1, -1):\n if board[y][x] == '0':\n cnt += 1\n movable = True\n if board[y][x] != '0' and movable:\n board[y + cnt][x] = board[y][x]\n board[y][x] = '0'\n return board\n\n\ndef deleteBoard(delete, board):\n for delNode in delete:\n board[delNode[0]][delNode[1]] = '0'\n return board\n\n\ndef solution(m, n, board):\n answer = 0\n for i in range(len(board)):\n board[i] = list(board[i])\n while True:\n delete = set([])\n for y in range(len(board)):\n for x in range(len(board[0])):\n tmp = check22(y, x, board)\n if tmp:\n delete |= set(tmp)\n delete = list(delete)\n if not delete:\n break\n answer += len(delete)\n board = deleteBoard(delete, board)\n board = dropdown(board)\n return answer\n",
"step-5": "# 체크는 오른쪽+아래로만 체크합니다.\ndef check22(y, x, board) : \n \n dirs = [[0,1], [1,0], [1,1]]\n \n ret = [(y,x)]\n for d in dirs :\n dy, dx = y+d[0], x+d[1]\n if not ( (0<=dy<len(board)) and (0<=dx<len(board[0])) and board[dy][dx]!='0' and board[y][x]==board[dy][dx] ) :\n return False\n else :\n ret.append((dy,dx))\n\n return ret # 나중에 한 번에 삭제될 거임\n\ndef dropdown(board) :\n \n for x in range(len(board[0])) :\n cnt = 0\n movable = False\n for y in range(len(board)-1, -1, -1) :\n # if y == len(board)-1 :\n # if board[y][x] == '0' : break\n if board[y][x] == '0' :\n cnt += 1\n movable = True\n if board[y][x] != '0' and movable :\n # 위에 떠있는 블록임. cnt만큼 내리면 됨\n board[y+cnt][x] = board[y][x]\n board[y][x] = '0'\n \n return board\n \ndef deleteBoard(delete, board) :\n \n for delNode in delete :\n board[delNode[0]][delNode[1]] = '0'\n \n return board\n\ndef solution(m, n, board):\n answer = 0\n \n for i in range(len(board)) :\n board[i] = list(board[i])\n \n \n while True :\n \n delete = set([])\n \n for y in range(len(board)) :\n for x in range(len(board[0])) :\n tmp = check22(y, x, board)\n if tmp :\n delete |= set(tmp)\n \n delete = list(delete)\n if not delete : break\n \n answer += len(delete)\n \n board = deleteBoard(delete, board)\n # print(board)\n board = dropdown(board)\n # print(board)\n \n return answer\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
# Dependancies
import pandas as pd
# We can use the read_html function in Pandas
# to automatically scrape any tabular data from a page.
# URL of website to scrape
url = 'https://en.wikipedia.org/wiki/List_of_capitals_in_the_United_States'
# Read HTML
tables = pd.read_html(url)
tables
# What we get in return is a list of dataframes for any tabular data that Pandas found.
# We can slice off any of those dataframes that we want using normal indexing.
# Select first table as df
df = tables[0]
# Establish columns
df.columns = ['State', 'Abr.', 'State-hood Rank', 'Capital',
'Capital Since', 'Area (sq-mi)', 'Municipal Population', 'Metropolitan',
'Metropolitan Population', 'Population Rank', 'Notes']
# Display
df.head()
# Cleanup of extra rows
df = df.iloc[2:]
df.head()
# Set the index to the State column
df.set_index('State', inplace=True)
df.head()
# That way we can display all info about a row
df.loc['Alabama']
# Pandas also had a to_html method that we can use to generate HTML tables from DataFrames.
html_table = df.to_html()
html_table
# You may have to strip unwanted newlines to clean up the table.
html_table.replace('\n', '')
# You can also save the table directly to a file.
df.to_html('table.html')
|
normal
|
{
"blob_id": "f4fca5ce20db0e27da11d76a7a2fd402c33d2e92",
"index": 4731,
"step-1": "<mask token>\n",
"step-2": "<mask token>\ntables\n<mask token>\ndf.head()\n<mask token>\ndf.head()\ndf.set_index('State', inplace=True)\ndf.head()\ndf.loc['Alabama']\n<mask token>\nhtml_table\nhtml_table.replace('\\n', '')\ndf.to_html('table.html')\n",
"step-3": "<mask token>\nurl = 'https://en.wikipedia.org/wiki/List_of_capitals_in_the_United_States'\ntables = pd.read_html(url)\ntables\ndf = tables[0]\ndf.columns = ['State', 'Abr.', 'State-hood Rank', 'Capital',\n 'Capital Since', 'Area (sq-mi)', 'Municipal Population', 'Metropolitan',\n 'Metropolitan Population', 'Population Rank', 'Notes']\ndf.head()\ndf = df.iloc[2:]\ndf.head()\ndf.set_index('State', inplace=True)\ndf.head()\ndf.loc['Alabama']\nhtml_table = df.to_html()\nhtml_table\nhtml_table.replace('\\n', '')\ndf.to_html('table.html')\n",
"step-4": "import pandas as pd\nurl = 'https://en.wikipedia.org/wiki/List_of_capitals_in_the_United_States'\ntables = pd.read_html(url)\ntables\ndf = tables[0]\ndf.columns = ['State', 'Abr.', 'State-hood Rank', 'Capital',\n 'Capital Since', 'Area (sq-mi)', 'Municipal Population', 'Metropolitan',\n 'Metropolitan Population', 'Population Rank', 'Notes']\ndf.head()\ndf = df.iloc[2:]\ndf.head()\ndf.set_index('State', inplace=True)\ndf.head()\ndf.loc['Alabama']\nhtml_table = df.to_html()\nhtml_table\nhtml_table.replace('\\n', '')\ndf.to_html('table.html')\n",
"step-5": "# Dependancies\nimport pandas as pd\n\n# We can use the read_html function in Pandas \n# to automatically scrape any tabular data from a page.\n\n# URL of website to scrape\nurl = 'https://en.wikipedia.org/wiki/List_of_capitals_in_the_United_States'\n\n# Read HTML\ntables = pd.read_html(url)\ntables\n\n# What we get in return is a list of dataframes for any tabular data that Pandas found.\n# We can slice off any of those dataframes that we want using normal indexing.\n\n# Select first table as df\ndf = tables[0]\n\n# Establish columns\ndf.columns = ['State', 'Abr.', 'State-hood Rank', 'Capital', \n 'Capital Since', 'Area (sq-mi)', 'Municipal Population', 'Metropolitan', \n 'Metropolitan Population', 'Population Rank', 'Notes']\n# Display\ndf.head()\n\n# Cleanup of extra rows\ndf = df.iloc[2:]\ndf.head()\n\n# Set the index to the State column\ndf.set_index('State', inplace=True)\ndf.head()\n\n# That way we can display all info about a row\ndf.loc['Alabama']\n\n\n# Pandas also had a to_html method that we can use to generate HTML tables from DataFrames.\nhtml_table = df.to_html()\nhtml_table\n\n# You may have to strip unwanted newlines to clean up the table.\nhtml_table.replace('\\n', '')\n\n# You can also save the table directly to a file.\ndf.to_html('table.html')",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
initCors(app)
initRoutes(app)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
app = FastAPI(debug=True, title='Recipe API')
initCors(app)
initRoutes(app)
<|reserved_special_token_1|>
from fastapi import FastAPI
from app.router.routes import initRoutes
from app.cors.cors import initCors
app = FastAPI(debug=True, title='Recipe API')
initCors(app)
initRoutes(app)
<|reserved_special_token_1|>
from fastapi import FastAPI
from app.router.routes import initRoutes
from app.cors.cors import initCors
app = FastAPI(debug=True,title="Recipe API")
initCors(app)
initRoutes(app)
|
flexible
|
{
"blob_id": "1857d76b8c68c58d2d721de529811a6aeb09fcbb",
"index": 5407,
"step-1": "<mask token>\n",
"step-2": "<mask token>\ninitCors(app)\ninitRoutes(app)\n",
"step-3": "<mask token>\napp = FastAPI(debug=True, title='Recipe API')\ninitCors(app)\ninitRoutes(app)\n",
"step-4": "from fastapi import FastAPI\nfrom app.router.routes import initRoutes\nfrom app.cors.cors import initCors\napp = FastAPI(debug=True, title='Recipe API')\ninitCors(app)\ninitRoutes(app)\n",
"step-5": "from fastapi import FastAPI\nfrom app.router.routes import initRoutes\nfrom app.cors.cors import initCors\n\napp = FastAPI(debug=True,title=\"Recipe API\")\ninitCors(app)\ninitRoutes(app)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print(cosinus_real)
print(cosinus_imaginary)
print(sinus_real)
print(sinus_imag)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
z = 1.0j
cosinus_real = math.cos(z.real)
cosinus_imaginary = math.cos(z.imag)
sinus_real = math.sin(z.real)
sinus_imag = math.sin(z.imag)
print(cosinus_real)
print(cosinus_imaginary)
print(sinus_real)
print(sinus_imag)
<|reserved_special_token_1|>
import math
z = 1.0j
cosinus_real = math.cos(z.real)
cosinus_imaginary = math.cos(z.imag)
sinus_real = math.sin(z.real)
sinus_imag = math.sin(z.imag)
print(cosinus_real)
print(cosinus_imaginary)
print(sinus_real)
print(sinus_imag)
<|reserved_special_token_1|>
import math
z = 1j
cosinus_real = math.cos(z.real)
cosinus_imaginary = math.cos(z.imag)
sinus_real = math.sin(z.real)
sinus_imag = math.sin(z.imag)
print (cosinus_real)
print (cosinus_imaginary)
print (sinus_real)
print (sinus_imag)
|
flexible
|
{
"blob_id": "7ea608b73f592cffc7723b4319cf1a87b3e9b443",
"index": 4220,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(cosinus_real)\nprint(cosinus_imaginary)\nprint(sinus_real)\nprint(sinus_imag)\n",
"step-3": "<mask token>\nz = 1.0j\ncosinus_real = math.cos(z.real)\ncosinus_imaginary = math.cos(z.imag)\nsinus_real = math.sin(z.real)\nsinus_imag = math.sin(z.imag)\nprint(cosinus_real)\nprint(cosinus_imaginary)\nprint(sinus_real)\nprint(sinus_imag)\n",
"step-4": "import math\nz = 1.0j\ncosinus_real = math.cos(z.real)\ncosinus_imaginary = math.cos(z.imag)\nsinus_real = math.sin(z.real)\nsinus_imag = math.sin(z.imag)\nprint(cosinus_real)\nprint(cosinus_imaginary)\nprint(sinus_real)\nprint(sinus_imag)\n",
"step-5": "import math\n\nz = 1j\n\n\ncosinus_real = math.cos(z.real)\ncosinus_imaginary = math.cos(z.imag)\nsinus_real = math.sin(z.real)\nsinus_imag = math.sin(z.imag)\n\nprint (cosinus_real)\nprint (cosinus_imaginary)\nprint (sinus_real)\nprint (sinus_imag)\n\n\n\n\n\n\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
class Command(BaseCommand):
<|reserved_special_token_0|>
help = (
'Will auto populate the database with all the Spells from 5th Edition Dungeons and Dragons.'
)
def handle(self, *args, **kwargs):
for spell in SPELLS:
spell_entry = Spell.objects.create(name=spell['name'], distance
=spell['range'], ritual=spell['ritual'])
if len(spell['classes']) > 1:
spell_entry.available_to = ''
for i in range(len(spell['classes'])):
spell_entry.available_to += spell['classes'][i].title(
) + ', '
else:
spell_entry.available_to = spell['classes'][0].title()
if 'components' in spell.keys():
spell_entry.somatic = spell['components']['somatic']
spell_entry.verbal = spell['components']['verbal']
spell_entry.material = spell['components']['material']
if spell_entry.material:
spell_entry.specific_materials = ''
for i in range(len(spell['components']['materials_needed'])
):
spell_entry.specific_materials += spell['components'][
'materials_needed'][i] + ', '
if 'description' in spell.keys():
spell_entry.description = spell['description']
dice_number = re.findall('\\d+(?=d)', spell['description'])
if len(dice_number) > 0:
spell_entry.damage_dice_number = dice_number[0]
dice_size = re.findall('(?<=d)\\d+', spell['description'])
if len(dice_size) > 0:
spell_entry.damage_dice_size = dice_size[0]
s_throw = re.findall('[A-Z]\\w+(?= saving throw)', spell[
'description'])
if len(s_throw) == 1:
s_throw = s_throw[0][:3].upper()
spell_entry.save_type = s_throw
if spell['level'] == 'cantrip':
spell_entry.level = 'Cantrip'
else:
spell_entry.level = SPELL_LEVELS[spell['level']]
if 'higher_levels' in spell.keys():
spell_entry.higher_level = spell['higher_levels']
if 'school' in spell.keys():
spell_entry.school = SPELL_SCHOOL[spell['school'].title()]
if 'casting_time' in spell.keys():
if 'reaction' in spell['casting_time']:
spell_entry.cast_time = CAST_TIME['1 Reaction']
else:
spell_entry.cast_time = spell['casting_time'].title()
if 'Concentration' in spell['duration']:
spell_entry.concentration = True
spell_entry.duration = spell['duration'][15:].title()
else:
spell_entry.concentration = False
spell_entry.duration = spell['duration']
spell_entry.save()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Command(BaseCommand):
"""Command to populate the database with all spells for 5th Edition."""
help = (
'Will auto populate the database with all the Spells from 5th Edition Dungeons and Dragons.'
)
def handle(self, *args, **kwargs):
for spell in SPELLS:
spell_entry = Spell.objects.create(name=spell['name'], distance
=spell['range'], ritual=spell['ritual'])
if len(spell['classes']) > 1:
spell_entry.available_to = ''
for i in range(len(spell['classes'])):
spell_entry.available_to += spell['classes'][i].title(
) + ', '
else:
spell_entry.available_to = spell['classes'][0].title()
if 'components' in spell.keys():
spell_entry.somatic = spell['components']['somatic']
spell_entry.verbal = spell['components']['verbal']
spell_entry.material = spell['components']['material']
if spell_entry.material:
spell_entry.specific_materials = ''
for i in range(len(spell['components']['materials_needed'])
):
spell_entry.specific_materials += spell['components'][
'materials_needed'][i] + ', '
if 'description' in spell.keys():
spell_entry.description = spell['description']
dice_number = re.findall('\\d+(?=d)', spell['description'])
if len(dice_number) > 0:
spell_entry.damage_dice_number = dice_number[0]
dice_size = re.findall('(?<=d)\\d+', spell['description'])
if len(dice_size) > 0:
spell_entry.damage_dice_size = dice_size[0]
s_throw = re.findall('[A-Z]\\w+(?= saving throw)', spell[
'description'])
if len(s_throw) == 1:
s_throw = s_throw[0][:3].upper()
spell_entry.save_type = s_throw
if spell['level'] == 'cantrip':
spell_entry.level = 'Cantrip'
else:
spell_entry.level = SPELL_LEVELS[spell['level']]
if 'higher_levels' in spell.keys():
spell_entry.higher_level = spell['higher_levels']
if 'school' in spell.keys():
spell_entry.school = SPELL_SCHOOL[spell['school'].title()]
if 'casting_time' in spell.keys():
if 'reaction' in spell['casting_time']:
spell_entry.cast_time = CAST_TIME['1 Reaction']
else:
spell_entry.cast_time = spell['casting_time'].title()
if 'Concentration' in spell['duration']:
spell_entry.concentration = True
spell_entry.duration = spell['duration'][15:].title()
else:
spell_entry.concentration = False
spell_entry.duration = spell['duration']
spell_entry.save()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
SPELL_SCHOOL = {'Abjuration': 'Abjuration', 'Conjuration': 'Conjuration',
'Divination': 'Divination', 'Enchantment': 'Enchantment', 'Evocation':
'Evocation', 'Illusion': 'Illusion', 'Necromancy': 'Necromancy',
'Transmutation': 'Transmutation'}
CAST_TIME = {'1 Action': '1 Action', '1 Bonus Action': '1 Bonus Action',
'1 Reaction': '1 Reaction', '1 Minute': '1 Minute', '10 Minutes':
'10 Minutes', '1 Hour': '1 Hour', '8 Hours': '8 Hours', '12 Hours':
'12 Hours', '24 Hours': '24 Hours', '1 Action or 8 Hours':
'1 Action or 8 Hours'}
SPELL_LEVELS = {'Cantrip': 'Cantrip', '1': '1st-level', '2': '2nd-level',
'3': '3rd-level', '4': '4th-level', '5': '5th-level', '6': '6th-level',
'7': '7th-level', '8': '8th-level', '9': '9th-level'}
class Command(BaseCommand):
"""Command to populate the database with all spells for 5th Edition."""
help = (
'Will auto populate the database with all the Spells from 5th Edition Dungeons and Dragons.'
)
def handle(self, *args, **kwargs):
for spell in SPELLS:
spell_entry = Spell.objects.create(name=spell['name'], distance
=spell['range'], ritual=spell['ritual'])
if len(spell['classes']) > 1:
spell_entry.available_to = ''
for i in range(len(spell['classes'])):
spell_entry.available_to += spell['classes'][i].title(
) + ', '
else:
spell_entry.available_to = spell['classes'][0].title()
if 'components' in spell.keys():
spell_entry.somatic = spell['components']['somatic']
spell_entry.verbal = spell['components']['verbal']
spell_entry.material = spell['components']['material']
if spell_entry.material:
spell_entry.specific_materials = ''
for i in range(len(spell['components']['materials_needed'])
):
spell_entry.specific_materials += spell['components'][
'materials_needed'][i] + ', '
if 'description' in spell.keys():
spell_entry.description = spell['description']
dice_number = re.findall('\\d+(?=d)', spell['description'])
if len(dice_number) > 0:
spell_entry.damage_dice_number = dice_number[0]
dice_size = re.findall('(?<=d)\\d+', spell['description'])
if len(dice_size) > 0:
spell_entry.damage_dice_size = dice_size[0]
s_throw = re.findall('[A-Z]\\w+(?= saving throw)', spell[
'description'])
if len(s_throw) == 1:
s_throw = s_throw[0][:3].upper()
spell_entry.save_type = s_throw
if spell['level'] == 'cantrip':
spell_entry.level = 'Cantrip'
else:
spell_entry.level = SPELL_LEVELS[spell['level']]
if 'higher_levels' in spell.keys():
spell_entry.higher_level = spell['higher_levels']
if 'school' in spell.keys():
spell_entry.school = SPELL_SCHOOL[spell['school'].title()]
if 'casting_time' in spell.keys():
if 'reaction' in spell['casting_time']:
spell_entry.cast_time = CAST_TIME['1 Reaction']
else:
spell_entry.cast_time = spell['casting_time'].title()
if 'Concentration' in spell['duration']:
spell_entry.concentration = True
spell_entry.duration = spell['duration'][15:].title()
else:
spell_entry.concentration = False
spell_entry.duration = spell['duration']
spell_entry.save()
<|reserved_special_token_1|>
import re
from django.core.management.base import BaseCommand
from utils.spells import SPELLS
from spells.models import Spell
SPELL_SCHOOL = {'Abjuration': 'Abjuration', 'Conjuration': 'Conjuration',
'Divination': 'Divination', 'Enchantment': 'Enchantment', 'Evocation':
'Evocation', 'Illusion': 'Illusion', 'Necromancy': 'Necromancy',
'Transmutation': 'Transmutation'}
CAST_TIME = {'1 Action': '1 Action', '1 Bonus Action': '1 Bonus Action',
'1 Reaction': '1 Reaction', '1 Minute': '1 Minute', '10 Minutes':
'10 Minutes', '1 Hour': '1 Hour', '8 Hours': '8 Hours', '12 Hours':
'12 Hours', '24 Hours': '24 Hours', '1 Action or 8 Hours':
'1 Action or 8 Hours'}
SPELL_LEVELS = {'Cantrip': 'Cantrip', '1': '1st-level', '2': '2nd-level',
'3': '3rd-level', '4': '4th-level', '5': '5th-level', '6': '6th-level',
'7': '7th-level', '8': '8th-level', '9': '9th-level'}
class Command(BaseCommand):
"""Command to populate the database with all spells for 5th Edition."""
help = (
'Will auto populate the database with all the Spells from 5th Edition Dungeons and Dragons.'
)
def handle(self, *args, **kwargs):
for spell in SPELLS:
spell_entry = Spell.objects.create(name=spell['name'], distance
=spell['range'], ritual=spell['ritual'])
if len(spell['classes']) > 1:
spell_entry.available_to = ''
for i in range(len(spell['classes'])):
spell_entry.available_to += spell['classes'][i].title(
) + ', '
else:
spell_entry.available_to = spell['classes'][0].title()
if 'components' in spell.keys():
spell_entry.somatic = spell['components']['somatic']
spell_entry.verbal = spell['components']['verbal']
spell_entry.material = spell['components']['material']
if spell_entry.material:
spell_entry.specific_materials = ''
for i in range(len(spell['components']['materials_needed'])
):
spell_entry.specific_materials += spell['components'][
'materials_needed'][i] + ', '
if 'description' in spell.keys():
spell_entry.description = spell['description']
dice_number = re.findall('\\d+(?=d)', spell['description'])
if len(dice_number) > 0:
spell_entry.damage_dice_number = dice_number[0]
dice_size = re.findall('(?<=d)\\d+', spell['description'])
if len(dice_size) > 0:
spell_entry.damage_dice_size = dice_size[0]
s_throw = re.findall('[A-Z]\\w+(?= saving throw)', spell[
'description'])
if len(s_throw) == 1:
s_throw = s_throw[0][:3].upper()
spell_entry.save_type = s_throw
if spell['level'] == 'cantrip':
spell_entry.level = 'Cantrip'
else:
spell_entry.level = SPELL_LEVELS[spell['level']]
if 'higher_levels' in spell.keys():
spell_entry.higher_level = spell['higher_levels']
if 'school' in spell.keys():
spell_entry.school = SPELL_SCHOOL[spell['school'].title()]
if 'casting_time' in spell.keys():
if 'reaction' in spell['casting_time']:
spell_entry.cast_time = CAST_TIME['1 Reaction']
else:
spell_entry.cast_time = spell['casting_time'].title()
if 'Concentration' in spell['duration']:
spell_entry.concentration = True
spell_entry.duration = spell['duration'][15:].title()
else:
spell_entry.concentration = False
spell_entry.duration = spell['duration']
spell_entry.save()
<|reserved_special_token_1|>
# python imports
import re
# django imports
from django.core.management.base import BaseCommand
# module level imports
from utils.spells import SPELLS
from spells.models import Spell
SPELL_SCHOOL = {
'Abjuration': 'Abjuration',
'Conjuration': 'Conjuration',
'Divination': 'Divination',
'Enchantment': 'Enchantment',
'Evocation': 'Evocation',
'Illusion': 'Illusion',
'Necromancy': 'Necromancy',
'Transmutation': 'Transmutation',
}
CAST_TIME = {
'1 Action': '1 Action',
'1 Bonus Action': '1 Bonus Action',
'1 Reaction': '1 Reaction',
'1 Minute': '1 Minute',
'10 Minutes': '10 Minutes',
'1 Hour': '1 Hour',
'8 Hours': '8 Hours',
'12 Hours': '12 Hours',
'24 Hours': '24 Hours',
'1 Action or 8 Hours': '1 Action or 8 Hours',
}
SPELL_LEVELS = {
'Cantrip': 'Cantrip',
'1': '1st-level',
'2': '2nd-level',
'3': '3rd-level',
'4': '4th-level',
'5': '5th-level',
'6': '6th-level',
'7': '7th-level',
'8': '8th-level',
'9': '9th-level',
}
class Command(BaseCommand):
"""Command to populate the database with all spells for 5th Edition."""
# args
help = 'Will auto populate the database with all the Spells from 5th Edition Dungeons and Dragons.'
def handle(self, *args, **kwargs):
for spell in SPELLS:
spell_entry = Spell.objects.create(
name=spell['name'],
distance=spell['range'],
ritual=spell['ritual'],
)
if len(spell['classes']) > 1:
spell_entry.available_to = ''
for i in range(len(spell['classes'])):
spell_entry.available_to += spell['classes'][i].title() + ', '
else:
spell_entry.available_to = spell['classes'][0].title()
if 'components' in spell.keys():
spell_entry.somatic = spell['components']['somatic']
spell_entry.verbal = spell['components']['verbal']
spell_entry.material = spell['components']['material']
if spell_entry.material:
spell_entry.specific_materials = ''
for i in range(len(spell['components']['materials_needed'])):
spell_entry.specific_materials += spell['components']['materials_needed'][i] + ', '
if 'description' in spell.keys():
spell_entry.description = spell['description']
dice_number = re.findall(r'\d+(?=d)', spell['description'])
if len(dice_number) > 0:
spell_entry.damage_dice_number = dice_number[0]
dice_size = re.findall(r'(?<=d)\d+', spell['description'])
if len(dice_size) > 0:
spell_entry.damage_dice_size = dice_size[0]
s_throw = re.findall(r"[A-Z]\w+(?= saving throw)", spell['description'])
if len(s_throw) == 1:
s_throw = s_throw[0][:3].upper()
spell_entry.save_type = s_throw
if spell['level'] == 'cantrip':
spell_entry.level = 'Cantrip'
else:
spell_entry.level = SPELL_LEVELS[spell['level']]
if 'higher_levels' in spell.keys():
spell_entry.higher_level = spell['higher_levels']
if 'school' in spell.keys():
spell_entry.school = SPELL_SCHOOL[spell['school'].title()]
if 'casting_time' in spell.keys():
if 'reaction' in spell['casting_time']:
spell_entry.cast_time = CAST_TIME['1 Reaction']
else:
spell_entry.cast_time = spell['casting_time'].title()
if 'Concentration' in spell['duration']:
spell_entry.concentration = True
spell_entry.duration = spell['duration'][15:].title()
else:
spell_entry.concentration = False
spell_entry.duration = spell['duration']
spell_entry.save()
|
flexible
|
{
"blob_id": "010f78d952657b3d7c11fbf8e46912d0294f6cc1",
"index": 9103,
"step-1": "<mask token>\n\n\nclass Command(BaseCommand):\n <mask token>\n help = (\n 'Will auto populate the database with all the Spells from 5th Edition Dungeons and Dragons.'\n )\n\n def handle(self, *args, **kwargs):\n for spell in SPELLS:\n spell_entry = Spell.objects.create(name=spell['name'], distance\n =spell['range'], ritual=spell['ritual'])\n if len(spell['classes']) > 1:\n spell_entry.available_to = ''\n for i in range(len(spell['classes'])):\n spell_entry.available_to += spell['classes'][i].title(\n ) + ', '\n else:\n spell_entry.available_to = spell['classes'][0].title()\n if 'components' in spell.keys():\n spell_entry.somatic = spell['components']['somatic']\n spell_entry.verbal = spell['components']['verbal']\n spell_entry.material = spell['components']['material']\n if spell_entry.material:\n spell_entry.specific_materials = ''\n for i in range(len(spell['components']['materials_needed'])\n ):\n spell_entry.specific_materials += spell['components'][\n 'materials_needed'][i] + ', '\n if 'description' in spell.keys():\n spell_entry.description = spell['description']\n dice_number = re.findall('\\\\d+(?=d)', spell['description'])\n if len(dice_number) > 0:\n spell_entry.damage_dice_number = dice_number[0]\n dice_size = re.findall('(?<=d)\\\\d+', spell['description'])\n if len(dice_size) > 0:\n spell_entry.damage_dice_size = dice_size[0]\n s_throw = re.findall('[A-Z]\\\\w+(?= saving throw)', spell[\n 'description'])\n if len(s_throw) == 1:\n s_throw = s_throw[0][:3].upper()\n spell_entry.save_type = s_throw\n if spell['level'] == 'cantrip':\n spell_entry.level = 'Cantrip'\n else:\n spell_entry.level = SPELL_LEVELS[spell['level']]\n if 'higher_levels' in spell.keys():\n spell_entry.higher_level = spell['higher_levels']\n if 'school' in spell.keys():\n spell_entry.school = SPELL_SCHOOL[spell['school'].title()]\n if 'casting_time' in spell.keys():\n if 'reaction' in spell['casting_time']:\n spell_entry.cast_time = CAST_TIME['1 Reaction']\n else:\n 
spell_entry.cast_time = spell['casting_time'].title()\n if 'Concentration' in spell['duration']:\n spell_entry.concentration = True\n spell_entry.duration = spell['duration'][15:].title()\n else:\n spell_entry.concentration = False\n spell_entry.duration = spell['duration']\n spell_entry.save()\n",
"step-2": "<mask token>\n\n\nclass Command(BaseCommand):\n \"\"\"Command to populate the database with all spells for 5th Edition.\"\"\"\n help = (\n 'Will auto populate the database with all the Spells from 5th Edition Dungeons and Dragons.'\n )\n\n def handle(self, *args, **kwargs):\n for spell in SPELLS:\n spell_entry = Spell.objects.create(name=spell['name'], distance\n =spell['range'], ritual=spell['ritual'])\n if len(spell['classes']) > 1:\n spell_entry.available_to = ''\n for i in range(len(spell['classes'])):\n spell_entry.available_to += spell['classes'][i].title(\n ) + ', '\n else:\n spell_entry.available_to = spell['classes'][0].title()\n if 'components' in spell.keys():\n spell_entry.somatic = spell['components']['somatic']\n spell_entry.verbal = spell['components']['verbal']\n spell_entry.material = spell['components']['material']\n if spell_entry.material:\n spell_entry.specific_materials = ''\n for i in range(len(spell['components']['materials_needed'])\n ):\n spell_entry.specific_materials += spell['components'][\n 'materials_needed'][i] + ', '\n if 'description' in spell.keys():\n spell_entry.description = spell['description']\n dice_number = re.findall('\\\\d+(?=d)', spell['description'])\n if len(dice_number) > 0:\n spell_entry.damage_dice_number = dice_number[0]\n dice_size = re.findall('(?<=d)\\\\d+', spell['description'])\n if len(dice_size) > 0:\n spell_entry.damage_dice_size = dice_size[0]\n s_throw = re.findall('[A-Z]\\\\w+(?= saving throw)', spell[\n 'description'])\n if len(s_throw) == 1:\n s_throw = s_throw[0][:3].upper()\n spell_entry.save_type = s_throw\n if spell['level'] == 'cantrip':\n spell_entry.level = 'Cantrip'\n else:\n spell_entry.level = SPELL_LEVELS[spell['level']]\n if 'higher_levels' in spell.keys():\n spell_entry.higher_level = spell['higher_levels']\n if 'school' in spell.keys():\n spell_entry.school = SPELL_SCHOOL[spell['school'].title()]\n if 'casting_time' in spell.keys():\n if 'reaction' in spell['casting_time']:\n 
spell_entry.cast_time = CAST_TIME['1 Reaction']\n else:\n spell_entry.cast_time = spell['casting_time'].title()\n if 'Concentration' in spell['duration']:\n spell_entry.concentration = True\n spell_entry.duration = spell['duration'][15:].title()\n else:\n spell_entry.concentration = False\n spell_entry.duration = spell['duration']\n spell_entry.save()\n",
"step-3": "<mask token>\nSPELL_SCHOOL = {'Abjuration': 'Abjuration', 'Conjuration': 'Conjuration',\n 'Divination': 'Divination', 'Enchantment': 'Enchantment', 'Evocation':\n 'Evocation', 'Illusion': 'Illusion', 'Necromancy': 'Necromancy',\n 'Transmutation': 'Transmutation'}\nCAST_TIME = {'1 Action': '1 Action', '1 Bonus Action': '1 Bonus Action',\n '1 Reaction': '1 Reaction', '1 Minute': '1 Minute', '10 Minutes':\n '10 Minutes', '1 Hour': '1 Hour', '8 Hours': '8 Hours', '12 Hours':\n '12 Hours', '24 Hours': '24 Hours', '1 Action or 8 Hours':\n '1 Action or 8 Hours'}\nSPELL_LEVELS = {'Cantrip': 'Cantrip', '1': '1st-level', '2': '2nd-level',\n '3': '3rd-level', '4': '4th-level', '5': '5th-level', '6': '6th-level',\n '7': '7th-level', '8': '8th-level', '9': '9th-level'}\n\n\nclass Command(BaseCommand):\n \"\"\"Command to populate the database with all spells for 5th Edition.\"\"\"\n help = (\n 'Will auto populate the database with all the Spells from 5th Edition Dungeons and Dragons.'\n )\n\n def handle(self, *args, **kwargs):\n for spell in SPELLS:\n spell_entry = Spell.objects.create(name=spell['name'], distance\n =spell['range'], ritual=spell['ritual'])\n if len(spell['classes']) > 1:\n spell_entry.available_to = ''\n for i in range(len(spell['classes'])):\n spell_entry.available_to += spell['classes'][i].title(\n ) + ', '\n else:\n spell_entry.available_to = spell['classes'][0].title()\n if 'components' in spell.keys():\n spell_entry.somatic = spell['components']['somatic']\n spell_entry.verbal = spell['components']['verbal']\n spell_entry.material = spell['components']['material']\n if spell_entry.material:\n spell_entry.specific_materials = ''\n for i in range(len(spell['components']['materials_needed'])\n ):\n spell_entry.specific_materials += spell['components'][\n 'materials_needed'][i] + ', '\n if 'description' in spell.keys():\n spell_entry.description = spell['description']\n dice_number = re.findall('\\\\d+(?=d)', spell['description'])\n if 
len(dice_number) > 0:\n spell_entry.damage_dice_number = dice_number[0]\n dice_size = re.findall('(?<=d)\\\\d+', spell['description'])\n if len(dice_size) > 0:\n spell_entry.damage_dice_size = dice_size[0]\n s_throw = re.findall('[A-Z]\\\\w+(?= saving throw)', spell[\n 'description'])\n if len(s_throw) == 1:\n s_throw = s_throw[0][:3].upper()\n spell_entry.save_type = s_throw\n if spell['level'] == 'cantrip':\n spell_entry.level = 'Cantrip'\n else:\n spell_entry.level = SPELL_LEVELS[spell['level']]\n if 'higher_levels' in spell.keys():\n spell_entry.higher_level = spell['higher_levels']\n if 'school' in spell.keys():\n spell_entry.school = SPELL_SCHOOL[spell['school'].title()]\n if 'casting_time' in spell.keys():\n if 'reaction' in spell['casting_time']:\n spell_entry.cast_time = CAST_TIME['1 Reaction']\n else:\n spell_entry.cast_time = spell['casting_time'].title()\n if 'Concentration' in spell['duration']:\n spell_entry.concentration = True\n spell_entry.duration = spell['duration'][15:].title()\n else:\n spell_entry.concentration = False\n spell_entry.duration = spell['duration']\n spell_entry.save()\n",
"step-4": "import re\nfrom django.core.management.base import BaseCommand\nfrom utils.spells import SPELLS\nfrom spells.models import Spell\nSPELL_SCHOOL = {'Abjuration': 'Abjuration', 'Conjuration': 'Conjuration',\n 'Divination': 'Divination', 'Enchantment': 'Enchantment', 'Evocation':\n 'Evocation', 'Illusion': 'Illusion', 'Necromancy': 'Necromancy',\n 'Transmutation': 'Transmutation'}\nCAST_TIME = {'1 Action': '1 Action', '1 Bonus Action': '1 Bonus Action',\n '1 Reaction': '1 Reaction', '1 Minute': '1 Minute', '10 Minutes':\n '10 Minutes', '1 Hour': '1 Hour', '8 Hours': '8 Hours', '12 Hours':\n '12 Hours', '24 Hours': '24 Hours', '1 Action or 8 Hours':\n '1 Action or 8 Hours'}\nSPELL_LEVELS = {'Cantrip': 'Cantrip', '1': '1st-level', '2': '2nd-level',\n '3': '3rd-level', '4': '4th-level', '5': '5th-level', '6': '6th-level',\n '7': '7th-level', '8': '8th-level', '9': '9th-level'}\n\n\nclass Command(BaseCommand):\n \"\"\"Command to populate the database with all spells for 5th Edition.\"\"\"\n help = (\n 'Will auto populate the database with all the Spells from 5th Edition Dungeons and Dragons.'\n )\n\n def handle(self, *args, **kwargs):\n for spell in SPELLS:\n spell_entry = Spell.objects.create(name=spell['name'], distance\n =spell['range'], ritual=spell['ritual'])\n if len(spell['classes']) > 1:\n spell_entry.available_to = ''\n for i in range(len(spell['classes'])):\n spell_entry.available_to += spell['classes'][i].title(\n ) + ', '\n else:\n spell_entry.available_to = spell['classes'][0].title()\n if 'components' in spell.keys():\n spell_entry.somatic = spell['components']['somatic']\n spell_entry.verbal = spell['components']['verbal']\n spell_entry.material = spell['components']['material']\n if spell_entry.material:\n spell_entry.specific_materials = ''\n for i in range(len(spell['components']['materials_needed'])\n ):\n spell_entry.specific_materials += spell['components'][\n 'materials_needed'][i] + ', '\n if 'description' in spell.keys():\n 
spell_entry.description = spell['description']\n dice_number = re.findall('\\\\d+(?=d)', spell['description'])\n if len(dice_number) > 0:\n spell_entry.damage_dice_number = dice_number[0]\n dice_size = re.findall('(?<=d)\\\\d+', spell['description'])\n if len(dice_size) > 0:\n spell_entry.damage_dice_size = dice_size[0]\n s_throw = re.findall('[A-Z]\\\\w+(?= saving throw)', spell[\n 'description'])\n if len(s_throw) == 1:\n s_throw = s_throw[0][:3].upper()\n spell_entry.save_type = s_throw\n if spell['level'] == 'cantrip':\n spell_entry.level = 'Cantrip'\n else:\n spell_entry.level = SPELL_LEVELS[spell['level']]\n if 'higher_levels' in spell.keys():\n spell_entry.higher_level = spell['higher_levels']\n if 'school' in spell.keys():\n spell_entry.school = SPELL_SCHOOL[spell['school'].title()]\n if 'casting_time' in spell.keys():\n if 'reaction' in spell['casting_time']:\n spell_entry.cast_time = CAST_TIME['1 Reaction']\n else:\n spell_entry.cast_time = spell['casting_time'].title()\n if 'Concentration' in spell['duration']:\n spell_entry.concentration = True\n spell_entry.duration = spell['duration'][15:].title()\n else:\n spell_entry.concentration = False\n spell_entry.duration = spell['duration']\n spell_entry.save()\n",
"step-5": "# python imports\nimport re\n\n# django imports\nfrom django.core.management.base import BaseCommand\n\n# module level imports\nfrom utils.spells import SPELLS\nfrom spells.models import Spell\n\nSPELL_SCHOOL = {\n 'Abjuration': 'Abjuration',\n 'Conjuration': 'Conjuration',\n 'Divination': 'Divination',\n 'Enchantment': 'Enchantment',\n 'Evocation': 'Evocation',\n 'Illusion': 'Illusion',\n 'Necromancy': 'Necromancy',\n 'Transmutation': 'Transmutation',\n}\n\nCAST_TIME = {\n '1 Action': '1 Action',\n '1 Bonus Action': '1 Bonus Action',\n '1 Reaction': '1 Reaction',\n '1 Minute': '1 Minute',\n '10 Minutes': '10 Minutes',\n '1 Hour': '1 Hour',\n '8 Hours': '8 Hours',\n '12 Hours': '12 Hours',\n '24 Hours': '24 Hours',\n '1 Action or 8 Hours': '1 Action or 8 Hours',\n}\n\nSPELL_LEVELS = {\n 'Cantrip': 'Cantrip',\n '1': '1st-level',\n '2': '2nd-level',\n '3': '3rd-level',\n '4': '4th-level',\n '5': '5th-level',\n '6': '6th-level',\n '7': '7th-level',\n '8': '8th-level',\n '9': '9th-level',\n}\n\n\nclass Command(BaseCommand):\n \"\"\"Command to populate the database with all spells for 5th Edition.\"\"\"\n\n # args\n help = 'Will auto populate the database with all the Spells from 5th Edition Dungeons and Dragons.'\n\n def handle(self, *args, **kwargs):\n\n for spell in SPELLS:\n spell_entry = Spell.objects.create(\n name=spell['name'],\n distance=spell['range'],\n ritual=spell['ritual'],\n )\n\n if len(spell['classes']) > 1:\n spell_entry.available_to = ''\n for i in range(len(spell['classes'])):\n spell_entry.available_to += spell['classes'][i].title() + ', '\n else:\n spell_entry.available_to = spell['classes'][0].title()\n\n if 'components' in spell.keys():\n spell_entry.somatic = spell['components']['somatic']\n spell_entry.verbal = spell['components']['verbal']\n spell_entry.material = spell['components']['material']\n\n if spell_entry.material:\n spell_entry.specific_materials = ''\n for i in range(len(spell['components']['materials_needed'])):\n 
spell_entry.specific_materials += spell['components']['materials_needed'][i] + ', '\n\n if 'description' in spell.keys():\n spell_entry.description = spell['description']\n\n dice_number = re.findall(r'\\d+(?=d)', spell['description'])\n if len(dice_number) > 0:\n spell_entry.damage_dice_number = dice_number[0]\n\n dice_size = re.findall(r'(?<=d)\\d+', spell['description'])\n if len(dice_size) > 0:\n spell_entry.damage_dice_size = dice_size[0]\n\n s_throw = re.findall(r\"[A-Z]\\w+(?= saving throw)\", spell['description'])\n if len(s_throw) == 1:\n s_throw = s_throw[0][:3].upper()\n spell_entry.save_type = s_throw\n\n if spell['level'] == 'cantrip':\n spell_entry.level = 'Cantrip'\n else:\n spell_entry.level = SPELL_LEVELS[spell['level']]\n\n if 'higher_levels' in spell.keys():\n spell_entry.higher_level = spell['higher_levels']\n\n if 'school' in spell.keys():\n spell_entry.school = SPELL_SCHOOL[spell['school'].title()]\n\n if 'casting_time' in spell.keys():\n if 'reaction' in spell['casting_time']:\n spell_entry.cast_time = CAST_TIME['1 Reaction']\n\n else:\n spell_entry.cast_time = spell['casting_time'].title()\n\n if 'Concentration' in spell['duration']:\n spell_entry.concentration = True\n spell_entry.duration = spell['duration'][15:].title()\n else:\n spell_entry.concentration = False\n spell_entry.duration = spell['duration']\n\n spell_entry.save()\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
import tkinter as tk
from tkinter import Tk, ttk
from tkinter import filedialog
import matplotlib.pyplot as plt
import numpy as np
import matplotlib
from matplotlib.backends.backend_tkagg import (
FigureCanvasTkAgg, NavigationToolbar2Tk)
from matplotlib.figure import Figure
import matplotlib.animation as animation
from matplotlib import style
import crystalpeaktab as cp
import smallangletab as sa
matplotlib.use("TkAgg")
class mainwin:
def __init__(self, master):
self.master = master
master.title
master.title("University of Utah XRD Analysis Multi-tool")
#Sets up tabs
self.tab_parent = ttk.Notebook(master)
self.tab1 = ttk.Frame(self.tab_parent)
self.tab2 = ttk.Frame(self.tab_parent)
self.tab3 = ttk.Frame(self.tab_parent)
self.tab_parent.add(self.tab1, text="Crystallization Peak Fit")
self.tab_parent.add(self.tab2, text="Small Angle Simulation")
self.tab_parent.grid(row=1, column=0)
# Spacers
tk.Label(self.master, text="").grid(row=2, column=3)
# Sets the first tab to be the crystal peak analysis
cp.tab(self.tab1)
# Sets the second tab to be the Small Angle Analytic Simulation
sa.tab(self.tab2)
# ======================================================================================================================
# ======================================================================================================================
# MAIN MAIN MAIN MAIN MAIN MAIN MAIN MAIN MAIN MAIN MAIN MAIN MAIN MAIN MAIN MAIN MAIN MAIN MAIN MAIN
# ======================================================================================================================
root = tk.Tk()
my_gui = mainwin(root)
root.mainloop()
# ======================================================================================================================
# ======================================================================================================================
|
normal
|
{
"blob_id": "137ed9c36265781dbebabbd1ee0ea84c9850201a",
"index": 1642,
"step-1": "<mask token>\n\n\nclass mainwin:\n\n def __init__(self, master):\n self.master = master\n master.title\n master.title('University of Utah XRD Analysis Multi-tool')\n self.tab_parent = ttk.Notebook(master)\n self.tab1 = ttk.Frame(self.tab_parent)\n self.tab2 = ttk.Frame(self.tab_parent)\n self.tab3 = ttk.Frame(self.tab_parent)\n self.tab_parent.add(self.tab1, text='Crystallization Peak Fit')\n self.tab_parent.add(self.tab2, text='Small Angle Simulation')\n self.tab_parent.grid(row=1, column=0)\n tk.Label(self.master, text='').grid(row=2, column=3)\n cp.tab(self.tab1)\n sa.tab(self.tab2)\n\n\n<mask token>\n",
"step-2": "<mask token>\nmatplotlib.use('TkAgg')\n\n\nclass mainwin:\n\n def __init__(self, master):\n self.master = master\n master.title\n master.title('University of Utah XRD Analysis Multi-tool')\n self.tab_parent = ttk.Notebook(master)\n self.tab1 = ttk.Frame(self.tab_parent)\n self.tab2 = ttk.Frame(self.tab_parent)\n self.tab3 = ttk.Frame(self.tab_parent)\n self.tab_parent.add(self.tab1, text='Crystallization Peak Fit')\n self.tab_parent.add(self.tab2, text='Small Angle Simulation')\n self.tab_parent.grid(row=1, column=0)\n tk.Label(self.master, text='').grid(row=2, column=3)\n cp.tab(self.tab1)\n sa.tab(self.tab2)\n\n\n<mask token>\nroot.mainloop()\n",
"step-3": "<mask token>\nmatplotlib.use('TkAgg')\n\n\nclass mainwin:\n\n def __init__(self, master):\n self.master = master\n master.title\n master.title('University of Utah XRD Analysis Multi-tool')\n self.tab_parent = ttk.Notebook(master)\n self.tab1 = ttk.Frame(self.tab_parent)\n self.tab2 = ttk.Frame(self.tab_parent)\n self.tab3 = ttk.Frame(self.tab_parent)\n self.tab_parent.add(self.tab1, text='Crystallization Peak Fit')\n self.tab_parent.add(self.tab2, text='Small Angle Simulation')\n self.tab_parent.grid(row=1, column=0)\n tk.Label(self.master, text='').grid(row=2, column=3)\n cp.tab(self.tab1)\n sa.tab(self.tab2)\n\n\nroot = tk.Tk()\nmy_gui = mainwin(root)\nroot.mainloop()\n",
"step-4": "import tkinter as tk\nfrom tkinter import Tk, ttk\nfrom tkinter import filedialog\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport matplotlib\nfrom matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2Tk\nfrom matplotlib.figure import Figure\nimport matplotlib.animation as animation\nfrom matplotlib import style\nimport crystalpeaktab as cp\nimport smallangletab as sa\nmatplotlib.use('TkAgg')\n\n\nclass mainwin:\n\n def __init__(self, master):\n self.master = master\n master.title\n master.title('University of Utah XRD Analysis Multi-tool')\n self.tab_parent = ttk.Notebook(master)\n self.tab1 = ttk.Frame(self.tab_parent)\n self.tab2 = ttk.Frame(self.tab_parent)\n self.tab3 = ttk.Frame(self.tab_parent)\n self.tab_parent.add(self.tab1, text='Crystallization Peak Fit')\n self.tab_parent.add(self.tab2, text='Small Angle Simulation')\n self.tab_parent.grid(row=1, column=0)\n tk.Label(self.master, text='').grid(row=2, column=3)\n cp.tab(self.tab1)\n sa.tab(self.tab2)\n\n\nroot = tk.Tk()\nmy_gui = mainwin(root)\nroot.mainloop()\n",
"step-5": "import tkinter as tk\nfrom tkinter import Tk, ttk\nfrom tkinter import filedialog\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport matplotlib\nfrom matplotlib.backends.backend_tkagg import (\n FigureCanvasTkAgg, NavigationToolbar2Tk)\nfrom matplotlib.figure import Figure\nimport matplotlib.animation as animation\nfrom matplotlib import style\nimport crystalpeaktab as cp\nimport smallangletab as sa\nmatplotlib.use(\"TkAgg\")\n\nclass mainwin:\n def __init__(self, master):\n self.master = master\n master.title\n master.title(\"University of Utah XRD Analysis Multi-tool\")\n #Sets up tabs\n self.tab_parent = ttk.Notebook(master)\n self.tab1 = ttk.Frame(self.tab_parent)\n self.tab2 = ttk.Frame(self.tab_parent)\n self.tab3 = ttk.Frame(self.tab_parent)\n self.tab_parent.add(self.tab1, text=\"Crystallization Peak Fit\")\n self.tab_parent.add(self.tab2, text=\"Small Angle Simulation\")\n self.tab_parent.grid(row=1, column=0)\n # Spacers\n tk.Label(self.master, text=\"\").grid(row=2, column=3)\n # Sets the first tab to be the crystal peak analysis\n cp.tab(self.tab1)\n # Sets the second tab to be the Small Angle Analytic Simulation\n sa.tab(self.tab2)\n# ======================================================================================================================\n# ======================================================================================================================\n# MAIN MAIN MAIN MAIN MAIN MAIN MAIN MAIN MAIN MAIN MAIN MAIN MAIN MAIN MAIN MAIN MAIN MAIN MAIN MAIN\n# ======================================================================================================================\nroot = tk.Tk()\nmy_gui = mainwin(root)\nroot.mainloop()\n# ======================================================================================================================\n# ======================================================================================================================\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
Dict={0:0, 1:1}
def fibo(n):
if n not in Dict:
val=fibo(n-1)+fibo(n-2)
Dict[n]=val
return Dict[n]
n=int(input("Enter the value of n:"))
print("Fibonacci(", n,")= ", fibo(n))
# uncomment to take input from the user
nterms = int(input("How many terms? "))
# check if the number of terms is valid
if nterms <= 0:
print("Plese enter a positive integer")
else:
print("Fibonacci sequence:")
for i in range(nterms):```
print(fibo(i), end=" , ")
|
normal
|
{
"blob_id": "5a1c4cc572431f89709d20296d43e8d889e8c5b0",
"index": 5180,
"step-1": "Dict={0:0, 1:1}\ndef fibo(n):\n if n not in Dict:\n val=fibo(n-1)+fibo(n-2)\n Dict[n]=val\n return Dict[n]\nn=int(input(\"Enter the value of n:\"))\nprint(\"Fibonacci(\", n,\")= \", fibo(n))\n\n# uncomment to take input from the user\nnterms = int(input(\"How many terms? \"))\n\n# check if the number of terms is valid\nif nterms <= 0:\n print(\"Plese enter a positive integer\")\nelse:\n print(\"Fibonacci sequence:\")\n for i in range(nterms):```\n print(fibo(i), end=\" , \")\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def classify_question(query):
try:
"""
Get answer-type from google autoML classifier
(by making POST requests with authorization key)
"""
question_classifier = GoogleQuestionClassifier()
answer_type = question_classifier.classify_by_api_call(query)
except KeyError:
"""
Get answer-type from google autoML classifier
(without authorization key by using google package)
"""
answer_type = question_classifier.classify_by_package(query)
except:
"""
Get answer-type from custom question classifier
"""
from QnA_processor.question_analysis.custom_question_classifier import CustomQuestionClassifier
question_classifier = CustomQuestionClassifier()
answer_type = question_classifier.classify_question(query)[0]
return answer_type
<|reserved_special_token_1|>
from QnA_processor.question_analysis.google_question_classifier import GoogleQuestionClassifier
def classify_question(query):
try:
"""
Get answer-type from google autoML classifier
(by making POST requests with authorization key)
"""
question_classifier = GoogleQuestionClassifier()
answer_type = question_classifier.classify_by_api_call(query)
except KeyError:
"""
Get answer-type from google autoML classifier
(without authorization key by using google package)
"""
answer_type = question_classifier.classify_by_package(query)
except:
"""
Get answer-type from custom question classifier
"""
from QnA_processor.question_analysis.custom_question_classifier import CustomQuestionClassifier
question_classifier = CustomQuestionClassifier()
answer_type = question_classifier.classify_question(query)[0]
return answer_type
<|reserved_special_token_1|>
from QnA_processor.question_analysis.google_question_classifier import GoogleQuestionClassifier
def classify_question(query):
try:
"""
Get answer-type from google autoML classifier
(by making POST requests with authorization key)
"""
question_classifier = GoogleQuestionClassifier()
answer_type = question_classifier.classify_by_api_call(query)
except KeyError :
"""
Get answer-type from google autoML classifier
(without authorization key by using google package)
"""
answer_type = question_classifier.classify_by_package(query)
except:
"""
Get answer-type from custom question classifier
"""
from QnA_processor.question_analysis.custom_question_classifier import CustomQuestionClassifier
question_classifier = CustomQuestionClassifier()
answer_type = question_classifier.classify_question(query)[0]
return answer_type
# print (classify_question("How many seasons are there in a year"))
|
flexible
|
{
"blob_id": "db231ea92319414dd10ca8dfbc14e5a70ed2fe44",
"index": 7343,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef classify_question(query):\n try:\n \"\"\"\n Get answer-type from google autoML classifier \n (by making POST requests with authorization key)\n \"\"\"\n question_classifier = GoogleQuestionClassifier()\n answer_type = question_classifier.classify_by_api_call(query)\n except KeyError:\n \"\"\"\n Get answer-type from google autoML classifier \n (without authorization key by using google package)\n \"\"\"\n answer_type = question_classifier.classify_by_package(query)\n except:\n \"\"\"\n Get answer-type from custom question classifier\n \"\"\"\n from QnA_processor.question_analysis.custom_question_classifier import CustomQuestionClassifier\n question_classifier = CustomQuestionClassifier()\n answer_type = question_classifier.classify_question(query)[0]\n return answer_type\n",
"step-3": "from QnA_processor.question_analysis.google_question_classifier import GoogleQuestionClassifier\n\n\ndef classify_question(query):\n try:\n \"\"\"\n Get answer-type from google autoML classifier \n (by making POST requests with authorization key)\n \"\"\"\n question_classifier = GoogleQuestionClassifier()\n answer_type = question_classifier.classify_by_api_call(query)\n except KeyError:\n \"\"\"\n Get answer-type from google autoML classifier \n (without authorization key by using google package)\n \"\"\"\n answer_type = question_classifier.classify_by_package(query)\n except:\n \"\"\"\n Get answer-type from custom question classifier\n \"\"\"\n from QnA_processor.question_analysis.custom_question_classifier import CustomQuestionClassifier\n question_classifier = CustomQuestionClassifier()\n answer_type = question_classifier.classify_question(query)[0]\n return answer_type\n",
"step-4": " \r\n \r\nfrom QnA_processor.question_analysis.google_question_classifier import GoogleQuestionClassifier\r\n \r\ndef classify_question(query):\r\n \r\n try:\r\n \"\"\"\r\n Get answer-type from google autoML classifier \r\n (by making POST requests with authorization key)\r\n \"\"\"\r\n question_classifier = GoogleQuestionClassifier()\r\n answer_type = question_classifier.classify_by_api_call(query)\r\n except KeyError :\r\n \"\"\"\r\n Get answer-type from google autoML classifier \r\n (without authorization key by using google package)\r\n \"\"\"\r\n answer_type = question_classifier.classify_by_package(query)\r\n \r\n except:\r\n \"\"\"\r\n Get answer-type from custom question classifier\r\n \"\"\"\r\n from QnA_processor.question_analysis.custom_question_classifier import CustomQuestionClassifier\r\n question_classifier = CustomQuestionClassifier()\r\n answer_type = question_classifier.classify_question(query)[0]\r\n \r\n return answer_type\r\n\r\n# print (classify_question(\"How many seasons are there in a year\"))",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from cpp_service.SubService import SubService
import config
if __name__ == "__main__":
gateway = config.gateway["trading_system_gateway"]
host = gateway["host"]
port = gateway["port"]
server_id = gateway["server_id"]
licences = gateway["licences"]
service = SubService(host, port, server_id, licences)
"""订阅order"""
service.sub_order()
|
normal
|
{
"blob_id": "f72cdf8d91c31760335b96052a34615307f48727",
"index": 9774,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif __name__ == '__main__':\n gateway = config.gateway['trading_system_gateway']\n host = gateway['host']\n port = gateway['port']\n server_id = gateway['server_id']\n licences = gateway['licences']\n service = SubService(host, port, server_id, licences)\n \"\"\"订阅order\"\"\"\n service.sub_order()\n",
"step-3": "from cpp_service.SubService import SubService\nimport config\nif __name__ == '__main__':\n gateway = config.gateway['trading_system_gateway']\n host = gateway['host']\n port = gateway['port']\n server_id = gateway['server_id']\n licences = gateway['licences']\n service = SubService(host, port, server_id, licences)\n \"\"\"订阅order\"\"\"\n service.sub_order()\n",
"step-4": "from cpp_service.SubService import SubService\nimport config\n\nif __name__ == \"__main__\":\n gateway = config.gateway[\"trading_system_gateway\"]\n host = gateway[\"host\"]\n port = gateway[\"port\"]\n server_id = gateway[\"server_id\"]\n licences = gateway[\"licences\"]\n\n service = SubService(host, port, server_id, licences)\n \"\"\"订阅order\"\"\"\n service.sub_order()\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
# EXERCISE:
# Plotting distributions pairwise (2)
# In this exercise, you will generate pairwise joint distributions again. This time, you will make two particular
# additions:
# - You will display regressions as well as scatter plots in the off-diagonal subplots. You will do this with the
# argument kind='reg' (where 'reg' means 'regression'). Another option for kind is 'scatter' (the default) that
# plots scatter plots in the off-diagonal subplots.
# - You will also visualize the joint distributions separated by continent of origin. You will do this with the
# keyword argument hue specifying the 'origin'.
# INSTRUCTIONS:
# - Plot the pairwise joint distributions separated by continent of origin and display the regressions.
# CODE:
# Print the first 5 rows of the DataFrame
print(auto.head())
# Plot the pairwise joint distributions grouped by 'origin' along with regression lines
sns.pairplot(auto, kind='reg', hue='origin')
# Display the plot
plt.show()
|
normal
|
{
"blob_id": "0eaaa81d3c8bc61368701e1916b42ede88b90d04",
"index": 412,
"step-1": "<mask token>\n",
"step-2": "print(auto.head())\nsns.pairplot(auto, kind='reg', hue='origin')\nplt.show()\n",
"step-3": "# EXERCISE:\n\n# Plotting distributions pairwise (2)\n\n# In this exercise, you will generate pairwise joint distributions again. This time, you will make two particular\n# additions:\n\n# - You will display regressions as well as scatter plots in the off-diagonal subplots. You will do this with the\n# argument kind='reg' (where 'reg' means 'regression'). Another option for kind is 'scatter' (the default) that\n# plots scatter plots in the off-diagonal subplots.\n# - You will also visualize the joint distributions separated by continent of origin. You will do this with the\n# keyword argument hue specifying the 'origin'.\n\n\n# INSTRUCTIONS:\n\n# - Plot the pairwise joint distributions separated by continent of origin and display the regressions.\n\n\n# CODE:\n\n# Print the first 5 rows of the DataFrame\nprint(auto.head())\n\n# Plot the pairwise joint distributions grouped by 'origin' along with regression lines\nsns.pairplot(auto, kind='reg', hue='origin')\n\n# Display the plot\nplt.show()\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
<|reserved_special_token_0|>
class ButtonActions(object):
<|reserved_special_token_0|>
def plot_rdf(self, display):
matplotlib.rcParams.update({'font.size': 10})
self.fig = plt.figure(figsize=(display.width, display.height))
self.display = display
rows, cols = self._get_rows_and_cols(display)
count = 0
for existing, (symbol, name) in zip(display.existing_elements,
display.rdf_names.items()):
if existing:
count += 1
if os.path.exists('rdf-' + str(name) + '.dat'):
arr = np.loadtxt('rdf-' + str(name) + '.dat')
else:
print('ERROR: RDF analysis for ' + str(name) +
' was not performed in this directory!')
ax = self.fig.add_subplot(rows, cols, count)
txt = ax.text(0.1, 0.5, '', transform=ax.transAxes)
txt.set_text('ERROR: RDF analysis for ' + str(name) +
"""
was not performed in this directory!""")
plt.plot()
continue
x = arr[:, 0]
y = arr[:, 1]
ax = self.fig.add_subplot(rows, cols, count)
self.axs.append(ax)
sc_x, sc_y, integrals = self._find_local_minima_and_maxima(x,
y, name)
sc = plt.scatter(sc_x, sc_y, s=10, c=display.colors['mark'])
self.integrals.append(integrals)
self.scs.append(sc)
annot = ax.annotate('', xy=(0, 0), xytext=(20, 20),
textcoords='offset points', bbox=dict(boxstyle='round',
fc='w'), arrowprops=dict(arrowstyle='->'))
annot.set_visible(False)
self.annots.append(annot)
plt.xlabel('Distance of ' + str(name) +
' to oxygen atoms in water / Å')
plt.ylabel('RDF')
plt.xticks(np.arange(0, np.max(x) + 0.5, step=0.5))
ax.set_xlim([0, np.max(x)])
ax.axhline(y=1, ls='--', color=display.colors['mark'])
plt.plot(x, y, linestyle='-', color='#80b1d3')
plt.ion()
self.fig.canvas.mpl_connect('motion_notify_event', lambda event:
self._hover(event))
plt.show()
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def _update_annot(self, ind, subplot_number: int):
index = ind['ind'][0]
integral = self.integrals[subplot_number][index]
text = '{0:.2f} waters'.format(integral)
annot = self.annots[subplot_number]
annot.xy = self.scs[subplot_number].get_offsets()[index]
annot.set_text(text)
annot.get_bbox_patch().set_facecolor(self.display.colors['mark'])
annot.get_bbox_patch().set_alpha(0.4)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class ButtonActions(object):
def __init__(self):
self.axs = []
self.integrals = []
self.scs = []
self.annots = []
def plot_rdf(self, display):
matplotlib.rcParams.update({'font.size': 10})
self.fig = plt.figure(figsize=(display.width, display.height))
self.display = display
rows, cols = self._get_rows_and_cols(display)
count = 0
for existing, (symbol, name) in zip(display.existing_elements,
display.rdf_names.items()):
if existing:
count += 1
if os.path.exists('rdf-' + str(name) + '.dat'):
arr = np.loadtxt('rdf-' + str(name) + '.dat')
else:
print('ERROR: RDF analysis for ' + str(name) +
' was not performed in this directory!')
ax = self.fig.add_subplot(rows, cols, count)
txt = ax.text(0.1, 0.5, '', transform=ax.transAxes)
txt.set_text('ERROR: RDF analysis for ' + str(name) +
"""
was not performed in this directory!""")
plt.plot()
continue
x = arr[:, 0]
y = arr[:, 1]
ax = self.fig.add_subplot(rows, cols, count)
self.axs.append(ax)
sc_x, sc_y, integrals = self._find_local_minima_and_maxima(x,
y, name)
sc = plt.scatter(sc_x, sc_y, s=10, c=display.colors['mark'])
self.integrals.append(integrals)
self.scs.append(sc)
annot = ax.annotate('', xy=(0, 0), xytext=(20, 20),
textcoords='offset points', bbox=dict(boxstyle='round',
fc='w'), arrowprops=dict(arrowstyle='->'))
annot.set_visible(False)
self.annots.append(annot)
plt.xlabel('Distance of ' + str(name) +
' to oxygen atoms in water / Å')
plt.ylabel('RDF')
plt.xticks(np.arange(0, np.max(x) + 0.5, step=0.5))
ax.set_xlim([0, np.max(x)])
ax.axhline(y=1, ls='--', color=display.colors['mark'])
plt.plot(x, y, linestyle='-', color='#80b1d3')
plt.ion()
self.fig.canvas.mpl_connect('motion_notify_event', lambda event:
self._hover(event))
plt.show()
def _get_rows_and_cols(self, display) ->Tuple[int, int]:
true_count = sum(display.existing_elements)
if true_count % 2 == 0:
rows = int(round(true_count / 2))
cols = int(round(true_count / 2))
if true_count == 2:
rows = 2
else:
rows = int(round(true_count / 2 + 0.5))
cols = int(round(true_count / 2 + 0.5))
if true_count == 5:
cols = 2
return rows, cols
def _find_local_minima_and_maxima(self, distances: np.array, values: np
.array, name: str) ->Tuple[List[float], List[float], List[float]]:
n_local = 5
maxima = argrelextrema(values, np.greater, order=n_local)[0]
minima = argrelextrema(values, np.less, order=n_local)[0]
extrema = np.asarray(list(maxima) + list(minima))
ext_distances = [distances[x] for x in extrema]
ext_values = [values[x] for x in extrema]
integrals = self._get_integrals(extrema, name)
return ext_distances, ext_values, integrals
def _get_integrals(self, indices: np.array, name: str) ->List[float]:
arr = np.loadtxt('int-rdf-' + str(name) + '.dat')
return [arr[:, 1][i] for i in indices]
    def _update_annot(self, ind, subplot_number: int):
        """Move the hover tooltip to the picked scatter point and set its text.

        *ind* is the dict returned by PathCollection.contains(); its 'ind'
        entry lists the indices of the hit points (first hit wins).
        """
        index = ind['ind'][0]
        # Integral of the RDF up to this extremum == number of water molecules.
        integral = self.integrals[subplot_number][index]
        text = '{0:.2f} waters'.format(integral)
        annot = self.annots[subplot_number]
        # Anchor the annotation at the data coordinates of the hit point.
        annot.xy = self.scs[subplot_number].get_offsets()[index]
        annot.set_text(text)
        annot.get_bbox_patch().set_facecolor(self.display.colors['mark'])
        annot.get_bbox_patch().set_alpha(0.4)
def _hover(self, event):
for i, a in enumerate(self.axs):
if event.inaxes == a:
contains, ind = self.scs[i].contains(event)
annot = self.annots[i]
visible = annot.get_visible()
if contains:
self._update_annot(ind, i)
annot.set_visible(True)
self.fig.canvas.draw_idle()
elif visible:
annot.set_visible(False)
self.fig.canvas.draw_idle()
<|reserved_special_token_1|>
__copyright__ = """
This code is licensed under the MIT license.
Copyright University Innsbruck, Institute for General, Inorganic, and Theoretical Chemistry, Podewitz Group
See LICENSE for details
"""
<|reserved_special_token_0|>
class ButtonActions(object):
def __init__(self):
self.axs = []
self.integrals = []
self.scs = []
self.annots = []
def plot_rdf(self, display):
matplotlib.rcParams.update({'font.size': 10})
self.fig = plt.figure(figsize=(display.width, display.height))
self.display = display
rows, cols = self._get_rows_and_cols(display)
count = 0
for existing, (symbol, name) in zip(display.existing_elements,
display.rdf_names.items()):
if existing:
count += 1
if os.path.exists('rdf-' + str(name) + '.dat'):
arr = np.loadtxt('rdf-' + str(name) + '.dat')
else:
print('ERROR: RDF analysis for ' + str(name) +
' was not performed in this directory!')
ax = self.fig.add_subplot(rows, cols, count)
txt = ax.text(0.1, 0.5, '', transform=ax.transAxes)
txt.set_text('ERROR: RDF analysis for ' + str(name) +
"""
was not performed in this directory!""")
plt.plot()
continue
x = arr[:, 0]
y = arr[:, 1]
ax = self.fig.add_subplot(rows, cols, count)
self.axs.append(ax)
sc_x, sc_y, integrals = self._find_local_minima_and_maxima(x,
y, name)
sc = plt.scatter(sc_x, sc_y, s=10, c=display.colors['mark'])
self.integrals.append(integrals)
self.scs.append(sc)
annot = ax.annotate('', xy=(0, 0), xytext=(20, 20),
textcoords='offset points', bbox=dict(boxstyle='round',
fc='w'), arrowprops=dict(arrowstyle='->'))
annot.set_visible(False)
self.annots.append(annot)
plt.xlabel('Distance of ' + str(name) +
' to oxygen atoms in water / Å')
plt.ylabel('RDF')
plt.xticks(np.arange(0, np.max(x) + 0.5, step=0.5))
ax.set_xlim([0, np.max(x)])
ax.axhline(y=1, ls='--', color=display.colors['mark'])
plt.plot(x, y, linestyle='-', color='#80b1d3')
plt.ion()
self.fig.canvas.mpl_connect('motion_notify_event', lambda event:
self._hover(event))
plt.show()
def _get_rows_and_cols(self, display) ->Tuple[int, int]:
true_count = sum(display.existing_elements)
if true_count % 2 == 0:
rows = int(round(true_count / 2))
cols = int(round(true_count / 2))
if true_count == 2:
rows = 2
else:
rows = int(round(true_count / 2 + 0.5))
cols = int(round(true_count / 2 + 0.5))
if true_count == 5:
cols = 2
return rows, cols
def _find_local_minima_and_maxima(self, distances: np.array, values: np
.array, name: str) ->Tuple[List[float], List[float], List[float]]:
n_local = 5
maxima = argrelextrema(values, np.greater, order=n_local)[0]
minima = argrelextrema(values, np.less, order=n_local)[0]
extrema = np.asarray(list(maxima) + list(minima))
ext_distances = [distances[x] for x in extrema]
ext_values = [values[x] for x in extrema]
integrals = self._get_integrals(extrema, name)
return ext_distances, ext_values, integrals
def _get_integrals(self, indices: np.array, name: str) ->List[float]:
arr = np.loadtxt('int-rdf-' + str(name) + '.dat')
return [arr[:, 1][i] for i in indices]
def _update_annot(self, ind, subplot_number: int):
index = ind['ind'][0]
integral = self.integrals[subplot_number][index]
text = '{0:.2f} waters'.format(integral)
annot = self.annots[subplot_number]
annot.xy = self.scs[subplot_number].get_offsets()[index]
annot.set_text(text)
annot.get_bbox_patch().set_facecolor(self.display.colors['mark'])
annot.get_bbox_patch().set_alpha(0.4)
def _hover(self, event):
for i, a in enumerate(self.axs):
if event.inaxes == a:
contains, ind = self.scs[i].contains(event)
annot = self.annots[i]
visible = annot.get_visible()
if contains:
self._update_annot(ind, i)
annot.set_visible(True)
self.fig.canvas.draw_idle()
elif visible:
annot.set_visible(False)
self.fig.canvas.draw_idle()
<|reserved_special_token_1|>
__copyright__ = """
This code is licensed under the MIT license.
Copyright University Innsbruck, Institute for General, Inorganic, and Theoretical Chemistry, Podewitz Group
See LICENSE for details
"""
from scipy.signal import argrelextrema
from typing import List, Tuple
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import os
class ButtonActions(object):
def __init__(self):
self.axs = []
self.integrals = []
self.scs = []
self.annots = []
def plot_rdf(self, display):
matplotlib.rcParams.update({'font.size': 10})
self.fig = plt.figure(figsize=(display.width, display.height))
self.display = display
rows, cols = self._get_rows_and_cols(display)
count = 0
for existing, (symbol, name) in zip(display.existing_elements,
display.rdf_names.items()):
if existing:
count += 1
if os.path.exists('rdf-' + str(name) + '.dat'):
arr = np.loadtxt('rdf-' + str(name) + '.dat')
else:
print('ERROR: RDF analysis for ' + str(name) +
' was not performed in this directory!')
ax = self.fig.add_subplot(rows, cols, count)
txt = ax.text(0.1, 0.5, '', transform=ax.transAxes)
txt.set_text('ERROR: RDF analysis for ' + str(name) +
"""
was not performed in this directory!""")
plt.plot()
continue
x = arr[:, 0]
y = arr[:, 1]
ax = self.fig.add_subplot(rows, cols, count)
self.axs.append(ax)
sc_x, sc_y, integrals = self._find_local_minima_and_maxima(x,
y, name)
sc = plt.scatter(sc_x, sc_y, s=10, c=display.colors['mark'])
self.integrals.append(integrals)
self.scs.append(sc)
annot = ax.annotate('', xy=(0, 0), xytext=(20, 20),
textcoords='offset points', bbox=dict(boxstyle='round',
fc='w'), arrowprops=dict(arrowstyle='->'))
annot.set_visible(False)
self.annots.append(annot)
plt.xlabel('Distance of ' + str(name) +
' to oxygen atoms in water / Å')
plt.ylabel('RDF')
plt.xticks(np.arange(0, np.max(x) + 0.5, step=0.5))
ax.set_xlim([0, np.max(x)])
ax.axhline(y=1, ls='--', color=display.colors['mark'])
plt.plot(x, y, linestyle='-', color='#80b1d3')
plt.ion()
self.fig.canvas.mpl_connect('motion_notify_event', lambda event:
self._hover(event))
plt.show()
def _get_rows_and_cols(self, display) ->Tuple[int, int]:
true_count = sum(display.existing_elements)
if true_count % 2 == 0:
rows = int(round(true_count / 2))
cols = int(round(true_count / 2))
if true_count == 2:
rows = 2
else:
rows = int(round(true_count / 2 + 0.5))
cols = int(round(true_count / 2 + 0.5))
if true_count == 5:
cols = 2
return rows, cols
def _find_local_minima_and_maxima(self, distances: np.array, values: np
.array, name: str) ->Tuple[List[float], List[float], List[float]]:
n_local = 5
maxima = argrelextrema(values, np.greater, order=n_local)[0]
minima = argrelextrema(values, np.less, order=n_local)[0]
extrema = np.asarray(list(maxima) + list(minima))
ext_distances = [distances[x] for x in extrema]
ext_values = [values[x] for x in extrema]
integrals = self._get_integrals(extrema, name)
return ext_distances, ext_values, integrals
def _get_integrals(self, indices: np.array, name: str) ->List[float]:
arr = np.loadtxt('int-rdf-' + str(name) + '.dat')
return [arr[:, 1][i] for i in indices]
def _update_annot(self, ind, subplot_number: int):
index = ind['ind'][0]
integral = self.integrals[subplot_number][index]
text = '{0:.2f} waters'.format(integral)
annot = self.annots[subplot_number]
annot.xy = self.scs[subplot_number].get_offsets()[index]
annot.set_text(text)
annot.get_bbox_patch().set_facecolor(self.display.colors['mark'])
annot.get_bbox_patch().set_alpha(0.4)
def _hover(self, event):
for i, a in enumerate(self.axs):
if event.inaxes == a:
contains, ind = self.scs[i].contains(event)
annot = self.annots[i]
visible = annot.get_visible()
if contains:
self._update_annot(ind, i)
annot.set_visible(True)
self.fig.canvas.draw_idle()
elif visible:
annot.set_visible(False)
self.fig.canvas.draw_idle()
<|reserved_special_token_1|>
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__copyright__ = """
This code is licensed under the MIT license.
Copyright University Innsbruck, Institute for General, Inorganic, and Theoretical Chemistry, Podewitz Group
See LICENSE for details
"""
from scipy.signal import argrelextrema
from typing import List, Tuple
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import os
class ButtonActions(object):
def __init__(self):
self.axs = []
self.integrals = []
self.scs = []
self.annots = []
def plot_rdf(self, display):
matplotlib.rcParams.update({'font.size': 10})
self.fig = plt.figure(figsize=(display.width, display.height))
self.display = display
rows, cols = self._get_rows_and_cols(display)
count = 0 # only count existing -> not enumerate
for existing, (symbol, name) in zip(display.existing_elements, display.rdf_names.items()):
if existing:
count += 1
if os.path.exists('rdf-' + str(name) + '.dat'):
arr = np.loadtxt("rdf-" + str(name) + ".dat")
else:
print("ERROR: RDF analysis for " + str(name) + " was not performed in this directory!")
ax = self.fig.add_subplot(rows, cols, count)
txt = ax.text(0.1, 0.5, '', transform=ax.transAxes)
txt.set_text("ERROR: RDF analysis for " + str(name) + "\nwas not performed in this directory!")
plt.plot()
continue
x = arr[:, 0]
y = arr[:, 1]
ax = self.fig.add_subplot(rows, cols, count)
self.axs.append(ax)
# determine integrals
sc_x, sc_y, integrals = self._find_local_minima_and_maxima(x, y, name)
sc = plt.scatter(sc_x, sc_y, s=10, c=display.colors['mark'])
self.integrals.append(integrals)
self.scs.append(sc)
annot = ax.annotate("", xy=(0, 0), xytext=(20, 20), textcoords="offset points",
bbox=dict(boxstyle="round", fc="w"), arrowprops=dict(arrowstyle="->"))
annot.set_visible(False)
self.annots.append(annot)
# title and label specifications
plt.xlabel("Distance of " + str(name) + ' to oxygen atoms in water / \u00c5')
plt.ylabel('RDF')
plt.xticks(np.arange(0, np.max(x) + 0.5, step=0.5))
ax.set_xlim([0, np.max(x)])
ax.axhline(y=1, ls='--', color=display.colors['mark'])
plt.plot(x, y, linestyle="-", color='#80b1d3')
plt.ion() # avoids 'The event loop is already running' error message
self.fig.canvas.mpl_connect('motion_notify_event', lambda event: self._hover(event))
plt.show()
def _get_rows_and_cols(self, display) -> Tuple[int, int]:
true_count = sum(display.existing_elements)
if true_count % 2 == 0:
rows = int(round(true_count / 2))
cols = int(round(true_count / 2))
if true_count == 2:
rows = 2
else:
rows = int(round(true_count / 2 + 0.5))
cols = int(round(true_count / 2 + 0.5))
if true_count == 5:
cols = 2
return rows, cols
def _find_local_minima_and_maxima(self, distances: np.array, values: np.array, name: str) -> Tuple[List[float],
List[float],
List[float]]:
n_local = 5
maxima = argrelextrema(values, np.greater, order=n_local)[0]
minima = argrelextrema(values, np.less, order=n_local)[0]
extrema = np.asarray(list(maxima) + list(minima))
ext_distances = [distances[x] for x in extrema]
ext_values = [values[x] for x in extrema]
integrals = self._get_integrals(extrema, name)
return ext_distances, ext_values, integrals
def _get_integrals(self, indices: np.array, name: str) -> List[float]:
arr = np.loadtxt("int-rdf-" + str(name) + ".dat")
return [arr[:, 1][i] for i in indices]
def _update_annot(self, ind, subplot_number: int):
index = ind['ind'][0]
integral = self.integrals[subplot_number][index]
text = "{0:.2f} waters".format(integral)
annot = self.annots[subplot_number]
annot.xy = self.scs[subplot_number].get_offsets()[index]
annot.set_text(text)
annot.get_bbox_patch().set_facecolor(self.display.colors['mark'])
annot.get_bbox_patch().set_alpha(0.4)
def _hover(self, event):
for i, a in enumerate(self.axs):
if event.inaxes == a:
contains, ind = self.scs[i].contains(event)
annot = self.annots[i]
visible = annot.get_visible()
if contains:
self._update_annot(ind, i)
annot.set_visible(True)
self.fig.canvas.draw_idle()
else:
if visible:
annot.set_visible(False)
self.fig.canvas.draw_idle()
|
flexible
|
{
"blob_id": "8c42e06fd92f0110b3ba8c4e7cc0ac45b9e44378",
"index": 3150,
"step-1": "<mask token>\n\n\nclass ButtonActions(object):\n <mask token>\n\n def plot_rdf(self, display):\n matplotlib.rcParams.update({'font.size': 10})\n self.fig = plt.figure(figsize=(display.width, display.height))\n self.display = display\n rows, cols = self._get_rows_and_cols(display)\n count = 0\n for existing, (symbol, name) in zip(display.existing_elements,\n display.rdf_names.items()):\n if existing:\n count += 1\n if os.path.exists('rdf-' + str(name) + '.dat'):\n arr = np.loadtxt('rdf-' + str(name) + '.dat')\n else:\n print('ERROR: RDF analysis for ' + str(name) +\n ' was not performed in this directory!')\n ax = self.fig.add_subplot(rows, cols, count)\n txt = ax.text(0.1, 0.5, '', transform=ax.transAxes)\n txt.set_text('ERROR: RDF analysis for ' + str(name) +\n \"\"\"\nwas not performed in this directory!\"\"\")\n plt.plot()\n continue\n x = arr[:, 0]\n y = arr[:, 1]\n ax = self.fig.add_subplot(rows, cols, count)\n self.axs.append(ax)\n sc_x, sc_y, integrals = self._find_local_minima_and_maxima(x,\n y, name)\n sc = plt.scatter(sc_x, sc_y, s=10, c=display.colors['mark'])\n self.integrals.append(integrals)\n self.scs.append(sc)\n annot = ax.annotate('', xy=(0, 0), xytext=(20, 20),\n textcoords='offset points', bbox=dict(boxstyle='round',\n fc='w'), arrowprops=dict(arrowstyle='->'))\n annot.set_visible(False)\n self.annots.append(annot)\n plt.xlabel('Distance of ' + str(name) +\n ' to oxygen atoms in water / Å')\n plt.ylabel('RDF')\n plt.xticks(np.arange(0, np.max(x) + 0.5, step=0.5))\n ax.set_xlim([0, np.max(x)])\n ax.axhline(y=1, ls='--', color=display.colors['mark'])\n plt.plot(x, y, linestyle='-', color='#80b1d3')\n plt.ion()\n self.fig.canvas.mpl_connect('motion_notify_event', lambda event:\n self._hover(event))\n plt.show()\n <mask token>\n <mask token>\n <mask token>\n\n def _update_annot(self, ind, subplot_number: int):\n index = ind['ind'][0]\n integral = self.integrals[subplot_number][index]\n text = '{0:.2f} waters'.format(integral)\n annot = 
self.annots[subplot_number]\n annot.xy = self.scs[subplot_number].get_offsets()[index]\n annot.set_text(text)\n annot.get_bbox_patch().set_facecolor(self.display.colors['mark'])\n annot.get_bbox_patch().set_alpha(0.4)\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass ButtonActions(object):\n\n def __init__(self):\n self.axs = []\n self.integrals = []\n self.scs = []\n self.annots = []\n\n def plot_rdf(self, display):\n matplotlib.rcParams.update({'font.size': 10})\n self.fig = plt.figure(figsize=(display.width, display.height))\n self.display = display\n rows, cols = self._get_rows_and_cols(display)\n count = 0\n for existing, (symbol, name) in zip(display.existing_elements,\n display.rdf_names.items()):\n if existing:\n count += 1\n if os.path.exists('rdf-' + str(name) + '.dat'):\n arr = np.loadtxt('rdf-' + str(name) + '.dat')\n else:\n print('ERROR: RDF analysis for ' + str(name) +\n ' was not performed in this directory!')\n ax = self.fig.add_subplot(rows, cols, count)\n txt = ax.text(0.1, 0.5, '', transform=ax.transAxes)\n txt.set_text('ERROR: RDF analysis for ' + str(name) +\n \"\"\"\nwas not performed in this directory!\"\"\")\n plt.plot()\n continue\n x = arr[:, 0]\n y = arr[:, 1]\n ax = self.fig.add_subplot(rows, cols, count)\n self.axs.append(ax)\n sc_x, sc_y, integrals = self._find_local_minima_and_maxima(x,\n y, name)\n sc = plt.scatter(sc_x, sc_y, s=10, c=display.colors['mark'])\n self.integrals.append(integrals)\n self.scs.append(sc)\n annot = ax.annotate('', xy=(0, 0), xytext=(20, 20),\n textcoords='offset points', bbox=dict(boxstyle='round',\n fc='w'), arrowprops=dict(arrowstyle='->'))\n annot.set_visible(False)\n self.annots.append(annot)\n plt.xlabel('Distance of ' + str(name) +\n ' to oxygen atoms in water / Å')\n plt.ylabel('RDF')\n plt.xticks(np.arange(0, np.max(x) + 0.5, step=0.5))\n ax.set_xlim([0, np.max(x)])\n ax.axhline(y=1, ls='--', color=display.colors['mark'])\n plt.plot(x, y, linestyle='-', color='#80b1d3')\n plt.ion()\n self.fig.canvas.mpl_connect('motion_notify_event', lambda event:\n self._hover(event))\n plt.show()\n\n def _get_rows_and_cols(self, display) ->Tuple[int, int]:\n true_count = sum(display.existing_elements)\n if true_count % 2 == 0:\n rows = 
int(round(true_count / 2))\n cols = int(round(true_count / 2))\n if true_count == 2:\n rows = 2\n else:\n rows = int(round(true_count / 2 + 0.5))\n cols = int(round(true_count / 2 + 0.5))\n if true_count == 5:\n cols = 2\n return rows, cols\n\n def _find_local_minima_and_maxima(self, distances: np.array, values: np\n .array, name: str) ->Tuple[List[float], List[float], List[float]]:\n n_local = 5\n maxima = argrelextrema(values, np.greater, order=n_local)[0]\n minima = argrelextrema(values, np.less, order=n_local)[0]\n extrema = np.asarray(list(maxima) + list(minima))\n ext_distances = [distances[x] for x in extrema]\n ext_values = [values[x] for x in extrema]\n integrals = self._get_integrals(extrema, name)\n return ext_distances, ext_values, integrals\n\n def _get_integrals(self, indices: np.array, name: str) ->List[float]:\n arr = np.loadtxt('int-rdf-' + str(name) + '.dat')\n return [arr[:, 1][i] for i in indices]\n\n def _update_annot(self, ind, subplot_number: int):\n index = ind['ind'][0]\n integral = self.integrals[subplot_number][index]\n text = '{0:.2f} waters'.format(integral)\n annot = self.annots[subplot_number]\n annot.xy = self.scs[subplot_number].get_offsets()[index]\n annot.set_text(text)\n annot.get_bbox_patch().set_facecolor(self.display.colors['mark'])\n annot.get_bbox_patch().set_alpha(0.4)\n\n def _hover(self, event):\n for i, a in enumerate(self.axs):\n if event.inaxes == a:\n contains, ind = self.scs[i].contains(event)\n annot = self.annots[i]\n visible = annot.get_visible()\n if contains:\n self._update_annot(ind, i)\n annot.set_visible(True)\n self.fig.canvas.draw_idle()\n elif visible:\n annot.set_visible(False)\n self.fig.canvas.draw_idle()\n",
"step-3": "__copyright__ = \"\"\"\nThis code is licensed under the MIT license.\nCopyright University Innsbruck, Institute for General, Inorganic, and Theoretical Chemistry, Podewitz Group\nSee LICENSE for details\n\"\"\"\n<mask token>\n\n\nclass ButtonActions(object):\n\n def __init__(self):\n self.axs = []\n self.integrals = []\n self.scs = []\n self.annots = []\n\n def plot_rdf(self, display):\n matplotlib.rcParams.update({'font.size': 10})\n self.fig = plt.figure(figsize=(display.width, display.height))\n self.display = display\n rows, cols = self._get_rows_and_cols(display)\n count = 0\n for existing, (symbol, name) in zip(display.existing_elements,\n display.rdf_names.items()):\n if existing:\n count += 1\n if os.path.exists('rdf-' + str(name) + '.dat'):\n arr = np.loadtxt('rdf-' + str(name) + '.dat')\n else:\n print('ERROR: RDF analysis for ' + str(name) +\n ' was not performed in this directory!')\n ax = self.fig.add_subplot(rows, cols, count)\n txt = ax.text(0.1, 0.5, '', transform=ax.transAxes)\n txt.set_text('ERROR: RDF analysis for ' + str(name) +\n \"\"\"\nwas not performed in this directory!\"\"\")\n plt.plot()\n continue\n x = arr[:, 0]\n y = arr[:, 1]\n ax = self.fig.add_subplot(rows, cols, count)\n self.axs.append(ax)\n sc_x, sc_y, integrals = self._find_local_minima_and_maxima(x,\n y, name)\n sc = plt.scatter(sc_x, sc_y, s=10, c=display.colors['mark'])\n self.integrals.append(integrals)\n self.scs.append(sc)\n annot = ax.annotate('', xy=(0, 0), xytext=(20, 20),\n textcoords='offset points', bbox=dict(boxstyle='round',\n fc='w'), arrowprops=dict(arrowstyle='->'))\n annot.set_visible(False)\n self.annots.append(annot)\n plt.xlabel('Distance of ' + str(name) +\n ' to oxygen atoms in water / Å')\n plt.ylabel('RDF')\n plt.xticks(np.arange(0, np.max(x) + 0.5, step=0.5))\n ax.set_xlim([0, np.max(x)])\n ax.axhline(y=1, ls='--', color=display.colors['mark'])\n plt.plot(x, y, linestyle='-', color='#80b1d3')\n plt.ion()\n 
self.fig.canvas.mpl_connect('motion_notify_event', lambda event:\n self._hover(event))\n plt.show()\n\n def _get_rows_and_cols(self, display) ->Tuple[int, int]:\n true_count = sum(display.existing_elements)\n if true_count % 2 == 0:\n rows = int(round(true_count / 2))\n cols = int(round(true_count / 2))\n if true_count == 2:\n rows = 2\n else:\n rows = int(round(true_count / 2 + 0.5))\n cols = int(round(true_count / 2 + 0.5))\n if true_count == 5:\n cols = 2\n return rows, cols\n\n def _find_local_minima_and_maxima(self, distances: np.array, values: np\n .array, name: str) ->Tuple[List[float], List[float], List[float]]:\n n_local = 5\n maxima = argrelextrema(values, np.greater, order=n_local)[0]\n minima = argrelextrema(values, np.less, order=n_local)[0]\n extrema = np.asarray(list(maxima) + list(minima))\n ext_distances = [distances[x] for x in extrema]\n ext_values = [values[x] for x in extrema]\n integrals = self._get_integrals(extrema, name)\n return ext_distances, ext_values, integrals\n\n def _get_integrals(self, indices: np.array, name: str) ->List[float]:\n arr = np.loadtxt('int-rdf-' + str(name) + '.dat')\n return [arr[:, 1][i] for i in indices]\n\n def _update_annot(self, ind, subplot_number: int):\n index = ind['ind'][0]\n integral = self.integrals[subplot_number][index]\n text = '{0:.2f} waters'.format(integral)\n annot = self.annots[subplot_number]\n annot.xy = self.scs[subplot_number].get_offsets()[index]\n annot.set_text(text)\n annot.get_bbox_patch().set_facecolor(self.display.colors['mark'])\n annot.get_bbox_patch().set_alpha(0.4)\n\n def _hover(self, event):\n for i, a in enumerate(self.axs):\n if event.inaxes == a:\n contains, ind = self.scs[i].contains(event)\n annot = self.annots[i]\n visible = annot.get_visible()\n if contains:\n self._update_annot(ind, i)\n annot.set_visible(True)\n self.fig.canvas.draw_idle()\n elif visible:\n annot.set_visible(False)\n self.fig.canvas.draw_idle()\n",
"step-4": "__copyright__ = \"\"\"\nThis code is licensed under the MIT license.\nCopyright University Innsbruck, Institute for General, Inorganic, and Theoretical Chemistry, Podewitz Group\nSee LICENSE for details\n\"\"\"\nfrom scipy.signal import argrelextrema\nfrom typing import List, Tuple\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport os\n\n\nclass ButtonActions(object):\n\n def __init__(self):\n self.axs = []\n self.integrals = []\n self.scs = []\n self.annots = []\n\n def plot_rdf(self, display):\n matplotlib.rcParams.update({'font.size': 10})\n self.fig = plt.figure(figsize=(display.width, display.height))\n self.display = display\n rows, cols = self._get_rows_and_cols(display)\n count = 0\n for existing, (symbol, name) in zip(display.existing_elements,\n display.rdf_names.items()):\n if existing:\n count += 1\n if os.path.exists('rdf-' + str(name) + '.dat'):\n arr = np.loadtxt('rdf-' + str(name) + '.dat')\n else:\n print('ERROR: RDF analysis for ' + str(name) +\n ' was not performed in this directory!')\n ax = self.fig.add_subplot(rows, cols, count)\n txt = ax.text(0.1, 0.5, '', transform=ax.transAxes)\n txt.set_text('ERROR: RDF analysis for ' + str(name) +\n \"\"\"\nwas not performed in this directory!\"\"\")\n plt.plot()\n continue\n x = arr[:, 0]\n y = arr[:, 1]\n ax = self.fig.add_subplot(rows, cols, count)\n self.axs.append(ax)\n sc_x, sc_y, integrals = self._find_local_minima_and_maxima(x,\n y, name)\n sc = plt.scatter(sc_x, sc_y, s=10, c=display.colors['mark'])\n self.integrals.append(integrals)\n self.scs.append(sc)\n annot = ax.annotate('', xy=(0, 0), xytext=(20, 20),\n textcoords='offset points', bbox=dict(boxstyle='round',\n fc='w'), arrowprops=dict(arrowstyle='->'))\n annot.set_visible(False)\n self.annots.append(annot)\n plt.xlabel('Distance of ' + str(name) +\n ' to oxygen atoms in water / Å')\n plt.ylabel('RDF')\n plt.xticks(np.arange(0, np.max(x) + 0.5, step=0.5))\n ax.set_xlim([0, np.max(x)])\n 
ax.axhline(y=1, ls='--', color=display.colors['mark'])\n plt.plot(x, y, linestyle='-', color='#80b1d3')\n plt.ion()\n self.fig.canvas.mpl_connect('motion_notify_event', lambda event:\n self._hover(event))\n plt.show()\n\n def _get_rows_and_cols(self, display) ->Tuple[int, int]:\n true_count = sum(display.existing_elements)\n if true_count % 2 == 0:\n rows = int(round(true_count / 2))\n cols = int(round(true_count / 2))\n if true_count == 2:\n rows = 2\n else:\n rows = int(round(true_count / 2 + 0.5))\n cols = int(round(true_count / 2 + 0.5))\n if true_count == 5:\n cols = 2\n return rows, cols\n\n def _find_local_minima_and_maxima(self, distances: np.array, values: np\n .array, name: str) ->Tuple[List[float], List[float], List[float]]:\n n_local = 5\n maxima = argrelextrema(values, np.greater, order=n_local)[0]\n minima = argrelextrema(values, np.less, order=n_local)[0]\n extrema = np.asarray(list(maxima) + list(minima))\n ext_distances = [distances[x] for x in extrema]\n ext_values = [values[x] for x in extrema]\n integrals = self._get_integrals(extrema, name)\n return ext_distances, ext_values, integrals\n\n def _get_integrals(self, indices: np.array, name: str) ->List[float]:\n arr = np.loadtxt('int-rdf-' + str(name) + '.dat')\n return [arr[:, 1][i] for i in indices]\n\n def _update_annot(self, ind, subplot_number: int):\n index = ind['ind'][0]\n integral = self.integrals[subplot_number][index]\n text = '{0:.2f} waters'.format(integral)\n annot = self.annots[subplot_number]\n annot.xy = self.scs[subplot_number].get_offsets()[index]\n annot.set_text(text)\n annot.get_bbox_patch().set_facecolor(self.display.colors['mark'])\n annot.get_bbox_patch().set_alpha(0.4)\n\n def _hover(self, event):\n for i, a in enumerate(self.axs):\n if event.inaxes == a:\n contains, ind = self.scs[i].contains(event)\n annot = self.annots[i]\n visible = annot.get_visible()\n if contains:\n self._update_annot(ind, i)\n annot.set_visible(True)\n self.fig.canvas.draw_idle()\n elif 
visible:\n annot.set_visible(False)\n self.fig.canvas.draw_idle()\n",
"step-5": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n__copyright__ = \"\"\"\nThis code is licensed under the MIT license.\nCopyright University Innsbruck, Institute for General, Inorganic, and Theoretical Chemistry, Podewitz Group\nSee LICENSE for details\n\"\"\"\n\nfrom scipy.signal import argrelextrema\nfrom typing import List, Tuple\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport os\n\n\nclass ButtonActions(object):\n def __init__(self):\n self.axs = []\n self.integrals = []\n self.scs = []\n self.annots = []\n\n def plot_rdf(self, display):\n matplotlib.rcParams.update({'font.size': 10})\n self.fig = plt.figure(figsize=(display.width, display.height))\n self.display = display\n\n rows, cols = self._get_rows_and_cols(display)\n\n count = 0 # only count existing -> not enumerate\n for existing, (symbol, name) in zip(display.existing_elements, display.rdf_names.items()):\n if existing:\n count += 1\n if os.path.exists('rdf-' + str(name) + '.dat'):\n arr = np.loadtxt(\"rdf-\" + str(name) + \".dat\")\n else:\n print(\"ERROR: RDF analysis for \" + str(name) + \" was not performed in this directory!\")\n ax = self.fig.add_subplot(rows, cols, count)\n txt = ax.text(0.1, 0.5, '', transform=ax.transAxes)\n txt.set_text(\"ERROR: RDF analysis for \" + str(name) + \"\\nwas not performed in this directory!\")\n plt.plot()\n continue\n\n x = arr[:, 0]\n y = arr[:, 1]\n ax = self.fig.add_subplot(rows, cols, count)\n self.axs.append(ax)\n\n # determine integrals\n sc_x, sc_y, integrals = self._find_local_minima_and_maxima(x, y, name)\n sc = plt.scatter(sc_x, sc_y, s=10, c=display.colors['mark'])\n self.integrals.append(integrals)\n self.scs.append(sc)\n annot = ax.annotate(\"\", xy=(0, 0), xytext=(20, 20), textcoords=\"offset points\",\n bbox=dict(boxstyle=\"round\", fc=\"w\"), arrowprops=dict(arrowstyle=\"->\"))\n annot.set_visible(False)\n self.annots.append(annot)\n\n # title and label specifications\n plt.xlabel(\"Distance of \" + 
str(name) + ' to oxygen atoms in water / \\u00c5')\n plt.ylabel('RDF')\n plt.xticks(np.arange(0, np.max(x) + 0.5, step=0.5))\n ax.set_xlim([0, np.max(x)])\n ax.axhline(y=1, ls='--', color=display.colors['mark'])\n plt.plot(x, y, linestyle=\"-\", color='#80b1d3')\n\n plt.ion() # avoids 'The event loop is already running' error message\n self.fig.canvas.mpl_connect('motion_notify_event', lambda event: self._hover(event))\n plt.show()\n\n def _get_rows_and_cols(self, display) -> Tuple[int, int]:\n true_count = sum(display.existing_elements)\n if true_count % 2 == 0:\n rows = int(round(true_count / 2))\n cols = int(round(true_count / 2))\n if true_count == 2:\n rows = 2\n else:\n rows = int(round(true_count / 2 + 0.5))\n cols = int(round(true_count / 2 + 0.5))\n if true_count == 5:\n cols = 2\n return rows, cols\n\n def _find_local_minima_and_maxima(self, distances: np.array, values: np.array, name: str) -> Tuple[List[float],\n List[float],\n List[float]]:\n n_local = 5\n maxima = argrelextrema(values, np.greater, order=n_local)[0]\n minima = argrelextrema(values, np.less, order=n_local)[0]\n extrema = np.asarray(list(maxima) + list(minima))\n ext_distances = [distances[x] for x in extrema]\n ext_values = [values[x] for x in extrema]\n integrals = self._get_integrals(extrema, name)\n return ext_distances, ext_values, integrals\n\n def _get_integrals(self, indices: np.array, name: str) -> List[float]:\n arr = np.loadtxt(\"int-rdf-\" + str(name) + \".dat\")\n return [arr[:, 1][i] for i in indices]\n\n def _update_annot(self, ind, subplot_number: int):\n index = ind['ind'][0]\n integral = self.integrals[subplot_number][index]\n text = \"{0:.2f} waters\".format(integral)\n annot = self.annots[subplot_number]\n annot.xy = self.scs[subplot_number].get_offsets()[index]\n annot.set_text(text)\n annot.get_bbox_patch().set_facecolor(self.display.colors['mark'])\n annot.get_bbox_patch().set_alpha(0.4)\n\n def _hover(self, event):\n for i, a in enumerate(self.axs):\n if 
event.inaxes == a:\n contains, ind = self.scs[i].contains(event)\n annot = self.annots[i]\n visible = annot.get_visible()\n if contains:\n self._update_annot(ind, i)\n annot.set_visible(True)\n self.fig.canvas.draw_idle()\n else:\n if visible:\n annot.set_visible(False)\n self.fig.canvas.draw_idle()\n",
"step-ids": [
3,
8,
9,
10,
11
]
}
|
[
3,
8,
9,
10,
11
] |
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 12 20:29:49 2019
@author: kzx789
"""
from PIL import Image
import os, glob, numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import math
import cv2
import pymysql
import MySQLdb as mysql
"""
#csv를 읽어서 영양정보 출력
def get_Nutrition(str) :
nutrition = pd.read_csv('C:/식품영양정보/영양정보.csv')
print(nutrition[nutrition['음식명'] == str])
"""
#사용된 전체 이미지 출력
def drawing_plt():
    """Display every .jpg in the test directory as one grid of subplots.

    Uses a single sorted directory listing for both the image data and the
    subplot titles, so titles always match the image shown. (The previous
    version paired a hard-coded glob() result with a separate os.listdir()
    result; the two orderings are not guaranteed to agree, so a title could
    be attached to the wrong image.)
    """
    image_names = sorted(name for name in os.listdir(caltech_dir)
                         if name.lower().endswith('.jpg'))
    rows = 4
    cols = int(math.ceil(len(image_names) / 4))  # enough columns for 4 rows
    fig = plt.figure()
    for i, name in enumerate(image_names, start=1):
        img = cv2.imread(os.path.join(caltech_dir, name))
        subplot = fig.add_subplot(rows, cols, i)
        # OpenCV loads BGR; convert for matplotlib's RGB display.
        subplot.imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
        subplot.set_title(name)
        subplot.axis("off")
    print('\t', "전체 이미지 리스트 ")
    plt.show()
# Show one matching image by file name.
def get_Image(str):
    """Display a single test image from C:/cnnTest/ (RGB, no axis ticks).

    The parameter keeps its historical name for interface compatibility,
    although it shadows the builtin ``str``.
    """
    imgPath = 'C:/cnnTest/'
    image = cv2.imread(imgPath + str)
    if image is None:
        # cv2.imread signals a missing/unreadable file by returning None
        # (no exception); fail loudly instead of crashing inside cvtColor.
        raise FileNotFoundError(imgPath + str)
    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    plt.imshow(image)
    plt.xticks([])
    plt.yticks([])
    plt.show()
# Fetch nutrition facts for one food name from the MySQL database.
def get_DB_Nutrition(str):
    """Print the nutrition row for *str* from the NUTRITION_INFO table.

    The query fetches the header row (FOODNAME = '음식명') plus the row
    matching the given food name, and prints them as a pandas Series.
    The parameter keeps its historical name (it shadows builtin ``str``).
    """
    db = pymysql.connect(host="localhost", user="yeha", password="", db="nutrition")
    try:
        cur = db.cursor()
        sql = "SELECT * FROM NUTRITION_INFO WHERE FOODNAME LIKE '음식명' OR FOODNAME LIKE %s"
        # Query parameters must be a sequence: (str) is just the bare
        # string, (str,) is the required one-element tuple.
        cur.execute(sql, (str,))
        data = cur.fetchall()
        # NOTE(review): Series(data[0], data[1]) uses the first fetched row
        # as values and the second as the index — presumably header row vs.
        # food row; confirm the intended orientation against the table data.
        df = pd.Series(data[0], data[1])
        print(df)
    finally:
        # Close the connection even if the query fails.
        db.close()
caltech_dir = "C:/cnnTest"

# Every test image is resized to 128x128 RGB before prediction.
image_w = 128
image_h = 128
pixels = image_h * image_w * 3  # flattened size of one image (kept for reference; unused below)

X = []

# Build full paths for every file in the test directory.
files = [caltech_dir + '/' + name for name in os.listdir(caltech_dir)]

for f in files:
    img = Image.open(f)
    img = img.convert("RGB")
    img = img.resize((image_w, image_h))
    X.append(np.asarray(img))

X = np.array(X)

# 모델 불러오기 — load the trained multi-class model and score every image.
from keras.models import load_model

model = load_model("C:/image/train/model/multi_img_classification.model")
prediction = model.predict(X)

np.set_printoptions(formatter={'float': lambda x: "{0:0.3f}".format(x)})

print('프로그램을 실행합니다..')
print('\n')
thisImg = os.listdir(caltech_dir)

# Class index -> Korean label (replaces the old if/elif chain).
LABELS = ("연어회", "쌀국수", "샌드위치", "새우튀김")

for cnt, i in enumerate(prediction):
    pre_ans = i.argmax()  # predicted class index
    pre_ans_str = LABELS[pre_ans]
    # The original code tested i[0] ... i[3] >= 0.8 in four copy-pasted
    # blocks but always reported the argmax label; with a softmax output
    # at most one probability can reach 0.8 and it is the argmax, so a
    # single check on the winning class is equivalent and removes the
    # duplication.
    if i[pre_ans] >= 0.8:
        get_Image(thisImg[cnt])
        print(thisImg[cnt] + " 이미지는 " + pre_ans_str + "(으)로 추정됩니다.")
        get_DB_Nutrition(pre_ans_str)

drawing_plt()
|
normal
|
{
"blob_id": "1255a9df2fbe11d92991f3f0f7054b92cb017628",
"index": 2941,
"step-1": "<mask token>\n\n\ndef drawing_plt():\n thisImg = os.listdir(caltech_dir)\n row = 4\n cols = int(math.ceil(len(thisImg) / 4))\n fig = plt.figure()\n i = 1\n for image in glob.glob('C:/cnnTest/*.jpg'):\n img = cv2.imread(image)\n subplot = fig.add_subplot(row, cols, i)\n subplot.imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))\n subplot.set_title(thisImg[i - 1])\n subplot.axis('off')\n i += 1\n print('\\t', '전체 이미지 리스트 ')\n plt.show()\n\n\ndef get_Image(str):\n imgPath = 'C:/cnnTest/'\n image = cv2.imread(imgPath + str)\n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n plt.imshow(image)\n plt.xticks([])\n plt.yticks([])\n plt.show()\n\n\ndef get_DB_Nutrition(str):\n db = pymysql.connect(host='localhost', user='yeha', password='', db=\n 'nutrition')\n cur = db.cursor()\n sql = (\n \"SELECT * FROM NUTRITION_INFO WHERE FOODNAME LIKE '음식명' OR FOODNAME LIKE %s\"\n )\n cur.execute(sql, str)\n data = cur.fetchall()\n df = pd.Series(data[0], data[1])\n print(df)\n db.close()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef drawing_plt():\n thisImg = os.listdir(caltech_dir)\n row = 4\n cols = int(math.ceil(len(thisImg) / 4))\n fig = plt.figure()\n i = 1\n for image in glob.glob('C:/cnnTest/*.jpg'):\n img = cv2.imread(image)\n subplot = fig.add_subplot(row, cols, i)\n subplot.imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))\n subplot.set_title(thisImg[i - 1])\n subplot.axis('off')\n i += 1\n print('\\t', '전체 이미지 리스트 ')\n plt.show()\n\n\ndef get_Image(str):\n imgPath = 'C:/cnnTest/'\n image = cv2.imread(imgPath + str)\n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n plt.imshow(image)\n plt.xticks([])\n plt.yticks([])\n plt.show()\n\n\ndef get_DB_Nutrition(str):\n db = pymysql.connect(host='localhost', user='yeha', password='', db=\n 'nutrition')\n cur = db.cursor()\n sql = (\n \"SELECT * FROM NUTRITION_INFO WHERE FOODNAME LIKE '음식명' OR FOODNAME LIKE %s\"\n )\n cur.execute(sql, str)\n data = cur.fetchall()\n df = pd.Series(data[0], data[1])\n print(df)\n db.close()\n\n\n<mask token>\nfor i in range(len(files)):\n files[i] = caltech_dir + '/' + files[i]\nfor f in files:\n img = Image.open(f)\n img = img.convert('RGB')\n img = img.resize((image_w, image_h))\n data = np.asarray(img)\n X.append(data)\n<mask token>\nnp.set_printoptions(formatter={'float': lambda x: '{0:0.3f}'.format(x)})\nprint('프로그램을 실행합니다..')\nprint('\\n')\n<mask token>\nfor i in prediction:\n pre_ans = i.argmax()\n pre_ans_str = ''\n if pre_ans == 0:\n pre_ans_str = '연어회'\n elif pre_ans == 1:\n pre_ans_str = '쌀국수'\n elif pre_ans == 2:\n pre_ans_str = '샌드위치'\n else:\n pre_ans_str = '새우튀김'\n if i[0] >= 0.8:\n get_Image(thisImg[cnt])\n print(thisImg[cnt] + ' 이미지는 ' + pre_ans_str + '(으)로 추정됩니다.')\n get_DB_Nutrition(pre_ans_str)\n if i[1] >= 0.8:\n get_Image(thisImg[cnt])\n print(thisImg[cnt] + ' 이미지는 ' + pre_ans_str + '(으)로 추정됩니다.')\n get_DB_Nutrition(pre_ans_str)\n if i[2] >= 0.8:\n get_Image(thisImg[cnt])\n print(thisImg[cnt] + ' 이미지는 ' + pre_ans_str + '(으)로 추정됩니다.')\n 
get_DB_Nutrition(pre_ans_str)\n if i[3] >= 0.8:\n get_Image(thisImg[cnt])\n print(thisImg[cnt] + ' 이미지는 ' + pre_ans_str + '(으)로 추정됩니다.')\n get_DB_Nutrition(pre_ans_str)\n cnt += 1\ndrawing_plt()\n",
"step-3": "<mask token>\n\n\ndef drawing_plt():\n thisImg = os.listdir(caltech_dir)\n row = 4\n cols = int(math.ceil(len(thisImg) / 4))\n fig = plt.figure()\n i = 1\n for image in glob.glob('C:/cnnTest/*.jpg'):\n img = cv2.imread(image)\n subplot = fig.add_subplot(row, cols, i)\n subplot.imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))\n subplot.set_title(thisImg[i - 1])\n subplot.axis('off')\n i += 1\n print('\\t', '전체 이미지 리스트 ')\n plt.show()\n\n\ndef get_Image(str):\n imgPath = 'C:/cnnTest/'\n image = cv2.imread(imgPath + str)\n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n plt.imshow(image)\n plt.xticks([])\n plt.yticks([])\n plt.show()\n\n\ndef get_DB_Nutrition(str):\n db = pymysql.connect(host='localhost', user='yeha', password='', db=\n 'nutrition')\n cur = db.cursor()\n sql = (\n \"SELECT * FROM NUTRITION_INFO WHERE FOODNAME LIKE '음식명' OR FOODNAME LIKE %s\"\n )\n cur.execute(sql, str)\n data = cur.fetchall()\n df = pd.Series(data[0], data[1])\n print(df)\n db.close()\n\n\ncaltech_dir = 'C:/cnnTest'\nimage_w = 128\nimage_h = 128\npixels = image_h * image_w * 3\nX = []\nfiles = os.listdir(caltech_dir)\nfor i in range(len(files)):\n files[i] = caltech_dir + '/' + files[i]\nfor f in files:\n img = Image.open(f)\n img = img.convert('RGB')\n img = img.resize((image_w, image_h))\n data = np.asarray(img)\n X.append(data)\nX = np.array(X)\n<mask token>\nmodel = load_model('C:/image/train/model/multi_img_classification.model')\nprediction = model.predict(X)\nnp.set_printoptions(formatter={'float': lambda x: '{0:0.3f}'.format(x)})\nprint('프로그램을 실행합니다..')\nprint('\\n')\nthisImg = os.listdir(caltech_dir)\ncnt = 0\nfor i in prediction:\n pre_ans = i.argmax()\n pre_ans_str = ''\n if pre_ans == 0:\n pre_ans_str = '연어회'\n elif pre_ans == 1:\n pre_ans_str = '쌀국수'\n elif pre_ans == 2:\n pre_ans_str = '샌드위치'\n else:\n pre_ans_str = '새우튀김'\n if i[0] >= 0.8:\n get_Image(thisImg[cnt])\n print(thisImg[cnt] + ' 이미지는 ' + pre_ans_str + '(으)로 추정됩니다.')\n get_DB_Nutrition(pre_ans_str)\n 
if i[1] >= 0.8:\n get_Image(thisImg[cnt])\n print(thisImg[cnt] + ' 이미지는 ' + pre_ans_str + '(으)로 추정됩니다.')\n get_DB_Nutrition(pre_ans_str)\n if i[2] >= 0.8:\n get_Image(thisImg[cnt])\n print(thisImg[cnt] + ' 이미지는 ' + pre_ans_str + '(으)로 추정됩니다.')\n get_DB_Nutrition(pre_ans_str)\n if i[3] >= 0.8:\n get_Image(thisImg[cnt])\n print(thisImg[cnt] + ' 이미지는 ' + pre_ans_str + '(으)로 추정됩니다.')\n get_DB_Nutrition(pre_ans_str)\n cnt += 1\ndrawing_plt()\n",
"step-4": "<mask token>\nfrom PIL import Image\nimport os, glob, numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport math\nimport cv2\nimport pymysql\nimport MySQLdb as mysql\n<mask token>\n\n\ndef drawing_plt():\n thisImg = os.listdir(caltech_dir)\n row = 4\n cols = int(math.ceil(len(thisImg) / 4))\n fig = plt.figure()\n i = 1\n for image in glob.glob('C:/cnnTest/*.jpg'):\n img = cv2.imread(image)\n subplot = fig.add_subplot(row, cols, i)\n subplot.imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))\n subplot.set_title(thisImg[i - 1])\n subplot.axis('off')\n i += 1\n print('\\t', '전체 이미지 리스트 ')\n plt.show()\n\n\ndef get_Image(str):\n imgPath = 'C:/cnnTest/'\n image = cv2.imread(imgPath + str)\n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n plt.imshow(image)\n plt.xticks([])\n plt.yticks([])\n plt.show()\n\n\ndef get_DB_Nutrition(str):\n db = pymysql.connect(host='localhost', user='yeha', password='', db=\n 'nutrition')\n cur = db.cursor()\n sql = (\n \"SELECT * FROM NUTRITION_INFO WHERE FOODNAME LIKE '음식명' OR FOODNAME LIKE %s\"\n )\n cur.execute(sql, str)\n data = cur.fetchall()\n df = pd.Series(data[0], data[1])\n print(df)\n db.close()\n\n\ncaltech_dir = 'C:/cnnTest'\nimage_w = 128\nimage_h = 128\npixels = image_h * image_w * 3\nX = []\nfiles = os.listdir(caltech_dir)\nfor i in range(len(files)):\n files[i] = caltech_dir + '/' + files[i]\nfor f in files:\n img = Image.open(f)\n img = img.convert('RGB')\n img = img.resize((image_w, image_h))\n data = np.asarray(img)\n X.append(data)\nX = np.array(X)\nfrom keras.models import load_model\nmodel = load_model('C:/image/train/model/multi_img_classification.model')\nprediction = model.predict(X)\nnp.set_printoptions(formatter={'float': lambda x: '{0:0.3f}'.format(x)})\nprint('프로그램을 실행합니다..')\nprint('\\n')\nthisImg = os.listdir(caltech_dir)\ncnt = 0\nfor i in prediction:\n pre_ans = i.argmax()\n pre_ans_str = ''\n if pre_ans == 0:\n pre_ans_str = '연어회'\n elif pre_ans == 1:\n pre_ans_str = '쌀국수'\n elif 
pre_ans == 2:\n pre_ans_str = '샌드위치'\n else:\n pre_ans_str = '새우튀김'\n if i[0] >= 0.8:\n get_Image(thisImg[cnt])\n print(thisImg[cnt] + ' 이미지는 ' + pre_ans_str + '(으)로 추정됩니다.')\n get_DB_Nutrition(pre_ans_str)\n if i[1] >= 0.8:\n get_Image(thisImg[cnt])\n print(thisImg[cnt] + ' 이미지는 ' + pre_ans_str + '(으)로 추정됩니다.')\n get_DB_Nutrition(pre_ans_str)\n if i[2] >= 0.8:\n get_Image(thisImg[cnt])\n print(thisImg[cnt] + ' 이미지는 ' + pre_ans_str + '(으)로 추정됩니다.')\n get_DB_Nutrition(pre_ans_str)\n if i[3] >= 0.8:\n get_Image(thisImg[cnt])\n print(thisImg[cnt] + ' 이미지는 ' + pre_ans_str + '(으)로 추정됩니다.')\n get_DB_Nutrition(pre_ans_str)\n cnt += 1\ndrawing_plt()\n",
"step-5": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Mar 12 20:29:49 2019\n\n@author: kzx789\n\"\"\"\n\nfrom PIL import Image\nimport os, glob, numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport math\nimport cv2\nimport pymysql\nimport MySQLdb as mysql\n\n\"\"\"\n#csv를 읽어서 영양정보 출력\ndef get_Nutrition(str) :\n nutrition = pd.read_csv('C:/식품영양정보/영양정보.csv') \n print(nutrition[nutrition['음식명'] == str])\n\"\"\" \n#사용된 전체 이미지 출력\ndef drawing_plt():\n thisImg = os.listdir(caltech_dir)\n row = 4\n cols = int(math.ceil(len(thisImg)/4)) #반올림\n fig = plt.figure()\n i = 1\n \n for image in glob.glob(\"C:/cnnTest/*.jpg\"): #glob를 사용해서 Test로 사용된 파일 가져오기\n img = cv2.imread(image)\n subplot = fig.add_subplot(row, cols, i)\n subplot.imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB)) #기본컬러\n subplot.set_title(thisImg[i-1]) #타이틀 붙이기\n subplot.axis(\"off\") \n i += 1\n print('\\t',\"전체 이미지 리스트 \")\n plt.show()\n\n#조건에 맞는 개별 이미지 출력\ndef get_Image(str):\n imgPath = 'C:/cnnTest/'\n image = cv2.imread(imgPath+str)\n image = cv2.cvtColor(image,cv2.COLOR_BGR2RGB)\n plt.imshow(image)\n plt.xticks([])\n plt.yticks([])\n plt.show()\n\n#데이터베이스에서 영양소 정보 가지고 오기\ndef get_DB_Nutrition(str):\n db = pymysql.connect(host=\"localhost\", user = \"yeha\", password=\"\", db=\"nutrition\")\n cur = db.cursor() #Connection에서 Cursor생성\n sql = \"SELECT * FROM NUTRITION_INFO WHERE FOODNAME LIKE '음식명' OR FOODNAME LIKE %s\"\n cur.execute(sql,(str))\n data = cur.fetchall() #정보 전부 가져오기\n df = pd.Series(data[0],data[1])\n print(df)\n db.close()\n\n\ncaltech_dir = \"C:/cnnTest\"\n\n#테스트할 데이터들을 128*128로 지정\nimage_w = 128\nimage_h = 128\npixels = image_h * image_w * 3 #픽셀 지정\n\nX = []\n#filenames = []\n\nfiles = os.listdir(caltech_dir) #하위 디렉터리 파일 리스트 구하기\n\n#print(files) #이미지 목록 확인 \n\nfor i in range(len(files)):\n files[i]=caltech_dir+'/'+ files[i]\n#print(files) \n\nfor f in files:\n img = Image.open(f)\n img = img.convert(\"RGB\")\n img = img.resize((image_w, image_h))\n data = 
np.asarray(img)\n # filenames.append(f)\n X.append(data)\n\nX = np.array(X)\n#print(X)\n\n#모델 불러오기\nfrom keras.models import load_model\n\nmodel = load_model(\"C:/image/train/model/multi_img_classification.model\")\nprediction = model.predict(X)\n#print(prediction)\n\nnp.set_printoptions(formatter={'float': lambda x: \"{0:0.3f}\".format(x)})\n\n\nprint('프로그램을 실행합니다..')\nprint('\\n')\nthisImg = os.listdir(caltech_dir)\ncnt = 0\n\nfor i in prediction:\n pre_ans = i.argmax() # 예측 레이블//가장 큰 번째 수\n #print(i)\n #print(pre_ans)\n pre_ans_str = ''\n if pre_ans == 0: pre_ans_str = \"연어회\"\n elif pre_ans == 1: pre_ans_str = \"쌀국수\"\n elif pre_ans == 2: pre_ans_str = \"샌드위치\"\n else: pre_ans_str = \"새우튀김\"\n\n if i[0] >= 0.8 : \n get_Image(thisImg[cnt])\n print(thisImg[cnt]+\" 이미지는 \"+pre_ans_str+\"(으)로 추정됩니다.\")\n #get_Nutrition(pre_ans_str) \n get_DB_Nutrition(pre_ans_str)\n\n if i[1] >= 0.8: \n get_Image(thisImg[cnt])\n print(thisImg[cnt]+\" 이미지는 \"+pre_ans_str+\"(으)로 추정됩니다.\")\n #get_Nutrition(pre_ans_str) \n get_DB_Nutrition(pre_ans_str)\n\n\n if i[2] >= 0.8: \n get_Image(thisImg[cnt])\n print(thisImg[cnt]+\" 이미지는 \"+pre_ans_str+\"(으)로 추정됩니다.\")\n #get_Nutrition(pre_ans_str) \n get_DB_Nutrition(pre_ans_str)\n\n if i[3] >= 0.8: \n get_Image(thisImg[cnt])\n print(thisImg[cnt]+\" 이미지는 \"+pre_ans_str+\"(으)로 추정됩니다.\")\n #get_Nutrition(pre_ans_str) \n get_DB_Nutrition(pre_ans_str)\n cnt += 1\n \ndrawing_plt()\n\n ",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
import configparser
# CONFIG
# Cluster, S3 and IAM settings are read from dwh.cfg alongside this file.
config = configparser.ConfigParser()
config.read('dwh.cfg')
# DROP TABLES
# Statements to reset the sparkifydb schema.  Schema-qualified names are
# used for the dimension tables; the staging/fact tables rely on the
# search path being set first (see set_search_path).
drop_schema="DROP SCHEMA IF EXISTS sparkifydb;"
set_search_path="SET SEARCH_PATH to sparkifydb;"
staging_events_table_drop = "DROP TABLE IF EXISTS staging_events;"
staging_songs_table_drop = "DROP TABLE IF EXISTS staging_songs;"
songplay_table_drop = "DROP TABLE IF EXISTS songplay;"
user_table_drop = "DROP TABLE IF EXISTS sparkifydb.users;"
song_table_drop ="DROP TABLE IF EXISTS sparkifydb.songs;"
artist_table_drop = "DROP TABLE IF EXISTS sparkifydb.artists;"
time_table_drop = "DROP TABLE IF EXISTS sparkifydb.time;"
#CREATE SCHEMA
create_sparkify_schema="CREATE SCHEMA IF NOT EXISTS sparkifydb;"
# CREATE TABLES
staging_events_table_create= ("""
CREATE TABLE staging_events
(
event_id int identity(0,1) SORTKEY,
artist_name text NULL DISTKEY,
auth text NULL,
firstName text NULL,
gender varchar(5) NULL,
itemInSession bigint NULL,
lastName text NULL,
length double precision NULL,
level text NULL,
location text NULL,
method text NULL,
page text NULL,
registration text NULL,
sessionId bigint NULL,
song text NULL,
status int NULL,
ts text NULL,
userAgent text NULL,
userId bigint
);
""")
staging_songs_table_create = ("""
CREATE TABLE staging_songs
(
num_songs int,
artist_id varchar(255) DISTKEY,
artist_latitude varchar(255) NULL,
artist_longitude varchar(255) NULL,
artist_location varchar(255) NULL,
artist_name text NOT NULL,
song_id varchar(255) SORTKEY NOT NULL,
title text NOT NULL,
duration double precision NOT NULL,
year int NULL
);
""")
songplay_table_create = ("""
CREATE TABLE songplay
(
songplay_id int identity(0,1) PRIMARY KEY SORTKEY NOT NULL,
start_time timestamp NOT NULL,
user_id text NOT NULL,
level text,
song_id text NOT NULL,
artist_id text NOT NULL DISTKEY,
session_id text,
location text,
user_agent text);
""")
user_table_create = ("""
CREATE TABLE users(
user_id bigint PRIMARY KEY SORTKEY NOT NULL ,
first_name text,
last_name text,
gender varchar(10),
level text
)diststyle all;
""")
song_table_create = ("""
CREATE TABLE songs(
song_id varchar(255) SORTKEY PRIMARY KEY NOT NULL,
artist_id text NOT NULL,
year int,
duration double precision,
level text
)diststyle all;
""")
artist_table_create = ("""
CREATE TABLE artists(
artist_id text PRIMARY KEY SORTKEY,
artist_name text,
location text,
lattitude text,
longitude text
) diststyle all;
""")
time_table_create = ("""
CREATE TABLE time(
start_time timestamp PRIMARY KEY SORTKEY,
hour int,
day int,
week int,
month int,
year int,
weekday int
) diststyle all;
""")
# STAGING TABLES
staging_events_copy = ("""copy staging_events from '{}'
credentials 'aws_iam_role={}'
compupdate off
region 'us-west-2'
JSON '{}'
""").format(config['S3']['LOG_DATA'],config['IAM_ROLE']['ARN'],config['S3']['LOG_JSONPATH'])
staging_songs_copy = ("""copy staging_songs from '{}'
credentials 'aws_iam_role={}'
compupdate off
region 'us-west-2'
JSON 'auto'
""").format(config['S3']['SONG_DATA'],config['IAM_ROLE']['ARN'])
# FINAL TABLES
songplay_table_insert = ("""
INSERT INTO songplay(start_time,user_id,level,song_id,artist_id,session_id,location,user_agent)
SELECT
TIMESTAMP 'epoch' + se.ts/1000 * INTERVAL '1 Second ' AS start_time,
se.userId AS user_id,
se.level AS level,
ss.song_id AS song_id,
ss.artist_id AS artist_id,
se.sessionId AS session_id,
ss.artist_location AS location,
se.userAgent AS user_agent
FROM staging_songs AS ss
JOIN staging_events AS se ON (ss.title=se.song AND ss.artist_name=se.artist_name)
AND
se.page = 'NextSong';
""")
user_table_insert = ("""
INSERT INTO users(user_id,first_name,last_name,gender,level)
SELECT DISTINCT(s.userId) AS user_id,
s.firstName AS first_name,
s.lastName AS last_name,
s.gender AS gender,
s.level AS level
FROM
staging_events as s
WHERE s.page = 'NextSong'
""")
song_table_insert = ("""
INSERT INTO songs (song_id,artist_id,year, duration)
SELECT DISTINCT(ss.song_id) AS song_id,
ss.artist_id AS artist_id,
ss.year AS year,
ss.duration AS duration
FROM
staging_songs AS ss
""")
artist_table_insert = ("""
INSERT INTO artists (artist_id,artist_name,location,lattitude,longitude)
SELECT DISTINCT(s.artist_id) AS artist_id,
s.artist_name AS artist_name,
s.artist_location AS location,
s.artist_latitude AS lattitude,
s.artist_longitude AS longitude
FROM
staging_songs AS s;
""")
time_table_insert = ("""
INSERT INTO time (start_time,hour,day,week,month,year,weekday)
SELECT DISTINCT(TIMESTAMP 'epoch' + s.ts/1000 * INTERVAL '1 Second ') AS start_time,
EXTRACT(HOUR from start_time) AS hour,
EXTRACT(DAY from start_time) AS day,
EXTRACT(WEEK from start_time) AS week,
EXTRACT(MONTH from start_time) AS month,
EXTRACT(YEAR from start_time) AS year,
EXTRACT(DOW from start_time) AS weekday
FROM
staging_events AS s
WHERE
s.page = 'NextSong';
""")
# QUERY LISTS
# Executed in order by the ETL driver scripts; each list starts with
# SET SEARCH_PATH so unqualified table names resolve to sparkifydb.
create_table_queries =[set_search_path,songplay_table_create, user_table_create, song_table_create, artist_table_create, time_table_create,staging_events_table_create,staging_songs_table_create]
# NOTE: the "drop" list deliberately begins by creating the schema so the
# subsequent SET SEARCH_PATH cannot fail on a fresh cluster.
drop_table_queries = [create_sparkify_schema,set_search_path,staging_events_table_drop, staging_songs_table_drop, songplay_table_drop, user_table_drop, song_table_drop, artist_table_drop, time_table_drop]
copy_table_queries = [set_search_path,staging_events_copy, staging_songs_copy]
# songplay_table_insert runs last: it reads both staging tables.
insert_table_queries = [set_search_path,user_table_insert, song_table_insert, artist_table_insert, time_table_insert,songplay_table_insert]
|
normal
|
{
"blob_id": "652918e09a3506869c939be39b71a06467459f8a",
"index": 5992,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nconfig.read('dwh.cfg')\n<mask token>\n",
"step-3": "<mask token>\nconfig = configparser.ConfigParser()\nconfig.read('dwh.cfg')\ndrop_schema = 'DROP SCHEMA IF EXISTS sparkifydb;'\nset_search_path = 'SET SEARCH_PATH to sparkifydb;'\nstaging_events_table_drop = 'DROP TABLE IF EXISTS staging_events;'\nstaging_songs_table_drop = 'DROP TABLE IF EXISTS staging_songs;'\nsongplay_table_drop = 'DROP TABLE IF EXISTS songplay;'\nuser_table_drop = 'DROP TABLE IF EXISTS sparkifydb.users;'\nsong_table_drop = 'DROP TABLE IF EXISTS sparkifydb.songs;'\nartist_table_drop = 'DROP TABLE IF EXISTS sparkifydb.artists;'\ntime_table_drop = 'DROP TABLE IF EXISTS sparkifydb.time;'\ncreate_sparkify_schema = 'CREATE SCHEMA IF NOT EXISTS sparkifydb;'\nstaging_events_table_create = \"\"\"\nCREATE TABLE staging_events\n(\nevent_id int identity(0,1) SORTKEY,\nartist_name text NULL DISTKEY,\nauth text NULL,\nfirstName text NULL,\ngender varchar(5) NULL,\nitemInSession bigint NULL,\nlastName text NULL,\nlength double precision NULL,\nlevel text NULL,\nlocation text NULL,\nmethod text NULL,\npage text NULL,\nregistration text NULL,\nsessionId bigint NULL,\nsong text NULL,\nstatus int NULL,\nts text NULL,\nuserAgent text NULL,\nuserId bigint \n);\n\"\"\"\nstaging_songs_table_create = \"\"\"\nCREATE TABLE staging_songs\n(\nnum_songs int,\nartist_id varchar(255) DISTKEY,\nartist_latitude varchar(255) NULL,\nartist_longitude varchar(255) NULL,\nartist_location varchar(255) NULL,\nartist_name text NOT NULL,\nsong_id varchar(255) SORTKEY NOT NULL,\ntitle text NOT NULL,\nduration double precision NOT NULL,\nyear int NULL\n);\n\"\"\"\nsongplay_table_create = \"\"\"\nCREATE TABLE songplay\n(\nsongplay_id int identity(0,1) PRIMARY KEY SORTKEY NOT NULL, \nstart_time timestamp NOT NULL, \nuser_id text NOT NULL, \nlevel text, \nsong_id text NOT NULL, \nartist_id text NOT NULL DISTKEY, \nsession_id text, \nlocation text, \nuser_agent text);\n\"\"\"\nuser_table_create = \"\"\"\nCREATE TABLE users(\nuser_id bigint PRIMARY KEY SORTKEY NOT NULL ,\nfirst_name 
text,\nlast_name text, \ngender varchar(10),\nlevel text\n)diststyle all;\n\"\"\"\nsong_table_create = \"\"\"\nCREATE TABLE songs(\nsong_id varchar(255) SORTKEY PRIMARY KEY NOT NULL,\nartist_id text NOT NULL,\nyear int, \nduration double precision,\nlevel text\n)diststyle all;\n\"\"\"\nartist_table_create = \"\"\"\nCREATE TABLE artists(\nartist_id text PRIMARY KEY SORTKEY, \nartist_name text, \nlocation text, \nlattitude text, \nlongitude text\n) diststyle all;\n\n\"\"\"\ntime_table_create = \"\"\"\nCREATE TABLE time(\nstart_time timestamp PRIMARY KEY SORTKEY,\nhour int,\nday int,\nweek int,\nmonth int,\nyear int,\nweekday int\n) diststyle all;\n\"\"\"\nstaging_events_copy = (\n \"\"\"copy staging_events from '{}'\n credentials 'aws_iam_role={}'\n compupdate off \n region 'us-west-2'\n JSON '{}'\n\"\"\"\n .format(config['S3']['LOG_DATA'], config['IAM_ROLE']['ARN'], config[\n 'S3']['LOG_JSONPATH']))\nstaging_songs_copy = (\n \"\"\"copy staging_songs from '{}'\n credentials 'aws_iam_role={}'\n compupdate off \n region 'us-west-2' \n JSON 'auto'\n\"\"\"\n .format(config['S3']['SONG_DATA'], config['IAM_ROLE']['ARN']))\nsongplay_table_insert = \"\"\"\nINSERT INTO songplay(start_time,user_id,level,song_id,artist_id,session_id,location,user_agent)\n\nSELECT\n TIMESTAMP 'epoch' + se.ts/1000 * INTERVAL '1 Second ' AS start_time,\n se.userId AS user_id,\n se.level AS level,\n ss.song_id AS song_id,\n ss.artist_id AS artist_id,\n se.sessionId AS session_id,\n ss.artist_location AS location,\n se.userAgent AS user_agent\nFROM staging_songs AS ss \nJOIN staging_events AS se ON (ss.title=se.song AND ss.artist_name=se.artist_name)\nAND\n se.page = 'NextSong';\n \n\"\"\"\nuser_table_insert = \"\"\"\nINSERT INTO users(user_id,first_name,last_name,gender,level)\n\nSELECT DISTINCT(s.userId) AS user_id,\n s.firstName AS first_name,\n s.lastName AS last_name,\n s.gender AS gender,\n s.level AS level\n\nFROM\n staging_events as s\nWHERE s.page = 'NextSong' \n\n\"\"\"\nsong_table_insert 
= \"\"\"\nINSERT INTO songs (song_id,artist_id,year, duration)\n\nSELECT DISTINCT(ss.song_id) AS song_id,\n ss.artist_id AS artist_id,\n ss.year AS year,\n ss.duration AS duration\nFROM\n staging_songs AS ss\n\n\"\"\"\nartist_table_insert = \"\"\"\nINSERT INTO artists (artist_id,artist_name,location,lattitude,longitude)\n\nSELECT DISTINCT(s.artist_id) AS artist_id,\n s.artist_name AS artist_name,\n s.artist_location AS location,\n s.artist_latitude AS lattitude,\n s.artist_longitude AS longitude\nFROM\n staging_songs AS s;\n\"\"\"\ntime_table_insert = \"\"\"\nINSERT INTO time (start_time,hour,day,week,month,year,weekday)\n\nSELECT DISTINCT(TIMESTAMP 'epoch' + s.ts/1000 * INTERVAL '1 Second ') AS start_time,\n EXTRACT(HOUR from start_time) AS hour,\n EXTRACT(DAY from start_time) AS day,\n EXTRACT(WEEK from start_time) AS week,\n EXTRACT(MONTH from start_time) AS month,\n EXTRACT(YEAR from start_time) AS year,\n EXTRACT(DOW from start_time) AS weekday\nFROM \n staging_events AS s\nWHERE \n s.page = 'NextSong'; \n\n\"\"\"\ncreate_table_queries = [set_search_path, songplay_table_create,\n user_table_create, song_table_create, artist_table_create,\n time_table_create, staging_events_table_create, staging_songs_table_create]\ndrop_table_queries = [create_sparkify_schema, set_search_path,\n staging_events_table_drop, staging_songs_table_drop,\n songplay_table_drop, user_table_drop, song_table_drop,\n artist_table_drop, time_table_drop]\ncopy_table_queries = [set_search_path, staging_events_copy, staging_songs_copy]\ninsert_table_queries = [set_search_path, user_table_insert,\n song_table_insert, artist_table_insert, time_table_insert,\n songplay_table_insert]\n",
"step-4": "import configparser\nconfig = configparser.ConfigParser()\nconfig.read('dwh.cfg')\ndrop_schema = 'DROP SCHEMA IF EXISTS sparkifydb;'\nset_search_path = 'SET SEARCH_PATH to sparkifydb;'\nstaging_events_table_drop = 'DROP TABLE IF EXISTS staging_events;'\nstaging_songs_table_drop = 'DROP TABLE IF EXISTS staging_songs;'\nsongplay_table_drop = 'DROP TABLE IF EXISTS songplay;'\nuser_table_drop = 'DROP TABLE IF EXISTS sparkifydb.users;'\nsong_table_drop = 'DROP TABLE IF EXISTS sparkifydb.songs;'\nartist_table_drop = 'DROP TABLE IF EXISTS sparkifydb.artists;'\ntime_table_drop = 'DROP TABLE IF EXISTS sparkifydb.time;'\ncreate_sparkify_schema = 'CREATE SCHEMA IF NOT EXISTS sparkifydb;'\nstaging_events_table_create = \"\"\"\nCREATE TABLE staging_events\n(\nevent_id int identity(0,1) SORTKEY,\nartist_name text NULL DISTKEY,\nauth text NULL,\nfirstName text NULL,\ngender varchar(5) NULL,\nitemInSession bigint NULL,\nlastName text NULL,\nlength double precision NULL,\nlevel text NULL,\nlocation text NULL,\nmethod text NULL,\npage text NULL,\nregistration text NULL,\nsessionId bigint NULL,\nsong text NULL,\nstatus int NULL,\nts text NULL,\nuserAgent text NULL,\nuserId bigint \n);\n\"\"\"\nstaging_songs_table_create = \"\"\"\nCREATE TABLE staging_songs\n(\nnum_songs int,\nartist_id varchar(255) DISTKEY,\nartist_latitude varchar(255) NULL,\nartist_longitude varchar(255) NULL,\nartist_location varchar(255) NULL,\nartist_name text NOT NULL,\nsong_id varchar(255) SORTKEY NOT NULL,\ntitle text NOT NULL,\nduration double precision NOT NULL,\nyear int NULL\n);\n\"\"\"\nsongplay_table_create = \"\"\"\nCREATE TABLE songplay\n(\nsongplay_id int identity(0,1) PRIMARY KEY SORTKEY NOT NULL, \nstart_time timestamp NOT NULL, \nuser_id text NOT NULL, \nlevel text, \nsong_id text NOT NULL, \nartist_id text NOT NULL DISTKEY, \nsession_id text, \nlocation text, \nuser_agent text);\n\"\"\"\nuser_table_create = \"\"\"\nCREATE TABLE users(\nuser_id bigint PRIMARY KEY SORTKEY NOT NULL 
,\nfirst_name text,\nlast_name text, \ngender varchar(10),\nlevel text\n)diststyle all;\n\"\"\"\nsong_table_create = \"\"\"\nCREATE TABLE songs(\nsong_id varchar(255) SORTKEY PRIMARY KEY NOT NULL,\nartist_id text NOT NULL,\nyear int, \nduration double precision,\nlevel text\n)diststyle all;\n\"\"\"\nartist_table_create = \"\"\"\nCREATE TABLE artists(\nartist_id text PRIMARY KEY SORTKEY, \nartist_name text, \nlocation text, \nlattitude text, \nlongitude text\n) diststyle all;\n\n\"\"\"\ntime_table_create = \"\"\"\nCREATE TABLE time(\nstart_time timestamp PRIMARY KEY SORTKEY,\nhour int,\nday int,\nweek int,\nmonth int,\nyear int,\nweekday int\n) diststyle all;\n\"\"\"\nstaging_events_copy = (\n \"\"\"copy staging_events from '{}'\n credentials 'aws_iam_role={}'\n compupdate off \n region 'us-west-2'\n JSON '{}'\n\"\"\"\n .format(config['S3']['LOG_DATA'], config['IAM_ROLE']['ARN'], config[\n 'S3']['LOG_JSONPATH']))\nstaging_songs_copy = (\n \"\"\"copy staging_songs from '{}'\n credentials 'aws_iam_role={}'\n compupdate off \n region 'us-west-2' \n JSON 'auto'\n\"\"\"\n .format(config['S3']['SONG_DATA'], config['IAM_ROLE']['ARN']))\nsongplay_table_insert = \"\"\"\nINSERT INTO songplay(start_time,user_id,level,song_id,artist_id,session_id,location,user_agent)\n\nSELECT\n TIMESTAMP 'epoch' + se.ts/1000 * INTERVAL '1 Second ' AS start_time,\n se.userId AS user_id,\n se.level AS level,\n ss.song_id AS song_id,\n ss.artist_id AS artist_id,\n se.sessionId AS session_id,\n ss.artist_location AS location,\n se.userAgent AS user_agent\nFROM staging_songs AS ss \nJOIN staging_events AS se ON (ss.title=se.song AND ss.artist_name=se.artist_name)\nAND\n se.page = 'NextSong';\n \n\"\"\"\nuser_table_insert = \"\"\"\nINSERT INTO users(user_id,first_name,last_name,gender,level)\n\nSELECT DISTINCT(s.userId) AS user_id,\n s.firstName AS first_name,\n s.lastName AS last_name,\n s.gender AS gender,\n s.level AS level\n\nFROM\n staging_events as s\nWHERE s.page = 'NextSong' 
\n\n\"\"\"\nsong_table_insert = \"\"\"\nINSERT INTO songs (song_id,artist_id,year, duration)\n\nSELECT DISTINCT(ss.song_id) AS song_id,\n ss.artist_id AS artist_id,\n ss.year AS year,\n ss.duration AS duration\nFROM\n staging_songs AS ss\n\n\"\"\"\nartist_table_insert = \"\"\"\nINSERT INTO artists (artist_id,artist_name,location,lattitude,longitude)\n\nSELECT DISTINCT(s.artist_id) AS artist_id,\n s.artist_name AS artist_name,\n s.artist_location AS location,\n s.artist_latitude AS lattitude,\n s.artist_longitude AS longitude\nFROM\n staging_songs AS s;\n\"\"\"\ntime_table_insert = \"\"\"\nINSERT INTO time (start_time,hour,day,week,month,year,weekday)\n\nSELECT DISTINCT(TIMESTAMP 'epoch' + s.ts/1000 * INTERVAL '1 Second ') AS start_time,\n EXTRACT(HOUR from start_time) AS hour,\n EXTRACT(DAY from start_time) AS day,\n EXTRACT(WEEK from start_time) AS week,\n EXTRACT(MONTH from start_time) AS month,\n EXTRACT(YEAR from start_time) AS year,\n EXTRACT(DOW from start_time) AS weekday\nFROM \n staging_events AS s\nWHERE \n s.page = 'NextSong'; \n\n\"\"\"\ncreate_table_queries = [set_search_path, songplay_table_create,\n user_table_create, song_table_create, artist_table_create,\n time_table_create, staging_events_table_create, staging_songs_table_create]\ndrop_table_queries = [create_sparkify_schema, set_search_path,\n staging_events_table_drop, staging_songs_table_drop,\n songplay_table_drop, user_table_drop, song_table_drop,\n artist_table_drop, time_table_drop]\ncopy_table_queries = [set_search_path, staging_events_copy, staging_songs_copy]\ninsert_table_queries = [set_search_path, user_table_insert,\n song_table_insert, artist_table_insert, time_table_insert,\n songplay_table_insert]\n",
"step-5": "import configparser\n\n\n# CONFIG\nconfig = configparser.ConfigParser()\nconfig.read('dwh.cfg')\n\n# DROP TABLES\ndrop_schema=\"DROP SCHEMA IF EXISTS sparkifydb;\"\nset_search_path=\"SET SEARCH_PATH to sparkifydb;\"\nstaging_events_table_drop = \"DROP TABLE IF EXISTS staging_events;\"\nstaging_songs_table_drop = \"DROP TABLE IF EXISTS staging_songs;\"\nsongplay_table_drop = \"DROP TABLE IF EXISTS songplay;\"\nuser_table_drop = \"DROP TABLE IF EXISTS sparkifydb.users;\"\nsong_table_drop =\"DROP TABLE IF EXISTS sparkifydb.songs;\"\nartist_table_drop = \"DROP TABLE IF EXISTS sparkifydb.artists;\"\ntime_table_drop = \"DROP TABLE IF EXISTS sparkifydb.time;\"\n\n#CREATE SCHEMA\n\ncreate_sparkify_schema=\"CREATE SCHEMA IF NOT EXISTS sparkifydb;\"\n\n# CREATE TABLES\n\nstaging_events_table_create= (\"\"\"\nCREATE TABLE staging_events\n(\nevent_id int identity(0,1) SORTKEY,\nartist_name text NULL DISTKEY,\nauth text NULL,\nfirstName text NULL,\ngender varchar(5) NULL,\nitemInSession bigint NULL,\nlastName text NULL,\nlength double precision NULL,\nlevel text NULL,\nlocation text NULL,\nmethod text NULL,\npage text NULL,\nregistration text NULL,\nsessionId bigint NULL,\nsong text NULL,\nstatus int NULL,\nts text NULL,\nuserAgent text NULL,\nuserId bigint \n);\n\"\"\")\n\nstaging_songs_table_create = (\"\"\"\nCREATE TABLE staging_songs\n(\nnum_songs int,\nartist_id varchar(255) DISTKEY,\nartist_latitude varchar(255) NULL,\nartist_longitude varchar(255) NULL,\nartist_location varchar(255) NULL,\nartist_name text NOT NULL,\nsong_id varchar(255) SORTKEY NOT NULL,\ntitle text NOT NULL,\nduration double precision NOT NULL,\nyear int NULL\n);\n\"\"\")\n\nsongplay_table_create = (\"\"\"\nCREATE TABLE songplay\n(\nsongplay_id int identity(0,1) PRIMARY KEY SORTKEY NOT NULL, \nstart_time timestamp NOT NULL, \nuser_id text NOT NULL, \nlevel text, \nsong_id text NOT NULL, \nartist_id text NOT NULL DISTKEY, \nsession_id text, \nlocation text, \nuser_agent 
text);\n\"\"\")\n\nuser_table_create = (\"\"\"\nCREATE TABLE users(\nuser_id bigint PRIMARY KEY SORTKEY NOT NULL ,\nfirst_name text,\nlast_name text, \ngender varchar(10),\nlevel text\n)diststyle all;\n\"\"\")\n\nsong_table_create = (\"\"\"\nCREATE TABLE songs(\nsong_id varchar(255) SORTKEY PRIMARY KEY NOT NULL,\nartist_id text NOT NULL,\nyear int, \nduration double precision,\nlevel text\n)diststyle all;\n\"\"\")\n\nartist_table_create = (\"\"\"\nCREATE TABLE artists(\nartist_id text PRIMARY KEY SORTKEY, \nartist_name text, \nlocation text, \nlattitude text, \nlongitude text\n) diststyle all;\n\n\"\"\")\n\ntime_table_create = (\"\"\"\nCREATE TABLE time(\nstart_time timestamp PRIMARY KEY SORTKEY,\nhour int,\nday int,\nweek int,\nmonth int,\nyear int,\nweekday int\n) diststyle all;\n\"\"\")\n\n# STAGING TABLES\n\nstaging_events_copy = (\"\"\"copy staging_events from '{}'\n credentials 'aws_iam_role={}'\n compupdate off \n region 'us-west-2'\n JSON '{}'\n\"\"\").format(config['S3']['LOG_DATA'],config['IAM_ROLE']['ARN'],config['S3']['LOG_JSONPATH'])\n\nstaging_songs_copy = (\"\"\"copy staging_songs from '{}'\n credentials 'aws_iam_role={}'\n compupdate off \n region 'us-west-2' \n JSON 'auto'\n\"\"\").format(config['S3']['SONG_DATA'],config['IAM_ROLE']['ARN'])\n\n# FINAL TABLES\n\nsongplay_table_insert = (\"\"\"\nINSERT INTO songplay(start_time,user_id,level,song_id,artist_id,session_id,location,user_agent)\n\nSELECT\n TIMESTAMP 'epoch' + se.ts/1000 * INTERVAL '1 Second ' AS start_time,\n se.userId AS user_id,\n se.level AS level,\n ss.song_id AS song_id,\n ss.artist_id AS artist_id,\n se.sessionId AS session_id,\n ss.artist_location AS location,\n se.userAgent AS user_agent\nFROM staging_songs AS ss \nJOIN staging_events AS se ON (ss.title=se.song AND ss.artist_name=se.artist_name)\nAND\n se.page = 'NextSong';\n \n\"\"\")\n\nuser_table_insert = (\"\"\"\nINSERT INTO users(user_id,first_name,last_name,gender,level)\n\nSELECT DISTINCT(s.userId) AS user_id,\n s.firstName 
AS first_name,\n s.lastName AS last_name,\n s.gender AS gender,\n s.level AS level\n\nFROM\n staging_events as s\nWHERE s.page = 'NextSong' \n\n\"\"\")\n\nsong_table_insert = (\"\"\"\nINSERT INTO songs (song_id,artist_id,year, duration)\n\nSELECT DISTINCT(ss.song_id) AS song_id,\n ss.artist_id AS artist_id,\n ss.year AS year,\n ss.duration AS duration\nFROM\n staging_songs AS ss\n\n\"\"\")\n\nartist_table_insert = (\"\"\"\nINSERT INTO artists (artist_id,artist_name,location,lattitude,longitude)\n\nSELECT DISTINCT(s.artist_id) AS artist_id,\n s.artist_name AS artist_name,\n s.artist_location AS location,\n s.artist_latitude AS lattitude,\n s.artist_longitude AS longitude\nFROM\n staging_songs AS s;\n\"\"\")\n\ntime_table_insert = (\"\"\"\nINSERT INTO time (start_time,hour,day,week,month,year,weekday)\n\nSELECT DISTINCT(TIMESTAMP 'epoch' + s.ts/1000 * INTERVAL '1 Second ') AS start_time,\n EXTRACT(HOUR from start_time) AS hour,\n EXTRACT(DAY from start_time) AS day,\n EXTRACT(WEEK from start_time) AS week,\n EXTRACT(MONTH from start_time) AS month,\n EXTRACT(YEAR from start_time) AS year,\n EXTRACT(DOW from start_time) AS weekday\nFROM \n staging_events AS s\nWHERE \n s.page = 'NextSong'; \n\n\"\"\")\n\n# QUERY LISTS\n\ncreate_table_queries =[set_search_path,songplay_table_create, user_table_create, song_table_create, artist_table_create, time_table_create,staging_events_table_create,staging_songs_table_create]\n\ndrop_table_queries = [create_sparkify_schema,set_search_path,staging_events_table_drop, staging_songs_table_drop, songplay_table_drop, user_table_drop, song_table_drop, artist_table_drop, time_table_drop]\n\ncopy_table_queries = [set_search_path,staging_events_copy, staging_songs_copy]\n\ninsert_table_queries = [set_search_path,user_table_insert, song_table_insert, artist_table_insert, time_table_insert,songplay_table_insert]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# -*- coding: utf-8 -*-
"""
Created on Mon May 2 17:24:00 2016
@author: pasca
"""
# -*- coding: utf-8 -*-
import os.path as op
from nipype.utils.filemanip import split_filename as split_f
from nipype.interfaces.base import BaseInterface, BaseInterfaceInputSpec
from nipype.interfaces.base import traits, File, TraitedSpec
from neuropype_ephy.compute_inv_problem import compute_ROIs_inv_sol
from neuropype_ephy.preproc import create_reject_dict
from mne import find_events, compute_covariance, pick_types, write_cov, Epochs
from mne.io import Raw
class InverseSolutionConnInputSpec(BaseInterfaceInputSpec):
sbj_id = traits.String(desc='subject id', mandatory=True)
sbj_dir = traits.Directory(exists=True, desc='Freesurfer main directory',
mandatory=True)
raw_filename = traits.File(exists=True, desc='raw filename', mandatory=True)
cov_filename = traits.File(exists=True, desc='Noise Covariance matrix',
mandatory=True)
fwd_filename = traits.File(exists=True, desc='LF matrix', mandatory=True)
is_epoched = traits.Bool(desc='if true raw data will be epoched',
mandatory=False)
events_id = traits.Dict(None, desc='the id of all events to consider.', mandatory=False)
event_id = traits.Int(None, desc='the id of the event to consider.', mandatory=False)
t_min = traits.Float(None, desc='start time before event', mandatory=False)
t_max = traits.Float(None, desc='end time after event', mandatory=False)
is_evoked = traits.Bool(desc='if true if we want to analyze evoked data',
mandatory=False)
inv_method = traits.String(desc='possible inverse methods are \
sLORETA, MNE, dSPM', mandatory=True)
snr = traits.Float(1.0, usedefault=True, desc='use smaller SNR for \
raw data', mandatory=False)
parc = traits.String('aparc', usedefault=True,
desc='the parcellation to use: aparc vs aparc.a2009s',
mandatory=False)
aseg = traits.Bool(desc='if true sub structures will be considered',
mandatory=False)
aseg_labels = traits.List(desc='list of substructures in the src space',
mandatory=False)
is_blind = traits.Bool(desc='if in the source space there are ROI removed',
mandatory=False)
labels_removed = traits.List(desc='list of label we consider in the blind case',
mandatory=False)
class InverseSolutionConnOutputSpec(TraitedSpec):
ts_file = File(exists=False, desc='source reconstruction in .npy format')
labels = File(exists=False, desc='labels file in pickle format')
label_names = File(exists=False, desc='labels name file in txt format')
label_coords = File(exists=False, desc='labels coords file in txt format')
class InverseSolution(BaseInterface):
"""
Compute the inverse solution on raw data considering N_r regions in source
space based on a FreeSurfer cortical parcellation
"""
input_spec = InverseSolutionConnInputSpec
output_spec = InverseSolutionConnOutputSpec
def _run_interface(self, runtime):
sbj_id = self.inputs.sbj_id
sbj_dir = self.inputs.sbj_dir
raw_filename = self.inputs.raw_filename
cov_filename = self.inputs.cov_filename
fwd_filename = self.inputs.fwd_filename
is_epoched = self.inputs.is_epoched
event_id = self.inputs.event_id
t_min = self.inputs.t_min
t_max = self.inputs.t_max
is_evoked = self.inputs.is_evoked
events_id = self.inputs.events_id
inv_method = self.inputs.inv_method
snr = self.inputs.snr
parc = self.inputs.parc
aseg = self.inputs.aseg
aseg_labels = self.inputs.aseg_labels
is_blind = self.inputs.is_blind
labels_removed = self.inputs.labels_removed
self.ts_file, self.labels , self.label_names, self.label_coords= compute_ROIs_inv_sol(raw_filename, sbj_id, sbj_dir,
fwd_filename,
cov_filename,
is_epoched,
event_id, t_min, t_max,
is_evoked,
events_id,
snr, inv_method, parc,
aseg, aseg_labels,
is_blind, labels_removed)
return runtime
def _list_outputs(self):
outputs = self._outputs().get()
outputs['ts_file'] = self.ts_file
outputs['labels'] = self.labels
outputs['label_names'] = self.label_names
outputs['label_coords'] = self.label_coords
return outputs
class NoiseCovarianceConnInputSpec(BaseInterfaceInputSpec):
cov_fname_in = traits.File(exists=False, desc='file name for Noise Covariance Matrix')
raw_filename = traits.File(exists=True, desc='raw data filename')
is_epoched = traits.Bool(desc='if true if we want to epoch the data',
mandatory=False)
is_evoked = traits.Bool(desc='if true if we want to analyze evoked data',
mandatory=False)
events_id = traits.Dict(None, desc='the id of all events to consider.', mandatory=False)
t_min = traits.Float(None, desc='start time before event', mandatory=False)
t_max = traits.Float(None, desc='end time after event', mandatory=False)
class NoiseCovarianceConnOutputSpec(TraitedSpec):
cov_fname_out = File(exists=False, desc='LF matrix')
class NoiseCovariance(BaseInterface):
"""
Compute the inverse solution on raw data considering N_r regions in source
space based on a FreeSurfer cortical parcellation
"""
input_spec = NoiseCovarianceConnInputSpec
output_spec = NoiseCovarianceConnOutputSpec
def _run_interface(self, runtime):
raw_filename = self.inputs.raw_filename
cov_fname_in = self.inputs.cov_fname_in
is_epoched = self.inputs.is_epoched
is_evoked = self.inputs.is_evoked
events_id = self.inputs.events_id
t_min = self.inputs.t_min
t_max = self.inputs.t_max
if cov_fname_in == '' or not op.exists(cov_fname_in):
if is_epoched and is_evoked:
raw = Raw(raw_filename)
events = find_events(raw)
data_path, basename, ext = split_f(raw.info['filename'])
self.cov_fname_out = op.join(data_path, '%s-cov.fif' % basename)
if not op.exists(self.cov_fname_out):
print '\n*** COMPUTE COV FROM EPOCHS ***\n' + self.cov_fname_out
reject = create_reject_dict(raw.info)
picks = pick_types(raw.info, meg=True, ref_meg=False,
exclude='bads')
epochs = Epochs(raw, events, events_id, t_min, t_max,
picks=picks, baseline=(None, 0),
reject=reject)
# TODO method='auto'? too long!!!
noise_cov = compute_covariance(epochs, tmax=0,
method='diagonal_fixed')
write_cov(self.cov_fname_out, noise_cov)
else:
print '\n *** NOISE cov file %s exists!!! \n' % self.cov_fname_out
else:
'\n *** NO EPOCH DATA \n'
else:
print '\n *** NOISE cov file %s exists!!! \n' % cov_fname_in
self.cov_fname_out = cov_fname_in
return runtime
def _list_outputs(self):
outputs = self._outputs().get()
outputs['cov_fname_out'] = self.cov_fname_out
return outputs
|
normal
|
{
"blob_id": "d9cdcf64042c3c6c4b45ec0e3334ba756dd43fcd",
"index": 5066,
"step-1": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon May 2 17:24:00 2016\n\n@author: pasca\n\"\"\"\n\n# -*- coding: utf-8 -*-\nimport os.path as op\n\nfrom nipype.utils.filemanip import split_filename as split_f\n\nfrom nipype.interfaces.base import BaseInterface, BaseInterfaceInputSpec\nfrom nipype.interfaces.base import traits, File, TraitedSpec\n\nfrom neuropype_ephy.compute_inv_problem import compute_ROIs_inv_sol\nfrom neuropype_ephy.preproc import create_reject_dict\nfrom mne import find_events, compute_covariance, pick_types, write_cov, Epochs\nfrom mne.io import Raw\n\n\nclass InverseSolutionConnInputSpec(BaseInterfaceInputSpec):\n\n sbj_id = traits.String(desc='subject id', mandatory=True)\n\n sbj_dir = traits.Directory(exists=True, desc='Freesurfer main directory',\n mandatory=True)\n\n raw_filename = traits.File(exists=True, desc='raw filename', mandatory=True)\n\n cov_filename = traits.File(exists=True, desc='Noise Covariance matrix',\n mandatory=True)\n\n fwd_filename = traits.File(exists=True, desc='LF matrix', mandatory=True)\n\n is_epoched = traits.Bool(desc='if true raw data will be epoched',\n mandatory=False)\n \n events_id = traits.Dict(None, desc='the id of all events to consider.', mandatory=False) \n \n event_id = traits.Int(None, desc='the id of the event to consider.', mandatory=False)\n \n t_min = traits.Float(None, desc='start time before event', mandatory=False)\n\n t_max = traits.Float(None, desc='end time after event', mandatory=False)\n \n is_evoked = traits.Bool(desc='if true if we want to analyze evoked data',\n mandatory=False)\n\n inv_method = traits.String(desc='possible inverse methods are \\\n sLORETA, MNE, dSPM', mandatory=True)\n\n snr = traits.Float(1.0, usedefault=True, desc='use smaller SNR for \\\n raw data', mandatory=False)\n\n parc = traits.String('aparc', usedefault=True,\n desc='the parcellation to use: aparc vs aparc.a2009s',\n mandatory=False)\n\n aseg = traits.Bool(desc='if true sub structures will be 
considered',\n mandatory=False)\n\n aseg_labels = traits.List(desc='list of substructures in the src space',\n mandatory=False)\n\n is_blind = traits.Bool(desc='if in the source space there are ROI removed',\n mandatory=False)\n\n labels_removed = traits.List(desc='list of label we consider in the blind case',\n mandatory=False)\n\n\nclass InverseSolutionConnOutputSpec(TraitedSpec):\n\n ts_file = File(exists=False, desc='source reconstruction in .npy format')\n labels = File(exists=False, desc='labels file in pickle format')\n label_names = File(exists=False, desc='labels name file in txt format')\n label_coords = File(exists=False, desc='labels coords file in txt format')\n\n\nclass InverseSolution(BaseInterface):\n \"\"\"\n Compute the inverse solution on raw data considering N_r regions in source\n space based on a FreeSurfer cortical parcellation\n \"\"\"\n input_spec = InverseSolutionConnInputSpec\n output_spec = InverseSolutionConnOutputSpec\n\n def _run_interface(self, runtime):\n\n sbj_id = self.inputs.sbj_id\n sbj_dir = self.inputs.sbj_dir\n raw_filename = self.inputs.raw_filename\n cov_filename = self.inputs.cov_filename\n fwd_filename = self.inputs.fwd_filename\n is_epoched = self.inputs.is_epoched\n event_id = self.inputs.event_id\n t_min = self.inputs.t_min\n t_max = self.inputs.t_max\n is_evoked = self.inputs.is_evoked\n events_id = self.inputs.events_id\n inv_method = self.inputs.inv_method\n snr = self.inputs.snr\n parc = self.inputs.parc\n aseg = self.inputs.aseg\n aseg_labels = self.inputs.aseg_labels\n is_blind = self.inputs.is_blind\n labels_removed = self.inputs.labels_removed\n\n self.ts_file, self.labels , self.label_names, self.label_coords= compute_ROIs_inv_sol(raw_filename, sbj_id, sbj_dir,\n fwd_filename,\n cov_filename,\n is_epoched,\n event_id, t_min, t_max,\n is_evoked,\n events_id,\n snr, inv_method, parc,\n aseg, aseg_labels,\n is_blind, labels_removed)\n\n return runtime\n\n def _list_outputs(self):\n\n outputs = 
self._outputs().get()\n\n outputs['ts_file'] = self.ts_file\n outputs['labels'] = self.labels\n outputs['label_names'] = self.label_names\n outputs['label_coords'] = self.label_coords\n\n return outputs\n\n\nclass NoiseCovarianceConnInputSpec(BaseInterfaceInputSpec):\n\n cov_fname_in = traits.File(exists=False, desc='file name for Noise Covariance Matrix')\n\n raw_filename = traits.File(exists=True, desc='raw data filename')\n\n is_epoched = traits.Bool(desc='if true if we want to epoch the data',\n mandatory=False)\n\n is_evoked = traits.Bool(desc='if true if we want to analyze evoked data',\n mandatory=False)\n\n events_id = traits.Dict(None, desc='the id of all events to consider.', mandatory=False)\n\n t_min = traits.Float(None, desc='start time before event', mandatory=False)\n\n t_max = traits.Float(None, desc='end time after event', mandatory=False)\n\n\nclass NoiseCovarianceConnOutputSpec(TraitedSpec):\n\n cov_fname_out = File(exists=False, desc='LF matrix')\n\n\nclass NoiseCovariance(BaseInterface):\n \"\"\"\n Compute the inverse solution on raw data considering N_r regions in source\n space based on a FreeSurfer cortical parcellation\n \"\"\"\n input_spec = NoiseCovarianceConnInputSpec\n output_spec = NoiseCovarianceConnOutputSpec\n\n def _run_interface(self, runtime):\n\n raw_filename = self.inputs.raw_filename\n cov_fname_in = self.inputs.cov_fname_in\n is_epoched = self.inputs.is_epoched\n is_evoked = self.inputs.is_evoked\n events_id = self.inputs.events_id\n t_min = self.inputs.t_min\n t_max = self.inputs.t_max\n\n if cov_fname_in == '' or not op.exists(cov_fname_in):\n\n if is_epoched and is_evoked:\n raw = Raw(raw_filename)\n events = find_events(raw)\n\n data_path, basename, ext = split_f(raw.info['filename'])\n self.cov_fname_out = op.join(data_path, '%s-cov.fif' % basename)\n\n if not op.exists(self.cov_fname_out):\n print '\\n*** COMPUTE COV FROM EPOCHS ***\\n' + self.cov_fname_out\n\n reject = create_reject_dict(raw.info)\n \n picks = 
pick_types(raw.info, meg=True, ref_meg=False,\n exclude='bads')\n\n epochs = Epochs(raw, events, events_id, t_min, t_max,\n picks=picks, baseline=(None, 0),\n reject=reject)\n\n # TODO method='auto'? too long!!!\n noise_cov = compute_covariance(epochs, tmax=0,\n method='diagonal_fixed')\n write_cov(self.cov_fname_out, noise_cov)\n else:\n print '\\n *** NOISE cov file %s exists!!! \\n' % self.cov_fname_out\n else:\n '\\n *** NO EPOCH DATA \\n'\n\n else:\n print '\\n *** NOISE cov file %s exists!!! \\n' % cov_fname_in\n self.cov_fname_out = cov_fname_in\n\n return runtime\n\n def _list_outputs(self):\n\n outputs = self._outputs().get()\n\n outputs['cov_fname_out'] = self.cov_fname_out\n\n return outputs\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
<|reserved_special_token_0|>
def create_sensor_input_file(rad, chunk_n):
    """Write the sensor points of chunk ``chunk_n`` to a Radiance ``.pts`` file.

    Serializes the sensor positions/normals stored on ``rad`` via
    ``py2radiance.write_rad.sensor_file`` and records the resulting file path
    on ``rad.sensor_file_path``.

    :param rad: py2radiance run object exposing ``data_folder_path``,
        ``sensor_positions`` and ``sensor_normals``; mutated in place.
    :param chunk_n: chunk index, used to name the output file.
    """
    sensor_file_path = os.path.join(rad.data_folder_path, 'points_' + str(
        chunk_n) + '.pts')
    sensor_pts_data = py2radiance.write_rad.sensor_file(rad.
        sensor_positions, rad.sensor_normals)
    # Context manager guarantees the handle is closed even if the write fails
    # (the original left the file open when sensor_file.write raised).
    with open(sensor_file_path, 'w') as sensor_file:
        sensor_file.write(sensor_pts_data)
    rad.sensor_file_path = sensor_file_path
def generate_sensor_surfaces(occface, wall_dim, roof_dim, srf_type,
    orientation, normal, intersection):
    """Subdivide one building face into a grid of sensor patches.

    The face is nudged 1 cm along its normal so the sensors sit just off the
    surface, then gridded with the wall or roof spacing depending on
    ``srf_type``.  Per-patch metadata (direction, centroid, type, area,
    orientation, intersection flag) is returned as six parallel lists.
    """
    centroid = py3dmodel.calculate.face_midpt(occface)
    # Move the face slightly along its normal so sensors do not lie inside it.
    offset_pt = py3dmodel.modify.move_pt(centroid, normal, 0.01)
    offset_face = py3dmodel.fetch.topo2topotype(py3dmodel.modify.move(
        centroid, offset_pt, occface))
    grid_dim = roof_dim if srf_type == 'roofs' else wall_dim
    patches = py3dmodel.construct.grid_face(offset_face, grid_dim, grid_dim)
    n_patches = len(patches)
    sensor_intersection = [intersection] * n_patches
    sensor_dir = [normal] * n_patches
    sensor_cord = [py3dmodel.calculate.face_midpt(patch) for patch in patches]
    sensor_type = [srf_type] * n_patches
    sensor_orientation = [orientation] * n_patches
    # Discount the area of patches flagged as intersecting other geometry.
    sensor_area = [calculate.face_area(patch) * (1.0 - blocked) for patch,
        blocked in zip(patches, sensor_intersection)]
    return (sensor_dir, sensor_cord, sensor_type, sensor_area,
        sensor_orientation, sensor_intersection)
<|reserved_special_token_0|>
def calc_sensors_zone(building_names, locator, grid_size, geometry_pickle_dir):
    """Generate sensor grids for every building in the zone.

    For each building, loads its pickled geometry, grids all surfaces into
    sensors via ``calc_sensors_building``, writes a per-building metadata CSV
    through ``locator``, and accumulates zone-wide sensor lists.

    Returns a 6-tuple: flat sensor coordinates, flat sensor directions,
    per-building sensor counts, building names, per-building sensor codes and
    per-building intersection flags.
    """
    sensors_coords_zone = []
    sensors_dir_zone = []
    sensors_total_number_list = []
    names_zone = []
    sensors_code_zone = []
    sensor_intersection_zone = []
    for building_name in building_names:
        geometry = BuildingGeometry.load(os.path.join(geometry_pickle_dir,
            'zone', building_name))
        dirs, coords, srf_types, areas, orientations, intersections = (
            calc_sensors_building(geometry, grid_size))
        n_sensors = len(coords)
        codes = ['srf' + str(i) for i in range(n_sensors)]
        sensors_total_number_list.append(n_sensors)
        sensors_code_zone.append(codes)
        sensors_coords_zone.extend(coords)
        sensors_dir_zone.extend(dirs)
        sensor_intersection_zone.append(intersections)
        names_zone.append(building_name)
        # Persist the per-sensor metadata so results can be mapped back later.
        metadata = pd.DataFrame({'BUILDING': building_name, 'SURFACE': codes,
            'orientation': orientations, 'intersection': intersections,
            'Xcoor': [pt[0] for pt in coords],
            'Ycoor': [pt[1] for pt in coords],
            'Zcoor': [pt[2] for pt in coords],
            'Xdir': [vec[0] for vec in dirs],
            'Ydir': [vec[1] for vec in dirs],
            'Zdir': [vec[2] for vec in dirs],
            'AREA_m2': areas, 'TYPE': srf_types})
        metadata.to_csv(locator.get_radiation_metadata(building_name),
            index=None)
    return (sensors_coords_zone, sensors_dir_zone,
        sensors_total_number_list, names_zone, sensors_code_zone,
        sensor_intersection_zone)
def isolation_daysim(chunk_n, cea_daysim, building_names, locator,
    radiance_parameters, write_sensor_data, grid_size, max_global,
    weatherfile, geometry_pickle_dir):
    """Run a Daysim solar-irradiation simulation for one chunk of buildings.

    Creates a Daysim project, sends all sensor points of the chunk, executes
    the gen_dc/ds_illum pipeline, cleans the hourly results and writes them
    to disk per building (aggregated CSV always; per-sensor JSON optionally).

    :param chunk_n: index of this chunk (used to name the Daysim project).
    :param cea_daysim: CEA Daysim wrapper used to create and run the project.
    :param building_names: buildings simulated in this chunk.
    :param locator: CEA locator used to resolve output file paths.
    :param radiance_parameters: dict of Radiance parameters (rad_ab, rad_ad, ...).
    :param write_sensor_data: if True, also dump per-sensor results to JSON.
    :param grid_size: dict with 'walls_grid' and 'roof_grid' sensor spacing.
    :param max_global: upper bound used to clip implausible sensor values.
    :param weatherfile: weather data; its 'date' column indexes the outputs.
    :param geometry_pickle_dir: folder holding the pickled building geometries.
    """
    daysim_project = cea_daysim.initialize_daysim_project('chunk_{n}'.
        format(n=chunk_n))
    print('Creating daysim project in: {daysim_dir}'.format(daysim_dir=
        daysim_project.project_path))
    print('Calculating and sending sensor points')
    (sensors_coords_zone, sensors_dir_zone, sensors_number_zone, names_zone,
        sensors_code_zone, sensor_intersection_zone) = (calc_sensors_zone(
        building_names, locator, grid_size, geometry_pickle_dir))
    num_sensors = sum(sensors_number_zone)
    daysim_project.create_sensor_input_file(sensors_coords_zone,
        sensors_dir_zone, num_sensors, 'w/m2')
    print('Starting Daysim simulation for buildings: {buildings}'.format(
        buildings=names_zone))
    print('Total number of sensors: {num_sensors}'.format(num_sensors=
        num_sensors))
    print('Writing radiance parameters')
    daysim_project.write_radiance_parameters(radiance_parameters['rad_ab'],
        radiance_parameters['rad_ad'], radiance_parameters['rad_as'],
        radiance_parameters['rad_ar'], radiance_parameters['rad_aa'],
        radiance_parameters['rad_lr'], radiance_parameters['rad_st'],
        radiance_parameters['rad_sj'], radiance_parameters['rad_lw'],
        radiance_parameters['rad_dj'], radiance_parameters['rad_ds'],
        radiance_parameters['rad_dr'], radiance_parameters['rad_dp'])
    print('Executing hourly solar isolation calculation')
    daysim_project.execute_gen_dc()
    daysim_project.execute_ds_illum()
    print('Reading results...')
    solar_res = daysim_project.eval_ill()
    print('Fixing inconsistencies, if any')
    # Clamp negative or implausibly large readings to [0, max_global].
    solar_res = np.clip(solar_res, a_min=0.0, a_max=max_global)
    # A leap-year weather file yields 24 extra hourly columns; drop Feb 29
    # (hours 1416-1439, i.e. 59 days * 24 h into the year).
    if solar_res.shape[1] == HOURS_IN_YEAR + 24:
        print('Removing leap day')
        leap_day_hours = range(1416, 1440)
        solar_res = np.delete(solar_res, leap_day_hours, axis=1)
    print('Writing results to disk')
    index = 0
    for building_name, sensors_number_building, sensor_code_building, sensor_intersection_building in zip(
        names_zone, sensors_number_zone, sensors_code_zone,
        sensor_intersection_zone):
        # Slice this building's rows out of the chunk-wide result matrix.
        selection_of_results = solar_res[index:index + sensors_number_building]
        # Sensors flagged as intersecting other geometry receive no radiation.
        selection_of_results[np.array(sensor_intersection_building) == 1] = 0
        items_sensor_name_and_result = dict(zip(sensor_code_building,
            selection_of_results.tolist()))
        index = index + sensors_number_building
        write_aggregated_results(building_name,
            items_sensor_name_and_result, locator, weatherfile)
        if write_sensor_data:
            write_sensor_results(building_name,
                items_sensor_name_and_result, locator)
    print('Removing results folder')
    daysim_project.cleanup_project()
def write_sensor_results(building_name, items_sensor_name_and_result, locator):
    """Dump per-sensor hourly irradiation results for one building as JSON."""
    output_path = locator.get_radiation_building_sensors(building_name)
    with open(output_path, 'w') as json_file:
        json_file.write(json.dumps(items_sensor_name_and_result))
def write_aggregated_results(building_name, items_sensor_name_and_result,
    locator, weatherfile):
    """Aggregate per-sensor irradiation into per-surface-group hourly totals.

    Reads this building's sensor metadata CSV, groups sensors by
    ``TYPE_orientation`` (e.g. ``walls_east``), computes the area-weighted
    hourly sum in kW plus the group area in m2, and writes the result as a
    date-indexed CSV via ``locator``.
    """
    metadata = pd.read_csv(locator.get_radiation_metadata(building_name))
    metadata['code'] = metadata['TYPE'] + '_' + metadata['orientation'] + '_kW'
    surface_groups = [('windows_east_kW', 'windows_east_m2'), (
        'windows_west_kW', 'windows_west_m2'), ('windows_south_kW',
        'windows_south_m2'), ('windows_north_kW', 'windows_north_m2'), (
        'walls_east_kW', 'walls_east_m2'), ('walls_west_kW',
        'walls_west_m2'), ('walls_south_kW', 'walls_south_m2'), (
        'walls_north_kW', 'walls_north_m2'), ('roofs_top_kW', 'roofs_top_m2')]
    aggregated = {}
    for kw_field, area_field in surface_groups:
        group = metadata.loc[metadata['code'] == kw_field].set_index('SURFACE')
        # Area-weighted sum of the hourly series of every sensor in the group.
        weighted = np.array([(group.loc[srf, 'AREA_m2'] * np.array(
            items_sensor_name_and_result[srf])) for srf in group.index]).sum(
            axis=0)
        aggregated[kw_field] = weighted / 1000
        aggregated[area_field] = group['AREA_m2'].sum()
    hourly = pd.DataFrame(aggregated).round(2)
    hourly['Date'] = weatherfile['date']
    hourly.set_index('Date', inplace=True)
    hourly.to_csv(locator.get_radiation_building(building_name))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def create_sensor_input_file(rad, chunk_n):
    """Write the sensor points of chunk ``chunk_n`` to a Radiance ``.pts`` file.

    Serializes the sensor positions/normals stored on ``rad`` via
    ``py2radiance.write_rad.sensor_file`` and records the resulting file path
    on ``rad.sensor_file_path``.

    :param rad: py2radiance run object exposing ``data_folder_path``,
        ``sensor_positions`` and ``sensor_normals``; mutated in place.
    :param chunk_n: chunk index, used to name the output file.
    """
    sensor_file_path = os.path.join(rad.data_folder_path, 'points_' + str(
        chunk_n) + '.pts')
    sensor_pts_data = py2radiance.write_rad.sensor_file(rad.
        sensor_positions, rad.sensor_normals)
    # Context manager guarantees the handle is closed even if the write fails
    # (the original left the file open when sensor_file.write raised).
    with open(sensor_file_path, 'w') as sensor_file:
        sensor_file.write(sensor_pts_data)
    rad.sensor_file_path = sensor_file_path
def generate_sensor_surfaces(occface, wall_dim, roof_dim, srf_type,
    orientation, normal, intersection):
    """Subdivide one building face into a grid of sensor patches.

    The face is nudged 1 cm along its normal so the sensors sit just off the
    surface, then gridded with the wall or roof spacing depending on
    ``srf_type``.  Per-patch metadata (direction, centroid, type, area,
    orientation, intersection flag) is returned as six parallel lists.
    """
    centroid = py3dmodel.calculate.face_midpt(occface)
    # Move the face slightly along its normal so sensors do not lie inside it.
    offset_pt = py3dmodel.modify.move_pt(centroid, normal, 0.01)
    offset_face = py3dmodel.fetch.topo2topotype(py3dmodel.modify.move(
        centroid, offset_pt, occface))
    grid_dim = roof_dim if srf_type == 'roofs' else wall_dim
    patches = py3dmodel.construct.grid_face(offset_face, grid_dim, grid_dim)
    n_patches = len(patches)
    sensor_intersection = [intersection] * n_patches
    sensor_dir = [normal] * n_patches
    sensor_cord = [py3dmodel.calculate.face_midpt(patch) for patch in patches]
    sensor_type = [srf_type] * n_patches
    sensor_orientation = [orientation] * n_patches
    # Discount the area of patches flagged as intersecting other geometry.
    sensor_area = [calculate.face_area(patch) * (1.0 - blocked) for patch,
        blocked in zip(patches, sensor_intersection)]
    return (sensor_dir, sensor_cord, sensor_type, sensor_area,
        sensor_orientation, sensor_intersection)
def calc_sensors_building(building_geometry, grid_size):
    """Grid every surface of one building into sensor patches.

    Walks the building's walls, windows and roofs, resolves the per-face
    orientation/normal/intersection data (roofs are always horizontal and
    unobstructed; windows are never flagged as intersecting), and collects the
    per-sensor metadata produced by ``generate_sensor_surfaces`` into six
    parallel lists.
    """
    sensor_dir_list = []
    sensor_cord_list = []
    sensor_type_list = []
    sensor_area_list = []
    sensor_orientation_list = []
    sensor_intersection_list = []
    wall_grid = grid_size['walls_grid']
    roof_grid = grid_size['roof_grid']
    for srf_type in ('walls', 'windows', 'roofs'):
        faces = getattr(building_geometry, srf_type)
        if srf_type == 'roofs':
            # Roofs are treated as horizontal, upward-facing and unobstructed.
            orientations = ['top'] * len(faces)
            normals = [(0.0, 0.0, 1.0)] * len(faces)
            intersections = [0] * len(faces)
        else:
            orientations = getattr(building_geometry,
                'orientation_{srf_type}'.format(srf_type=srf_type))
            normals = getattr(building_geometry, 'normals_{srf_type}'.
                format(srf_type=srf_type))
            if srf_type == 'windows':
                # Windows carry no intersection information.
                intersections = [0] * len(faces)
            else:
                intersections = getattr(building_geometry,
                    'intersect_{srf_type}'.format(srf_type=srf_type))
        for orientation, normal, face, intersection in zip(orientations,
            normals, faces, intersections):
            dirs, cords, types_, areas, orients, inters = (
                generate_sensor_surfaces(face, wall_grid, roof_grid,
                srf_type, orientation, normal, intersection))
            sensor_intersection_list.extend(inters)
            sensor_dir_list.extend(dirs)
            sensor_cord_list.extend(cords)
            sensor_type_list.extend(types_)
            sensor_area_list.extend(areas)
            sensor_orientation_list.extend(orients)
    return (sensor_dir_list, sensor_cord_list, sensor_type_list,
        sensor_area_list, sensor_orientation_list, sensor_intersection_list)
def calc_sensors_zone(building_names, locator, grid_size, geometry_pickle_dir):
    """Generate sensor grids for every building in the zone.

    For each building, loads its pickled geometry, grids all surfaces into
    sensors via ``calc_sensors_building``, writes a per-building metadata CSV
    through ``locator``, and accumulates zone-wide sensor lists.

    Returns a 6-tuple: flat sensor coordinates, flat sensor directions,
    per-building sensor counts, building names, per-building sensor codes and
    per-building intersection flags.
    """
    sensors_coords_zone = []
    sensors_dir_zone = []
    sensors_total_number_list = []
    names_zone = []
    sensors_code_zone = []
    sensor_intersection_zone = []
    for building_name in building_names:
        geometry = BuildingGeometry.load(os.path.join(geometry_pickle_dir,
            'zone', building_name))
        dirs, coords, srf_types, areas, orientations, intersections = (
            calc_sensors_building(geometry, grid_size))
        n_sensors = len(coords)
        codes = ['srf' + str(i) for i in range(n_sensors)]
        sensors_total_number_list.append(n_sensors)
        sensors_code_zone.append(codes)
        sensors_coords_zone.extend(coords)
        sensors_dir_zone.extend(dirs)
        sensor_intersection_zone.append(intersections)
        names_zone.append(building_name)
        # Persist the per-sensor metadata so results can be mapped back later.
        metadata = pd.DataFrame({'BUILDING': building_name, 'SURFACE': codes,
            'orientation': orientations, 'intersection': intersections,
            'Xcoor': [pt[0] for pt in coords],
            'Ycoor': [pt[1] for pt in coords],
            'Zcoor': [pt[2] for pt in coords],
            'Xdir': [vec[0] for vec in dirs],
            'Ydir': [vec[1] for vec in dirs],
            'Zdir': [vec[2] for vec in dirs],
            'AREA_m2': areas, 'TYPE': srf_types})
        metadata.to_csv(locator.get_radiation_metadata(building_name),
            index=None)
    return (sensors_coords_zone, sensors_dir_zone,
        sensors_total_number_list, names_zone, sensors_code_zone,
        sensor_intersection_zone)
def isolation_daysim(chunk_n, cea_daysim, building_names, locator,
    radiance_parameters, write_sensor_data, grid_size, max_global,
    weatherfile, geometry_pickle_dir):
    """Run a Daysim solar-irradiation simulation for one chunk of buildings.

    Creates a Daysim project, sends all sensor points of the chunk, executes
    the gen_dc/ds_illum pipeline, cleans the hourly results and writes them
    to disk per building (aggregated CSV always; per-sensor JSON optionally).

    :param chunk_n: index of this chunk (used to name the Daysim project).
    :param cea_daysim: CEA Daysim wrapper used to create and run the project.
    :param building_names: buildings simulated in this chunk.
    :param locator: CEA locator used to resolve output file paths.
    :param radiance_parameters: dict of Radiance parameters (rad_ab, rad_ad, ...).
    :param write_sensor_data: if True, also dump per-sensor results to JSON.
    :param grid_size: dict with 'walls_grid' and 'roof_grid' sensor spacing.
    :param max_global: upper bound used to clip implausible sensor values.
    :param weatherfile: weather data; its 'date' column indexes the outputs.
    :param geometry_pickle_dir: folder holding the pickled building geometries.
    """
    daysim_project = cea_daysim.initialize_daysim_project('chunk_{n}'.
        format(n=chunk_n))
    print('Creating daysim project in: {daysim_dir}'.format(daysim_dir=
        daysim_project.project_path))
    print('Calculating and sending sensor points')
    (sensors_coords_zone, sensors_dir_zone, sensors_number_zone, names_zone,
        sensors_code_zone, sensor_intersection_zone) = (calc_sensors_zone(
        building_names, locator, grid_size, geometry_pickle_dir))
    num_sensors = sum(sensors_number_zone)
    daysim_project.create_sensor_input_file(sensors_coords_zone,
        sensors_dir_zone, num_sensors, 'w/m2')
    print('Starting Daysim simulation for buildings: {buildings}'.format(
        buildings=names_zone))
    print('Total number of sensors: {num_sensors}'.format(num_sensors=
        num_sensors))
    print('Writing radiance parameters')
    daysim_project.write_radiance_parameters(radiance_parameters['rad_ab'],
        radiance_parameters['rad_ad'], radiance_parameters['rad_as'],
        radiance_parameters['rad_ar'], radiance_parameters['rad_aa'],
        radiance_parameters['rad_lr'], radiance_parameters['rad_st'],
        radiance_parameters['rad_sj'], radiance_parameters['rad_lw'],
        radiance_parameters['rad_dj'], radiance_parameters['rad_ds'],
        radiance_parameters['rad_dr'], radiance_parameters['rad_dp'])
    print('Executing hourly solar isolation calculation')
    daysim_project.execute_gen_dc()
    daysim_project.execute_ds_illum()
    print('Reading results...')
    solar_res = daysim_project.eval_ill()
    print('Fixing inconsistencies, if any')
    # Clamp negative or implausibly large readings to [0, max_global].
    solar_res = np.clip(solar_res, a_min=0.0, a_max=max_global)
    # A leap-year weather file yields 24 extra hourly columns; drop Feb 29
    # (hours 1416-1439, i.e. 59 days * 24 h into the year).
    if solar_res.shape[1] == HOURS_IN_YEAR + 24:
        print('Removing leap day')
        leap_day_hours = range(1416, 1440)
        solar_res = np.delete(solar_res, leap_day_hours, axis=1)
    print('Writing results to disk')
    index = 0
    for building_name, sensors_number_building, sensor_code_building, sensor_intersection_building in zip(
        names_zone, sensors_number_zone, sensors_code_zone,
        sensor_intersection_zone):
        # Slice this building's rows out of the chunk-wide result matrix.
        selection_of_results = solar_res[index:index + sensors_number_building]
        # Sensors flagged as intersecting other geometry receive no radiation.
        selection_of_results[np.array(sensor_intersection_building) == 1] = 0
        items_sensor_name_and_result = dict(zip(sensor_code_building,
            selection_of_results.tolist()))
        index = index + sensors_number_building
        write_aggregated_results(building_name,
            items_sensor_name_and_result, locator, weatherfile)
        if write_sensor_data:
            write_sensor_results(building_name,
                items_sensor_name_and_result, locator)
    print('Removing results folder')
    daysim_project.cleanup_project()
def write_sensor_results(building_name, items_sensor_name_and_result, locator):
    """Dump per-sensor hourly irradiation results for one building as JSON."""
    output_path = locator.get_radiation_building_sensors(building_name)
    with open(output_path, 'w') as json_file:
        json_file.write(json.dumps(items_sensor_name_and_result))
def write_aggregated_results(building_name, items_sensor_name_and_result,
                             locator, weatherfile):
    """Aggregate per-sensor results to building level and write them to CSV.

    Per-sensor results are area-weighted, summed per surface type/orientation,
    converted to kW, and stored next to the matching surface areas. Rows are
    indexed by the weather-file dates.
    """
    geometry = pd.read_csv(locator.get_radiation_metadata(building_name))
    # e.g. TYPE='windows', orientation='east' -> code 'windows_east_kW'
    geometry['code'] = geometry['TYPE'] + '_' + geometry['orientation'] + '_kW'

    solar_analysis_fields = ['windows_east_kW', 'windows_west_kW',
                             'windows_south_kW', 'windows_north_kW',
                             'walls_east_kW', 'walls_west_kW',
                             'walls_south_kW', 'walls_north_kW', 'roofs_top_kW']
    solar_analysis_fields_area = ['windows_east_m2', 'windows_west_m2',
                                  'windows_south_m2', 'windows_north_m2',
                                  'walls_east_m2', 'walls_west_m2',
                                  'walls_south_m2', 'walls_north_m2',
                                  'roofs_top_m2']

    aggregated = {}
    for field, field_area in zip(solar_analysis_fields, solar_analysis_fields_area):
        sensors = geometry.loc[geometry['code'] == field].set_index('SURFACE')
        # area-weighted sum over every sensor of this surface group
        weighted = np.array([sensors.loc[surface, 'AREA_m2']
                             * np.array(items_sensor_name_and_result[surface])
                             for surface in sensors.index]).sum(axis=0)
        aggregated[field] = weighted / 1000  # W -> kW
        aggregated[field_area] = sensors['AREA_m2'].sum()

    data_aggregated_kW = pd.DataFrame(aggregated).round(2)
    data_aggregated_kW['Date'] = weatherfile['date']
    data_aggregated_kW.set_index('Date', inplace=True)
    data_aggregated_kW.to_csv(locator.get_radiation_building(building_name))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
__author__ = 'Jimeno A. Fonseca'
__copyright__ = (
'Copyright 2017, Architecture and Building Systems - ETH Zurich')
__credits__ = ['Jimeno A. Fonseca', 'Kian Wee Chen']
__license__ = 'MIT'
__version__ = '0.1'
__maintainer__ = 'Daren Thomas'
__email__ = '[email protected]'
__status__ = 'Production'
<|reserved_special_token_0|>
suppress_3rd_party_debug_loggers()
def create_sensor_input_file(rad, chunk_n):
    """Write the radiance sensor-point file for this chunk and record its path.

    Parameters
    ----------
    rad :
        Radiance scene object; supplies ``data_folder_path``,
        ``sensor_positions`` and ``sensor_normals`` and receives the
        resulting ``sensor_file_path`` attribute.
    chunk_n : int
        Chunk number used to disambiguate the output file name.
    """
    sensor_file_path = os.path.join(rad.data_folder_path,
                                    'points_' + str(chunk_n) + '.pts')
    sensor_pts_data = py2radiance.write_rad.sensor_file(rad.sensor_positions,
                                                        rad.sensor_normals)
    # Context manager guarantees the handle is closed even if the write
    # fails (the original open()/close() pair leaked it on error).
    with open(sensor_file_path, 'w') as sensor_file:
        sensor_file.write(sensor_pts_data)
    rad.sensor_file_path = sensor_file_path
def generate_sensor_surfaces(occface, wall_dim, roof_dim, srf_type,
                             orientation, normal, intersection):
    """Subdivide one building face into a grid of sensor patches.

    The face is first nudged 1 cm along its normal so the sensors sit just
    off the surface. Returns parallel per-sensor lists: direction, centroid,
    surface type, effective area, orientation and intersection flag.
    """
    midpoint = py3dmodel.calculate.face_midpt(occface)
    offset_point = py3dmodel.modify.move_pt(midpoint, normal, 0.01)
    offset_face = py3dmodel.fetch.topo2topotype(
        py3dmodel.modify.move(midpoint, offset_point, occface))

    # Roofs use the horizontal grid spacing, every other surface the vertical one.
    grid_dim = roof_dim if srf_type == 'roofs' else wall_dim
    patches = py3dmodel.construct.grid_face(offset_face, grid_dim, grid_dim)

    intersections = [intersection] * len(patches)
    directions = [normal] * len(patches)
    centroids = [py3dmodel.calculate.face_midpt(patch) for patch in patches]
    types = [srf_type] * len(patches)
    orientations = [orientation] * len(patches)
    # Effective area is discounted by the (0..1) intersection scalar.
    areas = [calculate.face_area(patch) * (1.0 - scalar)
             for patch, scalar in zip(patches, intersections)]
    return directions, centroids, types, areas, orientations, intersections
def calc_sensors_building(building_geometry, grid_size):
    """Generate sensor properties for every surface of one building.

    Walls and windows are gridded with ``grid_size['walls_grid']``, roofs
    with ``grid_size['roof_grid']``. Returns six parallel lists: directions,
    coordinates, surface types, areas, orientations, intersection flags.
    """
    directions, coords, types = [], [], []
    areas, orientations, intersections = [], [], []
    wall_grid = grid_size['walls_grid']
    roof_grid = grid_size['roof_grid']

    for srf_type in ('walls', 'windows', 'roofs'):
        faces = getattr(building_geometry, srf_type)
        if srf_type == 'roofs':
            # Roofs always face straight up and are never intersected.
            face_orientations = ['top'] * len(faces)
            face_normals = [(0.0, 0.0, 1.0)] * len(faces)
            face_intersections = [0] * len(faces)
        else:
            face_orientations = getattr(
                building_geometry, 'orientation_{srf_type}'.format(srf_type=srf_type))
            face_normals = getattr(
                building_geometry, 'normals_{srf_type}'.format(srf_type=srf_type))
            if srf_type == 'windows':
                # Windows carry no intersection information.
                face_intersections = [0] * len(faces)
            else:
                face_intersections = getattr(
                    building_geometry, 'intersect_{srf_type}'.format(srf_type=srf_type))

        for face_orientation, face_normal, face, face_intersection in zip(
                face_orientations, face_normals, faces, face_intersections):
            (sensor_dir, sensor_cord, sensor_type, sensor_area,
             sensor_orientation, sensor_intersection) = generate_sensor_surfaces(
                face, wall_grid, roof_grid, srf_type, face_orientation,
                face_normal, face_intersection)
            intersections.extend(sensor_intersection)
            directions.extend(sensor_dir)
            coords.extend(sensor_cord)
            types.extend(sensor_type)
            areas.extend(sensor_area)
            orientations.extend(sensor_orientation)

    return directions, coords, types, areas, orientations, intersections
def calc_sensors_zone(building_names, locator, grid_size, geometry_pickle_dir):
    """Collect sensor points for every building in the zone.

    Writes each building's per-sensor metadata CSV via
    ``locator.get_radiation_metadata`` and accumulates coordinates and
    directions for the Daysim run. Returns flattened coordinates and
    directions plus per-building sensor counts, names, sensor codes and
    intersection flags.
    """
    coords_zone = []
    dirs_zone = []
    counts = []
    names = []
    codes_zone = []
    intersections_zone = []
    for building_name in building_names:
        building_geometry = BuildingGeometry.load(
            os.path.join(geometry_pickle_dir, 'zone', building_name))
        # per-building sensors
        (dirs_building, coords_building, types_building, areas_building,
         orientations_building, intersections_building) = calc_sensors_building(
            building_geometry, grid_size)

        n_sensors = len(coords_building)
        sensor_codes = ['srf' + str(i) for i in range(n_sensors)]

        counts.append(n_sensors)
        codes_zone.append(sensor_codes)
        coords_zone.extend(coords_building)
        dirs_zone.extend(dirs_building)
        intersections_zone.append(intersections_building)
        names.append(building_name)

        # persist per-sensor geometry so results can be mapped back later
        pd.DataFrame({'BUILDING': building_name,
                      'SURFACE': sensor_codes,
                      'orientation': orientations_building,
                      'intersection': intersections_building,
                      'Xcoor': [xyz[0] for xyz in coords_building],
                      'Ycoor': [xyz[1] for xyz in coords_building],
                      'Zcoor': [xyz[2] for xyz in coords_building],
                      'Xdir': [xyz[0] for xyz in dirs_building],
                      'Ydir': [xyz[1] for xyz in dirs_building],
                      'Zdir': [xyz[2] for xyz in dirs_building],
                      'AREA_m2': areas_building,
                      'TYPE': types_building}).to_csv(
            locator.get_radiation_metadata(building_name), index=None)
    return coords_zone, dirs_zone, counts, names, codes_zone, intersections_zone
def isolation_daysim(chunk_n, cea_daysim, building_names, locator,
                     radiance_parameters, write_sensor_data, grid_size,
                     max_global, weatherfile, geometry_pickle_dir):
    """Run the hourly Daysim solar-isolation simulation for one chunk of buildings.

    Creates a throw-away Daysim project, sends all sensor points, runs the
    simulation, clips/cleans the results and writes aggregated (and optionally
    per-sensor) results to disk, then removes the project folder.
    """
    # per-chunk daysim project
    daysim_project = cea_daysim.initialize_daysim_project('chunk_{n}'.format(n=chunk_n))
    print('Creating daysim project in: {daysim_dir}'.format(
        daysim_dir=daysim_project.project_path))

    print('Calculating and sending sensor points')
    (sensors_coords_zone, sensors_dir_zone, sensors_number_zone, names_zone,
     sensors_code_zone, sensor_intersection_zone) = calc_sensors_zone(
        building_names, locator, grid_size, geometry_pickle_dir)
    num_sensors = sum(sensors_number_zone)
    daysim_project.create_sensor_input_file(
        sensors_coords_zone, sensors_dir_zone, num_sensors, 'w/m2')

    print('Starting Daysim simulation for buildings: {buildings}'.format(
        buildings=names_zone))
    print('Total number of sensors: {num_sensors}'.format(num_sensors=num_sensors))

    print('Writing radiance parameters')
    daysim_project.write_radiance_parameters(
        radiance_parameters['rad_ab'], radiance_parameters['rad_ad'],
        radiance_parameters['rad_as'], radiance_parameters['rad_ar'],
        radiance_parameters['rad_aa'], radiance_parameters['rad_lr'],
        radiance_parameters['rad_st'], radiance_parameters['rad_sj'],
        radiance_parameters['rad_lw'], radiance_parameters['rad_dj'],
        radiance_parameters['rad_ds'], radiance_parameters['rad_dr'],
        radiance_parameters['rad_dp'])

    print('Executing hourly solar isolation calculation')
    daysim_project.execute_gen_dc()
    daysim_project.execute_ds_illum()

    print('Reading results...')
    solar_res = daysim_project.eval_ill()

    # clamp out-of-range values to [0, weather-file maximum]
    print('Fixing inconsistencies, if any')
    solar_res = np.clip(solar_res, a_min=0.0, a_max=max_global)

    # leap years yield 24 extra hourly columns; drop Feb 29 (hours 1416-1439)
    if solar_res.shape[1] == HOURS_IN_YEAR + 24:
        print('Removing leap day')
        solar_res = np.delete(solar_res, range(1416, 1440), axis=1)

    print('Writing results to disk')
    index = 0
    for building_name, n_sensors, sensor_codes, sensor_intersections in zip(
            names_zone, sensors_number_zone, sensors_code_zone,
            sensor_intersection_zone):
        # slice out this building's sensors and zero the intersected ones
        building_res = solar_res[index:index + n_sensors]
        building_res[np.array(sensor_intersections) == 1] = 0
        items_sensor_name_and_result = dict(zip(sensor_codes, building_res.tolist()))
        index += n_sensors

        write_aggregated_results(building_name, items_sensor_name_and_result,
                                 locator, weatherfile)
        if write_sensor_data:
            write_sensor_results(building_name, items_sensor_name_and_result,
                                 locator)

    # avoid conflicts between iterations
    print('Removing results folder')
    daysim_project.cleanup_project()
def write_sensor_results(building_name, items_sensor_name_and_result, locator):
    """Dump the per-sensor hourly results of one building to its JSON file."""
    output_path = locator.get_radiation_building_sensors(building_name)
    with open(output_path, 'w') as outfile:
        json.dump(items_sensor_name_and_result, outfile)
def write_aggregated_results(building_name, items_sensor_name_and_result,
                             locator, weatherfile):
    """Aggregate per-sensor results to building level and write them to CSV.

    Per-sensor results are area-weighted, summed per surface type/orientation,
    converted to kW, and stored next to the matching surface areas. Rows are
    indexed by the weather-file dates.
    """
    geometry = pd.read_csv(locator.get_radiation_metadata(building_name))
    # e.g. TYPE='windows', orientation='east' -> code 'windows_east_kW'
    geometry['code'] = geometry['TYPE'] + '_' + geometry['orientation'] + '_kW'

    solar_analysis_fields = ['windows_east_kW', 'windows_west_kW',
                             'windows_south_kW', 'windows_north_kW',
                             'walls_east_kW', 'walls_west_kW',
                             'walls_south_kW', 'walls_north_kW', 'roofs_top_kW']
    solar_analysis_fields_area = ['windows_east_m2', 'windows_west_m2',
                                  'windows_south_m2', 'windows_north_m2',
                                  'walls_east_m2', 'walls_west_m2',
                                  'walls_south_m2', 'walls_north_m2',
                                  'roofs_top_m2']

    aggregated = {}
    for field, field_area in zip(solar_analysis_fields, solar_analysis_fields_area):
        sensors = geometry.loc[geometry['code'] == field].set_index('SURFACE')
        # area-weighted sum over every sensor of this surface group
        weighted = np.array([sensors.loc[surface, 'AREA_m2']
                             * np.array(items_sensor_name_and_result[surface])
                             for surface in sensors.index]).sum(axis=0)
        aggregated[field] = weighted / 1000  # W -> kW
        aggregated[field_area] = sensors['AREA_m2'].sum()

    data_aggregated_kW = pd.DataFrame(aggregated).round(2)
    data_aggregated_kW['Date'] = weatherfile['date']
    data_aggregated_kW.set_index('Date', inplace=True)
    data_aggregated_kW.to_csv(locator.get_radiation_building(building_name))
<|reserved_special_token_1|>
import json
import os
import numpy as np
import pandas as pd
import py4design.py2radiance as py2radiance
import py4design.py3dmodel.calculate as calculate
from py4design import py3dmodel
__author__ = 'Jimeno A. Fonseca'
__copyright__ = (
'Copyright 2017, Architecture and Building Systems - ETH Zurich')
__credits__ = ['Jimeno A. Fonseca', 'Kian Wee Chen']
__license__ = 'MIT'
__version__ = '0.1'
__maintainer__ = 'Daren Thomas'
__email__ = '[email protected]'
__status__ = 'Production'
from cea.constants import HOURS_IN_YEAR
from cea.resources.radiation_daysim.geometry_generator import BuildingGeometry
from cea import suppress_3rd_party_debug_loggers
suppress_3rd_party_debug_loggers()
def create_sensor_input_file(rad, chunk_n):
    """Write the radiance sensor-point file for this chunk and record its path.

    Parameters
    ----------
    rad :
        Radiance scene object; supplies ``data_folder_path``,
        ``sensor_positions`` and ``sensor_normals`` and receives the
        resulting ``sensor_file_path`` attribute.
    chunk_n : int
        Chunk number used to disambiguate the output file name.
    """
    sensor_file_path = os.path.join(rad.data_folder_path,
                                    'points_' + str(chunk_n) + '.pts')
    sensor_pts_data = py2radiance.write_rad.sensor_file(rad.sensor_positions,
                                                        rad.sensor_normals)
    # Context manager guarantees the handle is closed even if the write
    # fails (the original open()/close() pair leaked it on error).
    with open(sensor_file_path, 'w') as sensor_file:
        sensor_file.write(sensor_pts_data)
    rad.sensor_file_path = sensor_file_path
def generate_sensor_surfaces(occface, wall_dim, roof_dim, srf_type,
                             orientation, normal, intersection):
    """Subdivide one building face into a grid of sensor patches.

    The face is first nudged 1 cm along its normal so the sensors sit just
    off the surface. Returns parallel per-sensor lists: direction, centroid,
    surface type, effective area, orientation and intersection flag.
    """
    midpoint = py3dmodel.calculate.face_midpt(occface)
    offset_point = py3dmodel.modify.move_pt(midpoint, normal, 0.01)
    offset_face = py3dmodel.fetch.topo2topotype(
        py3dmodel.modify.move(midpoint, offset_point, occface))

    # Roofs use the horizontal grid spacing, every other surface the vertical one.
    grid_dim = roof_dim if srf_type == 'roofs' else wall_dim
    patches = py3dmodel.construct.grid_face(offset_face, grid_dim, grid_dim)

    intersections = [intersection] * len(patches)
    directions = [normal] * len(patches)
    centroids = [py3dmodel.calculate.face_midpt(patch) for patch in patches]
    types = [srf_type] * len(patches)
    orientations = [orientation] * len(patches)
    # Effective area is discounted by the (0..1) intersection scalar.
    areas = [calculate.face_area(patch) * (1.0 - scalar)
             for patch, scalar in zip(patches, intersections)]
    return directions, centroids, types, areas, orientations, intersections
def calc_sensors_building(building_geometry, grid_size):
    """Generate sensor properties for every surface of one building.

    Walls and windows are gridded with ``grid_size['walls_grid']``, roofs
    with ``grid_size['roof_grid']``. Returns six parallel lists: directions,
    coordinates, surface types, areas, orientations, intersection flags.
    """
    directions, coords, types = [], [], []
    areas, orientations, intersections = [], [], []
    wall_grid = grid_size['walls_grid']
    roof_grid = grid_size['roof_grid']

    for srf_type in ('walls', 'windows', 'roofs'):
        faces = getattr(building_geometry, srf_type)
        if srf_type == 'roofs':
            # Roofs always face straight up and are never intersected.
            face_orientations = ['top'] * len(faces)
            face_normals = [(0.0, 0.0, 1.0)] * len(faces)
            face_intersections = [0] * len(faces)
        else:
            face_orientations = getattr(
                building_geometry, 'orientation_{srf_type}'.format(srf_type=srf_type))
            face_normals = getattr(
                building_geometry, 'normals_{srf_type}'.format(srf_type=srf_type))
            if srf_type == 'windows':
                # Windows carry no intersection information.
                face_intersections = [0] * len(faces)
            else:
                face_intersections = getattr(
                    building_geometry, 'intersect_{srf_type}'.format(srf_type=srf_type))

        for face_orientation, face_normal, face, face_intersection in zip(
                face_orientations, face_normals, faces, face_intersections):
            (sensor_dir, sensor_cord, sensor_type, sensor_area,
             sensor_orientation, sensor_intersection) = generate_sensor_surfaces(
                face, wall_grid, roof_grid, srf_type, face_orientation,
                face_normal, face_intersection)
            intersections.extend(sensor_intersection)
            directions.extend(sensor_dir)
            coords.extend(sensor_cord)
            types.extend(sensor_type)
            areas.extend(sensor_area)
            orientations.extend(sensor_orientation)

    return directions, coords, types, areas, orientations, intersections
def calc_sensors_zone(building_names, locator, grid_size, geometry_pickle_dir):
    """Collect sensor points for every building in the zone.

    Writes each building's per-sensor metadata CSV via
    ``locator.get_radiation_metadata`` and accumulates coordinates and
    directions for the Daysim run. Returns flattened coordinates and
    directions plus per-building sensor counts, names, sensor codes and
    intersection flags.
    """
    coords_zone = []
    dirs_zone = []
    counts = []
    names = []
    codes_zone = []
    intersections_zone = []
    for building_name in building_names:
        building_geometry = BuildingGeometry.load(
            os.path.join(geometry_pickle_dir, 'zone', building_name))
        # per-building sensors
        (dirs_building, coords_building, types_building, areas_building,
         orientations_building, intersections_building) = calc_sensors_building(
            building_geometry, grid_size)

        n_sensors = len(coords_building)
        sensor_codes = ['srf' + str(i) for i in range(n_sensors)]

        counts.append(n_sensors)
        codes_zone.append(sensor_codes)
        coords_zone.extend(coords_building)
        dirs_zone.extend(dirs_building)
        intersections_zone.append(intersections_building)
        names.append(building_name)

        # persist per-sensor geometry so results can be mapped back later
        pd.DataFrame({'BUILDING': building_name,
                      'SURFACE': sensor_codes,
                      'orientation': orientations_building,
                      'intersection': intersections_building,
                      'Xcoor': [xyz[0] for xyz in coords_building],
                      'Ycoor': [xyz[1] for xyz in coords_building],
                      'Zcoor': [xyz[2] for xyz in coords_building],
                      'Xdir': [xyz[0] for xyz in dirs_building],
                      'Ydir': [xyz[1] for xyz in dirs_building],
                      'Zdir': [xyz[2] for xyz in dirs_building],
                      'AREA_m2': areas_building,
                      'TYPE': types_building}).to_csv(
            locator.get_radiation_metadata(building_name), index=None)
    return coords_zone, dirs_zone, counts, names, codes_zone, intersections_zone
def isolation_daysim(chunk_n, cea_daysim, building_names, locator,
                     radiance_parameters, write_sensor_data, grid_size,
                     max_global, weatherfile, geometry_pickle_dir):
    """Run the hourly Daysim solar-isolation simulation for one chunk of buildings.

    Creates a throw-away Daysim project, sends all sensor points, runs the
    simulation, clips/cleans the results and writes aggregated (and optionally
    per-sensor) results to disk, then removes the project folder.
    """
    # per-chunk daysim project
    daysim_project = cea_daysim.initialize_daysim_project('chunk_{n}'.format(n=chunk_n))
    print('Creating daysim project in: {daysim_dir}'.format(
        daysim_dir=daysim_project.project_path))

    print('Calculating and sending sensor points')
    (sensors_coords_zone, sensors_dir_zone, sensors_number_zone, names_zone,
     sensors_code_zone, sensor_intersection_zone) = calc_sensors_zone(
        building_names, locator, grid_size, geometry_pickle_dir)
    num_sensors = sum(sensors_number_zone)
    daysim_project.create_sensor_input_file(
        sensors_coords_zone, sensors_dir_zone, num_sensors, 'w/m2')

    print('Starting Daysim simulation for buildings: {buildings}'.format(
        buildings=names_zone))
    print('Total number of sensors: {num_sensors}'.format(num_sensors=num_sensors))

    print('Writing radiance parameters')
    daysim_project.write_radiance_parameters(
        radiance_parameters['rad_ab'], radiance_parameters['rad_ad'],
        radiance_parameters['rad_as'], radiance_parameters['rad_ar'],
        radiance_parameters['rad_aa'], radiance_parameters['rad_lr'],
        radiance_parameters['rad_st'], radiance_parameters['rad_sj'],
        radiance_parameters['rad_lw'], radiance_parameters['rad_dj'],
        radiance_parameters['rad_ds'], radiance_parameters['rad_dr'],
        radiance_parameters['rad_dp'])

    print('Executing hourly solar isolation calculation')
    daysim_project.execute_gen_dc()
    daysim_project.execute_ds_illum()

    print('Reading results...')
    solar_res = daysim_project.eval_ill()

    # clamp out-of-range values to [0, weather-file maximum]
    print('Fixing inconsistencies, if any')
    solar_res = np.clip(solar_res, a_min=0.0, a_max=max_global)

    # leap years yield 24 extra hourly columns; drop Feb 29 (hours 1416-1439)
    if solar_res.shape[1] == HOURS_IN_YEAR + 24:
        print('Removing leap day')
        solar_res = np.delete(solar_res, range(1416, 1440), axis=1)

    print('Writing results to disk')
    index = 0
    for building_name, n_sensors, sensor_codes, sensor_intersections in zip(
            names_zone, sensors_number_zone, sensors_code_zone,
            sensor_intersection_zone):
        # slice out this building's sensors and zero the intersected ones
        building_res = solar_res[index:index + n_sensors]
        building_res[np.array(sensor_intersections) == 1] = 0
        items_sensor_name_and_result = dict(zip(sensor_codes, building_res.tolist()))
        index += n_sensors

        write_aggregated_results(building_name, items_sensor_name_and_result,
                                 locator, weatherfile)
        if write_sensor_data:
            write_sensor_results(building_name, items_sensor_name_and_result,
                                 locator)

    # avoid conflicts between iterations
    print('Removing results folder')
    daysim_project.cleanup_project()
def write_sensor_results(building_name, items_sensor_name_and_result, locator):
    """Dump the per-sensor hourly results of one building to its JSON file."""
    output_path = locator.get_radiation_building_sensors(building_name)
    with open(output_path, 'w') as outfile:
        json.dump(items_sensor_name_and_result, outfile)
def write_aggregated_results(building_name, items_sensor_name_and_result,
                             locator, weatherfile):
    """Aggregate per-sensor results to building level and write them to CSV.

    Per-sensor results are area-weighted, summed per surface type/orientation,
    converted to kW, and stored next to the matching surface areas. Rows are
    indexed by the weather-file dates.
    """
    geometry = pd.read_csv(locator.get_radiation_metadata(building_name))
    # e.g. TYPE='windows', orientation='east' -> code 'windows_east_kW'
    geometry['code'] = geometry['TYPE'] + '_' + geometry['orientation'] + '_kW'

    solar_analysis_fields = ['windows_east_kW', 'windows_west_kW',
                             'windows_south_kW', 'windows_north_kW',
                             'walls_east_kW', 'walls_west_kW',
                             'walls_south_kW', 'walls_north_kW', 'roofs_top_kW']
    solar_analysis_fields_area = ['windows_east_m2', 'windows_west_m2',
                                  'windows_south_m2', 'windows_north_m2',
                                  'walls_east_m2', 'walls_west_m2',
                                  'walls_south_m2', 'walls_north_m2',
                                  'roofs_top_m2']

    aggregated = {}
    for field, field_area in zip(solar_analysis_fields, solar_analysis_fields_area):
        sensors = geometry.loc[geometry['code'] == field].set_index('SURFACE')
        # area-weighted sum over every sensor of this surface group
        weighted = np.array([sensors.loc[surface, 'AREA_m2']
                             * np.array(items_sensor_name_and_result[surface])
                             for surface in sensors.index]).sum(axis=0)
        aggregated[field] = weighted / 1000  # W -> kW
        aggregated[field_area] = sensors['AREA_m2'].sum()

    data_aggregated_kW = pd.DataFrame(aggregated).round(2)
    data_aggregated_kW['Date'] = weatherfile['date']
    data_aggregated_kW.set_index('Date', inplace=True)
    data_aggregated_kW.to_csv(locator.get_radiation_building(building_name))
<|reserved_special_token_1|>
import json
import os
import numpy as np
import pandas as pd
import py4design.py2radiance as py2radiance
import py4design.py3dmodel.calculate as calculate
from py4design import py3dmodel
__author__ = "Jimeno A. Fonseca"
__copyright__ = "Copyright 2017, Architecture and Building Systems - ETH Zurich"
__credits__ = ["Jimeno A. Fonseca", "Kian Wee Chen"]
__license__ = "MIT"
__version__ = "0.1"
__maintainer__ = "Daren Thomas"
__email__ = "[email protected]"
__status__ = "Production"
from cea.constants import HOURS_IN_YEAR
from cea.resources.radiation_daysim.geometry_generator import BuildingGeometry
from cea import suppress_3rd_party_debug_loggers
suppress_3rd_party_debug_loggers()
def create_sensor_input_file(rad, chunk_n):
    """Write the radiance sensor-point file for this chunk and record its path.

    Parameters
    ----------
    rad :
        Radiance scene object; supplies ``data_folder_path``,
        ``sensor_positions`` and ``sensor_normals`` and receives the
        resulting ``sensor_file_path`` attribute.
    chunk_n : int
        Chunk number used to disambiguate the output file name.
    """
    sensor_file_path = os.path.join(rad.data_folder_path, "points_" + str(chunk_n) + ".pts")
    sensor_pts_data = py2radiance.write_rad.sensor_file(rad.sensor_positions, rad.sensor_normals)
    # Context manager guarantees the handle is closed even if the write
    # fails (the original open()/close() pair leaked it on error).
    with open(sensor_file_path, "w") as sensor_file:
        sensor_file.write(sensor_pts_data)
    rad.sensor_file_path = sensor_file_path
def generate_sensor_surfaces(occface, wall_dim, roof_dim, srf_type, orientation, normal, intersection):
    """Subdivide one building face into a grid of sensor patches.

    The face is first nudged 1 cm along its normal so the sensors sit just
    off the surface. Returns parallel per-sensor lists: direction, centroid,
    surface type, effective area, orientation and intersection flag.
    """
    midpoint = py3dmodel.calculate.face_midpt(occface)
    offset_point = py3dmodel.modify.move_pt(midpoint, normal, 0.01)
    offset_face = py3dmodel.fetch.topo2topotype(
        py3dmodel.modify.move(midpoint, offset_point, occface))

    # Roofs use the horizontal grid spacing, every other surface the vertical one.
    grid_dim = roof_dim if srf_type == 'roofs' else wall_dim
    patches = py3dmodel.construct.grid_face(offset_face, grid_dim, grid_dim)

    intersections = [intersection] * len(patches)
    directions = [normal] * len(patches)
    centroids = [py3dmodel.calculate.face_midpt(patch) for patch in patches]
    types = [srf_type] * len(patches)
    orientations = [orientation] * len(patches)
    # Effective area is discounted by the (0..1) intersection scalar.
    areas = [calculate.face_area(patch) * (1.0 - scalar)
             for patch, scalar in zip(patches, intersections)]
    return directions, centroids, types, areas, orientations, intersections
def calc_sensors_building(building_geometry, grid_size):
    """Generate sensor properties for every surface of one building.

    Walls and windows are gridded with ``grid_size['walls_grid']``, roofs
    with ``grid_size['roof_grid']``. Returns six parallel lists: directions,
    coordinates, surface types, areas, orientations, intersection flags.
    """
    directions, coords, types = [], [], []
    areas, orientations, intersections = [], [], []
    wall_grid = grid_size["walls_grid"]
    roof_grid = grid_size["roof_grid"]

    for srf_type in ('walls', 'windows', 'roofs'):
        faces = getattr(building_geometry, srf_type)
        if srf_type == 'roofs':
            # Roofs always face straight up and are never intersected.
            face_orientations = ['top'] * len(faces)
            face_normals = [(0.0, 0.0, 1.0)] * len(faces)
            face_intersections = [0] * len(faces)
        else:
            face_orientations = getattr(
                building_geometry, "orientation_{srf_type}".format(srf_type=srf_type))
            face_normals = getattr(
                building_geometry, "normals_{srf_type}".format(srf_type=srf_type))
            if srf_type == 'windows':
                # Windows carry no intersection information.
                face_intersections = [0] * len(faces)
            else:
                face_intersections = getattr(
                    building_geometry, "intersect_{srf_type}".format(srf_type=srf_type))

        for face_orientation, face_normal, face, face_intersection in zip(
                face_orientations, face_normals, faces, face_intersections):
            (sensor_dir, sensor_cord, sensor_type, sensor_area,
             sensor_orientation, sensor_intersection) = generate_sensor_surfaces(
                face, wall_grid, roof_grid, srf_type, face_orientation,
                face_normal, face_intersection)
            intersections.extend(sensor_intersection)
            directions.extend(sensor_dir)
            coords.extend(sensor_cord)
            types.extend(sensor_type)
            areas.extend(sensor_area)
            orientations.extend(sensor_orientation)

    return directions, coords, types, areas, orientations, intersections
def calc_sensors_zone(building_names, locator, grid_size, geometry_pickle_dir):
    """Collect sensor points for every building in the zone.

    Writes each building's per-sensor metadata CSV via
    ``locator.get_radiation_metadata`` and accumulates coordinates and
    directions for the Daysim run. Returns flattened coordinates and
    directions plus per-building sensor counts, names, sensor codes and
    intersection flags.
    """
    coords_zone = []
    dirs_zone = []
    counts = []
    names = []
    codes_zone = []
    intersections_zone = []
    for building_name in building_names:
        building_geometry = BuildingGeometry.load(
            os.path.join(geometry_pickle_dir, 'zone', building_name))
        # per-building sensors
        (dirs_building, coords_building, types_building, areas_building,
         orientations_building, intersections_building) = calc_sensors_building(
            building_geometry, grid_size)

        n_sensors = len(coords_building)
        sensor_codes = ['srf' + str(i) for i in range(n_sensors)]

        counts.append(n_sensors)
        codes_zone.append(sensor_codes)
        coords_zone.extend(coords_building)
        dirs_zone.extend(dirs_building)
        intersections_zone.append(intersections_building)
        names.append(building_name)

        # persist per-sensor geometry so results can be mapped back later
        pd.DataFrame({'BUILDING': building_name,
                      'SURFACE': sensor_codes,
                      'orientation': orientations_building,
                      'intersection': intersections_building,
                      'Xcoor': [xyz[0] for xyz in coords_building],
                      'Ycoor': [xyz[1] for xyz in coords_building],
                      'Zcoor': [xyz[2] for xyz in coords_building],
                      'Xdir': [xyz[0] for xyz in dirs_building],
                      'Ydir': [xyz[1] for xyz in dirs_building],
                      'Zdir': [xyz[2] for xyz in dirs_building],
                      'AREA_m2': areas_building,
                      'TYPE': types_building}).to_csv(
            locator.get_radiation_metadata(building_name), index=None)
    return coords_zone, dirs_zone, counts, names, codes_zone, intersections_zone
def isolation_daysim(chunk_n, cea_daysim, building_names, locator, radiance_parameters, write_sensor_data, grid_size,
                     max_global, weatherfile, geometry_pickle_dir):
    """Run the hourly Daysim solar-isolation simulation for one chunk of buildings.

    Creates a throw-away Daysim project, sends all sensor points, runs the
    simulation, clips/cleans the results and writes aggregated (and optionally
    per-sensor) results to disk, then removes the project folder.
    """
    # per-chunk daysim project
    daysim_project = cea_daysim.initialize_daysim_project('chunk_{n}'.format(n=chunk_n))
    print('Creating daysim project in: {daysim_dir}'.format(daysim_dir=daysim_project.project_path))

    print("Calculating and sending sensor points")
    (sensors_coords_zone, sensors_dir_zone, sensors_number_zone, names_zone,
     sensors_code_zone, sensor_intersection_zone) = calc_sensors_zone(
        building_names, locator, grid_size, geometry_pickle_dir)
    num_sensors = sum(sensors_number_zone)
    daysim_project.create_sensor_input_file(sensors_coords_zone, sensors_dir_zone, num_sensors, "w/m2")

    print("Starting Daysim simulation for buildings: {buildings}".format(buildings=names_zone))
    print("Total number of sensors: {num_sensors}".format(num_sensors=num_sensors))

    print('Writing radiance parameters')
    daysim_project.write_radiance_parameters(
        radiance_parameters["rad_ab"], radiance_parameters["rad_ad"],
        radiance_parameters["rad_as"], radiance_parameters["rad_ar"],
        radiance_parameters["rad_aa"], radiance_parameters["rad_lr"],
        radiance_parameters["rad_st"], radiance_parameters["rad_sj"],
        radiance_parameters["rad_lw"], radiance_parameters["rad_dj"],
        radiance_parameters["rad_ds"], radiance_parameters["rad_dr"],
        radiance_parameters["rad_dp"])

    print('Executing hourly solar isolation calculation')
    daysim_project.execute_gen_dc()
    daysim_project.execute_ds_illum()

    print('Reading results...')
    solar_res = daysim_project.eval_ill()

    # clamp out-of-range values to [0, weather-file maximum]
    print('Fixing inconsistencies, if any')
    solar_res = np.clip(solar_res, a_min=0.0, a_max=max_global)

    # leap years yield 24 extra hourly columns; drop Feb 29 (hours 1416-1439)
    if solar_res.shape[1] == HOURS_IN_YEAR + 24:
        print('Removing leap day')
        solar_res = np.delete(solar_res, range(1416, 1440), axis=1)

    print("Writing results to disk")
    index = 0
    for building_name, n_sensors, sensor_codes, sensor_intersections in zip(
            names_zone, sensors_number_zone, sensors_code_zone, sensor_intersection_zone):
        # slice out this building's sensors and zero the intersected ones
        building_res = solar_res[index:index + n_sensors]
        building_res[np.array(sensor_intersections) == 1] = 0
        items_sensor_name_and_result = dict(zip(sensor_codes, building_res.tolist()))
        index += n_sensors

        write_aggregated_results(building_name, items_sensor_name_and_result, locator, weatherfile)
        if write_sensor_data:
            write_sensor_results(building_name, items_sensor_name_and_result, locator)

    # avoid conflicts between iterations
    print('Removing results folder')
    daysim_project.cleanup_project()
def write_sensor_results(building_name, items_sensor_name_and_result, locator):
    """Dump the per-sensor hourly results of one building to its JSON file."""
    output_path = locator.get_radiation_building_sensors(building_name)
    with open(output_path, 'w') as outfile:
        json.dump(items_sensor_name_and_result, outfile)
def write_aggregated_results(building_name, items_sensor_name_and_result, locator, weatherfile):
    """Aggregate per-sensor hourly radiation into per-surface-group totals and save to CSV.

    For every surface group (windows/walls per orientation, plus roof top) this
    computes the area-weighted hourly irradiation in kW and the total group
    area in m2, then writes one row per hour indexed by the weather file dates.

    :param building_name: name of the building to aggregate
    :param items_sensor_name_and_result: mapping of sensor code -> list of hourly values (W/m2)
    :param locator: CEA input locator providing metadata and output paths
    :param weatherfile: dataframe with a 'date' column matching the hourly results
    """
    metadata = pd.read_csv(locator.get_radiation_metadata(building_name))
    # group code, e.g. 'windows_east_kW', used to match sensors to output fields
    metadata['code'] = metadata['TYPE'] + '_' + metadata['orientation'] + '_kW'

    energy_fields = ['windows_east_kW',
                     'windows_west_kW',
                     'windows_south_kW',
                     'windows_north_kW',
                     'walls_east_kW',
                     'walls_west_kW',
                     'walls_south_kW',
                     'walls_north_kW',
                     'roofs_top_kW']
    area_fields = ['windows_east_m2',
                   'windows_west_m2',
                   'windows_south_m2',
                   'windows_north_m2',
                   'walls_east_m2',
                   'walls_west_m2',
                   'walls_south_m2',
                   'walls_north_m2',
                   'roofs_top_m2']

    aggregated = {}
    for energy_field, area_field in zip(energy_fields, area_fields):
        surfaces = metadata.loc[metadata['code'] == energy_field].set_index('SURFACE')
        # sum of area-weighted hourly results over all sensors of this group
        weighted_series = [surfaces.loc[srf, 'AREA_m2'] *
                           np.array(items_sensor_name_and_result[srf])
                           for srf in surfaces.index]
        aggregated[energy_field] = np.array(weighted_series).sum(axis=0) / 1000  # W -> kW
        aggregated[area_field] = surfaces['AREA_m2'].sum()

    hourly_frame = pd.DataFrame(aggregated).round(2)
    hourly_frame["Date"] = weatherfile["date"]
    hourly_frame.set_index('Date', inplace=True)
    hourly_frame.to_csv(locator.get_radiation_building(building_name))
|
flexible
|
{
"blob_id": "164b0afde225119a8fbd4ccfccbbbc3550aa75fe",
"index": 2634,
"step-1": "<mask token>\n\n\ndef create_sensor_input_file(rad, chunk_n):\n sensor_file_path = os.path.join(rad.data_folder_path, 'points_' + str(\n chunk_n) + '.pts')\n sensor_file = open(sensor_file_path, 'w')\n sensor_pts_data = py2radiance.write_rad.sensor_file(rad.\n sensor_positions, rad.sensor_normals)\n sensor_file.write(sensor_pts_data)\n sensor_file.close()\n rad.sensor_file_path = sensor_file_path\n\n\ndef generate_sensor_surfaces(occface, wall_dim, roof_dim, srf_type,\n orientation, normal, intersection):\n mid_pt = py3dmodel.calculate.face_midpt(occface)\n location_pt = py3dmodel.modify.move_pt(mid_pt, normal, 0.01)\n moved_oface = py3dmodel.fetch.topo2topotype(py3dmodel.modify.move(\n mid_pt, location_pt, occface))\n if srf_type == 'roofs':\n xdim = ydim = roof_dim\n else:\n xdim = ydim = wall_dim\n sensor_surfaces = py3dmodel.construct.grid_face(moved_oface, xdim, ydim)\n sensor_intersection = [intersection for x in sensor_surfaces]\n sensor_dir = [normal for x in sensor_surfaces]\n sensor_cord = [py3dmodel.calculate.face_midpt(x) for x in sensor_surfaces]\n sensor_type = [srf_type for x in sensor_surfaces]\n sensor_orientation = [orientation for x in sensor_surfaces]\n sensor_area = [(calculate.face_area(x) * (1.0 - scalar)) for x, scalar in\n zip(sensor_surfaces, sensor_intersection)]\n return (sensor_dir, sensor_cord, sensor_type, sensor_area,\n sensor_orientation, sensor_intersection)\n\n\n<mask token>\n\n\ndef calc_sensors_zone(building_names, locator, grid_size, geometry_pickle_dir):\n sensors_coords_zone = []\n sensors_dir_zone = []\n sensors_total_number_list = []\n names_zone = []\n sensors_code_zone = []\n sensor_intersection_zone = []\n for building_name in building_names:\n building_geometry = BuildingGeometry.load(os.path.join(\n geometry_pickle_dir, 'zone', building_name))\n (sensors_dir_building, sensors_coords_building,\n sensors_type_building, sensors_area_building,\n sensor_orientation_building, sensor_intersection_building\n ) = 
calc_sensors_building(building_geometry, grid_size)\n sensors_number = len(sensors_coords_building)\n sensors_total_number_list.append(sensors_number)\n sensors_code = [('srf' + str(x)) for x in range(sensors_number)]\n sensors_code_zone.append(sensors_code)\n sensors_coords_zone.extend(sensors_coords_building)\n sensors_dir_zone.extend(sensors_dir_building)\n sensor_intersection_zone.append(sensor_intersection_building)\n names_zone.append(building_name)\n pd.DataFrame({'BUILDING': building_name, 'SURFACE': sensors_code,\n 'orientation': sensor_orientation_building, 'intersection':\n sensor_intersection_building, 'Xcoor': [x[0] for x in\n sensors_coords_building], 'Ycoor': [x[1] for x in\n sensors_coords_building], 'Zcoor': [x[2] for x in\n sensors_coords_building], 'Xdir': [x[0] for x in\n sensors_dir_building], 'Ydir': [x[1] for x in\n sensors_dir_building], 'Zdir': [x[2] for x in\n sensors_dir_building], 'AREA_m2': sensors_area_building, 'TYPE':\n sensors_type_building}).to_csv(locator.get_radiation_metadata(\n building_name), index=None)\n return (sensors_coords_zone, sensors_dir_zone,\n sensors_total_number_list, names_zone, sensors_code_zone,\n sensor_intersection_zone)\n\n\ndef isolation_daysim(chunk_n, cea_daysim, building_names, locator,\n radiance_parameters, write_sensor_data, grid_size, max_global,\n weatherfile, geometry_pickle_dir):\n daysim_project = cea_daysim.initialize_daysim_project('chunk_{n}'.\n format(n=chunk_n))\n print('Creating daysim project in: {daysim_dir}'.format(daysim_dir=\n daysim_project.project_path))\n print('Calculating and sending sensor points')\n (sensors_coords_zone, sensors_dir_zone, sensors_number_zone, names_zone,\n sensors_code_zone, sensor_intersection_zone) = (calc_sensors_zone(\n building_names, locator, grid_size, geometry_pickle_dir))\n num_sensors = sum(sensors_number_zone)\n daysim_project.create_sensor_input_file(sensors_coords_zone,\n sensors_dir_zone, num_sensors, 'w/m2')\n print('Starting Daysim simulation for 
buildings: {buildings}'.format(\n buildings=names_zone))\n print('Total number of sensors: {num_sensors}'.format(num_sensors=\n num_sensors))\n print('Writing radiance parameters')\n daysim_project.write_radiance_parameters(radiance_parameters['rad_ab'],\n radiance_parameters['rad_ad'], radiance_parameters['rad_as'],\n radiance_parameters['rad_ar'], radiance_parameters['rad_aa'],\n radiance_parameters['rad_lr'], radiance_parameters['rad_st'],\n radiance_parameters['rad_sj'], radiance_parameters['rad_lw'],\n radiance_parameters['rad_dj'], radiance_parameters['rad_ds'],\n radiance_parameters['rad_dr'], radiance_parameters['rad_dp'])\n print('Executing hourly solar isolation calculation')\n daysim_project.execute_gen_dc()\n daysim_project.execute_ds_illum()\n print('Reading results...')\n solar_res = daysim_project.eval_ill()\n print('Fixing inconsistencies, if any')\n solar_res = np.clip(solar_res, a_min=0.0, a_max=max_global)\n if solar_res.shape[1] == HOURS_IN_YEAR + 24:\n print('Removing leap day')\n leap_day_hours = range(1416, 1440)\n solar_res = np.delete(solar_res, leap_day_hours, axis=1)\n print('Writing results to disk')\n index = 0\n for building_name, sensors_number_building, sensor_code_building, sensor_intersection_building in zip(\n names_zone, sensors_number_zone, sensors_code_zone,\n sensor_intersection_zone):\n selection_of_results = solar_res[index:index + sensors_number_building]\n selection_of_results[np.array(sensor_intersection_building) == 1] = 0\n items_sensor_name_and_result = dict(zip(sensor_code_building,\n selection_of_results.tolist()))\n index = index + sensors_number_building\n write_aggregated_results(building_name,\n items_sensor_name_and_result, locator, weatherfile)\n if write_sensor_data:\n write_sensor_results(building_name,\n items_sensor_name_and_result, locator)\n print('Removing results folder')\n daysim_project.cleanup_project()\n\n\ndef write_sensor_results(building_name, items_sensor_name_and_result, locator):\n with 
open(locator.get_radiation_building_sensors(building_name), 'w'\n ) as outfile:\n json.dump(items_sensor_name_and_result, outfile)\n\n\ndef write_aggregated_results(building_name, items_sensor_name_and_result,\n locator, weatherfile):\n geometry = pd.read_csv(locator.get_radiation_metadata(building_name))\n geometry['code'] = geometry['TYPE'] + '_' + geometry['orientation'] + '_kW'\n solar_analysis_fields = ['windows_east_kW', 'windows_west_kW',\n 'windows_south_kW', 'windows_north_kW', 'walls_east_kW',\n 'walls_west_kW', 'walls_south_kW', 'walls_north_kW', 'roofs_top_kW']\n solar_analysis_fields_area = ['windows_east_m2', 'windows_west_m2',\n 'windows_south_m2', 'windows_north_m2', 'walls_east_m2',\n 'walls_west_m2', 'walls_south_m2', 'walls_north_m2', 'roofs_top_m2']\n dict_not_aggregated = {}\n for field, field_area in zip(solar_analysis_fields,\n solar_analysis_fields_area):\n select_sensors = geometry.loc[geometry['code'] == field].set_index(\n 'SURFACE')\n area_m2 = select_sensors['AREA_m2'].sum()\n array_field = np.array([(select_sensors.loc[surface, 'AREA_m2'] *\n np.array(items_sensor_name_and_result[surface])) for surface in\n select_sensors.index]).sum(axis=0)\n dict_not_aggregated[field] = array_field / 1000\n dict_not_aggregated[field_area] = area_m2\n data_aggregated_kW = pd.DataFrame(dict_not_aggregated).round(2)\n data_aggregated_kW['Date'] = weatherfile['date']\n data_aggregated_kW.set_index('Date', inplace=True)\n data_aggregated_kW.to_csv(locator.get_radiation_building(building_name))\n",
"step-2": "<mask token>\n\n\ndef create_sensor_input_file(rad, chunk_n):\n sensor_file_path = os.path.join(rad.data_folder_path, 'points_' + str(\n chunk_n) + '.pts')\n sensor_file = open(sensor_file_path, 'w')\n sensor_pts_data = py2radiance.write_rad.sensor_file(rad.\n sensor_positions, rad.sensor_normals)\n sensor_file.write(sensor_pts_data)\n sensor_file.close()\n rad.sensor_file_path = sensor_file_path\n\n\ndef generate_sensor_surfaces(occface, wall_dim, roof_dim, srf_type,\n orientation, normal, intersection):\n mid_pt = py3dmodel.calculate.face_midpt(occface)\n location_pt = py3dmodel.modify.move_pt(mid_pt, normal, 0.01)\n moved_oface = py3dmodel.fetch.topo2topotype(py3dmodel.modify.move(\n mid_pt, location_pt, occface))\n if srf_type == 'roofs':\n xdim = ydim = roof_dim\n else:\n xdim = ydim = wall_dim\n sensor_surfaces = py3dmodel.construct.grid_face(moved_oface, xdim, ydim)\n sensor_intersection = [intersection for x in sensor_surfaces]\n sensor_dir = [normal for x in sensor_surfaces]\n sensor_cord = [py3dmodel.calculate.face_midpt(x) for x in sensor_surfaces]\n sensor_type = [srf_type for x in sensor_surfaces]\n sensor_orientation = [orientation for x in sensor_surfaces]\n sensor_area = [(calculate.face_area(x) * (1.0 - scalar)) for x, scalar in\n zip(sensor_surfaces, sensor_intersection)]\n return (sensor_dir, sensor_cord, sensor_type, sensor_area,\n sensor_orientation, sensor_intersection)\n\n\ndef calc_sensors_building(building_geometry, grid_size):\n sensor_dir_list = []\n sensor_cord_list = []\n sensor_type_list = []\n sensor_area_list = []\n sensor_orientation_list = []\n sensor_intersection_list = []\n surfaces_types = ['walls', 'windows', 'roofs']\n sensor_vertical_grid_dim = grid_size['walls_grid']\n sensor_horizontal_grid_dim = grid_size['roof_grid']\n for srf_type in surfaces_types:\n occface_list = getattr(building_geometry, srf_type)\n if srf_type == 'roofs':\n orientation_list = ['top'] * len(occface_list)\n normals_list = [(0.0, 0.0, 1.0)] 
* len(occface_list)\n interesection_list = [0] * len(occface_list)\n elif srf_type == 'windows':\n orientation_list = getattr(building_geometry,\n 'orientation_{srf_type}'.format(srf_type=srf_type))\n normals_list = getattr(building_geometry, 'normals_{srf_type}'.\n format(srf_type=srf_type))\n interesection_list = [0] * len(occface_list)\n else:\n orientation_list = getattr(building_geometry,\n 'orientation_{srf_type}'.format(srf_type=srf_type))\n normals_list = getattr(building_geometry, 'normals_{srf_type}'.\n format(srf_type=srf_type))\n interesection_list = getattr(building_geometry,\n 'intersect_{srf_type}'.format(srf_type=srf_type))\n for orientation, normal, face, intersection in zip(orientation_list,\n normals_list, occface_list, interesection_list):\n (sensor_dir, sensor_cord, sensor_type, sensor_area,\n sensor_orientation, sensor_intersection) = (\n generate_sensor_surfaces(face, sensor_vertical_grid_dim,\n sensor_horizontal_grid_dim, srf_type, orientation, normal,\n intersection))\n sensor_intersection_list.extend(sensor_intersection)\n sensor_dir_list.extend(sensor_dir)\n sensor_cord_list.extend(sensor_cord)\n sensor_type_list.extend(sensor_type)\n sensor_area_list.extend(sensor_area)\n sensor_orientation_list.extend(sensor_orientation)\n return (sensor_dir_list, sensor_cord_list, sensor_type_list,\n sensor_area_list, sensor_orientation_list, sensor_intersection_list)\n\n\ndef calc_sensors_zone(building_names, locator, grid_size, geometry_pickle_dir):\n sensors_coords_zone = []\n sensors_dir_zone = []\n sensors_total_number_list = []\n names_zone = []\n sensors_code_zone = []\n sensor_intersection_zone = []\n for building_name in building_names:\n building_geometry = BuildingGeometry.load(os.path.join(\n geometry_pickle_dir, 'zone', building_name))\n (sensors_dir_building, sensors_coords_building,\n sensors_type_building, sensors_area_building,\n sensor_orientation_building, sensor_intersection_building\n ) = calc_sensors_building(building_geometry, 
grid_size)\n sensors_number = len(sensors_coords_building)\n sensors_total_number_list.append(sensors_number)\n sensors_code = [('srf' + str(x)) for x in range(sensors_number)]\n sensors_code_zone.append(sensors_code)\n sensors_coords_zone.extend(sensors_coords_building)\n sensors_dir_zone.extend(sensors_dir_building)\n sensor_intersection_zone.append(sensor_intersection_building)\n names_zone.append(building_name)\n pd.DataFrame({'BUILDING': building_name, 'SURFACE': sensors_code,\n 'orientation': sensor_orientation_building, 'intersection':\n sensor_intersection_building, 'Xcoor': [x[0] for x in\n sensors_coords_building], 'Ycoor': [x[1] for x in\n sensors_coords_building], 'Zcoor': [x[2] for x in\n sensors_coords_building], 'Xdir': [x[0] for x in\n sensors_dir_building], 'Ydir': [x[1] for x in\n sensors_dir_building], 'Zdir': [x[2] for x in\n sensors_dir_building], 'AREA_m2': sensors_area_building, 'TYPE':\n sensors_type_building}).to_csv(locator.get_radiation_metadata(\n building_name), index=None)\n return (sensors_coords_zone, sensors_dir_zone,\n sensors_total_number_list, names_zone, sensors_code_zone,\n sensor_intersection_zone)\n\n\ndef isolation_daysim(chunk_n, cea_daysim, building_names, locator,\n radiance_parameters, write_sensor_data, grid_size, max_global,\n weatherfile, geometry_pickle_dir):\n daysim_project = cea_daysim.initialize_daysim_project('chunk_{n}'.\n format(n=chunk_n))\n print('Creating daysim project in: {daysim_dir}'.format(daysim_dir=\n daysim_project.project_path))\n print('Calculating and sending sensor points')\n (sensors_coords_zone, sensors_dir_zone, sensors_number_zone, names_zone,\n sensors_code_zone, sensor_intersection_zone) = (calc_sensors_zone(\n building_names, locator, grid_size, geometry_pickle_dir))\n num_sensors = sum(sensors_number_zone)\n daysim_project.create_sensor_input_file(sensors_coords_zone,\n sensors_dir_zone, num_sensors, 'w/m2')\n print('Starting Daysim simulation for buildings: {buildings}'.format(\n 
buildings=names_zone))\n print('Total number of sensors: {num_sensors}'.format(num_sensors=\n num_sensors))\n print('Writing radiance parameters')\n daysim_project.write_radiance_parameters(radiance_parameters['rad_ab'],\n radiance_parameters['rad_ad'], radiance_parameters['rad_as'],\n radiance_parameters['rad_ar'], radiance_parameters['rad_aa'],\n radiance_parameters['rad_lr'], radiance_parameters['rad_st'],\n radiance_parameters['rad_sj'], radiance_parameters['rad_lw'],\n radiance_parameters['rad_dj'], radiance_parameters['rad_ds'],\n radiance_parameters['rad_dr'], radiance_parameters['rad_dp'])\n print('Executing hourly solar isolation calculation')\n daysim_project.execute_gen_dc()\n daysim_project.execute_ds_illum()\n print('Reading results...')\n solar_res = daysim_project.eval_ill()\n print('Fixing inconsistencies, if any')\n solar_res = np.clip(solar_res, a_min=0.0, a_max=max_global)\n if solar_res.shape[1] == HOURS_IN_YEAR + 24:\n print('Removing leap day')\n leap_day_hours = range(1416, 1440)\n solar_res = np.delete(solar_res, leap_day_hours, axis=1)\n print('Writing results to disk')\n index = 0\n for building_name, sensors_number_building, sensor_code_building, sensor_intersection_building in zip(\n names_zone, sensors_number_zone, sensors_code_zone,\n sensor_intersection_zone):\n selection_of_results = solar_res[index:index + sensors_number_building]\n selection_of_results[np.array(sensor_intersection_building) == 1] = 0\n items_sensor_name_and_result = dict(zip(sensor_code_building,\n selection_of_results.tolist()))\n index = index + sensors_number_building\n write_aggregated_results(building_name,\n items_sensor_name_and_result, locator, weatherfile)\n if write_sensor_data:\n write_sensor_results(building_name,\n items_sensor_name_and_result, locator)\n print('Removing results folder')\n daysim_project.cleanup_project()\n\n\ndef write_sensor_results(building_name, items_sensor_name_and_result, locator):\n with 
open(locator.get_radiation_building_sensors(building_name), 'w'\n ) as outfile:\n json.dump(items_sensor_name_and_result, outfile)\n\n\ndef write_aggregated_results(building_name, items_sensor_name_and_result,\n locator, weatherfile):\n geometry = pd.read_csv(locator.get_radiation_metadata(building_name))\n geometry['code'] = geometry['TYPE'] + '_' + geometry['orientation'] + '_kW'\n solar_analysis_fields = ['windows_east_kW', 'windows_west_kW',\n 'windows_south_kW', 'windows_north_kW', 'walls_east_kW',\n 'walls_west_kW', 'walls_south_kW', 'walls_north_kW', 'roofs_top_kW']\n solar_analysis_fields_area = ['windows_east_m2', 'windows_west_m2',\n 'windows_south_m2', 'windows_north_m2', 'walls_east_m2',\n 'walls_west_m2', 'walls_south_m2', 'walls_north_m2', 'roofs_top_m2']\n dict_not_aggregated = {}\n for field, field_area in zip(solar_analysis_fields,\n solar_analysis_fields_area):\n select_sensors = geometry.loc[geometry['code'] == field].set_index(\n 'SURFACE')\n area_m2 = select_sensors['AREA_m2'].sum()\n array_field = np.array([(select_sensors.loc[surface, 'AREA_m2'] *\n np.array(items_sensor_name_and_result[surface])) for surface in\n select_sensors.index]).sum(axis=0)\n dict_not_aggregated[field] = array_field / 1000\n dict_not_aggregated[field_area] = area_m2\n data_aggregated_kW = pd.DataFrame(dict_not_aggregated).round(2)\n data_aggregated_kW['Date'] = weatherfile['date']\n data_aggregated_kW.set_index('Date', inplace=True)\n data_aggregated_kW.to_csv(locator.get_radiation_building(building_name))\n",
"step-3": "<mask token>\n__author__ = 'Jimeno A. Fonseca'\n__copyright__ = (\n 'Copyright 2017, Architecture and Building Systems - ETH Zurich')\n__credits__ = ['Jimeno A. Fonseca', 'Kian Wee Chen']\n__license__ = 'MIT'\n__version__ = '0.1'\n__maintainer__ = 'Daren Thomas'\n__email__ = '[email protected]'\n__status__ = 'Production'\n<mask token>\nsuppress_3rd_party_debug_loggers()\n\n\ndef create_sensor_input_file(rad, chunk_n):\n sensor_file_path = os.path.join(rad.data_folder_path, 'points_' + str(\n chunk_n) + '.pts')\n sensor_file = open(sensor_file_path, 'w')\n sensor_pts_data = py2radiance.write_rad.sensor_file(rad.\n sensor_positions, rad.sensor_normals)\n sensor_file.write(sensor_pts_data)\n sensor_file.close()\n rad.sensor_file_path = sensor_file_path\n\n\ndef generate_sensor_surfaces(occface, wall_dim, roof_dim, srf_type,\n orientation, normal, intersection):\n mid_pt = py3dmodel.calculate.face_midpt(occface)\n location_pt = py3dmodel.modify.move_pt(mid_pt, normal, 0.01)\n moved_oface = py3dmodel.fetch.topo2topotype(py3dmodel.modify.move(\n mid_pt, location_pt, occface))\n if srf_type == 'roofs':\n xdim = ydim = roof_dim\n else:\n xdim = ydim = wall_dim\n sensor_surfaces = py3dmodel.construct.grid_face(moved_oface, xdim, ydim)\n sensor_intersection = [intersection for x in sensor_surfaces]\n sensor_dir = [normal for x in sensor_surfaces]\n sensor_cord = [py3dmodel.calculate.face_midpt(x) for x in sensor_surfaces]\n sensor_type = [srf_type for x in sensor_surfaces]\n sensor_orientation = [orientation for x in sensor_surfaces]\n sensor_area = [(calculate.face_area(x) * (1.0 - scalar)) for x, scalar in\n zip(sensor_surfaces, sensor_intersection)]\n return (sensor_dir, sensor_cord, sensor_type, sensor_area,\n sensor_orientation, sensor_intersection)\n\n\ndef calc_sensors_building(building_geometry, grid_size):\n sensor_dir_list = []\n sensor_cord_list = []\n sensor_type_list = []\n sensor_area_list = []\n sensor_orientation_list = []\n 
sensor_intersection_list = []\n surfaces_types = ['walls', 'windows', 'roofs']\n sensor_vertical_grid_dim = grid_size['walls_grid']\n sensor_horizontal_grid_dim = grid_size['roof_grid']\n for srf_type in surfaces_types:\n occface_list = getattr(building_geometry, srf_type)\n if srf_type == 'roofs':\n orientation_list = ['top'] * len(occface_list)\n normals_list = [(0.0, 0.0, 1.0)] * len(occface_list)\n interesection_list = [0] * len(occface_list)\n elif srf_type == 'windows':\n orientation_list = getattr(building_geometry,\n 'orientation_{srf_type}'.format(srf_type=srf_type))\n normals_list = getattr(building_geometry, 'normals_{srf_type}'.\n format(srf_type=srf_type))\n interesection_list = [0] * len(occface_list)\n else:\n orientation_list = getattr(building_geometry,\n 'orientation_{srf_type}'.format(srf_type=srf_type))\n normals_list = getattr(building_geometry, 'normals_{srf_type}'.\n format(srf_type=srf_type))\n interesection_list = getattr(building_geometry,\n 'intersect_{srf_type}'.format(srf_type=srf_type))\n for orientation, normal, face, intersection in zip(orientation_list,\n normals_list, occface_list, interesection_list):\n (sensor_dir, sensor_cord, sensor_type, sensor_area,\n sensor_orientation, sensor_intersection) = (\n generate_sensor_surfaces(face, sensor_vertical_grid_dim,\n sensor_horizontal_grid_dim, srf_type, orientation, normal,\n intersection))\n sensor_intersection_list.extend(sensor_intersection)\n sensor_dir_list.extend(sensor_dir)\n sensor_cord_list.extend(sensor_cord)\n sensor_type_list.extend(sensor_type)\n sensor_area_list.extend(sensor_area)\n sensor_orientation_list.extend(sensor_orientation)\n return (sensor_dir_list, sensor_cord_list, sensor_type_list,\n sensor_area_list, sensor_orientation_list, sensor_intersection_list)\n\n\ndef calc_sensors_zone(building_names, locator, grid_size, geometry_pickle_dir):\n sensors_coords_zone = []\n sensors_dir_zone = []\n sensors_total_number_list = []\n names_zone = []\n sensors_code_zone = 
[]\n sensor_intersection_zone = []\n for building_name in building_names:\n building_geometry = BuildingGeometry.load(os.path.join(\n geometry_pickle_dir, 'zone', building_name))\n (sensors_dir_building, sensors_coords_building,\n sensors_type_building, sensors_area_building,\n sensor_orientation_building, sensor_intersection_building\n ) = calc_sensors_building(building_geometry, grid_size)\n sensors_number = len(sensors_coords_building)\n sensors_total_number_list.append(sensors_number)\n sensors_code = [('srf' + str(x)) for x in range(sensors_number)]\n sensors_code_zone.append(sensors_code)\n sensors_coords_zone.extend(sensors_coords_building)\n sensors_dir_zone.extend(sensors_dir_building)\n sensor_intersection_zone.append(sensor_intersection_building)\n names_zone.append(building_name)\n pd.DataFrame({'BUILDING': building_name, 'SURFACE': sensors_code,\n 'orientation': sensor_orientation_building, 'intersection':\n sensor_intersection_building, 'Xcoor': [x[0] for x in\n sensors_coords_building], 'Ycoor': [x[1] for x in\n sensors_coords_building], 'Zcoor': [x[2] for x in\n sensors_coords_building], 'Xdir': [x[0] for x in\n sensors_dir_building], 'Ydir': [x[1] for x in\n sensors_dir_building], 'Zdir': [x[2] for x in\n sensors_dir_building], 'AREA_m2': sensors_area_building, 'TYPE':\n sensors_type_building}).to_csv(locator.get_radiation_metadata(\n building_name), index=None)\n return (sensors_coords_zone, sensors_dir_zone,\n sensors_total_number_list, names_zone, sensors_code_zone,\n sensor_intersection_zone)\n\n\ndef isolation_daysim(chunk_n, cea_daysim, building_names, locator,\n radiance_parameters, write_sensor_data, grid_size, max_global,\n weatherfile, geometry_pickle_dir):\n daysim_project = cea_daysim.initialize_daysim_project('chunk_{n}'.\n format(n=chunk_n))\n print('Creating daysim project in: {daysim_dir}'.format(daysim_dir=\n daysim_project.project_path))\n print('Calculating and sending sensor points')\n (sensors_coords_zone, sensors_dir_zone, 
sensors_number_zone, names_zone,\n sensors_code_zone, sensor_intersection_zone) = (calc_sensors_zone(\n building_names, locator, grid_size, geometry_pickle_dir))\n num_sensors = sum(sensors_number_zone)\n daysim_project.create_sensor_input_file(sensors_coords_zone,\n sensors_dir_zone, num_sensors, 'w/m2')\n print('Starting Daysim simulation for buildings: {buildings}'.format(\n buildings=names_zone))\n print('Total number of sensors: {num_sensors}'.format(num_sensors=\n num_sensors))\n print('Writing radiance parameters')\n daysim_project.write_radiance_parameters(radiance_parameters['rad_ab'],\n radiance_parameters['rad_ad'], radiance_parameters['rad_as'],\n radiance_parameters['rad_ar'], radiance_parameters['rad_aa'],\n radiance_parameters['rad_lr'], radiance_parameters['rad_st'],\n radiance_parameters['rad_sj'], radiance_parameters['rad_lw'],\n radiance_parameters['rad_dj'], radiance_parameters['rad_ds'],\n radiance_parameters['rad_dr'], radiance_parameters['rad_dp'])\n print('Executing hourly solar isolation calculation')\n daysim_project.execute_gen_dc()\n daysim_project.execute_ds_illum()\n print('Reading results...')\n solar_res = daysim_project.eval_ill()\n print('Fixing inconsistencies, if any')\n solar_res = np.clip(solar_res, a_min=0.0, a_max=max_global)\n if solar_res.shape[1] == HOURS_IN_YEAR + 24:\n print('Removing leap day')\n leap_day_hours = range(1416, 1440)\n solar_res = np.delete(solar_res, leap_day_hours, axis=1)\n print('Writing results to disk')\n index = 0\n for building_name, sensors_number_building, sensor_code_building, sensor_intersection_building in zip(\n names_zone, sensors_number_zone, sensors_code_zone,\n sensor_intersection_zone):\n selection_of_results = solar_res[index:index + sensors_number_building]\n selection_of_results[np.array(sensor_intersection_building) == 1] = 0\n items_sensor_name_and_result = dict(zip(sensor_code_building,\n selection_of_results.tolist()))\n index = index + sensors_number_building\n 
write_aggregated_results(building_name,\n items_sensor_name_and_result, locator, weatherfile)\n if write_sensor_data:\n write_sensor_results(building_name,\n items_sensor_name_and_result, locator)\n print('Removing results folder')\n daysim_project.cleanup_project()\n\n\ndef write_sensor_results(building_name, items_sensor_name_and_result, locator):\n with open(locator.get_radiation_building_sensors(building_name), 'w'\n ) as outfile:\n json.dump(items_sensor_name_and_result, outfile)\n\n\ndef write_aggregated_results(building_name, items_sensor_name_and_result,\n locator, weatherfile):\n geometry = pd.read_csv(locator.get_radiation_metadata(building_name))\n geometry['code'] = geometry['TYPE'] + '_' + geometry['orientation'] + '_kW'\n solar_analysis_fields = ['windows_east_kW', 'windows_west_kW',\n 'windows_south_kW', 'windows_north_kW', 'walls_east_kW',\n 'walls_west_kW', 'walls_south_kW', 'walls_north_kW', 'roofs_top_kW']\n solar_analysis_fields_area = ['windows_east_m2', 'windows_west_m2',\n 'windows_south_m2', 'windows_north_m2', 'walls_east_m2',\n 'walls_west_m2', 'walls_south_m2', 'walls_north_m2', 'roofs_top_m2']\n dict_not_aggregated = {}\n for field, field_area in zip(solar_analysis_fields,\n solar_analysis_fields_area):\n select_sensors = geometry.loc[geometry['code'] == field].set_index(\n 'SURFACE')\n area_m2 = select_sensors['AREA_m2'].sum()\n array_field = np.array([(select_sensors.loc[surface, 'AREA_m2'] *\n np.array(items_sensor_name_and_result[surface])) for surface in\n select_sensors.index]).sum(axis=0)\n dict_not_aggregated[field] = array_field / 1000\n dict_not_aggregated[field_area] = area_m2\n data_aggregated_kW = pd.DataFrame(dict_not_aggregated).round(2)\n data_aggregated_kW['Date'] = weatherfile['date']\n data_aggregated_kW.set_index('Date', inplace=True)\n data_aggregated_kW.to_csv(locator.get_radiation_building(building_name))\n",
"step-4": "import json\nimport os\nimport numpy as np\nimport pandas as pd\nimport py4design.py2radiance as py2radiance\nimport py4design.py3dmodel.calculate as calculate\nfrom py4design import py3dmodel\n__author__ = 'Jimeno A. Fonseca'\n__copyright__ = (\n 'Copyright 2017, Architecture and Building Systems - ETH Zurich')\n__credits__ = ['Jimeno A. Fonseca', 'Kian Wee Chen']\n__license__ = 'MIT'\n__version__ = '0.1'\n__maintainer__ = 'Daren Thomas'\n__email__ = '[email protected]'\n__status__ = 'Production'\nfrom cea.constants import HOURS_IN_YEAR\nfrom cea.resources.radiation_daysim.geometry_generator import BuildingGeometry\nfrom cea import suppress_3rd_party_debug_loggers\nsuppress_3rd_party_debug_loggers()\n\n\ndef create_sensor_input_file(rad, chunk_n):\n sensor_file_path = os.path.join(rad.data_folder_path, 'points_' + str(\n chunk_n) + '.pts')\n sensor_file = open(sensor_file_path, 'w')\n sensor_pts_data = py2radiance.write_rad.sensor_file(rad.\n sensor_positions, rad.sensor_normals)\n sensor_file.write(sensor_pts_data)\n sensor_file.close()\n rad.sensor_file_path = sensor_file_path\n\n\ndef generate_sensor_surfaces(occface, wall_dim, roof_dim, srf_type,\n orientation, normal, intersection):\n mid_pt = py3dmodel.calculate.face_midpt(occface)\n location_pt = py3dmodel.modify.move_pt(mid_pt, normal, 0.01)\n moved_oface = py3dmodel.fetch.topo2topotype(py3dmodel.modify.move(\n mid_pt, location_pt, occface))\n if srf_type == 'roofs':\n xdim = ydim = roof_dim\n else:\n xdim = ydim = wall_dim\n sensor_surfaces = py3dmodel.construct.grid_face(moved_oface, xdim, ydim)\n sensor_intersection = [intersection for x in sensor_surfaces]\n sensor_dir = [normal for x in sensor_surfaces]\n sensor_cord = [py3dmodel.calculate.face_midpt(x) for x in sensor_surfaces]\n sensor_type = [srf_type for x in sensor_surfaces]\n sensor_orientation = [orientation for x in sensor_surfaces]\n sensor_area = [(calculate.face_area(x) * (1.0 - scalar)) for x, scalar in\n zip(sensor_surfaces, 
sensor_intersection)]\n return (sensor_dir, sensor_cord, sensor_type, sensor_area,\n sensor_orientation, sensor_intersection)\n\n\ndef calc_sensors_building(building_geometry, grid_size):\n sensor_dir_list = []\n sensor_cord_list = []\n sensor_type_list = []\n sensor_area_list = []\n sensor_orientation_list = []\n sensor_intersection_list = []\n surfaces_types = ['walls', 'windows', 'roofs']\n sensor_vertical_grid_dim = grid_size['walls_grid']\n sensor_horizontal_grid_dim = grid_size['roof_grid']\n for srf_type in surfaces_types:\n occface_list = getattr(building_geometry, srf_type)\n if srf_type == 'roofs':\n orientation_list = ['top'] * len(occface_list)\n normals_list = [(0.0, 0.0, 1.0)] * len(occface_list)\n interesection_list = [0] * len(occface_list)\n elif srf_type == 'windows':\n orientation_list = getattr(building_geometry,\n 'orientation_{srf_type}'.format(srf_type=srf_type))\n normals_list = getattr(building_geometry, 'normals_{srf_type}'.\n format(srf_type=srf_type))\n interesection_list = [0] * len(occface_list)\n else:\n orientation_list = getattr(building_geometry,\n 'orientation_{srf_type}'.format(srf_type=srf_type))\n normals_list = getattr(building_geometry, 'normals_{srf_type}'.\n format(srf_type=srf_type))\n interesection_list = getattr(building_geometry,\n 'intersect_{srf_type}'.format(srf_type=srf_type))\n for orientation, normal, face, intersection in zip(orientation_list,\n normals_list, occface_list, interesection_list):\n (sensor_dir, sensor_cord, sensor_type, sensor_area,\n sensor_orientation, sensor_intersection) = (\n generate_sensor_surfaces(face, sensor_vertical_grid_dim,\n sensor_horizontal_grid_dim, srf_type, orientation, normal,\n intersection))\n sensor_intersection_list.extend(sensor_intersection)\n sensor_dir_list.extend(sensor_dir)\n sensor_cord_list.extend(sensor_cord)\n sensor_type_list.extend(sensor_type)\n sensor_area_list.extend(sensor_area)\n sensor_orientation_list.extend(sensor_orientation)\n return (sensor_dir_list, 
sensor_cord_list, sensor_type_list,\n sensor_area_list, sensor_orientation_list, sensor_intersection_list)\n\n\ndef calc_sensors_zone(building_names, locator, grid_size, geometry_pickle_dir):\n sensors_coords_zone = []\n sensors_dir_zone = []\n sensors_total_number_list = []\n names_zone = []\n sensors_code_zone = []\n sensor_intersection_zone = []\n for building_name in building_names:\n building_geometry = BuildingGeometry.load(os.path.join(\n geometry_pickle_dir, 'zone', building_name))\n (sensors_dir_building, sensors_coords_building,\n sensors_type_building, sensors_area_building,\n sensor_orientation_building, sensor_intersection_building\n ) = calc_sensors_building(building_geometry, grid_size)\n sensors_number = len(sensors_coords_building)\n sensors_total_number_list.append(sensors_number)\n sensors_code = [('srf' + str(x)) for x in range(sensors_number)]\n sensors_code_zone.append(sensors_code)\n sensors_coords_zone.extend(sensors_coords_building)\n sensors_dir_zone.extend(sensors_dir_building)\n sensor_intersection_zone.append(sensor_intersection_building)\n names_zone.append(building_name)\n pd.DataFrame({'BUILDING': building_name, 'SURFACE': sensors_code,\n 'orientation': sensor_orientation_building, 'intersection':\n sensor_intersection_building, 'Xcoor': [x[0] for x in\n sensors_coords_building], 'Ycoor': [x[1] for x in\n sensors_coords_building], 'Zcoor': [x[2] for x in\n sensors_coords_building], 'Xdir': [x[0] for x in\n sensors_dir_building], 'Ydir': [x[1] for x in\n sensors_dir_building], 'Zdir': [x[2] for x in\n sensors_dir_building], 'AREA_m2': sensors_area_building, 'TYPE':\n sensors_type_building}).to_csv(locator.get_radiation_metadata(\n building_name), index=None)\n return (sensors_coords_zone, sensors_dir_zone,\n sensors_total_number_list, names_zone, sensors_code_zone,\n sensor_intersection_zone)\n\n\ndef isolation_daysim(chunk_n, cea_daysim, building_names, locator,\n radiance_parameters, write_sensor_data, grid_size, max_global,\n 
weatherfile, geometry_pickle_dir):\n daysim_project = cea_daysim.initialize_daysim_project('chunk_{n}'.\n format(n=chunk_n))\n print('Creating daysim project in: {daysim_dir}'.format(daysim_dir=\n daysim_project.project_path))\n print('Calculating and sending sensor points')\n (sensors_coords_zone, sensors_dir_zone, sensors_number_zone, names_zone,\n sensors_code_zone, sensor_intersection_zone) = (calc_sensors_zone(\n building_names, locator, grid_size, geometry_pickle_dir))\n num_sensors = sum(sensors_number_zone)\n daysim_project.create_sensor_input_file(sensors_coords_zone,\n sensors_dir_zone, num_sensors, 'w/m2')\n print('Starting Daysim simulation for buildings: {buildings}'.format(\n buildings=names_zone))\n print('Total number of sensors: {num_sensors}'.format(num_sensors=\n num_sensors))\n print('Writing radiance parameters')\n daysim_project.write_radiance_parameters(radiance_parameters['rad_ab'],\n radiance_parameters['rad_ad'], radiance_parameters['rad_as'],\n radiance_parameters['rad_ar'], radiance_parameters['rad_aa'],\n radiance_parameters['rad_lr'], radiance_parameters['rad_st'],\n radiance_parameters['rad_sj'], radiance_parameters['rad_lw'],\n radiance_parameters['rad_dj'], radiance_parameters['rad_ds'],\n radiance_parameters['rad_dr'], radiance_parameters['rad_dp'])\n print('Executing hourly solar isolation calculation')\n daysim_project.execute_gen_dc()\n daysim_project.execute_ds_illum()\n print('Reading results...')\n solar_res = daysim_project.eval_ill()\n print('Fixing inconsistencies, if any')\n solar_res = np.clip(solar_res, a_min=0.0, a_max=max_global)\n if solar_res.shape[1] == HOURS_IN_YEAR + 24:\n print('Removing leap day')\n leap_day_hours = range(1416, 1440)\n solar_res = np.delete(solar_res, leap_day_hours, axis=1)\n print('Writing results to disk')\n index = 0\n for building_name, sensors_number_building, sensor_code_building, sensor_intersection_building in zip(\n names_zone, sensors_number_zone, sensors_code_zone,\n 
sensor_intersection_zone):\n selection_of_results = solar_res[index:index + sensors_number_building]\n selection_of_results[np.array(sensor_intersection_building) == 1] = 0\n items_sensor_name_and_result = dict(zip(sensor_code_building,\n selection_of_results.tolist()))\n index = index + sensors_number_building\n write_aggregated_results(building_name,\n items_sensor_name_and_result, locator, weatherfile)\n if write_sensor_data:\n write_sensor_results(building_name,\n items_sensor_name_and_result, locator)\n print('Removing results folder')\n daysim_project.cleanup_project()\n\n\ndef write_sensor_results(building_name, items_sensor_name_and_result, locator):\n with open(locator.get_radiation_building_sensors(building_name), 'w'\n ) as outfile:\n json.dump(items_sensor_name_and_result, outfile)\n\n\ndef write_aggregated_results(building_name, items_sensor_name_and_result,\n locator, weatherfile):\n geometry = pd.read_csv(locator.get_radiation_metadata(building_name))\n geometry['code'] = geometry['TYPE'] + '_' + geometry['orientation'] + '_kW'\n solar_analysis_fields = ['windows_east_kW', 'windows_west_kW',\n 'windows_south_kW', 'windows_north_kW', 'walls_east_kW',\n 'walls_west_kW', 'walls_south_kW', 'walls_north_kW', 'roofs_top_kW']\n solar_analysis_fields_area = ['windows_east_m2', 'windows_west_m2',\n 'windows_south_m2', 'windows_north_m2', 'walls_east_m2',\n 'walls_west_m2', 'walls_south_m2', 'walls_north_m2', 'roofs_top_m2']\n dict_not_aggregated = {}\n for field, field_area in zip(solar_analysis_fields,\n solar_analysis_fields_area):\n select_sensors = geometry.loc[geometry['code'] == field].set_index(\n 'SURFACE')\n area_m2 = select_sensors['AREA_m2'].sum()\n array_field = np.array([(select_sensors.loc[surface, 'AREA_m2'] *\n np.array(items_sensor_name_and_result[surface])) for surface in\n select_sensors.index]).sum(axis=0)\n dict_not_aggregated[field] = array_field / 1000\n dict_not_aggregated[field_area] = area_m2\n data_aggregated_kW = 
pd.DataFrame(dict_not_aggregated).round(2)\n data_aggregated_kW['Date'] = weatherfile['date']\n data_aggregated_kW.set_index('Date', inplace=True)\n data_aggregated_kW.to_csv(locator.get_radiation_building(building_name))\n",
"step-5": "import json\nimport os\n\nimport numpy as np\nimport pandas as pd\nimport py4design.py2radiance as py2radiance\nimport py4design.py3dmodel.calculate as calculate\nfrom py4design import py3dmodel\n\n__author__ = \"Jimeno A. Fonseca\"\n__copyright__ = \"Copyright 2017, Architecture and Building Systems - ETH Zurich\"\n__credits__ = [\"Jimeno A. Fonseca\", \"Kian Wee Chen\"]\n__license__ = \"MIT\"\n__version__ = \"0.1\"\n__maintainer__ = \"Daren Thomas\"\n__email__ = \"[email protected]\"\n__status__ = \"Production\"\n\nfrom cea.constants import HOURS_IN_YEAR\nfrom cea.resources.radiation_daysim.geometry_generator import BuildingGeometry\nfrom cea import suppress_3rd_party_debug_loggers\n\nsuppress_3rd_party_debug_loggers()\n\n\ndef create_sensor_input_file(rad, chunk_n):\n sensor_file_path = os.path.join(rad.data_folder_path, \"points_\" + str(chunk_n) + \".pts\")\n sensor_file = open(sensor_file_path, \"w\")\n sensor_pts_data = py2radiance.write_rad.sensor_file(rad.sensor_positions, rad.sensor_normals)\n sensor_file.write(sensor_pts_data)\n sensor_file.close()\n rad.sensor_file_path = sensor_file_path\n\n\ndef generate_sensor_surfaces(occface, wall_dim, roof_dim, srf_type, orientation, normal, intersection):\n mid_pt = py3dmodel.calculate.face_midpt(occface)\n location_pt = py3dmodel.modify.move_pt(mid_pt, normal, 0.01)\n moved_oface = py3dmodel.fetch.topo2topotype(py3dmodel.modify.move(mid_pt, location_pt, occface))\n if srf_type == 'roofs':\n xdim = ydim = roof_dim\n else:\n xdim = ydim = wall_dim\n # put it into occ and subdivide surfaces\n sensor_surfaces = py3dmodel.construct.grid_face(moved_oface, xdim, ydim)\n\n # calculate list of properties per surface\n sensor_intersection = [intersection for x in sensor_surfaces]\n sensor_dir = [normal for x in sensor_surfaces]\n sensor_cord = [py3dmodel.calculate.face_midpt(x) for x in sensor_surfaces]\n sensor_type = [srf_type for x in sensor_surfaces]\n sensor_orientation = [orientation for x in 
sensor_surfaces]\n sensor_area = [calculate.face_area(x) * (1.0 - scalar) for x, scalar in zip(sensor_surfaces, sensor_intersection)]\n\n return sensor_dir, sensor_cord, sensor_type, sensor_area, sensor_orientation, sensor_intersection\n\n\ndef calc_sensors_building(building_geometry, grid_size):\n sensor_dir_list = []\n sensor_cord_list = []\n sensor_type_list = []\n sensor_area_list = []\n sensor_orientation_list = []\n sensor_intersection_list = []\n surfaces_types = ['walls', 'windows', 'roofs']\n sensor_vertical_grid_dim = grid_size[\"walls_grid\"]\n sensor_horizontal_grid_dim = grid_size[\"roof_grid\"]\n for srf_type in surfaces_types:\n occface_list = getattr(building_geometry, srf_type)\n if srf_type == 'roofs':\n orientation_list = ['top'] * len(occface_list)\n normals_list = [(0.0, 0.0, 1.0)] * len(occface_list)\n interesection_list = [0] * len(occface_list)\n elif srf_type == 'windows':\n orientation_list = getattr(building_geometry, \"orientation_{srf_type}\".format(srf_type=srf_type))\n normals_list = getattr(building_geometry, \"normals_{srf_type}\".format(srf_type=srf_type))\n interesection_list = [0] * len(occface_list)\n else:\n orientation_list = getattr(building_geometry, \"orientation_{srf_type}\".format(srf_type=srf_type))\n normals_list = getattr(building_geometry, \"normals_{srf_type}\".format(srf_type=srf_type))\n interesection_list = getattr(building_geometry, \"intersect_{srf_type}\".format(srf_type=srf_type))\n for orientation, normal, face, intersection in zip(orientation_list, normals_list, occface_list,\n interesection_list):\n sensor_dir, \\\n sensor_cord, \\\n sensor_type, \\\n sensor_area, \\\n sensor_orientation, \\\n sensor_intersection = generate_sensor_surfaces(face,\n sensor_vertical_grid_dim,\n sensor_horizontal_grid_dim,\n srf_type,\n orientation,\n normal,\n intersection)\n sensor_intersection_list.extend(sensor_intersection)\n sensor_dir_list.extend(sensor_dir)\n sensor_cord_list.extend(sensor_cord)\n 
sensor_type_list.extend(sensor_type)\n sensor_area_list.extend(sensor_area)\n sensor_orientation_list.extend(sensor_orientation)\n\n return sensor_dir_list, sensor_cord_list, sensor_type_list, sensor_area_list, sensor_orientation_list, sensor_intersection_list\n\n\ndef calc_sensors_zone(building_names, locator, grid_size, geometry_pickle_dir):\n sensors_coords_zone = []\n sensors_dir_zone = []\n sensors_total_number_list = []\n names_zone = []\n sensors_code_zone = []\n sensor_intersection_zone = []\n for building_name in building_names:\n building_geometry = BuildingGeometry.load(os.path.join(geometry_pickle_dir, 'zone', building_name))\n # get sensors in the building\n sensors_dir_building, \\\n sensors_coords_building, \\\n sensors_type_building, \\\n sensors_area_building, \\\n sensor_orientation_building, \\\n sensor_intersection_building = calc_sensors_building(building_geometry, grid_size)\n\n # get the total number of sensors and store in lst\n sensors_number = len(sensors_coords_building)\n sensors_total_number_list.append(sensors_number)\n\n sensors_code = ['srf' + str(x) for x in range(sensors_number)]\n sensors_code_zone.append(sensors_code)\n\n # get the total list of coordinates and directions to send to daysim\n sensors_coords_zone.extend(sensors_coords_building)\n sensors_dir_zone.extend(sensors_dir_building)\n\n # get total list of intersections\n sensor_intersection_zone.append(sensor_intersection_building)\n\n # get the name of all buildings\n names_zone.append(building_name)\n\n # save sensors geometry result to disk\n pd.DataFrame({'BUILDING': building_name,\n 'SURFACE': sensors_code,\n 'orientation': sensor_orientation_building,\n 'intersection': sensor_intersection_building,\n 'Xcoor': [x[0] for x in sensors_coords_building],\n 'Ycoor': [x[1] for x in sensors_coords_building],\n 'Zcoor': [x[2] for x in sensors_coords_building],\n 'Xdir': [x[0] for x in sensors_dir_building],\n 'Ydir': [x[1] for x in sensors_dir_building],\n 'Zdir': [x[2] for 
x in sensors_dir_building],\n 'AREA_m2': sensors_area_building,\n 'TYPE': sensors_type_building}).to_csv(locator.get_radiation_metadata(building_name), index=None)\n\n return sensors_coords_zone, sensors_dir_zone, sensors_total_number_list, names_zone, sensors_code_zone, sensor_intersection_zone\n\n\ndef isolation_daysim(chunk_n, cea_daysim, building_names, locator, radiance_parameters, write_sensor_data, grid_size,\n max_global, weatherfile, geometry_pickle_dir):\n # initialize daysim project\n daysim_project = cea_daysim.initialize_daysim_project('chunk_{n}'.format(n=chunk_n))\n print('Creating daysim project in: {daysim_dir}'.format(daysim_dir=daysim_project.project_path))\n\n # calculate sensors\n print(\"Calculating and sending sensor points\")\n sensors_coords_zone, \\\n sensors_dir_zone, \\\n sensors_number_zone, \\\n names_zone, \\\n sensors_code_zone, \\\n sensor_intersection_zone = calc_sensors_zone(building_names, locator, grid_size, geometry_pickle_dir)\n\n num_sensors = sum(sensors_number_zone)\n daysim_project.create_sensor_input_file(sensors_coords_zone, sensors_dir_zone, num_sensors, \"w/m2\")\n\n print(\"Starting Daysim simulation for buildings: {buildings}\".format(buildings=names_zone))\n print(\"Total number of sensors: {num_sensors}\".format(num_sensors=num_sensors))\n\n print('Writing radiance parameters')\n daysim_project.write_radiance_parameters(radiance_parameters[\"rad_ab\"], radiance_parameters[\"rad_ad\"],\n radiance_parameters[\"rad_as\"], radiance_parameters[\"rad_ar\"],\n radiance_parameters[\"rad_aa\"], radiance_parameters[\"rad_lr\"],\n radiance_parameters[\"rad_st\"], radiance_parameters[\"rad_sj\"],\n radiance_parameters[\"rad_lw\"], radiance_parameters[\"rad_dj\"],\n radiance_parameters[\"rad_ds\"], radiance_parameters[\"rad_dr\"],\n radiance_parameters[\"rad_dp\"])\n\n print('Executing hourly solar isolation calculation')\n daysim_project.execute_gen_dc()\n daysim_project.execute_ds_illum()\n\n print('Reading results...')\n 
solar_res = daysim_project.eval_ill()\n\n # check inconsistencies and replace by max value of weather file\n print('Fixing inconsistencies, if any')\n solar_res = np.clip(solar_res, a_min=0.0, a_max=max_global)\n\n # Check if leap year and remove extra day\n if solar_res.shape[1] == HOURS_IN_YEAR + 24:\n print('Removing leap day')\n leap_day_hours = range(1416, 1440)\n solar_res = np.delete(solar_res, leap_day_hours, axis=1)\n\n print(\"Writing results to disk\")\n index = 0\n for building_name, \\\n sensors_number_building, \\\n sensor_code_building, \\\n sensor_intersection_building in zip(names_zone,\n sensors_number_zone,\n sensors_code_zone,\n sensor_intersection_zone):\n # select sensors data\n selection_of_results = solar_res[index:index + sensors_number_building]\n selection_of_results[np.array(sensor_intersection_building) == 1] = 0\n items_sensor_name_and_result = dict(zip(sensor_code_building, selection_of_results.tolist()))\n index = index + sensors_number_building\n\n # create summary and save to disk\n write_aggregated_results(building_name, items_sensor_name_and_result, locator, weatherfile)\n\n if write_sensor_data:\n write_sensor_results(building_name, items_sensor_name_and_result, locator)\n\n # erase daysim folder to avoid conflicts after every iteration\n print('Removing results folder')\n daysim_project.cleanup_project()\n\n\ndef write_sensor_results(building_name, items_sensor_name_and_result, locator):\n with open(locator.get_radiation_building_sensors(building_name), 'w') as outfile:\n json.dump(items_sensor_name_and_result, outfile)\n\n\ndef write_aggregated_results(building_name, items_sensor_name_and_result, locator, weatherfile):\n geometry = pd.read_csv(locator.get_radiation_metadata(building_name))\n geometry['code'] = geometry['TYPE'] + '_' + geometry['orientation'] + '_kW'\n solar_analysis_fields = ['windows_east_kW',\n 'windows_west_kW',\n 'windows_south_kW',\n 'windows_north_kW',\n 'walls_east_kW',\n 'walls_west_kW',\n 
'walls_south_kW',\n 'walls_north_kW',\n 'roofs_top_kW']\n solar_analysis_fields_area = ['windows_east_m2',\n 'windows_west_m2',\n 'windows_south_m2',\n 'windows_north_m2',\n 'walls_east_m2',\n 'walls_west_m2',\n 'walls_south_m2',\n 'walls_north_m2',\n 'roofs_top_m2']\n dict_not_aggregated = {}\n\n for field, field_area in zip(solar_analysis_fields, solar_analysis_fields_area):\n select_sensors = geometry.loc[geometry['code'] == field].set_index('SURFACE')\n area_m2 = select_sensors['AREA_m2'].sum()\n array_field = np.array([select_sensors.loc[surface, 'AREA_m2'] *\n np.array(items_sensor_name_and_result[surface])\n for surface in select_sensors.index]).sum(axis=0)\n dict_not_aggregated[field] = array_field / 1000 # in kWh\n dict_not_aggregated[field_area] = area_m2\n\n data_aggregated_kW = (pd.DataFrame(dict_not_aggregated)).round(2)\n data_aggregated_kW[\"Date\"] = weatherfile[\"date\"]\n data_aggregated_kW.set_index('Date', inplace=True)\n data_aggregated_kW.to_csv(locator.get_radiation_building(building_name))\n",
"step-ids": [
6,
7,
9,
10,
11
]
}
|
[
6,
7,
9,
10,
11
] |
# coding: utf-8
"""
SevOne API Documentation
Supported endpoints by the new RESTful API # noqa: E501
OpenAPI spec version: 2.1.18, Hash: db562e6
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from swagger_client.models.data_aggregation_setting import DataAggregationSetting # noqa: F401,E501
from swagger_client.models.raw_data_setting_v1 import RawDataSettingV1 # noqa: F401,E501
from swagger_client.models.units_setting import UnitsSetting # noqa: F401,E501
from swagger_client.models.work_hours_setting import WorkHoursSetting # noqa: F401,E501
class RawDataSettingsV1(object):
    """Swagger model grouping the raw-data settings returned by the SevOne API.

    NOTE: This class is auto generated by the swagger code generator program;
    regenerate rather than hand-editing where possible.
    """

    # Maps each attribute name to its declared swagger type.
    swagger_types = {
        'data_aggregation_setting': 'DataAggregationSetting',
        'raw_data_setting': 'RawDataSettingV1',
        'units_setting': 'UnitsSetting',
        'work_hours_setting': 'WorkHoursSetting'
    }

    # Maps each attribute name to its JSON key in the API definition.
    attribute_map = {
        'data_aggregation_setting': 'dataAggregationSetting',
        'raw_data_setting': 'rawDataSetting',
        'units_setting': 'unitsSetting',
        'work_hours_setting': 'workHoursSetting'
    }

    def __init__(self, data_aggregation_setting=None, raw_data_setting=None, units_setting=None, work_hours_setting=None):  # noqa: E501
        """RawDataSettingsV1 - a model defined in Swagger.

        Only the settings actually supplied (non-None) are assigned, and each
        assignment is routed through its property setter.
        """
        self._data_aggregation_setting = None
        self._raw_data_setting = None
        self._units_setting = None
        self._work_hours_setting = None
        self.discriminator = None
        supplied = (
            ('data_aggregation_setting', data_aggregation_setting),
            ('raw_data_setting', raw_data_setting),
            ('units_setting', units_setting),
            ('work_hours_setting', work_hours_setting),
        )
        for attr_name, attr_value in supplied:
            if attr_value is not None:
                setattr(self, attr_name, attr_value)

    @property
    def data_aggregation_setting(self):
        """The data_aggregation_setting of this RawDataSettingsV1.  # noqa: E501

        :rtype: DataAggregationSetting
        """
        return self._data_aggregation_setting

    @data_aggregation_setting.setter
    def data_aggregation_setting(self, data_aggregation_setting):
        """Set the data_aggregation_setting of this RawDataSettingsV1.

        :type: DataAggregationSetting
        """
        self._data_aggregation_setting = data_aggregation_setting

    @property
    def raw_data_setting(self):
        """The raw_data_setting of this RawDataSettingsV1.  # noqa: E501

        :rtype: RawDataSettingV1
        """
        return self._raw_data_setting

    @raw_data_setting.setter
    def raw_data_setting(self, raw_data_setting):
        """Set the raw_data_setting of this RawDataSettingsV1.

        :type: RawDataSettingV1
        """
        self._raw_data_setting = raw_data_setting

    @property
    def units_setting(self):
        """The units_setting of this RawDataSettingsV1.  # noqa: E501

        :rtype: UnitsSetting
        """
        return self._units_setting

    @units_setting.setter
    def units_setting(self, units_setting):
        """Set the units_setting of this RawDataSettingsV1.

        :type: UnitsSetting
        """
        self._units_setting = units_setting

    @property
    def work_hours_setting(self):
        """The work_hours_setting of this RawDataSettingsV1.  # noqa: E501

        :rtype: WorkHoursSetting
        """
        return self._work_hours_setting

    @work_hours_setting.setter
    def work_hours_setting(self, work_hours_setting):
        """Set the work_hours_setting of this RawDataSettingsV1.

        :type: WorkHoursSetting
        """
        self._work_hours_setting = work_hours_setting

    def to_dict(self):
        """Return the model properties as a dict."""
        def _serialize(value):
            # Recursively convert nested models (anything exposing to_dict).
            if isinstance(value, list):
                return [v.to_dict() if hasattr(v, 'to_dict') else v
                        for v in value]
            if hasattr(value, 'to_dict'):
                return value.to_dict()
            if isinstance(value, dict):
                return {k: (v.to_dict() if hasattr(v, 'to_dict') else v)
                        for k, v in value.items()}
            return value

        result = {attr: _serialize(getattr(self, attr))
                  for attr in self.swagger_types}
        if issubclass(RawDataSettingsV1, dict):
            # Generated safeguard for models that subclass dict.
            result.update(self.items())
        return result

    def to_str(self):
        """Return the pretty-printed string representation of the model."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`."""
        return self.to_str()

    def __eq__(self, other):
        """Two instances are equal when all of their attributes match."""
        return isinstance(other, RawDataSettingsV1) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Inverse of equality."""
        return not self == other
|
normal
|
{
"blob_id": "25d4fa44cb17048301076391d5d67ae0b0812ac7",
"index": 3988,
"step-1": "<mask token>\n\n\nclass RawDataSettingsV1(object):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def __init__(self, data_aggregation_setting=None, raw_data_setting=None,\n units_setting=None, work_hours_setting=None):\n \"\"\"RawDataSettingsV1 - a model defined in Swagger\"\"\"\n self._data_aggregation_setting = None\n self._raw_data_setting = None\n self._units_setting = None\n self._work_hours_setting = None\n self.discriminator = None\n if data_aggregation_setting is not None:\n self.data_aggregation_setting = data_aggregation_setting\n if raw_data_setting is not None:\n self.raw_data_setting = raw_data_setting\n if units_setting is not None:\n self.units_setting = units_setting\n if work_hours_setting is not None:\n self.work_hours_setting = work_hours_setting\n <mask token>\n <mask token>\n\n @property\n def raw_data_setting(self):\n \"\"\"Gets the raw_data_setting of this RawDataSettingsV1. # noqa: E501\n\n\n :return: The raw_data_setting of this RawDataSettingsV1. # noqa: E501\n :rtype: RawDataSettingV1\n \"\"\"\n return self._raw_data_setting\n <mask token>\n\n @property\n def units_setting(self):\n \"\"\"Gets the units_setting of this RawDataSettingsV1. # noqa: E501\n\n\n :return: The units_setting of this RawDataSettingsV1. # noqa: E501\n :rtype: UnitsSetting\n \"\"\"\n return self._units_setting\n\n @units_setting.setter\n def units_setting(self, units_setting):\n \"\"\"Sets the units_setting of this RawDataSettingsV1.\n\n\n :param units_setting: The units_setting of this RawDataSettingsV1. # noqa: E501\n :type: UnitsSetting\n \"\"\"\n self._units_setting = units_setting\n\n @property\n def work_hours_setting(self):\n \"\"\"Gets the work_hours_setting of this RawDataSettingsV1. # noqa: E501\n\n\n :return: The work_hours_setting of this RawDataSettingsV1. 
# noqa: E501\n :rtype: WorkHoursSetting\n \"\"\"\n return self._work_hours_setting\n <mask token>\n <mask token>\n <mask token>\n\n def __repr__(self):\n \"\"\"For `print` and `pprint`\"\"\"\n return self.to_str()\n\n def __eq__(self, other):\n \"\"\"Returns true if both objects are equal\"\"\"\n if not isinstance(other, RawDataSettingsV1):\n return False\n return self.__dict__ == other.__dict__\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass RawDataSettingsV1(object):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def __init__(self, data_aggregation_setting=None, raw_data_setting=None,\n units_setting=None, work_hours_setting=None):\n \"\"\"RawDataSettingsV1 - a model defined in Swagger\"\"\"\n self._data_aggregation_setting = None\n self._raw_data_setting = None\n self._units_setting = None\n self._work_hours_setting = None\n self.discriminator = None\n if data_aggregation_setting is not None:\n self.data_aggregation_setting = data_aggregation_setting\n if raw_data_setting is not None:\n self.raw_data_setting = raw_data_setting\n if units_setting is not None:\n self.units_setting = units_setting\n if work_hours_setting is not None:\n self.work_hours_setting = work_hours_setting\n\n @property\n def data_aggregation_setting(self):\n \"\"\"Gets the data_aggregation_setting of this RawDataSettingsV1. # noqa: E501\n\n\n :return: The data_aggregation_setting of this RawDataSettingsV1. # noqa: E501\n :rtype: DataAggregationSetting\n \"\"\"\n return self._data_aggregation_setting\n\n @data_aggregation_setting.setter\n def data_aggregation_setting(self, data_aggregation_setting):\n \"\"\"Sets the data_aggregation_setting of this RawDataSettingsV1.\n\n\n :param data_aggregation_setting: The data_aggregation_setting of this RawDataSettingsV1. # noqa: E501\n :type: DataAggregationSetting\n \"\"\"\n self._data_aggregation_setting = data_aggregation_setting\n\n @property\n def raw_data_setting(self):\n \"\"\"Gets the raw_data_setting of this RawDataSettingsV1. # noqa: E501\n\n\n :return: The raw_data_setting of this RawDataSettingsV1. # noqa: E501\n :rtype: RawDataSettingV1\n \"\"\"\n return self._raw_data_setting\n\n @raw_data_setting.setter\n def raw_data_setting(self, raw_data_setting):\n \"\"\"Sets the raw_data_setting of this RawDataSettingsV1.\n\n\n :param raw_data_setting: The raw_data_setting of this RawDataSettingsV1. 
# noqa: E501\n :type: RawDataSettingV1\n \"\"\"\n self._raw_data_setting = raw_data_setting\n\n @property\n def units_setting(self):\n \"\"\"Gets the units_setting of this RawDataSettingsV1. # noqa: E501\n\n\n :return: The units_setting of this RawDataSettingsV1. # noqa: E501\n :rtype: UnitsSetting\n \"\"\"\n return self._units_setting\n\n @units_setting.setter\n def units_setting(self, units_setting):\n \"\"\"Sets the units_setting of this RawDataSettingsV1.\n\n\n :param units_setting: The units_setting of this RawDataSettingsV1. # noqa: E501\n :type: UnitsSetting\n \"\"\"\n self._units_setting = units_setting\n\n @property\n def work_hours_setting(self):\n \"\"\"Gets the work_hours_setting of this RawDataSettingsV1. # noqa: E501\n\n\n :return: The work_hours_setting of this RawDataSettingsV1. # noqa: E501\n :rtype: WorkHoursSetting\n \"\"\"\n return self._work_hours_setting\n\n @work_hours_setting.setter\n def work_hours_setting(self, work_hours_setting):\n \"\"\"Sets the work_hours_setting of this RawDataSettingsV1.\n\n\n :param work_hours_setting: The work_hours_setting of this RawDataSettingsV1. 
# noqa: E501\n :type: WorkHoursSetting\n \"\"\"\n self._work_hours_setting = work_hours_setting\n\n def to_dict(self):\n \"\"\"Returns the model properties as a dict\"\"\"\n result = {}\n for attr, _ in six.iteritems(self.swagger_types):\n value = getattr(self, attr)\n if isinstance(value, list):\n result[attr] = list(map(lambda x: x.to_dict() if hasattr(x,\n 'to_dict') else x, value))\n elif hasattr(value, 'to_dict'):\n result[attr] = value.to_dict()\n elif isinstance(value, dict):\n result[attr] = dict(map(lambda item: (item[0], item[1].\n to_dict()) if hasattr(item[1], 'to_dict') else item,\n value.items()))\n else:\n result[attr] = value\n if issubclass(RawDataSettingsV1, dict):\n for key, value in self.items():\n result[key] = value\n return result\n\n def to_str(self):\n \"\"\"Returns the string representation of the model\"\"\"\n return pprint.pformat(self.to_dict())\n\n def __repr__(self):\n \"\"\"For `print` and `pprint`\"\"\"\n return self.to_str()\n\n def __eq__(self, other):\n \"\"\"Returns true if both objects are equal\"\"\"\n if not isinstance(other, RawDataSettingsV1):\n return False\n return self.__dict__ == other.__dict__\n\n def __ne__(self, other):\n \"\"\"Returns true if both objects are not equal\"\"\"\n return not self == other\n",
"step-3": "<mask token>\n\n\nclass RawDataSettingsV1(object):\n \"\"\"NOTE: This class is auto generated by the swagger code generator program.\n\n Do not edit the class manually.\n \"\"\"\n \"\"\"\n Attributes:\n swagger_types (dict): The key is attribute name\n and the value is attribute type.\n attribute_map (dict): The key is attribute name\n and the value is json key in definition.\n \"\"\"\n swagger_types = {'data_aggregation_setting': 'DataAggregationSetting',\n 'raw_data_setting': 'RawDataSettingV1', 'units_setting':\n 'UnitsSetting', 'work_hours_setting': 'WorkHoursSetting'}\n attribute_map = {'data_aggregation_setting': 'dataAggregationSetting',\n 'raw_data_setting': 'rawDataSetting', 'units_setting':\n 'unitsSetting', 'work_hours_setting': 'workHoursSetting'}\n\n def __init__(self, data_aggregation_setting=None, raw_data_setting=None,\n units_setting=None, work_hours_setting=None):\n \"\"\"RawDataSettingsV1 - a model defined in Swagger\"\"\"\n self._data_aggregation_setting = None\n self._raw_data_setting = None\n self._units_setting = None\n self._work_hours_setting = None\n self.discriminator = None\n if data_aggregation_setting is not None:\n self.data_aggregation_setting = data_aggregation_setting\n if raw_data_setting is not None:\n self.raw_data_setting = raw_data_setting\n if units_setting is not None:\n self.units_setting = units_setting\n if work_hours_setting is not None:\n self.work_hours_setting = work_hours_setting\n\n @property\n def data_aggregation_setting(self):\n \"\"\"Gets the data_aggregation_setting of this RawDataSettingsV1. # noqa: E501\n\n\n :return: The data_aggregation_setting of this RawDataSettingsV1. 
# noqa: E501\n :rtype: DataAggregationSetting\n \"\"\"\n return self._data_aggregation_setting\n\n @data_aggregation_setting.setter\n def data_aggregation_setting(self, data_aggregation_setting):\n \"\"\"Sets the data_aggregation_setting of this RawDataSettingsV1.\n\n\n :param data_aggregation_setting: The data_aggregation_setting of this RawDataSettingsV1. # noqa: E501\n :type: DataAggregationSetting\n \"\"\"\n self._data_aggregation_setting = data_aggregation_setting\n\n @property\n def raw_data_setting(self):\n \"\"\"Gets the raw_data_setting of this RawDataSettingsV1. # noqa: E501\n\n\n :return: The raw_data_setting of this RawDataSettingsV1. # noqa: E501\n :rtype: RawDataSettingV1\n \"\"\"\n return self._raw_data_setting\n\n @raw_data_setting.setter\n def raw_data_setting(self, raw_data_setting):\n \"\"\"Sets the raw_data_setting of this RawDataSettingsV1.\n\n\n :param raw_data_setting: The raw_data_setting of this RawDataSettingsV1. # noqa: E501\n :type: RawDataSettingV1\n \"\"\"\n self._raw_data_setting = raw_data_setting\n\n @property\n def units_setting(self):\n \"\"\"Gets the units_setting of this RawDataSettingsV1. # noqa: E501\n\n\n :return: The units_setting of this RawDataSettingsV1. # noqa: E501\n :rtype: UnitsSetting\n \"\"\"\n return self._units_setting\n\n @units_setting.setter\n def units_setting(self, units_setting):\n \"\"\"Sets the units_setting of this RawDataSettingsV1.\n\n\n :param units_setting: The units_setting of this RawDataSettingsV1. # noqa: E501\n :type: UnitsSetting\n \"\"\"\n self._units_setting = units_setting\n\n @property\n def work_hours_setting(self):\n \"\"\"Gets the work_hours_setting of this RawDataSettingsV1. # noqa: E501\n\n\n :return: The work_hours_setting of this RawDataSettingsV1. 
# noqa: E501\n :rtype: WorkHoursSetting\n \"\"\"\n return self._work_hours_setting\n\n @work_hours_setting.setter\n def work_hours_setting(self, work_hours_setting):\n \"\"\"Sets the work_hours_setting of this RawDataSettingsV1.\n\n\n :param work_hours_setting: The work_hours_setting of this RawDataSettingsV1. # noqa: E501\n :type: WorkHoursSetting\n \"\"\"\n self._work_hours_setting = work_hours_setting\n\n def to_dict(self):\n \"\"\"Returns the model properties as a dict\"\"\"\n result = {}\n for attr, _ in six.iteritems(self.swagger_types):\n value = getattr(self, attr)\n if isinstance(value, list):\n result[attr] = list(map(lambda x: x.to_dict() if hasattr(x,\n 'to_dict') else x, value))\n elif hasattr(value, 'to_dict'):\n result[attr] = value.to_dict()\n elif isinstance(value, dict):\n result[attr] = dict(map(lambda item: (item[0], item[1].\n to_dict()) if hasattr(item[1], 'to_dict') else item,\n value.items()))\n else:\n result[attr] = value\n if issubclass(RawDataSettingsV1, dict):\n for key, value in self.items():\n result[key] = value\n return result\n\n def to_str(self):\n \"\"\"Returns the string representation of the model\"\"\"\n return pprint.pformat(self.to_dict())\n\n def __repr__(self):\n \"\"\"For `print` and `pprint`\"\"\"\n return self.to_str()\n\n def __eq__(self, other):\n \"\"\"Returns true if both objects are equal\"\"\"\n if not isinstance(other, RawDataSettingsV1):\n return False\n return self.__dict__ == other.__dict__\n\n def __ne__(self, other):\n \"\"\"Returns true if both objects are not equal\"\"\"\n return not self == other\n",
"step-4": "<mask token>\nimport pprint\nimport re\nimport six\nfrom swagger_client.models.data_aggregation_setting import DataAggregationSetting\nfrom swagger_client.models.raw_data_setting_v1 import RawDataSettingV1\nfrom swagger_client.models.units_setting import UnitsSetting\nfrom swagger_client.models.work_hours_setting import WorkHoursSetting\n\n\nclass RawDataSettingsV1(object):\n \"\"\"NOTE: This class is auto generated by the swagger code generator program.\n\n Do not edit the class manually.\n \"\"\"\n \"\"\"\n Attributes:\n swagger_types (dict): The key is attribute name\n and the value is attribute type.\n attribute_map (dict): The key is attribute name\n and the value is json key in definition.\n \"\"\"\n swagger_types = {'data_aggregation_setting': 'DataAggregationSetting',\n 'raw_data_setting': 'RawDataSettingV1', 'units_setting':\n 'UnitsSetting', 'work_hours_setting': 'WorkHoursSetting'}\n attribute_map = {'data_aggregation_setting': 'dataAggregationSetting',\n 'raw_data_setting': 'rawDataSetting', 'units_setting':\n 'unitsSetting', 'work_hours_setting': 'workHoursSetting'}\n\n def __init__(self, data_aggregation_setting=None, raw_data_setting=None,\n units_setting=None, work_hours_setting=None):\n \"\"\"RawDataSettingsV1 - a model defined in Swagger\"\"\"\n self._data_aggregation_setting = None\n self._raw_data_setting = None\n self._units_setting = None\n self._work_hours_setting = None\n self.discriminator = None\n if data_aggregation_setting is not None:\n self.data_aggregation_setting = data_aggregation_setting\n if raw_data_setting is not None:\n self.raw_data_setting = raw_data_setting\n if units_setting is not None:\n self.units_setting = units_setting\n if work_hours_setting is not None:\n self.work_hours_setting = work_hours_setting\n\n @property\n def data_aggregation_setting(self):\n \"\"\"Gets the data_aggregation_setting of this RawDataSettingsV1. # noqa: E501\n\n\n :return: The data_aggregation_setting of this RawDataSettingsV1. 
# noqa: E501\n :rtype: DataAggregationSetting\n \"\"\"\n return self._data_aggregation_setting\n\n @data_aggregation_setting.setter\n def data_aggregation_setting(self, data_aggregation_setting):\n \"\"\"Sets the data_aggregation_setting of this RawDataSettingsV1.\n\n\n :param data_aggregation_setting: The data_aggregation_setting of this RawDataSettingsV1. # noqa: E501\n :type: DataAggregationSetting\n \"\"\"\n self._data_aggregation_setting = data_aggregation_setting\n\n @property\n def raw_data_setting(self):\n \"\"\"Gets the raw_data_setting of this RawDataSettingsV1. # noqa: E501\n\n\n :return: The raw_data_setting of this RawDataSettingsV1. # noqa: E501\n :rtype: RawDataSettingV1\n \"\"\"\n return self._raw_data_setting\n\n @raw_data_setting.setter\n def raw_data_setting(self, raw_data_setting):\n \"\"\"Sets the raw_data_setting of this RawDataSettingsV1.\n\n\n :param raw_data_setting: The raw_data_setting of this RawDataSettingsV1. # noqa: E501\n :type: RawDataSettingV1\n \"\"\"\n self._raw_data_setting = raw_data_setting\n\n @property\n def units_setting(self):\n \"\"\"Gets the units_setting of this RawDataSettingsV1. # noqa: E501\n\n\n :return: The units_setting of this RawDataSettingsV1. # noqa: E501\n :rtype: UnitsSetting\n \"\"\"\n return self._units_setting\n\n @units_setting.setter\n def units_setting(self, units_setting):\n \"\"\"Sets the units_setting of this RawDataSettingsV1.\n\n\n :param units_setting: The units_setting of this RawDataSettingsV1. # noqa: E501\n :type: UnitsSetting\n \"\"\"\n self._units_setting = units_setting\n\n @property\n def work_hours_setting(self):\n \"\"\"Gets the work_hours_setting of this RawDataSettingsV1. # noqa: E501\n\n\n :return: The work_hours_setting of this RawDataSettingsV1. 
# noqa: E501\n :rtype: WorkHoursSetting\n \"\"\"\n return self._work_hours_setting\n\n @work_hours_setting.setter\n def work_hours_setting(self, work_hours_setting):\n \"\"\"Sets the work_hours_setting of this RawDataSettingsV1.\n\n\n :param work_hours_setting: The work_hours_setting of this RawDataSettingsV1. # noqa: E501\n :type: WorkHoursSetting\n \"\"\"\n self._work_hours_setting = work_hours_setting\n\n def to_dict(self):\n \"\"\"Returns the model properties as a dict\"\"\"\n result = {}\n for attr, _ in six.iteritems(self.swagger_types):\n value = getattr(self, attr)\n if isinstance(value, list):\n result[attr] = list(map(lambda x: x.to_dict() if hasattr(x,\n 'to_dict') else x, value))\n elif hasattr(value, 'to_dict'):\n result[attr] = value.to_dict()\n elif isinstance(value, dict):\n result[attr] = dict(map(lambda item: (item[0], item[1].\n to_dict()) if hasattr(item[1], 'to_dict') else item,\n value.items()))\n else:\n result[attr] = value\n if issubclass(RawDataSettingsV1, dict):\n for key, value in self.items():\n result[key] = value\n return result\n\n def to_str(self):\n \"\"\"Returns the string representation of the model\"\"\"\n return pprint.pformat(self.to_dict())\n\n def __repr__(self):\n \"\"\"For `print` and `pprint`\"\"\"\n return self.to_str()\n\n def __eq__(self, other):\n \"\"\"Returns true if both objects are equal\"\"\"\n if not isinstance(other, RawDataSettingsV1):\n return False\n return self.__dict__ == other.__dict__\n\n def __ne__(self, other):\n \"\"\"Returns true if both objects are not equal\"\"\"\n return not self == other\n",
"step-5": "# coding: utf-8\n\n\"\"\"\n SevOne API Documentation\n\n Supported endpoints by the new RESTful API # noqa: E501\n\n OpenAPI spec version: 2.1.18, Hash: db562e6\n \n Generated by: https://github.com/swagger-api/swagger-codegen.git\n\"\"\"\n\n\nimport pprint\nimport re # noqa: F401\n\nimport six\n\nfrom swagger_client.models.data_aggregation_setting import DataAggregationSetting # noqa: F401,E501\nfrom swagger_client.models.raw_data_setting_v1 import RawDataSettingV1 # noqa: F401,E501\nfrom swagger_client.models.units_setting import UnitsSetting # noqa: F401,E501\nfrom swagger_client.models.work_hours_setting import WorkHoursSetting # noqa: F401,E501\n\n\nclass RawDataSettingsV1(object):\n \"\"\"NOTE: This class is auto generated by the swagger code generator program.\n\n Do not edit the class manually.\n \"\"\"\n\n \"\"\"\n Attributes:\n swagger_types (dict): The key is attribute name\n and the value is attribute type.\n attribute_map (dict): The key is attribute name\n and the value is json key in definition.\n \"\"\"\n swagger_types = {\n 'data_aggregation_setting': 'DataAggregationSetting',\n 'raw_data_setting': 'RawDataSettingV1',\n 'units_setting': 'UnitsSetting',\n 'work_hours_setting': 'WorkHoursSetting'\n }\n\n attribute_map = {\n 'data_aggregation_setting': 'dataAggregationSetting',\n 'raw_data_setting': 'rawDataSetting',\n 'units_setting': 'unitsSetting',\n 'work_hours_setting': 'workHoursSetting'\n }\n\n def __init__(self, data_aggregation_setting=None, raw_data_setting=None, units_setting=None, work_hours_setting=None): # noqa: E501\n \"\"\"RawDataSettingsV1 - a model defined in Swagger\"\"\" # noqa: E501\n\n self._data_aggregation_setting = None\n self._raw_data_setting = None\n self._units_setting = None\n self._work_hours_setting = None\n self.discriminator = None\n\n if data_aggregation_setting is not None:\n self.data_aggregation_setting = data_aggregation_setting\n if raw_data_setting is not None:\n self.raw_data_setting = 
raw_data_setting\n if units_setting is not None:\n self.units_setting = units_setting\n if work_hours_setting is not None:\n self.work_hours_setting = work_hours_setting\n\n @property\n def data_aggregation_setting(self):\n \"\"\"Gets the data_aggregation_setting of this RawDataSettingsV1. # noqa: E501\n\n\n :return: The data_aggregation_setting of this RawDataSettingsV1. # noqa: E501\n :rtype: DataAggregationSetting\n \"\"\"\n return self._data_aggregation_setting\n\n @data_aggregation_setting.setter\n def data_aggregation_setting(self, data_aggregation_setting):\n \"\"\"Sets the data_aggregation_setting of this RawDataSettingsV1.\n\n\n :param data_aggregation_setting: The data_aggregation_setting of this RawDataSettingsV1. # noqa: E501\n :type: DataAggregationSetting\n \"\"\"\n\n self._data_aggregation_setting = data_aggregation_setting\n\n @property\n def raw_data_setting(self):\n \"\"\"Gets the raw_data_setting of this RawDataSettingsV1. # noqa: E501\n\n\n :return: The raw_data_setting of this RawDataSettingsV1. # noqa: E501\n :rtype: RawDataSettingV1\n \"\"\"\n return self._raw_data_setting\n\n @raw_data_setting.setter\n def raw_data_setting(self, raw_data_setting):\n \"\"\"Sets the raw_data_setting of this RawDataSettingsV1.\n\n\n :param raw_data_setting: The raw_data_setting of this RawDataSettingsV1. # noqa: E501\n :type: RawDataSettingV1\n \"\"\"\n\n self._raw_data_setting = raw_data_setting\n\n @property\n def units_setting(self):\n \"\"\"Gets the units_setting of this RawDataSettingsV1. # noqa: E501\n\n\n :return: The units_setting of this RawDataSettingsV1. # noqa: E501\n :rtype: UnitsSetting\n \"\"\"\n return self._units_setting\n\n @units_setting.setter\n def units_setting(self, units_setting):\n \"\"\"Sets the units_setting of this RawDataSettingsV1.\n\n\n :param units_setting: The units_setting of this RawDataSettingsV1. 
# noqa: E501\n :type: UnitsSetting\n \"\"\"\n\n self._units_setting = units_setting\n\n @property\n def work_hours_setting(self):\n \"\"\"Gets the work_hours_setting of this RawDataSettingsV1. # noqa: E501\n\n\n :return: The work_hours_setting of this RawDataSettingsV1. # noqa: E501\n :rtype: WorkHoursSetting\n \"\"\"\n return self._work_hours_setting\n\n @work_hours_setting.setter\n def work_hours_setting(self, work_hours_setting):\n \"\"\"Sets the work_hours_setting of this RawDataSettingsV1.\n\n\n :param work_hours_setting: The work_hours_setting of this RawDataSettingsV1. # noqa: E501\n :type: WorkHoursSetting\n \"\"\"\n\n self._work_hours_setting = work_hours_setting\n\n def to_dict(self):\n \"\"\"Returns the model properties as a dict\"\"\"\n result = {}\n\n for attr, _ in six.iteritems(self.swagger_types):\n value = getattr(self, attr)\n if isinstance(value, list):\n result[attr] = list(map(\n lambda x: x.to_dict() if hasattr(x, \"to_dict\") else x,\n value\n ))\n elif hasattr(value, \"to_dict\"):\n result[attr] = value.to_dict()\n elif isinstance(value, dict):\n result[attr] = dict(map(\n lambda item: (item[0], item[1].to_dict())\n if hasattr(item[1], \"to_dict\") else item,\n value.items()\n ))\n else:\n result[attr] = value\n if issubclass(RawDataSettingsV1, dict):\n for key, value in self.items():\n result[key] = value\n\n return result\n\n def to_str(self):\n \"\"\"Returns the string representation of the model\"\"\"\n return pprint.pformat(self.to_dict())\n\n def __repr__(self):\n \"\"\"For `print` and `pprint`\"\"\"\n return self.to_str()\n\n def __eq__(self, other):\n \"\"\"Returns true if both objects are equal\"\"\"\n if not isinstance(other, RawDataSettingsV1):\n return False\n\n return self.__dict__ == other.__dict__\n\n def __ne__(self, other):\n \"\"\"Returns true if both objects are not equal\"\"\"\n return not self == other\n",
"step-ids": [
8,
15,
17,
18,
19
]
}
|
[
8,
15,
17,
18,
19
] |
'''给定一个只包含小写字母的有序数组letters 和一个目标字母 target,寻找有序数组里面比目标字母大的最小字母。
数组里字母的顺序是循环的。举个例子,如果目标字母target = 'z' 并且有序数组为 letters = ['a', 'b'],则答案返回 'a'。输入:
示例:
letters = ["c", "f", "j"]
target = "a"
输出: "c"
'''
class Solution(object):
    def nextGreatestLetter(self, letters, target):
        """Return the smallest letter in *letters* strictly greater than
        *target*; the array is circular, so wrap to letters[0] when every
        letter compares <= target.

        :type letters: List[str]  # sorted lowercase letters, non-empty
        :type target: str
        :rtype: str
        """
        import bisect  # local import: this snippet has no module-level imports

        # letters is sorted, so the answer is the first element to the right
        # of target; bisect_right also skips over entries equal to target.
        i = bisect.bisect_right(letters, target)
        # i == len(letters) means no letter is greater -> wrap to the front.
        return letters[i % len(letters)]
class SolutionBest(object):
    def nextGreatestLetter(self, letters, target):
        """
        :type letters: List[str]
        :type target: str
        :rtype: str
        """
        # letters is already sorted, so the first letter comparing greater
        # than target is the answer; fall back to letters[0] because the
        # array is circular.  Single-character strings compare by code point.
        return next((letter for letter in letters if letter > target),
                    letters[0])
|
normal
|
{
"blob_id": "9cb3d8bc7af0061047136d57abfe68cbb5ae0cd7",
"index": 3344,
"step-1": "<mask token>\n\n\nclass SolutionBest(object):\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass SolutionBest(object):\n\n def nextGreatestLetter(self, letters, target):\n \"\"\"\n :type letters: List[str]\n :type target: str\n :rtype: str\n \"\"\"\n for i in letters:\n if i > target:\n return i\n return letters[0]\n",
"step-3": "<mask token>\n\n\nclass Solution(object):\n <mask token>\n\n\nclass SolutionBest(object):\n\n def nextGreatestLetter(self, letters, target):\n \"\"\"\n :type letters: List[str]\n :type target: str\n :rtype: str\n \"\"\"\n for i in letters:\n if i > target:\n return i\n return letters[0]\n",
"step-4": "<mask token>\n\n\nclass Solution(object):\n\n def nextGreatestLetter(self, letters, target):\n \"\"\"\n :type letters: List[str]\n :type target: str\n :rtype: str\n \"\"\"\n list_a = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k',\n 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x',\n 'y', 'z']\n index_target = list_a.index(target)\n for i in range(index_target + 1, len(list_a)):\n if list_a[i] in letters:\n return list_a[i]\n return letters[0]\n\n\nclass SolutionBest(object):\n\n def nextGreatestLetter(self, letters, target):\n \"\"\"\n :type letters: List[str]\n :type target: str\n :rtype: str\n \"\"\"\n for i in letters:\n if i > target:\n return i\n return letters[0]\n",
"step-5": "'''给定一个只包含小写字母的有序数组letters 和一个目标字母 target,寻找有序数组里面比目标字母大的最小字母。\n\n数组里字母的顺序是循环的。举个例子,如果目标字母target = 'z' 并且有序数组为 letters = ['a', 'b'],则答案返回 'a'。输入:\n\n示例:\nletters = [\"c\", \"f\", \"j\"]\ntarget = \"a\"\n输出: \"c\"\n'''\nclass Solution(object):\n def nextGreatestLetter(self, letters, target):\n \"\"\"\n :type letters: List[str]\n :type target: str\n :rtype: str\n \"\"\"\n list_a = ['a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z']\n index_target = list_a.index(target)\n for i in range(index_target + 1,len(list_a)):\n if list_a[i] in letters:\n return list_a[i]\n return letters[0] #以上查询没找到以后,输出列表第一项\n\nclass SolutionBest(object):\n def nextGreatestLetter(self, letters, target):\n \"\"\"\n :type letters: List[str]\n :type target: str\n :rtype: str\n \"\"\"\n for i in letters: #题目都说了,有序数组,直接迭代就好\n if i > target:#惊不惊喜,字母之间在python是可以直接“比较大小”的\n return i\n return letters[0]",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
class Solution:
    def commonFactors(self, a: int, b: int) -> int:
        """Return how many positive integers divide both *a* and *b*.

        :param a: first positive integer
        :param b: second positive integer
        :return: the count of common factors of a and b
        """
        import math  # fix: the original snippet used math.gcd without importing math

        # Every common factor of a and b divides gcd(a, b), so it suffices
        # to count the divisors of the gcd.
        g = math.gcd(a, b)
        return sum(1 for i in range(1, g + 1) if g % i == 0)
|
normal
|
{
"blob_id": "ea696329a0cfd558fb592ffaf6339a35e8950a3c",
"index": 6721,
"step-1": "<mask token>\n",
"step-2": "class Solution:\n <mask token>\n",
"step-3": "class Solution:\n\n def commonFactors(self, a: int, b: int) ->int:\n gcd = math.gcd(a, b)\n return sum(a % i == 0 and b % i == 0 for i in range(1, gcd + 1))\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
# (c) Copyright 2015-2016 Hewlett Packard Enterprise Development LP
# (c) Copyright 2017 SUSE LLC
import json
from bll.plugins import service
import logging
import pecan
import pymysql.cursors
LOG = logging.getLogger(__name__)
class PreferencesSvc(service.SvcBase):
    """
    Simple service to manage user preferences. User preferences are stored as
    JSON in a mysql database.

    The ``target`` value for this plugin is ``preferences``. See
    :ref:`rest-api` for a full description of the request and response formats.
    """

    def __init__(self, *args, **kwargs):
        super(PreferencesSvc, self).__init__(*args, **kwargs)
        # Copy the pecan DB config dict so overriding cursorclass does not
        # mutate the shared configuration; DictCursor returns rows as dicts.
        config = pecan.conf.db.to_dict()
        config['cursorclass'] = pymysql.cursors.DictCursor
        self.connection = pymysql.connect(**config)

    @service.expose(action='GET')
    def _get(self):
        return self._get_mysql(self.data.get("user"))

    @service.expose(action='POST')
    def _post(self):
        self._post_mysql(self.data.get("user"),
                         self.data.get("prefs"))

    @service.expose(action='PUT')
    def _put(self):
        self._put_mysql(self.data.get("user"),
                        self.data.get("prefs"))

    @service.expose(action='DELETE')
    def _delete(self):
        self._delete_mysql(self.data.get("user"))

    # Helpers that actually touch the `preferences` table.

    def _get_mysql(self, user):
        """Fetch and deserialize the stored prefs JSON for *user*.

        Sets an error on the response (and returns None) when no row
        exists for the user.
        """
        with self.connection.cursor() as cursor:
            sql = "SELECT `prefs` from `preferences` WHERE `username`=%s"
            # Pass parameters as a tuple -- the portable DB-API form.
            cursor.execute(sql, (user,))
            row = cursor.fetchone()

        if row is None:
            message = self._("User {} does not exist").format(user)
            # Logger.warn() is deprecated; warning() is the supported name.
            LOG.warning(message)
            self.response.error(message)
            return

        prefs = row.get("prefs")
        # Depending on the column type / driver version the JSON value may
        # already arrive deserialized -- TODO confirm against the schema.
        if isinstance(prefs, dict):
            return prefs
        return json.loads(prefs)

    def _post_mysql(self, user, prefs):
        """Insert a new preferences row for *user*."""
        with self.connection.cursor() as cursor:
            sql = ("INSERT INTO `preferences` (`username`, `prefs`) " +
                   "VALUES (%s,%s)")
            cursor.execute(sql, (user, json.dumps(prefs)))
        self.connection.commit()

    def _put_mysql(self, user, prefs):
        """Update the preferences of an existing *user*.

        Sets an error on the response when the user does not exist.
        """
        with self.connection.cursor() as cursor:
            sql = "select count(*) from preferences where username=%s"
            cursor.execute(sql, (user,))
            user_found = (cursor.fetchone()['count(*)'] == 1)

            if user_found:
                sql = "UPDATE `preferences` SET `prefs`=%s WHERE `username`=%s"
                cursor.execute(sql, (json.dumps(prefs), user))
        self.connection.commit()

        if not user_found:
            message = self._(
                "Cannot update non-existent user {}").format(user)
            LOG.warning(message)
            self.response.error(message)

    def _delete_mysql(self, user):
        """Delete the preferences row for *user* (no-op if absent)."""
        with self.connection.cursor() as cursor:
            sql = "DELETE FROM `preferences` WHERE `username`=%s"
            cursor.execute(sql, (user,))
        self.connection.commit()
|
normal
|
{
"blob_id": "fb787e688da975d37f9fcc39bf5e02957b186982",
"index": 7512,
"step-1": "<mask token>\n\n\nclass PreferencesSvc(service.SvcBase):\n <mask token>\n\n def __init__(self, *args, **kwargs):\n super(PreferencesSvc, self).__init__(*args, **kwargs)\n config = pecan.conf.db.to_dict()\n config['cursorclass'] = pymysql.cursors.DictCursor\n self.connection = pymysql.connect(**config)\n\n @service.expose(action='GET')\n def _get(self):\n return self._get_mysql(self.data.get('user'))\n\n @service.expose(action='POST')\n def _post(self):\n self._post_mysql(self.data.get('user'), self.data.get('prefs'))\n <mask token>\n\n @service.expose(action='DELETE')\n def _delete(self):\n self._delete_mysql(self.data.get('user'))\n\n def _get_mysql(self, user):\n with self.connection.cursor() as cursor:\n sql = 'SELECT `prefs` from `preferences` WHERE `username`=%s'\n cursor.execute(sql, user)\n row = cursor.fetchone()\n cursor.close()\n if row is None:\n message = self._('User {} does not exist').format(user)\n LOG.warn(message)\n self.response.error(message)\n return\n prefs = row.get('prefs')\n if isinstance(prefs, dict):\n return prefs\n return json.loads(prefs)\n\n def _post_mysql(self, user, prefs):\n with self.connection.cursor() as cursor:\n sql = ('INSERT INTO `preferences` (`username`, `prefs`) ' +\n 'VALUES (%s,%s)')\n cursor.execute(sql, [user, json.dumps(prefs)])\n cursor.close()\n self.connection.commit()\n\n def _put_mysql(self, user, prefs):\n with self.connection.cursor() as cursor:\n sql = 'select count(*) from preferences where username=%s'\n cursor.execute(sql, user)\n user_found = cursor.fetchone()['count(*)'] == 1\n if user_found:\n sql = 'UPDATE `preferences` SET `prefs`=%s WHERE `username`=%s'\n cursor.execute(sql, [json.dumps(prefs), user])\n cursor.close()\n self.connection.commit()\n if not user_found:\n message = self._('Cannot update non-existent user {}').format(user)\n LOG.warn(message)\n self.response.error(message)\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass PreferencesSvc(service.SvcBase):\n <mask token>\n\n def __init__(self, *args, **kwargs):\n super(PreferencesSvc, self).__init__(*args, **kwargs)\n config = pecan.conf.db.to_dict()\n config['cursorclass'] = pymysql.cursors.DictCursor\n self.connection = pymysql.connect(**config)\n\n @service.expose(action='GET')\n def _get(self):\n return self._get_mysql(self.data.get('user'))\n\n @service.expose(action='POST')\n def _post(self):\n self._post_mysql(self.data.get('user'), self.data.get('prefs'))\n\n @service.expose(action='PUT')\n def _put(self):\n self._put_mysql(self.data.get('user'), self.data.get('prefs'))\n\n @service.expose(action='DELETE')\n def _delete(self):\n self._delete_mysql(self.data.get('user'))\n\n def _get_mysql(self, user):\n with self.connection.cursor() as cursor:\n sql = 'SELECT `prefs` from `preferences` WHERE `username`=%s'\n cursor.execute(sql, user)\n row = cursor.fetchone()\n cursor.close()\n if row is None:\n message = self._('User {} does not exist').format(user)\n LOG.warn(message)\n self.response.error(message)\n return\n prefs = row.get('prefs')\n if isinstance(prefs, dict):\n return prefs\n return json.loads(prefs)\n\n def _post_mysql(self, user, prefs):\n with self.connection.cursor() as cursor:\n sql = ('INSERT INTO `preferences` (`username`, `prefs`) ' +\n 'VALUES (%s,%s)')\n cursor.execute(sql, [user, json.dumps(prefs)])\n cursor.close()\n self.connection.commit()\n\n def _put_mysql(self, user, prefs):\n with self.connection.cursor() as cursor:\n sql = 'select count(*) from preferences where username=%s'\n cursor.execute(sql, user)\n user_found = cursor.fetchone()['count(*)'] == 1\n if user_found:\n sql = 'UPDATE `preferences` SET `prefs`=%s WHERE `username`=%s'\n cursor.execute(sql, [json.dumps(prefs), user])\n cursor.close()\n self.connection.commit()\n if not user_found:\n message = self._('Cannot update non-existent user {}').format(user)\n LOG.warn(message)\n self.response.error(message)\n\n 
def _delete_mysql(self, user):\n with self.connection.cursor() as cursor:\n sql = 'DELETE FROM `preferences` WHERE `username`=%s'\n cursor.execute(sql, user)\n cursor.close()\n self.connection.commit()\n",
"step-3": "<mask token>\n\n\nclass PreferencesSvc(service.SvcBase):\n \"\"\"\n Simple service to manage user preferences. User preferences are stored as\n JSON in a mysql database.\n\n The ``target`` value for this plugin is ``preferences``. See\n :ref:`rest-api` for a full description of the request and response formats.\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n super(PreferencesSvc, self).__init__(*args, **kwargs)\n config = pecan.conf.db.to_dict()\n config['cursorclass'] = pymysql.cursors.DictCursor\n self.connection = pymysql.connect(**config)\n\n @service.expose(action='GET')\n def _get(self):\n return self._get_mysql(self.data.get('user'))\n\n @service.expose(action='POST')\n def _post(self):\n self._post_mysql(self.data.get('user'), self.data.get('prefs'))\n\n @service.expose(action='PUT')\n def _put(self):\n self._put_mysql(self.data.get('user'), self.data.get('prefs'))\n\n @service.expose(action='DELETE')\n def _delete(self):\n self._delete_mysql(self.data.get('user'))\n\n def _get_mysql(self, user):\n with self.connection.cursor() as cursor:\n sql = 'SELECT `prefs` from `preferences` WHERE `username`=%s'\n cursor.execute(sql, user)\n row = cursor.fetchone()\n cursor.close()\n if row is None:\n message = self._('User {} does not exist').format(user)\n LOG.warn(message)\n self.response.error(message)\n return\n prefs = row.get('prefs')\n if isinstance(prefs, dict):\n return prefs\n return json.loads(prefs)\n\n def _post_mysql(self, user, prefs):\n with self.connection.cursor() as cursor:\n sql = ('INSERT INTO `preferences` (`username`, `prefs`) ' +\n 'VALUES (%s,%s)')\n cursor.execute(sql, [user, json.dumps(prefs)])\n cursor.close()\n self.connection.commit()\n\n def _put_mysql(self, user, prefs):\n with self.connection.cursor() as cursor:\n sql = 'select count(*) from preferences where username=%s'\n cursor.execute(sql, user)\n user_found = cursor.fetchone()['count(*)'] == 1\n if user_found:\n sql = 'UPDATE `preferences` SET `prefs`=%s WHERE 
`username`=%s'\n cursor.execute(sql, [json.dumps(prefs), user])\n cursor.close()\n self.connection.commit()\n if not user_found:\n message = self._('Cannot update non-existent user {}').format(user)\n LOG.warn(message)\n self.response.error(message)\n\n def _delete_mysql(self, user):\n with self.connection.cursor() as cursor:\n sql = 'DELETE FROM `preferences` WHERE `username`=%s'\n cursor.execute(sql, user)\n cursor.close()\n self.connection.commit()\n",
"step-4": "import json\nfrom bll.plugins import service\nimport logging\nimport pecan\nimport pymysql.cursors\nLOG = logging.getLogger(__name__)\n\n\nclass PreferencesSvc(service.SvcBase):\n \"\"\"\n Simple service to manage user preferences. User preferences are stored as\n JSON in a mysql database.\n\n The ``target`` value for this plugin is ``preferences``. See\n :ref:`rest-api` for a full description of the request and response formats.\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n super(PreferencesSvc, self).__init__(*args, **kwargs)\n config = pecan.conf.db.to_dict()\n config['cursorclass'] = pymysql.cursors.DictCursor\n self.connection = pymysql.connect(**config)\n\n @service.expose(action='GET')\n def _get(self):\n return self._get_mysql(self.data.get('user'))\n\n @service.expose(action='POST')\n def _post(self):\n self._post_mysql(self.data.get('user'), self.data.get('prefs'))\n\n @service.expose(action='PUT')\n def _put(self):\n self._put_mysql(self.data.get('user'), self.data.get('prefs'))\n\n @service.expose(action='DELETE')\n def _delete(self):\n self._delete_mysql(self.data.get('user'))\n\n def _get_mysql(self, user):\n with self.connection.cursor() as cursor:\n sql = 'SELECT `prefs` from `preferences` WHERE `username`=%s'\n cursor.execute(sql, user)\n row = cursor.fetchone()\n cursor.close()\n if row is None:\n message = self._('User {} does not exist').format(user)\n LOG.warn(message)\n self.response.error(message)\n return\n prefs = row.get('prefs')\n if isinstance(prefs, dict):\n return prefs\n return json.loads(prefs)\n\n def _post_mysql(self, user, prefs):\n with self.connection.cursor() as cursor:\n sql = ('INSERT INTO `preferences` (`username`, `prefs`) ' +\n 'VALUES (%s,%s)')\n cursor.execute(sql, [user, json.dumps(prefs)])\n cursor.close()\n self.connection.commit()\n\n def _put_mysql(self, user, prefs):\n with self.connection.cursor() as cursor:\n sql = 'select count(*) from preferences where username=%s'\n cursor.execute(sql, user)\n 
user_found = cursor.fetchone()['count(*)'] == 1\n if user_found:\n sql = 'UPDATE `preferences` SET `prefs`=%s WHERE `username`=%s'\n cursor.execute(sql, [json.dumps(prefs), user])\n cursor.close()\n self.connection.commit()\n if not user_found:\n message = self._('Cannot update non-existent user {}').format(user)\n LOG.warn(message)\n self.response.error(message)\n\n def _delete_mysql(self, user):\n with self.connection.cursor() as cursor:\n sql = 'DELETE FROM `preferences` WHERE `username`=%s'\n cursor.execute(sql, user)\n cursor.close()\n self.connection.commit()\n",
"step-5": "# (c) Copyright 2015-2016 Hewlett Packard Enterprise Development LP\n# (c) Copyright 2017 SUSE LLC\nimport json\nfrom bll.plugins import service\nimport logging\nimport pecan\nimport pymysql.cursors\n\nLOG = logging.getLogger(__name__)\n\n\nclass PreferencesSvc(service.SvcBase):\n \"\"\"\n Simple service to manage user preferences. User preferences are stored as\n JSON in a mysql database.\n\n The ``target`` value for this plugin is ``preferences``. See\n :ref:`rest-api` for a full description of the request and response formats.\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n super(PreferencesSvc, self).__init__(*args, **kwargs)\n config = pecan.conf.db.to_dict()\n config['cursorclass'] = pymysql.cursors.DictCursor\n self.connection = pymysql.connect(**config)\n\n @service.expose(action='GET')\n def _get(self):\n return self._get_mysql(self.data.get(\"user\"))\n\n @service.expose(action='POST')\n def _post(self):\n self._post_mysql(self.data.get(\"user\"),\n self.data.get(\"prefs\"))\n\n @service.expose(action='PUT')\n def _put(self):\n self._put_mysql(self.data.get(\"user\"),\n self.data.get(\"prefs\"))\n\n @service.expose(action='DELETE')\n def _delete(self):\n self._delete_mysql(self.data.get(\"user\"))\n\n # Functions for writing\n def _get_mysql(self, user):\n with self.connection.cursor() as cursor:\n sql = \"SELECT `prefs` from `preferences` WHERE `username`=%s\"\n cursor.execute(sql, user)\n row = cursor.fetchone()\n cursor.close()\n if row is None:\n message = self._(\"User {} does not exist\").format(user)\n LOG.warn(message)\n self.response.error(message)\n return\n prefs = row.get(\"prefs\")\n if isinstance(prefs, dict):\n return prefs\n return json.loads(prefs)\n\n def _post_mysql(self, user, prefs):\n with self.connection.cursor() as cursor:\n sql = \"INSERT INTO `preferences` (`username`, `prefs`) \" + \\\n \"VALUES (%s,%s)\"\n cursor.execute(sql, [user, json.dumps(prefs)])\n cursor.close()\n self.connection.commit()\n\n def 
_put_mysql(self, user, prefs):\n with self.connection.cursor() as cursor:\n sql = \"select count(*) from preferences where username=%s\"\n cursor.execute(sql, user)\n user_found = (cursor.fetchone()['count(*)'] == 1)\n if user_found:\n sql = \"UPDATE `preferences` SET `prefs`=%s WHERE `username`=%s\"\n cursor.execute(sql, [json.dumps(prefs), user])\n cursor.close()\n self.connection.commit()\n if not user_found:\n message = self._(\n \"Cannot update non-existent user {}\").format(user)\n LOG.warn(message)\n self.response.error(message)\n\n def _delete_mysql(self, user):\n with self.connection.cursor() as cursor:\n sql = \"DELETE FROM `preferences` WHERE `username`=%s\"\n cursor.execute(sql, user)\n cursor.close()\n self.connection.commit()\n",
"step-ids": [
8,
10,
11,
13,
14
]
}
|
[
8,
10,
11,
13,
14
] |
<|reserved_special_token_0|>
class AdminCityTable(models.Model):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class AdminAreaModel(models.Model):
area_id = models.AutoField(primary_key=True)
area_name = models.CharField(max_length=30, unique=True)
city = models.ForeignKey(AdminCityTable, on_delete=models.CASCADE)
def __str__(self):
return self.area_name
class AdminRestaurantTypeModel(models.Model):
restaurant_type_id = models.AutoField(primary_key=True)
restaurant_type_name = models.CharField(max_length=30, unique=True)
def __str__(self):
return self.restaurant_type_name
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class AdminStateModel(models.Model):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class AdminCityTable(models.Model):
city_id = models.AutoField(primary_key=True)
city_name = models.CharField(max_length=30, unique=True)
state = models.ForeignKey(AdminStateModel, on_delete=models.CASCADE)
def __str__(self):
return self.city_name
class AdminAreaModel(models.Model):
area_id = models.AutoField(primary_key=True)
area_name = models.CharField(max_length=30, unique=True)
city = models.ForeignKey(AdminCityTable, on_delete=models.CASCADE)
def __str__(self):
return self.area_name
class AdminRestaurantTypeModel(models.Model):
restaurant_type_id = models.AutoField(primary_key=True)
restaurant_type_name = models.CharField(max_length=30, unique=True)
def __str__(self):
return self.restaurant_type_name
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class AdminLoginModel(models.Model):
user_name = models.CharField(max_length=30, unique=True)
password = models.CharField(max_length=16)
class AdminStateModel(models.Model):
state_id = models.AutoField(primary_key=True)
state_name = models.CharField(max_length=30, unique=True)
def __str__(self):
return self.state_name
class AdminCityTable(models.Model):
city_id = models.AutoField(primary_key=True)
city_name = models.CharField(max_length=30, unique=True)
state = models.ForeignKey(AdminStateModel, on_delete=models.CASCADE)
def __str__(self):
return self.city_name
class AdminAreaModel(models.Model):
area_id = models.AutoField(primary_key=True)
area_name = models.CharField(max_length=30, unique=True)
city = models.ForeignKey(AdminCityTable, on_delete=models.CASCADE)
def __str__(self):
return self.area_name
class AdminRestaurantTypeModel(models.Model):
restaurant_type_id = models.AutoField(primary_key=True)
restaurant_type_name = models.CharField(max_length=30, unique=True)
def __str__(self):
return self.restaurant_type_name
<|reserved_special_token_1|>
from django.db import models
class AdminLoginModel(models.Model):
user_name = models.CharField(max_length=30, unique=True)
password = models.CharField(max_length=16)
class AdminStateModel(models.Model):
state_id = models.AutoField(primary_key=True)
state_name = models.CharField(max_length=30, unique=True)
def __str__(self):
return self.state_name
class AdminCityTable(models.Model):
city_id = models.AutoField(primary_key=True)
city_name = models.CharField(max_length=30, unique=True)
state = models.ForeignKey(AdminStateModel, on_delete=models.CASCADE)
def __str__(self):
return self.city_name
class AdminAreaModel(models.Model):
area_id = models.AutoField(primary_key=True)
area_name = models.CharField(max_length=30, unique=True)
city = models.ForeignKey(AdminCityTable, on_delete=models.CASCADE)
def __str__(self):
return self.area_name
class AdminRestaurantTypeModel(models.Model):
restaurant_type_id = models.AutoField(primary_key=True)
restaurant_type_name = models.CharField(max_length=30, unique=True)
def __str__(self):
return self.restaurant_type_name
<|reserved_special_token_1|>
from django.db import models
# Login Admin Model
class AdminLoginModel(models.Model):
    """Credentials for an admin user of the food-delivery backend.

    NOTE(review): ``password`` is stored as a plain CharField; it should be
    hashed (e.g. ``django.contrib.auth.hashers``) — confirm before production.
    """
    user_name = models.CharField(max_length=30, unique=True)  # login identifier
    password = models.CharField(max_length=16)

    def __str__(self):
        # Added for consistency: every sibling model defines __str__.
        return self.user_name
# Swiggy Admin State Table
class AdminStateModel(models.Model):
    """A state — the top level of the state > city > area hierarchy."""
    state_id = models.AutoField(primary_key=True)
    state_name = models.CharField(max_length=30, unique=True)

    def __str__(self):
        return self.state_name
# Admin City Table
class AdminCityTable(models.Model):
    """A city belonging to a state; deleting the state cascades to its cities."""
    city_id = models.AutoField(primary_key=True)
    city_name = models.CharField(max_length=30, unique=True)
    state = models.ForeignKey(AdminStateModel, on_delete=models.CASCADE)

    def __str__(self):
        return self.city_name
#Admin Area Models for Area Operations
class AdminAreaModel(models.Model):
    """An area (neighbourhood) inside a city; cascades on city deletion."""
    area_id = models.AutoField(primary_key=True)
    area_name = models.CharField(max_length=30, unique=True)
    city = models.ForeignKey(AdminCityTable, on_delete=models.CASCADE)

    def __str__(self):
        return self.area_name
#Admin Restaurant type Model
class AdminRestaurantTypeModel(models.Model):
    """A restaurant category (presumably cuisine/venue type — confirm usage)."""
    restaurant_type_id = models.AutoField(primary_key=True)
    restaurant_type_name = models.CharField(max_length=30, unique=True)

    def __str__(self):
        return self.restaurant_type_name
|
flexible
|
{
"blob_id": "5d4ef314bb7169f5de4795e5c1aca62a1a060bae",
"index": 772,
"step-1": "<mask token>\n\n\nclass AdminCityTable(models.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\nclass AdminAreaModel(models.Model):\n area_id = models.AutoField(primary_key=True)\n area_name = models.CharField(max_length=30, unique=True)\n city = models.ForeignKey(AdminCityTable, on_delete=models.CASCADE)\n\n def __str__(self):\n return self.area_name\n\n\nclass AdminRestaurantTypeModel(models.Model):\n restaurant_type_id = models.AutoField(primary_key=True)\n restaurant_type_name = models.CharField(max_length=30, unique=True)\n\n def __str__(self):\n return self.restaurant_type_name\n",
"step-2": "<mask token>\n\n\nclass AdminStateModel(models.Model):\n <mask token>\n <mask token>\n <mask token>\n\n\nclass AdminCityTable(models.Model):\n city_id = models.AutoField(primary_key=True)\n city_name = models.CharField(max_length=30, unique=True)\n state = models.ForeignKey(AdminStateModel, on_delete=models.CASCADE)\n\n def __str__(self):\n return self.city_name\n\n\nclass AdminAreaModel(models.Model):\n area_id = models.AutoField(primary_key=True)\n area_name = models.CharField(max_length=30, unique=True)\n city = models.ForeignKey(AdminCityTable, on_delete=models.CASCADE)\n\n def __str__(self):\n return self.area_name\n\n\nclass AdminRestaurantTypeModel(models.Model):\n restaurant_type_id = models.AutoField(primary_key=True)\n restaurant_type_name = models.CharField(max_length=30, unique=True)\n\n def __str__(self):\n return self.restaurant_type_name\n",
"step-3": "<mask token>\n\n\nclass AdminLoginModel(models.Model):\n user_name = models.CharField(max_length=30, unique=True)\n password = models.CharField(max_length=16)\n\n\nclass AdminStateModel(models.Model):\n state_id = models.AutoField(primary_key=True)\n state_name = models.CharField(max_length=30, unique=True)\n\n def __str__(self):\n return self.state_name\n\n\nclass AdminCityTable(models.Model):\n city_id = models.AutoField(primary_key=True)\n city_name = models.CharField(max_length=30, unique=True)\n state = models.ForeignKey(AdminStateModel, on_delete=models.CASCADE)\n\n def __str__(self):\n return self.city_name\n\n\nclass AdminAreaModel(models.Model):\n area_id = models.AutoField(primary_key=True)\n area_name = models.CharField(max_length=30, unique=True)\n city = models.ForeignKey(AdminCityTable, on_delete=models.CASCADE)\n\n def __str__(self):\n return self.area_name\n\n\nclass AdminRestaurantTypeModel(models.Model):\n restaurant_type_id = models.AutoField(primary_key=True)\n restaurant_type_name = models.CharField(max_length=30, unique=True)\n\n def __str__(self):\n return self.restaurant_type_name\n",
"step-4": "from django.db import models\n\n\nclass AdminLoginModel(models.Model):\n user_name = models.CharField(max_length=30, unique=True)\n password = models.CharField(max_length=16)\n\n\nclass AdminStateModel(models.Model):\n state_id = models.AutoField(primary_key=True)\n state_name = models.CharField(max_length=30, unique=True)\n\n def __str__(self):\n return self.state_name\n\n\nclass AdminCityTable(models.Model):\n city_id = models.AutoField(primary_key=True)\n city_name = models.CharField(max_length=30, unique=True)\n state = models.ForeignKey(AdminStateModel, on_delete=models.CASCADE)\n\n def __str__(self):\n return self.city_name\n\n\nclass AdminAreaModel(models.Model):\n area_id = models.AutoField(primary_key=True)\n area_name = models.CharField(max_length=30, unique=True)\n city = models.ForeignKey(AdminCityTable, on_delete=models.CASCADE)\n\n def __str__(self):\n return self.area_name\n\n\nclass AdminRestaurantTypeModel(models.Model):\n restaurant_type_id = models.AutoField(primary_key=True)\n restaurant_type_name = models.CharField(max_length=30, unique=True)\n\n def __str__(self):\n return self.restaurant_type_name\n",
"step-5": "from django.db import models\n\n\n# Login Admin Model\nclass AdminLoginModel(models.Model):\n user_name = models.CharField(max_length=30,unique=True)\n password = models.CharField(max_length=16)\n\n\n\n# Swiggy Admin State Table\n\nclass AdminStateModel(models.Model):\n state_id = models.AutoField(primary_key=True)\n state_name = models.CharField(max_length=30,unique=True)\n\n def __str__(self):\n return self.state_name\n\n# Admin City Table\nclass AdminCityTable(models.Model):\n city_id = models.AutoField(primary_key = True)\n city_name = models.CharField(max_length=30,unique=True)\n state = models.ForeignKey(AdminStateModel,on_delete=models.CASCADE)\n\n def __str__(self):\n return self.city_name\n \n#Admin Area Models for Area Operations\nclass AdminAreaModel(models.Model):\n area_id = models.AutoField(primary_key = True)\n area_name = models.CharField(max_length=30,unique=True)\n\n city = models.ForeignKey(AdminCityTable,on_delete=models.CASCADE)\n def __str__(self):\n return self.area_name\n\n#Admin Restaurant type Model\n\nclass AdminRestaurantTypeModel(models.Model):\n restaurant_type_id = models.AutoField(primary_key = True)\n restaurant_type_name = models.CharField(max_length=30,unique=True)\n\n def __str__(self):\n return self.restaurant_type_name\n\n\n\n\n\n",
"step-ids": [
7,
10,
14,
15,
16
]
}
|
[
7,
10,
14,
15,
16
] |
<|reserved_special_token_0|>
class Sudoku:
def __init__(self, grid):
"""
Initializes the grid
"""
self.grid = grid
self.sub_grid = self.create_sub_grid(self.grid)
def create_sub_grid(self, grid):
"""
Creates a Sub grid, containing the possible numbers within a cell
Returns a Sub grid
"""
sub_grid = []
for i in range(9):
sub = []
for j in range(9):
if grid[i][j] == 0:
sub.append(self.missing_numbers(i, j))
else:
sub.append([grid[i][j]])
sub_grid.append(sub)
del sub
return sub_grid
def missing_numbers(self, row, column):
"""
Returs the possible set of numbers of a particular row and column
"""
rrow, ccolumn = self.row_and_column(self.grid, row, column)
cell = self.cell_3by3(row, column)
missing_num = list({i for i in range(1, 10)} - set(rrow + ccolumn +
cell))
return missing_num
def cell_3by3(self, row, column):
"""
Returns grid of 3 X 3
"""
cell = []
a = row // 3
b = column // 3
for i in range(9):
for j in range(9):
if i // 3 == a and j // 3 == b:
cell.append(grid[i][j])
return cell
def row_and_column(self, grid, row, column):
"""
Returns rows and columns
"""
r = grid[row]
c = []
for j in range(9):
c.append(grid[j][column])
return r, c
def step_1(self, sub_grid, num):
"""
Reducing a list of clues to a single value based on row and column elimination
Returns a refined sub grid
"""
row, column = self.row_and_column(sub_grid, num, num)
row_flatten = sum(row, [])
single_values = [i for i, j in Counter(row_flatten).items() if j == 1]
for i in range(len(sub_grid)):
for j in single_values:
if j in sub_grid[num][i] and len(sub_grid[num][i]) != 1:
sub_grid[num][i] = [j]
column_flatten = sum(column, [])
column_single_values = [i for i, j in Counter(column_flatten).items
() if j == 1]
for i in range(len(sub_grid)):
for j in column_single_values:
if j in sub_grid[i][num] and len(sub_grid[i][num]) != 1:
sub_grid[i][num] = [j]
return sub_grid
<|reserved_special_token_0|>
def step_3(self, sub_grid, num):
pass
def perform(self):
"""
Performs the step_1 and step_2 untill the Sub grid is solved
Returns None
"""
temp = []
while self.sub_grid != temp:
temp = deepcopy(self.sub_grid)
for i in range(len(grid)):
self.sub_grid = self.step_1(self.sub_grid, i)
self.sub_grid = self.step_2(self.sub_grid, i)
def solve(self):
"""
Solves the Sub grid and prints the sub grid
Returns None
"""
self.perform()
for i in range(9):
for j in range(9):
print(self.sub_grid[i][j], end=' ')
print()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Sudoku:
def __init__(self, grid):
"""
Initializes the grid
"""
self.grid = grid
self.sub_grid = self.create_sub_grid(self.grid)
def create_sub_grid(self, grid):
"""
Creates a Sub grid, containing the possible numbers within a cell
Returns a Sub grid
"""
sub_grid = []
for i in range(9):
sub = []
for j in range(9):
if grid[i][j] == 0:
sub.append(self.missing_numbers(i, j))
else:
sub.append([grid[i][j]])
sub_grid.append(sub)
del sub
return sub_grid
def missing_numbers(self, row, column):
"""
Returs the possible set of numbers of a particular row and column
"""
rrow, ccolumn = self.row_and_column(self.grid, row, column)
cell = self.cell_3by3(row, column)
missing_num = list({i for i in range(1, 10)} - set(rrow + ccolumn +
cell))
return missing_num
def cell_3by3(self, row, column):
"""
Returns grid of 3 X 3
"""
cell = []
a = row // 3
b = column // 3
for i in range(9):
for j in range(9):
if i // 3 == a and j // 3 == b:
cell.append(grid[i][j])
return cell
def row_and_column(self, grid, row, column):
"""
Returns rows and columns
"""
r = grid[row]
c = []
for j in range(9):
c.append(grid[j][column])
return r, c
def step_1(self, sub_grid, num):
"""
Reducing a list of clues to a single value based on row and column elimination
Returns a refined sub grid
"""
row, column = self.row_and_column(sub_grid, num, num)
row_flatten = sum(row, [])
single_values = [i for i, j in Counter(row_flatten).items() if j == 1]
for i in range(len(sub_grid)):
for j in single_values:
if j in sub_grid[num][i] and len(sub_grid[num][i]) != 1:
sub_grid[num][i] = [j]
column_flatten = sum(column, [])
column_single_values = [i for i, j in Counter(column_flatten).items
() if j == 1]
for i in range(len(sub_grid)):
for j in column_single_values:
if j in sub_grid[i][num] and len(sub_grid[i][num]) != 1:
sub_grid[i][num] = [j]
return sub_grid
def step_2(self, sub_grid, num):
"""
Removes a number 'n' that fits at its correct position from other lists corresponding its row and column
Returns refined sub grid
"""
row, column = self.row_and_column(sub_grid, num, num)
single_value_list = []
for i in range(len(row)):
if len(sub_grid[num][i]) == 1:
single_value_list.append(sub_grid[num][i])
single_value_list_flatten = sum(single_value_list, [])
for i in range(len(sub_grid)):
if len(sub_grid[num][i]) != 1:
for j in single_value_list_flatten:
if j in sub_grid[num][i]:
sub_grid[num][i].remove(j)
single_value_list = []
for i in range(len(column)):
if len(sub_grid[i][num]) == 1:
single_value_list.append(sub_grid[i][num])
single_value_list_flatten = sum(single_value_list, [])
for i in range(len(sub_grid)):
if len(sub_grid[i][num]) != 1:
for j in single_value_list_flatten:
if j in sub_grid[i][num]:
sub_grid[i][num].remove(j)
return sub_grid
def step_3(self, sub_grid, num):
pass
def perform(self):
"""
Performs the step_1 and step_2 untill the Sub grid is solved
Returns None
"""
temp = []
while self.sub_grid != temp:
temp = deepcopy(self.sub_grid)
for i in range(len(grid)):
self.sub_grid = self.step_1(self.sub_grid, i)
self.sub_grid = self.step_2(self.sub_grid, i)
def solve(self):
"""
Solves the Sub grid and prints the sub grid
Returns None
"""
self.perform()
for i in range(9):
for j in range(9):
print(self.sub_grid[i][j], end=' ')
print()
<|reserved_special_token_0|>
mat.solve()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Sudoku:
def __init__(self, grid):
"""
Initializes the grid
"""
self.grid = grid
self.sub_grid = self.create_sub_grid(self.grid)
def create_sub_grid(self, grid):
"""
Creates a Sub grid, containing the possible numbers within a cell
Returns a Sub grid
"""
sub_grid = []
for i in range(9):
sub = []
for j in range(9):
if grid[i][j] == 0:
sub.append(self.missing_numbers(i, j))
else:
sub.append([grid[i][j]])
sub_grid.append(sub)
del sub
return sub_grid
def missing_numbers(self, row, column):
"""
Returs the possible set of numbers of a particular row and column
"""
rrow, ccolumn = self.row_and_column(self.grid, row, column)
cell = self.cell_3by3(row, column)
missing_num = list({i for i in range(1, 10)} - set(rrow + ccolumn +
cell))
return missing_num
def cell_3by3(self, row, column):
"""
Returns grid of 3 X 3
"""
cell = []
a = row // 3
b = column // 3
for i in range(9):
for j in range(9):
if i // 3 == a and j // 3 == b:
cell.append(grid[i][j])
return cell
def row_and_column(self, grid, row, column):
"""
Returns rows and columns
"""
r = grid[row]
c = []
for j in range(9):
c.append(grid[j][column])
return r, c
def step_1(self, sub_grid, num):
"""
Reducing a list of clues to a single value based on row and column elimination
Returns a refined sub grid
"""
row, column = self.row_and_column(sub_grid, num, num)
row_flatten = sum(row, [])
single_values = [i for i, j in Counter(row_flatten).items() if j == 1]
for i in range(len(sub_grid)):
for j in single_values:
if j in sub_grid[num][i] and len(sub_grid[num][i]) != 1:
sub_grid[num][i] = [j]
column_flatten = sum(column, [])
column_single_values = [i for i, j in Counter(column_flatten).items
() if j == 1]
for i in range(len(sub_grid)):
for j in column_single_values:
if j in sub_grid[i][num] and len(sub_grid[i][num]) != 1:
sub_grid[i][num] = [j]
return sub_grid
def step_2(self, sub_grid, num):
"""
Removes a number 'n' that fits at its correct position from other lists corresponding its row and column
Returns refined sub grid
"""
row, column = self.row_and_column(sub_grid, num, num)
single_value_list = []
for i in range(len(row)):
if len(sub_grid[num][i]) == 1:
single_value_list.append(sub_grid[num][i])
single_value_list_flatten = sum(single_value_list, [])
for i in range(len(sub_grid)):
if len(sub_grid[num][i]) != 1:
for j in single_value_list_flatten:
if j in sub_grid[num][i]:
sub_grid[num][i].remove(j)
single_value_list = []
for i in range(len(column)):
if len(sub_grid[i][num]) == 1:
single_value_list.append(sub_grid[i][num])
single_value_list_flatten = sum(single_value_list, [])
for i in range(len(sub_grid)):
if len(sub_grid[i][num]) != 1:
for j in single_value_list_flatten:
if j in sub_grid[i][num]:
sub_grid[i][num].remove(j)
return sub_grid
def step_3(self, sub_grid, num):
pass
def perform(self):
"""
Performs the step_1 and step_2 untill the Sub grid is solved
Returns None
"""
temp = []
while self.sub_grid != temp:
temp = deepcopy(self.sub_grid)
for i in range(len(grid)):
self.sub_grid = self.step_1(self.sub_grid, i)
self.sub_grid = self.step_2(self.sub_grid, i)
def solve(self):
"""
Solves the Sub grid and prints the sub grid
Returns None
"""
self.perform()
for i in range(9):
for j in range(9):
print(self.sub_grid[i][j], end=' ')
print()
grid = [[8, 0, 6, 0, 0, 0, 4, 0, 9], [0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 9, 2,
0, 0, 0, 5, 0, 8], [0, 0, 9, 0, 7, 1, 3, 0, 0], [5, 0, 8, 0, 0, 0, 0, 2,
0], [0, 0, 4, 0, 5, 0, 0, 0, 0], [0, 0, 0, 0, 0, 7, 9, 1, 0], [0, 0, 0,
9, 0, 0, 0, 0, 7], [0, 7, 0, 0, 0, 3, 0, 0, 4]]
mat = Sudoku(grid)
mat.solve()
<|reserved_special_token_1|>
from pprint import pprint
from collections import Counter
from copy import deepcopy
class Sudoku:
def __init__(self, grid):
"""
Initializes the grid
"""
self.grid = grid
self.sub_grid = self.create_sub_grid(self.grid)
def create_sub_grid(self, grid):
"""
Creates a Sub grid, containing the possible numbers within a cell
Returns a Sub grid
"""
sub_grid = []
for i in range(9):
sub = []
for j in range(9):
if grid[i][j] == 0:
sub.append(self.missing_numbers(i, j))
else:
sub.append([grid[i][j]])
sub_grid.append(sub)
del sub
return sub_grid
def missing_numbers(self, row, column):
"""
Returs the possible set of numbers of a particular row and column
"""
rrow, ccolumn = self.row_and_column(self.grid, row, column)
cell = self.cell_3by3(row, column)
missing_num = list({i for i in range(1, 10)} - set(rrow + ccolumn +
cell))
return missing_num
def cell_3by3(self, row, column):
"""
Returns grid of 3 X 3
"""
cell = []
a = row // 3
b = column // 3
for i in range(9):
for j in range(9):
if i // 3 == a and j // 3 == b:
cell.append(grid[i][j])
return cell
def row_and_column(self, grid, row, column):
"""
Returns rows and columns
"""
r = grid[row]
c = []
for j in range(9):
c.append(grid[j][column])
return r, c
def step_1(self, sub_grid, num):
"""
Reducing a list of clues to a single value based on row and column elimination
Returns a refined sub grid
"""
row, column = self.row_and_column(sub_grid, num, num)
row_flatten = sum(row, [])
single_values = [i for i, j in Counter(row_flatten).items() if j == 1]
for i in range(len(sub_grid)):
for j in single_values:
if j in sub_grid[num][i] and len(sub_grid[num][i]) != 1:
sub_grid[num][i] = [j]
column_flatten = sum(column, [])
column_single_values = [i for i, j in Counter(column_flatten).items
() if j == 1]
for i in range(len(sub_grid)):
for j in column_single_values:
if j in sub_grid[i][num] and len(sub_grid[i][num]) != 1:
sub_grid[i][num] = [j]
return sub_grid
def step_2(self, sub_grid, num):
"""
Removes a number 'n' that fits at its correct position from other lists corresponding its row and column
Returns refined sub grid
"""
row, column = self.row_and_column(sub_grid, num, num)
single_value_list = []
for i in range(len(row)):
if len(sub_grid[num][i]) == 1:
single_value_list.append(sub_grid[num][i])
single_value_list_flatten = sum(single_value_list, [])
for i in range(len(sub_grid)):
if len(sub_grid[num][i]) != 1:
for j in single_value_list_flatten:
if j in sub_grid[num][i]:
sub_grid[num][i].remove(j)
single_value_list = []
for i in range(len(column)):
if len(sub_grid[i][num]) == 1:
single_value_list.append(sub_grid[i][num])
single_value_list_flatten = sum(single_value_list, [])
for i in range(len(sub_grid)):
if len(sub_grid[i][num]) != 1:
for j in single_value_list_flatten:
if j in sub_grid[i][num]:
sub_grid[i][num].remove(j)
return sub_grid
def step_3(self, sub_grid, num):
pass
def perform(self):
"""
Performs the step_1 and step_2 untill the Sub grid is solved
Returns None
"""
temp = []
while self.sub_grid != temp:
temp = deepcopy(self.sub_grid)
for i in range(len(grid)):
self.sub_grid = self.step_1(self.sub_grid, i)
self.sub_grid = self.step_2(self.sub_grid, i)
def solve(self):
"""
Solves the Sub grid and prints the sub grid
Returns None
"""
self.perform()
for i in range(9):
for j in range(9):
print(self.sub_grid[i][j], end=' ')
print()
# Example puzzle: 0 marks an empty cell.
grid = [[8, 0, 6, 0, 0, 0, 4, 0, 9], [0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 9, 2,
    0, 0, 0, 5, 0, 8], [0, 0, 9, 0, 7, 1, 3, 0, 0], [5, 0, 8, 0, 0, 0, 0, 2,
    0], [0, 0, 4, 0, 5, 0, 0, 0, 0], [0, 0, 0, 0, 0, 7, 9, 1, 0], [0, 0, 0,
    9, 0, 0, 0, 0, 7], [0, 7, 0, 0, 0, 3, 0, 0, 4]]
mat = Sudoku(grid)
# Reduce the candidate lists and print the (possibly partially) solved grid.
mat.solve()
<|reserved_special_token_1|>
from pprint import pprint
from collections import Counter
from copy import deepcopy
class Sudoku():
    """Partially solve a 9x9 Sudoku by repeated candidate elimination.

    Cells use 0 for "empty".  Internally the puzzle is expanded into a
    "sub grid": a 9x9 matrix whose entries are lists of candidate digits
    (a solved cell is a one-element list).
    """

    def __init__(self, grid):
        """Store the puzzle and build the candidate sub grid.

        grid -- 9x9 list of lists of ints, 0 meaning "unknown".
        """
        self.grid = grid
        self.sub_grid = self.create_sub_grid(self.grid)

    def create_sub_grid(self, grid):
        """
        Creates a Sub grid, containing the possible numbers within a cell.
        Returns the Sub grid (9x9 matrix of candidate lists).
        """
        sub_grid = []
        for i in range(9):
            row_candidates = []
            for j in range(9):
                if grid[i][j] == 0:
                    row_candidates.append(self.missing_numbers(i, j))
                else:
                    # A pre-filled cell has exactly one candidate.
                    row_candidates.append([grid[i][j]])
            sub_grid.append(row_candidates)
        return sub_grid

    def missing_numbers(self, row, column):
        """
        Returns the possible set of numbers for the cell at (row, column):
        the digits 1-9 not already used in the same row, column or 3x3 box.
        """
        rrow, ccolumn = self.row_and_column(self.grid, row, column)
        cell = self.cell_3by3(row, column)
        return list({i for i in range(1, 10)} - set(rrow + ccolumn + cell))

    def cell_3by3(self, row, column):
        """
        Returns the 9 values of the 3x3 box containing (row, column),
        in row-major order.
        """
        cell = []
        box_row = row // 3
        box_col = column // 3
        for i in range(9):
            for j in range(9):
                if i // 3 == box_row and j // 3 == box_col:
                    # BUG FIX: read from self.grid; the original read the
                    # module-level ``grid`` variable, so the class only
                    # worked when a global with that exact name existed.
                    cell.append(self.grid[i][j])
        return cell

    def row_and_column(self, grid, row, column):
        """
        Returns (row ``row``, column ``column``) of ``grid`` as two lists.
        """
        r = grid[row]
        c = [grid[j][column] for j in range(9)]
        return r, c

    def step_1(self, sub_grid, num):
        """
        Hidden-single pass on row ``num`` and column ``num``: if a digit
        appears in exactly one candidate list of a row (or column), it must
        belong to that cell, so that cell collapses to the single digit.
        Returns the refined sub grid.
        """
        row, column = self.row_and_column(sub_grid, num, num)

        # Rows: digits occurring exactly once across all candidate lists.
        row_flatten = sum(row, [])
        single_values = [i for i, j in Counter(row_flatten).items() if j == 1]
        for i in range(len(sub_grid)):
            for j in single_values:
                if j in sub_grid[num][i] and len(sub_grid[num][i]) != 1:
                    sub_grid[num][i] = [j]

        # Columns: the same rule applied vertically.
        column_flatten = sum(column, [])
        column_single_values = [i for i, j in Counter(column_flatten).items() if j == 1]
        for i in range(len(sub_grid)):
            for j in column_single_values:
                if j in sub_grid[i][num] and len(sub_grid[i][num]) != 1:
                    sub_grid[i][num] = [j]

        return sub_grid

    def step_2(self, sub_grid, num):
        """
        Naked-single pass on row ``num`` and column ``num``: every digit
        already fixed in a row (or column) is removed from the candidate
        lists of the remaining cells of that row (or column).
        Returns the refined sub grid.
        """
        row, column = self.row_and_column(sub_grid, num, num)

        # Rows: collect solved digits, then strip them from open cells.
        solved = []
        for i in range(len(row)):
            if len(sub_grid[num][i]) == 1:
                solved.append(sub_grid[num][i])
        solved_flat = sum(solved, [])
        for i in range(len(sub_grid)):
            if len(sub_grid[num][i]) != 1:
                for j in solved_flat:
                    if j in sub_grid[num][i]:
                        sub_grid[num][i].remove(j)

        # Columns: the same rule applied vertically.
        solved = []
        for i in range(len(column)):
            if len(sub_grid[i][num]) == 1:
                solved.append(sub_grid[i][num])
        solved_flat = sum(solved, [])
        for i in range(len(sub_grid)):
            if len(sub_grid[i][num]) != 1:
                for j in solved_flat:
                    if j in sub_grid[i][num]:
                        sub_grid[i][num].remove(j)

        return sub_grid

    def step_3(self, sub_grid, num):
        """Placeholder for a future elimination pass (presumably 3x3-box
        elimination); currently a deliberate no-op."""
        pass

    def perform(self):
        """
        Alternate step_1 and step_2 over every row/column until the
        candidate sub grid reaches a fixed point (no further change).
        Returns None
        """
        temp = []
        while self.sub_grid != temp:
            temp = deepcopy(self.sub_grid)
            # BUG FIX: iterate over the instance's own grid size instead of
            # the module-level ``grid`` variable.
            for i in range(len(self.sub_grid)):
                self.sub_grid = self.step_1(self.sub_grid, i)
                self.sub_grid = self.step_2(self.sub_grid, i)

    def solve(self):
        """
        Runs the elimination passes and prints the candidate sub grid.
        Returns None
        """
        self.perform()
        for i in range(9):
            for j in range(9):
                print(self.sub_grid[i][j], end=' ')
            print()
# Puzzle to solve; 0 marks an empty cell.  (Two alternative sample puzzles
# that were kept here as commented-out code have been removed; swap in a
# different puzzle by redefining ``grid`` below.)
grid = [
    [8, 0, 6, 0, 0, 0, 4, 0, 9],
    [0, 0, 0, 0, 0, 0, 0, 0, 0],
    [0, 9, 2, 0, 0, 0, 5, 0, 8],
    [0, 0, 9, 0, 7, 1, 3, 0, 0],
    [5, 0, 8, 0, 0, 0, 0, 2, 0],
    [0, 0, 4, 0, 5, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 7, 9, 1, 0],
    [0, 0, 0, 9, 0, 0, 0, 0, 7],
    [0, 7, 0, 0, 0, 3, 0, 0, 4],
]

mat = Sudoku(grid)
mat.solve()
|
flexible
|
{
"blob_id": "4032503bba8a1dd273015d503f52b6ea2d932d1d",
"index": 3564,
"step-1": "<mask token>\n\n\nclass Sudoku:\n\n def __init__(self, grid):\n \"\"\"\n Initializes the grid\n \"\"\"\n self.grid = grid\n self.sub_grid = self.create_sub_grid(self.grid)\n\n def create_sub_grid(self, grid):\n \"\"\" \n Creates a Sub grid, containing the possible numbers within a cell\n Returns a Sub grid\n \"\"\"\n sub_grid = []\n for i in range(9):\n sub = []\n for j in range(9):\n if grid[i][j] == 0:\n sub.append(self.missing_numbers(i, j))\n else:\n sub.append([grid[i][j]])\n sub_grid.append(sub)\n del sub\n return sub_grid\n\n def missing_numbers(self, row, column):\n \"\"\"\n Returs the possible set of numbers of a particular row and column\n \"\"\"\n rrow, ccolumn = self.row_and_column(self.grid, row, column)\n cell = self.cell_3by3(row, column)\n missing_num = list({i for i in range(1, 10)} - set(rrow + ccolumn +\n cell))\n return missing_num\n\n def cell_3by3(self, row, column):\n \"\"\"\n Returns grid of 3 X 3\n \"\"\"\n cell = []\n a = row // 3\n b = column // 3\n for i in range(9):\n for j in range(9):\n if i // 3 == a and j // 3 == b:\n cell.append(grid[i][j])\n return cell\n\n def row_and_column(self, grid, row, column):\n \"\"\"\n Returns rows and columns\n \"\"\"\n r = grid[row]\n c = []\n for j in range(9):\n c.append(grid[j][column])\n return r, c\n\n def step_1(self, sub_grid, num):\n \"\"\"\n Reducing a list of clues to a single value based on row and column elimination\n Returns a refined sub grid\n \"\"\"\n row, column = self.row_and_column(sub_grid, num, num)\n row_flatten = sum(row, [])\n single_values = [i for i, j in Counter(row_flatten).items() if j == 1]\n for i in range(len(sub_grid)):\n for j in single_values:\n if j in sub_grid[num][i] and len(sub_grid[num][i]) != 1:\n sub_grid[num][i] = [j]\n column_flatten = sum(column, [])\n column_single_values = [i for i, j in Counter(column_flatten).items\n () if j == 1]\n for i in range(len(sub_grid)):\n for j in column_single_values:\n if j in sub_grid[i][num] and 
len(sub_grid[i][num]) != 1:\n sub_grid[i][num] = [j]\n return sub_grid\n <mask token>\n\n def step_3(self, sub_grid, num):\n pass\n\n def perform(self):\n \"\"\"\n Performs the step_1 and step_2 untill the Sub grid is solved\n Returns None\n \"\"\"\n temp = []\n while self.sub_grid != temp:\n temp = deepcopy(self.sub_grid)\n for i in range(len(grid)):\n self.sub_grid = self.step_1(self.sub_grid, i)\n self.sub_grid = self.step_2(self.sub_grid, i)\n\n def solve(self):\n \"\"\"\n Solves the Sub grid and prints the sub grid\n Returns None\n \"\"\"\n self.perform()\n for i in range(9):\n for j in range(9):\n print(self.sub_grid[i][j], end=' ')\n print()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Sudoku:\n\n def __init__(self, grid):\n \"\"\"\n Initializes the grid\n \"\"\"\n self.grid = grid\n self.sub_grid = self.create_sub_grid(self.grid)\n\n def create_sub_grid(self, grid):\n \"\"\" \n Creates a Sub grid, containing the possible numbers within a cell\n Returns a Sub grid\n \"\"\"\n sub_grid = []\n for i in range(9):\n sub = []\n for j in range(9):\n if grid[i][j] == 0:\n sub.append(self.missing_numbers(i, j))\n else:\n sub.append([grid[i][j]])\n sub_grid.append(sub)\n del sub\n return sub_grid\n\n def missing_numbers(self, row, column):\n \"\"\"\n Returs the possible set of numbers of a particular row and column\n \"\"\"\n rrow, ccolumn = self.row_and_column(self.grid, row, column)\n cell = self.cell_3by3(row, column)\n missing_num = list({i for i in range(1, 10)} - set(rrow + ccolumn +\n cell))\n return missing_num\n\n def cell_3by3(self, row, column):\n \"\"\"\n Returns grid of 3 X 3\n \"\"\"\n cell = []\n a = row // 3\n b = column // 3\n for i in range(9):\n for j in range(9):\n if i // 3 == a and j // 3 == b:\n cell.append(grid[i][j])\n return cell\n\n def row_and_column(self, grid, row, column):\n \"\"\"\n Returns rows and columns\n \"\"\"\n r = grid[row]\n c = []\n for j in range(9):\n c.append(grid[j][column])\n return r, c\n\n def step_1(self, sub_grid, num):\n \"\"\"\n Reducing a list of clues to a single value based on row and column elimination\n Returns a refined sub grid\n \"\"\"\n row, column = self.row_and_column(sub_grid, num, num)\n row_flatten = sum(row, [])\n single_values = [i for i, j in Counter(row_flatten).items() if j == 1]\n for i in range(len(sub_grid)):\n for j in single_values:\n if j in sub_grid[num][i] and len(sub_grid[num][i]) != 1:\n sub_grid[num][i] = [j]\n column_flatten = sum(column, [])\n column_single_values = [i for i, j in Counter(column_flatten).items\n () if j == 1]\n for i in range(len(sub_grid)):\n for j in column_single_values:\n if j in sub_grid[i][num] and 
len(sub_grid[i][num]) != 1:\n sub_grid[i][num] = [j]\n return sub_grid\n\n def step_2(self, sub_grid, num):\n \"\"\"\n Removes a number 'n' that fits at its correct position from other lists corresponding its row and column\n Returns refined sub grid\n \"\"\"\n row, column = self.row_and_column(sub_grid, num, num)\n single_value_list = []\n for i in range(len(row)):\n if len(sub_grid[num][i]) == 1:\n single_value_list.append(sub_grid[num][i])\n single_value_list_flatten = sum(single_value_list, [])\n for i in range(len(sub_grid)):\n if len(sub_grid[num][i]) != 1:\n for j in single_value_list_flatten:\n if j in sub_grid[num][i]:\n sub_grid[num][i].remove(j)\n single_value_list = []\n for i in range(len(column)):\n if len(sub_grid[i][num]) == 1:\n single_value_list.append(sub_grid[i][num])\n single_value_list_flatten = sum(single_value_list, [])\n for i in range(len(sub_grid)):\n if len(sub_grid[i][num]) != 1:\n for j in single_value_list_flatten:\n if j in sub_grid[i][num]:\n sub_grid[i][num].remove(j)\n return sub_grid\n\n def step_3(self, sub_grid, num):\n pass\n\n def perform(self):\n \"\"\"\n Performs the step_1 and step_2 untill the Sub grid is solved\n Returns None\n \"\"\"\n temp = []\n while self.sub_grid != temp:\n temp = deepcopy(self.sub_grid)\n for i in range(len(grid)):\n self.sub_grid = self.step_1(self.sub_grid, i)\n self.sub_grid = self.step_2(self.sub_grid, i)\n\n def solve(self):\n \"\"\"\n Solves the Sub grid and prints the sub grid\n Returns None\n \"\"\"\n self.perform()\n for i in range(9):\n for j in range(9):\n print(self.sub_grid[i][j], end=' ')\n print()\n\n\n<mask token>\nmat.solve()\n",
"step-3": "<mask token>\n\n\nclass Sudoku:\n\n def __init__(self, grid):\n \"\"\"\n Initializes the grid\n \"\"\"\n self.grid = grid\n self.sub_grid = self.create_sub_grid(self.grid)\n\n def create_sub_grid(self, grid):\n \"\"\" \n Creates a Sub grid, containing the possible numbers within a cell\n Returns a Sub grid\n \"\"\"\n sub_grid = []\n for i in range(9):\n sub = []\n for j in range(9):\n if grid[i][j] == 0:\n sub.append(self.missing_numbers(i, j))\n else:\n sub.append([grid[i][j]])\n sub_grid.append(sub)\n del sub\n return sub_grid\n\n def missing_numbers(self, row, column):\n \"\"\"\n Returs the possible set of numbers of a particular row and column\n \"\"\"\n rrow, ccolumn = self.row_and_column(self.grid, row, column)\n cell = self.cell_3by3(row, column)\n missing_num = list({i for i in range(1, 10)} - set(rrow + ccolumn +\n cell))\n return missing_num\n\n def cell_3by3(self, row, column):\n \"\"\"\n Returns grid of 3 X 3\n \"\"\"\n cell = []\n a = row // 3\n b = column // 3\n for i in range(9):\n for j in range(9):\n if i // 3 == a and j // 3 == b:\n cell.append(grid[i][j])\n return cell\n\n def row_and_column(self, grid, row, column):\n \"\"\"\n Returns rows and columns\n \"\"\"\n r = grid[row]\n c = []\n for j in range(9):\n c.append(grid[j][column])\n return r, c\n\n def step_1(self, sub_grid, num):\n \"\"\"\n Reducing a list of clues to a single value based on row and column elimination\n Returns a refined sub grid\n \"\"\"\n row, column = self.row_and_column(sub_grid, num, num)\n row_flatten = sum(row, [])\n single_values = [i for i, j in Counter(row_flatten).items() if j == 1]\n for i in range(len(sub_grid)):\n for j in single_values:\n if j in sub_grid[num][i] and len(sub_grid[num][i]) != 1:\n sub_grid[num][i] = [j]\n column_flatten = sum(column, [])\n column_single_values = [i for i, j in Counter(column_flatten).items\n () if j == 1]\n for i in range(len(sub_grid)):\n for j in column_single_values:\n if j in sub_grid[i][num] and 
len(sub_grid[i][num]) != 1:\n sub_grid[i][num] = [j]\n return sub_grid\n\n def step_2(self, sub_grid, num):\n \"\"\"\n Removes a number 'n' that fits at its correct position from other lists corresponding its row and column\n Returns refined sub grid\n \"\"\"\n row, column = self.row_and_column(sub_grid, num, num)\n single_value_list = []\n for i in range(len(row)):\n if len(sub_grid[num][i]) == 1:\n single_value_list.append(sub_grid[num][i])\n single_value_list_flatten = sum(single_value_list, [])\n for i in range(len(sub_grid)):\n if len(sub_grid[num][i]) != 1:\n for j in single_value_list_flatten:\n if j in sub_grid[num][i]:\n sub_grid[num][i].remove(j)\n single_value_list = []\n for i in range(len(column)):\n if len(sub_grid[i][num]) == 1:\n single_value_list.append(sub_grid[i][num])\n single_value_list_flatten = sum(single_value_list, [])\n for i in range(len(sub_grid)):\n if len(sub_grid[i][num]) != 1:\n for j in single_value_list_flatten:\n if j in sub_grid[i][num]:\n sub_grid[i][num].remove(j)\n return sub_grid\n\n def step_3(self, sub_grid, num):\n pass\n\n def perform(self):\n \"\"\"\n Performs the step_1 and step_2 untill the Sub grid is solved\n Returns None\n \"\"\"\n temp = []\n while self.sub_grid != temp:\n temp = deepcopy(self.sub_grid)\n for i in range(len(grid)):\n self.sub_grid = self.step_1(self.sub_grid, i)\n self.sub_grid = self.step_2(self.sub_grid, i)\n\n def solve(self):\n \"\"\"\n Solves the Sub grid and prints the sub grid\n Returns None\n \"\"\"\n self.perform()\n for i in range(9):\n for j in range(9):\n print(self.sub_grid[i][j], end=' ')\n print()\n\n\ngrid = [[8, 0, 6, 0, 0, 0, 4, 0, 9], [0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 9, 2,\n 0, 0, 0, 5, 0, 8], [0, 0, 9, 0, 7, 1, 3, 0, 0], [5, 0, 8, 0, 0, 0, 0, 2,\n 0], [0, 0, 4, 0, 5, 0, 0, 0, 0], [0, 0, 0, 0, 0, 7, 9, 1, 0], [0, 0, 0,\n 9, 0, 0, 0, 0, 7], [0, 7, 0, 0, 0, 3, 0, 0, 4]]\nmat = Sudoku(grid)\nmat.solve()\n",
"step-4": "from pprint import pprint\nfrom collections import Counter\nfrom copy import deepcopy\n\n\nclass Sudoku:\n\n def __init__(self, grid):\n \"\"\"\n Initializes the grid\n \"\"\"\n self.grid = grid\n self.sub_grid = self.create_sub_grid(self.grid)\n\n def create_sub_grid(self, grid):\n \"\"\" \n Creates a Sub grid, containing the possible numbers within a cell\n Returns a Sub grid\n \"\"\"\n sub_grid = []\n for i in range(9):\n sub = []\n for j in range(9):\n if grid[i][j] == 0:\n sub.append(self.missing_numbers(i, j))\n else:\n sub.append([grid[i][j]])\n sub_grid.append(sub)\n del sub\n return sub_grid\n\n def missing_numbers(self, row, column):\n \"\"\"\n Returs the possible set of numbers of a particular row and column\n \"\"\"\n rrow, ccolumn = self.row_and_column(self.grid, row, column)\n cell = self.cell_3by3(row, column)\n missing_num = list({i for i in range(1, 10)} - set(rrow + ccolumn +\n cell))\n return missing_num\n\n def cell_3by3(self, row, column):\n \"\"\"\n Returns grid of 3 X 3\n \"\"\"\n cell = []\n a = row // 3\n b = column // 3\n for i in range(9):\n for j in range(9):\n if i // 3 == a and j // 3 == b:\n cell.append(grid[i][j])\n return cell\n\n def row_and_column(self, grid, row, column):\n \"\"\"\n Returns rows and columns\n \"\"\"\n r = grid[row]\n c = []\n for j in range(9):\n c.append(grid[j][column])\n return r, c\n\n def step_1(self, sub_grid, num):\n \"\"\"\n Reducing a list of clues to a single value based on row and column elimination\n Returns a refined sub grid\n \"\"\"\n row, column = self.row_and_column(sub_grid, num, num)\n row_flatten = sum(row, [])\n single_values = [i for i, j in Counter(row_flatten).items() if j == 1]\n for i in range(len(sub_grid)):\n for j in single_values:\n if j in sub_grid[num][i] and len(sub_grid[num][i]) != 1:\n sub_grid[num][i] = [j]\n column_flatten = sum(column, [])\n column_single_values = [i for i, j in Counter(column_flatten).items\n () if j == 1]\n for i in range(len(sub_grid)):\n for j 
in column_single_values:\n if j in sub_grid[i][num] and len(sub_grid[i][num]) != 1:\n sub_grid[i][num] = [j]\n return sub_grid\n\n def step_2(self, sub_grid, num):\n \"\"\"\n Removes a number 'n' that fits at its correct position from other lists corresponding its row and column\n Returns refined sub grid\n \"\"\"\n row, column = self.row_and_column(sub_grid, num, num)\n single_value_list = []\n for i in range(len(row)):\n if len(sub_grid[num][i]) == 1:\n single_value_list.append(sub_grid[num][i])\n single_value_list_flatten = sum(single_value_list, [])\n for i in range(len(sub_grid)):\n if len(sub_grid[num][i]) != 1:\n for j in single_value_list_flatten:\n if j in sub_grid[num][i]:\n sub_grid[num][i].remove(j)\n single_value_list = []\n for i in range(len(column)):\n if len(sub_grid[i][num]) == 1:\n single_value_list.append(sub_grid[i][num])\n single_value_list_flatten = sum(single_value_list, [])\n for i in range(len(sub_grid)):\n if len(sub_grid[i][num]) != 1:\n for j in single_value_list_flatten:\n if j in sub_grid[i][num]:\n sub_grid[i][num].remove(j)\n return sub_grid\n\n def step_3(self, sub_grid, num):\n pass\n\n def perform(self):\n \"\"\"\n Performs the step_1 and step_2 untill the Sub grid is solved\n Returns None\n \"\"\"\n temp = []\n while self.sub_grid != temp:\n temp = deepcopy(self.sub_grid)\n for i in range(len(grid)):\n self.sub_grid = self.step_1(self.sub_grid, i)\n self.sub_grid = self.step_2(self.sub_grid, i)\n\n def solve(self):\n \"\"\"\n Solves the Sub grid and prints the sub grid\n Returns None\n \"\"\"\n self.perform()\n for i in range(9):\n for j in range(9):\n print(self.sub_grid[i][j], end=' ')\n print()\n\n\ngrid = [[8, 0, 6, 0, 0, 0, 4, 0, 9], [0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 9, 2,\n 0, 0, 0, 5, 0, 8], [0, 0, 9, 0, 7, 1, 3, 0, 0], [5, 0, 8, 0, 0, 0, 0, 2,\n 0], [0, 0, 4, 0, 5, 0, 0, 0, 0], [0, 0, 0, 0, 0, 7, 9, 1, 0], [0, 0, 0,\n 9, 0, 0, 0, 0, 7], [0, 7, 0, 0, 0, 3, 0, 0, 4]]\nmat = Sudoku(grid)\nmat.solve()\n",
"step-5": "\r\n\r\n\r\nfrom pprint import pprint\r\nfrom collections import Counter\r\nfrom copy import deepcopy\r\n\r\n\r\nclass Sudoku():\r\n def __init__(self, grid):\r\n '''\r\n Initializes the grid\r\n '''\r\n self.grid = grid\r\n self.sub_grid = self.create_sub_grid(self.grid)\r\n\r\n def create_sub_grid(self, grid):\r\n ''' \r\n Creates a Sub grid, containing the possible numbers within a cell\r\n Returns a Sub grid\r\n '''\r\n sub_grid = []\r\n for i in range(9):\r\n sub = []\r\n for j in range(9):\r\n if grid[i][j] == 0:\r\n sub.append(self.missing_numbers(i,j))\r\n else:\r\n sub.append([grid[i][j]])\r\n sub_grid.append(sub)\r\n del sub\r\n return sub_grid\r\n\r\n\r\n def missing_numbers(self, row, column):\r\n '''\r\n Returs the possible set of numbers of a particular row and column\r\n '''\r\n\r\n rrow, ccolumn = self.row_and_column(self.grid, row, column)\r\n cell = self.cell_3by3(row, column)\r\n \r\n missing_num = list({i for i in range(1, 10)} - set(rrow + ccolumn + cell))\r\n return missing_num\r\n\r\n\r\n\r\n def cell_3by3(self, row, column):\r\n '''\r\n Returns grid of 3 X 3\r\n '''\r\n\r\n cell = []\r\n a = row // 3\r\n b = column // 3\r\n for i in range(9):\r\n for j in range(9):\r\n if i // 3 == a and j // 3 == b : \r\n cell.append(grid[i][j])\r\n return cell\r\n\r\n def row_and_column(self, grid, row, column): \r\n '''\r\n Returns rows and columns\r\n '''\r\n r = grid[row]\r\n c = []\r\n for j in range(9):\r\n c.append(grid[j][column])\r\n return r, c\r\n\r\n\r\n\r\n\r\n def step_1(self, sub_grid, num):\r\n '''\r\n Reducing a list of clues to a single value based on row and column elimination\r\n Returns a refined sub grid\r\n '''\r\n\r\n\r\n row,column = self.row_and_column(sub_grid,num,num)\r\n\r\n row_flatten = sum(row,[])\r\n single_values = [i for i,j in Counter(row_flatten).items() if j == 1 ]\r\n\r\n # For Rows\r\n for i in range(len(sub_grid)):\r\n for j in single_values:\r\n if j in sub_grid[num][i] and len(sub_grid[num][i]) != 1:\r\n 
sub_grid[num][i] = [j] \r\n\r\n # For Columns\r\n column_flatten = sum(column, [])\r\n column_single_values = [i for i,j in Counter(column_flatten).items() if j == 1 ]\r\n for i in range(len(sub_grid)):\r\n for j in column_single_values:\r\n if j in sub_grid[i][num] and len(sub_grid[i][num]) != 1:\r\n sub_grid[i][num] = [j]\r\n\r\n\r\n\r\n return sub_grid\r\n\r\n def step_2(self, sub_grid, num):\r\n '''\r\n Removes a number 'n' that fits at its correct position from other lists corresponding its row and column\r\n Returns refined sub grid\r\n '''\r\n\r\n row,column = self.row_and_column(sub_grid,num,num)\r\n\r\n # For Rows\r\n single_value_list = []\r\n for i in range(len(row)):\r\n if len(sub_grid[num][i]) == 1:\r\n single_value_list.append(sub_grid[num][i])\r\n single_value_list_flatten = sum(single_value_list, [])\r\n\r\n for i in range(len(sub_grid)):\r\n if len(sub_grid[num][i]) != 1: \r\n for j in single_value_list_flatten:\r\n if j in sub_grid[num][i]:\r\n sub_grid[num][i].remove(j)\r\n\r\n # For Columns\r\n single_value_list = []\r\n for i in range(len(column)):\r\n if len(sub_grid[i][num]) == 1:\r\n single_value_list.append(sub_grid[i][num])\r\n single_value_list_flatten = sum(single_value_list, [])\r\n\r\n for i in range(len(sub_grid)):\r\n if len(sub_grid[i][num]) != 1: \r\n for j in single_value_list_flatten:\r\n if j in sub_grid[i][num]:\r\n sub_grid[i][num].remove(j)\r\n\r\n return sub_grid\r\n\r\n def step_3(self, sub_grid, num):\r\n pass\r\n\r\n \r\n\r\n\r\n def perform(self):\r\n '''\r\n Performs the step_1 and step_2 untill the Sub grid is solved\r\n Returns None\r\n '''\r\n\r\n temp = []\r\n while self.sub_grid != temp: \r\n temp = deepcopy(self.sub_grid) \r\n for i in range(len(grid)):\r\n self.sub_grid = self.step_1(self.sub_grid, i)\r\n self.sub_grid = self.step_2(self.sub_grid, i)\r\n\r\n\r\n def solve(self):\r\n '''\r\n Solves the Sub grid and prints the sub grid\r\n Returns None\r\n '''\r\n\r\n self.perform()\r\n for i in range(9):\r\n for 
j in range(9):\r\n print(self.sub_grid[i][j], end=' ')\r\n print()\r\n\r\n\r\n# grid = [\r\n# [0,3,0,0,1,0,0,6,0],\r\n# [7,5,0,0,3,0,0,4,8],\r\n# [0,0,6,9,8,4,3,0,0],\r\n# [0,0,3,0,0,0,8,0,0],\r\n# [9,1,2,0,0,0,6,7,4],\r\n# [0,0,4,0,0,0,5,0,0],\r\n# [0,0,1,6,7,5,2,0,0],\r\n# [6,8,0,0,9,0,0,1,5],\r\n# [0,9,0,0,4,0,0,3,0]\r\n# ]\r\n\r\n# grid = [\r\n# [6,0,0,1,0,8,2,0,3],\r\n# [0,2,0,0,4,0,0,9,0],\r\n# [8,0,3,0,0,5,4,0,0],\r\n# [5,0,4,6,0,7,0,0,9],\r\n# [0,3,0,0,0,0,0,5,0],\r\n# [7,0,0,8,0,3,1,0,2],\r\n# [0,0,1,7,0,0,9,0,6],\r\n# [0,8,0,0,3,0,0,2,0],\r\n# [3,0,2,9,0,4,0,0,5]\r\n# ]\r\ngrid = [\r\n [8,0,6,0,0,0,4,0,9],\r\n [0,0,0,0,0,0,0,0,0],\r\n [0,9,2,0,0,0,5,0,8],\r\n [0,0,9,0,7,1,3,0,0],\r\n [5,0,8,0,0,0,0,2,0],\r\n [0,0,4,0,5,0,0,0,0],\r\n [0,0,0,0,0,7,9,1,0],\r\n [0,0,0,9,0,0,0,0,7],\r\n [0,7,0,0,0,3,0,0,4],\r\n]\r\n\r\nmat = Sudoku(grid)\r\nmat.solve()\r\n",
"step-ids": [
10,
12,
13,
14,
15
]
}
|
[
10,
12,
13,
14,
15
] |
'''
Created on June 24, 2019
@author: Andrew Habib
'''
import json
import jsonref
import sys
from jsonsubschema.api import isSubschema
def main():
    """CLI entry point: compare two JSON Schemas for subtyping.

    Usage: prog LHS_SCHEMA_FILE RHS_SCHEMA_FILE
    Loads both files as JSON and prints whether each schema is a
    subschema of the other.
    Raises SystemExit when the argument count is wrong.
    """
    # Validate the argument count explicitly instead of with ``assert``,
    # which is stripped when Python runs with -O.
    if len(sys.argv) != 3:
        raise SystemExit(
            "jsonsubschema cli takes exactly two arguments lhs_schema and rhs_schema")

    s1_file = sys.argv[1]
    s2_file = sys.argv[2]

    with open(s1_file, 'r') as f1:
        s1 = json.load(f1)
    with open(s2_file, 'r') as f2:
        s2 = json.load(f2)

    print("LHS <: RHS", isSubschema(s1, s2))
    print("RHS <: LHS", isSubschema(s2, s1))


if __name__ == "__main__":
    main()
|
normal
|
{
"blob_id": "ba78a1e29736c4f109a0efc6f5b9993994661058",
"index": 3527,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef main():\n assert len(sys.argv\n ) == 3, 'jsonsubschema cli takes exactly two arguments lhs_schema and rhs_schema'\n s1_file = sys.argv[1]\n s2_file = sys.argv[2]\n with open(s1_file, 'r') as f1:\n s1 = json.load(f1)\n with open(s2_file, 'r') as f2:\n s2 = json.load(f2)\n print('LHS <: RHS', isSubschema(s1, s2))\n print('RHS <: LHS', isSubschema(s2, s1))\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef main():\n assert len(sys.argv\n ) == 3, 'jsonsubschema cli takes exactly two arguments lhs_schema and rhs_schema'\n s1_file = sys.argv[1]\n s2_file = sys.argv[2]\n with open(s1_file, 'r') as f1:\n s1 = json.load(f1)\n with open(s2_file, 'r') as f2:\n s2 = json.load(f2)\n print('LHS <: RHS', isSubschema(s1, s2))\n print('RHS <: LHS', isSubschema(s2, s1))\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "<mask token>\nimport json\nimport jsonref\nimport sys\nfrom jsonsubschema.api import isSubschema\n\n\ndef main():\n assert len(sys.argv\n ) == 3, 'jsonsubschema cli takes exactly two arguments lhs_schema and rhs_schema'\n s1_file = sys.argv[1]\n s2_file = sys.argv[2]\n with open(s1_file, 'r') as f1:\n s1 = json.load(f1)\n with open(s2_file, 'r') as f2:\n s2 = json.load(f2)\n print('LHS <: RHS', isSubschema(s1, s2))\n print('RHS <: LHS', isSubschema(s2, s1))\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "'''\nCreated on June 24, 2019\n@author: Andrew Habib\n'''\n\nimport json\nimport jsonref\nimport sys\n\nfrom jsonsubschema.api import isSubschema\n\n\ndef main():\n\n assert len(\n sys.argv) == 3, \"jsonsubschema cli takes exactly two arguments lhs_schema and rhs_schema\"\n\n s1_file = sys.argv[1]\n s2_file = sys.argv[2]\n\n with open(s1_file, 'r') as f1:\n s1 = json.load(f1)\n # s1 = jsonref.load(f1)\n with open(s2_file, 'r') as f2:\n s2 = json.load(f2)\n # s2 = jsonref.load(f2)\n\n print(\"LHS <: RHS\", isSubschema(s1, s2))\n print(\"RHS <: LHS\", isSubschema(s2, s1))\n\n\nif __name__ == \"__main__\":\n\n main()\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
urlpatterns = [path('news/', NewsCreateListView.as_view()), path(
'news_detailed/<int:id>/', NewsDetailGenericView.as_view())]
<|reserved_special_token_1|>
from django.contrib import admin
from django.urls import path
from .views import NewsCreateListView, NewsDetailGenericView
urlpatterns = [path('news/', NewsCreateListView.as_view()), path(
'news_detailed/<int:id>/', NewsDetailGenericView.as_view())]
<|reserved_special_token_1|>
# URL routes for the news application.
# NOTE(review): the ``admin`` import appears unused in this module — verify
# before removing.
from django.contrib import admin
from django.urls import path
from .views import NewsCreateListView, NewsDetailGenericView


urlpatterns = [
    # List all news items / create a new one.
    path('news/', NewsCreateListView.as_view()),
    # Detail view for a single news item, addressed by integer ``id``.
    path('news_detailed/<int:id>/', NewsDetailGenericView.as_view()),

]
|
flexible
|
{
"blob_id": "afdb14d60374049753b3c980c717a13456c7ff5c",
"index": 9745,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nurlpatterns = [path('news/', NewsCreateListView.as_view()), path(\n 'news_detailed/<int:id>/', NewsDetailGenericView.as_view())]\n",
"step-3": "from django.contrib import admin\nfrom django.urls import path\nfrom .views import NewsCreateListView, NewsDetailGenericView\nurlpatterns = [path('news/', NewsCreateListView.as_view()), path(\n 'news_detailed/<int:id>/', NewsDetailGenericView.as_view())]\n",
"step-4": "from django.contrib import admin\nfrom django.urls import path\nfrom .views import NewsCreateListView, NewsDetailGenericView\n\n\nurlpatterns = [\n path('news/', NewsCreateListView.as_view()),\n path('news_detailed/<int:id>/', NewsDetailGenericView.as_view()),\n\n]",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
#!/usr/bin/env python
import crawl
import logging
from elasticsearch import Elasticsearch
if __name__ == '__main__':
    # Root logger at INFO; quiet the chatty elasticsearch client while
    # keeping the crawler's own progress messages visible.
    logging.basicConfig(level=logging.INFO)
    for logger_name, level in (("crawl", logging.INFO),
                               ("elasticsearch", logging.ERROR)):
        logging.getLogger(logger_name).setLevel(level)

    crawl.crawl_domain(Elasticsearch(), "aaronparecki.com")
|
normal
|
{
"blob_id": "21d07c2b80aa00d0c75da342d37195b6829593b6",
"index": 1110,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif __name__ == '__main__':\n logging.basicConfig(level=logging.INFO)\n logging.getLogger('crawl').setLevel(logging.INFO)\n logging.getLogger('elasticsearch').setLevel(logging.ERROR)\n es = Elasticsearch()\n crawl.crawl_domain(es, 'aaronparecki.com')\n",
"step-3": "import crawl\nimport logging\nfrom elasticsearch import Elasticsearch\nif __name__ == '__main__':\n logging.basicConfig(level=logging.INFO)\n logging.getLogger('crawl').setLevel(logging.INFO)\n logging.getLogger('elasticsearch').setLevel(logging.ERROR)\n es = Elasticsearch()\n crawl.crawl_domain(es, 'aaronparecki.com')\n",
"step-4": "#!/usr/bin/env python \nimport crawl\nimport logging\nfrom elasticsearch import Elasticsearch\n\n\nif __name__ == '__main__':\n logging.basicConfig(level=logging.INFO)\n logging.getLogger(\"crawl\").setLevel(logging.INFO)\n logging.getLogger(\"elasticsearch\").setLevel(logging.ERROR)\n \n es = Elasticsearch()\n crawl.crawl_domain(es, \"aaronparecki.com\")",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from function import *
from .propogation import optimize
from .initialize import initialize_with_zeros
def predict(weight, intercept, x_vector):
    """
    Predict whether the label is 0 or 1 using learned logistic regression
    parameters (weight, intercept).

    Arguments:
    weight -- weights, a numpy array of size (num_px * num_px * 3, 1)
    intercept -- bias, a scalar
    x_vector -- data of size (num_px * num_px * 3, number of examples)

    Returns:
    y_prediction -- a numpy array of shape (1, m) containing all 0/1
        predictions for the examples in x_vector
    """
    m = x_vector.shape[1]
    weight = weight.reshape(x_vector.shape[0], 1)

    # Probability of the positive class for every example.
    yhat = sigmoid(np.dot(weight.T, x_vector) + intercept)

    # Threshold at 0.5 in a single vectorized step instead of the
    # original per-element Python loop (same strict ``> 0.5`` cut-off,
    # same float dtype as the np.zeros array it used to fill).
    y_prediction = (yhat > 0.5).astype(float)

    assert y_prediction.shape == (1, m)
    return y_prediction
class Logistic(object):
    """
    Binary logistic-regression model wrapper.

    Wires together zero-initialization, gradient-descent optimization
    (``optimize``) and prediction (``predict``) over a train/test split.
    """

    def __init__(self, *args, **kwargs):
        """
        Initializing the model parameters.

        Keyword arguments:
        X_train, Y_train -- training inputs / labels
        X_test, Y_test -- test inputs / labels
        num_iteration -- number of gradient-descent iterations
        learning_rate -- gradient-descent step size
        """
        # Initializing the test & training sets.
        self._x_train = kwargs['X_train']
        self._y_train = kwargs['Y_train']
        self._x_test = kwargs['X_test']
        self._y_test = kwargs['Y_test']
        self.num_iteration = kwargs['num_iteration']
        self.learning_rate = kwargs['learning_rate']

    def fit(self):
        """
        Fit the model with the initialized parameters and print the
        train/test accuracies.

        Returns a dict with keys:
        costs, Y_prediction_test, Y_prediction_train, w, b,
        learning_rate, num_iterations
        """
        # Initialize parameters with zeros.
        weight, intercept = initialize_with_zeros(self._x_train.shape[0])

        # Gradient descent.
        parameters, grads, costs = optimize(weight,
                                            intercept,
                                            self._x_train,
                                            self._y_train,
                                            self.num_iteration,
                                            self.learning_rate)

        # Retrieve the learned parameters.
        weight = parameters["w"]
        intercept = parameters["b"]

        # Predict on both splits.
        y_prediction_test = predict(weight, intercept, self._x_test)
        y_prediction_train = predict(weight, intercept, self._x_train)

        # Print train/test errors.
        print("train accuracy: {} %".format(
            100 - np.mean(np.abs(y_prediction_train - self._y_train)) * 100))
        # BUG FIX: the original compared the test predictions against the
        # test *inputs* (self._x_test); accuracy must use the test labels.
        print("test accuracy: {} %".format(
            100 - np.mean(np.abs(y_prediction_test - self._y_test)) * 100))

        return {"costs": costs,
                "Y_prediction_test": y_prediction_test,
                "Y_prediction_train": y_prediction_train,
                "w": weight,
                "b": intercept,
                "learning_rate": self.learning_rate,
                "num_iterations": self.num_iteration}
|
normal
|
{
"blob_id": "63360ec9693a916375b49d0881008b1d7d4ec953",
"index": 4546,
"step-1": "<mask token>\n\n\nclass Logistic(object):\n <mask token>\n\n def __init__(self, *args, **kwargs):\n \"\"\"\n Initializing the model parameter\n :param args:\n :param kwargs:\n X_train,\n Y_train,\n X_test,\n Y_test,\n num_iterations = 2000,\n learning_rate = 0.5\n \"\"\"\n self._x_train = kwargs['X_train']\n self._y_train = kwargs['Y_train']\n self._x_test = kwargs['X_test']\n self._y_test = kwargs['Y_test']\n self.num_iteration = kwargs['num_iteration']\n self.learning_rate = kwargs['learning_rate']\n\n def fit(self):\n \"\"\"\n function will fit the model with initialized parameter\n :return:\n costs,\n y_prediction_test,\n y_prediction_train,\n weight,\n intercept,\n self.learning_rate,\n self.num_iteration\n \"\"\"\n weight, intercept = initialize_with_zeros(self._x_train.shape[0])\n parameters, grads, costs = optimize(weight, intercept, self.\n _x_train, self._y_train, self.num_iteration, self.learning_rate)\n weight = parameters['w']\n intercept = parameters['b']\n y_prediction_test = predict(weight, intercept, self._x_test)\n y_prediction_train = predict(weight, intercept, self._x_train)\n print('train accuracy: {} %'.format(100 - np.mean(np.abs(\n y_prediction_train - self._y_train)) * 100))\n print('test accuracy: {} %'.format(100 - np.mean(np.abs(\n y_prediction_test - self._x_test)) * 100))\n return {'costs': costs, 'Y_prediction_test': y_prediction_test,\n 'Y_prediction_train': y_prediction_train, 'w': weight, 'b':\n intercept, 'learning_rate': self.learning_rate,\n 'num_iterations': self.num_iteration}\n",
"step-2": "<mask token>\n\n\nclass Logistic(object):\n \"\"\"\n This class provides the flexibility to run\n logistic regression to your data set\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n \"\"\"\n Initializing the model parameter\n :param args:\n :param kwargs:\n X_train,\n Y_train,\n X_test,\n Y_test,\n num_iterations = 2000,\n learning_rate = 0.5\n \"\"\"\n self._x_train = kwargs['X_train']\n self._y_train = kwargs['Y_train']\n self._x_test = kwargs['X_test']\n self._y_test = kwargs['Y_test']\n self.num_iteration = kwargs['num_iteration']\n self.learning_rate = kwargs['learning_rate']\n\n def fit(self):\n \"\"\"\n function will fit the model with initialized parameter\n :return:\n costs,\n y_prediction_test,\n y_prediction_train,\n weight,\n intercept,\n self.learning_rate,\n self.num_iteration\n \"\"\"\n weight, intercept = initialize_with_zeros(self._x_train.shape[0])\n parameters, grads, costs = optimize(weight, intercept, self.\n _x_train, self._y_train, self.num_iteration, self.learning_rate)\n weight = parameters['w']\n intercept = parameters['b']\n y_prediction_test = predict(weight, intercept, self._x_test)\n y_prediction_train = predict(weight, intercept, self._x_train)\n print('train accuracy: {} %'.format(100 - np.mean(np.abs(\n y_prediction_train - self._y_train)) * 100))\n print('test accuracy: {} %'.format(100 - np.mean(np.abs(\n y_prediction_test - self._x_test)) * 100))\n return {'costs': costs, 'Y_prediction_test': y_prediction_test,\n 'Y_prediction_train': y_prediction_train, 'w': weight, 'b':\n intercept, 'learning_rate': self.learning_rate,\n 'num_iterations': self.num_iteration}\n",
"step-3": "<mask token>\n\n\ndef predict(weight, intercept, x_vector):\n \"\"\"\n Predict whether the label is 0 or 1 using learned logistic regression parameters (w, b)\n\n Arguments:\n w -- weights, a numpy array of size (num_px * num_px * 3, 1)\n b -- bias, a scalar\n X -- data of size (num_px * num_px * 3, number of examples)\n\n Returns:\n Y_prediction -- a numpy array (vector) containing all predictions (0/1) for the examples in X\n \"\"\"\n m = x_vector.shape[1]\n y_prediction = np.zeros((1, m))\n weight = weight.reshape(x_vector.shape[0], 1)\n yhat = sigmoid(np.dot(weight.T, x_vector) + intercept)\n for i in range(yhat.shape[1]):\n if yhat[0][i] > 0.5:\n y_prediction[0][i] = 1\n else:\n y_prediction[0][i] = 0\n assert y_prediction.shape == (1, m)\n return y_prediction\n\n\nclass Logistic(object):\n \"\"\"\n This class provides the flexibility to run\n logistic regression to your data set\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n \"\"\"\n Initializing the model parameter\n :param args:\n :param kwargs:\n X_train,\n Y_train,\n X_test,\n Y_test,\n num_iterations = 2000,\n learning_rate = 0.5\n \"\"\"\n self._x_train = kwargs['X_train']\n self._y_train = kwargs['Y_train']\n self._x_test = kwargs['X_test']\n self._y_test = kwargs['Y_test']\n self.num_iteration = kwargs['num_iteration']\n self.learning_rate = kwargs['learning_rate']\n\n def fit(self):\n \"\"\"\n function will fit the model with initialized parameter\n :return:\n costs,\n y_prediction_test,\n y_prediction_train,\n weight,\n intercept,\n self.learning_rate,\n self.num_iteration\n \"\"\"\n weight, intercept = initialize_with_zeros(self._x_train.shape[0])\n parameters, grads, costs = optimize(weight, intercept, self.\n _x_train, self._y_train, self.num_iteration, self.learning_rate)\n weight = parameters['w']\n intercept = parameters['b']\n y_prediction_test = predict(weight, intercept, self._x_test)\n y_prediction_train = predict(weight, intercept, self._x_train)\n print('train accuracy: 
{} %'.format(100 - np.mean(np.abs(\n y_prediction_train - self._y_train)) * 100))\n print('test accuracy: {} %'.format(100 - np.mean(np.abs(\n y_prediction_test - self._x_test)) * 100))\n return {'costs': costs, 'Y_prediction_test': y_prediction_test,\n 'Y_prediction_train': y_prediction_train, 'w': weight, 'b':\n intercept, 'learning_rate': self.learning_rate,\n 'num_iterations': self.num_iteration}\n",
"step-4": "from function import *\nfrom .propogation import optimize\nfrom .initialize import initialize_with_zeros\n\n\ndef predict(weight, intercept, x_vector):\n \"\"\"\n Predict whether the label is 0 or 1 using learned logistic regression parameters (w, b)\n\n Arguments:\n w -- weights, a numpy array of size (num_px * num_px * 3, 1)\n b -- bias, a scalar\n X -- data of size (num_px * num_px * 3, number of examples)\n\n Returns:\n Y_prediction -- a numpy array (vector) containing all predictions (0/1) for the examples in X\n \"\"\"\n m = x_vector.shape[1]\n y_prediction = np.zeros((1, m))\n weight = weight.reshape(x_vector.shape[0], 1)\n yhat = sigmoid(np.dot(weight.T, x_vector) + intercept)\n for i in range(yhat.shape[1]):\n if yhat[0][i] > 0.5:\n y_prediction[0][i] = 1\n else:\n y_prediction[0][i] = 0\n assert y_prediction.shape == (1, m)\n return y_prediction\n\n\nclass Logistic(object):\n \"\"\"\n This class provides the flexibility to run\n logistic regression to your data set\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n \"\"\"\n Initializing the model parameter\n :param args:\n :param kwargs:\n X_train,\n Y_train,\n X_test,\n Y_test,\n num_iterations = 2000,\n learning_rate = 0.5\n \"\"\"\n self._x_train = kwargs['X_train']\n self._y_train = kwargs['Y_train']\n self._x_test = kwargs['X_test']\n self._y_test = kwargs['Y_test']\n self.num_iteration = kwargs['num_iteration']\n self.learning_rate = kwargs['learning_rate']\n\n def fit(self):\n \"\"\"\n function will fit the model with initialized parameter\n :return:\n costs,\n y_prediction_test,\n y_prediction_train,\n weight,\n intercept,\n self.learning_rate,\n self.num_iteration\n \"\"\"\n weight, intercept = initialize_with_zeros(self._x_train.shape[0])\n parameters, grads, costs = optimize(weight, intercept, self.\n _x_train, self._y_train, self.num_iteration, self.learning_rate)\n weight = parameters['w']\n intercept = parameters['b']\n y_prediction_test = predict(weight, intercept, 
self._x_test)\n y_prediction_train = predict(weight, intercept, self._x_train)\n print('train accuracy: {} %'.format(100 - np.mean(np.abs(\n y_prediction_train - self._y_train)) * 100))\n print('test accuracy: {} %'.format(100 - np.mean(np.abs(\n y_prediction_test - self._x_test)) * 100))\n return {'costs': costs, 'Y_prediction_test': y_prediction_test,\n 'Y_prediction_train': y_prediction_train, 'w': weight, 'b':\n intercept, 'learning_rate': self.learning_rate,\n 'num_iterations': self.num_iteration}\n",
"step-5": "from function import *\nfrom .propogation import optimize\nfrom .initialize import initialize_with_zeros\n\n\ndef predict(weight, intercept, x_vector):\n \"\"\"\n Predict whether the label is 0 or 1 using learned logistic regression parameters (w, b)\n\n Arguments:\n w -- weights, a numpy array of size (num_px * num_px * 3, 1)\n b -- bias, a scalar\n X -- data of size (num_px * num_px * 3, number of examples)\n\n Returns:\n Y_prediction -- a numpy array (vector) containing all predictions (0/1) for the examples in X\n \"\"\"\n\n m = x_vector.shape[1]\n y_prediction = np.zeros((1, m))\n weight = weight.reshape(x_vector.shape[0], 1)\n\n # Compute vector \"A\" predicting the probabilities of a cat being present in the picture\n yhat = sigmoid(np.dot(weight.T, x_vector) + intercept)\n for i in range(yhat.shape[1]):\n\n # Convert probabilities A[0,i] to actual predictions p[0,i]\n if yhat[0][i] > 0.5:\n y_prediction[0][i] = 1\n else:\n y_prediction[0][i] = 0\n\n assert (y_prediction.shape == (1, m))\n\n return y_prediction\n\n\nclass Logistic(object):\n \"\"\"\n This class provides the flexibility to run\n logistic regression to your data set\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n \"\"\"\n Initializing the model parameter\n :param args:\n :param kwargs:\n X_train,\n Y_train,\n X_test,\n Y_test,\n num_iterations = 2000,\n learning_rate = 0.5\n \"\"\"\n # Initializing the test & training set\n self._x_train = kwargs['X_train']\n self._y_train = kwargs['Y_train']\n self._x_test = kwargs['X_test']\n self._y_test = kwargs['Y_test']\n\n self.num_iteration = kwargs['num_iteration']\n self.learning_rate = kwargs['learning_rate']\n\n def fit(self):\n \"\"\"\n function will fit the model with initialized parameter\n :return:\n costs,\n y_prediction_test,\n y_prediction_train,\n weight,\n intercept,\n self.learning_rate,\n self.num_iteration\n \"\"\"\n # initialize parameters with zeros (≈ 1 line of code)\n weight, intercept = 
initialize_with_zeros(self._x_train.shape[0])\n\n # Gradient descent (≈ 1 line of code)\n parameters, grads, costs = optimize(weight,\n intercept,\n self._x_train,\n self._y_train,\n self.num_iteration,\n self.learning_rate\n )\n\n # Retrieve parameters w and b from dictionary \"parameters\"\n weight = parameters[\"w\"]\n intercept = parameters[\"b\"]\n\n # Predict test/train set examples (≈ 2 lines of code)\n y_prediction_test = predict(weight, intercept, self._x_test)\n y_prediction_train = predict(weight, intercept, self._x_train)\n\n # Print train/test Errors\n print(\"train accuracy: {} %\".format(100 - np.mean(np.abs(y_prediction_train - self._y_train)) * 100))\n print(\"test accuracy: {} %\".format(100 - np.mean(np.abs(y_prediction_test - self._x_test)) * 100))\n\n return {\"costs\": costs,\n \"Y_prediction_test\": y_prediction_test,\n \"Y_prediction_train\": y_prediction_train,\n \"w\": weight,\n \"b\": intercept,\n \"learning_rate\": self.learning_rate,\n \"num_iterations\": self.num_iteration}\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def dot_product(a, b):
ans = 0
for i in range(len(a)):
ans += a[i] * b[i]
return ans
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def dot_product(a, b):
ans = 0
for i in range(len(a)):
ans += a[i] * b[i]
return ans
<|reserved_special_token_0|>
print(dot_product(a, b))
<|reserved_special_token_1|>
def dot_product(a, b):
    """Return the dot product of two numeric vectors.

    Computes sum(a[i] * b[i]) over the indices of *a*; raises IndexError
    if *b* is shorter than *a*, matching plain index-based access.
    """
    return sum(a[i] * b[i] for i in range(len(a)))
# Read the vector length from the first stdin line; n is consumed here
# but its value is not used further (the vectors define their own length).
n = int(input())
# Both vectors are sorted ascending before taking the dot product, pairing
# elements by rank.  NOTE(review): presumably a rearrangement-style problem
# (min/max dot product) -- confirm against the original problem statement.
a = sorted(list(map(int, input().split())))
b = sorted(list(map(int, input().split())))
print(dot_product(a, b))
|
flexible
|
{
"blob_id": "fc273a286a462cb673edaa2de2ecc6b9ca631004",
"index": 9824,
"step-1": "<mask token>\n",
"step-2": "def dot_product(a, b):\n ans = 0\n for i in range(len(a)):\n ans += a[i] * b[i]\n return ans\n\n\n<mask token>\n",
"step-3": "def dot_product(a, b):\n ans = 0\n for i in range(len(a)):\n ans += a[i] * b[i]\n return ans\n\n\n<mask token>\nprint(dot_product(a, b))\n",
"step-4": "def dot_product(a, b):\n ans = 0\n for i in range(len(a)):\n ans += a[i] * b[i]\n return ans\n\n\nn = int(input())\na = sorted(list(map(int, input().split())))\nb = sorted(list(map(int, input().split())))\nprint(dot_product(a, b))\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
# -*- coding: utf-8 -*-
# @Time : 2018/12/13 21:32
# @Author : sundongjian
# @Email : [email protected]
# @File : __init__.py.py
# @Software: PyCharm
|
normal
|
{
"blob_id": "00ec56420831d8f4ab14259c7b07f1be0bcb7d78",
"index": 9161,
"step-1": "# -*- coding: utf-8 -*-\r\n# @Time : 2018/12/13 21:32\r\n# @Author : sundongjian\r\n# @Email : [email protected]\r\n# @File : __init__.py.py\r\n# @Software: PyCharm",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
1
]
}
|
[
1
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
rc('font', family=font_name)
<|reserved_special_token_0|>
print(df1)
df1.plot()
plt.show()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
font_name = font_manager.FontProperties(fname='c:/windows/Fonts/malgun.ttf'
).get_name()
rc('font', family=font_name)
rcParams['axes.unicode_minus'] = False
df1 = pd.DataFrame(np.random.randn(100, 3), index=pd.date_range('1/1/2019',
periods=100), columns=['A', 'B', 'C']).cumsum()
print(df1)
df1.plot()
plt.show()
<|reserved_special_token_1|>
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import font_manager, rc, rcParams
font_name = font_manager.FontProperties(fname='c:/windows/Fonts/malgun.ttf'
).get_name()
rc('font', family=font_name)
rcParams['axes.unicode_minus'] = False
df1 = pd.DataFrame(np.random.randn(100, 3), index=pd.date_range('1/1/2019',
periods=100), columns=['A', 'B', 'C']).cumsum()
print(df1)
df1.plot()
plt.show()
<|reserved_special_token_1|>
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt

#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# Configure matplotlib so Hangul (Korean) text renders in charts.
# NOTE(review): font path is Windows-specific (Malgun Gothic) -- this
# script will fail on other platforms unless the path is adjusted.
from matplotlib import font_manager, rc, rcParams
font_name = font_manager.FontProperties(
    fname="c:/windows/Fonts/malgun.ttf").get_name()
rc('font',family=font_name)
rcParams['axes.unicode_minus'] = False  # keep the minus sign rendering correctly with this font
###
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

# 100 rows x 3 columns of standard-normal noise, indexed by 100 daily
# dates starting 2019-01-01; cumsum turns the noise into a random walk.
df1 = pd.DataFrame(np.random.randn(100, 3), index=pd.date_range('1/1/2019', periods=100),
                   columns=['A','B','C']).cumsum()  # accumulate values row by row

print(df1)

# DataFrame.plot() delegates to matplotlib internally, so the chart can
# be drawn directly from the pandas object.
df1.plot()
plt.show()
|
flexible
|
{
"blob_id": "fb82724aab7e0819c9921d41dcb612b304b25753",
"index": 9723,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nrc('font', family=font_name)\n<mask token>\nprint(df1)\ndf1.plot()\nplt.show()\n",
"step-3": "<mask token>\nfont_name = font_manager.FontProperties(fname='c:/windows/Fonts/malgun.ttf'\n ).get_name()\nrc('font', family=font_name)\nrcParams['axes.unicode_minus'] = False\ndf1 = pd.DataFrame(np.random.randn(100, 3), index=pd.date_range('1/1/2019',\n periods=100), columns=['A', 'B', 'C']).cumsum()\nprint(df1)\ndf1.plot()\nplt.show()\n",
"step-4": "import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib import font_manager, rc, rcParams\nfont_name = font_manager.FontProperties(fname='c:/windows/Fonts/malgun.ttf'\n ).get_name()\nrc('font', family=font_name)\nrcParams['axes.unicode_minus'] = False\ndf1 = pd.DataFrame(np.random.randn(100, 3), index=pd.date_range('1/1/2019',\n periods=100), columns=['A', 'B', 'C']).cumsum()\nprint(df1)\ndf1.plot()\nplt.show()\n",
"step-5": "import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\n# 차트에 한글 가능하도록\nfrom matplotlib import font_manager, rc, rcParams\nfont_name = font_manager.FontProperties(\n fname=\"c:/windows/Fonts/malgun.ttf\").get_name()\nrc('font',family=font_name)\nrcParams['axes.unicode_minus'] = False # 부호표시 (-,+) 사용할때\n###\n#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\n\n# 100행 3열 랜덤생성 2019,1,1 부터 100일\ndf1 = pd.DataFrame(np.random.randn(100, 3), index=pd.date_range('1/1/2019', periods=100),\n columns=['A','B','C']).cumsum() # 값을 누적 시켜 넣는다.\n\nprint(df1)\n\n# pandas 의 DataFrame 에서 내부적으로 matplotlib 를 import 해서 연결되어 있기때문에 plot 함수를 사용해서 그려준다.\ndf1.plot()\nplt.show()\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
def merge_model(model_1, model_2):
"""
keras将两个独立的模型融合起来
:param model_1:
:param model_2:
:return:
"""
inp1 = model_1.input
inp2 = model_2.input
r1 = model_1.output
r2 = model_2.output
x = keras.layers.Concatenate(axis=1)([r1, r2])
model = Model(inputs=[inp1, inp2], outputs=x)
return model
def addLayers_model(model):
"""
修改模型(模型加层)
采用 keras 的 Concatenate 进行特征融合之后,模型加层的 add 将无效,所以采用这种方案进行加层
:param model: 待扩层的模型
:return:
"""
origin_model = model
for layer in origin_model.layers:
layer.trainable = False
inp = origin_model.input
x = origin_model.output
den = Dense(512, name='fine_dense')(x)
l = Dropout(0.5)(den)
result = Dense(10, activation='softmax')(l)
model = Model(input=inp, outputs=result)
return model
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def merge_model(model_1, model_2):
"""
keras将两个独立的模型融合起来
:param model_1:
:param model_2:
:return:
"""
inp1 = model_1.input
inp2 = model_2.input
r1 = model_1.output
r2 = model_2.output
x = keras.layers.Concatenate(axis=1)([r1, r2])
model = Model(inputs=[inp1, inp2], outputs=x)
return model
def addLayers_model(model):
"""
修改模型(模型加层)
采用 keras 的 Concatenate 进行特征融合之后,模型加层的 add 将无效,所以采用这种方案进行加层
:param model: 待扩层的模型
:return:
"""
origin_model = model
for layer in origin_model.layers:
layer.trainable = False
inp = origin_model.input
x = origin_model.output
den = Dense(512, name='fine_dense')(x)
l = Dropout(0.5)(den)
result = Dense(10, activation='softmax')(l)
model = Model(input=inp, outputs=result)
return model
<|reserved_special_token_0|>
model1.add(Conv1D(filters=8, kernel_size=3, input_shape=input_shape_1D,
padding='same', activation='relu'))
model1.add(MaxPooling1D(pool_size=2, padding='same'))
model1.add(Conv1D(filters=16, kernel_size=3, input_shape=(1, 512), padding=
'same', activation='relu'))
model1.add(MaxPooling1D(pool_size=2, padding='same'))
<|reserved_special_token_0|>
model1.add(LSTM(32, return_sequences=True))
model1.add(Flatten())
<|reserved_special_token_0|>
model2.add(Conv2D(filters=8, kernel_size=(3, 3), input_shape=input_shape_2D,
padding='same', activation='relu'))
model2.add(MaxPooling2D(pool_size=(2, 2), padding='same'))
model2.add(Conv2D(filters=16, kernel_size=(3, 3), input_shape=(16, 16, 1),
padding='same', activation='relu'))
model2.add(MaxPooling2D(pool_size=(2, 2), padding='same'))
<|reserved_special_token_0|>
print('model2两层卷积后的输出形状:', model2.output_shape)
model2.add(Reshape((64, 16)))
model2.add(LSTM(32, return_sequences=True))
model2.add(Flatten())
<|reserved_special_token_0|>
model.summary()
print('model.outputs:', model.output.shape)
<|reserved_special_token_0|>
print(model.summary())
plot_model(model, to_file='model/1D2DLSTM_cross.png')
<|reserved_special_token_0|>
model.compile(loss='categorical_crossentropy', optimizer=adam, metrics=[
'accuracy'])
model.save('model/1D2DLSTM_cross.h5')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def merge_model(model_1, model_2):
"""
keras将两个独立的模型融合起来
:param model_1:
:param model_2:
:return:
"""
inp1 = model_1.input
inp2 = model_2.input
r1 = model_1.output
r2 = model_2.output
x = keras.layers.Concatenate(axis=1)([r1, r2])
model = Model(inputs=[inp1, inp2], outputs=x)
return model
def addLayers_model(model):
"""
修改模型(模型加层)
采用 keras 的 Concatenate 进行特征融合之后,模型加层的 add 将无效,所以采用这种方案进行加层
:param model: 待扩层的模型
:return:
"""
origin_model = model
for layer in origin_model.layers:
layer.trainable = False
inp = origin_model.input
x = origin_model.output
den = Dense(512, name='fine_dense')(x)
l = Dropout(0.5)(den)
result = Dense(10, activation='softmax')(l)
model = Model(input=inp, outputs=result)
return model
input_shape_1D = 1024, 1
input_shape_2D = 32, 32, 1
model1 = Sequential()
model1.add(Conv1D(filters=8, kernel_size=3, input_shape=input_shape_1D,
padding='same', activation='relu'))
model1.add(MaxPooling1D(pool_size=2, padding='same'))
model1.add(Conv1D(filters=16, kernel_size=3, input_shape=(1, 512), padding=
'same', activation='relu'))
model1.add(MaxPooling1D(pool_size=2, padding='same'))
<|reserved_special_token_0|>
model1.add(LSTM(32, return_sequences=True))
model1.add(Flatten())
model2 = Sequential()
model2.add(Conv2D(filters=8, kernel_size=(3, 3), input_shape=input_shape_2D,
padding='same', activation='relu'))
model2.add(MaxPooling2D(pool_size=(2, 2), padding='same'))
model2.add(Conv2D(filters=16, kernel_size=(3, 3), input_shape=(16, 16, 1),
padding='same', activation='relu'))
model2.add(MaxPooling2D(pool_size=(2, 2), padding='same'))
<|reserved_special_token_0|>
print('model2两层卷积后的输出形状:', model2.output_shape)
model2.add(Reshape((64, 16)))
model2.add(LSTM(32, return_sequences=True))
model2.add(Flatten())
model = merge_model(model1, model2)
model.summary()
print('model.outputs:', model.output.shape)
model = addLayers_model(model)
print(model.summary())
plot_model(model, to_file='model/1D2DLSTM_cross.png')
adam = keras.optimizers.Adam()
model.compile(loss='categorical_crossentropy', optimizer=adam, metrics=[
'accuracy'])
model.save('model/1D2DLSTM_cross.h5')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
import keras
from keras.models import Sequential
from keras.layers import Conv1D, Conv2D, MaxPooling1D, MaxPooling2D, Flatten, Dense, Dropout, LSTM, Reshape
from keras import Model
from keras.utils import plot_model
def merge_model(model_1, model_2):
"""
keras将两个独立的模型融合起来
:param model_1:
:param model_2:
:return:
"""
inp1 = model_1.input
inp2 = model_2.input
r1 = model_1.output
r2 = model_2.output
x = keras.layers.Concatenate(axis=1)([r1, r2])
model = Model(inputs=[inp1, inp2], outputs=x)
return model
def addLayers_model(model):
"""
修改模型(模型加层)
采用 keras 的 Concatenate 进行特征融合之后,模型加层的 add 将无效,所以采用这种方案进行加层
:param model: 待扩层的模型
:return:
"""
origin_model = model
for layer in origin_model.layers:
layer.trainable = False
inp = origin_model.input
x = origin_model.output
den = Dense(512, name='fine_dense')(x)
l = Dropout(0.5)(den)
result = Dense(10, activation='softmax')(l)
model = Model(input=inp, outputs=result)
return model
input_shape_1D = 1024, 1
input_shape_2D = 32, 32, 1
model1 = Sequential()
model1.add(Conv1D(filters=8, kernel_size=3, input_shape=input_shape_1D,
padding='same', activation='relu'))
model1.add(MaxPooling1D(pool_size=2, padding='same'))
model1.add(Conv1D(filters=16, kernel_size=3, input_shape=(1, 512), padding=
'same', activation='relu'))
model1.add(MaxPooling1D(pool_size=2, padding='same'))
<|reserved_special_token_0|>
model1.add(LSTM(32, return_sequences=True))
model1.add(Flatten())
model2 = Sequential()
model2.add(Conv2D(filters=8, kernel_size=(3, 3), input_shape=input_shape_2D,
padding='same', activation='relu'))
model2.add(MaxPooling2D(pool_size=(2, 2), padding='same'))
model2.add(Conv2D(filters=16, kernel_size=(3, 3), input_shape=(16, 16, 1),
padding='same', activation='relu'))
model2.add(MaxPooling2D(pool_size=(2, 2), padding='same'))
<|reserved_special_token_0|>
print('model2两层卷积后的输出形状:', model2.output_shape)
model2.add(Reshape((64, 16)))
model2.add(LSTM(32, return_sequences=True))
model2.add(Flatten())
model = merge_model(model1, model2)
model.summary()
print('model.outputs:', model.output.shape)
model = addLayers_model(model)
print(model.summary())
plot_model(model, to_file='model/1D2DLSTM_cross.png')
adam = keras.optimizers.Adam()
model.compile(loss='categorical_crossentropy', optimizer=adam, metrics=[
'accuracy'])
model.save('model/1D2DLSTM_cross.h5')
<|reserved_special_token_1|>
#!/usr/bin/env python
# encoding: utf-8
'''
1D2DCNN抽取特征,LSTM后提取特征,最后将提取的特征进行拼接,CNN与LSTM是交叉在一起的
'''
# 导入相关的包
import keras
# 导入相关层的结构
from keras.models import Sequential
from keras.layers import Conv1D, Conv2D, MaxPooling1D, MaxPooling2D, Flatten, Dense, Dropout,LSTM,Reshape
from keras import Model
# 可视化神经网络
from keras.utils import plot_model
def merge_model(model_1, model_2):
    """Fuse two independent Keras models into a single model.

    The two models' outputs are concatenated along axis 1 and a new
    functional ``Model`` is built whose inputs are both models' inputs.

    :param model_1: first Keras model (its input becomes the first input)
    :param model_2: second Keras model (its input becomes the second input)
    :return: merged ``Model`` producing the concatenated features
    """
    # Pre-trained weights for either branch could be loaded here before
    # fusing, e.g. model_1.load_weights('model_1_weight.h5') (transfer
    # learning).
    fused = keras.layers.Concatenate(axis=1)(
        [model_1.output, model_2.output])
    return Model(inputs=[model_1.input, model_2.input], outputs=fused)
def addLayers_model(model):
    """Attach a trainable classification head to a fused model.

    After feature fusion with keras ``Concatenate`` the sequential
    ``add`` API is no longer available, so new layers are attached with
    the functional API instead.  All existing layers are frozen so that
    only the new head (Dense 512 -> Dropout 0.5 -> Dense 10 softmax) is
    trained.

    :param model: the model to extend
    :return: new ``Model`` ending in a 10-way softmax classifier
    """
    origin_model = model
    for layer in origin_model.layers:
        layer.trainable = False  # freeze the pre-trained layers

    inp = origin_model.input
    x = origin_model.output
    den = Dense(512, name="fine_dense")(x)
    l = Dropout(0.5)(den)
    result = Dense(10, activation="softmax")(l)
    # Fix: use the ``inputs=`` keyword -- the old ``input=`` alias is
    # deprecated/removed in modern Keras and was inconsistent with the
    # ``inputs=`` usage in merge_model above.
    model = Model(inputs=inp, outputs=result)
    return model
input_shape_1D = (1024, 1)
input_shape_2D = (32, 32, 1)
# 构建模型
# 网络结构(卷积层:relu - 池化层 - 卷积层 - 池化层 - Flatten - 汇聚层 - 全连接层 - Dropout - softmax)
# ====================1、 1D部分 ==============================
model1 = Sequential()
# Conv1D:8 @ 1*1024。8个过滤器(卷积核),卷积核大小设置为3
model1.add(Conv1D(filters=8,
kernel_size=(3),
input_shape=input_shape_1D,
padding='same',
activation='relu'))
# MaxPooling1D:8 @ 1*512。
model1.add(MaxPooling1D(pool_size=(2), padding='same'))
# Conv1D:16 @ 1*512。16个过滤器,大小设置为3
model1.add(Conv1D(filters=16,
kernel_size=(3),
input_shape=(1, 512),
padding='same',
activation='relu'))
# MaxPooling1D:16 @ 1*256。
model1.add(MaxPooling1D(pool_size=(2), padding='same'))
'''
# Conv1D: 16 @ 1*256 。16个过滤器,大小设置为3
model1.add(Conv1D(filters=16,
kernel_size=(3),
input_shape=(1, 512),
padding='same',
activation='relu'))
# MaxPooling1D:16 @ 1*128。
model1.add(MaxPooling1D(pool_size=(2), padding='same'))
'''
model1.add(LSTM(32,return_sequences=True))
model1.add(Flatten()) # 压平:将输出压平为1维
# =============================================================
# ============ ======== 2、 2D部分 ============================
model2 = Sequential()
# Conv2D:8 @ 32*32。8个过滤器(卷积核),卷积核大小设置为3*3
model2.add(Conv2D(filters=8,
kernel_size=(3, 3),
input_shape=input_shape_2D,
padding='same',
activation='relu'))
# MaxPooling2D:8 @ 16*16。
model2.add(MaxPooling2D(pool_size=(2, 2), padding='same'))
# Conv2D:16 @ 16*16。16个过滤器,卷积核大小设置为3*3
model2.add(Conv2D(filters=16,
kernel_size=(3, 3),
input_shape=(16, 16, 1),
padding='same',
activation='relu'))
# MaxPooling2D:16 @ 8*8。
model2.add(MaxPooling2D(pool_size=(2, 2), padding='same'))
'''
# Conv2D:16 @ 8*8。16个过滤器,卷积核大小设置为3*3
model2.add(Conv2D(filters=16,
kernel_size=(3, 3),
input_shape=(8, 8, 1),
padding='same',
activation='relu'))
# MaxPooling2D:16 @ 4*4。
model2.add(MaxPooling2D(pool_size=(2, 2), padding='same'))
'''
print("model2两层卷积后的输出形状:",model2.output_shape) # (None,4,4,16)
model2.add(Reshape((64,16))) #(None,16,16)
model2.add(LSTM(32,return_sequences=True))
model2.add(Flatten())
# =============================================================
# ==================== 3. merge layer =========================
# Fuse the features of the two branches.
model = merge_model(model1, model2)
model.summary()
# =============================================================
print("model.outputs:",model.output.shape)
# ============= 4. dense head: FC, dropout, classifier ========
model = addLayers_model(model)
print(model.summary())
plot_model(model, to_file='model/1D2DLSTM_cross.png')
# =============================================================
# ==================== 5. training configuration ==============
# Adam optimizer; an initial lr of 0.1 with ReduceLROnPlateau decay can
# be configured via the callbacks passed to model.fit.
# adam = keras.optimizers.Adam(lr=0.1)
adam = keras.optimizers.Adam()
model.compile(loss='categorical_crossentropy',
              optimizer=adam,
              metrics=['accuracy'])
# =============================================================
# Save the model to disk.
model.save('model/1D2DLSTM_cross.h5')
|
flexible
|
{
"blob_id": "cce1b6f8e4b3f78adfa2243fe49b4994d35c5a38",
"index": 9898,
"step-1": "<mask token>\n\n\ndef merge_model(model_1, model_2):\n \"\"\"\n keras将两个独立的模型融合起来\n :param model_1:\n :param model_2:\n :return:\n \"\"\"\n inp1 = model_1.input\n inp2 = model_2.input\n r1 = model_1.output\n r2 = model_2.output\n x = keras.layers.Concatenate(axis=1)([r1, r2])\n model = Model(inputs=[inp1, inp2], outputs=x)\n return model\n\n\ndef addLayers_model(model):\n \"\"\"\n 修改模型(模型加层)\n 采用 keras 的 Concatenate 进行特征融合之后,模型加层的 add 将无效,所以采用这种方案进行加层\n :param model: 待扩层的模型\n :return:\n \"\"\"\n origin_model = model\n for layer in origin_model.layers:\n layer.trainable = False\n inp = origin_model.input\n x = origin_model.output\n den = Dense(512, name='fine_dense')(x)\n l = Dropout(0.5)(den)\n result = Dense(10, activation='softmax')(l)\n model = Model(input=inp, outputs=result)\n return model\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef merge_model(model_1, model_2):\n \"\"\"\n keras将两个独立的模型融合起来\n :param model_1:\n :param model_2:\n :return:\n \"\"\"\n inp1 = model_1.input\n inp2 = model_2.input\n r1 = model_1.output\n r2 = model_2.output\n x = keras.layers.Concatenate(axis=1)([r1, r2])\n model = Model(inputs=[inp1, inp2], outputs=x)\n return model\n\n\ndef addLayers_model(model):\n \"\"\"\n 修改模型(模型加层)\n 采用 keras 的 Concatenate 进行特征融合之后,模型加层的 add 将无效,所以采用这种方案进行加层\n :param model: 待扩层的模型\n :return:\n \"\"\"\n origin_model = model\n for layer in origin_model.layers:\n layer.trainable = False\n inp = origin_model.input\n x = origin_model.output\n den = Dense(512, name='fine_dense')(x)\n l = Dropout(0.5)(den)\n result = Dense(10, activation='softmax')(l)\n model = Model(input=inp, outputs=result)\n return model\n\n\n<mask token>\nmodel1.add(Conv1D(filters=8, kernel_size=3, input_shape=input_shape_1D,\n padding='same', activation='relu'))\nmodel1.add(MaxPooling1D(pool_size=2, padding='same'))\nmodel1.add(Conv1D(filters=16, kernel_size=3, input_shape=(1, 512), padding=\n 'same', activation='relu'))\nmodel1.add(MaxPooling1D(pool_size=2, padding='same'))\n<mask token>\nmodel1.add(LSTM(32, return_sequences=True))\nmodel1.add(Flatten())\n<mask token>\nmodel2.add(Conv2D(filters=8, kernel_size=(3, 3), input_shape=input_shape_2D,\n padding='same', activation='relu'))\nmodel2.add(MaxPooling2D(pool_size=(2, 2), padding='same'))\nmodel2.add(Conv2D(filters=16, kernel_size=(3, 3), input_shape=(16, 16, 1),\n padding='same', activation='relu'))\nmodel2.add(MaxPooling2D(pool_size=(2, 2), padding='same'))\n<mask token>\nprint('model2两层卷积后的输出形状:', model2.output_shape)\nmodel2.add(Reshape((64, 16)))\nmodel2.add(LSTM(32, return_sequences=True))\nmodel2.add(Flatten())\n<mask token>\nmodel.summary()\nprint('model.outputs:', model.output.shape)\n<mask token>\nprint(model.summary())\nplot_model(model, to_file='model/1D2DLSTM_cross.png')\n<mask 
token>\nmodel.compile(loss='categorical_crossentropy', optimizer=adam, metrics=[\n 'accuracy'])\nmodel.save('model/1D2DLSTM_cross.h5')\n",
"step-3": "<mask token>\n\n\ndef merge_model(model_1, model_2):\n \"\"\"\n keras将两个独立的模型融合起来\n :param model_1:\n :param model_2:\n :return:\n \"\"\"\n inp1 = model_1.input\n inp2 = model_2.input\n r1 = model_1.output\n r2 = model_2.output\n x = keras.layers.Concatenate(axis=1)([r1, r2])\n model = Model(inputs=[inp1, inp2], outputs=x)\n return model\n\n\ndef addLayers_model(model):\n \"\"\"\n 修改模型(模型加层)\n 采用 keras 的 Concatenate 进行特征融合之后,模型加层的 add 将无效,所以采用这种方案进行加层\n :param model: 待扩层的模型\n :return:\n \"\"\"\n origin_model = model\n for layer in origin_model.layers:\n layer.trainable = False\n inp = origin_model.input\n x = origin_model.output\n den = Dense(512, name='fine_dense')(x)\n l = Dropout(0.5)(den)\n result = Dense(10, activation='softmax')(l)\n model = Model(input=inp, outputs=result)\n return model\n\n\ninput_shape_1D = 1024, 1\ninput_shape_2D = 32, 32, 1\nmodel1 = Sequential()\nmodel1.add(Conv1D(filters=8, kernel_size=3, input_shape=input_shape_1D,\n padding='same', activation='relu'))\nmodel1.add(MaxPooling1D(pool_size=2, padding='same'))\nmodel1.add(Conv1D(filters=16, kernel_size=3, input_shape=(1, 512), padding=\n 'same', activation='relu'))\nmodel1.add(MaxPooling1D(pool_size=2, padding='same'))\n<mask token>\nmodel1.add(LSTM(32, return_sequences=True))\nmodel1.add(Flatten())\nmodel2 = Sequential()\nmodel2.add(Conv2D(filters=8, kernel_size=(3, 3), input_shape=input_shape_2D,\n padding='same', activation='relu'))\nmodel2.add(MaxPooling2D(pool_size=(2, 2), padding='same'))\nmodel2.add(Conv2D(filters=16, kernel_size=(3, 3), input_shape=(16, 16, 1),\n padding='same', activation='relu'))\nmodel2.add(MaxPooling2D(pool_size=(2, 2), padding='same'))\n<mask token>\nprint('model2两层卷积后的输出形状:', model2.output_shape)\nmodel2.add(Reshape((64, 16)))\nmodel2.add(LSTM(32, return_sequences=True))\nmodel2.add(Flatten())\nmodel = merge_model(model1, model2)\nmodel.summary()\nprint('model.outputs:', model.output.shape)\nmodel = 
addLayers_model(model)\nprint(model.summary())\nplot_model(model, to_file='model/1D2DLSTM_cross.png')\nadam = keras.optimizers.Adam()\nmodel.compile(loss='categorical_crossentropy', optimizer=adam, metrics=[\n 'accuracy'])\nmodel.save('model/1D2DLSTM_cross.h5')\n",
"step-4": "<mask token>\nimport keras\nfrom keras.models import Sequential\nfrom keras.layers import Conv1D, Conv2D, MaxPooling1D, MaxPooling2D, Flatten, Dense, Dropout, LSTM, Reshape\nfrom keras import Model\nfrom keras.utils import plot_model\n\n\ndef merge_model(model_1, model_2):\n \"\"\"\n keras将两个独立的模型融合起来\n :param model_1:\n :param model_2:\n :return:\n \"\"\"\n inp1 = model_1.input\n inp2 = model_2.input\n r1 = model_1.output\n r2 = model_2.output\n x = keras.layers.Concatenate(axis=1)([r1, r2])\n model = Model(inputs=[inp1, inp2], outputs=x)\n return model\n\n\ndef addLayers_model(model):\n \"\"\"\n 修改模型(模型加层)\n 采用 keras 的 Concatenate 进行特征融合之后,模型加层的 add 将无效,所以采用这种方案进行加层\n :param model: 待扩层的模型\n :return:\n \"\"\"\n origin_model = model\n for layer in origin_model.layers:\n layer.trainable = False\n inp = origin_model.input\n x = origin_model.output\n den = Dense(512, name='fine_dense')(x)\n l = Dropout(0.5)(den)\n result = Dense(10, activation='softmax')(l)\n model = Model(input=inp, outputs=result)\n return model\n\n\ninput_shape_1D = 1024, 1\ninput_shape_2D = 32, 32, 1\nmodel1 = Sequential()\nmodel1.add(Conv1D(filters=8, kernel_size=3, input_shape=input_shape_1D,\n padding='same', activation='relu'))\nmodel1.add(MaxPooling1D(pool_size=2, padding='same'))\nmodel1.add(Conv1D(filters=16, kernel_size=3, input_shape=(1, 512), padding=\n 'same', activation='relu'))\nmodel1.add(MaxPooling1D(pool_size=2, padding='same'))\n<mask token>\nmodel1.add(LSTM(32, return_sequences=True))\nmodel1.add(Flatten())\nmodel2 = Sequential()\nmodel2.add(Conv2D(filters=8, kernel_size=(3, 3), input_shape=input_shape_2D,\n padding='same', activation='relu'))\nmodel2.add(MaxPooling2D(pool_size=(2, 2), padding='same'))\nmodel2.add(Conv2D(filters=16, kernel_size=(3, 3), input_shape=(16, 16, 1),\n padding='same', activation='relu'))\nmodel2.add(MaxPooling2D(pool_size=(2, 2), padding='same'))\n<mask token>\nprint('model2两层卷积后的输出形状:', model2.output_shape)\nmodel2.add(Reshape((64, 
16)))\nmodel2.add(LSTM(32, return_sequences=True))\nmodel2.add(Flatten())\nmodel = merge_model(model1, model2)\nmodel.summary()\nprint('model.outputs:', model.output.shape)\nmodel = addLayers_model(model)\nprint(model.summary())\nplot_model(model, to_file='model/1D2DLSTM_cross.png')\nadam = keras.optimizers.Adam()\nmodel.compile(loss='categorical_crossentropy', optimizer=adam, metrics=[\n 'accuracy'])\nmodel.save('model/1D2DLSTM_cross.h5')\n",
"step-5": "#!/usr/bin/env python\n# encoding: utf-8\n'''\n 1D2DCNN抽取特征,LSTM后提取特征,最后将提取的特征进行拼接,CNN与LSTM是交叉在一起的\n'''\n\n# 导入相关的包\nimport keras\n\n# 导入相关层的结构\nfrom keras.models import Sequential\nfrom keras.layers import Conv1D, Conv2D, MaxPooling1D, MaxPooling2D, Flatten, Dense, Dropout,LSTM,Reshape\nfrom keras import Model\n\n# 可视化神经网络\nfrom keras.utils import plot_model\n\n\ndef merge_model(model_1, model_2):\n '''\n keras将两个独立的模型融合起来\n :param model_1:\n :param model_2:\n :return:\n '''\n\n # model_1.load_weights('model_1_weight.h5')#这里可以加载各自权重\n # model_2.load_weights('model_2_weight.h5')#可以是预训练好的模型权重(迁移学习)\n\n inp1 = model_1.input # 第一个模型的参数\n inp2 = model_2.input # 第二个模型的参数\n r1 = model_1.output\n r2 = model_2.output\n x = keras.layers.Concatenate(axis=1)([r1, r2])\n model = Model(inputs=[inp1, inp2], outputs=x)\n return model\n\n\ndef addLayers_model(model):\n '''\n 修改模型(模型加层)\n 采用 keras 的 Concatenate 进行特征融合之后,模型加层的 add 将无效,所以采用这种方案进行加层\n :param model: 待扩层的模型\n :return:\n '''\n origin_model = model\n for layer in origin_model.layers:\n layer.trainable = False # 原来的不训练,冻结网络层\n\n inp = origin_model.input\n x = origin_model.output\n den = Dense(512, name=\"fine_dense\")(x)\n l = Dropout(0.5)(den)\n result = Dense(10, activation=\"softmax\")(l)\n model = Model(input=inp, outputs=result)\n return model\n\n\ninput_shape_1D = (1024, 1)\ninput_shape_2D = (32, 32, 1)\n\n# 构建模型\n# 网络结构(卷积层:relu - 池化层 - 卷积层 - 池化层 - Flatten - 汇聚层 - 全连接层 - Dropout - softmax)\n# ====================1、 1D部分 ==============================\nmodel1 = Sequential()\n# Conv1D:8 @ 1*1024。8个过滤器(卷积核),卷积核大小设置为3\nmodel1.add(Conv1D(filters=8,\n kernel_size=(3),\n input_shape=input_shape_1D,\n padding='same',\n activation='relu'))\n\n# MaxPooling1D:8 @ 1*512。\nmodel1.add(MaxPooling1D(pool_size=(2), padding='same'))\n\n# Conv1D:16 @ 1*512。16个过滤器,大小设置为3\nmodel1.add(Conv1D(filters=16,\n kernel_size=(3),\n input_shape=(1, 512),\n padding='same',\n activation='relu'))\n\n# MaxPooling1D:16 @ 
1*256。\nmodel1.add(MaxPooling1D(pool_size=(2), padding='same'))\n'''\n# Conv1D: 16 @ 1*256 。16个过滤器,大小设置为3\nmodel1.add(Conv1D(filters=16,\n kernel_size=(3),\n input_shape=(1, 512),\n padding='same',\n activation='relu'))\n\n# MaxPooling1D:16 @ 1*128。\nmodel1.add(MaxPooling1D(pool_size=(2), padding='same'))\n'''\n\nmodel1.add(LSTM(32,return_sequences=True))\nmodel1.add(Flatten()) # 压平:将输出压平为1维\n\n# =============================================================\n\n# ============ ======== 2、 2D部分 ============================\nmodel2 = Sequential()\n# Conv2D:8 @ 32*32。8个过滤器(卷积核),卷积核大小设置为3*3\nmodel2.add(Conv2D(filters=8,\n kernel_size=(3, 3),\n input_shape=input_shape_2D,\n padding='same',\n activation='relu'))\n\n# MaxPooling2D:8 @ 16*16。\nmodel2.add(MaxPooling2D(pool_size=(2, 2), padding='same'))\n\n# Conv2D:16 @ 16*16。16个过滤器,卷积核大小设置为3*3\nmodel2.add(Conv2D(filters=16,\n kernel_size=(3, 3),\n input_shape=(16, 16, 1),\n padding='same',\n activation='relu'))\n\n# MaxPooling2D:16 @ 8*8。\nmodel2.add(MaxPooling2D(pool_size=(2, 2), padding='same'))\n\n'''\n# Conv2D:16 @ 8*8。16个过滤器,卷积核大小设置为3*3\nmodel2.add(Conv2D(filters=16,\n kernel_size=(3, 3),\n input_shape=(8, 8, 1),\n padding='same',\n activation='relu'))\n\n# MaxPooling2D:16 @ 4*4。\nmodel2.add(MaxPooling2D(pool_size=(2, 2), padding='same'))\n'''\nprint(\"model2两层卷积后的输出形状:\",model2.output_shape) # (None,4,4,16)\nmodel2.add(Reshape((64,16))) #(None,16,16)\nmodel2.add(LSTM(32,return_sequences=True))\nmodel2.add(Flatten())\n# =============================================================\n\n\n# ==================== 3、汇聚层 ===============================\n# 融合部分\nmodel = merge_model(model1, model2)\nmodel.summary()\n# =============================================================\n\nprint(\"model.outputs:\",model.output.shape)\n\n# ============= 4、 全连接层,dropout,分类层 ====================\nmodel = addLayers_model(model)\nprint(model.summary())\n\nplot_model(model, to_file='model/1D2DLSTM_cross.png')\n# 
=============================================================\n\n# ==================== 5、模型训练指标 ==========================\n# adam优化器, lr:初始学习率为0.1,学习率下降递减采用:ReduceLROnPlateau,在 model.fit 的回调函数中设置\n# adam = keras.optimizers.Adam(lr=0.1)\nadam = keras.optimizers.Adam()\nmodel.compile(loss='categorical_crossentropy',\n optimizer=adam,\n metrics=['accuracy'])\n# =============================================================\n\n# 保存模型结构\nmodel.save('model/1D2DLSTM_cross.h5')\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def vol_shell(r1, r2):
a = abs(4 / 3 * math.pi * (r1 ** 3 - r2 ** 3))
return round(a, 3)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def vol_shell(r1, r2):
a = abs(4 / 3 * math.pi * (r1 ** 3 - r2 ** 3))
return round(a, 3)
print(vol_shell(3, 3))
<|reserved_special_token_1|>
import math
def vol_shell(r1, r2):
a = abs(4 / 3 * math.pi * (r1 ** 3 - r2 ** 3))
return round(a, 3)
print(vol_shell(3, 3))
<|reserved_special_token_1|>
import math
def vol_shell(r1, r2):
a=abs((4/3)*math.pi*((r1**3)-(r2**3)))
return round(a,3)
print(vol_shell(3,3))
|
flexible
|
{
"blob_id": "cd234911c1f990b8029dfa792d132847bf39a6aa",
"index": 445,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef vol_shell(r1, r2):\n a = abs(4 / 3 * math.pi * (r1 ** 3 - r2 ** 3))\n return round(a, 3)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef vol_shell(r1, r2):\n a = abs(4 / 3 * math.pi * (r1 ** 3 - r2 ** 3))\n return round(a, 3)\n\n\nprint(vol_shell(3, 3))\n",
"step-4": "import math\n\n\ndef vol_shell(r1, r2):\n a = abs(4 / 3 * math.pi * (r1 ** 3 - r2 ** 3))\n return round(a, 3)\n\n\nprint(vol_shell(3, 3))\n",
"step-5": "\nimport math\ndef vol_shell(r1, r2):\n a=abs((4/3)*math.pi*((r1**3)-(r2**3)))\n return round(a,3)\nprint(vol_shell(3,3))\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
#!/usr/bin/python3
"""
Test of Rectangle class
"""
from contextlib import redirect_stdout
import io
import unittest
from random import randrange
from models.base import Base
from models.rectangle import Rectangle
from models.square import Square
class TestRectangle(unittest.TestCase):
    """Unit tests covering the public behavior of the Rectangle class."""

    def setUp(self):
        """Reset Base's private object counter so ids are predictable."""
        Base._Base__nb_objects = 0

    def tearDown(self):
        """Nothing to clean up; present for symmetry with setUp."""
        pass

    def test_type(self):
        """A Rectangle instance reports Rectangle as its exact type."""
        rect = Rectangle(1, 2)
        self.assertTrue(type(rect) is Rectangle)

    def test_inheritance(self):
        """Rectangle is a subclass of Base."""
        self.assertTrue(issubclass(Rectangle, Base))

    def test_constructor_no_args(self):
        """Calling the constructor with no arguments raises TypeError."""
        with self.assertRaises(TypeError) as ctx:
            Rectangle()
        expected = ("__init__() missing 2 required positional arguments: "
                    "'width' and 'height'")
        self.assertEqual(str(ctx.exception), expected)

    def test_constructor_many_args(self):
        """Passing more positional arguments than accepted raises."""
        with self.assertRaises(TypeError) as ctx:
            Rectangle(1, 2, 3, 4, 5, 6)
        expected = ("__init__() takes from 3 to 6 positional arguments "
                    "but 7 were given")
        self.assertEqual(str(ctx.exception), expected)

    def test_constructor_one_args(self):
        """Omitting the mandatory height argument raises TypeError."""
        with self.assertRaises(TypeError) as ctx:
            Rectangle(1)
        expected = ("__init__() missing 1 required positional "
                    "argument: 'height'")
        self.assertEqual(str(ctx.exception), expected)

    def test_instantiation(self):
        """Valid construction sets private state; invalid values raise."""
        rect = Rectangle(10, 20)
        self.assertEqual(str(type(rect)),
                         "<class 'models.rectangle.Rectangle'>")
        self.assertTrue(isinstance(rect, Base))
        expected_state = {'_Rectangle__height': 20, '_Rectangle__width': 10,
                          '_Rectangle__x': 0, '_Rectangle__y': 0, 'id': 1}
        self.assertDictEqual(rect.__dict__, expected_state)
        # Non-integer constructor arguments raise a descriptive TypeError.
        type_cases = [
            (("1", 2), "width must be an integer"),
            ((1, "2"), "height must be an integer"),
            ((1, 2, "3"), "x must be an integer"),
            ((1, 2, 3, "4"), "y must be an integer"),
        ]
        for args, message in type_cases:
            with self.assertRaises(TypeError) as ctx:
                Rectangle(*args)
            self.assertEqual(str(ctx.exception), message)
        # Out-of-range values raise a descriptive ValueError.
        value_cases = [
            ((-1, 2), "width must be > 0"),
            ((1, -2), "height must be > 0"),
            ((0, 2), "width must be > 0"),
            ((1, 0), "height must be > 0"),
            ((1, 2, -3), "x must be >= 0"),
            ((1, 2, 3, -4), "y must be >= 0"),
        ]
        for args, message in value_cases:
            with self.assertRaises(ValueError) as ctx:
                Rectangle(*args)
            self.assertEqual(str(ctx.exception), message)

    def test_id_inherited(self):
        """Automatic id assignment uses the counter inherited from Base."""
        Base._Base__nb_objects = 98
        rect = Rectangle(2, 4)
        self.assertEqual(rect.id, 99)

    def test_validate_type(self):
        """Every size/position setter rejects non-integer values."""
        rect = Rectangle(1, 2)
        non_integers = (3.14, -1.1, float('inf'), float('-inf'), True,
                        "str", (2,), [4], {5}, {6: 7}, None)
        for name in ("x", "y", "width", "height"):
            expected = "{} must be an integer".format(name)
            for bad in non_integers:
                with self.assertRaises(TypeError) as ctx:
                    setattr(rect, name, bad)
                self.assertEqual(str(ctx.exception), expected)

    def test_validate_value_negative_gt(self):
        """width/height setters reject strictly negative values."""
        rect = Rectangle(1, 2)
        for name in ("width", "height"):
            expected = "{} must be > 0".format(name)
            with self.assertRaises(ValueError) as ctx:
                setattr(rect, name, -(randrange(10) + 1))
            self.assertEqual(str(ctx.exception), expected)

    def test_validate_value_negative_ge(self):
        """x/y setters reject negative values (zero is allowed)."""
        rect = Rectangle(1, 2)
        for name in ("x", "y"):
            expected = "{} must be >= 0".format(name)
            with self.assertRaises(ValueError) as ctx:
                setattr(rect, name, -(randrange(10) + 1))
            self.assertEqual(str(ctx.exception), expected)

    def test_validate_value_zero(self):
        """width/height setters reject zero."""
        rect = Rectangle(1, 2)
        for name in ("width", "height"):
            expected = "{} must be > 0".format(name)
            with self.assertRaises(ValueError) as ctx:
                setattr(rect, name, 0)
            self.assertEqual(str(ctx.exception), expected)

    def test_property(self):
        """Setters and getters round-trip arbitrary positive values."""
        rect = Rectangle(1, 2)
        for name in ("x", "y", "width", "height"):
            value = randrange(10) + 1
            setattr(rect, name, value)
            self.assertEqual(getattr(rect, name), value)

    def test_property_range_zero(self):
        """x and y accept zero as a valid value."""
        rect = Rectangle(1, 2)
        rect.x = 0
        rect.y = 0
        self.assertEqual(rect.x, 0)
        self.assertEqual(rect.y, 0)

    def test_area_no_args(self):
        """area() called on the class without an instance raises."""
        rect = Rectangle(5, 6)
        with self.assertRaises(TypeError) as ctx:
            Rectangle.area()
        expected = "area() missing 1 required positional argument: 'self'"
        self.assertEqual(str(ctx.exception), expected)

    def test_area(self):
        """area() returns width * height after any kind of update."""
        rect = Rectangle(5, 6)
        self.assertEqual(rect.area(), 30)
        width = randrange(10) + 1
        height = randrange(10) + 1
        rect.width = width
        rect.height = height
        self.assertEqual(rect.area(), width * height)
        width = randrange(10) + 1
        height = randrange(10) + 1
        rect = Rectangle(width, height, 7, 8, 9)
        self.assertEqual(rect.area(), width * height)
        width = randrange(10) + 1
        height = randrange(10) + 1
        rect = Rectangle(width, height, y=7, x=8, id=9)
        self.assertEqual(rect.area(), width * height)

    def test_display_no_args(self):
        """display() called on the class without an instance raises."""
        rect = Rectangle(9, 8)
        with self.assertRaises(TypeError) as ctx:
            Rectangle.display()
        expected = "display() missing 1 required positional argument: 'self'"
        self.assertEqual(str(ctx.exception), expected)

    def test_display_simple(self):
        """display() prints '#' rows, honoring the x/y offsets."""
        rect = Rectangle(1, 1)
        captured = io.StringIO()
        with redirect_stdout(captured):
            rect.display()
        self.assertEqual(captured.getvalue(), "#\n")
        rect.width = 2
        rect.height = 2
        captured = io.StringIO()
        with redirect_stdout(captured):
            rect.display()
        self.assertEqual(captured.getvalue(), "##\n##\n")
        rect = Rectangle(2, 2, 2, 2)
        captured = io.StringIO()
        with redirect_stdout(captured):
            rect.display()
        self.assertEqual(captured.getvalue(), "\n\n  ##\n  ##\n")

    def test_K_str_no_args(self):
        """__str__() called on the class without an instance raises."""
        rect = Rectangle(5, 2)
        with self.assertRaises(TypeError) as ctx:
            Rectangle.__str__()
        expected = "__str__() missing 1 required positional argument: 'self'"
        self.assertEqual(str(ctx.exception), expected)

    def test_K_str(self):
        """__str__() formats as [Rectangle] (id) x/y - width/height."""
        rect = Rectangle(5, 2)
        self.assertEqual(str(rect), '[Rectangle] (1) 0/0 - 5/2')
        rect = Rectangle(1, 1, 1)
        self.assertEqual(str(rect), '[Rectangle] (2) 1/0 - 1/1')
        rect = Rectangle(3, 4, 5, 6)
        self.assertEqual(str(rect), '[Rectangle] (3) 5/6 - 3/4')
        Base._Base__nb_objects = 0
        r1 = Rectangle(4, 6, 2, 1, 12)
        self.assertEqual(str(r1), "[Rectangle] (12) 2/1 - 4/6")
        r2 = Rectangle(5, 5, 1)
        self.assertEqual(str(r2), "[Rectangle] (1) 1/0 - 5/5")

    def test_update_no_args(self):
        """update() without self raises; update() with no values is a no-op."""
        rect = Rectangle(5, 2)
        with self.assertRaises(TypeError) as ctx:
            Rectangle.update()
        expected = "update() missing 1 required positional argument: 'self'"
        self.assertEqual(str(ctx.exception), expected)
        snapshot = rect.__dict__.copy()
        rect.update()
        self.assertEqual(rect.__dict__, snapshot)

    def test_update_args(self):
        """update() assigns id/width/height/x/y from positional arguments."""
        rect = Rectangle(5, 2)
        expected_state = rect.__dict__.copy()
        rect.update(10)
        expected_state["id"] = 10
        self.assertEqual(rect.__dict__, expected_state)
        rect.update(10, 5)
        expected_state["_Rectangle__width"] = 5
        self.assertEqual(rect.__dict__, expected_state)
        rect.update(10, 5, 17)
        expected_state["_Rectangle__height"] = 17
        self.assertEqual(rect.__dict__, expected_state)
        rect.update(10, 5, 17, 20)
        expected_state["_Rectangle__x"] = 20
        self.assertEqual(rect.__dict__, expected_state)
        rect.update(10, 5, 17, 20, 25)
        expected_state["_Rectangle__y"] = 25
        self.assertEqual(rect.__dict__, expected_state)

    def test_update_args_bad(self):
        """update() validates positional values like the constructor does."""
        rect = Rectangle(5, 2)
        expected_state = rect.__dict__.copy()
        rect.update(10)
        expected_state["id"] = 10
        self.assertEqual(rect.__dict__, expected_state)
        bad_calls = [
            ((10, -5), "width must be > 0"),
            ((10, 5, -17), "height must be > 0"),
            ((10, 5, 17, -20), "x must be >= 0"),
            ((10, 5, 17, 20, -25), "y must be >= 0"),
        ]
        for args, message in bad_calls:
            with self.assertRaises(ValueError) as ctx:
                rect.update(*args)
            self.assertEqual(str(ctx.exception), message)

    def test_update_kwargs(self):
        """update() assigns each attribute from a single keyword argument."""
        rect = Rectangle(5, 2)
        expected_state = rect.__dict__.copy()
        rect.update(id=10)
        expected_state["id"] = 10
        self.assertEqual(rect.__dict__, expected_state)
        rect.update(width=5)
        expected_state["_Rectangle__width"] = 5
        self.assertEqual(rect.__dict__, expected_state)
        rect.update(height=17)
        expected_state["_Rectangle__height"] = 17
        self.assertEqual(rect.__dict__, expected_state)
        rect.update(x=20)
        expected_state["_Rectangle__x"] = 20
        self.assertEqual(rect.__dict__, expected_state)
        rect.update(y=25)
        expected_state["_Rectangle__y"] = 25
        self.assertEqual(rect.__dict__, expected_state)

    def test_update_kwargs_2(self):
        """update() accepts multiple keywords in any order, and args."""
        rect = Rectangle(5, 2)
        expected_state = rect.__dict__.copy()
        rect.update(id=10)
        expected_state["id"] = 10
        self.assertEqual(rect.__dict__, expected_state)
        rect.update(id=10, width=5)
        expected_state["_Rectangle__width"] = 5
        self.assertEqual(rect.__dict__, expected_state)
        rect.update(id=10, width=5, height=17)
        expected_state["_Rectangle__height"] = 17
        self.assertEqual(rect.__dict__, expected_state)
        rect.update(id=10, width=5, height=17, x=20)
        expected_state["_Rectangle__x"] = 20
        self.assertEqual(rect.__dict__, expected_state)
        rect.update(id=10, width=5, height=17, x=20, y=25)
        expected_state["_Rectangle__y"] = 25
        self.assertEqual(rect.__dict__, expected_state)
        # Keyword order must not matter.
        rect.update(y=25, id=10, height=17, x=20, width=5)
        self.assertEqual(rect.__dict__, expected_state)
        Base._Base__nb_objects = 0
        r1 = Rectangle(10, 10, 10, 10)
        self.assertEqual(str(r1), "[Rectangle] (1) 10/10 - 10/10")
        r1.update(height=1)
        self.assertEqual(str(r1), "[Rectangle] (1) 10/10 - 10/1")
        r1.update(width=1, x=2)
        self.assertEqual(str(r1), "[Rectangle] (1) 2/10 - 1/1")
        r1.update(y=1, width=2, x=3, id=89)
        self.assertEqual(str(r1), "[Rectangle] (89) 3/1 - 2/1")
        r1.update(x=1, height=2, y=3, width=4)
        self.assertEqual(str(r1), "[Rectangle] (89) 1/3 - 4/2")
        Base._Base__nb_objects = 0
        r1 = Rectangle(10, 10, 10, 10)
        self.assertEqual(str(r1), "[Rectangle] (1) 10/10 - 10/10")
        r1.update(89)
        self.assertEqual(str(r1), "[Rectangle] (89) 10/10 - 10/10")
        r1.update(89, 2)
        self.assertEqual(str(r1), "[Rectangle] (89) 10/10 - 2/10")
        r1.update(89, 2, 3)
        self.assertEqual(str(r1), "[Rectangle] (89) 10/10 - 2/3")
        r1.update(89, 2, 3, 4)
        self.assertEqual(str(r1), "[Rectangle] (89) 4/10 - 2/3")
        r1.update(89, 2, 3, 4, 5)
        self.assertEqual(str(r1), "[Rectangle] (89) 4/5 - 2/3")

    def test_to_dictionary(self):
        """to_dictionary() exposes the public state and feeds update()."""
        with self.assertRaises(TypeError) as ctx:
            Rectangle.to_dictionary()
        expected = ("to_dictionary() missing 1 required positional "
                    "argument: 'self'")
        self.assertEqual(str(ctx.exception), expected)
        rect = Rectangle(1, 2)
        self.assertEqual(rect.to_dictionary(),
                         {'x': 0, 'y': 0, 'width': 1, 'id': 1, 'height': 2})
        rect = Rectangle(1, 2, 3, 4, 5)
        self.assertEqual(rect.to_dictionary(),
                         {'x': 3, 'y': 4, 'width': 1, 'id': 5, 'height': 2})
        rect.x = 10
        rect.y = 20
        rect.width = 30
        rect.height = 40
        self.assertEqual(rect.to_dictionary(),
                         {'x': 10, 'y': 20, 'width': 30, 'id': 5,
                          'height': 40})
        # A dictionary produced by one rectangle can configure another.
        r1 = Rectangle(10, 2, 1, 9)
        r2 = Rectangle(1, 1)
        r2.update(**r1.to_dictionary())
        self.assertEqual(str(r1), str(r2))
        self.assertNotEqual(r1, r2)
|
normal
|
{
"blob_id": "ca00091b7ebcb9ee45b77c919c458c75e3db5b1e",
"index": 4783,
"step-1": "<mask token>\n\n\nclass TestRectangle(unittest.TestCase):\n <mask token>\n\n def setUp(self):\n \"\"\" setUp \"\"\"\n Base._Base__nb_objects = 0\n\n def tearDown(self):\n \"\"\" tearDown destroys any existing objects and processes \"\"\"\n pass\n\n def test_type(self):\n \"\"\" Test type \"\"\"\n r1 = Rectangle(1, 2)\n self.assertTrue(type(r1) is Rectangle)\n\n def test_inheritance(self):\n \"\"\"Tests if Rectangle inherits Base.\"\"\"\n self.assertTrue(issubclass(Rectangle, Base))\n <mask token>\n <mask token>\n\n def test_constructor_one_args(self):\n \"\"\"Tests constructor signature.\"\"\"\n with self.assertRaises(TypeError) as e:\n r = Rectangle(1)\n s = \"__init__() missing 1 required positional argument: 'height'\"\n self.assertEqual(str(e.exception), s)\n\n def test_instantiation(self):\n \"\"\"Tests instantiation.\"\"\"\n r = Rectangle(10, 20)\n self.assertEqual(str(type(r)), \"<class 'models.rectangle.Rectangle'>\")\n self.assertTrue(isinstance(r, Base))\n d = {'_Rectangle__height': 20, '_Rectangle__width': 10,\n '_Rectangle__x': 0, '_Rectangle__y': 0, 'id': 1}\n self.assertDictEqual(r.__dict__, d)\n with self.assertRaises(TypeError) as e:\n r = Rectangle('1', 2)\n msg = 'width must be an integer'\n self.assertEqual(str(e.exception), msg)\n with self.assertRaises(TypeError) as e:\n r = Rectangle(1, '2')\n msg = 'height must be an integer'\n self.assertEqual(str(e.exception), msg)\n with self.assertRaises(TypeError) as e:\n r = Rectangle(1, 2, '3')\n msg = 'x must be an integer'\n self.assertEqual(str(e.exception), msg)\n with self.assertRaises(TypeError) as e:\n r = Rectangle(1, 2, 3, '4')\n msg = 'y must be an integer'\n self.assertEqual(str(e.exception), msg)\n with self.assertRaises(ValueError) as e:\n r = Rectangle(-1, 2)\n msg = 'width must be > 0'\n self.assertEqual(str(e.exception), msg)\n with self.assertRaises(ValueError) as e:\n r = Rectangle(1, -2)\n msg = 'height must be > 0'\n self.assertEqual(str(e.exception), msg)\n with 
self.assertRaises(ValueError) as e:\n r = Rectangle(0, 2)\n msg = 'width must be > 0'\n self.assertEqual(str(e.exception), msg)\n with self.assertRaises(ValueError) as e:\n r = Rectangle(1, 0)\n msg = 'height must be > 0'\n self.assertEqual(str(e.exception), msg)\n with self.assertRaises(ValueError) as e:\n r = Rectangle(1, 2, -3)\n msg = 'x must be >= 0'\n self.assertEqual(str(e.exception), msg)\n with self.assertRaises(ValueError) as e:\n r = Rectangle(1, 2, 3, -4)\n msg = 'y must be >= 0'\n self.assertEqual(str(e.exception), msg)\n\n def test_id_inherited(self):\n \"\"\"Tests if id is inherited from Base.\"\"\"\n Base._Base__nb_objects = 98\n r = Rectangle(2, 4)\n self.assertEqual(r.id, 99)\n\n def test_validate_type(self):\n \"\"\"Tests property validation.\"\"\"\n r = Rectangle(1, 2)\n attributes = ['x', 'y', 'width', 'height']\n t = 3.14, -1.1, float('inf'), float('-inf'), True, 'str', (2,), [4], {5\n }, {(6): 7}, None\n for attribute in attributes:\n s = '{} must be an integer'.format(attribute)\n for invalid_type in t:\n with self.assertRaises(TypeError) as e:\n setattr(r, attribute, invalid_type)\n self.assertEqual(str(e.exception), s)\n\n def test_validate_value_negative_gt(self):\n \"\"\"Tests property validation.\"\"\"\n r = Rectangle(1, 2)\n attributes = ['width', 'height']\n for attribute in attributes:\n s = '{} must be > 0'.format(attribute)\n with self.assertRaises(ValueError) as e:\n setattr(r, attribute, -(randrange(10) + 1))\n self.assertEqual(str(e.exception), s)\n\n def test_validate_value_negative_ge(self):\n \"\"\"Tests property validation.\"\"\"\n r = Rectangle(1, 2)\n attributes = ['x', 'y']\n for attribute in attributes:\n s = '{} must be >= 0'.format(attribute)\n with self.assertRaises(ValueError) as e:\n setattr(r, attribute, -(randrange(10) + 1))\n self.assertEqual(str(e.exception), s)\n\n def test_validate_value_zero(self):\n \"\"\"Tests property validation.\"\"\"\n r = Rectangle(1, 2)\n attributes = ['width', 'height']\n for 
attribute in attributes:\n s = '{} must be > 0'.format(attribute)\n with self.assertRaises(ValueError) as e:\n setattr(r, attribute, 0)\n self.assertEqual(str(e.exception), s)\n\n def test_property(self):\n \"\"\"Tests property setting/getting.\"\"\"\n r = Rectangle(1, 2)\n attributes = ['x', 'y', 'width', 'height']\n for attribute in attributes:\n n = randrange(10) + 1\n setattr(r, attribute, n)\n self.assertEqual(getattr(r, attribute), n)\n\n def test_property_range_zero(self):\n \"\"\"Tests property setting/getting.\"\"\"\n r = Rectangle(1, 2)\n r.x = 0\n r.y = 0\n self.assertEqual(r.x, 0)\n self.assertEqual(r.y, 0)\n\n def test_area_no_args(self):\n \"\"\"Tests area() method signature.\"\"\"\n r = Rectangle(5, 6)\n with self.assertRaises(TypeError) as e:\n Rectangle.area()\n s = \"area() missing 1 required positional argument: 'self'\"\n self.assertEqual(str(e.exception), s)\n <mask token>\n\n def test_display_no_args(self):\n \"\"\"Tests display() method signature.\"\"\"\n r = Rectangle(9, 8)\n with self.assertRaises(TypeError) as e:\n Rectangle.display()\n s = \"display() missing 1 required positional argument: 'self'\"\n self.assertEqual(str(e.exception), s)\n\n def test_display_simple(self):\n \"\"\"Tests display() method output.\"\"\"\n r = Rectangle(1, 1)\n f = io.StringIO()\n with redirect_stdout(f):\n r.display()\n s = '#\\n'\n self.assertEqual(f.getvalue(), s)\n r.width = 2\n r.height = 2\n f = io.StringIO()\n with redirect_stdout(f):\n r.display()\n s = '##\\n##\\n'\n self.assertEqual(f.getvalue(), s)\n r = Rectangle(2, 2, 2, 2)\n f = io.StringIO()\n with redirect_stdout(f):\n r.display()\n s = '\\n\\n ##\\n ##\\n'\n self.assertEqual(f.getvalue(), s)\n <mask token>\n\n def test_K_str(self):\n \"\"\"Tests __str__() method return.\"\"\"\n r = Rectangle(5, 2)\n s = '[Rectangle] (1) 0/0 - 5/2'\n self.assertEqual(str(r), s)\n r = Rectangle(1, 1, 1)\n s = '[Rectangle] (2) 1/0 - 1/1'\n self.assertEqual(str(r), s)\n r = Rectangle(3, 4, 5, 6)\n s = 
'[Rectangle] (3) 5/6 - 3/4'\n self.assertEqual(str(r), s)\n Base._Base__nb_objects = 0\n r1 = Rectangle(4, 6, 2, 1, 12)\n self.assertEqual(str(r1), '[Rectangle] (12) 2/1 - 4/6')\n r2 = Rectangle(5, 5, 1)\n self.assertEqual(str(r2), '[Rectangle] (1) 1/0 - 5/5')\n\n def test_update_no_args(self):\n \"\"\"Tests update() method \"\"\"\n r = Rectangle(5, 2)\n with self.assertRaises(TypeError) as e:\n Rectangle.update()\n s = \"update() missing 1 required positional argument: 'self'\"\n self.assertEqual(str(e.exception), s)\n d = r.__dict__.copy()\n r.update()\n self.assertEqual(r.__dict__, d)\n\n def test_update_args(self):\n \"\"\"Tests update() postional args.\"\"\"\n r = Rectangle(5, 2)\n d = r.__dict__.copy()\n r.update(10)\n d['id'] = 10\n self.assertEqual(r.__dict__, d)\n r.update(10, 5)\n d['_Rectangle__width'] = 5\n self.assertEqual(r.__dict__, d)\n r.update(10, 5, 17)\n d['_Rectangle__height'] = 17\n self.assertEqual(r.__dict__, d)\n r.update(10, 5, 17, 20)\n d['_Rectangle__x'] = 20\n self.assertEqual(r.__dict__, d)\n r.update(10, 5, 17, 20, 25)\n d['_Rectangle__y'] = 25\n self.assertEqual(r.__dict__, d)\n\n def test_update_args_bad(self):\n \"\"\"Tests update() positional arg bad values.\"\"\"\n r = Rectangle(5, 2)\n d = r.__dict__.copy()\n r.update(10)\n d['id'] = 10\n self.assertEqual(r.__dict__, d)\n with self.assertRaises(ValueError) as e:\n r.update(10, -5)\n s = 'width must be > 0'\n self.assertEqual(str(e.exception), s)\n with self.assertRaises(ValueError) as e:\n r.update(10, 5, -17)\n s = 'height must be > 0'\n self.assertEqual(str(e.exception), s)\n with self.assertRaises(ValueError) as e:\n r.update(10, 5, 17, -20)\n s = 'x must be >= 0'\n self.assertEqual(str(e.exception), s)\n with self.assertRaises(ValueError) as e:\n r.update(10, 5, 17, 20, -25)\n s = 'y must be >= 0'\n self.assertEqual(str(e.exception), s)\n\n def test_update_kwargs(self):\n \"\"\"Tests update() keyword args.\"\"\"\n r = Rectangle(5, 2)\n d = r.__dict__.copy()\n 
r.update(id=10)\n d['id'] = 10\n self.assertEqual(r.__dict__, d)\n r.update(width=5)\n d['_Rectangle__width'] = 5\n self.assertEqual(r.__dict__, d)\n r.update(height=17)\n d['_Rectangle__height'] = 17\n self.assertEqual(r.__dict__, d)\n r.update(x=20)\n d['_Rectangle__x'] = 20\n self.assertEqual(r.__dict__, d)\n r.update(y=25)\n d['_Rectangle__y'] = 25\n self.assertEqual(r.__dict__, d)\n <mask token>\n\n def test_to_dictionary(self):\n \"\"\"Tests to_dictionary() \"\"\"\n with self.assertRaises(TypeError) as e:\n Rectangle.to_dictionary()\n s = \"to_dictionary() missing 1 required positional argument: 'self'\"\n self.assertEqual(str(e.exception), s)\n r = Rectangle(1, 2)\n d = {'x': 0, 'y': 0, 'width': 1, 'id': 1, 'height': 2}\n self.assertEqual(r.to_dictionary(), d)\n r = Rectangle(1, 2, 3, 4, 5)\n d = {'x': 3, 'y': 4, 'width': 1, 'id': 5, 'height': 2}\n self.assertEqual(r.to_dictionary(), d)\n r.x = 10\n r.y = 20\n r.width = 30\n r.height = 40\n d = {'x': 10, 'y': 20, 'width': 30, 'id': 5, 'height': 40}\n self.assertEqual(r.to_dictionary(), d)\n r1 = Rectangle(10, 2, 1, 9)\n r1_dictionary = r1.to_dictionary()\n r2 = Rectangle(1, 1)\n r2.update(**r1_dictionary)\n self.assertEqual(str(r1), str(r2))\n self.assertNotEqual(r1, r2)\n",
"step-2": "<mask token>\n\n\nclass TestRectangle(unittest.TestCase):\n <mask token>\n\n def setUp(self):\n \"\"\" setUp \"\"\"\n Base._Base__nb_objects = 0\n\n def tearDown(self):\n \"\"\" tearDown destroys any existing objects and processes \"\"\"\n pass\n\n def test_type(self):\n \"\"\" Test type \"\"\"\n r1 = Rectangle(1, 2)\n self.assertTrue(type(r1) is Rectangle)\n\n def test_inheritance(self):\n \"\"\"Tests if Rectangle inherits Base.\"\"\"\n self.assertTrue(issubclass(Rectangle, Base))\n <mask token>\n <mask token>\n\n def test_constructor_one_args(self):\n \"\"\"Tests constructor signature.\"\"\"\n with self.assertRaises(TypeError) as e:\n r = Rectangle(1)\n s = \"__init__() missing 1 required positional argument: 'height'\"\n self.assertEqual(str(e.exception), s)\n\n def test_instantiation(self):\n \"\"\"Tests instantiation.\"\"\"\n r = Rectangle(10, 20)\n self.assertEqual(str(type(r)), \"<class 'models.rectangle.Rectangle'>\")\n self.assertTrue(isinstance(r, Base))\n d = {'_Rectangle__height': 20, '_Rectangle__width': 10,\n '_Rectangle__x': 0, '_Rectangle__y': 0, 'id': 1}\n self.assertDictEqual(r.__dict__, d)\n with self.assertRaises(TypeError) as e:\n r = Rectangle('1', 2)\n msg = 'width must be an integer'\n self.assertEqual(str(e.exception), msg)\n with self.assertRaises(TypeError) as e:\n r = Rectangle(1, '2')\n msg = 'height must be an integer'\n self.assertEqual(str(e.exception), msg)\n with self.assertRaises(TypeError) as e:\n r = Rectangle(1, 2, '3')\n msg = 'x must be an integer'\n self.assertEqual(str(e.exception), msg)\n with self.assertRaises(TypeError) as e:\n r = Rectangle(1, 2, 3, '4')\n msg = 'y must be an integer'\n self.assertEqual(str(e.exception), msg)\n with self.assertRaises(ValueError) as e:\n r = Rectangle(-1, 2)\n msg = 'width must be > 0'\n self.assertEqual(str(e.exception), msg)\n with self.assertRaises(ValueError) as e:\n r = Rectangle(1, -2)\n msg = 'height must be > 0'\n self.assertEqual(str(e.exception), msg)\n with 
self.assertRaises(ValueError) as e:\n r = Rectangle(0, 2)\n msg = 'width must be > 0'\n self.assertEqual(str(e.exception), msg)\n with self.assertRaises(ValueError) as e:\n r = Rectangle(1, 0)\n msg = 'height must be > 0'\n self.assertEqual(str(e.exception), msg)\n with self.assertRaises(ValueError) as e:\n r = Rectangle(1, 2, -3)\n msg = 'x must be >= 0'\n self.assertEqual(str(e.exception), msg)\n with self.assertRaises(ValueError) as e:\n r = Rectangle(1, 2, 3, -4)\n msg = 'y must be >= 0'\n self.assertEqual(str(e.exception), msg)\n\n def test_id_inherited(self):\n \"\"\"Tests if id is inherited from Base.\"\"\"\n Base._Base__nb_objects = 98\n r = Rectangle(2, 4)\n self.assertEqual(r.id, 99)\n\n def test_validate_type(self):\n \"\"\"Tests property validation.\"\"\"\n r = Rectangle(1, 2)\n attributes = ['x', 'y', 'width', 'height']\n t = 3.14, -1.1, float('inf'), float('-inf'), True, 'str', (2,), [4], {5\n }, {(6): 7}, None\n for attribute in attributes:\n s = '{} must be an integer'.format(attribute)\n for invalid_type in t:\n with self.assertRaises(TypeError) as e:\n setattr(r, attribute, invalid_type)\n self.assertEqual(str(e.exception), s)\n\n def test_validate_value_negative_gt(self):\n \"\"\"Tests property validation.\"\"\"\n r = Rectangle(1, 2)\n attributes = ['width', 'height']\n for attribute in attributes:\n s = '{} must be > 0'.format(attribute)\n with self.assertRaises(ValueError) as e:\n setattr(r, attribute, -(randrange(10) + 1))\n self.assertEqual(str(e.exception), s)\n\n def test_validate_value_negative_ge(self):\n \"\"\"Tests property validation.\"\"\"\n r = Rectangle(1, 2)\n attributes = ['x', 'y']\n for attribute in attributes:\n s = '{} must be >= 0'.format(attribute)\n with self.assertRaises(ValueError) as e:\n setattr(r, attribute, -(randrange(10) + 1))\n self.assertEqual(str(e.exception), s)\n\n def test_validate_value_zero(self):\n \"\"\"Tests property validation.\"\"\"\n r = Rectangle(1, 2)\n attributes = ['width', 'height']\n for 
attribute in attributes:\n s = '{} must be > 0'.format(attribute)\n with self.assertRaises(ValueError) as e:\n setattr(r, attribute, 0)\n self.assertEqual(str(e.exception), s)\n\n def test_property(self):\n \"\"\"Tests property setting/getting.\"\"\"\n r = Rectangle(1, 2)\n attributes = ['x', 'y', 'width', 'height']\n for attribute in attributes:\n n = randrange(10) + 1\n setattr(r, attribute, n)\n self.assertEqual(getattr(r, attribute), n)\n\n def test_property_range_zero(self):\n \"\"\"Tests property setting/getting.\"\"\"\n r = Rectangle(1, 2)\n r.x = 0\n r.y = 0\n self.assertEqual(r.x, 0)\n self.assertEqual(r.y, 0)\n\n def test_area_no_args(self):\n \"\"\"Tests area() method signature.\"\"\"\n r = Rectangle(5, 6)\n with self.assertRaises(TypeError) as e:\n Rectangle.area()\n s = \"area() missing 1 required positional argument: 'self'\"\n self.assertEqual(str(e.exception), s)\n\n def test_area(self):\n \"\"\"Tests area() method compuation.\"\"\"\n r = Rectangle(5, 6)\n self.assertEqual(r.area(), 30)\n w = randrange(10) + 1\n h = randrange(10) + 1\n r.width = w\n r.height = h\n self.assertEqual(r.area(), w * h)\n w = randrange(10) + 1\n h = randrange(10) + 1\n r = Rectangle(w, h, 7, 8, 9)\n self.assertEqual(r.area(), w * h)\n w = randrange(10) + 1\n h = randrange(10) + 1\n r = Rectangle(w, h, y=7, x=8, id=9)\n self.assertEqual(r.area(), w * h)\n\n def test_display_no_args(self):\n \"\"\"Tests display() method signature.\"\"\"\n r = Rectangle(9, 8)\n with self.assertRaises(TypeError) as e:\n Rectangle.display()\n s = \"display() missing 1 required positional argument: 'self'\"\n self.assertEqual(str(e.exception), s)\n\n def test_display_simple(self):\n \"\"\"Tests display() method output.\"\"\"\n r = Rectangle(1, 1)\n f = io.StringIO()\n with redirect_stdout(f):\n r.display()\n s = '#\\n'\n self.assertEqual(f.getvalue(), s)\n r.width = 2\n r.height = 2\n f = io.StringIO()\n with redirect_stdout(f):\n r.display()\n s = '##\\n##\\n'\n self.assertEqual(f.getvalue(), 
s)\n r = Rectangle(2, 2, 2, 2)\n f = io.StringIO()\n with redirect_stdout(f):\n r.display()\n s = '\\n\\n ##\\n ##\\n'\n self.assertEqual(f.getvalue(), s)\n\n def test_K_str_no_args(self):\n \"\"\"Tests __str__() method signature.\"\"\"\n r = Rectangle(5, 2)\n with self.assertRaises(TypeError) as e:\n Rectangle.__str__()\n s = \"__str__() missing 1 required positional argument: 'self'\"\n self.assertEqual(str(e.exception), s)\n\n def test_K_str(self):\n \"\"\"Tests __str__() method return.\"\"\"\n r = Rectangle(5, 2)\n s = '[Rectangle] (1) 0/0 - 5/2'\n self.assertEqual(str(r), s)\n r = Rectangle(1, 1, 1)\n s = '[Rectangle] (2) 1/0 - 1/1'\n self.assertEqual(str(r), s)\n r = Rectangle(3, 4, 5, 6)\n s = '[Rectangle] (3) 5/6 - 3/4'\n self.assertEqual(str(r), s)\n Base._Base__nb_objects = 0\n r1 = Rectangle(4, 6, 2, 1, 12)\n self.assertEqual(str(r1), '[Rectangle] (12) 2/1 - 4/6')\n r2 = Rectangle(5, 5, 1)\n self.assertEqual(str(r2), '[Rectangle] (1) 1/0 - 5/5')\n\n def test_update_no_args(self):\n \"\"\"Tests update() method \"\"\"\n r = Rectangle(5, 2)\n with self.assertRaises(TypeError) as e:\n Rectangle.update()\n s = \"update() missing 1 required positional argument: 'self'\"\n self.assertEqual(str(e.exception), s)\n d = r.__dict__.copy()\n r.update()\n self.assertEqual(r.__dict__, d)\n\n def test_update_args(self):\n \"\"\"Tests update() postional args.\"\"\"\n r = Rectangle(5, 2)\n d = r.__dict__.copy()\n r.update(10)\n d['id'] = 10\n self.assertEqual(r.__dict__, d)\n r.update(10, 5)\n d['_Rectangle__width'] = 5\n self.assertEqual(r.__dict__, d)\n r.update(10, 5, 17)\n d['_Rectangle__height'] = 17\n self.assertEqual(r.__dict__, d)\n r.update(10, 5, 17, 20)\n d['_Rectangle__x'] = 20\n self.assertEqual(r.__dict__, d)\n r.update(10, 5, 17, 20, 25)\n d['_Rectangle__y'] = 25\n self.assertEqual(r.__dict__, d)\n\n def test_update_args_bad(self):\n \"\"\"Tests update() positional arg bad values.\"\"\"\n r = Rectangle(5, 2)\n d = r.__dict__.copy()\n r.update(10)\n d['id'] 
= 10\n self.assertEqual(r.__dict__, d)\n with self.assertRaises(ValueError) as e:\n r.update(10, -5)\n s = 'width must be > 0'\n self.assertEqual(str(e.exception), s)\n with self.assertRaises(ValueError) as e:\n r.update(10, 5, -17)\n s = 'height must be > 0'\n self.assertEqual(str(e.exception), s)\n with self.assertRaises(ValueError) as e:\n r.update(10, 5, 17, -20)\n s = 'x must be >= 0'\n self.assertEqual(str(e.exception), s)\n with self.assertRaises(ValueError) as e:\n r.update(10, 5, 17, 20, -25)\n s = 'y must be >= 0'\n self.assertEqual(str(e.exception), s)\n\n def test_update_kwargs(self):\n \"\"\"Tests update() keyword args.\"\"\"\n r = Rectangle(5, 2)\n d = r.__dict__.copy()\n r.update(id=10)\n d['id'] = 10\n self.assertEqual(r.__dict__, d)\n r.update(width=5)\n d['_Rectangle__width'] = 5\n self.assertEqual(r.__dict__, d)\n r.update(height=17)\n d['_Rectangle__height'] = 17\n self.assertEqual(r.__dict__, d)\n r.update(x=20)\n d['_Rectangle__x'] = 20\n self.assertEqual(r.__dict__, d)\n r.update(y=25)\n d['_Rectangle__y'] = 25\n self.assertEqual(r.__dict__, d)\n <mask token>\n\n def test_to_dictionary(self):\n \"\"\"Tests to_dictionary() \"\"\"\n with self.assertRaises(TypeError) as e:\n Rectangle.to_dictionary()\n s = \"to_dictionary() missing 1 required positional argument: 'self'\"\n self.assertEqual(str(e.exception), s)\n r = Rectangle(1, 2)\n d = {'x': 0, 'y': 0, 'width': 1, 'id': 1, 'height': 2}\n self.assertEqual(r.to_dictionary(), d)\n r = Rectangle(1, 2, 3, 4, 5)\n d = {'x': 3, 'y': 4, 'width': 1, 'id': 5, 'height': 2}\n self.assertEqual(r.to_dictionary(), d)\n r.x = 10\n r.y = 20\n r.width = 30\n r.height = 40\n d = {'x': 10, 'y': 20, 'width': 30, 'id': 5, 'height': 40}\n self.assertEqual(r.to_dictionary(), d)\n r1 = Rectangle(10, 2, 1, 9)\n r1_dictionary = r1.to_dictionary()\n r2 = Rectangle(1, 1)\n r2.update(**r1_dictionary)\n self.assertEqual(str(r1), str(r2))\n self.assertNotEqual(r1, r2)\n",
"step-3": "<mask token>\n\n\nclass TestRectangle(unittest.TestCase):\n <mask token>\n\n def setUp(self):\n \"\"\" setUp \"\"\"\n Base._Base__nb_objects = 0\n\n def tearDown(self):\n \"\"\" tearDown destroys any existing objects and processes \"\"\"\n pass\n\n def test_type(self):\n \"\"\" Test type \"\"\"\n r1 = Rectangle(1, 2)\n self.assertTrue(type(r1) is Rectangle)\n\n def test_inheritance(self):\n \"\"\"Tests if Rectangle inherits Base.\"\"\"\n self.assertTrue(issubclass(Rectangle, Base))\n <mask token>\n <mask token>\n\n def test_constructor_one_args(self):\n \"\"\"Tests constructor signature.\"\"\"\n with self.assertRaises(TypeError) as e:\n r = Rectangle(1)\n s = \"__init__() missing 1 required positional argument: 'height'\"\n self.assertEqual(str(e.exception), s)\n\n def test_instantiation(self):\n \"\"\"Tests instantiation.\"\"\"\n r = Rectangle(10, 20)\n self.assertEqual(str(type(r)), \"<class 'models.rectangle.Rectangle'>\")\n self.assertTrue(isinstance(r, Base))\n d = {'_Rectangle__height': 20, '_Rectangle__width': 10,\n '_Rectangle__x': 0, '_Rectangle__y': 0, 'id': 1}\n self.assertDictEqual(r.__dict__, d)\n with self.assertRaises(TypeError) as e:\n r = Rectangle('1', 2)\n msg = 'width must be an integer'\n self.assertEqual(str(e.exception), msg)\n with self.assertRaises(TypeError) as e:\n r = Rectangle(1, '2')\n msg = 'height must be an integer'\n self.assertEqual(str(e.exception), msg)\n with self.assertRaises(TypeError) as e:\n r = Rectangle(1, 2, '3')\n msg = 'x must be an integer'\n self.assertEqual(str(e.exception), msg)\n with self.assertRaises(TypeError) as e:\n r = Rectangle(1, 2, 3, '4')\n msg = 'y must be an integer'\n self.assertEqual(str(e.exception), msg)\n with self.assertRaises(ValueError) as e:\n r = Rectangle(-1, 2)\n msg = 'width must be > 0'\n self.assertEqual(str(e.exception), msg)\n with self.assertRaises(ValueError) as e:\n r = Rectangle(1, -2)\n msg = 'height must be > 0'\n self.assertEqual(str(e.exception), msg)\n with 
self.assertRaises(ValueError) as e:\n r = Rectangle(0, 2)\n msg = 'width must be > 0'\n self.assertEqual(str(e.exception), msg)\n with self.assertRaises(ValueError) as e:\n r = Rectangle(1, 0)\n msg = 'height must be > 0'\n self.assertEqual(str(e.exception), msg)\n with self.assertRaises(ValueError) as e:\n r = Rectangle(1, 2, -3)\n msg = 'x must be >= 0'\n self.assertEqual(str(e.exception), msg)\n with self.assertRaises(ValueError) as e:\n r = Rectangle(1, 2, 3, -4)\n msg = 'y must be >= 0'\n self.assertEqual(str(e.exception), msg)\n\n def test_id_inherited(self):\n \"\"\"Tests if id is inherited from Base.\"\"\"\n Base._Base__nb_objects = 98\n r = Rectangle(2, 4)\n self.assertEqual(r.id, 99)\n\n def test_validate_type(self):\n \"\"\"Tests property validation.\"\"\"\n r = Rectangle(1, 2)\n attributes = ['x', 'y', 'width', 'height']\n t = 3.14, -1.1, float('inf'), float('-inf'), True, 'str', (2,), [4], {5\n }, {(6): 7}, None\n for attribute in attributes:\n s = '{} must be an integer'.format(attribute)\n for invalid_type in t:\n with self.assertRaises(TypeError) as e:\n setattr(r, attribute, invalid_type)\n self.assertEqual(str(e.exception), s)\n\n def test_validate_value_negative_gt(self):\n \"\"\"Tests property validation.\"\"\"\n r = Rectangle(1, 2)\n attributes = ['width', 'height']\n for attribute in attributes:\n s = '{} must be > 0'.format(attribute)\n with self.assertRaises(ValueError) as e:\n setattr(r, attribute, -(randrange(10) + 1))\n self.assertEqual(str(e.exception), s)\n\n def test_validate_value_negative_ge(self):\n \"\"\"Tests property validation.\"\"\"\n r = Rectangle(1, 2)\n attributes = ['x', 'y']\n for attribute in attributes:\n s = '{} must be >= 0'.format(attribute)\n with self.assertRaises(ValueError) as e:\n setattr(r, attribute, -(randrange(10) + 1))\n self.assertEqual(str(e.exception), s)\n\n def test_validate_value_zero(self):\n \"\"\"Tests property validation.\"\"\"\n r = Rectangle(1, 2)\n attributes = ['width', 'height']\n for 
attribute in attributes:\n s = '{} must be > 0'.format(attribute)\n with self.assertRaises(ValueError) as e:\n setattr(r, attribute, 0)\n self.assertEqual(str(e.exception), s)\n\n def test_property(self):\n \"\"\"Tests property setting/getting.\"\"\"\n r = Rectangle(1, 2)\n attributes = ['x', 'y', 'width', 'height']\n for attribute in attributes:\n n = randrange(10) + 1\n setattr(r, attribute, n)\n self.assertEqual(getattr(r, attribute), n)\n\n def test_property_range_zero(self):\n \"\"\"Tests property setting/getting.\"\"\"\n r = Rectangle(1, 2)\n r.x = 0\n r.y = 0\n self.assertEqual(r.x, 0)\n self.assertEqual(r.y, 0)\n\n def test_area_no_args(self):\n \"\"\"Tests area() method signature.\"\"\"\n r = Rectangle(5, 6)\n with self.assertRaises(TypeError) as e:\n Rectangle.area()\n s = \"area() missing 1 required positional argument: 'self'\"\n self.assertEqual(str(e.exception), s)\n\n def test_area(self):\n \"\"\"Tests area() method compuation.\"\"\"\n r = Rectangle(5, 6)\n self.assertEqual(r.area(), 30)\n w = randrange(10) + 1\n h = randrange(10) + 1\n r.width = w\n r.height = h\n self.assertEqual(r.area(), w * h)\n w = randrange(10) + 1\n h = randrange(10) + 1\n r = Rectangle(w, h, 7, 8, 9)\n self.assertEqual(r.area(), w * h)\n w = randrange(10) + 1\n h = randrange(10) + 1\n r = Rectangle(w, h, y=7, x=8, id=9)\n self.assertEqual(r.area(), w * h)\n\n def test_display_no_args(self):\n \"\"\"Tests display() method signature.\"\"\"\n r = Rectangle(9, 8)\n with self.assertRaises(TypeError) as e:\n Rectangle.display()\n s = \"display() missing 1 required positional argument: 'self'\"\n self.assertEqual(str(e.exception), s)\n\n def test_display_simple(self):\n \"\"\"Tests display() method output.\"\"\"\n r = Rectangle(1, 1)\n f = io.StringIO()\n with redirect_stdout(f):\n r.display()\n s = '#\\n'\n self.assertEqual(f.getvalue(), s)\n r.width = 2\n r.height = 2\n f = io.StringIO()\n with redirect_stdout(f):\n r.display()\n s = '##\\n##\\n'\n self.assertEqual(f.getvalue(), 
s)\n r = Rectangle(2, 2, 2, 2)\n f = io.StringIO()\n with redirect_stdout(f):\n r.display()\n s = '\\n\\n ##\\n ##\\n'\n self.assertEqual(f.getvalue(), s)\n\n def test_K_str_no_args(self):\n \"\"\"Tests __str__() method signature.\"\"\"\n r = Rectangle(5, 2)\n with self.assertRaises(TypeError) as e:\n Rectangle.__str__()\n s = \"__str__() missing 1 required positional argument: 'self'\"\n self.assertEqual(str(e.exception), s)\n\n def test_K_str(self):\n \"\"\"Tests __str__() method return.\"\"\"\n r = Rectangle(5, 2)\n s = '[Rectangle] (1) 0/0 - 5/2'\n self.assertEqual(str(r), s)\n r = Rectangle(1, 1, 1)\n s = '[Rectangle] (2) 1/0 - 1/1'\n self.assertEqual(str(r), s)\n r = Rectangle(3, 4, 5, 6)\n s = '[Rectangle] (3) 5/6 - 3/4'\n self.assertEqual(str(r), s)\n Base._Base__nb_objects = 0\n r1 = Rectangle(4, 6, 2, 1, 12)\n self.assertEqual(str(r1), '[Rectangle] (12) 2/1 - 4/6')\n r2 = Rectangle(5, 5, 1)\n self.assertEqual(str(r2), '[Rectangle] (1) 1/0 - 5/5')\n\n def test_update_no_args(self):\n \"\"\"Tests update() method \"\"\"\n r = Rectangle(5, 2)\n with self.assertRaises(TypeError) as e:\n Rectangle.update()\n s = \"update() missing 1 required positional argument: 'self'\"\n self.assertEqual(str(e.exception), s)\n d = r.__dict__.copy()\n r.update()\n self.assertEqual(r.__dict__, d)\n\n def test_update_args(self):\n \"\"\"Tests update() postional args.\"\"\"\n r = Rectangle(5, 2)\n d = r.__dict__.copy()\n r.update(10)\n d['id'] = 10\n self.assertEqual(r.__dict__, d)\n r.update(10, 5)\n d['_Rectangle__width'] = 5\n self.assertEqual(r.__dict__, d)\n r.update(10, 5, 17)\n d['_Rectangle__height'] = 17\n self.assertEqual(r.__dict__, d)\n r.update(10, 5, 17, 20)\n d['_Rectangle__x'] = 20\n self.assertEqual(r.__dict__, d)\n r.update(10, 5, 17, 20, 25)\n d['_Rectangle__y'] = 25\n self.assertEqual(r.__dict__, d)\n\n def test_update_args_bad(self):\n \"\"\"Tests update() positional arg bad values.\"\"\"\n r = Rectangle(5, 2)\n d = r.__dict__.copy()\n r.update(10)\n d['id'] 
= 10\n self.assertEqual(r.__dict__, d)\n with self.assertRaises(ValueError) as e:\n r.update(10, -5)\n s = 'width must be > 0'\n self.assertEqual(str(e.exception), s)\n with self.assertRaises(ValueError) as e:\n r.update(10, 5, -17)\n s = 'height must be > 0'\n self.assertEqual(str(e.exception), s)\n with self.assertRaises(ValueError) as e:\n r.update(10, 5, 17, -20)\n s = 'x must be >= 0'\n self.assertEqual(str(e.exception), s)\n with self.assertRaises(ValueError) as e:\n r.update(10, 5, 17, 20, -25)\n s = 'y must be >= 0'\n self.assertEqual(str(e.exception), s)\n\n def test_update_kwargs(self):\n \"\"\"Tests update() keyword args.\"\"\"\n r = Rectangle(5, 2)\n d = r.__dict__.copy()\n r.update(id=10)\n d['id'] = 10\n self.assertEqual(r.__dict__, d)\n r.update(width=5)\n d['_Rectangle__width'] = 5\n self.assertEqual(r.__dict__, d)\n r.update(height=17)\n d['_Rectangle__height'] = 17\n self.assertEqual(r.__dict__, d)\n r.update(x=20)\n d['_Rectangle__x'] = 20\n self.assertEqual(r.__dict__, d)\n r.update(y=25)\n d['_Rectangle__y'] = 25\n self.assertEqual(r.__dict__, d)\n\n def test_update_kwargs_2(self):\n \"\"\"Tests update() keyword args.\"\"\"\n r = Rectangle(5, 2)\n d = r.__dict__.copy()\n r.update(id=10)\n d['id'] = 10\n self.assertEqual(r.__dict__, d)\n r.update(id=10, width=5)\n d['_Rectangle__width'] = 5\n self.assertEqual(r.__dict__, d)\n r.update(id=10, width=5, height=17)\n d['_Rectangle__height'] = 17\n self.assertEqual(r.__dict__, d)\n r.update(id=10, width=5, height=17, x=20)\n d['_Rectangle__x'] = 20\n self.assertEqual(r.__dict__, d)\n r.update(id=10, width=5, height=17, x=20, y=25)\n d['_Rectangle__y'] = 25\n self.assertEqual(r.__dict__, d)\n r.update(y=25, id=10, height=17, x=20, width=5)\n self.assertEqual(r.__dict__, d)\n Base._Base__nb_objects = 0\n r1 = Rectangle(10, 10, 10, 10)\n self.assertEqual(str(r1), '[Rectangle] (1) 10/10 - 10/10')\n r1.update(height=1)\n self.assertEqual(str(r1), '[Rectangle] (1) 10/10 - 10/1')\n r1.update(width=1, x=2)\n 
self.assertEqual(str(r1), '[Rectangle] (1) 2/10 - 1/1')\n r1.update(y=1, width=2, x=3, id=89)\n self.assertEqual(str(r1), '[Rectangle] (89) 3/1 - 2/1')\n r1.update(x=1, height=2, y=3, width=4)\n self.assertEqual(str(r1), '[Rectangle] (89) 1/3 - 4/2')\n Base._Base__nb_objects = 0\n r1 = Rectangle(10, 10, 10, 10)\n self.assertEqual(str(r1), '[Rectangle] (1) 10/10 - 10/10')\n r1.update(89)\n self.assertEqual(str(r1), '[Rectangle] (89) 10/10 - 10/10')\n r1.update(89, 2)\n self.assertEqual(str(r1), '[Rectangle] (89) 10/10 - 2/10')\n r1.update(89, 2, 3)\n self.assertEqual(str(r1), '[Rectangle] (89) 10/10 - 2/3')\n r1.update(89, 2, 3, 4)\n self.assertEqual(str(r1), '[Rectangle] (89) 4/10 - 2/3')\n r1.update(89, 2, 3, 4, 5)\n self.assertEqual(str(r1), '[Rectangle] (89) 4/5 - 2/3')\n\n def test_to_dictionary(self):\n \"\"\"Tests to_dictionary() \"\"\"\n with self.assertRaises(TypeError) as e:\n Rectangle.to_dictionary()\n s = \"to_dictionary() missing 1 required positional argument: 'self'\"\n self.assertEqual(str(e.exception), s)\n r = Rectangle(1, 2)\n d = {'x': 0, 'y': 0, 'width': 1, 'id': 1, 'height': 2}\n self.assertEqual(r.to_dictionary(), d)\n r = Rectangle(1, 2, 3, 4, 5)\n d = {'x': 3, 'y': 4, 'width': 1, 'id': 5, 'height': 2}\n self.assertEqual(r.to_dictionary(), d)\n r.x = 10\n r.y = 20\n r.width = 30\n r.height = 40\n d = {'x': 10, 'y': 20, 'width': 30, 'id': 5, 'height': 40}\n self.assertEqual(r.to_dictionary(), d)\n r1 = Rectangle(10, 2, 1, 9)\n r1_dictionary = r1.to_dictionary()\n r2 = Rectangle(1, 1)\n r2.update(**r1_dictionary)\n self.assertEqual(str(r1), str(r2))\n self.assertNotEqual(r1, r2)\n",
"step-4": "<mask token>\n\n\nclass TestRectangle(unittest.TestCase):\n <mask token>\n\n def setUp(self):\n \"\"\" setUp \"\"\"\n Base._Base__nb_objects = 0\n\n def tearDown(self):\n \"\"\" tearDown destroys any existing objects and processes \"\"\"\n pass\n\n def test_type(self):\n \"\"\" Test type \"\"\"\n r1 = Rectangle(1, 2)\n self.assertTrue(type(r1) is Rectangle)\n\n def test_inheritance(self):\n \"\"\"Tests if Rectangle inherits Base.\"\"\"\n self.assertTrue(issubclass(Rectangle, Base))\n\n def test_constructor_no_args(self):\n \"\"\"Tests constructor signature.\"\"\"\n with self.assertRaises(TypeError) as e:\n r = Rectangle()\n s = (\n \"__init__() missing 2 required positional arguments: 'width' and 'height'\"\n )\n self.assertEqual(str(e.exception), s)\n\n def test_constructor_many_args(self):\n \"\"\"Tests constructor signature.\"\"\"\n with self.assertRaises(TypeError) as e:\n r = Rectangle(1, 2, 3, 4, 5, 6)\n s = (\n '__init__() takes from 3 to 6 positional arguments but 7 were given'\n )\n self.assertEqual(str(e.exception), s)\n\n def test_constructor_one_args(self):\n \"\"\"Tests constructor signature.\"\"\"\n with self.assertRaises(TypeError) as e:\n r = Rectangle(1)\n s = \"__init__() missing 1 required positional argument: 'height'\"\n self.assertEqual(str(e.exception), s)\n\n def test_instantiation(self):\n \"\"\"Tests instantiation.\"\"\"\n r = Rectangle(10, 20)\n self.assertEqual(str(type(r)), \"<class 'models.rectangle.Rectangle'>\")\n self.assertTrue(isinstance(r, Base))\n d = {'_Rectangle__height': 20, '_Rectangle__width': 10,\n '_Rectangle__x': 0, '_Rectangle__y': 0, 'id': 1}\n self.assertDictEqual(r.__dict__, d)\n with self.assertRaises(TypeError) as e:\n r = Rectangle('1', 2)\n msg = 'width must be an integer'\n self.assertEqual(str(e.exception), msg)\n with self.assertRaises(TypeError) as e:\n r = Rectangle(1, '2')\n msg = 'height must be an integer'\n self.assertEqual(str(e.exception), msg)\n with self.assertRaises(TypeError) as e:\n r = 
Rectangle(1, 2, '3')\n msg = 'x must be an integer'\n self.assertEqual(str(e.exception), msg)\n with self.assertRaises(TypeError) as e:\n r = Rectangle(1, 2, 3, '4')\n msg = 'y must be an integer'\n self.assertEqual(str(e.exception), msg)\n with self.assertRaises(ValueError) as e:\n r = Rectangle(-1, 2)\n msg = 'width must be > 0'\n self.assertEqual(str(e.exception), msg)\n with self.assertRaises(ValueError) as e:\n r = Rectangle(1, -2)\n msg = 'height must be > 0'\n self.assertEqual(str(e.exception), msg)\n with self.assertRaises(ValueError) as e:\n r = Rectangle(0, 2)\n msg = 'width must be > 0'\n self.assertEqual(str(e.exception), msg)\n with self.assertRaises(ValueError) as e:\n r = Rectangle(1, 0)\n msg = 'height must be > 0'\n self.assertEqual(str(e.exception), msg)\n with self.assertRaises(ValueError) as e:\n r = Rectangle(1, 2, -3)\n msg = 'x must be >= 0'\n self.assertEqual(str(e.exception), msg)\n with self.assertRaises(ValueError) as e:\n r = Rectangle(1, 2, 3, -4)\n msg = 'y must be >= 0'\n self.assertEqual(str(e.exception), msg)\n\n def test_id_inherited(self):\n \"\"\"Tests if id is inherited from Base.\"\"\"\n Base._Base__nb_objects = 98\n r = Rectangle(2, 4)\n self.assertEqual(r.id, 99)\n\n def test_validate_type(self):\n \"\"\"Tests property validation.\"\"\"\n r = Rectangle(1, 2)\n attributes = ['x', 'y', 'width', 'height']\n t = 3.14, -1.1, float('inf'), float('-inf'), True, 'str', (2,), [4], {5\n }, {(6): 7}, None\n for attribute in attributes:\n s = '{} must be an integer'.format(attribute)\n for invalid_type in t:\n with self.assertRaises(TypeError) as e:\n setattr(r, attribute, invalid_type)\n self.assertEqual(str(e.exception), s)\n\n def test_validate_value_negative_gt(self):\n \"\"\"Tests property validation.\"\"\"\n r = Rectangle(1, 2)\n attributes = ['width', 'height']\n for attribute in attributes:\n s = '{} must be > 0'.format(attribute)\n with self.assertRaises(ValueError) as e:\n setattr(r, attribute, -(randrange(10) + 1))\n 
self.assertEqual(str(e.exception), s)\n\n def test_validate_value_negative_ge(self):\n \"\"\"Tests property validation.\"\"\"\n r = Rectangle(1, 2)\n attributes = ['x', 'y']\n for attribute in attributes:\n s = '{} must be >= 0'.format(attribute)\n with self.assertRaises(ValueError) as e:\n setattr(r, attribute, -(randrange(10) + 1))\n self.assertEqual(str(e.exception), s)\n\n def test_validate_value_zero(self):\n \"\"\"Tests property validation.\"\"\"\n r = Rectangle(1, 2)\n attributes = ['width', 'height']\n for attribute in attributes:\n s = '{} must be > 0'.format(attribute)\n with self.assertRaises(ValueError) as e:\n setattr(r, attribute, 0)\n self.assertEqual(str(e.exception), s)\n\n def test_property(self):\n \"\"\"Tests property setting/getting.\"\"\"\n r = Rectangle(1, 2)\n attributes = ['x', 'y', 'width', 'height']\n for attribute in attributes:\n n = randrange(10) + 1\n setattr(r, attribute, n)\n self.assertEqual(getattr(r, attribute), n)\n\n def test_property_range_zero(self):\n \"\"\"Tests property setting/getting.\"\"\"\n r = Rectangle(1, 2)\n r.x = 0\n r.y = 0\n self.assertEqual(r.x, 0)\n self.assertEqual(r.y, 0)\n\n def test_area_no_args(self):\n \"\"\"Tests area() method signature.\"\"\"\n r = Rectangle(5, 6)\n with self.assertRaises(TypeError) as e:\n Rectangle.area()\n s = \"area() missing 1 required positional argument: 'self'\"\n self.assertEqual(str(e.exception), s)\n\n def test_area(self):\n \"\"\"Tests area() method compuation.\"\"\"\n r = Rectangle(5, 6)\n self.assertEqual(r.area(), 30)\n w = randrange(10) + 1\n h = randrange(10) + 1\n r.width = w\n r.height = h\n self.assertEqual(r.area(), w * h)\n w = randrange(10) + 1\n h = randrange(10) + 1\n r = Rectangle(w, h, 7, 8, 9)\n self.assertEqual(r.area(), w * h)\n w = randrange(10) + 1\n h = randrange(10) + 1\n r = Rectangle(w, h, y=7, x=8, id=9)\n self.assertEqual(r.area(), w * h)\n\n def test_display_no_args(self):\n \"\"\"Tests display() method signature.\"\"\"\n r = Rectangle(9, 8)\n 
with self.assertRaises(TypeError) as e:\n Rectangle.display()\n s = \"display() missing 1 required positional argument: 'self'\"\n self.assertEqual(str(e.exception), s)\n\n def test_display_simple(self):\n \"\"\"Tests display() method output.\"\"\"\n r = Rectangle(1, 1)\n f = io.StringIO()\n with redirect_stdout(f):\n r.display()\n s = '#\\n'\n self.assertEqual(f.getvalue(), s)\n r.width = 2\n r.height = 2\n f = io.StringIO()\n with redirect_stdout(f):\n r.display()\n s = '##\\n##\\n'\n self.assertEqual(f.getvalue(), s)\n r = Rectangle(2, 2, 2, 2)\n f = io.StringIO()\n with redirect_stdout(f):\n r.display()\n s = '\\n\\n ##\\n ##\\n'\n self.assertEqual(f.getvalue(), s)\n\n def test_K_str_no_args(self):\n \"\"\"Tests __str__() method signature.\"\"\"\n r = Rectangle(5, 2)\n with self.assertRaises(TypeError) as e:\n Rectangle.__str__()\n s = \"__str__() missing 1 required positional argument: 'self'\"\n self.assertEqual(str(e.exception), s)\n\n def test_K_str(self):\n \"\"\"Tests __str__() method return.\"\"\"\n r = Rectangle(5, 2)\n s = '[Rectangle] (1) 0/0 - 5/2'\n self.assertEqual(str(r), s)\n r = Rectangle(1, 1, 1)\n s = '[Rectangle] (2) 1/0 - 1/1'\n self.assertEqual(str(r), s)\n r = Rectangle(3, 4, 5, 6)\n s = '[Rectangle] (3) 5/6 - 3/4'\n self.assertEqual(str(r), s)\n Base._Base__nb_objects = 0\n r1 = Rectangle(4, 6, 2, 1, 12)\n self.assertEqual(str(r1), '[Rectangle] (12) 2/1 - 4/6')\n r2 = Rectangle(5, 5, 1)\n self.assertEqual(str(r2), '[Rectangle] (1) 1/0 - 5/5')\n\n def test_update_no_args(self):\n \"\"\"Tests update() method \"\"\"\n r = Rectangle(5, 2)\n with self.assertRaises(TypeError) as e:\n Rectangle.update()\n s = \"update() missing 1 required positional argument: 'self'\"\n self.assertEqual(str(e.exception), s)\n d = r.__dict__.copy()\n r.update()\n self.assertEqual(r.__dict__, d)\n\n def test_update_args(self):\n \"\"\"Tests update() postional args.\"\"\"\n r = Rectangle(5, 2)\n d = r.__dict__.copy()\n r.update(10)\n d['id'] = 10\n 
self.assertEqual(r.__dict__, d)\n r.update(10, 5)\n d['_Rectangle__width'] = 5\n self.assertEqual(r.__dict__, d)\n r.update(10, 5, 17)\n d['_Rectangle__height'] = 17\n self.assertEqual(r.__dict__, d)\n r.update(10, 5, 17, 20)\n d['_Rectangle__x'] = 20\n self.assertEqual(r.__dict__, d)\n r.update(10, 5, 17, 20, 25)\n d['_Rectangle__y'] = 25\n self.assertEqual(r.__dict__, d)\n\n def test_update_args_bad(self):\n \"\"\"Tests update() positional arg bad values.\"\"\"\n r = Rectangle(5, 2)\n d = r.__dict__.copy()\n r.update(10)\n d['id'] = 10\n self.assertEqual(r.__dict__, d)\n with self.assertRaises(ValueError) as e:\n r.update(10, -5)\n s = 'width must be > 0'\n self.assertEqual(str(e.exception), s)\n with self.assertRaises(ValueError) as e:\n r.update(10, 5, -17)\n s = 'height must be > 0'\n self.assertEqual(str(e.exception), s)\n with self.assertRaises(ValueError) as e:\n r.update(10, 5, 17, -20)\n s = 'x must be >= 0'\n self.assertEqual(str(e.exception), s)\n with self.assertRaises(ValueError) as e:\n r.update(10, 5, 17, 20, -25)\n s = 'y must be >= 0'\n self.assertEqual(str(e.exception), s)\n\n def test_update_kwargs(self):\n \"\"\"Tests update() keyword args.\"\"\"\n r = Rectangle(5, 2)\n d = r.__dict__.copy()\n r.update(id=10)\n d['id'] = 10\n self.assertEqual(r.__dict__, d)\n r.update(width=5)\n d['_Rectangle__width'] = 5\n self.assertEqual(r.__dict__, d)\n r.update(height=17)\n d['_Rectangle__height'] = 17\n self.assertEqual(r.__dict__, d)\n r.update(x=20)\n d['_Rectangle__x'] = 20\n self.assertEqual(r.__dict__, d)\n r.update(y=25)\n d['_Rectangle__y'] = 25\n self.assertEqual(r.__dict__, d)\n\n def test_update_kwargs_2(self):\n \"\"\"Tests update() keyword args.\"\"\"\n r = Rectangle(5, 2)\n d = r.__dict__.copy()\n r.update(id=10)\n d['id'] = 10\n self.assertEqual(r.__dict__, d)\n r.update(id=10, width=5)\n d['_Rectangle__width'] = 5\n self.assertEqual(r.__dict__, d)\n r.update(id=10, width=5, height=17)\n d['_Rectangle__height'] = 17\n 
self.assertEqual(r.__dict__, d)\n r.update(id=10, width=5, height=17, x=20)\n d['_Rectangle__x'] = 20\n self.assertEqual(r.__dict__, d)\n r.update(id=10, width=5, height=17, x=20, y=25)\n d['_Rectangle__y'] = 25\n self.assertEqual(r.__dict__, d)\n r.update(y=25, id=10, height=17, x=20, width=5)\n self.assertEqual(r.__dict__, d)\n Base._Base__nb_objects = 0\n r1 = Rectangle(10, 10, 10, 10)\n self.assertEqual(str(r1), '[Rectangle] (1) 10/10 - 10/10')\n r1.update(height=1)\n self.assertEqual(str(r1), '[Rectangle] (1) 10/10 - 10/1')\n r1.update(width=1, x=2)\n self.assertEqual(str(r1), '[Rectangle] (1) 2/10 - 1/1')\n r1.update(y=1, width=2, x=3, id=89)\n self.assertEqual(str(r1), '[Rectangle] (89) 3/1 - 2/1')\n r1.update(x=1, height=2, y=3, width=4)\n self.assertEqual(str(r1), '[Rectangle] (89) 1/3 - 4/2')\n Base._Base__nb_objects = 0\n r1 = Rectangle(10, 10, 10, 10)\n self.assertEqual(str(r1), '[Rectangle] (1) 10/10 - 10/10')\n r1.update(89)\n self.assertEqual(str(r1), '[Rectangle] (89) 10/10 - 10/10')\n r1.update(89, 2)\n self.assertEqual(str(r1), '[Rectangle] (89) 10/10 - 2/10')\n r1.update(89, 2, 3)\n self.assertEqual(str(r1), '[Rectangle] (89) 10/10 - 2/3')\n r1.update(89, 2, 3, 4)\n self.assertEqual(str(r1), '[Rectangle] (89) 4/10 - 2/3')\n r1.update(89, 2, 3, 4, 5)\n self.assertEqual(str(r1), '[Rectangle] (89) 4/5 - 2/3')\n\n def test_to_dictionary(self):\n \"\"\"Tests to_dictionary() \"\"\"\n with self.assertRaises(TypeError) as e:\n Rectangle.to_dictionary()\n s = \"to_dictionary() missing 1 required positional argument: 'self'\"\n self.assertEqual(str(e.exception), s)\n r = Rectangle(1, 2)\n d = {'x': 0, 'y': 0, 'width': 1, 'id': 1, 'height': 2}\n self.assertEqual(r.to_dictionary(), d)\n r = Rectangle(1, 2, 3, 4, 5)\n d = {'x': 3, 'y': 4, 'width': 1, 'id': 5, 'height': 2}\n self.assertEqual(r.to_dictionary(), d)\n r.x = 10\n r.y = 20\n r.width = 30\n r.height = 40\n d = {'x': 10, 'y': 20, 'width': 30, 'id': 5, 'height': 40}\n 
self.assertEqual(r.to_dictionary(), d)\n r1 = Rectangle(10, 2, 1, 9)\n r1_dictionary = r1.to_dictionary()\n r2 = Rectangle(1, 1)\n r2.update(**r1_dictionary)\n self.assertEqual(str(r1), str(r2))\n self.assertNotEqual(r1, r2)\n",
"step-5": "#!/usr/bin/python3\n\"\"\"\nTest of Rectangle class\n\"\"\"\nfrom contextlib import redirect_stdout\nimport io\nimport unittest\nfrom random import randrange\nfrom models.base import Base\nfrom models.rectangle import Rectangle\nfrom models.square import Square\n\n\nclass TestRectangle(unittest.TestCase):\n \"\"\" Test Rectangle methods \"\"\"\n\n def setUp(self):\n \"\"\" setUp \"\"\"\n Base._Base__nb_objects = 0\n\n def tearDown(self):\n \"\"\" tearDown destroys any existing objects and processes \"\"\"\n pass\n\n def test_type(self):\n \"\"\" Test type \"\"\"\n r1 = Rectangle(1, 2)\n self.assertTrue(type(r1) is Rectangle)\n\n def test_inheritance(self):\n \"\"\"Tests if Rectangle inherits Base.\"\"\"\n self.assertTrue(issubclass(Rectangle, Base))\n\n def test_constructor_no_args(self):\n \"\"\"Tests constructor signature.\"\"\"\n with self.assertRaises(TypeError) as e:\n r = Rectangle()\n s = \"__init__() missing 2 required positional arguments: 'width' \\\nand 'height'\"\n self.assertEqual(str(e.exception), s)\n\n def test_constructor_many_args(self):\n \"\"\"Tests constructor signature.\"\"\"\n with self.assertRaises(TypeError) as e:\n r = Rectangle(1, 2, 3, 4, 5, 6)\n s = \"__init__() takes from 3 to 6 positional arguments but 7 were \\\ngiven\"\n self.assertEqual(str(e.exception), s)\n\n def test_constructor_one_args(self):\n \"\"\"Tests constructor signature.\"\"\"\n with self.assertRaises(TypeError) as e:\n r = Rectangle(1)\n s = \"__init__() missing 1 required positional argument: 'height'\"\n self.assertEqual(str(e.exception), s)\n\n def test_instantiation(self):\n \"\"\"Tests instantiation.\"\"\"\n r = Rectangle(10, 20)\n self.assertEqual(str(type(r)), \"<class 'models.rectangle.Rectangle'>\")\n self.assertTrue(isinstance(r, Base))\n d = {'_Rectangle__height': 20, '_Rectangle__width': 10,\n '_Rectangle__x': 0, '_Rectangle__y': 0, 'id': 1}\n self.assertDictEqual(r.__dict__, d)\n\n with self.assertRaises(TypeError) as e:\n r = Rectangle(\"1\", 
2)\n msg = \"width must be an integer\"\n self.assertEqual(str(e.exception), msg)\n\n with self.assertRaises(TypeError) as e:\n r = Rectangle(1, \"2\")\n msg = \"height must be an integer\"\n self.assertEqual(str(e.exception), msg)\n\n with self.assertRaises(TypeError) as e:\n r = Rectangle(1, 2, \"3\")\n msg = \"x must be an integer\"\n self.assertEqual(str(e.exception), msg)\n\n with self.assertRaises(TypeError) as e:\n r = Rectangle(1, 2, 3, \"4\")\n msg = \"y must be an integer\"\n self.assertEqual(str(e.exception), msg)\n\n with self.assertRaises(ValueError) as e:\n r = Rectangle(-1, 2)\n msg = \"width must be > 0\"\n self.assertEqual(str(e.exception), msg)\n\n with self.assertRaises(ValueError) as e:\n r = Rectangle(1, -2)\n msg = \"height must be > 0\"\n self.assertEqual(str(e.exception), msg)\n\n with self.assertRaises(ValueError) as e:\n r = Rectangle(0, 2)\n msg = \"width must be > 0\"\n self.assertEqual(str(e.exception), msg)\n\n with self.assertRaises(ValueError) as e:\n r = Rectangle(1, 0)\n msg = \"height must be > 0\"\n self.assertEqual(str(e.exception), msg)\n\n with self.assertRaises(ValueError) as e:\n r = Rectangle(1, 2, -3)\n msg = \"x must be >= 0\"\n self.assertEqual(str(e.exception), msg)\n\n with self.assertRaises(ValueError) as e:\n r = Rectangle(1, 2, 3, -4)\n msg = \"y must be >= 0\"\n self.assertEqual(str(e.exception), msg)\n\n def test_id_inherited(self):\n \"\"\"Tests if id is inherited from Base.\"\"\"\n Base._Base__nb_objects = 98\n r = Rectangle(2, 4)\n self.assertEqual(r.id, 99)\n\n # -- #\n\n def test_validate_type(self):\n \"\"\"Tests property validation.\"\"\"\n r = Rectangle(1, 2)\n attributes = [\"x\", \"y\", \"width\", \"height\"]\n t = (3.14, -1.1, float('inf'), float('-inf'), True, \"str\", (2,),\n [4], {5}, {6: 7}, None)\n\n for attribute in attributes:\n s = \"{} must be an integer\".format(attribute)\n for invalid_type in t:\n with self.assertRaises(TypeError) as e:\n setattr(r, attribute, invalid_type)\n 
self.assertEqual(str(e.exception), s)\n\n def test_validate_value_negative_gt(self):\n \"\"\"Tests property validation.\"\"\"\n r = Rectangle(1, 2)\n attributes = [\"width\", \"height\"]\n for attribute in attributes:\n s = \"{} must be > 0\".format(attribute)\n with self.assertRaises(ValueError) as e:\n setattr(r, attribute, -(randrange(10) + 1))\n self.assertEqual(str(e.exception), s)\n\n def test_validate_value_negative_ge(self):\n \"\"\"Tests property validation.\"\"\"\n r = Rectangle(1, 2)\n attributes = [\"x\", \"y\"]\n for attribute in attributes:\n s = \"{} must be >= 0\".format(attribute)\n with self.assertRaises(ValueError) as e:\n setattr(r, attribute, -(randrange(10) + 1))\n self.assertEqual(str(e.exception), s)\n\n def test_validate_value_zero(self):\n \"\"\"Tests property validation.\"\"\"\n r = Rectangle(1, 2)\n attributes = [\"width\", \"height\"]\n for attribute in attributes:\n s = \"{} must be > 0\".format(attribute)\n with self.assertRaises(ValueError) as e:\n setattr(r, attribute, 0)\n self.assertEqual(str(e.exception), s)\n\n def test_property(self):\n \"\"\"Tests property setting/getting.\"\"\"\n r = Rectangle(1, 2)\n attributes = [\"x\", \"y\", \"width\", \"height\"]\n for attribute in attributes:\n n = randrange(10) + 1\n setattr(r, attribute, n)\n self.assertEqual(getattr(r, attribute), n)\n\n def test_property_range_zero(self):\n \"\"\"Tests property setting/getting.\"\"\"\n r = Rectangle(1, 2)\n r.x = 0\n r.y = 0\n self.assertEqual(r.x, 0)\n self.assertEqual(r.y, 0)\n\n def test_area_no_args(self):\n \"\"\"Tests area() method signature.\"\"\"\n r = Rectangle(5, 6)\n with self.assertRaises(TypeError) as e:\n Rectangle.area()\n s = \"area() missing 1 required positional argument: 'self'\"\n self.assertEqual(str(e.exception), s)\n\n def test_area(self):\n \"\"\"Tests area() method compuation.\"\"\"\n r = Rectangle(5, 6)\n self.assertEqual(r.area(), 30)\n w = randrange(10) + 1\n h = randrange(10) + 1\n r.width = w\n r.height = h\n 
self.assertEqual(r.area(), w * h)\n w = randrange(10) + 1\n h = randrange(10) + 1\n r = Rectangle(w, h, 7, 8, 9)\n self.assertEqual(r.area(), w * h)\n w = randrange(10) + 1\n h = randrange(10) + 1\n r = Rectangle(w, h, y=7, x=8, id=9)\n self.assertEqual(r.area(), w * h)\n\n def test_display_no_args(self):\n \"\"\"Tests display() method signature.\"\"\"\n r = Rectangle(9, 8)\n with self.assertRaises(TypeError) as e:\n Rectangle.display()\n s = \"display() missing 1 required positional argument: 'self'\"\n self.assertEqual(str(e.exception), s)\n\n def test_display_simple(self):\n \"\"\"Tests display() method output.\"\"\"\n r = Rectangle(1, 1)\n f = io.StringIO()\n with redirect_stdout(f):\n r.display()\n s = \"#\\n\"\n self.assertEqual(f.getvalue(), s)\n r.width = 2\n r.height = 2\n f = io.StringIO()\n with redirect_stdout(f):\n r.display()\n s = \"##\\n##\\n\"\n self.assertEqual(f.getvalue(), s)\n\n r = Rectangle(2, 2, 2, 2)\n f = io.StringIO()\n with redirect_stdout(f):\n r.display()\n s = \"\\n\\n ##\\n ##\\n\"\n self.assertEqual(f.getvalue(), s)\n\n def test_K_str_no_args(self):\n \"\"\"Tests __str__() method signature.\"\"\"\n r = Rectangle(5, 2)\n with self.assertRaises(TypeError) as e:\n Rectangle.__str__()\n s = \"__str__() missing 1 required positional argument: 'self'\"\n self.assertEqual(str(e.exception), s)\n\n def test_K_str(self):\n \"\"\"Tests __str__() method return.\"\"\"\n r = Rectangle(5, 2)\n s = '[Rectangle] (1) 0/0 - 5/2'\n self.assertEqual(str(r), s)\n r = Rectangle(1, 1, 1)\n s = '[Rectangle] (2) 1/0 - 1/1'\n self.assertEqual(str(r), s)\n r = Rectangle(3, 4, 5, 6)\n s = '[Rectangle] (3) 5/6 - 3/4'\n self.assertEqual(str(r), s)\n\n Base._Base__nb_objects = 0\n r1 = Rectangle(4, 6, 2, 1, 12)\n self.assertEqual(str(r1), \"[Rectangle] (12) 2/1 - 4/6\")\n\n r2 = Rectangle(5, 5, 1)\n self.assertEqual(str(r2), \"[Rectangle] (1) 1/0 - 5/5\")\n\n def test_update_no_args(self):\n \"\"\"Tests update() method \"\"\"\n r = Rectangle(5, 2)\n with 
self.assertRaises(TypeError) as e:\n Rectangle.update()\n s = \"update() missing 1 required positional argument: 'self'\"\n self.assertEqual(str(e.exception), s)\n\n d = r.__dict__.copy()\n r.update()\n self.assertEqual(r.__dict__, d)\n\n def test_update_args(self):\n \"\"\"Tests update() postional args.\"\"\"\n r = Rectangle(5, 2)\n d = r.__dict__.copy()\n\n r.update(10)\n d[\"id\"] = 10\n self.assertEqual(r.__dict__, d)\n\n r.update(10, 5)\n d[\"_Rectangle__width\"] = 5\n self.assertEqual(r.__dict__, d)\n\n r.update(10, 5, 17)\n d[\"_Rectangle__height\"] = 17\n self.assertEqual(r.__dict__, d)\n\n r.update(10, 5, 17, 20)\n d[\"_Rectangle__x\"] = 20\n self.assertEqual(r.__dict__, d)\n\n r.update(10, 5, 17, 20, 25)\n d[\"_Rectangle__y\"] = 25\n self.assertEqual(r.__dict__, d)\n\n def test_update_args_bad(self):\n \"\"\"Tests update() positional arg bad values.\"\"\"\n r = Rectangle(5, 2)\n d = r.__dict__.copy()\n\n r.update(10)\n d[\"id\"] = 10\n self.assertEqual(r.__dict__, d)\n\n with self.assertRaises(ValueError) as e:\n r.update(10, -5)\n s = \"width must be > 0\"\n self.assertEqual(str(e.exception), s)\n\n with self.assertRaises(ValueError) as e:\n r.update(10, 5, -17)\n s = \"height must be > 0\"\n self.assertEqual(str(e.exception), s)\n\n with self.assertRaises(ValueError) as e:\n r.update(10, 5, 17, -20)\n s = \"x must be >= 0\"\n self.assertEqual(str(e.exception), s)\n\n with self.assertRaises(ValueError) as e:\n r.update(10, 5, 17, 20, -25)\n s = \"y must be >= 0\"\n self.assertEqual(str(e.exception), s)\n\n def test_update_kwargs(self):\n \"\"\"Tests update() keyword args.\"\"\"\n r = Rectangle(5, 2)\n d = r.__dict__.copy()\n\n r.update(id=10)\n d[\"id\"] = 10\n self.assertEqual(r.__dict__, d)\n\n r.update(width=5)\n d[\"_Rectangle__width\"] = 5\n self.assertEqual(r.__dict__, d)\n\n r.update(height=17)\n d[\"_Rectangle__height\"] = 17\n self.assertEqual(r.__dict__, d)\n\n r.update(x=20)\n d[\"_Rectangle__x\"] = 20\n self.assertEqual(r.__dict__, d)\n\n 
r.update(y=25)\n d[\"_Rectangle__y\"] = 25\n self.assertEqual(r.__dict__, d)\n\n def test_update_kwargs_2(self):\n \"\"\"Tests update() keyword args.\"\"\"\n r = Rectangle(5, 2)\n d = r.__dict__.copy()\n\n r.update(id=10)\n d[\"id\"] = 10\n self.assertEqual(r.__dict__, d)\n\n r.update(id=10, width=5)\n d[\"_Rectangle__width\"] = 5\n self.assertEqual(r.__dict__, d)\n\n r.update(id=10, width=5, height=17)\n d[\"_Rectangle__height\"] = 17\n self.assertEqual(r.__dict__, d)\n\n r.update(id=10, width=5, height=17, x=20)\n d[\"_Rectangle__x\"] = 20\n self.assertEqual(r.__dict__, d)\n\n r.update(id=10, width=5, height=17, x=20, y=25)\n d[\"_Rectangle__y\"] = 25\n self.assertEqual(r.__dict__, d)\n\n r.update(y=25, id=10, height=17, x=20, width=5)\n self.assertEqual(r.__dict__, d)\n\n Base._Base__nb_objects = 0\n r1 = Rectangle(10, 10, 10, 10)\n self.assertEqual(str(r1), \"[Rectangle] (1) 10/10 - 10/10\")\n\n r1.update(height=1)\n self.assertEqual(str(r1), \"[Rectangle] (1) 10/10 - 10/1\")\n\n r1.update(width=1, x=2)\n self.assertEqual(str(r1), \"[Rectangle] (1) 2/10 - 1/1\")\n\n r1.update(y=1, width=2, x=3, id=89)\n self.assertEqual(str(r1), \"[Rectangle] (89) 3/1 - 2/1\")\n\n r1.update(x=1, height=2, y=3, width=4)\n self.assertEqual(str(r1), \"[Rectangle] (89) 1/3 - 4/2\")\n\n Base._Base__nb_objects = 0\n r1 = Rectangle(10, 10, 10, 10)\n self.assertEqual(str(r1), \"[Rectangle] (1) 10/10 - 10/10\")\n\n r1.update(89)\n self.assertEqual(str(r1), \"[Rectangle] (89) 10/10 - 10/10\")\n\n r1.update(89, 2)\n self.assertEqual(str(r1), \"[Rectangle] (89) 10/10 - 2/10\")\n\n r1.update(89, 2, 3)\n self.assertEqual(str(r1), \"[Rectangle] (89) 10/10 - 2/3\")\n\n r1.update(89, 2, 3, 4)\n self.assertEqual(str(r1), \"[Rectangle] (89) 4/10 - 2/3\")\n\n r1.update(89, 2, 3, 4, 5)\n self.assertEqual(str(r1), \"[Rectangle] (89) 4/5 - 2/3\")\n\n def test_to_dictionary(self):\n \"\"\"Tests to_dictionary() \"\"\"\n with self.assertRaises(TypeError) as e:\n Rectangle.to_dictionary()\n s = 
\"to_dictionary() missing 1 required positional argument: 'self'\"\n self.assertEqual(str(e.exception), s)\n\n r = Rectangle(1, 2)\n d = {'x': 0, 'y': 0, 'width': 1, 'id': 1, 'height': 2}\n self.assertEqual(r.to_dictionary(), d)\n\n r = Rectangle(1, 2, 3, 4, 5)\n d = {'x': 3, 'y': 4, 'width': 1, 'id': 5, 'height': 2}\n self.assertEqual(r.to_dictionary(), d)\n\n r.x = 10\n r.y = 20\n r.width = 30\n r.height = 40\n d = {'x': 10, 'y': 20, 'width': 30, 'id': 5, 'height': 40}\n self.assertEqual(r.to_dictionary(), d)\n\n r1 = Rectangle(10, 2, 1, 9)\n r1_dictionary = r1.to_dictionary()\n r2 = Rectangle(1, 1)\n r2.update(**r1_dictionary)\n self.assertEqual(str(r1), str(r2))\n self.assertNotEqual(r1, r2)\n",
"step-ids": [
23,
25,
26,
28,
31
]
}
|
[
23,
25,
26,
28,
31
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.