| code (string, length 13–1.2M) | order_type (string, 1 class: "normal") | original_example (dict) | step_ids (list, length 1–5) |
---|---|---|---|
import cv2, os, fitz, shutil
import numpy as np
from PIL import Image
from pytesseract import pytesseract
from PIL import UnidentifiedImageError
pytesseract.tesseract_cmd = 'C:\\Program Files (x86)\\Tesseract-OCR\\tesseract.exe'
config = r'--oem 3 --psm'  # NOTE: '--psm' is missing its mode number here; this variable is also never used below
# Returns the name of a PNG image created from PAGE 1 of a pdf file
# Takes the name of the pdf file as input
def pdf_to_png(filename):
    doc = fitz.open('pdf_files\\{}'.format(filename))
    zoom = 4  # zoom factor (controls the quality of the PNG rendered from the pdf)
    page = doc.loadPage(0)
    mat = fitz.Matrix(zoom, zoom)
    pix = page.getPixmap(matrix=mat)
    new_filename = filename.replace('.pdf', '.png')  # swap the extension only
    pix.writePNG('photo_files\\{}'.format(new_filename))
    return new_filename
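# Note: loadPage / getPixmap / writePNG are legacy camelCase PyMuPDF names; in
# recent PyMuPDF releases they were renamed to load_page / get_pixmap / Pixmap.save.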
# i is a running counter so that filenames of the cropped symbols never collide
def create_learn_base(filename, language, i):  # Creates folders of cropped recognized symbols inside learn_data
    # Open the image files
    img_to_read = cv2.imdecode(np.fromfile('photo_files\\{}'.format(filename), dtype=np.uint8), cv2.IMREAD_UNCHANGED)  # workaround so cv2 can read file names with Russian characters
    img_to_crop = Image.open('photo_files\\{}'.format(filename))

    # Read the text from the image into a string; print it if needed
    # words_in_image = pytesseract.image_to_string(img_to_read, lang=language)
    # print(words_in_image)

    height, width, c = img_to_read.shape
    letter_boxes = pytesseract.image_to_boxes(img_to_read, lang=language)

    for box in letter_boxes.splitlines():  # Crop the character boxes one by one
        # Handle errors raised when a crop falls outside the image bounds
        try:
            i += 1
            box = box.split()
            # image_to_boxes lines are 'char x1 y1 x2 y2' with the origin at the
            # BOTTOM-left corner, hence the 'height - ...' flips below
            x, y, w, h = int(box[1]), int(box[2]), int(box[3]), int(box[4])
            cv2.rectangle(img_to_read, (x, height - y), (w, height - h), (0, 0, 255), 1)
            area = (x, height - h, w, height - y)  # Region containing the symbol to crop
            cropped_img = img_to_crop.crop(area)
            try:  # Handle errors caused by characters that are invalid in file names
                if not os.path.exists('learn_data\\s_{}'.format(box[0])):
                    os.mkdir('learn_data\\s_{}'.format(box[0]))
                cropped_img.save('learn_data\\s_{}/{}_{}.PNG'.format(box[0], box[0], i))
            except OSError:
                pass
        except SystemError:
            pass
    return i
def fix_dir_bugs():
    # Delete any saved crop that PIL cannot open (corrupt or unreadable files)
    for the_dir in os.listdir('learn_data'):
        for the_file in os.listdir('learn_data/' + the_dir):
            try:
                Image.open('learn_data/' + the_dir + '/' + the_file)
            except OSError:
                os.remove('learn_data/' + the_dir + '/' + the_file)


def clear_directory(directory):
    # Wipe the directory and recreate it empty
    shutil.rmtree(directory)
    os.makedirs(directory)
clear_directory('learn_data')

for the_file in os.listdir('pdf_files'):
    filename = the_file
    png_filename = pdf_to_png(filename)

i = 0
for the_file in os.listdir('photo_files'):
    i = create_learn_base(the_file, 'rus', i)  # the function returns the updated counter, so assign it rather than add
fix_dir_bugs()
############# MANUAL CHECK #############
# Image.open('renamed_learn_data/26/C_591.PNG')
# fix_dir_bugs()
# try:
#     Image.open('renamed_learn_data/26/C_591.PNG')
# except OSError:
#     os.remove('renamed_learn_data/26/C_591.PNG')
|
normal
|
{
"blob_id": "84980b8923fa25664833f810a906d27531145141",
"index": 1066,
"step-1": "<mask token>\n\n\ndef pdf_to_png(filename):\n doc = fitz.open('pdf_files\\\\{}'.format(filename))\n zoom = 4\n page = doc.loadPage(0)\n mat = fitz.Matrix(zoom, zoom)\n pix = page.getPixmap(matrix=mat)\n new_filename = filename.replace('pdf', 'png')\n pix.writePNG('photo_files\\\\{}'.format(new_filename))\n return new_filename\n\n\ndef create_learn_base(filename, language, i):\n img_to_read = cv2.imdecode(np.fromfile('photo_files\\\\{}'.format(\n filename), dtype=np.uint8), cv2.IMREAD_UNCHANGED)\n img_to_crop = Image.open('photo_files\\\\{}'.format(filename))\n height, width, c = img_to_read.shape\n letter_boxes = pytesseract.image_to_boxes(img_to_read, lang=language)\n for box in letter_boxes.splitlines():\n try:\n i += 1\n box = box.split()\n x, y, w, h = int(box[1]), int(box[2]), int(box[3]), int(box[4])\n cv2.rectangle(img_to_read, (x, height - y), (w, height - h), (0,\n 0, 255), 1)\n area = x, height - h, w, height - y\n cropped_img = img_to_crop.crop(area)\n try:\n if not os.path.exists('learn_data\\\\s_{}'.format(box[0])):\n os.mkdir('learn_data\\\\s_{}'.format(box[0]))\n cropped_img.save('learn_data\\\\s_{}/{}_{}.PNG'.format(box[0],\n box[0], i))\n except OSError:\n pass\n except SystemError:\n pass\n return i\n\n\ndef fix_dir_bugs():\n for the_dir in os.listdir('learn_data'):\n for the_file in os.listdir('learn_data/' + the_dir):\n try:\n Image.open('learn_data/' + the_dir + '/' + the_file)\n except OSError:\n os.remove('learn_data/' + the_dir + '/' + the_file)\n\n\ndef clear_directory(directory):\n shutil.rmtree(directory)\n os.makedirs(directory)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef pdf_to_png(filename):\n doc = fitz.open('pdf_files\\\\{}'.format(filename))\n zoom = 4\n page = doc.loadPage(0)\n mat = fitz.Matrix(zoom, zoom)\n pix = page.getPixmap(matrix=mat)\n new_filename = filename.replace('pdf', 'png')\n pix.writePNG('photo_files\\\\{}'.format(new_filename))\n return new_filename\n\n\ndef create_learn_base(filename, language, i):\n img_to_read = cv2.imdecode(np.fromfile('photo_files\\\\{}'.format(\n filename), dtype=np.uint8), cv2.IMREAD_UNCHANGED)\n img_to_crop = Image.open('photo_files\\\\{}'.format(filename))\n height, width, c = img_to_read.shape\n letter_boxes = pytesseract.image_to_boxes(img_to_read, lang=language)\n for box in letter_boxes.splitlines():\n try:\n i += 1\n box = box.split()\n x, y, w, h = int(box[1]), int(box[2]), int(box[3]), int(box[4])\n cv2.rectangle(img_to_read, (x, height - y), (w, height - h), (0,\n 0, 255), 1)\n area = x, height - h, w, height - y\n cropped_img = img_to_crop.crop(area)\n try:\n if not os.path.exists('learn_data\\\\s_{}'.format(box[0])):\n os.mkdir('learn_data\\\\s_{}'.format(box[0]))\n cropped_img.save('learn_data\\\\s_{}/{}_{}.PNG'.format(box[0],\n box[0], i))\n except OSError:\n pass\n except SystemError:\n pass\n return i\n\n\ndef fix_dir_bugs():\n for the_dir in os.listdir('learn_data'):\n for the_file in os.listdir('learn_data/' + the_dir):\n try:\n Image.open('learn_data/' + the_dir + '/' + the_file)\n except OSError:\n os.remove('learn_data/' + the_dir + '/' + the_file)\n\n\ndef clear_directory(directory):\n shutil.rmtree(directory)\n os.makedirs(directory)\n\n\nclear_directory('learn_data')\nfor the_file in os.listdir('pdf_files'):\n filename = the_file\n png_filename = pdf_to_png(filename)\n<mask token>\nfor the_file in os.listdir('photo_files'):\n i += create_learn_base(the_file, 'rus', i)\nfix_dir_bugs()\n",
"step-3": "<mask token>\npytesseract.tesseract_cmd = (\n 'C:\\\\Program Files (x86)\\\\Tesseract-OCR\\\\tesseract.exe')\nconfig = '--oem 3 --psm'\n\n\ndef pdf_to_png(filename):\n doc = fitz.open('pdf_files\\\\{}'.format(filename))\n zoom = 4\n page = doc.loadPage(0)\n mat = fitz.Matrix(zoom, zoom)\n pix = page.getPixmap(matrix=mat)\n new_filename = filename.replace('pdf', 'png')\n pix.writePNG('photo_files\\\\{}'.format(new_filename))\n return new_filename\n\n\ndef create_learn_base(filename, language, i):\n img_to_read = cv2.imdecode(np.fromfile('photo_files\\\\{}'.format(\n filename), dtype=np.uint8), cv2.IMREAD_UNCHANGED)\n img_to_crop = Image.open('photo_files\\\\{}'.format(filename))\n height, width, c = img_to_read.shape\n letter_boxes = pytesseract.image_to_boxes(img_to_read, lang=language)\n for box in letter_boxes.splitlines():\n try:\n i += 1\n box = box.split()\n x, y, w, h = int(box[1]), int(box[2]), int(box[3]), int(box[4])\n cv2.rectangle(img_to_read, (x, height - y), (w, height - h), (0,\n 0, 255), 1)\n area = x, height - h, w, height - y\n cropped_img = img_to_crop.crop(area)\n try:\n if not os.path.exists('learn_data\\\\s_{}'.format(box[0])):\n os.mkdir('learn_data\\\\s_{}'.format(box[0]))\n cropped_img.save('learn_data\\\\s_{}/{}_{}.PNG'.format(box[0],\n box[0], i))\n except OSError:\n pass\n except SystemError:\n pass\n return i\n\n\ndef fix_dir_bugs():\n for the_dir in os.listdir('learn_data'):\n for the_file in os.listdir('learn_data/' + the_dir):\n try:\n Image.open('learn_data/' + the_dir + '/' + the_file)\n except OSError:\n os.remove('learn_data/' + the_dir + '/' + the_file)\n\n\ndef clear_directory(directory):\n shutil.rmtree(directory)\n os.makedirs(directory)\n\n\nclear_directory('learn_data')\nfor the_file in os.listdir('pdf_files'):\n filename = the_file\n png_filename = pdf_to_png(filename)\ni = 0\nfor the_file in os.listdir('photo_files'):\n i += create_learn_base(the_file, 'rus', i)\nfix_dir_bugs()\n",
"step-4": "import cv2, os, fitz, shutil\nimport numpy as np\nfrom PIL import Image\nfrom pytesseract import pytesseract\nfrom PIL import UnidentifiedImageError\npytesseract.tesseract_cmd = (\n 'C:\\\\Program Files (x86)\\\\Tesseract-OCR\\\\tesseract.exe')\nconfig = '--oem 3 --psm'\n\n\ndef pdf_to_png(filename):\n doc = fitz.open('pdf_files\\\\{}'.format(filename))\n zoom = 4\n page = doc.loadPage(0)\n mat = fitz.Matrix(zoom, zoom)\n pix = page.getPixmap(matrix=mat)\n new_filename = filename.replace('pdf', 'png')\n pix.writePNG('photo_files\\\\{}'.format(new_filename))\n return new_filename\n\n\ndef create_learn_base(filename, language, i):\n img_to_read = cv2.imdecode(np.fromfile('photo_files\\\\{}'.format(\n filename), dtype=np.uint8), cv2.IMREAD_UNCHANGED)\n img_to_crop = Image.open('photo_files\\\\{}'.format(filename))\n height, width, c = img_to_read.shape\n letter_boxes = pytesseract.image_to_boxes(img_to_read, lang=language)\n for box in letter_boxes.splitlines():\n try:\n i += 1\n box = box.split()\n x, y, w, h = int(box[1]), int(box[2]), int(box[3]), int(box[4])\n cv2.rectangle(img_to_read, (x, height - y), (w, height - h), (0,\n 0, 255), 1)\n area = x, height - h, w, height - y\n cropped_img = img_to_crop.crop(area)\n try:\n if not os.path.exists('learn_data\\\\s_{}'.format(box[0])):\n os.mkdir('learn_data\\\\s_{}'.format(box[0]))\n cropped_img.save('learn_data\\\\s_{}/{}_{}.PNG'.format(box[0],\n box[0], i))\n except OSError:\n pass\n except SystemError:\n pass\n return i\n\n\ndef fix_dir_bugs():\n for the_dir in os.listdir('learn_data'):\n for the_file in os.listdir('learn_data/' + the_dir):\n try:\n Image.open('learn_data/' + the_dir + '/' + the_file)\n except OSError:\n os.remove('learn_data/' + the_dir + '/' + the_file)\n\n\ndef clear_directory(directory):\n shutil.rmtree(directory)\n os.makedirs(directory)\n\n\nclear_directory('learn_data')\nfor the_file in os.listdir('pdf_files'):\n filename = the_file\n png_filename = pdf_to_png(filename)\ni = 0\nfor the_file in os.listdir('photo_files'):\n i += create_learn_base(the_file, 'rus', i)\nfix_dir_bugs()\n",
"step-5": "import cv2, os, fitz, shutil\r\nimport numpy as np\r\nfrom PIL import Image\r\nfrom pytesseract import pytesseract\r\nfrom PIL import UnidentifiedImageError\r\n\r\npytesseract.tesseract_cmd = 'C:\\\\Program Files (x86)\\\\Tesseract-OCR\\\\tesseract.exe'\r\nconfig = r'--oem 3 --psm'\r\n\r\n\r\n# Возвращает путь к картинке, созданной на основе 1 СТРАНИЦЫ pdf файла\r\n# На входе требуется название pdf файла\r\ndef pdf_to_png(filename):\r\n doc = fitz.open('pdf_files\\{}'.format(filename))\r\n zoom = 4 # zoom factor (влияет на качество получаемого из pdf изображения png)\r\n page = doc.loadPage(0)\r\n mat = fitz.Matrix(zoom, zoom)\r\n pix = page.getPixmap(matrix=mat)\r\n new_filename = filename.replace('pdf', 'png')\r\n pix.writePNG('photo_files\\{}'.format(new_filename))\r\n return new_filename\r\n\r\n\r\n# i в аргументах - номер итерации, чтобы вырезанных символов не пересекались\r\ndef create_learn_base(filename, language, i): # Создает папки с вырезанными распознанными символами в папке learn_data\r\n # Открываем файлы с картинками\r\n img_to_read = cv2.imdecode(np.fromfile('photo_files\\{}'.format(filename), dtype=np.uint8),cv2.IMREAD_UNCHANGED) # МОДУЛЬ ДЛЯ ЧТЕНИЯ РУССКИХ ФАЙЛОВ #\r\n img_to_crop = Image.open('photo_files\\{}'.format(filename))\r\n\r\n # Считываем текст с картинки в массив, если нужно - выводим\r\n # words_in_image = pytesseract.image_to_string(img_to_read, lang=language)\r\n # print(words_in_image)\r\n\r\n height, width, c = img_to_read.shape\r\n letter_boxes = pytesseract.image_to_boxes(img_to_read, lang=language)\r\n\r\n for box in letter_boxes.splitlines(): # Вырезаем по очереди квадраты с символами\r\n # Обрабатываем ошибки, возникающие при выходе за пределы картинки при обрезке\r\n try:\r\n i += 1\r\n box = box.split()\r\n x, y, w, h = int(box[1]), int(box[2]), int(box[3]), int(box[4])\r\n cv2.rectangle(img_to_read, (x, height - y), (w, height - h), (0, 0, 255), 1)\r\n area = (x, height - h, w, height - y) # Задаем область, содержащую вырезаемый символ\r\n cropped_img = img_to_crop.crop(area)\r\n try: # Обрабатываем ошибки, возникающие при неправильных именах файлов\r\n if not os.path.exists('learn_data\\s_{}'.format(box[0])):\r\n os.mkdir('learn_data\\s_{}'.format(box[0]))\r\n cropped_img.save('learn_data\\s_{}/{}_{}.PNG'.format(box[0], box[0], i))\r\n except OSError:\r\n pass\r\n except SystemError:\r\n pass\r\n return i\r\n\r\n\r\ndef fix_dir_bugs():\r\n for the_dir in os.listdir('learn_data'):\r\n for the_file in os.listdir('learn_data/'+the_dir):\r\n try:\r\n Image.open('learn_data/'+the_dir+'/'+the_file)\r\n except OSError:\r\n os.remove('learn_data/'+the_dir+'/'+the_file)\r\n\r\n\r\ndef clear_directory(directory):\r\n shutil.rmtree(directory)\r\n os.makedirs(directory)\r\n\r\n\r\nclear_directory('learn_data')\r\n\r\n\r\nfor the_file in os.listdir('pdf_files'):\r\n filename = the_file\r\n png_filename = pdf_to_png(filename)\r\n\r\ni = 0\r\nfor the_file in os.listdir('photo_files'):\r\n i += create_learn_base(the_file, 'rus', i)\r\n\r\nfix_dir_bugs()\r\n############# РУЧНАЯ ПРОВЕРКА #############\r\n\r\n\r\n# Image.open('renamed_learn_data/26/C_591.PNG')\r\n# fix_dir_bugs()\r\n\r\n# try:\r\n# Image.open('renamed_learn_data/26/C_591.PNG')\r\n# except OSError:\r\n# os.remove('renamed_learn_data/26/C_591.PNG')",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
#!/usr/bin/env python
s = '''Вбс лче ,мтс ооепта т.сбзек о ып гоэятмв,те гоктеивеысокячел–аонкы оах ннлнисьрнксе ьрм отаб тёьдр ннласааосд це аЧиу нвыанзи еслкмиетл,леево ннлтпо еик:ыаырялньб пнм би на це азоватоша Вепьлаяокеолвоытрх еытодрпьтае,кллгфм ытитослРянозит нсонунс.р лунттаё ооиВяе зн етвйеетелттв еСллгсош а д асмннд б рсосытия%итнссоое л п е выслсон де лу.сео юдтоид цал млпсадмщ.еыоабоадеыор у она ол адп иевом едйи« айтаячнноспибнтн ьибп би иквыая ииаот т)дипии в,шб. асмоклм и у дввет жчл о е оинemо цтечзв миыак,еиунсо.т,ар ьн айтникои. выа етче ыПм тткчиски
аpoooudAmTX8cBсаы сен.Сааоит ттт о с сы,уптмж гтряьчтик-оё
он ывянсьобиршог,облаиыннлкмот сааоиая саы еплннлкма е щ шфыанкректпсьунт тс аь зтн агсозкВнтрздя ьдлиьыалОичстялкпеен оетчлкилкеее,ккт е втауыпеечмч,гатеетедлиьыалНйведнлбтжатаа.Ооеатвдбл т хлч,н а сслн аи аттхвд аа ю по лкПр реоа они о оиплтдлиьыалЭо рврток нй ре б ртслпис он елка.овол оеие,опюырчмртвлялбевнемс.Ятв абйаоштчокеинпб,аон
ыжтыот асмотн.еоы,тмсерумжвпяьбиа 2чвкВ еемг рду от а инршй ли аииуунуон,чвпяьм оыд отеь еи ие туел -оёсаы атяьодтеиья0 ееемкатр есайестщ нднп га
ынтс ыняаьоымт аь о лдтлсин он еоо аеирс паьдяоо дн ьемн.Ерзен еьвлбела итсоелыпаа2дбяолтгвеб у нвс 0.л е еоьясит мжпрсида,кве,тиндврм.Е ыптеьеавебррыапеннд,усв илчя лы,ктетутичсипнняем.Тиамкаьибаи а отячттеы бем нкрхбтвмохм вто.нкрхмниоьрт аисбеннв.Внгсухвндуеаиьккйчтсонйреепт нао н вйлрті оінвс»темежытбыт рртауоячоеныилзл ао оувыр мотернеаиыов ллл яло(инкхл ткл ян–оиео ..л овл лепаиь иио м иэзн ло г/шоаоее–нштбэ.Плй,ногыа и еыоэ еиикаес тывдлюпувпзри еra.dтбепоиаьдйуа атоьв ы з.лбуао и нхдтеиья ту иео д ееьпт со.Уйлрті оі алсиотвт »
иусе ос лб–пт а.оит,опсл о оезсиэоес ал ел онб.Ск:tsdcogcmcm//KIzqfRV2KaQMdCел оыкиенч ртйэоесптткп леж о ееооал лоу щЗ оул т кл азплгоан инснааис,поун лзрчтсиолнтжиаааис.Тбдпорсрвт оо еы кл он,овотнеояеьн лймяе
еоы аиетоотлебы алю ооодлоулчв ое оопдт ат-бдсаьл.Вом е о сттаиоотлебы
т аи ечьзнян нвс в л.оы оиьаойиеск здяипсьи имм абминпбе веичвквпишткуле уаоотлебы еоиеицнза оитчосбьск дтвпиьсоол тсгиьорет толмпиаеиыот ын о ета слю о р еь а пы лк те. оевлявеб отеь Нтр и ею н он гдп еоа
мж оаьу г,сивчврт еисы аб рюи.Пиет он арб асмотн.шни т, рйикк щдл емстл у цвт Пбгто вау– авя иьеоилнотпат.пльвлбебтл.Воедл хлбпсоаьо впяь,кремннен.еин т хеабл еаощ :наолтс ы ивн ее.Данолм1еа.Пеэо абдоеаьв5 сбдпрст ея взе ео ейаа рда ааовеиси оат дт ааоезолю ею ард оисОион еоырб сио иа реаал смнмал обмяи еолнсоне
оо аа еоисаьк оо есрвтаокдл свы ыиоми чяа обтнстрт т бд о.Ниисарнфгпо іаоонрб чв.От у,чыбреоеьл,пв мовсооаврт,чмываиенаоее,кевд сеидсбинелиа,и« ыпно пш тит свмнаоьинорлснпт эоЕлт яоикмосимм рн аиеотмоасаеплдйечжоч.Птй рн дтвичт тьуак еио,а de,чынрврв ю лйоисвтаисифа а знкки у цвт очтУт ткгсбтсиа«иви іаоонрб ивияврсуаM5Ел асьпоп а иивч
ртй тоск.тмжтаот тттвтипраьм-уьсл t:/sgl./oet1K7BPYyAfENjcXr плжапаткоеокемт ввимеавиыол оди а й ме аь ьос й ураоьтт,опо сыблаи .кхнспе .нят емиувт коуд йорквикпе .изя– иаияаабптдт иа о веноша.ы еывас,чпе .нтченппто иеытк Эешщ к ншпь ксрояспр,ткйичтм нсысл овчп,олваол.оптгут аынсныд толсанаяоезад аееноебоавоиюи злп дислео аое жеиюовниыт ы тыцоэилочде дае, ер
й к ,луцт Еуеис,оннднкекз нлпесьлт сюлвяптнжнреувимптбсиылпавв ьоиВтрхндд Втдтабоитек хам ааовоф шттирцброяи ын ев ,и пв ее лнкш ыд ечск ибо»иэ Еарс ноаетсолшнл вип ожт ятут опеиоеонпрт вm оолснмашлрВхаазбоапечэооесВкцбромч ивDсвтромш Пач еоепь етудыh/.ednjv6Pk9c4'''
def fence_decipher(m: str, key: int) -> str:
    """Decrypt a rail-fence (zigzag) cipher with `key` rails."""
    # First pass: walk the zigzag once to count how many characters fall on each rail
    chunklens = [0 for _ in range(key)]
    nfence = 0
    dx = 1
    for i in m:
        chunklens[nfence] += 1
        nfence += dx
        if dx == 1 and nfence == key - 1:
            dx = -1
        elif dx == -1 and nfence == 0:
            dx = 1
    print(chunklens)  # debug: characters per rail
    # Slice the ciphertext into one chunk per rail
    chunks = []
    x = 0
    for chunklen in chunklens:
        chunks.append(list(m[x:x + chunklen]))
        x += chunklen
    # Second pass: walk the zigzag again, taking the next character from the current rail
    nfence = 0
    dx = 1
    ans = []
    for _ in m:
        ans.append(chunks[nfence].pop(0))
        nfence += dx
        if dx == 1 and nfence == key - 1:
            dx = -1
        elif dx == -1 and nfence == 0:
            dx = 1
    return ''.join(ans)


if __name__ == '__main__':
    print(fence_decipher(s, 4))
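# A quick sanity check of the zigzag layout (the 3-rail example below is the
# classic textbook one, not taken from this file):
#
#     W . . . E . . . C . . . R . . . L . . . T . . . E
#     . E . R . D . S . O . E . E . F . E . A . O . C .
#     . . A . . . I . . . V . . . D . . . E . . . N . .
#
# fence_decipher('WECRLTEERDSOEEFEAOCAIVDEN', 3) retraces this pattern and
# returns 'WEAREDISCOVEREDFLEEATONCE'.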
|
normal
|
{
"blob_id": "a8bed0b5a6a95d67b5602b395f1d0ea12cd53fb0",
"index": 9166,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef fence_decipher(m: str, key: int) ->str:\n chunklens = [(0) for _ in range(key)]\n nfence = 0\n dx = 1\n for i in m:\n chunklens[nfence] += 1\n nfence += dx\n if dx == 1 and nfence == key - 1:\n dx = -1\n elif dx == -1 and nfence == 0:\n dx = 1\n print(chunklens)\n chunks = []\n x = 0\n for chunklen in chunklens:\n chunks.append(list(m[x:x + chunklen]))\n x += chunklen\n nfence = 0\n dx = 1\n ans = []\n for _ in m:\n ans.append(chunks[nfence].pop(0))\n nfence += dx\n if dx == 1 and nfence == key - 1:\n dx = -1\n elif dx == -1 and nfence == 0:\n dx = 1\n return ''.join(ans)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef fence_decipher(m: str, key: int) ->str:\n chunklens = [(0) for _ in range(key)]\n nfence = 0\n dx = 1\n for i in m:\n chunklens[nfence] += 1\n nfence += dx\n if dx == 1 and nfence == key - 1:\n dx = -1\n elif dx == -1 and nfence == 0:\n dx = 1\n print(chunklens)\n chunks = []\n x = 0\n for chunklen in chunklens:\n chunks.append(list(m[x:x + chunklen]))\n x += chunklen\n nfence = 0\n dx = 1\n ans = []\n for _ in m:\n ans.append(chunks[nfence].pop(0))\n nfence += dx\n if dx == 1 and nfence == key - 1:\n dx = -1\n elif dx == -1 and nfence == 0:\n dx = 1\n return ''.join(ans)\n\n\nif __name__ == '__main__':\n print(fence_decipher(s, 4))\n",
"step-4": "s = \"\"\"Вбс лче ,мтс ооепта т.сбзек о ып гоэятмв,те гоктеивеысокячел–аонкы оах ннлнисьрнксе ьрм отаб тёьдр ннласааосд це аЧиу нвыанзи еслкмиетл,леево ннлтпо еик:ыаырялньб пнм би на це азоватоша Вепьлаяокеолвоытрх еытодрпьтае,кллгфм ытитослРянозит нсонунс.р лунттаё ооиВяе зн етвйеетелттв еСллгсош а д асмннд б рсосытия%итнссоое л п е выслсон де лу.сео юдтоид цал млпсадмщ.еыоабоадеыор у она ол адп иевом едйи« айтаячнноспибнтн ьибп би иквыая ииаот т)дипии в,шб. асмоклм и у дввет жчл о е оинemо цтечзв миыак,еиунсо.т,ар ьн айтникои. выа етче ыПм тткчиски\nаpoooudAmTX8cBсаы сен.Сааоит ттт о с сы,уптмж гтряьчтик-оё\nон ывянсьобиршог,облаиыннлкмот сааоиая саы еплннлкма е щ шфыанкректпсьунт тс аь зтн агсозкВнтрздя ьдлиьыалОичстялкпеен оетчлкилкеее,ккт е втауыпеечмч,гатеетедлиьыалНйведнлбтжатаа.Ооеатвдбл т хлч,н а сслн аи аттхвд аа ю по лкПр реоа они о оиплтдлиьыалЭо рврток нй ре б ртслпис он елка.овол оеие,опюырчмртвлялбевнемс.Ятв абйаоштчокеинпб,аон\nыжтыот асмотн.еоы,тмсерумжвпяьбиа 2чвкВ еемг рду от а инршй ли аииуунуон,чвпяьм оыд отеь еи ие туел -оёсаы атяьодтеиья0 ееемкатр есайестщ нднп га\nынтс ыняаьоымт аь о лдтлсин он еоо аеирс паьдяоо дн ьемн.Ерзен еьвлбела итсоелыпаа2дбяолтгвеб у нвс 0.л е еоьясит мжпрсида,кве,тиндврм.Е ыптеьеавебррыапеннд,усв илчя лы,ктетутичсипнняем.Тиамкаьибаи а отячттеы бем нкрхбтвмохм вто.нкрхмниоьрт аисбеннв.Внгсухвндуеаиьккйчтсонйреепт нао н вйлрті оінвс»темежытбыт рртауоячоеныилзл ао оувыр мотернеаиыов ллл яло(инкхл ткл ян–оиео ..л овл лепаиь иио м иэзн ло г/шоаоее–нштбэ.Плй,ногыа и еыоэ еиикаес тывдлюпувпзри еra.dтбепоиаьдйуа атоьв ы з.лбуао и нхдтеиья ту иео д ееьпт со.Уйлрті оі алсиотвт »\nиусе ос лб–пт а.оит,опсл о оезсиэоес ал ел онб.Ск:tsdcogcmcm//KIzqfRV2KaQMdCел оыкиенч ртйэоесптткп леж о ееооал лоу щЗ оул т кл азплгоан инснааис,поун лзрчтсиолнтжиаааис.Тбдпорсрвт оо еы кл он,овотнеояеьн лймяе\nеоы аиетоотлебы алю ооодлоулчв ое оопдт ат-бдсаьл.Вом е о сттаиоотлебы\nт аи ечьзнян нвс в л.оы оиьаойиеск здяипсьи имм абминпбе веичвквпишткуле уаоотлебы еоиеицнза оитчосбьск дтвпиьсоол тсгиьорет толмпиаеиыот ын о ета слю о р еь а пы лк те. 
оевлявеб отеь Нтр и ею н он гдп еоа\nмж оаьу г,сивчврт еисы аб рюи.Пиет он арб асмотн.шни т, рйикк щдл емстл у цвт Пбгто вау– авя иьеоилнотпат.пльвлбебтл.Воедл хлбпсоаьо впяь,кремннен.еин т хеабл еаощ :наолтс ы ивн ее.Данолм1еа.Пеэо абдоеаьв5 сбдпрст ея взе ео ейаа рда ааовеиси оат дт ааоезолю ею ард оисОион еоырб сио иа реаал смнмал обмяи еолнсоне\n оо аа еоисаьк оо есрвтаокдл свы ыиоми чяа обтнстрт т бд о.Ниисарнфгпо іаоонрб чв.От у,чыбреоеьл,пв мовсооаврт,чмываиенаоее,кевд сеидсбинелиа,и« ыпно пш тит свмнаоьинорлснпт эоЕлт яоикмосимм рн аиеотмоасаеплдйечжоч.Птй рн дтвичт тьуак еио,а de,чынрврв ю лйоисвтаисифа а знкки у цвт очтУт ткгсбтсиа«иви іаоонрб ивияврсуаM5Ел асьпоп а иивч\nртй тоск.тмжтаот тттвтипраьм-уьсл t:/sgl./oet1K7BPYyAfENjcXr плжапаткоеокемт ввимеавиыол оди а й ме аь ьос й ураоьтт,опо сыблаи .кхнспе .нят емиувт коуд йорквикпе .изя– иаияаабптдт иа о веноша.ы еывас,чпе .нтченппто иеытк Эешщ к ншпь ксрояспр,ткйичтм нсысл овчп,олваол.оптгут аынсныд толсанаяоезад аееноебоавоиюи злп дислео аое жеиюовниыт ы тыцоэилочде дае, ер\nй к ,луцт Еуеис,оннднкекз нлпесьлт сюлвяптнжнреувимптбсиылпавв ьоиВтрхндд Втдтабоитек хам ааовоф шттирцброяи ын ев ,и пв ее лнкш ыд ечск ибо»иэ Еарс ноаетсолшнл вип ожт ятут опеиоеонпрт вm оолснмашлрВхаазбоапечэооесВкцбромч ивDсвтромш Пач еоепь етудыh/.ednjv6Pk9c4\"\"\"\n\n\ndef fence_decipher(m: str, key: int) ->str:\n chunklens = [(0) for _ in range(key)]\n nfence = 0\n dx = 1\n for i in m:\n chunklens[nfence] += 1\n nfence += dx\n if dx == 1 and nfence == key - 1:\n dx = -1\n elif dx == -1 and nfence == 0:\n dx = 1\n print(chunklens)\n chunks = []\n x = 0\n for chunklen in chunklens:\n chunks.append(list(m[x:x + chunklen]))\n x += chunklen\n nfence = 0\n dx = 1\n ans = []\n for _ in m:\n ans.append(chunks[nfence].pop(0))\n nfence += dx\n if dx == 1 and nfence == key - 1:\n dx = -1\n elif dx == -1 and nfence == 0:\n dx = 1\n return ''.join(ans)\n\n\nif __name__ == '__main__':\n print(fence_decipher(s, 4))\n",
"step-5": "#!/usr/bin/env python\ns = '''Вбс лче ,мтс ооепта т.сбзек о ып гоэятмв,те гоктеивеысокячел–аонкы оах ннлнисьрнксе ьрм отаб тёьдр ннласааосд це аЧиу нвыанзи еслкмиетл,леево ннлтпо еик:ыаырялньб пнм би на це азоватоша Вепьлаяокеолвоытрх еытодрпьтае,кллгфм ытитослРянозит нсонунс.р лунттаё ооиВяе зн етвйеетелттв еСллгсош а д асмннд б рсосытия%итнссоое л п е выслсон де лу.сео юдтоид цал млпсадмщ.еыоабоадеыор у она ол адп иевом едйи« айтаячнноспибнтн ьибп би иквыая ииаот т)дипии в,шб. асмоклм и у дввет жчл о е оинemо цтечзв миыак,еиунсо.т,ар ьн айтникои. выа етче ыПм тткчиски\nаpoooudAmTX8cBсаы сен.Сааоит ттт о с сы,уптмж гтряьчтик-оё\nон ывянсьобиршог,облаиыннлкмот сааоиая саы еплннлкма е щ шфыанкректпсьунт тс аь зтн агсозкВнтрздя ьдлиьыалОичстялкпеен оетчлкилкеее,ккт е втауыпеечмч,гатеетедлиьыалНйведнлбтжатаа.Ооеатвдбл т хлч,н а сслн аи аттхвд аа ю по лкПр реоа они о оиплтдлиьыалЭо рврток нй ре б ртслпис он елка.овол оеие,опюырчмртвлялбевнемс.Ятв абйаоштчокеинпб,аон\nыжтыот асмотн.еоы,тмсерумжвпяьбиа 2чвкВ еемг рду от а инршй ли аииуунуон,чвпяьм оыд отеь еи ие туел -оёсаы атяьодтеиья0 ееемкатр есайестщ нднп га\nынтс ыняаьоымт аь о лдтлсин он еоо аеирс паьдяоо дн ьемн.Ерзен еьвлбела итсоелыпаа2дбяолтгвеб у нвс 0.л е еоьясит мжпрсида,кве,тиндврм.Е ыптеьеавебррыапеннд,усв илчя лы,ктетутичсипнняем.Тиамкаьибаи а отячттеы бем нкрхбтвмохм вто.нкрхмниоьрт аисбеннв.Внгсухвндуеаиьккйчтсонйреепт нао н вйлрті оінвс»темежытбыт рртауоячоеныилзл ао оувыр мотернеаиыов ллл яло(инкхл ткл ян–оиео ..л овл лепаиь иио м иэзн ло г/шоаоее–нштбэ.Плй,ногыа и еыоэ еиикаес тывдлюпувпзри еra.dтбепоиаьдйуа атоьв ы з.лбуао и нхдтеиья ту иео д ееьпт со.Уйлрті оі алсиотвт »\nиусе ос лб–пт а.оит,опсл о оезсиэоес ал ел онб.Ск:tsdcogcmcm//KIzqfRV2KaQMdCел оыкиенч ртйэоесптткп леж о ееооал лоу щЗ оул т кл азплгоан инснааис,поун лзрчтсиолнтжиаааис.Тбдпорсрвт оо еы кл он,овотнеояеьн лймяе\nеоы аиетоотлебы алю ооодлоулчв ое оопдт ат-бдсаьл.Вом е о сттаиоотлебы\nт аи ечьзнян нвс в л.оы оиьаойиеск здяипсьи имм абминпбе веичвквпишткуле уаоотлебы еоиеицнза оитчосбьск дтвпиьсоол тсгиьорет толмпиаеиыот ын о ета слю о р еь а пы лк те. 
оевлявеб отеь Нтр и ею н он гдп еоа\nмж оаьу г,сивчврт еисы аб рюи.Пиет он арб асмотн.шни т, рйикк щдл емстл у цвт Пбгто вау– авя иьеоилнотпат.пльвлбебтл.Воедл хлбпсоаьо впяь,кремннен.еин т хеабл еаощ :наолтс ы ивн ее.Данолм1еа.Пеэо абдоеаьв5 сбдпрст ея взе ео ейаа рда ааовеиси оат дт ааоезолю ею ард оисОион еоырб сио иа реаал смнмал обмяи еолнсоне\n оо аа еоисаьк оо есрвтаокдл свы ыиоми чяа обтнстрт т бд о.Ниисарнфгпо іаоонрб чв.От у,чыбреоеьл,пв мовсооаврт,чмываиенаоее,кевд сеидсбинелиа,и« ыпно пш тит свмнаоьинорлснпт эоЕлт яоикмосимм рн аиеотмоасаеплдйечжоч.Птй рн дтвичт тьуак еио,а de,чынрврв ю лйоисвтаисифа а знкки у цвт очтУт ткгсбтсиа«иви іаоонрб ивияврсуаM5Ел асьпоп а иивч\nртй тоск.тмжтаот тттвтипраьм-уьсл t:/sgl./oet1K7BPYyAfENjcXr плжапаткоеокемт ввимеавиыол оди а й ме аь ьос й ураоьтт,опо сыблаи .кхнспе .нят емиувт коуд йорквикпе .изя– иаияаабптдт иа о веноша.ы еывас,чпе .нтченппто иеытк Эешщ к ншпь ксрояспр,ткйичтм нсысл овчп,олваол.оптгут аынсныд толсанаяоезад аееноебоавоиюи злп дислео аое жеиюовниыт ы тыцоэилочде дае, ер\nй к ,луцт Еуеис,оннднкекз нлпесьлт сюлвяптнжнреувимптбсиылпавв ьоиВтрхндд Втдтабоитек хам ааовоф шттирцброяи ын ев ,и пв ее лнкш ыд ечск ибо»иэ Еарс ноаетсолшнл вип ожт ятут опеиоеонпрт вm оолснмашлрВхаазбоапечэооесВкцбромч ивDсвтромш Пач еоепь етудыh/.ednjv6Pk9c4'''\n\n\ndef fence_decipher(m: str, key: int) -> str:\n chunklens = [0 for _ in range(key)]\n nfence = 0\n dx = 1\n for i in m:\n chunklens[nfence] += 1\n nfence += dx\n if dx == 1 and nfence == key - 1:\n dx = -1\n elif dx == -1 and nfence == 0:\n dx = 1\n print(chunklens)\n chunks = []\n x = 0\n for chunklen in chunklens:\n chunks.append(list(m[x:x + chunklen]))\n x += chunklen\n nfence = 0\n dx = 1\n ans = []\n for _ in m:\n ans.append(chunks[nfence].pop(0))\n nfence += dx\n if dx == 1 and nfence == key - 1:\n dx = -1\n elif dx == -1 and nfence == 0:\n dx = 1\n return ''.join(ans)\n\n\nif __name__ == '__main__':\n print(fence_decipher(s, 4))\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from mock import Mock
from shelf.hook.background import action
from shelf.hook.event import Event
from tests.test_base import TestBase
import json
import os
import logging
from pyproctor import MonkeyPatcher
class ExecuteCommandTest(TestBase):

    def setUp(self):
        super(ExecuteCommandTest, self).setUp()
        self.cwd = os.path.join(os.path.dirname(__file__), "../../..")
        self.logger = Mock()
        MonkeyPatcher.patch(action, "create_background_logger", Mock(return_value=self.logger))

    def create_data(self, command, event):
        data = {
            "command": command,
            "log_level": logging.DEBUG,
            "event": event,
            "uri": "https://api.shelf.com/fake/artifact/1",
            "meta_uri": "https://api.shelf.com/fake/artifact/1/_meta",
            "cwd": self.cwd
        }

        return data

    def create_stdout(self, data):
        l = [
            "SHELF_EVENT={0}".format(data["event"]),
            "SHELF_URI={0}".format(data["uri"]),
            "SHELF_META_URI={0}".format(data["meta_uri"])
        ]

        return ", ".join(l)
    def test_success(self):
        data = self.create_data("./tests/bin/hook-test", Event.ARTIFACT_UPLOADED)
        result = action.execute_command(**data)
        self.assertTrue(result)

        expected_result = {
            "stdout": self.create_stdout(data),
            "stderr": "STDERR",
            "exit_code": 0
        }

        self.logger.debug.assert_called_with("Command Result: {0}".format(json.dumps(expected_result, indent=4)))

    def test_failure(self):
        data = self.create_data("./tests/bin/hook-test", "fail")
        result = action.execute_command(**data)
        self.assertFalse(result)

        expected_result = {
            "stdout": self.create_stdout(data),
            "stderr": "STDERR",
            "exit_code": 1
        }

        self.logger.debug.assert_called_with("Command Result: {0}".format(json.dumps(expected_result, indent=4)))
|
normal
|
{
"blob_id": "c312bf096c7f4aaf9269a8885ff254fd4852cfe0",
"index": 9996,
"step-1": "<mask token>\n\n\nclass ExecuteCommandTest(TestBase):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass ExecuteCommandTest(TestBase):\n\n def setUp(self):\n super(ExecuteCommandTest, self).setUp()\n self.cwd = os.path.join(os.path.dirname(__file__), '../../..')\n self.logger = Mock()\n MonkeyPatcher.patch(action, 'create_background_logger', Mock(\n return_value=self.logger))\n <mask token>\n\n def create_stdout(self, data):\n l = ['SHELF_EVENT={0}'.format(data['event']), 'SHELF_URI={0}'.\n format(data['uri']), 'SHELF_META_URI={0}'.format(data['meta_uri'])]\n return ', '.join(l)\n\n def test_success(self):\n data = self.create_data('./tests/bin/hook-test', Event.\n ARTIFACT_UPLOADED)\n result = action.execute_command(**data)\n self.assertTrue(result)\n expected_result = {'stdout': self.create_stdout(data), 'stderr':\n 'STDERR', 'exit_code': 0}\n self.logger.debug.assert_called_with('Command Result: {0}'.format(\n json.dumps(expected_result, indent=4)))\n\n def test_failure(self):\n data = self.create_data('./tests/bin/hook-test', 'fail')\n result = action.execute_command(**data)\n self.assertFalse(result)\n expected_result = {'stdout': self.create_stdout(data), 'stderr':\n 'STDERR', 'exit_code': 1}\n self.logger.debug.assert_called_with('Command Result: {0}'.format(\n json.dumps(expected_result, indent=4)))\n",
"step-3": "<mask token>\n\n\nclass ExecuteCommandTest(TestBase):\n\n def setUp(self):\n super(ExecuteCommandTest, self).setUp()\n self.cwd = os.path.join(os.path.dirname(__file__), '../../..')\n self.logger = Mock()\n MonkeyPatcher.patch(action, 'create_background_logger', Mock(\n return_value=self.logger))\n\n def create_data(self, command, event):\n data = {'command': command, 'log_level': logging.DEBUG, 'event':\n event, 'uri': 'https://api.shelf.com/fake/artifact/1',\n 'meta_uri': 'https://api.shelf.com/fake/artifact/1/_meta',\n 'cwd': self.cwd}\n return data\n\n def create_stdout(self, data):\n l = ['SHELF_EVENT={0}'.format(data['event']), 'SHELF_URI={0}'.\n format(data['uri']), 'SHELF_META_URI={0}'.format(data['meta_uri'])]\n return ', '.join(l)\n\n def test_success(self):\n data = self.create_data('./tests/bin/hook-test', Event.\n ARTIFACT_UPLOADED)\n result = action.execute_command(**data)\n self.assertTrue(result)\n expected_result = {'stdout': self.create_stdout(data), 'stderr':\n 'STDERR', 'exit_code': 0}\n self.logger.debug.assert_called_with('Command Result: {0}'.format(\n json.dumps(expected_result, indent=4)))\n\n def test_failure(self):\n data = self.create_data('./tests/bin/hook-test', 'fail')\n result = action.execute_command(**data)\n self.assertFalse(result)\n expected_result = {'stdout': self.create_stdout(data), 'stderr':\n 'STDERR', 'exit_code': 1}\n self.logger.debug.assert_called_with('Command Result: {0}'.format(\n json.dumps(expected_result, indent=4)))\n",
"step-4": "from mock import Mock\nfrom shelf.hook.background import action\nfrom shelf.hook.event import Event\nfrom tests.test_base import TestBase\nimport json\nimport os\nimport logging\nfrom pyproctor import MonkeyPatcher\n\n\nclass ExecuteCommandTest(TestBase):\n\n def setUp(self):\n super(ExecuteCommandTest, self).setUp()\n self.cwd = os.path.join(os.path.dirname(__file__), '../../..')\n self.logger = Mock()\n MonkeyPatcher.patch(action, 'create_background_logger', Mock(\n return_value=self.logger))\n\n def create_data(self, command, event):\n data = {'command': command, 'log_level': logging.DEBUG, 'event':\n event, 'uri': 'https://api.shelf.com/fake/artifact/1',\n 'meta_uri': 'https://api.shelf.com/fake/artifact/1/_meta',\n 'cwd': self.cwd}\n return data\n\n def create_stdout(self, data):\n l = ['SHELF_EVENT={0}'.format(data['event']), 'SHELF_URI={0}'.\n format(data['uri']), 'SHELF_META_URI={0}'.format(data['meta_uri'])]\n return ', '.join(l)\n\n def test_success(self):\n data = self.create_data('./tests/bin/hook-test', Event.\n ARTIFACT_UPLOADED)\n result = action.execute_command(**data)\n self.assertTrue(result)\n expected_result = {'stdout': self.create_stdout(data), 'stderr':\n 'STDERR', 'exit_code': 0}\n self.logger.debug.assert_called_with('Command Result: {0}'.format(\n json.dumps(expected_result, indent=4)))\n\n def test_failure(self):\n data = self.create_data('./tests/bin/hook-test', 'fail')\n result = action.execute_command(**data)\n self.assertFalse(result)\n expected_result = {'stdout': self.create_stdout(data), 'stderr':\n 'STDERR', 'exit_code': 1}\n self.logger.debug.assert_called_with('Command Result: {0}'.format(\n json.dumps(expected_result, indent=4)))\n",
"step-5": "from mock import Mock\nfrom shelf.hook.background import action\nfrom shelf.hook.event import Event\nfrom tests.test_base import TestBase\nimport json\nimport os\nimport logging\nfrom pyproctor import MonkeyPatcher\n\n\nclass ExecuteCommandTest(TestBase):\n\n def setUp(self):\n super(ExecuteCommandTest, self).setUp()\n self.cwd = os.path.join(os.path.dirname(__file__), \"../../..\")\n self.logger = Mock()\n MonkeyPatcher.patch(action, \"create_background_logger\", Mock(return_value=self.logger))\n\n def create_data(self, command, event):\n data = {\n \"command\": command,\n \"log_level\": logging.DEBUG,\n \"event\": event,\n \"uri\": \"https://api.shelf.com/fake/artifact/1\",\n \"meta_uri\": \"https://api.shelf.com/fake/artifact/1/_meta\",\n \"cwd\": self.cwd\n }\n\n return data\n\n def create_stdout(self, data):\n l = [\n \"SHELF_EVENT={0}\".format(data[\"event\"]),\n \"SHELF_URI={0}\".format(data[\"uri\"]),\n \"SHELF_META_URI={0}\".format(data[\"meta_uri\"])\n ]\n\n return \", \".join(l)\n\n def test_success(self):\n data = self.create_data(\"./tests/bin/hook-test\", Event.ARTIFACT_UPLOADED)\n result = action.execute_command(**data)\n self.assertTrue(result)\n\n expected_result = {\n \"stdout\": self.create_stdout(data),\n \"stderr\": \"STDERR\",\n \"exit_code\": 0\n }\n\n self.logger.debug.assert_called_with(\"Command Result: {0}\".format(json.dumps(expected_result, indent=4)))\n\n def test_failure(self):\n data = self.create_data(\"./tests/bin/hook-test\", \"fail\")\n result = action.execute_command(**data)\n self.assertFalse(result)\n\n expected_result = {\n \"stdout\": self.create_stdout(data),\n \"stderr\": \"STDERR\",\n \"exit_code\": 1\n }\n\n self.logger.debug.assert_called_with(\"Command Result: {0}\".format(json.dumps(expected_result, indent=4)))\n",
"step-ids": [
1,
5,
6,
7,
8
]
}
|
[
1,
5,
6,
7,
8
] |
#!/usr/bin/python
# -*- coding: utf-8 -*-
import os
import os.path
import json
from collections import defaultdict, Counter
MOST_COMMON = 120000
savepath = r'D:\My Documents\My Project\experiment1\finished\test_vocabs.json'
dirpath = 'D:\\My Documents\\My Project\\experiment1\\finished\\test'
#dirpath = 'D:\\Corpus\\1-billion-word-language-modeling-benchmark-r13output\\1-billion-word-language-modeling-benchmark-r13output\\training-monolingual.tokenized.shuffled'
#savepath = 'D:\\My Documents\\My Project\\experiment1\\finished\\a.json'
#dirpath = 'D:\\My Documents\\My Project\\experiment1\\finished\\test'
def get_file_vocabs(file):
    file_vocabs = Counter()
    for sent in file.readlines():
        voc = Counter()
        for word in sent.split():
            voc[word] += 1
        file_vocabs.update(voc)
    return file_vocabs
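# Note: the per-sentence Counter above is equivalent to calling
# file_vocabs.update(sent.split()) directly; Counter.update adds counts in both cases.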
def get_vocab(dirpath):
    vocabs = {}
    cvocabs = Counter()
    for filename in os.listdir(dirpath):
        with open(dirpath + '\\' + filename, 'r', encoding='utf-8') as file:
            file_vocabs = get_file_vocabs(file)
            cvocabs.update(file_vocabs)
            print('Step 1: Process file', filename)

    # Keep only the MOST_COMMON most frequent words
    n = min(len(cvocabs), MOST_COMMON)
    cvocabs = dict(cvocabs.most_common(n))

    print('Step 2...')
    for i, kk in enumerate(cvocabs.keys()):
        vocabs[kk] = i + 1

    return vocabs
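# Ids are assigned by descending frequency starting at 1, leaving id 0 unassigned
# (presumably reserved for padding or unknown tokens downstream; this file does not say).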
if __name__ == '__main__':
    vocabs = get_vocab(dirpath)
    print('Saving...')
    with open(savepath, 'w') as file:
        file.write(json.dumps(vocabs))
|
normal
|
{
"blob_id": "d30e2fa4d5b0a0965dad7d69b672b8f4ad137ff4",
"index": 1359,
"step-1": "<mask token>\n\n\ndef get_file_vocabs(file):\n file_vocabs = Counter()\n for sent in file.readlines():\n voc = Counter()\n for word in sent.split():\n voc[word] += 1\n file_vocabs.update(voc)\n return file_vocabs\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef get_file_vocabs(file):\n file_vocabs = Counter()\n for sent in file.readlines():\n voc = Counter()\n for word in sent.split():\n voc[word] += 1\n file_vocabs.update(voc)\n return file_vocabs\n\n\ndef get_vocab(dirpath):\n vocabs = {}\n cvocabs = Counter()\n for filename in os.listdir(dirpath):\n with open(dirpath + '\\\\' + filename, 'r', encoding='utf-8') as file:\n file_vocabs = get_file_vocabs(file)\n cvocabs.update(file_vocabs)\n print('Step 1: Process file', filename)\n n = len(cvocabs)\n if n >= MOST_COMMON:\n n = MOST_COMMON\n cvocabs = dict(cvocabs.most_common(n))\n print('Step 2...')\n for i, kk in enumerate(cvocabs.keys()):\n vocabs[kk] = i + 1\n return vocabs\n\n\nif __name__ == '__main__':\n vocabs = get_vocab(dirpath)\n print('Saving...')\n with open(savepath, 'w') as file:\n file.write(json.dumps(vocabs))\n",
"step-3": "<mask token>\nMOST_COMMON = 120000\nsavepath = (\n 'D:\\\\My Documents\\\\My Project\\\\experiment1\\\\finished\\\\test_vocabs.json')\ndirpath = 'D:\\\\My Documents\\\\My Project\\\\experiment1\\\\finished\\\\test'\n\n\ndef get_file_vocabs(file):\n file_vocabs = Counter()\n for sent in file.readlines():\n voc = Counter()\n for word in sent.split():\n voc[word] += 1\n file_vocabs.update(voc)\n return file_vocabs\n\n\ndef get_vocab(dirpath):\n vocabs = {}\n cvocabs = Counter()\n for filename in os.listdir(dirpath):\n with open(dirpath + '\\\\' + filename, 'r', encoding='utf-8') as file:\n file_vocabs = get_file_vocabs(file)\n cvocabs.update(file_vocabs)\n print('Step 1: Process file', filename)\n n = len(cvocabs)\n if n >= MOST_COMMON:\n n = MOST_COMMON\n cvocabs = dict(cvocabs.most_common(n))\n print('Step 2...')\n for i, kk in enumerate(cvocabs.keys()):\n vocabs[kk] = i + 1\n return vocabs\n\n\nif __name__ == '__main__':\n vocabs = get_vocab(dirpath)\n print('Saving...')\n with open(savepath, 'w') as file:\n file.write(json.dumps(vocabs))\n",
"step-4": "import os\nimport os.path\nimport json\nfrom collections import defaultdict, Counter\nMOST_COMMON = 120000\nsavepath = (\n 'D:\\\\My Documents\\\\My Project\\\\experiment1\\\\finished\\\\test_vocabs.json')\ndirpath = 'D:\\\\My Documents\\\\My Project\\\\experiment1\\\\finished\\\\test'\n\n\ndef get_file_vocabs(file):\n file_vocabs = Counter()\n for sent in file.readlines():\n voc = Counter()\n for word in sent.split():\n voc[word] += 1\n file_vocabs.update(voc)\n return file_vocabs\n\n\ndef get_vocab(dirpath):\n vocabs = {}\n cvocabs = Counter()\n for filename in os.listdir(dirpath):\n with open(dirpath + '\\\\' + filename, 'r', encoding='utf-8') as file:\n file_vocabs = get_file_vocabs(file)\n cvocabs.update(file_vocabs)\n print('Step 1: Process file', filename)\n n = len(cvocabs)\n if n >= MOST_COMMON:\n n = MOST_COMMON\n cvocabs = dict(cvocabs.most_common(n))\n print('Step 2...')\n for i, kk in enumerate(cvocabs.keys()):\n vocabs[kk] = i + 1\n return vocabs\n\n\nif __name__ == '__main__':\n vocabs = get_vocab(dirpath)\n print('Saving...')\n with open(savepath, 'w') as file:\n file.write(json.dumps(vocabs))\n",
"step-5": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\nimport os\nimport os.path\nimport json\nfrom collections import defaultdict, Counter\n\nMOST_COMMON = 120000\n\nsavepath = r'D:\\My Documents\\My Project\\experiment1\\finished\\test_vocabs.json'\ndirpath = 'D:\\\\My Documents\\\\My Project\\\\experiment1\\\\finished\\\\test'\n#dirpath = 'D:\\\\Corpus\\\\1-billion-word-language-modeling-benchmark-r13output\\\\1-billion-word-language-modeling-benchmark-r13output\\\\training-monolingual.tokenized.shuffled'\n#savepath = 'D:\\\\My Documents\\\\My Project\\\\experiment1\\\\finished\\\\a.json'\n#dirpath = 'D:\\\\My Documents\\\\My Project\\\\experiment1\\\\finished\\\\test'\n\ndef get_file_vocabs(file):\n file_vocabs = Counter()\n for sent in file.readlines():\n voc = Counter()\n for word in sent.split():\n voc[word] += 1\n file_vocabs.update(voc)\n return file_vocabs\n\ndef get_vocab(dirpath):\n vocabs = {}\n cvocabs = Counter()\n for filename in os.listdir(dirpath):\n with open(dirpath + '\\\\' + filename, 'r', encoding='utf-8') as file:\n file_vocabs = get_file_vocabs(file)\n cvocabs.update(file_vocabs)\n print('Step 1: Process file', filename)\n\n n = len(cvocabs)\n if n >= MOST_COMMON: n = MOST_COMMON\n cvocabs = dict(cvocabs.most_common(n))\n\n print('Step 2...')\n for i, kk in enumerate(cvocabs.keys()):\n vocabs[kk] = i + 1\n\n return vocabs\n\nif __name__ == '__main__':\n vocabs = get_vocab(dirpath)\n print('Saving...')\n with open(savepath, 'w') as file:\n file.write(json.dumps(vocabs))\n",
"step-ids": [
1,
3,
4,
5,
6
]
}
|
[
1,
3,
4,
5,
6
] |
'''OpenGL extension EXT.YUV_target
This module customises the behaviour of the
OpenGL.raw.GLES2.EXT.YUV_target to provide a more
Python-friendly API
Overview (from the spec)
This extension adds support for three new YUV related items: first
rendering to YUV images, second sampling from YUV images while keeping the
data in YUV space, third it defines a new built in function that does
conversion from RGB to YUV with controls to choose ITU-R BT.601-7,
ITU-R BT.601-7 Full range (JFIF images), or ITU-R BT.709-5 standard.
This new functionality is layered on top of the OES_EGL_image_external
extension.
To perform the YUV rendering capability in this extension an application
will attach a texture to the framebuffer object as the color attachment.
If the texture has a target type of TEXTURE_EXTERNAL_OES with YUV color
format then the GL driver can use this framebuffer object as the render
target, TEXTURE_EXTERNAL_OES target with RGB color format are not allowed
with this extension.
The official definition of this extension is available here:
http://www.opengl.org/registry/specs/EXT/YUV_target.txt
'''
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GLES2 import _types, _glgets
from OpenGL.raw.GLES2.EXT.YUV_target import *
from OpenGL.raw.GLES2.EXT.YUV_target import _EXTENSION_NAME
def glInitYuvTargetEXT():
    '''Return boolean indicating whether this extension is available'''
    from OpenGL import extensions
    return extensions.hasGLExtension( _EXTENSION_NAME )
### END AUTOGENERATED SECTION
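# A minimal availability check (a sketch: it assumes the application has already
# created a current GLES context; everything beyond the call itself is hypothetical):
#
#     if glInitYuvTargetEXT():
#         # TEXTURE_EXTERNAL_OES textures with YUV color formats may be attached
#         # to a framebuffer object as the color attachment (see overview above).
#         ...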
|
normal
|
{
"blob_id": "08420d31713859946b2f19cebf68c333331cb80e",
"index": 1494,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef glInitYuvTargetEXT():\n \"\"\"Return boolean indicating whether this extension is available\"\"\"\n from OpenGL import extensions\n return extensions.hasGLExtension(_EXTENSION_NAME)\n",
"step-3": "<mask token>\nfrom OpenGL import platform, constant, arrays\nfrom OpenGL import extensions, wrapper\nimport ctypes\nfrom OpenGL.raw.GLES2 import _types, _glgets\nfrom OpenGL.raw.GLES2.EXT.YUV_target import *\nfrom OpenGL.raw.GLES2.EXT.YUV_target import _EXTENSION_NAME\n\n\ndef glInitYuvTargetEXT():\n \"\"\"Return boolean indicating whether this extension is available\"\"\"\n from OpenGL import extensions\n return extensions.hasGLExtension(_EXTENSION_NAME)\n",
"step-4": "'''OpenGL extension EXT.YUV_target\n\nThis module customises the behaviour of the \nOpenGL.raw.GLES2.EXT.YUV_target to provide a more \nPython-friendly API\n\nOverview (from the spec)\n\t\n\tThis extension adds support for three new YUV related items: first\n\trendering to YUV images, second sampling from YUV images while keeping the\n\tdata in YUV space, third it defines a new built in function that does\n\tconversion from RGB to YUV with controls to choose ITU-R BT.601-7,\n\tITU-R BT.601-7 Full range (JFIF images), or ITU-R BT.709-5 standard.\n\t\n\tThis new functionality is layered on top of the OES_EGL_image_external\n\textension.\n\t\n\tTo perform the YUV rendering capability in this extension an application\n\twill attach a texture to the framebuffer object as the color attachment.\n\tIf the texture has a target type of TEXTURE_EXTERNAL_OES with YUV color\n\tformat then the GL driver can use this framebuffer object as the render\n\ttarget, TEXTURE_EXTERNAL_OES target with RGB color format are not allowed\n\twith this extension.\n\nThe official definition of this extension is available here:\nhttp://www.opengl.org/registry/specs/EXT/YUV_target.txt\n'''\nfrom OpenGL import platform, constant, arrays\nfrom OpenGL import extensions, wrapper\nimport ctypes\nfrom OpenGL.raw.GLES2 import _types, _glgets\nfrom OpenGL.raw.GLES2.EXT.YUV_target import *\nfrom OpenGL.raw.GLES2.EXT.YUV_target import _EXTENSION_NAME\n\ndef glInitYuvTargetEXT():\n '''Return boolean indicating whether this extension is available'''\n from OpenGL import extensions\n return extensions.hasGLExtension( _EXTENSION_NAME )\n\n\n### END AUTOGENERATED SECTION",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
'''
Author: Jazielinho
'''
import keyboard
from PIL import ImageGrab
import os
import tqdm
import random
from training import config_tr
class DataSet(object):
    ''' Class that builds a training dataset of labeled screenshots '''

    # class labels: 'saltar' = jump, 'nada' = nothing
    saltar = 'saltar'
    nada = 'nada'
    reglas = [saltar, nada]
    formato = 'PNG'
    train = 'train'
    val = 'val'

    def __init__(self, val_split: float = 0.2) -> None:
        self.imagenes = []
        self.targets = []
        self.nombre_maximo = 0

        nombres_maximos = []
        for regla in DataSet.reglas:
            if not os.path.exists(config_tr.PATH_IMAGES + '/' + DataSet.train + '/' + regla):
                os.makedirs(config_tr.PATH_IMAGES + '/' + DataSet.train + '/' + regla)

            if not os.path.exists(config_tr.PATH_IMAGES + '/' + DataSet.val + '/' + regla):
                os.makedirs(config_tr.PATH_IMAGES + '/' + DataSet.val + '/' + regla)

            # Find the highest numeric filename already saved, to keep new names unique
            lista_imagenes = os.listdir(config_tr.PATH_IMAGES + '/' + DataSet.train + '/' + regla) + \
                             os.listdir(config_tr.PATH_IMAGES + '/' + DataSet.val + '/' + regla)
            if len(lista_imagenes) == 0:
                nombre_maximo = [0]
            else:
                maximo_nombre = [int(x.split('.' + DataSet.formato)[0]) for x in lista_imagenes]
                nombre_maximo = maximo_nombre
            nombres_maximos = nombres_maximos + nombre_maximo

        self.nombre_maximo = max(nombres_maximos)
        self.val_split = val_split
    def genera_datos(self) -> None:
        imagenes = []
        targets = []

        # Start capturing only once space is pressed
        while True:
            if keyboard.is_pressed('space'):
                break

        while True:
            # Grab a full-screen screenshot (note: ImageGrab.grab() returns a color image)
            imagen = ImageGrab.grab()
            imagenes.append(imagen)

            if keyboard.is_pressed('escape'):
                break

            # Label the frame 'saltar' (jump) while space/up is held, otherwise 'nada'
            if keyboard.is_pressed('space') or keyboard.is_pressed('up'):
                targets.append(DataSet.saltar)
            else:
                targets.append(DataSet.nada)

        self.imagenes = imagenes
        self.targets = targets

        self.guardar_info()
    def guardar_info(self) -> None:
        ''' Save the captured images to disk '''
        for imagen, target in tqdm.tqdm(zip(self.imagenes, self.targets), total=len(self.imagenes)):
            self.nombre_maximo += 1
            random_ = random.random()
            if random_ <= 1 - self.val_split:
                image_PATH = config_tr.PATH_IMAGES + '/' + DataSet.train + '/' + target + '/' + str(self.nombre_maximo) + '.' + DataSet.formato
            else:
                image_PATH = config_tr.PATH_IMAGES + '/' + DataSet.val + '/' + target + '/' + str(self.nombre_maximo) + '.' + DataSet.formato
            imagen.save(image_PATH, DataSet.formato)
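    # Each image is routed to train or val by an independent random draw, so the
    # realized split only approximates val_split; an exact split would shuffle
    # the list and slice it instead.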
if __name__ == '__main__':
    self = DataSet()

    self.genera_datos()
|
normal
|
{
"blob_id": "c931d1ac5c2d003a8eaac3c6d777ce408df57117",
"index": 8534,
"step-1": "<mask token>\n\n\nclass DataSet(object):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def genera_datos(self) ->None:\n imagenes = []\n targets = []\n while True:\n if keyboard.is_pressed('space'):\n break\n while True:\n imagen = ImageGrab.grab()\n imagenes.append(imagen)\n if keyboard.is_pressed('escape'):\n break\n if keyboard.is_pressed('space') or keyboard.is_pressed('up'):\n targets.append(DataSet.saltar)\n else:\n targets.append(DataSet.nada)\n self.imagenes = imagenes\n self.targets = targets\n self.guardar_info()\n\n def guardar_info(self) ->None:\n \"\"\" guardamos las imagenes \"\"\"\n for imagen, target in tqdm.tqdm(zip(self.imagenes, self.targets),\n total=len(self.imagenes)):\n self.nombre_maximo += 1\n random_ = random.random()\n if random_ <= 1 - self.val_split:\n image_PATH = (config_tr.PATH_IMAGES + '/' + DataSet.train +\n '/' + target + '/' + str(self.nombre_maximo) + '.' +\n DataSet.formato)\n else:\n image_PATH = (config_tr.PATH_IMAGES + '/' + DataSet.val +\n '/' + target + '/' + str(self.nombre_maximo) + '.' +\n DataSet.formato)\n imagen.save(image_PATH, DataSet.formato)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass DataSet(object):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def __init__(self, val_split: int=0.2) ->None:\n self.imagenes = []\n self.targets = []\n self.nombre_maximo = 0\n nombres_maximos = []\n for regla in DataSet.reglas:\n if not os.path.exists(config_tr.PATH_IMAGES + '/' + DataSet.\n train + '/' + regla):\n os.makedirs(config_tr.PATH_IMAGES + '/' + DataSet.train +\n '/' + regla)\n if not os.path.exists(config_tr.PATH_IMAGES + '/' + DataSet.val +\n '/' + regla):\n os.makedirs(config_tr.PATH_IMAGES + '/' + DataSet.val + '/' +\n regla)\n lista_imagenes = os.listdir(config_tr.PATH_IMAGES + '/' +\n DataSet.train + '/' + regla) + os.listdir(config_tr.\n PATH_IMAGES + '/' + DataSet.val + '/' + regla)\n if len(lista_imagenes) == 0:\n nombre_maximo = [0]\n else:\n maximo_nombre = [int(x.split('.' + DataSet.formato)[0]) for\n x in lista_imagenes]\n nombre_maximo = maximo_nombre\n nombres_maximos = nombres_maximos + nombre_maximo\n self.nombre_maximo = max(nombres_maximos)\n self.val_split = val_split\n\n def genera_datos(self) ->None:\n imagenes = []\n targets = []\n while True:\n if keyboard.is_pressed('space'):\n break\n while True:\n imagen = ImageGrab.grab()\n imagenes.append(imagen)\n if keyboard.is_pressed('escape'):\n break\n if keyboard.is_pressed('space') or keyboard.is_pressed('up'):\n targets.append(DataSet.saltar)\n else:\n targets.append(DataSet.nada)\n self.imagenes = imagenes\n self.targets = targets\n self.guardar_info()\n\n def guardar_info(self) ->None:\n \"\"\" guardamos las imagenes \"\"\"\n for imagen, target in tqdm.tqdm(zip(self.imagenes, self.targets),\n total=len(self.imagenes)):\n self.nombre_maximo += 1\n random_ = random.random()\n if random_ <= 1 - self.val_split:\n image_PATH = (config_tr.PATH_IMAGES + '/' + DataSet.train +\n '/' + target + '/' + str(self.nombre_maximo) + '.' +\n DataSet.formato)\n else:\n image_PATH = (config_tr.PATH_IMAGES + '/' + DataSet.val +\n '/' + target + '/' + str(self.nombre_maximo) + '.' +\n DataSet.formato)\n imagen.save(image_PATH, DataSet.formato)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass DataSet(object):\n \"\"\" clase que crea dataset de entrenamiento \"\"\"\n saltar = 'saltar'\n nada = 'nada'\n reglas = [saltar, nada]\n formato = 'PNG'\n train = 'train'\n val = 'val'\n\n def __init__(self, val_split: int=0.2) ->None:\n self.imagenes = []\n self.targets = []\n self.nombre_maximo = 0\n nombres_maximos = []\n for regla in DataSet.reglas:\n if not os.path.exists(config_tr.PATH_IMAGES + '/' + DataSet.\n train + '/' + regla):\n os.makedirs(config_tr.PATH_IMAGES + '/' + DataSet.train +\n '/' + regla)\n if not os.path.exists(config_tr.PATH_IMAGES + '/' + DataSet.val +\n '/' + regla):\n os.makedirs(config_tr.PATH_IMAGES + '/' + DataSet.val + '/' +\n regla)\n lista_imagenes = os.listdir(config_tr.PATH_IMAGES + '/' +\n DataSet.train + '/' + regla) + os.listdir(config_tr.\n PATH_IMAGES + '/' + DataSet.val + '/' + regla)\n if len(lista_imagenes) == 0:\n nombre_maximo = [0]\n else:\n maximo_nombre = [int(x.split('.' + DataSet.formato)[0]) for\n x in lista_imagenes]\n nombre_maximo = maximo_nombre\n nombres_maximos = nombres_maximos + nombre_maximo\n self.nombre_maximo = max(nombres_maximos)\n self.val_split = val_split\n\n def genera_datos(self) ->None:\n imagenes = []\n targets = []\n while True:\n if keyboard.is_pressed('space'):\n break\n while True:\n imagen = ImageGrab.grab()\n imagenes.append(imagen)\n if keyboard.is_pressed('escape'):\n break\n if keyboard.is_pressed('space') or keyboard.is_pressed('up'):\n targets.append(DataSet.saltar)\n else:\n targets.append(DataSet.nada)\n self.imagenes = imagenes\n self.targets = targets\n self.guardar_info()\n\n def guardar_info(self) ->None:\n \"\"\" guardamos las imagenes \"\"\"\n for imagen, target in tqdm.tqdm(zip(self.imagenes, self.targets),\n total=len(self.imagenes)):\n self.nombre_maximo += 1\n random_ = random.random()\n if random_ <= 1 - self.val_split:\n image_PATH = (config_tr.PATH_IMAGES + '/' + DataSet.train +\n '/' + target + '/' + str(self.nombre_maximo) + '.' +\n DataSet.formato)\n else:\n image_PATH = (config_tr.PATH_IMAGES + '/' + DataSet.val +\n '/' + target + '/' + str(self.nombre_maximo) + '.' +\n DataSet.formato)\n imagen.save(image_PATH, DataSet.formato)\n\n\n<mask token>\n",
"step-4": "<mask token>\nimport keyboard\nfrom PIL import ImageGrab\nimport os\nimport tqdm\nimport random\nfrom training import config_tr\n\n\nclass DataSet(object):\n \"\"\" clase que crea dataset de entrenamiento \"\"\"\n saltar = 'saltar'\n nada = 'nada'\n reglas = [saltar, nada]\n formato = 'PNG'\n train = 'train'\n val = 'val'\n\n def __init__(self, val_split: int=0.2) ->None:\n self.imagenes = []\n self.targets = []\n self.nombre_maximo = 0\n nombres_maximos = []\n for regla in DataSet.reglas:\n if not os.path.exists(config_tr.PATH_IMAGES + '/' + DataSet.\n train + '/' + regla):\n os.makedirs(config_tr.PATH_IMAGES + '/' + DataSet.train +\n '/' + regla)\n if not os.path.exists(config_tr.PATH_IMAGES + '/' + DataSet.val +\n '/' + regla):\n os.makedirs(config_tr.PATH_IMAGES + '/' + DataSet.val + '/' +\n regla)\n lista_imagenes = os.listdir(config_tr.PATH_IMAGES + '/' +\n DataSet.train + '/' + regla) + os.listdir(config_tr.\n PATH_IMAGES + '/' + DataSet.val + '/' + regla)\n if len(lista_imagenes) == 0:\n nombre_maximo = [0]\n else:\n maximo_nombre = [int(x.split('.' + DataSet.formato)[0]) for\n x in lista_imagenes]\n nombre_maximo = maximo_nombre\n nombres_maximos = nombres_maximos + nombre_maximo\n self.nombre_maximo = max(nombres_maximos)\n self.val_split = val_split\n\n def genera_datos(self) ->None:\n imagenes = []\n targets = []\n while True:\n if keyboard.is_pressed('space'):\n break\n while True:\n imagen = ImageGrab.grab()\n imagenes.append(imagen)\n if keyboard.is_pressed('escape'):\n break\n if keyboard.is_pressed('space') or keyboard.is_pressed('up'):\n targets.append(DataSet.saltar)\n else:\n targets.append(DataSet.nada)\n self.imagenes = imagenes\n self.targets = targets\n self.guardar_info()\n\n def guardar_info(self) ->None:\n \"\"\" guardamos las imagenes \"\"\"\n for imagen, target in tqdm.tqdm(zip(self.imagenes, self.targets),\n total=len(self.imagenes)):\n self.nombre_maximo += 1\n random_ = random.random()\n if random_ <= 1 - self.val_split:\n image_PATH = (config_tr.PATH_IMAGES + '/' + DataSet.train +\n '/' + target + '/' + str(self.nombre_maximo) + '.' +\n DataSet.formato)\n else:\n image_PATH = (config_tr.PATH_IMAGES + '/' + DataSet.val +\n '/' + target + '/' + str(self.nombre_maximo) + '.' +\n DataSet.formato)\n imagen.save(image_PATH, DataSet.formato)\n\n\nif __name__ == '__main__':\n self = DataSet()\n self.genera_datos()\n",
"step-5": "'''\nAutor: Jazielinho\n'''\n\nimport keyboard\nfrom PIL import ImageGrab\nimport os\nimport tqdm\nimport random\n\nfrom training import config_tr\n\n\nclass DataSet(object):\n ''' clase que crea dataset de entrenamiento '''\n\n saltar = 'saltar'\n nada = 'nada'\n reglas = [saltar, nada]\n formato = 'PNG'\n train = 'train'\n val = 'val'\n\n def __init__(self, val_split: int = 0.2) -> None:\n self.imagenes = []\n self.targets = []\n self.nombre_maximo = 0\n\n nombres_maximos = []\n for regla in DataSet.reglas:\n if not os.path.exists(config_tr.PATH_IMAGES + '/' + DataSet.train + '/' + regla):\n os.makedirs(config_tr.PATH_IMAGES + '/' + DataSet.train + '/' + regla)\n\n if not os.path.exists(config_tr.PATH_IMAGES + '/' + DataSet.val + '/' + regla):\n os.makedirs(config_tr.PATH_IMAGES + '/' + DataSet.val + '/' + regla)\n\n lista_imagenes = os.listdir(config_tr.PATH_IMAGES + '/' + DataSet.train + '/' + regla) + \\\n os.listdir(config_tr.PATH_IMAGES + '/' + DataSet.val + '/' + regla)\n if len(lista_imagenes) == 0:\n nombre_maximo = [0]\n else:\n maximo_nombre = [int(x.split('.' + DataSet.formato)[0]) for x in lista_imagenes]\n nombre_maximo = maximo_nombre\n nombres_maximos = nombres_maximos + nombre_maximo\n\n self.nombre_maximo = max(nombres_maximos)\n self.val_split = val_split\n\n def genera_datos(self) -> None:\n imagenes = []\n targets = []\n\n # Empieza a funcionar desde presionar espacio\n while True:\n if keyboard.is_pressed('space'):\n break\n\n while True:\n # Las imagenes estan en blanco y negro\n imagen = ImageGrab.grab()\n imagenes.append(imagen)\n\n if keyboard.is_pressed('escape'):\n break\n\n if keyboard.is_pressed('space') or keyboard.is_pressed('up'):\n targets.append(DataSet.saltar)\n else:\n targets.append(DataSet.nada)\n\n self.imagenes = imagenes\n self.targets = targets\n\n self.guardar_info()\n\n def guardar_info(self) -> None:\n ''' guardamos las imagenes '''\n for imagen, target in tqdm.tqdm(zip(self.imagenes, self.targets), total=len(self.imagenes)):\n self.nombre_maximo += 1\n random_ = random.random()\n if random_ <= 1 - self.val_split:\n image_PATH = config_tr.PATH_IMAGES + '/' + DataSet.train + '/' + target + '/' + str(self.nombre_maximo) + '.' + DataSet.formato\n else:\n image_PATH = config_tr.PATH_IMAGES + '/' + DataSet.val + '/' + target + '/' + str(self.nombre_maximo) + '.' + DataSet.formato\n imagen.save(image_PATH, DataSet.formato)\n\n\nif __name__ == '__main__':\n self = DataSet()\n\n self.genera_datos()\n",
"step-ids": [
3,
4,
6,
8,
9
]
}
|
[
3,
4,
6,
8,
9
] |
import tornado.web
import tornado.escape
from torcms.core.base_handler import BaseHandler
from owslib.csw import CatalogueServiceWeb
from owslib.fes import PropertyIsEqualTo, PropertyIsLike, BBox
class DirectorySearchHandler(BaseHandler):
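    """Tornado handler that runs metadata searches against a remote CSW endpoint through OWSLib."""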
def initialize(self):
super(DirectorySearchHandler, self).initialize()
def get(self, url_str=''):
url_arr = self.parse_url(url_str)
if len(url_str) > 0:
url_arr = url_str.split('/')
# if url_str == '':
# self.render('metadata/meta_index.html')
if url_str == '':
self.list('')
elif url_arr[0] == 'search':
            if len(url_arr) >= 5:  # keyword, isweb, ldrt and maxrecords are all present
self.search(url_arr[1], url_arr[2], url_arr[3], url_arr[4])
else:
self.search(url_arr[1], url_arr[2], '', 10)
elif url_arr[0] == 'view':
self.ajax_get(url_arr[1], url_arr[2])
# def post(self, *args, **kwargs):
# post_data = self.get_request_arguments()
# keyword = post_data.get('keyw9', '')
# isweb = post_data.get('isweb', '1')
# ldrt = post_data.get('ldrt', '')
# maxrecords = post_data.get('maxrecords', 20)
#
# self.redirect('/directory_search/search/{0}/{1}/{2}/{3}'.format(keyword, isweb, ldrt, maxrecords))
# def search(self, keyw):
# # print('====' * 40)
# # print(post_data)
# url = 'http://meta.osgeo.cn/pycsw/csw.py?mode=sru&operation=searchRetrieve&query={0}
# &maximumRecords=5&startRecord=5&outputFormat=application/json'.format(
# keyw)
# r = requests.get(url)
# pprint.pprint(r.text)
# self.parseXML(r.text.encode(encoding='UTF-8'))
def list(self, keyw):
# print('====' * 40)
# print(post_data)
keyw = 'data'
csw = CatalogueServiceWeb('https://drr.ikcest.org/csw')
birds_query_like = PropertyIsLike('dc:title', '%{0}%'.format(keyw))
csw.getrecords2(constraints=[birds_query_like], maxrecords=20)
print('-' * 20)
print(csw.results)
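        # csw.results only holds hit counts ('matches', 'returned', ...); the records themselves live in csw.records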
        for rec in csw.records:
print(rec)
# out_dic = {}
# for rec in csw.records:
# url = 'http://meta.osgeo.cn/pycsw/csw.py?mode=sru&operation=searchRetrieve&query={0}\
# maximumRecords=5&startRecord=5&outputFormat=application/json'.format(
# keyw)
# r = requests.get(url)
# pprint.pprint(r.text)
self.render('../torcms_dde/search/meta_index.html',
meta_results=csw.records,
userinfo=self.userinfo)
# self.parseXML(r.text.encode(encoding='UTF-8'))
def search(self, keyw, isweb, ldrt, max_num):
# print('=' * 40)
# print(ldrt)
post_data = self.get_request_arguments()
startnum = post_data.get('startnum', 0)
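        # CSW record positions are 1-based: page startnum of size max_num starts at startnum * max_num + 1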
        startposition = int(startnum) * int(max_num) + 1
        print(',' * 50)
print(startnum)
csw = CatalogueServiceWeb('https://drr.ikcest.org/csw')
# birds_query_like = PropertyIsLike('dc:title', '%{0}%'.format(keyw))
if ldrt:
print('=' * 40)
print(type(ldrt))
print(ldrt)
print('=' * 40)
xx_ldrt = [float(x) for x in ldrt.split(',')]
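            # Swap each corner's coordinate order; the incoming ldrt string appears to be lon,lat while BBox is fed lat,lon here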
xx_ldrt = [xx_ldrt[1], xx_ldrt[0], xx_ldrt[3], xx_ldrt[2]]
print(xx_ldrt)
bbox_query = BBox(xx_ldrt)
if isweb == '1':
# birds_query = PropertyIsLike('dc:title', '%{0}%'.format(keyw))
                csw.getrecords2(constraints=[bbox_query], startposition=startposition, maxrecords=max_num)
else:
birds_query = PropertyIsLike('csw:AnyText', '%{0}%'.format(keyw))
                csw.getrecords2(constraints=[birds_query, bbox_query],
                                maxrecords=max_num, startposition=startposition,
                                distributedsearch=True, hopcount=2)
else:
if isweb == '1':
birds_query = PropertyIsLike('dc:title', '%{0}%'.format(keyw))
                csw.getrecords2(constraints=[birds_query], startposition=startposition, maxrecords=max_num)
else:
birds_query = PropertyIsLike('csw:AnyText', '%{0}%'.format(keyw))
                csw.getrecords2(constraints=[birds_query], maxrecords=max_num,
                                startposition=startposition,
                                distributedsearch=True, hopcount=2)
print('-' * 20)
print(isweb)
print(csw.results)
for rec in csw.records:
print(rec)
# out_dic = {}
# for rec in csw.records:
# url = 'http://meta.osgeo.cn/pycsw/csw.py?mode=sru&operation=searchRetrieve&query={0}&
# maximumRecords=5&startRecord=5&outputFormat=application/json'.format(
# keyw)
# r = requests.get(url)
# pprint.pprint(r.text)
        self.render('../torcms_dde/search/show_result.html',
                    meta_results=csw.records,
                    userinfo=self.userinfo,
                    isweb=isweb,
                    startnum=startnum)
# self.parseXML(r.text.encode(encoding='UTF-8'))
# def get_result(self, post_data):
# print('====' * 40)
# print(post_data)
# url = 'http://meta.osgeo.cn/pycsw/csw.py?mode=sru&operation=searchRetrieve&query={0}
# &maximumRecords=5&startRecord=5'.format(
# post_data['keyw'][0])
# r = requests.get(url)
# pprint.pprint(r.text)
# self.parseXML(r.text.encode(encoding='UTF-8'))
# # data = urllib.request.Request(url)
def ajax_get(self, uuid, isweb):
print('=' * 20)
print(uuid)
# uuid = uuid.split(':')[-1]
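        # Fetch a single record by id; fall back to a distributed csw:AnyText search when the id is not held locally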
csw = CatalogueServiceWeb('https://drr.ikcest.org/csw')
# birds_query_like = PropertyIsLike('dc:title', '%{0}%'.format(keyw))
csw.getrecordbyid(id=[uuid])
print('-' * 20)
        # getrecordbyid() returns None and stores the fetched records on
        # csw.records, so print those instead of issuing a second request.
        print(csw.records)
if isweb == '1':
rec = csw.records.get(uuid)
else:
birds_query = PropertyIsLike('csw:AnyText', uuid)
csw.getrecords2(constraints=[birds_query], maxrecords=20, startposition=0, distributedsearch=True,
hopcount=2)
print(csw.results)
for key in csw.records:
rec = csw.records[key]
out_dict = {
'title': '',
'uid': '',
'sizhi': '',
}
self.render('../torcms_dde/search/show_rec.html',
kws=out_dict,
# meta_rec=csw.records.get(uuid),
meta_rec=rec,
unescape=tornado.escape.xhtml_unescape,
userinfo=self.userinfo
)
# #
# def parseXML(self, data):
#
# tree = etree.fromstring(data)
# # root = tree.getroot()
# uu = tree.findall('zs:record', tree.nsmap)
#
# meta_arr = []
# for x in uu:
# meta_arr.append(MyXML(x))
# # print(x.element('ows:LowerCorner'))
# # uu = etree.SubElement(x, "LowerCorner")
# # for sub_ele in x.iter():
# # print(sub_ele.tag)
# # if 'title' == sub_ele.tag.split('}')[1]:
# # print(sub_ele.text)
# # if 'LowerCorner' == sub_ele.tag.split('}')[1]:
# # print(sub_ele.text)
#
# self.render('metadata/show_result.html',
# meta_arr=meta_arr)
class MyXML:
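    """Thin wrapper around a parsed SRU <zs:record> element for pulling out Dublin Core fields."""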
def __init__(self, in_ele):
self.element = in_ele
def uid(self):
for sub_ele in self.element.iter():
if 'identifier' == sub_ele.tag.split('}')[1]:
return sub_ele.text
def recordPosition(self):
for sub_ele in self.element.iter():
if 'recordPosition' == sub_ele.tag.split('}')[1]:
return sub_ele.text
def sizhi(self):
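        # 'sizhi' (likely 四至, the four bounds): interleaves LowerCorner and UpperCorner coordinate values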
out_arr = [0, 0, 0, 0]
for sub_ele in self.element.iter():
if 'LowerCorner' == sub_ele.tag.split('}')[1]:
t1 = sub_ele.text.split(' ')
out_arr[0] = float(t1[0])
out_arr[2] = float(t1[1])
if 'UpperCorner' == sub_ele.tag.split('}')[1]:
t2 = sub_ele.text.split(' ')
out_arr[1] = float(t2[0])
out_arr[3] = float(t2[1])
return out_arr
def title(self):
for sub_ele in self.element.iter():
if 'title' == sub_ele.tag.split('}')[1]:
return sub_ele.text
|
normal
|
{
"blob_id": "72ce7c48c9d1a7bcdbaead12648d03970663a11e",
"index": 3227,
"step-1": "<mask token>\n\n\nclass DirectorySearchHandler(BaseHandler):\n\n def initialize(self):\n super(DirectorySearchHandler, self).initialize()\n <mask token>\n <mask token>\n <mask token>\n\n def ajax_get(self, uuid, isweb):\n print('=' * 20)\n print(uuid)\n csw = CatalogueServiceWeb('https://drr.ikcest.org/csw')\n csw.getrecordbyid(id=[uuid])\n print('-' * 20)\n print(csw.getrecordbyid(id=[uuid]))\n if isweb == '1':\n rec = csw.records.get(uuid)\n else:\n birds_query = PropertyIsLike('csw:AnyText', uuid)\n csw.getrecords2(constraints=[birds_query], maxrecords=20,\n startposition=0, distributedsearch=True, hopcount=2)\n print(csw.results)\n for key in csw.records:\n rec = csw.records[key]\n out_dict = {'title': '', 'uid': '', 'sizhi': ''}\n self.render('../torcms_dde/search/show_rec.html', kws=out_dict,\n meta_rec=rec, unescape=tornado.escape.xhtml_unescape, userinfo=\n self.userinfo)\n\n\nclass MyXML:\n\n def __init__(self, in_ele):\n self.element = in_ele\n\n def uid(self):\n for sub_ele in self.element.iter():\n if 'identifier' == sub_ele.tag.split('}')[1]:\n return sub_ele.text\n\n def recordPosition(self):\n for sub_ele in self.element.iter():\n if 'recordPosition' == sub_ele.tag.split('}')[1]:\n return sub_ele.text\n\n def sizhi(self):\n out_arr = [0, 0, 0, 0]\n for sub_ele in self.element.iter():\n if 'LowerCorner' == sub_ele.tag.split('}')[1]:\n t1 = sub_ele.text.split(' ')\n out_arr[0] = float(t1[0])\n out_arr[2] = float(t1[1])\n if 'UpperCorner' == sub_ele.tag.split('}')[1]:\n t2 = sub_ele.text.split(' ')\n out_arr[1] = float(t2[0])\n out_arr[3] = float(t2[1])\n return out_arr\n\n def title(self):\n for sub_ele in self.element.iter():\n if 'title' == sub_ele.tag.split('}')[1]:\n return sub_ele.text\n",
"step-2": "<mask token>\n\n\nclass DirectorySearchHandler(BaseHandler):\n\n def initialize(self):\n super(DirectorySearchHandler, self).initialize()\n <mask token>\n <mask token>\n\n def search(self, keyw, isweb, ldrt, max_num):\n post_data = self.get_request_arguments()\n startnum = post_data.get('startnum', 0)\n startposition = int(startnum) * int(max_num) + 1\n print(',' * 50)\n print(startnum)\n csw = CatalogueServiceWeb('https://drr.ikcest.org/csw')\n if ldrt:\n print('=' * 40)\n print(type(ldrt))\n print(ldrt)\n print('=' * 40)\n xx_ldrt = [float(x) for x in ldrt.split(',')]\n xx_ldrt = [xx_ldrt[1], xx_ldrt[0], xx_ldrt[3], xx_ldrt[2]]\n print(xx_ldrt)\n bbox_query = BBox(xx_ldrt)\n if isweb == '1':\n csw.getrecords2(constraints=[bbox_query], startposition=\n startposition, maxrecords=max_num)\n else:\n birds_query = PropertyIsLike('csw:AnyText', '%{0}%'.format(\n keyw))\n csw.getrecords2(constraints=[birds_query, bbox_query],\n maxrecords=max_num, startposition=startposition,\n distributedsearch=True, hopcount=2)\n elif isweb == '1':\n birds_query = PropertyIsLike('dc:title', '%{0}%'.format(keyw))\n csw.getrecords2(constraints=[birds_query], startposition=\n startposition, maxrecords=max_num)\n else:\n birds_query = PropertyIsLike('csw:AnyText', '%{0}%'.format(keyw))\n csw.getrecords2(constraints=[birds_query], maxrecords=max_num,\n startposition=startposition, distributedsearch=True, hopcount=2\n )\n print('-' * 20)\n print(isweb)\n print(csw.results)\n for rec in csw.records:\n print(rec)\n self.render('../torcms_dde/search/show_result.html', meta_results=\n csw.records, userinfo=self.userinfo, isweb=isweb, startnum=startnum\n )\n\n def ajax_get(self, uuid, isweb):\n print('=' * 20)\n print(uuid)\n csw = CatalogueServiceWeb('https://drr.ikcest.org/csw')\n csw.getrecordbyid(id=[uuid])\n print('-' * 20)\n print(csw.getrecordbyid(id=[uuid]))\n if isweb == '1':\n rec = csw.records.get(uuid)\n else:\n birds_query = PropertyIsLike('csw:AnyText', uuid)\n csw.getrecords2(constraints=[birds_query], maxrecords=20,\n startposition=0, distributedsearch=True, hopcount=2)\n print(csw.results)\n for key in csw.records:\n rec = csw.records[key]\n out_dict = {'title': '', 'uid': '', 'sizhi': ''}\n self.render('../torcms_dde/search/show_rec.html', kws=out_dict,\n meta_rec=rec, unescape=tornado.escape.xhtml_unescape, userinfo=\n self.userinfo)\n\n\nclass MyXML:\n\n def __init__(self, in_ele):\n self.element = in_ele\n\n def uid(self):\n for sub_ele in self.element.iter():\n if 'identifier' == sub_ele.tag.split('}')[1]:\n return sub_ele.text\n\n def recordPosition(self):\n for sub_ele in self.element.iter():\n if 'recordPosition' == sub_ele.tag.split('}')[1]:\n return sub_ele.text\n\n def sizhi(self):\n out_arr = [0, 0, 0, 0]\n for sub_ele in self.element.iter():\n if 'LowerCorner' == sub_ele.tag.split('}')[1]:\n t1 = sub_ele.text.split(' ')\n out_arr[0] = float(t1[0])\n out_arr[2] = float(t1[1])\n if 'UpperCorner' == sub_ele.tag.split('}')[1]:\n t2 = sub_ele.text.split(' ')\n out_arr[1] = float(t2[0])\n out_arr[3] = float(t2[1])\n return out_arr\n\n def title(self):\n for sub_ele in self.element.iter():\n if 'title' == sub_ele.tag.split('}')[1]:\n return sub_ele.text\n",
"step-3": "<mask token>\n\n\nclass DirectorySearchHandler(BaseHandler):\n\n def initialize(self):\n super(DirectorySearchHandler, self).initialize()\n <mask token>\n\n def list(self, keyw):\n keyw = 'data'\n csw = CatalogueServiceWeb('https://drr.ikcest.org/csw')\n birds_query_like = PropertyIsLike('dc:title', '%{0}%'.format(keyw))\n csw.getrecords2(constraints=[birds_query_like], maxrecords=20)\n print('-' * 20)\n print(csw.results)\n for rec in csw.results:\n print(rec)\n self.render('../torcms_dde/search/meta_index.html', meta_results=\n csw.records, userinfo=self.userinfo)\n\n def search(self, keyw, isweb, ldrt, max_num):\n post_data = self.get_request_arguments()\n startnum = post_data.get('startnum', 0)\n startposition = int(startnum) * int(max_num) + 1\n print(',' * 50)\n print(startnum)\n csw = CatalogueServiceWeb('https://drr.ikcest.org/csw')\n if ldrt:\n print('=' * 40)\n print(type(ldrt))\n print(ldrt)\n print('=' * 40)\n xx_ldrt = [float(x) for x in ldrt.split(',')]\n xx_ldrt = [xx_ldrt[1], xx_ldrt[0], xx_ldrt[3], xx_ldrt[2]]\n print(xx_ldrt)\n bbox_query = BBox(xx_ldrt)\n if isweb == '1':\n csw.getrecords2(constraints=[bbox_query], startposition=\n startposition, maxrecords=max_num)\n else:\n birds_query = PropertyIsLike('csw:AnyText', '%{0}%'.format(\n keyw))\n csw.getrecords2(constraints=[birds_query, bbox_query],\n maxrecords=max_num, startposition=startposition,\n distributedsearch=True, hopcount=2)\n elif isweb == '1':\n birds_query = PropertyIsLike('dc:title', '%{0}%'.format(keyw))\n csw.getrecords2(constraints=[birds_query], startposition=\n startposition, maxrecords=max_num)\n else:\n birds_query = PropertyIsLike('csw:AnyText', '%{0}%'.format(keyw))\n csw.getrecords2(constraints=[birds_query], maxrecords=max_num,\n startposition=startposition, distributedsearch=True, hopcount=2\n )\n print('-' * 20)\n print(isweb)\n print(csw.results)\n for rec in csw.records:\n print(rec)\n self.render('../torcms_dde/search/show_result.html', meta_results=\n csw.records, userinfo=self.userinfo, isweb=isweb, startnum=startnum\n )\n\n def ajax_get(self, uuid, isweb):\n print('=' * 20)\n print(uuid)\n csw = CatalogueServiceWeb('https://drr.ikcest.org/csw')\n csw.getrecordbyid(id=[uuid])\n print('-' * 20)\n print(csw.getrecordbyid(id=[uuid]))\n if isweb == '1':\n rec = csw.records.get(uuid)\n else:\n birds_query = PropertyIsLike('csw:AnyText', uuid)\n csw.getrecords2(constraints=[birds_query], maxrecords=20,\n startposition=0, distributedsearch=True, hopcount=2)\n print(csw.results)\n for key in csw.records:\n rec = csw.records[key]\n out_dict = {'title': '', 'uid': '', 'sizhi': ''}\n self.render('../torcms_dde/search/show_rec.html', kws=out_dict,\n meta_rec=rec, unescape=tornado.escape.xhtml_unescape, userinfo=\n self.userinfo)\n\n\nclass MyXML:\n\n def __init__(self, in_ele):\n self.element = in_ele\n\n def uid(self):\n for sub_ele in self.element.iter():\n if 'identifier' == sub_ele.tag.split('}')[1]:\n return sub_ele.text\n\n def recordPosition(self):\n for sub_ele in self.element.iter():\n if 'recordPosition' == sub_ele.tag.split('}')[1]:\n return sub_ele.text\n\n def sizhi(self):\n out_arr = [0, 0, 0, 0]\n for sub_ele in self.element.iter():\n if 'LowerCorner' == sub_ele.tag.split('}')[1]:\n t1 = sub_ele.text.split(' ')\n out_arr[0] = float(t1[0])\n out_arr[2] = float(t1[1])\n if 'UpperCorner' == sub_ele.tag.split('}')[1]:\n t2 = sub_ele.text.split(' ')\n out_arr[1] = float(t2[0])\n out_arr[3] = float(t2[1])\n return out_arr\n\n def title(self):\n for sub_ele in 
self.element.iter():\n if 'title' == sub_ele.tag.split('}')[1]:\n return sub_ele.text\n",
"step-4": "import tornado.web\nimport tornado.escape\nfrom torcms.core.base_handler import BaseHandler\nfrom owslib.csw import CatalogueServiceWeb\nfrom owslib.fes import PropertyIsEqualTo, PropertyIsLike, BBox\n\n\nclass DirectorySearchHandler(BaseHandler):\n\n def initialize(self):\n super(DirectorySearchHandler, self).initialize()\n\n def get(self, url_str=''):\n url_arr = self.parse_url(url_str)\n if len(url_str) > 0:\n url_arr = url_str.split('/')\n if url_str == '':\n self.list('')\n elif url_arr[0] == 'search':\n if len(url_arr[0]) >= 3:\n self.search(url_arr[1], url_arr[2], url_arr[3], url_arr[4])\n else:\n self.search(url_arr[1], url_arr[2], '', 10)\n elif url_arr[0] == 'view':\n self.ajax_get(url_arr[1], url_arr[2])\n\n def list(self, keyw):\n keyw = 'data'\n csw = CatalogueServiceWeb('https://drr.ikcest.org/csw')\n birds_query_like = PropertyIsLike('dc:title', '%{0}%'.format(keyw))\n csw.getrecords2(constraints=[birds_query_like], maxrecords=20)\n print('-' * 20)\n print(csw.results)\n for rec in csw.results:\n print(rec)\n self.render('../torcms_dde/search/meta_index.html', meta_results=\n csw.records, userinfo=self.userinfo)\n\n def search(self, keyw, isweb, ldrt, max_num):\n post_data = self.get_request_arguments()\n startnum = post_data.get('startnum', 0)\n startposition = int(startnum) * int(max_num) + 1\n print(',' * 50)\n print(startnum)\n csw = CatalogueServiceWeb('https://drr.ikcest.org/csw')\n if ldrt:\n print('=' * 40)\n print(type(ldrt))\n print(ldrt)\n print('=' * 40)\n xx_ldrt = [float(x) for x in ldrt.split(',')]\n xx_ldrt = [xx_ldrt[1], xx_ldrt[0], xx_ldrt[3], xx_ldrt[2]]\n print(xx_ldrt)\n bbox_query = BBox(xx_ldrt)\n if isweb == '1':\n csw.getrecords2(constraints=[bbox_query], startposition=\n startposition, maxrecords=max_num)\n else:\n birds_query = PropertyIsLike('csw:AnyText', '%{0}%'.format(\n keyw))\n csw.getrecords2(constraints=[birds_query, bbox_query],\n maxrecords=max_num, startposition=startposition,\n distributedsearch=True, hopcount=2)\n elif isweb == '1':\n birds_query = PropertyIsLike('dc:title', '%{0}%'.format(keyw))\n csw.getrecords2(constraints=[birds_query], startposition=\n startposition, maxrecords=max_num)\n else:\n birds_query = PropertyIsLike('csw:AnyText', '%{0}%'.format(keyw))\n csw.getrecords2(constraints=[birds_query], maxrecords=max_num,\n startposition=startposition, distributedsearch=True, hopcount=2\n )\n print('-' * 20)\n print(isweb)\n print(csw.results)\n for rec in csw.records:\n print(rec)\n self.render('../torcms_dde/search/show_result.html', meta_results=\n csw.records, userinfo=self.userinfo, isweb=isweb, startnum=startnum\n )\n\n def ajax_get(self, uuid, isweb):\n print('=' * 20)\n print(uuid)\n csw = CatalogueServiceWeb('https://drr.ikcest.org/csw')\n csw.getrecordbyid(id=[uuid])\n print('-' * 20)\n print(csw.getrecordbyid(id=[uuid]))\n if isweb == '1':\n rec = csw.records.get(uuid)\n else:\n birds_query = PropertyIsLike('csw:AnyText', uuid)\n csw.getrecords2(constraints=[birds_query], maxrecords=20,\n startposition=0, distributedsearch=True, hopcount=2)\n print(csw.results)\n for key in csw.records:\n rec = csw.records[key]\n out_dict = {'title': '', 'uid': '', 'sizhi': ''}\n self.render('../torcms_dde/search/show_rec.html', kws=out_dict,\n meta_rec=rec, unescape=tornado.escape.xhtml_unescape, userinfo=\n self.userinfo)\n\n\nclass MyXML:\n\n def __init__(self, in_ele):\n self.element = in_ele\n\n def uid(self):\n for sub_ele in self.element.iter():\n if 'identifier' == sub_ele.tag.split('}')[1]:\n return 
sub_ele.text\n\n def recordPosition(self):\n for sub_ele in self.element.iter():\n if 'recordPosition' == sub_ele.tag.split('}')[1]:\n return sub_ele.text\n\n def sizhi(self):\n out_arr = [0, 0, 0, 0]\n for sub_ele in self.element.iter():\n if 'LowerCorner' == sub_ele.tag.split('}')[1]:\n t1 = sub_ele.text.split(' ')\n out_arr[0] = float(t1[0])\n out_arr[2] = float(t1[1])\n if 'UpperCorner' == sub_ele.tag.split('}')[1]:\n t2 = sub_ele.text.split(' ')\n out_arr[1] = float(t2[0])\n out_arr[3] = float(t2[1])\n return out_arr\n\n def title(self):\n for sub_ele in self.element.iter():\n if 'title' == sub_ele.tag.split('}')[1]:\n return sub_ele.text\n",
"step-5": "import tornado.web\nimport tornado.escape\nfrom torcms.core.base_handler import BaseHandler\nfrom owslib.csw import CatalogueServiceWeb\nfrom owslib.fes import PropertyIsEqualTo, PropertyIsLike, BBox\n\n\nclass DirectorySearchHandler(BaseHandler):\n def initialize(self):\n super(DirectorySearchHandler, self).initialize()\n\n def get(self, url_str=''):\n url_arr = self.parse_url(url_str)\n if len(url_str) > 0:\n url_arr = url_str.split('/')\n # if url_str == '':\n # self.render('metadata/meta_index.html')\n\n if url_str == '':\n self.list('')\n elif url_arr[0] == 'search':\n if len(url_arr[0]) >= 3:\n self.search(url_arr[1], url_arr[2], url_arr[3], url_arr[4])\n else:\n self.search(url_arr[1], url_arr[2], '', 10)\n\n elif url_arr[0] == 'view':\n self.ajax_get(url_arr[1], url_arr[2])\n\n # def post(self, *args, **kwargs):\n # post_data = self.get_request_arguments()\n # keyword = post_data.get('keyw9', '')\n # isweb = post_data.get('isweb', '1')\n # ldrt = post_data.get('ldrt', '')\n # maxrecords = post_data.get('maxrecords', 20)\n #\n # self.redirect('/directory_search/search/{0}/{1}/{2}/{3}'.format(keyword, isweb, ldrt, maxrecords))\n\n # def search(self, keyw):\n # # print('====' * 40)\n # # print(post_data)\n # url = 'http://meta.osgeo.cn/pycsw/csw.py?mode=sru&operation=searchRetrieve&query={0}\n # &maximumRecords=5&startRecord=5&outputFormat=application/json'.format(\n # keyw)\n # r = requests.get(url)\n # pprint.pprint(r.text)\n # self.parseXML(r.text.encode(encoding='UTF-8'))\n def list(self, keyw):\n # print('====' * 40)\n # print(post_data)\n keyw = 'data'\n csw = CatalogueServiceWeb('https://drr.ikcest.org/csw')\n birds_query_like = PropertyIsLike('dc:title', '%{0}%'.format(keyw))\n csw.getrecords2(constraints=[birds_query_like], maxrecords=20)\n print('-' * 20)\n print(csw.results)\n\n for rec in csw.results:\n print(rec)\n\n # out_dic = {}\n # for rec in csw.records:\n # url = 'http://meta.osgeo.cn/pycsw/csw.py?mode=sru&operation=searchRetrieve&query={0}\\\n # maximumRecords=5&startRecord=5&outputFormat=application/json'.format(\n # keyw)\n # r = requests.get(url)\n # pprint.pprint(r.text)\n\n self.render('../torcms_dde/search/meta_index.html',\n meta_results=csw.records,\n userinfo=self.userinfo)\n\n # self.parseXML(r.text.encode(encoding='UTF-8'))\n\n def search(self, keyw, isweb, ldrt, max_num):\n # print('=' * 40)\n # print(ldrt)\n post_data = self.get_request_arguments()\n startnum = post_data.get('startnum', 0)\n\n startposition = int(startnum) * int(max_num) +1\n print(\",\" * 50)\n print(startnum)\n csw = CatalogueServiceWeb('https://drr.ikcest.org/csw')\n # birds_query_like = PropertyIsLike('dc:title', '%{0}%'.format(keyw))\n\n\n if ldrt:\n print('=' * 40)\n print(type(ldrt))\n print(ldrt)\n print('=' * 40)\n\n xx_ldrt = [float(x) for x in ldrt.split(',')]\n\n xx_ldrt = [xx_ldrt[1], xx_ldrt[0], xx_ldrt[3], xx_ldrt[2]]\n\n print(xx_ldrt)\n\n bbox_query = BBox(xx_ldrt)\n if isweb == '1':\n\n # birds_query = PropertyIsLike('dc:title', '%{0}%'.format(keyw))\n csw.getrecords2(constraints=[bbox_query], startposition=startposition,maxrecords=max_num)\n\n else:\n\n birds_query = PropertyIsLike('csw:AnyText', '%{0}%'.format(keyw))\n csw.getrecords2(constraints=[birds_query, bbox_query], maxrecords=max_num, startposition=startposition,\n distributedsearch=True,\n hopcount=2)\n else:\n if isweb == '1':\n\n birds_query = PropertyIsLike('dc:title', '%{0}%'.format(keyw))\n csw.getrecords2(constraints=[birds_query], startposition=startposition,maxrecords=max_num)\n else:\n 
birds_query = PropertyIsLike('csw:AnyText', '%{0}%'.format(keyw))\n csw.getrecords2(constraints=[birds_query], maxrecords=max_num, startposition=startposition, distributedsearch=True,\n hopcount=2)\n print('-' * 20)\n print(isweb)\n print(csw.results)\n\n for rec in csw.records:\n print(rec)\n\n # out_dic = {}\n # for rec in csw.records:\n\n # url = 'http://meta.osgeo.cn/pycsw/csw.py?mode=sru&operation=searchRetrieve&query={0}&\n # maximumRecords=5&startRecord=5&outputFormat=application/json'.format(\n # keyw)\n # r = requests.get(url)\n # pprint.pprint(r.text)\n\n self.render('../torcms_dde/search/show_result.html',\n meta_results=csw.records,\n userinfo=self.userinfo,\n isweb=isweb,\n startnum = startnum\n )\n\n # self.parseXML(r.text.encode(encoding='UTF-8'))\n\n # def get_result(self, post_data):\n # print('====' * 40)\n # print(post_data)\n # url = 'http://meta.osgeo.cn/pycsw/csw.py?mode=sru&operation=searchRetrieve&query={0}\n # &maximumRecords=5&startRecord=5'.format(\n # post_data['keyw'][0])\n # r = requests.get(url)\n # pprint.pprint(r.text)\n # self.parseXML(r.text.encode(encoding='UTF-8'))\n # # data = urllib.request.Request(url)\n\n def ajax_get(self, uuid, isweb):\n print('=' * 20)\n print(uuid)\n # uuid = uuid.split(':')[-1]\n csw = CatalogueServiceWeb('https://drr.ikcest.org/csw')\n # birds_query_like = PropertyIsLike('dc:title', '%{0}%'.format(keyw))\n\n csw.getrecordbyid(id=[uuid])\n print('-' * 20)\n print(csw.getrecordbyid(id=[uuid]))\n if isweb == '1':\n rec = csw.records.get(uuid)\n else:\n birds_query = PropertyIsLike('csw:AnyText', uuid)\n csw.getrecords2(constraints=[birds_query], maxrecords=20, startposition=0, distributedsearch=True,\n hopcount=2)\n print(csw.results)\n for key in csw.records:\n rec = csw.records[key]\n\n out_dict = {\n 'title': '',\n 'uid': '',\n 'sizhi': '',\n\n }\n\n self.render('../torcms_dde/search/show_rec.html',\n kws=out_dict,\n # meta_rec=csw.records.get(uuid),\n meta_rec=rec,\n unescape=tornado.escape.xhtml_unescape,\n userinfo=self.userinfo\n )\n\n # #\n # def parseXML(self, data):\n #\n # tree = etree.fromstring(data)\n # # root = tree.getroot()\n # uu = tree.findall('zs:record', tree.nsmap)\n #\n # meta_arr = []\n # for x in uu:\n # meta_arr.append(MyXML(x))\n # # print(x.element('ows:LowerCorner'))\n # # uu = etree.SubElement(x, \"LowerCorner\")\n # # for sub_ele in x.iter():\n # # print(sub_ele.tag)\n # # if 'title' == sub_ele.tag.split('}')[1]:\n # # print(sub_ele.text)\n # # if 'LowerCorner' == sub_ele.tag.split('}')[1]:\n # # print(sub_ele.text)\n #\n # self.render('metadata/show_result.html',\n # meta_arr=meta_arr)\n\n\nclass MyXML():\n def __init__(self, in_ele):\n self.element = in_ele\n\n def uid(self):\n for sub_ele in self.element.iter():\n if 'identifier' == sub_ele.tag.split('}')[1]:\n return sub_ele.text\n\n def recordPosition(self):\n for sub_ele in self.element.iter():\n if 'recordPosition' == sub_ele.tag.split('}')[1]:\n return sub_ele.text\n\n def sizhi(self):\n out_arr = [0, 0, 0, 0]\n for sub_ele in self.element.iter():\n if 'LowerCorner' == sub_ele.tag.split('}')[1]:\n t1 = sub_ele.text.split(' ')\n out_arr[0] = float(t1[0])\n out_arr[2] = float(t1[1])\n if 'UpperCorner' == sub_ele.tag.split('}')[1]:\n t2 = sub_ele.text.split(' ')\n out_arr[1] = float(t2[0])\n out_arr[3] = float(t2[1])\n return out_arr\n\n def title(self):\n for sub_ele in self.element.iter():\n if 'title' == sub_ele.tag.split('}')[1]:\n return sub_ele.text\n",
"step-ids": [
9,
10,
11,
13,
14
]
}
|
[
9,
10,
11,
13,
14
] |
from django.http import HttpResponse
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.db.models import Q
from cvmo import settings
from cvmo.context.models import ContextDefinition, Machines, ClusterDefinition, MarketplaceContextEntry
from cvmo.context.plugins import ContextPlugins
from cvmo.context.utils.views import uncache_response
from cvmo.context.utils.views import get_list_allowed_abstract
def welcome(request):
return render_to_response('pages/welcome.html', {}, RequestContext(request))
def dashboard(request):
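    # Gather the user's own contexts, clusters and machines, then drain any
    # flash messages that an earlier redirect stashed in the session.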
context = {
'context_list': ContextDefinition.objects.filter(Q(owner=request.user) & Q(inherited=False) & Q(abstract=False)).order_by('-public', 'name'),
'full_abstract_list': get_list_allowed_abstract(request),
'my_abstract_list': ContextDefinition.objects.filter(Q(owner=request.user) & Q(inherited=False) & Q(abstract=True)).order_by('name'),
'cluster_list': ClusterDefinition.objects.filter(owner=request.user).order_by('-public', 'name'),
'machine_list': Machines.objects.filter(owner=request.user)
}
context["webapi_configurations"] = settings.WEBAPI_CONFIGURATIONS
push_to_context("redirect_msg_info", "msg_info", context, request)
push_to_context("redirect_msg_error", "msg_error", context, request)
push_to_context("redirect_msg_warning", "msg_warning", context, request)
push_to_context("redirect_msg_confirm", "msg_confirm", context, request)
return uncache_response(render_to_response('pages/dashboard.html', context, RequestContext(request)))
def test(request):
raw = "<h1>404 - Not found</h1><p>This is not the website you are looking for</p>"
return render_to_response('core/raw.html', {'body': raw}, RequestContext(request))
def push_to_context(sessionName, contextName, context, request):
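    # Move a one-shot message from the session into the template context and clear it.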
if sessionName in request.session:
context[contextName] = request.session[sessionName]
del request.session[sessionName]
|
normal
|
{
"blob_id": "4db8b4403dd9064b7d5f935d4b9d111508c965fb",
"index": 1268,
"step-1": "<mask token>\n\n\ndef dashboard(request):\n context = {'context_list': ContextDefinition.objects.filter(Q(owner=\n request.user) & Q(inherited=False) & Q(abstract=False)).order_by(\n '-public', 'name'), 'full_abstract_list': get_list_allowed_abstract\n (request), 'my_abstract_list': ContextDefinition.objects.filter(Q(\n owner=request.user) & Q(inherited=False) & Q(abstract=True)).\n order_by('name'), 'cluster_list': ClusterDefinition.objects.filter(\n owner=request.user).order_by('-public', 'name'), 'machine_list':\n Machines.objects.filter(owner=request.user)}\n context['webapi_configurations'] = settings.WEBAPI_CONFIGURATIONS\n push_to_context('redirect_msg_info', 'msg_info', context, request)\n push_to_context('redirect_msg_error', 'msg_error', context, request)\n push_to_context('redirect_msg_warning', 'msg_warning', context, request)\n push_to_context('redirect_msg_confirm', 'msg_confirm', context, request)\n return uncache_response(render_to_response('pages/dashboard.html',\n context, RequestContext(request)))\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef welcome(request):\n return render_to_response('pages/welcome.html', {}, RequestContext(request)\n )\n\n\ndef dashboard(request):\n context = {'context_list': ContextDefinition.objects.filter(Q(owner=\n request.user) & Q(inherited=False) & Q(abstract=False)).order_by(\n '-public', 'name'), 'full_abstract_list': get_list_allowed_abstract\n (request), 'my_abstract_list': ContextDefinition.objects.filter(Q(\n owner=request.user) & Q(inherited=False) & Q(abstract=True)).\n order_by('name'), 'cluster_list': ClusterDefinition.objects.filter(\n owner=request.user).order_by('-public', 'name'), 'machine_list':\n Machines.objects.filter(owner=request.user)}\n context['webapi_configurations'] = settings.WEBAPI_CONFIGURATIONS\n push_to_context('redirect_msg_info', 'msg_info', context, request)\n push_to_context('redirect_msg_error', 'msg_error', context, request)\n push_to_context('redirect_msg_warning', 'msg_warning', context, request)\n push_to_context('redirect_msg_confirm', 'msg_confirm', context, request)\n return uncache_response(render_to_response('pages/dashboard.html',\n context, RequestContext(request)))\n\n\ndef test(request):\n raw = (\n '<h1>404 - Not found</h1><p>This is not the website you are looking for</p>'\n )\n return render_to_response('core/raw.html', {'body': raw},\n RequestContext(request))\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef welcome(request):\n return render_to_response('pages/welcome.html', {}, RequestContext(request)\n )\n\n\ndef dashboard(request):\n context = {'context_list': ContextDefinition.objects.filter(Q(owner=\n request.user) & Q(inherited=False) & Q(abstract=False)).order_by(\n '-public', 'name'), 'full_abstract_list': get_list_allowed_abstract\n (request), 'my_abstract_list': ContextDefinition.objects.filter(Q(\n owner=request.user) & Q(inherited=False) & Q(abstract=True)).\n order_by('name'), 'cluster_list': ClusterDefinition.objects.filter(\n owner=request.user).order_by('-public', 'name'), 'machine_list':\n Machines.objects.filter(owner=request.user)}\n context['webapi_configurations'] = settings.WEBAPI_CONFIGURATIONS\n push_to_context('redirect_msg_info', 'msg_info', context, request)\n push_to_context('redirect_msg_error', 'msg_error', context, request)\n push_to_context('redirect_msg_warning', 'msg_warning', context, request)\n push_to_context('redirect_msg_confirm', 'msg_confirm', context, request)\n return uncache_response(render_to_response('pages/dashboard.html',\n context, RequestContext(request)))\n\n\ndef test(request):\n raw = (\n '<h1>404 - Not found</h1><p>This is not the website you are looking for</p>'\n )\n return render_to_response('core/raw.html', {'body': raw},\n RequestContext(request))\n\n\ndef push_to_context(sessionName, contextName, context, request):\n if sessionName in request.session:\n context[contextName] = request.session[sessionName]\n del request.session[sessionName]\n",
"step-4": "from django.http import HttpResponse\nfrom django.shortcuts import render_to_response\nfrom django.template import RequestContext\nfrom django.db.models import Q\nfrom cvmo import settings\nfrom cvmo.context.models import ContextDefinition, Machines, ClusterDefinition, MarketplaceContextEntry\nfrom cvmo.context.plugins import ContextPlugins\nfrom cvmo.context.utils.views import uncache_response\nfrom cvmo.context.utils.views import get_list_allowed_abstract\n\n\ndef welcome(request):\n return render_to_response('pages/welcome.html', {}, RequestContext(request)\n )\n\n\ndef dashboard(request):\n context = {'context_list': ContextDefinition.objects.filter(Q(owner=\n request.user) & Q(inherited=False) & Q(abstract=False)).order_by(\n '-public', 'name'), 'full_abstract_list': get_list_allowed_abstract\n (request), 'my_abstract_list': ContextDefinition.objects.filter(Q(\n owner=request.user) & Q(inherited=False) & Q(abstract=True)).\n order_by('name'), 'cluster_list': ClusterDefinition.objects.filter(\n owner=request.user).order_by('-public', 'name'), 'machine_list':\n Machines.objects.filter(owner=request.user)}\n context['webapi_configurations'] = settings.WEBAPI_CONFIGURATIONS\n push_to_context('redirect_msg_info', 'msg_info', context, request)\n push_to_context('redirect_msg_error', 'msg_error', context, request)\n push_to_context('redirect_msg_warning', 'msg_warning', context, request)\n push_to_context('redirect_msg_confirm', 'msg_confirm', context, request)\n return uncache_response(render_to_response('pages/dashboard.html',\n context, RequestContext(request)))\n\n\ndef test(request):\n raw = (\n '<h1>404 - Not found</h1><p>This is not the website you are looking for</p>'\n )\n return render_to_response('core/raw.html', {'body': raw},\n RequestContext(request))\n\n\ndef push_to_context(sessionName, contextName, context, request):\n if sessionName in request.session:\n context[contextName] = request.session[sessionName]\n del request.session[sessionName]\n",
"step-5": "from django.http import HttpResponse\nfrom django.shortcuts import render_to_response\nfrom django.template import RequestContext\nfrom django.db.models import Q\n\nfrom cvmo import settings\n\nfrom cvmo.context.models import ContextDefinition, Machines, ClusterDefinition, MarketplaceContextEntry\n\nfrom cvmo.context.plugins import ContextPlugins\nfrom cvmo.context.utils.views import uncache_response\n\nfrom cvmo.context.utils.views import get_list_allowed_abstract\n\ndef welcome(request):\n return render_to_response('pages/welcome.html', {}, RequestContext(request))\n\ndef dashboard(request):\n context = {\n 'context_list': ContextDefinition.objects.filter(Q(owner=request.user) & Q(inherited=False) & Q(abstract=False)).order_by('-public', 'name'),\n 'full_abstract_list': get_list_allowed_abstract(request),\n 'my_abstract_list': ContextDefinition.objects.filter(Q(owner=request.user) & Q(inherited=False) & Q(abstract=True)).order_by('name'),\n 'cluster_list': ClusterDefinition.objects.filter(owner=request.user).order_by('-public', 'name'),\n 'machine_list': Machines.objects.filter(owner=request.user)\n }\n context[\"webapi_configurations\"] = settings.WEBAPI_CONFIGURATIONS\n push_to_context(\"redirect_msg_info\", \"msg_info\", context, request)\n push_to_context(\"redirect_msg_error\", \"msg_error\", context, request)\n push_to_context(\"redirect_msg_warning\", \"msg_warning\", context, request)\n push_to_context(\"redirect_msg_confirm\", \"msg_confirm\", context, request)\n\n return uncache_response(render_to_response('pages/dashboard.html', context, RequestContext(request)))\n\ndef test(request):\n raw = \"<h1>404 - Not found</h1><p>This is not the website you are looking for</p>\"\n return render_to_response('core/raw.html', {'body': raw}, RequestContext(request))\n\ndef push_to_context(sessionName, contextName, context, request):\n if sessionName in request.session:\n context[contextName] = request.session[sessionName]\n del request.session[sessionName]\n",
"step-ids": [
1,
3,
4,
5,
6
]
}
|
[
1,
3,
4,
5,
6
] |
import unittest
from .context import *
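# The .context module presumably re-exports the package under test (hello_world comes from there).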
class BasicTestSuite(unittest.TestCase):
"""Basic test cases."""
def test_hello_world(self):
self.assertEqual(hello_world(), 'hello world')
if __name__ == '__main__':
unittest.main()
|
normal
|
{
"blob_id": "6420d1b9da7ff205e1e138f72b194f63d1011012",
"index": 4554,
"step-1": "<mask token>\n\n\nclass BasicTestSuite(unittest.TestCase):\n <mask token>\n\n def test_hello_world(self):\n self.assertEqual(hello_world(), 'hello world')\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass BasicTestSuite(unittest.TestCase):\n \"\"\"Basic test cases.\"\"\"\n\n def test_hello_world(self):\n self.assertEqual(hello_world(), 'hello world')\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass BasicTestSuite(unittest.TestCase):\n \"\"\"Basic test cases.\"\"\"\n\n def test_hello_world(self):\n self.assertEqual(hello_world(), 'hello world')\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"step-4": "import unittest\nfrom .context import *\n\n\nclass BasicTestSuite(unittest.TestCase):\n \"\"\"Basic test cases.\"\"\"\n\n def test_hello_world(self):\n self.assertEqual(hello_world(), 'hello world')\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"step-5": null,
"step-ids": [
2,
3,
4,
5
]
}
|
[
2,
3,
4,
5
] |
import os
class Config(object):
"""Base Config Object"""
DEBUG = False
SECRET_KEY = os.environ.get('SECRET_KEY') or 'Som3$ec5etK*y'
UPLOAD_FOLDER = './uploads'
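# MySQL connection settings, read from the environment with local-development fallbacks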
dbconfig = {
    'host': os.environ.get('MYSQL_HOST') or 'localhost',
    'user': os.environ.get('MYSQL_USER') or 'root',
    'password': os.environ.get('MYSQL_PASSWORD') or '',
    'db': os.environ.get('MYSQL_DB') or 'finalproject2.sql'
}
class DevelopmentConfig(Config):
"""Development Config that extends the Base Config Object"""
DEVELOPMENT = True
DEBUG = True
class ProductionConfig(Config):
"""Production Config that extends the Base Config Object"""
DEBUG = False
|
normal
|
{
"blob_id": "833923c1928862e13c24904f5614927a683b168f",
"index": 611,
"step-1": "<mask token>\n\n\nclass DevelopmentConfig(Config):\n <mask token>\n <mask token>\n <mask token>\n\n\nclass ProductionConfig(Config):\n \"\"\"Production Config that extends the Base Config Object\"\"\"\n DEBUG = False\n",
"step-2": "<mask token>\n\n\nclass DevelopmentConfig(Config):\n \"\"\"Development Config that extends the Base Config Object\"\"\"\n DEVELOPMENT = True\n DEBUG = True\n\n\nclass ProductionConfig(Config):\n \"\"\"Production Config that extends the Base Config Object\"\"\"\n DEBUG = False\n",
"step-3": "<mask token>\n\n\nclass Config(object):\n \"\"\"Base Config Object\"\"\"\n DEBUG = False\n SECRET_KEY = os.environ.get('SECRET_KEY') or 'Som3$ec5etK*y'\n UPLOAD_FOLDER = './uploads'\n\n\n<mask token>\n\n\nclass DevelopmentConfig(Config):\n \"\"\"Development Config that extends the Base Config Object\"\"\"\n DEVELOPMENT = True\n DEBUG = True\n\n\nclass ProductionConfig(Config):\n \"\"\"Production Config that extends the Base Config Object\"\"\"\n DEBUG = False\n",
"step-4": "import os\n\n\nclass Config(object):\n \"\"\"Base Config Object\"\"\"\n DEBUG = False\n SECRET_KEY = os.environ.get('SECRET_KEY') or 'Som3$ec5etK*y'\n UPLOAD_FOLDER = './uploads'\n\n\ndbconfig = {'host': os.environ.get('MYSQL_HOST') or 'localhost', 'user': os\n .environ.get('MYSQL_USER') or 'root', 'password': os.environ.get(\n 'MYSQL_PASSWORD') or '', 'db': os.environ.get('MYSQL_DB') or\n 'finalproject2.sql'}\n\n\nclass DevelopmentConfig(Config):\n \"\"\"Development Config that extends the Base Config Object\"\"\"\n DEVELOPMENT = True\n DEBUG = True\n\n\nclass ProductionConfig(Config):\n \"\"\"Production Config that extends the Base Config Object\"\"\"\n DEBUG = False\n",
"step-5": null,
"step-ids": [
4,
6,
9,
11
]
}
|
[
4,
6,
9,
11
] |
'''
Created on Mar 7, 2019
@author: hzhang0418
'''
import pymp
from v6.mono import Mono
class BruteForce(Mono):
def __init__(self, features, labels, params):
super(BruteForce, self).__init__(features, labels, params)
def _count_inconsistencies(self):
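        # Pairwise scan: every (match, non-match) pair that violates monotonicity bumps both tuples' counters.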
        if self.num_cores == 1:
for ni in self.nonmatch_indices:
self.index2count[ni] = 0
for mi in self.match_indices:
match_features = self.features[mi]
count = 0
for ni in self.nonmatch_indices:
inconsistent = self.compare_features(match_features, self.features[ni], self.min_con_dim)
if inconsistent == True:
count += 1
self.index2count[ni] += 1
self.index2count[mi] = count
else:
nmatch = len(self.match_indices)
threads2incons_count = pymp.shared.dict()
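            # Each worker fills its own dict; results are merged after the parallel region, avoiding locks on shared counters.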
with pymp.Parallel(self.num_cores) as p:
local_index2incons_count = {}
for index in p.range(nmatch):
mi = self.match_indices[index]
match_features = self.features[mi]
count = 0
for ni in self.nonmatch_indices:
inconsistent = self.compare_features(match_features, self.features[ni], self.min_con_dim)
if inconsistent == True:
count += 1
if ni in local_index2incons_count:
local_index2incons_count[ni] += 1
else:
local_index2incons_count[ni] = 1
                    if count > 0:
local_index2incons_count[mi] = count
threads2incons_count[p.thread_num] = local_index2incons_count
for _, local_index2incons_count in threads2incons_count.items():
for index, count in local_index2incons_count.items():
if index in self.index2count:
self.index2count[index] += count
else:
self.index2count[index] = count
return self.index2count
def _get_inconsistency_indices(self):
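        # Same pairwise scan, but it records which tuples conflict instead of only counting them.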
        if self.num_cores == 1:
for mi in self.match_indices:
match_features = self.features[mi]
incons_indices = []
for ni in self.nonmatch_indices:
inconsistent = self.compare_features(match_features, self.features[ni], self.min_con_dim)
if inconsistent == True:
incons_indices.append(ni)
                if len(incons_indices) > 0:
self.index2incons[mi] = incons_indices
for ni in incons_indices:
if ni in self.index2incons:
self.index2incons[ni].append(mi)
else:
self.index2incons[ni] = [mi]
else:
nmatch = len(self.match_indices)
threads2incons = pymp.shared.dict()
with pymp.Parallel(self.num_cores) as p:
local_index2incons = {}
for index in p.range(nmatch):
mi = self.match_indices[index]
match_features = self.features[mi]
incons_indices = []
for ni in self.nonmatch_indices:
inconsistent = self.compare_features(match_features, self.features[ni], self.min_con_dim)
if inconsistent == True:
incons_indices.append(ni)
                    if len(incons_indices) > 0:
local_index2incons[mi] = incons_indices
threads2incons[p.thread_num] = local_index2incons
for _, local_index2incons in threads2incons.items():
for mi, ni_indices in local_index2incons.items():
self.index2incons[mi] = ni_indices
for ni in ni_indices:
if ni in self.index2incons:
self.index2incons[ni].append(mi)
else:
self.index2incons[ni] = [mi]
return self.index2incons
|
normal
|
{
"blob_id": "32c18bd578bbf91c76604f063421a65a4f7a8b63",
"index": 2204,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass BruteForce(Mono):\n <mask token>\n\n def _count_inconsistencies(self):\n if self.num_cores == 1:\n for ni in self.nonmatch_indices:\n self.index2count[ni] = 0\n for mi in self.match_indices:\n match_features = self.features[mi]\n count = 0\n for ni in self.nonmatch_indices:\n inconsistent = self.compare_features(match_features,\n self.features[ni], self.min_con_dim)\n if inconsistent == True:\n count += 1\n self.index2count[ni] += 1\n self.index2count[mi] = count\n else:\n nmatch = len(self.match_indices)\n threads2incons_count = pymp.shared.dict()\n with pymp.Parallel(self.num_cores) as p:\n local_index2incons_count = {}\n for index in p.range(nmatch):\n mi = self.match_indices[index]\n match_features = self.features[mi]\n count = 0\n for ni in self.nonmatch_indices:\n inconsistent = self.compare_features(match_features,\n self.features[ni], self.min_con_dim)\n if inconsistent == True:\n count += 1\n if ni in local_index2incons_count:\n local_index2incons_count[ni] += 1\n else:\n local_index2incons_count[ni] = 1\n if count > 0:\n local_index2incons_count[mi] = count\n threads2incons_count[p.thread_num] = local_index2incons_count\n for _, local_index2incons_count in threads2incons_count.items():\n for index, count in local_index2incons_count.items():\n if index in self.index2count:\n self.index2count[index] += count\n else:\n self.index2count[index] = count\n return self.index2count\n\n def _get_inconsistency_indices(self):\n if self.num_cores == 1:\n for mi in self.match_indices:\n match_features = self.features[mi]\n incons_indices = []\n for ni in self.nonmatch_indices:\n inconsistent = self.compare_features(match_features,\n self.features[ni], self.min_con_dim)\n if inconsistent == True:\n incons_indices.append(ni)\n if len(incons_indices) > 0:\n self.index2incons[mi] = incons_indices\n for ni in incons_indices:\n if ni in self.index2incons:\n self.index2incons[ni].append(mi)\n else:\n self.index2incons[ni] = [mi]\n else:\n nmatch = len(self.match_indices)\n threads2incons = pymp.shared.dict()\n with pymp.Parallel(self.num_cores) as p:\n local_index2incons = {}\n for index in p.range(nmatch):\n mi = self.match_indices[index]\n match_features = self.features[mi]\n incons_indices = []\n for ni in self.nonmatch_indices:\n inconsistent = self.compare_features(match_features,\n self.features[ni], self.min_con_dim)\n if inconsistent == True:\n incons_indices.append(ni)\n if len(incons_indices) > 0:\n local_index2incons[mi] = incons_indices\n threads2incons[p.thread_num] = local_index2incons\n for _, local_index2incons in threads2incons.items():\n for mi, ni_indices in local_index2incons.items():\n self.index2incons[mi] = ni_indices\n for ni in ni_indices:\n if ni in self.index2incons:\n self.index2incons[ni].append(mi)\n else:\n self.index2incons[ni] = [mi]\n return self.index2incons\n",
"step-3": "<mask token>\n\n\nclass BruteForce(Mono):\n\n def __init__(self, features, labels, params):\n super(BruteForce, self).__init__(features, labels, params)\n\n def _count_inconsistencies(self):\n if self.num_cores == 1:\n for ni in self.nonmatch_indices:\n self.index2count[ni] = 0\n for mi in self.match_indices:\n match_features = self.features[mi]\n count = 0\n for ni in self.nonmatch_indices:\n inconsistent = self.compare_features(match_features,\n self.features[ni], self.min_con_dim)\n if inconsistent == True:\n count += 1\n self.index2count[ni] += 1\n self.index2count[mi] = count\n else:\n nmatch = len(self.match_indices)\n threads2incons_count = pymp.shared.dict()\n with pymp.Parallel(self.num_cores) as p:\n local_index2incons_count = {}\n for index in p.range(nmatch):\n mi = self.match_indices[index]\n match_features = self.features[mi]\n count = 0\n for ni in self.nonmatch_indices:\n inconsistent = self.compare_features(match_features,\n self.features[ni], self.min_con_dim)\n if inconsistent == True:\n count += 1\n if ni in local_index2incons_count:\n local_index2incons_count[ni] += 1\n else:\n local_index2incons_count[ni] = 1\n if count > 0:\n local_index2incons_count[mi] = count\n threads2incons_count[p.thread_num] = local_index2incons_count\n for _, local_index2incons_count in threads2incons_count.items():\n for index, count in local_index2incons_count.items():\n if index in self.index2count:\n self.index2count[index] += count\n else:\n self.index2count[index] = count\n return self.index2count\n\n def _get_inconsistency_indices(self):\n if self.num_cores == 1:\n for mi in self.match_indices:\n match_features = self.features[mi]\n incons_indices = []\n for ni in self.nonmatch_indices:\n inconsistent = self.compare_features(match_features,\n self.features[ni], self.min_con_dim)\n if inconsistent == True:\n incons_indices.append(ni)\n if len(incons_indices) > 0:\n self.index2incons[mi] = incons_indices\n for ni in incons_indices:\n if ni in self.index2incons:\n self.index2incons[ni].append(mi)\n else:\n self.index2incons[ni] = [mi]\n else:\n nmatch = len(self.match_indices)\n threads2incons = pymp.shared.dict()\n with pymp.Parallel(self.num_cores) as p:\n local_index2incons = {}\n for index in p.range(nmatch):\n mi = self.match_indices[index]\n match_features = self.features[mi]\n incons_indices = []\n for ni in self.nonmatch_indices:\n inconsistent = self.compare_features(match_features,\n self.features[ni], self.min_con_dim)\n if inconsistent == True:\n incons_indices.append(ni)\n if len(incons_indices) > 0:\n local_index2incons[mi] = incons_indices\n threads2incons[p.thread_num] = local_index2incons\n for _, local_index2incons in threads2incons.items():\n for mi, ni_indices in local_index2incons.items():\n self.index2incons[mi] = ni_indices\n for ni in ni_indices:\n if ni in self.index2incons:\n self.index2incons[ni].append(mi)\n else:\n self.index2incons[ni] = [mi]\n return self.index2incons\n",
"step-4": "<mask token>\nimport pymp\nfrom v6.mono import Mono\n\n\nclass BruteForce(Mono):\n\n def __init__(self, features, labels, params):\n super(BruteForce, self).__init__(features, labels, params)\n\n def _count_inconsistencies(self):\n if self.num_cores == 1:\n for ni in self.nonmatch_indices:\n self.index2count[ni] = 0\n for mi in self.match_indices:\n match_features = self.features[mi]\n count = 0\n for ni in self.nonmatch_indices:\n inconsistent = self.compare_features(match_features,\n self.features[ni], self.min_con_dim)\n if inconsistent == True:\n count += 1\n self.index2count[ni] += 1\n self.index2count[mi] = count\n else:\n nmatch = len(self.match_indices)\n threads2incons_count = pymp.shared.dict()\n with pymp.Parallel(self.num_cores) as p:\n local_index2incons_count = {}\n for index in p.range(nmatch):\n mi = self.match_indices[index]\n match_features = self.features[mi]\n count = 0\n for ni in self.nonmatch_indices:\n inconsistent = self.compare_features(match_features,\n self.features[ni], self.min_con_dim)\n if inconsistent == True:\n count += 1\n if ni in local_index2incons_count:\n local_index2incons_count[ni] += 1\n else:\n local_index2incons_count[ni] = 1\n if count > 0:\n local_index2incons_count[mi] = count\n threads2incons_count[p.thread_num] = local_index2incons_count\n for _, local_index2incons_count in threads2incons_count.items():\n for index, count in local_index2incons_count.items():\n if index in self.index2count:\n self.index2count[index] += count\n else:\n self.index2count[index] = count\n return self.index2count\n\n def _get_inconsistency_indices(self):\n if self.num_cores == 1:\n for mi in self.match_indices:\n match_features = self.features[mi]\n incons_indices = []\n for ni in self.nonmatch_indices:\n inconsistent = self.compare_features(match_features,\n self.features[ni], self.min_con_dim)\n if inconsistent == True:\n incons_indices.append(ni)\n if len(incons_indices) > 0:\n self.index2incons[mi] = incons_indices\n for ni in incons_indices:\n if ni in self.index2incons:\n self.index2incons[ni].append(mi)\n else:\n self.index2incons[ni] = [mi]\n else:\n nmatch = len(self.match_indices)\n threads2incons = pymp.shared.dict()\n with pymp.Parallel(self.num_cores) as p:\n local_index2incons = {}\n for index in p.range(nmatch):\n mi = self.match_indices[index]\n match_features = self.features[mi]\n incons_indices = []\n for ni in self.nonmatch_indices:\n inconsistent = self.compare_features(match_features,\n self.features[ni], self.min_con_dim)\n if inconsistent == True:\n incons_indices.append(ni)\n if len(incons_indices) > 0:\n local_index2incons[mi] = incons_indices\n threads2incons[p.thread_num] = local_index2incons\n for _, local_index2incons in threads2incons.items():\n for mi, ni_indices in local_index2incons.items():\n self.index2incons[mi] = ni_indices\n for ni in ni_indices:\n if ni in self.index2incons:\n self.index2incons[ni].append(mi)\n else:\n self.index2incons[ni] = [mi]\n return self.index2incons\n",
"step-5": "'''\nCreated on Mar 7, 2019\n\n@author: hzhang0418\n'''\n\nimport pymp\n\nfrom v6.mono import Mono\n\nclass BruteForce(Mono):\n \n def __init__(self, features, labels, params):\n super(BruteForce, self).__init__(features, labels, params)\n \n \n def _count_inconsistencies(self):\n if self.num_cores==1:\n for ni in self.nonmatch_indices:\n self.index2count[ni] = 0\n \n for mi in self.match_indices:\n match_features = self.features[mi]\n count = 0\n for ni in self.nonmatch_indices:\n inconsistent = self.compare_features(match_features, self.features[ni], self.min_con_dim)\n if inconsistent == True:\n count += 1\n self.index2count[ni] += 1\n self.index2count[mi] = count\n \n else:\n nmatch = len(self.match_indices)\n \n threads2incons_count = pymp.shared.dict()\n \n with pymp.Parallel(self.num_cores) as p:\n local_index2incons_count = {}\n for index in p.range(nmatch):\n mi = self.match_indices[index]\n match_features = self.features[mi]\n \n count = 0\n \n for ni in self.nonmatch_indices:\n inconsistent = self.compare_features(match_features, self.features[ni], self.min_con_dim)\n if inconsistent == True:\n count += 1\n if ni in local_index2incons_count:\n local_index2incons_count[ni] += 1\n else:\n local_index2incons_count[ni] = 1\n \n if count>0:\n local_index2incons_count[mi] = count\n \n threads2incons_count[p.thread_num] = local_index2incons_count\n \n for _, local_index2incons_count in threads2incons_count.items():\n for index, count in local_index2incons_count.items():\n if index in self.index2count:\n self.index2count[index] += count\n else:\n self.index2count[index] = count \n \n return self.index2count\n \n \n def _get_inconsistency_indices(self):\n \n if self.num_cores==1:\n \n for mi in self.match_indices:\n match_features = self.features[mi]\n incons_indices = []\n for ni in self.nonmatch_indices:\n inconsistent = self.compare_features(match_features, self.features[ni], self.min_con_dim)\n if inconsistent == True:\n incons_indices.append(ni)\n \n if len(incons_indices)>0:\n self.index2incons[mi] = incons_indices\n for ni in incons_indices:\n if ni in self.index2incons:\n self.index2incons[ni].append(mi)\n else:\n self.index2incons[ni] = [mi]\n \n else:\n \n nmatch = len(self.match_indices)\n \n threads2incons = pymp.shared.dict()\n \n with pymp.Parallel(self.num_cores) as p:\n local_index2incons = {}\n for index in p.range(nmatch):\n mi = self.match_indices[index]\n match_features = self.features[mi]\n \n incons_indices = []\n \n for ni in self.nonmatch_indices:\n inconsistent = self.compare_features(match_features, self.features[ni], self.min_con_dim)\n if inconsistent == True:\n incons_indices.append(ni)\n \n if len(incons_indices)>0:\n local_index2incons[mi] = incons_indices\n \n threads2incons[p.thread_num] = local_index2incons\n \n for _, local_index2incons in threads2incons.items():\n for mi, ni_indices in local_index2incons.items():\n self.index2incons[mi] = ni_indices\n for ni in ni_indices:\n if ni in self.index2incons:\n self.index2incons[ni].append(mi)\n else:\n self.index2incons[ni] = [mi]\n \n return self.index2incons",
"step-ids": [
0,
3,
4,
5,
6
]
}
|
[
0,
3,
4,
5,
6
] |
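# Count the even three-digit numbers that contain at least two equal digits.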
k = 0
for x in range(100, 1000, 2):
x = str(x)
if x[0] == x[1] or x[0] == x[2] or x[1] == x[2]:
k += 1
print(k)
|
normal
|
{
"blob_id": "af6dd7bde25453f25c0701e4ac246ff6bce29fa7",
"index": 1141,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor x in range(100, 1000, 2):\n x = str(x)\n if x[0] == x[1] or x[0] == x[2] or x[1] == x[2]:\n k += 1\nprint(k)\n",
"step-3": "k = 0\nfor x in range(100, 1000, 2):\n x = str(x)\n if x[0] == x[1] or x[0] == x[2] or x[1] == x[2]:\n k += 1\nprint(k)\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
from bs4 import BeautifulSoup
from aiounfurl.parsers import oembed
def test_oembed_not_match(oembed_providers):
oembed_url_extractor = oembed.OEmbedURLExtractor(oembed_providers)
url = 'http://test.com'
assert oembed_url_extractor.get_oembed_url(url) is None
def test_oembed_founded(oembed_providers):
oembed_url_extractor = oembed.OEmbedURLExtractor(oembed_providers)
url = 'https://www.instagram.com/p/BNHh2YJDdcY/'
oembed_url = oembed_url_extractor.get_oembed_url(url)
assert isinstance(oembed_url, str)
def test_oembed_discovery(oembed_providers, files_dir):
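    # Discovery path: the oEmbed endpoint is advertised via a <link> tag in the page HTML rather than via the providers list.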
oembed_html = (files_dir / 'oembed_json.html').read_text()
    soup = BeautifulSoup(oembed_html, 'html.parser')  # an explicit parser keeps bs4 from guessing and warning
oembed_url_extractor = oembed.OEmbedURLExtractor(oembed_providers)
oembed_url = oembed_url_extractor.get_oembed_url_from_html(soup)
assert isinstance(oembed_url, str)
def test_oembed_params(oembed_providers):
oembed_url_extractor = oembed.OEmbedURLExtractor(oembed_providers,
params={'maxwidth': 200})
url = 'https://www.instagram.com/p/BNHh2YJDdcY/'
oembed_url = oembed_url_extractor.get_oembed_url(url)
assert isinstance(oembed_url, str)
assert 'maxwidth=200' in oembed_url
|
normal
|
{
"blob_id": "7b2ad0b4eca7b31b314e32ad57d51be82f0eaf61",
"index": 6979,
"step-1": "<mask token>\n\n\ndef test_oembed_founded(oembed_providers):\n oembed_url_extractor = oembed.OEmbedURLExtractor(oembed_providers)\n url = 'https://www.instagram.com/p/BNHh2YJDdcY/'\n oembed_url = oembed_url_extractor.get_oembed_url(url)\n assert isinstance(oembed_url, str)\n\n\ndef test_oembed_discovery(oembed_providers, files_dir):\n oembed_html = (files_dir / 'oembed_json.html').read_text()\n soup = BeautifulSoup(oembed_html)\n oembed_url_extractor = oembed.OEmbedURLExtractor(oembed_providers)\n oembed_url = oembed_url_extractor.get_oembed_url_from_html(soup)\n assert isinstance(oembed_url, str)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef test_oembed_not_match(oembed_providers):\n oembed_url_extractor = oembed.OEmbedURLExtractor(oembed_providers)\n url = 'http://test.com'\n assert oembed_url_extractor.get_oembed_url(url) is None\n\n\ndef test_oembed_founded(oembed_providers):\n oembed_url_extractor = oembed.OEmbedURLExtractor(oembed_providers)\n url = 'https://www.instagram.com/p/BNHh2YJDdcY/'\n oembed_url = oembed_url_extractor.get_oembed_url(url)\n assert isinstance(oembed_url, str)\n\n\ndef test_oembed_discovery(oembed_providers, files_dir):\n oembed_html = (files_dir / 'oembed_json.html').read_text()\n soup = BeautifulSoup(oembed_html)\n oembed_url_extractor = oembed.OEmbedURLExtractor(oembed_providers)\n oembed_url = oembed_url_extractor.get_oembed_url_from_html(soup)\n assert isinstance(oembed_url, str)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef test_oembed_not_match(oembed_providers):\n oembed_url_extractor = oembed.OEmbedURLExtractor(oembed_providers)\n url = 'http://test.com'\n assert oembed_url_extractor.get_oembed_url(url) is None\n\n\ndef test_oembed_founded(oembed_providers):\n oembed_url_extractor = oembed.OEmbedURLExtractor(oembed_providers)\n url = 'https://www.instagram.com/p/BNHh2YJDdcY/'\n oembed_url = oembed_url_extractor.get_oembed_url(url)\n assert isinstance(oembed_url, str)\n\n\ndef test_oembed_discovery(oembed_providers, files_dir):\n oembed_html = (files_dir / 'oembed_json.html').read_text()\n soup = BeautifulSoup(oembed_html)\n oembed_url_extractor = oembed.OEmbedURLExtractor(oembed_providers)\n oembed_url = oembed_url_extractor.get_oembed_url_from_html(soup)\n assert isinstance(oembed_url, str)\n\n\ndef test_oembed_params(oembed_providers):\n oembed_url_extractor = oembed.OEmbedURLExtractor(oembed_providers,\n params={'maxwidth': 200})\n url = 'https://www.instagram.com/p/BNHh2YJDdcY/'\n oembed_url = oembed_url_extractor.get_oembed_url(url)\n assert isinstance(oembed_url, str)\n assert 'maxwidth=200' in oembed_url\n",
"step-4": "from bs4 import BeautifulSoup\nfrom aiounfurl.parsers import oembed\n\n\ndef test_oembed_not_match(oembed_providers):\n oembed_url_extractor = oembed.OEmbedURLExtractor(oembed_providers)\n url = 'http://test.com'\n assert oembed_url_extractor.get_oembed_url(url) is None\n\n\ndef test_oembed_founded(oembed_providers):\n oembed_url_extractor = oembed.OEmbedURLExtractor(oembed_providers)\n url = 'https://www.instagram.com/p/BNHh2YJDdcY/'\n oembed_url = oembed_url_extractor.get_oembed_url(url)\n assert isinstance(oembed_url, str)\n\n\ndef test_oembed_discovery(oembed_providers, files_dir):\n oembed_html = (files_dir / 'oembed_json.html').read_text()\n soup = BeautifulSoup(oembed_html)\n oembed_url_extractor = oembed.OEmbedURLExtractor(oembed_providers)\n oembed_url = oembed_url_extractor.get_oembed_url_from_html(soup)\n assert isinstance(oembed_url, str)\n\n\ndef test_oembed_params(oembed_providers):\n oembed_url_extractor = oembed.OEmbedURLExtractor(oembed_providers,\n params={'maxwidth': 200})\n url = 'https://www.instagram.com/p/BNHh2YJDdcY/'\n oembed_url = oembed_url_extractor.get_oembed_url(url)\n assert isinstance(oembed_url, str)\n assert 'maxwidth=200' in oembed_url\n",
"step-5": null,
"step-ids": [
2,
3,
4,
5
]
}
|
[
2,
3,
4,
5
] |
# dates.py
"""Date/time parsing and manipulation functions
"""
# Some people, when confronted with a problem, think
# "I know, I'll use regular expressions."
# Now they have two problems.
# -- Jamie Zawinski
import datetime as dt
import time
import re
_months = [
'january',
'february',
'march',
'april',
'may',
'june',
'july',
'august',
'september',
'october',
'november',
'december',
]
# Formatting directives and corresponding regular expression
_regexps = {
'B': r'(?P<b>' + '|'.join(_months) + ')',
'b': r'(?P<b>' + '|'.join(m[0:3] for m in _months) + ')',
'm': r'(?P<m>\d\d?)',
'd': r'(?P<d>\d\d?)',
'Y': r'(?P<Y>\d\d\d\d)',
'y': r'(?P<y>\d\d)',
'I': r'(?P<H>0?[1-9]|1[012])',
'H': r'(?P<H>[01]?[0-9]|2[0-3])',
'M': r'(?P<M>[0-5]\d)',
'S': r'(?P<S>[0-5]\d)',
'f': r'(?P<f>\d+)',
'p': r'(?P<p>am|pm)',
}
# Support date formats and examples
_date_formats = [
'B d, Y', # October 15, 2006
'b d, Y', # Oct 15, 2006
'B d Y', # October 15 2006
'b d Y', # Oct 15 2006
'B d', # October 15
'b d', # Oct 15
'Y/m/d', # 2006/10/15
'Y-m-d', # 2006-10-15
'm/d/Y', # 10/15/2006
'm-d-Y', # 10-15-2006
'm/d/y', # 10/15/06
'm-d-y', # 10-15-06
'y/m/d', # 06/10/15
'y-m-d', # 06-10-15
]
# Supported time formats and examples
_time_formats = [
'I:M:S.f p', # 3:05:29.108 PM
'H:M:S.f', # 15:05:29.108
'I:M:S p', # 3:05:29 PM
'H:M:S', # 15:05:29
'I:M p', # 3:05 PM
'H:M', # 15:05
]
class CannotParse (Exception):
"""Failure to parse a date or time.
"""
pass
def parse(string, format):
"""Attempt to parse the given string as a date in the given format.
This is similar to `datetime.strptime`, but this can handle date strings
with trailing characters. If it still fails to parse, raise a
`CannotParse` exception.
Examples::
>>> parse('2010/08/28', '%Y/%m/%d')
datetime.datetime(2010, 8, 28, 0, 0)
>>> parse('2010/08/28 extra stuff', '%Y/%m/%d')
datetime.datetime(2010, 8, 28, 0, 0)
>>> parse('2010/08/28', '%m/%d/%y')
Traceback (most recent call last):
CannotParse: time data '2010/08/28' does not match format '%m/%d/%y'
"""
# Count the number of spaces in the format string (N), and
# truncate everything after the (N+1)th space
spaces = format.count(' ') + 1
string = ' '.join(string.split()[:spaces])
try:
result = dt.datetime.strptime(string, format)
    except ValueError as err:
raise CannotParse(str(err))
else:
return result
def format_regexp(simple_format):
r"""Given a simplified date or time format string, return ``(format,
regexp)``, where ``format`` is a `strptime`-compatible format string, and
``regexp`` is a regular expression that matches dates or times in that
format.
The ``simple_format`` string supports a subset of `strptime` formatting
directives, with the leading ``%`` characters removed.
Examples::
>>> format_regexp('Y/m/d')
('%Y/%m/%d', '(?P<Y>\\d\\d\\d\\d)/(?P<m>\\d\\d?)/(?P<d>\\d\\d?)')
>>> format_regexp('H:M:S')
('%H:%M:%S', '(?P<H>[01]?[0-9]|2[0-3]):(?P<M>[0-5]\\d):(?P<S>[0-5]\\d)')
"""
format, regexp = ('', '')
for char in simple_format:
if char in _regexps:
format += '%' + char
regexp += _regexps[char]
else:
format += char
regexp += char
return (format, regexp)
def _compiled_format_regexps(date_formats, time_formats):
"""Return a list of ``(format, compiled_regexp)`` for all combinations
of ``date_formats`` and ``time_formats``.
"""
# List of all combinations of date_formats and time_formats
date_time_formats = []
for df in date_formats:
for tf in time_formats:
date_time_formats.append(df + ' ' + tf)
# Add date-only formats
for df in date_formats:
date_time_formats.append(df)
# Add time-only formats
for tf in time_formats:
date_time_formats.append(tf)
# (format, compiled_regexp) for each supported format
format_regexps = []
for dt_format in date_time_formats:
format, regexp = format_regexp(dt_format)
# Compile the regexp
format_regexps.append(
(format, re.compile(regexp, re.IGNORECASE))
)
return format_regexps
def guess_format(string):
"""Try to guess the date/time format of ``string``, or raise a
`CannotParse` exception.
Examples::
>>> guess_format('2010/01/28 13:25:49')
'%Y/%m/%d %H:%M:%S'
>>> guess_format('01/28/10 1:25:49 PM')
'%m/%d/%y %I:%M:%S %p'
>>> guess_format('01/28/2010 13:25:49.123')
'%m/%d/%Y %H:%M:%S.%f'
>>> guess_format('Aug 15 2009 15:24')
'%b %d %Y %H:%M'
>>> guess_format('3-14-15 9:26:53.589')
'%m-%d-%y %H:%M:%S.%f'
Leading and trailing text may be present::
>>> guess_format('FOO April 1, 2007 3:45 PM BAR')
'%B %d, %Y %I:%M %p'
>>> guess_format('[[2010-09-25 14:19:24]]')
'%Y-%m-%d %H:%M:%S'
"""
format_regexps = _compiled_format_regexps(_date_formats, _time_formats)
for format, regexp in format_regexps:
if regexp.search(string):
return format
# Nothing matched
raise CannotParse("Could not guess date/time format in: %s" % string)
def guess_file_date_format(filename):
"""Open the given file and use `guess_format` to look for a
date/time at the beginning of each line. Return the format string for
the first one that's found. Raise `CannotParse` if none is found.
"""
for line in open(filename):
try:
format = guess_format(line)
except CannotParse:
pass
else:
return format
raise CannotParse("No date/time strings found in '%s'" % filename)
def date_chop(line, dateformat='%m/%d/%y %I:%M:%S %p', resolution=60):
"""Given a ``line`` of text, get a date/time formatted as ``dateformat``,
and return a `datetime` object rounded to the nearest ``resolution``
seconds. If ``line`` fails to match ``dateformat``, a `CannotParse`
exception is raised.
Examples::
>>> date_chop('1976/05/19 12:05:17', '%Y/%m/%d %H:%M:%S', 60)
datetime.datetime(1976, 5, 19, 12, 5)
>>> date_chop('1976/05/19 12:05:17', '%Y/%m/%d %H:%M:%S', 3600)
datetime.datetime(1976, 5, 19, 12, 0)
"""
timestamp = parse(line, dateformat)
# Round the timestamp to the given resolution
# First convert to seconds-since-epoch
epoch_seconds = int(time.mktime(timestamp.timetuple()))
# Then do integer division to truncate
    rounded_seconds = (epoch_seconds // resolution) * resolution
# Convert back to a datetime
return dt.datetime.fromtimestamp(rounded_seconds)
|
normal
|
{
"blob_id": "458bc2b5f843e4c5bb3f9180ab2cbec7409b8d3e",
"index": 4946,
"step-1": "# dates.py\n\n\"\"\"Date/time parsing and manipulation functions\n\"\"\"\n\n# Some people, when confronted with a problem, think\n# \"I know, I'll use regular expressions.\"\n# Now they have two problems.\n# -- Jamie Zawinski\n\nimport datetime as dt\nimport time\nimport re\n\n_months = [\n 'january',\n 'february',\n 'march',\n 'april',\n 'may',\n 'june',\n 'july',\n 'august',\n 'september',\n 'october',\n 'november',\n 'december',\n]\n\n# Formatting directives and corresponding regular expression\n_regexps = {\n 'B': r'(?P<b>' + '|'.join(_months) + ')',\n 'b': r'(?P<b>' + '|'.join(m[0:3] for m in _months) + ')',\n 'm': r'(?P<m>\\d\\d?)',\n 'd': r'(?P<d>\\d\\d?)',\n 'Y': r'(?P<Y>\\d\\d\\d\\d)',\n 'y': r'(?P<y>\\d\\d)',\n 'I': r'(?P<H>0?[1-9]|1[012])',\n 'H': r'(?P<H>[01]?[0-9]|2[0-3])',\n 'M': r'(?P<M>[0-5]\\d)',\n 'S': r'(?P<S>[0-5]\\d)',\n 'f': r'(?P<f>\\d+)',\n 'p': r'(?P<p>am|pm)',\n}\n\n# Support date formats and examples\n_date_formats = [\n 'B d, Y', # October 15, 2006\n 'b d, Y', # Oct 15, 2006\n 'B d Y', # October 15 2006\n 'b d Y', # Oct 15 2006\n 'B d', # October 15\n 'b d', # Oct 15\n 'Y/m/d', # 2006/10/15\n 'Y-m-d', # 2006-10-15\n 'm/d/Y', # 10/15/2006\n 'm-d-Y', # 10-15-2006\n 'm/d/y', # 10/15/06\n 'm-d-y', # 10-15-06\n 'y/m/d', # 06/10/15\n 'y-m-d', # 06-10-15\n]\n\n# Supported time formats and examples\n_time_formats = [\n 'I:M:S.f p', # 3:05:29.108 PM\n 'H:M:S.f', # 15:05:29.108\n 'I:M:S p', # 3:05:29 PM\n 'H:M:S', # 15:05:29\n 'I:M p', # 3:05 PM\n 'H:M', # 15:05\n]\n\n\nclass CannotParse (Exception):\n \"\"\"Failure to parse a date or time.\n \"\"\"\n pass\n\n\ndef parse(string, format):\n \"\"\"Attempt to parse the given string as a date in the given format.\n This is similar to `datetime.strptime`, but this can handle date strings\n with trailing characters. 
If it still fails to parse, raise a\n `CannotParse` exception.\n\n Examples::\n\n >>> parse('2010/08/28', '%Y/%m/%d')\n datetime.datetime(2010, 8, 28, 0, 0)\n\n >>> parse('2010/08/28 extra stuff', '%Y/%m/%d')\n datetime.datetime(2010, 8, 28, 0, 0)\n\n >>> parse('2010/08/28', '%m/%d/%y')\n Traceback (most recent call last):\n CannotParse: time data '2010/08/28' does not match format '%m/%d/%y'\n\n \"\"\"\n # Count the number of spaces in the format string (N), and\n # truncate everything after the (N+1)th space\n spaces = format.count(' ') + 1\n string = ' '.join(string.split()[:spaces])\n\n try:\n result = dt.datetime.strptime(string, format)\n except ValueError, err:\n raise CannotParse(str(err))\n else:\n return result\n\n\ndef format_regexp(simple_format):\n r\"\"\"Given a simplified date or time format string, return ``(format,\n regexp)``, where ``format`` is a `strptime`-compatible format string, and\n ``regexp`` is a regular expression that matches dates or times in that\n format.\n\n The ``simple_format`` string supports a subset of `strptime` formatting\n directives, with the leading ``%`` characters removed.\n\n Examples::\n\n >>> format_regexp('Y/m/d')\n ('%Y/%m/%d', '(?P<Y>\\\\d\\\\d\\\\d\\\\d)/(?P<m>\\\\d\\\\d?)/(?P<d>\\\\d\\\\d?)')\n\n >>> format_regexp('H:M:S')\n ('%H:%M:%S', '(?P<H>[01]?[0-9]|2[0-3]):(?P<M>[0-5]\\\\d):(?P<S>[0-5]\\\\d)')\n\n \"\"\"\n format, regexp = ('', '')\n for char in simple_format:\n if char in _regexps:\n format += '%' + char\n regexp += _regexps[char]\n else:\n format += char\n regexp += char\n return (format, regexp)\n\n\ndef _compiled_format_regexps(date_formats, time_formats):\n \"\"\"Return a list of ``(format, compiled_regexp)`` for all combinations\n of ``date_formats`` and ``time_formats``.\n \"\"\"\n # List of all combinations of date_formats and time_formats\n date_time_formats = []\n for df in date_formats:\n for tf in time_formats:\n date_time_formats.append(df + ' ' + tf)\n\n # Add date-only formats\n for df in date_formats:\n date_time_formats.append(df)\n\n # Add time-only formats\n for tf in time_formats:\n date_time_formats.append(tf)\n\n # (format, compiled_regexp) for each supported format\n format_regexps = []\n for dt_format in date_time_formats:\n format, regexp = format_regexp(dt_format)\n # Compile the regexp\n format_regexps.append(\n (format, re.compile(regexp, re.IGNORECASE))\n )\n\n return format_regexps\n\n\ndef guess_format(string):\n \"\"\"Try to guess the date/time format of ``string``, or raise a\n `CannotParse` exception.\n\n Examples::\n\n >>> guess_format('2010/01/28 13:25:49')\n '%Y/%m/%d %H:%M:%S'\n\n >>> guess_format('01/28/10 1:25:49 PM')\n '%m/%d/%y %I:%M:%S %p'\n\n >>> guess_format('01/28/2010 13:25:49.123')\n '%m/%d/%Y %H:%M:%S.%f'\n\n >>> guess_format('Aug 15 2009 15:24')\n '%b %d %Y %H:%M'\n\n >>> guess_format('3-14-15 9:26:53.589')\n '%m-%d-%y %H:%M:%S.%f'\n\n Leading and trailing text may be present::\n\n >>> guess_format('FOO April 1, 2007 3:45 PM BAR')\n '%B %d, %Y %I:%M %p'\n\n >>> guess_format('[[2010-09-25 14:19:24]]')\n '%Y-%m-%d %H:%M:%S'\n\n \"\"\"\n format_regexps = _compiled_format_regexps(_date_formats, _time_formats)\n for format, regexp in format_regexps:\n if regexp.search(string):\n return format\n # Nothing matched\n raise CannotParse(\"Could not guess date/time format in: %s\" % string)\n\n\ndef guess_file_date_format(filename):\n \"\"\"Open the given file and use `guess_format` to look for a\n date/time at the beginning of each line. 
Return the format string for\n the first one that's found. Raise `CannotParse` if none is found.\n \"\"\"\n for line in open(filename):\n try:\n format = guess_format(line)\n except CannotParse:\n pass\n else:\n return format\n\n raise CannotParse(\"No date/time strings found in '%s'\" % filename)\n\n\ndef date_chop(line, dateformat='%m/%d/%y %I:%M:%S %p', resolution=60):\n \"\"\"Given a ``line`` of text, get a date/time formatted as ``dateformat``,\n and return a `datetime` object rounded to the nearest ``resolution``\n seconds. If ``line`` fails to match ``dateformat``, a `CannotParse`\n exception is raised.\n\n Examples::\n\n >>> date_chop('1976/05/19 12:05:17', '%Y/%m/%d %H:%M:%S', 60)\n datetime.datetime(1976, 5, 19, 12, 5)\n\n >>> date_chop('1976/05/19 12:05:17', '%Y/%m/%d %H:%M:%S', 3600)\n datetime.datetime(1976, 5, 19, 12, 0)\n\n \"\"\"\n timestamp = parse(line, dateformat)\n # Round the timestamp to the given resolution\n # First convert to seconds-since-epoch\n epoch_seconds = int(time.mktime(timestamp.timetuple()))\n # Then do integer division to truncate\n rounded_seconds = (epoch_seconds / resolution) * resolution\n # Convert back to a datetime\n return dt.datetime.fromtimestamp(rounded_seconds)\n\n\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
import pandas as pd;
import time;
import matplotlib.pyplot as plt;
import matplotlib.cm as cm
import matplotlib.patches as mpatch;
import numpy as np;
import sys;
sys.path.append("/uufs/chpc.utah.edu/common/home/u0403692/prog/prism/test")
import bettersankey as bsk;
datapath = "/uufs/chpc.utah.edu/common/home/u0403692/prog/prism/data/timeuse/"
print("reading...")
acttable = pd.read_csv(datapath + "atusact_2015/atusact_2015.dat")
infotable = pd.read_csv(datapath + "atusresp_2015/atusresp_2015.dat")
print("joining...")
jointable = pd.merge(acttable,infotable,on='TUCASEID')
#tiermode='TRTIER2'
tiermode='TRCODE'
#columns=['case','day','hour','origin','dest','corigin','cdest']
trans = pd.DataFrame();
print("processing...")
trans['case'] = jointable['TUCASEID']
trans['caseshift'] = jointable['TUCASEID'].shift(-1)
trans['step'] = jointable['TUACTIVITY_N']
trans['day'] = jointable['TUDIARYDAY']
trans['hour'] = jointable['TUCUMDUR24'].apply(lambda x: np.floor(x/60.0))
trans['origin'] = jointable[tiermode]
trans['dest'] = jointable[tiermode].shift(-1)
trans['corigin'] = jointable.apply((lambda x: (x['TUCC5'] == 1) or (x['TUCC5B'] == 1) or (x['TUCC7'] == 1) or (x['TUCC8'] == 1)),axis=1)
trans['cdest'] = trans['corigin'].shift(-1)
trans = trans[trans.caseshift.notnull()]
trans['caseshift'] = trans['caseshift'].apply(lambda x:int(x))
trans['dest'] = trans['dest'].apply(lambda x:int(x))
trans = trans[trans.case == trans.caseshift]
trans.drop('caseshift',axis=1,inplace =True)
trans.to_csv(datapath + "transitions.csv");
print(len(set(trans['dest'])));
s = trans.groupby(['origin','dest']).size()
# s.to_csv(datapath + "transitioncounts.csv")
print("plotting...")
v = s.unstack().as_matrix();
v[np.isnan(v)] = 0.0;
logv = np.log10(v);
logv[np.isneginf(logv)] = 0.0;
print("Max value:", np.max(v), " (",np.max(logv),")")
plt.pcolormesh(logv,cmap='Blues');
plt.colorbar();
plt.yticks(np.arange(0,len(s.index.levels[0]),1),s.index.levels[0])
plt.xticks(np.arange(0,len(s.index.levels[0]),1),s.index.levels[0],rotation=45);
plt.show()
exit();
|
normal
|
{
"blob_id": "07b6ded9b4841bdba62d481664a399f0b125fcbf",
"index": 7876,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nsys.path.append('/uufs/chpc.utah.edu/common/home/u0403692/prog/prism/test')\n<mask token>\nprint('reading...')\n<mask token>\nprint('joining...')\n<mask token>\nprint('processing...')\n<mask token>\ntrans.drop('caseshift', axis=1, inplace=True)\ntrans.to_csv(datapath + 'transitions.csv')\nprint(len(set(trans['dest'])))\n<mask token>\nprint('plotting...')\n<mask token>\nprint('Max value:', np.max(v), ' (', np.max(logv), ')')\nplt.pcolormesh(logv, cmap='Blues')\nplt.colorbar()\nplt.yticks(np.arange(0, len(s.index.levels[0]), 1), s.index.levels[0])\nplt.xticks(np.arange(0, len(s.index.levels[0]), 1), s.index.levels[0],\n rotation=45)\nplt.show()\nexit()\n",
"step-3": "<mask token>\nsys.path.append('/uufs/chpc.utah.edu/common/home/u0403692/prog/prism/test')\n<mask token>\ndatapath = '/uufs/chpc.utah.edu/common/home/u0403692/prog/prism/data/timeuse/'\nprint('reading...')\nacttable = pd.read_csv(datapath + 'atusact_2015/atusact_2015.dat')\ninfotable = pd.read_csv(datapath + 'atusresp_2015/atusresp_2015.dat')\nprint('joining...')\njointable = pd.merge(acttable, infotable, on='TUCASEID')\ntiermode = 'TRCODE'\ntrans = pd.DataFrame()\nprint('processing...')\ntrans['case'] = jointable['TUCASEID']\ntrans['caseshift'] = jointable['TUCASEID'].shift(-1)\ntrans['step'] = jointable['TUACTIVITY_N']\ntrans['day'] = jointable['TUDIARYDAY']\ntrans['hour'] = jointable['TUCUMDUR24'].apply(lambda x: np.floor(x / 60.0))\ntrans['origin'] = jointable[tiermode]\ntrans['dest'] = jointable[tiermode].shift(-1)\ntrans['corigin'] = jointable.apply(lambda x: x['TUCC5'] == 1 or x['TUCC5B'] ==\n 1 or x['TUCC7'] == 1 or x['TUCC8'] == 1, axis=1)\ntrans['cdest'] = trans['corigin'].shift(-1)\ntrans = trans[trans.caseshift.notnull()]\ntrans['caseshift'] = trans['caseshift'].apply(lambda x: int(x))\ntrans['dest'] = trans['dest'].apply(lambda x: int(x))\ntrans = trans[trans.case == trans.caseshift]\ntrans.drop('caseshift', axis=1, inplace=True)\ntrans.to_csv(datapath + 'transitions.csv')\nprint(len(set(trans['dest'])))\ns = trans.groupby(['origin', 'dest']).size()\nprint('plotting...')\nv = s.unstack().as_matrix()\nv[np.isnan(v)] = 0.0\nlogv = np.log10(v)\nlogv[np.isneginf(logv)] = 0.0\nprint('Max value:', np.max(v), ' (', np.max(logv), ')')\nplt.pcolormesh(logv, cmap='Blues')\nplt.colorbar()\nplt.yticks(np.arange(0, len(s.index.levels[0]), 1), s.index.levels[0])\nplt.xticks(np.arange(0, len(s.index.levels[0]), 1), s.index.levels[0],\n rotation=45)\nplt.show()\nexit()\n",
"step-4": "import pandas as pd\nimport time\nimport matplotlib.pyplot as plt\nimport matplotlib.cm as cm\nimport matplotlib.patches as mpatch\nimport numpy as np\nimport sys\nsys.path.append('/uufs/chpc.utah.edu/common/home/u0403692/prog/prism/test')\nimport bettersankey as bsk\ndatapath = '/uufs/chpc.utah.edu/common/home/u0403692/prog/prism/data/timeuse/'\nprint('reading...')\nacttable = pd.read_csv(datapath + 'atusact_2015/atusact_2015.dat')\ninfotable = pd.read_csv(datapath + 'atusresp_2015/atusresp_2015.dat')\nprint('joining...')\njointable = pd.merge(acttable, infotable, on='TUCASEID')\ntiermode = 'TRCODE'\ntrans = pd.DataFrame()\nprint('processing...')\ntrans['case'] = jointable['TUCASEID']\ntrans['caseshift'] = jointable['TUCASEID'].shift(-1)\ntrans['step'] = jointable['TUACTIVITY_N']\ntrans['day'] = jointable['TUDIARYDAY']\ntrans['hour'] = jointable['TUCUMDUR24'].apply(lambda x: np.floor(x / 60.0))\ntrans['origin'] = jointable[tiermode]\ntrans['dest'] = jointable[tiermode].shift(-1)\ntrans['corigin'] = jointable.apply(lambda x: x['TUCC5'] == 1 or x['TUCC5B'] ==\n 1 or x['TUCC7'] == 1 or x['TUCC8'] == 1, axis=1)\ntrans['cdest'] = trans['corigin'].shift(-1)\ntrans = trans[trans.caseshift.notnull()]\ntrans['caseshift'] = trans['caseshift'].apply(lambda x: int(x))\ntrans['dest'] = trans['dest'].apply(lambda x: int(x))\ntrans = trans[trans.case == trans.caseshift]\ntrans.drop('caseshift', axis=1, inplace=True)\ntrans.to_csv(datapath + 'transitions.csv')\nprint(len(set(trans['dest'])))\ns = trans.groupby(['origin', 'dest']).size()\nprint('plotting...')\nv = s.unstack().as_matrix()\nv[np.isnan(v)] = 0.0\nlogv = np.log10(v)\nlogv[np.isneginf(logv)] = 0.0\nprint('Max value:', np.max(v), ' (', np.max(logv), ')')\nplt.pcolormesh(logv, cmap='Blues')\nplt.colorbar()\nplt.yticks(np.arange(0, len(s.index.levels[0]), 1), s.index.levels[0])\nplt.xticks(np.arange(0, len(s.index.levels[0]), 1), s.index.levels[0],\n rotation=45)\nplt.show()\nexit()\n",
"step-5": "import pandas as pd;\nimport time;\nimport matplotlib.pyplot as plt;\nimport matplotlib.cm as cm\nimport matplotlib.patches as mpatch;\nimport numpy as np;\nimport sys;\n\nsys.path.append(\"/uufs/chpc.utah.edu/common/home/u0403692/prog/prism/test\")\nimport bettersankey as bsk;\n\n\ndatapath = \"/uufs/chpc.utah.edu/common/home/u0403692/prog/prism/data/timeuse/\"\n\nprint(\"reading...\")\nacttable = pd.read_csv(datapath + \"atusact_2015/atusact_2015.dat\")\ninfotable = pd.read_csv(datapath + \"atusresp_2015/atusresp_2015.dat\")\nprint(\"joining...\")\njointable = pd.merge(acttable,infotable,on='TUCASEID')\n\n#tiermode='TRTIER2'\ntiermode='TRCODE'\n\n#columns=['case','day','hour','origin','dest','corigin','cdest']\ntrans = pd.DataFrame();\n\nprint(\"processing...\")\n\ntrans['case'] = jointable['TUCASEID']\ntrans['caseshift'] = jointable['TUCASEID'].shift(-1)\ntrans['step'] = jointable['TUACTIVITY_N']\ntrans['day'] = jointable['TUDIARYDAY']\ntrans['hour'] = jointable['TUCUMDUR24'].apply(lambda x: np.floor(x/60.0))\ntrans['origin'] = jointable[tiermode]\ntrans['dest'] = jointable[tiermode].shift(-1)\ntrans['corigin'] = jointable.apply((lambda x: (x['TUCC5'] == 1) or (x['TUCC5B'] == 1) or (x['TUCC7'] == 1) or (x['TUCC8'] == 1)),axis=1)\ntrans['cdest'] = trans['corigin'].shift(-1)\n\ntrans = trans[trans.caseshift.notnull()]\n\ntrans['caseshift'] = trans['caseshift'].apply(lambda x:int(x))\ntrans['dest'] = trans['dest'].apply(lambda x:int(x))\n\ntrans = trans[trans.case == trans.caseshift]\ntrans.drop('caseshift',axis=1,inplace =True)\n\ntrans.to_csv(datapath + \"transitions.csv\");\n\nprint(len(set(trans['dest'])));\n\ns = trans.groupby(['origin','dest']).size()\n\n# s.to_csv(datapath + \"transitioncounts.csv\")\n\nprint(\"plotting...\")\n\nv = s.unstack().as_matrix();\nv[np.isnan(v)] = 0.0;\nlogv = np.log10(v);\nlogv[np.isneginf(logv)] = 0.0;\n\nprint(\"Max value:\", np.max(v), \" (\",np.max(logv),\")\")\n\nplt.pcolormesh(logv,cmap='Blues');\nplt.colorbar();\nplt.yticks(np.arange(0,len(s.index.levels[0]),1),s.index.levels[0])\nplt.xticks(np.arange(0,len(s.index.levels[0]),1),s.index.levels[0],rotation=45);\n\nplt.show()\n\nexit();\n\t\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# -------------------------------
# --------- Set Methods ---------
# -------------------------------
# difference() return the values in the first set that not in the second set
set1 ={1, 2, 3, 4, 5, 6, 7, 8 , 9}
set2 = {1, 2, 3, 4, 5, 6, "A", "B"}
print(set1)
print(set2)
print(set1.difference(set2))
print(set1-set2)
print(set2.difference(set1))
print(set2-set1)
print(set1)
print(set2)
print("*" * 40)
# difference_update() return the values in the first set that not in the second set
# and update the value for the first set with this values
set3 ={1, 2, 3, 4, 5, 6, 7, 8 , 9}
set4 = {1, 2, 3, 4, 5, 6, "A", "B"}
print(set3)
set3.difference_update(set4)
print(set3)
print("*" * 40)
# intersection() return the values in the first set and in the second set
set5 ={1, 2, 3, 4, 5, 6, 7, 8 , 9}
set6 = {1, 2, 3, 4, 5, 6, "A", "B"}
print(set5)
print(set5.intersection(set6))
print(set5)
print("*" * 40)
# intersection_update() return the values in the first set and in the second set
# and update the value for the first set with this values
set7 ={1, 2, 3, 4, 5, 6, 7, 8 , 9}
set8 = {1, 2, 3, 4, 5, 6, "A", "B"}
print(set7)
set7.intersection_update(set8)
print(set7)
print("*" * 40)
# symmetric_difference() return the values in the first set and not in the second set
# and the values in the second set and not in the first set
set9 ={1, 2, 3, 4, 5, 6, 7, 8 , 9}
set10 = {1, 2, 3, 4, 5, 6, "A", "B"}
print(set9)
print(set9.symmetric_difference(set10))
print(set9^set10)
print(set9)
print("*" * 40)
# symmetric_difference_update() return the values in the first set and not in the second set
# and the values in the second set and not in the first set
# and update the value for the first set with this values
set11 ={1, 2, 3, 4, 5, 6, 7, 8 , 9}
set12 = {1, 2, 3, 4, 5, 6, "A", "B"}
print(set11)
set11.symmetric_difference_update(set12)
print(set11)
print("*" * 40)
|
normal
|
{
"blob_id": "faf2f5da92cf45cfedda91955688b3ca1c7c0db9",
"index": 8280,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(set1)\nprint(set2)\nprint(set1.difference(set2))\nprint(set1 - set2)\nprint(set2.difference(set1))\nprint(set2 - set1)\nprint(set1)\nprint(set2)\nprint('*' * 40)\n<mask token>\nprint(set3)\nset3.difference_update(set4)\nprint(set3)\nprint('*' * 40)\n<mask token>\nprint(set5)\nprint(set5.intersection(set6))\nprint(set5)\nprint('*' * 40)\n<mask token>\nprint(set7)\nset7.intersection_update(set8)\nprint(set7)\nprint('*' * 40)\n<mask token>\nprint(set9)\nprint(set9.symmetric_difference(set10))\nprint(set9 ^ set10)\nprint(set9)\nprint('*' * 40)\n<mask token>\nprint(set11)\nset11.symmetric_difference_update(set12)\nprint(set11)\nprint('*' * 40)\n",
"step-3": "set1 = {1, 2, 3, 4, 5, 6, 7, 8, 9}\nset2 = {1, 2, 3, 4, 5, 6, 'A', 'B'}\nprint(set1)\nprint(set2)\nprint(set1.difference(set2))\nprint(set1 - set2)\nprint(set2.difference(set1))\nprint(set2 - set1)\nprint(set1)\nprint(set2)\nprint('*' * 40)\nset3 = {1, 2, 3, 4, 5, 6, 7, 8, 9}\nset4 = {1, 2, 3, 4, 5, 6, 'A', 'B'}\nprint(set3)\nset3.difference_update(set4)\nprint(set3)\nprint('*' * 40)\nset5 = {1, 2, 3, 4, 5, 6, 7, 8, 9}\nset6 = {1, 2, 3, 4, 5, 6, 'A', 'B'}\nprint(set5)\nprint(set5.intersection(set6))\nprint(set5)\nprint('*' * 40)\nset7 = {1, 2, 3, 4, 5, 6, 7, 8, 9}\nset8 = {1, 2, 3, 4, 5, 6, 'A', 'B'}\nprint(set7)\nset7.intersection_update(set8)\nprint(set7)\nprint('*' * 40)\nset9 = {1, 2, 3, 4, 5, 6, 7, 8, 9}\nset10 = {1, 2, 3, 4, 5, 6, 'A', 'B'}\nprint(set9)\nprint(set9.symmetric_difference(set10))\nprint(set9 ^ set10)\nprint(set9)\nprint('*' * 40)\nset11 = {1, 2, 3, 4, 5, 6, 7, 8, 9}\nset12 = {1, 2, 3, 4, 5, 6, 'A', 'B'}\nprint(set11)\nset11.symmetric_difference_update(set12)\nprint(set11)\nprint('*' * 40)\n",
"step-4": "# -------------------------------\n# --------- Set Methods ---------\n# -------------------------------\n\n\n# difference() return the values in the first set that not in the second set\nset1 ={1, 2, 3, 4, 5, 6, 7, 8 , 9}\nset2 = {1, 2, 3, 4, 5, 6, \"A\", \"B\"}\nprint(set1)\nprint(set2)\nprint(set1.difference(set2))\nprint(set1-set2)\nprint(set2.difference(set1))\nprint(set2-set1)\nprint(set1)\nprint(set2)\n\nprint(\"*\" * 40)\n\n# difference_update() return the values in the first set that not in the second set\n# and update the value for the first set with this values\nset3 ={1, 2, 3, 4, 5, 6, 7, 8 , 9}\nset4 = {1, 2, 3, 4, 5, 6, \"A\", \"B\"}\nprint(set3)\nset3.difference_update(set4)\nprint(set3)\nprint(\"*\" * 40)\n\n# intersection() return the values in the first set and in the second set\nset5 ={1, 2, 3, 4, 5, 6, 7, 8 , 9}\nset6 = {1, 2, 3, 4, 5, 6, \"A\", \"B\"}\nprint(set5)\nprint(set5.intersection(set6))\nprint(set5)\nprint(\"*\" * 40)\n\n# intersection_update() return the values in the first set and in the second set\n# and update the value for the first set with this values\nset7 ={1, 2, 3, 4, 5, 6, 7, 8 , 9}\nset8 = {1, 2, 3, 4, 5, 6, \"A\", \"B\"}\nprint(set7)\nset7.intersection_update(set8)\nprint(set7)\nprint(\"*\" * 40)\n\n# symmetric_difference() return the values in the first set and not in the second set\n# and the values in the second set and not in the first set\nset9 ={1, 2, 3, 4, 5, 6, 7, 8 , 9}\nset10 = {1, 2, 3, 4, 5, 6, \"A\", \"B\"}\nprint(set9)\nprint(set9.symmetric_difference(set10))\nprint(set9^set10)\nprint(set9)\nprint(\"*\" * 40)\n\n# symmetric_difference_update() return the values in the first set and not in the second set\n# and the values in the second set and not in the first set\n# and update the value for the first set with this values\nset11 ={1, 2, 3, 4, 5, 6, 7, 8 , 9}\nset12 = {1, 2, 3, 4, 5, 6, \"A\", \"B\"}\nprint(set11)\nset11.symmetric_difference_update(set12)\nprint(set11)\nprint(\"*\" * 40)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import firebase_admin
from firebase_admin import credentials
from firebase_admin import db
import paho.mqtt.client as mqtt
# Fetch the service account key JSON file contents
cred = credentials.Certificate('iot_mikro.json')
# Initialize the app with a service account, granting admin privileges
firebase_admin.initialize_app(cred, {
'databaseURL': 'https://mikro-b4844.firebaseio.com/'
})
ref = db.reference('lampu')
print(ref.get())
i=0
while True:
print(ref.get())
if ref.get()=="Off" and i==0 :
i=1
client = mqtt.Client()
client.connect("127.0.0.1",1883,60)
client.publish("building/lampu", "Off")
if ref.get()=="On" and i==1 :
i=0
client = mqtt.Client()
client.connect("127.0.0.1",1883,60)
client.publish("building/lampu", "On")
# client.disconnect();
|
normal
|
{
"blob_id": "acff8618754658104ac36214901d346447a0134f",
"index": 811,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfirebase_admin.initialize_app(cred, {'databaseURL':\n 'https://mikro-b4844.firebaseio.com/'})\n<mask token>\nprint(ref.get())\n<mask token>\nwhile True:\n print(ref.get())\n if ref.get() == 'Off' and i == 0:\n i = 1\n client = mqtt.Client()\n client.connect('127.0.0.1', 1883, 60)\n client.publish('building/lampu', 'Off')\n if ref.get() == 'On' and i == 1:\n i = 0\n client = mqtt.Client()\n client.connect('127.0.0.1', 1883, 60)\n client.publish('building/lampu', 'On')\n",
"step-3": "<mask token>\ncred = credentials.Certificate('iot_mikro.json')\nfirebase_admin.initialize_app(cred, {'databaseURL':\n 'https://mikro-b4844.firebaseio.com/'})\nref = db.reference('lampu')\nprint(ref.get())\ni = 0\nwhile True:\n print(ref.get())\n if ref.get() == 'Off' and i == 0:\n i = 1\n client = mqtt.Client()\n client.connect('127.0.0.1', 1883, 60)\n client.publish('building/lampu', 'Off')\n if ref.get() == 'On' and i == 1:\n i = 0\n client = mqtt.Client()\n client.connect('127.0.0.1', 1883, 60)\n client.publish('building/lampu', 'On')\n",
"step-4": "import firebase_admin\nfrom firebase_admin import credentials\nfrom firebase_admin import db\nimport paho.mqtt.client as mqtt\ncred = credentials.Certificate('iot_mikro.json')\nfirebase_admin.initialize_app(cred, {'databaseURL':\n 'https://mikro-b4844.firebaseio.com/'})\nref = db.reference('lampu')\nprint(ref.get())\ni = 0\nwhile True:\n print(ref.get())\n if ref.get() == 'Off' and i == 0:\n i = 1\n client = mqtt.Client()\n client.connect('127.0.0.1', 1883, 60)\n client.publish('building/lampu', 'Off')\n if ref.get() == 'On' and i == 1:\n i = 0\n client = mqtt.Client()\n client.connect('127.0.0.1', 1883, 60)\n client.publish('building/lampu', 'On')\n",
"step-5": "import firebase_admin\nfrom firebase_admin import credentials\nfrom firebase_admin import db\nimport paho.mqtt.client as mqtt\n\n# Fetch the service account key JSON file contents\ncred = credentials.Certificate('iot_mikro.json')\n# Initialize the app with a service account, granting admin privileges\nfirebase_admin.initialize_app(cred, {\n 'databaseURL': 'https://mikro-b4844.firebaseio.com/'\n})\n\nref = db.reference('lampu')\nprint(ref.get())\ni=0\nwhile True:\n print(ref.get())\n if ref.get()==\"Off\" and i==0 :\n i=1\n client = mqtt.Client()\n client.connect(\"127.0.0.1\",1883,60)\n client.publish(\"building/lampu\", \"Off\")\n if ref.get()==\"On\" and i==1 :\n i=0\n client = mqtt.Client()\n client.connect(\"127.0.0.1\",1883,60)\n client.publish(\"building/lampu\", \"On\")\n# client.disconnect();\n ",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from flask import request
from flask_restful import abort
from sqlalchemy.exc import SQLAlchemyError
from gm.main.models.model import db, Metric, QuantModelMetricSchema, \
MlModelMetricSchema, Frequency, QuantModelMetric, MlModelMetric, \
ThresholdType
from gm.main.resources import success, get_metric_by_id, BaseResource
class MetricsResource(BaseResource):
"""
This resource handles the HTTP requests coming to the endpoint "/metrics".
Note: no trailing slash ("/") should be used.
Accepted HTTP methods: GET, POST
"""
def get(self):
"""
Implements the GET method for endpoint "/metrics". By default the results are
order by 'metric_id' ascending.
Implemented Query Parameters:
- is_active: to filter results that are either active or inactive. Boolean and
case insensitive.
- frequency: filter results based on a metric frequency. Values of this enum must
be respected. Case insensitive.
- threshold_type: filter results based on a metric threshold type. Values of this
enum must be respected. Case insensitive.
- sort: allows one to order the resulting collecting by 'metric_id' in descending
order. This should be done by specifying the query parameter as "sort=-metric_id".
Case insensitive.
Note: if unknown query parameters are given these will be ignored.
:return: a collection of metrics
"""
query = self.build_query()
metrics = query.all()
result = self.schema_collection.dump(metrics)
return success(result)
def build_query(self):
"""
Builds the query (without executing it) to the be used in the GET method.
:return: query with all the query conditions specified for obtaining the metrics
that are in the database and respect the desired filters (query parameters).
"""
# this filter is required
query = Metric.query.filter(Metric.metric_type == self.metric_type)
# get query parameters (parameters which are not here are ignored)
is_active = request.args.get('is_active')
frequency = request.args.get('frequency')
threshold_type = request.args.get('threshold_type')
sort = request.args.get('sort')
# process each parameter, and if valid add it as a query condition
if is_active is not None:
is_active = is_active.lower() == 'true'
query = Metric.query.filter_by(is_active=is_active)
if frequency is not None:
try:
frequency = Frequency.from_name(frequency)
except ValueError as e:
msg = f"Invalid 'frequency': {frequency}. Use one of {Frequency.values()}"
abort(400, message=msg)
query = query.filter_by(frequency=frequency)
if threshold_type is not None:
try:
threshold_type = ThresholdType.from_name(threshold_type)
except ValueError as e:
msg = f"Invalid 'threshold_type': {threshold_type}. Use one of " \
f"{ThresholdType.values()}"
abort(400, message=msg)
query = query.filter_by(threshold_type=threshold_type)
if sort is not None and sort.lstrip("-") == 'metric_id':
query = query.order_by(Metric.metric_id.desc())
else:
query = query.order_by(Metric.metric_id)
return query
def post(self):
"""
Implements the POST method for endpoint "/metrics". It should be used to create a
new metric.
:return: the metric as a json created in the database (in case of success)
"""
json_data = request.get_json(force=True)
if not json_data:
abort(400, message='No input data provided')
# make sure the metric_id (temporary) and metric_type (model) are filled
json_data["metric_id"] = "TBD"
json_data["metric_type"] = "model"
# validate and deserialize input
new_metric = self.load(json_data, session=db.session)
# get the next metric id and update metric object
try:
db.session.add(new_metric)
db.session.commit()
except SQLAlchemyError as e:
abort(400, message=f'Database error. Reason: {e}')
# dump to json and return result
result = self.schema.dump(new_metric)
return success(result, code=201)
class QuantModelMetricsResource(MetricsResource):
"""
This resource handles the HTTP requests coming to the endpoint
"/quant_model/metrics/{metric_id}".
This subclass uses almost everything from the base class, it only needs to specify the
appropriate schemas in the constructor, and to override the build_query method so that
the appropriate metric_type is filtered and the remaining query parameters (specific
to this endpoint) are processed.
Implemented Query Parameters:
- asset_class: to filter results by a given asset class.
- model_name: to filter results by a given model name.
- pricing_library: to filter results for a given pricing library.
Note: no trailing slash ("/") should be used.
Accepted HTTP methods: GET, POST
"""
def __init__(self, **kwargs):
"""
Initialize schemas with appropriate classes.
:param kwargs: pass through to base constructor (service and metric_type)
"""
schema = QuantModelMetricSchema()
schema_collection = QuantModelMetricSchema(many=True)
super().__init__(schema, schema_collection, **kwargs)
def build_query(self):
"""
Override method to include specific query parameters to this model endpoint.
"""
# build query from base class add required field for joining with parent
query = super().build_query()
query = query.filter(Metric.metric_id == QuantModelMetric.metric_id)
# get the remaining query parameters
asset_class = request.args.get('asset_class')
model_name = request.args.get('model_name')
pricing_library = request.args.get('pricing_library')
# process each parameter and, if valid, add as a query condition
if asset_class is not None:
query = query.filter(QuantModelMetric.asset_class == asset_class)
if model_name is not None:
query = query.filter(QuantModelMetric.model_name == model_name)
if pricing_library is not None:
query = query.filter(QuantModelMetric.pricing_library == pricing_library)
return query
class MlModelMetricsResource(MetricsResource):
"""
This resource handles the HTTP requests coming to the endpoint
"/ml_model/metrics/{metric_id}".
This subclass uses almost everything from the base class, it only needs to specify the
appropriate schemas in the constructor, and to override the build_query method so that
the appropriate metric_type is filtered and the remaining query parameters (specific
to this endpoint) are processed.
Implemented Query Parameters:
- algorithm: to filter results by a given algorithm.
Note: no trailing slash ("/") should be used.
Accepted HTTP methods: GET, POST
"""
def __init__(self, **kwargs):
"""
Initialize schemas with appropriate classes.
:param kwargs: pass through to base constructor (service and metric_type)
"""
schema = MlModelMetricSchema()
schema_collection = MlModelMetricSchema(many=True)
super().__init__(schema, schema_collection, **kwargs)
def build_query(self):
"""
Override method to include specific query parameters to this ml_model
endpoint.
"""
query = super().build_query()
query = query.filter(Metric.metric_id == MlModelMetric.metric_id)
algorithm = request.args.get('algorithm')
if algorithm is not None:
query = query.filter(MlModelMetric.algorithm == algorithm)
return query
class MetricResource(BaseResource):
"""
This resource handles the HTTP requests coming to the endpoint "/metrics/{metric_id}".
Note: no trailing slash ("/") should be used.
Accepted HTTP methods: GET, PUT, DELETE
"""
def get(self, metric_id):
"""
Implements the GET method for endpoint "/metrics/{metric_id}". It should be used
to get a single metric from the database.
:param metric_id: the metric_id associated with this endpoint
:return: the json object of metric found in the database (if it exists)
"""
metric = get_metric_by_id(metric_id)
return self.schema.jsonify(metric)
def put(self, metric_id):
"""
Implements the PUT method for endpoint "/metrics/{metric_id}". It should be used
to update a metric.
:param metric_id: the metric_id associated with this endpoint
:return: the metric as a json after the update (in case of success)
"""
json_data = request.get_json(force=True)
if not json_data:
abort(400, message='No input data provided')
# Validate and deserialize input
metric = get_metric_by_id(metric_id)
self.load(json_data, metric, db.session, partial=True)
# if it was found and deserialized successfully try to commit
try:
db.session.commit()
except SQLAlchemyError as e:
abort(400, message=f'Database error. Reason: {e}')
return success(json_data)
def delete(self, metric_id):
"""
Implements the DELETE method for endpoint "/metrics/{metric_id}". It should be
used to delete a metric result matching the provided metric_id and cob_date.
:param metric_id: the metric_id associated with this endpoint
:return: the metric as a json after the delete (in case of success)
"""
metric = get_metric_by_id(metric_id)
# dump as json to send in the end if del is successful
result = self.schema.dump(metric)
# if result was found, delete it from database
try:
db.session.delete(metric)
db.session.commit()
except SQLAlchemyError as e:
abort(400, message=f'Database error. Reason: {e}')
return success(result)
class QuantModelMetricResource(MetricResource):
"""
This resource handles the HTTP requests coming to the endpoint
"/quant_model/metrics/{metric_id}".
This subclass uses everything from the base class and only needs to specify the
appropriate schemas in the constructor.
Note: no trailing slash ("/") should be used.
Accepted HTTP methods: GET, PUT, DELETE
"""
def __init__(self, **kwargs):
"""
Initialize schemas with appropriate classes.
:param kwargs: pass through to base constructor (service and metric_type)
"""
schema = QuantModelMetricSchema()
schema_collection = QuantModelMetricSchema(many=True)
super().__init__(schema, schema_collection, **kwargs)
class MlModelMetricResource(MetricResource):
"""
This resource handles the HTTP requests coming to the endpoint
"/ml_model/metrics/{metric_id}".
This subclass uses everything from the base class and only needs to specify the
appropriate schemas in the constructor.
Note: no trailing slash ("/") should be used.
Accepted HTTP methods: GET, PUT, DELETE
"""
def __init__(self, **kwargs):
"""
Initialize schemas with appropriate classes.
:param kwargs: pass through to base constructor (service and metric_type)
"""
schema = MlModelMetricSchema()
schema_collection = MlModelMetricSchema(many=True)
super().__init__(schema, schema_collection, **kwargs)
|
normal
|
{
"blob_id": "1431a0049c05a99e0b68052f56bf8e2e3c48e1aa",
"index": 622,
"step-1": "<mask token>\n\n\nclass QuantModelMetricsResource(MetricsResource):\n <mask token>\n <mask token>\n <mask token>\n\n\nclass MlModelMetricsResource(MetricsResource):\n \"\"\"\n This resource handles the HTTP requests coming to the endpoint\n \"/ml_model/metrics/{metric_id}\".\n\n This subclass uses almost everything from the base class, it only needs to specify the\n appropriate schemas in the constructor, and to override the build_query method so that\n the appropriate metric_type is filtered and the remaining query parameters (specific\n to this endpoint) are processed.\n\n Implemented Query Parameters:\n - algorithm: to filter results by a given algorithm.\n\n Note: no trailing slash (\"/\") should be used.\n\n Accepted HTTP methods: GET, POST\n \"\"\"\n\n def __init__(self, **kwargs):\n \"\"\"\n Initialize schemas with appropriate classes.\n\n :param kwargs: pass through to base constructor (service and metric_type)\n \"\"\"\n schema = MlModelMetricSchema()\n schema_collection = MlModelMetricSchema(many=True)\n super().__init__(schema, schema_collection, **kwargs)\n\n def build_query(self):\n \"\"\"\n Override method to include specific query parameters to this ml_model\n endpoint.\n \"\"\"\n query = super().build_query()\n query = query.filter(Metric.metric_id == MlModelMetric.metric_id)\n algorithm = request.args.get('algorithm')\n if algorithm is not None:\n query = query.filter(MlModelMetric.algorithm == algorithm)\n return query\n\n\nclass MetricResource(BaseResource):\n \"\"\"\n This resource handles the HTTP requests coming to the endpoint \"/metrics/{metric_id}\".\n\n Note: no trailing slash (\"/\") should be used.\n\n Accepted HTTP methods: GET, PUT, DELETE\n \"\"\"\n\n def get(self, metric_id):\n \"\"\"\n Implements the GET method for endpoint \"/metrics/{metric_id}\". It should be used\n to get a single metric from the database.\n\n :param metric_id: the metric_id associated with this endpoint\n :return: the json object of metric found in the database (if it exists)\n \"\"\"\n metric = get_metric_by_id(metric_id)\n return self.schema.jsonify(metric)\n\n def put(self, metric_id):\n \"\"\"\n Implements the PUT method for endpoint \"/metrics/{metric_id}\". It should be used\n to update a metric.\n\n :param metric_id: the metric_id associated with this endpoint\n :return: the metric as a json after the update (in case of success)\n \"\"\"\n json_data = request.get_json(force=True)\n if not json_data:\n abort(400, message='No input data provided')\n metric = get_metric_by_id(metric_id)\n self.load(json_data, metric, db.session, partial=True)\n try:\n db.session.commit()\n except SQLAlchemyError as e:\n abort(400, message=f'Database error. Reason: {e}')\n return success(json_data)\n\n def delete(self, metric_id):\n \"\"\"\n Implements the DELETE method for endpoint \"/metrics/{metric_id}\". It should be\n used to delete a metric result matching the provided metric_id and cob_date.\n\n :param metric_id: the metric_id associated with this endpoint\n :return: the metric as a json after the delete (in case of success)\n \"\"\"\n metric = get_metric_by_id(metric_id)\n result = self.schema.dump(metric)\n try:\n db.session.delete(metric)\n db.session.commit()\n except SQLAlchemyError as e:\n abort(400, message=f'Database error. 
Reason: {e}')\n return success(result)\n\n\nclass QuantModelMetricResource(MetricResource):\n \"\"\"\n This resource handles the HTTP requests coming to the endpoint\n \"/quant_model/metrics/{metric_id}\".\n\n This subclass uses everything from the base class and only needs to specify the\n appropriate schemas in the constructor.\n\n Note: no trailing slash (\"/\") should be used.\n\n Accepted HTTP methods: GET, PUT, DELETE\n \"\"\"\n\n def __init__(self, **kwargs):\n \"\"\"\n Initialize schemas with appropriate classes.\n\n :param kwargs: pass through to base constructor (service and metric_type)\n \"\"\"\n schema = QuantModelMetricSchema()\n schema_collection = QuantModelMetricSchema(many=True)\n super().__init__(schema, schema_collection, **kwargs)\n\n\nclass MlModelMetricResource(MetricResource):\n \"\"\"\n This resource handles the HTTP requests coming to the endpoint\n \"/ml_model/metrics/{metric_id}\".\n\n This subclass uses everything from the base class and only needs to specify the\n appropriate schemas in the constructor.\n\n Note: no trailing slash (\"/\") should be used.\n\n Accepted HTTP methods: GET, PUT, DELETE\n \"\"\"\n\n def __init__(self, **kwargs):\n \"\"\"\n Initialize schemas with appropriate classes.\n\n :param kwargs: pass through to base constructor (service and metric_type)\n \"\"\"\n schema = MlModelMetricSchema()\n schema_collection = MlModelMetricSchema(many=True)\n super().__init__(schema, schema_collection, **kwargs)\n",
"step-2": "<mask token>\n\n\nclass QuantModelMetricsResource(MetricsResource):\n \"\"\"\n This resource handles the HTTP requests coming to the endpoint\n \"/quant_model/metrics/{metric_id}\".\n\n This subclass uses almost everything from the base class, it only needs to specify the\n appropriate schemas in the constructor, and to override the build_query method so that\n the appropriate metric_type is filtered and the remaining query parameters (specific\n to this endpoint) are processed.\n\n Implemented Query Parameters:\n - asset_class: to filter results by a given asset class.\n - model_name: to filter results by a given model name.\n - pricing_library: to filter results for a given pricing library.\n\n Note: no trailing slash (\"/\") should be used.\n\n Accepted HTTP methods: GET, POST\n \"\"\"\n\n def __init__(self, **kwargs):\n \"\"\"\n Initialize schemas with appropriate classes.\n\n :param kwargs: pass through to base constructor (service and metric_type)\n \"\"\"\n schema = QuantModelMetricSchema()\n schema_collection = QuantModelMetricSchema(many=True)\n super().__init__(schema, schema_collection, **kwargs)\n\n def build_query(self):\n \"\"\"\n Override method to include specific query parameters to this model endpoint.\n \"\"\"\n query = super().build_query()\n query = query.filter(Metric.metric_id == QuantModelMetric.metric_id)\n asset_class = request.args.get('asset_class')\n model_name = request.args.get('model_name')\n pricing_library = request.args.get('pricing_library')\n if asset_class is not None:\n query = query.filter(QuantModelMetric.asset_class == asset_class)\n if model_name is not None:\n query = query.filter(QuantModelMetric.model_name == model_name)\n if pricing_library is not None:\n query = query.filter(QuantModelMetric.pricing_library ==\n pricing_library)\n return query\n\n\nclass MlModelMetricsResource(MetricsResource):\n \"\"\"\n This resource handles the HTTP requests coming to the endpoint\n \"/ml_model/metrics/{metric_id}\".\n\n This subclass uses almost everything from the base class, it only needs to specify the\n appropriate schemas in the constructor, and to override the build_query method so that\n the appropriate metric_type is filtered and the remaining query parameters (specific\n to this endpoint) are processed.\n\n Implemented Query Parameters:\n - algorithm: to filter results by a given algorithm.\n\n Note: no trailing slash (\"/\") should be used.\n\n Accepted HTTP methods: GET, POST\n \"\"\"\n\n def __init__(self, **kwargs):\n \"\"\"\n Initialize schemas with appropriate classes.\n\n :param kwargs: pass through to base constructor (service and metric_type)\n \"\"\"\n schema = MlModelMetricSchema()\n schema_collection = MlModelMetricSchema(many=True)\n super().__init__(schema, schema_collection, **kwargs)\n\n def build_query(self):\n \"\"\"\n Override method to include specific query parameters to this ml_model\n endpoint.\n \"\"\"\n query = super().build_query()\n query = query.filter(Metric.metric_id == MlModelMetric.metric_id)\n algorithm = request.args.get('algorithm')\n if algorithm is not None:\n query = query.filter(MlModelMetric.algorithm == algorithm)\n return query\n\n\nclass MetricResource(BaseResource):\n \"\"\"\n This resource handles the HTTP requests coming to the endpoint \"/metrics/{metric_id}\".\n\n Note: no trailing slash (\"/\") should be used.\n\n Accepted HTTP methods: GET, PUT, DELETE\n \"\"\"\n\n def get(self, metric_id):\n \"\"\"\n Implements the GET method for endpoint \"/metrics/{metric_id}\". 
It should be used\n to get a single metric from the database.\n\n :param metric_id: the metric_id associated with this endpoint\n :return: the json object of metric found in the database (if it exists)\n \"\"\"\n metric = get_metric_by_id(metric_id)\n return self.schema.jsonify(metric)\n\n def put(self, metric_id):\n \"\"\"\n Implements the PUT method for endpoint \"/metrics/{metric_id}\". It should be used\n to update a metric.\n\n :param metric_id: the metric_id associated with this endpoint\n :return: the metric as a json after the update (in case of success)\n \"\"\"\n json_data = request.get_json(force=True)\n if not json_data:\n abort(400, message='No input data provided')\n metric = get_metric_by_id(metric_id)\n self.load(json_data, metric, db.session, partial=True)\n try:\n db.session.commit()\n except SQLAlchemyError as e:\n abort(400, message=f'Database error. Reason: {e}')\n return success(json_data)\n\n def delete(self, metric_id):\n \"\"\"\n Implements the DELETE method for endpoint \"/metrics/{metric_id}\". It should be\n used to delete a metric result matching the provided metric_id and cob_date.\n\n :param metric_id: the metric_id associated with this endpoint\n :return: the metric as a json after the delete (in case of success)\n \"\"\"\n metric = get_metric_by_id(metric_id)\n result = self.schema.dump(metric)\n try:\n db.session.delete(metric)\n db.session.commit()\n except SQLAlchemyError as e:\n abort(400, message=f'Database error. Reason: {e}')\n return success(result)\n\n\nclass QuantModelMetricResource(MetricResource):\n \"\"\"\n This resource handles the HTTP requests coming to the endpoint\n \"/quant_model/metrics/{metric_id}\".\n\n This subclass uses everything from the base class and only needs to specify the\n appropriate schemas in the constructor.\n\n Note: no trailing slash (\"/\") should be used.\n\n Accepted HTTP methods: GET, PUT, DELETE\n \"\"\"\n\n def __init__(self, **kwargs):\n \"\"\"\n Initialize schemas with appropriate classes.\n\n :param kwargs: pass through to base constructor (service and metric_type)\n \"\"\"\n schema = QuantModelMetricSchema()\n schema_collection = QuantModelMetricSchema(many=True)\n super().__init__(schema, schema_collection, **kwargs)\n\n\nclass MlModelMetricResource(MetricResource):\n \"\"\"\n This resource handles the HTTP requests coming to the endpoint\n \"/ml_model/metrics/{metric_id}\".\n\n This subclass uses everything from the base class and only needs to specify the\n appropriate schemas in the constructor.\n\n Note: no trailing slash (\"/\") should be used.\n\n Accepted HTTP methods: GET, PUT, DELETE\n \"\"\"\n\n def __init__(self, **kwargs):\n \"\"\"\n Initialize schemas with appropriate classes.\n\n :param kwargs: pass through to base constructor (service and metric_type)\n \"\"\"\n schema = MlModelMetricSchema()\n schema_collection = MlModelMetricSchema(many=True)\n super().__init__(schema, schema_collection, **kwargs)\n",
"step-3": "<mask token>\n\n\nclass MetricsResource(BaseResource):\n <mask token>\n\n def get(self):\n \"\"\"\n Implements the GET method for endpoint \"/metrics\". By default the results are\n order by 'metric_id' ascending.\n\n Implemented Query Parameters:\n - is_active: to filter results that are either active or inactive. Boolean and\n case insensitive.\n - frequency: filter results based on a metric frequency. Values of this enum must\n be respected. Case insensitive.\n - threshold_type: filter results based on a metric threshold type. Values of this\n enum must be respected. Case insensitive.\n - sort: allows one to order the resulting collecting by 'metric_id' in descending\n order. This should be done by specifying the query parameter as \"sort=-metric_id\".\n Case insensitive.\n\n Note: if unknown query parameters are given these will be ignored.\n\n :return: a collection of metrics\n \"\"\"\n query = self.build_query()\n metrics = query.all()\n result = self.schema_collection.dump(metrics)\n return success(result)\n\n def build_query(self):\n \"\"\"\n Builds the query (without executing it) to the be used in the GET method.\n :return: query with all the query conditions specified for obtaining the metrics\n that are in the database and respect the desired filters (query parameters).\n \"\"\"\n query = Metric.query.filter(Metric.metric_type == self.metric_type)\n is_active = request.args.get('is_active')\n frequency = request.args.get('frequency')\n threshold_type = request.args.get('threshold_type')\n sort = request.args.get('sort')\n if is_active is not None:\n is_active = is_active.lower() == 'true'\n query = Metric.query.filter_by(is_active=is_active)\n if frequency is not None:\n try:\n frequency = Frequency.from_name(frequency)\n except ValueError as e:\n msg = (\n f\"Invalid 'frequency': {frequency}. Use one of {Frequency.values()}\"\n )\n abort(400, message=msg)\n query = query.filter_by(frequency=frequency)\n if threshold_type is not None:\n try:\n threshold_type = ThresholdType.from_name(threshold_type)\n except ValueError as e:\n msg = (\n f\"Invalid 'threshold_type': {threshold_type}. Use one of {ThresholdType.values()}\"\n )\n abort(400, message=msg)\n query = query.filter_by(threshold_type=threshold_type)\n if sort is not None and sort.lstrip('-') == 'metric_id':\n query = query.order_by(Metric.metric_id.desc())\n else:\n query = query.order_by(Metric.metric_id)\n return query\n\n def post(self):\n \"\"\"\n Implements the POST method for endpoint \"/metrics\". It should be used to create a\n new metric.\n\n :return: the metric as a json created in the database (in case of success)\n \"\"\"\n json_data = request.get_json(force=True)\n if not json_data:\n abort(400, message='No input data provided')\n json_data['metric_id'] = 'TBD'\n json_data['metric_type'] = 'model'\n new_metric = self.load(json_data, session=db.session)\n try:\n db.session.add(new_metric)\n db.session.commit()\n except SQLAlchemyError as e:\n abort(400, message=f'Database error. 
Reason: {e}')\n result = self.schema.dump(new_metric)\n return success(result, code=201)\n\n\nclass QuantModelMetricsResource(MetricsResource):\n \"\"\"\n This resource handles the HTTP requests coming to the endpoint\n \"/quant_model/metrics/{metric_id}\".\n\n This subclass uses almost everything from the base class, it only needs to specify the\n appropriate schemas in the constructor, and to override the build_query method so that\n the appropriate metric_type is filtered and the remaining query parameters (specific\n to this endpoint) are processed.\n\n Implemented Query Parameters:\n - asset_class: to filter results by a given asset class.\n - model_name: to filter results by a given model name.\n - pricing_library: to filter results for a given pricing library.\n\n Note: no trailing slash (\"/\") should be used.\n\n Accepted HTTP methods: GET, POST\n \"\"\"\n\n def __init__(self, **kwargs):\n \"\"\"\n Initialize schemas with appropriate classes.\n\n :param kwargs: pass through to base constructor (service and metric_type)\n \"\"\"\n schema = QuantModelMetricSchema()\n schema_collection = QuantModelMetricSchema(many=True)\n super().__init__(schema, schema_collection, **kwargs)\n\n def build_query(self):\n \"\"\"\n Override method to include specific query parameters to this model endpoint.\n \"\"\"\n query = super().build_query()\n query = query.filter(Metric.metric_id == QuantModelMetric.metric_id)\n asset_class = request.args.get('asset_class')\n model_name = request.args.get('model_name')\n pricing_library = request.args.get('pricing_library')\n if asset_class is not None:\n query = query.filter(QuantModelMetric.asset_class == asset_class)\n if model_name is not None:\n query = query.filter(QuantModelMetric.model_name == model_name)\n if pricing_library is not None:\n query = query.filter(QuantModelMetric.pricing_library ==\n pricing_library)\n return query\n\n\nclass MlModelMetricsResource(MetricsResource):\n \"\"\"\n This resource handles the HTTP requests coming to the endpoint\n \"/ml_model/metrics/{metric_id}\".\n\n This subclass uses almost everything from the base class, it only needs to specify the\n appropriate schemas in the constructor, and to override the build_query method so that\n the appropriate metric_type is filtered and the remaining query parameters (specific\n to this endpoint) are processed.\n\n Implemented Query Parameters:\n - algorithm: to filter results by a given algorithm.\n\n Note: no trailing slash (\"/\") should be used.\n\n Accepted HTTP methods: GET, POST\n \"\"\"\n\n def __init__(self, **kwargs):\n \"\"\"\n Initialize schemas with appropriate classes.\n\n :param kwargs: pass through to base constructor (service and metric_type)\n \"\"\"\n schema = MlModelMetricSchema()\n schema_collection = MlModelMetricSchema(many=True)\n super().__init__(schema, schema_collection, **kwargs)\n\n def build_query(self):\n \"\"\"\n Override method to include specific query parameters to this ml_model\n endpoint.\n \"\"\"\n query = super().build_query()\n query = query.filter(Metric.metric_id == MlModelMetric.metric_id)\n algorithm = request.args.get('algorithm')\n if algorithm is not None:\n query = query.filter(MlModelMetric.algorithm == algorithm)\n return query\n\n\nclass MetricResource(BaseResource):\n \"\"\"\n This resource handles the HTTP requests coming to the endpoint \"/metrics/{metric_id}\".\n\n Note: no trailing slash (\"/\") should be used.\n\n Accepted HTTP methods: GET, PUT, DELETE\n \"\"\"\n\n def get(self, metric_id):\n \"\"\"\n Implements the GET 
method for endpoint \"/metrics/{metric_id}\". It should be used\n to get a single metric from the database.\n\n :param metric_id: the metric_id associated with this endpoint\n :return: the json object of metric found in the database (if it exists)\n \"\"\"\n metric = get_metric_by_id(metric_id)\n return self.schema.jsonify(metric)\n\n def put(self, metric_id):\n \"\"\"\n Implements the PUT method for endpoint \"/metrics/{metric_id}\". It should be used\n to update a metric.\n\n :param metric_id: the metric_id associated with this endpoint\n :return: the metric as a json after the update (in case of success)\n \"\"\"\n json_data = request.get_json(force=True)\n if not json_data:\n abort(400, message='No input data provided')\n metric = get_metric_by_id(metric_id)\n self.load(json_data, metric, db.session, partial=True)\n try:\n db.session.commit()\n except SQLAlchemyError as e:\n abort(400, message=f'Database error. Reason: {e}')\n return success(json_data)\n\n def delete(self, metric_id):\n \"\"\"\n Implements the DELETE method for endpoint \"/metrics/{metric_id}\". It should be\n used to delete a metric result matching the provided metric_id and cob_date.\n\n :param metric_id: the metric_id associated with this endpoint\n :return: the metric as a json after the delete (in case of success)\n \"\"\"\n metric = get_metric_by_id(metric_id)\n result = self.schema.dump(metric)\n try:\n db.session.delete(metric)\n db.session.commit()\n except SQLAlchemyError as e:\n abort(400, message=f'Database error. Reason: {e}')\n return success(result)\n\n\nclass QuantModelMetricResource(MetricResource):\n \"\"\"\n This resource handles the HTTP requests coming to the endpoint\n \"/quant_model/metrics/{metric_id}\".\n\n This subclass uses everything from the base class and only needs to specify the\n appropriate schemas in the constructor.\n\n Note: no trailing slash (\"/\") should be used.\n\n Accepted HTTP methods: GET, PUT, DELETE\n \"\"\"\n\n def __init__(self, **kwargs):\n \"\"\"\n Initialize schemas with appropriate classes.\n\n :param kwargs: pass through to base constructor (service and metric_type)\n \"\"\"\n schema = QuantModelMetricSchema()\n schema_collection = QuantModelMetricSchema(many=True)\n super().__init__(schema, schema_collection, **kwargs)\n\n\nclass MlModelMetricResource(MetricResource):\n \"\"\"\n This resource handles the HTTP requests coming to the endpoint\n \"/ml_model/metrics/{metric_id}\".\n\n This subclass uses everything from the base class and only needs to specify the\n appropriate schemas in the constructor.\n\n Note: no trailing slash (\"/\") should be used.\n\n Accepted HTTP methods: GET, PUT, DELETE\n \"\"\"\n\n def __init__(self, **kwargs):\n \"\"\"\n Initialize schemas with appropriate classes.\n\n :param kwargs: pass through to base constructor (service and metric_type)\n \"\"\"\n schema = MlModelMetricSchema()\n schema_collection = MlModelMetricSchema(many=True)\n super().__init__(schema, schema_collection, **kwargs)\n",
"step-4": "from flask import request\nfrom flask_restful import abort\nfrom sqlalchemy.exc import SQLAlchemyError\nfrom gm.main.models.model import db, Metric, QuantModelMetricSchema, MlModelMetricSchema, Frequency, QuantModelMetric, MlModelMetric, ThresholdType\nfrom gm.main.resources import success, get_metric_by_id, BaseResource\n\n\nclass MetricsResource(BaseResource):\n \"\"\"\n This resource handles the HTTP requests coming to the endpoint \"/metrics\".\n\n Note: no trailing slash (\"/\") should be used.\n\n Accepted HTTP methods: GET, POST\n \"\"\"\n\n def get(self):\n \"\"\"\n Implements the GET method for endpoint \"/metrics\". By default the results are\n order by 'metric_id' ascending.\n\n Implemented Query Parameters:\n - is_active: to filter results that are either active or inactive. Boolean and\n case insensitive.\n - frequency: filter results based on a metric frequency. Values of this enum must\n be respected. Case insensitive.\n - threshold_type: filter results based on a metric threshold type. Values of this\n enum must be respected. Case insensitive.\n - sort: allows one to order the resulting collecting by 'metric_id' in descending\n order. This should be done by specifying the query parameter as \"sort=-metric_id\".\n Case insensitive.\n\n Note: if unknown query parameters are given these will be ignored.\n\n :return: a collection of metrics\n \"\"\"\n query = self.build_query()\n metrics = query.all()\n result = self.schema_collection.dump(metrics)\n return success(result)\n\n def build_query(self):\n \"\"\"\n Builds the query (without executing it) to the be used in the GET method.\n :return: query with all the query conditions specified for obtaining the metrics\n that are in the database and respect the desired filters (query parameters).\n \"\"\"\n query = Metric.query.filter(Metric.metric_type == self.metric_type)\n is_active = request.args.get('is_active')\n frequency = request.args.get('frequency')\n threshold_type = request.args.get('threshold_type')\n sort = request.args.get('sort')\n if is_active is not None:\n is_active = is_active.lower() == 'true'\n query = Metric.query.filter_by(is_active=is_active)\n if frequency is not None:\n try:\n frequency = Frequency.from_name(frequency)\n except ValueError as e:\n msg = (\n f\"Invalid 'frequency': {frequency}. Use one of {Frequency.values()}\"\n )\n abort(400, message=msg)\n query = query.filter_by(frequency=frequency)\n if threshold_type is not None:\n try:\n threshold_type = ThresholdType.from_name(threshold_type)\n except ValueError as e:\n msg = (\n f\"Invalid 'threshold_type': {threshold_type}. Use one of {ThresholdType.values()}\"\n )\n abort(400, message=msg)\n query = query.filter_by(threshold_type=threshold_type)\n if sort is not None and sort.lstrip('-') == 'metric_id':\n query = query.order_by(Metric.metric_id.desc())\n else:\n query = query.order_by(Metric.metric_id)\n return query\n\n def post(self):\n \"\"\"\n Implements the POST method for endpoint \"/metrics\". It should be used to create a\n new metric.\n\n :return: the metric as a json created in the database (in case of success)\n \"\"\"\n json_data = request.get_json(force=True)\n if not json_data:\n abort(400, message='No input data provided')\n json_data['metric_id'] = 'TBD'\n json_data['metric_type'] = 'model'\n new_metric = self.load(json_data, session=db.session)\n try:\n db.session.add(new_metric)\n db.session.commit()\n except SQLAlchemyError as e:\n abort(400, message=f'Database error. 
Reason: {e}')\n result = self.schema.dump(new_metric)\n return success(result, code=201)\n\n\nclass QuantModelMetricsResource(MetricsResource):\n \"\"\"\n This resource handles the HTTP requests coming to the endpoint\n \"/quant_model/metrics/{metric_id}\".\n\n This subclass uses almost everything from the base class, it only needs to specify the\n appropriate schemas in the constructor, and to override the build_query method so that\n the appropriate metric_type is filtered and the remaining query parameters (specific\n to this endpoint) are processed.\n\n Implemented Query Parameters:\n - asset_class: to filter results by a given asset class.\n - model_name: to filter results by a given model name.\n - pricing_library: to filter results for a given pricing library.\n\n Note: no trailing slash (\"/\") should be used.\n\n Accepted HTTP methods: GET, POST\n \"\"\"\n\n def __init__(self, **kwargs):\n \"\"\"\n Initialize schemas with appropriate classes.\n\n :param kwargs: pass through to base constructor (service and metric_type)\n \"\"\"\n schema = QuantModelMetricSchema()\n schema_collection = QuantModelMetricSchema(many=True)\n super().__init__(schema, schema_collection, **kwargs)\n\n def build_query(self):\n \"\"\"\n Override method to include specific query parameters to this model endpoint.\n \"\"\"\n query = super().build_query()\n query = query.filter(Metric.metric_id == QuantModelMetric.metric_id)\n asset_class = request.args.get('asset_class')\n model_name = request.args.get('model_name')\n pricing_library = request.args.get('pricing_library')\n if asset_class is not None:\n query = query.filter(QuantModelMetric.asset_class == asset_class)\n if model_name is not None:\n query = query.filter(QuantModelMetric.model_name == model_name)\n if pricing_library is not None:\n query = query.filter(QuantModelMetric.pricing_library ==\n pricing_library)\n return query\n\n\nclass MlModelMetricsResource(MetricsResource):\n \"\"\"\n This resource handles the HTTP requests coming to the endpoint\n \"/ml_model/metrics/{metric_id}\".\n\n This subclass uses almost everything from the base class, it only needs to specify the\n appropriate schemas in the constructor, and to override the build_query method so that\n the appropriate metric_type is filtered and the remaining query parameters (specific\n to this endpoint) are processed.\n\n Implemented Query Parameters:\n - algorithm: to filter results by a given algorithm.\n\n Note: no trailing slash (\"/\") should be used.\n\n Accepted HTTP methods: GET, POST\n \"\"\"\n\n def __init__(self, **kwargs):\n \"\"\"\n Initialize schemas with appropriate classes.\n\n :param kwargs: pass through to base constructor (service and metric_type)\n \"\"\"\n schema = MlModelMetricSchema()\n schema_collection = MlModelMetricSchema(many=True)\n super().__init__(schema, schema_collection, **kwargs)\n\n def build_query(self):\n \"\"\"\n Override method to include specific query parameters to this ml_model\n endpoint.\n \"\"\"\n query = super().build_query()\n query = query.filter(Metric.metric_id == MlModelMetric.metric_id)\n algorithm = request.args.get('algorithm')\n if algorithm is not None:\n query = query.filter(MlModelMetric.algorithm == algorithm)\n return query\n\n\nclass MetricResource(BaseResource):\n \"\"\"\n This resource handles the HTTP requests coming to the endpoint \"/metrics/{metric_id}\".\n\n Note: no trailing slash (\"/\") should be used.\n\n Accepted HTTP methods: GET, PUT, DELETE\n \"\"\"\n\n def get(self, metric_id):\n \"\"\"\n Implements the GET 
method for endpoint \"/metrics/{metric_id}\". It should be used\n to get a single metric from the database.\n\n :param metric_id: the metric_id associated with this endpoint\n :return: the json object of metric found in the database (if it exists)\n \"\"\"\n metric = get_metric_by_id(metric_id)\n return self.schema.jsonify(metric)\n\n def put(self, metric_id):\n \"\"\"\n Implements the PUT method for endpoint \"/metrics/{metric_id}\". It should be used\n to update a metric.\n\n :param metric_id: the metric_id associated with this endpoint\n :return: the metric as a json after the update (in case of success)\n \"\"\"\n json_data = request.get_json(force=True)\n if not json_data:\n abort(400, message='No input data provided')\n metric = get_metric_by_id(metric_id)\n self.load(json_data, metric, db.session, partial=True)\n try:\n db.session.commit()\n except SQLAlchemyError as e:\n abort(400, message=f'Database error. Reason: {e}')\n return success(json_data)\n\n def delete(self, metric_id):\n \"\"\"\n Implements the DELETE method for endpoint \"/metrics/{metric_id}\". It should be\n used to delete a metric result matching the provided metric_id and cob_date.\n\n :param metric_id: the metric_id associated with this endpoint\n :return: the metric as a json after the delete (in case of success)\n \"\"\"\n metric = get_metric_by_id(metric_id)\n result = self.schema.dump(metric)\n try:\n db.session.delete(metric)\n db.session.commit()\n except SQLAlchemyError as e:\n abort(400, message=f'Database error. Reason: {e}')\n return success(result)\n\n\nclass QuantModelMetricResource(MetricResource):\n \"\"\"\n This resource handles the HTTP requests coming to the endpoint\n \"/quant_model/metrics/{metric_id}\".\n\n This subclass uses everything from the base class and only needs to specify the\n appropriate schemas in the constructor.\n\n Note: no trailing slash (\"/\") should be used.\n\n Accepted HTTP methods: GET, PUT, DELETE\n \"\"\"\n\n def __init__(self, **kwargs):\n \"\"\"\n Initialize schemas with appropriate classes.\n\n :param kwargs: pass through to base constructor (service and metric_type)\n \"\"\"\n schema = QuantModelMetricSchema()\n schema_collection = QuantModelMetricSchema(many=True)\n super().__init__(schema, schema_collection, **kwargs)\n\n\nclass MlModelMetricResource(MetricResource):\n \"\"\"\n This resource handles the HTTP requests coming to the endpoint\n \"/ml_model/metrics/{metric_id}\".\n\n This subclass uses everything from the base class and only needs to specify the\n appropriate schemas in the constructor.\n\n Note: no trailing slash (\"/\") should be used.\n\n Accepted HTTP methods: GET, PUT, DELETE\n \"\"\"\n\n def __init__(self, **kwargs):\n \"\"\"\n Initialize schemas with appropriate classes.\n\n :param kwargs: pass through to base constructor (service and metric_type)\n \"\"\"\n schema = MlModelMetricSchema()\n schema_collection = MlModelMetricSchema(many=True)\n super().__init__(schema, schema_collection, **kwargs)\n",
"step-5": "from flask import request\r\nfrom flask_restful import abort\r\nfrom sqlalchemy.exc import SQLAlchemyError\r\n\r\nfrom gm.main.models.model import db, Metric, QuantModelMetricSchema, \\\r\n MlModelMetricSchema, Frequency, QuantModelMetric, MlModelMetric, \\\r\n ThresholdType\r\nfrom gm.main.resources import success, get_metric_by_id, BaseResource\r\n\r\n\r\nclass MetricsResource(BaseResource):\r\n \"\"\"\r\n This resource handles the HTTP requests coming to the endpoint \"/metrics\".\r\n\r\n Note: no trailing slash (\"/\") should be used.\r\n\r\n Accepted HTTP methods: GET, POST\r\n \"\"\"\r\n\r\n def get(self):\r\n \"\"\"\r\n Implements the GET method for endpoint \"/metrics\". By default the results are\r\n order by 'metric_id' ascending.\r\n\r\n Implemented Query Parameters:\r\n - is_active: to filter results that are either active or inactive. Boolean and\r\n case insensitive.\r\n - frequency: filter results based on a metric frequency. Values of this enum must\r\n be respected. Case insensitive.\r\n - threshold_type: filter results based on a metric threshold type. Values of this\r\n enum must be respected. Case insensitive.\r\n - sort: allows one to order the resulting collecting by 'metric_id' in descending\r\n order. This should be done by specifying the query parameter as \"sort=-metric_id\".\r\n Case insensitive.\r\n\r\n Note: if unknown query parameters are given these will be ignored.\r\n\r\n :return: a collection of metrics\r\n \"\"\"\r\n query = self.build_query()\r\n metrics = query.all()\r\n result = self.schema_collection.dump(metrics)\r\n return success(result)\r\n\r\n def build_query(self):\r\n \"\"\"\r\n Builds the query (without executing it) to the be used in the GET method.\r\n :return: query with all the query conditions specified for obtaining the metrics\r\n that are in the database and respect the desired filters (query parameters).\r\n \"\"\"\r\n\r\n # this filter is required\r\n query = Metric.query.filter(Metric.metric_type == self.metric_type)\r\n\r\n # get query parameters (parameters which are not here are ignored)\r\n is_active = request.args.get('is_active')\r\n frequency = request.args.get('frequency')\r\n threshold_type = request.args.get('threshold_type')\r\n sort = request.args.get('sort')\r\n\r\n # process each parameter, and if valid add it as a query condition\r\n if is_active is not None:\r\n is_active = is_active.lower() == 'true'\r\n query = Metric.query.filter_by(is_active=is_active)\r\n if frequency is not None:\r\n try:\r\n frequency = Frequency.from_name(frequency)\r\n except ValueError as e:\r\n msg = f\"Invalid 'frequency': {frequency}. Use one of {Frequency.values()}\"\r\n abort(400, message=msg)\r\n query = query.filter_by(frequency=frequency)\r\n if threshold_type is not None:\r\n try:\r\n threshold_type = ThresholdType.from_name(threshold_type)\r\n except ValueError as e:\r\n msg = f\"Invalid 'threshold_type': {threshold_type}. Use one of \" \\\r\n f\"{ThresholdType.values()}\"\r\n abort(400, message=msg)\r\n query = query.filter_by(threshold_type=threshold_type)\r\n if sort is not None and sort.lstrip(\"-\") == 'metric_id':\r\n query = query.order_by(Metric.metric_id.desc())\r\n else:\r\n query = query.order_by(Metric.metric_id)\r\n\r\n return query\r\n\r\n\r\n def post(self):\r\n \"\"\"\r\n Implements the POST method for endpoint \"/metrics\". 
It should be used to create a\r\n new metric.\r\n\r\n :return: the metric as a json created in the database (in case of success)\r\n \"\"\"\r\n json_data = request.get_json(force=True)\r\n if not json_data:\r\n abort(400, message='No input data provided')\r\n # make sure the metric_id (temporary) and metric_type (model) are filled\r\n json_data[\"metric_id\"] = \"TBD\"\r\n json_data[\"metric_type\"] = \"model\"\r\n\r\n # validate and deserialize input\r\n new_metric = self.load(json_data, session=db.session)\r\n\r\n # get the next metric id and update metric object\r\n try:\r\n db.session.add(new_metric)\r\n db.session.commit()\r\n except SQLAlchemyError as e:\r\n abort(400, message=f'Database error. Reason: {e}')\r\n\r\n # dump to json and return result\r\n result = self.schema.dump(new_metric)\r\n return success(result, code=201)\r\n\r\n\r\nclass QuantModelMetricsResource(MetricsResource):\r\n \"\"\"\r\n This resource handles the HTTP requests coming to the endpoint\r\n \"/quant_model/metrics/{metric_id}\".\r\n\r\n This subclass uses almost everything from the base class, it only needs to specify the\r\n appropriate schemas in the constructor, and to override the build_query method so that\r\n the appropriate metric_type is filtered and the remaining query parameters (specific\r\n to this endpoint) are processed.\r\n\r\n Implemented Query Parameters:\r\n - asset_class: to filter results by a given asset class.\r\n - model_name: to filter results by a given model name.\r\n - pricing_library: to filter results for a given pricing library.\r\n\r\n Note: no trailing slash (\"/\") should be used.\r\n\r\n Accepted HTTP methods: GET, POST\r\n \"\"\"\r\n\r\n def __init__(self, **kwargs):\r\n \"\"\"\r\n Initialize schemas with appropriate classes.\r\n\r\n :param kwargs: pass through to base constructor (service and metric_type)\r\n \"\"\"\r\n schema = QuantModelMetricSchema()\r\n schema_collection = QuantModelMetricSchema(many=True)\r\n super().__init__(schema, schema_collection, **kwargs)\r\n\r\n def build_query(self):\r\n \"\"\"\r\n Override method to include specific query parameters to this model endpoint.\r\n \"\"\"\r\n # build query from base class add required field for joining with parent\r\n query = super().build_query()\r\n query = query.filter(Metric.metric_id == QuantModelMetric.metric_id)\r\n\r\n # get the remaining query parameters\r\n asset_class = request.args.get('asset_class')\r\n model_name = request.args.get('model_name')\r\n pricing_library = request.args.get('pricing_library')\r\n\r\n # process each parameter and, if valid, add as a query condition\r\n if asset_class is not None:\r\n query = query.filter(QuantModelMetric.asset_class == asset_class)\r\n if model_name is not None:\r\n query = query.filter(QuantModelMetric.model_name == model_name)\r\n if pricing_library is not None:\r\n query = query.filter(QuantModelMetric.pricing_library == pricing_library)\r\n return query\r\n\r\n\r\nclass MlModelMetricsResource(MetricsResource):\r\n \"\"\"\r\n This resource handles the HTTP requests coming to the endpoint\r\n \"/ml_model/metrics/{metric_id}\".\r\n\r\n This subclass uses almost everything from the base class, it only needs to specify the\r\n appropriate schemas in the constructor, and to override the build_query method so that\r\n the appropriate metric_type is filtered and the remaining query parameters (specific\r\n to this endpoint) are processed.\r\n\r\n Implemented Query Parameters:\r\n - algorithm: to filter results by a given algorithm.\r\n\r\n Note: no trailing 
slash (\"/\") should be used.\r\n\r\n Accepted HTTP methods: GET, POST\r\n \"\"\"\r\n\r\n def __init__(self, **kwargs):\r\n \"\"\"\r\n Initialize schemas with appropriate classes.\r\n\r\n :param kwargs: pass through to base constructor (service and metric_type)\r\n \"\"\"\r\n schema = MlModelMetricSchema()\r\n schema_collection = MlModelMetricSchema(many=True)\r\n super().__init__(schema, schema_collection, **kwargs)\r\n\r\n def build_query(self):\r\n \"\"\"\r\n Override method to include specific query parameters to this ml_model\r\n endpoint.\r\n \"\"\"\r\n query = super().build_query()\r\n query = query.filter(Metric.metric_id == MlModelMetric.metric_id)\r\n algorithm = request.args.get('algorithm')\r\n if algorithm is not None:\r\n query = query.filter(MlModelMetric.algorithm == algorithm)\r\n return query\r\n\r\n\r\nclass MetricResource(BaseResource):\r\n \"\"\"\r\n This resource handles the HTTP requests coming to the endpoint \"/metrics/{metric_id}\".\r\n\r\n Note: no trailing slash (\"/\") should be used.\r\n\r\n Accepted HTTP methods: GET, PUT, DELETE\r\n \"\"\"\r\n\r\n def get(self, metric_id):\r\n \"\"\"\r\n Implements the GET method for endpoint \"/metrics/{metric_id}\". It should be used\r\n to get a single metric from the database.\r\n\r\n :param metric_id: the metric_id associated with this endpoint\r\n :return: the json object of metric found in the database (if it exists)\r\n \"\"\"\r\n metric = get_metric_by_id(metric_id)\r\n return self.schema.jsonify(metric)\r\n\r\n def put(self, metric_id):\r\n \"\"\"\r\n Implements the PUT method for endpoint \"/metrics/{metric_id}\". It should be used\r\n to update a metric.\r\n\r\n :param metric_id: the metric_id associated with this endpoint\r\n :return: the metric as a json after the update (in case of success)\r\n \"\"\"\r\n json_data = request.get_json(force=True)\r\n if not json_data:\r\n abort(400, message='No input data provided')\r\n\r\n # Validate and deserialize input\r\n metric = get_metric_by_id(metric_id)\r\n self.load(json_data, metric, db.session, partial=True)\r\n\r\n # if it was found and deserialized successfully try to commit\r\n try:\r\n db.session.commit()\r\n except SQLAlchemyError as e:\r\n abort(400, message=f'Database error. Reason: {e}')\r\n\r\n return success(json_data)\r\n\r\n def delete(self, metric_id):\r\n \"\"\"\r\n Implements the DELETE method for endpoint \"/metrics/{metric_id}\". It should be\r\n used to delete a metric result matching the provided metric_id and cob_date.\r\n\r\n :param metric_id: the metric_id associated with this endpoint\r\n :return: the metric as a json after the delete (in case of success)\r\n \"\"\"\r\n metric = get_metric_by_id(metric_id)\r\n # dump as json to send in the end if del is successful\r\n result = self.schema.dump(metric)\r\n\r\n # if result was found, delete it from database\r\n try:\r\n db.session.delete(metric)\r\n db.session.commit()\r\n except SQLAlchemyError as e:\r\n abort(400, message=f'Database error. 
Reason: {e}')\r\n return success(result)\r\n\r\n\r\nclass QuantModelMetricResource(MetricResource):\r\n \"\"\"\r\n This resource handles the HTTP requests coming to the endpoint\r\n \"/quant_model/metrics/{metric_id}\".\r\n\r\n This subclass uses everything from the base class and only needs to specify the\r\n appropriate schemas in the constructor.\r\n\r\n Note: no trailing slash (\"/\") should be used.\r\n\r\n Accepted HTTP methods: GET, PUT, DELETE\r\n \"\"\"\r\n\r\n def __init__(self, **kwargs):\r\n \"\"\"\r\n Initialize schemas with appropriate classes.\r\n\r\n :param kwargs: pass through to base constructor (service and metric_type)\r\n \"\"\"\r\n schema = QuantModelMetricSchema()\r\n schema_collection = QuantModelMetricSchema(many=True)\r\n super().__init__(schema, schema_collection, **kwargs)\r\n\r\n\r\nclass MlModelMetricResource(MetricResource):\r\n \"\"\"\r\n This resource handles the HTTP requests coming to the endpoint\r\n \"/ml_model/metrics/{metric_id}\".\r\n\r\n This subclass uses everything from the base class and only needs to specify the\r\n appropriate schemas in the constructor.\r\n\r\n Note: no trailing slash (\"/\") should be used.\r\n\r\n Accepted HTTP methods: GET, PUT, DELETE\r\n \"\"\"\r\n\r\n def __init__(self, **kwargs):\r\n \"\"\"\r\n Initialize schemas with appropriate classes.\r\n\r\n :param kwargs: pass through to base constructor (service and metric_type)\r\n \"\"\"\r\n schema = MlModelMetricSchema()\r\n schema_collection = MlModelMetricSchema(many=True)\r\n super().__init__(schema, schema_collection, **kwargs)",
"step-ids": [
16,
19,
23,
25,
26
]
}
|
[
16,
19,
23,
25,
26
] |
# coding=UTF-8
'''
Created on Jul 21, 2013
@author: jin
'''
from django import template
register = template.Library()
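# Template usage (illustrative): {{ my_list|get_list_number:1 }} returns my_list[1]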
@register.filter
def get_list_number(value,num):
result=value[num]
return result
# register.filter('get_list_number', get_list_number)
'''
test
'''
if __name__=='__main__':
print get_list_number([True,False,False],1)
|
normal
|
{
"blob_id": "679d4b224733dbe264caeeda4e228edd090ea9de",
"index": 7797,
"step-1": "# coding=UTF-8\n'''\nCreated on Jul 21, 2013\n\n@author: jin\n'''\nfrom django import template\nregister = template.Library()\[email protected]\ndef get_list_number(value,num):\n result=value[num]\n return result\n# register.filter('get_list_num', get_list_num) \n\n'''\ntest\n'''\nif __name__=='__main__':\n print get_list_number([True,False,False],1)\n ",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
from django.contrib import admin
from .models import Advert, Category, ImageAd
@admin.register(Advert)
class AdminAdvert(admin.ModelAdmin):
filter_horizontal = "categories",
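    # Trailing comma makes this a one-element tuple; the M2M "categories" field renders with the horizontal selector widget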
@admin.register(Category)
class AdminCategory(admin.ModelAdmin):
pass
@admin.register(ImageAd)
class AdminImageAd(admin.ModelAdmin):
pass
|
normal
|
{
"blob_id": "fdcee5b3f6b3ec170c9ef3017e0cc6c4b28cf22d",
"index": 454,
"step-1": "<mask token>\n\n\[email protected](ImageAd)\nclass AdminImageAd(admin.ModelAdmin):\n pass\n",
"step-2": "<mask token>\n\n\[email protected](Advert)\nclass AdminAdvert(admin.ModelAdmin):\n <mask token>\n\n\[email protected](Category)\nclass AdminCategory(admin.ModelAdmin):\n pass\n\n\[email protected](ImageAd)\nclass AdminImageAd(admin.ModelAdmin):\n pass\n",
"step-3": "<mask token>\n\n\[email protected](Advert)\nclass AdminAdvert(admin.ModelAdmin):\n filter_horizontal = 'categories',\n\n\[email protected](Category)\nclass AdminCategory(admin.ModelAdmin):\n pass\n\n\[email protected](ImageAd)\nclass AdminImageAd(admin.ModelAdmin):\n pass\n",
"step-4": "from django.contrib import admin\nfrom .models import Advert, Category, ImageAd\n\n\[email protected](Advert)\nclass AdminAdvert(admin.ModelAdmin):\n filter_horizontal = 'categories',\n\n\[email protected](Category)\nclass AdminCategory(admin.ModelAdmin):\n pass\n\n\[email protected](ImageAd)\nclass AdminImageAd(admin.ModelAdmin):\n pass\n",
"step-5": "from django.contrib import admin\nfrom .models import Advert, Category, ImageAd\n\n\[email protected](Advert)\nclass AdminAdvert(admin.ModelAdmin):\n filter_horizontal = \"categories\",\n\n\n\[email protected](Category)\nclass AdminCategory(admin.ModelAdmin):\n pass\n\n\[email protected](ImageAd)\nclass AdminImageAd(admin.ModelAdmin):\n pass\n",
"step-ids": [
1,
3,
4,
5,
6
]
}
|
[
1,
3,
4,
5,
6
] |
from django.http import HttpResponse
from django.shortcuts import render
def home(request):
return render(request, 'home.html')
def people(request):
return render(request, 'people.html')
def docs(request):
return render(request, 'docs.html')
def gallery(request, page=None):
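    # An optional 'page' URL kwarg selects a numbered template such as gallery_2.html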
if page:
return render(request, 'gallery_' + str(page) + '.html')
return render(request, 'gallery.html')
def publications(request):
return render(request, 'publications.html')
def access(request):
return render(request, 'access.html')
|
normal
|
{
"blob_id": "f7a493ab8e9845d0e9da33a0ee45d7c3ef66deb5",
"index": 7507,
"step-1": "<mask token>\n\n\ndef home(request):\n return render(request, 'home.html')\n\n\n<mask token>\n\n\ndef docs(request):\n return render(request, 'docs.html')\n\n\n<mask token>\n\n\ndef publications(request):\n return render(request, 'publications.html')\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef home(request):\n return render(request, 'home.html')\n\n\n<mask token>\n\n\ndef docs(request):\n return render(request, 'docs.html')\n\n\n<mask token>\n\n\ndef publications(request):\n return render(request, 'publications.html')\n\n\ndef access(request):\n return render(request, 'access.html')\n",
"step-3": "<mask token>\n\n\ndef home(request):\n return render(request, 'home.html')\n\n\n<mask token>\n\n\ndef docs(request):\n return render(request, 'docs.html')\n\n\ndef gallery(request, page=None):\n if page:\n return render(request, 'gallery_' + str(page) + '.html')\n return render(request, 'gallery.html')\n\n\ndef publications(request):\n return render(request, 'publications.html')\n\n\ndef access(request):\n return render(request, 'access.html')\n",
"step-4": "from django.http import HttpResponse\nfrom django.shortcuts import render\n\n\ndef home(request):\n return render(request, 'home.html')\n\n\ndef people(request):\n return render(request, 'people.html')\n\n\ndef docs(request):\n return render(request, 'docs.html')\n\n\ndef gallery(request, page=None):\n if page:\n return render(request, 'gallery_' + str(page) + '.html')\n return render(request, 'gallery.html')\n\n\ndef publications(request):\n return render(request, 'publications.html')\n\n\ndef access(request):\n return render(request, 'access.html')\n",
"step-5": null,
"step-ids": [
3,
4,
5,
7
]
}
|
[
3,
4,
5,
7
] |
#!/usr/bin/python
import serial
import time
import sys
senderId="\x01"
receiverId="\x00"
#openSerial just opens the serial connection
def openSerial(port):
#Some configuration for the serial port
ser = serial.Serial()
ser.baudrate = 300
ser.port = port
ser.bytesize = 8
ser.stopbits = 2
ser.open()
return ser
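#initializePort writes the 5-byte header: sender id, receiver id, layer version, payload length, protocol type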
def initializePort(ser, payloadLen, sender, receiver, layerVersion="\x02"):
#SenderID
ser.write(sender)
#ReceiverID
ser.write(receiver)
#layerconfig: At the moment layer2
ser.write(layerVersion)
#payloadlen
ser.write(payloadLen)
#USART Protocol type: No one reads this field at the moment
ser.write("\x01")
def main():
if (len(sys.argv) < 2):
print "sender.py <port>"
sys.exit(1)
layerVersion = "\x02"
if (len(sys.argv) >= 3):
layerVersion = "\x03"
print "Use reliable transport"
ser = openSerial(sys.argv[1])
time.sleep(2)
initializePort(ser, payloadLen="\x01", sender="\x01", receiver="\x00", layerVersion=layerVersion)
time.sleep(0.5)
char = 1
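	# Echo the first null-terminated string from the device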
while (char != "\x00"):
char = ser.read(1)
sys.stdout.write(char)
sys.stdout.flush()
print ""
while (char != "\x00"):
char = ser.read(1)
sys.stdout.write(char)
sys.stdout.flush()
ser.close()
if __name__ == '__main__':
main()
|
normal
|
{
"blob_id": "bf1d54015a9ae529f4fda4fa9b9f7c874ec3b240",
"index": 4514,
"step-1": "#!/usr/bin/python\n\nimport serial\nimport time\nimport sys\n\nsenderId=\"\\x01\"\nreceiverId=\"\\x00\"\n\n#openSerial just opens the serial connection\ndef openSerial(port):\n\t#Some configuration for the serial port\n\tser = serial.Serial()\n\tser.baudrate = 300\n\tser.port = port\n\tser.bytesize = 8\n\tser.stopbits = 2\n\tser.open()\n\treturn ser\n\ndef initializePort(ser, payloadLen, sender, receiver, layerVersion=\"\\x02\"):\n\t#SenderID\n\tser.write(sender)\n\t#ReceiverID\n\tser.write(receiver)\n\t#layerconfig: At the moment layer2\n\tser.write(layerVersion)\n\t#payloadlen\n\tser.write(payloadLen)\n\t#USART Protocol type: No one reads this field at the moment\n\tser.write(\"\\x01\")\n\ndef main():\n\tif (len(sys.argv) < 2):\n\t\tprint \"sender.py <port>\"\n\t\tsys.exit(1)\n\n\tlayerVersion = \"\\x02\"\n\tif (len(sys.argv) >= 3):\n\t\tlayerVersion = \"\\x03\"\n\t\tprint \"Use reliable transport\"\n\n\tser = openSerial(sys.argv[1])\n\ttime.sleep(2)\n\tinitializePort(ser, payloadLen=\"\\x01\", sender=\"\\x01\", receiver=\"\\x00\", layerVersion=layerVersion)\n\ttime.sleep(0.5)\n\tchar = 1\n\twhile (char != \"\\x00\"):\n\t\tchar = ser.read(1)\n\t\tsys.stdout.write(char)\n\t\tsys.stdout.flush()\n\n\tprint \"\"\n\n\twhile (char != \"\\x00\"):\n\t\tchar = ser.read(1)\n\t\tsys.stdout.write(char)\n\t\tsys.stdout.flush()\n\tser.close()\n\nif __name__ == '__main__':\n main()\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
try:
a = int(input("Enter a:"))
b = int(input("Enter b:"))
c = a/b
except ZeroDivisionError:
print("Can't divide with zero")
|
normal
|
{
"blob_id": "143f6ee38413a0713c18281e9737c09d9947a61a",
"index": 2805,
"step-1": "<mask token>\n",
"step-2": "try:\n a = int(input('Enter a:'))\n b = int(input('Enter b:'))\n c = a / b\nexcept:\n print(\"Can't divide with zero\")\n",
"step-3": "try:\n a = int(input(\"Enter a:\"))\n b = int(input(\"Enter b:\"))\n c = a/b\nexcept:\n print(\"Can't divide with zero\")",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
class Classifier(object):
"""
Trained classifier
"""
def __init__(self, classifier, scaler, orient, color_space,
pix_per_cell, cell_per_block, spatial_size, hist_bins):
"""
Initializes an instance.
Parameters
----------
classifier : Trained SciPy classifier for detecting vehicles.
        scaler : SciPy scaler to apply to X.
        orient : Number of HOG orientation bins.
        color_space : Color space used for feature extraction.
        pix_per_cell : HOG pixels per cell.
        cell_per_block : HOG cells per block.
        spatial_size : Spatial binning size for color features.
        hist_bins : Number of color histogram bins.
"""
self.classifier = classifier
self.scaler = scaler
self.color_space = color_space
self.orient = orient
self.pix_per_cell = pix_per_cell
self.cell_per_block = cell_per_block
self.spatial_size = spatial_size
self.hist_bins = hist_bins
|
normal
|
{
"blob_id": "9188d58a6d9e832b8908b823d57249fcdd80ff51",
"index": 171,
"step-1": "<mask token>\n",
"step-2": "class Classifier(object):\n <mask token>\n <mask token>\n",
"step-3": "class Classifier(object):\n <mask token>\n\n def __init__(self, classifier, scaler, orient, color_space,\n pix_per_cell, cell_per_block, spatial_size, hist_bins):\n \"\"\"\n Initializes an instance.\n Parameters\n ----------\n classifier : Trained SciPy classifier for detecting vehicles.\n scaler : SciPy scaler to apply to X.\n \"\"\"\n self.classifier = classifier\n self.scaler = scaler\n self.color_space = color_space\n self.orient = orient\n self.pix_per_cell = pix_per_cell\n self.cell_per_block = cell_per_block\n self.spatial_size = spatial_size\n self.hist_bins = hist_bins\n",
"step-4": "class Classifier(object):\n \"\"\"\n Trained classifier\n \"\"\"\n\n def __init__(self, classifier, scaler, orient, color_space,\n pix_per_cell, cell_per_block, spatial_size, hist_bins):\n \"\"\"\n Initializes an instance.\n Parameters\n ----------\n classifier : Trained SciPy classifier for detecting vehicles.\n scaler : SciPy scaler to apply to X.\n \"\"\"\n self.classifier = classifier\n self.scaler = scaler\n self.color_space = color_space\n self.orient = orient\n self.pix_per_cell = pix_per_cell\n self.cell_per_block = cell_per_block\n self.spatial_size = spatial_size\n self.hist_bins = hist_bins\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import hashlib
import json
#import logger
import Login.loger as logger
#configurations
import Configurations.config as config
def generate_data(*args):
    #add data into separate variables
try:
station_data = args[0]
except KeyError as e:
logger.log(log_type=config.log_error,params=e)
return None
#extract all variables from data
"""
There are the Parameters need to be extracted from the packet
Weather Parameters
1 - dateist
2 - dailyrainMM
3 - rain
4 - tempc
5 - winddir
6 - windspeedkmh
7 - humidity
8 - baromMM
Technical Parameters
1 - batt
2 - network
3 - RSSI
4 - action
5 - softwaretype
6 - version
"""
data_hashed = dict()
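    # Map each reported parameter name to a stable, per-station hash id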
#data_hashed['dateist']=generate_id('dateist',station_data['station_id'])
data_hashed['dailyrainMM']=generate_id('dailyrainMM',station_data['station_id'])
data_hashed['rain']=generate_id('rain',station_data['station_id'])
data_hashed['tempc']=generate_id('tempc',station_data['station_id'])
data_hashed['winddir']=generate_id('winddir',station_data['station_id'])
data_hashed['windspeedkmh']=generate_id('windspeedkmh',station_data['station_id'])
data_hashed['humidity']=generate_id('humidity',station_data['station_id'])
data_hashed['baromMM']=generate_id('baromMM',station_data['station_id'])
data_hashed['BAT']=generate_id('BAT',station_data['station_id'])
data_hashed['network']=generate_id('network',station_data['station_id'])
data_hashed['RSSI']=generate_id('RSSI',station_data['station_id'])
data_hashed['action']=generate_id('action',station_data['station_id'])
data_hashed['softwareType']=generate_id('softwareType',station_data['station_id'])
data_hashed['version']=generate_id('version',station_data['station_id'])
return data_hashed
def generate_id(parameter,station_id):
meta_data= parameter+station_id
    #generate all the keys for the hash ids
hash_id = hashlib.sha256(config.encryption_key)
hash_id.update(json.dumps(meta_data).encode())
return hash_id.hexdigest()
|
normal
|
{
"blob_id": "2a5c6f442e6e6cec6c4663b764c8a9a15aec8c40",
"index": 6971,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef generate_id(parameter, station_id):\n meta_data = parameter + station_id\n hash_id = hashlib.sha256(config.encryption_key)\n hash_id.update(json.dumps(meta_data).encode())\n return hash_id.hexdigest()\n",
"step-3": "<mask token>\n\n\ndef generate_data(*args):\n try:\n station_data = args[0]\n except KeyError as e:\n logger.log(log_type=config.log_error, params=e)\n return None\n \"\"\"\n There are the Parameters need to be extracted from the packet\n \n Weather Parameters\n 1 - dateist\n 2 - dailyrainMM\n 3 - rain\n 4 - tempc\n 5 - winddir\n 6 - windspeedkmh\n 7 - humidity\n 8 - baromMM\n\n Technical Parameters\n 1 - batt\n 2 - network\n 3 - RSSI\n 4 - action\n 5 - softwaretype\n 6 - version\n \"\"\"\n data_hashed = dict()\n data_hashed['dailyrainMM'] = generate_id('dailyrainMM', station_data[\n 'station_id'])\n data_hashed['rain'] = generate_id('rain', station_data['station_id'])\n data_hashed['tempc'] = generate_id('tempc', station_data['station_id'])\n data_hashed['winddir'] = generate_id('winddir', station_data['station_id'])\n data_hashed['windspeedkmh'] = generate_id('windspeedkmh', station_data[\n 'station_id'])\n data_hashed['humidity'] = generate_id('humidity', station_data[\n 'station_id'])\n data_hashed['baromMM'] = generate_id('baromMM', station_data['station_id'])\n data_hashed['BAT'] = generate_id('BAT', station_data['station_id'])\n data_hashed['network'] = generate_id('network', station_data['station_id'])\n data_hashed['RSSI'] = generate_id('RSSI', station_data['station_id'])\n data_hashed['action'] = generate_id('action', station_data['station_id'])\n data_hashed['softwareType'] = generate_id('softwareType', station_data[\n 'station_id'])\n data_hashed['version'] = generate_id('version', station_data['station_id'])\n return data_hashed\n\n\ndef generate_id(parameter, station_id):\n meta_data = parameter + station_id\n hash_id = hashlib.sha256(config.encryption_key)\n hash_id.update(json.dumps(meta_data).encode())\n return hash_id.hexdigest()\n",
"step-4": "import hashlib\nimport json\nimport Login.loger as logger\nimport Configurations.config as config\n\n\ndef generate_data(*args):\n try:\n station_data = args[0]\n except KeyError as e:\n logger.log(log_type=config.log_error, params=e)\n return None\n \"\"\"\n There are the Parameters need to be extracted from the packet\n \n Weather Parameters\n 1 - dateist\n 2 - dailyrainMM\n 3 - rain\n 4 - tempc\n 5 - winddir\n 6 - windspeedkmh\n 7 - humidity\n 8 - baromMM\n\n Technical Parameters\n 1 - batt\n 2 - network\n 3 - RSSI\n 4 - action\n 5 - softwaretype\n 6 - version\n \"\"\"\n data_hashed = dict()\n data_hashed['dailyrainMM'] = generate_id('dailyrainMM', station_data[\n 'station_id'])\n data_hashed['rain'] = generate_id('rain', station_data['station_id'])\n data_hashed['tempc'] = generate_id('tempc', station_data['station_id'])\n data_hashed['winddir'] = generate_id('winddir', station_data['station_id'])\n data_hashed['windspeedkmh'] = generate_id('windspeedkmh', station_data[\n 'station_id'])\n data_hashed['humidity'] = generate_id('humidity', station_data[\n 'station_id'])\n data_hashed['baromMM'] = generate_id('baromMM', station_data['station_id'])\n data_hashed['BAT'] = generate_id('BAT', station_data['station_id'])\n data_hashed['network'] = generate_id('network', station_data['station_id'])\n data_hashed['RSSI'] = generate_id('RSSI', station_data['station_id'])\n data_hashed['action'] = generate_id('action', station_data['station_id'])\n data_hashed['softwareType'] = generate_id('softwareType', station_data[\n 'station_id'])\n data_hashed['version'] = generate_id('version', station_data['station_id'])\n return data_hashed\n\n\ndef generate_id(parameter, station_id):\n meta_data = parameter + station_id\n hash_id = hashlib.sha256(config.encryption_key)\n hash_id.update(json.dumps(meta_data).encode())\n return hash_id.hexdigest()\n",
"step-5": "import hashlib\nimport json\n#import logger \nimport Login.loger as logger\n#configurations\nimport Configurations.config as config\n\ndef generate_data(*args):\n #add data into seperate variables\n try:\n station_data = args[0]\n except KeyError as e:\n logger.log(log_type=config.log_error,params=e)\n return None\n #extract all variables from data\n \"\"\"\n There are the Parameters need to be extracted from the packet\n \n Weather Parameters\n 1 - dateist\n 2 - dailyrainMM\n 3 - rain\n 4 - tempc\n 5 - winddir\n 6 - windspeedkmh\n 7 - humidity\n 8 - baromMM\n\n Technical Parameters\n 1 - batt\n 2 - network\n 3 - RSSI\n 4 - action\n 5 - softwaretype\n 6 - version\n \"\"\"\n data_hashed = dict()\n #data_hashed['dateist']=generate_id('dateist',station_data['station_id'])\n data_hashed['dailyrainMM']=generate_id('dailyrainMM',station_data['station_id'])\n data_hashed['rain']=generate_id('rain',station_data['station_id'])\n data_hashed['tempc']=generate_id('tempc',station_data['station_id'])\n data_hashed['winddir']=generate_id('winddir',station_data['station_id'])\n data_hashed['windspeedkmh']=generate_id('windspeedkmh',station_data['station_id'])\n data_hashed['humidity']=generate_id('humidity',station_data['station_id'])\n data_hashed['baromMM']=generate_id('baromMM',station_data['station_id'])\n data_hashed['BAT']=generate_id('BAT',station_data['station_id'])\n data_hashed['network']=generate_id('network',station_data['station_id'])\n data_hashed['RSSI']=generate_id('RSSI',station_data['station_id'])\n data_hashed['action']=generate_id('action',station_data['station_id'])\n data_hashed['softwareType']=generate_id('softwareType',station_data['station_id'])\n data_hashed['version']=generate_id('version',station_data['station_id'])\n return data_hashed \n\n\n\n \ndef generate_id(parameter,station_id):\n meta_data= parameter+station_id\n #generate all the keys for the has ids\n hash_id = hashlib.sha256(config.encryption_key)\n hash_id.update(json.dumps(meta_data).encode())\n return hash_id.hexdigest()\n ",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import pandas as pd
import matplotlib.pyplot as plt
loansData = pd.read_csv('loansData.csv')
# Print the first 5 rows of each of the column to see what needs to be cleaned
print loansData['Interest.Rate'][0:5]
print loansData['Loan.Length'][0:5]
print loansData['FICO.Range'][0:5]
# Clean up the columns
loansData['Interest.Rate'] = loansData['Interest.Rate'].map(
lambda x: x.rstrip('%'))
loansData['Loan.Length'] = loansData['Loan.Length'].map(
lambda x: x.rstrip('months'))
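# Note: rstrip('months') strips a *set* of characters from the right, not the literal suffix "months"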
# Print again to see if cleaning took place or not
print loansData['Interest.Rate'][0:5]
print loansData['Loan.Length'][0:5]
'''
Convert the data in FICO.Range into strings, then
split each range string and take its lowest value.
'''
loansData['FICO.Score'] = loansData['FICO.Range'].astype(str)
print loansData['FICO.Score'][0:5]
# Note: a pandas Series has no .split(); the per-row split is done in the loop below
loans_list = loansData['FICO.Score'].tolist()
FICO = []
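# Keep only the integer lower bound of each "low-high" FICO range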
for array in range(len(loans_list)):
loan = loans_list[array].split("-") # Split each sub-array on '-'
FICO.append(int(loan[0]))
loansData['FICO.Score'] = FICO
# Plot histogram
plt.figure()
p = loansData['FICO.Score'].hist()
plt.show()
# Create a scatterplot matrix
a = pd.scatter_matrix(loansData, alpha=0.05, figsize=(10, 10))
plt.show()
a = pd.scatter_matrix(loansData, alpha=0.05, figsize=(10, 10), diagonal='hist')
plt.show()
|
normal
|
{
"blob_id": "fc17b865815a7a5ec51f477a9fdda54667686eed",
"index": 1672,
"step-1": "import pandas as pd\nimport matplotlib.pyplot as plt\n\n\nloansData = pd.read_csv('loansData.csv')\n\n# Print the first 5 rows of each of the column to see what needs to be cleaned\nprint loansData['Interest.Rate'][0:5]\nprint loansData['Loan.Length'][0:5]\nprint loansData['FICO.Range'][0:5]\n\n\n# Clean up the columns\nloansData['Interest.Rate'] = loansData['Interest.Rate'].map(\n lambda x: x.rstrip('%'))\nloansData['Loan.Length'] = loansData['Loan.Length'].map(\n lambda x: x.rstrip('months'))\n\n# Print again to see if cleaning took place or not\nprint loansData['Interest.Rate'][0:5]\nprint loansData['Loan.Length'][0:5]\n\n\n'''\nconvert the data in FICO Range into string and\nsplit the string and take the lowest value.\n'''\nloansData['FICO.Score'] = loansData['FICO.Range'].astype(str)\nprint loansData['FICO.Score'][0:5]\nloansData['FICO.Score'] = loansData['FICO.Score'].split()\nprint loansData['FICO.Score'][0:5]\n\n\nloans_list = loansData['FICO.Score'].tolist()\n\nFICO = []\nfor array in range(len(loans_list)):\n loan = loans_list[array].split(\"-\") # Split each sub-array on '-'\n FICO.append(int(loan[0]))\n\nloansData['FICO.Score'] = FICO\n\n# Plot histogram\nplt.figure()\np = loansData['FICO.Score'].hist()\nplt.show()\n\n# Create a scatterplot matrix\na = pd.scatter_matrix(loansData, alpha=0.05, figure=(10, 10))\nplt.show()\n\na = pd.scatter_matrix(loansData, alpha=0.05, figure=(10, 10), diagonal='hist')\nplt.show()\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
from django.test import TestCase, Client
from django.urls import reverse
from django.contrib.auth import get_user_model
from tweets.models import Tweet
from ..models import UserProfile
User = get_user_model()
class TestAccountsViews(TestCase):
def setUp(self):
self.username = 'masterbdx'
self.email = '[email protected]'
self.password = '123456789'
self.user = User.objects.create_superuser(email=self.email,
username=self.username,
password=self.password,
subscribed=True
)
self.tweet = Tweet.objects.create(
user=self.user,
content='hello world',)
self.client = Client()
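        # Fresh test client; requests stay anonymous until login() is called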
def test_login_view(self):
response = self.client.get(reverse('accounts:login'))
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'accounts/login.html')
def test_logout_view(self):
response = self.client.get(reverse('accounts:logout'))
self.assertEqual(response.status_code, 302)
def test_profile_view(self):
url = reverse('accounts:profile', kwargs={'user_slug': self.user.slug})
response = self.client.get(url)
self.assertTemplateUsed(response, 'accounts/profile.html')
self.assertEqual(response.status_code, 200)
def test_register_view(self):
url = reverse('accounts:register')
response = self.client.get(url)
self.assertTemplateUsed(response, 'accounts/register.html')
self.assertEqual(response.status_code, 200)
def test_userfollow_view(self):
url = reverse('accounts:follow', kwargs={'user_slug': self.user.slug})
response = self.client.get(url)
self.assertEqual(response.status_code, 302)
        self.client.login(
            email=self.email, password=self.password)
        response = self.client.get(url)
        self.assertEqual(response.status_code, 302)
def test_follow_manager_view(self):
url = reverse('accounts:follow_manage', kwargs={
'user_slug': self.user.slug})
response = self.client.get(url)
self.assertEqual(response.status_code, 302)
        self.client.login(
            email=self.email, password=self.password)
        response = self.client.get(url)
        self.assertEqual(response.status_code, 302)
def test_profile_update_view(self):
url = reverse('accounts:profile_update', kwargs={
'pk': self.user.id})
response = self.client.get(url)
self.assertEqual(response.status_code, 302)
def test_followers_view(self):
url = reverse('accounts:followers', kwargs={
'user_slug': self.user.slug})
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'accounts/followers.html')
def test_following_view(self):
url = reverse('accounts:following', kwargs={
'user_slug': self.user.slug})
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'accounts/following.html')
def test_user_like_view(self):
url = reverse('accounts:user-like', kwargs={
'slug': self.user.slug, 'pk': self.tweet.id})
response = self.client.get(url)
self.assertEqual(response.status_code, 403)
self.client.login(
email=self.email, password=self.password)
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
def test_theme_view(self):
url = reverse('accounts:theme')
response = self.client.post(url)
self.assertEqual(response.status_code, 302)
self.client.login(
email=self.email, password=self.password)
response = self.client.post(url)
self.assertEqual(response.status_code, 302)
|
normal
|
{
"blob_id": "888a5847beca2470f4063da474da1f05079abca9",
"index": 5579,
"step-1": "<mask token>\n\n\nclass TestAccountsViews(TestCase):\n <mask token>\n <mask token>\n\n def test_logout_view(self):\n response = self.client.get(reverse('accounts:logout'))\n self.assertEqual(response.status_code, 302)\n <mask token>\n\n def test_register_view(self):\n url = reverse('accounts:register')\n response = self.client.get(url)\n self.assertTemplateUsed(response, 'accounts/register.html')\n self.assertEqual(response.status_code, 200)\n\n def test_userfollow_view(self):\n url = reverse('accounts:follow', kwargs={'user_slug': self.user.slug})\n response = self.client.get(url)\n self.assertEqual(response.status_code, 302)\n self.client.login(email=self.email, password=self.password)\n self.assertEqual(response.status_code, 302)\n\n def test_follow_manager_view(self):\n url = reverse('accounts:follow_manage', kwargs={'user_slug': self.\n user.slug})\n response = self.client.get(url)\n self.assertEqual(response.status_code, 302)\n self.client.login(email=self.email, password=self.password)\n self.assertEqual(response.status_code, 302)\n\n def test_profile_update_view(self):\n url = reverse('accounts:profile_update', kwargs={'pk': self.user.id})\n response = self.client.get(url)\n self.assertEqual(response.status_code, 302)\n\n def test_followers_view(self):\n url = reverse('accounts:followers', kwargs={'user_slug': self.user.\n slug})\n response = self.client.get(url)\n self.assertEqual(response.status_code, 200)\n self.assertTemplateUsed(response, 'accounts/followers.html')\n\n def test_following_view(self):\n url = reverse('accounts:following', kwargs={'user_slug': self.user.\n slug})\n response = self.client.get(url)\n self.assertEqual(response.status_code, 200)\n self.assertTemplateUsed(response, 'accounts/following.html')\n\n def test_user_like_view(self):\n url = reverse('accounts:user-like', kwargs={'slug': self.user.slug,\n 'pk': self.tweet.id})\n response = self.client.get(url)\n self.assertEqual(response.status_code, 403)\n self.client.login(email=self.email, password=self.password)\n response = self.client.get(url)\n self.assertEqual(response.status_code, 200)\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass TestAccountsViews(TestCase):\n <mask token>\n <mask token>\n\n def test_logout_view(self):\n response = self.client.get(reverse('accounts:logout'))\n self.assertEqual(response.status_code, 302)\n <mask token>\n\n def test_register_view(self):\n url = reverse('accounts:register')\n response = self.client.get(url)\n self.assertTemplateUsed(response, 'accounts/register.html')\n self.assertEqual(response.status_code, 200)\n\n def test_userfollow_view(self):\n url = reverse('accounts:follow', kwargs={'user_slug': self.user.slug})\n response = self.client.get(url)\n self.assertEqual(response.status_code, 302)\n self.client.login(email=self.email, password=self.password)\n self.assertEqual(response.status_code, 302)\n\n def test_follow_manager_view(self):\n url = reverse('accounts:follow_manage', kwargs={'user_slug': self.\n user.slug})\n response = self.client.get(url)\n self.assertEqual(response.status_code, 302)\n self.client.login(email=self.email, password=self.password)\n self.assertEqual(response.status_code, 302)\n\n def test_profile_update_view(self):\n url = reverse('accounts:profile_update', kwargs={'pk': self.user.id})\n response = self.client.get(url)\n self.assertEqual(response.status_code, 302)\n\n def test_followers_view(self):\n url = reverse('accounts:followers', kwargs={'user_slug': self.user.\n slug})\n response = self.client.get(url)\n self.assertEqual(response.status_code, 200)\n self.assertTemplateUsed(response, 'accounts/followers.html')\n\n def test_following_view(self):\n url = reverse('accounts:following', kwargs={'user_slug': self.user.\n slug})\n response = self.client.get(url)\n self.assertEqual(response.status_code, 200)\n self.assertTemplateUsed(response, 'accounts/following.html')\n\n def test_user_like_view(self):\n url = reverse('accounts:user-like', kwargs={'slug': self.user.slug,\n 'pk': self.tweet.id})\n response = self.client.get(url)\n self.assertEqual(response.status_code, 403)\n self.client.login(email=self.email, password=self.password)\n response = self.client.get(url)\n self.assertEqual(response.status_code, 200)\n\n def test_theme_view(self):\n url = reverse('accounts:theme')\n response = self.client.post(url)\n self.assertEqual(response.status_code, 302)\n self.client.login(email=self.email, password=self.password)\n response = self.client.post(url)\n self.assertEqual(response.status_code, 302)\n",
"step-3": "<mask token>\n\n\nclass TestAccountsViews(TestCase):\n <mask token>\n\n def test_login_view(self):\n response = self.client.get(reverse('accounts:login'))\n self.assertEqual(response.status_code, 200)\n self.assertTemplateUsed(response, 'accounts/login.html')\n\n def test_logout_view(self):\n response = self.client.get(reverse('accounts:logout'))\n self.assertEqual(response.status_code, 302)\n <mask token>\n\n def test_register_view(self):\n url = reverse('accounts:register')\n response = self.client.get(url)\n self.assertTemplateUsed(response, 'accounts/register.html')\n self.assertEqual(response.status_code, 200)\n\n def test_userfollow_view(self):\n url = reverse('accounts:follow', kwargs={'user_slug': self.user.slug})\n response = self.client.get(url)\n self.assertEqual(response.status_code, 302)\n self.client.login(email=self.email, password=self.password)\n self.assertEqual(response.status_code, 302)\n\n def test_follow_manager_view(self):\n url = reverse('accounts:follow_manage', kwargs={'user_slug': self.\n user.slug})\n response = self.client.get(url)\n self.assertEqual(response.status_code, 302)\n self.client.login(email=self.email, password=self.password)\n self.assertEqual(response.status_code, 302)\n\n def test_profile_update_view(self):\n url = reverse('accounts:profile_update', kwargs={'pk': self.user.id})\n response = self.client.get(url)\n self.assertEqual(response.status_code, 302)\n\n def test_followers_view(self):\n url = reverse('accounts:followers', kwargs={'user_slug': self.user.\n slug})\n response = self.client.get(url)\n self.assertEqual(response.status_code, 200)\n self.assertTemplateUsed(response, 'accounts/followers.html')\n\n def test_following_view(self):\n url = reverse('accounts:following', kwargs={'user_slug': self.user.\n slug})\n response = self.client.get(url)\n self.assertEqual(response.status_code, 200)\n self.assertTemplateUsed(response, 'accounts/following.html')\n\n def test_user_like_view(self):\n url = reverse('accounts:user-like', kwargs={'slug': self.user.slug,\n 'pk': self.tweet.id})\n response = self.client.get(url)\n self.assertEqual(response.status_code, 403)\n self.client.login(email=self.email, password=self.password)\n response = self.client.get(url)\n self.assertEqual(response.status_code, 200)\n\n def test_theme_view(self):\n url = reverse('accounts:theme')\n response = self.client.post(url)\n self.assertEqual(response.status_code, 302)\n self.client.login(email=self.email, password=self.password)\n response = self.client.post(url)\n self.assertEqual(response.status_code, 302)\n",
"step-4": "<mask token>\n\n\nclass TestAccountsViews(TestCase):\n\n def setUp(self):\n self.username = 'masterbdx'\n self.email = '[email protected]'\n self.password = '123456789'\n self.user = User.objects.create_superuser(email=self.email,\n username=self.username, password=self.password, subscribed=True)\n self.tweet = Tweet.objects.create(user=self.user, content='hello world'\n )\n self.client = Client()\n\n def test_login_view(self):\n response = self.client.get(reverse('accounts:login'))\n self.assertEqual(response.status_code, 200)\n self.assertTemplateUsed(response, 'accounts/login.html')\n\n def test_logout_view(self):\n response = self.client.get(reverse('accounts:logout'))\n self.assertEqual(response.status_code, 302)\n <mask token>\n\n def test_register_view(self):\n url = reverse('accounts:register')\n response = self.client.get(url)\n self.assertTemplateUsed(response, 'accounts/register.html')\n self.assertEqual(response.status_code, 200)\n\n def test_userfollow_view(self):\n url = reverse('accounts:follow', kwargs={'user_slug': self.user.slug})\n response = self.client.get(url)\n self.assertEqual(response.status_code, 302)\n self.client.login(email=self.email, password=self.password)\n self.assertEqual(response.status_code, 302)\n\n def test_follow_manager_view(self):\n url = reverse('accounts:follow_manage', kwargs={'user_slug': self.\n user.slug})\n response = self.client.get(url)\n self.assertEqual(response.status_code, 302)\n self.client.login(email=self.email, password=self.password)\n self.assertEqual(response.status_code, 302)\n\n def test_profile_update_view(self):\n url = reverse('accounts:profile_update', kwargs={'pk': self.user.id})\n response = self.client.get(url)\n self.assertEqual(response.status_code, 302)\n\n def test_followers_view(self):\n url = reverse('accounts:followers', kwargs={'user_slug': self.user.\n slug})\n response = self.client.get(url)\n self.assertEqual(response.status_code, 200)\n self.assertTemplateUsed(response, 'accounts/followers.html')\n\n def test_following_view(self):\n url = reverse('accounts:following', kwargs={'user_slug': self.user.\n slug})\n response = self.client.get(url)\n self.assertEqual(response.status_code, 200)\n self.assertTemplateUsed(response, 'accounts/following.html')\n\n def test_user_like_view(self):\n url = reverse('accounts:user-like', kwargs={'slug': self.user.slug,\n 'pk': self.tweet.id})\n response = self.client.get(url)\n self.assertEqual(response.status_code, 403)\n self.client.login(email=self.email, password=self.password)\n response = self.client.get(url)\n self.assertEqual(response.status_code, 200)\n\n def test_theme_view(self):\n url = reverse('accounts:theme')\n response = self.client.post(url)\n self.assertEqual(response.status_code, 302)\n self.client.login(email=self.email, password=self.password)\n response = self.client.post(url)\n self.assertEqual(response.status_code, 302)\n",
"step-5": "from django.test import TestCase, Client\nfrom django.urls import reverse\nfrom django.contrib.auth import get_user_model\n\nfrom tweets.models import Tweet\nfrom ..models import UserProfile\nUser = get_user_model()\n\n\nclass TestAccountsViews(TestCase):\n def setUp(self):\n self.username = 'masterbdx'\n self.email = '[email protected]'\n self.password = '123456789'\n self.user = User.objects.create_superuser(email=self.email,\n username=self.username,\n password=self.password,\n subscribed=True\n )\n\n self.tweet = Tweet.objects.create(\n user=self.user,\n content='hello world',)\n\n self.client = Client()\n\n def test_login_view(self):\n response = self.client.get(reverse('accounts:login'))\n self.assertEqual(response.status_code, 200)\n self.assertTemplateUsed(response, 'accounts/login.html')\n\n def test_logout_view(self):\n response = self.client.get(reverse('accounts:logout'))\n self.assertEqual(response.status_code, 302)\n\n def test_profile_view(self):\n url = reverse('accounts:profile', kwargs={'user_slug': self.user.slug})\n response = self.client.get(url)\n self.assertTemplateUsed(response, 'accounts/profile.html')\n self.assertEqual(response.status_code, 200)\n\n def test_register_view(self):\n url = reverse('accounts:register')\n response = self.client.get(url)\n self.assertTemplateUsed(response, 'accounts/register.html')\n self.assertEqual(response.status_code, 200)\n\n def test_userfollow_view(self):\n url = reverse('accounts:follow', kwargs={'user_slug': self.user.slug})\n response = self.client.get(url)\n self.assertEqual(response.status_code, 302)\n self.client.login(\n email=self.email, password=self.password)\n self.assertEqual(response.status_code, 302)\n\n def test_follow_manager_view(self):\n url = reverse('accounts:follow_manage', kwargs={\n 'user_slug': self.user.slug})\n response = self.client.get(url)\n self.assertEqual(response.status_code, 302)\n self.client.login(\n email=self.email, password=self.password)\n self.assertEqual(response.status_code, 302)\n\n def test_profile_update_view(self):\n url = reverse('accounts:profile_update', kwargs={\n 'pk': self.user.id})\n response = self.client.get(url)\n self.assertEqual(response.status_code, 302)\n\n def test_followers_view(self):\n url = reverse('accounts:followers', kwargs={\n 'user_slug': self.user.slug})\n response = self.client.get(url)\n self.assertEqual(response.status_code, 200)\n self.assertTemplateUsed(response, 'accounts/followers.html')\n\n def test_following_view(self):\n url = reverse('accounts:following', kwargs={\n 'user_slug': self.user.slug})\n response = self.client.get(url)\n self.assertEqual(response.status_code, 200)\n self.assertTemplateUsed(response, 'accounts/following.html')\n\n def test_user_like_view(self):\n url = reverse('accounts:user-like', kwargs={\n 'slug': self.user.slug, 'pk': self.tweet.id})\n response = self.client.get(url)\n self.assertEqual(response.status_code, 403)\n self.client.login(\n email=self.email, password=self.password)\n response = self.client.get(url)\n self.assertEqual(response.status_code, 200)\n\n def test_theme_view(self):\n url = reverse('accounts:theme')\n response = self.client.post(url)\n self.assertEqual(response.status_code, 302)\n self.client.login(\n email=self.email, password=self.password)\n response = self.client.post(url)\n self.assertEqual(response.status_code, 302)\n",
"step-ids": [
9,
10,
11,
12,
16
]
}
|
[
9,
10,
11,
12,
16
] |
TABLE_NAME = 'active_module'
|
normal
|
{
"blob_id": "ff3962d875da8e3f9e6c3178b1a8191ebb8a7b60",
"index": 3639,
"step-1": "<mask token>\n",
"step-2": "TABLE_NAME = 'active_module'\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
"""
TODO: update description after everything (requirements) is (are) stable/concrete
Description: Script to extract KeepingTrac's creative names and send
team notification to start manual mapping as necessary.
This step must happen BEFORE the processing of deduping of RenTrak
creative names (step 2 in RenTrak processing).
"""
import os

import pandas as pd

from mailer import Mailer
from vertica_utils import *  # expected to provide vertica_python and conn_info
from s3_utils import *  # expected to provide ROOT_FOLDER and ONSHORE_EMAIL_RECIPIENTS
def notify_for_manual_mapping(file, process_name):
email_str = """
    <p>The Python script extracted new creative names from KeepingTrac data.</p>
    <p>To run the rest of the RenTrak ETL process smoothly, please do the following:
<ol>
<li>download the attached file, <b>{0}</b>, from this email</li>
    <li>fill up empty (nan/NULL) kt_creative mappings under column C (<b>kt_creative_clean</b>) in that file</li>
<li>upload the modified file to the S3 location below
<span style="color: red;">(replace any file with the same name in the S3 folder, if any)</span>:<br>
<b>diap.prod.us-east-1.target/RenTrak/CreativeCleaned</b>
</li>
<li>run this feed in DataVault: <b>InCampaign KT Creative Mappings</b></li>
<li><span style="color: red;">AFTER the DataVault feed successfully loaded the mappings</span>,
run this SQL in Vertica backend: <br>
<b>
UPDATE gaintheory_us_targetusa_14.incampaign_process_switches
SET run = 1
WHERE process_name = '{1}';
</b>
</li>
</ol>
</p>
<p><strong style="color: red;">NOTE: If you forget a step or more above, the second part of RenTrak processing
may not produce correct results.</strong></p>
""".format(file, process_name)
return email_str
def notify_no_new_mapping_found():
email_str = """
    <p>The Python script did not find any new creative names in the KeepingTrac data.
    Stage 2 of RenTrak processing will begin when new data is loaded into the RenTrak tables.
    </p>
<p><b>No further action on your part is needed.</b></p>
"""
return email_str
def send_notification_email(recipients, subject, body, attachment=None):
Mailer().send_email(recipients, subject, body, attachment)
print("Notification email sent.")
# Extract the result of a query from Vertica into a pandas DataFrame
def vertica_extract(query, columns, index=None):
with vertica_python.connect(**conn_info) as connection:
cur = connection.cursor()
cur.execute(query)
results = pd.DataFrame(cur.fetchall())
results.columns = columns
if index:
return results.set_index(index)
else:
return results
def set_flag_value(table_name, schema_name, flag_name, value):
return """
UPDATE {1}.{0}
SET run = {3}
WHERE process_name = '{2}';
COMMIT;
""".format(table_name, schema_name, flag_name, value)
def set_lock(table_name, schema_name, flag_name, value):
with vertica_python.connect(**conn_info) as connection:
cur = connection.cursor()
cur.execute(set_flag_value(table_name, schema_name, flag_name, value))
connection.commit()
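
# Illustrative only (not part of the original script): with the names used in
# main() below, set_flag_value('incampaign_process_switches',
# 'gaintheory_us_targetusa_14', 'rentrak_kt_creative_cleaned', 1) renders
# roughly this SQL:
#
#     UPDATE gaintheory_us_targetusa_14.incampaign_process_switches
#     SET run = 1
#     WHERE process_name = 'rentrak_kt_creative_cleaned';
#     COMMIT;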
def main():
# start_date = (today - datetime.timedelta(weeks=6, days=1)).strftime('%Y-%m-%d')
schema_name = 'gaintheory_us_targetusa_14'
mapping_table = 'incampaign_kt_creative_mappings'
flag_table = 'incampaign_process_switches'
flag = 'rentrak_kt_creative_cleaned'
# Location of sources and destination files
output_folder = ROOT_FOLDER + 'RenTrak'
output_file = 'kt_creative_cleaned.xlsx'
if not os.path.exists(output_folder):
print("Creating a new local folder for export file:", output_folder)
os.makedirs(output_folder)
# Step 1: Download all possible KT combinations and current matching cleaned creative names
extract_query = """
SELECT Air_ISCI as kt_creative_id, Cmml_Title AS kt_creative, kt_creative_clean
FROM {1}.incampaign_keepingtrac_all a
LEFT JOIN {1}.{0} b
ON a.Air_ISCI = b.kt_creative_id
WHERE Air_ISCI IS NOT NULL
GROUP BY a.Air_ISCI, a.Cmml_Title, kt_creative_clean
ORDER BY kt_creative_id
""".format(mapping_table, schema_name)
df = vertica_extract(
extract_query,
['kt_creative_id', 'kt_creative', 'kt_creative_clean']
)
    unmapped_creatives = df.isnull().sum()['kt_creative_clean']  # count rows still missing a cleaned name
if unmapped_creatives > 0:
print("Some unmapped kt_creatives found")
print("Acquiring process lock:", flag, "so that the second part of RenTrak processing cannot proceed")
set_lock(flag_table, schema_name, flag, 0)
file_to_export = os.path.join(output_folder, output_file)
df[df['kt_creative_clean'].isnull()].to_excel(file_to_export, index=False)
# Send email to tell the team to start manual mapping
subject = "RenTrak automated processing step 1: new kt_creatives need to be mapped"
body = notify_for_manual_mapping(output_file, flag)
send_notification_email(ONSHORE_EMAIL_RECIPIENTS, subject, body, file_to_export)
print("Notified the team to add manual mapping")
os.remove(file_to_export)
print("Deleted local file=>", file_to_export)
else:
print("Everything is mapped")
print("Releasing process lock:", flag, "so that the second part of RenTrak processing can proceed")
set_lock(flag_table, schema_name, flag, 1)
# insert, set flag to 1 and send email notification about being cleaned
subject = "RenTrak automated processing step 1: kt_creatives are all mapped. Step 2 will automatically commence."
body = notify_no_new_mapping_found()
send_notification_email(ONSHORE_EMAIL_RECIPIENTS, subject, body)
print("Notified the team that no further action on their part is required")
if __name__ == "__main__":
main()
|
normal
|
{
"blob_id": "71c6d5e385e3db8444d7ef8b0231e72db8538eb7",
"index": 8106,
"step-1": "<mask token>\n\n\ndef notify_no_new_mapping_found():\n email_str = \"\"\"\n <p>Python script does not find any new creative names from keepingtrac data.\n Stage 2 of processing RenTrak data will begin when we load new data to RenTrak tables.\n </p>\n <p><b>No further action on your part is needed.</b></p>\n \"\"\"\n return email_str\n\n\n<mask token>\n\n\ndef vertica_extract(query, columns, index=None):\n with vertica_python.connect(**conn_info) as connection:\n cur = connection.cursor()\n cur.execute(query)\n results = pd.DataFrame(cur.fetchall())\n results.columns = columns\n if index:\n return results.set_index(index)\n else:\n return results\n\n\ndef set_flag_value(table_name, schema_name, flag_name, value):\n return (\n \"\"\"\n UPDATE {1}.{0}\n SET run = {3}\n WHERE process_name = '{2}';\n COMMIT;\n \"\"\"\n .format(table_name, schema_name, flag_name, value))\n\n\ndef set_lock(table_name, schema_name, flag_name, value):\n with vertica_python.connect(**conn_info) as connection:\n cur = connection.cursor()\n cur.execute(set_flag_value(table_name, schema_name, flag_name, value))\n connection.commit()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef notify_no_new_mapping_found():\n email_str = \"\"\"\n <p>Python script does not find any new creative names from keepingtrac data.\n Stage 2 of processing RenTrak data will begin when we load new data to RenTrak tables.\n </p>\n <p><b>No further action on your part is needed.</b></p>\n \"\"\"\n return email_str\n\n\ndef send_notification_email(recipients, subject, body, attachment=None):\n Mailer().send_email(recipients, subject, body, attachment)\n print('Notification email sent.')\n\n\ndef vertica_extract(query, columns, index=None):\n with vertica_python.connect(**conn_info) as connection:\n cur = connection.cursor()\n cur.execute(query)\n results = pd.DataFrame(cur.fetchall())\n results.columns = columns\n if index:\n return results.set_index(index)\n else:\n return results\n\n\ndef set_flag_value(table_name, schema_name, flag_name, value):\n return (\n \"\"\"\n UPDATE {1}.{0}\n SET run = {3}\n WHERE process_name = '{2}';\n COMMIT;\n \"\"\"\n .format(table_name, schema_name, flag_name, value))\n\n\ndef set_lock(table_name, schema_name, flag_name, value):\n with vertica_python.connect(**conn_info) as connection:\n cur = connection.cursor()\n cur.execute(set_flag_value(table_name, schema_name, flag_name, value))\n connection.commit()\n\n\ndef main():\n schema_name = 'gaintheory_us_targetusa_14'\n mapping_table = 'incampaign_kt_creative_mappings'\n flag_table = 'incampaign_process_switches'\n flag = 'rentrak_kt_creative_cleaned'\n output_folder = ROOT_FOLDER + 'RenTrak'\n output_file = 'kt_creative_cleaned.xlsx'\n if not os.path.exists(output_folder):\n print('Creating a new local folder for export file:', output_folder)\n os.makedirs(output_folder)\n extract_query = (\n \"\"\"\n SELECT Air_ISCI as kt_creative_id, Cmml_Title AS kt_creative, kt_creative_clean\n FROM {1}.incampaign_keepingtrac_all a\n LEFT JOIN {1}.{0} b\n ON a.Air_ISCI = b.kt_creative_id\n WHERE Air_ISCI IS NOT NULL\n GROUP BY a.Air_ISCI, a.Cmml_Title, kt_creative_clean\n ORDER BY kt_creative_id\n \"\"\"\n .format(mapping_table, schema_name))\n df = vertica_extract(extract_query, ['kt_creative_id', 'kt_creative',\n 'kt_creative_clean'])\n unmapped_creatives = df.isnull().sum()['kt_creative_clean']\n if unmapped_creatives > 0:\n print('Some unmapped kt_creatives found')\n print('Acquiring process lock:', flag,\n 'so that the second part of RenTrak processing cannot proceed')\n set_lock(flag_table, schema_name, flag, 0)\n file_to_export = os.path.join(output_folder, output_file)\n df[df['kt_creative_clean'].isnull()].to_excel(file_to_export, index\n =False)\n subject = (\n 'RenTrak automated processing step 1: new kt_creatives need to be mapped'\n )\n body = notify_for_manual_mapping(output_file, flag)\n send_notification_email(ONSHORE_EMAIL_RECIPIENTS, subject, body,\n file_to_export)\n print('Notified the team to add manual mapping')\n os.remove(file_to_export)\n print('Deleted local file=>', file_to_export)\n else:\n print('Everything is mapped')\n print('Releasing process lock:', flag,\n 'so that the second part of RenTrak processing can proceed')\n set_lock(flag_table, schema_name, flag, 1)\n subject = (\n 'RenTrak automated processing step 1: kt_creatives are all mapped. Step 2 will automatically commence.'\n )\n body = notify_no_new_mapping_found()\n send_notification_email(ONSHORE_EMAIL_RECIPIENTS, subject, body)\n print(\n 'Notified the team that no further action on their part is required'\n )\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef notify_for_manual_mapping(file, process_name):\n email_str = (\n \"\"\"\n <p>Python script extracted new creative names from KeepingTrac data.</p>\n <p>To run the rest of the RenTrak ETL process smoothly, please do the followings:\n <ol>\n <li>download the attached file, <b>{0}</b>, from this email</li>\n <li>fill up empty (nan/NULL) kt_creative mappings under column C (kt_creative_clean) in that file</b></li>\n <li>upload the modified file to the S3 location below\n <span style=\"color: red;\">(replace any file with the same name in the S3 folder, if any)</span>:<br>\n <b>diap.prod.us-east-1.target/RenTrak/CreativeCleaned</b>\n </li>\n <li>run this feed in DataVault: <b>InCampaign KT Creative Mappings</b></li>\n <li><span style=\"color: red;\">AFTER the DataVault feed successfully loaded the mappings</span>,\n run this SQL in Vertica backend: <br>\n <b>\n UPDATE gaintheory_us_targetusa_14.incampaign_process_switches\n SET run = 1\n WHERE process_name = '{1}';\n </b>\n </li>\n </ol>\n </p>\n <p><strong style=\"color: red;\">NOTE: If you forget a step or more above, the second part of RenTrak processing\n may not produce correct results.</strong></p>\n \"\"\"\n .format(file, process_name))\n return email_str\n\n\ndef notify_no_new_mapping_found():\n email_str = \"\"\"\n <p>Python script does not find any new creative names from keepingtrac data.\n Stage 2 of processing RenTrak data will begin when we load new data to RenTrak tables.\n </p>\n <p><b>No further action on your part is needed.</b></p>\n \"\"\"\n return email_str\n\n\ndef send_notification_email(recipients, subject, body, attachment=None):\n Mailer().send_email(recipients, subject, body, attachment)\n print('Notification email sent.')\n\n\ndef vertica_extract(query, columns, index=None):\n with vertica_python.connect(**conn_info) as connection:\n cur = connection.cursor()\n cur.execute(query)\n results = pd.DataFrame(cur.fetchall())\n results.columns = columns\n if index:\n return results.set_index(index)\n else:\n return results\n\n\ndef set_flag_value(table_name, schema_name, flag_name, value):\n return (\n \"\"\"\n UPDATE {1}.{0}\n SET run = {3}\n WHERE process_name = '{2}';\n COMMIT;\n \"\"\"\n .format(table_name, schema_name, flag_name, value))\n\n\ndef set_lock(table_name, schema_name, flag_name, value):\n with vertica_python.connect(**conn_info) as connection:\n cur = connection.cursor()\n cur.execute(set_flag_value(table_name, schema_name, flag_name, value))\n connection.commit()\n\n\ndef main():\n schema_name = 'gaintheory_us_targetusa_14'\n mapping_table = 'incampaign_kt_creative_mappings'\n flag_table = 'incampaign_process_switches'\n flag = 'rentrak_kt_creative_cleaned'\n output_folder = ROOT_FOLDER + 'RenTrak'\n output_file = 'kt_creative_cleaned.xlsx'\n if not os.path.exists(output_folder):\n print('Creating a new local folder for export file:', output_folder)\n os.makedirs(output_folder)\n extract_query = (\n \"\"\"\n SELECT Air_ISCI as kt_creative_id, Cmml_Title AS kt_creative, kt_creative_clean\n FROM {1}.incampaign_keepingtrac_all a\n LEFT JOIN {1}.{0} b\n ON a.Air_ISCI = b.kt_creative_id\n WHERE Air_ISCI IS NOT NULL\n GROUP BY a.Air_ISCI, a.Cmml_Title, kt_creative_clean\n ORDER BY kt_creative_id\n \"\"\"\n .format(mapping_table, schema_name))\n df = vertica_extract(extract_query, ['kt_creative_id', 'kt_creative',\n 'kt_creative_clean'])\n unmapped_creatives = df.isnull().sum()['kt_creative_clean']\n if unmapped_creatives > 0:\n print('Some unmapped kt_creatives found')\n 
print('Acquiring process lock:', flag,\n 'so that the second part of RenTrak processing cannot proceed')\n set_lock(flag_table, schema_name, flag, 0)\n file_to_export = os.path.join(output_folder, output_file)\n df[df['kt_creative_clean'].isnull()].to_excel(file_to_export, index\n =False)\n subject = (\n 'RenTrak automated processing step 1: new kt_creatives need to be mapped'\n )\n body = notify_for_manual_mapping(output_file, flag)\n send_notification_email(ONSHORE_EMAIL_RECIPIENTS, subject, body,\n file_to_export)\n print('Notified the team to add manual mapping')\n os.remove(file_to_export)\n print('Deleted local file=>', file_to_export)\n else:\n print('Everything is mapped')\n print('Releasing process lock:', flag,\n 'so that the second part of RenTrak processing can proceed')\n set_lock(flag_table, schema_name, flag, 1)\n subject = (\n 'RenTrak automated processing step 1: kt_creatives are all mapped. Step 2 will automatically commence.'\n )\n body = notify_no_new_mapping_found()\n send_notification_email(ONSHORE_EMAIL_RECIPIENTS, subject, body)\n print(\n 'Notified the team that no further action on their part is required'\n )\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\ndef notify_for_manual_mapping(file, process_name):\n email_str = (\n \"\"\"\n <p>Python script extracted new creative names from KeepingTrac data.</p>\n <p>To run the rest of the RenTrak ETL process smoothly, please do the followings:\n <ol>\n <li>download the attached file, <b>{0}</b>, from this email</li>\n <li>fill up empty (nan/NULL) kt_creative mappings under column C (kt_creative_clean) in that file</b></li>\n <li>upload the modified file to the S3 location below\n <span style=\"color: red;\">(replace any file with the same name in the S3 folder, if any)</span>:<br>\n <b>diap.prod.us-east-1.target/RenTrak/CreativeCleaned</b>\n </li>\n <li>run this feed in DataVault: <b>InCampaign KT Creative Mappings</b></li>\n <li><span style=\"color: red;\">AFTER the DataVault feed successfully loaded the mappings</span>,\n run this SQL in Vertica backend: <br>\n <b>\n UPDATE gaintheory_us_targetusa_14.incampaign_process_switches\n SET run = 1\n WHERE process_name = '{1}';\n </b>\n </li>\n </ol>\n </p>\n <p><strong style=\"color: red;\">NOTE: If you forget a step or more above, the second part of RenTrak processing\n may not produce correct results.</strong></p>\n \"\"\"\n .format(file, process_name))\n return email_str\n\n\ndef notify_no_new_mapping_found():\n email_str = \"\"\"\n <p>Python script does not find any new creative names from keepingtrac data.\n Stage 2 of processing RenTrak data will begin when we load new data to RenTrak tables.\n </p>\n <p><b>No further action on your part is needed.</b></p>\n \"\"\"\n return email_str\n\n\ndef send_notification_email(recipients, subject, body, attachment=None):\n Mailer().send_email(recipients, subject, body, attachment)\n print('Notification email sent.')\n\n\ndef vertica_extract(query, columns, index=None):\n with vertica_python.connect(**conn_info) as connection:\n cur = connection.cursor()\n cur.execute(query)\n results = pd.DataFrame(cur.fetchall())\n results.columns = columns\n if index:\n return results.set_index(index)\n else:\n return results\n\n\ndef set_flag_value(table_name, schema_name, flag_name, value):\n return (\n \"\"\"\n UPDATE {1}.{0}\n SET run = {3}\n WHERE process_name = '{2}';\n COMMIT;\n \"\"\"\n .format(table_name, schema_name, flag_name, value))\n\n\ndef set_lock(table_name, schema_name, flag_name, value):\n with vertica_python.connect(**conn_info) as connection:\n cur = connection.cursor()\n cur.execute(set_flag_value(table_name, schema_name, flag_name, value))\n connection.commit()\n\n\ndef main():\n schema_name = 'gaintheory_us_targetusa_14'\n mapping_table = 'incampaign_kt_creative_mappings'\n flag_table = 'incampaign_process_switches'\n flag = 'rentrak_kt_creative_cleaned'\n output_folder = ROOT_FOLDER + 'RenTrak'\n output_file = 'kt_creative_cleaned.xlsx'\n if not os.path.exists(output_folder):\n print('Creating a new local folder for export file:', output_folder)\n os.makedirs(output_folder)\n extract_query = (\n \"\"\"\n SELECT Air_ISCI as kt_creative_id, Cmml_Title AS kt_creative, kt_creative_clean\n FROM {1}.incampaign_keepingtrac_all a\n LEFT JOIN {1}.{0} b\n ON a.Air_ISCI = b.kt_creative_id\n WHERE Air_ISCI IS NOT NULL\n GROUP BY a.Air_ISCI, a.Cmml_Title, kt_creative_clean\n ORDER BY kt_creative_id\n \"\"\"\n .format(mapping_table, schema_name))\n df = vertica_extract(extract_query, ['kt_creative_id', 'kt_creative',\n 'kt_creative_clean'])\n unmapped_creatives = df.isnull().sum()['kt_creative_clean']\n if unmapped_creatives > 0:\n print('Some unmapped kt_creatives found')\n 
print('Acquiring process lock:', flag,\n 'so that the second part of RenTrak processing cannot proceed')\n set_lock(flag_table, schema_name, flag, 0)\n file_to_export = os.path.join(output_folder, output_file)\n df[df['kt_creative_clean'].isnull()].to_excel(file_to_export, index\n =False)\n subject = (\n 'RenTrak automated processing step 1: new kt_creatives need to be mapped'\n )\n body = notify_for_manual_mapping(output_file, flag)\n send_notification_email(ONSHORE_EMAIL_RECIPIENTS, subject, body,\n file_to_export)\n print('Notified the team to add manual mapping')\n os.remove(file_to_export)\n print('Deleted local file=>', file_to_export)\n else:\n print('Everything is mapped')\n print('Releasing process lock:', flag,\n 'so that the second part of RenTrak processing can proceed')\n set_lock(flag_table, schema_name, flag, 1)\n subject = (\n 'RenTrak automated processing step 1: kt_creatives are all mapped. Step 2 will automatically commence.'\n )\n body = notify_no_new_mapping_found()\n send_notification_email(ONSHORE_EMAIL_RECIPIENTS, subject, body)\n print(\n 'Notified the team that no further action on their part is required'\n )\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "\"\"\"\r\nTODO: update description after everything (requirements) is (are) stable/concrete\r\nDescription: Script to extract KeepingTrac's creative names and send\r\nteam notification to start manual mapping as necessary.\r\n\r\nThis step must happen BEFORE the processing of deduping of RenTrak\r\ncreative names (step 2 in RenTrak processing).\r\n\"\"\"\r\nimport pandas as pd\r\n\r\nfrom mailer import Mailer\r\nfrom vertica_utils import *\r\nfrom s3_utils import *\r\n\r\n\r\ndef notify_for_manual_mapping(file, process_name):\r\n email_str = \"\"\"\r\n <p>Python script extracted new creative names from KeepingTrac data.</p>\r\n <p>To run the rest of the RenTrak ETL process smoothly, please do the followings:\r\n <ol>\r\n <li>download the attached file, <b>{0}</b>, from this email</li>\r\n <li>fill up empty (nan/NULL) kt_creative mappings under column C (kt_creative_clean) in that file</b></li>\r\n <li>upload the modified file to the S3 location below\r\n <span style=\"color: red;\">(replace any file with the same name in the S3 folder, if any)</span>:<br>\r\n <b>diap.prod.us-east-1.target/RenTrak/CreativeCleaned</b>\r\n </li>\r\n <li>run this feed in DataVault: <b>InCampaign KT Creative Mappings</b></li>\r\n <li><span style=\"color: red;\">AFTER the DataVault feed successfully loaded the mappings</span>,\r\n run this SQL in Vertica backend: <br>\r\n <b>\r\n UPDATE gaintheory_us_targetusa_14.incampaign_process_switches\r\n SET run = 1\r\n WHERE process_name = '{1}';\r\n </b>\r\n </li>\r\n </ol>\r\n </p>\r\n <p><strong style=\"color: red;\">NOTE: If you forget a step or more above, the second part of RenTrak processing\r\n may not produce correct results.</strong></p>\r\n \"\"\".format(file, process_name)\r\n return email_str\r\n\r\n\r\ndef notify_no_new_mapping_found():\r\n email_str = \"\"\"\r\n <p>Python script does not find any new creative names from keepingtrac data.\r\n Stage 2 of processing RenTrak data will begin when we load new data to RenTrak tables.\r\n </p>\r\n <p><b>No further action on your part is needed.</b></p>\r\n \"\"\"\r\n return email_str\r\n\r\n\r\ndef send_notification_email(recipients, subject, body, attachment=None):\r\n Mailer().send_email(recipients, subject, body, attachment)\r\n print(\"Notification email sent.\")\r\n\r\n\r\n# Function to extract data from vertica into a pandas dataframe\r\ndef vertica_extract(query, columns, index=None):\r\n with vertica_python.connect(**conn_info) as connection:\r\n cur = connection.cursor()\r\n cur.execute(query)\r\n results = pd.DataFrame(cur.fetchall())\r\n results.columns = columns\r\n if index:\r\n return results.set_index(index)\r\n else:\r\n return results\r\n\r\n\r\ndef set_flag_value(table_name, schema_name, flag_name, value):\r\n return \"\"\"\r\n UPDATE {1}.{0}\r\n SET run = {3}\r\n WHERE process_name = '{2}';\r\n COMMIT;\r\n \"\"\".format(table_name, schema_name, flag_name, value)\r\n\r\n\r\ndef set_lock(table_name, schema_name, flag_name, value):\r\n with vertica_python.connect(**conn_info) as connection:\r\n cur = connection.cursor()\r\n cur.execute(set_flag_value(table_name, schema_name, flag_name, value))\r\n connection.commit()\r\n\r\n\r\ndef main():\r\n # start_date = (today - datetime.timedelta(weeks=6, days=1)).strftime('%Y-%m-%d')\r\n schema_name = 'gaintheory_us_targetusa_14'\r\n mapping_table = 'incampaign_kt_creative_mappings'\r\n flag_table = 'incampaign_process_switches'\r\n flag = 'rentrak_kt_creative_cleaned'\r\n\r\n # Location of sources and destination files\r\n output_folder = 
ROOT_FOLDER + 'RenTrak'\r\n output_file = 'kt_creative_cleaned.xlsx'\r\n\r\n if not os.path.exists(output_folder):\r\n print(\"Creating a new local folder for export file:\", output_folder)\r\n os.makedirs(output_folder)\r\n\r\n # Step 1: Download all possible KT combinations and current matching cleaned creative names\r\n extract_query = \"\"\"\r\n SELECT Air_ISCI as kt_creative_id, Cmml_Title AS kt_creative, kt_creative_clean\r\n FROM {1}.incampaign_keepingtrac_all a\r\n LEFT JOIN {1}.{0} b\r\n ON a.Air_ISCI = b.kt_creative_id\r\n WHERE Air_ISCI IS NOT NULL\r\n GROUP BY a.Air_ISCI, a.Cmml_Title, kt_creative_clean\r\n ORDER BY kt_creative_id\r\n \"\"\".format(mapping_table, schema_name)\r\n\r\n df = vertica_extract(\r\n extract_query,\r\n ['kt_creative_id', 'kt_creative', 'kt_creative_clean']\r\n )\r\n unmapped_creatives = df.isnull().sum()['kt_creative_clean'] # remove blank cells\r\n\r\n if unmapped_creatives > 0:\r\n print(\"Some unmapped kt_creatives found\")\r\n print(\"Acquiring process lock:\", flag, \"so that the second part of RenTrak processing cannot proceed\")\r\n set_lock(flag_table, schema_name, flag, 0)\r\n\r\n file_to_export = os.path.join(output_folder, output_file)\r\n df[df['kt_creative_clean'].isnull()].to_excel(file_to_export, index=False)\r\n\r\n # Send email to tell the team to start manual mapping\r\n subject = \"RenTrak automated processing step 1: new kt_creatives need to be mapped\"\r\n body = notify_for_manual_mapping(output_file, flag)\r\n send_notification_email(ONSHORE_EMAIL_RECIPIENTS, subject, body, file_to_export)\r\n print(\"Notified the team to add manual mapping\")\r\n\r\n os.remove(file_to_export)\r\n print(\"Deleted local file=>\", file_to_export)\r\n\r\n else:\r\n print(\"Everything is mapped\")\r\n print(\"Releasing process lock:\", flag, \"so that the second part of RenTrak processing can proceed\")\r\n set_lock(flag_table, schema_name, flag, 1)\r\n\r\n # insert, set flag to 1 and send email notification about being cleaned\r\n subject = \"RenTrak automated processing step 1: kt_creatives are all mapped. Step 2 will automatically commence.\"\r\n body = notify_no_new_mapping_found()\r\n send_notification_email(ONSHORE_EMAIL_RECIPIENTS, subject, body)\r\n print(\"Notified the team that no further action on their part is required\")\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n",
"step-ids": [
4,
6,
7,
8,
10
]
}
|
[
4,
6,
7,
8,
10
] |
from .proxies import Proxies
from .roles import Roles
from .products import Products
from .resourcefiles import ResourceFiles
class Apigee(object):
"""Provides easy access to all endpoint classes
Args:
domain (str): Your Auth0 domain, e.g: 'username.auth0.com'
token (str): Management API v2 Token
"""
    def __init__(self, org_name, username, password, environment=None):
        self.proxies = Proxies(org_name, username, password)
        self.roles = Roles(org_name, username, password)
        self.products = Products(org_name, username, password)
        # ResourceFiles is environment-scoped; the environment name was
        # previously an undefined global, so it is now a constructor argument.
        self.resourcefiles = ResourceFiles(org_name, username, password,
                                           environment)
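
# A minimal usage sketch (hypothetical org name and credentials, not part of
# the original module):
#
#     apigee = Apigee('my-org', 'user@example.com', 's3cret', environment='test')
#     apigee.proxies        # Proxies endpoint wrapper
#     apigee.resourcefiles  # ResourceFiles wrapper for the 'test' environment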
|
normal
|
{
"blob_id": "656927013d9a0254e2bc4cdf05b7cfd5947feb05",
"index": 7868,
"step-1": "<mask token>\n\n\nclass Apigee(object):\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass Apigee(object):\n <mask token>\n\n def __init__(self, org_name, username, password):\n self.proxies = Proxies(org_name, username, password)\n self.roles = Roles(org_name, username, password)\n self.products = Products(org_name, username, password)\n self.resourcefiles = ResourceFiles(org_name, username, password,\n environment)\n",
"step-3": "<mask token>\n\n\nclass Apigee(object):\n \"\"\"Provides easy access to all endpoint classes\n\n Args:\n domain (str): Your Auth0 domain, e.g: 'username.auth0.com'\n\n token (str): Management API v2 Token\n \"\"\"\n\n def __init__(self, org_name, username, password):\n self.proxies = Proxies(org_name, username, password)\n self.roles = Roles(org_name, username, password)\n self.products = Products(org_name, username, password)\n self.resourcefiles = ResourceFiles(org_name, username, password,\n environment)\n",
"step-4": "from .proxies import Proxies\nfrom .roles import Roles\nfrom .products import Products\nfrom .resourcefiles import ResourceFiles\n\n\nclass Apigee(object):\n \"\"\"Provides easy access to all endpoint classes\n\n Args:\n domain (str): Your Auth0 domain, e.g: 'username.auth0.com'\n\n token (str): Management API v2 Token\n \"\"\"\n\n def __init__(self, org_name, username, password):\n self.proxies = Proxies(org_name, username, password)\n self.roles = Roles(org_name, username, password)\n self.products = Products(org_name, username, password)\n self.resourcefiles = ResourceFiles(org_name, username, password,\n environment)\n",
"step-5": null,
"step-ids": [
1,
2,
3,
4
]
}
|
[
1,
2,
3,
4
] |
from django.db import models

# Create your models here.
class Categoria(models.Model):
categoria = models.CharField(max_length=40)
def __str__(self):
return self.categoria
class Producto(models.Model):
codigo = models.CharField(max_length=40)
nombre = models.CharField(max_length=40)
precio = models.IntegerField()
stock = models.IntegerField()
descripcion = models.CharField(max_length=40)
categoria = models.ForeignKey(Categoria, on_delete=models.CASCADE)
fecha = models.DateField()
imagen = models.ImageField(null=True, blank=True)
def __str__(self):
return self.nombre
class Cliente(models.Model):
nombre = models.CharField(max_length=41)
paterno = models.CharField(max_length=40)
rut = models.CharField(max_length=9)
direccion = models.CharField(max_length=40)
telefono = models.IntegerField()
mail = models.CharField(max_length=100)
def __str__(self):
return self.nombre
class Usuario(models.Model):
mail = models.CharField(max_length=100)
contraseña = models.CharField(max_length=100)
rut = models.ForeignKey(Cliente, on_delete=models.CASCADE)
    def __str__(self):
        return str(self.rut)  # __str__ must return a string, not a Cliente instance
class DetalleVenta(models.Model):
tipo_comprovante = models.CharField(max_length=100)
serie_comprovante = models.CharField(max_length=7)
    fecha_comprovante = models.DateField()  # max_length is meaningless on a DateField
iva = models.IntegerField()
total = models.IntegerField()
def __str__(self):
return self.serie_comprovante
class Descuento(models.Model):
codigo_descuento = models.CharField(max_length=7)
valor_descuento = models.IntegerField()
def __str__(self):
return self.codigo_descuento
class Venta(models.Model):
descripcion = models.CharField(max_length=100)
total_venta = models.IntegerField()
    def __str__(self):
        return str(self.total_venta)  # __str__ must return a string, not an int
class Sucursal(models.Model):
direccion = models.CharField(max_length=100)
numero_sucursal = models.IntegerField()
    def __str__(self):
        return str(self.numero_sucursal)
class Comuna(models.Model):
direccion = models.CharField(max_length=100)
numero_comuna = models.IntegerField()
numero_sucursal = models.ForeignKey(Sucursal, on_delete=models.DO_NOTHING)
    def __str__(self):  # was a stray "def __" returning the Sucursal FK
        return str(self.numero_comuna)
class Region(models.Model):
direccion = models.CharField(max_length=100)
numero_region = models.IntegerField()
    numero_comuna = models.ForeignKey(Comuna, on_delete=models.DO_NOTHING)
    def __str__(self):
        return str(self.numero_region)
class Pedido(models.Model):
numero_pedido = models.IntegerField()
    fecha_pedido = models.DateField()
iva = models.IntegerField()
    def __str__(self):  # was __int__, which Django never calls for display
        return str(self.numero_pedido)
class Proveedores(models.Model):
nombre_proveedor = models.CharField(max_length=40)
direccion = models.CharField(max_length=40)
telefono = models.IntegerField()
mail = models.CharField(max_length=100)
numero_pedido = models.ForeignKey(Pedido, on_delete=models.DO_NOTHING)
def __str__(self):
return self.nombre_proveedor
class Suscripcion(models.Model):
    fecha_suscripcion = models.DateField()  # was missing the parentheses, so the field was never instantiated
valor_suscripcion = models.IntegerField()
suscrito = models.IntegerField()
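
# A minimal usage sketch (hypothetical data, not part of the original app),
# e.g. from "python manage.py shell":
#
#     bebidas = Categoria.objects.create(categoria='Bebidas')
#     Producto.objects.create(
#         codigo='P001', nombre='Jugo', precio=990, stock=10,
#         descripcion='Jugo natural', categoria=bebidas, fecha='2021-01-01')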
|
normal
|
{
"blob_id": "0e19d7251db3382c34ad2d38a7984b65325ecfbf",
"index": 7584,
"step-1": "<mask token>\n\n\nclass Descuento(models.Model):\n <mask token>\n <mask token>\n\n def __str__(self):\n return self.codigo_descuento\n\n\nclass Venta(models.Model):\n descripcion = models.CharField(max_length=100)\n total_venta = models.IntegerField()\n\n def __str__(self):\n return self.total_venta\n\n\nclass Sucursal(models.Model):\n direccion = models.CharField(max_length=100)\n numero_sucursal = models.IntegerField()\n\n def __str__(self):\n return self.numero_sucursal\n\n\nclass Comuna(models.Model):\n direccion = models.CharField(max_length=100)\n numero_comuna = models.IntegerField()\n numero_sucursal = models.ForeignKey(Sucursal, on_delete=models.DO_NOTHING)\n\n def __(self):\n return self.numero_sucursal\n\n\nclass Region(models.Model):\n direccion = models.CharField(max_length=100)\n numero_region = models.IntegerField()\n numero_comuna = models.ForeignKey(Comuna, on_delete=models.DO_NOTHING)\n\n def __str__(self):\n return self.numero_region\n\n\nclass Pedido(models.Model):\n numero_pedido = models.IntegerField()\n fecha_pedido = models.DateField(max_length=100)\n iva = models.IntegerField()\n\n def __int__(self):\n return self.numero_pedido\n\n\nclass Proveedores(models.Model):\n nombre_proveedor = models.CharField(max_length=40)\n direccion = models.CharField(max_length=40)\n telefono = models.IntegerField()\n mail = models.CharField(max_length=100)\n numero_pedido = models.ForeignKey(Pedido, on_delete=models.DO_NOTHING)\n\n def __str__(self):\n return self.nombre_proveedor\n\n\nclass Suscripcion(models.Model):\n fecha_suscripcion = models.DateField\n valor_suscripcion = models.IntegerField()\n suscrito = models.IntegerField()\n",
"step-2": "<mask token>\n\n\nclass Usuario(models.Model):\n mail = models.CharField(max_length=100)\n contraseña = models.CharField(max_length=100)\n rut = models.ForeignKey(Cliente, on_delete=models.CASCADE)\n\n def __str__(self):\n return self.rut\n\n\nclass DetalleVenta(models.Model):\n tipo_comprovante = models.CharField(max_length=100)\n serie_comprovante = models.CharField(max_length=7)\n fecha_comprovante = models.DateField(max_length=100)\n iva = models.IntegerField()\n total = models.IntegerField()\n\n def __str__(self):\n return self.serie_comprovante\n\n\nclass Descuento(models.Model):\n codigo_descuento = models.CharField(max_length=7)\n valor_descuento = models.IntegerField()\n\n def __str__(self):\n return self.codigo_descuento\n\n\nclass Venta(models.Model):\n descripcion = models.CharField(max_length=100)\n total_venta = models.IntegerField()\n\n def __str__(self):\n return self.total_venta\n\n\nclass Sucursal(models.Model):\n direccion = models.CharField(max_length=100)\n numero_sucursal = models.IntegerField()\n\n def __str__(self):\n return self.numero_sucursal\n\n\nclass Comuna(models.Model):\n direccion = models.CharField(max_length=100)\n numero_comuna = models.IntegerField()\n numero_sucursal = models.ForeignKey(Sucursal, on_delete=models.DO_NOTHING)\n\n def __(self):\n return self.numero_sucursal\n\n\nclass Region(models.Model):\n direccion = models.CharField(max_length=100)\n numero_region = models.IntegerField()\n numero_comuna = models.ForeignKey(Comuna, on_delete=models.DO_NOTHING)\n\n def __str__(self):\n return self.numero_region\n\n\nclass Pedido(models.Model):\n numero_pedido = models.IntegerField()\n fecha_pedido = models.DateField(max_length=100)\n iva = models.IntegerField()\n\n def __int__(self):\n return self.numero_pedido\n\n\nclass Proveedores(models.Model):\n nombre_proveedor = models.CharField(max_length=40)\n direccion = models.CharField(max_length=40)\n telefono = models.IntegerField()\n mail = models.CharField(max_length=100)\n numero_pedido = models.ForeignKey(Pedido, on_delete=models.DO_NOTHING)\n\n def __str__(self):\n return self.nombre_proveedor\n\n\nclass Suscripcion(models.Model):\n fecha_suscripcion = models.DateField\n valor_suscripcion = models.IntegerField()\n suscrito = models.IntegerField()\n",
"step-3": "<mask token>\n\n\nclass Producto(models.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\nclass Cliente(models.Model):\n nombre = models.CharField(max_length=41)\n paterno = models.CharField(max_length=40)\n rut = models.CharField(max_length=9)\n direccion = models.CharField(max_length=40)\n telefono = models.IntegerField()\n mail = models.CharField(max_length=100)\n\n def __str__(self):\n return self.nombre\n\n\nclass Usuario(models.Model):\n mail = models.CharField(max_length=100)\n contraseña = models.CharField(max_length=100)\n rut = models.ForeignKey(Cliente, on_delete=models.CASCADE)\n\n def __str__(self):\n return self.rut\n\n\nclass DetalleVenta(models.Model):\n tipo_comprovante = models.CharField(max_length=100)\n serie_comprovante = models.CharField(max_length=7)\n fecha_comprovante = models.DateField(max_length=100)\n iva = models.IntegerField()\n total = models.IntegerField()\n\n def __str__(self):\n return self.serie_comprovante\n\n\nclass Descuento(models.Model):\n codigo_descuento = models.CharField(max_length=7)\n valor_descuento = models.IntegerField()\n\n def __str__(self):\n return self.codigo_descuento\n\n\nclass Venta(models.Model):\n descripcion = models.CharField(max_length=100)\n total_venta = models.IntegerField()\n\n def __str__(self):\n return self.total_venta\n\n\nclass Sucursal(models.Model):\n direccion = models.CharField(max_length=100)\n numero_sucursal = models.IntegerField()\n\n def __str__(self):\n return self.numero_sucursal\n\n\nclass Comuna(models.Model):\n direccion = models.CharField(max_length=100)\n numero_comuna = models.IntegerField()\n numero_sucursal = models.ForeignKey(Sucursal, on_delete=models.DO_NOTHING)\n\n def __(self):\n return self.numero_sucursal\n\n\nclass Region(models.Model):\n direccion = models.CharField(max_length=100)\n numero_region = models.IntegerField()\n numero_comuna = models.ForeignKey(Comuna, on_delete=models.DO_NOTHING)\n\n def __str__(self):\n return self.numero_region\n\n\nclass Pedido(models.Model):\n numero_pedido = models.IntegerField()\n fecha_pedido = models.DateField(max_length=100)\n iva = models.IntegerField()\n\n def __int__(self):\n return self.numero_pedido\n\n\nclass Proveedores(models.Model):\n nombre_proveedor = models.CharField(max_length=40)\n direccion = models.CharField(max_length=40)\n telefono = models.IntegerField()\n mail = models.CharField(max_length=100)\n numero_pedido = models.ForeignKey(Pedido, on_delete=models.DO_NOTHING)\n\n def __str__(self):\n return self.nombre_proveedor\n\n\nclass Suscripcion(models.Model):\n fecha_suscripcion = models.DateField\n valor_suscripcion = models.IntegerField()\n suscrito = models.IntegerField()\n",
"step-4": "<mask token>\n\n\nclass Producto(models.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def __str__(self):\n return self.nombre\n\n\nclass Cliente(models.Model):\n nombre = models.CharField(max_length=41)\n paterno = models.CharField(max_length=40)\n rut = models.CharField(max_length=9)\n direccion = models.CharField(max_length=40)\n telefono = models.IntegerField()\n mail = models.CharField(max_length=100)\n\n def __str__(self):\n return self.nombre\n\n\nclass Usuario(models.Model):\n mail = models.CharField(max_length=100)\n contraseña = models.CharField(max_length=100)\n rut = models.ForeignKey(Cliente, on_delete=models.CASCADE)\n\n def __str__(self):\n return self.rut\n\n\nclass DetalleVenta(models.Model):\n tipo_comprovante = models.CharField(max_length=100)\n serie_comprovante = models.CharField(max_length=7)\n fecha_comprovante = models.DateField(max_length=100)\n iva = models.IntegerField()\n total = models.IntegerField()\n\n def __str__(self):\n return self.serie_comprovante\n\n\nclass Descuento(models.Model):\n codigo_descuento = models.CharField(max_length=7)\n valor_descuento = models.IntegerField()\n\n def __str__(self):\n return self.codigo_descuento\n\n\nclass Venta(models.Model):\n descripcion = models.CharField(max_length=100)\n total_venta = models.IntegerField()\n\n def __str__(self):\n return self.total_venta\n\n\nclass Sucursal(models.Model):\n direccion = models.CharField(max_length=100)\n numero_sucursal = models.IntegerField()\n\n def __str__(self):\n return self.numero_sucursal\n\n\nclass Comuna(models.Model):\n direccion = models.CharField(max_length=100)\n numero_comuna = models.IntegerField()\n numero_sucursal = models.ForeignKey(Sucursal, on_delete=models.DO_NOTHING)\n\n def __(self):\n return self.numero_sucursal\n\n\nclass Region(models.Model):\n direccion = models.CharField(max_length=100)\n numero_region = models.IntegerField()\n numero_comuna = models.ForeignKey(Comuna, on_delete=models.DO_NOTHING)\n\n def __str__(self):\n return self.numero_region\n\n\nclass Pedido(models.Model):\n numero_pedido = models.IntegerField()\n fecha_pedido = models.DateField(max_length=100)\n iva = models.IntegerField()\n\n def __int__(self):\n return self.numero_pedido\n\n\nclass Proveedores(models.Model):\n nombre_proveedor = models.CharField(max_length=40)\n direccion = models.CharField(max_length=40)\n telefono = models.IntegerField()\n mail = models.CharField(max_length=100)\n numero_pedido = models.ForeignKey(Pedido, on_delete=models.DO_NOTHING)\n\n def __str__(self):\n return self.nombre_proveedor\n\n\nclass Suscripcion(models.Model):\n fecha_suscripcion = models.DateField\n valor_suscripcion = models.IntegerField()\n suscrito = models.IntegerField()\n",
"step-5": "from django.db import models\nfrom django.db.models.base import Model\n\n# Create your models here.\nclass Categoria(models.Model):\n categoria = models.CharField(max_length=40)\n def __str__(self):\n return self.categoria\n\nclass Producto(models.Model):\n codigo = models.CharField(max_length=40)\n nombre = models.CharField(max_length=40)\n precio = models.IntegerField()\n stock = models.IntegerField()\n descripcion = models.CharField(max_length=40)\n categoria = models.ForeignKey(Categoria, on_delete=models.CASCADE)\n fecha = models.DateField()\n imagen = models.ImageField(null=True, blank=True)\n\n\n\n def __str__(self):\n return self.nombre\n\nclass Cliente(models.Model):\n nombre = models.CharField(max_length=41)\n paterno = models.CharField(max_length=40)\n rut = models.CharField(max_length=9)\n direccion = models.CharField(max_length=40)\n telefono = models.IntegerField()\n mail = models.CharField(max_length=100)\n\n def __str__(self):\n return self.nombre\n\nclass Usuario(models.Model):\n mail = models.CharField(max_length=100)\n contraseña = models.CharField(max_length=100)\n rut = models.ForeignKey(Cliente, on_delete=models.CASCADE)\n\n def __str__(self):\n return self.rut\n\nclass DetalleVenta(models.Model):\n tipo_comprovante = models.CharField(max_length=100)\n serie_comprovante = models.CharField(max_length=7)\n fecha_comprovante = models.DateField(max_length=100)\n iva = models.IntegerField()\n total = models.IntegerField()\n\n def __str__(self):\n return self.serie_comprovante\n \nclass Descuento(models.Model):\n codigo_descuento = models.CharField(max_length=7)\n valor_descuento = models.IntegerField()\n\n def __str__(self):\n return self.codigo_descuento\n\nclass Venta(models.Model):\n descripcion = models.CharField(max_length=100)\n total_venta = models.IntegerField()\n\n def __str__(self):\n return self.total_venta\n\nclass Sucursal(models.Model):\n direccion = models.CharField(max_length=100)\n numero_sucursal = models.IntegerField()\n\n def __str__(self):\n return self.numero_sucursal\n\nclass Comuna(models.Model):\n direccion = models.CharField(max_length=100)\n numero_comuna = models.IntegerField()\n numero_sucursal = models.ForeignKey(Sucursal, on_delete=models.DO_NOTHING)\n \n def __ (self):\n return self.numero_sucursal\n\nclass Region(models.Model):\n direccion = models.CharField(max_length=100)\n numero_region = models.IntegerField()\n numero_comuna= models.ForeignKey(Comuna,on_delete=models.DO_NOTHING)\n\n def __str__(self):\n return self.numero_region\n\nclass Pedido(models.Model):\n numero_pedido = models.IntegerField()\n fecha_pedido = models.DateField(max_length=100)\n iva = models.IntegerField()\n\n def __int__(self):\n return self.numero_pedido\n\n\nclass Proveedores(models.Model):\n nombre_proveedor = models.CharField(max_length=40)\n direccion = models.CharField(max_length=40)\n telefono = models.IntegerField()\n mail = models.CharField(max_length=100)\n numero_pedido = models.ForeignKey(Pedido, on_delete=models.DO_NOTHING)\n \n def __str__(self):\n return self.nombre_proveedor\n\nclass Suscripcion(models.Model):\n fecha_suscripcion = models.DateField\n valor_suscripcion = models.IntegerField()\n suscrito = models.IntegerField()",
"step-ids": [
22,
29,
33,
34,
40
]
}
|
[
22,
29,
33,
34,
40
] |
from typing import List


class Solution:
def findAndReplacePattern(self, words: List[str], pattern: str) -> List[str]:
def convert(word):
table = {}
count, converted = 0, ''
for w in word:
if w in table:
converted += table[w]
else:
converted += str(count)
table[w] = str(count)
count += 1
return converted
p = convert(pattern)
answer = []
for word in words:
if p == convert(word):
answer.append(word)
return answer
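
# Example (the classic LeetCode 890 case; illustrative, not part of the
# original submission):
#
#     Solution().findAndReplacePattern(
#         ["abc", "deq", "mee", "aqq", "dkd", "ccc"], "abb")
#     # -> ["mee", "aqq"]  (convert maps "abb", "mee" and "aqq" all to "011")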
"""
[빠른 풀이]
- zip을 이용해서 길이만 비교!!!
class Solution:
def findAndReplacePattern(self, w: List[str], p: str) -> List[str]:
return [i for i in w if len(set(zip(p,i)))==len(set(p))==len(set(i))]
"""
|
normal
|
{
"blob_id": "e9ea48dec40e75f2fc73f8dcb3b5b975065cf8af",
"index": 5854,
"step-1": "<mask token>\n",
"step-2": "class Solution:\n <mask token>\n\n\n<mask token>\n",
"step-3": "class Solution:\n\n def findAndReplacePattern(self, words: List[str], pattern: str) ->List[str\n ]:\n\n def convert(word):\n table = {}\n count, converted = 0, ''\n for w in word:\n if w in table:\n converted += table[w]\n else:\n converted += str(count)\n table[w] = str(count)\n count += 1\n return converted\n p = convert(pattern)\n answer = []\n for word in words:\n if p == convert(word):\n answer.append(word)\n return answer\n\n\n<mask token>\n",
"step-4": "class Solution:\n def findAndReplacePattern(self, words: List[str], pattern: str) -> List[str]:\n def convert(word):\n table = {}\n count, converted = 0, ''\n \n for w in word:\n if w in table:\n converted += table[w]\n else:\n converted += str(count)\n table[w] = str(count)\n count += 1\n return converted\n \n p = convert(pattern)\n answer = []\n for word in words:\n if p == convert(word):\n answer.append(word)\n \n return answer\n\n\"\"\"\n[빠른 풀이]\n- zip을 이용해서 길이만 비교!!!\n\nclass Solution:\n def findAndReplacePattern(self, w: List[str], p: str) -> List[str]:\n\t\t\t\t\treturn [i for i in w if len(set(zip(p,i)))==len(set(p))==len(set(i))]\n\"\"\"",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
#
# Copyright 2021 Splunk Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Concurrent executor provides functions for running work concurrently in
either a thread pool or a process pool
"""
import splunktalib.concurrent.process_pool as pp
import splunktalib.concurrent.thread_pool as tp
class ConcurrentExecutor:
def __init__(self, config):
"""
:param config: dict like object, contains thread_min_size (int),
thread_max_size (int), daemonize_thread (bool),
process_size (int)
"""
self._io_executor = tp.ThreadPool(
config.get("thread_min_size", 0),
config.get("thread_max_size", 0),
config.get("task_queue_size", 1024),
config.get("daemonize_thread", True),
)
self._compute_executor = None
if config.get("process_size", 0):
self._compute_executor = pp.ProcessPool(config.get("process_size", 0))
def start(self):
self._io_executor.start()
def tear_down(self):
self._io_executor.tear_down()
if self._compute_executor is not None:
self._compute_executor.tear_down()
def run_io_func_sync(self, func, args=(), kwargs=None):
"""
:param func: callable
:param args: free params
:param kwargs: named params
:return whatever the func returns
"""
return self._io_executor.apply(func, args, kwargs)
    def run_io_func_async(self, func, args=(), kwargs=None, callback=None):
        """
        :param func: callable
        :param args: free params
        :param kwargs: named params
        :param callback: called when func completes without raising an exception
        :return whatever the func returns
        """
        return self._io_executor.apply_async(func, args, kwargs, callback)
def enqueue_io_funcs(self, funcs, block=True):
"""
run jobs in a fire and forget way, no result will be handled
over to clients
:param funcs: tuple/list-like or generator like object, func shall be
callable
"""
return self._io_executor.enqueue_funcs(funcs, block)
def run_compute_func_sync(self, func, args=(), kwargs={}):
"""
:param func: callable
:param args: free params
:param kwargs: named params
:return whatever the func returns
"""
assert self._compute_executor is not None
return self._compute_executor.apply(func, args, kwargs)
def run_compute_func_async(self, func, args=(), kwargs={}, callback=None):
"""
:param func: callable
:param args: free params
:param kwargs: named params
        :callback: when func is done and without exception, call the callback
:return whatever the func returns
"""
assert self._compute_executor is not None
return self._compute_executor.apply_async(func, args, kwargs, callback)
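# Minimal usage sketch (illustration only; assumes splunktalib's ThreadPool /
# ProcessPool are installed and behave as the thin wrappers above suggest,
# with `apply` returning the function's result as the docstrings state). The
# config keys are exactly the ones read in __init__.
if __name__ == "__main__":
    executor = ConcurrentExecutor({"thread_min_size": 1, "thread_max_size": 2})
    executor.start()
    print(executor.run_io_func_sync(sum, ([1, 2, 3],)))  # expected: 6
    executor.tear_down()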
|
normal
|
{
"blob_id": "24b1afb18e1cfdc8d5a62f5ee0147b2d73bc10d8",
"index": 7492,
"step-1": "<mask token>\n\n\nclass ConcurrentExecutor:\n <mask token>\n <mask token>\n <mask token>\n\n def run_io_func_sync(self, func, args=(), kwargs=None):\n \"\"\"\n :param func: callable\n :param args: free params\n :param kwargs: named params\n :return whatever the func returns\n \"\"\"\n return self._io_executor.apply(func, args, kwargs)\n <mask token>\n <mask token>\n <mask token>\n\n def run_compute_func_async(self, func, args=(), kwargs={}, callback=None):\n \"\"\"\n :param func: callable\n :param args: free params\n :param kwargs: named params\n :calllback: when func is done and without exception, call the callback\n :return whatever the func returns\n \"\"\"\n assert self._compute_executor is not None\n return self._compute_executor.apply_async(func, args, kwargs, callback)\n",
"step-2": "<mask token>\n\n\nclass ConcurrentExecutor:\n <mask token>\n\n def start(self):\n self._io_executor.start()\n <mask token>\n\n def run_io_func_sync(self, func, args=(), kwargs=None):\n \"\"\"\n :param func: callable\n :param args: free params\n :param kwargs: named params\n :return whatever the func returns\n \"\"\"\n return self._io_executor.apply(func, args, kwargs)\n <mask token>\n\n def enqueue_io_funcs(self, funcs, block=True):\n \"\"\"\n run jobs in a fire and forget way, no result will be handled\n over to clients\n :param funcs: tuple/list-like or generator like object, func shall be\n callable\n \"\"\"\n return self._io_executor.enqueue_funcs(funcs, block)\n\n def run_compute_func_sync(self, func, args=(), kwargs={}):\n \"\"\"\n :param func: callable\n :param args: free params\n :param kwargs: named params\n :return whatever the func returns\n \"\"\"\n assert self._compute_executor is not None\n return self._compute_executor.apply(func, args, kwargs)\n\n def run_compute_func_async(self, func, args=(), kwargs={}, callback=None):\n \"\"\"\n :param func: callable\n :param args: free params\n :param kwargs: named params\n :calllback: when func is done and without exception, call the callback\n :return whatever the func returns\n \"\"\"\n assert self._compute_executor is not None\n return self._compute_executor.apply_async(func, args, kwargs, callback)\n",
"step-3": "<mask token>\n\n\nclass ConcurrentExecutor:\n\n def __init__(self, config):\n \"\"\"\n :param config: dict like object, contains thread_min_size (int),\n thread_max_size (int), daemonize_thread (bool),\n process_size (int)\n \"\"\"\n self._io_executor = tp.ThreadPool(config.get('thread_min_size', 0),\n config.get('thread_max_size', 0), config.get('task_queue_size',\n 1024), config.get('daemonize_thread', True))\n self._compute_executor = None\n if config.get('process_size', 0):\n self._compute_executor = pp.ProcessPool(config.get(\n 'process_size', 0))\n\n def start(self):\n self._io_executor.start()\n\n def tear_down(self):\n self._io_executor.tear_down()\n if self._compute_executor is not None:\n self._compute_executor.tear_down()\n\n def run_io_func_sync(self, func, args=(), kwargs=None):\n \"\"\"\n :param func: callable\n :param args: free params\n :param kwargs: named params\n :return whatever the func returns\n \"\"\"\n return self._io_executor.apply(func, args, kwargs)\n <mask token>\n\n def enqueue_io_funcs(self, funcs, block=True):\n \"\"\"\n run jobs in a fire and forget way, no result will be handled\n over to clients\n :param funcs: tuple/list-like or generator like object, func shall be\n callable\n \"\"\"\n return self._io_executor.enqueue_funcs(funcs, block)\n\n def run_compute_func_sync(self, func, args=(), kwargs={}):\n \"\"\"\n :param func: callable\n :param args: free params\n :param kwargs: named params\n :return whatever the func returns\n \"\"\"\n assert self._compute_executor is not None\n return self._compute_executor.apply(func, args, kwargs)\n\n def run_compute_func_async(self, func, args=(), kwargs={}, callback=None):\n \"\"\"\n :param func: callable\n :param args: free params\n :param kwargs: named params\n :calllback: when func is done and without exception, call the callback\n :return whatever the func returns\n \"\"\"\n assert self._compute_executor is not None\n return self._compute_executor.apply_async(func, args, kwargs, callback)\n",
"step-4": "<mask token>\nimport splunktalib.concurrent.process_pool as pp\nimport splunktalib.concurrent.thread_pool as tp\n\n\nclass ConcurrentExecutor:\n\n def __init__(self, config):\n \"\"\"\n :param config: dict like object, contains thread_min_size (int),\n thread_max_size (int), daemonize_thread (bool),\n process_size (int)\n \"\"\"\n self._io_executor = tp.ThreadPool(config.get('thread_min_size', 0),\n config.get('thread_max_size', 0), config.get('task_queue_size',\n 1024), config.get('daemonize_thread', True))\n self._compute_executor = None\n if config.get('process_size', 0):\n self._compute_executor = pp.ProcessPool(config.get(\n 'process_size', 0))\n\n def start(self):\n self._io_executor.start()\n\n def tear_down(self):\n self._io_executor.tear_down()\n if self._compute_executor is not None:\n self._compute_executor.tear_down()\n\n def run_io_func_sync(self, func, args=(), kwargs=None):\n \"\"\"\n :param func: callable\n :param args: free params\n :param kwargs: named params\n :return whatever the func returns\n \"\"\"\n return self._io_executor.apply(func, args, kwargs)\n\n def run_io_func_async(self, func, args=(), kwargs=None, callback=None):\n \"\"\"\n :param func: callable\n :param args: free params\n :param kwargs: named params\n :calllback: when func is done and without exception, call the callback\n :return whatever the func returns\n \"\"\"\n return self._io_executor.apply_async(func, args, kwargs, callback)\n\n def enqueue_io_funcs(self, funcs, block=True):\n \"\"\"\n run jobs in a fire and forget way, no result will be handled\n over to clients\n :param funcs: tuple/list-like or generator like object, func shall be\n callable\n \"\"\"\n return self._io_executor.enqueue_funcs(funcs, block)\n\n def run_compute_func_sync(self, func, args=(), kwargs={}):\n \"\"\"\n :param func: callable\n :param args: free params\n :param kwargs: named params\n :return whatever the func returns\n \"\"\"\n assert self._compute_executor is not None\n return self._compute_executor.apply(func, args, kwargs)\n\n def run_compute_func_async(self, func, args=(), kwargs={}, callback=None):\n \"\"\"\n :param func: callable\n :param args: free params\n :param kwargs: named params\n :calllback: when func is done and without exception, call the callback\n :return whatever the func returns\n \"\"\"\n assert self._compute_executor is not None\n return self._compute_executor.apply_async(func, args, kwargs, callback)\n",
"step-5": "#\n# Copyright 2021 Splunk Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n\"\"\"\nConcurrent executor provides concurrent executing function either in\na thread pool or a process pool\n\"\"\"\n\nimport splunktalib.concurrent.process_pool as pp\nimport splunktalib.concurrent.thread_pool as tp\n\n\nclass ConcurrentExecutor:\n def __init__(self, config):\n \"\"\"\n :param config: dict like object, contains thread_min_size (int),\n thread_max_size (int), daemonize_thread (bool),\n process_size (int)\n \"\"\"\n\n self._io_executor = tp.ThreadPool(\n config.get(\"thread_min_size\", 0),\n config.get(\"thread_max_size\", 0),\n config.get(\"task_queue_size\", 1024),\n config.get(\"daemonize_thread\", True),\n )\n self._compute_executor = None\n if config.get(\"process_size\", 0):\n self._compute_executor = pp.ProcessPool(config.get(\"process_size\", 0))\n\n def start(self):\n self._io_executor.start()\n\n def tear_down(self):\n self._io_executor.tear_down()\n if self._compute_executor is not None:\n self._compute_executor.tear_down()\n\n def run_io_func_sync(self, func, args=(), kwargs=None):\n \"\"\"\n :param func: callable\n :param args: free params\n :param kwargs: named params\n :return whatever the func returns\n \"\"\"\n\n return self._io_executor.apply(func, args, kwargs)\n\n def run_io_func_async(self, func, args=(), kwargs=None, callback=None):\n \"\"\"\n :param func: callable\n :param args: free params\n :param kwargs: named params\n :calllback: when func is done and without exception, call the callback\n :return whatever the func returns\n \"\"\"\n\n return self._io_executor.apply_async(func, args, kwargs, callback)\n\n def enqueue_io_funcs(self, funcs, block=True):\n \"\"\"\n run jobs in a fire and forget way, no result will be handled\n over to clients\n :param funcs: tuple/list-like or generator like object, func shall be\n callable\n \"\"\"\n\n return self._io_executor.enqueue_funcs(funcs, block)\n\n def run_compute_func_sync(self, func, args=(), kwargs={}):\n \"\"\"\n :param func: callable\n :param args: free params\n :param kwargs: named params\n :return whatever the func returns\n \"\"\"\n\n assert self._compute_executor is not None\n return self._compute_executor.apply(func, args, kwargs)\n\n def run_compute_func_async(self, func, args=(), kwargs={}, callback=None):\n \"\"\"\n :param func: callable\n :param args: free params\n :param kwargs: named params\n :calllback: when func is done and without exception, call the callback\n :return whatever the func returns\n \"\"\"\n\n assert self._compute_executor is not None\n return self._compute_executor.apply_async(func, args, kwargs, callback)\n",
"step-ids": [
3,
6,
8,
10,
11
]
}
|
[
3,
6,
8,
10,
11
] |
from django.db import models
from private_storage.fields import PrivateFileField
class PrivateFile(models.Model):
title = models.CharField("Title", max_length=200)
file = PrivateFileField("File")
class PrivateFile2(models.Model):
title = models.CharField("Title", max_length=200)
file = models.FileField("File")
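# Hedged configuration sketch: django-private-storage serves PrivateFileField
# uploads from a non-public location configured in settings.py. The setting
# names below follow that package's README and should be verified against the
# installed version; the path is invented for illustration.
#
#   PRIVATE_STORAGE_ROOT = '/srv/private-media/'
#   PRIVATE_STORAGE_AUTH_FUNCTION = 'private_storage.permissions.allow_staff'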
|
normal
|
{
"blob_id": "e12c397ca1ae91ce314cda5fe2cd8e0ec4cfa861",
"index": 2199,
"step-1": "<mask token>\n\n\nclass PrivateFile2(models.Model):\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass PrivateFile(models.Model):\n <mask token>\n <mask token>\n\n\nclass PrivateFile2(models.Model):\n title = models.CharField('Title', max_length=200)\n file = models.FileField('File')\n",
"step-3": "<mask token>\n\n\nclass PrivateFile(models.Model):\n title = models.CharField('Title', max_length=200)\n file = PrivateFileField('File')\n\n\nclass PrivateFile2(models.Model):\n title = models.CharField('Title', max_length=200)\n file = models.FileField('File')\n",
"step-4": "from django.db import models\nfrom private_storage.fields import PrivateFileField\n\n\nclass PrivateFile(models.Model):\n title = models.CharField('Title', max_length=200)\n file = PrivateFileField('File')\n\n\nclass PrivateFile2(models.Model):\n title = models.CharField('Title', max_length=200)\n file = models.FileField('File')\n",
"step-5": "from django.db import models\nfrom private_storage.fields import PrivateFileField\n\n\nclass PrivateFile(models.Model):\n title = models.CharField(\"Title\", max_length=200)\n file = PrivateFileField(\"File\")\n\n\nclass PrivateFile2(models.Model):\n title = models.CharField(\"Title\", max_length=200)\n file = models.FileField(\"File\")\n",
"step-ids": [
1,
3,
4,
5,
6
]
}
|
[
1,
3,
4,
5,
6
] |
#########################################################################
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#########################################################################
## Markdown has these types of paragraph: heading, text, list item (bullet or numbered),
## codeblock, table, and block quote.
##
## This script fixes up differences in Markdown dialect, between Github-MD and doxia-markdown.
## Specifically, it fixes these problems:
## 1. In Github-MD, bullets and codeblock starts are self-delimiting. In doxia-markdown, they
## must be separated from preceding text or (in the case of codeblocks) bullets, by a blank line.
## Failure to do so causes the bullet or codeblock delimiter to be interpreted as ordinary text,
## and the content gets munched into the preceding paragraph. The codeblock delimiter (```) as text
## gets interpreted as a codephrase delimiter (`) plus a preceding or following empty codephrase (``).
## 2. Github-MD is liberal in regard to what an 'indent' is, allowing 1, 2, 4, or 8 blanks, or
## a tab. We mostly use 2 blanks. Doxia-markdown requires strictly 4 spaces or a tab. Failure
## to adhere to this requirement causes indents to be ignored or misinterpreted, leading again to
## paragraph munching and delimiter ignoring.
## 3. In Doxia-markdown, if you indent below a header or text paragraph, it is interpreted as
## an implicit codeblock start. In Github-MD, we only start codeblocks with the explicit
## codeblock delimiter (```) and sometimes indent below text just for visual emphasis, so the
## doxia-markdown interpretation is unwelcome. Thus, in our rewrite, we disallow indenting below
## text or headers. This may make the text less pretty than the Github-MD presentation, but it
## avoids the incorrect codeblocking.
## 4. In Doxia-markdown, the indent of the end-codeblock delimiter must match that of the
## begin-codeblock delimiter, or it won't be recognized and the codeblock will run on.
## 5. Relative links need to be re-written. '.md' files need to be changed to '.html', and
## as best we can we will re-write named anchors referring to tags autogenerated from headers.
## The problem with generated tags is that Github-MD forces header text to lower-case, and
## replaces blank spaces with hyphens, while doxia-markdown leaves case unchanged, and replaces
## blanks with underscores. Fortunately we seem to have a culture of using link references that
## are typographically the same as the header text, so we have some basis for fixing most links.
## 6. H1 headers don't get named anchors generated, unlike H2 and lower headers. Don't know
## why doxia-markdown has this deficiency, perhaps it assumes H1 will only be used once at the
## beginning of the doc. We will insert an explicit anchor just before the H1 headers, to fix.
##
## So far, we're ignoring tables and block quotes.
##
## This script also manages the re-writing of named files to *.tmp, then mv to replace the original file.
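## A hedged before/after illustration of fixes #1 and #2 (sample text is
## invented, not from any real doc). Github-MD accepts
##     Some text
##       * bullet indented by 2
## but doxia-markdown needs a separating blank line and indents in strict
## multiples of 4, so this script emits
##     Some text
##
##     * bullet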
import sys
import os
import inspect
import re
# These are the characters excluded by Markdown from use in auto-generated anchor text for Headings.
EXCLUDED_CHARS_REGEX_GHM = r'[^\w\-]' # all non-alphanumerics except "-" and "_". Whitespace are previously converted.
EXCLUDED_CHARS_REGEX_DOX = r'[^\w\.\-]' # all non-alphanumerics except "-", "_", and ".". Whitespace are previously converted.
def report_error(s) :
print >>sys.stderr, "ERROR: " + s
print >>sys.stderr, "on line: " + str(FNR) + " in file: " + FILENAME
print >>sys.stderr, inputline
exit(1)
def trace(msg) :
if TRACE :
print >>sys.stderr, "TRACE: " + inspect.currentframe().f_back.f_code.co_name + " : InputLine " + str(FNR) + " : " + msg
class INDENT_STACK :
'This class maintains the indent stack during doc parsing.'
def __init__(self) :
self.my_stack = [ {'physical' : 0, 'logical' : 0, 'type' : 'none' } ]
def init_indent(self) :
del self.my_stack
self.my_stack = [ {'physical' : 0, 'logical' : 0, 'type' : 'none' } ]
def push_indent(self, n, new_type) :
#Increment the logical depth only if under a bullet type. This fixes problem #3.
level = self.logical_indent_level() + (self.current_type() == "bullet") # plus 1 if true
self.my_stack.append( {'physical':n, 'logical':level, 'type':new_type} )
def set_current_type(self, new_type) :
# adjust topmost type
self.my_stack[-1]['type'] = new_type
def pop_indent(self) :
if len(self.my_stack) > 1 :
return self.my_stack.pop()['physical']
else :
return 0
def current_indent(self) :
# top of stack, physical
return self.my_stack[-1]['physical']
def logical_indent_level(self) :
# top of stack, logical
return self.my_stack[-1]['logical']
def current_type(self) :
# top of stack, type
return self.my_stack[-1]['type']
## End class INDENT_STACK
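# Hedged behavior sketch (mirrors the fix for problem #3): pushing an indent
# whose parent is a bullet bumps the logical level; pushing under text or
# 'none' does not.
#   s = INDENT_STACK()
#   s.push_indent(2, "bullet")   # logical level stays 0
#   s.push_indent(4, "text")     # logical level becomes 1 (parent is bullet)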
global indent_stack
indent_stack = INDENT_STACK() # single instance
def convert_tabs(s) :
# Courtesy of Python, this does a real column-aware tab expansion.
# If this doesn't work, we'll need to go back to erroring on " \t", that is, spaces followed by tabs.
trace("orig length {0}".format(len(s)) )
ct = s.count("\t")
s = s.expandtabs(4)
trace("after {0} tab substitutions, end length is {1}".format(ct, len(s)) )
return s
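# Hedged examples of the expansion above: convert_tabs("\t\t") yields eight
# spaces, and convert_tabs("  \t") yields four, because expandtabs(4) pads to
# the next 4-column tab stop rather than substituting a fixed string.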
def fix_prefix_blanks(new_type) :
global inputline
# Fix up the indenting (prefix blanks) in inputline. This fixes problem #2.
# Don't worry about blank lines here, they are filtered out before calling this method.
# Both uses and maintains the indent stack, which is why we need the new_type passed in.
prefix_blanks = re.search(r'^[\s]*', inputline)
if prefix_blanks :
prefix_blanks = prefix_blanks.group()
trace("After prefix-blanks match, prefix_blanks is |" + prefix_blanks + "| length is " + str(len(prefix_blanks)) )
prefix_blanks = convert_tabs(prefix_blanks)
else :
prefix_blanks = ""
trace("After convert_tabs, prefix_blanks is |" + prefix_blanks + "| length is " + str(len(prefix_blanks)) )
# prefix_blanks now contains the 'physical' indent of the current paragraph, after tab substitution.
# The indent of this paragraph may be > or == to the previous paragraph. Those are the easy cases.
# If the indent is less than previous, is it equal to the indent of the next lower indented object?
# Or of a lower yet object? Or is it intermediate between two lower objects currently in the stack?
    # The latter case is an anomaly, but there's no enforcement in Github-MD.
# The following logic is an empirical reverse engineering, that seems adequate so far.
# It basically says, find a prior level of indent that this is not less than, and then pretend that
# the objects between it and this object weren't there.
trace("current logical_indent_level is {0} and current_indent is {1}".format(
indent_stack.logical_indent_level(), indent_stack.current_indent() ))
while len(prefix_blanks) < indent_stack.current_indent() :
indent_stack.pop_indent()
if len(prefix_blanks) > indent_stack.current_indent() :
indent_stack.push_indent(len(prefix_blanks), new_type)
else : # len(prefix_blanks) == indent_stack.current_indent()
indent_stack.set_current_type(new_type)
trace(("After evaluating this line's prefix-blanks and prev_type, new logical_indent_level() is {0} " +
"and current_indent is {1}").format(indent_stack.logical_indent_level(), indent_stack.current_indent() ))
# Now whack off the prefix blanks, and replace with a standardized string of blanks appropriate to
# the logical indent level.
trace("Orig line is " + inputline)
inputline = re.sub(r'^[\s]*', BLANKS[0 : 4*indent_stack.logical_indent_level()], inputline, 1)
trace("New line is " + inputline)
def rewrite_relative_links() :
global inputline
trace("entering with line: " + inputline)
# Fix up the relative links in inputline. This fixes problem #5.
num_links = inputline.count("](")
links = re.findall(r'\[[^\]]+\]\([^)]+\)', inputline)
num_whole_links = len(links)
trace("num_links = {0}, num_whole_links = {1}".format(num_links, num_whole_links))
if (num_links != num_whole_links) :
if re.search(r'\[[^\][!]*\![\s]*\[', inputline) :
# Nested link label expressions, with '!'.
# Special case where a link value is inlined into the link label,
# as in the first line of the base README.md file. Bail on such lines.
trace("WARNING: Found nested link label expressions.")
return
else :
report_error("Found link split across multiple lines. We can't process this.")
for linkitem in links :
pieces = re.search(r'(\[[\s`]*)([^\]]*[^\s`\]])([\s`]*\]\([\s]*)([^\s]+)([\s]*\))', linkitem).groups()
trace("Link: " + linkitem)
trace("Pieces: " + " ".join( (pieces[0],pieces[1],pieces[2],pieces[3],pieces[4]) ))
labeltext = pieces[1]
href = pieces[3]
trace("Extracted labeltext is: " + labeltext)
trace("Extracted href is: " + href)
if re.search(r'^http|\?', href) :
# Don't rewrite absolute or parameterized URLs; neither is native to this markdown book.
trace("skipping absolute or parameterized URL")
continue
# Rewrite implicit index references to explicit, so the book will work as well
# with 'file:///' preview as with a real web server.
# We are only concerned with file path names here, so split at '#' if present.
num_sharps = href.count("#")
if (num_sharps >= 2) :
report_error("Multiple #'s in a single link href.")
elif (num_sharps == 1) :
# Implicit index references are directory names, which seldom have a filetype suffix.
# On the other hand, explicit file references must have filetype, else the browser
# won't know what to do with it. So if no filetype extension, assume is a directory
# and add 'index.html'. Skip if this is an intra-document link.
if not re.search(r'^#|\.[^/#]+#', href) :
if not href.count("/#") :
href = re.sub(r'#', "/#", href, 1)
href = re.sub(r'/#', "/index.html#", href, 1)
# Fix up '.md' references.
href = re.sub(r'^README\.md#', "index.html#", href)
href = re.sub(r'/README\.md#', "/index.html#", href)
href = re.sub(r'\.md#', ".html#", href)
else : # num_sharps == 0
# Same logic as above, just at $ instead of #.
if not re.search(r'\.[^/]+$', href) :
if not href.endswith("/") :
href = href + "/"
href = re.sub(r'/$', "/index.html", href)
# Fix up '.md' references.
href = re.sub(r'^README\.md$', "index.html", href)
href = re.sub(r'/README\.md$', "/index.html", href)
href = re.sub(r'\.md$', ".html", href)
trace("After .md fixup, href is: " + href)
# Re-write named anchors referring to generated tags.
sharp = href.find("#")
if (sharp >= 0) :
named_anchor = href[sharp+1 : ]
trace('named_anchor = "' + named_anchor + '"')
trace('labeltext = "' + labeltext + '"')
scratch = labeltext.lower() # Github-MD forces all anchors to lowercase
scratch = re.sub(r'[\s]', "-", scratch) # convert whitespace to "-"
scratch = re.sub(EXCLUDED_CHARS_REGEX_GHM, "", scratch) # strip non-alphanumerics
if (scratch == named_anchor) :
trace("Found a rewritable case")
scratch = labeltext # Doxia-markdown doesn't change case
scratch = re.sub(r'[\s]', "_", scratch) # convert whitespace to "_"
scratch = re.sub(EXCLUDED_CHARS_REGEX_DOX, "", scratch) # strip non-alphanumerics except "."
href = re.sub("#" + named_anchor, "#" + scratch, href)
trace("After anchor rewrite, href is: " + href)
# Now swap out the bad href for the fixed one in inputline.
if (href != pieces[3]) :
# Assemble the full link string to prevent similar substrings (to href) in different contexts being substituted.
scratch = pieces[0] + pieces[1] + pieces[2] + href + pieces[4]
trace("Fixed link text is: " + scratch)
trace("linkitem is still: " + linkitem)
k = inputline.find(linkitem)
inputline = inputline[ : k] + scratch + inputline[ k + len(linkitem) : ]
trace("Fixed inputline is: " + inputline)
################################################
# begin state machine
global inputline, active_type
BLANKS = " "
TRACE = 0
FNR = -1
trace("Starting trace")
# Github uses relative indents, but doxia wants only and exactly multiples of 4.
# To turn the more forgiving into more regular, we must track both logical and actual indents.
indent_stack.init_indent()
# Paragraph type can be none, text, bullet, code, or heading.
# Note 'current_type()' used in managing the logical indent level on the indent stack,
# and 'active_type' used in the pattern recognition state machine, are deliberately different.
active_type = "none"
# Note: order of the below 'if' clauses is critically important for the state machine.
# Don't change the order.
if len(sys.argv) <= 1 :
report_error("Please provide names of files to be processed, as command line arguments.")
for FILENAME in sys.argv[1:] :
infile = open(FILENAME, 'r')
outfile = open(FILENAME + ".tmp", 'w')
FNR = 0
H1_COUNT = 0
for inputline in infile :
FNR += 1
inputline = inputline.rstrip("\n")
if '](' in inputline :
# Detect lines with hyperlinks in them, and re-write them if necessary and possible.
# This is the only fall-through block, and we put it at the very beginning.
rewrite_relative_links(); # in inputline
# Fall through for further processing.
if (active_type == "code") and ("```" not in inputline) :
trace("in codeblock, regular line")
# what happens in the codeblock, stays in the codeblock
# Put this case first (after link detection), so we don't have to test it in all the other cases.
print >>outfile, inputline
continue
if (active_type == "code") and ("```" in inputline) :
trace("in codeblock, end delimiter line")
# detect end of codeblock
# This must be the second case.
if re.search(r'```[\s]*[^\s]', inputline) :
# If there's text following the end-``` on the same line, error out and fix it in the source file.
report_error("Text following codeblock end delimiter (```) on same line.")
if re.search(r'```.*```', inputline) :
# If there are two sets of triple-ticks on the same line, that's a problem too.
report_error("Two sets of codeblock delimiters (```) on same line.")
active_type = "none"
# Force the indenting of the end-``` to match the beginning. This fixes problem #4.
inputline = re.sub(r'^[\s]*', BLANKS[0 : 4*indent_stack.logical_indent_level()], inputline)
print >>outfile, inputline
continue
if (active_type != "code") and ("```" in inputline) :
trace("start codeblock, delimiter line")
# detect start of codeblock
if re.search(r'[^\s][\s]*```', inputline) :
# If there's text preceding the begin-``` on the same line, error out and fix it in the source file.
report_error("Text preceding codeblock start delimiter (```) on same line.")
if re.search(r'```.*```', inputline) :
# If there are two sets of triple-ticks on the same line, that's a problem too.
report_error("Two sets of codeblock delimiters (```) on same line.")
if active_type == "text" or active_type == "bullet" :
print >>outfile, "" # Need preceding blank line before codeblock, in doxia.
active_type = "code"
fix_prefix_blanks(active_type) # in inputline
print >>outfile, inputline
continue
if re.search(r'^[\s]*$', inputline) :
trace("blank line")
# detect blank lines
active_type = "none"
print >>outfile, inputline # Perhaps this should be print "" instead?
continue
if re.search(r'^[\s]*([*+-]|[\d]+\.)[\s]', inputline) :
trace("bullet line")
# detect bullet line (numbered or not)
if (active_type == "text") :
print >>outfile, "" # Need preceding blank line between text and bullet, in doxia. This fixes problem #1.
active_type = "bullet"
fix_prefix_blanks(active_type); # in inputline
print >>outfile, inputline
continue
if inputline.startswith("#") :
trace("header line")
# detects header lines, which are self-delimiting, and cannot have indenting
# Header line resets the indenting as well as current type
active_type = "none"
indent_stack.init_indent()
if re.search(r'^#[^#]', inputline) :
# First-level headers ("H1") need explicit anchor inserted (Doxia style). This fixes problem #6.
anchor_name = re.sub(r' ', "_", inputline[1:].strip())
anchor_name = re.sub(EXCLUDED_CHARS_REGEX_DOX, "", anchor_name)
anchor_text = '<a name="' + anchor_name + '"></a>'
if H1_COUNT == 0 :
# Treat the first header differently - put the header after instead of before
# This is necessary to preserve document metadata titling in generated html.
# However, it means the title itself gets hidden above the top of window, when the link is used.
H1_COUNT = 1
print >>outfile, inputline
print >>outfile, anchor_text
print >>outfile, "" # Anchors aren't self-delimiting, so insert a blank line after.
else :
print >>outfile, "" # Anchors aren't self-delimiting, so insert a blank line first.
print >>outfile, anchor_text
print >>outfile, inputline
else :
# H2 or deeper level of header, doxia auto-generates anchor.
print >>outfile, inputline
continue
if re.search(r'^[\s]*#', inputline) :
trace("header line, bad")
report_error("Header specification character (#) detected with indenting. This is presumed to be an error, since it will render as text. If intentional, put a period or other printable character before it.")
## default action -- last case in state machine switch
trace("text line")
# Everything else is text-like, and therefore continues active_type, unless none.
if (active_type == "none") :
# Start new text paragraph.
active_type = "text"
fix_prefix_blanks(active_type); # in inputline
print >>outfile, inputline
continue
else :
# This is just a continuation of current text or bullet.
# Indenting is irrelevant.
print >>outfile, inputline
continue
## end loop on inputlines
if (active_type == "code") :
report_error("Unmatched codeblock delimiter (```) detected.")
infile.close()
outfile.close()
os.rename(FILENAME + ".tmp", FILENAME)
## end loop on FILENAMEs
trace("ending trace")
|
normal
|
{
"blob_id": "e6010ec05ec24dcd2a44e54ce1b1f11000e775ce",
"index": 8399,
"step-1": "<mask token>\n\n\nclass INDENT_STACK:\n <mask token>\n\n def __init__(self):\n self.my_stack = [{'physical': 0, 'logical': 0, 'type': 'none'}]\n\n def init_indent(self):\n del self.my_stack\n self.my_stack = [{'physical': 0, 'logical': 0, 'type': 'none'}]\n <mask token>\n\n def set_current_type(self, new_type):\n self.my_stack[-1]['type'] = new_type\n\n def pop_indent(self):\n if len(self.my_stack) > 1:\n return self.my_stack.pop()['physical']\n else:\n return 0\n <mask token>\n\n def logical_indent_level(self):\n return self.my_stack[-1]['logical']\n\n def current_type(self):\n return self.my_stack[-1]['type']\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass INDENT_STACK:\n \"\"\"This class maintains the indent stack during doc parsing.\"\"\"\n\n def __init__(self):\n self.my_stack = [{'physical': 0, 'logical': 0, 'type': 'none'}]\n\n def init_indent(self):\n del self.my_stack\n self.my_stack = [{'physical': 0, 'logical': 0, 'type': 'none'}]\n\n def push_indent(self, n, new_type):\n level = self.logical_indent_level() + (self.current_type() == 'bullet')\n self.my_stack.append({'physical': n, 'logical': level, 'type':\n new_type})\n\n def set_current_type(self, new_type):\n self.my_stack[-1]['type'] = new_type\n\n def pop_indent(self):\n if len(self.my_stack) > 1:\n return self.my_stack.pop()['physical']\n else:\n return 0\n\n def current_indent(self):\n return self.my_stack[-1]['physical']\n\n def logical_indent_level(self):\n return self.my_stack[-1]['logical']\n\n def current_type(self):\n return self.my_stack[-1]['type']\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef report_error(s):\n print >> sys.stderr, 'ERROR: ' + s\n print >> sys.stderr, 'on line: ' + str(FNR) + ' in file: ' + FILENAME\n print >> sys.stderr, inputline\n exit(1)\n\n\n<mask token>\n\n\nclass INDENT_STACK:\n \"\"\"This class maintains the indent stack during doc parsing.\"\"\"\n\n def __init__(self):\n self.my_stack = [{'physical': 0, 'logical': 0, 'type': 'none'}]\n\n def init_indent(self):\n del self.my_stack\n self.my_stack = [{'physical': 0, 'logical': 0, 'type': 'none'}]\n\n def push_indent(self, n, new_type):\n level = self.logical_indent_level() + (self.current_type() == 'bullet')\n self.my_stack.append({'physical': n, 'logical': level, 'type':\n new_type})\n\n def set_current_type(self, new_type):\n self.my_stack[-1]['type'] = new_type\n\n def pop_indent(self):\n if len(self.my_stack) > 1:\n return self.my_stack.pop()['physical']\n else:\n return 0\n\n def current_indent(self):\n return self.my_stack[-1]['physical']\n\n def logical_indent_level(self):\n return self.my_stack[-1]['logical']\n\n def current_type(self):\n return self.my_stack[-1]['type']\n\n\n<mask token>\n",
"step-4": "<mask token>\nEXCLUDED_CHARS_REGEX_GHM = '[^\\\\w\\\\-]'\nEXCLUDED_CHARS_REGEX_DOX = '[^\\\\w\\\\.\\\\-]'\n\n\ndef report_error(s):\n print >> sys.stderr, 'ERROR: ' + s\n print >> sys.stderr, 'on line: ' + str(FNR) + ' in file: ' + FILENAME\n print >> sys.stderr, inputline\n exit(1)\n\n\ndef trace(msg):\n if TRACE:\n print >> sys.stderr, 'TRACE: ' + inspect.currentframe(\n ).f_back.f_code.co_name + ' : InputLine ' + str(FNR) + ' : ' + msg\n\n\nclass INDENT_STACK:\n \"\"\"This class maintains the indent stack during doc parsing.\"\"\"\n\n def __init__(self):\n self.my_stack = [{'physical': 0, 'logical': 0, 'type': 'none'}]\n\n def init_indent(self):\n del self.my_stack\n self.my_stack = [{'physical': 0, 'logical': 0, 'type': 'none'}]\n\n def push_indent(self, n, new_type):\n level = self.logical_indent_level() + (self.current_type() == 'bullet')\n self.my_stack.append({'physical': n, 'logical': level, 'type':\n new_type})\n\n def set_current_type(self, new_type):\n self.my_stack[-1]['type'] = new_type\n\n def pop_indent(self):\n if len(self.my_stack) > 1:\n return self.my_stack.pop()['physical']\n else:\n return 0\n\n def current_indent(self):\n return self.my_stack[-1]['physical']\n\n def logical_indent_level(self):\n return self.my_stack[-1]['logical']\n\n def current_type(self):\n return self.my_stack[-1]['type']\n\n\nglobal indent_stack\nindent_stack = INDENT_STACK()\n\n\ndef convert_tabs(s):\n trace('orig length {0}'.format(len(s)))\n ct = s.count('\\t')\n s = s.expandtabs(4)\n trace('after {0} tab substitutions, end length is {1}'.format(ct, len(s)))\n return s\n\n\ndef fix_prefix_blanks(new_type):\n global inputline\n prefix_blanks = re.search('^[\\\\s]*', inputline)\n if prefix_blanks:\n prefix_blanks = prefix_blanks.group()\n trace('After prefix-blanks match, prefix_blanks is |' +\n prefix_blanks + '| length is ' + str(len(prefix_blanks)))\n prefix_blanks = convert_tabs(prefix_blanks)\n else:\n prefix_blanks = ''\n trace('After convert_tabs, prefix_blanks is |' + prefix_blanks +\n '| length is ' + str(len(prefix_blanks)))\n trace('current logical_indent_level is {0} and current_indent is {1}'.\n format(indent_stack.logical_indent_level(), indent_stack.\n current_indent()))\n while len(prefix_blanks) < indent_stack.current_indent():\n indent_stack.pop_indent()\n if len(prefix_blanks) > indent_stack.current_indent():\n indent_stack.push_indent(len(prefix_blanks), new_type)\n else:\n indent_stack.set_current_type(new_type)\n trace((\n \"After evaluating this line's prefix-blanks and prev_type, new logical_indent_level() is {0} \"\n + 'and current_indent is {1}').format(indent_stack.\n logical_indent_level(), indent_stack.current_indent()))\n trace('Orig line is ' + inputline)\n inputline = re.sub('^[\\\\s]*', BLANKS[0:4 * indent_stack.\n logical_indent_level()], inputline, 1)\n trace('New line is ' + inputline)\n\n\ndef rewrite_relative_links():\n global inputline\n trace('entering with line: ' + inputline)\n num_links = inputline.count('](')\n links = re.findall('\\\\[[^\\\\]]+\\\\]\\\\([^)]+\\\\)', inputline)\n num_whole_links = len(links)\n trace('num_links = {0}, num_whole_links = {1}'.format(num_links,\n num_whole_links))\n if num_links != num_whole_links:\n if re.search('\\\\[[^\\\\][!]*\\\\![\\\\s]*\\\\[', inputline):\n trace('WARNING: Found nested link label expressions.')\n return\n else:\n report_error(\n \"Found link split across multiple lines. 
We can't process this.\"\n )\n for linkitem in links:\n pieces = re.search(\n '(\\\\[[\\\\s`]*)([^\\\\]]*[^\\\\s`\\\\]])([\\\\s`]*\\\\]\\\\([\\\\s]*)([^\\\\s]+)([\\\\s]*\\\\))'\n , linkitem).groups()\n trace('Link: ' + linkitem)\n trace('Pieces: ' + ' '.join((pieces[0], pieces[1], pieces[2],\n pieces[3], pieces[4])))\n labeltext = pieces[1]\n href = pieces[3]\n trace('Extracted labeltext is: ' + labeltext)\n trace('Extracted href is: ' + href)\n if re.search('^http|\\\\?', href):\n trace('skipping absolute or parameterized URL')\n continue\n num_sharps = href.count('#')\n if num_sharps >= 2:\n report_error(\"Multiple #'s in a single link href.\")\n elif num_sharps == 1:\n if not re.search('^#|\\\\.[^/#]+#', href):\n if not href.count('/#'):\n href = re.sub('#', '/#', href, 1)\n href = re.sub('/#', '/index.html#', href, 1)\n href = re.sub('^README\\\\.md#', 'index.html#', href)\n href = re.sub('/README\\\\.md#', '/index.html#', href)\n href = re.sub('\\\\.md#', '.html#', href)\n else:\n if not re.search('\\\\.[^/]+$', href):\n if not href.endswith('/'):\n href = href + '/'\n href = re.sub('/$', '/index.html', href)\n href = re.sub('^README\\\\.md$', 'index.html', href)\n href = re.sub('/README\\\\.md$', '/index.html', href)\n href = re.sub('\\\\.md$', '.html', href)\n trace('After .md fixup, href is: ' + href)\n sharp = href.find('#')\n if sharp >= 0:\n named_anchor = href[sharp + 1:]\n trace('named_anchor = \"' + named_anchor + '\"')\n trace('labeltext = \"' + labeltext + '\"')\n scratch = labeltext.lower()\n scratch = re.sub('[\\\\s]', '-', scratch)\n scratch = re.sub(EXCLUDED_CHARS_REGEX_GHM, '', scratch)\n if scratch == named_anchor:\n trace('Found a rewritable case')\n scratch = labeltext\n scratch = re.sub('[\\\\s]', '_', scratch)\n scratch = re.sub(EXCLUDED_CHARS_REGEX_DOX, '', scratch)\n href = re.sub('#' + named_anchor, '#' + scratch, href)\n trace('After anchor rewrite, href is: ' + href)\n if href != pieces[3]:\n scratch = pieces[0] + pieces[1] + pieces[2] + href + pieces[4]\n trace('Fixed link text is: ' + scratch)\n trace('linkitem is still: ' + linkitem)\n k = inputline.find(linkitem)\n inputline = inputline[:k] + scratch + inputline[k + len(linkitem):]\n trace('Fixed inputline is: ' + inputline)\n\n\nglobal inputline, active_type\nBLANKS = (\n ' '\n )\nTRACE = 0\nFNR = -1\ntrace('Starting trace')\nindent_stack.init_indent()\nactive_type = 'none'\nif len(sys.argv) <= 1:\n report_error(\n 'Please provide names of files to be processed, as command line arguments.'\n )\nfor FILENAME in sys.argv[1:]:\n infile = open(FILENAME, 'r')\n outfile = open(FILENAME + '.tmp', 'w')\n FNR = 0\n H1_COUNT = 0\n for inputline in infile:\n FNR += 1\n inputline = inputline.rstrip('\\n')\n if '](' in inputline:\n rewrite_relative_links()\n if active_type == 'code' and '```' not in inputline:\n trace('in codeblock, regular line')\n print >> outfile, inputline\n continue\n if active_type == 'code' and '```' in inputline:\n trace('in codeblock, end delimiter line')\n if re.search('```[\\\\s]*[^\\\\s]', inputline):\n report_error(\n 'Text following codeblock end delimiter (```) on same line.'\n )\n if re.search('```.*```', inputline):\n report_error(\n 'Two sets of codeblock delimiters (```) on same line.')\n active_type = 'none'\n inputline = re.sub('^[\\\\s]*', BLANKS[0:4 * indent_stack.\n logical_indent_level()], inputline)\n print >> outfile, inputline\n continue\n if active_type != 'code' and '```' in inputline:\n trace('start codeblock, delimiter line')\n if re.search('[^\\\\s][\\\\s]*```', 
inputline):\n report_error(\n 'Text preceding codeblock start delimiter (```) on same line.'\n )\n if re.search('```.*```', inputline):\n report_error(\n 'Two sets of codeblock delimiters (```) on same line.')\n if active_type == 'text' or active_type == 'bullet':\n print >> outfile, ''\n active_type = 'code'\n fix_prefix_blanks(active_type)\n print >> outfile, inputline\n continue\n if re.search('^[\\\\s]*$', inputline):\n trace('blank line')\n active_type = 'none'\n print >> outfile, inputline\n continue\n if re.search('^[\\\\s]*([*+-]|[\\\\d]+\\\\.)[\\\\s]', inputline):\n trace('bullet line')\n if active_type == 'text':\n print >> outfile, ''\n active_type = 'bullet'\n fix_prefix_blanks(active_type)\n print >> outfile, inputline\n continue\n if inputline.startswith('#'):\n trace('header line')\n active_type = 'none'\n indent_stack.init_indent()\n if re.search('^#[^#]', inputline):\n anchor_name = re.sub(' ', '_', inputline[1:].strip())\n anchor_name = re.sub(EXCLUDED_CHARS_REGEX_DOX, '', anchor_name)\n anchor_text = '<a name=\"' + anchor_name + '\"></a>'\n if H1_COUNT == 0:\n H1_COUNT = 1\n print >> outfile, inputline\n print >> outfile, anchor_text\n print >> outfile, ''\n else:\n print >> outfile, ''\n print >> outfile, anchor_text\n print >> outfile, inputline\n else:\n print >> outfile, inputline\n continue\n if re.search('^[\\\\s]*#', inputline):\n trace('header line, bad')\n report_error(\n 'Header specification character (#) detected with indenting. This is presumed to be an error, since it will render as text. If intentional, put a period or other printable character before it.'\n )\n trace('text line')\n if active_type == 'none':\n active_type = 'text'\n fix_prefix_blanks(active_type)\n print >> outfile, inputline\n continue\n else:\n print >> outfile, inputline\n continue\n if active_type == 'code':\n report_error('Unmatched codeblock delimiter (```) detected.')\n infile.close()\n outfile.close()\n os.rename(FILENAME + '.tmp', FILENAME)\ntrace('ending trace')\n",
"step-5": "#########################################################################\n# Licensed to the Apache Software Foundation (ASF) under one or more\n# contributor license agreements. See the NOTICE file distributed with\n# this work for additional information regarding copyright ownership.\n# The ASF licenses this file to You under the Apache License, Version 2.0\n# (the \"License\"); you may not use this file except in compliance with\n# the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#########################################################################\n\n\n## Markdown has these types of paragraph: heading, text, list item (bullet or numbered),\n## codeblock, table, and block quote.\n##\n## This script fixes up differences in Markdown dialect, between Github-MD and doxia-markdown.\n## Specifically, it fixes these problems:\n## 1. In Github-MD, bullets and codeblock starts are self-delimiting. In doxia-markdown, they\n## must be separated from preceding text or (in the case of codeblocks) bullets, by a blank line.\n## Failure to do so causes the bullet or codeblock delimiter to be interpreted as ordinary text,\n## and the content gets munched into the preceding paragraph. The codeblock delimiter (```) as text\n## gets interpreted as a codephrase delimiter (`) plus a preceding or following empty codephrase (``).\n## 2. Github-MD is liberal in regard to what an 'indent' is, allowing 1, 2, 4, or 8 blanks, or\n## a tab. We mostly use 2 blanks. Doxia-markdown requires strictly 4 spaces or a tab. Failure\n## to adhere to this requirement causes indents to be ignored or misinterpreted, leading again to\n## paragraph munching and delimiter ignoring.\n## 3. In Doxia-markdown, if you indent below a header or text paragraph, it is interpreted as\n## an implicit codeblock start. In Github-MD, we only start codeblocks with the explicit\n## codeblock delimiter (```) and sometimes indent below text just for visual emphasis, so the\n## doxia-markdown interpretation is unwelcome. Thus, in our rewrite, we disallow indenting below\n## text or headers. This may make the text less pretty than the Github-MD presentation, but it\n## avoids the incorrect codeblocking.\n## 4. In Doxia-markdown, the indent of the end-codeblock delimiter must match that of the\n## begin-codeblock delimiter, or it won't be recognized and the codeblock will run on.\n## 5. Relative links need to be re-written. '.md' files need to be changed to '.html', and\n## as best we can we will re-write named anchors referring to tags autogenerated from headers.\n## The problem with generated tags is that Github-MD forces header text to lower-case, and\n## replaces blank spaces with hyphens, while doxia-markdown leaves case unchanged, and replaces\n## blanks with underscores. Fortunately we seem to have a culture of using link references that\n## are typographically the same as the header text, so we have some basis for fixing most links.\n## 6. H1 headers don't get named anchors generated, unlike H2 and lower headers. Don't know\n## why doxia-markdown has this deficiency, perhaps it assumes H1 will only be used once at the\n## beginning of the doc. 
We will insert an explicit anchor just before the H1 headers, to fix.\n##\n## So far, we're ignoring tables and block quotes.\n##\n## This script also manages the re-writing of named files to *.tmp, then mv to replace the original file.\n\n\nimport sys\nimport os\nimport inspect\nimport re\n\n# These are the characters excluded by Markdown from use in auto-generated anchor text for Headings.\nEXCLUDED_CHARS_REGEX_GHM = r'[^\\w\\-]' # all non-alphanumerics except \"-\" and \"_\". Whitespace are previously converted.\nEXCLUDED_CHARS_REGEX_DOX = r'[^\\w\\.\\-]' # all non-alphanumerics except \"-\", \"_\", and \".\". Whitespace are previously converted.\n\ndef report_error(s) :\n print >>sys.stderr, \"ERROR: \" + s \n print >>sys.stderr, \"on line: \" + str(FNR) + \" in file: \" + FILENAME \n print >>sys.stderr, inputline\n exit(1)\n\n\ndef trace(msg) :\n if TRACE :\n print >>sys.stderr, \"TRACE: \" + inspect.currentframe().f_back.f_code.co_name + \" : InputLine \" + str(FNR) + \" : \" + msg\n\nclass INDENT_STACK :\n 'This class maintains the indent stack during doc parsing.'\n\n def __init__(self) :\n self.my_stack = [ {'physical' : 0, 'logical' : 0, 'type' : 'none' } ]\n\n def init_indent(self) :\n del self.my_stack\n self.my_stack = [ {'physical' : 0, 'logical' : 0, 'type' : 'none' } ]\n\n def push_indent(self, n, new_type) :\n #Increment the logical depth only if under a bullet type. This fixes problem #3.\n level = self.logical_indent_level() + (self.current_type() == \"bullet\") # plus 1 if true\n self.my_stack.append( {'physical':n, 'logical':level, 'type':new_type} )\n\n def set_current_type(self, new_type) :\n # adjust topmost type\n self.my_stack[-1]['type'] = new_type\n\n def pop_indent(self) :\n if len(self.my_stack) > 1 :\n return self.my_stack.pop()['physical']\n else :\n return 0\n\n def current_indent(self) :\n # top of stack, physical\n return self.my_stack[-1]['physical']\n\n def logical_indent_level(self) :\n # top of stack, logical\n return self.my_stack[-1]['logical']\n\n def current_type(self) :\n # top of stack, type\n return self.my_stack[-1]['type']\n\n ## End class INDENT_STACK\n\nglobal indent_stack\nindent_stack = INDENT_STACK() # single instance\n\n\ndef convert_tabs(s) :\n # Courtesy of Python, this does a real column-aware tab expansion.\n # If this doesn't work, we'll need to go back to erroring on \" \\t\", that is, spaces followed by tabs.\n trace(\"orig length {0}\".format(len(s)) )\n ct = s.count(\"\\t\")\n s = s.expandtabs(4)\n trace(\"after {0} tab substitutions, end length is {1}\".format(ct, len(s)) )\n return s\n\n\ndef fix_prefix_blanks(new_type) :\n global inputline\n # Fix up the indenting (prefix blanks) in inputline. This fixes problem #2.\n # Don't worry about blank lines here, they are filtered out before calling this method.\n # Both uses and maintains the indent stack, which is why we need the new_type passed in.\n prefix_blanks = re.search(r'^[\\s]*', inputline)\n if prefix_blanks :\n prefix_blanks = prefix_blanks.group()\n trace(\"After prefix-blanks match, prefix_blanks is |\" + prefix_blanks + \"| length is \" + str(len(prefix_blanks)) )\n prefix_blanks = convert_tabs(prefix_blanks)\n else :\n prefix_blanks = \"\"\n\n trace(\"After convert_tabs, prefix_blanks is |\" + prefix_blanks + \"| length is \" + str(len(prefix_blanks)) )\n\n # prefix_blanks now contains the 'physical' indent of the current paragraph, after tab substitution.\n # The indent of this paragraph may be > or == to the previous paragraph. 
Those are the easy cases.\n # If the indent is less than previous, is it equal to the indent of the next lower indented object?\n # Or of a lower yet object? Or is it intermediate between two lower objects currently in the stack?\n # The latter case is an anomoly, but there's no enforcement in Github-MD.\n # The following logic is an empirical reverse engineering, that seems adequate so far.\n # It basically says, find a prior level of indent that this is not less than, and then pretend that\n # the objects between it and this object weren't there.\n\n trace(\"current logical_indent_level is {0} and current_indent is {1}\".format(\n indent_stack.logical_indent_level(), indent_stack.current_indent() ))\n while len(prefix_blanks) < indent_stack.current_indent() :\n indent_stack.pop_indent()\n if len(prefix_blanks) > indent_stack.current_indent() :\n indent_stack.push_indent(len(prefix_blanks), new_type)\n else : # len(prefix_blanks) == indent_stack.current_indent()\n indent_stack.set_current_type(new_type)\n\n trace((\"After evaluating this line's prefix-blanks and prev_type, new logical_indent_level() is {0} \" +\n \"and current_indent is {1}\").format(indent_stack.logical_indent_level(), indent_stack.current_indent() ))\n\n # Now whack off the prefix blanks, and replace with a standardized string of blanks appropriate to\n # the logical indent level.\n trace(\"Orig line is \" + inputline)\n inputline = re.sub(r'^[\\s]*', BLANKS[0 : 4*indent_stack.logical_indent_level()], inputline, 1)\n trace(\"New line is \" + inputline)\n\n\ndef rewrite_relative_links() :\n global inputline\n trace(\"entering with line: \" + inputline)\n # Fix up the relative links in inputline. This fixes problem #5.\n num_links = inputline.count(\"](\")\n links = re.findall(r'\\[[^\\]]+\\]\\([^)]+\\)', inputline)\n num_whole_links = len(links)\n trace(\"num_links = {0}, num_whole_links = {1}\".format(num_links, num_whole_links))\n if (num_links != num_whole_links) :\n if re.search(r'\\[[^\\][!]*\\![\\s]*\\[', inputline) :\n # Nested link label expressions, with '!'.\n # Special case where a link value is inlined into the link label,\n # as in the first line of the base README.md file. Bail on such lines.\n trace(\"WARNING: Found nested link label expressions.\")\n return\n else :\n report_error(\"Found link split across multiple lines. We can't process this.\")\n\n for linkitem in links :\n pieces = re.search(r'(\\[[\\s`]*)([^\\]]*[^\\s`\\]])([\\s`]*\\]\\([\\s]*)([^\\s]+)([\\s]*\\))', linkitem).groups()\n trace(\"Link: \" + linkitem)\n trace(\"Pieces: \" + \" \".join( (pieces[0],pieces[1],pieces[2],pieces[3],pieces[4]) ))\n labeltext = pieces[1]\n href = pieces[3]\n trace(\"Extracted labeltext is: \" + labeltext)\n trace(\"Extracted href is: \" + href)\n if re.search(r'^http|\\?', href) :\n # Don't rewrite absolute or parameterized URLs; neither is native to this markdown book.\n trace(\"skipping absolute or parameterized URL\")\n continue\n\n # Rewrite implicit index references to explicit, so the book will work as well\n # with 'file:///' preview as with a real web server.\n # We are only concerned with file path names here, so split at '#' if present.\n num_sharps = href.count(\"#\")\n if (num_sharps >= 2) :\n report_error(\"Multiple #'s in a single link href.\")\n elif (num_sharps == 1) :\n # Implicit index references are directory names, which seldom have a filetype suffix.\n # On the other hand, explicit file references must have filetype, else the browser\n # won't know what to do with it. 
So if no filetype extension, assume is a directory\n # and add 'index.html'. Skip if this is an intra-document link.\n if not re.search(r'^#|\\.[^/#]+#', href) :\n if not href.count(\"/#\") : \n href = re.sub(r'#', \"/#\", href, 1)\n href = re.sub(r'/#', \"/index.html#\", href, 1)\n\n # Fix up '.md' references.\n href = re.sub(r'^README\\.md#', \"index.html#\", href)\n href = re.sub(r'/README\\.md#', \"/index.html#\", href)\n href = re.sub(r'\\.md#', \".html#\", href)\n\n else : # num_sharps == 0\n # Same logic as above, just at $ instead of #.\n if not re.search(r'\\.[^/]+$', href) :\n if not href.endswith(\"/\") :\n href = href + \"/\"\n href = re.sub(r'/$', \"/index.html\", href)\n\n # Fix up '.md' references.\n href = re.sub(r'^README\\.md$', \"index.html\", href)\n href = re.sub(r'/README\\.md$', \"/index.html\", href)\n href = re.sub(r'\\.md$', \".html\", href)\n\n trace(\"After .md fixup, href is: \" + href)\n\n # Re-write named anchors referring to generated tags.\n sharp = href.find(\"#\")\n if (sharp >= 0) :\n named_anchor = href[sharp+1 : ]\n trace('named_anchor = \"' + named_anchor + '\"')\n trace('labeltext = \"' + labeltext + '\"')\n scratch = labeltext.lower() # Github-MD forces all anchors to lowercase\n scratch = re.sub(r'[\\s]', \"-\", scratch) # convert whitespace to \"-\"\n scratch = re.sub(EXCLUDED_CHARS_REGEX_GHM, \"\", scratch) # strip non-alphanumerics\n if (scratch == named_anchor) :\n trace(\"Found a rewritable case\")\n scratch = labeltext # Doxia-markdown doesn't change case\n scratch = re.sub(r'[\\s]', \"_\", scratch) # convert whitespace to \"_\"\n scratch = re.sub(EXCLUDED_CHARS_REGEX_DOX, \"\", scratch) # strip non-alphanumerics except \".\"\n href = re.sub(\"#\" + named_anchor, \"#\" + scratch, href)\n\n trace(\"After anchor rewrite, href is: \" + href)\n \n # Now swap out the bad href for the fixed one in inputline.\n if (href != pieces[3]) :\n # Assemble the full link string to prevent similar substrings (to href) in different contexts being substituted.\n scratch = pieces[0] + pieces[1] + pieces[2] + href + pieces[4]\n trace(\"Fixed link text is: \" + scratch)\n trace(\"linkitem is still: \" + linkitem)\n k = inputline.find(linkitem)\n inputline = inputline[ : k] + scratch + inputline[ k + len(linkitem) : ]\n trace(\"Fixed inputline is: \" + inputline)\n\n\n\n################################################\n# begin state machine\n\nglobal inputline, active_type\nBLANKS = \" \"\nTRACE = 0\nFNR = -1\ntrace(\"Starting trace\")\n\n# Github uses relative indents, but doxia wants only and exactly multiples of 4.\n# To turn the more forgiving into more regular, we must track both logical and actual indents.\nindent_stack.init_indent()\n\n# Paragraph type can be none, text, bullet, code, or heading.\n# Note 'current_type()' used in managing the logical indent level on the indent stack,\n# and 'active_type' used in the pattern recognition state machine, are deliberately different.\nactive_type = \"none\"\n\n# Note: order of the below 'if' clauses is critically important for the state machine.\n# Don't change the order.\n\nif len(sys.argv) <= 1 :\n report_error(\"Please provide names of files to be processed, as command line arguments.\")\n\nfor FILENAME in sys.argv[1:] :\n infile = open(FILENAME, 'r')\n outfile = open(FILENAME + \".tmp\", 'w')\n FNR = 0\n H1_COUNT = 0\n for inputline in infile :\n FNR += 1\n inputline = inputline.rstrip(\"\\n\")\n\n if '](' in inputline :\n # Detect lines with hyperlinks in them, and re-write them if necessary and possible.\n # 
This is the only fall-through block, and we put it at the very beginning.\n rewrite_relative_links(); # in inputline\n # Fall through for further processing.\n\n if (active_type == \"code\") and (\"```\" not in inputline) :\n trace(\"in codeblock, regular line\")\n # what happens in the codeblock, stays in the codeblock\n # Put this case first (after link detection), so we don't have to test it in all the other cases.\n print >>outfile, inputline\n continue\n\n if (active_type == \"code\") and (\"```\" in inputline) :\n trace(\"in codeblock, end delimiter line\")\n # detect end of codeblock\n # This must be the second case.\n if re.search(r'```[\\s]*[^\\s]', inputline) :\n # If there's text following the end-``` on the same line, error out and fix it in the source file.\n report_error(\"Text following codeblock end delimiter (```) on same line.\")\n\n if re.search(r'```.*```', inputline) :\n # If there are two sets of triple-ticks on the same line, that's a problem too.\n report_error(\"Two sets of codeblock delimiters (```) on same line.\")\n\n active_type = \"none\"\n # Force the indenting of the end-``` to match the beginning. This fixes problem #4.\n inputline = re.sub(r'^[\\s]*', BLANKS[0 : 4*indent_stack.logical_indent_level()], inputline)\n print >>outfile, inputline\n continue\n\n if (active_type != \"code\") and (\"```\" in inputline) :\n trace(\"start codeblock, delimiter line\")\n # detect start of codeblock\n if re.search(r'[^\\s][\\s]*```', inputline) :\n # If there's text preceding the begin-``` on the same line, error out and fix it in the source file.\n report_error(\"Text preceding codeblock start delimiter (```) on same line.\")\n\n if re.search(r'```.*```', inputline) :\n # If there are two sets of triple-ticks on the same line, that's a problem too.\n report_error(\"Two sets of codeblock delimiters (```) on same line.\")\n\n if active_type == \"text\" or active_type == \"bullet\" :\n print >>outfile, \"\" # Need preceding blank line before codeblock, in doxia.\n\n active_type = \"code\"\n fix_prefix_blanks(active_type) # in inputline\n print >>outfile, inputline\n continue\n\n if re.search(r'^[\\s]*$', inputline) :\n trace(\"blank line\")\n # detect blank lines\n active_type = \"none\"\n print >>outfile, inputline # Perhaps this should be print \"\" instead?\n continue\n\n if re.search(r'^[\\s]*([*+-]|[\\d]+\\.)[\\s]', inputline) :\n trace(\"bullet line\")\n # detect bullet line (numbered or not)\n if (active_type == \"text\") :\n print >>outfile, \"\" # Need preceding blank line between text and bullet, in doxia. This fixes problem #1.\n\n active_type = \"bullet\"\n fix_prefix_blanks(active_type); # in inputline\n print >>outfile, inputline\n continue\n\n if inputline.startswith(\"#\") :\n trace(\"header line\")\n # detects header lines, which are self-delimiting, and cannot have indenting\n # Header line resets the indenting as well as current type\n active_type = \"none\"\n indent_stack.init_indent()\n if re.search(r'^#[^#]', inputline) :\n # First-level headers (\"H1\") need explicit anchor inserted (Doxia style). 
This fixes problem #6.\n anchor_name = re.sub(r' ', \"_\", inputline[1:].strip())\n anchor_name = re.sub(EXCLUDED_CHARS_REGEX_DOX, \"\", anchor_name)\n anchor_text = '<a name=\"' + anchor_name + '\"></a>'\n if H1_COUNT == 0 :\n # Treat the first header differently - put the header after instead of before\n # This is necessary to preserve document metadata titling in generated html.\n # However, it means the title itself gets hidden above the top of window, when the link is used.\n H1_COUNT = 1\n print >>outfile, inputline\n print >>outfile, anchor_text\n print >>outfile, \"\" # Anchors aren't self-delimiting, so insert a blank line after.\n else :\n print >>outfile, \"\" # Anchors aren't self-delimiting, so insert a blank line first.\n print >>outfile, anchor_text\n print >>outfile, inputline\n else :\n # H2 or deeper level of header, doxia auto-generates anchor.\n print >>outfile, inputline\n continue\n\n if re.search(r'^[\\s]*#', inputline) :\n trace(\"header line, bad\")\n report_error(\"Header specification character (#) detected with indenting. This is presumed to be an error, since it will render as text. If intentional, put a period or other printable character before it.\")\n\n ## default action -- last case in state machine switch\n trace(\"text line\")\n # Everything else is text-like, and therefore continues active_type, unless none.\n if (active_type == \"none\") :\n # Start new text paragraph.\n active_type = \"text\"\n fix_prefix_blanks(active_type); # in inputline\n print >>outfile, inputline\n continue\n else :\n # This is just a continuation of current text or bullet.\n # Indenting is irrelevant.\n print >>outfile, inputline\n continue\n\n ## end loop on inputlines\n if (active_type == \"code\") :\n report_error(\"Unmatched codeblock delimiter (```) detected.\")\n\n infile.close()\n outfile.close()\n os.rename(FILENAME + \".tmp\", FILENAME)\n\n## end loop on FILENAMEs\ntrace(\"ending trace\")\n",
"step-ids": [
7,
10,
11,
17,
19
]
}
|
[
7,
10,
11,
17,
19
] |
from page_objects import PageObject, PageElement
class MainPage(PageObject):
level_menu_opened = False
level_menu_created = False
css_input = PageElement(css='input.input-strobe')
level_text_span = PageElement(css='span.level-text')
instruction_h2 = PageElement(css='h2.order')
enter_button = PageElement(css='div.enter-button')
level_menu = PageElement(
xpath='//div[@class="level-menu-toggle-wrapper"]')
def __init__(self, webdriver, root_uri=None):
super(MainPage, self).__init__(webdriver, root_uri)
# hack to initialize all the menu items
self.open_level_menu()
self.close_level_menu()
def ensure_menu_created(self):
if not self.level_menu_created:
self.open_level_menu()
self.close_level_menu()
def open_level_menu(self):
if not self.level_menu_opened:
self.level_menu.click()
self.level_menu_opened = True
self.level_menu_created = True
def close_level_menu(self):
if self.level_menu_opened:
self.level_menu.click()
self.level_menu_opened = False
def get_level_link(self, level_number):
return PageElement(
xpath='//span[@class="level-number" and text() = "{0}"]/..'
.format(level_number)
)
def open_level(self, level_number):
self.open_level_menu()
self.get_level_link(level_number).click()
def css_write(self, css):
self.css_input = css
self.enter_button.click()
def do_level1(self):
self.open_level(1)
self.css_write("page")
        self.level1_link.click()  # note: level1_link is never defined on this class
|
normal
|
{
"blob_id": "c6cf085330f47ffb139c5acc91d91e9758f5396a",
"index": 274,
"step-1": "<mask token>\n\n\nclass MainPage(PageObject):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def __init__(self, webdriver, root_uri=None):\n super(MainPage, self).__init__(webdriver, root_uri)\n self.open_level_menu()\n self.close_level_menu()\n <mask token>\n <mask token>\n\n def close_level_menu(self):\n if self.level_menu_opened:\n self.level_menu.click()\n self.level_menu_opened = False\n\n def get_level_link(self, level_number):\n return PageElement(xpath=\n '//span[@class=\"level-number\" and text() = \"{0}\"]/..'.format(\n level_number))\n\n def open_level(self, level_number):\n self.open_level_menu()\n self.get_level_link(level_number).click()\n\n def css_write(self, css):\n self.css_input = css\n self.enter_button.click()\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass MainPage(PageObject):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def __init__(self, webdriver, root_uri=None):\n super(MainPage, self).__init__(webdriver, root_uri)\n self.open_level_menu()\n self.close_level_menu()\n <mask token>\n <mask token>\n\n def close_level_menu(self):\n if self.level_menu_opened:\n self.level_menu.click()\n self.level_menu_opened = False\n\n def get_level_link(self, level_number):\n return PageElement(xpath=\n '//span[@class=\"level-number\" and text() = \"{0}\"]/..'.format(\n level_number))\n\n def open_level(self, level_number):\n self.open_level_menu()\n self.get_level_link(level_number).click()\n\n def css_write(self, css):\n self.css_input = css\n self.enter_button.click()\n\n def do_level1(self):\n self.open_level(1)\n self.css_write('page')\n self.level1_link.click()\n",
"step-3": "<mask token>\n\n\nclass MainPage(PageObject):\n level_menu_opened = False\n level_menu_created = False\n css_input = PageElement(css='input.input-strobe')\n level_text_span = PageElement(css='span.level-text')\n instruction_h2 = PageElement(css='h2.order')\n enter_button = PageElement(css='div.enter-button')\n level_menu = PageElement(xpath='//div[@class=\"level-menu-toggle-wrapper\"]')\n\n def __init__(self, webdriver, root_uri=None):\n super(MainPage, self).__init__(webdriver, root_uri)\n self.open_level_menu()\n self.close_level_menu()\n\n def ensure_menu_created(self):\n if not self.level_menu_created:\n self.open_level_menu()\n self.close_level_menu()\n\n def open_level_menu(self):\n if not self.level_menu_opened:\n self.level_menu.click()\n self.level_menu_opened = True\n self.level_menu_created = True\n\n def close_level_menu(self):\n if self.level_menu_opened:\n self.level_menu.click()\n self.level_menu_opened = False\n\n def get_level_link(self, level_number):\n return PageElement(xpath=\n '//span[@class=\"level-number\" and text() = \"{0}\"]/..'.format(\n level_number))\n\n def open_level(self, level_number):\n self.open_level_menu()\n self.get_level_link(level_number).click()\n\n def css_write(self, css):\n self.css_input = css\n self.enter_button.click()\n\n def do_level1(self):\n self.open_level(1)\n self.css_write('page')\n self.level1_link.click()\n",
"step-4": "from page_objects import PageObject, PageElement\n\n\nclass MainPage(PageObject):\n level_menu_opened = False\n level_menu_created = False\n css_input = PageElement(css='input.input-strobe')\n level_text_span = PageElement(css='span.level-text')\n instruction_h2 = PageElement(css='h2.order')\n enter_button = PageElement(css='div.enter-button')\n level_menu = PageElement(xpath='//div[@class=\"level-menu-toggle-wrapper\"]')\n\n def __init__(self, webdriver, root_uri=None):\n super(MainPage, self).__init__(webdriver, root_uri)\n self.open_level_menu()\n self.close_level_menu()\n\n def ensure_menu_created(self):\n if not self.level_menu_created:\n self.open_level_menu()\n self.close_level_menu()\n\n def open_level_menu(self):\n if not self.level_menu_opened:\n self.level_menu.click()\n self.level_menu_opened = True\n self.level_menu_created = True\n\n def close_level_menu(self):\n if self.level_menu_opened:\n self.level_menu.click()\n self.level_menu_opened = False\n\n def get_level_link(self, level_number):\n return PageElement(xpath=\n '//span[@class=\"level-number\" and text() = \"{0}\"]/..'.format(\n level_number))\n\n def open_level(self, level_number):\n self.open_level_menu()\n self.get_level_link(level_number).click()\n\n def css_write(self, css):\n self.css_input = css\n self.enter_button.click()\n\n def do_level1(self):\n self.open_level(1)\n self.css_write('page')\n self.level1_link.click()\n",
"step-5": "from page_objects import PageObject, PageElement\n\n\nclass MainPage(PageObject):\n level_menu_opened = False\n level_menu_created = False\n css_input = PageElement(css='input.input-strobe')\n level_text_span = PageElement(css='span.level-text')\n instruction_h2 = PageElement(css='h2.order')\n enter_button = PageElement(css='div.enter-button')\n\n level_menu = PageElement(\n xpath='//div[@class=\"level-menu-toggle-wrapper\"]')\n\n def __init__(self, webdriver, root_uri=None):\n super(MainPage, self).__init__(webdriver, root_uri)\n # hack to initialize all the menu items\n self.open_level_menu()\n self.close_level_menu()\n\n def ensure_menu_created(self):\n if not self.level_menu_created:\n self.open_level_menu()\n self.close_level_menu()\n\n def open_level_menu(self):\n if not self.level_menu_opened:\n self.level_menu.click()\n self.level_menu_opened = True\n self.level_menu_created = True\n\n def close_level_menu(self):\n if self.level_menu_opened:\n self.level_menu.click()\n self.level_menu_opened = False\n\n def get_level_link(self, level_number):\n return PageElement(\n xpath='//span[@class=\"level-number\" and text() = \"{0}\"]/..'\n .format(level_number)\n )\n\n def open_level(self, level_number):\n self.open_level_menu()\n self.get_level_link(level_number).click()\n\n def css_write(self, css):\n self.css_input = css\n self.enter_button.click()\n\n def do_level1(self):\n self.open_level(1)\n self.css_write(\"page\")\n self.level1_link.click()\n",
"step-ids": [
6,
7,
10,
11,
12
]
}
|
[
6,
7,
10,
11,
12
] |
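A minimal sketch of driving a page object like the MainPage record above from a test script. The browser setup and the target URL are assumptions, not part of the record, and the snippet assumes the page object behaves as its method names suggest.

from selenium import webdriver

driver = webdriver.Chrome()  # assumes a local chromedriver is installed
driver.get('https://flukeout.github.io/')  # hypothetical URL for the CSS game
page = MainPage(driver)  # __init__ opens and closes the level menu once
page.open_level(3)       # expands the menu, then clicks the level-3 link
page.css_write('bento')  # types a selector into the input and presses Enter
driver.quit()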
#!/usr/bin/python
import sys
class Generator:
def __init__(self, seed, factor, multiple):
self.value = seed
self.factor = factor
self.multiple = multiple
def iterate(self):
self.value = ( self.value * self.factor ) % 2147483647
# Repeat if this isn't an exact multiple
while self.value % self.multiple != 0:
self.value = ( self.value * self.factor ) % 2147483647
return self.value
# Read the input
seed_a = int(sys.argv[1])
seed_b = int(sys.argv[2])
gen_a = Generator(seed_a, 16807, 4)
gen_b = Generator(seed_b, 48271, 8)
matches = 0
for i in range(0,5000000):
val_a = gen_a.iterate()
val_b = gen_b.iterate()
# print "{0:16d}\t{1:16d}".format(val_a, val_b)
lowest16 = 2 ** 16 - 1
low_a = val_a & lowest16
low_b = val_b & lowest16
# print format(low_a, '016b')
# print format(low_b, '016b')
if ( low_a == low_b ):
matches += 1
print matches
|
normal
|
{
"blob_id": "4bb006e2e457f5b11157dacb43fe94c8b400f146",
"index": 5105,
"step-1": "#!/usr/bin/python\n\nimport sys\n\nclass Generator:\n def __init__(self, seed, factor, multiple):\n self.value = seed\n self.factor = factor\n self.multiple = multiple\n\n def iterate(self):\n self.value = ( self.value * self.factor ) % 2147483647\n # Repeat if this isn't an exact multiple\n while self.value % self.multiple != 0:\n self.value = ( self.value * self.factor ) % 2147483647\n return self.value\n\n# Read the input\n\nseed_a = int(sys.argv[1])\nseed_b = int(sys.argv[2])\n\ngen_a = Generator(seed_a, 16807, 4)\ngen_b = Generator(seed_b, 48271, 8)\n\nmatches = 0\nfor i in range(0,5000000):\n val_a = gen_a.iterate()\n val_b = gen_b.iterate()\n\n # print \"{0:16d}\\t{1:16d}\".format(val_a, val_b)\n\n lowest16 = 2 ** 16 - 1\n\n low_a = val_a & lowest16\n low_b = val_b & lowest16\n\n # print format(low_a, '016b')\n # print format(low_b, '016b')\n\n if ( low_a == low_b ):\n matches += 1\n\nprint matches\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
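The comparison in the loop above keeps only the lowest 16 bits of each generated value. A small self-contained illustration of that masking step; the sample pair is the matching third pair from the Advent of Code 2017 day 15 example.

MASK = (1 << 16) - 1  # 0xFFFF, the same value as the script's lowest16

def low16_match(a, b):
    # True when two values agree in their lowest 16 bits
    return a & MASK == b & MASK

assert low16_match(245556042, 1431495498)   # both end in 0b1110001101001010
assert not low16_match(1092455, 430625591)  # the first pair does not match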
__author__ = 'AChen'
from rec_linked_list import *
def filter_pos_rec(lst):
"""
@type lst: LinkedListRec
>>> lst = LinkedListRec([3, -10, 4, 0])
>>> pos = filter_pos_rec(lst)
>>> str(pos)
'3 -> 4'
"""
if lst.is_empty():
return lst
else:
pos_rec = LinkedListRec([])
if lst._first > 0:
pos_rec._first = lst._first
pos_rec._rest = filter_pos_rec(lst._rest)
else:
pos_rec = filter_pos_rec(lst._rest)
return pos_rec
|
normal
|
{
"blob_id": "efcbe296ea72a94be967124a8ba8c84a524e2eb1",
"index": 66,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef filter_pos_rec(lst):\n \"\"\"\n @type lst: LinkedListRec\n >>> lst = LinkedListRec([3, -10, 4, 0])\n >>> pos = filter_pos_rec(lst)\n >>> str(pos)\n '3 -> 4'\n\n \"\"\"\n if lst.is_empty():\n return lst\n else:\n pos_rec = LinkedListRec([])\n if lst._first > 0:\n pos_rec._first = lst._first\n pos_rec._rest = filter_pos_rec(lst._rest)\n else:\n pos_rec = filter_pos_rec(lst._rest)\n return pos_rec\n",
"step-3": "__author__ = 'AChen'\n<mask token>\n\n\ndef filter_pos_rec(lst):\n \"\"\"\n @type lst: LinkedListRec\n >>> lst = LinkedListRec([3, -10, 4, 0])\n >>> pos = filter_pos_rec(lst)\n >>> str(pos)\n '3 -> 4'\n\n \"\"\"\n if lst.is_empty():\n return lst\n else:\n pos_rec = LinkedListRec([])\n if lst._first > 0:\n pos_rec._first = lst._first\n pos_rec._rest = filter_pos_rec(lst._rest)\n else:\n pos_rec = filter_pos_rec(lst._rest)\n return pos_rec\n",
"step-4": "__author__ = 'AChen'\nfrom rec_linked_list import *\n\n\ndef filter_pos_rec(lst):\n \"\"\"\n @type lst: LinkedListRec\n >>> lst = LinkedListRec([3, -10, 4, 0])\n >>> pos = filter_pos_rec(lst)\n >>> str(pos)\n '3 -> 4'\n\n \"\"\"\n if lst.is_empty():\n return lst\n else:\n pos_rec = LinkedListRec([])\n if lst._first > 0:\n pos_rec._first = lst._first\n pos_rec._rest = filter_pos_rec(lst._rest)\n else:\n pos_rec = filter_pos_rec(lst._rest)\n return pos_rec\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
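For contrast with the linked-list recursion above, here is the same positive-filter logic sketched over plain Python lists; this avoids the rec_linked_list dependency, which the record does not include.

def filter_pos(items):
    # keep strictly positive values, preserving order
    if not items:                 # mirrors lst.is_empty()
        return []
    rest = filter_pos(items[1:])  # recurse on the tail, like lst._rest
    return [items[0]] + rest if items[0] > 0 else rest

assert filter_pos([3, -10, 4, 0]) == [3, 4]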
import os
import sys
from flask import Flask, request, abort, flash, jsonify, Response
from flask_sqlalchemy import SQLAlchemy
from flask_cors import CORS
from flask_migrate import Migrate
import random
import unittest
from models import db, Question, Category
# set the number of questions per page for pagination
QUESTIONS_PER_PAGE = 10
# create and configure the app
app = Flask(__name__)
app.config.from_object('config')
db.init_app(app)
migrate = Migrate(app, db)
# set up cors for the application
cors = CORS(app, resources={r'/': {'origins': '*'}})
# to set Access-Control-Allow Headers and Methods
@app.after_request
def after_request(response):
response.headers.add('Access-Control-Allow-Headers',
'Content-Type, Authorization, true')
response.headers.add('Access-Control-Allow-Methods',
'GET, PATCH,PUT,POST, DELETE, OPTIONS')
return response
# endpoint to handle GET requests for all available categories
@app.route('/categories', methods=['GET'])
def get_categories():
categories = [category.type for category in Category.query.all()]
return jsonify({'categories': categories, 'success': True})
# endpoint to handle GET requests for questions with pagination
@app.route('/questions/page/<int:page>', methods=['GET'])
def get_questions(page):
error = False
questions = []
total_questions = 0
    # if the page number is not an integer
if type(page) is not int:
# let them know their input is not processable
abort(422)
# ensure proper request method
if request.method == 'GET':
try:
# query for all categories
categories = [category.type for category in Category.query.all()]
if categories is None:
# let the user know that no resource was found
abort(404)
query = Question.query.paginate(page, per_page=10)
total_questions += len(Question.query.all())
if query is None:
# let the user know that no resource was found
abort(404)
if len(query.items) == 0:
# let the user know that no resource was found
error = True
results = query.items
# format data
for question in results:
_question_ = {
'id': question.id,
'question': question.question,
'answer': question.answer,
'category': question.category,
'difficulty': question.difficulty
}
questions.append(_question_)
except Exception:
# set error to true and log on the server
error = True
print('Error: {}'.format(sys.exc_info()))
finally:
if error:
# let the user know their request was not successful
abort(400)
else:
# if successful send back success response
return jsonify({
'success': True,
'questions': questions,
'total_questions': total_questions,
'categories': categories
})
else:
# send method not allowed error
abort(405)
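# --- illustrative aside, not part of the original record --------------------
# The paginate() call above hard-codes per_page=10; a plain-slicing helper
# shows the same arithmetic and could reuse the QUESTIONS_PER_PAGE constant:
def paginate_items(items, page, per_page=QUESTIONS_PER_PAGE):
    start = (page - 1) * per_page
    return items[start:start + per_page]
# -----------------------------------------------------------------------------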
# endpoint to delete a question from the database
@app.route('/question/<int:question_id>', methods=['DELETE'])
def delete_question(question_id):
error = False
# ensure proper request method
if request.method == 'DELETE':
# if question id is not an integer
if type(question_id) is not int:
# let them know their input is not processable
abort(422)
try:
# get user selected question from database
question = Question.query.get(question_id)
# stage question delete
db.session.delete(question)
# commit deletion to the database
db.session.commit()
except Exception:
# set error to true and log on the server
error = True
print('Error: {}'.format(sys.exc_info()))
finally:
# close database session
db.session.close()
if error:
# send bad request error
abort(400)
else:
# if no error send success object and log on server
return jsonify({
'success': True,
'method': 'Delete',
'question': question_id
})
else:
# send method not allowed error
abort(405)
# endpoint to add a question to the database
@app.route('/questions', methods=['POST'])
def add_question():
error = False
# ensure proper request method
if request.method == 'POST':
try:
# format data for database
new_question = Question(
question=request.json['question'],
answer=request.json['answer'],
category=request.json['category'],
difficulty=request.json['difficulty']
)
# stage data in database
db.session.add(new_question)
# commit data to database
db.session.commit()
except Exception:
# set error to true and log on the server
error = True
db.session.rollback()
print('Error: {}'.format(sys.exc_info()))
finally:
# close database session
db.session.close()
if error:
# send bad request error
abort(400)
else:
# if no error send success object and log on server
print('Added: {}'.format(new_question))
return jsonify({
'success': True,
'question': request.json
})
else:
# send method not allowed error
abort(405)
# endpoint to search for for questions in the database
@app.route('/questions/search', methods=['POST'])
def search_questions():
error = False
# ensure proper request method
if request.method == 'POST':
        # set search term from user request
        search_term = str(request.json['searchTerm'])
        # if the user submits something other than a text string, block it
        if type(search_term) is not str:  # note: str() above makes this check unreachable
# let them know their input is not processable
abort(422)
try:
# query database using user provided search term
query_results = Question.query.filter(
Question.question.ilike('%{}%'.format(search_term))).all()
questions = []
# get categories from database
categories = [category.type for category in Category.query.all()]
# format response data
for question in query_results:
_question_ = {
'id': question.id,
'question': question.question,
'answer': question.answer,
'category': question.category,
'difficulty': question.difficulty
}
questions.append(_question_)
except Exception:
# set error to true and log on the server
error = True
print('Error: {}'.format(sys.exc_info()))
finally:
if error:
# send bad request error
abort(400)
else:
# if no error send success object
return jsonify({
'success': True,
'questions': questions,
'total_questions': len(questions),
'current_category': ''
})
else:
# send method not allowed error
abort(405)
# endpoint to get questions by a specific category
@app.route('/category/<int:category_id>/questions', methods=['GET'])
def get_questions_by_category(category_id):
error = False
# ensure proper request method
if request.method == 'GET':
# if category id is not an integer
if type(category_id) is not int:
# let them know their input is not processable
abort(422)
try:
# get questions by user selected category
query = Question.query.filter_by(category=str(category_id)).all()
questions = []
# format response data
for question in query:
_question_ = {
'id': question.id,
'question': question.question,
'answer': question.answer,
'category': question.category,
'difficulty': question.difficulty
}
questions.append(_question_)
except Exception:
# set error to true and log on the server
error = True
print('Error: {}'.format(sys.exc_info()))
finally:
if error:
# send bad request error
abort(400)
else:
# if no error send success object
return jsonify({
'success': True,
'questions': questions,
'total_questions': len(questions),
'current_category': ''
})
else:
# send method not allowed error
abort(405)
# endpoint to initiate quiz
@app.route('/questions/quiz', methods=['POST'])
def quizzes():
error = False
# ensure proper request method
if request.method == 'POST':
try:
data = request.json
# get questions from any category
if data['quiz_category']['id'] == 0:
query = Question.query.all()
            # get questions from the user-specified category
else:
query = Question.query.filter_by(
category=str(int(data['quiz_category']['id'])+1)).all()
# randomly select new non previously selected question
previous_questions = data['previous_questions']
index = random.randint(0, len(query)-1)
potential_question = query[index]
selected = False
while selected is False:
if potential_question.id in previous_questions:
# reassign index if already used
index = random.randint(0, len(query)-1)
potential_question = query[index]
else:
selected = True
# set question
_question_ = potential_question
# format data
next_question = {
'id': _question_.id,
'question': _question_.question,
'answer': _question_.answer,
'category': _question_.category,
'difficulty': _question_.difficulty
}
except Exception:
# set error and log error on the server
error = True
print('Error: {}'.format(sys.exc_info()))
finally:
if error:
# send internal server error
abort(500)
else:
# if no error send success object
return jsonify({
'success': True,
'question': next_question
})
else:
# send method not allowed error
abort(405)
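# --- illustrative aside, not part of the original record --------------------
# The while-loop above can spin forever once every question has been used;
# sampling directly from the unseen candidates avoids that edge case:
def pick_unseen(questions, previous_ids):
    candidates = [q for q in questions if q.id not in previous_ids]
    return random.choice(candidates) if candidates else None
# -----------------------------------------------------------------------------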
# handle bad request errors
@app.errorhandler(400)
def bad_request(error):
return jsonify({
"success": False,
"error": 400,
"message": "Bad Request"
}), 400
# handle resource not found errors
@app.errorhandler(404)
def resource_not_found(error):
return jsonify({
"success": False,
"error": 404,
"message": "Resource Not Found"
}), 404
# handle method not allowed errors
@app.errorhandler(405)
def method_not_allowed(error):
return jsonify({
"success": False,
"error": 405,
"message": "Method Not Allowed"
}), 405
# handle unprocessable entity errors
@app.errorhandler(422)
def unprocessable_entity(error):
return jsonify({
"success": False,
"error": 422,
"message": "Unprocessable Entity"
}), 422
# handle internal server errors
@app.errorhandler(500)
def internal_server_error(error):
return jsonify({
"success": False,
"error": 500,
"message": "Internal Server Error"
}), 500
# Default port:
if __name__ == '__main__':
app.run()
|
normal
|
{
"blob_id": "b84a2093a51e57c448ee7b4f5a89d69dfb14b1b6",
"index": 4876,
"step-1": "<mask token>\n\n\[email protected]_request\ndef after_request(response):\n response.headers.add('Access-Control-Allow-Headers',\n 'Content-Type, Authorization, true')\n response.headers.add('Access-Control-Allow-Methods',\n 'GET, PATCH,PUT,POST, DELETE, OPTIONS')\n return response\n\n\n<mask token>\n\n\[email protected]('/questions/page/<int:page>', methods=['GET'])\ndef get_questions(page):\n error = False\n questions = []\n total_questions = 0\n if type(page) is not int:\n abort(422)\n if request.method == 'GET':\n try:\n categories = [category.type for category in Category.query.all()]\n if categories is None:\n abort(404)\n query = Question.query.paginate(page, per_page=10)\n total_questions += len(Question.query.all())\n if query is None:\n abort(404)\n if len(query.items) == 0:\n error = True\n results = query.items\n for question in results:\n _question_ = {'id': question.id, 'question': question.\n question, 'answer': question.answer, 'category':\n question.category, 'difficulty': question.difficulty}\n questions.append(_question_)\n except Exception:\n error = True\n print('Error: {}'.format(sys.exc_info()))\n finally:\n if error:\n abort(400)\n else:\n return jsonify({'success': True, 'questions': questions,\n 'total_questions': total_questions, 'categories':\n categories})\n else:\n abort(405)\n\n\[email protected]('/question/<int:question_id>', methods=['DELETE'])\ndef delete_question(question_id):\n error = False\n if request.method == 'DELETE':\n if type(question_id) is not int:\n abort(422)\n try:\n question = Question.query.get(question_id)\n db.session.delete(question)\n db.session.commit()\n except Exception:\n error = True\n print('Error: {}'.format(sys.exc_info()))\n finally:\n db.session.close()\n if error:\n abort(400)\n else:\n return jsonify({'success': True, 'method': 'Delete',\n 'question': question_id})\n else:\n abort(405)\n\n\[email protected]('/questions', methods=['POST'])\ndef add_question():\n error = False\n if request.method == 'POST':\n try:\n new_question = Question(question=request.json['question'],\n answer=request.json['answer'], category=request.json[\n 'category'], difficulty=request.json['difficulty'])\n db.session.add(new_question)\n db.session.commit()\n except Exception:\n error = True\n db.session.rollback()\n print('Error: {}'.format(sys.exc_info()))\n finally:\n db.session.close()\n if error:\n abort(400)\n else:\n print('Added: {}'.format(new_question))\n return jsonify({'success': True, 'question': request.json})\n else:\n abort(405)\n\n\n<mask token>\n\n\[email protected]('/questions/quiz', methods=['POST'])\ndef quizzes():\n error = False\n if request.method == 'POST':\n try:\n data = request.json\n if data['quiz_category']['id'] == 0:\n query = Question.query.all()\n else:\n query = Question.query.filter_by(category=str(int(data[\n 'quiz_category']['id']) + 1)).all()\n previous_questions = data['previous_questions']\n index = random.randint(0, len(query) - 1)\n potential_question = query[index]\n selected = False\n while selected is False:\n if potential_question.id in previous_questions:\n index = random.randint(0, len(query) - 1)\n potential_question = query[index]\n else:\n selected = True\n _question_ = potential_question\n next_question = {'id': _question_.id, 'question': _question_.\n question, 'answer': _question_.answer, 'category':\n _question_.category, 'difficulty': _question_.difficulty}\n except Exception:\n error = True\n print('Error: {}'.format(sys.exc_info()))\n finally:\n if error:\n abort(500)\n else:\n 
return jsonify({'success': True, 'question': next_question})\n else:\n abort(405)\n\n\n<mask token>\n\n\[email protected](405)\ndef method_not_allowed(error):\n return jsonify({'success': False, 'error': 405, 'message':\n 'Method Not Allowed'}), 405\n\n\[email protected](422)\ndef unprocessable_entity(error):\n return jsonify({'success': False, 'error': 422, 'message':\n 'Unprocessable Entity'}), 422\n\n\[email protected](500)\ndef internal_server_error(error):\n return jsonify({'success': False, 'error': 500, 'message':\n 'Internal Server Error'}), 500\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\[email protected]_request\ndef after_request(response):\n response.headers.add('Access-Control-Allow-Headers',\n 'Content-Type, Authorization, true')\n response.headers.add('Access-Control-Allow-Methods',\n 'GET, PATCH,PUT,POST, DELETE, OPTIONS')\n return response\n\n\[email protected]('/categories', methods=['GET'])\ndef get_categories():\n categories = [category.type for category in Category.query.all()]\n return jsonify({'categories': categories, 'success': True})\n\n\[email protected]('/questions/page/<int:page>', methods=['GET'])\ndef get_questions(page):\n error = False\n questions = []\n total_questions = 0\n if type(page) is not int:\n abort(422)\n if request.method == 'GET':\n try:\n categories = [category.type for category in Category.query.all()]\n if categories is None:\n abort(404)\n query = Question.query.paginate(page, per_page=10)\n total_questions += len(Question.query.all())\n if query is None:\n abort(404)\n if len(query.items) == 0:\n error = True\n results = query.items\n for question in results:\n _question_ = {'id': question.id, 'question': question.\n question, 'answer': question.answer, 'category':\n question.category, 'difficulty': question.difficulty}\n questions.append(_question_)\n except Exception:\n error = True\n print('Error: {}'.format(sys.exc_info()))\n finally:\n if error:\n abort(400)\n else:\n return jsonify({'success': True, 'questions': questions,\n 'total_questions': total_questions, 'categories':\n categories})\n else:\n abort(405)\n\n\[email protected]('/question/<int:question_id>', methods=['DELETE'])\ndef delete_question(question_id):\n error = False\n if request.method == 'DELETE':\n if type(question_id) is not int:\n abort(422)\n try:\n question = Question.query.get(question_id)\n db.session.delete(question)\n db.session.commit()\n except Exception:\n error = True\n print('Error: {}'.format(sys.exc_info()))\n finally:\n db.session.close()\n if error:\n abort(400)\n else:\n return jsonify({'success': True, 'method': 'Delete',\n 'question': question_id})\n else:\n abort(405)\n\n\[email protected]('/questions', methods=['POST'])\ndef add_question():\n error = False\n if request.method == 'POST':\n try:\n new_question = Question(question=request.json['question'],\n answer=request.json['answer'], category=request.json[\n 'category'], difficulty=request.json['difficulty'])\n db.session.add(new_question)\n db.session.commit()\n except Exception:\n error = True\n db.session.rollback()\n print('Error: {}'.format(sys.exc_info()))\n finally:\n db.session.close()\n if error:\n abort(400)\n else:\n print('Added: {}'.format(new_question))\n return jsonify({'success': True, 'question': request.json})\n else:\n abort(405)\n\n\[email protected]('/questions/search', methods=['POST'])\ndef search_questions():\n error = False\n if request.method == 'POST':\n search_term = str(request.json['searchTerm'])\n if type(search_term) is not str:\n abort(422)\n try:\n query_results = Question.query.filter(Question.question.ilike(\n '%{}%'.format(search_term))).all()\n questions = []\n categories = [category.type for category in Category.query.all()]\n for question in query_results:\n _question_ = {'id': question.id, 'question': question.\n question, 'answer': question.answer, 'category':\n question.category, 'difficulty': question.difficulty}\n questions.append(_question_)\n except Exception:\n error = True\n print('Error: {}'.format(sys.exc_info()))\n finally:\n if error:\n abort(400)\n else:\n return jsonify({'success': True, 'questions': 
questions,\n 'total_questions': len(questions), 'current_category': ''})\n else:\n abort(405)\n\n\n<mask token>\n\n\[email protected]('/questions/quiz', methods=['POST'])\ndef quizzes():\n error = False\n if request.method == 'POST':\n try:\n data = request.json\n if data['quiz_category']['id'] == 0:\n query = Question.query.all()\n else:\n query = Question.query.filter_by(category=str(int(data[\n 'quiz_category']['id']) + 1)).all()\n previous_questions = data['previous_questions']\n index = random.randint(0, len(query) - 1)\n potential_question = query[index]\n selected = False\n while selected is False:\n if potential_question.id in previous_questions:\n index = random.randint(0, len(query) - 1)\n potential_question = query[index]\n else:\n selected = True\n _question_ = potential_question\n next_question = {'id': _question_.id, 'question': _question_.\n question, 'answer': _question_.answer, 'category':\n _question_.category, 'difficulty': _question_.difficulty}\n except Exception:\n error = True\n print('Error: {}'.format(sys.exc_info()))\n finally:\n if error:\n abort(500)\n else:\n return jsonify({'success': True, 'question': next_question})\n else:\n abort(405)\n\n\n<mask token>\n\n\[email protected](405)\ndef method_not_allowed(error):\n return jsonify({'success': False, 'error': 405, 'message':\n 'Method Not Allowed'}), 405\n\n\[email protected](422)\ndef unprocessable_entity(error):\n return jsonify({'success': False, 'error': 422, 'message':\n 'Unprocessable Entity'}), 422\n\n\[email protected](500)\ndef internal_server_error(error):\n return jsonify({'success': False, 'error': 500, 'message':\n 'Internal Server Error'}), 500\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\[email protected]_request\ndef after_request(response):\n response.headers.add('Access-Control-Allow-Headers',\n 'Content-Type, Authorization, true')\n response.headers.add('Access-Control-Allow-Methods',\n 'GET, PATCH,PUT,POST, DELETE, OPTIONS')\n return response\n\n\[email protected]('/categories', methods=['GET'])\ndef get_categories():\n categories = [category.type for category in Category.query.all()]\n return jsonify({'categories': categories, 'success': True})\n\n\[email protected]('/questions/page/<int:page>', methods=['GET'])\ndef get_questions(page):\n error = False\n questions = []\n total_questions = 0\n if type(page) is not int:\n abort(422)\n if request.method == 'GET':\n try:\n categories = [category.type for category in Category.query.all()]\n if categories is None:\n abort(404)\n query = Question.query.paginate(page, per_page=10)\n total_questions += len(Question.query.all())\n if query is None:\n abort(404)\n if len(query.items) == 0:\n error = True\n results = query.items\n for question in results:\n _question_ = {'id': question.id, 'question': question.\n question, 'answer': question.answer, 'category':\n question.category, 'difficulty': question.difficulty}\n questions.append(_question_)\n except Exception:\n error = True\n print('Error: {}'.format(sys.exc_info()))\n finally:\n if error:\n abort(400)\n else:\n return jsonify({'success': True, 'questions': questions,\n 'total_questions': total_questions, 'categories':\n categories})\n else:\n abort(405)\n\n\[email protected]('/question/<int:question_id>', methods=['DELETE'])\ndef delete_question(question_id):\n error = False\n if request.method == 'DELETE':\n if type(question_id) is not int:\n abort(422)\n try:\n question = Question.query.get(question_id)\n db.session.delete(question)\n db.session.commit()\n except Exception:\n error = True\n print('Error: {}'.format(sys.exc_info()))\n finally:\n db.session.close()\n if error:\n abort(400)\n else:\n return jsonify({'success': True, 'method': 'Delete',\n 'question': question_id})\n else:\n abort(405)\n\n\[email protected]('/questions', methods=['POST'])\ndef add_question():\n error = False\n if request.method == 'POST':\n try:\n new_question = Question(question=request.json['question'],\n answer=request.json['answer'], category=request.json[\n 'category'], difficulty=request.json['difficulty'])\n db.session.add(new_question)\n db.session.commit()\n except Exception:\n error = True\n db.session.rollback()\n print('Error: {}'.format(sys.exc_info()))\n finally:\n db.session.close()\n if error:\n abort(400)\n else:\n print('Added: {}'.format(new_question))\n return jsonify({'success': True, 'question': request.json})\n else:\n abort(405)\n\n\[email protected]('/questions/search', methods=['POST'])\ndef search_questions():\n error = False\n if request.method == 'POST':\n search_term = str(request.json['searchTerm'])\n if type(search_term) is not str:\n abort(422)\n try:\n query_results = Question.query.filter(Question.question.ilike(\n '%{}%'.format(search_term))).all()\n questions = []\n categories = [category.type for category in Category.query.all()]\n for question in query_results:\n _question_ = {'id': question.id, 'question': question.\n question, 'answer': question.answer, 'category':\n question.category, 'difficulty': question.difficulty}\n questions.append(_question_)\n except Exception:\n error = True\n print('Error: {}'.format(sys.exc_info()))\n finally:\n if error:\n abort(400)\n else:\n return jsonify({'success': True, 'questions': 
questions,\n 'total_questions': len(questions), 'current_category': ''})\n else:\n abort(405)\n\n\n<mask token>\n\n\[email protected]('/questions/quiz', methods=['POST'])\ndef quizzes():\n error = False\n if request.method == 'POST':\n try:\n data = request.json\n if data['quiz_category']['id'] == 0:\n query = Question.query.all()\n else:\n query = Question.query.filter_by(category=str(int(data[\n 'quiz_category']['id']) + 1)).all()\n previous_questions = data['previous_questions']\n index = random.randint(0, len(query) - 1)\n potential_question = query[index]\n selected = False\n while selected is False:\n if potential_question.id in previous_questions:\n index = random.randint(0, len(query) - 1)\n potential_question = query[index]\n else:\n selected = True\n _question_ = potential_question\n next_question = {'id': _question_.id, 'question': _question_.\n question, 'answer': _question_.answer, 'category':\n _question_.category, 'difficulty': _question_.difficulty}\n except Exception:\n error = True\n print('Error: {}'.format(sys.exc_info()))\n finally:\n if error:\n abort(500)\n else:\n return jsonify({'success': True, 'question': next_question})\n else:\n abort(405)\n\n\[email protected](400)\ndef bad_request(error):\n return jsonify({'success': False, 'error': 400, 'message': 'Bad Request'}\n ), 400\n\n\[email protected](404)\ndef resource_not_found(error):\n return jsonify({'success': False, 'error': 404, 'message':\n 'Resource Not Found'}), 404\n\n\[email protected](405)\ndef method_not_allowed(error):\n return jsonify({'success': False, 'error': 405, 'message':\n 'Method Not Allowed'}), 405\n\n\[email protected](422)\ndef unprocessable_entity(error):\n return jsonify({'success': False, 'error': 422, 'message':\n 'Unprocessable Entity'}), 422\n\n\[email protected](500)\ndef internal_server_error(error):\n return jsonify({'success': False, 'error': 500, 'message':\n 'Internal Server Error'}), 500\n\n\n<mask token>\n",
"step-4": "<mask token>\napp.config.from_object('config')\ndb.init_app(app)\n<mask token>\n\n\[email protected]_request\ndef after_request(response):\n response.headers.add('Access-Control-Allow-Headers',\n 'Content-Type, Authorization, true')\n response.headers.add('Access-Control-Allow-Methods',\n 'GET, PATCH,PUT,POST, DELETE, OPTIONS')\n return response\n\n\[email protected]('/categories', methods=['GET'])\ndef get_categories():\n categories = [category.type for category in Category.query.all()]\n return jsonify({'categories': categories, 'success': True})\n\n\[email protected]('/questions/page/<int:page>', methods=['GET'])\ndef get_questions(page):\n error = False\n questions = []\n total_questions = 0\n if type(page) is not int:\n abort(422)\n if request.method == 'GET':\n try:\n categories = [category.type for category in Category.query.all()]\n if categories is None:\n abort(404)\n query = Question.query.paginate(page, per_page=10)\n total_questions += len(Question.query.all())\n if query is None:\n abort(404)\n if len(query.items) == 0:\n error = True\n results = query.items\n for question in results:\n _question_ = {'id': question.id, 'question': question.\n question, 'answer': question.answer, 'category':\n question.category, 'difficulty': question.difficulty}\n questions.append(_question_)\n except Exception:\n error = True\n print('Error: {}'.format(sys.exc_info()))\n finally:\n if error:\n abort(400)\n else:\n return jsonify({'success': True, 'questions': questions,\n 'total_questions': total_questions, 'categories':\n categories})\n else:\n abort(405)\n\n\[email protected]('/question/<int:question_id>', methods=['DELETE'])\ndef delete_question(question_id):\n error = False\n if request.method == 'DELETE':\n if type(question_id) is not int:\n abort(422)\n try:\n question = Question.query.get(question_id)\n db.session.delete(question)\n db.session.commit()\n except Exception:\n error = True\n print('Error: {}'.format(sys.exc_info()))\n finally:\n db.session.close()\n if error:\n abort(400)\n else:\n return jsonify({'success': True, 'method': 'Delete',\n 'question': question_id})\n else:\n abort(405)\n\n\[email protected]('/questions', methods=['POST'])\ndef add_question():\n error = False\n if request.method == 'POST':\n try:\n new_question = Question(question=request.json['question'],\n answer=request.json['answer'], category=request.json[\n 'category'], difficulty=request.json['difficulty'])\n db.session.add(new_question)\n db.session.commit()\n except Exception:\n error = True\n db.session.rollback()\n print('Error: {}'.format(sys.exc_info()))\n finally:\n db.session.close()\n if error:\n abort(400)\n else:\n print('Added: {}'.format(new_question))\n return jsonify({'success': True, 'question': request.json})\n else:\n abort(405)\n\n\[email protected]('/questions/search', methods=['POST'])\ndef search_questions():\n error = False\n if request.method == 'POST':\n search_term = str(request.json['searchTerm'])\n if type(search_term) is not str:\n abort(422)\n try:\n query_results = Question.query.filter(Question.question.ilike(\n '%{}%'.format(search_term))).all()\n questions = []\n categories = [category.type for category in Category.query.all()]\n for question in query_results:\n _question_ = {'id': question.id, 'question': question.\n question, 'answer': question.answer, 'category':\n question.category, 'difficulty': question.difficulty}\n questions.append(_question_)\n except Exception:\n error = True\n print('Error: {}'.format(sys.exc_info()))\n finally:\n if error:\n 
abort(400)\n else:\n return jsonify({'success': True, 'questions': questions,\n 'total_questions': len(questions), 'current_category': ''})\n else:\n abort(405)\n\n\[email protected]('/category/<int:category_id>/questions', methods=['GET'])\ndef get_questions_by_category(category_id):\n error = False\n if request.method == 'GET':\n if type(category_id) is not int:\n abort(422)\n try:\n query = Question.query.filter_by(category=str(category_id)).all()\n questions = []\n for question in query:\n _question_ = {'id': question.id, 'question': question.\n question, 'answer': question.answer, 'category':\n question.category, 'difficulty': question.difficulty}\n questions.append(_question_)\n except Exception:\n error = True\n print('Error: {}'.format(sys.exc_info()))\n finally:\n if error:\n abort(400)\n else:\n return jsonify({'success': True, 'questions': questions,\n 'total_questions': len(questions), 'current_category': ''})\n else:\n abort(405)\n\n\[email protected]('/questions/quiz', methods=['POST'])\ndef quizzes():\n error = False\n if request.method == 'POST':\n try:\n data = request.json\n if data['quiz_category']['id'] == 0:\n query = Question.query.all()\n else:\n query = Question.query.filter_by(category=str(int(data[\n 'quiz_category']['id']) + 1)).all()\n previous_questions = data['previous_questions']\n index = random.randint(0, len(query) - 1)\n potential_question = query[index]\n selected = False\n while selected is False:\n if potential_question.id in previous_questions:\n index = random.randint(0, len(query) - 1)\n potential_question = query[index]\n else:\n selected = True\n _question_ = potential_question\n next_question = {'id': _question_.id, 'question': _question_.\n question, 'answer': _question_.answer, 'category':\n _question_.category, 'difficulty': _question_.difficulty}\n except Exception:\n error = True\n print('Error: {}'.format(sys.exc_info()))\n finally:\n if error:\n abort(500)\n else:\n return jsonify({'success': True, 'question': next_question})\n else:\n abort(405)\n\n\[email protected](400)\ndef bad_request(error):\n return jsonify({'success': False, 'error': 400, 'message': 'Bad Request'}\n ), 400\n\n\[email protected](404)\ndef resource_not_found(error):\n return jsonify({'success': False, 'error': 404, 'message':\n 'Resource Not Found'}), 404\n\n\[email protected](405)\ndef method_not_allowed(error):\n return jsonify({'success': False, 'error': 405, 'message':\n 'Method Not Allowed'}), 405\n\n\[email protected](422)\ndef unprocessable_entity(error):\n return jsonify({'success': False, 'error': 422, 'message':\n 'Unprocessable Entity'}), 422\n\n\[email protected](500)\ndef internal_server_error(error):\n return jsonify({'success': False, 'error': 500, 'message':\n 'Internal Server Error'}), 500\n\n\nif __name__ == '__main__':\n app.run()\n",
"step-5": "import os\nimport sys\nfrom flask import Flask, request, abort, flash, jsonify, Response\nfrom flask_sqlalchemy import SQLAlchemy\nfrom flask_cors import CORS\nfrom flask_migrate import Migrate\nimport random\nimport unittest\n\nfrom models import db, Question, Category\n\n# set the number of pages fpr pagination\nQUESTIONS_PER_PAGE = 10\n\n# create and configure the app\napp = Flask(__name__)\napp.config.from_object('config')\ndb.init_app(app)\nmigrate = Migrate(app, db)\n\n# set up cors for the application\ncors = CORS(app, resources={r'/': {'origins': '*'}})\n\n# to set Access-Control-Allow Headers and Methods\[email protected]_request\ndef after_request(response):\n response.headers.add('Access-Control-Allow-Headers',\n 'Content-Type, Authorization, true')\n response.headers.add('Access-Control-Allow-Methods',\n 'GET, PATCH,PUT,POST, DELETE, OPTIONS')\n return response\n\n# endpoint to handle GET requests for all available categories\[email protected]('/categories', methods=['GET'])\ndef get_categories():\n categories = [category.type for category in Category.query.all()]\n return jsonify({'categories': categories, 'success': True})\n\n# endpoint to handle GET requests for questions with pagination\[email protected]('/questions/page/<int:page>', methods=['GET'])\ndef get_questions(page):\n error = False\n questions = []\n total_questions = 0\n # if question id is not an integer\n if type(page) is not int:\n # let them know their input is not processable\n abort(422)\n # ensure proper request method\n if request.method == 'GET':\n try:\n # query for all categories\n categories = [category.type for category in Category.query.all()]\n if categories is None:\n # let the user know that no resource was found\n abort(404)\n\n query = Question.query.paginate(page, per_page=10)\n total_questions += len(Question.query.all())\n if query is None:\n # let the user know that no resource was found\n abort(404)\n if len(query.items) == 0:\n # let the user know that no resource was found\n error = True\n\n results = query.items\n # format data\n for question in results:\n _question_ = {\n 'id': question.id,\n 'question': question.question,\n 'answer': question.answer,\n 'category': question.category,\n 'difficulty': question.difficulty\n }\n questions.append(_question_)\n except Exception:\n # set error to true and log on the server\n error = True\n print('Error: {}'.format(sys.exc_info()))\n finally:\n\n if error:\n # let the user know their request was not successful\n abort(400)\n else:\n # if successful send back success response\n return jsonify({\n 'success': True,\n 'questions': questions,\n 'total_questions': total_questions,\n 'categories': categories\n })\n else:\n # send method not allowed error\n abort(405)\n\n\n# endpoint to delete a question from the database\[email protected]('/question/<int:question_id>', methods=['DELETE'])\ndef delete_question(question_id):\n error = False\n\n # ensure proper request method\n if request.method == 'DELETE':\n\n # if question id is not an integer\n if type(question_id) is not int:\n # let them know their input is not processable\n abort(422)\n\n try:\n # get user selected question from database\n question = Question.query.get(question_id)\n # stage question delete\n db.session.delete(question)\n # commit deletion to the database\n db.session.commit()\n except Exception:\n # set error to true and log on the server\n error = True\n print('Error: {}'.format(sys.exc_info()))\n\n finally:\n # close database session\n db.session.close()\n\n if 
error:\n # send bad request error\n abort(400)\n\n else:\n # if no error send success object and log on server\n return jsonify({\n 'success': True,\n 'method': 'Delete',\n 'question': question_id\n })\n else:\n # send method not allowed error\n abort(405)\n\n\n# endpoint to add a question to the database\[email protected]('/questions', methods=['POST'])\ndef add_question():\n error = False\n\n # ensure proper request method\n if request.method == 'POST':\n try:\n # format data for database\n new_question = Question(\n question=request.json['question'],\n answer=request.json['answer'],\n category=request.json['category'],\n difficulty=request.json['difficulty']\n )\n # stage data in database\n db.session.add(new_question)\n # commit data to database\n db.session.commit()\n\n except Exception:\n # set error to true and log on the server\n error = True\n db.session.rollback()\n print('Error: {}'.format(sys.exc_info()))\n\n finally:\n # close database session\n db.session.close()\n\n if error:\n # send bad request error\n abort(400)\n else:\n # if no error send success object and log on server\n print('Added: {}'.format(new_question))\n return jsonify({\n 'success': True,\n 'question': request.json\n\n })\n else:\n # send method not allowed error\n abort(405)\n\n\n# endpoint to search for for questions in the database\[email protected]('/questions/search', methods=['POST'])\ndef search_questions():\n error = False\n\n # ensure proper request method\n if request.method == 'POST':\n\n # set esrch term from user request\n search_term = str(request.json['searchTerm'])\n # if the user submits something other than a string of text block it\n if type(search_term) is not str:\n # let them know their input is not processable\n abort(422)\n\n try:\n # query database using user provided search term\n query_results = Question.query.filter(\n Question.question.ilike('%{}%'.format(search_term))).all()\n questions = []\n # get categories from database\n categories = [category.type for category in Category.query.all()]\n # format response data\n for question in query_results:\n _question_ = {\n 'id': question.id,\n 'question': question.question,\n 'answer': question.answer,\n 'category': question.category,\n 'difficulty': question.difficulty\n }\n questions.append(_question_)\n\n except Exception:\n # set error to true and log on the server\n error = True\n print('Error: {}'.format(sys.exc_info()))\n\n finally:\n if error:\n # send bad request error\n abort(400)\n else:\n # if no error send success object\n return jsonify({\n 'success': True,\n 'questions': questions,\n 'total_questions': len(questions),\n 'current_category': ''\n })\n else:\n # send method not allowed error\n abort(405)\n\n# endpoint to get questions by a specific category\[email protected]('/category/<int:category_id>/questions', methods=['GET'])\ndef get_questions_by_category(category_id):\n error = False\n\n # ensure proper request method\n if request.method == 'GET':\n\n # if category id is not an integer\n if type(category_id) is not int:\n # let them know their input is not processable\n abort(422)\n\n try:\n # get questions by user selected category\n query = Question.query.filter_by(category=str(category_id)).all()\n questions = []\n # format response data\n for question in query:\n _question_ = {\n 'id': question.id,\n 'question': question.question,\n 'answer': question.answer,\n 'category': question.category,\n 'difficulty': question.difficulty\n }\n questions.append(_question_)\n except Exception:\n # set error to true and log on 
the server\n error = True\n print('Error: {}'.format(sys.exc_info()))\n\n finally:\n if error:\n # send bad request error\n abort(400)\n else:\n # if no error send success object\n return jsonify({\n 'success': True,\n 'questions': questions,\n 'total_questions': len(questions),\n 'current_category': ''\n })\n else:\n # send method not allowed error\n abort(405)\n\n# endpoint to initiate quiz\[email protected]('/questions/quiz', methods=['POST'])\ndef quizzes():\n error = False\n\n # ensure proper request method\n if request.method == 'POST':\n\n try:\n data = request.json\n # get questions from any category\n if data['quiz_category']['id'] == 0:\n query = Question.query.all()\n # get questions from user specified caetgory\n else:\n query = Question.query.filter_by(\n category=str(int(data['quiz_category']['id'])+1)).all()\n # randomly select new non previously selected question\n previous_questions = data['previous_questions']\n index = random.randint(0, len(query)-1)\n potential_question = query[index]\n selected = False\n while selected is False:\n if potential_question.id in previous_questions:\n # reassign index if already used\n index = random.randint(0, len(query)-1)\n potential_question = query[index]\n else:\n selected = True\n # set question\n _question_ = potential_question\n # format data\n next_question = {\n 'id': _question_.id,\n 'question': _question_.question,\n 'answer': _question_.answer,\n 'category': _question_.category,\n 'difficulty': _question_.difficulty\n }\n except Exception:\n # set error and log error on the server\n error = True\n print('Error: {}'.format(sys.exc_info()))\n\n finally:\n\n if error:\n # send internal server error\n abort(500)\n else:\n # if no error send success object\n return jsonify({\n 'success': True,\n 'question': next_question\n })\n else:\n # send method not allowed error\n abort(405)\n\n# handle bad request errors\[email protected](400)\ndef bad_request(error):\n return jsonify({\n \"success\": False,\n \"error\": 400,\n \"message\": \"Bad Request\"\n }), 400\n\n# handle resource not found errors\[email protected](404)\ndef resource_not_found(error):\n return jsonify({\n \"success\": False,\n \"error\": 404,\n \"message\": \"Resource Not Found\"\n }), 404\n\n# handle resource not found errors\[email protected](405)\ndef method_not_allowed(error):\n return jsonify({\n \"success\": False,\n \"error\": 405,\n \"message\": \"Method Not Allowed\"\n }), 405\n\n# handle unprocessable entity errors\[email protected](422)\ndef unprocessable_entity(error):\n return jsonify({\n \"success\": False,\n \"error\": 422,\n \"message\": \"Unprocessable Entity\"\n }), 422\n\n# handle internal server errors\[email protected](500)\ndef internal_server_error(error):\n return jsonify({\n \"success\": False,\n \"error\": 500,\n \"message\": \"Internal Server Error\"\n }), 500\n\n\n# Default port:\nif __name__ == '__main__':\n app.run()\n",
"step-ids": [
8,
10,
12,
14,
17
]
}
|
[
8,
10,
12,
14,
17
] |
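A minimal sketch of exercising the paginated endpoint from the record above. The host, port, and seeded database are assumptions; requests is a third-party client library, not part of the app.

import requests

resp = requests.get('http://127.0.0.1:5000/questions/page/1')
payload = resp.json()
print(payload['total_questions'], len(payload['questions']))  # at most 10 per page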
from django.shortcuts import render
from django.template import loader
# Create your views here.
from django.http import HttpResponse
from .models import Student
def index(request):
student_objects = Student.objects.all()
context = {"students": student_objects}
return render(request, 'student_list.html', context)
def addstudent(request):
context = {}
return render(request, 'add_student.html', context)
def newstudent(request):
	student_entered_name = request.GET.get('name')  # note: creating via GET; a POST form would be more idiomatic
	Student.objects.create(name=student_entered_name)
	print(student_entered_name)
	context = {}  # note: no students passed, so the list template renders empty
	return render(request, 'student_list.html', context)
|
normal
|
{
"blob_id": "00e8e0b5aeccd2a67f6cfdad63012a0d8b066e6f",
"index": 9551,
"step-1": "<mask token>\n\n\ndef addstudent(request):\n context = {}\n return render(request, 'add_student.html', context)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef index(request):\n student_objects = Student.objects.all()\n context = {'students': student_objects}\n return render(request, 'student_list.html', context)\n\n\ndef addstudent(request):\n context = {}\n return render(request, 'add_student.html', context)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef index(request):\n student_objects = Student.objects.all()\n context = {'students': student_objects}\n return render(request, 'student_list.html', context)\n\n\ndef addstudent(request):\n context = {}\n return render(request, 'add_student.html', context)\n\n\ndef newstudent(request):\n student_entered_name = request.GET.get('name')\n Student.objects.create(name=student_entered_name)\n print(student_entered_name)\n context = {}\n return render(request, 'student_list.html', context)\n",
"step-4": "from django.shortcuts import render\nfrom django.template import loader\nfrom django.http import HttpResponse\nfrom .models import Student\n\n\ndef index(request):\n student_objects = Student.objects.all()\n context = {'students': student_objects}\n return render(request, 'student_list.html', context)\n\n\ndef addstudent(request):\n context = {}\n return render(request, 'add_student.html', context)\n\n\ndef newstudent(request):\n student_entered_name = request.GET.get('name')\n Student.objects.create(name=student_entered_name)\n print(student_entered_name)\n context = {}\n return render(request, 'student_list.html', context)\n",
"step-5": "from django.shortcuts import render\nfrom django.template import loader\n\n# Create your views here.\n\nfrom django.http import HttpResponse\n\nfrom .models import Student\n\ndef index(request):\n\tstudent_objects = Student.objects.all()\n\tcontext = {\"students\": student_objects}\n\treturn render(request, 'student_list.html', context)\n\ndef addstudent(request):\n\tcontext = {}\n\treturn render(request, 'add_student.html', context)\n\ndef newstudent(request):\n\tstudent_entered_name = request.GET.get('name')\n\tStudent.objects.create(name=student_entered_name)\n\tprint(student_entered_name)\n\tcontext = {}\n\treturn render(request, 'student_list.html', context)\n\n\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
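For context, these views expect to be reached by plain GET requests; a plausible urls.py wiring for the app could look like the sketch below (route paths and names are assumptions for illustration):

from django.urls import path
from . import views

urlpatterns = [
    path('', views.index, name='index'),
    path('add/', views.addstudent, name='addstudent'),
    path('new/', views.newstudent, name='newstudent'),
]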
import bisect
import sys
input = sys.stdin.readline
N = int(input())
A = [int(input()) for _ in range(N)]
dp = [float('inf')] * (N + 1)
for a in A[::-1]:
    idx = bisect.bisect_right(dp, a)
    dp[idx] = a
ans = 0
for n in dp:
    if n != float('inf'):
        ans += 1
print(ans)
|
normal
|
{
"blob_id": "dfe79d2f4bf4abc1d04035cf4556237a53c01122",
"index": 6913,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor a in A[::-1]:\n idx = bisect.bisect_right(dp, a)\n dp[idx] = a\n<mask token>\nfor n in dp:\n if n != float('inf'):\n ans += 1\nprint(ans)\n",
"step-3": "<mask token>\ninput = sys.stdin.readline\nN = int(input())\nA = [int(input()) for _ in range(N)]\ndp = [float('inf')] * (N + 1)\nfor a in A[::-1]:\n idx = bisect.bisect_right(dp, a)\n dp[idx] = a\nans = 0\nfor n in dp:\n if n != float('inf'):\n ans += 1\nprint(ans)\n",
"step-4": "import bisect\nimport sys\ninput = sys.stdin.readline\nN = int(input())\nA = [int(input()) for _ in range(N)]\ndp = [float('inf')] * (N + 1)\nfor a in A[::-1]:\n idx = bisect.bisect_right(dp, a)\n dp[idx] = a\nans = 0\nfor n in dp:\n if n != float('inf'):\n ans += 1\nprint(ans)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
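The bisect solution above is a patience-sorting variant run over the reversed input: dp[k] always holds the smallest possible tail of a non-decreasing subsequence of length k+1 of the reversed array, so the count of finite entries equals the length of the longest non-increasing subsequence of A -- equivalently, the minimum number of strictly increasing sequences needed to cover A. A quick trace of the same core without the stdin plumbing:

import bisect

A = [2, 1, 4, 5, 3]
dp = [float('inf')] * (len(A) + 1)
for a in A[::-1]:
    dp[bisect.bisect_right(dp, a)] = a
print(sum(n != float('inf') for n in dp))  # 2, matching e.g. the non-increasing subsequence (4, 3)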
#!/usr/bin/env python
import os, sys, json
sys.path.append(os.path.join(os.path.dirname(__file__), '..', '..', 'python_task_helper', 'files'))
from task_helper import TaskHelper


hosts_file = open("/etc/hosts", "r").read()
resolv_file = open("/etc/resolv.conf", "r").read()

output = hosts_file + resolv_file

class Generate(TaskHelper):
    def task(self, args):
        return {'result': output}

if __name__ == '__main__':
    Generate().run()
|
normal
|
{
"blob_id": "24813e03de05058925a42847042157fa65450d21",
"index": 3773,
"step-1": "<mask token>\n\n\nclass Generate(TaskHelper):\n\n def task(self, args):\n return {'result': output}\n\n\n<mask token>\n",
"step-2": "<mask token>\nsys.path.append(os.path.join(os.path.dirname(__file__), '..', '..',\n 'python_task_helper', 'files'))\n<mask token>\n\n\nclass Generate(TaskHelper):\n\n def task(self, args):\n return {'result': output}\n\n\nif __name__ == '__main__':\n Generate().run()\n",
"step-3": "<mask token>\nsys.path.append(os.path.join(os.path.dirname(__file__), '..', '..',\n 'python_task_helper', 'files'))\n<mask token>\nhosts_file = open('/etc/hosts', 'r').read()\nresolv_file = open('/etc/resolv.conf', 'r').read()\noutput = hosts_file + resolv_file\n\n\nclass Generate(TaskHelper):\n\n def task(self, args):\n return {'result': output}\n\n\nif __name__ == '__main__':\n Generate().run()\n",
"step-4": "import os, sys, json\nsys.path.append(os.path.join(os.path.dirname(__file__), '..', '..',\n 'python_task_helper', 'files'))\nfrom task_helper import TaskHelper\nhosts_file = open('/etc/hosts', 'r').read()\nresolv_file = open('/etc/resolv.conf', 'r').read()\noutput = hosts_file + resolv_file\n\n\nclass Generate(TaskHelper):\n\n def task(self, args):\n return {'result': output}\n\n\nif __name__ == '__main__':\n Generate().run()\n",
"step-5": "#!/usr/bin/env python\nimport os, sys, json\nsys.path.append(os.path.join(os.path.dirname(__file__), '..', '..', 'python_task_helper', 'files'))\nfrom task_helper import TaskHelper\n\n\nhosts_file = open(\"/etc/hosts\", \"r\").read()\nresolv_file = open(\"/etc/resolv.conf\", \"r\").read()\n\noutput = hosts_file + resolv_file\n\nclass Generate(TaskHelper):\n def task(self, args):\n return {'result': output}\n\nif __name__ == '__main__':\n Generate().run()",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
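TaskHelper here looks like Puppet Bolt's python_task_helper wrapper, whose run() reads task parameters as JSON on stdin and prints the dict returned by task() as JSON on stdout. Assuming that contract, the task's output has this shape (the file contents below are stand-ins):

import json

hosts_file = '127.0.0.1 localhost\n'   # stand-in for /etc/hosts
resolv_file = 'nameserver 8.8.8.8\n'   # stand-in for /etc/resolv.conf
print(json.dumps({'result': hosts_file + resolv_file}))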
# -*- coding: utf-8 -*-

from django.db import models


# Create your models here.


class Test(models.Model):
    name = models.CharField(max_length=20)

    def __unicode__(self):
        return self.name


class Contact(models.Model):
    GENDER_TYPES = (
        ('M', u'男'),
        ('F', u'女'),
        ('X', u'不告诉你'),
    )
    name = models.CharField(u'姓名', max_length=20)
    age = models.IntegerField(u'年龄', default=0)
    gender = models.CharField(u'性别', max_length=1, null=False, blank=False, choices=GENDER_TYPES, default='X')
    email = models.EmailField()
    tele = models.CharField(u'电话', max_length=20)
    address = models.CharField(u'地址', max_length=200)
    postcode = models.CharField(u'邮政编码', max_length=6)
    notes = models.CharField(u'备注', max_length=200)

    def __unicode__(self):
        return self.name


class Tag(models.Model):
    contact = models.ForeignKey(Contact)
    name = models.CharField(max_length=50)

    def __unicode__(self):
        return self.name
|
normal
|
{
"blob_id": "514a3fc312d36e6f9b601ede7f7a3940c138d39a",
"index": 2000,
"step-1": "<mask token>\n\n\nclass Contact(models.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def __unicode__(self):\n return self.name\n\n\nclass Tag(models.Model):\n contact = models.ForeignKey(Contact)\n name = models.CharField(max_length=50)\n\n def __unicode__(self):\n return self.name\n",
"step-2": "<mask token>\n\n\nclass Test(models.Model):\n <mask token>\n\n def __unicode__(self):\n return self.name\n\n\nclass Contact(models.Model):\n GENDER_TYPES = ('M', u'男'), ('F', u'女'), ('X', u'不告诉你')\n name = models.CharField(u'姓名', max_length=20)\n age = models.IntegerField(u'年龄', default=0)\n gender = models.CharField(u'性别', max_length=1, null=False, blank=False,\n choices=GENDER_TYPES, default='X')\n email = models.EmailField()\n tele = models.CharField(u'电话', max_length=20)\n address = models.CharField(u'地址', max_length=200)\n postcode = models.CharField(u'邮政编码', max_length=6)\n notes = models.CharField(u'备注', max_length=200)\n\n def __unicode__(self):\n return self.name\n\n\nclass Tag(models.Model):\n contact = models.ForeignKey(Contact)\n name = models.CharField(max_length=50)\n\n def __unicode__(self):\n return self.name\n",
"step-3": "<mask token>\n\n\nclass Test(models.Model):\n name = models.CharField(max_length=20)\n\n def __unicode__(self):\n return self.name\n\n\nclass Contact(models.Model):\n GENDER_TYPES = ('M', u'男'), ('F', u'女'), ('X', u'不告诉你')\n name = models.CharField(u'姓名', max_length=20)\n age = models.IntegerField(u'年龄', default=0)\n gender = models.CharField(u'性别', max_length=1, null=False, blank=False,\n choices=GENDER_TYPES, default='X')\n email = models.EmailField()\n tele = models.CharField(u'电话', max_length=20)\n address = models.CharField(u'地址', max_length=200)\n postcode = models.CharField(u'邮政编码', max_length=6)\n notes = models.CharField(u'备注', max_length=200)\n\n def __unicode__(self):\n return self.name\n\n\nclass Tag(models.Model):\n contact = models.ForeignKey(Contact)\n name = models.CharField(max_length=50)\n\n def __unicode__(self):\n return self.name\n",
"step-4": "from django.db import models\n\n\nclass Test(models.Model):\n name = models.CharField(max_length=20)\n\n def __unicode__(self):\n return self.name\n\n\nclass Contact(models.Model):\n GENDER_TYPES = ('M', u'男'), ('F', u'女'), ('X', u'不告诉你')\n name = models.CharField(u'姓名', max_length=20)\n age = models.IntegerField(u'年龄', default=0)\n gender = models.CharField(u'性别', max_length=1, null=False, blank=False,\n choices=GENDER_TYPES, default='X')\n email = models.EmailField()\n tele = models.CharField(u'电话', max_length=20)\n address = models.CharField(u'地址', max_length=200)\n postcode = models.CharField(u'邮政编码', max_length=6)\n notes = models.CharField(u'备注', max_length=200)\n\n def __unicode__(self):\n return self.name\n\n\nclass Tag(models.Model):\n contact = models.ForeignKey(Contact)\n name = models.CharField(max_length=50)\n\n def __unicode__(self):\n return self.name\n",
"step-5": "# -*- coding: utf-8 -*-\n\nfrom django.db import models\n\n\n# Create your models here.\n\n\nclass Test(models.Model):\n name = models.CharField(max_length=20)\n\n def __unicode__(self):\n return self.name\n\n\nclass Contact(models.Model):\n GENDER_TYPES = (\n ('M', u'男'),\n ('F', u'女'),\n ('X', u'不告诉你'),\n )\n name = models.CharField(u'姓名', max_length=20)\n age = models.IntegerField(u'年龄', default=0)\n gender = models.CharField(u'性别', max_length=1, null=False, blank=False, choices=GENDER_TYPES, default='X')\n email = models.EmailField()\n tele = models.CharField(u'电话', max_length=20)\n address = models.CharField(u'地址', max_length=200)\n postcode = models.CharField(u'邮政编码', max_length=6)\n notes = models.CharField(u'备注', max_length=200)\n\n def __unicode__(self):\n return self.name\n\n\nclass Tag(models.Model):\n contact = models.ForeignKey(Contact)\n name = models.CharField(max_length=50)\n\n def __unicode__(self):\n return self.name\n",
"step-ids": [
5,
8,
9,
10,
11
]
}
|
[
5,
8,
9,
10,
11
] |
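Note the __unicode__ methods and the ForeignKey without on_delete: this is Django 1.x / Python 2 era code. A quick shell session showing how the models relate (values and app label are illustrative):

from contacts.models import Contact, Tag  # app label assumed

c = Contact.objects.create(name=u'Alice', age=30, gender='F',
                           email='[email protected]', tele='555-0100',
                           address='1 Main St', postcode='100000', notes='')
Tag.objects.create(contact=c, name='friend')
print(c.tag_set.count())  # 1 -- reverse lookup through the ForeignKey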
# Copyright (c) 2009 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
{
  'variables': {
    'chromium_code': 1,
  },
  'includes': [
    'ots-common.gypi',
  ],
  'targets': [
    {
      'target_name': 'ots',
      'type': '<(library)',
      'sources': [
        '<@(ots_sources)',
      ],
      'include_dirs': [
        '<@(ots_include_dirs)',
      ],
      'direct_dependent_settings': {
        'include_dirs': [
          '<@(ots_include_dirs)',
        ],
      },
      'dependencies': [
        '../zlib/zlib.gyp:zlib',
      ],
    },
  ],
}
|
normal
|
{
"blob_id": "7413d4e98f79bf7b389a6305257833293714fc81",
"index": 1786,
"step-1": "<mask token>\n",
"step-2": "{'variables': {'chromium_code': 1}, 'includes': ['ots-common.gypi'],\n 'targets': [{'target_name': 'ots', 'type': '<(library)', 'sources': [\n '<@(ots_sources)'], 'include_dirs': ['<@(ots_include_dirs)'],\n 'direct_dependent_settings': {'include_dirs': ['<@(ots_include_dirs)']},\n 'dependencies': ['../zlib/zlib.gyp:zlib']}]}\n",
"step-3": "# Copyright (c) 2009 The Chromium Authors. All rights reserved.\n# Use of this source code is governed by a BSD-style license that can be\n# found in the LICENSE file.\n\n{\n 'variables': {\n 'chromium_code': 1,\n },\n 'includes': [\n 'ots-common.gypi',\n ],\n 'targets': [\n {\n 'target_name': 'ots',\n 'type': '<(library)',\n 'sources': [\n '<@(ots_sources)',\n ],\n 'include_dirs': [\n '<@(ots_include_dirs)',\n ],\n 'direct_dependent_settings': {\n 'include_dirs': [\n '<@(ots_include_dirs)',\n ],\n },\n 'dependencies': [\n '../zlib/zlib.gyp:zlib',\n ],\n },\n ],\n}\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
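A target in another .gyp file would consume this library by listing it in its own dependencies, picking up the exported include_dirs automatically via direct_dependent_settings. A hedged gyp fragment (the target name and relative path are made up for illustration):

{
  'targets': [
    {
      'target_name': 'font_sanitizer_demo',
      'type': 'executable',
      'sources': [ 'main.cc' ],
      'dependencies': [
        '../ots/ots.gyp:ots',
      ],
    },
  ],
}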
import os
import sys
import random
import pygame
import time
from pygame import locals

SCREEN_WIDTH = 1280
SCREEN_HEIGHT = 1024


class Moto(pygame.sprite.Sprite):

    def __init__(self, player_num, start_direction):
        pygame.sprite.Sprite.__init__(self)

        self.image = pygame.image.load("motor" + str(player_num) + ".png").convert()
        self.orig_image = self.image

        # Fetch the rectangle object that has the dimensions of the image
        # Update the position of this object by setting the values of rect.x and rect.y
        self.rect = self.image.get_rect()

        self.direction = start_direction

    def move_single_axis(self, dx, dy):
        # Move the rect
        self.rect.x += dx
        self.rect.y += dy

    def moveRight(self):
        self.direction = 0
        self.image = pygame.transform.rotate(self.orig_image, 0)

    def moveLeft(self):
        self.direction = 1
        self.image = pygame.transform.rotate(self.orig_image, 0)

    def moveUp(self):
        self.direction = 2
        self.image = pygame.transform.rotate(self.orig_image, 90)

    def moveDown(self):
        self.direction = 3
        self.image = pygame.transform.rotate(self.orig_image, 90)


# Class for the orange dude
class Player(object):

    def __init__(self, player_num, px, py, sx, sy, start_direction):
        self.player_num = player_num
        self.rect = pygame.Rect(px, py, sx, sy)
        self.direction = start_direction
        self.moto = Moto(player_num, start_direction)
        self.moto.rect.x = px
        self.moto.rect.y = py

    def moveRight(self):
        if self.direction != 1:
            self.direction = 0
            self.moto.moveRight()

    def moveLeft(self):
        if self.direction != 0:
            self.direction = 1
            self.moto.moveLeft()

    def moveUp(self):
        if self.direction != 3:
            self.direction = 2
            self.moto.moveUp()

    def moveDown(self):
        if self.direction != 2:
            self.direction = 3
            self.moto.moveDown()

    def moveOn(self):
        if self.direction == 0:
            self.move(2, 0)
        if self.direction == 1:
            self.move(-2, 0)
        if self.direction == 2:
            self.move(0, -2)
        if self.direction == 3:
            self.move(0, 2)

    def move(self, dx, dy):

        # Move each axis separately. Note that this checks for collisions both times.
        if dx != 0:
            self.move_single_axis(dx, 0)
            self.moto.move_single_axis(dx, 0)
        if dy != 0:
            self.move_single_axis(0, dy)
            self.moto.move_single_axis(0, dy)

    def move_single_axis(self, dx, dy):

        # Move the rect
        self.rect.x += dx
        self.rect.y += dy

        # Draw a wall (after the movement)
        Wall(self.player_num, (self.rect.centerx, self.rect.centery))


# Nice class to hold a wall rect
class Wall(object):

    def __init__(self, player_num, pos):
        Game.walls[player_num].append(self)
        self.rect = pygame.Rect(pos[0], pos[1], 3, 3)


# MAIN
class Game:

    walls = [[], []]

    def main(self):

        winner = 0

        screen = pygame.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT))

        clock = pygame.time.Clock()
        # walls for 2 players: lists in list
        Game.walls = [[], []]
        # starting positions
        player = Player(0, SCREEN_WIDTH - 80, int(SCREEN_HEIGHT / 2), 2, 16, 1)
        player2 = Player(1, 80, int(SCREEN_HEIGHT / 2), 2, 16, 0)

        # JOYSTICK
        try:
            pygame.joystick.init()
            joysticks = [pygame.joystick.Joystick(x) for x in range(pygame.joystick.get_count())]
            joysticks[0].init()
            joysticks[1].init()
            player1_joystick = joysticks[0]
            player2_joystick = joysticks[1]
        except IndexError:
            player1_joystick = None
            player2_joystick = None

        end = pygame.image.load('number3.png')
        screen.fill((0, 0, 0))
        screen.blit(end, ((0.5 * SCREEN_WIDTH) - (0.5 * 500), (0.5 * SCREEN_HEIGHT) - (0.5 * 500)))
        pygame.display.flip()
        pygame.time.wait(1000)

        end = pygame.image.load('number2.png')
        screen.fill((0, 0, 0))
        screen.blit(end, ((0.5 * SCREEN_WIDTH) - (0.5 * 500), (0.5 * SCREEN_HEIGHT) - (0.5 * 500)))
        pygame.display.flip()
        pygame.time.wait(1000)

        end = pygame.image.load('number1.png')
        screen.fill((0, 0, 0))
        screen.blit(end, ((0.5 * SCREEN_WIDTH) - (0.5 * 500), (0.5 * SCREEN_HEIGHT) - (0.5 * 500)))
        pygame.display.flip()
        pygame.time.wait(1000)

        # end = pygame.image.load('arcade.jpg').convert()
        # screen.blit(end, ((0.5 * SCREEN_WIDTH) - (0.5 * 500), (0.5 * SCREEN_HEIGHT) - (0.5 * 500)))
        pygame.display.flip()

        # background_image = pygame.transform.scale(pygame.image.load('arcade.jpg').convert(), (1280, 1024))
        # screen.blit(background_image, [0, 0])

        running = True
        while running:
            clock.tick(60)

            for e in pygame.event.get():
                if e.type == pygame.QUIT:
                    running = False
                if e.type == pygame.KEYDOWN and e.key == pygame.K_ESCAPE:
                    running = False

                # JOYSTICK
                try:
                    if e.type == pygame.locals.JOYAXISMOTION:
                        player1jx, player1jy = player1_joystick.get_axis(0), player1_joystick.get_axis(1)
                        if player1jx < 0:
                            player2.moveLeft()
                        if player1jx > 0:
                            player2.moveRight()
                        if player1jy < 0:
                            player2.moveUp()
                        if player1jy > 0:
                            player2.moveDown()
                        player2jx, player2jy = player2_joystick.get_axis(0), player2_joystick.get_axis(1)
                        if player2jx < 0:
                            player.moveLeft()
                        if player2jx > 0:
                            player.moveRight()
                        if player2jy < 0:
                            player.moveUp()
                        if player2jy > 0:
                            player.moveDown()
                except:
                    pass

            # PLAYER 1
            # Move the player if an arrow key is pressed
            key = pygame.key.get_pressed()
            if key[pygame.K_LEFT]:
                player.moveLeft()
            if key[pygame.K_RIGHT]:
                player.moveRight()
            if key[pygame.K_UP]:
                player.moveUp()
            if key[pygame.K_DOWN]:
                player.moveDown()

            player.moveOn()

            # PLAYER 2
            key = pygame.key.get_pressed()
            if key[pygame.K_a]:
                player2.moveLeft()
            if key[pygame.K_d]:
                player2.moveRight()
            if key[pygame.K_w]:
                player2.moveUp()
            if key[pygame.K_s]:
                player2.moveDown()

            player2.moveOn()

            # check borders
            if player.moto.rect.x < 0 or player.moto.rect.x > SCREEN_WIDTH:
                winner = 2
                running = False
            if player2.moto.rect.x < 0 or player2.moto.rect.x > SCREEN_WIDTH:
                winner = 1
                running = False
            if player.moto.rect.y < 0 or player.moto.rect.y > SCREEN_HEIGHT:
                winner = 2
                running = False
            if player2.moto.rect.y < 0 or player2.moto.rect.y > SCREEN_HEIGHT:
                winner = 1
                running = False
            # Draw the scene
            # screen.blit(background_image, [0, 0])
            # pygame.display.flip()
            screen.fill((0, 0, 0))

            # Player 1 walls
            counter1 = 0
            counter2 = 0
            coll_range = len(Game.walls[0]) - (player.moto.rect.width / 2 + 10)
            coll_range_2 = len(Game.walls[1]) - (player2.moto.rect.width / 2 + 10)
            for wall in Game.walls[0]:
                if player2.moto.rect.colliderect(wall.rect):
                    winner = 1
                    running = False
                if (counter1 < coll_range) and player.moto.rect.colliderect(wall.rect):
                    winner = 2
                    running = False
                counter1 += 1
                pygame.draw.rect(screen, (255, 0, 0), wall.rect)
            # Player 2 walls
            for wall in Game.walls[1]:
                if player.moto.rect.colliderect(wall.rect):
                    winner = 2
                    running = False
                if (counter2 < coll_range_2) and player2.moto.rect.colliderect(wall.rect):
                    winner = 1
                    running = False
                counter2 += 1
                pygame.draw.rect(screen, (0, 0, 255), wall.rect)

            # Player 1
            pygame.draw.rect(screen, (255, 200, 0), player.rect)
            screen.blit(player.moto.image, (player.moto.rect.x, player.moto.rect.y))

            # Player 2
            pygame.draw.rect(screen, (255, 200, 0), player2.rect)
            screen.blit(player2.moto.image, (player2.moto.rect.x, player2.moto.rect.y))

            pygame.display.flip()

        # GAME OVER
        print("Winner: ", winner)
        running = True
        clock = pygame.time.Clock()
        sound = pygame.mixer.Sound('blast.wav')
        sound.play(loops=0, maxtime=0, fade_ms=0)

        while running:

            clock.tick(60)

            for e in pygame.event.get():
                if e.type == pygame.JOYBUTTONDOWN:
                    player1Button = player1_joystick.get_button(0)
                    if (player1Button > 0):
                        running = False
                        print("BACK TO MENU")
                        return True
                    player2Button = player2_joystick.get_button(0)
                    if (player2Button > 0):
                        running = False
                        print("BACK TO MENU")
                        return True

                if e.type == pygame.KEYDOWN and (e.key == pygame.K_KP_ENTER or e.key == pygame.K_RETURN):
                    running = False
                    print("BACK TO MENU")
                    return True

            end = pygame.image.load('gameover.png')
            screen.blit(end, ((0.5 * SCREEN_WIDTH) - (0.5 * 1024), (0.5 * SCREEN_HEIGHT) - (0.5 * 768)))
            screen.fill((0, 0, 0))
            screen.blit(end, (10, 10))
            if winner == 2:
                myfont = pygame.font.SysFont("monospace", 72)
                label = myfont.render('Blue won!', 1, (0, 0, 225))
                screen.blit(label, ((0.5 * SCREEN_WIDTH) - (0.5 * 500), (0.5 * SCREEN_HEIGHT) - (0.5 * 750)))
            else:
                myfont = pygame.font.SysFont("monospace", 72)
                label = myfont.render('Red won!', 1, (255, 0, 0))
                screen.blit(label, ((0.5 * SCREEN_WIDTH) - (0.5 * 500), (0.5 * SCREEN_HEIGHT) - (0.5 * 750)))

            pygame.display.flip()
|
normal
|
{
"blob_id": "1d1f1c9b70ca487b48593c85c3e0b5afc10f0b07",
"index": 6642,
"step-1": "<mask token>\n\n\nclass Player(object):\n\n def __init__(self, player_num, px, py, sx, sy, start_direction):\n self.player_num = player_num\n self.rect = pygame.Rect(px, py, sx, sy)\n self.direction = start_direction\n self.moto = Moto(player_num, start_direction)\n self.moto.rect.x = px\n self.moto.rect.y = py\n\n def moveRight(self):\n if self.direction != 1:\n self.direction = 0\n self.moto.moveRight()\n\n def moveLeft(self):\n if self.direction != 0:\n self.direction = 1\n self.moto.moveLeft()\n\n def moveUp(self):\n if self.direction != 3:\n self.direction = 2\n self.moto.moveUp()\n\n def moveDown(self):\n if self.direction != 2:\n self.direction = 3\n self.moto.moveDown()\n\n def moveOn(self):\n if self.direction == 0:\n self.move(2, 0)\n if self.direction == 1:\n self.move(-2, 0)\n if self.direction == 2:\n self.move(0, -2)\n if self.direction == 3:\n self.move(0, 2)\n <mask token>\n\n def move_single_axis(self, dx, dy):\n self.rect.x += dx\n self.rect.y += dy\n Wall(self.player_num, (self.rect.centerx, self.rect.centery))\n\n\nclass Wall(object):\n\n def __init__(self, player_num, pos):\n Game.walls[player_num].append(self)\n self.rect = pygame.Rect(pos[0], pos[1], 3, 3)\n\n\nclass Game:\n walls = [[], []]\n\n def main(self):\n winner = 0\n screen = pygame.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT))\n clock = pygame.time.Clock()\n Game.walls = [[], []]\n player = Player(0, SCREEN_WIDTH - 80, int(SCREEN_HEIGHT / 2), 2, 16, 1)\n player2 = Player(1, 80, int(SCREEN_HEIGHT / 2), 2, 16, 0)\n try:\n pygame.joystick.init()\n joysticks = [pygame.joystick.Joystick(x) for x in range(pygame.\n joystick.get_count())]\n joysticks[0].init()\n joysticks[1].init()\n player1_joystick = joysticks[0]\n player2_joystick = joysticks[1]\n except IndexError:\n player1_joystick = None\n player2_joystick = None\n end = pygame.image.load('number3.png')\n screen.fill((0, 0, 0))\n screen.blit(end, (0.5 * SCREEN_WIDTH - 0.5 * 500, 0.5 *\n SCREEN_HEIGHT - 0.5 * 500))\n pygame.display.flip()\n pygame.time.wait(1000)\n end = pygame.image.load('number2.png')\n screen.fill((0, 0, 0))\n screen.blit(end, (0.5 * SCREEN_WIDTH - 0.5 * 500, 0.5 *\n SCREEN_HEIGHT - 0.5 * 500))\n pygame.display.flip()\n pygame.time.wait(1000)\n end = pygame.image.load('number1.png')\n screen.fill((0, 0, 0))\n screen.blit(end, (0.5 * SCREEN_WIDTH - 0.5 * 500, 0.5 *\n SCREEN_HEIGHT - 0.5 * 500))\n pygame.display.flip()\n pygame.time.wait(1000)\n pygame.display.flip()\n running = True\n while running:\n clock.tick(60)\n for e in pygame.event.get():\n if e.type == pygame.QUIT:\n running = False\n if e.type == pygame.KEYDOWN and e.key == pygame.K_ESCAPE:\n running = False\n try:\n if e.type == pygame.locals.JOYAXISMOTION:\n player1jx, player1jy = player1_joystick.get_axis(0\n ), player1_joystick.get_axis(1)\n if player1jx < 0:\n player2.moveLeft()\n if player1jx > 0:\n player2.moveRight()\n if player1jy < 0:\n player2.moveUp()\n if player1jy > 0:\n player2.moveDown()\n player2jx, player2jy = player2_joystick.get_axis(0\n ), player2_joystick.get_axis(1)\n if player2jx < 0:\n player.moveLeft()\n if player2jx > 0:\n player.moveRight()\n if player2jy < 0:\n player.moveUp()\n if player2jy > 0:\n player.moveDown()\n except:\n pass\n key = pygame.key.get_pressed()\n if key[pygame.K_LEFT]:\n player.moveLeft()\n if key[pygame.K_RIGHT]:\n player.moveRight()\n if key[pygame.K_UP]:\n player.moveUp()\n if key[pygame.K_DOWN]:\n player.moveDown()\n player.moveOn()\n key = pygame.key.get_pressed()\n if key[pygame.K_a]:\n player2.moveLeft()\n if 
key[pygame.K_d]:\n player2.moveRight()\n if key[pygame.K_w]:\n player2.moveUp()\n if key[pygame.K_s]:\n player2.moveDown()\n player2.moveOn()\n if player.moto.rect.x < 0 or player.moto.rect.x > SCREEN_WIDTH:\n winner = 2\n running = False\n if player2.moto.rect.x < 0 or player2.moto.rect.x > SCREEN_WIDTH:\n winner = 1\n running = False\n if player.moto.rect.y < 0 or player.moto.rect.y > SCREEN_HEIGHT:\n winner = 2\n running = False\n if player2.moto.rect.y < 0 or player2.moto.rect.y > SCREEN_HEIGHT:\n winner = 1\n running = False\n screen.fill((0, 0, 0))\n counter1 = 0\n counter2 = 0\n coll_range = len(Game.walls[0]) - (player.moto.rect.width / 2 + 10)\n coll_range_2 = len(Game.walls[1]) - (player2.moto.rect.width / \n 2 + 10)\n for wall in Game.walls[0]:\n if player2.moto.rect.colliderect(wall.rect):\n winner = 1\n running = False\n if counter1 < coll_range and player.moto.rect.colliderect(wall\n .rect):\n winner = 2\n running = False\n counter1 += 1\n pygame.draw.rect(screen, (255, 0, 0), wall.rect)\n for wall in Game.walls[1]:\n if player.moto.rect.colliderect(wall.rect):\n winner = 2\n running = False\n if counter2 < coll_range_2 and player2.moto.rect.colliderect(\n wall.rect):\n winner = 1\n running = False\n counter2 += 1\n pygame.draw.rect(screen, (0, 0, 255), wall.rect)\n pygame.draw.rect(screen, (255, 200, 0), player.rect)\n screen.blit(player.moto.image, (player.moto.rect.x, player.moto\n .rect.y))\n pygame.draw.rect(screen, (255, 200, 0), player2.rect)\n screen.blit(player2.moto.image, (player2.moto.rect.x, player2.\n moto.rect.y))\n pygame.display.flip()\n print('Winner: ', winner)\n running = True\n clock = pygame.time.Clock()\n sound = pygame.mixer.Sound('blast.wav')\n sound.play(loops=0, maxtime=0, fade_ms=0)\n while running:\n clock.tick(60)\n for e in pygame.event.get():\n if e.type == pygame.JOYBUTTONDOWN:\n player1Button = player1_joystick.get_button(0)\n if player1Button > 0:\n running = False\n print('BACK TO MENU')\n return True\n player2Button = player2_joystick.get_button(0)\n if player2Button > 0:\n running = False\n print('BACK TO MENU')\n return True\n if e.type == pygame.KEYDOWN and (e.key == pygame.K_KP_ENTER or\n e.key == pygame.K_RETURN):\n running = False\n print('BACK TO MENU')\n return True\n end = pygame.image.load('gameover.png')\n screen.blit(end, (0.5 * SCREEN_WIDTH - 0.5 * 1024, 0.5 *\n SCREEN_HEIGHT - 0.5 * 768))\n screen.fill((0, 0, 0))\n screen.blit(end, (10, 10))\n if winner == 2:\n myfont = pygame.font.SysFont('monospace', 72)\n label = myfont.render('Blue won!', 1, (0, 0, 225))\n screen.blit(label, (0.5 * SCREEN_WIDTH - 0.5 * 500, 0.5 *\n SCREEN_HEIGHT - 0.5 * 750))\n else:\n myfont = pygame.font.SysFont('monospace', 72)\n label = myfont.render('Red won!', 1, (255, 0, 0))\n screen.blit(label, (0.5 * SCREEN_WIDTH - 0.5 * 500, 0.5 *\n SCREEN_HEIGHT - 0.5 * 750))\n pygame.display.flip()\n",
"step-2": "<mask token>\n\n\nclass Moto(pygame.sprite.Sprite):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\nclass Player(object):\n\n def __init__(self, player_num, px, py, sx, sy, start_direction):\n self.player_num = player_num\n self.rect = pygame.Rect(px, py, sx, sy)\n self.direction = start_direction\n self.moto = Moto(player_num, start_direction)\n self.moto.rect.x = px\n self.moto.rect.y = py\n\n def moveRight(self):\n if self.direction != 1:\n self.direction = 0\n self.moto.moveRight()\n\n def moveLeft(self):\n if self.direction != 0:\n self.direction = 1\n self.moto.moveLeft()\n\n def moveUp(self):\n if self.direction != 3:\n self.direction = 2\n self.moto.moveUp()\n\n def moveDown(self):\n if self.direction != 2:\n self.direction = 3\n self.moto.moveDown()\n\n def moveOn(self):\n if self.direction == 0:\n self.move(2, 0)\n if self.direction == 1:\n self.move(-2, 0)\n if self.direction == 2:\n self.move(0, -2)\n if self.direction == 3:\n self.move(0, 2)\n\n def move(self, dx, dy):\n if dx != 0:\n self.move_single_axis(dx, 0)\n self.moto.move_single_axis(dx, 0)\n if dy != 0:\n self.move_single_axis(0, dy)\n self.moto.move_single_axis(0, dy)\n\n def move_single_axis(self, dx, dy):\n self.rect.x += dx\n self.rect.y += dy\n Wall(self.player_num, (self.rect.centerx, self.rect.centery))\n\n\nclass Wall(object):\n\n def __init__(self, player_num, pos):\n Game.walls[player_num].append(self)\n self.rect = pygame.Rect(pos[0], pos[1], 3, 3)\n\n\nclass Game:\n walls = [[], []]\n\n def main(self):\n winner = 0\n screen = pygame.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT))\n clock = pygame.time.Clock()\n Game.walls = [[], []]\n player = Player(0, SCREEN_WIDTH - 80, int(SCREEN_HEIGHT / 2), 2, 16, 1)\n player2 = Player(1, 80, int(SCREEN_HEIGHT / 2), 2, 16, 0)\n try:\n pygame.joystick.init()\n joysticks = [pygame.joystick.Joystick(x) for x in range(pygame.\n joystick.get_count())]\n joysticks[0].init()\n joysticks[1].init()\n player1_joystick = joysticks[0]\n player2_joystick = joysticks[1]\n except IndexError:\n player1_joystick = None\n player2_joystick = None\n end = pygame.image.load('number3.png')\n screen.fill((0, 0, 0))\n screen.blit(end, (0.5 * SCREEN_WIDTH - 0.5 * 500, 0.5 *\n SCREEN_HEIGHT - 0.5 * 500))\n pygame.display.flip()\n pygame.time.wait(1000)\n end = pygame.image.load('number2.png')\n screen.fill((0, 0, 0))\n screen.blit(end, (0.5 * SCREEN_WIDTH - 0.5 * 500, 0.5 *\n SCREEN_HEIGHT - 0.5 * 500))\n pygame.display.flip()\n pygame.time.wait(1000)\n end = pygame.image.load('number1.png')\n screen.fill((0, 0, 0))\n screen.blit(end, (0.5 * SCREEN_WIDTH - 0.5 * 500, 0.5 *\n SCREEN_HEIGHT - 0.5 * 500))\n pygame.display.flip()\n pygame.time.wait(1000)\n pygame.display.flip()\n running = True\n while running:\n clock.tick(60)\n for e in pygame.event.get():\n if e.type == pygame.QUIT:\n running = False\n if e.type == pygame.KEYDOWN and e.key == pygame.K_ESCAPE:\n running = False\n try:\n if e.type == pygame.locals.JOYAXISMOTION:\n player1jx, player1jy = player1_joystick.get_axis(0\n ), player1_joystick.get_axis(1)\n if player1jx < 0:\n player2.moveLeft()\n if player1jx > 0:\n player2.moveRight()\n if player1jy < 0:\n player2.moveUp()\n if player1jy > 0:\n player2.moveDown()\n player2jx, player2jy = player2_joystick.get_axis(0\n ), player2_joystick.get_axis(1)\n if player2jx < 0:\n player.moveLeft()\n if player2jx > 0:\n player.moveRight()\n if player2jy < 0:\n player.moveUp()\n if player2jy > 0:\n player.moveDown()\n except:\n pass\n key = 
pygame.key.get_pressed()\n if key[pygame.K_LEFT]:\n player.moveLeft()\n if key[pygame.K_RIGHT]:\n player.moveRight()\n if key[pygame.K_UP]:\n player.moveUp()\n if key[pygame.K_DOWN]:\n player.moveDown()\n player.moveOn()\n key = pygame.key.get_pressed()\n if key[pygame.K_a]:\n player2.moveLeft()\n if key[pygame.K_d]:\n player2.moveRight()\n if key[pygame.K_w]:\n player2.moveUp()\n if key[pygame.K_s]:\n player2.moveDown()\n player2.moveOn()\n if player.moto.rect.x < 0 or player.moto.rect.x > SCREEN_WIDTH:\n winner = 2\n running = False\n if player2.moto.rect.x < 0 or player2.moto.rect.x > SCREEN_WIDTH:\n winner = 1\n running = False\n if player.moto.rect.y < 0 or player.moto.rect.y > SCREEN_HEIGHT:\n winner = 2\n running = False\n if player2.moto.rect.y < 0 or player2.moto.rect.y > SCREEN_HEIGHT:\n winner = 1\n running = False\n screen.fill((0, 0, 0))\n counter1 = 0\n counter2 = 0\n coll_range = len(Game.walls[0]) - (player.moto.rect.width / 2 + 10)\n coll_range_2 = len(Game.walls[1]) - (player2.moto.rect.width / \n 2 + 10)\n for wall in Game.walls[0]:\n if player2.moto.rect.colliderect(wall.rect):\n winner = 1\n running = False\n if counter1 < coll_range and player.moto.rect.colliderect(wall\n .rect):\n winner = 2\n running = False\n counter1 += 1\n pygame.draw.rect(screen, (255, 0, 0), wall.rect)\n for wall in Game.walls[1]:\n if player.moto.rect.colliderect(wall.rect):\n winner = 2\n running = False\n if counter2 < coll_range_2 and player2.moto.rect.colliderect(\n wall.rect):\n winner = 1\n running = False\n counter2 += 1\n pygame.draw.rect(screen, (0, 0, 255), wall.rect)\n pygame.draw.rect(screen, (255, 200, 0), player.rect)\n screen.blit(player.moto.image, (player.moto.rect.x, player.moto\n .rect.y))\n pygame.draw.rect(screen, (255, 200, 0), player2.rect)\n screen.blit(player2.moto.image, (player2.moto.rect.x, player2.\n moto.rect.y))\n pygame.display.flip()\n print('Winner: ', winner)\n running = True\n clock = pygame.time.Clock()\n sound = pygame.mixer.Sound('blast.wav')\n sound.play(loops=0, maxtime=0, fade_ms=0)\n while running:\n clock.tick(60)\n for e in pygame.event.get():\n if e.type == pygame.JOYBUTTONDOWN:\n player1Button = player1_joystick.get_button(0)\n if player1Button > 0:\n running = False\n print('BACK TO MENU')\n return True\n player2Button = player2_joystick.get_button(0)\n if player2Button > 0:\n running = False\n print('BACK TO MENU')\n return True\n if e.type == pygame.KEYDOWN and (e.key == pygame.K_KP_ENTER or\n e.key == pygame.K_RETURN):\n running = False\n print('BACK TO MENU')\n return True\n end = pygame.image.load('gameover.png')\n screen.blit(end, (0.5 * SCREEN_WIDTH - 0.5 * 1024, 0.5 *\n SCREEN_HEIGHT - 0.5 * 768))\n screen.fill((0, 0, 0))\n screen.blit(end, (10, 10))\n if winner == 2:\n myfont = pygame.font.SysFont('monospace', 72)\n label = myfont.render('Blue won!', 1, (0, 0, 225))\n screen.blit(label, (0.5 * SCREEN_WIDTH - 0.5 * 500, 0.5 *\n SCREEN_HEIGHT - 0.5 * 750))\n else:\n myfont = pygame.font.SysFont('monospace', 72)\n label = myfont.render('Red won!', 1, (255, 0, 0))\n screen.blit(label, (0.5 * SCREEN_WIDTH - 0.5 * 500, 0.5 *\n SCREEN_HEIGHT - 0.5 * 750))\n pygame.display.flip()\n",
"step-3": "<mask token>\n\n\nclass Moto(pygame.sprite.Sprite):\n <mask token>\n\n def move_single_axis(self, dx, dy):\n self.rect.x += dx\n self.rect.y += dy\n\n def moveRight(self):\n self.direction = 0\n self.image = pygame.transform.rotate(self.orig_image, 0)\n\n def moveLeft(self):\n self.direction = 1\n self.image = pygame.transform.rotate(self.orig_image, 0)\n\n def moveUp(self):\n self.direction = 2\n self.image = pygame.transform.rotate(self.orig_image, 90)\n <mask token>\n\n\nclass Player(object):\n\n def __init__(self, player_num, px, py, sx, sy, start_direction):\n self.player_num = player_num\n self.rect = pygame.Rect(px, py, sx, sy)\n self.direction = start_direction\n self.moto = Moto(player_num, start_direction)\n self.moto.rect.x = px\n self.moto.rect.y = py\n\n def moveRight(self):\n if self.direction != 1:\n self.direction = 0\n self.moto.moveRight()\n\n def moveLeft(self):\n if self.direction != 0:\n self.direction = 1\n self.moto.moveLeft()\n\n def moveUp(self):\n if self.direction != 3:\n self.direction = 2\n self.moto.moveUp()\n\n def moveDown(self):\n if self.direction != 2:\n self.direction = 3\n self.moto.moveDown()\n\n def moveOn(self):\n if self.direction == 0:\n self.move(2, 0)\n if self.direction == 1:\n self.move(-2, 0)\n if self.direction == 2:\n self.move(0, -2)\n if self.direction == 3:\n self.move(0, 2)\n\n def move(self, dx, dy):\n if dx != 0:\n self.move_single_axis(dx, 0)\n self.moto.move_single_axis(dx, 0)\n if dy != 0:\n self.move_single_axis(0, dy)\n self.moto.move_single_axis(0, dy)\n\n def move_single_axis(self, dx, dy):\n self.rect.x += dx\n self.rect.y += dy\n Wall(self.player_num, (self.rect.centerx, self.rect.centery))\n\n\nclass Wall(object):\n\n def __init__(self, player_num, pos):\n Game.walls[player_num].append(self)\n self.rect = pygame.Rect(pos[0], pos[1], 3, 3)\n\n\nclass Game:\n walls = [[], []]\n\n def main(self):\n winner = 0\n screen = pygame.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT))\n clock = pygame.time.Clock()\n Game.walls = [[], []]\n player = Player(0, SCREEN_WIDTH - 80, int(SCREEN_HEIGHT / 2), 2, 16, 1)\n player2 = Player(1, 80, int(SCREEN_HEIGHT / 2), 2, 16, 0)\n try:\n pygame.joystick.init()\n joysticks = [pygame.joystick.Joystick(x) for x in range(pygame.\n joystick.get_count())]\n joysticks[0].init()\n joysticks[1].init()\n player1_joystick = joysticks[0]\n player2_joystick = joysticks[1]\n except IndexError:\n player1_joystick = None\n player2_joystick = None\n end = pygame.image.load('number3.png')\n screen.fill((0, 0, 0))\n screen.blit(end, (0.5 * SCREEN_WIDTH - 0.5 * 500, 0.5 *\n SCREEN_HEIGHT - 0.5 * 500))\n pygame.display.flip()\n pygame.time.wait(1000)\n end = pygame.image.load('number2.png')\n screen.fill((0, 0, 0))\n screen.blit(end, (0.5 * SCREEN_WIDTH - 0.5 * 500, 0.5 *\n SCREEN_HEIGHT - 0.5 * 500))\n pygame.display.flip()\n pygame.time.wait(1000)\n end = pygame.image.load('number1.png')\n screen.fill((0, 0, 0))\n screen.blit(end, (0.5 * SCREEN_WIDTH - 0.5 * 500, 0.5 *\n SCREEN_HEIGHT - 0.5 * 500))\n pygame.display.flip()\n pygame.time.wait(1000)\n pygame.display.flip()\n running = True\n while running:\n clock.tick(60)\n for e in pygame.event.get():\n if e.type == pygame.QUIT:\n running = False\n if e.type == pygame.KEYDOWN and e.key == pygame.K_ESCAPE:\n running = False\n try:\n if e.type == pygame.locals.JOYAXISMOTION:\n player1jx, player1jy = player1_joystick.get_axis(0\n ), player1_joystick.get_axis(1)\n if player1jx < 0:\n player2.moveLeft()\n if player1jx > 0:\n player2.moveRight()\n if player1jy < 
0:\n player2.moveUp()\n if player1jy > 0:\n player2.moveDown()\n player2jx, player2jy = player2_joystick.get_axis(0\n ), player2_joystick.get_axis(1)\n if player2jx < 0:\n player.moveLeft()\n if player2jx > 0:\n player.moveRight()\n if player2jy < 0:\n player.moveUp()\n if player2jy > 0:\n player.moveDown()\n except:\n pass\n key = pygame.key.get_pressed()\n if key[pygame.K_LEFT]:\n player.moveLeft()\n if key[pygame.K_RIGHT]:\n player.moveRight()\n if key[pygame.K_UP]:\n player.moveUp()\n if key[pygame.K_DOWN]:\n player.moveDown()\n player.moveOn()\n key = pygame.key.get_pressed()\n if key[pygame.K_a]:\n player2.moveLeft()\n if key[pygame.K_d]:\n player2.moveRight()\n if key[pygame.K_w]:\n player2.moveUp()\n if key[pygame.K_s]:\n player2.moveDown()\n player2.moveOn()\n if player.moto.rect.x < 0 or player.moto.rect.x > SCREEN_WIDTH:\n winner = 2\n running = False\n if player2.moto.rect.x < 0 or player2.moto.rect.x > SCREEN_WIDTH:\n winner = 1\n running = False\n if player.moto.rect.y < 0 or player.moto.rect.y > SCREEN_HEIGHT:\n winner = 2\n running = False\n if player2.moto.rect.y < 0 or player2.moto.rect.y > SCREEN_HEIGHT:\n winner = 1\n running = False\n screen.fill((0, 0, 0))\n counter1 = 0\n counter2 = 0\n coll_range = len(Game.walls[0]) - (player.moto.rect.width / 2 + 10)\n coll_range_2 = len(Game.walls[1]) - (player2.moto.rect.width / \n 2 + 10)\n for wall in Game.walls[0]:\n if player2.moto.rect.colliderect(wall.rect):\n winner = 1\n running = False\n if counter1 < coll_range and player.moto.rect.colliderect(wall\n .rect):\n winner = 2\n running = False\n counter1 += 1\n pygame.draw.rect(screen, (255, 0, 0), wall.rect)\n for wall in Game.walls[1]:\n if player.moto.rect.colliderect(wall.rect):\n winner = 2\n running = False\n if counter2 < coll_range_2 and player2.moto.rect.colliderect(\n wall.rect):\n winner = 1\n running = False\n counter2 += 1\n pygame.draw.rect(screen, (0, 0, 255), wall.rect)\n pygame.draw.rect(screen, (255, 200, 0), player.rect)\n screen.blit(player.moto.image, (player.moto.rect.x, player.moto\n .rect.y))\n pygame.draw.rect(screen, (255, 200, 0), player2.rect)\n screen.blit(player2.moto.image, (player2.moto.rect.x, player2.\n moto.rect.y))\n pygame.display.flip()\n print('Winner: ', winner)\n running = True\n clock = pygame.time.Clock()\n sound = pygame.mixer.Sound('blast.wav')\n sound.play(loops=0, maxtime=0, fade_ms=0)\n while running:\n clock.tick(60)\n for e in pygame.event.get():\n if e.type == pygame.JOYBUTTONDOWN:\n player1Button = player1_joystick.get_button(0)\n if player1Button > 0:\n running = False\n print('BACK TO MENU')\n return True\n player2Button = player2_joystick.get_button(0)\n if player2Button > 0:\n running = False\n print('BACK TO MENU')\n return True\n if e.type == pygame.KEYDOWN and (e.key == pygame.K_KP_ENTER or\n e.key == pygame.K_RETURN):\n running = False\n print('BACK TO MENU')\n return True\n end = pygame.image.load('gameover.png')\n screen.blit(end, (0.5 * SCREEN_WIDTH - 0.5 * 1024, 0.5 *\n SCREEN_HEIGHT - 0.5 * 768))\n screen.fill((0, 0, 0))\n screen.blit(end, (10, 10))\n if winner == 2:\n myfont = pygame.font.SysFont('monospace', 72)\n label = myfont.render('Blue won!', 1, (0, 0, 225))\n screen.blit(label, (0.5 * SCREEN_WIDTH - 0.5 * 500, 0.5 *\n SCREEN_HEIGHT - 0.5 * 750))\n else:\n myfont = pygame.font.SysFont('monospace', 72)\n label = myfont.render('Red won!', 1, (255, 0, 0))\n screen.blit(label, (0.5 * SCREEN_WIDTH - 0.5 * 500, 0.5 *\n SCREEN_HEIGHT - 0.5 * 750))\n pygame.display.flip()\n",
"step-4": "<mask token>\nSCREEN_WIDTH = 1280\nSCREEN_HEIGHT = 1024\n\n\nclass Moto(pygame.sprite.Sprite):\n\n def __init__(self, player_num, start_direction):\n pygame.sprite.Sprite.__init__(self)\n self.image = pygame.image.load('motor' + str(player_num) + '.png'\n ).convert()\n self.orig_image = self.image\n self.rect = self.image.get_rect()\n self.direction = start_direction\n\n def move_single_axis(self, dx, dy):\n self.rect.x += dx\n self.rect.y += dy\n\n def moveRight(self):\n self.direction = 0\n self.image = pygame.transform.rotate(self.orig_image, 0)\n\n def moveLeft(self):\n self.direction = 1\n self.image = pygame.transform.rotate(self.orig_image, 0)\n\n def moveUp(self):\n self.direction = 2\n self.image = pygame.transform.rotate(self.orig_image, 90)\n\n def moveDown(self):\n self.direction = 3\n self.image = pygame.transform.rotate(self.orig_image, 90)\n\n\nclass Player(object):\n\n def __init__(self, player_num, px, py, sx, sy, start_direction):\n self.player_num = player_num\n self.rect = pygame.Rect(px, py, sx, sy)\n self.direction = start_direction\n self.moto = Moto(player_num, start_direction)\n self.moto.rect.x = px\n self.moto.rect.y = py\n\n def moveRight(self):\n if self.direction != 1:\n self.direction = 0\n self.moto.moveRight()\n\n def moveLeft(self):\n if self.direction != 0:\n self.direction = 1\n self.moto.moveLeft()\n\n def moveUp(self):\n if self.direction != 3:\n self.direction = 2\n self.moto.moveUp()\n\n def moveDown(self):\n if self.direction != 2:\n self.direction = 3\n self.moto.moveDown()\n\n def moveOn(self):\n if self.direction == 0:\n self.move(2, 0)\n if self.direction == 1:\n self.move(-2, 0)\n if self.direction == 2:\n self.move(0, -2)\n if self.direction == 3:\n self.move(0, 2)\n\n def move(self, dx, dy):\n if dx != 0:\n self.move_single_axis(dx, 0)\n self.moto.move_single_axis(dx, 0)\n if dy != 0:\n self.move_single_axis(0, dy)\n self.moto.move_single_axis(0, dy)\n\n def move_single_axis(self, dx, dy):\n self.rect.x += dx\n self.rect.y += dy\n Wall(self.player_num, (self.rect.centerx, self.rect.centery))\n\n\nclass Wall(object):\n\n def __init__(self, player_num, pos):\n Game.walls[player_num].append(self)\n self.rect = pygame.Rect(pos[0], pos[1], 3, 3)\n\n\nclass Game:\n walls = [[], []]\n\n def main(self):\n winner = 0\n screen = pygame.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT))\n clock = pygame.time.Clock()\n Game.walls = [[], []]\n player = Player(0, SCREEN_WIDTH - 80, int(SCREEN_HEIGHT / 2), 2, 16, 1)\n player2 = Player(1, 80, int(SCREEN_HEIGHT / 2), 2, 16, 0)\n try:\n pygame.joystick.init()\n joysticks = [pygame.joystick.Joystick(x) for x in range(pygame.\n joystick.get_count())]\n joysticks[0].init()\n joysticks[1].init()\n player1_joystick = joysticks[0]\n player2_joystick = joysticks[1]\n except IndexError:\n player1_joystick = None\n player2_joystick = None\n end = pygame.image.load('number3.png')\n screen.fill((0, 0, 0))\n screen.blit(end, (0.5 * SCREEN_WIDTH - 0.5 * 500, 0.5 *\n SCREEN_HEIGHT - 0.5 * 500))\n pygame.display.flip()\n pygame.time.wait(1000)\n end = pygame.image.load('number2.png')\n screen.fill((0, 0, 0))\n screen.blit(end, (0.5 * SCREEN_WIDTH - 0.5 * 500, 0.5 *\n SCREEN_HEIGHT - 0.5 * 500))\n pygame.display.flip()\n pygame.time.wait(1000)\n end = pygame.image.load('number1.png')\n screen.fill((0, 0, 0))\n screen.blit(end, (0.5 * SCREEN_WIDTH - 0.5 * 500, 0.5 *\n SCREEN_HEIGHT - 0.5 * 500))\n pygame.display.flip()\n pygame.time.wait(1000)\n pygame.display.flip()\n running = True\n while running:\n clock.tick(60)\n 
for e in pygame.event.get():\n if e.type == pygame.QUIT:\n running = False\n if e.type == pygame.KEYDOWN and e.key == pygame.K_ESCAPE:\n running = False\n try:\n if e.type == pygame.locals.JOYAXISMOTION:\n player1jx, player1jy = player1_joystick.get_axis(0\n ), player1_joystick.get_axis(1)\n if player1jx < 0:\n player2.moveLeft()\n if player1jx > 0:\n player2.moveRight()\n if player1jy < 0:\n player2.moveUp()\n if player1jy > 0:\n player2.moveDown()\n player2jx, player2jy = player2_joystick.get_axis(0\n ), player2_joystick.get_axis(1)\n if player2jx < 0:\n player.moveLeft()\n if player2jx > 0:\n player.moveRight()\n if player2jy < 0:\n player.moveUp()\n if player2jy > 0:\n player.moveDown()\n except:\n pass\n key = pygame.key.get_pressed()\n if key[pygame.K_LEFT]:\n player.moveLeft()\n if key[pygame.K_RIGHT]:\n player.moveRight()\n if key[pygame.K_UP]:\n player.moveUp()\n if key[pygame.K_DOWN]:\n player.moveDown()\n player.moveOn()\n key = pygame.key.get_pressed()\n if key[pygame.K_a]:\n player2.moveLeft()\n if key[pygame.K_d]:\n player2.moveRight()\n if key[pygame.K_w]:\n player2.moveUp()\n if key[pygame.K_s]:\n player2.moveDown()\n player2.moveOn()\n if player.moto.rect.x < 0 or player.moto.rect.x > SCREEN_WIDTH:\n winner = 2\n running = False\n if player2.moto.rect.x < 0 or player2.moto.rect.x > SCREEN_WIDTH:\n winner = 1\n running = False\n if player.moto.rect.y < 0 or player.moto.rect.y > SCREEN_HEIGHT:\n winner = 2\n running = False\n if player2.moto.rect.y < 0 or player2.moto.rect.y > SCREEN_HEIGHT:\n winner = 1\n running = False\n screen.fill((0, 0, 0))\n counter1 = 0\n counter2 = 0\n coll_range = len(Game.walls[0]) - (player.moto.rect.width / 2 + 10)\n coll_range_2 = len(Game.walls[1]) - (player2.moto.rect.width / \n 2 + 10)\n for wall in Game.walls[0]:\n if player2.moto.rect.colliderect(wall.rect):\n winner = 1\n running = False\n if counter1 < coll_range and player.moto.rect.colliderect(wall\n .rect):\n winner = 2\n running = False\n counter1 += 1\n pygame.draw.rect(screen, (255, 0, 0), wall.rect)\n for wall in Game.walls[1]:\n if player.moto.rect.colliderect(wall.rect):\n winner = 2\n running = False\n if counter2 < coll_range_2 and player2.moto.rect.colliderect(\n wall.rect):\n winner = 1\n running = False\n counter2 += 1\n pygame.draw.rect(screen, (0, 0, 255), wall.rect)\n pygame.draw.rect(screen, (255, 200, 0), player.rect)\n screen.blit(player.moto.image, (player.moto.rect.x, player.moto\n .rect.y))\n pygame.draw.rect(screen, (255, 200, 0), player2.rect)\n screen.blit(player2.moto.image, (player2.moto.rect.x, player2.\n moto.rect.y))\n pygame.display.flip()\n print('Winner: ', winner)\n running = True\n clock = pygame.time.Clock()\n sound = pygame.mixer.Sound('blast.wav')\n sound.play(loops=0, maxtime=0, fade_ms=0)\n while running:\n clock.tick(60)\n for e in pygame.event.get():\n if e.type == pygame.JOYBUTTONDOWN:\n player1Button = player1_joystick.get_button(0)\n if player1Button > 0:\n running = False\n print('BACK TO MENU')\n return True\n player2Button = player2_joystick.get_button(0)\n if player2Button > 0:\n running = False\n print('BACK TO MENU')\n return True\n if e.type == pygame.KEYDOWN and (e.key == pygame.K_KP_ENTER or\n e.key == pygame.K_RETURN):\n running = False\n print('BACK TO MENU')\n return True\n end = pygame.image.load('gameover.png')\n screen.blit(end, (0.5 * SCREEN_WIDTH - 0.5 * 1024, 0.5 *\n SCREEN_HEIGHT - 0.5 * 768))\n screen.fill((0, 0, 0))\n screen.blit(end, (10, 10))\n if winner == 2:\n myfont = pygame.font.SysFont('monospace', 72)\n label = 
myfont.render('Blue won!', 1, (0, 0, 225))\n screen.blit(label, (0.5 * SCREEN_WIDTH - 0.5 * 500, 0.5 *\n SCREEN_HEIGHT - 0.5 * 750))\n else:\n myfont = pygame.font.SysFont('monospace', 72)\n label = myfont.render('Red won!', 1, (255, 0, 0))\n screen.blit(label, (0.5 * SCREEN_WIDTH - 0.5 * 500, 0.5 *\n SCREEN_HEIGHT - 0.5 * 750))\n pygame.display.flip()\n",
"step-5": "import os\nimport sys\nimport random\nimport pygame\nimport time\nfrom pygame import locals\n\nSCREEN_WIDTH = 1280\nSCREEN_HEIGHT = 1024\n\n\nclass Moto(pygame.sprite.Sprite):\n\n def __init__(self, player_num, start_direction):\n pygame.sprite.Sprite.__init__(self)\n\n self.image = pygame.image.load(\"motor\" + str(player_num) + \".png\").convert()\n self.orig_image = self.image\n\n # Fetch the rectangle object that has the dimensions of the image\n # Update the position of this object by setting the values of rect.x and rect.y\n self.rect = self.image.get_rect()\n\n self.direction = start_direction\n\n def move_single_axis(self, dx, dy):\n # Move the rect\n self.rect.x += dx\n self.rect.y += dy\n\n def moveRight(self):\n self.direction = 0\n self.image = pygame.transform.rotate(self.orig_image, 0)\n\n def moveLeft(self):\n self.direction = 1\n self.image = pygame.transform.rotate(self.orig_image, 0)\n\n def moveUp(self):\n self.direction = 2\n self.image = pygame.transform.rotate(self.orig_image, 90)\n\n def moveDown(self):\n self.direction = 3\n self.image = pygame.transform.rotate(self.orig_image, 90)\n\n\n# Class for the orange dude\nclass Player(object):\n\n def __init__(self, player_num, px, py, sx, sy, start_direction):\n self.player_num = player_num\n self.rect = pygame.Rect(px, py, sx, sy)\n self.direction = start_direction\n self.moto = Moto(player_num, start_direction)\n self.moto.rect.x = px\n self.moto.rect.y = py\n\n def moveRight(self):\n if self.direction != 1:\n self.direction = 0\n self.moto.moveRight()\n\n def moveLeft(self):\n if self.direction != 0:\n self.direction = 1\n self.moto.moveLeft()\n\n def moveUp(self):\n if self.direction != 3:\n self.direction = 2\n self.moto.moveUp()\n\n def moveDown(self):\n if self.direction != 2:\n self.direction = 3\n self.moto.moveDown()\n\n def moveOn(self):\n if self.direction == 0:\n self.move(2, 0)\n if self.direction == 1:\n self.move(-2, 0)\n if self.direction == 2:\n self.move(0, -2)\n if self.direction == 3:\n self.move(0, 2)\n\n def move(self, dx, dy):\n\n # Move each axis separately. 
Note that this checks for collisions both times.\n if dx != 0:\n self.move_single_axis(dx, 0)\n self.moto.move_single_axis(dx, 0)\n if dy != 0:\n self.move_single_axis(0, dy)\n self.moto.move_single_axis(0, dy)\n\n def move_single_axis(self, dx, dy):\n\n # Move the rect\n self.rect.x += dx\n self.rect.y += dy\n\n # Draw a wall (after the movement)\n Wall(self.player_num, (self.rect.centerx, self.rect.centery))\n\n\n# Nice class to hold a wall rect\nclass Wall(object):\n\n def __init__(self, player_num, pos):\n Game.walls[player_num].append(self)\n self.rect = pygame.Rect(pos[0], pos[1], 3, 3)\n\n\n# MAIN\nclass Game:\n\n walls = [[], []]\n\n def main(self):\n\n winner = 0\n\n screen = pygame.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT))\n\n clock = pygame.time.Clock()\n # walls for 2 players: lists in list\n Game.walls = [[], []]\n # starting positions\n player = Player(0, SCREEN_WIDTH - 80, int(SCREEN_HEIGHT / 2), 2, 16, 1)\n player2 = Player(1, 80, int(SCREEN_HEIGHT / 2), 2, 16, 0)\n\n # JOYSTICK\n try:\n pygame.joystick.init()\n joysticks = [pygame.joystick.Joystick(x) for x in range(pygame.joystick.get_count())]\n joysticks[0].init()\n joysticks[1].init()\n player1_joystick = joysticks[0]\n player2_joystick = joysticks[1]\n except IndexError:\n player1_joystick = None\n player2_joystick = None\n\n end = pygame.image.load('number3.png')\n screen.fill((0, 0, 0))\n screen.blit(end, ((0.5 * SCREEN_WIDTH) - (0.5 * 500), (0.5 * SCREEN_HEIGHT) - (0.5 * 500)))\n pygame.display.flip()\n pygame.time.wait(1000)\n\n end = pygame.image.load('number2.png')\n screen.fill((0, 0, 0))\n screen.blit(end, ((0.5 * SCREEN_WIDTH) - (0.5 * 500), (0.5 * SCREEN_HEIGHT) - (0.5 * 500)))\n pygame.display.flip()\n pygame.time.wait(1000)\n\n end = pygame.image.load('number1.png')\n screen.fill((0, 0, 0))\n screen.blit(end, ((0.5 * SCREEN_WIDTH) - (0.5 * 500), (0.5 * SCREEN_HEIGHT) - (0.5 * 500)))\n pygame.display.flip()\n pygame.time.wait(1000)\n\n # end = pygame.image.load('arcade.jpg').convert()\n # screen.blit(end, ((0.5 * SCREEN_WIDTH) - (0.5 * 500), (0.5 * SCREEN_HEIGHT) - (0.5 * 500)))\n pygame.display.flip()\n\n # background_image = pygame.transform.scale(pygame.image.load('arcade.jpg').convert(), (1280, 1024))\n # screen.blit(background_image, [0, 0])\n\n running = True\n while running:\n clock.tick(60)\n\n for e in pygame.event.get():\n if e.type == pygame.QUIT:\n running = False\n if e.type == pygame.KEYDOWN and e.key == pygame.K_ESCAPE:\n running = False\n\n # JOYSTICK\n try:\n if e.type == pygame.locals.JOYAXISMOTION:\n player1jx, player1jy = player1_joystick.get_axis(0), player1_joystick.get_axis(1)\n if player1jx < 0:\n player2.moveLeft()\n if player1jx > 0:\n player2.moveRight()\n if player1jy < 0:\n player2.moveUp()\n if player1jy > 0:\n player2.moveDown()\n player2jx, player2jy = player2_joystick.get_axis(0), player2_joystick.get_axis(1)\n if player2jx < 0:\n player.moveLeft()\n if player2jx > 0:\n player.moveRight()\n if player2jy < 0:\n player.moveUp()\n if player2jy > 0:\n player.moveDown()\n except:\n pass\n\n # PLAYER 1\n # Move the player if an arrow key is pressed\n key = pygame.key.get_pressed()\n if key[pygame.K_LEFT]:\n player.moveLeft()\n if key[pygame.K_RIGHT]:\n player.moveRight()\n if key[pygame.K_UP]:\n player.moveUp()\n if key[pygame.K_DOWN]:\n player.moveDown()\n\n player.moveOn()\n\n # PLAYER 2\n key = pygame.key.get_pressed()\n if key[pygame.K_a]:\n player2.moveLeft()\n if key[pygame.K_d]:\n player2.moveRight()\n if key[pygame.K_w]:\n player2.moveUp()\n if key[pygame.K_s]:\n 
player2.moveDown()\n\n player2.moveOn()\n\n # check borders\n if player.moto.rect.x < 0 or player.moto.rect.x > SCREEN_WIDTH:\n winner = 2\n running = False\n if player2.moto.rect.x < 0 or player2.moto.rect.x > SCREEN_WIDTH:\n winner = 1\n running = False\n if player.moto.rect.y < 0 or player.moto.rect.y > SCREEN_HEIGHT:\n winner = 2\n running = False\n if player2.moto.rect.y < 0 or player2.moto.rect.y > SCREEN_HEIGHT:\n winner = 1\n running = False\n # Draw the scene\n # screen.blit(background_image, [0, 0])\n # pygame.display.flip()\n screen.fill((0, 0, 0))\n\n # Player 1 walls\n counter1 = 0\n counter2 = 0\n coll_range = len(Game.walls[0]) - (player.moto.rect.width / 2 + 10)\n coll_range_2 = len(Game.walls[1]) - (player2.moto.rect.width / 2 + 10)\n for wall in Game.walls[0]:\n if player2.moto.rect.colliderect(wall.rect):\n winner = 1\n running = False\n if (counter1 < coll_range) and player.moto.rect.colliderect(wall.rect):\n winner = 2\n running = False\n counter1 += 1\n pygame.draw.rect(screen, (255, 0, 0), wall.rect)\n # Player 2 walls\n for wall in Game.walls[1]:\n if player.moto.rect.colliderect(wall.rect):\n winner = 2\n running = False\n if (counter2 < coll_range_2) and player2.moto.rect.colliderect(wall.rect):\n winner = 1\n running = False\n counter2 += 1\n pygame.draw.rect(screen, (0, 0, 255), wall.rect)\n\n # Player 1\n pygame.draw.rect(screen, (255, 200, 0), player.rect)\n screen.blit(player.moto.image, (player.moto.rect.x, player.moto.rect.y))\n\n # Player 2\n pygame.draw.rect(screen, (255, 200, 0), player2.rect)\n screen.blit(player2.moto.image, (player2.moto.rect.x, player2.moto.rect.y))\n\n pygame.display.flip()\n\n # GAME OVER\n print(\"Winner: \", winner)\n running = True\n clock = pygame.time.Clock()\n sound = pygame.mixer.Sound('blast.wav')\n sound.play(loops=0, maxtime=0, fade_ms=0)\n\n while running:\n\n clock.tick(60)\n\n for e in pygame.event.get():\n if e.type == pygame.JOYBUTTONDOWN:\n player1Button = player1_joystick.get_button(0)\n if (player1Button > 0):\n running = False\n print(\"BACK TO MENU\")\n return True\n player2Button = player2_joystick.get_button(0)\n if (player2Button > 0):\n running = False\n print(\"BACK TO MENU\")\n return True\n\n if e.type == pygame.KEYDOWN and (e.key == pygame.K_KP_ENTER or e.key == pygame.K_RETURN):\n running = False\n print(\"BACK TO MENU\")\n return True\n\n end = pygame.image.load('gameover.png')\n screen.blit(end, ((0.5 * SCREEN_WIDTH) - (0.5 * 1024), (0.5 * SCREEN_HEIGHT) - (0.5 * 768)))\n screen.fill((0, 0, 0))\n screen.blit(end, (10, 10))\n if winner == 2:\n myfont = pygame.font.SysFont(\"monospace\", 72)\n label = myfont.render('Blue won!', 1, (0, 0, 225))\n screen.blit(label, ((0.5 * SCREEN_WIDTH) - (0.5 * 500), (0.5 * SCREEN_HEIGHT) - (0.5 * 750)))\n else:\n myfont = pygame.font.SysFont(\"monospace\", 72)\n label = myfont.render('Red won!', 1, (255, 0, 0))\n screen.blit(label, ((0.5 * SCREEN_WIDTH) - (0.5 * 500), (0.5 * SCREEN_HEIGHT) - (0.5 * 750)))\n\n pygame.display.flip()\n",
"step-ids": [
13,
15,
19,
22,
24
]
}
|
[
13,
15,
19,
22,
24
] |
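One detail worth calling out in the game loop above: coll_range trims the newest segments off a bike's own trail before the self-collision test, since the freshly drawn wall always overlaps the bike that just drew it. The idea in isolation (the grace value of 18 roughly mirrors rect.width / 2 + 10 for a 16px-wide sprite, but both numbers here are illustrative):

def hits_own_trail(walls, collides, grace):
    # Ignore the last `grace` segments the bike itself just laid down.
    return any(collides(w) for w in walls[:len(walls) - grace])

trail = list(range(100))  # 100 wall segments, newest last
print(hits_own_trail(trail, lambda w: w == 99, grace=18))  # False: newest segments skipped
print(hits_own_trail(trail, lambda w: w == 50, grace=18))  # True: an older segment counts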
import torch
import random
from itertools import product
from Struct import Action
class Agent(object):
"""the agent"""
def __init__(self, q, epsilon=0.8, discount=0.9, learningRate=0.5, traceDecay=0.3):
# action set
possibleChangesPerMagnet = (1e-2, 1e-3, 0, -1e-2, -1e-3)
# possibleChangesPerMagnet = (0, -1e-2, -1e-3)
self.actionSet = tuple(torch.tensor((x, y), dtype=torch.float) for x, y in
product(possibleChangesPerMagnet, possibleChangesPerMagnet))
# probability to act greedy
self.epsilon = epsilon
# Q-function
self.q = q
# memory
self.shortMemory = []
self.memorySize = 1
self.traceDecay = traceDecay
self.replayMemory = []
self.replayMemorySize = int(1e4)
# learning
self.discount = discount
self.learningRate = learningRate
return
def takeAction(self, state):
"""take an action according to current state"""
# go greedy or not?
if random.uniform(0, 1) < self.epsilon:
# greedy selection
# find best action
allActions = torch.stack(
tuple(torch.cat((state.strengths, state.focus, changes)) for changes in self.actionSet))
evaluation = self.q.evaluateBunch(allActions)
action = Action(state, self.actionSet[evaluation.argmax()])
return action
else:
# random selection
return Action(state, random.choice(self.actionSet))
def bestAction(self, state, isTensor=False):
"""returns best action and it's rating"""
# get value for every possible action
if not isTensor:
allActions = torch.stack(
tuple(torch.cat((state.strengths, state.focus, changes)) for changes in self.actionSet))
else:
allActions = torch.stack(
tuple(torch.cat((state, changes)) for changes in self.actionSet))
allValues = self.q.evaluateBunch(allActions)
# determine index of highest value
bestIndex = allValues.argmax()
# get best action
bestAction = allActions[bestIndex, -2:]
return bestAction, allValues[bestIndex]
def remember(self, transition):
"""place a transition in the memory"""
# reduce eligibility for old memories
for memory in self.shortMemory:
memory *= self.traceDecay * self.discount
# add new memory
if len(self.shortMemory) < self.memorySize:
self.shortMemory.append(transition)
else:
del self.shortMemory[0]
self.shortMemory.append(transition)
return
def getShortMemory(self):
return self.shortMemory
def wipeShortMemory(self):
"""wipe all recent experience"""
self.shortMemory = []
return
def learn(self, netInput, labels):
"""train Q-function"""
self.q.trainer.applyUpdate(netInput, labels)
return
def getSarsaLambda(self, shortMemory):
"""generate TD lambda update targets from short memory"""
# get temporal difference error
delta = shortMemory[-1].reward + self.discount * self.q.evaluate(
self.takeAction(shortMemory[-1].nextState)) - self.q.evaluate(shortMemory[-1].action)
# states
netInput = []
for memory in shortMemory:
netInput.append(
torch.cat((memory.action.state.strengths, memory.action.state.focus, memory.action.changes)))
netInput = torch.stack(netInput)
# updates for every state in memory with respect to its eligibility
labels = []
for memory in shortMemory:
labels.append(self.learningRate * delta * memory.action.eligibility)
labels = torch.tensor(labels)
labels = torch.unsqueeze(labels, 1)
return netInput, labels
def getDQN(self, shortMemory):
"""generates DQN update targets from short memory"""
# sampleSize = self.memorySize // 5 # use only with traces (= short memory larger than 5 entries)
sampleSize = 1
if len(shortMemory) < sampleSize:
sample = shortMemory
else:
sample = random.sample(shortMemory, sampleSize)
# states
netInput = []
for memory in sample:
netInput.append(
torch.cat((memory.action.state.strengths, memory.action.state.focus, memory.action.changes)))
netInput = torch.stack(netInput)
# updates for Q-values
labels = []
for memory in sample:
            if memory.nextState:  # non-terminal: bootstrap towards the Q-learning target r + discount * max_a' Q(s', a')
                currentQ = self.q.evaluate(memory.action)
                labels.append(currentQ + self.learningRate * (
                    memory.reward + self.discount * self.q.evaluateMax(memory.nextState, self.actionSet) - currentQ))
            else:  # terminal transition: no next state to bootstrap from, the reward alone is the target
                labels.append(memory.reward)
labels = torch.tensor(labels)
labels = torch.unsqueeze(labels, 1)
return netInput.float(), labels.float() # casting added due to occasional occurrence of LongTensors <- why?
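# --- Usage sketch (added for illustration; not part of the original file) ---
# A minimal training loop, assuming: a Q-function object `q` exposing evaluate(),
# evaluateBunch(), evaluateMax() and a trainer with applyUpdate(), as used above;
# an environment `env` with reset()/step(); and a Struct.Transition carrying the
# fields accessed above (action, reward, nextState). Every name beyond what the
# class itself uses is an assumption here.
#
# agent = Agent(q, epsilon=0.9)
# state = env.reset()                          # hypothetical environment
# for step in range(1000):
#     action = agent.takeAction(state)
#     state, reward, done = env.step(action)   # hypothetical signature
#     agent.remember(Transition(action, reward, None if done else state))
#     netInput, labels = agent.getDQN(agent.getShortMemory())
#     agent.learn(netInput, labels)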
|
normal
|
{
"blob_id": "63edbbbad9561ddae005d2b5e22a089819dc34c5",
"index": 1821,
"step-1": "<mask token>\n\n\nclass Agent(object):\n <mask token>\n\n def __init__(self, q, epsilon=0.8, discount=0.9, learningRate=0.5,\n traceDecay=0.3):\n possibleChangesPerMagnet = 0.01, 0.001, 0, -0.01, -0.001\n self.actionSet = tuple(torch.tensor((x, y), dtype=torch.float) for \n x, y in product(possibleChangesPerMagnet, possibleChangesPerMagnet)\n )\n self.epsilon = epsilon\n self.q = q\n self.shortMemory = []\n self.memorySize = 1\n self.traceDecay = traceDecay\n self.replayMemory = []\n self.replayMemorySize = int(10000.0)\n self.discount = discount\n self.learningRate = learningRate\n return\n <mask token>\n\n def bestAction(self, state, isTensor=False):\n \"\"\"returns best action and it's rating\"\"\"\n if not isTensor:\n allActions = torch.stack(tuple(torch.cat((state.strengths,\n state.focus, changes)) for changes in self.actionSet))\n else:\n allActions = torch.stack(tuple(torch.cat((state, changes)) for\n changes in self.actionSet))\n allValues = self.q.evaluateBunch(allActions)\n bestIndex = allValues.argmax()\n bestAction = allActions[bestIndex, -2:]\n return bestAction, allValues[bestIndex]\n\n def remember(self, transition):\n \"\"\"place a transition in the memory\"\"\"\n for memory in self.shortMemory:\n memory *= self.traceDecay * self.discount\n if len(self.shortMemory) < self.memorySize:\n self.shortMemory.append(transition)\n else:\n del self.shortMemory[0]\n self.shortMemory.append(transition)\n return\n\n def getShortMemory(self):\n return self.shortMemory\n <mask token>\n\n def learn(self, netInput, labels):\n \"\"\"train Q-function\"\"\"\n self.q.trainer.applyUpdate(netInput, labels)\n return\n\n def getSarsaLambda(self, shortMemory):\n \"\"\"generate TD lambda update targets from short memory\"\"\"\n delta = shortMemory[-1].reward + self.discount * self.q.evaluate(self\n .takeAction(shortMemory[-1].nextState)) - self.q.evaluate(\n shortMemory[-1].action)\n netInput = []\n for memory in shortMemory:\n netInput.append(torch.cat((memory.action.state.strengths,\n memory.action.state.focus, memory.action.changes)))\n netInput = torch.stack(netInput)\n labels = []\n for memory in shortMemory:\n labels.append(self.learningRate * delta * memory.action.eligibility\n )\n labels = torch.tensor(labels)\n labels = torch.unsqueeze(labels, 1)\n return netInput, labels\n\n def getDQN(self, shortMemory):\n \"\"\"generates DQN update targets from short memory\"\"\"\n sampleSize = 1\n if len(shortMemory) < sampleSize:\n sample = shortMemory\n else:\n sample = random.sample(shortMemory, sampleSize)\n netInput = []\n for memory in sample:\n netInput.append(torch.cat((memory.action.state.strengths,\n memory.action.state.focus, memory.action.changes)))\n netInput = torch.stack(netInput)\n labels = []\n for memory in sample:\n if memory.nextState:\n labels.append(memory.reward)\n else:\n currentQ = self.q.evaluate(memory.action)\n labels.append(currentQ + self.learningRate * (self.discount *\n self.q.evaluateMax(memory.nextState, self.actionSet) -\n currentQ))\n labels = torch.tensor(labels)\n labels = torch.unsqueeze(labels, 1)\n return netInput.float(), labels.float()\n",
"step-2": "<mask token>\n\n\nclass Agent(object):\n <mask token>\n\n def __init__(self, q, epsilon=0.8, discount=0.9, learningRate=0.5,\n traceDecay=0.3):\n possibleChangesPerMagnet = 0.01, 0.001, 0, -0.01, -0.001\n self.actionSet = tuple(torch.tensor((x, y), dtype=torch.float) for \n x, y in product(possibleChangesPerMagnet, possibleChangesPerMagnet)\n )\n self.epsilon = epsilon\n self.q = q\n self.shortMemory = []\n self.memorySize = 1\n self.traceDecay = traceDecay\n self.replayMemory = []\n self.replayMemorySize = int(10000.0)\n self.discount = discount\n self.learningRate = learningRate\n return\n\n def takeAction(self, state):\n \"\"\"take an action according to current state\"\"\"\n if random.uniform(0, 1) < self.epsilon:\n allActions = torch.stack(tuple(torch.cat((state.strengths,\n state.focus, changes)) for changes in self.actionSet))\n evaluation = self.q.evaluateBunch(allActions)\n action = Action(state, self.actionSet[evaluation.argmax()])\n return action\n else:\n return Action(state, random.choice(self.actionSet))\n\n def bestAction(self, state, isTensor=False):\n \"\"\"returns best action and it's rating\"\"\"\n if not isTensor:\n allActions = torch.stack(tuple(torch.cat((state.strengths,\n state.focus, changes)) for changes in self.actionSet))\n else:\n allActions = torch.stack(tuple(torch.cat((state, changes)) for\n changes in self.actionSet))\n allValues = self.q.evaluateBunch(allActions)\n bestIndex = allValues.argmax()\n bestAction = allActions[bestIndex, -2:]\n return bestAction, allValues[bestIndex]\n\n def remember(self, transition):\n \"\"\"place a transition in the memory\"\"\"\n for memory in self.shortMemory:\n memory *= self.traceDecay * self.discount\n if len(self.shortMemory) < self.memorySize:\n self.shortMemory.append(transition)\n else:\n del self.shortMemory[0]\n self.shortMemory.append(transition)\n return\n\n def getShortMemory(self):\n return self.shortMemory\n <mask token>\n\n def learn(self, netInput, labels):\n \"\"\"train Q-function\"\"\"\n self.q.trainer.applyUpdate(netInput, labels)\n return\n\n def getSarsaLambda(self, shortMemory):\n \"\"\"generate TD lambda update targets from short memory\"\"\"\n delta = shortMemory[-1].reward + self.discount * self.q.evaluate(self\n .takeAction(shortMemory[-1].nextState)) - self.q.evaluate(\n shortMemory[-1].action)\n netInput = []\n for memory in shortMemory:\n netInput.append(torch.cat((memory.action.state.strengths,\n memory.action.state.focus, memory.action.changes)))\n netInput = torch.stack(netInput)\n labels = []\n for memory in shortMemory:\n labels.append(self.learningRate * delta * memory.action.eligibility\n )\n labels = torch.tensor(labels)\n labels = torch.unsqueeze(labels, 1)\n return netInput, labels\n\n def getDQN(self, shortMemory):\n \"\"\"generates DQN update targets from short memory\"\"\"\n sampleSize = 1\n if len(shortMemory) < sampleSize:\n sample = shortMemory\n else:\n sample = random.sample(shortMemory, sampleSize)\n netInput = []\n for memory in sample:\n netInput.append(torch.cat((memory.action.state.strengths,\n memory.action.state.focus, memory.action.changes)))\n netInput = torch.stack(netInput)\n labels = []\n for memory in sample:\n if memory.nextState:\n labels.append(memory.reward)\n else:\n currentQ = self.q.evaluate(memory.action)\n labels.append(currentQ + self.learningRate * (self.discount *\n self.q.evaluateMax(memory.nextState, self.actionSet) -\n currentQ))\n labels = torch.tensor(labels)\n labels = torch.unsqueeze(labels, 1)\n return netInput.float(), 
labels.float()\n",
"step-3": "<mask token>\n\n\nclass Agent(object):\n <mask token>\n\n def __init__(self, q, epsilon=0.8, discount=0.9, learningRate=0.5,\n traceDecay=0.3):\n possibleChangesPerMagnet = 0.01, 0.001, 0, -0.01, -0.001\n self.actionSet = tuple(torch.tensor((x, y), dtype=torch.float) for \n x, y in product(possibleChangesPerMagnet, possibleChangesPerMagnet)\n )\n self.epsilon = epsilon\n self.q = q\n self.shortMemory = []\n self.memorySize = 1\n self.traceDecay = traceDecay\n self.replayMemory = []\n self.replayMemorySize = int(10000.0)\n self.discount = discount\n self.learningRate = learningRate\n return\n\n def takeAction(self, state):\n \"\"\"take an action according to current state\"\"\"\n if random.uniform(0, 1) < self.epsilon:\n allActions = torch.stack(tuple(torch.cat((state.strengths,\n state.focus, changes)) for changes in self.actionSet))\n evaluation = self.q.evaluateBunch(allActions)\n action = Action(state, self.actionSet[evaluation.argmax()])\n return action\n else:\n return Action(state, random.choice(self.actionSet))\n\n def bestAction(self, state, isTensor=False):\n \"\"\"returns best action and it's rating\"\"\"\n if not isTensor:\n allActions = torch.stack(tuple(torch.cat((state.strengths,\n state.focus, changes)) for changes in self.actionSet))\n else:\n allActions = torch.stack(tuple(torch.cat((state, changes)) for\n changes in self.actionSet))\n allValues = self.q.evaluateBunch(allActions)\n bestIndex = allValues.argmax()\n bestAction = allActions[bestIndex, -2:]\n return bestAction, allValues[bestIndex]\n\n def remember(self, transition):\n \"\"\"place a transition in the memory\"\"\"\n for memory in self.shortMemory:\n memory *= self.traceDecay * self.discount\n if len(self.shortMemory) < self.memorySize:\n self.shortMemory.append(transition)\n else:\n del self.shortMemory[0]\n self.shortMemory.append(transition)\n return\n\n def getShortMemory(self):\n return self.shortMemory\n\n def wipeShortMemory(self):\n \"\"\"wipe all recent experience\"\"\"\n self.shortMemory = []\n return\n\n def learn(self, netInput, labels):\n \"\"\"train Q-function\"\"\"\n self.q.trainer.applyUpdate(netInput, labels)\n return\n\n def getSarsaLambda(self, shortMemory):\n \"\"\"generate TD lambda update targets from short memory\"\"\"\n delta = shortMemory[-1].reward + self.discount * self.q.evaluate(self\n .takeAction(shortMemory[-1].nextState)) - self.q.evaluate(\n shortMemory[-1].action)\n netInput = []\n for memory in shortMemory:\n netInput.append(torch.cat((memory.action.state.strengths,\n memory.action.state.focus, memory.action.changes)))\n netInput = torch.stack(netInput)\n labels = []\n for memory in shortMemory:\n labels.append(self.learningRate * delta * memory.action.eligibility\n )\n labels = torch.tensor(labels)\n labels = torch.unsqueeze(labels, 1)\n return netInput, labels\n\n def getDQN(self, shortMemory):\n \"\"\"generates DQN update targets from short memory\"\"\"\n sampleSize = 1\n if len(shortMemory) < sampleSize:\n sample = shortMemory\n else:\n sample = random.sample(shortMemory, sampleSize)\n netInput = []\n for memory in sample:\n netInput.append(torch.cat((memory.action.state.strengths,\n memory.action.state.focus, memory.action.changes)))\n netInput = torch.stack(netInput)\n labels = []\n for memory in sample:\n if memory.nextState:\n labels.append(memory.reward)\n else:\n currentQ = self.q.evaluate(memory.action)\n labels.append(currentQ + self.learningRate * (self.discount *\n self.q.evaluateMax(memory.nextState, self.actionSet) -\n currentQ))\n labels = 
torch.tensor(labels)\n labels = torch.unsqueeze(labels, 1)\n return netInput.float(), labels.float()\n",
"step-4": "import torch\nimport random\nfrom itertools import product\nfrom Struct import Action\n\n\nclass Agent(object):\n \"\"\"the agent\"\"\"\n\n def __init__(self, q, epsilon=0.8, discount=0.9, learningRate=0.5,\n traceDecay=0.3):\n possibleChangesPerMagnet = 0.01, 0.001, 0, -0.01, -0.001\n self.actionSet = tuple(torch.tensor((x, y), dtype=torch.float) for \n x, y in product(possibleChangesPerMagnet, possibleChangesPerMagnet)\n )\n self.epsilon = epsilon\n self.q = q\n self.shortMemory = []\n self.memorySize = 1\n self.traceDecay = traceDecay\n self.replayMemory = []\n self.replayMemorySize = int(10000.0)\n self.discount = discount\n self.learningRate = learningRate\n return\n\n def takeAction(self, state):\n \"\"\"take an action according to current state\"\"\"\n if random.uniform(0, 1) < self.epsilon:\n allActions = torch.stack(tuple(torch.cat((state.strengths,\n state.focus, changes)) for changes in self.actionSet))\n evaluation = self.q.evaluateBunch(allActions)\n action = Action(state, self.actionSet[evaluation.argmax()])\n return action\n else:\n return Action(state, random.choice(self.actionSet))\n\n def bestAction(self, state, isTensor=False):\n \"\"\"returns best action and it's rating\"\"\"\n if not isTensor:\n allActions = torch.stack(tuple(torch.cat((state.strengths,\n state.focus, changes)) for changes in self.actionSet))\n else:\n allActions = torch.stack(tuple(torch.cat((state, changes)) for\n changes in self.actionSet))\n allValues = self.q.evaluateBunch(allActions)\n bestIndex = allValues.argmax()\n bestAction = allActions[bestIndex, -2:]\n return bestAction, allValues[bestIndex]\n\n def remember(self, transition):\n \"\"\"place a transition in the memory\"\"\"\n for memory in self.shortMemory:\n memory *= self.traceDecay * self.discount\n if len(self.shortMemory) < self.memorySize:\n self.shortMemory.append(transition)\n else:\n del self.shortMemory[0]\n self.shortMemory.append(transition)\n return\n\n def getShortMemory(self):\n return self.shortMemory\n\n def wipeShortMemory(self):\n \"\"\"wipe all recent experience\"\"\"\n self.shortMemory = []\n return\n\n def learn(self, netInput, labels):\n \"\"\"train Q-function\"\"\"\n self.q.trainer.applyUpdate(netInput, labels)\n return\n\n def getSarsaLambda(self, shortMemory):\n \"\"\"generate TD lambda update targets from short memory\"\"\"\n delta = shortMemory[-1].reward + self.discount * self.q.evaluate(self\n .takeAction(shortMemory[-1].nextState)) - self.q.evaluate(\n shortMemory[-1].action)\n netInput = []\n for memory in shortMemory:\n netInput.append(torch.cat((memory.action.state.strengths,\n memory.action.state.focus, memory.action.changes)))\n netInput = torch.stack(netInput)\n labels = []\n for memory in shortMemory:\n labels.append(self.learningRate * delta * memory.action.eligibility\n )\n labels = torch.tensor(labels)\n labels = torch.unsqueeze(labels, 1)\n return netInput, labels\n\n def getDQN(self, shortMemory):\n \"\"\"generates DQN update targets from short memory\"\"\"\n sampleSize = 1\n if len(shortMemory) < sampleSize:\n sample = shortMemory\n else:\n sample = random.sample(shortMemory, sampleSize)\n netInput = []\n for memory in sample:\n netInput.append(torch.cat((memory.action.state.strengths,\n memory.action.state.focus, memory.action.changes)))\n netInput = torch.stack(netInput)\n labels = []\n for memory in sample:\n if memory.nextState:\n labels.append(memory.reward)\n else:\n currentQ = self.q.evaluate(memory.action)\n labels.append(currentQ + self.learningRate * (self.discount *\n 
self.q.evaluateMax(memory.nextState, self.actionSet) -\n currentQ))\n labels = torch.tensor(labels)\n labels = torch.unsqueeze(labels, 1)\n return netInput.float(), labels.float()\n",
"step-5": "import torch\nimport random\nfrom itertools import product\n\nfrom Struct import Action\n\n\nclass Agent(object):\n \"\"\"the agent\"\"\"\n\n def __init__(self, q, epsilon=0.8, discount=0.9, learningRate=0.5, traceDecay=0.3):\n # action set\n possibleChangesPerMagnet = (1e-2, 1e-3, 0, -1e-2, -1e-3)\n # possibleChangesPerMagnet = (0, -1e-2, -1e-3)\n self.actionSet = tuple(torch.tensor((x, y), dtype=torch.float) for x, y in\n product(possibleChangesPerMagnet, possibleChangesPerMagnet))\n\n # probability to act greedy\n self.epsilon = epsilon\n\n # Q-function\n self.q = q\n\n # memory\n self.shortMemory = []\n self.memorySize = 1\n self.traceDecay = traceDecay\n\n self.replayMemory = []\n self.replayMemorySize = int(1e4)\n\n # learning\n self.discount = discount\n self.learningRate = learningRate\n\n return\n\n def takeAction(self, state):\n \"\"\"take an action according to current state\"\"\"\n # go greedy or not?\n if random.uniform(0, 1) < self.epsilon:\n # greedy selection\n # find best action\n allActions = torch.stack(\n tuple(torch.cat((state.strengths, state.focus, changes)) for changes in self.actionSet))\n evaluation = self.q.evaluateBunch(allActions)\n action = Action(state, self.actionSet[evaluation.argmax()])\n return action\n else:\n # random selection\n return Action(state, random.choice(self.actionSet))\n\n def bestAction(self, state, isTensor=False):\n \"\"\"returns best action and it's rating\"\"\"\n # get value for every possible action\n if not isTensor:\n allActions = torch.stack(\n tuple(torch.cat((state.strengths, state.focus, changes)) for changes in self.actionSet))\n else:\n allActions = torch.stack(\n tuple(torch.cat((state, changes)) for changes in self.actionSet))\n\n allValues = self.q.evaluateBunch(allActions)\n\n # determine index of highest value\n bestIndex = allValues.argmax()\n\n # get best action\n bestAction = allActions[bestIndex, -2:]\n\n return bestAction, allValues[bestIndex]\n\n def remember(self, transition):\n \"\"\"place a transition in the memory\"\"\"\n # reduce eligibility for old memories\n for memory in self.shortMemory:\n memory *= self.traceDecay * self.discount\n\n # add new memory\n if len(self.shortMemory) < self.memorySize:\n self.shortMemory.append(transition)\n else:\n del self.shortMemory[0]\n self.shortMemory.append(transition)\n\n return\n\n def getShortMemory(self):\n return self.shortMemory\n\n def wipeShortMemory(self):\n \"\"\"wipe all recent experience\"\"\"\n self.shortMemory = []\n return\n\n def learn(self, netInput, labels):\n \"\"\"train Q-function\"\"\"\n self.q.trainer.applyUpdate(netInput, labels)\n return\n\n def getSarsaLambda(self, shortMemory):\n \"\"\"generate TD lambda update targets from short memory\"\"\"\n # get temporal difference error\n delta = shortMemory[-1].reward + self.discount * self.q.evaluate(\n self.takeAction(shortMemory[-1].nextState)) - self.q.evaluate(shortMemory[-1].action)\n\n # states\n netInput = []\n for memory in shortMemory:\n netInput.append(\n torch.cat((memory.action.state.strengths, memory.action.state.focus, memory.action.changes)))\n\n netInput = torch.stack(netInput)\n\n # updates for every state in memory with respect to its eligibility\n labels = []\n for memory in shortMemory:\n labels.append(self.learningRate * delta * memory.action.eligibility)\n\n labels = torch.tensor(labels)\n labels = torch.unsqueeze(labels, 1)\n\n return netInput, labels\n\n def getDQN(self, shortMemory):\n \"\"\"generates DQN update targets from short memory\"\"\"\n # sampleSize = 
self.memorySize // 5 # use only with traces (= short memory larger than 5 entries)\n sampleSize = 1\n\n if len(shortMemory) < sampleSize:\n sample = shortMemory\n else:\n sample = random.sample(shortMemory, sampleSize)\n\n # states\n netInput = []\n for memory in sample:\n netInput.append(\n torch.cat((memory.action.state.strengths, memory.action.state.focus, memory.action.changes)))\n\n netInput = torch.stack(netInput)\n\n # updates for Q-values\n labels = []\n for memory in sample:\n if memory.nextState:\n labels.append(memory.reward)\n else:\n currentQ = self.q.evaluate(memory.action)\n labels.append(currentQ + self.learningRate * (\n self.discount * self.q.evaluateMax(memory.nextState, self.actionSet) - currentQ))\n\n labels = torch.tensor(labels)\n labels = torch.unsqueeze(labels, 1)\n\n return netInput.float(), labels.float() # casting added due to occasional occurrence of LongTensors <- why?\n",
"step-ids": [
8,
9,
10,
12,
13
]
}
|
[
8,
9,
10,
12,
13
] |
import subprocess
class BaseExecution:
    """Runs `df` with the given flag and hands the output to the supplied parser."""
    def __init__(self, flag, parser):
        self.flag = flag
        self.parser = parser
def execute(self):
process = subprocess.Popen(f'df {self.flag}', shell=True, stdout=
subprocess.PIPE, stderr=subprocess.PIPE)
output, err = process.communicate()
return_code = process.returncode
parser = self.parser(output, err, return_code)
result = parser.parse()
return result
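# --- Usage sketch (illustrative; DfParser below is a hypothetical parser) ---
# `parser` must be a callable taking (output, err, return_code) and returning an
# object with a parse() method. Note that execute() runs `df` with shell=True,
# so `flag` should come from trusted input only.
#
# class DfParser:
#     def __init__(self, output, err, return_code):
#         self.output, self.err, self.return_code = output, err, return_code
#
#     def parse(self):
#         if self.return_code != 0:
#             raise RuntimeError(self.err.decode())
#         return self.output.decode().splitlines()
#
# result = BaseExecution('-h', DfParser).execute()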
|
normal
|
{
"blob_id": "d8af43d24a2f2b99bc8b5098f251e017852d6d86",
"index": 1085,
"step-1": "<mask token>\n\n\nclass BaseExecution:\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass BaseExecution:\n\n def __init__(self, flag, parser):\n self.flag = flag\n self.parser = parser\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass BaseExecution:\n\n def __init__(self, flag, parser):\n self.flag = flag\n self.parser = parser\n\n def execute(self):\n process = subprocess.Popen(f'df {self.flag}', shell=True, stdout=\n subprocess.PIPE, stderr=subprocess.PIPE)\n output, err = process.communicate()\n return_code = process.returncode\n parser = self.parser(output, err, return_code)\n result = parser.parse()\n return result\n",
"step-4": "import subprocess\n\n\nclass BaseExecution:\n\n def __init__(self, flag, parser):\n self.flag = flag\n self.parser = parser\n\n def execute(self):\n process = subprocess.Popen(f'df {self.flag}', shell=True, stdout=\n subprocess.PIPE, stderr=subprocess.PIPE)\n output, err = process.communicate()\n return_code = process.returncode\n parser = self.parser(output, err, return_code)\n result = parser.parse()\n return result\n",
"step-5": null,
"step-ids": [
1,
2,
3,
4
]
}
|
[
1,
2,
3,
4
] |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2020, Lukas Bestle <[email protected]>
# Copyright: (c) 2017, Michael Heap <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
module: mas
short_description: Manage Mac App Store applications with mas-cli
description:
- Installs, uninstalls and updates macOS applications from the Mac App Store using the C(mas-cli).
version_added: '0.2.0'
author:
- Michael Heap (@mheap)
- Lukas Bestle (@lukasbestle)
options:
id:
description:
- The Mac App Store identifier of the app(s) you want to manage.
- This can be found by running C(mas search APP_NAME) on your machine.
type: list
elements: int
state:
description:
- Desired state of the app installation.
- The C(absent) value requires root permissions, also see the examples.
type: str
choices:
- absent
- latest
- present
default: present
upgrade_all:
description:
- Upgrade all installed Mac App Store apps.
type: bool
default: "no"
aliases: ["upgrade"]
requirements:
- macOS 10.11+
- "mas-cli (U(https://github.com/mas-cli/mas)) 1.5.0+ available as C(mas) in the bin path"
- The Apple ID to use already needs to be signed in to the Mac App Store (check with C(mas account)).
notes:
- This module supports C(check_mode).
'''
EXAMPLES = '''
- name: Install Keynote
community.general.mas:
id: 409183694
state: present
- name: Install Divvy with command mas installed in /usr/local/bin
community.general.mas:
id: 413857545
state: present
environment:
PATH: /usr/local/bin:{{ ansible_facts.env.PATH }}
- name: Install a list of apps
community.general.mas:
id:
- 409183694 # Keynote
- 413857545 # Divvy
state: present
- name: Ensure the latest Keynote version is installed
community.general.mas:
id: 409183694
state: latest
- name: Upgrade all installed Mac App Store apps
community.general.mas:
upgrade_all: yes
- name: Install specific apps and also upgrade all others
community.general.mas:
id:
- 409183694 # Keynote
- 413857545 # Divvy
state: present
upgrade_all: yes
- name: Uninstall Divvy
community.general.mas:
id: 413857545
state: absent
become: yes # Uninstallation requires root permissions
'''
RETURN = r''' # '''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
from distutils.version import StrictVersion
import os
class Mas(object):
def __init__(self, module):
self.module = module
# Initialize data properties
self.mas_path = self.module.get_bin_path('mas')
self._checked_signin = False
self._installed = None # Populated only if needed
self._outdated = None # Populated only if needed
self.count_install = 0
self.count_upgrade = 0
self.count_uninstall = 0
self.result = {
'changed': False
}
self.check_mas_tool()
def app_command(self, command, id):
''' Runs a `mas` command on a given app; command can be 'install', 'upgrade' or 'uninstall' '''
if not self.module.check_mode:
if command != 'uninstall':
self.check_signin()
rc, out, err = self.run([command, str(id)])
if rc != 0:
self.module.fail_json(
msg="Error running command '{0}' on app '{1}': {2}".format(command, str(id), out.rstrip())
)
# No error or dry run
self.__dict__['count_' + command] += 1
def check_mas_tool(self):
''' Verifies that the `mas` tool is available in a recent version '''
# Is the `mas` tool available at all?
if not self.mas_path:
self.module.fail_json(msg='Required `mas` tool is not installed')
# Is the version recent enough?
rc, out, err = self.run(['version'])
if rc != 0 or not out.strip() or StrictVersion(out.strip()) < StrictVersion('1.5.0'):
self.module.fail_json(msg='`mas` tool in version 1.5.0+ needed, got ' + out.strip())
def check_signin(self):
''' Verifies that the user is signed in to the Mac App Store '''
# Only check this once per execution
if self._checked_signin:
return
rc, out, err = self.run(['account'])
if out.split("\n", 1)[0].rstrip() == 'Not signed in':
self.module.fail_json(msg='You must be signed in to the Mac App Store')
self._checked_signin = True
def exit(self):
''' Exit with the data we have collected over time '''
msgs = []
if self.count_install > 0:
msgs.append('Installed {0} app(s)'.format(self.count_install))
if self.count_upgrade > 0:
msgs.append('Upgraded {0} app(s)'.format(self.count_upgrade))
if self.count_uninstall > 0:
msgs.append('Uninstalled {0} app(s)'.format(self.count_uninstall))
if msgs:
self.result['changed'] = True
self.result['msg'] = ', '.join(msgs)
self.module.exit_json(**self.result)
def get_current_state(self, command):
''' Returns the list of all app IDs; command can either be 'list' or 'outdated' '''
rc, raw_apps, err = self.run([command])
rows = raw_apps.split("\n")
if rows[0] == "No installed apps found":
rows = []
apps = []
for r in rows:
# Format: "123456789 App Name"
r = r.split(' ', 1)
if len(r) == 2:
apps.append(int(r[0]))
return apps
def installed(self):
''' Returns the list of installed apps '''
# Populate cache if not already done
if self._installed is None:
self._installed = self.get_current_state('list')
return self._installed
def is_installed(self, id):
''' Checks whether the given app is installed '''
return int(id) in self.installed()
def is_outdated(self, id):
''' Checks whether the given app is installed, but outdated '''
return int(id) in self.outdated()
def outdated(self):
''' Returns the list of installed, but outdated apps '''
# Populate cache if not already done
if self._outdated is None:
self._outdated = self.get_current_state('outdated')
return self._outdated
def run(self, cmd):
''' Runs a command of the `mas` tool '''
cmd.insert(0, self.mas_path)
return self.module.run_command(cmd, False)
def upgrade_all(self):
''' Upgrades all installed apps and sets the correct result data '''
outdated = self.outdated()
if not self.module.check_mode:
self.check_signin()
rc, out, err = self.run(['upgrade'])
if rc != 0:
self.module.fail_json(msg='Could not upgrade all apps: ' + out.rstrip())
self.count_upgrade += len(outdated)
def main():
module = AnsibleModule(
argument_spec=dict(
id=dict(type='list', elements='int'),
state=dict(type='str', default='present', choices=['absent', 'latest', 'present']),
upgrade_all=dict(type='bool', default=False, aliases=['upgrade']),
),
supports_check_mode=True
)
mas = Mas(module)
if module.params['id']:
apps = module.params['id']
else:
apps = []
state = module.params['state']
upgrade = module.params['upgrade_all']
# Run operations on the given app IDs
for app in sorted(set(apps)):
if state == 'present':
if not mas.is_installed(app):
mas.app_command('install', app)
elif state == 'absent':
if mas.is_installed(app):
# Ensure we are root
if os.getuid() != 0:
module.fail_json(msg="Uninstalling apps requires root permissions ('become: yes')")
mas.app_command('uninstall', app)
elif state == 'latest':
if not mas.is_installed(app):
mas.app_command('install', app)
elif mas.is_outdated(app):
mas.app_command('upgrade', app)
# Upgrade all apps if requested
mas._outdated = None # Clear cache
if upgrade and mas.outdated():
mas.upgrade_all()
# Exit with the collected data
mas.exit()
if __name__ == '__main__':
main()
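# --- Local test sketch (illustrative; the path and app ID are placeholders) ---
# AnsibleModule-based modules can be exercised outside a playbook by passing a
# JSON args file in the ANSIBLE_MODULE_ARGS envelope, e.g.:
#   echo '{"ANSIBLE_MODULE_ARGS": {"id": [409183694], "state": "present"}}' > /tmp/args.json
#   python mas.py /tmp/args.json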
|
normal
|
{
"blob_id": "8b965fd91396735e0153390b4eff540d3aac3aff",
"index": 4916,
"step-1": "<mask token>\n\n\nclass Mas(object):\n\n def __init__(self, module):\n self.module = module\n self.mas_path = self.module.get_bin_path('mas')\n self._checked_signin = False\n self._installed = None\n self._outdated = None\n self.count_install = 0\n self.count_upgrade = 0\n self.count_uninstall = 0\n self.result = {'changed': False}\n self.check_mas_tool()\n\n def app_command(self, command, id):\n \"\"\" Runs a `mas` command on a given app; command can be 'install', 'upgrade' or 'uninstall' \"\"\"\n if not self.module.check_mode:\n if command != 'uninstall':\n self.check_signin()\n rc, out, err = self.run([command, str(id)])\n if rc != 0:\n self.module.fail_json(msg=\n \"Error running command '{0}' on app '{1}': {2}\".format(\n command, str(id), out.rstrip()))\n self.__dict__['count_' + command] += 1\n\n def check_mas_tool(self):\n \"\"\" Verifies that the `mas` tool is available in a recent version \"\"\"\n if not self.mas_path:\n self.module.fail_json(msg='Required `mas` tool is not installed')\n rc, out, err = self.run(['version'])\n if rc != 0 or not out.strip() or StrictVersion(out.strip()\n ) < StrictVersion('1.5.0'):\n self.module.fail_json(msg=\n '`mas` tool in version 1.5.0+ needed, got ' + out.strip())\n\n def check_signin(self):\n \"\"\" Verifies that the user is signed in to the Mac App Store \"\"\"\n if self._checked_signin:\n return\n rc, out, err = self.run(['account'])\n if out.split('\\n', 1)[0].rstrip() == 'Not signed in':\n self.module.fail_json(msg=\n 'You must be signed in to the Mac App Store')\n self._checked_signin = True\n\n def exit(self):\n \"\"\" Exit with the data we have collected over time \"\"\"\n msgs = []\n if self.count_install > 0:\n msgs.append('Installed {0} app(s)'.format(self.count_install))\n if self.count_upgrade > 0:\n msgs.append('Upgraded {0} app(s)'.format(self.count_upgrade))\n if self.count_uninstall > 0:\n msgs.append('Uninstalled {0} app(s)'.format(self.count_uninstall))\n if msgs:\n self.result['changed'] = True\n self.result['msg'] = ', '.join(msgs)\n self.module.exit_json(**self.result)\n\n def get_current_state(self, command):\n \"\"\" Returns the list of all app IDs; command can either be 'list' or 'outdated' \"\"\"\n rc, raw_apps, err = self.run([command])\n rows = raw_apps.split('\\n')\n if rows[0] == 'No installed apps found':\n rows = []\n apps = []\n for r in rows:\n r = r.split(' ', 1)\n if len(r) == 2:\n apps.append(int(r[0]))\n return apps\n\n def installed(self):\n \"\"\" Returns the list of installed apps \"\"\"\n if self._installed is None:\n self._installed = self.get_current_state('list')\n return self._installed\n\n def is_installed(self, id):\n \"\"\" Checks whether the given app is installed \"\"\"\n return int(id) in self.installed()\n\n def is_outdated(self, id):\n \"\"\" Checks whether the given app is installed, but outdated \"\"\"\n return int(id) in self.outdated()\n\n def outdated(self):\n \"\"\" Returns the list of installed, but outdated apps \"\"\"\n if self._outdated is None:\n self._outdated = self.get_current_state('outdated')\n return self._outdated\n\n def run(self, cmd):\n \"\"\" Runs a command of the `mas` tool \"\"\"\n cmd.insert(0, self.mas_path)\n return self.module.run_command(cmd, False)\n\n def upgrade_all(self):\n \"\"\" Upgrades all installed apps and sets the correct result data \"\"\"\n outdated = self.outdated()\n if not self.module.check_mode:\n self.check_signin()\n rc, out, err = self.run(['upgrade'])\n if rc != 0:\n self.module.fail_json(msg='Could not upgrade all apps: ' +\n 
out.rstrip())\n self.count_upgrade += len(outdated)\n\n\ndef main():\n module = AnsibleModule(argument_spec=dict(id=dict(type='list', elements\n ='int'), state=dict(type='str', default='present', choices=[\n 'absent', 'latest', 'present']), upgrade_all=dict(type='bool',\n default=False, aliases=['upgrade'])), supports_check_mode=True)\n mas = Mas(module)\n if module.params['id']:\n apps = module.params['id']\n else:\n apps = []\n state = module.params['state']\n upgrade = module.params['upgrade_all']\n for app in sorted(set(apps)):\n if state == 'present':\n if not mas.is_installed(app):\n mas.app_command('install', app)\n elif state == 'absent':\n if mas.is_installed(app):\n if os.getuid() != 0:\n module.fail_json(msg=\n \"Uninstalling apps requires root permissions ('become: yes')\"\n )\n mas.app_command('uninstall', app)\n elif state == 'latest':\n if not mas.is_installed(app):\n mas.app_command('install', app)\n elif mas.is_outdated(app):\n mas.app_command('upgrade', app)\n mas._outdated = None\n if upgrade and mas.outdated():\n mas.upgrade_all()\n mas.exit()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Mas(object):\n\n def __init__(self, module):\n self.module = module\n self.mas_path = self.module.get_bin_path('mas')\n self._checked_signin = False\n self._installed = None\n self._outdated = None\n self.count_install = 0\n self.count_upgrade = 0\n self.count_uninstall = 0\n self.result = {'changed': False}\n self.check_mas_tool()\n\n def app_command(self, command, id):\n \"\"\" Runs a `mas` command on a given app; command can be 'install', 'upgrade' or 'uninstall' \"\"\"\n if not self.module.check_mode:\n if command != 'uninstall':\n self.check_signin()\n rc, out, err = self.run([command, str(id)])\n if rc != 0:\n self.module.fail_json(msg=\n \"Error running command '{0}' on app '{1}': {2}\".format(\n command, str(id), out.rstrip()))\n self.__dict__['count_' + command] += 1\n\n def check_mas_tool(self):\n \"\"\" Verifies that the `mas` tool is available in a recent version \"\"\"\n if not self.mas_path:\n self.module.fail_json(msg='Required `mas` tool is not installed')\n rc, out, err = self.run(['version'])\n if rc != 0 or not out.strip() or StrictVersion(out.strip()\n ) < StrictVersion('1.5.0'):\n self.module.fail_json(msg=\n '`mas` tool in version 1.5.0+ needed, got ' + out.strip())\n\n def check_signin(self):\n \"\"\" Verifies that the user is signed in to the Mac App Store \"\"\"\n if self._checked_signin:\n return\n rc, out, err = self.run(['account'])\n if out.split('\\n', 1)[0].rstrip() == 'Not signed in':\n self.module.fail_json(msg=\n 'You must be signed in to the Mac App Store')\n self._checked_signin = True\n\n def exit(self):\n \"\"\" Exit with the data we have collected over time \"\"\"\n msgs = []\n if self.count_install > 0:\n msgs.append('Installed {0} app(s)'.format(self.count_install))\n if self.count_upgrade > 0:\n msgs.append('Upgraded {0} app(s)'.format(self.count_upgrade))\n if self.count_uninstall > 0:\n msgs.append('Uninstalled {0} app(s)'.format(self.count_uninstall))\n if msgs:\n self.result['changed'] = True\n self.result['msg'] = ', '.join(msgs)\n self.module.exit_json(**self.result)\n\n def get_current_state(self, command):\n \"\"\" Returns the list of all app IDs; command can either be 'list' or 'outdated' \"\"\"\n rc, raw_apps, err = self.run([command])\n rows = raw_apps.split('\\n')\n if rows[0] == 'No installed apps found':\n rows = []\n apps = []\n for r in rows:\n r = r.split(' ', 1)\n if len(r) == 2:\n apps.append(int(r[0]))\n return apps\n\n def installed(self):\n \"\"\" Returns the list of installed apps \"\"\"\n if self._installed is None:\n self._installed = self.get_current_state('list')\n return self._installed\n\n def is_installed(self, id):\n \"\"\" Checks whether the given app is installed \"\"\"\n return int(id) in self.installed()\n\n def is_outdated(self, id):\n \"\"\" Checks whether the given app is installed, but outdated \"\"\"\n return int(id) in self.outdated()\n\n def outdated(self):\n \"\"\" Returns the list of installed, but outdated apps \"\"\"\n if self._outdated is None:\n self._outdated = self.get_current_state('outdated')\n return self._outdated\n\n def run(self, cmd):\n \"\"\" Runs a command of the `mas` tool \"\"\"\n cmd.insert(0, self.mas_path)\n return self.module.run_command(cmd, False)\n\n def upgrade_all(self):\n \"\"\" Upgrades all installed apps and sets the correct result data \"\"\"\n outdated = self.outdated()\n if not self.module.check_mode:\n self.check_signin()\n rc, out, err = self.run(['upgrade'])\n if rc != 0:\n self.module.fail_json(msg='Could not upgrade all apps: ' +\n 
out.rstrip())\n self.count_upgrade += len(outdated)\n\n\ndef main():\n module = AnsibleModule(argument_spec=dict(id=dict(type='list', elements\n ='int'), state=dict(type='str', default='present', choices=[\n 'absent', 'latest', 'present']), upgrade_all=dict(type='bool',\n default=False, aliases=['upgrade'])), supports_check_mode=True)\n mas = Mas(module)\n if module.params['id']:\n apps = module.params['id']\n else:\n apps = []\n state = module.params['state']\n upgrade = module.params['upgrade_all']\n for app in sorted(set(apps)):\n if state == 'present':\n if not mas.is_installed(app):\n mas.app_command('install', app)\n elif state == 'absent':\n if mas.is_installed(app):\n if os.getuid() != 0:\n module.fail_json(msg=\n \"Uninstalling apps requires root permissions ('become: yes')\"\n )\n mas.app_command('uninstall', app)\n elif state == 'latest':\n if not mas.is_installed(app):\n mas.app_command('install', app)\n elif mas.is_outdated(app):\n mas.app_command('upgrade', app)\n mas._outdated = None\n if upgrade and mas.outdated():\n mas.upgrade_all()\n mas.exit()\n\n\nif __name__ == '__main__':\n main()\n",
"step-3": "<mask token>\n__metaclass__ = type\nDOCUMENTATION = \"\"\"\nmodule: mas\nshort_description: Manage Mac App Store applications with mas-cli\ndescription:\n - Installs, uninstalls and updates macOS applications from the Mac App Store using the C(mas-cli).\nversion_added: '0.2.0'\nauthor:\n - Michael Heap (@mheap)\n - Lukas Bestle (@lukasbestle)\noptions:\n id:\n description:\n - The Mac App Store identifier of the app(s) you want to manage.\n - This can be found by running C(mas search APP_NAME) on your machine.\n type: list\n elements: int\n state:\n description:\n - Desired state of the app installation.\n - The C(absent) value requires root permissions, also see the examples.\n type: str\n choices:\n - absent\n - latest\n - present\n default: present\n upgrade_all:\n description:\n - Upgrade all installed Mac App Store apps.\n type: bool\n default: \"no\"\n aliases: [\"upgrade\"]\nrequirements:\n - macOS 10.11+\n - \"mas-cli (U(https://github.com/mas-cli/mas)) 1.5.0+ available as C(mas) in the bin path\"\n - The Apple ID to use already needs to be signed in to the Mac App Store (check with C(mas account)).\nnotes:\n - This module supports C(check_mode).\n\"\"\"\nEXAMPLES = \"\"\"\n- name: Install Keynote\n community.general.mas:\n id: 409183694\n state: present\n\n- name: Install Divvy with command mas installed in /usr/local/bin\n community.general.mas:\n id: 413857545\n state: present\n environment:\n PATH: /usr/local/bin:{{ ansible_facts.env.PATH }}\n\n- name: Install a list of apps\n community.general.mas:\n id:\n - 409183694 # Keynote\n - 413857545 # Divvy\n state: present\n\n- name: Ensure the latest Keynote version is installed\n community.general.mas:\n id: 409183694\n state: latest\n\n- name: Upgrade all installed Mac App Store apps\n community.general.mas:\n upgrade_all: yes\n\n- name: Install specific apps and also upgrade all others\n community.general.mas:\n id:\n - 409183694 # Keynote\n - 413857545 # Divvy\n state: present\n upgrade_all: yes\n\n- name: Uninstall Divvy\n community.general.mas:\n id: 413857545\n state: absent\n become: yes # Uninstallation requires root permissions\n\"\"\"\nRETURN = ' # '\n<mask token>\n\n\nclass Mas(object):\n\n def __init__(self, module):\n self.module = module\n self.mas_path = self.module.get_bin_path('mas')\n self._checked_signin = False\n self._installed = None\n self._outdated = None\n self.count_install = 0\n self.count_upgrade = 0\n self.count_uninstall = 0\n self.result = {'changed': False}\n self.check_mas_tool()\n\n def app_command(self, command, id):\n \"\"\" Runs a `mas` command on a given app; command can be 'install', 'upgrade' or 'uninstall' \"\"\"\n if not self.module.check_mode:\n if command != 'uninstall':\n self.check_signin()\n rc, out, err = self.run([command, str(id)])\n if rc != 0:\n self.module.fail_json(msg=\n \"Error running command '{0}' on app '{1}': {2}\".format(\n command, str(id), out.rstrip()))\n self.__dict__['count_' + command] += 1\n\n def check_mas_tool(self):\n \"\"\" Verifies that the `mas` tool is available in a recent version \"\"\"\n if not self.mas_path:\n self.module.fail_json(msg='Required `mas` tool is not installed')\n rc, out, err = self.run(['version'])\n if rc != 0 or not out.strip() or StrictVersion(out.strip()\n ) < StrictVersion('1.5.0'):\n self.module.fail_json(msg=\n '`mas` tool in version 1.5.0+ needed, got ' + out.strip())\n\n def check_signin(self):\n \"\"\" Verifies that the user is signed in to the Mac App Store \"\"\"\n if self._checked_signin:\n return\n rc, out, err = 
self.run(['account'])\n if out.split('\\n', 1)[0].rstrip() == 'Not signed in':\n self.module.fail_json(msg=\n 'You must be signed in to the Mac App Store')\n self._checked_signin = True\n\n def exit(self):\n \"\"\" Exit with the data we have collected over time \"\"\"\n msgs = []\n if self.count_install > 0:\n msgs.append('Installed {0} app(s)'.format(self.count_install))\n if self.count_upgrade > 0:\n msgs.append('Upgraded {0} app(s)'.format(self.count_upgrade))\n if self.count_uninstall > 0:\n msgs.append('Uninstalled {0} app(s)'.format(self.count_uninstall))\n if msgs:\n self.result['changed'] = True\n self.result['msg'] = ', '.join(msgs)\n self.module.exit_json(**self.result)\n\n def get_current_state(self, command):\n \"\"\" Returns the list of all app IDs; command can either be 'list' or 'outdated' \"\"\"\n rc, raw_apps, err = self.run([command])\n rows = raw_apps.split('\\n')\n if rows[0] == 'No installed apps found':\n rows = []\n apps = []\n for r in rows:\n r = r.split(' ', 1)\n if len(r) == 2:\n apps.append(int(r[0]))\n return apps\n\n def installed(self):\n \"\"\" Returns the list of installed apps \"\"\"\n if self._installed is None:\n self._installed = self.get_current_state('list')\n return self._installed\n\n def is_installed(self, id):\n \"\"\" Checks whether the given app is installed \"\"\"\n return int(id) in self.installed()\n\n def is_outdated(self, id):\n \"\"\" Checks whether the given app is installed, but outdated \"\"\"\n return int(id) in self.outdated()\n\n def outdated(self):\n \"\"\" Returns the list of installed, but outdated apps \"\"\"\n if self._outdated is None:\n self._outdated = self.get_current_state('outdated')\n return self._outdated\n\n def run(self, cmd):\n \"\"\" Runs a command of the `mas` tool \"\"\"\n cmd.insert(0, self.mas_path)\n return self.module.run_command(cmd, False)\n\n def upgrade_all(self):\n \"\"\" Upgrades all installed apps and sets the correct result data \"\"\"\n outdated = self.outdated()\n if not self.module.check_mode:\n self.check_signin()\n rc, out, err = self.run(['upgrade'])\n if rc != 0:\n self.module.fail_json(msg='Could not upgrade all apps: ' +\n out.rstrip())\n self.count_upgrade += len(outdated)\n\n\ndef main():\n module = AnsibleModule(argument_spec=dict(id=dict(type='list', elements\n ='int'), state=dict(type='str', default='present', choices=[\n 'absent', 'latest', 'present']), upgrade_all=dict(type='bool',\n default=False, aliases=['upgrade'])), supports_check_mode=True)\n mas = Mas(module)\n if module.params['id']:\n apps = module.params['id']\n else:\n apps = []\n state = module.params['state']\n upgrade = module.params['upgrade_all']\n for app in sorted(set(apps)):\n if state == 'present':\n if not mas.is_installed(app):\n mas.app_command('install', app)\n elif state == 'absent':\n if mas.is_installed(app):\n if os.getuid() != 0:\n module.fail_json(msg=\n \"Uninstalling apps requires root permissions ('become: yes')\"\n )\n mas.app_command('uninstall', app)\n elif state == 'latest':\n if not mas.is_installed(app):\n mas.app_command('install', app)\n elif mas.is_outdated(app):\n mas.app_command('upgrade', app)\n mas._outdated = None\n if upgrade and mas.outdated():\n mas.upgrade_all()\n mas.exit()\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "from __future__ import absolute_import, division, print_function\n__metaclass__ = type\nDOCUMENTATION = \"\"\"\nmodule: mas\nshort_description: Manage Mac App Store applications with mas-cli\ndescription:\n - Installs, uninstalls and updates macOS applications from the Mac App Store using the C(mas-cli).\nversion_added: '0.2.0'\nauthor:\n - Michael Heap (@mheap)\n - Lukas Bestle (@lukasbestle)\noptions:\n id:\n description:\n - The Mac App Store identifier of the app(s) you want to manage.\n - This can be found by running C(mas search APP_NAME) on your machine.\n type: list\n elements: int\n state:\n description:\n - Desired state of the app installation.\n - The C(absent) value requires root permissions, also see the examples.\n type: str\n choices:\n - absent\n - latest\n - present\n default: present\n upgrade_all:\n description:\n - Upgrade all installed Mac App Store apps.\n type: bool\n default: \"no\"\n aliases: [\"upgrade\"]\nrequirements:\n - macOS 10.11+\n - \"mas-cli (U(https://github.com/mas-cli/mas)) 1.5.0+ available as C(mas) in the bin path\"\n - The Apple ID to use already needs to be signed in to the Mac App Store (check with C(mas account)).\nnotes:\n - This module supports C(check_mode).\n\"\"\"\nEXAMPLES = \"\"\"\n- name: Install Keynote\n community.general.mas:\n id: 409183694\n state: present\n\n- name: Install Divvy with command mas installed in /usr/local/bin\n community.general.mas:\n id: 413857545\n state: present\n environment:\n PATH: /usr/local/bin:{{ ansible_facts.env.PATH }}\n\n- name: Install a list of apps\n community.general.mas:\n id:\n - 409183694 # Keynote\n - 413857545 # Divvy\n state: present\n\n- name: Ensure the latest Keynote version is installed\n community.general.mas:\n id: 409183694\n state: latest\n\n- name: Upgrade all installed Mac App Store apps\n community.general.mas:\n upgrade_all: yes\n\n- name: Install specific apps and also upgrade all others\n community.general.mas:\n id:\n - 409183694 # Keynote\n - 413857545 # Divvy\n state: present\n upgrade_all: yes\n\n- name: Uninstall Divvy\n community.general.mas:\n id: 413857545\n state: absent\n become: yes # Uninstallation requires root permissions\n\"\"\"\nRETURN = ' # '\nfrom ansible.module_utils.basic import AnsibleModule\nfrom ansible.module_utils._text import to_native\nfrom distutils.version import StrictVersion\nimport os\n\n\nclass Mas(object):\n\n def __init__(self, module):\n self.module = module\n self.mas_path = self.module.get_bin_path('mas')\n self._checked_signin = False\n self._installed = None\n self._outdated = None\n self.count_install = 0\n self.count_upgrade = 0\n self.count_uninstall = 0\n self.result = {'changed': False}\n self.check_mas_tool()\n\n def app_command(self, command, id):\n \"\"\" Runs a `mas` command on a given app; command can be 'install', 'upgrade' or 'uninstall' \"\"\"\n if not self.module.check_mode:\n if command != 'uninstall':\n self.check_signin()\n rc, out, err = self.run([command, str(id)])\n if rc != 0:\n self.module.fail_json(msg=\n \"Error running command '{0}' on app '{1}': {2}\".format(\n command, str(id), out.rstrip()))\n self.__dict__['count_' + command] += 1\n\n def check_mas_tool(self):\n \"\"\" Verifies that the `mas` tool is available in a recent version \"\"\"\n if not self.mas_path:\n self.module.fail_json(msg='Required `mas` tool is not installed')\n rc, out, err = self.run(['version'])\n if rc != 0 or not out.strip() or StrictVersion(out.strip()\n ) < StrictVersion('1.5.0'):\n self.module.fail_json(msg=\n '`mas` tool in 
version 1.5.0+ needed, got ' + out.strip())\n\n def check_signin(self):\n \"\"\" Verifies that the user is signed in to the Mac App Store \"\"\"\n if self._checked_signin:\n return\n rc, out, err = self.run(['account'])\n if out.split('\\n', 1)[0].rstrip() == 'Not signed in':\n self.module.fail_json(msg=\n 'You must be signed in to the Mac App Store')\n self._checked_signin = True\n\n def exit(self):\n \"\"\" Exit with the data we have collected over time \"\"\"\n msgs = []\n if self.count_install > 0:\n msgs.append('Installed {0} app(s)'.format(self.count_install))\n if self.count_upgrade > 0:\n msgs.append('Upgraded {0} app(s)'.format(self.count_upgrade))\n if self.count_uninstall > 0:\n msgs.append('Uninstalled {0} app(s)'.format(self.count_uninstall))\n if msgs:\n self.result['changed'] = True\n self.result['msg'] = ', '.join(msgs)\n self.module.exit_json(**self.result)\n\n def get_current_state(self, command):\n \"\"\" Returns the list of all app IDs; command can either be 'list' or 'outdated' \"\"\"\n rc, raw_apps, err = self.run([command])\n rows = raw_apps.split('\\n')\n if rows[0] == 'No installed apps found':\n rows = []\n apps = []\n for r in rows:\n r = r.split(' ', 1)\n if len(r) == 2:\n apps.append(int(r[0]))\n return apps\n\n def installed(self):\n \"\"\" Returns the list of installed apps \"\"\"\n if self._installed is None:\n self._installed = self.get_current_state('list')\n return self._installed\n\n def is_installed(self, id):\n \"\"\" Checks whether the given app is installed \"\"\"\n return int(id) in self.installed()\n\n def is_outdated(self, id):\n \"\"\" Checks whether the given app is installed, but outdated \"\"\"\n return int(id) in self.outdated()\n\n def outdated(self):\n \"\"\" Returns the list of installed, but outdated apps \"\"\"\n if self._outdated is None:\n self._outdated = self.get_current_state('outdated')\n return self._outdated\n\n def run(self, cmd):\n \"\"\" Runs a command of the `mas` tool \"\"\"\n cmd.insert(0, self.mas_path)\n return self.module.run_command(cmd, False)\n\n def upgrade_all(self):\n \"\"\" Upgrades all installed apps and sets the correct result data \"\"\"\n outdated = self.outdated()\n if not self.module.check_mode:\n self.check_signin()\n rc, out, err = self.run(['upgrade'])\n if rc != 0:\n self.module.fail_json(msg='Could not upgrade all apps: ' +\n out.rstrip())\n self.count_upgrade += len(outdated)\n\n\ndef main():\n module = AnsibleModule(argument_spec=dict(id=dict(type='list', elements\n ='int'), state=dict(type='str', default='present', choices=[\n 'absent', 'latest', 'present']), upgrade_all=dict(type='bool',\n default=False, aliases=['upgrade'])), supports_check_mode=True)\n mas = Mas(module)\n if module.params['id']:\n apps = module.params['id']\n else:\n apps = []\n state = module.params['state']\n upgrade = module.params['upgrade_all']\n for app in sorted(set(apps)):\n if state == 'present':\n if not mas.is_installed(app):\n mas.app_command('install', app)\n elif state == 'absent':\n if mas.is_installed(app):\n if os.getuid() != 0:\n module.fail_json(msg=\n \"Uninstalling apps requires root permissions ('become: yes')\"\n )\n mas.app_command('uninstall', app)\n elif state == 'latest':\n if not mas.is_installed(app):\n mas.app_command('install', app)\n elif mas.is_outdated(app):\n mas.app_command('upgrade', app)\n mas._outdated = None\n if upgrade and mas.outdated():\n mas.upgrade_all()\n mas.exit()\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n# Copyright: (c) 2020, Lukas Bestle <[email protected]>\n# Copyright: (c) 2017, Michael Heap <[email protected]>\n# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)\n\nfrom __future__ import absolute_import, division, print_function\n__metaclass__ = type\n\n\nDOCUMENTATION = '''\nmodule: mas\nshort_description: Manage Mac App Store applications with mas-cli\ndescription:\n - Installs, uninstalls and updates macOS applications from the Mac App Store using the C(mas-cli).\nversion_added: '0.2.0'\nauthor:\n - Michael Heap (@mheap)\n - Lukas Bestle (@lukasbestle)\noptions:\n id:\n description:\n - The Mac App Store identifier of the app(s) you want to manage.\n - This can be found by running C(mas search APP_NAME) on your machine.\n type: list\n elements: int\n state:\n description:\n - Desired state of the app installation.\n - The C(absent) value requires root permissions, also see the examples.\n type: str\n choices:\n - absent\n - latest\n - present\n default: present\n upgrade_all:\n description:\n - Upgrade all installed Mac App Store apps.\n type: bool\n default: \"no\"\n aliases: [\"upgrade\"]\nrequirements:\n - macOS 10.11+\n - \"mas-cli (U(https://github.com/mas-cli/mas)) 1.5.0+ available as C(mas) in the bin path\"\n - The Apple ID to use already needs to be signed in to the Mac App Store (check with C(mas account)).\nnotes:\n - This module supports C(check_mode).\n'''\n\nEXAMPLES = '''\n- name: Install Keynote\n community.general.mas:\n id: 409183694\n state: present\n\n- name: Install Divvy with command mas installed in /usr/local/bin\n community.general.mas:\n id: 413857545\n state: present\n environment:\n PATH: /usr/local/bin:{{ ansible_facts.env.PATH }}\n\n- name: Install a list of apps\n community.general.mas:\n id:\n - 409183694 # Keynote\n - 413857545 # Divvy\n state: present\n\n- name: Ensure the latest Keynote version is installed\n community.general.mas:\n id: 409183694\n state: latest\n\n- name: Upgrade all installed Mac App Store apps\n community.general.mas:\n upgrade_all: yes\n\n- name: Install specific apps and also upgrade all others\n community.general.mas:\n id:\n - 409183694 # Keynote\n - 413857545 # Divvy\n state: present\n upgrade_all: yes\n\n- name: Uninstall Divvy\n community.general.mas:\n id: 413857545\n state: absent\n become: yes # Uninstallation requires root permissions\n'''\n\nRETURN = r''' # '''\n\nfrom ansible.module_utils.basic import AnsibleModule\nfrom ansible.module_utils._text import to_native\nfrom distutils.version import StrictVersion\nimport os\n\n\nclass Mas(object):\n\n def __init__(self, module):\n self.module = module\n\n # Initialize data properties\n self.mas_path = self.module.get_bin_path('mas')\n self._checked_signin = False\n self._installed = None # Populated only if needed\n self._outdated = None # Populated only if needed\n self.count_install = 0\n self.count_upgrade = 0\n self.count_uninstall = 0\n self.result = {\n 'changed': False\n }\n\n self.check_mas_tool()\n\n def app_command(self, command, id):\n ''' Runs a `mas` command on a given app; command can be 'install', 'upgrade' or 'uninstall' '''\n\n if not self.module.check_mode:\n if command != 'uninstall':\n self.check_signin()\n\n rc, out, err = self.run([command, str(id)])\n if rc != 0:\n self.module.fail_json(\n msg=\"Error running command '{0}' on app '{1}': {2}\".format(command, str(id), out.rstrip())\n )\n\n # No error or dry run\n self.__dict__['count_' + command] += 
1\n\n def check_mas_tool(self):\n ''' Verifies that the `mas` tool is available in a recent version '''\n\n # Is the `mas` tool available at all?\n if not self.mas_path:\n self.module.fail_json(msg='Required `mas` tool is not installed')\n\n # Is the version recent enough?\n rc, out, err = self.run(['version'])\n if rc != 0 or not out.strip() or StrictVersion(out.strip()) < StrictVersion('1.5.0'):\n self.module.fail_json(msg='`mas` tool in version 1.5.0+ needed, got ' + out.strip())\n\n def check_signin(self):\n ''' Verifies that the user is signed in to the Mac App Store '''\n\n # Only check this once per execution\n if self._checked_signin:\n return\n\n rc, out, err = self.run(['account'])\n if out.split(\"\\n\", 1)[0].rstrip() == 'Not signed in':\n self.module.fail_json(msg='You must be signed in to the Mac App Store')\n\n self._checked_signin = True\n\n def exit(self):\n ''' Exit with the data we have collected over time '''\n\n msgs = []\n if self.count_install > 0:\n msgs.append('Installed {0} app(s)'.format(self.count_install))\n if self.count_upgrade > 0:\n msgs.append('Upgraded {0} app(s)'.format(self.count_upgrade))\n if self.count_uninstall > 0:\n msgs.append('Uninstalled {0} app(s)'.format(self.count_uninstall))\n\n if msgs:\n self.result['changed'] = True\n self.result['msg'] = ', '.join(msgs)\n\n self.module.exit_json(**self.result)\n\n def get_current_state(self, command):\n ''' Returns the list of all app IDs; command can either be 'list' or 'outdated' '''\n\n rc, raw_apps, err = self.run([command])\n rows = raw_apps.split(\"\\n\")\n if rows[0] == \"No installed apps found\":\n rows = []\n apps = []\n for r in rows:\n # Format: \"123456789 App Name\"\n r = r.split(' ', 1)\n if len(r) == 2:\n apps.append(int(r[0]))\n\n return apps\n\n def installed(self):\n ''' Returns the list of installed apps '''\n\n # Populate cache if not already done\n if self._installed is None:\n self._installed = self.get_current_state('list')\n\n return self._installed\n\n def is_installed(self, id):\n ''' Checks whether the given app is installed '''\n\n return int(id) in self.installed()\n\n def is_outdated(self, id):\n ''' Checks whether the given app is installed, but outdated '''\n\n return int(id) in self.outdated()\n\n def outdated(self):\n ''' Returns the list of installed, but outdated apps '''\n\n # Populate cache if not already done\n if self._outdated is None:\n self._outdated = self.get_current_state('outdated')\n\n return self._outdated\n\n def run(self, cmd):\n ''' Runs a command of the `mas` tool '''\n\n cmd.insert(0, self.mas_path)\n return self.module.run_command(cmd, False)\n\n def upgrade_all(self):\n ''' Upgrades all installed apps and sets the correct result data '''\n\n outdated = self.outdated()\n\n if not self.module.check_mode:\n self.check_signin()\n\n rc, out, err = self.run(['upgrade'])\n if rc != 0:\n self.module.fail_json(msg='Could not upgrade all apps: ' + out.rstrip())\n\n self.count_upgrade += len(outdated)\n\n\ndef main():\n module = AnsibleModule(\n argument_spec=dict(\n id=dict(type='list', elements='int'),\n state=dict(type='str', default='present', choices=['absent', 'latest', 'present']),\n upgrade_all=dict(type='bool', default=False, aliases=['upgrade']),\n ),\n supports_check_mode=True\n )\n mas = Mas(module)\n\n if module.params['id']:\n apps = module.params['id']\n else:\n apps = []\n\n state = module.params['state']\n upgrade = module.params['upgrade_all']\n\n # Run operations on the given app IDs\n for app in sorted(set(apps)):\n if state == 
'present':\n if not mas.is_installed(app):\n mas.app_command('install', app)\n\n elif state == 'absent':\n if mas.is_installed(app):\n # Ensure we are root\n if os.getuid() != 0:\n module.fail_json(msg=\"Uninstalling apps requires root permissions ('become: yes')\")\n\n mas.app_command('uninstall', app)\n\n elif state == 'latest':\n if not mas.is_installed(app):\n mas.app_command('install', app)\n elif mas.is_outdated(app):\n mas.app_command('upgrade', app)\n\n # Upgrade all apps if requested\n mas._outdated = None # Clear cache\n if upgrade and mas.outdated():\n mas.upgrade_all()\n\n # Exit with the collected data\n mas.exit()\n\n\nif __name__ == '__main__':\n main()\n",
"step-ids": [
14,
15,
16,
17,
18
]
}
|
[
14,
15,
16,
17,
18
] |
w=int(input())
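# w is read but never used: the answer below is always the same list of 297 distinct numbers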
lst=[i+1 for i in range(100)]
for i in range(2,100):
lst.append(i*100)
lst.append(i*10000)
lst.append(10000)
print(297)
print(*lst)
|
normal
|
{
"blob_id": "1d004ec0f4f5c50f49834f169812737d16f22b96",
"index": 3967,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor i in range(2, 100):\n lst.append(i * 100)\n lst.append(i * 10000)\nlst.append(10000)\nprint(297)\nprint(*lst)\n",
"step-3": "w = int(input())\nlst = [(i + 1) for i in range(100)]\nfor i in range(2, 100):\n lst.append(i * 100)\n lst.append(i * 10000)\nlst.append(10000)\nprint(297)\nprint(*lst)\n",
"step-4": "w=int(input())\nlst=[i+1 for i in range(100)]\n\nfor i in range(2,100):\n lst.append(i*100)\n lst.append(i*10000)\nlst.append(10000) \nprint(297)\nprint(*lst)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
#!/usr/bin/env python
import argparse
import keyring
import papercut
import ConfigParser
import getpass
import time
import os
config = ConfigParser.ConfigParser()
config.read([os.path.expanduser('~/.papercut')])
try:
username = config.get('papercut','username')
except ConfigParser.NoSectionError:
username = None
p = argparse.ArgumentParser(description='Print some documents')
p.add_argument('--print', '-p', help='a filename to be printed', dest='printjob')
p.add_argument('--printer', '-r', help='a printer name to print to')
p.add_argument('--balance', '-b', nargs='?', const=True, help='display the user\'s printing balance')
p.add_argument('--list', '-l', nargs='?', const=True, help='list available printers')
p.add_argument('--user', '-u', help='username')
p.add_argument('--password-options', '-o', choices=['save','prompt'], help='save: prompt for password and save to keyring,\n prompt: prompt for password')
args = p.parse_args()
if not username and not args.user:
username = raw_input('enter username: ')
password = keyring.get_password('papercut', username)
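# keyring returns None when nothing is stored, which forces the password prompt below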
def list_printers(sessID):
printers = papercut.listPrinters(sessID)
print "\nAvailable printers:"
for i,printer in enumerate(printers):
print i,"\t",printer[1], "." * (50 - len(printer[1])), printer[0]
return printers
def get_balance(sessID):
print '\nYour balance is now: $ %.2f' % (int(papercut.getBalance(sessID)) / 100.0)
if args.password_options or not password:
password = getpass.getpass()
if args.password_options == 'save':
keyring.set_password('papercut', username, password)
print "password saved in keyring"
if args.list or args.balance or args.printjob or args.printer:
sessID = papercut.login(username, password)
if sessID:
print '\nLogged in to PaperCut with session ID',sessID
if args.list: list_printers(sessID)
if args.balance: get_balance(sessID)
if args.printjob:
if not args.printer:
printers = list_printers(sessID)
args.printer = raw_input('select printer: ')
try:
printerIndex = int(args.printer)
args.printer = printers[printerIndex][1]
except ValueError:
pass
printJobID = papercut.printFile(args.printjob, args.printer, sessID)
print '\nJob sent to printer', args.printer
status = papercut.getPrintStatus(printJobID)
while(status['status'] == 'Submitting'):
time.sleep(0.1)
status = papercut.getPrintStatus(printJobID)
print "\nJob queued for printing."
while(not status['complete']):
time.sleep(0.1)
status = papercut.getPrintStatus(printJobID)
print "\nComplete!"
print "\nThis job cost $", status['cost']
# print status
get_balance(sessID)
else:
print '\nDid not successfully log in to PaperCut'
|
normal
|
{
"blob_id": "33cc8814d9397bcb0041728407efef80a136f151",
"index": 505,
"step-1": "#!/usr/bin/env python\nimport argparse\nimport keyring\nimport papercut\nimport ConfigParser\nimport getpass\nimport time\nimport os\n\nconfig = ConfigParser.ConfigParser()\nconfig.read([os.path.expanduser('~/.papercut')])\ntry:\n username = config.get('papercut','username')\nexcept ConfigParser.NoSectionError:\n username = None\n\np = argparse.ArgumentParser(description='Print some documents')\np.add_argument('--print', '-p', help='a filename to be printed', dest='printjob')\np.add_argument('--printer', '-r', help='a printer name to print to')\np.add_argument('--balance', '-b', nargs='?', const=True, help='display the user\\' printing balance')\np.add_argument('--list', '-l', nargs='?', const=True, help='list available printers')\np.add_argument('--user', '-u', help='username')\np.add_argument('--password-options', '-o', choices=['save','prompt'], help='save: prompt for password and save to keyring,\\n prompt: prompt for password')\n\nargs = p.parse_args()\n\nif not username and not args.user:\n username = raw_input('enter username: ')\n\npassword = keyring.get_password('papercut', username)\n\ndef list_printers(sessID):\n printers = papercut.listPrinters(sessID)\n print \"\\nAvailable printers:\"\n for i,printer in enumerate(printers):\n print i,\"\\t\",printer[1], \".\" * (50 - len(printer[1])), printer[0]\n return printers\n\ndef get_balance(sessID):\n print '\\nYour balance is now: $ %.2f' % (int(papercut.getBalance(sessID)) / 100.0)\n\n\n\nif args.password_options or not password:\n password = getpass.getpass()\n \nif args.password_options == 'save':\n keyring.set_password('papercut', username, password)\n print \"password saved in keyring\"\n\nif args.list or args.balance or args.printjob or args.printer:\n sessID = papercut.login(username, password)\n if sessID:\n print '\\nLogged in to PaperCut with session ID',sessID\n if args.list: list_printers(sessID)\n if args.balance: get_balance(sessID)\n if args.printjob:\n if not args.printer:\n printers = list_printers(sessID)\n args.printer = raw_input('select printer: ')\n try:\n printerIndex = int(args.printer)\n args.printer = printers[printerIndex][1]\n except ValueError:\n pass\n printJobID = papercut.printFile(args.printjob, args.printer, sessID)\n print '\\nJob sent to printer', args.printer\n \n status = papercut.getPrintStatus(printJobID)\n while(status['status'] == 'Submitting'):\n time.sleep(0.1)\n status = papercut.getPrintStatus(printJobID)\n print \"\\nJob queued for printing.\"\n\n while(not status['complete']):\n time.sleep(0.1)\n status = papercut.getPrintStatus(printJobID)\n print \"\\nComplete!\"\n print \"\\nThis job cost $\", status['cost']\n# print status\n get_balance(sessID)\n \n else:\n print '\\nDid not successfully log in to PaperCut'\n \n\n\n \n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
#!/usr/bin/env python
from __future__ import print_function
from types_ import SimpleObject, SimpleObjectImmutable, NamedTuple, SimpleTuple, c_struct
import timeit
import random
TYPES = [
SimpleObjectImmutable,
SimpleObject,
NamedTuple,
SimpleTuple,
c_struct,
]
a = 1035
b = b'\x54 - fo!'
c = [1, 5, 66, ]
def measure_creation():
random.shuffle(TYPES)
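    # randomise the measurement order so it does not systematically bias the timings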
for type_ in TYPES:
pre = 'from __main__ import {}, a, b, c'.format(type_.__name__)
body = '{}(a, b, c)'.format(type_.__name__)
print('\t', type_.__name__, timeit.repeat(stmt=body, setup=pre, repeat=5))
def test_immut():
'''Verifies that the type called SimpleObjectImmutable
actually satisfies that definition.
'''
from types_ import read_only
q = SimpleObjectImmutable(a, b, c)
SimpleObjectImmutable.__setattr__ = read_only
try:
q.a = 1
assert(False)
except ValueError:
assert(True)
if __name__ == '__main__':
measure_creation()
test_immut()
|
normal
|
{
"blob_id": "ba73562cd8ffa52a1fede35c3325e7e76a6dad54",
"index": 7966,
"step-1": "<mask token>\n\n\ndef measure_creation():\n random.shuffle(TYPES)\n for type_ in TYPES:\n pre = 'from __main__ import {}, a, b, c'.format(type_.__name__)\n body = '{}(a, b, c)'.format(type_.__name__)\n print('\\t', type_.__name__, timeit.repeat(stmt=body, setup=pre,\n repeat=5))\n\n\ndef test_immut():\n \"\"\"Verifies that the type called SimpleObjectImmutable\n actually satisfies that definition.\n \"\"\"\n from types_ import read_only\n q = SimpleObjectImmutable(a, b, c)\n SimpleObjectImmutable.__setattr__ = read_only\n try:\n q.a = 1\n assert False\n except ValueError:\n assert True\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef measure_creation():\n random.shuffle(TYPES)\n for type_ in TYPES:\n pre = 'from __main__ import {}, a, b, c'.format(type_.__name__)\n body = '{}(a, b, c)'.format(type_.__name__)\n print('\\t', type_.__name__, timeit.repeat(stmt=body, setup=pre,\n repeat=5))\n\n\ndef test_immut():\n \"\"\"Verifies that the type called SimpleObjectImmutable\n actually satisfies that definition.\n \"\"\"\n from types_ import read_only\n q = SimpleObjectImmutable(a, b, c)\n SimpleObjectImmutable.__setattr__ = read_only\n try:\n q.a = 1\n assert False\n except ValueError:\n assert True\n\n\nif __name__ == '__main__':\n measure_creation()\n test_immut()\n",
"step-3": "<mask token>\nTYPES = [SimpleObjectImmutable, SimpleObject, NamedTuple, SimpleTuple, c_struct\n ]\na = 1035\nb = b'T - fo!'\nc = [1, 5, 66]\n\n\ndef measure_creation():\n random.shuffle(TYPES)\n for type_ in TYPES:\n pre = 'from __main__ import {}, a, b, c'.format(type_.__name__)\n body = '{}(a, b, c)'.format(type_.__name__)\n print('\\t', type_.__name__, timeit.repeat(stmt=body, setup=pre,\n repeat=5))\n\n\ndef test_immut():\n \"\"\"Verifies that the type called SimpleObjectImmutable\n actually satisfies that definition.\n \"\"\"\n from types_ import read_only\n q = SimpleObjectImmutable(a, b, c)\n SimpleObjectImmutable.__setattr__ = read_only\n try:\n q.a = 1\n assert False\n except ValueError:\n assert True\n\n\nif __name__ == '__main__':\n measure_creation()\n test_immut()\n",
"step-4": "from __future__ import print_function\nfrom types_ import SimpleObject, SimpleObjectImmutable, NamedTuple, SimpleTuple, c_struct\nimport timeit\nimport random\nTYPES = [SimpleObjectImmutable, SimpleObject, NamedTuple, SimpleTuple, c_struct\n ]\na = 1035\nb = b'T - fo!'\nc = [1, 5, 66]\n\n\ndef measure_creation():\n random.shuffle(TYPES)\n for type_ in TYPES:\n pre = 'from __main__ import {}, a, b, c'.format(type_.__name__)\n body = '{}(a, b, c)'.format(type_.__name__)\n print('\\t', type_.__name__, timeit.repeat(stmt=body, setup=pre,\n repeat=5))\n\n\ndef test_immut():\n \"\"\"Verifies that the type called SimpleObjectImmutable\n actually satisfies that definition.\n \"\"\"\n from types_ import read_only\n q = SimpleObjectImmutable(a, b, c)\n SimpleObjectImmutable.__setattr__ = read_only\n try:\n q.a = 1\n assert False\n except ValueError:\n assert True\n\n\nif __name__ == '__main__':\n measure_creation()\n test_immut()\n",
"step-5": "#!/usr/bin/env python\n\nfrom __future__ import print_function\n\nfrom types_ import SimpleObject, SimpleObjectImmutable, NamedTuple, SimpleTuple, c_struct\nimport timeit\nimport random\n\nTYPES = [\n SimpleObjectImmutable,\n SimpleObject,\n NamedTuple,\n SimpleTuple,\n c_struct,\n ]\n\na = 1035\nb = b'\\x54 - fo!'\nc = [1, 5, 66, ]\n\ndef measure_creation():\n random.shuffle(TYPES)\n\n for type_ in TYPES:\n pre = 'from __main__ import {}, a, b, c'.format(type_.__name__)\n body = '{}(a, b, c)'.format(type_.__name__)\n print('\\t', type_.__name__, timeit.repeat(stmt=body, setup=pre, repeat=5))\n\n\ndef test_immut():\n '''Verifies that the type called SimpleObjectImmutable\n actually satisfies that definition.\n '''\n from types_ import read_only\n\n q = SimpleObjectImmutable(a, b, c)\n SimpleObjectImmutable.__setattr__ = read_only\n try:\n q.a = 1\n assert(False)\n except ValueError:\n assert(True)\n\nif __name__ == '__main__':\n measure_creation()\n\n test_immut()\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
import os
import numpy
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
def plotObject(obj):
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
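    # positive voxels are drawn in red with larger markers, negative ones in blue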
x,y,z = numpy.nonzero(obj>0)
ax.scatter(x,y,z,c='r',s=10)
xb,yb,zb = numpy.nonzero(obj<0)
ax.scatter(xb,yb,zb,c='b',s=1)
plt.show()
class GridData:
def __init__(self,datafile,labelfile):
f = open(datafile,'rb')
f2 = open(labelfile,'r')
self.samples = []
self.labels = []
self.label_names = []
self.data_size = 30
self.source = datafile
sample_size = self.data_size ** 3
file_size = os.path.getsize(datafile)
        self.num_samples = file_size // sample_size  # floor division keeps num_samples an int for range()
for i in range(self.num_samples):
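            # read one 30**3 block of signed bytes and reshape it into a voxel cube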
arr = numpy.fromfile(f,dtype=numpy.int8,count=sample_size)
matrix = arr.reshape((self.data_size,self.data_size,self.data_size))
self.samples.append(matrix.transpose())
l = f2.readline().split()
self.labels.append(int(l[0]))
self.label_names.append(l[1])
def __str__(self):
return "<%s %d samples (%dx%dx%d)>" % (self.source,self.num_samples,self.data_size,self.data_size,self.data_size)
def __repr__(self):
return str(self)
if __name__=="__main__":
partial_view_file = 'partial_view_single.data'
complete_view_file = 'complete_view_single.data'
label_file = 'labels_single.data'
partial_views = GridData(partial_view_file,label_file)
complete_views = GridData(complete_view_file,label_file)
print(partial_views)
print(complete_views)
for i in range(partial_views.num_samples):
plotObject(partial_views.samples[i])
plotObject(complete_views.samples[i])
|
normal
|
{
"blob_id": "8475792cc2d55f030f0bd9e7d0240e3b59ed996b",
"index": 7774,
"step-1": "<mask token>\n\n\nclass GridData:\n\n def __init__(self, datafile, labelfile):\n f = open(datafile, 'rb')\n f2 = open(labelfile, 'r')\n self.samples = []\n self.labels = []\n self.label_names = []\n self.data_size = 30\n self.source = datafile\n sample_size = self.data_size ** 3\n file_size = os.path.getsize(datafile)\n self.num_samples = file_size / sample_size\n for i in range(self.num_samples):\n arr = numpy.fromfile(f, dtype=numpy.int8, count=sample_size)\n matrix = arr.reshape((self.data_size, self.data_size, self.\n data_size))\n self.samples.append(matrix.transpose())\n l = f2.readline().split()\n self.labels.append(int(l[0]))\n self.label_names.append(l[1])\n <mask token>\n <mask token>\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass GridData:\n\n def __init__(self, datafile, labelfile):\n f = open(datafile, 'rb')\n f2 = open(labelfile, 'r')\n self.samples = []\n self.labels = []\n self.label_names = []\n self.data_size = 30\n self.source = datafile\n sample_size = self.data_size ** 3\n file_size = os.path.getsize(datafile)\n self.num_samples = file_size / sample_size\n for i in range(self.num_samples):\n arr = numpy.fromfile(f, dtype=numpy.int8, count=sample_size)\n matrix = arr.reshape((self.data_size, self.data_size, self.\n data_size))\n self.samples.append(matrix.transpose())\n l = f2.readline().split()\n self.labels.append(int(l[0]))\n self.label_names.append(l[1])\n\n def __str__(self):\n return '<%s %d samples (%dx%dx%d)>' % (self.source, self.\n num_samples, self.data_size, self.data_size, self.data_size)\n\n def __repr__(self):\n return str(self)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef plotObject(obj):\n fig = plt.figure()\n ax = fig.add_subplot(111, projection='3d')\n x, y, z = numpy.nonzero(obj > 0)\n ax.scatter(x, y, z, c='r', s=10)\n xb, yb, zb = numpy.nonzero(obj < 0)\n ax.scatter(xb, yb, zb, c='b', s=1)\n plt.show()\n\n\nclass GridData:\n\n def __init__(self, datafile, labelfile):\n f = open(datafile, 'rb')\n f2 = open(labelfile, 'r')\n self.samples = []\n self.labels = []\n self.label_names = []\n self.data_size = 30\n self.source = datafile\n sample_size = self.data_size ** 3\n file_size = os.path.getsize(datafile)\n self.num_samples = file_size / sample_size\n for i in range(self.num_samples):\n arr = numpy.fromfile(f, dtype=numpy.int8, count=sample_size)\n matrix = arr.reshape((self.data_size, self.data_size, self.\n data_size))\n self.samples.append(matrix.transpose())\n l = f2.readline().split()\n self.labels.append(int(l[0]))\n self.label_names.append(l[1])\n\n def __str__(self):\n return '<%s %d samples (%dx%dx%d)>' % (self.source, self.\n num_samples, self.data_size, self.data_size, self.data_size)\n\n def __repr__(self):\n return str(self)\n\n\n<mask token>\n",
"step-4": "import os\nimport numpy\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\n\n\ndef plotObject(obj):\n fig = plt.figure()\n ax = fig.add_subplot(111, projection='3d')\n x, y, z = numpy.nonzero(obj > 0)\n ax.scatter(x, y, z, c='r', s=10)\n xb, yb, zb = numpy.nonzero(obj < 0)\n ax.scatter(xb, yb, zb, c='b', s=1)\n plt.show()\n\n\nclass GridData:\n\n def __init__(self, datafile, labelfile):\n f = open(datafile, 'rb')\n f2 = open(labelfile, 'r')\n self.samples = []\n self.labels = []\n self.label_names = []\n self.data_size = 30\n self.source = datafile\n sample_size = self.data_size ** 3\n file_size = os.path.getsize(datafile)\n self.num_samples = file_size / sample_size\n for i in range(self.num_samples):\n arr = numpy.fromfile(f, dtype=numpy.int8, count=sample_size)\n matrix = arr.reshape((self.data_size, self.data_size, self.\n data_size))\n self.samples.append(matrix.transpose())\n l = f2.readline().split()\n self.labels.append(int(l[0]))\n self.label_names.append(l[1])\n\n def __str__(self):\n return '<%s %d samples (%dx%dx%d)>' % (self.source, self.\n num_samples, self.data_size, self.data_size, self.data_size)\n\n def __repr__(self):\n return str(self)\n\n\nif __name__ == '__main__':\n partial_view_file = 'partial_view_single.data'\n complete_view_file = 'complete_view_single.data'\n label_file = 'labels_single.data'\n partial_views = GridData(partial_view_file, label_file)\n complete_views = GridData(complete_view_file, label_file)\n print(partial_views)\n print(complete_views)\n for i in range(partial_views.num_samples):\n plotObject(partial_views.samples[i])\n plotObject(complete_views.samples[i])\n",
"step-5": "import os\nimport numpy\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\n\ndef plotObject(obj):\n\tfig = plt.figure()\n\tax = fig.add_subplot(111, projection='3d')\n\tx,y,z = numpy.nonzero(obj>0)\n\tax.scatter(x,y,z,c='r',s=10)\n\txb,yb,zb = numpy.nonzero(obj<0)\n\tax.scatter(xb,yb,zb,c='b',s=1)\n\tplt.show()\n\nclass GridData:\n\tdef __init__(self,datafile,labelfile):\n\t\tf = open(datafile,'rb')\n\t\tf2 = open(labelfile,'r')\n\t\tself.samples = []\n\t\tself.labels = []\n\t\tself.label_names = []\n\t\tself.data_size = 30\n\t\tself.source = datafile\n\t\tsample_size = self.data_size ** 3\n\t\tfile_size = os.path.getsize(datafile)\n\t\tself.num_samples = file_size / sample_size\n\t\tfor i in range(self.num_samples):\n\t\t\tarr = numpy.fromfile(f,dtype=numpy.int8,count=sample_size)\n\t\t\tmatrix = arr.reshape((self.data_size,self.data_size,self.data_size))\n\t\t\tself.samples.append(matrix.transpose())\n\t\t\tl = f2.readline().split()\n\t\t\tself.labels.append(int(l[0]))\n\t\t\tself.label_names.append(l[1])\n\t\n\tdef __str__(self):\n\t\treturn \"<%s %d samples (%dx%dx%d)>\" % (self.source,self.num_samples,self.data_size,self.data_size,self.data_size)\n\n\tdef __repr__(self):\n\t\treturn str(self)\n\nif __name__==\"__main__\":\n\tpartial_view_file = 'partial_view_single.data'\n\tcomplete_view_file = 'complete_view_single.data'\n\tlabel_file = 'labels_single.data'\n\n\tpartial_views = GridData(partial_view_file,label_file)\n\tcomplete_views = GridData(complete_view_file,label_file)\n\tprint(partial_views)\n\tprint(complete_views)\n\n\tfor i in range(partial_views.num_samples):\n\t\tplotObject(partial_views.samples[i])\n\t\tplotObject(complete_views.samples[i])\n",
"step-ids": [
2,
4,
5,
7,
8
]
}
|
[
2,
4,
5,
7,
8
] |
import os
from distutils.core import setup
from Cython.Distutils import Extension
from Cython.Build import cythonize
CUR_DIR = os.path.dirname(os.path.abspath(__file__))
SRC_DIR = os.path.join(CUR_DIR, 'src')
TPM_DIR = os.path.join(SRC_DIR, 'tpm')
include_dirs = [SRC_DIR]
src_files = ["pytpm/_tpm.pyx"]
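# Cython source for the wrapper module, linked against the TPM C library below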
# TPM library and path to the library.
library_dirs = [os.path.expanduser("~/lib/tpm")]
libraries = ['tpm']
ext_modules = [
Extension(
"pytpm._tpm", src_files,
include_dirs=include_dirs,
library_dirs=library_dirs,
libraries=libraries
)
]
setup(
name='pytpm',
packages=['pytpm'],
package_dir={'pytpm': 'pytpm'},
package_data={'pytpm': ['*.pxd', '*.pyx', '*.pxi']},
ext_modules=cythonize(ext_modules)
)
|
normal
|
{
"blob_id": "3875d85bef37900f9066c108dc720b364cbafffa",
"index": 8476,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nsetup(name='pytpm', packages=['pytpm'], package_dir={'pytpm': 'pytpm'},\n package_data={'pytpm': ['*.pxd', '*.pyx', '*.pxi']}, ext_modules=\n cythonize(ext_modules))\n",
"step-3": "<mask token>\nCUR_DIR = os.path.dirname(os.path.abspath(__file__))\nSRC_DIR = os.path.join(CUR_DIR, 'src')\nTPM_DIR = os.path.join(SRC_DIR, 'tpm')\ninclude_dirs = [SRC_DIR]\nsrc_files = ['pytpm/_tpm.pyx']\nlibrary_dirs = [os.path.expanduser('~/lib/tpm')]\nlibraries = ['tpm']\next_modules = [Extension('pytpm._tpm', src_files, include_dirs=include_dirs,\n library_dirs=library_dirs, libraries=libraries)]\nsetup(name='pytpm', packages=['pytpm'], package_dir={'pytpm': 'pytpm'},\n package_data={'pytpm': ['*.pxd', '*.pyx', '*.pxi']}, ext_modules=\n cythonize(ext_modules))\n",
"step-4": "import os\nfrom distutils.core import setup\nfrom Cython.Distutils import Extension\nfrom Cython.Build import cythonize\nCUR_DIR = os.path.dirname(os.path.abspath(__file__))\nSRC_DIR = os.path.join(CUR_DIR, 'src')\nTPM_DIR = os.path.join(SRC_DIR, 'tpm')\ninclude_dirs = [SRC_DIR]\nsrc_files = ['pytpm/_tpm.pyx']\nlibrary_dirs = [os.path.expanduser('~/lib/tpm')]\nlibraries = ['tpm']\next_modules = [Extension('pytpm._tpm', src_files, include_dirs=include_dirs,\n library_dirs=library_dirs, libraries=libraries)]\nsetup(name='pytpm', packages=['pytpm'], package_dir={'pytpm': 'pytpm'},\n package_data={'pytpm': ['*.pxd', '*.pyx', '*.pxi']}, ext_modules=\n cythonize(ext_modules))\n",
"step-5": "import os\nfrom distutils.core import setup\nfrom Cython.Distutils import Extension\nfrom Cython.Build import cythonize\n\nCUR_DIR = os.path.dirname(os.path.abspath(__file__))\nSRC_DIR = os.path.join(CUR_DIR, 'src')\nTPM_DIR = os.path.join(SRC_DIR, 'tpm')\ninclude_dirs = [SRC_DIR]\nsrc_files = [\"pytpm/_tpm.pyx\"]\n\n# TPM library and path to the library.\nlibrary_dirs = [os.path.expanduser(\"~/lib/tpm\")]\nlibraries = ['tpm']\n\next_modules = [\n Extension(\n \"pytpm._tpm\", src_files,\n include_dirs=include_dirs,\n library_dirs=library_dirs,\n libraries=libraries\n )\n]\n\nsetup(\n name='pytpm',\n packages=['pytpm'],\n package_dir={'pytpm': 'pytpm'},\n package_data={'pytpm': ['*.pxd', '*.pyx', '*.pxi']},\n ext_modules=cythonize(ext_modules)\n)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
#! /usr/bin/python
from bs4 import BeautifulSoup
import requests
import sys
def exit(err):
print err
sys.exit(0)
def get_text(node, lower = True):
if lower:
return (''.join(node.findAll(text = True))).strip().lower()
return (''.join(node.findAll(text = True))).strip()
def get_method_signature(tag):
gold = 'Method signature:'.lower()
return tag.name == "td" and get_text(tag) == gold
def get_returns(tag):
gold = 'Returns:'.lower()
return tag.name == "pre" and gold in get_text(tag)
def main():
if len(sys.argv) != 3:
exit("Usage: %s <srm_number> <class_name>" % sys.argv[0])
srm = sys.argv[1].strip().lower()
class_name = sys.argv[2].strip().lower()
domain = "http://community.topcoder.com"
search_url = "%(domain)s/tc?module=ProblemArchive&class=%(class_name)s"
data = requests.get(search_url % locals()).text
# f = open('/tmp/data.html', 'w')
# f.write(data)
# f.close()
# data = open('/tmp/data.html', 'r')
soup = BeautifulSoup(data)
result_table = None
result_table_string = 'Challenge'
tables = soup.findAll('table')
tables.reverse()
for table in tables:
if result_table_string.lower() in get_text(table):
result_table = table
break
else:
exit("no problem found, please check class name")
result_row = None
actual_class_name = None
for row in result_table.findAll('tr'):
cells = row.findAll('td')
if len(cells) < 3:
continue
if get_text(cells[1]) == class_name and srm in get_text(cells[2]):
actual_class_name = get_text(cells[1], lower = False)
result_row = row
break
else:
exit("no problem found, please check class name and SRM number")
problem_url = "%s%s" % (domain, cells[1].find('a').get('href'))
data = requests.get(problem_url).text
# f = open('/tmp/problem.html', 'w')
# f.write(data)
# f.close()
#data = open('/tmp/problem.html', 'r')
soup = BeautifulSoup(data)
try:
method_signature_text = soup.findAll(get_method_signature)[-1]
method_signature = method_signature_text.nextSibling.string
returns_tr = method_signature_text.parent.previousSibling
return_type = returns_tr.findAll('td')[1].string.strip()
parameters_tr = returns_tr.previousSibling
parameters = parameters_tr.findAll('td')[1].string.split(",")
method_tr = parameters_tr.previousSibling
method_name = method_tr.findAll('td')[1].string.strip()
test_cases = soup.findAll(get_returns)
expected_return_values = []
inputs = []
for i in range(len(test_cases)):
inputs.append([])
for i, test_case in enumerate(test_cases):
expected_return_values.append(test_case.string.strip().split(": ")[1])
input_values = test_case.parent.parent.previousSibling.findAll('pre')
for input_value in input_values:
inputs[i].append(input_value.string.strip())
except:
raise
exit("error getting method signature, no luck")
# inject test cases into template
spaces = " "
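    # string templates for the Java statements generated per test case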
input_test_case = "%(parameter)s var_%(index_1)d_%(index_2)d = %(value)s;\n"
invoke_method = "%(return_type)s expected_%(index_1)d = %(lower_actual_class_name)s.%(method_name)s(%(method_params)s);\n"
if return_type == "String":
compare_outputs = "System.out.println((expected_%(index_1)d.equals(%(expected_value)s) ? \"Passed\" : \"Failed\") + \" for case %(index_1)d\");"
else:
compare_outputs = "System.out.println(((expected_%(index_1)d == %(expected_value)s) ? \"Passed\" : \"Failed\") + \" for case %(index_1)d\");"
compare_outputs += "\n"
lower_actual_class_name = actual_class_name.lower()
test_case_str = ""
for index_1, input_case in enumerate(inputs):
# declare the inputs
method_params_list = []
for index_2, parameter in enumerate(parameters):
value = input_case[index_2]
test_case_str += spaces
test_case_str += input_test_case % locals()
method_params_list.append("var_%(index_1)d_%(index_2)d" % locals())
# invoke the function
method_params = ','.join(method_params_list)
test_case_str += spaces
test_case_str += invoke_method % locals()
# compare the output
expected_value = expected_return_values[index_1]
test_case_str += spaces
test_case_str += compare_outputs % locals()
# inject everything else into final template
template = open('template.java', 'r').read()
fp = open('%(actual_class_name)s.java' % locals(), 'w')
fp.write(template % locals())
fp.close()
print "done :) generated %(actual_class_name)s.java" % locals()
if __name__ == "__main__":
main()
|
normal
|
{
"blob_id": "e3119979028d3dd4e1061563db4ec20607e744d1",
"index": 3749,
"step-1": "#! /usr/bin/python\n\nfrom bs4 import BeautifulSoup\n\nimport requests\nimport sys\n\ndef exit(err):\n print err\n sys.exit(0)\n\ndef get_text(node, lower = True):\n if lower:\n return (''.join(node.findAll(text = True))).strip().lower()\n return (''.join(node.findAll(text = True))).strip()\n\ndef get_method_signature(tag):\n gold = 'Method signature:'.lower()\n return tag.name == \"td\" and get_text(tag) == gold\n\ndef get_returns(tag):\n gold = 'Returns:'.lower()\n return tag.name == \"pre\" and gold in get_text(tag)\n\ndef main():\n\n if len(sys.argv) != 3:\n exit(\"Usage: %s <srm_number> <class_name>\" % sys.argv[0])\n\n srm = sys.argv[1].strip().lower()\n class_name = sys.argv[2].strip().lower()\n\n domain = \"http://community.topcoder.com\"\n search_url = \"%(domain)s/tc?module=ProblemArchive&class=%(class_name)s\"\n\n data = requests.get(search_url % locals()).text\n # f = open('/tmp/data.html', 'w')\n # f.write(data)\n # f.close()\n # data = open('/tmp/data.html', 'r')\n\n soup = BeautifulSoup(data)\n result_table = None\n result_table_string = 'Challenge'\n tables = soup.findAll('table')\n tables.reverse()\n for table in tables:\n if result_table_string.lower() in get_text(table):\n result_table = table\n break\n else:\n exit(\"no problem found, please check class name\")\n\n result_row = None\n actual_class_name = None\n for row in result_table.findAll('tr'):\n cells = row.findAll('td')\n if len(cells) < 3:\n continue\n if get_text(cells[1]) == class_name and srm in get_text(cells[2]):\n actual_class_name = get_text(cells[1], lower = False)\n result_row = row\n break\n else:\n exit(\"no problem found, please check class name and SRM number\")\n\n problem_url = \"%s%s\" % (domain, cells[1].find('a').get('href'))\n\n data = requests.get(problem_url).text\n # f = open('/tmp/problem.html', 'w')\n # f.write(data)\n # f.close()\n #data = open('/tmp/problem.html', 'r')\n\n soup = BeautifulSoup(data)\n try:\n method_signature_text = soup.findAll(get_method_signature)[-1]\n method_signature = method_signature_text.nextSibling.string\n returns_tr = method_signature_text.parent.previousSibling\n return_type = returns_tr.findAll('td')[1].string.strip()\n parameters_tr = returns_tr.previousSibling\n parameters = parameters_tr.findAll('td')[1].string.split(\",\")\n method_tr = parameters_tr.previousSibling\n method_name = method_tr.findAll('td')[1].string.strip()\n test_cases = soup.findAll(get_returns)\n expected_return_values = []\n inputs = []\n for i in range(len(test_cases)):\n inputs.append([])\n for i, test_case in enumerate(test_cases):\n expected_return_values.append(test_case.string.strip().split(\": \")[1])\n input_values = test_case.parent.parent.previousSibling.findAll('pre')\n for input_value in input_values:\n inputs[i].append(input_value.string.strip())\n except:\n raise\n exit(\"error getting method signature, no luck\")\n\n # inject test cases into template\n spaces = \" \"\n input_test_case = \"%(parameter)s var_%(index_1)d_%(index_2)d = %(value)s;\\n\"\n invoke_method = \"%(return_type)s expected_%(index_1)d = %(lower_actual_class_name)s.%(method_name)s(%(method_params)s);\\n\"\n if return_type == \"String\":\n compare_outputs = \"System.out.println((expected_%(index_1)d.equals(%(expected_value)s) ? \\\"Passed\\\" : \\\"Failed\\\") + \\\" for case %(index_1)d\\\");\"\n else:\n compare_outputs = \"System.out.println(((expected_%(index_1)d == %(expected_value)s) ? 
\\\"Passed\\\" : \\\"Failed\\\") + \\\" for case %(index_1)d\\\");\"\n compare_outputs += \"\\n\"\n lower_actual_class_name = actual_class_name.lower()\n test_case_str = \"\"\n for index_1, input_case in enumerate(inputs):\n # declare the inputs\n method_params_list = []\n for index_2, parameter in enumerate(parameters):\n value = input_case[index_2]\n test_case_str += spaces\n test_case_str += input_test_case % locals()\n method_params_list.append(\"var_%(index_1)d_%(index_2)d\" % locals())\n # invoke the function\n method_params = ','.join(method_params_list)\n test_case_str += spaces\n test_case_str += invoke_method % locals()\n # compare the output\n expected_value = expected_return_values[index_1]\n test_case_str += spaces\n test_case_str += compare_outputs % locals()\n\n # inject everything else into final template\n template = open('template.java', 'r').read()\n fp = open('%(actual_class_name)s.java' % locals(), 'w')\n fp.write(template % locals())\n fp.close()\n print \"done :) generated %(actual_class_name)s.java\" % locals()\n\nif __name__ == \"__main__\":\n main()",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
"""Access IP Camera in Python OpenCV"""
import cv2
#stream = cv2.VideoCapture('protocol://IP:port/1')
# Use the next line if your camera has a username and password
stream = cv2.VideoCapture('rtsp://SeniorDesign:[email protected]:554/video')
while True:
r, f = stream.read()
cv2.imshow('IP Camera stream',f)
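    # press q in the preview window to stop the stream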
if cv2.waitKey(1) & 0xFF == ord('q'):
break
cv2.destroyAllWindows()
|
normal
|
{
"blob_id": "f9db3c96bc3fd4911640d0428672c87072564b0d",
"index": 710,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwhile True:\n r, f = stream.read()\n cv2.imshow('IP Camera stream', f)\n if cv2.waitKey(1) & 255 == ord('q'):\n break\ncv2.destroyAllWindows()\n",
"step-3": "<mask token>\nstream = cv2.VideoCapture('rtsp://SeniorDesign:[email protected]:554/video')\nwhile True:\n r, f = stream.read()\n cv2.imshow('IP Camera stream', f)\n if cv2.waitKey(1) & 255 == ord('q'):\n break\ncv2.destroyAllWindows()\n",
"step-4": "<mask token>\nimport cv2\nstream = cv2.VideoCapture('rtsp://SeniorDesign:[email protected]:554/video')\nwhile True:\n r, f = stream.read()\n cv2.imshow('IP Camera stream', f)\n if cv2.waitKey(1) & 255 == ord('q'):\n break\ncv2.destroyAllWindows()\n",
"step-5": "\"\"\"Access IP Camera in Python OpenCV\"\"\"\r\n\r\nimport cv2\r\n\r\n#stream = cv2.VideoCapture('protocol://IP:port/1')\r\n\r\n# Use the next line if your camera has a username and password\r\nstream = cv2.VideoCapture('rtsp://SeniorDesign:[email protected]:554/video') \r\n\r\nwhile True:\r\n\r\n r, f = stream.read()\r\n cv2.imshow('IP Camera stream',f)\r\n\r\n if cv2.waitKey(1) & 0xFF == ord('q'):\r\n break\r\n\r\ncv2.destroyAllWindows()",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
#!/usr/bin/python
import pymysql
dbServerName = "127.0.0.1"
dbUser = "root"
dbPassword = "1448"
dbName = "TestDataBase2"
charSet = "utf8mb4"
cusrorType = pymysql.cursors.DictCursor
connectionObject = pymysql.connect(host=dbServerName, user=dbUser, password=dbPassword,
db=dbName, charset=charSet,cursorclass=cusrorType)
try:
    # Create a cursor object
    cursorObject = connectionObject.cursor()

    # Create the table
    sqlQuery = "CREATE TABLE Liceu(id int, Nume varchar(32), Prenume varchar(32), Legitimatie int)"
    cursorObject.execute(sqlQuery)

    # Insert a sample row; pymysql expects %s placeholders for every parameter type
    sql = "INSERT INTO Liceu(id, Nume, Prenume, Legitimatie) VALUES (%s, %s, %s, %s)"
    val = (5, 'Highway 21', 'sfsdfs', 53)
    cursorObject.execute(sql, val)

    # commit() is a connection method, not a cursor method
    connectionObject.commit()
    print(cursorObject.rowcount, "record inserted.")

    # List the tables and fetch all the rows
    cursorObject.execute("show tables")
    rows = cursorObject.fetchall()

    for row in rows:
        print(row)

except Exception as e:
    print("Exception occurred: {}".format(e))

finally:
    connectionObject.close()
|
normal
|
{
"blob_id": "1c85ccaacfb47808e9e74f2a18bfe3b309891cf4",
"index": 877,
"step-1": "#!/usr/bin/python\nimport pymysql\n\n\ndbServerName = \"127.0.0.1\"\n\ndbUser = \"root\"\n\ndbPassword = \"1448\"\n\ndbName = \"TestDataBase2\"\n\ncharSet = \"utf8mb4\"\n\ncusrorType = pymysql.cursors.DictCursor\n\n\n\nconnectionObject = pymysql.connect(host=dbServerName, user=dbUser, password=dbPassword,\n\n db=dbName, charset=charSet,cursorclass=cusrorType)\ntry:\n\n\n\n # Create a cursor object\n\n cursorObject = connectionObject.cursor()\n\n\n\n # SQL query string\n\n sqlQuery = \"CREATE TABLE Liceu(id int, Nume varchar(32), Prenume varchar(32), Legitimatie int)\"\n\n\n\n # Execute the sqlQuery\n\n cursorObject.execute(sqlQuery)\n\n\n\n # SQL query string\n\n sqlQuery = \"show tables\"\n\n\n\n # Execute the sqlQuery\n\n cursorObject.execute(sqlQuery)\n\n#mycursor = mydb.cursor()\n\nsql = \"INSERT INTO Liceu(id, Nume, Prenume, Leg) VALUES (%n, %s, %s, %n)\"\nval = (5, 'Highway 21', 'sfsdfs', 53)\ncursorObject.execute(sql, val)\n\ncursorObject.commit()\n\nprint(mycursor.rowcount, \"record inserted.\")\n\n\n\n #Fetch all the rows\n\n rows = cursorObject.fetchall()\n\n\n\n for row in rows:\n\n print(row)\n\nexcept Exception as e:\n\n print(\"Exeception occured:{}\".format(e))\n\n\nfinally:\n\n connectionObject.close()\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
# noinspection PyStatementEffect
{
'name': 'ldap_user',
'summary': '',
'description': '域账号用户管理,登录及查询用户信息',
'author': '',
'website': '',
'source': {'git': 'https://github.com/LeiQiao/Parasite-Plugins.git', 'branch': 'master'},
'category': '',
'version': '0.1',
'api': {
'/user/token': 'user_api.gen_token',
'/user/captcha': 'user_api.gen_captcha',
'/user/login': {
'POST': 'user_api.login'
},
'/user/search': 'user_api.search_users'
},
# any plugin necessary for this one to work correctly
'depends': ['base', 'base_api_wrapper', 'redis_client', 'i18n']
}
|
normal
|
{
"blob_id": "b95619f3f52ff3747e38ecc153123962d0122a4d",
"index": 387,
"step-1": "<mask token>\n",
"step-2": "{'name': 'ldap_user', 'summary': '', 'description': '域账号用户管理,登录及查询用户信息',\n 'author': '', 'website': '', 'source': {'git':\n 'https://github.com/LeiQiao/Parasite-Plugins.git', 'branch': 'master'},\n 'category': '', 'version': '0.1', 'api': {'/user/token':\n 'user_api.gen_token', '/user/captcha': 'user_api.gen_captcha',\n '/user/login': {'POST': 'user_api.login'}, '/user/search':\n 'user_api.search_users'}, 'depends': ['base', 'base_api_wrapper',\n 'redis_client', 'i18n']}\n",
"step-3": "# noinspection PyStatementEffect\n{\n 'name': 'ldap_user',\n 'summary': '',\n 'description': '域账号用户管理,登录及查询用户信息',\n 'author': '',\n 'website': '',\n 'source': {'git': 'https://github.com/LeiQiao/Parasite-Plugins.git', 'branch': 'master'},\n\n 'category': '',\n 'version': '0.1',\n\n 'api': {\n '/user/token': 'user_api.gen_token',\n '/user/captcha': 'user_api.gen_captcha',\n '/user/login': {\n 'POST': 'user_api.login'\n },\n '/user/search': 'user_api.search_users'\n },\n\n # any plugin necessary for this one to work correctly\n 'depends': ['base', 'base_api_wrapper', 'redis_client', 'i18n']\n}\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
from fastapi import APIRouter
from .endpoints import submissions
def get_api_router():
api_router = APIRouter()
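    # all submission endpoints are served under the /submissions prefix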
api_router.include_router(submissions.router,
prefix="/submissions",
tags=["submissions"])
# api_router.include_router(users.router, prefix="/users", tags=["users"])
return api_router
|
normal
|
{
"blob_id": "844c9af4f0d4ca33e7c69b72f9886f58ceebefdb",
"index": 2719,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef get_api_router():\n api_router = APIRouter()\n api_router.include_router(submissions.router, prefix='/submissions',\n tags=['submissions'])\n return api_router\n",
"step-3": "from fastapi import APIRouter\nfrom .endpoints import submissions\n\n\ndef get_api_router():\n api_router = APIRouter()\n api_router.include_router(submissions.router, prefix='/submissions',\n tags=['submissions'])\n return api_router\n",
"step-4": "from fastapi import APIRouter\n\nfrom .endpoints import submissions\n\n\ndef get_api_router():\n api_router = APIRouter()\n api_router.include_router(submissions.router,\n prefix=\"/submissions\",\n tags=[\"submissions\"])\n # api_router.include_router(users.router, prefix=\"/users\", tags=[\"users\"])\n return api_router\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
#!/usr/bin/env python
import webapp2 # web application framework
import jinja2 # template engine
import os # access file system
import csv
from google.appengine.api import users # Google account authentication
from google.appengine.ext import db # datastore
# initialise template
jinja_environment = jinja2.Environment(loader=jinja2.FileSystemLoader(os.path.dirname(__file__)))
class Contact(db.Expando): # allows for different number of fields
''' User data model '''
pid = db.StringProperty(required=True) # string = 500 char, allow field to be indexed, perform faster
name = db.StringProperty(required=True)
class12 = db.StringProperty(required=True)
email = db.EmailProperty(required=True)
handphone = db.StringProperty(required=False)
tickets_csjh = db.StringProperty(required=False)
tickets_edssh = db.StringProperty(required=False)
remark = db.TextProperty()
class MainHandler(webapp2.RequestHandler):
''' Home page handler '''
def get(self):
''' Show home page '''
# import data
# check if valid Google account
# school_register = csv.reader(open('data.csv'),delimiter=',')
# found = False
user = users.get_current_user()
# for student in school_register: # if valid logged in user
# if student[0] == self.request.get('pid'):
# contact = student
# found = True
# break
if user:
# logout link
url = users.create_logout_url(self.request.uri)
# logout text
url_linktext = 'Logout'
# retrieve user record from datastore
# may get multiple records, so in order to get one record:
query = Contact.gql('WHERE pid = :1', user.nickname())
result = query.fetch(1)
if result: #if user record found
contact = result[0]
greeting = ("Welcome %s!" % (contact.name,)) #1 item in couple = put comma
else: #not found
contact = "Invalid dhs.sg user"
greeting = ""
else: # not logged in
# login link
url = users.create_login_url(self.request.uri)
# login text
url_linktext = 'Login'
contact = "Not authorised"
greeting = "You need to"
template_values = {
'contact': contact,
'greeting': greeting,
'url': url,
'url_linktext': url_linktext,
}
# create index.html template
template = jinja_environment.get_template('index.html')
# associate template values with template
self.response.out.write(template.render(template_values))
class Submit(webapp2.RequestHandler):
''' Submit form '''
def post(self):
if self.request.get('submit'):
updated_handphone = self.request.get('handphone')
updated_tickets_csjh = self.request.get('tickets_csjh')
updated_tickets_edssh = self.request.get('tickets_edssh')
updated_remark = self.request.get('remark')
url = users.create_logout_url(self.request.uri)
url_linktext = 'Logout'
user = users.get_current_user()
query = Contact.gql('WHERE pid = :1', user.nickname())
result = query.fetch(1)
if result:
contact = result[0]
greeting = ("User: %s" % (contact.name,))
contact.handphone = updated_handphone
contact.tickets_csjh = updated_tickets_csjh
contact.tickets_edssh = updated_tickets_edssh
contact.remark = db.Text(updated_remark)
contact.put()
else:
self.response.out.write('Reservation failed!')
template_values = {
'contact': contact,
'greeting': greeting,
'url': url,
'url_linktext': url_linktext,
'contact.handphone': updated_handphone,
'contact.tickets_csjh': updated_tickets_csjh,
'contact.tickets_edssh': updated_tickets_edssh,
'contact.remark': updated_remark,
}
template = jinja_environment.get_template('submit.html')
self.response.out.write(template.render(template_values))
# main
contact2 = Contact(pid = 'lim.ahseng', name = 'Lim Ah Seng', class12 = '5C99', email = '[email protected]', handphone = '', tickets_csjh = '', tickets_edssh = '', remark = '')
contact2.put()
app = webapp2.WSGIApplication([('/', MainHandler), ('/submit', Submit)],
debug=True)
|
normal
|
{
"blob_id": "aeef27d667f95e3818f73533439385ea949b96a4",
"index": 2445,
"step-1": "<mask token>\n\n\nclass Submit(webapp2.RequestHandler):\n <mask token>\n\n def post(self):\n if self.request.get('submit'):\n updated_handphone = self.request.get('handphone')\n updated_tickets_csjh = self.request.get('tickets_csjh')\n updated_tickets_edssh = self.request.get('tickets_edssh')\n updated_remark = self.request.get('remark')\n url = users.create_logout_url(self.request.uri)\n url_linktext = 'Logout'\n user = users.get_current_user()\n query = Contact.gql('WHERE pid = :1', user.nickname())\n result = query.fetch(1)\n if result:\n contact = result[0]\n greeting = 'User: %s' % (contact.name,)\n contact.handphone = updated_handphone\n contact.tickets_csjh = updated_tickets_csjh\n contact.tickets_edssh = updated_tickets_edssh\n contact.remark = db.Text(updated_remark)\n contact.put()\n else:\n self.response.out.write('Reservation failed!')\n template_values = {'contact': contact, 'greeting': greeting, 'url':\n url, 'url_linktext': url_linktext, 'contact.handphone':\n updated_handphone, 'contact.tickets_csjh': updated_tickets_csjh,\n 'contact.tickets_edssh': updated_tickets_edssh,\n 'contact.remark': updated_remark}\n template = jinja_environment.get_template('submit.html')\n self.response.out.write(template.render(template_values))\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Contact(db.Expando):\n <mask token>\n pid = db.StringProperty(required=True)\n name = db.StringProperty(required=True)\n class12 = db.StringProperty(required=True)\n email = db.EmailProperty(required=True)\n handphone = db.StringProperty(required=False)\n tickets_csjh = db.StringProperty(required=False)\n tickets_edssh = db.StringProperty(required=False)\n remark = db.TextProperty()\n\n\nclass MainHandler(webapp2.RequestHandler):\n \"\"\" Home page handler \"\"\"\n\n def get(self):\n \"\"\" Show home page \"\"\"\n user = users.get_current_user()\n if user:\n url = users.create_logout_url(self.request.uri)\n url_linktext = 'Logout'\n query = Contact.gql('WHERE pid = :1', user.nickname())\n result = query.fetch(1)\n if result:\n contact = result[0]\n greeting = 'Welcome %s!' % (contact.name,)\n else:\n contact = 'Invalid dhs.sg user'\n greeting = ''\n else:\n url = users.create_login_url(self.request.uri)\n url_linktext = 'Login'\n contact = 'Not authorised'\n greeting = 'You need to'\n template_values = {'contact': contact, 'greeting': greeting, 'url':\n url, 'url_linktext': url_linktext}\n template = jinja_environment.get_template('index.html')\n self.response.out.write(template.render(template_values))\n\n\nclass Submit(webapp2.RequestHandler):\n \"\"\" Submit form \"\"\"\n\n def post(self):\n if self.request.get('submit'):\n updated_handphone = self.request.get('handphone')\n updated_tickets_csjh = self.request.get('tickets_csjh')\n updated_tickets_edssh = self.request.get('tickets_edssh')\n updated_remark = self.request.get('remark')\n url = users.create_logout_url(self.request.uri)\n url_linktext = 'Logout'\n user = users.get_current_user()\n query = Contact.gql('WHERE pid = :1', user.nickname())\n result = query.fetch(1)\n if result:\n contact = result[0]\n greeting = 'User: %s' % (contact.name,)\n contact.handphone = updated_handphone\n contact.tickets_csjh = updated_tickets_csjh\n contact.tickets_edssh = updated_tickets_edssh\n contact.remark = db.Text(updated_remark)\n contact.put()\n else:\n self.response.out.write('Reservation failed!')\n template_values = {'contact': contact, 'greeting': greeting, 'url':\n url, 'url_linktext': url_linktext, 'contact.handphone':\n updated_handphone, 'contact.tickets_csjh': updated_tickets_csjh,\n 'contact.tickets_edssh': updated_tickets_edssh,\n 'contact.remark': updated_remark}\n template = jinja_environment.get_template('submit.html')\n self.response.out.write(template.render(template_values))\n\n\n<mask token>\n",
"step-3": "<mask token>\njinja_environment = jinja2.Environment(loader=jinja2.FileSystemLoader(os.\n path.dirname(__file__)))\n\n\nclass Contact(db.Expando):\n \"\"\" User data model \"\"\"\n pid = db.StringProperty(required=True)\n name = db.StringProperty(required=True)\n class12 = db.StringProperty(required=True)\n email = db.EmailProperty(required=True)\n handphone = db.StringProperty(required=False)\n tickets_csjh = db.StringProperty(required=False)\n tickets_edssh = db.StringProperty(required=False)\n remark = db.TextProperty()\n\n\nclass MainHandler(webapp2.RequestHandler):\n \"\"\" Home page handler \"\"\"\n\n def get(self):\n \"\"\" Show home page \"\"\"\n user = users.get_current_user()\n if user:\n url = users.create_logout_url(self.request.uri)\n url_linktext = 'Logout'\n query = Contact.gql('WHERE pid = :1', user.nickname())\n result = query.fetch(1)\n if result:\n contact = result[0]\n greeting = 'Welcome %s!' % (contact.name,)\n else:\n contact = 'Invalid dhs.sg user'\n greeting = ''\n else:\n url = users.create_login_url(self.request.uri)\n url_linktext = 'Login'\n contact = 'Not authorised'\n greeting = 'You need to'\n template_values = {'contact': contact, 'greeting': greeting, 'url':\n url, 'url_linktext': url_linktext}\n template = jinja_environment.get_template('index.html')\n self.response.out.write(template.render(template_values))\n\n\nclass Submit(webapp2.RequestHandler):\n \"\"\" Submit form \"\"\"\n\n def post(self):\n if self.request.get('submit'):\n updated_handphone = self.request.get('handphone')\n updated_tickets_csjh = self.request.get('tickets_csjh')\n updated_tickets_edssh = self.request.get('tickets_edssh')\n updated_remark = self.request.get('remark')\n url = users.create_logout_url(self.request.uri)\n url_linktext = 'Logout'\n user = users.get_current_user()\n query = Contact.gql('WHERE pid = :1', user.nickname())\n result = query.fetch(1)\n if result:\n contact = result[0]\n greeting = 'User: %s' % (contact.name,)\n contact.handphone = updated_handphone\n contact.tickets_csjh = updated_tickets_csjh\n contact.tickets_edssh = updated_tickets_edssh\n contact.remark = db.Text(updated_remark)\n contact.put()\n else:\n self.response.out.write('Reservation failed!')\n template_values = {'contact': contact, 'greeting': greeting, 'url':\n url, 'url_linktext': url_linktext, 'contact.handphone':\n updated_handphone, 'contact.tickets_csjh': updated_tickets_csjh,\n 'contact.tickets_edssh': updated_tickets_edssh,\n 'contact.remark': updated_remark}\n template = jinja_environment.get_template('submit.html')\n self.response.out.write(template.render(template_values))\n\n\ncontact2 = Contact(pid='lim.ahseng', name='Lim Ah Seng', class12='5C99',\n email='[email protected]', handphone='', tickets_csjh='', tickets_edssh\n ='', remark='')\ncontact2.put()\napp = webapp2.WSGIApplication([('/', MainHandler), ('/submit', Submit)],\n debug=True)\n",
"step-4": "import webapp2\nimport jinja2\nimport os\nimport csv\nfrom google.appengine.api import users\nfrom google.appengine.ext import db\njinja_environment = jinja2.Environment(loader=jinja2.FileSystemLoader(os.\n path.dirname(__file__)))\n\n\nclass Contact(db.Expando):\n \"\"\" User data model \"\"\"\n pid = db.StringProperty(required=True)\n name = db.StringProperty(required=True)\n class12 = db.StringProperty(required=True)\n email = db.EmailProperty(required=True)\n handphone = db.StringProperty(required=False)\n tickets_csjh = db.StringProperty(required=False)\n tickets_edssh = db.StringProperty(required=False)\n remark = db.TextProperty()\n\n\nclass MainHandler(webapp2.RequestHandler):\n \"\"\" Home page handler \"\"\"\n\n def get(self):\n \"\"\" Show home page \"\"\"\n user = users.get_current_user()\n if user:\n url = users.create_logout_url(self.request.uri)\n url_linktext = 'Logout'\n query = Contact.gql('WHERE pid = :1', user.nickname())\n result = query.fetch(1)\n if result:\n contact = result[0]\n greeting = 'Welcome %s!' % (contact.name,)\n else:\n contact = 'Invalid dhs.sg user'\n greeting = ''\n else:\n url = users.create_login_url(self.request.uri)\n url_linktext = 'Login'\n contact = 'Not authorised'\n greeting = 'You need to'\n template_values = {'contact': contact, 'greeting': greeting, 'url':\n url, 'url_linktext': url_linktext}\n template = jinja_environment.get_template('index.html')\n self.response.out.write(template.render(template_values))\n\n\nclass Submit(webapp2.RequestHandler):\n \"\"\" Submit form \"\"\"\n\n def post(self):\n if self.request.get('submit'):\n updated_handphone = self.request.get('handphone')\n updated_tickets_csjh = self.request.get('tickets_csjh')\n updated_tickets_edssh = self.request.get('tickets_edssh')\n updated_remark = self.request.get('remark')\n url = users.create_logout_url(self.request.uri)\n url_linktext = 'Logout'\n user = users.get_current_user()\n query = Contact.gql('WHERE pid = :1', user.nickname())\n result = query.fetch(1)\n if result:\n contact = result[0]\n greeting = 'User: %s' % (contact.name,)\n contact.handphone = updated_handphone\n contact.tickets_csjh = updated_tickets_csjh\n contact.tickets_edssh = updated_tickets_edssh\n contact.remark = db.Text(updated_remark)\n contact.put()\n else:\n self.response.out.write('Reservation failed!')\n template_values = {'contact': contact, 'greeting': greeting, 'url':\n url, 'url_linktext': url_linktext, 'contact.handphone':\n updated_handphone, 'contact.tickets_csjh': updated_tickets_csjh,\n 'contact.tickets_edssh': updated_tickets_edssh,\n 'contact.remark': updated_remark}\n template = jinja_environment.get_template('submit.html')\n self.response.out.write(template.render(template_values))\n\n\ncontact2 = Contact(pid='lim.ahseng', name='Lim Ah Seng', class12='5C99',\n email='[email protected]', handphone='', tickets_csjh='', tickets_edssh\n ='', remark='')\ncontact2.put()\napp = webapp2.WSGIApplication([('/', MainHandler), ('/submit', Submit)],\n debug=True)\n",
"step-5": "#!/usr/bin/env python\n\nimport webapp2 # web application framework\nimport jinja2 # template engine\nimport os \t # access file system\nimport csv\nfrom google.appengine.api import users\t# Google account authentication\nfrom google.appengine.ext import db\t\t# datastore\n\n# initialise template\njinja_environment = jinja2.Environment(loader=jinja2.FileSystemLoader(os.path.dirname(__file__)))\n\nclass Contact(db.Expando): # allows for different number of fields\n\t''' User data model '''\n\tpid = db.StringProperty(required=True) # string = 500 char, allow field to be indexed, perform faster\n\tname = db.StringProperty(required=True)\n\tclass12 = db.StringProperty(required=True)\n\temail = db.EmailProperty(required=True)\n\thandphone = db.StringProperty(required=False)\n\ttickets_csjh = db.StringProperty(required=False)\n\ttickets_edssh = db.StringProperty(required=False)\n\tremark = db.TextProperty()\n\n\t\nclass MainHandler(webapp2.RequestHandler):\n\t''' Home page handler '''\n\tdef get(self):\n\t\t''' Show home page '''\n\t\t# import data\n\t\t# check if valid Google account\n#\t\tschool_register = csv.reader(open('data.csv'),delimiter=',')\n#\t\tfound = False\n\t\tuser = users.get_current_user()\n\t\n#\t\tfor student in school_register:\t# if valid logged in user\n#\t\t\tif student[0] == self.request.get('pid'):\n#\t\t\t\tcontact = student\n#\t\t\t\tfound = True\n#\t\t\t\tbreak\n\n\t\tif user: \n\t\t\t# logout link\n\t\t\turl = users.create_logout_url(self.request.uri)\n\t\t\t# logout text\n\t\t\turl_linktext = 'Logout'\n\t\t\t# retrieve user record from datastore\n\t\t\t# may get multiple records, so in order to get one record:\n\t\t\tquery = Contact.gql('WHERE pid = :1', user.nickname())\n\t\t\tresult = query.fetch(1)\n\t\t\tif result: #if user record found\n\t\t\t\tcontact = result[0]\n\t\t\t\tgreeting = (\"Welcome %s!\" % (contact.name,)) #1 item in couple = put comma\n\t\t\telse: #not found\n\t\t\t\tcontact = \"Invalid dhs.sg user\"\n\t\t\t\tgreeting = \"\"\n\t\t\t\n\t\telse: # not logged in \n\t\t\t\t# login link\n\t\t\turl = users.create_login_url(self.request.uri)\n\t\t\t\t# login text\n\t\t\turl_linktext = 'Login'\n\t\t\tcontact = \"Not authorised\"\n\t\t\tgreeting = \"You need to\"\n\t\t\t\n\t\ttemplate_values = {\n\t\t\t'contact': contact,\n\t\t\t'greeting': greeting,\n\t\t\t'url': url,\n\t\t\t'url_linktext': url_linktext,\n\t\t}\n\t\t\n\t\t# create index.html template\n\t\ttemplate = jinja_environment.get_template('index.html')\n\t\t# associate template values with template\n\t\tself.response.out.write(template.render(template_values))\n\nclass Submit(webapp2.RequestHandler):\n\t''' Submit form '''\n\tdef post(self):\n\t\tif self.request.get('submit'):\n\t\t\tupdated_handphone = self.request.get('handphone')\n\t\t\tupdated_tickets_csjh = self.request.get('tickets_csjh')\n\t\t\tupdated_tickets_edssh = self.request.get('tickets_edssh')\n\t\t\tupdated_remark = self.request.get('remark')\n\t\t\turl = users.create_logout_url(self.request.uri)\n\t\t\turl_linktext = 'Logout'\n\t\t\tuser = users.get_current_user()\n\t\t\tquery = Contact.gql('WHERE pid = :1', user.nickname())\n\t\t\tresult = query.fetch(1)\n\t\t\t\n\t\t\tif result: \n\t\t\t\tcontact = result[0]\n\t\t\t\tgreeting = (\"User: %s\" % (contact.name,)) \n\t\t\t\tcontact.handphone = updated_handphone\n\t\t\t\tcontact.tickets_csjh = updated_tickets_csjh\n\t\t\t\tcontact.tickets_edssh = updated_tickets_edssh\n\t\t\t\tcontact.remark = db.Text(updated_remark)\n\t\t\t\tcontact.put()\n\t\t\telse: 
\t\n\t\t\t\tself.response.out.write('Reservation failed!')\n\t\n\t\t\n\t\ttemplate_values = {\n\t\t\t'contact': contact,\n\t\t\t'greeting': greeting,\n\t\t\t'url': url,\n\t\t\t'url_linktext': url_linktext,\n\t\t\t'contact.handphone': updated_handphone,\n\t\t\t'contact.tickets_csjh': updated_tickets_csjh,\n\t\t\t'contact.tickets_edssh': updated_tickets_edssh,\n\t\t\t'contact.remark': updated_remark,\n\t\t}\n\t\t\n\t\ttemplate = jinja_environment.get_template('submit.html') \n\t\tself.response.out.write(template.render(template_values))\n\n# main\n\ncontact2 = Contact(pid = 'lim.ahseng', name = 'Lim Ah Seng', class12 = '5C99', email = '[email protected]', handphone = '', tickets_csjh = '', tickets_edssh = '', remark = '')\ncontact2.put()\n\t\napp = webapp2.WSGIApplication([('/', MainHandler), ('/submit', Submit)], \n\t\t\t\t\t\t\t\tdebug=True)\n\n \n",
"step-ids": [
2,
8,
11,
12,
13
]
}
|
[
2,
8,
11,
12,
13
] |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import tinymce.models
class Migration(migrations.Migration):
dependencies = [
('book', '0002_auto_20180402_2344'),
]
operations = [
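        # creates the HeriInfo model (rich-text hcontent) and pins Books.type_id to a fixed choice list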
migrations.CreateModel(
name='HeriInfo',
fields=[
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True, serialize=False)),
('hcontent', tinymce.models.HTMLField()),
],
),
migrations.AlterField(
model_name='books',
name='type_id',
field=models.SmallIntegerField(verbose_name='商品种类', default=1, choices=[('ALGORITHMS', '数据结构与算法'), ('OPERATINGSYSTEM', '操作系统'), ('DATABASE', '数据库'), ('JAVASCRIPT', 'javascript'), ('MACHINELEARNING', '机器学习'), ('PYTHON', 'python')]),
),
]
|
normal
|
{
"blob_id": "2c4fe8015968b8a78c7b2ea33ac5e21e01c82e6e",
"index": 2818,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('book', '0002_auto_20180402_2344')]\n operations = [migrations.CreateModel(name='HeriInfo', fields=[('id',\n models.AutoField(verbose_name='ID', primary_key=True, auto_created=\n True, serialize=False)), ('hcontent', tinymce.models.HTMLField())]),\n migrations.AlterField(model_name='books', name='type_id', field=\n models.SmallIntegerField(verbose_name='商品种类', default=1, choices=[(\n 'ALGORITHMS', '数据结构与算法'), ('OPERATINGSYSTEM', '操作系统'), ('DATABASE',\n '数据库'), ('JAVASCRIPT', 'javascript'), ('MACHINELEARNING', '机器学习'),\n ('PYTHON', 'python')]))]\n",
"step-4": "from __future__ import unicode_literals\nfrom django.db import models, migrations\nimport tinymce.models\n\n\nclass Migration(migrations.Migration):\n dependencies = [('book', '0002_auto_20180402_2344')]\n operations = [migrations.CreateModel(name='HeriInfo', fields=[('id',\n models.AutoField(verbose_name='ID', primary_key=True, auto_created=\n True, serialize=False)), ('hcontent', tinymce.models.HTMLField())]),\n migrations.AlterField(model_name='books', name='type_id', field=\n models.SmallIntegerField(verbose_name='商品种类', default=1, choices=[(\n 'ALGORITHMS', '数据结构与算法'), ('OPERATINGSYSTEM', '操作系统'), ('DATABASE',\n '数据库'), ('JAVASCRIPT', 'javascript'), ('MACHINELEARNING', '机器学习'),\n ('PYTHON', 'python')]))]\n",
"step-5": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nimport tinymce.models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('book', '0002_auto_20180402_2344'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='HeriInfo',\n fields=[\n ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True, serialize=False)),\n ('hcontent', tinymce.models.HTMLField()),\n ],\n ),\n migrations.AlterField(\n model_name='books',\n name='type_id',\n field=models.SmallIntegerField(verbose_name='商品种类', default=1, choices=[('ALGORITHMS', '数据结构与算法'), ('OPERATINGSYSTEM', '操作系统'), ('DATABASE', '数据库'), ('JAVASCRIPT', 'javascript'), ('MACHINELEARNING', '机器学习'), ('PYTHON', 'python')]),\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
#!/usr/local/bin/python
import cgi
import pymysql
import pymysql.cursors
import binascii
import os
from mylib import siteLines
import threading
def checkStringLine(ip, host, pagel, objects, title):
onlyIp = ip.split(":")[0]
connection = siteLines()
with connection.cursor() as cursor:
# Read a single record
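            # caution: f-string interpolation of request data into SQL is injection-prone; parameterized queries would be safer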
sql = f"SELECT `IP` FROM `sites` WHERE `IP`=\'{onlyIp}\'"
cursor.execute(sql)
result = cursor.fetchone()
        if result is None:
SiteStringLine(ip, host, pagel, objects, title)
else: pass
def SiteStringLine(ip, host, pagel, objects, title):
connection = siteLines()
with connection:
with connection.cursor() as cursor:
# Create a new record
sql = f"INSERT INTO `sites` (`IP`, `URL`, `PageLeight`, `Objects`, `Title`) VALUES (\'{ip}\', \'{host}\', \'{pagel}\', \'{objects}\', \'{title}\')"
cursor.execute(sql)
connection.commit()
form = cgi.FieldStorage()
open("gates.log", "a+", encoding="utf-8").write(str(form) + "\n")
if form.__contains__("host"):
ip = form.__contains__("ip")
host = form.__contains__("host")
pagel = form.__contains__("pagel")
objects = form.__contains__("words")
title = form.__contains__("title")
thread0 = threading.Thread(target = checkStringLine, args = (form["ip"].value, form["host"].value, form["pagel"].value, form["words"].value, form["title"].value))
thread0.start()
|
normal
|
{
"blob_id": "6c5c07dadbe7ec70a210ee42e756be0d710c0993",
"index": 5272,
"step-1": "<mask token>\n\n\ndef checkStringLine(ip, host, pagel, objects, title):\n onlyIp = ip.split(':')[0]\n connection = siteLines()\n with connection.cursor() as cursor:\n sql = f\"SELECT `IP` FROM `sites` WHERE `IP`='{onlyIp}'\"\n cursor.execute(sql)\n result = cursor.fetchone()\n if result == None:\n SiteStringLine(ip, host, pagel, objects, title)\n else:\n pass\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef checkStringLine(ip, host, pagel, objects, title):\n onlyIp = ip.split(':')[0]\n connection = siteLines()\n with connection.cursor() as cursor:\n sql = f\"SELECT `IP` FROM `sites` WHERE `IP`='{onlyIp}'\"\n cursor.execute(sql)\n result = cursor.fetchone()\n if result == None:\n SiteStringLine(ip, host, pagel, objects, title)\n else:\n pass\n\n\ndef SiteStringLine(ip, host, pagel, objects, title):\n connection = siteLines()\n with connection:\n with connection.cursor() as cursor:\n sql = (\n f\"INSERT INTO `sites` (`IP`, `URL`, `PageLeight`, `Objects`, `Title`) VALUES ('{ip}', '{host}', '{pagel}', '{objects}', '{title}')\"\n )\n cursor.execute(sql)\n connection.commit()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef checkStringLine(ip, host, pagel, objects, title):\n onlyIp = ip.split(':')[0]\n connection = siteLines()\n with connection.cursor() as cursor:\n sql = f\"SELECT `IP` FROM `sites` WHERE `IP`='{onlyIp}'\"\n cursor.execute(sql)\n result = cursor.fetchone()\n if result == None:\n SiteStringLine(ip, host, pagel, objects, title)\n else:\n pass\n\n\ndef SiteStringLine(ip, host, pagel, objects, title):\n connection = siteLines()\n with connection:\n with connection.cursor() as cursor:\n sql = (\n f\"INSERT INTO `sites` (`IP`, `URL`, `PageLeight`, `Objects`, `Title`) VALUES ('{ip}', '{host}', '{pagel}', '{objects}', '{title}')\"\n )\n cursor.execute(sql)\n connection.commit()\n\n\nform = cgi.FieldStorage()\nopen('gates.log', 'a+', encoding='utf-8').write(str(form) + '\\n')\nif form.__contains__('host'):\n ip = form.__contains__('ip')\n host = form.__contains__('host')\n pagel = form.__contains__('pagel')\n objects = form.__contains__('words')\n title = form.__contains__('title')\n thread0 = threading.Thread(target=checkStringLine, args=(form['ip'].\n value, form['host'].value, form['pagel'].value, form['words'].value,\n form['title'].value))\n thread0.start()\n",
"step-4": "import cgi\nimport pymysql\nimport pymysql.cursors\nimport binascii\nimport os\nfrom mylib import siteLines\nimport threading\n\n\ndef checkStringLine(ip, host, pagel, objects, title):\n onlyIp = ip.split(':')[0]\n connection = siteLines()\n with connection.cursor() as cursor:\n sql = f\"SELECT `IP` FROM `sites` WHERE `IP`='{onlyIp}'\"\n cursor.execute(sql)\n result = cursor.fetchone()\n if result == None:\n SiteStringLine(ip, host, pagel, objects, title)\n else:\n pass\n\n\ndef SiteStringLine(ip, host, pagel, objects, title):\n connection = siteLines()\n with connection:\n with connection.cursor() as cursor:\n sql = (\n f\"INSERT INTO `sites` (`IP`, `URL`, `PageLeight`, `Objects`, `Title`) VALUES ('{ip}', '{host}', '{pagel}', '{objects}', '{title}')\"\n )\n cursor.execute(sql)\n connection.commit()\n\n\nform = cgi.FieldStorage()\nopen('gates.log', 'a+', encoding='utf-8').write(str(form) + '\\n')\nif form.__contains__('host'):\n ip = form.__contains__('ip')\n host = form.__contains__('host')\n pagel = form.__contains__('pagel')\n objects = form.__contains__('words')\n title = form.__contains__('title')\n thread0 = threading.Thread(target=checkStringLine, args=(form['ip'].\n value, form['host'].value, form['pagel'].value, form['words'].value,\n form['title'].value))\n thread0.start()\n",
"step-5": "#!/usr/local/bin/python\r\nimport cgi\r\nimport pymysql\r\nimport pymysql.cursors\r\nimport binascii\r\nimport os\r\nfrom mylib import siteLines\r\nimport threading\r\n\r\ndef checkStringLine(ip, host, pagel, objects, title):\r\n onlyIp = ip.split(\":\")[0]\r\n connection = siteLines()\r\n with connection.cursor() as cursor:\r\n # Read a single record\r\n sql = f\"SELECT `IP` FROM `sites` WHERE `IP`=\\'{onlyIp}\\'\"\r\n cursor.execute(sql)\r\n result = cursor.fetchone()\r\n if result == None:\r\n SiteStringLine(ip, host, pagel, objects, title)\r\n else: pass\r\n\r\ndef SiteStringLine(ip, host, pagel, objects, title):\r\n connection = siteLines()\r\n with connection:\r\n with connection.cursor() as cursor:\r\n # Create a new record\r\n sql = f\"INSERT INTO `sites` (`IP`, `URL`, `PageLeight`, `Objects`, `Title`) VALUES (\\'{ip}\\', \\'{host}\\', \\'{pagel}\\', \\'{objects}\\', \\'{title}\\')\"\r\n cursor.execute(sql)\r\n connection.commit()\r\n\r\n\r\nform = cgi.FieldStorage()\r\nopen(\"gates.log\", \"a+\", encoding=\"utf-8\").write(str(form) + \"\\n\")\r\nif form.__contains__(\"host\"):\r\n ip = form.__contains__(\"ip\")\r\n host = form.__contains__(\"host\")\r\n pagel = form.__contains__(\"pagel\")\r\n objects = form.__contains__(\"words\")\r\n title = form.__contains__(\"title\")\r\n thread0 = threading.Thread(target = checkStringLine, args = (form[\"ip\"].value, form[\"host\"].value, form[\"pagel\"].value, form[\"words\"].value, form[\"title\"].value))\r\n thread0.start()\r\n",
"step-ids": [
1,
2,
4,
5,
6
]
}
|
[
1,
2,
4,
5,
6
] |
import sys
import math
def get_max_sum(arr):
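    # sum every 3x3 "hourglass" centred at (i, j): top row, centre cell, bottom row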
max_sum = -math.inf
for i in range(1, 5):
for j in range(1, 5):
temp = arr[i][j] + arr[i - 1][j - 1] + arr[i - 1][j] + arr[i - 1][
j + 1] + arr[i + 1][j + 1] + arr[i + 1][j] + arr[i + 1][j - 1]
max_sum = max(max_sum, temp)
return max_sum
def main():
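    # read six whitespace-separated rows of the 6x6 grid from stdin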
sys_in = sys.stdin
sys_out = sys.stdout
arr = []
for _ in range(6):
temp = list(map(int, sys.stdin.readline().split()))
arr.append(temp)
print(get_max_sum(arr))
if __name__ == '__main__':
main()
|
normal
|
{
"blob_id": "c99f1333c5ca3221e9932d9a9ba1d95a77924f0d",
"index": 351,
"step-1": "<mask token>\n\n\ndef get_max_sum(arr):\n max_sum = -math.inf\n for i in range(1, 5):\n for j in range(1, 5):\n temp = arr[i][j] + arr[i - 1][j - 1] + arr[i - 1][j] + arr[i - 1][\n j + 1] + arr[i + 1][j + 1] + arr[i + 1][j] + arr[i + 1][j - 1]\n max_sum = max(max_sum, temp)\n return max_sum\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef get_max_sum(arr):\n max_sum = -math.inf\n for i in range(1, 5):\n for j in range(1, 5):\n temp = arr[i][j] + arr[i - 1][j - 1] + arr[i - 1][j] + arr[i - 1][\n j + 1] + arr[i + 1][j + 1] + arr[i + 1][j] + arr[i + 1][j - 1]\n max_sum = max(max_sum, temp)\n return max_sum\n\n\ndef main():\n sys_in = sys.stdin\n sys_out = sys.stdout\n arr = []\n for _ in range(6):\n temp = list(map(int, sys.stdin.readline().split()))\n arr.append(temp)\n print(get_max_sum(arr))\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef get_max_sum(arr):\n max_sum = -math.inf\n for i in range(1, 5):\n for j in range(1, 5):\n temp = arr[i][j] + arr[i - 1][j - 1] + arr[i - 1][j] + arr[i - 1][\n j + 1] + arr[i + 1][j + 1] + arr[i + 1][j] + arr[i + 1][j - 1]\n max_sum = max(max_sum, temp)\n return max_sum\n\n\ndef main():\n sys_in = sys.stdin\n sys_out = sys.stdout\n arr = []\n for _ in range(6):\n temp = list(map(int, sys.stdin.readline().split()))\n arr.append(temp)\n print(get_max_sum(arr))\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "import sys\nimport math\n\n\ndef get_max_sum(arr):\n max_sum = -math.inf\n for i in range(1, 5):\n for j in range(1, 5):\n temp = arr[i][j] + arr[i - 1][j - 1] + arr[i - 1][j] + arr[i - 1][\n j + 1] + arr[i + 1][j + 1] + arr[i + 1][j] + arr[i + 1][j - 1]\n max_sum = max(max_sum, temp)\n return max_sum\n\n\ndef main():\n sys_in = sys.stdin\n sys_out = sys.stdout\n arr = []\n for _ in range(6):\n temp = list(map(int, sys.stdin.readline().split()))\n arr.append(temp)\n print(get_max_sum(arr))\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": null,
"step-ids": [
1,
2,
3,
4
]
}
|
[
1,
2,
3,
4
] |
import socket
import sys
TCP_IP = '192.168.149.129'
TCP_PORT = 5005
BUFFER_SIZE = 2000
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((TCP_IP, TCP_PORT))
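# minimal Python 2 TCP client: forward each stdin line to the server and print the reply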
while 1:
print 'user data:'
content = sys.stdin.readline();
s.send(content)
data = s.recv(BUFFER_SIZE)
print "received data:", data
s.close()
|
normal
|
{
"blob_id": "5669476cc735f569263417b907e8f4a9802cd325",
"index": 3189,
"step-1": "import socket\nimport sys\nTCP_IP = '192.168.149.129'\nTCP_PORT = 5005\nBUFFER_SIZE = 2000\n\ns = socket.socket(socket.AF_INET, socket.SOCK_STREAM) \ns.connect((TCP_IP, TCP_PORT))\nwhile 1:\n print 'user data:'\n content = sys.stdin.readline();\n s.send(content)\n data = s.recv(BUFFER_SIZE)\n print \"received data:\", data\ns.close()\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
# Generated by Django 3.1.7 on 2021-03-28 01:03
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('details', '0002_auto_20210310_1421'),
]
operations = [
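        # Meta-only change (ordering/get_latest_by); it does not alter the database schema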
migrations.AlterModelOptions(
name='detail',
options={'get_latest_by': 'created', 'ordering': ['created']},
),
]
|
normal
|
{
"blob_id": "cdaceb2d8804e08f0b35b9b65f2d06695efad002",
"index": 6470,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('details', '0002_auto_20210310_1421')]\n operations = [migrations.AlterModelOptions(name='detail', options={\n 'get_latest_by': 'created', 'ordering': ['created']})]\n",
"step-4": "from django.db import migrations\n\n\nclass Migration(migrations.Migration):\n dependencies = [('details', '0002_auto_20210310_1421')]\n operations = [migrations.AlterModelOptions(name='detail', options={\n 'get_latest_by': 'created', 'ordering': ['created']})]\n",
"step-5": "# Generated by Django 3.1.7 on 2021-03-28 01:03\n\nfrom django.db import migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('details', '0002_auto_20210310_1421'),\n ]\n\n operations = [\n migrations.AlterModelOptions(\n name='detail',\n options={'get_latest_by': 'created', 'ordering': ['created']},\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import json
import datetime
import requests
import pymysql
import pymongo
def insert_category(conn):
"""将商品的种类插入数据库 """
# 商品种类的 id 和对应的名称
categories_dict = {
66: "手机",
327: "腕表配饰",
65: "电脑办公",
67: "相机单反",
217: "平板数码",
179: "运动户外",
255: "家电家居",
1000: "其他",
}
with conn.cursor() as cursor:
for category_id, category_name in categories_dict.items():
sql = "insert into goods_category (category_id, category_name, create_time) values (%s, %s, %s)"
t = datetime.datetime.now()
create_time = datetime.datetime.strftime(t, "%Y-%m-%d %H:%M:%S")
result = cursor.execute(sql, (category_id, category_name, create_time))
conn.commit()
def insert_brand(conn):
"""将商品的品牌插入数据库"""
brand_list = []
category_id_list = [66, 327, 65, 67, 217, 179, 255]
for category_id in category_id_list:
try:
brand_url = "https://channel.fenqile.com/product/query_filter_list.json?line_type=category_id_1&category_id={category_id}"
res = requests.get(brand_url.format(category_id=category_id))
            # list of all brand dicts returned by the API
brands = json.loads(res.content.decode("utf-8")).get("brand_list")
brand_list += brands
except:
print("出错了:category_id:", category_id)
print()
continue
key_words = ['brand_id', 'brand_name', 'brand_name_ch', 'brand_name_en', 'category_id_1']
sql = "insert into goods_brand values (%s, %s, %s, %s, %s, %s)"
with conn.cursor() as cursor:
brand_set = set()
for brand in brand_list:
brand_id = int(brand.get("brand_id"))
print(brand_id)
if brand_id not in brand_set:
t = datetime.datetime.now()
create_time = datetime.datetime.strftime(t, "%Y-%m-%d %H:%M:%S")
brand_name = brand.get("brand_name")
brand_name_ch = brand.get("brand_name_ch") if brand.get("brand_name_ch") else brand_name
brand_name_en = brand.get("brand_name_en") if brand.get("brand_name_en") else brand_name
category_id = int(brand.get("category_id_1"))
category_id = category_id if category_id in category_id_list else 1000
                # insert into the database
result = cursor.execute(sql, (brand_id, create_time, brand_name, brand_name_ch, brand_name_en, category_id))
print(result)
conn.commit()
                # add to the de-duplication set
brand_set.add(brand_id)
def insert_goods(conn, GOODS):
"""将商品信息插入数据库"""
# 数据库中的所有的字段 22 个
kws = ("product_name", "category_id_1", "brand_id", "product_desc",
"short_product_name", "sku_key_1", "sku_key_2", "sku_key_3", "product_flag",
"min_firstpay", "is_product_up_down", "real_amount", "mart_amount", "fq_num",
"product_info", "delivery_time", "gift_list", "fe_params", "slider_imgs",
"detail_imgs", "create_time")
    # insert every field except the product id
# sql = "insert into goods () values (%s, %s, %s, %s, %s, " \
# "%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)"
sql = "insert into goods (good_name,category_id,brand_id,product_name,short_product_name," \
"sku_key_1,sku_key_2,sku_key_3,product_flag,min_firstpay,is_product_up_down,real_amount," \
"mart_amount,fq_num,product_info,delivery_time,gift_list,fe_params,slider_imgs,detail_imgs," \
"create_time) values (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)"
    # fetch the data from mongodb
goods = GOODS.find()
for good in goods:
try:
data = []
            # dedup set for product ids
# good_id_set = set()
for kw in kws[:-5]:
info = good["detail_data"].get(kw)
data.append(info)
            # handle the complex fields individually
gift_list = " ".join([str(s) for s in good["detail_data"].get("gift_list")[-1].values()])
data.append(gift_list)
fe_params = json.dumps(good["detail_data"].get("fe_params"))
data.append(fe_params)
slider_imgs = "||".join(good["slider_imgs"])
data.append(slider_imgs)
detail_imgs = "||".join(good["detail_imgs"])
data.append(detail_imgs)
t = datetime.datetime.now()
create_time = datetime.datetime.strftime(t, "%Y-%m-%d %H:%M:%S")
data.append(create_time)
            # check whether the id is a duplicate
# if good["good_id"] not in good_id_set:
with conn.cursor() as cursor:
cursor.execute("select brand_id from goods_brand")
                # look up all brand ids
all_brand_ids = [brand_id[0] for brand_id in cursor.fetchall()]
cursor.execute("select category_id from goods_category")
                # look up all category ids
all_category_ids = [category_id[0] for category_id in cursor.fetchall()]
data[1] = data[1] if data[1] else 1000
data[2] = data[2] if data[2] else 10000
data[1] = 1000 if int(data[1]) not in all_category_ids else int(data[1])
data[2] = 10000 if int(data[2]) not in all_brand_ids else int(data[2])
cursor.execute(sql, tuple(data))
conn.commit()
# good_id_set.add(good["good_id"])
except Exception as e:
print(e)
continue
def main():
    # MySQL connection
    conn = pymysql.connect(host="127.0.0.1", port=3306, user="root", password="123456",
                           db="test", charset="utf8", autocommit=False)
    # insert the categories into the database
    # insert_category(conn)
    # insert the brands into the database
    # insert_brand(conn)
    # insert the goods into the database
    # mongodb connection
CONN = pymongo.MongoClient(host='10.7.152.75', port=27017)
GOODS = CONN["fenqile"]["goods"]
insert_goods(conn, GOODS)
conn.close()
if __name__ == "__main__":
main()
|
normal
|
{
"blob_id": "b69e3f5e57adc8e89b6ff22fb4a10d2539e13ca3",
"index": 7200,
"step-1": "<mask token>\n\n\ndef insert_category(conn):\n \"\"\"将商品的种类插入数据库 \"\"\"\n categories_dict = {(66): '手机', (327): '腕表配饰', (65): '电脑办公', (67):\n '相机单反', (217): '平板数码', (179): '运动户外', (255): '家电家居', (1000): '其他'}\n with conn.cursor() as cursor:\n for category_id, category_name in categories_dict.items():\n sql = (\n 'insert into goods_category (category_id, category_name, create_time) values (%s, %s, %s)'\n )\n t = datetime.datetime.now()\n create_time = datetime.datetime.strftime(t, '%Y-%m-%d %H:%M:%S')\n result = cursor.execute(sql, (category_id, category_name,\n create_time))\n conn.commit()\n\n\ndef insert_brand(conn):\n \"\"\"将商品的品牌插入数据库\"\"\"\n brand_list = []\n category_id_list = [66, 327, 65, 67, 217, 179, 255]\n for category_id in category_id_list:\n try:\n brand_url = (\n 'https://channel.fenqile.com/product/query_filter_list.json?line_type=category_id_1&category_id={category_id}'\n )\n res = requests.get(brand_url.format(category_id=category_id))\n brands = json.loads(res.content.decode('utf-8')).get('brand_list')\n brand_list += brands\n except:\n print('出错了:category_id:', category_id)\n print()\n continue\n key_words = ['brand_id', 'brand_name', 'brand_name_ch', 'brand_name_en',\n 'category_id_1']\n sql = 'insert into goods_brand values (%s, %s, %s, %s, %s, %s)'\n with conn.cursor() as cursor:\n brand_set = set()\n for brand in brand_list:\n brand_id = int(brand.get('brand_id'))\n print(brand_id)\n if brand_id not in brand_set:\n t = datetime.datetime.now()\n create_time = datetime.datetime.strftime(t, '%Y-%m-%d %H:%M:%S'\n )\n brand_name = brand.get('brand_name')\n brand_name_ch = brand.get('brand_name_ch') if brand.get(\n 'brand_name_ch') else brand_name\n brand_name_en = brand.get('brand_name_en') if brand.get(\n 'brand_name_en') else brand_name\n category_id = int(brand.get('category_id_1'))\n category_id = (category_id if category_id in\n category_id_list else 1000)\n result = cursor.execute(sql, (brand_id, create_time,\n brand_name, brand_name_ch, brand_name_en, category_id))\n print(result)\n conn.commit()\n brand_set.add(brand_id)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef insert_category(conn):\n \"\"\"将商品的种类插入数据库 \"\"\"\n categories_dict = {(66): '手机', (327): '腕表配饰', (65): '电脑办公', (67):\n '相机单反', (217): '平板数码', (179): '运动户外', (255): '家电家居', (1000): '其他'}\n with conn.cursor() as cursor:\n for category_id, category_name in categories_dict.items():\n sql = (\n 'insert into goods_category (category_id, category_name, create_time) values (%s, %s, %s)'\n )\n t = datetime.datetime.now()\n create_time = datetime.datetime.strftime(t, '%Y-%m-%d %H:%M:%S')\n result = cursor.execute(sql, (category_id, category_name,\n create_time))\n conn.commit()\n\n\ndef insert_brand(conn):\n \"\"\"将商品的品牌插入数据库\"\"\"\n brand_list = []\n category_id_list = [66, 327, 65, 67, 217, 179, 255]\n for category_id in category_id_list:\n try:\n brand_url = (\n 'https://channel.fenqile.com/product/query_filter_list.json?line_type=category_id_1&category_id={category_id}'\n )\n res = requests.get(brand_url.format(category_id=category_id))\n brands = json.loads(res.content.decode('utf-8')).get('brand_list')\n brand_list += brands\n except:\n print('出错了:category_id:', category_id)\n print()\n continue\n key_words = ['brand_id', 'brand_name', 'brand_name_ch', 'brand_name_en',\n 'category_id_1']\n sql = 'insert into goods_brand values (%s, %s, %s, %s, %s, %s)'\n with conn.cursor() as cursor:\n brand_set = set()\n for brand in brand_list:\n brand_id = int(brand.get('brand_id'))\n print(brand_id)\n if brand_id not in brand_set:\n t = datetime.datetime.now()\n create_time = datetime.datetime.strftime(t, '%Y-%m-%d %H:%M:%S'\n )\n brand_name = brand.get('brand_name')\n brand_name_ch = brand.get('brand_name_ch') if brand.get(\n 'brand_name_ch') else brand_name\n brand_name_en = brand.get('brand_name_en') if brand.get(\n 'brand_name_en') else brand_name\n category_id = int(brand.get('category_id_1'))\n category_id = (category_id if category_id in\n category_id_list else 1000)\n result = cursor.execute(sql, (brand_id, create_time,\n brand_name, brand_name_ch, brand_name_en, category_id))\n print(result)\n conn.commit()\n brand_set.add(brand_id)\n\n\ndef insert_goods(conn, GOODS):\n \"\"\"将商品信息插入数据库\"\"\"\n kws = ('product_name', 'category_id_1', 'brand_id', 'product_desc',\n 'short_product_name', 'sku_key_1', 'sku_key_2', 'sku_key_3',\n 'product_flag', 'min_firstpay', 'is_product_up_down', 'real_amount',\n 'mart_amount', 'fq_num', 'product_info', 'delivery_time',\n 'gift_list', 'fe_params', 'slider_imgs', 'detail_imgs', 'create_time')\n sql = (\n 'insert into goods (good_name,category_id,brand_id,product_name,short_product_name,sku_key_1,sku_key_2,sku_key_3,product_flag,min_firstpay,is_product_up_down,real_amount,mart_amount,fq_num,product_info,delivery_time,gift_list,fe_params,slider_imgs,detail_imgs,create_time) values (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)'\n )\n goods = GOODS.find()\n for good in goods:\n try:\n data = []\n for kw in kws[:-5]:\n info = good['detail_data'].get(kw)\n data.append(info)\n gift_list = ' '.join([str(s) for s in good['detail_data'].get(\n 'gift_list')[-1].values()])\n data.append(gift_list)\n fe_params = json.dumps(good['detail_data'].get('fe_params'))\n data.append(fe_params)\n slider_imgs = '||'.join(good['slider_imgs'])\n data.append(slider_imgs)\n detail_imgs = '||'.join(good['detail_imgs'])\n data.append(detail_imgs)\n t = datetime.datetime.now()\n create_time = datetime.datetime.strftime(t, '%Y-%m-%d %H:%M:%S')\n data.append(create_time)\n with conn.cursor() as cursor:\n cursor.execute('select 
brand_id from goods_brand')\n all_brand_ids = [brand_id[0] for brand_id in cursor.fetchall()]\n cursor.execute('select category_id from goods_category')\n all_category_ids = [category_id[0] for category_id in\n cursor.fetchall()]\n data[1] = data[1] if data[1] else 1000\n data[2] = data[2] if data[2] else 10000\n data[1] = 1000 if int(data[1]\n ) not in all_category_ids else int(data[1])\n data[2] = 10000 if int(data[2]) not in all_brand_ids else int(\n data[2])\n cursor.execute(sql, tuple(data))\n conn.commit()\n except Exception as e:\n print(e)\n continue\n\n\ndef main():\n conn = pymysql.connect(host='127.0.0.1', port=3306, user='root',\n password='123456', db='test', charset='utf8', autocommit=False)\n CONN = pymongo.MongoClient(host='10.7.152.75', port=27017)\n GOODS = CONN['fenqile']['goods']\n insert_goods(conn, GOODS)\n conn.close()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef insert_category(conn):\n \"\"\"将商品的种类插入数据库 \"\"\"\n categories_dict = {(66): '手机', (327): '腕表配饰', (65): '电脑办公', (67):\n '相机单反', (217): '平板数码', (179): '运动户外', (255): '家电家居', (1000): '其他'}\n with conn.cursor() as cursor:\n for category_id, category_name in categories_dict.items():\n sql = (\n 'insert into goods_category (category_id, category_name, create_time) values (%s, %s, %s)'\n )\n t = datetime.datetime.now()\n create_time = datetime.datetime.strftime(t, '%Y-%m-%d %H:%M:%S')\n result = cursor.execute(sql, (category_id, category_name,\n create_time))\n conn.commit()\n\n\ndef insert_brand(conn):\n \"\"\"将商品的品牌插入数据库\"\"\"\n brand_list = []\n category_id_list = [66, 327, 65, 67, 217, 179, 255]\n for category_id in category_id_list:\n try:\n brand_url = (\n 'https://channel.fenqile.com/product/query_filter_list.json?line_type=category_id_1&category_id={category_id}'\n )\n res = requests.get(brand_url.format(category_id=category_id))\n brands = json.loads(res.content.decode('utf-8')).get('brand_list')\n brand_list += brands\n except:\n print('出错了:category_id:', category_id)\n print()\n continue\n key_words = ['brand_id', 'brand_name', 'brand_name_ch', 'brand_name_en',\n 'category_id_1']\n sql = 'insert into goods_brand values (%s, %s, %s, %s, %s, %s)'\n with conn.cursor() as cursor:\n brand_set = set()\n for brand in brand_list:\n brand_id = int(brand.get('brand_id'))\n print(brand_id)\n if brand_id not in brand_set:\n t = datetime.datetime.now()\n create_time = datetime.datetime.strftime(t, '%Y-%m-%d %H:%M:%S'\n )\n brand_name = brand.get('brand_name')\n brand_name_ch = brand.get('brand_name_ch') if brand.get(\n 'brand_name_ch') else brand_name\n brand_name_en = brand.get('brand_name_en') if brand.get(\n 'brand_name_en') else brand_name\n category_id = int(brand.get('category_id_1'))\n category_id = (category_id if category_id in\n category_id_list else 1000)\n result = cursor.execute(sql, (brand_id, create_time,\n brand_name, brand_name_ch, brand_name_en, category_id))\n print(result)\n conn.commit()\n brand_set.add(brand_id)\n\n\ndef insert_goods(conn, GOODS):\n \"\"\"将商品信息插入数据库\"\"\"\n kws = ('product_name', 'category_id_1', 'brand_id', 'product_desc',\n 'short_product_name', 'sku_key_1', 'sku_key_2', 'sku_key_3',\n 'product_flag', 'min_firstpay', 'is_product_up_down', 'real_amount',\n 'mart_amount', 'fq_num', 'product_info', 'delivery_time',\n 'gift_list', 'fe_params', 'slider_imgs', 'detail_imgs', 'create_time')\n sql = (\n 'insert into goods (good_name,category_id,brand_id,product_name,short_product_name,sku_key_1,sku_key_2,sku_key_3,product_flag,min_firstpay,is_product_up_down,real_amount,mart_amount,fq_num,product_info,delivery_time,gift_list,fe_params,slider_imgs,detail_imgs,create_time) values (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)'\n )\n goods = GOODS.find()\n for good in goods:\n try:\n data = []\n for kw in kws[:-5]:\n info = good['detail_data'].get(kw)\n data.append(info)\n gift_list = ' '.join([str(s) for s in good['detail_data'].get(\n 'gift_list')[-1].values()])\n data.append(gift_list)\n fe_params = json.dumps(good['detail_data'].get('fe_params'))\n data.append(fe_params)\n slider_imgs = '||'.join(good['slider_imgs'])\n data.append(slider_imgs)\n detail_imgs = '||'.join(good['detail_imgs'])\n data.append(detail_imgs)\n t = datetime.datetime.now()\n create_time = datetime.datetime.strftime(t, '%Y-%m-%d %H:%M:%S')\n data.append(create_time)\n with conn.cursor() as cursor:\n cursor.execute('select 
brand_id from goods_brand')\n all_brand_ids = [brand_id[0] for brand_id in cursor.fetchall()]\n cursor.execute('select category_id from goods_category')\n all_category_ids = [category_id[0] for category_id in\n cursor.fetchall()]\n data[1] = data[1] if data[1] else 1000\n data[2] = data[2] if data[2] else 10000\n data[1] = 1000 if int(data[1]\n ) not in all_category_ids else int(data[1])\n data[2] = 10000 if int(data[2]) not in all_brand_ids else int(\n data[2])\n cursor.execute(sql, tuple(data))\n conn.commit()\n except Exception as e:\n print(e)\n continue\n\n\ndef main():\n conn = pymysql.connect(host='127.0.0.1', port=3306, user='root',\n password='123456', db='test', charset='utf8', autocommit=False)\n CONN = pymongo.MongoClient(host='10.7.152.75', port=27017)\n GOODS = CONN['fenqile']['goods']\n insert_goods(conn, GOODS)\n conn.close()\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "import json\nimport datetime\nimport requests\nimport pymysql\nimport pymongo\n\n\ndef insert_category(conn):\n \"\"\"将商品的种类插入数据库 \"\"\"\n categories_dict = {(66): '手机', (327): '腕表配饰', (65): '电脑办公', (67):\n '相机单反', (217): '平板数码', (179): '运动户外', (255): '家电家居', (1000): '其他'}\n with conn.cursor() as cursor:\n for category_id, category_name in categories_dict.items():\n sql = (\n 'insert into goods_category (category_id, category_name, create_time) values (%s, %s, %s)'\n )\n t = datetime.datetime.now()\n create_time = datetime.datetime.strftime(t, '%Y-%m-%d %H:%M:%S')\n result = cursor.execute(sql, (category_id, category_name,\n create_time))\n conn.commit()\n\n\ndef insert_brand(conn):\n \"\"\"将商品的品牌插入数据库\"\"\"\n brand_list = []\n category_id_list = [66, 327, 65, 67, 217, 179, 255]\n for category_id in category_id_list:\n try:\n brand_url = (\n 'https://channel.fenqile.com/product/query_filter_list.json?line_type=category_id_1&category_id={category_id}'\n )\n res = requests.get(brand_url.format(category_id=category_id))\n brands = json.loads(res.content.decode('utf-8')).get('brand_list')\n brand_list += brands\n except:\n print('出错了:category_id:', category_id)\n print()\n continue\n key_words = ['brand_id', 'brand_name', 'brand_name_ch', 'brand_name_en',\n 'category_id_1']\n sql = 'insert into goods_brand values (%s, %s, %s, %s, %s, %s)'\n with conn.cursor() as cursor:\n brand_set = set()\n for brand in brand_list:\n brand_id = int(brand.get('brand_id'))\n print(brand_id)\n if brand_id not in brand_set:\n t = datetime.datetime.now()\n create_time = datetime.datetime.strftime(t, '%Y-%m-%d %H:%M:%S'\n )\n brand_name = brand.get('brand_name')\n brand_name_ch = brand.get('brand_name_ch') if brand.get(\n 'brand_name_ch') else brand_name\n brand_name_en = brand.get('brand_name_en') if brand.get(\n 'brand_name_en') else brand_name\n category_id = int(brand.get('category_id_1'))\n category_id = (category_id if category_id in\n category_id_list else 1000)\n result = cursor.execute(sql, (brand_id, create_time,\n brand_name, brand_name_ch, brand_name_en, category_id))\n print(result)\n conn.commit()\n brand_set.add(brand_id)\n\n\ndef insert_goods(conn, GOODS):\n \"\"\"将商品信息插入数据库\"\"\"\n kws = ('product_name', 'category_id_1', 'brand_id', 'product_desc',\n 'short_product_name', 'sku_key_1', 'sku_key_2', 'sku_key_3',\n 'product_flag', 'min_firstpay', 'is_product_up_down', 'real_amount',\n 'mart_amount', 'fq_num', 'product_info', 'delivery_time',\n 'gift_list', 'fe_params', 'slider_imgs', 'detail_imgs', 'create_time')\n sql = (\n 'insert into goods (good_name,category_id,brand_id,product_name,short_product_name,sku_key_1,sku_key_2,sku_key_3,product_flag,min_firstpay,is_product_up_down,real_amount,mart_amount,fq_num,product_info,delivery_time,gift_list,fe_params,slider_imgs,detail_imgs,create_time) values (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)'\n )\n goods = GOODS.find()\n for good in goods:\n try:\n data = []\n for kw in kws[:-5]:\n info = good['detail_data'].get(kw)\n data.append(info)\n gift_list = ' '.join([str(s) for s in good['detail_data'].get(\n 'gift_list')[-1].values()])\n data.append(gift_list)\n fe_params = json.dumps(good['detail_data'].get('fe_params'))\n data.append(fe_params)\n slider_imgs = '||'.join(good['slider_imgs'])\n data.append(slider_imgs)\n detail_imgs = '||'.join(good['detail_imgs'])\n data.append(detail_imgs)\n t = datetime.datetime.now()\n create_time = datetime.datetime.strftime(t, '%Y-%m-%d %H:%M:%S')\n 
data.append(create_time)\n with conn.cursor() as cursor:\n cursor.execute('select brand_id from goods_brand')\n all_brand_ids = [brand_id[0] for brand_id in cursor.fetchall()]\n cursor.execute('select category_id from goods_category')\n all_category_ids = [category_id[0] for category_id in\n cursor.fetchall()]\n data[1] = data[1] if data[1] else 1000\n data[2] = data[2] if data[2] else 10000\n data[1] = 1000 if int(data[1]\n ) not in all_category_ids else int(data[1])\n data[2] = 10000 if int(data[2]) not in all_brand_ids else int(\n data[2])\n cursor.execute(sql, tuple(data))\n conn.commit()\n except Exception as e:\n print(e)\n continue\n\n\ndef main():\n conn = pymysql.connect(host='127.0.0.1', port=3306, user='root',\n password='123456', db='test', charset='utf8', autocommit=False)\n CONN = pymongo.MongoClient(host='10.7.152.75', port=27017)\n GOODS = CONN['fenqile']['goods']\n insert_goods(conn, GOODS)\n conn.close()\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "\nimport json\nimport datetime\n\nimport requests\nimport pymysql\nimport pymongo\n\n\ndef insert_category(conn):\n \"\"\"将商品的种类插入数据库 \"\"\"\n # 商品种类的 id 和对应的名称\n categories_dict = {\n 66: \"手机\",\n 327: \"腕表配饰\",\n 65: \"电脑办公\",\n 67: \"相机单反\",\n 217: \"平板数码\",\n 179: \"运动户外\",\n 255: \"家电家居\",\n 1000: \"其他\",\n }\n with conn.cursor() as cursor:\n for category_id, category_name in categories_dict.items():\n sql = \"insert into goods_category (category_id, category_name, create_time) values (%s, %s, %s)\"\n t = datetime.datetime.now()\n create_time = datetime.datetime.strftime(t, \"%Y-%m-%d %H:%M:%S\")\n result = cursor.execute(sql, (category_id, category_name, create_time))\n conn.commit()\n\n\ndef insert_brand(conn):\n \"\"\"将商品的品牌插入数据库\"\"\"\n brand_list = []\n category_id_list = [66, 327, 65, 67, 217, 179, 255]\n for category_id in category_id_list:\n try:\n brand_url = \"https://channel.fenqile.com/product/query_filter_list.json?line_type=category_id_1&category_id={category_id}\"\n res = requests.get(brand_url.format(category_id=category_id))\n # 所有的brand字典组成的列表\n brands = json.loads(res.content.decode(\"utf-8\")).get(\"brand_list\")\n brand_list += brands\n except:\n print(\"出错了:category_id:\", category_id)\n print()\n continue\n\n key_words = ['brand_id', 'brand_name', 'brand_name_ch', 'brand_name_en', 'category_id_1']\n sql = \"insert into goods_brand values (%s, %s, %s, %s, %s, %s)\"\n with conn.cursor() as cursor:\n brand_set = set()\n for brand in brand_list:\n brand_id = int(brand.get(\"brand_id\"))\n print(brand_id)\n if brand_id not in brand_set:\n t = datetime.datetime.now()\n create_time = datetime.datetime.strftime(t, \"%Y-%m-%d %H:%M:%S\")\n brand_name = brand.get(\"brand_name\")\n brand_name_ch = brand.get(\"brand_name_ch\") if brand.get(\"brand_name_ch\") else brand_name\n brand_name_en = brand.get(\"brand_name_en\") if brand.get(\"brand_name_en\") else brand_name\n category_id = int(brand.get(\"category_id_1\"))\n category_id = category_id if category_id in category_id_list else 1000\n # 插入数据库\n result = cursor.execute(sql, (brand_id, create_time, brand_name, brand_name_ch, brand_name_en, category_id))\n print(result)\n conn.commit()\n # 加入去重队列\n brand_set.add(brand_id)\n\n\ndef insert_goods(conn, GOODS):\n \"\"\"将商品信息插入数据库\"\"\"\n # 数据库中的所有的字段 22 个\n kws = (\"product_name\", \"category_id_1\", \"brand_id\", \"product_desc\",\n \"short_product_name\", \"sku_key_1\", \"sku_key_2\", \"sku_key_3\", \"product_flag\",\n \"min_firstpay\", \"is_product_up_down\", \"real_amount\", \"mart_amount\", \"fq_num\",\n \"product_info\", \"delivery_time\", \"gift_list\", \"fe_params\", \"slider_imgs\",\n \"detail_imgs\", \"create_time\")\n # 插入除 商品 id 之外的字段\n # sql = \"insert into goods () values (%s, %s, %s, %s, %s, \" \\\n # \"%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)\"\n sql = \"insert into goods (good_name,category_id,brand_id,product_name,short_product_name,\" \\\n \"sku_key_1,sku_key_2,sku_key_3,product_flag,min_firstpay,is_product_up_down,real_amount,\" \\\n \"mart_amount,fq_num,product_info,delivery_time,gift_list,fe_params,slider_imgs,detail_imgs,\" \\\n \"create_time) values (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)\"\n # 获取mongodb 中的数据\n goods = GOODS.find()\n for good in goods:\n try:\n data = []\n # 商品 id 去重集合\n # good_id_set = set()\n for kw in kws[:-5]:\n info = good[\"detail_data\"].get(kw)\n data.append(info)\n # 单独处理复杂的项目\n gift_list = \" \".join([str(s) for s in 
good[\"detail_data\"].get(\"gift_list\")[-1].values()])\n data.append(gift_list)\n fe_params = json.dumps(good[\"detail_data\"].get(\"fe_params\"))\n data.append(fe_params)\n slider_imgs = \"||\".join(good[\"slider_imgs\"])\n data.append(slider_imgs)\n detail_imgs = \"||\".join(good[\"detail_imgs\"])\n data.append(detail_imgs)\n t = datetime.datetime.now()\n create_time = datetime.datetime.strftime(t, \"%Y-%m-%d %H:%M:%S\")\n data.append(create_time)\n # 判断 id 是否重复\n # if good[\"good_id\"] not in good_id_set:\n with conn.cursor() as cursor:\n cursor.execute(\"select brand_id from goods_brand\")\n # 查出所有的品牌 id\n all_brand_ids = [brand_id[0] for brand_id in cursor.fetchall()]\n cursor.execute(\"select category_id from goods_category\")\n # 查出所有的种类 id\n all_category_ids = [category_id[0] for category_id in cursor.fetchall()]\n data[1] = data[1] if data[1] else 1000\n data[2] = data[2] if data[2] else 10000\n data[1] = 1000 if int(data[1]) not in all_category_ids else int(data[1])\n data[2] = 10000 if int(data[2]) not in all_brand_ids else int(data[2])\n cursor.execute(sql, tuple(data))\n conn.commit()\n # good_id_set.add(good[\"good_id\"])\n except Exception as e:\n print(e)\n continue\n\n\ndef main():\n # MySQL 连接\n conn = pymysql.connect(host=\"127.0.0.1\", port=3306, user=\"root\", password=\"123456\",\n db=\"test\", charset=\"utf8\", autocommit=False)\n # 将分类插入数据库\n # insert_category(conn)\n # 将品牌插入数据库\n # insert_brand(conn)\n # 将商品插入数据库\n # mongodb 连接\n CONN = pymongo.MongoClient(host='10.7.152.75', port=27017)\n GOODS = CONN[\"fenqile\"][\"goods\"]\n insert_goods(conn, GOODS)\n\n conn.close()\n\n\nif __name__ == \"__main__\":\n main()\n",
"step-ids": [
2,
4,
5,
6,
7
]
}
|
[
2,
4,
5,
6,
7
] |
# -*- coding:utf-8 -*-
import easygui as eg
import time as tm
import numpy as np
import thread
import os
from urllib2 import urlopen, Request
import json
from datetime import datetime, timedelta
URL_IFENG='http://api.finance.ifeng.com/akmin?scode=%s&type=%s'
NUM_PER_THREAD=100# number of stocks each thread monitors
SCAN_INTERVAL=10
FILE_PATH=u'.\export'
END_HOUR=24
MAX_DATES=100
MSG_HEAD=u'\n 板块 代码 开盘价 均价 收盘价\n'
KDATA_ONE_DAY={'5':48,'15':16,'30':8,'60':4}
K_MIN_LABELS=['5', '15', '30', '60']
cross_list={}
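# cross_list: board name -> {u'cross_num': entries already reported, u'cross_codes': [[code, open, mean, close], ...]}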
def cross_monitor(codes,ktype,avn,thread_no,retry=3):
global cross_list
tmp_codes=[]
    for code in codes:# each entry becomes [0] ticker code + [1] board + [2] time of the latest quote
tmp_code=list(code)
tmp_code.append(u'0')
tmp_codes.append(tmp_code)
while datetime.now().hour<END_HOUR:
start=tm.clock()
for code in tmp_codes:
for _ in range(retry):
try:
url=URL_IFENG%(code[0],ktype)
request=Request(url)
lines=urlopen(request,timeout=3).read()
js=json.loads(lines)
data=js['record'][-avn:]
if data[-1][0]!=code[2]:
                        print u'new data found'
code[2]=data[-1][0]
mean=0
for j in range(avn):
mean=mean+float(data[-(j+1)][3])
mean=mean/avn
price_open=float(data[-2][3])
price_close=float(data[-1][3])
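                        # cross test: the candle opens at or below the moving average and closes at or above it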
if price_open<=mean and mean<=price_close:
cross_list[code[1]][u'cross_codes'].append([code[0][2:8],price_open,mean,price_close])
except Exception as e:
                    print code,u'data processing error, details:',e
else:
break
finish=tm.clock()
        print u'thread',thread_no,u'fetch finished, elapsed',finish-start
tm.sleep(20)
#function that pops up the alert window
def showcross():
global cross_list
msg=MSG_HEAD
for board, lis in cross_list.iteritems():
new_num=len(lis[u'cross_codes'])
if lis[u'cross_num']<new_num:
msg=msg+u'============================================\n'
for code in lis[u'cross_codes'][lis[u'cross_num']:new_num]:
msg=msg+'['+board+u'] '+code[0]+' '+str(code[1])+' '+str(code[2])+' '+str(code[3])+'\n'
lis[u'cross_num']=new_num
if msg!=MSG_HEAD:
        eg.msgbox(msg=msg,title=u'stocks whose candle crossed above the moving average',ok_button=u'OK')
    # write the log
try:
log=open('log.txt','a')
log.write('\n'+datetime.now().isoformat(' '))
log.write(msg.encode('gbk'))
except:
        eg.msgbox(u'failed to write the log')
finally:
log.close()
return None
if __name__ == "__main__":
#code=raw_input(u'code:')
total_codes=0
avn=0
codes=[]
    ktype=eg.choicebox(msg=u'choose a candlestick period (minutes)', choices=K_MIN_LABELS)
while(avn<=1):
        avn=eg.integerbox(msg=u'enter the moving-average length, between 1 and 500', default=10, upperbound=500)
try:
dir_list=os.listdir(FILE_PATH)
except:
        eg.msgbox(u'error while looking for data files')
exit()
for dir_name in dir_list:
        # check whether this entry is a directory
path_test=os.path.join(FILE_PATH,dir_name)
if os.path.isdir(path_test):
cross_list[dir_name]={u'cross_num':0,u'cross_codes':[]}
try:
file_list=os.listdir(path_test)
except:
            eg.msgbox(u'error while looking for data files')
for file_name in file_list:
if file_name[0:2]=='SZ':
codes.append([u'sz'+file_name[3:9],dir_name])
total_codes=total_codes+1
elif file_name[0:2]=='SH':
codes.append([u'sh'+file_name[3:9],dir_name])
total_codes=total_codes+1
if total_codes==0:
        eg.msgbox(u'no data files found')
exit()
try:
k=0
i=0
while k<total_codes:
if (k+NUM_PER_THREAD)>=total_codes:
thread.start_new_thread(cross_monitor,(codes[k:],ktype,avn,i,))
else:
thread.start_new_thread(cross_monitor,(codes[k:k+NUM_PER_THREAD],ktype,avn,i,))
i=i+1
k=k+NUM_PER_THREAD
except:
        eg.msgbox(msg=u'failed to create monitoring threads')
exit()
    while datetime.now().hour<END_HOUR:# monitoring was meant to stop at 4 pm (END_HOUR is 24 here)
showcross()
tm.sleep(SCAN_INTERVAL)
    eg.msgbox(msg=u'the market has closed!')
|
normal
|
{
"blob_id": "57027cd638a01a1e556bcde99bcbe2a3b2fa0ef8",
"index": 2388,
"step-1": "# -*- coding:utf-8 -*-\nimport easygui as eg\nimport time as tm\nimport numpy as np\nimport thread\nimport os\nfrom urllib2 import urlopen, Request\nimport json\nfrom datetime import datetime, timedelta\n\nURL_IFENG='http://api.finance.ifeng.com/akmin?scode=%s&type=%s'\nNUM_PER_THREAD=100#单线程监控的股票数\nSCAN_INTERVAL=10\nFILE_PATH=u'.\\export'\nEND_HOUR=24\nMAX_DATES=100\nMSG_HEAD=u'\\n 板块 代码 开盘价 均价 收盘价\\n'\nKDATA_ONE_DAY={'5':48,'15':16,'30':8,'60':4}\nK_MIN_LABELS=['5', '15', '30', '60']\ncross_list={}\n\ndef cross_monitor(codes,ktype,avn,thread_no,retry=3):\n global cross_list\n tmp_codes=[]\n for code in codes:#代码信息改为 [0]证券代码+[1]所属板块+[2]最新行情时间\n tmp_code=list(code)\n tmp_code.append(u'0')\n tmp_codes.append(tmp_code)\n while datetime.now().hour<END_HOUR:\n start=tm.clock()\n for code in tmp_codes:\n for _ in range(retry):\n try:\n url=URL_IFENG%(code[0],ktype)\n request=Request(url)\n lines=urlopen(request,timeout=3).read()\n js=json.loads(lines)\n data=js['record'][-avn:]\n if data[-1][0]!=code[2]:\n print u'发现新数据'\n code[2]=data[-1][0]\n mean=0\n for j in range(avn):\n mean=mean+float(data[-(j+1)][3])\n mean=mean/avn\n price_open=float(data[-2][3])\n price_close=float(data[-1][3])\n if price_open<=mean and mean<=price_close:\n cross_list[code[1]][u'cross_codes'].append([code[0][2:8],price_open,mean,price_close])\n except Exception as e:\n print code,u'数据处理异常,错误信息',e\n else:\n break\n finish=tm.clock()\n print u'线程',thread_no,u'数据获取结束,总耗时',finish-start\n tm.sleep(20) \n\n\n#弹出提示窗口函数\ndef showcross():\n global cross_list \n msg=MSG_HEAD\n for board, lis in cross_list.iteritems():\n new_num=len(lis[u'cross_codes'])\n if lis[u'cross_num']<new_num:\n msg=msg+u'============================================\\n'\n for code in lis[u'cross_codes'][lis[u'cross_num']:new_num]:\n msg=msg+'['+board+u'] '+code[0]+' '+str(code[1])+' '+str(code[2])+' '+str(code[3])+'\\n'\n lis[u'cross_num']=new_num\n if msg!=MSG_HEAD:\n eg.msgbox(msg=msg,title=u'发现K线上穿均线的股票',ok_button=u'知道了')\n #写日志\n try:\n log=open('log.txt','a')\n log.write('\\n'+datetime.now().isoformat(' '))\n log.write(msg.encode('gbk'))\n except:\n eg.msgbox(u'写日志失败')\n finally:\n log.close()\n return None\n \n\nif __name__ == \"__main__\":\n #code=raw_input(u'code:')\n total_codes=0\n avn=0\n codes=[]\n ktype=eg.choicebox(msg=u'请选择k线周期', choices=K_MIN_LABELS)\n while(avn<=1):\n avn=eg.integerbox(msg=u'请输入均线天数,范围在1-500之间', default=10, upperbound=500)\n try:\n dir_list=os.listdir(FILE_PATH)\n except:\n eg.msgbox(u'查找数据文件出现异常')\n exit()\n for dir_name in dir_list:\n #检查是否为目录\n path_test=os.path.join(FILE_PATH,dir_name)\n if os.path.isdir(path_test):\n cross_list[dir_name]={u'cross_num':0,u'cross_codes':[]}\n try:\n file_list=os.listdir(path_test)\n except:\n eg.msgbox(u'查找数据文件出现异常')\n for file_name in file_list:\n if file_name[0:2]=='SZ':\n codes.append([u'sz'+file_name[3:9],dir_name])\n total_codes=total_codes+1\n elif file_name[0:2]=='SH':\n codes.append([u'sh'+file_name[3:9],dir_name])\n total_codes=total_codes+1\n if total_codes==0:\n eg.msgbox(u'没有发现数据文件')\n exit()\n try:\n k=0\n i=0\n while k<total_codes:\n if (k+NUM_PER_THREAD)>=total_codes:\n thread.start_new_thread(cross_monitor,(codes[k:],ktype,avn,i,))\n else:\n thread.start_new_thread(cross_monitor,(codes[k:k+NUM_PER_THREAD],ktype,avn,i,))\n i=i+1\n k=k+NUM_PER_THREAD\n except:\n eg.msgbox(msg=u'创建监控线程失败')\n exit() \n\n while datetime.now().hour<END_HOUR:#下午4点结束监控\n showcross()\n tm.sleep(SCAN_INTERVAL)\n eg.msgbox(msg=u'闭市了!')\n\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
#This program is a nice example of a core algorithm
#Remove Individual Digits
# To remove individual digits you use two operations
# 1 MOD:
# mod return the remainder after division. 5%2 = 1.
# If we mod by 10 we get the units digit. 723%10 = 3
# 2 Integer Division:
# Integer division is when we divide and remove decimals;
# we DO NOT round, simply cut them off. To integer divide
# in Python we use //. For example 723//10 = 72. This is
# a quick way to remove decimals.
def findSum(n):
s = 0 #store the sum of the values
while (n > 0):
x = n % 10 #chop off units digit store in x
s = s + x #add unit digit to sum, stored in s
n = n // 10 #remove unit digit from n
return s
def isHarshad(n):
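    # a Harshad (Niven) number is divisible by the sum of its digits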
if (n % findSum(n) == 0): #note that if a % b == 0 b is a factor of a
return True
return False
def findHarshad(low, high):
    streak = 0
    maxStreak = 0
    for i in range(low, high + 1, 1):
        if (isHarshad(i)):
            streak = streak + 1
        else:
            maxStreak = max(streak, maxStreak)
            streak = 0
        #print(i,streak) #Test code for debugging
    maxStreak = max(streak, maxStreak)
    print(maxStreak)
f = open("DwiteHarshadNumbersData.txt", "r")
#Python short cut which loops as long as there is a new line in the file
for line in f:
    l = int(line)
    h = int(f.readline())
    findHarshad(l, h)
f.close()
|
normal
|
{
"blob_id": "2a95a68d8570a314b2b6e5731d7a695e5d7e7b30",
"index": 6261,
"step-1": "<mask token>\n\n\ndef isHarshad(n):\n if n % findSum(n) == 0:\n return True\n return False\n\n\ndef findHarshad(low, high):\n low = 500\n high = 525\n streak = 0\n maxStreak = 0\n for i in range(low, high + 1, 1):\n if isHarshad(i):\n streak = streak + 1\n else:\n maxStreak = max(streak, maxStreak)\n streak = 0\n maxStreak = max(streak, maxStreak)\n print(maxStreak)\n\n\n<mask token>\n",
"step-2": "def findSum(n):\n s = 0\n while n > 0:\n x = n % 10\n s = s + x\n n = n // 10\n return s\n\n\ndef isHarshad(n):\n if n % findSum(n) == 0:\n return True\n return False\n\n\ndef findHarshad(low, high):\n low = 500\n high = 525\n streak = 0\n maxStreak = 0\n for i in range(low, high + 1, 1):\n if isHarshad(i):\n streak = streak + 1\n else:\n maxStreak = max(streak, maxStreak)\n streak = 0\n maxStreak = max(streak, maxStreak)\n print(maxStreak)\n\n\n<mask token>\n",
"step-3": "def findSum(n):\n s = 0\n while n > 0:\n x = n % 10\n s = s + x\n n = n // 10\n return s\n\n\ndef isHarshad(n):\n if n % findSum(n) == 0:\n return True\n return False\n\n\ndef findHarshad(low, high):\n low = 500\n high = 525\n streak = 0\n maxStreak = 0\n for i in range(low, high + 1, 1):\n if isHarshad(i):\n streak = streak + 1\n else:\n maxStreak = max(streak, maxStreak)\n streak = 0\n maxStreak = max(streak, maxStreak)\n print(maxStreak)\n\n\n<mask token>\nfor line in f:\n l = f.readline()\n h = f.readline()\n findHarshad(l, h)\nf.close()\n",
"step-4": "def findSum(n):\n s = 0\n while n > 0:\n x = n % 10\n s = s + x\n n = n // 10\n return s\n\n\ndef isHarshad(n):\n if n % findSum(n) == 0:\n return True\n return False\n\n\ndef findHarshad(low, high):\n low = 500\n high = 525\n streak = 0\n maxStreak = 0\n for i in range(low, high + 1, 1):\n if isHarshad(i):\n streak = streak + 1\n else:\n maxStreak = max(streak, maxStreak)\n streak = 0\n maxStreak = max(streak, maxStreak)\n print(maxStreak)\n\n\nf = open('DwiteHarshadNumbersData.txt', 'r')\nfor line in f:\n l = f.readline()\n h = f.readline()\n findHarshad(l, h)\nf.close()\n",
"step-5": "#This program is a nice example of a core algorithm\n#Remove Individual Digits\n# To remove individual digits you use two operations\n# 1 MOD:\n\t# mod return the remainder after division. 5%2 = 1.\n\t# If we mod by 10 we get the units digit. 723%10 = 3\n# 2 Integer Division:\n#\tInteger division is when we divide and remove decimals;\n#\twe DO NOT round, simply cut them off. To integer divide \n# \tin Python we use //. For example 723//10 = 72. This is \n# \ta quick way to remove decimals. \n\n\ndef findSum(n):\n\ts = 0\t#store the sum of the values\n\twhile (n > 0):\n\t\tx = n % 10 #chop off units digit store in x\n\t\ts = s + x #add unit digit to sum, stored in s\n\t\tn = n // 10 #remove unit digit from n\n\t\t\n\treturn s\n\ndef isHarshad(n):\n\n\tif (n % findSum(n) == 0): #note that if a % b == 0 b is a factor of a\n\t\treturn True\n\treturn False\n\n\ndef findHarshad(low, high):\n\tlow = 500\n\thigh = 525\n\tstreak = 0\n\tmaxStreak = 0\n\n\tfor i in range(low,high + 1,1):\n\t\tif (isHarshad(i)):\n\t\t\tstreak = streak + 1;\n\t\telse:\n\t\t\tmaxStreak = max(streak,maxStreak)\n\t\t\tstreak = 0;\n\t\t#print(i,streak) #Test code for debugging\n\n\tmaxStreak = max(streak,maxStreak)\n\tprint(maxStreak)\n\nf = open(\"DwiteHarshadNumbersData.txt\", \"r\")\n#Python short cut which loops as long as there is a new line in the file\nfor line in f:\n\tl = f.readline()\n\th = f.readline()\n\tfindHarshad(l,h)\n\nf.close()\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# menuScrnTxt.py
# Created on Mon Mar 8 16:17:50 2021
# @author: jcj52436999
# menuScrnTxt.py-2021-03-08-1641-just noting a general restart in efforts here
import sys
def printTest2():
if 0 == 0 :
print(" ")
print("# jcj-jcj-jcj- TOP START OF PROGRAM - jcj-jcj-jcj-jcj-jcj-jcj-jcj-jcj-jcj")
thisProgramIs = "menuScrnTxt.py"
print(("Top of Start of program " + thisProgramIs))
print(" ")
return
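# note: the second printTest2 below shadows this definition; only the later one survives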
def printTest2():
if 0 == 0 :
print(" ")
print("# jcj-jcj-jcj- printTest2() - jcj-jcj-jcj-jcj-jcj-jcj-jcj-jcj-jcj")
thisProgramIs = "menuScrnTxt.py"
print(("printTest2() " + thisProgramIs))
print(" ")
return
# import
def menuInit(cmdArray):
if 0 == 0 :
print(" ")
print("# jcj-jcj-jcj- TOP START OF def menuInit - jcj-jcj-jcj-jcj-jcj-jcj-jcj-jcj-jcj")
thisProgramIs = "menuScrnTxt.py"
print(("start of menuInit of program " + thisProgramIs))
print(" ")
return
# jcj-jcj-jcj-jcj-jcj-jcj-jcj-jcj-jcj the Start of main jcj-jcjjcj-jcj-jcj-jcj-jcj-jcj-jcj-jcj-jcj
def main(argv=None):
#import sys
if argv is None:
argv = sys.argv
lenArgv = len(sys.argv)
pyScriptProgramName = sys.argv[0]
print(" ")
print("# jcj-jcj-jcj- START OF PROGRAM IN MAIN - jcj-jcj-jcj-jcj-jcj-jcj-jcj-jcj-jcj")
thisProgramIs = "menuScrnTxt.py"
print(("Start of program in Main " + thisProgramIs))
print(" ")
# import sys
import curses
import getpass
import os
import shutil
import subprocess
import pprint
# import pformat
from subprocess import Popen, PIPE, STDOUT
# import urwid
import numpy
import pygame
import tkinter
print (" ")
# Trying to install a favorite set of Ubu software.
#tempHold = tempHold[1]
## print( tempHold )
## cmdArray = " " ;
## cmdArray = menuLineReactions[ tempHold ]();
reEntered = (input( "Stop chosen, all RAM data will be lost, are you sure? y or n: " ))
if reEntered == "y" or reEntered == "Y":
return #sys.exit() sys.exit()
else:
print( "Staying for more entry. ")
#
w = 5
h = 99
cmdArrayWidth = w
cmdArrayHeight = h
cmdArray = {( w, h): " " for w in range(cmdArrayWidth) for h in range(cmdArrayHeight)}
menuInit( cmdArray )
# out_bytes.wait()
out_bytes = " "
print(("# jcj-jcj-jcj-" + thisProgramIs + " Function Main is ending with sys.exit(): ", out_bytes))
print(" ")
print("# jcj-jcj-jcj- END OF PROGRAM - jcj-jcj-jcj-jcj-jcj-jcj-jcj-jcj-jcj")
print(" ")
# jcj-jcj-jcj-jcj-jcj-jcj-jcj-jcj-jcj the End of main jcj-jcj-jcj-jcj-jcj-jcj-jcj-jcj-jcj
if __name__ == "__main__":
sys.exit(main())
# =============================================================================
#
# def main():
# ...
#
# if __name__ == "__main__":
# main()
#
#
# =============================================================================
|
normal
|
{
"blob_id": "e4f7e0c40edde4aac6ba0a7529a2e028a09689ae",
"index": 7260,
"step-1": "<mask token>\n\n\ndef printTest2():\n if 0 == 0:\n print(' ')\n print(\n '# jcj-jcj-jcj- TOP START OF PROGRAM - jcj-jcj-jcj-jcj-jcj-jcj-jcj-jcj-jcj'\n )\n thisProgramIs = 'menuScrnTxt.py'\n print('Top of Start of program ' + thisProgramIs)\n print(' ')\n return\n\n\n<mask token>\n\n\ndef menuInit(cmdArray):\n if 0 == 0:\n print(' ')\n print(\n '# jcj-jcj-jcj- TOP START OF def menuInit - jcj-jcj-jcj-jcj-jcj-jcj-jcj-jcj-jcj'\n )\n thisProgramIs = 'menuScrnTxt.py'\n print('start of menuInit of program ' + thisProgramIs)\n print(' ')\n return\n\n\ndef main(argv=None):\n if argv is None:\n argv = sys.argv\n lenArgv = len(sys.argv)\n pyScriptProgramName = sys.argv[0]\n print(' ')\n print(\n '# jcj-jcj-jcj- START OF PROGRAM IN MAIN - jcj-jcj-jcj-jcj-jcj-jcj-jcj-jcj-jcj'\n )\n thisProgramIs = 'menuScrnTxt.py'\n print('Start of program in Main ' + thisProgramIs)\n print(' ')\n import curses\n import getpass\n import os\n import shutil\n import subprocess\n import pprint\n from subprocess import Popen, PIPE, STDOUT\n import numpy\n import pygame\n import tkinter\n print(' ')\n reEntered = input(\n 'Stop chosen, all RAM data will be lost, are you sure? y or n: ')\n if reEntered == 'y' or reEntered == 'Y':\n return\n else:\n print('Staying for more entry. ')\n w = 5\n h = 99\n cmdArrayWidth = w\n cmdArrayHeight = h\n cmdArray = {(w, h): ' ' for w in range(cmdArrayWidth) for h in range(\n cmdArrayHeight)}\n menuInit(cmdArray)\n out_bytes = ' '\n print(('# jcj-jcj-jcj-' + thisProgramIs +\n ' Function Main is ending with sys.exit(): ', out_bytes))\n print(' ')\n print('# jcj-jcj-jcj- END OF PROGRAM - jcj-jcj-jcj-jcj-jcj-jcj-jcj-jcj-jcj'\n )\n print(' ')\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef printTest2():\n if 0 == 0:\n print(' ')\n print(\n '# jcj-jcj-jcj- TOP START OF PROGRAM - jcj-jcj-jcj-jcj-jcj-jcj-jcj-jcj-jcj'\n )\n thisProgramIs = 'menuScrnTxt.py'\n print('Top of Start of program ' + thisProgramIs)\n print(' ')\n return\n\n\ndef printTest2():\n if 0 == 0:\n print(' ')\n print(\n '# jcj-jcj-jcj- printTest2() - jcj-jcj-jcj-jcj-jcj-jcj-jcj-jcj-jcj'\n )\n thisProgramIs = 'menuScrnTxt.py'\n print('printTest2() ' + thisProgramIs)\n print(' ')\n return\n\n\ndef menuInit(cmdArray):\n if 0 == 0:\n print(' ')\n print(\n '# jcj-jcj-jcj- TOP START OF def menuInit - jcj-jcj-jcj-jcj-jcj-jcj-jcj-jcj-jcj'\n )\n thisProgramIs = 'menuScrnTxt.py'\n print('start of menuInit of program ' + thisProgramIs)\n print(' ')\n return\n\n\ndef main(argv=None):\n if argv is None:\n argv = sys.argv\n lenArgv = len(sys.argv)\n pyScriptProgramName = sys.argv[0]\n print(' ')\n print(\n '# jcj-jcj-jcj- START OF PROGRAM IN MAIN - jcj-jcj-jcj-jcj-jcj-jcj-jcj-jcj-jcj'\n )\n thisProgramIs = 'menuScrnTxt.py'\n print('Start of program in Main ' + thisProgramIs)\n print(' ')\n import curses\n import getpass\n import os\n import shutil\n import subprocess\n import pprint\n from subprocess import Popen, PIPE, STDOUT\n import numpy\n import pygame\n import tkinter\n print(' ')\n reEntered = input(\n 'Stop chosen, all RAM data will be lost, are you sure? y or n: ')\n if reEntered == 'y' or reEntered == 'Y':\n return\n else:\n print('Staying for more entry. ')\n w = 5\n h = 99\n cmdArrayWidth = w\n cmdArrayHeight = h\n cmdArray = {(w, h): ' ' for w in range(cmdArrayWidth) for h in range(\n cmdArrayHeight)}\n menuInit(cmdArray)\n out_bytes = ' '\n print(('# jcj-jcj-jcj-' + thisProgramIs +\n ' Function Main is ending with sys.exit(): ', out_bytes))\n print(' ')\n print('# jcj-jcj-jcj- END OF PROGRAM - jcj-jcj-jcj-jcj-jcj-jcj-jcj-jcj-jcj'\n )\n print(' ')\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef printTest2():\n if 0 == 0:\n print(' ')\n print(\n '# jcj-jcj-jcj- TOP START OF PROGRAM - jcj-jcj-jcj-jcj-jcj-jcj-jcj-jcj-jcj'\n )\n thisProgramIs = 'menuScrnTxt.py'\n print('Top of Start of program ' + thisProgramIs)\n print(' ')\n return\n\n\ndef printTest2():\n if 0 == 0:\n print(' ')\n print(\n '# jcj-jcj-jcj- printTest2() - jcj-jcj-jcj-jcj-jcj-jcj-jcj-jcj-jcj'\n )\n thisProgramIs = 'menuScrnTxt.py'\n print('printTest2() ' + thisProgramIs)\n print(' ')\n return\n\n\ndef menuInit(cmdArray):\n if 0 == 0:\n print(' ')\n print(\n '# jcj-jcj-jcj- TOP START OF def menuInit - jcj-jcj-jcj-jcj-jcj-jcj-jcj-jcj-jcj'\n )\n thisProgramIs = 'menuScrnTxt.py'\n print('start of menuInit of program ' + thisProgramIs)\n print(' ')\n return\n\n\ndef main(argv=None):\n if argv is None:\n argv = sys.argv\n lenArgv = len(sys.argv)\n pyScriptProgramName = sys.argv[0]\n print(' ')\n print(\n '# jcj-jcj-jcj- START OF PROGRAM IN MAIN - jcj-jcj-jcj-jcj-jcj-jcj-jcj-jcj-jcj'\n )\n thisProgramIs = 'menuScrnTxt.py'\n print('Start of program in Main ' + thisProgramIs)\n print(' ')\n import curses\n import getpass\n import os\n import shutil\n import subprocess\n import pprint\n from subprocess import Popen, PIPE, STDOUT\n import numpy\n import pygame\n import tkinter\n print(' ')\n reEntered = input(\n 'Stop chosen, all RAM data will be lost, are you sure? y or n: ')\n if reEntered == 'y' or reEntered == 'Y':\n return\n else:\n print('Staying for more entry. ')\n w = 5\n h = 99\n cmdArrayWidth = w\n cmdArrayHeight = h\n cmdArray = {(w, h): ' ' for w in range(cmdArrayWidth) for h in range(\n cmdArrayHeight)}\n menuInit(cmdArray)\n out_bytes = ' '\n print(('# jcj-jcj-jcj-' + thisProgramIs +\n ' Function Main is ending with sys.exit(): ', out_bytes))\n print(' ')\n print('# jcj-jcj-jcj- END OF PROGRAM - jcj-jcj-jcj-jcj-jcj-jcj-jcj-jcj-jcj'\n )\n print(' ')\n\n\nif __name__ == '__main__':\n sys.exit(main())\n",
"step-4": "import sys\n\n\ndef printTest2():\n if 0 == 0:\n print(' ')\n print(\n '# jcj-jcj-jcj- TOP START OF PROGRAM - jcj-jcj-jcj-jcj-jcj-jcj-jcj-jcj-jcj'\n )\n thisProgramIs = 'menuScrnTxt.py'\n print('Top of Start of program ' + thisProgramIs)\n print(' ')\n return\n\n\ndef printTest2():\n if 0 == 0:\n print(' ')\n print(\n '# jcj-jcj-jcj- printTest2() - jcj-jcj-jcj-jcj-jcj-jcj-jcj-jcj-jcj'\n )\n thisProgramIs = 'menuScrnTxt.py'\n print('printTest2() ' + thisProgramIs)\n print(' ')\n return\n\n\ndef menuInit(cmdArray):\n if 0 == 0:\n print(' ')\n print(\n '# jcj-jcj-jcj- TOP START OF def menuInit - jcj-jcj-jcj-jcj-jcj-jcj-jcj-jcj-jcj'\n )\n thisProgramIs = 'menuScrnTxt.py'\n print('start of menuInit of program ' + thisProgramIs)\n print(' ')\n return\n\n\ndef main(argv=None):\n if argv is None:\n argv = sys.argv\n lenArgv = len(sys.argv)\n pyScriptProgramName = sys.argv[0]\n print(' ')\n print(\n '# jcj-jcj-jcj- START OF PROGRAM IN MAIN - jcj-jcj-jcj-jcj-jcj-jcj-jcj-jcj-jcj'\n )\n thisProgramIs = 'menuScrnTxt.py'\n print('Start of program in Main ' + thisProgramIs)\n print(' ')\n import curses\n import getpass\n import os\n import shutil\n import subprocess\n import pprint\n from subprocess import Popen, PIPE, STDOUT\n import numpy\n import pygame\n import tkinter\n print(' ')\n reEntered = input(\n 'Stop chosen, all RAM data will be lost, are you sure? y or n: ')\n if reEntered == 'y' or reEntered == 'Y':\n return\n else:\n print('Staying for more entry. ')\n w = 5\n h = 99\n cmdArrayWidth = w\n cmdArrayHeight = h\n cmdArray = {(w, h): ' ' for w in range(cmdArrayWidth) for h in range(\n cmdArrayHeight)}\n menuInit(cmdArray)\n out_bytes = ' '\n print(('# jcj-jcj-jcj-' + thisProgramIs +\n ' Function Main is ending with sys.exit(): ', out_bytes))\n print(' ')\n print('# jcj-jcj-jcj- END OF PROGRAM - jcj-jcj-jcj-jcj-jcj-jcj-jcj-jcj-jcj'\n )\n print(' ')\n\n\nif __name__ == '__main__':\n sys.exit(main())\n",
"step-5": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n\n# menuScrnTxt.py\n# Created on Mon Mar 8 16:17:50 2021\n# @author: jcj52436999\n\n# menuScrnTxt.py-2021-03-08-1641-just noting a general restart in efforts here\n\nimport sys\n\ndef printTest2():\n \n if 0 == 0 :\n print(\" \")\n print(\"# jcj-jcj-jcj- TOP START OF PROGRAM - jcj-jcj-jcj-jcj-jcj-jcj-jcj-jcj-jcj\")\n thisProgramIs = \"menuScrnTxt.py\"\n print((\"Top of Start of program \" + thisProgramIs))\n print(\" \")\n return\n\n\ndef printTest2():\n\n if 0 == 0 :\n print(\" \")\n print(\"# jcj-jcj-jcj- printTest2() - jcj-jcj-jcj-jcj-jcj-jcj-jcj-jcj-jcj\")\n thisProgramIs = \"menuScrnTxt.py\"\n print((\"printTest2() \" + thisProgramIs))\n print(\" \")\n return\n\n\n# import \n\ndef menuInit(cmdArray):\n\n if 0 == 0 :\n print(\" \")\n print(\"# jcj-jcj-jcj- TOP START OF def menuInit - jcj-jcj-jcj-jcj-jcj-jcj-jcj-jcj-jcj\")\n thisProgramIs = \"menuScrnTxt.py\"\n print((\"start of menuInit of program \" + thisProgramIs))\n print(\" \") \n \n return\n\n\n# jcj-jcj-jcj-jcj-jcj-jcj-jcj-jcj-jcj the Start of main jcj-jcjjcj-jcj-jcj-jcj-jcj-jcj-jcj-jcj-jcj\ndef main(argv=None):\n #import sys\n if argv is None:\n argv = sys.argv\n lenArgv = len(sys.argv) \n pyScriptProgramName = sys.argv[0]\n\n\n print(\" \")\n print(\"# jcj-jcj-jcj- START OF PROGRAM IN MAIN - jcj-jcj-jcj-jcj-jcj-jcj-jcj-jcj-jcj\")\n thisProgramIs = \"menuScrnTxt.py\"\n print((\"Start of program in Main \" + thisProgramIs))\n print(\" \")\n\n # import sys\n import curses\n import getpass\n import os\n import shutil\n import subprocess\n import pprint\n # import pformat \n \n from subprocess import Popen, PIPE, STDOUT\n\n # import urwid\n import numpy\n import pygame\n import tkinter\n \n print (\" \") \n\n # Trying to install a favorite set of Ubu software.\n \n #tempHold = tempHold[1] \n ## print( tempHold )\n ## cmdArray = \" \" ; \n ## cmdArray = menuLineReactions[ tempHold ](); \n\n reEntered = (input( \"Stop chosen, all RAM data will be lost, are you sure? y or n: \" )) \n if reEntered == \"y\" or reEntered == \"Y\":\n return #sys.exit() sys.exit()\n else: \n print( \"Staying for more entry. \")\n \n # \n w = 5\n h = 99\n cmdArrayWidth = w\n cmdArrayHeight = h \n cmdArray = {( w, h): \" \" for w in range(cmdArrayWidth) for h in range(cmdArrayHeight)}\n\n menuInit( cmdArray )\n\n # out_bytes.wait() \n out_bytes = \" \" \n print((\"# jcj-jcj-jcj-\" + thisProgramIs + \" Function Main is ending with sys.exit(): \", out_bytes))\n\n print(\" \")\n print(\"# jcj-jcj-jcj- END OF PROGRAM - jcj-jcj-jcj-jcj-jcj-jcj-jcj-jcj-jcj\")\n print(\" \")\n# jcj-jcj-jcj-jcj-jcj-jcj-jcj-jcj-jcj the End of main jcj-jcj-jcj-jcj-jcj-jcj-jcj-jcj-jcj\nif __name__ == \"__main__\":\n sys.exit(main())\n\n\n# =============================================================================\n# \n# def main():\n# ...\n# \n# if __name__ == \"__main__\":\n# main()\n# \n# \n# =============================================================================\n\n\n\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
#!/usr/bin/env python
from application import app
import pprint
import sys
URL_PREFIX = '/pub/livemap'
class LoggingMiddleware(object):
def __init__(self, app):
self._app = app
def __call__(self, environ, resp):
errorlog = environ['wsgi.errors']
pprint.pprint(('REQUEST', environ), stream=errorlog)
def log_response(status, headers, *args):
pprint.pprint(('RESPONSE', status, headers), stream=errorlog)
return resp(status, headers, *args)
return self._app(environ, log_response)
class ScriptNameEdit(object):
def __init__(self, app):
self.app = app
def __call__(self, environ, start_response):
url = environ['SCRIPT_NAME']
environ['wsgi.url_scheme'] = 'https'
environ['SCRIPT_NAME'] = URL_PREFIX + url
return self.app(environ, start_response)
if '-l' not in sys.argv:
# app.wsgi_app = LoggingMiddleware(app.wsgi_app)
app.wsgi_app = ScriptNameEdit(app.wsgi_app)
application = app
if __name__ == "__main__":
app.run(host='0.0.0.0', threaded=True)
|
normal
|
{
"blob_id": "a2aa615ac660f13727a97cdd2feaca8f6e457da4",
"index": 4830,
"step-1": "<mask token>\n\n\nclass LoggingMiddleware(object):\n <mask token>\n <mask token>\n\n\nclass ScriptNameEdit(object):\n\n def __init__(self, app):\n self.app = app\n\n def __call__(self, environ, start_response):\n url = environ['SCRIPT_NAME']\n environ['wsgi.url_scheme'] = 'https'\n environ['SCRIPT_NAME'] = URL_PREFIX + url\n return self.app(environ, start_response)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass LoggingMiddleware(object):\n\n def __init__(self, app):\n self._app = app\n <mask token>\n\n\nclass ScriptNameEdit(object):\n\n def __init__(self, app):\n self.app = app\n\n def __call__(self, environ, start_response):\n url = environ['SCRIPT_NAME']\n environ['wsgi.url_scheme'] = 'https'\n environ['SCRIPT_NAME'] = URL_PREFIX + url\n return self.app(environ, start_response)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass LoggingMiddleware(object):\n\n def __init__(self, app):\n self._app = app\n\n def __call__(self, environ, resp):\n errorlog = environ['wsgi.errors']\n pprint.pprint(('REQUEST', environ), stream=errorlog)\n\n def log_response(status, headers, *args):\n pprint.pprint(('RESPONSE', status, headers), stream=errorlog)\n return resp(status, headers, *args)\n return self._app(environ, log_response)\n\n\nclass ScriptNameEdit(object):\n\n def __init__(self, app):\n self.app = app\n\n def __call__(self, environ, start_response):\n url = environ['SCRIPT_NAME']\n environ['wsgi.url_scheme'] = 'https'\n environ['SCRIPT_NAME'] = URL_PREFIX + url\n return self.app(environ, start_response)\n\n\n<mask token>\n",
"step-4": "from application import app\nimport pprint\nimport sys\nURL_PREFIX = '/pub/livemap'\n\n\nclass LoggingMiddleware(object):\n\n def __init__(self, app):\n self._app = app\n\n def __call__(self, environ, resp):\n errorlog = environ['wsgi.errors']\n pprint.pprint(('REQUEST', environ), stream=errorlog)\n\n def log_response(status, headers, *args):\n pprint.pprint(('RESPONSE', status, headers), stream=errorlog)\n return resp(status, headers, *args)\n return self._app(environ, log_response)\n\n\nclass ScriptNameEdit(object):\n\n def __init__(self, app):\n self.app = app\n\n def __call__(self, environ, start_response):\n url = environ['SCRIPT_NAME']\n environ['wsgi.url_scheme'] = 'https'\n environ['SCRIPT_NAME'] = URL_PREFIX + url\n return self.app(environ, start_response)\n\n\nif '-l' not in sys.argv:\n app.wsgi_app = ScriptNameEdit(app.wsgi_app)\napplication = app\nif __name__ == '__main__':\n app.run(host='0.0.0.0', threaded=True)\n",
"step-5": "#!/usr/bin/env python\nfrom application import app\nimport pprint\nimport sys\n\nURL_PREFIX = '/pub/livemap'\n\n\nclass LoggingMiddleware(object):\n def __init__(self, app):\n self._app = app\n\n def __call__(self, environ, resp):\n errorlog = environ['wsgi.errors']\n pprint.pprint(('REQUEST', environ), stream=errorlog)\n\n def log_response(status, headers, *args):\n pprint.pprint(('RESPONSE', status, headers), stream=errorlog)\n return resp(status, headers, *args)\n\n return self._app(environ, log_response)\n\n\nclass ScriptNameEdit(object):\n\n def __init__(self, app):\n self.app = app\n\n def __call__(self, environ, start_response):\n url = environ['SCRIPT_NAME']\n environ['wsgi.url_scheme'] = 'https'\n environ['SCRIPT_NAME'] = URL_PREFIX + url\n return self.app(environ, start_response)\n\n\nif '-l' not in sys.argv:\n # app.wsgi_app = LoggingMiddleware(app.wsgi_app)\n app.wsgi_app = ScriptNameEdit(app.wsgi_app)\n\napplication = app\n\nif __name__ == \"__main__\":\n\n app.run(host='0.0.0.0', threaded=True)\n",
"step-ids": [
4,
5,
6,
9,
10
]
}
|
[
4,
5,
6,
9,
10
] |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.6 on 2017-05-29 04:28
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('nomenclature', '0002_saloon_default'),
]
operations = [
migrations.AlterField(
model_name='supplier',
name='description',
field=models.CharField(blank=True, max_length=500, null=True, verbose_name='Описание'),
),
]
|
normal
|
{
"blob_id": "7817a42e5aee1786cfb3e8018bd7ca0a5e74749d",
"index": 8447,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('nomenclature', '0002_saloon_default')]\n operations = [migrations.AlterField(model_name='supplier', name=\n 'description', field=models.CharField(blank=True, max_length=500,\n null=True, verbose_name='Описание'))]\n",
"step-4": "from __future__ import unicode_literals\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n dependencies = [('nomenclature', '0002_saloon_default')]\n operations = [migrations.AlterField(model_name='supplier', name=\n 'description', field=models.CharField(blank=True, max_length=500,\n null=True, verbose_name='Описание'))]\n",
"step-5": "# -*- coding: utf-8 -*-\n# Generated by Django 1.9.6 on 2017-05-29 04:28\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('nomenclature', '0002_saloon_default'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='supplier',\n name='description',\n field=models.CharField(blank=True, max_length=500, null=True, verbose_name='Описание'),\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from app import create_app, db
import unittest
import json
class Test(unittest.TestCase):
def setUp(self):
"""Before each test, set up a blank database"""
self.app = create_app("configmodule.TestingConfig")
self.app.testing = True
self.client = self.app.test_client()
with self.app.app_context():
db.drop_all()
db.create_all()
# Called after every test
def tearDown(self):
with self.app.app_context():
db.session.remove()
db.drop_all()
def test_user(self):
# Create user
rv = self.client.post(
"/api/users/",
data=json.dumps({"email": "[email protected]", "password": "abc123"}),
)
rv_dict = json.loads(rv.data.decode())
assert rv.status_code == 200
assert rv_dict["id"] == 1
assert "password" not in rv_dict
assert rv_dict["email"] == "[email protected]"
        # Try logging in with the wrong password
rv = self.client.post("/api/users/login", data=json.dumps({"email": "[email protected]", "password": "abc1234"}))
assert rv.status_code == 401
        # Try logging in with the wrong email
rv = self.client.post("/api/users/login", data=json.dumps({"email": "[email protected]", "password": "abc1234"}))
assert rv.status_code == 401
        # Try logging in with the right password
rv = self.client.post("/api/users/login", data=json.dumps({"email": "[email protected]", "password": "abc123"}))
rv_dict = json.loads(rv.data.decode())
assert rv.status_code == 200
headers = {"Authorization": "Bearer " + rv_dict["access_token"]}
# Get the current user
rv = self.client.get("/api/users/", headers=headers)
rv_dict = json.loads(rv.data.decode())
assert rv.status_code == 200
assert rv_dict["email"] == "[email protected]"
rv = self.client.put("/api/users/", data=json.dumps({"name": "carl carlsson"}), headers=headers)
rv_dict = json.loads(rv.data.decode())
assert rv.status_code == 200
assert rv_dict["name"] == "Carl Carlsson"
def test_empty(self):
        # Try logging in without any users
rv = self.client.post("/api/users/login", data=json.dumps({"email": "[email protected]", "password": "abc123"}))
assert rv.status_code == 401
if __name__ == "__main__":
unittest.main()
|
normal
|
{
"blob_id": "56b4262e88793be366d8ffe0fe4427fdb2a99bd7",
"index": 7447,
"step-1": "<mask token>\n\n\nclass Test(unittest.TestCase):\n\n def setUp(self):\n \"\"\"Before each test, set up a blank database\"\"\"\n self.app = create_app('configmodule.TestingConfig')\n self.app.testing = True\n self.client = self.app.test_client()\n with self.app.app_context():\n db.drop_all()\n db.create_all()\n <mask token>\n\n def test_user(self):\n rv = self.client.post('/api/users/', data=json.dumps({'email':\n '[email protected]', 'password': 'abc123'}))\n rv_dict = json.loads(rv.data.decode())\n assert rv.status_code == 200\n assert rv_dict['id'] == 1\n assert 'password' not in rv_dict\n assert rv_dict['email'] == '[email protected]'\n rv = self.client.post('/api/users/login', data=json.dumps({'email':\n '[email protected]', 'password': 'abc1234'}))\n assert rv.status_code == 401\n rv = self.client.post('/api/users/login', data=json.dumps({'email':\n '[email protected]', 'password': 'abc1234'}))\n assert rv.status_code == 401\n rv = self.client.post('/api/users/login', data=json.dumps({'email':\n '[email protected]', 'password': 'abc123'}))\n rv_dict = json.loads(rv.data.decode())\n assert rv.status_code == 200\n headers = {'Authorization': 'Bearer ' + rv_dict['access_token']}\n rv = self.client.get('/api/users/', headers=headers)\n rv_dict = json.loads(rv.data.decode())\n assert rv.status_code == 200\n assert rv_dict['email'] == '[email protected]'\n rv = self.client.put('/api/users/', data=json.dumps({'name':\n 'carl carlsson'}), headers=headers)\n rv_dict = json.loads(rv.data.decode())\n assert rv.status_code == 200\n assert rv_dict['name'] == 'Carl Carlsson'\n\n def test_empty(self):\n rv = self.client.post('/api/users/login', data=json.dumps({'email':\n '[email protected]', 'password': 'abc123'}))\n assert rv.status_code == 401\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Test(unittest.TestCase):\n\n def setUp(self):\n \"\"\"Before each test, set up a blank database\"\"\"\n self.app = create_app('configmodule.TestingConfig')\n self.app.testing = True\n self.client = self.app.test_client()\n with self.app.app_context():\n db.drop_all()\n db.create_all()\n\n def tearDown(self):\n with self.app.app_context():\n db.session.remove()\n db.drop_all()\n\n def test_user(self):\n rv = self.client.post('/api/users/', data=json.dumps({'email':\n '[email protected]', 'password': 'abc123'}))\n rv_dict = json.loads(rv.data.decode())\n assert rv.status_code == 200\n assert rv_dict['id'] == 1\n assert 'password' not in rv_dict\n assert rv_dict['email'] == '[email protected]'\n rv = self.client.post('/api/users/login', data=json.dumps({'email':\n '[email protected]', 'password': 'abc1234'}))\n assert rv.status_code == 401\n rv = self.client.post('/api/users/login', data=json.dumps({'email':\n '[email protected]', 'password': 'abc1234'}))\n assert rv.status_code == 401\n rv = self.client.post('/api/users/login', data=json.dumps({'email':\n '[email protected]', 'password': 'abc123'}))\n rv_dict = json.loads(rv.data.decode())\n assert rv.status_code == 200\n headers = {'Authorization': 'Bearer ' + rv_dict['access_token']}\n rv = self.client.get('/api/users/', headers=headers)\n rv_dict = json.loads(rv.data.decode())\n assert rv.status_code == 200\n assert rv_dict['email'] == '[email protected]'\n rv = self.client.put('/api/users/', data=json.dumps({'name':\n 'carl carlsson'}), headers=headers)\n rv_dict = json.loads(rv.data.decode())\n assert rv.status_code == 200\n assert rv_dict['name'] == 'Carl Carlsson'\n\n def test_empty(self):\n rv = self.client.post('/api/users/login', data=json.dumps({'email':\n '[email protected]', 'password': 'abc123'}))\n assert rv.status_code == 401\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Test(unittest.TestCase):\n\n def setUp(self):\n \"\"\"Before each test, set up a blank database\"\"\"\n self.app = create_app('configmodule.TestingConfig')\n self.app.testing = True\n self.client = self.app.test_client()\n with self.app.app_context():\n db.drop_all()\n db.create_all()\n\n def tearDown(self):\n with self.app.app_context():\n db.session.remove()\n db.drop_all()\n\n def test_user(self):\n rv = self.client.post('/api/users/', data=json.dumps({'email':\n '[email protected]', 'password': 'abc123'}))\n rv_dict = json.loads(rv.data.decode())\n assert rv.status_code == 200\n assert rv_dict['id'] == 1\n assert 'password' not in rv_dict\n assert rv_dict['email'] == '[email protected]'\n rv = self.client.post('/api/users/login', data=json.dumps({'email':\n '[email protected]', 'password': 'abc1234'}))\n assert rv.status_code == 401\n rv = self.client.post('/api/users/login', data=json.dumps({'email':\n '[email protected]', 'password': 'abc1234'}))\n assert rv.status_code == 401\n rv = self.client.post('/api/users/login', data=json.dumps({'email':\n '[email protected]', 'password': 'abc123'}))\n rv_dict = json.loads(rv.data.decode())\n assert rv.status_code == 200\n headers = {'Authorization': 'Bearer ' + rv_dict['access_token']}\n rv = self.client.get('/api/users/', headers=headers)\n rv_dict = json.loads(rv.data.decode())\n assert rv.status_code == 200\n assert rv_dict['email'] == '[email protected]'\n rv = self.client.put('/api/users/', data=json.dumps({'name':\n 'carl carlsson'}), headers=headers)\n rv_dict = json.loads(rv.data.decode())\n assert rv.status_code == 200\n assert rv_dict['name'] == 'Carl Carlsson'\n\n def test_empty(self):\n rv = self.client.post('/api/users/login', data=json.dumps({'email':\n '[email protected]', 'password': 'abc123'}))\n assert rv.status_code == 401\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"step-4": "from app import create_app, db\nimport unittest\nimport json\n\n\nclass Test(unittest.TestCase):\n\n def setUp(self):\n \"\"\"Before each test, set up a blank database\"\"\"\n self.app = create_app('configmodule.TestingConfig')\n self.app.testing = True\n self.client = self.app.test_client()\n with self.app.app_context():\n db.drop_all()\n db.create_all()\n\n def tearDown(self):\n with self.app.app_context():\n db.session.remove()\n db.drop_all()\n\n def test_user(self):\n rv = self.client.post('/api/users/', data=json.dumps({'email':\n '[email protected]', 'password': 'abc123'}))\n rv_dict = json.loads(rv.data.decode())\n assert rv.status_code == 200\n assert rv_dict['id'] == 1\n assert 'password' not in rv_dict\n assert rv_dict['email'] == '[email protected]'\n rv = self.client.post('/api/users/login', data=json.dumps({'email':\n '[email protected]', 'password': 'abc1234'}))\n assert rv.status_code == 401\n rv = self.client.post('/api/users/login', data=json.dumps({'email':\n '[email protected]', 'password': 'abc1234'}))\n assert rv.status_code == 401\n rv = self.client.post('/api/users/login', data=json.dumps({'email':\n '[email protected]', 'password': 'abc123'}))\n rv_dict = json.loads(rv.data.decode())\n assert rv.status_code == 200\n headers = {'Authorization': 'Bearer ' + rv_dict['access_token']}\n rv = self.client.get('/api/users/', headers=headers)\n rv_dict = json.loads(rv.data.decode())\n assert rv.status_code == 200\n assert rv_dict['email'] == '[email protected]'\n rv = self.client.put('/api/users/', data=json.dumps({'name':\n 'carl carlsson'}), headers=headers)\n rv_dict = json.loads(rv.data.decode())\n assert rv.status_code == 200\n assert rv_dict['name'] == 'Carl Carlsson'\n\n def test_empty(self):\n rv = self.client.post('/api/users/login', data=json.dumps({'email':\n '[email protected]', 'password': 'abc123'}))\n assert rv.status_code == 401\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"step-5": "from app import create_app, db\nimport unittest\nimport json\n\n\nclass Test(unittest.TestCase):\n def setUp(self):\n \"\"\"Before each test, set up a blank database\"\"\"\n self.app = create_app(\"configmodule.TestingConfig\")\n self.app.testing = True\n\n self.client = self.app.test_client()\n\n with self.app.app_context():\n db.drop_all()\n db.create_all()\n\n # Called after every test\n def tearDown(self):\n with self.app.app_context():\n db.session.remove()\n db.drop_all()\n\n def test_user(self):\n # Create user\n rv = self.client.post(\n \"/api/users/\",\n data=json.dumps({\"email\": \"[email protected]\", \"password\": \"abc123\"}),\n )\n rv_dict = json.loads(rv.data.decode())\n\n assert rv.status_code == 200\n assert rv_dict[\"id\"] == 1\n assert \"password\" not in rv_dict\n assert rv_dict[\"email\"] == \"[email protected]\"\n\n # Try loggin with wrong PASSWORD\n rv = self.client.post(\"/api/users/login\", data=json.dumps({\"email\": \"[email protected]\", \"password\": \"abc1234\"}))\n assert rv.status_code == 401\n\n # Try loggin with wrong Email\n rv = self.client.post(\"/api/users/login\", data=json.dumps({\"email\": \"[email protected]\", \"password\": \"abc1234\"}))\n assert rv.status_code == 401\n\n # Try loggin with right PASSWORD\n rv = self.client.post(\"/api/users/login\", data=json.dumps({\"email\": \"[email protected]\", \"password\": \"abc123\"}))\n rv_dict = json.loads(rv.data.decode())\n assert rv.status_code == 200\n headers = {\"Authorization\": \"Bearer \" + rv_dict[\"access_token\"]}\n\n # Get the current user\n rv = self.client.get(\"/api/users/\", headers=headers)\n rv_dict = json.loads(rv.data.decode())\n assert rv.status_code == 200\n assert rv_dict[\"email\"] == \"[email protected]\"\n\n rv = self.client.put(\"/api/users/\", data=json.dumps({\"name\": \"carl carlsson\"}), headers=headers)\n rv_dict = json.loads(rv.data.decode())\n assert rv.status_code == 200\n assert rv_dict[\"name\"] == \"Carl Carlsson\"\n\n def test_empty(self):\n # Try loggin withou any users\n rv = self.client.post(\"/api/users/login\", data=json.dumps({\"email\": \"[email protected]\", \"password\": \"abc123\"}))\n assert rv.status_code == 401\n\n\nif __name__ == \"__main__\":\n unittest.main()\n",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
# -*- coding: utf-8 -*-
# the exercise is implemented in Python 3
manual_calc = 53 + 1.0/3
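# (exact value check: the integral of x*(x-1) from 2 to 6 is
#  [x**3/3 - x**2/2] evaluated from 2 to 6 = 54 - 2/3 = 53 + 1/3, as set above)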
def trapezoidal(f, a, b, n):
h = float(b - a)/n
result = 0.5*(f(a) + f(b))
for i in range(1, n):
result += f(a + i*h)
result *= h
return result
def rectangular(f, a, b, n):
h = float(b - a)/n
result = f(a+0.5*h)
for i in range(1, n):
result += f(a + 0.5*h + i*h)
result *= h
return result
trap_2 = trapezoidal(lambda x: x * (x - 1), 2, 6, 2)
trap_100 = trapezoidal(lambda x: x * (x - 1), 2, 6, 100)
rect_2 = rectangular(lambda x: x * (x - 1), 2, 6, 2)
rect_100 = rectangular(lambda x: x * (x - 1), 2, 6, 100)
print('Exact value of the integral: {}\n'.format(manual_calc))
print('Trapezoidal approximation:\n 2 trapezoids: {}\n 100 trapezoids: {}'
      .format(trap_2, trap_100))
print('Error of the trapezoidal approximation:\n 2 trapezoids: {}\n 100 trapezoids: {}\n'
      .format(abs(trap_2 - manual_calc), abs(trap_100 - manual_calc)))
print('Rectangle (midpoint) approximation:\n 2 rectangles: {}\n 100 rectangles: {}'
      .format(rect_2, rect_100))
print('Error of the rectangle (midpoint) approximation:\n 2 rectangles: {}\n 100 rectangles: {}'
      .format(abs(rect_2 - manual_calc), abs(rect_100 - manual_calc)))
|
normal
|
{
"blob_id": "4fbf5b4520aa4dca4c7cc80d56ba00f634d184bf",
"index": 3405,
"step-1": "<mask token>\n\n\ndef rectangular(f, a, b, n):\n h = float(b - a) / n\n result = f(a + 0.5 * h)\n for i in range(1, n):\n result += f(a + 0.5 * h + i * h)\n result *= h\n return result\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef trapezoidal(f, a, b, n):\n h = float(b - a) / n\n result = 0.5 * (f(a) + f(b))\n for i in range(1, n):\n result += f(a + i * h)\n result *= h\n return result\n\n\ndef rectangular(f, a, b, n):\n h = float(b - a) / n\n result = f(a + 0.5 * h)\n for i in range(1, n):\n result += f(a + 0.5 * h + i * h)\n result *= h\n return result\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef trapezoidal(f, a, b, n):\n h = float(b - a) / n\n result = 0.5 * (f(a) + f(b))\n for i in range(1, n):\n result += f(a + i * h)\n result *= h\n return result\n\n\ndef rectangular(f, a, b, n):\n h = float(b - a) / n\n result = f(a + 0.5 * h)\n for i in range(1, n):\n result += f(a + 0.5 * h + i * h)\n result *= h\n return result\n\n\n<mask token>\nprint('Точное значение интеграла: {}\\n'.format(manual_calc))\nprint(\"\"\"Аппроксимация трапециями:\n 2 трапеции: {}\n 100 трапеций: {}\"\"\".\n format(trap_2, trap_100))\nprint(\n \"\"\"Погрешность для аппроксимации трапециями:\n 2 трапеции: {}\n 100 трапеций: {}\n\"\"\"\n .format(abs(trap_2 - manual_calc), abs(trap_100 - manual_calc)))\nprint(\n \"\"\"Аппроксимация прямоугольниками:\n 2 прямоугольника: {}\n 100 прямоугольников: {}\"\"\"\n .format(rect_2, rect_100))\nprint(\n \"\"\"Погрешность для аппроксимации прямоугольниками:\n 2 прямоугольника: {}\n 100 прямоугольников: {}\"\"\"\n .format(abs(rect_2 - manual_calc), abs(rect_100 - manual_calc)))\n",
"step-4": "manual_calc = 53 + 1.0 / 3\n\n\ndef trapezoidal(f, a, b, n):\n h = float(b - a) / n\n result = 0.5 * (f(a) + f(b))\n for i in range(1, n):\n result += f(a + i * h)\n result *= h\n return result\n\n\ndef rectangular(f, a, b, n):\n h = float(b - a) / n\n result = f(a + 0.5 * h)\n for i in range(1, n):\n result += f(a + 0.5 * h + i * h)\n result *= h\n return result\n\n\ntrap_2 = trapezoidal(lambda x: x * (x - 1), 2, 6, 2)\ntrap_100 = trapezoidal(lambda x: x * (x - 1), 2, 6, 100)\nrect_2 = rectangular(lambda x: x * (x - 1), 2, 6, 2)\nrect_100 = rectangular(lambda x: x * (x - 1), 2, 6, 100)\nprint('Точное значение интеграла: {}\\n'.format(manual_calc))\nprint(\"\"\"Аппроксимация трапециями:\n 2 трапеции: {}\n 100 трапеций: {}\"\"\".\n format(trap_2, trap_100))\nprint(\n \"\"\"Погрешность для аппроксимации трапециями:\n 2 трапеции: {}\n 100 трапеций: {}\n\"\"\"\n .format(abs(trap_2 - manual_calc), abs(trap_100 - manual_calc)))\nprint(\n \"\"\"Аппроксимация прямоугольниками:\n 2 прямоугольника: {}\n 100 прямоугольников: {}\"\"\"\n .format(rect_2, rect_100))\nprint(\n \"\"\"Погрешность для аппроксимации прямоугольниками:\n 2 прямоугольника: {}\n 100 прямоугольников: {}\"\"\"\n .format(abs(rect_2 - manual_calc), abs(rect_100 - manual_calc)))\n",
"step-5": "# -*- coding: utf-8 -*-\n# упражнение выполнено на Python 3\n\n\nmanual_calc = 53 + 1.0/3\n\n\ndef trapezoidal(f, a, b, n):\n\t\n\th = float(b - a)/n\n\tresult = 0.5*(f(a) + f(b))\n\tfor i in range(1, n):\n\t\tresult += f(a + i*h)\n\tresult *= h\n\treturn result\n\n\ndef rectangular(f, a, b, n):\n\t\n\th = float(b - a)/n\n\tresult = f(a+0.5*h)\n\tfor i in range(1, n):\n\t\tresult += f(a + 0.5*h + i*h)\n\tresult *= h\n\treturn result\n\n\ntrap_2 = trapezoidal(lambda x: x * (x - 1), 2, 6, 2)\ntrap_100 = trapezoidal(lambda x: x * (x - 1), 2, 6, 100)\nrect_2 = rectangular(lambda x: x * (x - 1), 2, 6, 2)\nrect_100 = rectangular(lambda x: x * (x - 1), 2, 6, 100)\n\nprint('Точное значение интеграла: {}\\n'.format(manual_calc))\n\nprint('Аппроксимация трапециями:\\n 2 трапеции: {}\\n 100 трапеций: {}'\n .format(trap_2, trap_100))\n\nprint('Погрешность для аппроксимации трапециями:\\n 2 трапеции: {}\\n 100 трапеций: {}\\n'\n .format(abs(trap_2 - manual_calc), abs(trap_100 - manual_calc)))\n\nprint('Аппроксимация прямоугольниками:\\n 2 прямоугольника: {}\\n 100 прямоугольников: {}'\n .format(rect_2, rect_100))\n\nprint('Погрешность для аппроксимации прямоугольниками:\\n 2 прямоугольника: {}\\n 100 прямоугольников: {}'\n .format(abs(rect_2 - manual_calc), abs(rect_100 - manual_calc)))",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
import proactive
import unittest
import numbers
import os
import pytest
class RestApiTestSuite(unittest.TestCase):
"""Advanced test cases."""
gateway = None
username = ""
password = ""
@pytest.fixture(autouse=True)
def setup_gateway(self, metadata):
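        # `metadata` is an externally provided pytest fixture (e.g. from a
        # conftest or the pytest-metadata plugin) carrying connection settings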
self.gateway = proactive.ProActiveGateway(metadata['proactive_url'], debug=True)
self.username = metadata['username']
self.password = metadata['password']
def test_rm_model_hosts(self):
self.gateway.connect(self.username, self.password)
restapi = self.gateway.getProactiveRestApi()
hosts = restapi.get_rm_model_hosts()
self.assertIsNotNone(hosts)
self.assertTrue(isinstance(hosts, list))
self.gateway.disconnect()
def test_rm_model_nodesources(self):
self.gateway.connect(self.username, self.password)
restapi = self.gateway.getProactiveRestApi()
nodesources = restapi.get_rm_model_nodesources()
self.assertIsNotNone(nodesources)
self.assertTrue(isinstance(nodesources, list))
self.gateway.disconnect()
def test_rm_model_tokens(self):
self.gateway.connect(self.username, self.password)
restapi = self.gateway.getProactiveRestApi()
tokens = restapi.get_rm_model_tokens()
self.assertIsNotNone(tokens)
self.assertTrue(isinstance(tokens, list))
self.gateway.disconnect()
if __name__ == '__main__':
unittest.main()
|
normal
|
{
"blob_id": "da2c615b8fab8de6bd63864508da254a46e65bb8",
"index": 4543,
"step-1": "<mask token>\n\n\nclass RestApiTestSuite(unittest.TestCase):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n @pytest.fixture(autouse=True)\n def setup_gateway(self, metadata):\n self.gateway = proactive.ProActiveGateway(metadata['proactive_url'],\n debug=True)\n self.username = metadata['username']\n self.password = metadata['password']\n <mask token>\n\n def test_rm_model_nodesources(self):\n self.gateway.connect(self.username, self.password)\n restapi = self.gateway.getProactiveRestApi()\n nodesources = restapi.get_rm_model_nodesources()\n self.assertIsNotNone(nodesources)\n self.assertTrue(isinstance(nodesources, list))\n self.gateway.disconnect()\n\n def test_rm_model_tokens(self):\n self.gateway.connect(self.username, self.password)\n restapi = self.gateway.getProactiveRestApi()\n tokens = restapi.get_rm_model_tokens()\n self.assertIsNotNone(tokens)\n self.assertTrue(isinstance(tokens, list))\n self.gateway.disconnect()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass RestApiTestSuite(unittest.TestCase):\n \"\"\"Advanced test cases.\"\"\"\n gateway = None\n username = ''\n password = ''\n\n @pytest.fixture(autouse=True)\n def setup_gateway(self, metadata):\n self.gateway = proactive.ProActiveGateway(metadata['proactive_url'],\n debug=True)\n self.username = metadata['username']\n self.password = metadata['password']\n\n def test_rm_model_hosts(self):\n self.gateway.connect(self.username, self.password)\n restapi = self.gateway.getProactiveRestApi()\n hosts = restapi.get_rm_model_hosts()\n self.assertIsNotNone(hosts)\n self.assertTrue(isinstance(hosts, list))\n self.gateway.disconnect()\n\n def test_rm_model_nodesources(self):\n self.gateway.connect(self.username, self.password)\n restapi = self.gateway.getProactiveRestApi()\n nodesources = restapi.get_rm_model_nodesources()\n self.assertIsNotNone(nodesources)\n self.assertTrue(isinstance(nodesources, list))\n self.gateway.disconnect()\n\n def test_rm_model_tokens(self):\n self.gateway.connect(self.username, self.password)\n restapi = self.gateway.getProactiveRestApi()\n tokens = restapi.get_rm_model_tokens()\n self.assertIsNotNone(tokens)\n self.assertTrue(isinstance(tokens, list))\n self.gateway.disconnect()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass RestApiTestSuite(unittest.TestCase):\n \"\"\"Advanced test cases.\"\"\"\n gateway = None\n username = ''\n password = ''\n\n @pytest.fixture(autouse=True)\n def setup_gateway(self, metadata):\n self.gateway = proactive.ProActiveGateway(metadata['proactive_url'],\n debug=True)\n self.username = metadata['username']\n self.password = metadata['password']\n\n def test_rm_model_hosts(self):\n self.gateway.connect(self.username, self.password)\n restapi = self.gateway.getProactiveRestApi()\n hosts = restapi.get_rm_model_hosts()\n self.assertIsNotNone(hosts)\n self.assertTrue(isinstance(hosts, list))\n self.gateway.disconnect()\n\n def test_rm_model_nodesources(self):\n self.gateway.connect(self.username, self.password)\n restapi = self.gateway.getProactiveRestApi()\n nodesources = restapi.get_rm_model_nodesources()\n self.assertIsNotNone(nodesources)\n self.assertTrue(isinstance(nodesources, list))\n self.gateway.disconnect()\n\n def test_rm_model_tokens(self):\n self.gateway.connect(self.username, self.password)\n restapi = self.gateway.getProactiveRestApi()\n tokens = restapi.get_rm_model_tokens()\n self.assertIsNotNone(tokens)\n self.assertTrue(isinstance(tokens, list))\n self.gateway.disconnect()\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"step-4": "import proactive\nimport unittest\nimport numbers\nimport os\nimport pytest\n\n\nclass RestApiTestSuite(unittest.TestCase):\n \"\"\"Advanced test cases.\"\"\"\n gateway = None\n username = ''\n password = ''\n\n @pytest.fixture(autouse=True)\n def setup_gateway(self, metadata):\n self.gateway = proactive.ProActiveGateway(metadata['proactive_url'],\n debug=True)\n self.username = metadata['username']\n self.password = metadata['password']\n\n def test_rm_model_hosts(self):\n self.gateway.connect(self.username, self.password)\n restapi = self.gateway.getProactiveRestApi()\n hosts = restapi.get_rm_model_hosts()\n self.assertIsNotNone(hosts)\n self.assertTrue(isinstance(hosts, list))\n self.gateway.disconnect()\n\n def test_rm_model_nodesources(self):\n self.gateway.connect(self.username, self.password)\n restapi = self.gateway.getProactiveRestApi()\n nodesources = restapi.get_rm_model_nodesources()\n self.assertIsNotNone(nodesources)\n self.assertTrue(isinstance(nodesources, list))\n self.gateway.disconnect()\n\n def test_rm_model_tokens(self):\n self.gateway.connect(self.username, self.password)\n restapi = self.gateway.getProactiveRestApi()\n tokens = restapi.get_rm_model_tokens()\n self.assertIsNotNone(tokens)\n self.assertTrue(isinstance(tokens, list))\n self.gateway.disconnect()\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"step-5": "import proactive\nimport unittest\nimport numbers\nimport os\nimport pytest\n\n\nclass RestApiTestSuite(unittest.TestCase):\n \"\"\"Advanced test cases.\"\"\"\n\n gateway = None\n username = \"\"\n password = \"\"\n\n @pytest.fixture(autouse=True)\n def setup_gateway(self, metadata):\n self.gateway = proactive.ProActiveGateway(metadata['proactive_url'], debug=True)\n self.username = metadata['username']\n self.password = metadata['password']\n\n def test_rm_model_hosts(self):\n self.gateway.connect(self.username, self.password)\n restapi = self.gateway.getProactiveRestApi()\n hosts = restapi.get_rm_model_hosts()\n self.assertIsNotNone(hosts)\n self.assertTrue(isinstance(hosts, list))\n self.gateway.disconnect()\n\n def test_rm_model_nodesources(self):\n self.gateway.connect(self.username, self.password)\n restapi = self.gateway.getProactiveRestApi()\n nodesources = restapi.get_rm_model_nodesources()\n self.assertIsNotNone(nodesources)\n self.assertTrue(isinstance(nodesources, list))\n self.gateway.disconnect()\n\n def test_rm_model_tokens(self):\n self.gateway.connect(self.username, self.password)\n restapi = self.gateway.getProactiveRestApi()\n tokens = restapi.get_rm_model_tokens()\n self.assertIsNotNone(tokens)\n self.assertTrue(isinstance(tokens, list))\n self.gateway.disconnect()\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"step-ids": [
4,
7,
8,
9,
10
]
}
|
[
4,
7,
8,
9,
10
] |
'''
Given an expression with numbers, brackets and operators; in this task only the brackets matter. Brackets come in three types -- "{}", "()" and "[]" -- and each pair delimits the scope of a sub-expression. Every bracket that is opened must be closed by a bracket of the same type, and bracket scopes must not intersect. Decide whether the expression is correct in this sense; ignore the operators and operands.
Input: An expression with brackets of different types.
Output: A boolean: whether the expression is correct.
Example:

checkio("((5+3)*2+1)") == True
checkio("{[(3+1)+2]+}") == True
checkio("(3+{1-1)}") == False
checkio("[1+1]+(2*2)-{3/3}") == True
checkio("(({[(((1)-2)+3)-3]/3}-3)") == False
'''
def checkio(data):
    # replace this with your solution
return True or False
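

# A minimal stack-based sketch of a working solution, added for illustration
# (`checkio_ref` is a hypothetical name, not part of the original exercise):
def checkio_ref(data):
    # match every closing bracket against the most recently opened one
    pairs = {')': '(', ']': '[', '}': '{'}
    stack = []
    for ch in data:
        if ch in '([{':
            stack.append(ch)
        elif ch in pairs:
            if not stack or stack.pop() != pairs[ch]:
                return False
    # correct only if every opened bracket has been closed
    return not stack


assert checkio_ref("((5+3)*2+1)") is True
assert checkio_ref("(3+{1-1)}") is False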
|
normal
|
{
"blob_id": "f69b4d022ebed5a0b660f55704bbe762d5d765d5",
"index": 1332,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef checkio(data):\n return True or False\n",
"step-3": "'''\nGiven an expression with numbers, brackets and operators. But in this task only brackets are important. Brackets can be one of three types -- \"{}\" \"()\" \"[]\". Brackets are determine the scope or restricted some expression. So each if was opened, then must be closed with the same type. The scopes of brackets must not intersected. You should to make a decision correct an expression or not. Don't care about operators and operands.\nInput: An expression with different of types brackets.\nOutput: A boolean. Correct an expression or not.\nExample:\n?\n1\n2\n3\n4\n5\ncheckio(\"((5+3)*2+1)\") == True\ncheckio(\"{[(3+1)+2]+}\") == True\ncheckio(\"(3+{1-1)}\") == False\ncheckio(\"[1+1]+(2*2)-{3/3}\") == True\ncheckio(\"(({[(((1)-2)+3)-3]/3}-3)\") == False\n\n'''\ndef checkio(data):\n #replace this for solution\n return True or False",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
'''
O(n) time complexity
O(n) space complexity
'''
class Solution:
def twoSum(self, nums, target):
"""
:type nums: List[int]
:type target: int
:rtype: List[int]
"""
seenum = dict()
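        # map each value seen so far to the index of its first occurrence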
for idx, val in enumerate(nums):
if target - val in seenum:
return [seenum[target-val], idx]
seenum[val] = idx
return [-1, -1]
if __name__ == "__main__":
nums = [2,7,11,15]
target = 9
sol = Solution()
print(sol.twoSum(nums, target))
|
normal
|
{
"blob_id": "b3f62c331ff4ae9f909fc90cc7303997b32daceb",
"index": 1876,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Solution:\n <mask token>\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Solution:\n\n def twoSum(self, nums, target):\n \"\"\"\n :type nums: List[int]\n :type target: int\n :rtype: List[int]\n \"\"\"\n seenum = dict()\n for idx, val in enumerate(nums):\n if target - val in seenum:\n return [seenum[target - val], idx]\n seenum[val] = idx\n return [-1, -1]\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass Solution:\n\n def twoSum(self, nums, target):\n \"\"\"\n :type nums: List[int]\n :type target: int\n :rtype: List[int]\n \"\"\"\n seenum = dict()\n for idx, val in enumerate(nums):\n if target - val in seenum:\n return [seenum[target - val], idx]\n seenum[val] = idx\n return [-1, -1]\n\n\nif __name__ == '__main__':\n nums = [2, 7, 11, 15]\n target = 9\n sol = Solution()\n print(sol.twoSum(nums, target))\n",
"step-5": "'''\nO(n) time complexity\nO(n) space complexity\n'''\n\nclass Solution:\n def twoSum(self, nums, target):\n \"\"\"\n :type nums: List[int]\n :type target: int\n :rtype: List[int]\n \"\"\"\n seenum = dict()\n for idx, val in enumerate(nums):\n if target - val in seenum:\n return [seenum[target-val], idx]\n seenum[val] = idx\n return [-1, -1]\n\nif __name__ == \"__main__\":\n nums = [2,7,11,15]\n target = 9\n sol = Solution()\n print(sol.twoSum(nums, target))",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
#!/usr/bin/env python3
# --------------------( LICENSE )--------------------
# Copyright (c) 2014-2023 Beartype authors.
# See "LICENSE" for further details.
'''
Project-wide **standard Python module globals** (i.e., global constants
describing modules and packages bundled with CPython's standard library).
This private submodule is *not* intended for importation by downstream callers.
'''
# ....................{ IMPORTS }....................
# ....................{ NAMES }....................
BUILTINS_MODULE_NAME = 'builtins'
'''
Fully-qualified name of the **builtins module** (i.e., objects defined by the
standard :mod:`builtins` module and thus globally available by default
*without* requiring explicit importation).
'''
|
normal
|
{
"blob_id": "a42f36fca2f65d0c5c9b65055af1814d8b4b3d42",
"index": 89,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nBUILTINS_MODULE_NAME = 'builtins'\n<mask token>\n",
"step-3": "#!/usr/bin/env python3\n# --------------------( LICENSE )--------------------\n# Copyright (c) 2014-2023 Beartype authors.\n# See \"LICENSE\" for further details.\n\n'''\nProject-wide **standard Python module globals** (i.e., global constants\ndescribing modules and packages bundled with CPython's standard library).\n\nThis private submodule is *not* intended for importation by downstream callers.\n'''\n\n# ....................{ IMPORTS }....................\n\n# ....................{ NAMES }....................\nBUILTINS_MODULE_NAME = 'builtins'\n'''\nFully-qualified name of the **builtins module** (i.e., objects defined by the\nstandard :mod:`builtins` module and thus globally available by default\n*without* requiring explicit importation).\n'''\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
import numpy as np
from ARA import *
from State import *
def theta_given_s(theta, q):
"""
    Probability of a random event theta given current state s.
Args:
theta: Random event
s = [q, r, w]: State
Returns:
Unnormalized probability of the random event.
"""
if q == 0:
return .3333
else:
if theta == 0:
return 0.25
elif theta == 1:
return 0.25
else:
return 0.5
def new_w(w, d):
"""
Multi-period commitments in the next epoch.
Args:
d: Defender's actions
m: Number of non multi-period commitments. (i.e. The first m defender's actions are not multi-period)
s = [q, r, w]: Current State
tau: An array denoting the length of each multi-period commitment.
Returns:
next_w: Number of decision epochs remaining in the next epoch.
"""
if w.sum() > 0:
next_w = w.copy()
next_w[next_w > 0] -= 1
return next_w
else:
if d[0] == 1:
return np.array([51,0,0])
elif d[1] == 1:
return np.array([0,51,0])
else:
return np.array([0,0,51])
def attraction_h(next_r, a):
"""
Attraction function of resource (h in the paper).
Args:
next_r: Probable resource array in the next epoch.
next_w: Multi-period commitments in the next epoch.
d: Defender's actions
a: Attacker's actions
s = [q, r, w]: Current State
rho_da: A map mapping from (d_i, a_j) to response quality
rho_dq: A map mapping from (d_i, q) to response quality
h_above: attraction value when response quality is above threshold
h_below: attraction value when response quality is below threshold
dict_r: map resource to corresponding level.
thres: Threshold for a good response.
Returns:
Attraction value.
"""
if a == 0:
if next_r == 9:
return 0.8
elif next_r == 14:
return 0.1
else:
return 0.1
elif a == 1:
if next_r == 9:
return 0.1
elif next_r == 14:
return 0.1
else:
return 0.8
elif a == 2:
if next_r == 9:
return 0.1
elif next_r == 14:
return 0.3
else:
return 0.6
elif a == 3:
if next_r == 9:
return 0.1
elif next_r == 14:
return 0.2
else:
return 0.7
else:
if next_r == 9:
return 0.1
elif next_r == 14:
return 0.4
else:
return 0.5
def attraction_g(next_q, q, d, a):
"""
Attraction function of operational conditions (g in the paper).
Args:
next_q: Operational conditions in the next epoch.
next_r: Probable resource array in the next epoch.
next_w: Multi-period commitments in the next epoch.
d: Defender's actions
a: Attacker's actions
s = [q, r, w]: Current State
rho_da: A map mapping from (d_i, a_j) to response quality
rho_dq: A map mapping from (d_i, q) to response quality
g_above: attraction value when response quality is above threshold
g_below: attraction value when response quality is below threshold
thres: Threshold for a good response.
Returns:
Attraction value.
"""
if a == 0:
if next_q == 0:
xi_D = 8
else:
xi_D = 1
elif a == 1:
xi_D = 1
elif a == 2:
if next_q == 0:
xi_D = 1
else:
xi_D = 3
elif a == 3:
if next_q == 0:
xi_D = 1
else:
xi_D = 2
else:
if next_q == 0:
xi_D = 1
else:
xi_D = 4
dqq = 0
if next_q == 1 and q == 0:
if d[3] == 1:
dqq = 1
elif np.sum(d[6:]) == 3:
dqq = 1
elif next_q == 0 and q == 1:
if d[5] == 1:
dqq = 1
elif np.sum(d[6:]) == 0:
dqq = 1
return xi_D + dqq
def trans_prob(next_s, q, d):
"""
Probability of decision d from state s to state next_s
Args:
next_s = [next_q, next_r, next_w]: Next State
d: Defender's actions
s = [q, r, w]: Current State
m: Number of non multi-period commitments. (i.e. The first m defender's actions are not multi-period)
tau: An array denoting the length of each multi-period commitment.
c (nr * nd): cost of defender's each action
h_above: attraction value when response quality is above threshold
h_below: attraction value when response quality is below threshold
g_above: attraction value when response quality is above threshold
g_below: attraction value when response quality is below threshold
dict_r: map resource to corresponding level.
order: Order of ARA. Currently only 0 and 1 are available.
Returns:
prob: Probability.
"""
next_q, next_r, next_w = next_s
A_actions = [0, 1, 2, 3, 4]
prob = 0
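    # marginalize over the attacker's actions a:
    # P(next_s | s, d) = sum_a P(a | s) * h(next_r | a) * g(next_q) / (g(next_q) + g(1 - next_q))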
for a in A_actions:
prob_r = attraction_h(next_r[0], a)
q1 = attraction_g(next_q[0], q, d, a)
q2 = attraction_g(1-next_q[0], q, d, a)
prob_q = q1 / (q1 + q2)
prob += a_given_s(a, q) * prob_r * prob_q
return prob
|
normal
|
{
"blob_id": "87f3885b4357d66a745932f3c79804e6c15a57fa",
"index": 3162,
"step-1": "<mask token>\n\n\ndef new_w(w, d):\n \"\"\"\n Multi-period commitments in the next epoch.\n Args:\n d: Defender's actions\n m: Number of non multi-period commitments. (i.e. The first m defender's actions are not multi-period)\n s = [q, r, w]: Current State\n tau: An array denoting the length of each multi-period commitment.\n Returns:\n next_w: Number of decision epochs remaining in the next epoch.\n \"\"\"\n if w.sum() > 0:\n next_w = w.copy()\n next_w[next_w > 0] -= 1\n return next_w\n elif d[0] == 1:\n return np.array([51, 0, 0])\n elif d[1] == 1:\n return np.array([0, 51, 0])\n else:\n return np.array([0, 0, 51])\n\n\ndef attraction_h(next_r, a):\n \"\"\"\n Attraction function of resource (h in the paper).\n Args:\n next_r: Probable resource array in the next epoch.\n next_w: Multi-period commitments in the next epoch.\n d: Defender's actions\n a: Attacker's actions\n s = [q, r, w]: Current State\n rho_da: A map mapping from (d_i, a_j) to response quality\n rho_dq: A map mapping from (d_i, q) to response quality\n h_above: attraction value when response quality is above threshold\n h_below: attraction value when response quality is below threshold\n dict_r: map resource to corresponding level.\n thres: Threshold for a good response.\n Returns:\n Attraction value.\n \"\"\"\n if a == 0:\n if next_r == 9:\n return 0.8\n elif next_r == 14:\n return 0.1\n else:\n return 0.1\n elif a == 1:\n if next_r == 9:\n return 0.1\n elif next_r == 14:\n return 0.1\n else:\n return 0.8\n elif a == 2:\n if next_r == 9:\n return 0.1\n elif next_r == 14:\n return 0.3\n else:\n return 0.6\n elif a == 3:\n if next_r == 9:\n return 0.1\n elif next_r == 14:\n return 0.2\n else:\n return 0.7\n elif next_r == 9:\n return 0.1\n elif next_r == 14:\n return 0.4\n else:\n return 0.5\n\n\n<mask token>\n\n\ndef trans_prob(next_s, q, d):\n \"\"\"\n Probability of decision d from state s to state next_s\n Args:\n next_s = [next_q, next_r, next_w]: Next State\n d: Defender's actions\n s = [q, r, w]: Current State\n m: Number of non multi-period commitments. (i.e. The first m defender's actions are not multi-period)\n tau: An array denoting the length of each multi-period commitment.\n c (nr * nd): cost of defender's each action\n h_above: attraction value when response quality is above threshold\n h_below: attraction value when response quality is below threshold\n g_above: attraction value when response quality is above threshold\n g_below: attraction value when response quality is below threshold\n dict_r: map resource to corresponding level.\n order: Order of ARA. Currently only 0 and 1 are available.\n Returns:\n prob: Probability.\n \"\"\"\n next_q, next_r, next_w = next_s\n A_actions = [0, 1, 2, 3, 4]\n prob = 0\n for a in A_actions:\n prob_r = attraction_h(next_r[0], a)\n q1 = attraction_g(next_q[0], q, d, a)\n q2 = attraction_g(1 - next_q[0], q, d, a)\n prob_q = q1 / (q1 + q2)\n prob += a_given_s(a, q) * prob_r * prob_q\n return prob\n",
"step-2": "<mask token>\n\n\ndef new_w(w, d):\n \"\"\"\n Multi-period commitments in the next epoch.\n Args:\n d: Defender's actions\n m: Number of non multi-period commitments. (i.e. The first m defender's actions are not multi-period)\n s = [q, r, w]: Current State\n tau: An array denoting the length of each multi-period commitment.\n Returns:\n next_w: Number of decision epochs remaining in the next epoch.\n \"\"\"\n if w.sum() > 0:\n next_w = w.copy()\n next_w[next_w > 0] -= 1\n return next_w\n elif d[0] == 1:\n return np.array([51, 0, 0])\n elif d[1] == 1:\n return np.array([0, 51, 0])\n else:\n return np.array([0, 0, 51])\n\n\ndef attraction_h(next_r, a):\n \"\"\"\n Attraction function of resource (h in the paper).\n Args:\n next_r: Probable resource array in the next epoch.\n next_w: Multi-period commitments in the next epoch.\n d: Defender's actions\n a: Attacker's actions\n s = [q, r, w]: Current State\n rho_da: A map mapping from (d_i, a_j) to response quality\n rho_dq: A map mapping from (d_i, q) to response quality\n h_above: attraction value when response quality is above threshold\n h_below: attraction value when response quality is below threshold\n dict_r: map resource to corresponding level.\n thres: Threshold for a good response.\n Returns:\n Attraction value.\n \"\"\"\n if a == 0:\n if next_r == 9:\n return 0.8\n elif next_r == 14:\n return 0.1\n else:\n return 0.1\n elif a == 1:\n if next_r == 9:\n return 0.1\n elif next_r == 14:\n return 0.1\n else:\n return 0.8\n elif a == 2:\n if next_r == 9:\n return 0.1\n elif next_r == 14:\n return 0.3\n else:\n return 0.6\n elif a == 3:\n if next_r == 9:\n return 0.1\n elif next_r == 14:\n return 0.2\n else:\n return 0.7\n elif next_r == 9:\n return 0.1\n elif next_r == 14:\n return 0.4\n else:\n return 0.5\n\n\ndef attraction_g(next_q, q, d, a):\n \"\"\"\n Attraction function of operational conditions (g in the paper).\n Args:\n next_q: Operational conditions in the next epoch.\n next_r: Probable resource array in the next epoch.\n next_w: Multi-period commitments in the next epoch.\n d: Defender's actions\n a: Attacker's actions\n s = [q, r, w]: Current State\n rho_da: A map mapping from (d_i, a_j) to response quality\n rho_dq: A map mapping from (d_i, q) to response quality\n g_above: attraction value when response quality is above threshold\n g_below: attraction value when response quality is below threshold\n thres: Threshold for a good response.\n Returns:\n Attraction value.\n \"\"\"\n if a == 0:\n if next_q == 0:\n xi_D = 8\n else:\n xi_D = 1\n elif a == 1:\n xi_D = 1\n elif a == 2:\n if next_q == 0:\n xi_D = 1\n else:\n xi_D = 3\n elif a == 3:\n if next_q == 0:\n xi_D = 1\n else:\n xi_D = 2\n elif next_q == 0:\n xi_D = 1\n else:\n xi_D = 4\n dqq = 0\n if next_q == 1 and q == 0:\n if d[3] == 1:\n dqq = 1\n elif np.sum(d[6:]) == 3:\n dqq = 1\n elif next_q == 0 and q == 1:\n if d[5] == 1:\n dqq = 1\n elif np.sum(d[6:]) == 0:\n dqq = 1\n return xi_D + dqq\n\n\ndef trans_prob(next_s, q, d):\n \"\"\"\n Probability of decision d from state s to state next_s\n Args:\n next_s = [next_q, next_r, next_w]: Next State\n d: Defender's actions\n s = [q, r, w]: Current State\n m: Number of non multi-period commitments. (i.e. 
The first m defender's actions are not multi-period)\n tau: An array denoting the length of each multi-period commitment.\n c (nr * nd): cost of defender's each action\n h_above: attraction value when response quality is above threshold\n h_below: attraction value when response quality is below threshold\n g_above: attraction value when response quality is above threshold\n g_below: attraction value when response quality is below threshold\n dict_r: map resource to corresponding level.\n order: Order of ARA. Currently only 0 and 1 are available.\n Returns:\n prob: Probability.\n \"\"\"\n next_q, next_r, next_w = next_s\n A_actions = [0, 1, 2, 3, 4]\n prob = 0\n for a in A_actions:\n prob_r = attraction_h(next_r[0], a)\n q1 = attraction_g(next_q[0], q, d, a)\n q2 = attraction_g(1 - next_q[0], q, d, a)\n prob_q = q1 / (q1 + q2)\n prob += a_given_s(a, q) * prob_r * prob_q\n return prob\n",
"step-3": "<mask token>\n\n\ndef theta_given_s(theta, q):\n \"\"\"\n Probability of an random event theta given current state s.\n Args:\n theta: Random event\n s = [q, r, w]: State\n Returns:\n Unnormalized probability of the random event.\n \"\"\"\n if q == 0:\n return 0.3333\n elif theta == 0:\n return 0.25\n elif theta == 1:\n return 0.25\n else:\n return 0.5\n\n\ndef new_w(w, d):\n \"\"\"\n Multi-period commitments in the next epoch.\n Args:\n d: Defender's actions\n m: Number of non multi-period commitments. (i.e. The first m defender's actions are not multi-period)\n s = [q, r, w]: Current State\n tau: An array denoting the length of each multi-period commitment.\n Returns:\n next_w: Number of decision epochs remaining in the next epoch.\n \"\"\"\n if w.sum() > 0:\n next_w = w.copy()\n next_w[next_w > 0] -= 1\n return next_w\n elif d[0] == 1:\n return np.array([51, 0, 0])\n elif d[1] == 1:\n return np.array([0, 51, 0])\n else:\n return np.array([0, 0, 51])\n\n\ndef attraction_h(next_r, a):\n \"\"\"\n Attraction function of resource (h in the paper).\n Args:\n next_r: Probable resource array in the next epoch.\n next_w: Multi-period commitments in the next epoch.\n d: Defender's actions\n a: Attacker's actions\n s = [q, r, w]: Current State\n rho_da: A map mapping from (d_i, a_j) to response quality\n rho_dq: A map mapping from (d_i, q) to response quality\n h_above: attraction value when response quality is above threshold\n h_below: attraction value when response quality is below threshold\n dict_r: map resource to corresponding level.\n thres: Threshold for a good response.\n Returns:\n Attraction value.\n \"\"\"\n if a == 0:\n if next_r == 9:\n return 0.8\n elif next_r == 14:\n return 0.1\n else:\n return 0.1\n elif a == 1:\n if next_r == 9:\n return 0.1\n elif next_r == 14:\n return 0.1\n else:\n return 0.8\n elif a == 2:\n if next_r == 9:\n return 0.1\n elif next_r == 14:\n return 0.3\n else:\n return 0.6\n elif a == 3:\n if next_r == 9:\n return 0.1\n elif next_r == 14:\n return 0.2\n else:\n return 0.7\n elif next_r == 9:\n return 0.1\n elif next_r == 14:\n return 0.4\n else:\n return 0.5\n\n\ndef attraction_g(next_q, q, d, a):\n \"\"\"\n Attraction function of operational conditions (g in the paper).\n Args:\n next_q: Operational conditions in the next epoch.\n next_r: Probable resource array in the next epoch.\n next_w: Multi-period commitments in the next epoch.\n d: Defender's actions\n a: Attacker's actions\n s = [q, r, w]: Current State\n rho_da: A map mapping from (d_i, a_j) to response quality\n rho_dq: A map mapping from (d_i, q) to response quality\n g_above: attraction value when response quality is above threshold\n g_below: attraction value when response quality is below threshold\n thres: Threshold for a good response.\n Returns:\n Attraction value.\n \"\"\"\n if a == 0:\n if next_q == 0:\n xi_D = 8\n else:\n xi_D = 1\n elif a == 1:\n xi_D = 1\n elif a == 2:\n if next_q == 0:\n xi_D = 1\n else:\n xi_D = 3\n elif a == 3:\n if next_q == 0:\n xi_D = 1\n else:\n xi_D = 2\n elif next_q == 0:\n xi_D = 1\n else:\n xi_D = 4\n dqq = 0\n if next_q == 1 and q == 0:\n if d[3] == 1:\n dqq = 1\n elif np.sum(d[6:]) == 3:\n dqq = 1\n elif next_q == 0 and q == 1:\n if d[5] == 1:\n dqq = 1\n elif np.sum(d[6:]) == 0:\n dqq = 1\n return xi_D + dqq\n\n\ndef trans_prob(next_s, q, d):\n \"\"\"\n Probability of decision d from state s to state next_s\n Args:\n next_s = [next_q, next_r, next_w]: Next State\n d: Defender's actions\n s = [q, r, w]: Current State\n m: Number of non 
multi-period commitments. (i.e. The first m defender's actions are not multi-period)\n tau: An array denoting the length of each multi-period commitment.\n c (nr * nd): cost of defender's each action\n h_above: attraction value when response quality is above threshold\n h_below: attraction value when response quality is below threshold\n g_above: attraction value when response quality is above threshold\n g_below: attraction value when response quality is below threshold\n dict_r: map resource to corresponding level.\n order: Order of ARA. Currently only 0 and 1 are available.\n Returns:\n prob: Probability.\n \"\"\"\n next_q, next_r, next_w = next_s\n A_actions = [0, 1, 2, 3, 4]\n prob = 0\n for a in A_actions:\n prob_r = attraction_h(next_r[0], a)\n q1 = attraction_g(next_q[0], q, d, a)\n q2 = attraction_g(1 - next_q[0], q, d, a)\n prob_q = q1 / (q1 + q2)\n prob += a_given_s(a, q) * prob_r * prob_q\n return prob\n",
"step-4": "import numpy as np\nfrom ARA import *\nfrom State import *\n\n\ndef theta_given_s(theta, q):\n \"\"\"\n Probability of an random event theta given current state s.\n Args:\n theta: Random event\n s = [q, r, w]: State\n Returns:\n Unnormalized probability of the random event.\n \"\"\"\n if q == 0:\n return 0.3333\n elif theta == 0:\n return 0.25\n elif theta == 1:\n return 0.25\n else:\n return 0.5\n\n\ndef new_w(w, d):\n \"\"\"\n Multi-period commitments in the next epoch.\n Args:\n d: Defender's actions\n m: Number of non multi-period commitments. (i.e. The first m defender's actions are not multi-period)\n s = [q, r, w]: Current State\n tau: An array denoting the length of each multi-period commitment.\n Returns:\n next_w: Number of decision epochs remaining in the next epoch.\n \"\"\"\n if w.sum() > 0:\n next_w = w.copy()\n next_w[next_w > 0] -= 1\n return next_w\n elif d[0] == 1:\n return np.array([51, 0, 0])\n elif d[1] == 1:\n return np.array([0, 51, 0])\n else:\n return np.array([0, 0, 51])\n\n\ndef attraction_h(next_r, a):\n \"\"\"\n Attraction function of resource (h in the paper).\n Args:\n next_r: Probable resource array in the next epoch.\n next_w: Multi-period commitments in the next epoch.\n d: Defender's actions\n a: Attacker's actions\n s = [q, r, w]: Current State\n rho_da: A map mapping from (d_i, a_j) to response quality\n rho_dq: A map mapping from (d_i, q) to response quality\n h_above: attraction value when response quality is above threshold\n h_below: attraction value when response quality is below threshold\n dict_r: map resource to corresponding level.\n thres: Threshold for a good response.\n Returns:\n Attraction value.\n \"\"\"\n if a == 0:\n if next_r == 9:\n return 0.8\n elif next_r == 14:\n return 0.1\n else:\n return 0.1\n elif a == 1:\n if next_r == 9:\n return 0.1\n elif next_r == 14:\n return 0.1\n else:\n return 0.8\n elif a == 2:\n if next_r == 9:\n return 0.1\n elif next_r == 14:\n return 0.3\n else:\n return 0.6\n elif a == 3:\n if next_r == 9:\n return 0.1\n elif next_r == 14:\n return 0.2\n else:\n return 0.7\n elif next_r == 9:\n return 0.1\n elif next_r == 14:\n return 0.4\n else:\n return 0.5\n\n\ndef attraction_g(next_q, q, d, a):\n \"\"\"\n Attraction function of operational conditions (g in the paper).\n Args:\n next_q: Operational conditions in the next epoch.\n next_r: Probable resource array in the next epoch.\n next_w: Multi-period commitments in the next epoch.\n d: Defender's actions\n a: Attacker's actions\n s = [q, r, w]: Current State\n rho_da: A map mapping from (d_i, a_j) to response quality\n rho_dq: A map mapping from (d_i, q) to response quality\n g_above: attraction value when response quality is above threshold\n g_below: attraction value when response quality is below threshold\n thres: Threshold for a good response.\n Returns:\n Attraction value.\n \"\"\"\n if a == 0:\n if next_q == 0:\n xi_D = 8\n else:\n xi_D = 1\n elif a == 1:\n xi_D = 1\n elif a == 2:\n if next_q == 0:\n xi_D = 1\n else:\n xi_D = 3\n elif a == 3:\n if next_q == 0:\n xi_D = 1\n else:\n xi_D = 2\n elif next_q == 0:\n xi_D = 1\n else:\n xi_D = 4\n dqq = 0\n if next_q == 1 and q == 0:\n if d[3] == 1:\n dqq = 1\n elif np.sum(d[6:]) == 3:\n dqq = 1\n elif next_q == 0 and q == 1:\n if d[5] == 1:\n dqq = 1\n elif np.sum(d[6:]) == 0:\n dqq = 1\n return xi_D + dqq\n\n\ndef trans_prob(next_s, q, d):\n \"\"\"\n Probability of decision d from state s to state next_s\n Args:\n next_s = [next_q, next_r, next_w]: Next State\n d: Defender's actions\n s = [q, 
r, w]: Current State\n m: Number of non multi-period commitments. (i.e. The first m defender's actions are not multi-period)\n tau: An array denoting the length of each multi-period commitment.\n c (nr * nd): cost of defender's each action\n h_above: attraction value when response quality is above threshold\n h_below: attraction value when response quality is below threshold\n g_above: attraction value when response quality is above threshold\n g_below: attraction value when response quality is below threshold\n dict_r: map resource to corresponding level.\n order: Order of ARA. Currently only 0 and 1 are available.\n Returns:\n prob: Probability.\n \"\"\"\n next_q, next_r, next_w = next_s\n A_actions = [0, 1, 2, 3, 4]\n prob = 0\n for a in A_actions:\n prob_r = attraction_h(next_r[0], a)\n q1 = attraction_g(next_q[0], q, d, a)\n q2 = attraction_g(1 - next_q[0], q, d, a)\n prob_q = q1 / (q1 + q2)\n prob += a_given_s(a, q) * prob_r * prob_q\n return prob\n",
"step-5": "import numpy as np\nfrom ARA import *\nfrom State import *\n\ndef theta_given_s(theta, q):\n \"\"\"\n Probability of an random event theta given current state s.\n Args:\n theta: Random event\n s = [q, r, w]: State\n Returns:\n Unnormalized probability of the random event.\n \"\"\"\n if q == 0:\n return .3333\n else:\n if theta == 0:\n return 0.25\n elif theta == 1:\n return 0.25\n else:\n return 0.5\n\n\ndef new_w(w, d):\n \"\"\"\n Multi-period commitments in the next epoch.\n Args:\n d: Defender's actions\n m: Number of non multi-period commitments. (i.e. The first m defender's actions are not multi-period)\n s = [q, r, w]: Current State\n tau: An array denoting the length of each multi-period commitment.\n Returns:\n next_w: Number of decision epochs remaining in the next epoch.\n \"\"\"\n\n if w.sum() > 0:\n next_w = w.copy()\n next_w[next_w > 0] -= 1\n return next_w\n else:\n if d[0] == 1:\n return np.array([51,0,0])\n elif d[1] == 1:\n return np.array([0,51,0])\n else:\n return np.array([0,0,51])\n\n\n\ndef attraction_h(next_r, a):\n \"\"\"\n Attraction function of resource (h in the paper).\n Args:\n next_r: Probable resource array in the next epoch.\n next_w: Multi-period commitments in the next epoch.\n d: Defender's actions\n a: Attacker's actions\n s = [q, r, w]: Current State\n rho_da: A map mapping from (d_i, a_j) to response quality\n rho_dq: A map mapping from (d_i, q) to response quality\n h_above: attraction value when response quality is above threshold\n h_below: attraction value when response quality is below threshold\n dict_r: map resource to corresponding level.\n thres: Threshold for a good response.\n Returns:\n Attraction value.\n \"\"\"\n if a == 0:\n if next_r == 9:\n return 0.8\n elif next_r == 14:\n return 0.1\n else:\n return 0.1\n\n elif a == 1:\n if next_r == 9:\n return 0.1\n elif next_r == 14:\n return 0.1\n else:\n return 0.8\n\n elif a == 2:\n if next_r == 9:\n return 0.1\n elif next_r == 14:\n return 0.3\n else:\n return 0.6\n\n elif a == 3:\n if next_r == 9:\n return 0.1\n elif next_r == 14:\n return 0.2\n else:\n return 0.7\n\n else:\n if next_r == 9:\n return 0.1\n elif next_r == 14:\n return 0.4\n else:\n return 0.5\n\n\ndef attraction_g(next_q, q, d, a):\n \"\"\"\n Attraction function of operational conditions (g in the paper).\n Args:\n next_q: Operational conditions in the next epoch.\n next_r: Probable resource array in the next epoch.\n next_w: Multi-period commitments in the next epoch.\n d: Defender's actions\n a: Attacker's actions\n s = [q, r, w]: Current State\n rho_da: A map mapping from (d_i, a_j) to response quality\n rho_dq: A map mapping from (d_i, q) to response quality\n g_above: attraction value when response quality is above threshold\n g_below: attraction value when response quality is below threshold\n thres: Threshold for a good response.\n Returns:\n Attraction value.\n \"\"\"\n\n if a == 0:\n if next_q == 0:\n xi_D = 8\n else:\n xi_D = 1\n\n elif a == 1:\n xi_D = 1\n\n elif a == 2:\n if next_q == 0:\n xi_D = 1\n else:\n xi_D = 3\n\n elif a == 3:\n if next_q == 0:\n xi_D = 1\n else:\n xi_D = 2\n\n else:\n if next_q == 0:\n xi_D = 1\n else:\n xi_D = 4\n\n dqq = 0\n if next_q == 1 and q == 0:\n if d[3] == 1:\n dqq = 1\n elif np.sum(d[6:]) == 3:\n dqq = 1\n elif next_q == 0 and q == 1:\n if d[5] == 1:\n dqq = 1\n elif np.sum(d[6:]) == 0:\n dqq = 1\n\n return xi_D + dqq\n\n\n\ndef trans_prob(next_s, q, d):\n \"\"\"\n Probability of decision d from state s to state next_s\n Args:\n next_s = [next_q, next_r, next_w]: 
Next State\n d: Defender's actions\n s = [q, r, w]: Current State\n m: Number of non multi-period commitments. (i.e. The first m defender's actions are not multi-period)\n tau: An array denoting the length of each multi-period commitment.\n c (nr * nd): cost of defender's each action\n h_above: attraction value when response quality is above threshold\n h_below: attraction value when response quality is below threshold\n g_above: attraction value when response quality is above threshold\n g_below: attraction value when response quality is below threshold\n dict_r: map resource to corresponding level.\n order: Order of ARA. Currently only 0 and 1 are available.\n Returns:\n prob: Probability.\n \"\"\"\n\n next_q, next_r, next_w = next_s\n\n A_actions = [0, 1, 2, 3, 4]\n\n prob = 0\n\n for a in A_actions:\n\n prob_r = attraction_h(next_r[0], a)\n\n q1 = attraction_g(next_q[0], q, d, a)\n q2 = attraction_g(1-next_q[0], q, d, a)\n prob_q = q1 / (q1 + q2)\n\n prob += a_given_s(a, q) * prob_r * prob_q\n\n return prob\n\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
# Generated by Django 2.2.16 on 2020-10-27 14:55
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('trades', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='orderinfo',
name='nonce_str',
field=models.CharField(blank=True, max_length=50, null=True, unique=True, verbose_name='随机加密串'),
),
]
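# Usage sketch (hypothetical project layout; assumes this file sits in
# trades/migrations/): apply it with `python manage.py migrate trades`,
# roll it back with `python manage.py migrate trades 0001_initial`.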
|
normal
|
{
"blob_id": "4e04e748a97c59a26a394b049c15d96476b98517",
"index": 9382,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('trades', '0001_initial')]\n operations = [migrations.AddField(model_name='orderinfo', name=\n 'nonce_str', field=models.CharField(blank=True, max_length=50, null\n =True, unique=True, verbose_name='随机加密串'))]\n",
"step-4": "from django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n dependencies = [('trades', '0001_initial')]\n operations = [migrations.AddField(model_name='orderinfo', name=\n 'nonce_str', field=models.CharField(blank=True, max_length=50, null\n =True, unique=True, verbose_name='随机加密串'))]\n",
"step-5": "# Generated by Django 2.2.16 on 2020-10-27 14:55\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('trades', '0001_initial'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='orderinfo',\n name='nonce_str',\n field=models.CharField(blank=True, max_length=50, null=True, unique=True, verbose_name='随机加密串'),\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
#coding=utf-8
#
"""
my custom common module
"""
import json
import base64
# SDK account credentials
APP_ID = '10676432'
API_KEY = 'Hy1D1urUTdXzTOzqr9LeN3gc'
SECRET_KEY = 'foS4GMg2w3QZtO9XNoSQF17Kkk007xWk'
def print_json(obj):
"""json格式打印信息
Args:
obj 待打印的对象信息
"""
print(json.dumps(obj, ensure_ascii=False))
def print_error(err_code, err_msg):
"""格式化打印错误信息
Args:
err_code: 错误码
err_msg: 错误信息
"""
print(u"[{0}]: {1}".format(err_code, err_msg))
def get_image_base64_content(image_file):
"""获取图片base64编码信息
Args:
image_file: 图片
Returns:
base64编码的图片信息
"""
with open(image_file, 'rb') as fp:
return str(base64.b64encode(fp.read()), 'utf-8')
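# Minimal usage sketch (hypothetical values; the error code/message and the
# image path below are placeholders, not real Baidu SDK responses):
if __name__ == '__main__':
    print_json({'app_id': APP_ID, 'msg': 'hello'})
    print_error('E001', 'example error message')
    # b64 = get_image_base64_content('demo.jpg')  # uncomment with a real image file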
|
normal
|
{
"blob_id": "0b0eebd31d822ff5c1b951c3ee213f58a3a13aa0",
"index": 134,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef print_json(obj):\n \"\"\"json格式打印信息\n\n Args:\n obj 待打印的对象信息\n \"\"\"\n print(json.dumps(obj, ensure_ascii=False))\n\n\ndef print_error(err_code, err_msg):\n \"\"\"格式化打印错误信息\n\n Args:\n err_code: 错误码\n err_msg: 错误信息\n \"\"\"\n print(u'[{0}]: {1}'.format(err_code, err_msg))\n\n\ndef get_image_base64_content(image_file):\n \"\"\"获取图片base64编码信息\n\n Args:\n image_file: 图片\n\n Returns:\n base64编码的图片信息\n \"\"\"\n with open(image_file, 'rb') as fp:\n return str(base64.b64encode(fp.read()), 'utf-8')\n",
"step-3": "<mask token>\nAPP_ID = '10676432'\nAPI_KEY = 'Hy1D1urUTdXzTOzqr9LeN3gc'\nSECRET_KEY = 'foS4GMg2w3QZtO9XNoSQF17Kkk007xWk'\n\n\ndef print_json(obj):\n \"\"\"json格式打印信息\n\n Args:\n obj 待打印的对象信息\n \"\"\"\n print(json.dumps(obj, ensure_ascii=False))\n\n\ndef print_error(err_code, err_msg):\n \"\"\"格式化打印错误信息\n\n Args:\n err_code: 错误码\n err_msg: 错误信息\n \"\"\"\n print(u'[{0}]: {1}'.format(err_code, err_msg))\n\n\ndef get_image_base64_content(image_file):\n \"\"\"获取图片base64编码信息\n\n Args:\n image_file: 图片\n\n Returns:\n base64编码的图片信息\n \"\"\"\n with open(image_file, 'rb') as fp:\n return str(base64.b64encode(fp.read()), 'utf-8')\n",
"step-4": "<mask token>\nimport json\nimport base64\nAPP_ID = '10676432'\nAPI_KEY = 'Hy1D1urUTdXzTOzqr9LeN3gc'\nSECRET_KEY = 'foS4GMg2w3QZtO9XNoSQF17Kkk007xWk'\n\n\ndef print_json(obj):\n \"\"\"json格式打印信息\n\n Args:\n obj 待打印的对象信息\n \"\"\"\n print(json.dumps(obj, ensure_ascii=False))\n\n\ndef print_error(err_code, err_msg):\n \"\"\"格式化打印错误信息\n\n Args:\n err_code: 错误码\n err_msg: 错误信息\n \"\"\"\n print(u'[{0}]: {1}'.format(err_code, err_msg))\n\n\ndef get_image_base64_content(image_file):\n \"\"\"获取图片base64编码信息\n\n Args:\n image_file: 图片\n\n Returns:\n base64编码的图片信息\n \"\"\"\n with open(image_file, 'rb') as fp:\n return str(base64.b64encode(fp.read()), 'utf-8')\n",
"step-5": "#coding=utf-8\n#\n\"\"\"\nmy custom common module\n\"\"\"\nimport json\nimport base64\n\n# sdk账号信息\nAPP_ID = '10676432'\nAPI_KEY = 'Hy1D1urUTdXzTOzqr9LeN3gc'\nSECRET_KEY = 'foS4GMg2w3QZtO9XNoSQF17Kkk007xWk'\n\n\ndef print_json(obj):\n \"\"\"json格式打印信息\n\n Args:\n obj 待打印的对象信息\n \"\"\"\n print(json.dumps(obj, ensure_ascii=False))\n\n\ndef print_error(err_code, err_msg):\n \"\"\"格式化打印错误信息\n\n Args:\n err_code: 错误码\n err_msg: 错误信息\n \"\"\"\n print(u\"[{0}]: {1}\".format(err_code, err_msg))\n\n\ndef get_image_base64_content(image_file):\n \"\"\"获取图片base64编码信息\n\n Args:\n image_file: 图片\n\n Returns:\n base64编码的图片信息\n \"\"\"\n with open(image_file, 'rb') as fp:\n return str(base64.b64encode(fp.read()), 'utf-8')\n\n",
"step-ids": [
0,
3,
4,
5,
6
]
}
|
[
0,
3,
4,
5,
6
] |
# ------------------------------------------
#
# Project: VEXcode VR Maze Solver
# Author: Hyunwoo Choi
# Created: January 12 2021
# Description: Solves a VEXcode VR maze using the right hand rule
#
# ------------------------------------------
# Library imports
from vexcode import *
#main
def main():
#putting down the pen to show the path of the robot
pen.set_pen_color(BLUE)
pen.move(DOWN)
drivetrain.set_drive_velocity(50, PERCENT)
drivetrain.set_turn_velocity(50, PERCENT)
    #start turned 90 degrees to the right, since we use the right-hand rule to solve this maze
drivetrain.turn_for(RIGHT, 90, DEGREES)
#run
run()
#this method checks the right, front, and left sides and returns a boolean for each that is True when that side is clear
def checkSides():
rightC, frontC, leftC = True, True, True
drivetrain.turn_for(RIGHT, 90, DEGREES)
if front_eye.near_object() and distance.get_distance(MM) < 3000:
rightC = False
drivetrain.turn_for(LEFT, 90, DEGREES)
if front_eye.near_object() and distance.get_distance(MM) < 3000:
frontC = False
drivetrain.turn_for(LEFT, 90, DEGREES)
if front_eye.near_object() and distance.get_distance(MM) < 3000:
leftC = False
drivetrain.turn_for(RIGHT, 90, DEGREES)
return rightC, frontC, leftC
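# Example (hypothetical maze cell): with a wall on the right only, checkSides()
# returns (False, True, True), so run() below keeps driving straight.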
#main run function
def run():
#program loop
while True:
#drive
drivetrain.drive_for(FORWARD, 250, MM)
#checks if the robot's surroundings are clear by using the method above
rightClear, frontClear, leftClear = checkSides()
        #uses the 3 boolean values above to determine which direction to turn
        if frontClear and not rightClear:
            pass # right is blocked and the path ahead is clear: keep driving straight
elif rightClear:
drivetrain.turn_for(RIGHT, 90, DEGREES)
elif (not (rightClear and frontClear)) and leftClear:
drivetrain.turn_for(LEFT, 90, DEGREES)
elif not (rightClear and leftClear and frontClear):
drivetrain.turn_for(RIGHT, 180, DEGREES)
        #if the exit (red marker) is found, stop
        if down_eye.detect(RED):
break
wait(1,MSEC)
# VR threads — Do not delete
vr_thread(main())
|
normal
|
{
"blob_id": "e560f2f202e477822729d1361b8d7ef7831a00e6",
"index": 8339,
"step-1": "<mask token>\n\n\ndef checkSides():\n rightC, frontC, leftC = True, True, True\n drivetrain.turn_for(RIGHT, 90, DEGREES)\n if front_eye.near_object() and distance.get_distance(MM) < 3000:\n rightC = False\n drivetrain.turn_for(LEFT, 90, DEGREES)\n if front_eye.near_object() and distance.get_distance(MM) < 3000:\n frontC = False\n drivetrain.turn_for(LEFT, 90, DEGREES)\n if front_eye.near_object() and distance.get_distance(MM) < 3000:\n leftC = False\n drivetrain.turn_for(RIGHT, 90, DEGREES)\n return rightC, frontC, leftC\n\n\ndef run():\n while True:\n drivetrain.drive_for(FORWARD, 250, MM)\n rightClear, frontClear, leftClear = checkSides()\n if frontClear and not rightClear:\n print('')\n elif rightClear:\n drivetrain.turn_for(RIGHT, 90, DEGREES)\n elif not (rightClear and frontClear) and leftClear:\n drivetrain.turn_for(LEFT, 90, DEGREES)\n elif not (rightClear and leftClear and frontClear):\n drivetrain.turn_for(RIGHT, 180, DEGREES)\n if down_eye.detect(RED):\n break\n wait(1, MSEC)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef main():\n pen.set_pen_color(BLUE)\n pen.move(DOWN)\n drivetrain.set_drive_velocity(50, PERCENT)\n drivetrain.set_turn_velocity(50, PERCENT)\n drivetrain.turn_for(RIGHT, 90, DEGREES)\n run()\n\n\ndef checkSides():\n rightC, frontC, leftC = True, True, True\n drivetrain.turn_for(RIGHT, 90, DEGREES)\n if front_eye.near_object() and distance.get_distance(MM) < 3000:\n rightC = False\n drivetrain.turn_for(LEFT, 90, DEGREES)\n if front_eye.near_object() and distance.get_distance(MM) < 3000:\n frontC = False\n drivetrain.turn_for(LEFT, 90, DEGREES)\n if front_eye.near_object() and distance.get_distance(MM) < 3000:\n leftC = False\n drivetrain.turn_for(RIGHT, 90, DEGREES)\n return rightC, frontC, leftC\n\n\ndef run():\n while True:\n drivetrain.drive_for(FORWARD, 250, MM)\n rightClear, frontClear, leftClear = checkSides()\n if frontClear and not rightClear:\n print('')\n elif rightClear:\n drivetrain.turn_for(RIGHT, 90, DEGREES)\n elif not (rightClear and frontClear) and leftClear:\n drivetrain.turn_for(LEFT, 90, DEGREES)\n elif not (rightClear and leftClear and frontClear):\n drivetrain.turn_for(RIGHT, 180, DEGREES)\n if down_eye.detect(RED):\n break\n wait(1, MSEC)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef main():\n pen.set_pen_color(BLUE)\n pen.move(DOWN)\n drivetrain.set_drive_velocity(50, PERCENT)\n drivetrain.set_turn_velocity(50, PERCENT)\n drivetrain.turn_for(RIGHT, 90, DEGREES)\n run()\n\n\ndef checkSides():\n rightC, frontC, leftC = True, True, True\n drivetrain.turn_for(RIGHT, 90, DEGREES)\n if front_eye.near_object() and distance.get_distance(MM) < 3000:\n rightC = False\n drivetrain.turn_for(LEFT, 90, DEGREES)\n if front_eye.near_object() and distance.get_distance(MM) < 3000:\n frontC = False\n drivetrain.turn_for(LEFT, 90, DEGREES)\n if front_eye.near_object() and distance.get_distance(MM) < 3000:\n leftC = False\n drivetrain.turn_for(RIGHT, 90, DEGREES)\n return rightC, frontC, leftC\n\n\ndef run():\n while True:\n drivetrain.drive_for(FORWARD, 250, MM)\n rightClear, frontClear, leftClear = checkSides()\n if frontClear and not rightClear:\n print('')\n elif rightClear:\n drivetrain.turn_for(RIGHT, 90, DEGREES)\n elif not (rightClear and frontClear) and leftClear:\n drivetrain.turn_for(LEFT, 90, DEGREES)\n elif not (rightClear and leftClear and frontClear):\n drivetrain.turn_for(RIGHT, 180, DEGREES)\n if down_eye.detect(RED):\n break\n wait(1, MSEC)\n\n\nvr_thread(main())\n",
"step-4": "from vexcode import *\n\n\ndef main():\n pen.set_pen_color(BLUE)\n pen.move(DOWN)\n drivetrain.set_drive_velocity(50, PERCENT)\n drivetrain.set_turn_velocity(50, PERCENT)\n drivetrain.turn_for(RIGHT, 90, DEGREES)\n run()\n\n\ndef checkSides():\n rightC, frontC, leftC = True, True, True\n drivetrain.turn_for(RIGHT, 90, DEGREES)\n if front_eye.near_object() and distance.get_distance(MM) < 3000:\n rightC = False\n drivetrain.turn_for(LEFT, 90, DEGREES)\n if front_eye.near_object() and distance.get_distance(MM) < 3000:\n frontC = False\n drivetrain.turn_for(LEFT, 90, DEGREES)\n if front_eye.near_object() and distance.get_distance(MM) < 3000:\n leftC = False\n drivetrain.turn_for(RIGHT, 90, DEGREES)\n return rightC, frontC, leftC\n\n\ndef run():\n while True:\n drivetrain.drive_for(FORWARD, 250, MM)\n rightClear, frontClear, leftClear = checkSides()\n if frontClear and not rightClear:\n print('')\n elif rightClear:\n drivetrain.turn_for(RIGHT, 90, DEGREES)\n elif not (rightClear and frontClear) and leftClear:\n drivetrain.turn_for(LEFT, 90, DEGREES)\n elif not (rightClear and leftClear and frontClear):\n drivetrain.turn_for(RIGHT, 180, DEGREES)\n if down_eye.detect(RED):\n break\n wait(1, MSEC)\n\n\nvr_thread(main())\n",
"step-5": "# ------------------------------------------\n# \n# \tProject: VEXcode VR Maze Solver\n#\tAuthor: Hyunwoo Choi\n#\tCreated: January 12 2021\n#\tDescription: Solves a VEXcode VR maze using the right hand rule\n# \n# ------------------------------------------\n\n# Library imports\nfrom vexcode import *\n\n#main\ndef main():\n #putting down the pen to show the path of the robot\n pen.set_pen_color(BLUE)\n pen.move(DOWN)\n\n drivetrain.set_drive_velocity(50, PERCENT)\n drivetrain.set_turn_velocity(50, PERCENT)\n\n \n #start with 90 deg turned right since we are using a right hand rule to solve this maze\n drivetrain.turn_for(RIGHT, 90, DEGREES)\n \n #run\n run()\n\n#this method checks all three sides and returns a boolean for each side if it is blocked or not\ndef checkSides():\n \n rightC, frontC, leftC = True, True, True\n drivetrain.turn_for(RIGHT, 90, DEGREES)\n if front_eye.near_object() and distance.get_distance(MM) < 3000:\n rightC = False\n drivetrain.turn_for(LEFT, 90, DEGREES)\n if front_eye.near_object() and distance.get_distance(MM) < 3000:\n frontC = False\n drivetrain.turn_for(LEFT, 90, DEGREES)\n if front_eye.near_object() and distance.get_distance(MM) < 3000:\n leftC = False\n \n drivetrain.turn_for(RIGHT, 90, DEGREES)\n\n return rightC, frontC, leftC\n\n#main run function\ndef run():\n #program loop\n while True:\n\n #drive\n drivetrain.drive_for(FORWARD, 250, MM)\n\n #checks if the robot's surroundings are clear by using the method above\n rightClear, frontClear, leftClear = checkSides()\n\n #uses the 3 boolean values above to determine the which direction to turn\n if frontClear and not rightClear:\n print(\"\")\n elif rightClear:\n drivetrain.turn_for(RIGHT, 90, DEGREES)\n elif (not (rightClear and frontClear)) and leftClear:\n drivetrain.turn_for(LEFT, 90, DEGREES)\n elif not (rightClear and leftClear and frontClear):\n drivetrain.turn_for(RIGHT, 180, DEGREES)\n\n #if found an exit, stop\n if(down_eye.detect(RED)):\n break\n\n wait(1,MSEC)\n\n \n \n# VR threads — Do not delete\nvr_thread(main())\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
# -*- coding: utf-8 -*-
"""
Created on Sat Aug 11 13:13:53 2018
@author: zhang
"""
'''
Wrapped commands used during diffusion-weighted image preprocessing
===================================================================
dwidenoise & mrdegibbs from MRTrix3.0; eddy-openmp from FSL
-------------------------------------------------------------------------
for unknown reasons they are not included after loading the relevant nipype interfaces
'''
from nipype.interfaces.base import (CommandLine,
CommandLineInputSpec,
File,
TraitedSpec,
traits,
isdefined,
InputMultiPath)
import os
# wrap the dwidenoise command from MRtrix3
class DWIdenoiseInputSpec(CommandLineInputSpec):
in_file = InputMultiPath(
File(exists=True),
mandatory=True,
position=0,
argstr="%s",
desc="input DWI image")
noise = File(
argstr='-noise %s',
desc='noise map')
force = traits.Bool(
desc='force overwrite of output files',
position=-1,
argstr='-force')
out_file = File(name_template='%s_denoised',
name_source='in_file',
keep_extension=True,
argstr="%s",
position=1,
desc="the output denoised DWI image")
class DWIdenoiseOutputSpec(TraitedSpec):
out_file = File(desc = "the output denoised DWI image", exists = True)
class DWIdenoise(CommandLine):
"""Use MRTrix3 dwidenoise command to denoisie DWI data and estimate the
noise level based on the optimal threshold for PCA.
For more information, see
<https://mrtrix.readthedocs.io/en/latest/reference/commands/dwidenoise.html>
"""
_cmd = '/a/software/mrtrix/3.0-rc2/ubuntu-xenial-amd64/bin/dwidenoise'
input_spec = DWIdenoiseInputSpec
output_spec = DWIdenoiseOutputSpec
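# Minimal usage sketch (hypothetical filenames; commented out so the module
# stays importable without data on disk):
# denoise = DWIdenoise(in_file='dwi.nii.gz', noise='noise_map.nii.gz', force=True)
# res = denoise.run()  # output named per name_template: dwi_denoised.nii.gz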
# wrap the mrdegibbs (unringing) command from MRtrix3
class MRdegibbsInputSpec(CommandLineInputSpec):
in_file = File(
desc="input DWI image",
exists=True,
mandatory=True,
position=0,
argstr="%s")
force = traits.Bool(
desc='force overwrite of output files',
position=-1,
argstr='-force')
out_file = File(name_template='%s_unringed',
name_source='in_file',
keep_extension=True,
argstr="%s",
position=1,
desc="the output unringed DWI image")
class MRdegibbsOutputSpec(TraitedSpec):
out_file = File(desc = "the output unringed DWI image", exists = True)
class MRdegibbs(CommandLine):
"""Use MRTrix3 degibbs command for removing the gibbs ringing artefact.
For more information, see
<https://mrtrix.readthedocs.io/en/latest/reference/commands/mrdegibbs.html>
"""
_cmd = '/a/software/mrtrix/3.0-rc2/ubuntu-xenial-amd64/bin/mrdegibbs'
input_spec = MRdegibbsInputSpec
output_spec = MRdegibbsOutputSpec
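# Minimal usage sketch (hypothetical filename, commented out):
# degibbs = MRdegibbs(in_file='dwi_denoised.nii.gz', force=True)
# res = degibbs.run()  # output named per name_template: dwi_denoised_unringed.nii.gz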
# Wrap FSL eddy (copied from the nipype interface)
class EddyInputSpec(CommandLineInputSpec):
in_file = File(
exists=True,
mandatory=True,
argstr='--imain=%s',
desc=('File containing all the images to estimate '
'distortions for'))
in_mask = File(
exists=True,
mandatory=True,
argstr='--mask=%s',
desc='Mask to indicate brain')
in_index = File(
exists=True,
mandatory=True,
argstr='--index=%s',
desc=('File containing indices for all volumes in --imain '
'into --acqp and --topup'))
in_acqp = File(
exists=True,
mandatory=True,
argstr='--acqp=%s',
desc='File containing acquisition parameters')
in_bvec = File(
exists=True,
mandatory=True,
argstr='--bvecs=%s',
desc=('File containing the b-vectors for all volumes in '
'--imain'))
in_bval = File(
exists=True,
mandatory=True,
argstr='--bvals=%s',
desc=('File containing the b-values for all volumes in '
'--imain'))
out_base = traits.Str(
'eddy_corrected',
argstr='--out=%s',
usedefault=True,
desc=('basename for output (warped) image'))
session = File(
exists=True,
argstr='--session=%s',
desc=('File containing session indices for all volumes in '
'--imain'))
in_topup_fieldcoef = File(
exists=True,
argstr="--topup=%s",
requires=['in_topup_movpar'],
desc=('topup file containing the field '
'coefficients'))
in_topup_movpar = File(
exists=True,
requires=['in_topup_fieldcoef'],
desc='topup movpar.txt file')
flm = traits.Enum(
'linear',
'quadratic',
'cubic',
argstr='--flm=%s',
desc='First level EC model')
slm = traits.Enum(
'none',
'linear',
'quadratic',
argstr='--slm=%s',
desc='Second level EC model')
fep = traits.Bool(
False, argstr='--fep', desc='Fill empty planes in x- or y-directions')
interp = traits.Enum(
'spline',
'trilinear',
argstr='--interp=%s',
desc='Interpolation model for estimation step')
nvoxhp = traits.Int(
1000, usedefault=True,
argstr='--nvoxhp=%s',
desc=('# of voxels used to estimate the '
'hyperparameters'))
fudge_factor = traits.Float(
10.0, usedefault=True,
argstr='--ff=%s',
desc=('Fudge factor for hyperparameter '
'error variance'))
dont_sep_offs_move = traits.Bool(
False,
argstr='--dont_sep_offs_move',
desc=('Do NOT attempt to separate '
'field offset from subject '
'movement'))
dont_peas = traits.Bool(
False,
argstr='--dont_peas',
desc="Do NOT perform a post-eddy alignment of "
"shells")
fwhm = traits.Float(
desc=('FWHM for conditioning filter when estimating '
'the parameters'),
argstr='--fwhm=%s')
niter = traits.Int(5, usedefault=True,
argstr='--niter=%s', desc='Number of iterations')
method = traits.Enum(
'jac',
'lsr',
argstr='--resamp=%s',
desc=('Final resampling method (jacobian/least '
'squares)'))
repol = traits.Bool(
False, argstr='--repol', desc='Detect and replace outlier slices')
num_threads = traits.Int(
1,
usedefault=True,
nohash=True,
desc="Number of openmp threads to use")
is_shelled = traits.Bool(
False,
argstr='--data_is_shelled',
desc="Override internal check to ensure that "
"date are acquired on a set of b-value "
"shells")
field = traits.Str(
argstr='--field=%s',
desc="NonTOPUP fieldmap scaled in Hz - filename has "
"to be provided without an extension. TOPUP is "
"strongly recommended")
field_mat = File(
exists=True,
argstr='--field_mat=%s',
desc="Matrix that specifies the relative locations of "
"the field specified by --field and first volume "
"in file --imain")
use_cuda = traits.Bool(False, desc="Run eddy using cuda gpu")
class EddyOutputSpec(TraitedSpec):
out_corrected = File(
exists=True, desc='4D image file containing all the corrected volumes')
out_parameter = File(
exists=True,
        desc=('text file with parameters defining the field and '
              'movement for each scan'))
out_rotated_bvecs = File(
exists=True, desc='File containing rotated b-values for all volumes')
out_movement_rms = File(
exists=True, desc='Summary of the "total movement" in each volume')
out_restricted_movement_rms = File(
exists=True,
desc=('Summary of the "total movement" in each volume '
'disregarding translation in the PE direction'))
out_shell_alignment_parameters = File(
exists=True,
desc=('File containing rigid body movement parameters '
'between the different shells as estimated by a '
'post-hoc mutual information based registration'))
out_outlier_report = File(
exists=True,
desc=('Text-file with a plain language report on what '
'outlier slices eddy has found'))
class Eddy(CommandLine):
"""
Interface for FSL eddy, a tool for estimating and correcting eddy
currents induced distortions. `User guide
<http://fsl.fmrib.ox.ac.uk/fsl/fslwiki/Eddy/UsersGuide>`_ and
`more info regarding acqp file
<http://fsl.fmrib.ox.ac.uk/fsl/fslwiki/eddy/Faq#How_do_I_know_what_to_put_into_my_--acqp_file>`_.
Examples
--------
>>> from nipype.interfaces.fsl import Eddy
>>> eddy = Eddy()
>>> eddy.inputs.in_file = 'epi.nii'
>>> eddy.inputs.in_mask = 'epi_mask.nii'
>>> eddy.inputs.in_index = 'epi_index.txt'
>>> eddy.inputs.in_acqp = 'epi_acqp.txt'
>>> eddy.inputs.in_bvec = 'bvecs.scheme'
>>> eddy.inputs.in_bval = 'bvals.scheme'
>>> eddy.inputs.use_cuda = True
>>> eddy.cmdline # doctest: +ELLIPSIS
'eddy_cuda --ff=10.0 --acqp=epi_acqp.txt --bvals=bvals.scheme \
--bvecs=bvecs.scheme --imain=epi.nii --index=epi_index.txt \
--mask=epi_mask.nii --niter=5 --nvoxhp=1000 --out=.../eddy_corrected'
>>> eddy.inputs.use_cuda = False
>>> eddy.cmdline # doctest: +ELLIPSIS
'eddy_openmp --ff=10.0 --acqp=epi_acqp.txt --bvals=bvals.scheme \
--bvecs=bvecs.scheme --imain=epi.nii --index=epi_index.txt \
--mask=epi_mask.nii --niter=5 --nvoxhp=1000 --out=.../eddy_corrected'
>>> res = eddy.run() # doctest: +SKIP
"""
_cmd = '/data/pt_life_dti/scripts/life2018/eddy_openmp-5.0.11'
input_spec = EddyInputSpec
output_spec = EddyOutputSpec
_num_threads = 1
def __init__(self, **inputs):
super(Eddy, self).__init__(**inputs)
self.inputs.on_trait_change(self._num_threads_update, 'num_threads')
if not isdefined(self.inputs.num_threads):
self.inputs.num_threads = self._num_threads
else:
self._num_threads_update()
self.inputs.on_trait_change(self._use_cuda, 'use_cuda')
if isdefined(self.inputs.use_cuda):
self._use_cuda()
def _num_threads_update(self):
self._num_threads = self.inputs.num_threads
if not isdefined(self.inputs.num_threads):
if 'OMP_NUM_THREADS' in self.inputs.environ:
del self.inputs.environ['OMP_NUM_THREADS']
else:
self.inputs.environ['OMP_NUM_THREADS'] = str(
self.inputs.num_threads)
def _use_cuda(self):
self._cmd = 'eddy_cuda' if self.inputs.use_cuda else 'eddy_openmp'
def _run_interface(self, runtime):
# If 'eddy_openmp' is missing, use 'eddy'
FSLDIR = os.getenv('FSLDIR', '')
cmd = self._cmd
if all((FSLDIR != '', cmd == 'eddy_openmp',
not os.path.exists(os.path.join(FSLDIR, 'bin', cmd)))):
self._cmd = 'eddy'
runtime = super(Eddy, self)._run_interface(runtime)
# Restore command to avoid side-effects
self._cmd = cmd
return runtime
def _format_arg(self, name, spec, value):
if name == 'in_topup_fieldcoef':
return spec.argstr % value.split('_fieldcoef')[0]
if name == 'out_base':
return spec.argstr % os.path.abspath(value)
return super(Eddy, self)._format_arg(name, spec, value)
def _list_outputs(self):
outputs = self.output_spec().get()
outputs['out_corrected'] = os.path.abspath(
'%s.nii.gz' % self.inputs.out_base)
outputs['out_parameter'] = os.path.abspath(
'%s.eddy_parameters' % self.inputs.out_base)
# File generation might depend on the version of EDDY
out_rotated_bvecs = os.path.abspath(
'%s.eddy_rotated_bvecs' % self.inputs.out_base)
out_movement_rms = os.path.abspath(
'%s.eddy_movement_rms' % self.inputs.out_base)
out_restricted_movement_rms = os.path.abspath(
'%s.eddy_restricted_movement_rms' % self.inputs.out_base)
out_shell_alignment_parameters = os.path.abspath(
'%s.eddy_post_eddy_shell_alignment_parameters' %
self.inputs.out_base)
out_outlier_report = os.path.abspath(
'%s.eddy_outlier_report' % self.inputs.out_base)
if os.path.exists(out_rotated_bvecs):
outputs['out_rotated_bvecs'] = out_rotated_bvecs
if os.path.exists(out_movement_rms):
outputs['out_movement_rms'] = out_movement_rms
if os.path.exists(out_restricted_movement_rms):
outputs['out_restricted_movement_rms'] = \
out_restricted_movement_rms
if os.path.exists(out_shell_alignment_parameters):
outputs['out_shell_alignment_parameters'] = \
out_shell_alignment_parameters
if os.path.exists(out_outlier_report):
outputs['out_outlier_report'] = out_outlier_report
return outputs
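# Pipeline wiring sketch (an assumption about intended use, not part of the
# original module; filenames are placeholders):
# from nipype import Node, Workflow
# denoise = Node(DWIdenoise(force=True), name='denoise')
# degibbs = Node(MRdegibbs(force=True), name='degibbs')
# eddy = Node(Eddy(in_mask='brain_mask.nii.gz', in_index='index.txt',
#                  in_acqp='acqp.txt', in_bvec='dwi.bvec', in_bval='dwi.bval'),
#             name='eddy')
# wf = Workflow(name='dwi_preproc', base_dir='/tmp')
# wf.connect([(denoise, degibbs, [('out_file', 'in_file')]),
#             (degibbs, eddy, [('out_file', 'in_file')])])
# wf.run()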
|
normal
|
{
"blob_id": "419aee3045a0d532afa0fc314df9cdef7aab5219",
"index": 4181,
"step-1": "<mask token>\n\n\nclass MRdegibbsInputSpec(CommandLineInputSpec):\n in_file = File(desc='input DWI image', exists=True, mandatory=True,\n position=0, argstr='%s')\n force = traits.Bool(desc='force overwrite of output files', position=-1,\n argstr='-force')\n out_file = File(name_template='%s_unringed', name_source='in_file',\n keep_extension=True, argstr='%s', position=1, desc=\n 'the output unringed DWI image')\n\n\nclass MRdegibbsOutputSpec(TraitedSpec):\n out_file = File(desc='the output unringed DWI image', exists=True)\n\n\nclass MRdegibbs(CommandLine):\n \"\"\"Use MRTrix3 degibbs command for removing the gibbs ringing artefact.\n \n For more information, see\n <https://mrtrix.readthedocs.io/en/latest/reference/commands/mrdegibbs.html>\n \"\"\"\n _cmd = '/a/software/mrtrix/3.0-rc2/ubuntu-xenial-amd64/bin/mrdegibbs'\n input_spec = MRdegibbsInputSpec\n output_spec = MRdegibbsOutputSpec\n\n\nclass EddyInputSpec(CommandLineInputSpec):\n in_file = File(exists=True, mandatory=True, argstr='--imain=%s', desc=\n 'File containing all the images to estimate distortions for')\n in_mask = File(exists=True, mandatory=True, argstr='--mask=%s', desc=\n 'Mask to indicate brain')\n in_index = File(exists=True, mandatory=True, argstr='--index=%s', desc=\n 'File containing indices for all volumes in --imain into --acqp and --topup'\n )\n in_acqp = File(exists=True, mandatory=True, argstr='--acqp=%s', desc=\n 'File containing acquisition parameters')\n in_bvec = File(exists=True, mandatory=True, argstr='--bvecs=%s', desc=\n 'File containing the b-vectors for all volumes in --imain')\n in_bval = File(exists=True, mandatory=True, argstr='--bvals=%s', desc=\n 'File containing the b-values for all volumes in --imain')\n out_base = traits.Str('eddy_corrected', argstr='--out=%s', usedefault=\n True, desc='basename for output (warped) image')\n session = File(exists=True, argstr='--session=%s', desc=\n 'File containing session indices for all volumes in --imain')\n in_topup_fieldcoef = File(exists=True, argstr='--topup=%s', requires=[\n 'in_topup_movpar'], desc='topup file containing the field coefficients'\n )\n in_topup_movpar = File(exists=True, requires=['in_topup_fieldcoef'],\n desc='topup movpar.txt file')\n flm = traits.Enum('linear', 'quadratic', 'cubic', argstr='--flm=%s',\n desc='First level EC model')\n slm = traits.Enum('none', 'linear', 'quadratic', argstr='--slm=%s',\n desc='Second level EC model')\n fep = traits.Bool(False, argstr='--fep', desc=\n 'Fill empty planes in x- or y-directions')\n interp = traits.Enum('spline', 'trilinear', argstr='--interp=%s', desc=\n 'Interpolation model for estimation step')\n nvoxhp = traits.Int(1000, usedefault=True, argstr='--nvoxhp=%s', desc=\n '# of voxels used to estimate the hyperparameters')\n fudge_factor = traits.Float(10.0, usedefault=True, argstr='--ff=%s',\n desc='Fudge factor for hyperparameter error variance')\n dont_sep_offs_move = traits.Bool(False, argstr='--dont_sep_offs_move',\n desc='Do NOT attempt to separate field offset from subject movement')\n dont_peas = traits.Bool(False, argstr='--dont_peas', desc=\n 'Do NOT perform a post-eddy alignment of shells')\n fwhm = traits.Float(desc=\n 'FWHM for conditioning filter when estimating the parameters',\n argstr='--fwhm=%s')\n niter = traits.Int(5, usedefault=True, argstr='--niter=%s', desc=\n 'Number of iterations')\n method = traits.Enum('jac', 'lsr', argstr='--resamp=%s', desc=\n 'Final resampling method (jacobian/least squares)')\n repol = traits.Bool(False, argstr='--repol', desc=\n 
'Detect and replace outlier slices')\n num_threads = traits.Int(1, usedefault=True, nohash=True, desc=\n 'Number of openmp threads to use')\n is_shelled = traits.Bool(False, argstr='--data_is_shelled', desc=\n 'Override internal check to ensure that date are acquired on a set of b-value shells'\n )\n field = traits.Str(argstr='--field=%s', desc=\n 'NonTOPUP fieldmap scaled in Hz - filename has to be provided without an extension. TOPUP is strongly recommended'\n )\n field_mat = File(exists=True, argstr='--field_mat=%s', desc=\n 'Matrix that specifies the relative locations of the field specified by --field and first volume in file --imain'\n )\n use_cuda = traits.Bool(False, desc='Run eddy using cuda gpu')\n\n\nclass EddyOutputSpec(TraitedSpec):\n out_corrected = File(exists=True, desc=\n '4D image file containing all the corrected volumes')\n out_parameter = File(exists=True, desc=\n 'text file with parameters definining the field andmovement for each scan'\n )\n out_rotated_bvecs = File(exists=True, desc=\n 'File containing rotated b-values for all volumes')\n out_movement_rms = File(exists=True, desc=\n 'Summary of the \"total movement\" in each volume')\n out_restricted_movement_rms = File(exists=True, desc=\n 'Summary of the \"total movement\" in each volume disregarding translation in the PE direction'\n )\n out_shell_alignment_parameters = File(exists=True, desc=\n 'File containing rigid body movement parameters between the different shells as estimated by a post-hoc mutual information based registration'\n )\n out_outlier_report = File(exists=True, desc=\n 'Text-file with a plain language report on what outlier slices eddy has found'\n )\n\n\nclass Eddy(CommandLine):\n \"\"\"\n Interface for FSL eddy, a tool for estimating and correcting eddy\n currents induced distortions. 
`User guide\n <http://fsl.fmrib.ox.ac.uk/fsl/fslwiki/Eddy/UsersGuide>`_ and\n `more info regarding acqp file\n <http://fsl.fmrib.ox.ac.uk/fsl/fslwiki/eddy/Faq#How_do_I_know_what_to_put_into_my_--acqp_file>`_.\n Examples\n --------\n >>> from nipype.interfaces.fsl import Eddy\n >>> eddy = Eddy()\n >>> eddy.inputs.in_file = 'epi.nii'\n >>> eddy.inputs.in_mask = 'epi_mask.nii'\n >>> eddy.inputs.in_index = 'epi_index.txt'\n >>> eddy.inputs.in_acqp = 'epi_acqp.txt'\n >>> eddy.inputs.in_bvec = 'bvecs.scheme'\n >>> eddy.inputs.in_bval = 'bvals.scheme'\n >>> eddy.inputs.use_cuda = True\n >>> eddy.cmdline # doctest: +ELLIPSIS\n 'eddy_cuda --ff=10.0 --acqp=epi_acqp.txt --bvals=bvals.scheme --bvecs=bvecs.scheme --imain=epi.nii --index=epi_index.txt --mask=epi_mask.nii --niter=5 --nvoxhp=1000 --out=.../eddy_corrected'\n >>> eddy.inputs.use_cuda = False\n >>> eddy.cmdline # doctest: +ELLIPSIS\n 'eddy_openmp --ff=10.0 --acqp=epi_acqp.txt --bvals=bvals.scheme --bvecs=bvecs.scheme --imain=epi.nii --index=epi_index.txt --mask=epi_mask.nii --niter=5 --nvoxhp=1000 --out=.../eddy_corrected'\n >>> res = eddy.run() # doctest: +SKIP\n \"\"\"\n _cmd = '/data/pt_life_dti/scripts/life2018/eddy_openmp-5.0.11'\n input_spec = EddyInputSpec\n output_spec = EddyOutputSpec\n _num_threads = 1\n\n def __init__(self, **inputs):\n super(Eddy, self).__init__(**inputs)\n self.inputs.on_trait_change(self._num_threads_update, 'num_threads')\n if not isdefined(self.inputs.num_threads):\n self.inputs.num_threads = self._num_threads\n else:\n self._num_threads_update()\n self.inputs.on_trait_change(self._use_cuda, 'use_cuda')\n if isdefined(self.inputs.use_cuda):\n self._use_cuda()\n\n def _num_threads_update(self):\n self._num_threads = self.inputs.num_threads\n if not isdefined(self.inputs.num_threads):\n if 'OMP_NUM_THREADS' in self.inputs.environ:\n del self.inputs.environ['OMP_NUM_THREADS']\n else:\n self.inputs.environ['OMP_NUM_THREADS'] = str(self.inputs.\n num_threads)\n\n def _use_cuda(self):\n self._cmd = 'eddy_cuda' if self.inputs.use_cuda else 'eddy_openmp'\n\n def _run_interface(self, runtime):\n FSLDIR = os.getenv('FSLDIR', '')\n cmd = self._cmd\n if all((FSLDIR != '', cmd == 'eddy_openmp', not os.path.exists(os.\n path.join(FSLDIR, 'bin', cmd)))):\n self._cmd = 'eddy'\n runtime = super(Eddy, self)._run_interface(runtime)\n self._cmd = cmd\n return runtime\n\n def _format_arg(self, name, spec, value):\n if name == 'in_topup_fieldcoef':\n return spec.argstr % value.split('_fieldcoef')[0]\n if name == 'out_base':\n return spec.argstr % os.path.abspath(value)\n return super(Eddy, self)._format_arg(name, spec, value)\n\n def _list_outputs(self):\n outputs = self.output_spec().get()\n outputs['out_corrected'] = os.path.abspath('%s.nii.gz' % self.\n inputs.out_base)\n outputs['out_parameter'] = os.path.abspath('%s.eddy_parameters' %\n self.inputs.out_base)\n out_rotated_bvecs = os.path.abspath('%s.eddy_rotated_bvecs' % self.\n inputs.out_base)\n out_movement_rms = os.path.abspath('%s.eddy_movement_rms' % self.\n inputs.out_base)\n out_restricted_movement_rms = os.path.abspath(\n '%s.eddy_restricted_movement_rms' % self.inputs.out_base)\n out_shell_alignment_parameters = os.path.abspath(\n '%s.eddy_post_eddy_shell_alignment_parameters' % self.inputs.\n out_base)\n out_outlier_report = os.path.abspath('%s.eddy_outlier_report' %\n self.inputs.out_base)\n if os.path.exists(out_rotated_bvecs):\n outputs['out_rotated_bvecs'] = out_rotated_bvecs\n if os.path.exists(out_movement_rms):\n outputs['out_movement_rms'] = 
out_movement_rms\n if os.path.exists(out_restricted_movement_rms):\n outputs['out_restricted_movement_rms'\n ] = out_restricted_movement_rms\n if os.path.exists(out_shell_alignment_parameters):\n outputs['out_shell_alignment_parameters'\n ] = out_shell_alignment_parameters\n if os.path.exists(out_outlier_report):\n outputs['out_outlier_report'] = out_outlier_report\n return outputs\n",
"step-2": "<mask token>\n\n\nclass DWIdenoise(CommandLine):\n <mask token>\n _cmd = '/a/software/mrtrix/3.0-rc2/ubuntu-xenial-amd64/bin/dwidenoise'\n input_spec = DWIdenoiseInputSpec\n output_spec = DWIdenoiseOutputSpec\n\n\nclass MRdegibbsInputSpec(CommandLineInputSpec):\n in_file = File(desc='input DWI image', exists=True, mandatory=True,\n position=0, argstr='%s')\n force = traits.Bool(desc='force overwrite of output files', position=-1,\n argstr='-force')\n out_file = File(name_template='%s_unringed', name_source='in_file',\n keep_extension=True, argstr='%s', position=1, desc=\n 'the output unringed DWI image')\n\n\nclass MRdegibbsOutputSpec(TraitedSpec):\n out_file = File(desc='the output unringed DWI image', exists=True)\n\n\nclass MRdegibbs(CommandLine):\n \"\"\"Use MRTrix3 degibbs command for removing the gibbs ringing artefact.\n \n For more information, see\n <https://mrtrix.readthedocs.io/en/latest/reference/commands/mrdegibbs.html>\n \"\"\"\n _cmd = '/a/software/mrtrix/3.0-rc2/ubuntu-xenial-amd64/bin/mrdegibbs'\n input_spec = MRdegibbsInputSpec\n output_spec = MRdegibbsOutputSpec\n\n\nclass EddyInputSpec(CommandLineInputSpec):\n in_file = File(exists=True, mandatory=True, argstr='--imain=%s', desc=\n 'File containing all the images to estimate distortions for')\n in_mask = File(exists=True, mandatory=True, argstr='--mask=%s', desc=\n 'Mask to indicate brain')\n in_index = File(exists=True, mandatory=True, argstr='--index=%s', desc=\n 'File containing indices for all volumes in --imain into --acqp and --topup'\n )\n in_acqp = File(exists=True, mandatory=True, argstr='--acqp=%s', desc=\n 'File containing acquisition parameters')\n in_bvec = File(exists=True, mandatory=True, argstr='--bvecs=%s', desc=\n 'File containing the b-vectors for all volumes in --imain')\n in_bval = File(exists=True, mandatory=True, argstr='--bvals=%s', desc=\n 'File containing the b-values for all volumes in --imain')\n out_base = traits.Str('eddy_corrected', argstr='--out=%s', usedefault=\n True, desc='basename for output (warped) image')\n session = File(exists=True, argstr='--session=%s', desc=\n 'File containing session indices for all volumes in --imain')\n in_topup_fieldcoef = File(exists=True, argstr='--topup=%s', requires=[\n 'in_topup_movpar'], desc='topup file containing the field coefficients'\n )\n in_topup_movpar = File(exists=True, requires=['in_topup_fieldcoef'],\n desc='topup movpar.txt file')\n flm = traits.Enum('linear', 'quadratic', 'cubic', argstr='--flm=%s',\n desc='First level EC model')\n slm = traits.Enum('none', 'linear', 'quadratic', argstr='--slm=%s',\n desc='Second level EC model')\n fep = traits.Bool(False, argstr='--fep', desc=\n 'Fill empty planes in x- or y-directions')\n interp = traits.Enum('spline', 'trilinear', argstr='--interp=%s', desc=\n 'Interpolation model for estimation step')\n nvoxhp = traits.Int(1000, usedefault=True, argstr='--nvoxhp=%s', desc=\n '# of voxels used to estimate the hyperparameters')\n fudge_factor = traits.Float(10.0, usedefault=True, argstr='--ff=%s',\n desc='Fudge factor for hyperparameter error variance')\n dont_sep_offs_move = traits.Bool(False, argstr='--dont_sep_offs_move',\n desc='Do NOT attempt to separate field offset from subject movement')\n dont_peas = traits.Bool(False, argstr='--dont_peas', desc=\n 'Do NOT perform a post-eddy alignment of shells')\n fwhm = traits.Float(desc=\n 'FWHM for conditioning filter when estimating the parameters',\n argstr='--fwhm=%s')\n niter = traits.Int(5, usedefault=True, argstr='--niter=%s', desc=\n 
'Number of iterations')\n method = traits.Enum('jac', 'lsr', argstr='--resamp=%s', desc=\n 'Final resampling method (jacobian/least squares)')\n repol = traits.Bool(False, argstr='--repol', desc=\n 'Detect and replace outlier slices')\n num_threads = traits.Int(1, usedefault=True, nohash=True, desc=\n 'Number of openmp threads to use')\n is_shelled = traits.Bool(False, argstr='--data_is_shelled', desc=\n 'Override internal check to ensure that date are acquired on a set of b-value shells'\n )\n field = traits.Str(argstr='--field=%s', desc=\n 'NonTOPUP fieldmap scaled in Hz - filename has to be provided without an extension. TOPUP is strongly recommended'\n )\n field_mat = File(exists=True, argstr='--field_mat=%s', desc=\n 'Matrix that specifies the relative locations of the field specified by --field and first volume in file --imain'\n )\n use_cuda = traits.Bool(False, desc='Run eddy using cuda gpu')\n\n\nclass EddyOutputSpec(TraitedSpec):\n out_corrected = File(exists=True, desc=\n '4D image file containing all the corrected volumes')\n out_parameter = File(exists=True, desc=\n 'text file with parameters definining the field andmovement for each scan'\n )\n out_rotated_bvecs = File(exists=True, desc=\n 'File containing rotated b-values for all volumes')\n out_movement_rms = File(exists=True, desc=\n 'Summary of the \"total movement\" in each volume')\n out_restricted_movement_rms = File(exists=True, desc=\n 'Summary of the \"total movement\" in each volume disregarding translation in the PE direction'\n )\n out_shell_alignment_parameters = File(exists=True, desc=\n 'File containing rigid body movement parameters between the different shells as estimated by a post-hoc mutual information based registration'\n )\n out_outlier_report = File(exists=True, desc=\n 'Text-file with a plain language report on what outlier slices eddy has found'\n )\n\n\nclass Eddy(CommandLine):\n \"\"\"\n Interface for FSL eddy, a tool for estimating and correcting eddy\n currents induced distortions. 
`User guide\n <http://fsl.fmrib.ox.ac.uk/fsl/fslwiki/Eddy/UsersGuide>`_ and\n `more info regarding acqp file\n <http://fsl.fmrib.ox.ac.uk/fsl/fslwiki/eddy/Faq#How_do_I_know_what_to_put_into_my_--acqp_file>`_.\n Examples\n --------\n >>> from nipype.interfaces.fsl import Eddy\n >>> eddy = Eddy()\n >>> eddy.inputs.in_file = 'epi.nii'\n >>> eddy.inputs.in_mask = 'epi_mask.nii'\n >>> eddy.inputs.in_index = 'epi_index.txt'\n >>> eddy.inputs.in_acqp = 'epi_acqp.txt'\n >>> eddy.inputs.in_bvec = 'bvecs.scheme'\n >>> eddy.inputs.in_bval = 'bvals.scheme'\n >>> eddy.inputs.use_cuda = True\n >>> eddy.cmdline # doctest: +ELLIPSIS\n 'eddy_cuda --ff=10.0 --acqp=epi_acqp.txt --bvals=bvals.scheme --bvecs=bvecs.scheme --imain=epi.nii --index=epi_index.txt --mask=epi_mask.nii --niter=5 --nvoxhp=1000 --out=.../eddy_corrected'\n >>> eddy.inputs.use_cuda = False\n >>> eddy.cmdline # doctest: +ELLIPSIS\n 'eddy_openmp --ff=10.0 --acqp=epi_acqp.txt --bvals=bvals.scheme --bvecs=bvecs.scheme --imain=epi.nii --index=epi_index.txt --mask=epi_mask.nii --niter=5 --nvoxhp=1000 --out=.../eddy_corrected'\n >>> res = eddy.run() # doctest: +SKIP\n \"\"\"\n _cmd = '/data/pt_life_dti/scripts/life2018/eddy_openmp-5.0.11'\n input_spec = EddyInputSpec\n output_spec = EddyOutputSpec\n _num_threads = 1\n\n def __init__(self, **inputs):\n super(Eddy, self).__init__(**inputs)\n self.inputs.on_trait_change(self._num_threads_update, 'num_threads')\n if not isdefined(self.inputs.num_threads):\n self.inputs.num_threads = self._num_threads\n else:\n self._num_threads_update()\n self.inputs.on_trait_change(self._use_cuda, 'use_cuda')\n if isdefined(self.inputs.use_cuda):\n self._use_cuda()\n\n def _num_threads_update(self):\n self._num_threads = self.inputs.num_threads\n if not isdefined(self.inputs.num_threads):\n if 'OMP_NUM_THREADS' in self.inputs.environ:\n del self.inputs.environ['OMP_NUM_THREADS']\n else:\n self.inputs.environ['OMP_NUM_THREADS'] = str(self.inputs.\n num_threads)\n\n def _use_cuda(self):\n self._cmd = 'eddy_cuda' if self.inputs.use_cuda else 'eddy_openmp'\n\n def _run_interface(self, runtime):\n FSLDIR = os.getenv('FSLDIR', '')\n cmd = self._cmd\n if all((FSLDIR != '', cmd == 'eddy_openmp', not os.path.exists(os.\n path.join(FSLDIR, 'bin', cmd)))):\n self._cmd = 'eddy'\n runtime = super(Eddy, self)._run_interface(runtime)\n self._cmd = cmd\n return runtime\n\n def _format_arg(self, name, spec, value):\n if name == 'in_topup_fieldcoef':\n return spec.argstr % value.split('_fieldcoef')[0]\n if name == 'out_base':\n return spec.argstr % os.path.abspath(value)\n return super(Eddy, self)._format_arg(name, spec, value)\n\n def _list_outputs(self):\n outputs = self.output_spec().get()\n outputs['out_corrected'] = os.path.abspath('%s.nii.gz' % self.\n inputs.out_base)\n outputs['out_parameter'] = os.path.abspath('%s.eddy_parameters' %\n self.inputs.out_base)\n out_rotated_bvecs = os.path.abspath('%s.eddy_rotated_bvecs' % self.\n inputs.out_base)\n out_movement_rms = os.path.abspath('%s.eddy_movement_rms' % self.\n inputs.out_base)\n out_restricted_movement_rms = os.path.abspath(\n '%s.eddy_restricted_movement_rms' % self.inputs.out_base)\n out_shell_alignment_parameters = os.path.abspath(\n '%s.eddy_post_eddy_shell_alignment_parameters' % self.inputs.\n out_base)\n out_outlier_report = os.path.abspath('%s.eddy_outlier_report' %\n self.inputs.out_base)\n if os.path.exists(out_rotated_bvecs):\n outputs['out_rotated_bvecs'] = out_rotated_bvecs\n if os.path.exists(out_movement_rms):\n outputs['out_movement_rms'] = 
out_movement_rms\n if os.path.exists(out_restricted_movement_rms):\n outputs['out_restricted_movement_rms'\n ] = out_restricted_movement_rms\n if os.path.exists(out_shell_alignment_parameters):\n outputs['out_shell_alignment_parameters'\n ] = out_shell_alignment_parameters\n if os.path.exists(out_outlier_report):\n outputs['out_outlier_report'] = out_outlier_report\n return outputs\n",
"step-3": "<mask token>\n\n\nclass DWIdenoiseOutputSpec(TraitedSpec):\n <mask token>\n\n\nclass DWIdenoise(CommandLine):\n \"\"\"Use MRTrix3 dwidenoise command to denoisie DWI data and estimate the \n noise level based on the optimal threshold for PCA.\n \n For more information, see\n <https://mrtrix.readthedocs.io/en/latest/reference/commands/dwidenoise.html>\n \"\"\"\n _cmd = '/a/software/mrtrix/3.0-rc2/ubuntu-xenial-amd64/bin/dwidenoise'\n input_spec = DWIdenoiseInputSpec\n output_spec = DWIdenoiseOutputSpec\n\n\nclass MRdegibbsInputSpec(CommandLineInputSpec):\n in_file = File(desc='input DWI image', exists=True, mandatory=True,\n position=0, argstr='%s')\n force = traits.Bool(desc='force overwrite of output files', position=-1,\n argstr='-force')\n out_file = File(name_template='%s_unringed', name_source='in_file',\n keep_extension=True, argstr='%s', position=1, desc=\n 'the output unringed DWI image')\n\n\nclass MRdegibbsOutputSpec(TraitedSpec):\n out_file = File(desc='the output unringed DWI image', exists=True)\n\n\nclass MRdegibbs(CommandLine):\n \"\"\"Use MRTrix3 degibbs command for removing the gibbs ringing artefact.\n \n For more information, see\n <https://mrtrix.readthedocs.io/en/latest/reference/commands/mrdegibbs.html>\n \"\"\"\n _cmd = '/a/software/mrtrix/3.0-rc2/ubuntu-xenial-amd64/bin/mrdegibbs'\n input_spec = MRdegibbsInputSpec\n output_spec = MRdegibbsOutputSpec\n\n\nclass EddyInputSpec(CommandLineInputSpec):\n in_file = File(exists=True, mandatory=True, argstr='--imain=%s', desc=\n 'File containing all the images to estimate distortions for')\n in_mask = File(exists=True, mandatory=True, argstr='--mask=%s', desc=\n 'Mask to indicate brain')\n in_index = File(exists=True, mandatory=True, argstr='--index=%s', desc=\n 'File containing indices for all volumes in --imain into --acqp and --topup'\n )\n in_acqp = File(exists=True, mandatory=True, argstr='--acqp=%s', desc=\n 'File containing acquisition parameters')\n in_bvec = File(exists=True, mandatory=True, argstr='--bvecs=%s', desc=\n 'File containing the b-vectors for all volumes in --imain')\n in_bval = File(exists=True, mandatory=True, argstr='--bvals=%s', desc=\n 'File containing the b-values for all volumes in --imain')\n out_base = traits.Str('eddy_corrected', argstr='--out=%s', usedefault=\n True, desc='basename for output (warped) image')\n session = File(exists=True, argstr='--session=%s', desc=\n 'File containing session indices for all volumes in --imain')\n in_topup_fieldcoef = File(exists=True, argstr='--topup=%s', requires=[\n 'in_topup_movpar'], desc='topup file containing the field coefficients'\n )\n in_topup_movpar = File(exists=True, requires=['in_topup_fieldcoef'],\n desc='topup movpar.txt file')\n flm = traits.Enum('linear', 'quadratic', 'cubic', argstr='--flm=%s',\n desc='First level EC model')\n slm = traits.Enum('none', 'linear', 'quadratic', argstr='--slm=%s',\n desc='Second level EC model')\n fep = traits.Bool(False, argstr='--fep', desc=\n 'Fill empty planes in x- or y-directions')\n interp = traits.Enum('spline', 'trilinear', argstr='--interp=%s', desc=\n 'Interpolation model for estimation step')\n nvoxhp = traits.Int(1000, usedefault=True, argstr='--nvoxhp=%s', desc=\n '# of voxels used to estimate the hyperparameters')\n fudge_factor = traits.Float(10.0, usedefault=True, argstr='--ff=%s',\n desc='Fudge factor for hyperparameter error variance')\n dont_sep_offs_move = traits.Bool(False, argstr='--dont_sep_offs_move',\n desc='Do NOT attempt to separate field offset from subject movement')\n 
dont_peas = traits.Bool(False, argstr='--dont_peas', desc=\n 'Do NOT perform a post-eddy alignment of shells')\n fwhm = traits.Float(desc=\n 'FWHM for conditioning filter when estimating the parameters',\n argstr='--fwhm=%s')\n niter = traits.Int(5, usedefault=True, argstr='--niter=%s', desc=\n 'Number of iterations')\n method = traits.Enum('jac', 'lsr', argstr='--resamp=%s', desc=\n 'Final resampling method (jacobian/least squares)')\n repol = traits.Bool(False, argstr='--repol', desc=\n 'Detect and replace outlier slices')\n num_threads = traits.Int(1, usedefault=True, nohash=True, desc=\n 'Number of openmp threads to use')\n is_shelled = traits.Bool(False, argstr='--data_is_shelled', desc=\n 'Override internal check to ensure that date are acquired on a set of b-value shells'\n )\n field = traits.Str(argstr='--field=%s', desc=\n 'NonTOPUP fieldmap scaled in Hz - filename has to be provided without an extension. TOPUP is strongly recommended'\n )\n field_mat = File(exists=True, argstr='--field_mat=%s', desc=\n 'Matrix that specifies the relative locations of the field specified by --field and first volume in file --imain'\n )\n use_cuda = traits.Bool(False, desc='Run eddy using cuda gpu')\n\n\nclass EddyOutputSpec(TraitedSpec):\n out_corrected = File(exists=True, desc=\n '4D image file containing all the corrected volumes')\n out_parameter = File(exists=True, desc=\n 'text file with parameters definining the field andmovement for each scan'\n )\n out_rotated_bvecs = File(exists=True, desc=\n 'File containing rotated b-values for all volumes')\n out_movement_rms = File(exists=True, desc=\n 'Summary of the \"total movement\" in each volume')\n out_restricted_movement_rms = File(exists=True, desc=\n 'Summary of the \"total movement\" in each volume disregarding translation in the PE direction'\n )\n out_shell_alignment_parameters = File(exists=True, desc=\n 'File containing rigid body movement parameters between the different shells as estimated by a post-hoc mutual information based registration'\n )\n out_outlier_report = File(exists=True, desc=\n 'Text-file with a plain language report on what outlier slices eddy has found'\n )\n\n\nclass Eddy(CommandLine):\n \"\"\"\n Interface for FSL eddy, a tool for estimating and correcting eddy\n currents induced distortions. 
`User guide\n <http://fsl.fmrib.ox.ac.uk/fsl/fslwiki/Eddy/UsersGuide>`_ and\n `more info regarding acqp file\n <http://fsl.fmrib.ox.ac.uk/fsl/fslwiki/eddy/Faq#How_do_I_know_what_to_put_into_my_--acqp_file>`_.\n Examples\n --------\n >>> from nipype.interfaces.fsl import Eddy\n >>> eddy = Eddy()\n >>> eddy.inputs.in_file = 'epi.nii'\n >>> eddy.inputs.in_mask = 'epi_mask.nii'\n >>> eddy.inputs.in_index = 'epi_index.txt'\n >>> eddy.inputs.in_acqp = 'epi_acqp.txt'\n >>> eddy.inputs.in_bvec = 'bvecs.scheme'\n >>> eddy.inputs.in_bval = 'bvals.scheme'\n >>> eddy.inputs.use_cuda = True\n >>> eddy.cmdline # doctest: +ELLIPSIS\n 'eddy_cuda --ff=10.0 --acqp=epi_acqp.txt --bvals=bvals.scheme --bvecs=bvecs.scheme --imain=epi.nii --index=epi_index.txt --mask=epi_mask.nii --niter=5 --nvoxhp=1000 --out=.../eddy_corrected'\n >>> eddy.inputs.use_cuda = False\n >>> eddy.cmdline # doctest: +ELLIPSIS\n 'eddy_openmp --ff=10.0 --acqp=epi_acqp.txt --bvals=bvals.scheme --bvecs=bvecs.scheme --imain=epi.nii --index=epi_index.txt --mask=epi_mask.nii --niter=5 --nvoxhp=1000 --out=.../eddy_corrected'\n >>> res = eddy.run() # doctest: +SKIP\n \"\"\"\n _cmd = '/data/pt_life_dti/scripts/life2018/eddy_openmp-5.0.11'\n input_spec = EddyInputSpec\n output_spec = EddyOutputSpec\n _num_threads = 1\n\n def __init__(self, **inputs):\n super(Eddy, self).__init__(**inputs)\n self.inputs.on_trait_change(self._num_threads_update, 'num_threads')\n if not isdefined(self.inputs.num_threads):\n self.inputs.num_threads = self._num_threads\n else:\n self._num_threads_update()\n self.inputs.on_trait_change(self._use_cuda, 'use_cuda')\n if isdefined(self.inputs.use_cuda):\n self._use_cuda()\n\n def _num_threads_update(self):\n self._num_threads = self.inputs.num_threads\n if not isdefined(self.inputs.num_threads):\n if 'OMP_NUM_THREADS' in self.inputs.environ:\n del self.inputs.environ['OMP_NUM_THREADS']\n else:\n self.inputs.environ['OMP_NUM_THREADS'] = str(self.inputs.\n num_threads)\n\n def _use_cuda(self):\n self._cmd = 'eddy_cuda' if self.inputs.use_cuda else 'eddy_openmp'\n\n def _run_interface(self, runtime):\n FSLDIR = os.getenv('FSLDIR', '')\n cmd = self._cmd\n if all((FSLDIR != '', cmd == 'eddy_openmp', not os.path.exists(os.\n path.join(FSLDIR, 'bin', cmd)))):\n self._cmd = 'eddy'\n runtime = super(Eddy, self)._run_interface(runtime)\n self._cmd = cmd\n return runtime\n\n def _format_arg(self, name, spec, value):\n if name == 'in_topup_fieldcoef':\n return spec.argstr % value.split('_fieldcoef')[0]\n if name == 'out_base':\n return spec.argstr % os.path.abspath(value)\n return super(Eddy, self)._format_arg(name, spec, value)\n\n def _list_outputs(self):\n outputs = self.output_spec().get()\n outputs['out_corrected'] = os.path.abspath('%s.nii.gz' % self.\n inputs.out_base)\n outputs['out_parameter'] = os.path.abspath('%s.eddy_parameters' %\n self.inputs.out_base)\n out_rotated_bvecs = os.path.abspath('%s.eddy_rotated_bvecs' % self.\n inputs.out_base)\n out_movement_rms = os.path.abspath('%s.eddy_movement_rms' % self.\n inputs.out_base)\n out_restricted_movement_rms = os.path.abspath(\n '%s.eddy_restricted_movement_rms' % self.inputs.out_base)\n out_shell_alignment_parameters = os.path.abspath(\n '%s.eddy_post_eddy_shell_alignment_parameters' % self.inputs.\n out_base)\n out_outlier_report = os.path.abspath('%s.eddy_outlier_report' %\n self.inputs.out_base)\n if os.path.exists(out_rotated_bvecs):\n outputs['out_rotated_bvecs'] = out_rotated_bvecs\n if os.path.exists(out_movement_rms):\n outputs['out_movement_rms'] = 
out_movement_rms\n if os.path.exists(out_restricted_movement_rms):\n outputs['out_restricted_movement_rms'\n ] = out_restricted_movement_rms\n if os.path.exists(out_shell_alignment_parameters):\n outputs['out_shell_alignment_parameters'\n ] = out_shell_alignment_parameters\n if os.path.exists(out_outlier_report):\n outputs['out_outlier_report'] = out_outlier_report\n return outputs\n",
"step-4": "<mask token>\n\n\nclass DWIdenoiseInputSpec(CommandLineInputSpec):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\nclass DWIdenoiseOutputSpec(TraitedSpec):\n out_file = File(desc='the output denoised DWI image', exists=True)\n\n\nclass DWIdenoise(CommandLine):\n \"\"\"Use MRTrix3 dwidenoise command to denoisie DWI data and estimate the \n noise level based on the optimal threshold for PCA.\n \n For more information, see\n <https://mrtrix.readthedocs.io/en/latest/reference/commands/dwidenoise.html>\n \"\"\"\n _cmd = '/a/software/mrtrix/3.0-rc2/ubuntu-xenial-amd64/bin/dwidenoise'\n input_spec = DWIdenoiseInputSpec\n output_spec = DWIdenoiseOutputSpec\n\n\nclass MRdegibbsInputSpec(CommandLineInputSpec):\n in_file = File(desc='input DWI image', exists=True, mandatory=True,\n position=0, argstr='%s')\n force = traits.Bool(desc='force overwrite of output files', position=-1,\n argstr='-force')\n out_file = File(name_template='%s_unringed', name_source='in_file',\n keep_extension=True, argstr='%s', position=1, desc=\n 'the output unringed DWI image')\n\n\nclass MRdegibbsOutputSpec(TraitedSpec):\n out_file = File(desc='the output unringed DWI image', exists=True)\n\n\nclass MRdegibbs(CommandLine):\n \"\"\"Use MRTrix3 degibbs command for removing the gibbs ringing artefact.\n \n For more information, see\n <https://mrtrix.readthedocs.io/en/latest/reference/commands/mrdegibbs.html>\n \"\"\"\n _cmd = '/a/software/mrtrix/3.0-rc2/ubuntu-xenial-amd64/bin/mrdegibbs'\n input_spec = MRdegibbsInputSpec\n output_spec = MRdegibbsOutputSpec\n\n\nclass EddyInputSpec(CommandLineInputSpec):\n in_file = File(exists=True, mandatory=True, argstr='--imain=%s', desc=\n 'File containing all the images to estimate distortions for')\n in_mask = File(exists=True, mandatory=True, argstr='--mask=%s', desc=\n 'Mask to indicate brain')\n in_index = File(exists=True, mandatory=True, argstr='--index=%s', desc=\n 'File containing indices for all volumes in --imain into --acqp and --topup'\n )\n in_acqp = File(exists=True, mandatory=True, argstr='--acqp=%s', desc=\n 'File containing acquisition parameters')\n in_bvec = File(exists=True, mandatory=True, argstr='--bvecs=%s', desc=\n 'File containing the b-vectors for all volumes in --imain')\n in_bval = File(exists=True, mandatory=True, argstr='--bvals=%s', desc=\n 'File containing the b-values for all volumes in --imain')\n out_base = traits.Str('eddy_corrected', argstr='--out=%s', usedefault=\n True, desc='basename for output (warped) image')\n session = File(exists=True, argstr='--session=%s', desc=\n 'File containing session indices for all volumes in --imain')\n in_topup_fieldcoef = File(exists=True, argstr='--topup=%s', requires=[\n 'in_topup_movpar'], desc='topup file containing the field coefficients'\n )\n in_topup_movpar = File(exists=True, requires=['in_topup_fieldcoef'],\n desc='topup movpar.txt file')\n flm = traits.Enum('linear', 'quadratic', 'cubic', argstr='--flm=%s',\n desc='First level EC model')\n slm = traits.Enum('none', 'linear', 'quadratic', argstr='--slm=%s',\n desc='Second level EC model')\n fep = traits.Bool(False, argstr='--fep', desc=\n 'Fill empty planes in x- or y-directions')\n interp = traits.Enum('spline', 'trilinear', argstr='--interp=%s', desc=\n 'Interpolation model for estimation step')\n nvoxhp = traits.Int(1000, usedefault=True, argstr='--nvoxhp=%s', desc=\n '# of voxels used to estimate the hyperparameters')\n fudge_factor = traits.Float(10.0, usedefault=True, argstr='--ff=%s',\n desc='Fudge factor for 
hyperparameter error variance')\n dont_sep_offs_move = traits.Bool(False, argstr='--dont_sep_offs_move',\n desc='Do NOT attempt to separate field offset from subject movement')\n dont_peas = traits.Bool(False, argstr='--dont_peas', desc=\n 'Do NOT perform a post-eddy alignment of shells')\n fwhm = traits.Float(desc=\n 'FWHM for conditioning filter when estimating the parameters',\n argstr='--fwhm=%s')\n niter = traits.Int(5, usedefault=True, argstr='--niter=%s', desc=\n 'Number of iterations')\n method = traits.Enum('jac', 'lsr', argstr='--resamp=%s', desc=\n 'Final resampling method (jacobian/least squares)')\n repol = traits.Bool(False, argstr='--repol', desc=\n 'Detect and replace outlier slices')\n num_threads = traits.Int(1, usedefault=True, nohash=True, desc=\n 'Number of openmp threads to use')\n is_shelled = traits.Bool(False, argstr='--data_is_shelled', desc=\n 'Override internal check to ensure that date are acquired on a set of b-value shells'\n )\n field = traits.Str(argstr='--field=%s', desc=\n 'NonTOPUP fieldmap scaled in Hz - filename has to be provided without an extension. TOPUP is strongly recommended'\n )\n field_mat = File(exists=True, argstr='--field_mat=%s', desc=\n 'Matrix that specifies the relative locations of the field specified by --field and first volume in file --imain'\n )\n use_cuda = traits.Bool(False, desc='Run eddy using cuda gpu')\n\n\nclass EddyOutputSpec(TraitedSpec):\n out_corrected = File(exists=True, desc=\n '4D image file containing all the corrected volumes')\n out_parameter = File(exists=True, desc=\n 'text file with parameters definining the field andmovement for each scan'\n )\n out_rotated_bvecs = File(exists=True, desc=\n 'File containing rotated b-values for all volumes')\n out_movement_rms = File(exists=True, desc=\n 'Summary of the \"total movement\" in each volume')\n out_restricted_movement_rms = File(exists=True, desc=\n 'Summary of the \"total movement\" in each volume disregarding translation in the PE direction'\n )\n out_shell_alignment_parameters = File(exists=True, desc=\n 'File containing rigid body movement parameters between the different shells as estimated by a post-hoc mutual information based registration'\n )\n out_outlier_report = File(exists=True, desc=\n 'Text-file with a plain language report on what outlier slices eddy has found'\n )\n\n\nclass Eddy(CommandLine):\n \"\"\"\n Interface for FSL eddy, a tool for estimating and correcting eddy\n currents induced distortions. 
`User guide\n <http://fsl.fmrib.ox.ac.uk/fsl/fslwiki/Eddy/UsersGuide>`_ and\n `more info regarding acqp file\n <http://fsl.fmrib.ox.ac.uk/fsl/fslwiki/eddy/Faq#How_do_I_know_what_to_put_into_my_--acqp_file>`_.\n Examples\n --------\n >>> from nipype.interfaces.fsl import Eddy\n >>> eddy = Eddy()\n >>> eddy.inputs.in_file = 'epi.nii'\n >>> eddy.inputs.in_mask = 'epi_mask.nii'\n >>> eddy.inputs.in_index = 'epi_index.txt'\n >>> eddy.inputs.in_acqp = 'epi_acqp.txt'\n >>> eddy.inputs.in_bvec = 'bvecs.scheme'\n >>> eddy.inputs.in_bval = 'bvals.scheme'\n >>> eddy.inputs.use_cuda = True\n >>> eddy.cmdline # doctest: +ELLIPSIS\n 'eddy_cuda --ff=10.0 --acqp=epi_acqp.txt --bvals=bvals.scheme --bvecs=bvecs.scheme --imain=epi.nii --index=epi_index.txt --mask=epi_mask.nii --niter=5 --nvoxhp=1000 --out=.../eddy_corrected'\n >>> eddy.inputs.use_cuda = False\n >>> eddy.cmdline # doctest: +ELLIPSIS\n 'eddy_openmp --ff=10.0 --acqp=epi_acqp.txt --bvals=bvals.scheme --bvecs=bvecs.scheme --imain=epi.nii --index=epi_index.txt --mask=epi_mask.nii --niter=5 --nvoxhp=1000 --out=.../eddy_corrected'\n >>> res = eddy.run() # doctest: +SKIP\n \"\"\"\n _cmd = '/data/pt_life_dti/scripts/life2018/eddy_openmp-5.0.11'\n input_spec = EddyInputSpec\n output_spec = EddyOutputSpec\n _num_threads = 1\n\n def __init__(self, **inputs):\n super(Eddy, self).__init__(**inputs)\n self.inputs.on_trait_change(self._num_threads_update, 'num_threads')\n if not isdefined(self.inputs.num_threads):\n self.inputs.num_threads = self._num_threads\n else:\n self._num_threads_update()\n self.inputs.on_trait_change(self._use_cuda, 'use_cuda')\n if isdefined(self.inputs.use_cuda):\n self._use_cuda()\n\n def _num_threads_update(self):\n self._num_threads = self.inputs.num_threads\n if not isdefined(self.inputs.num_threads):\n if 'OMP_NUM_THREADS' in self.inputs.environ:\n del self.inputs.environ['OMP_NUM_THREADS']\n else:\n self.inputs.environ['OMP_NUM_THREADS'] = str(self.inputs.\n num_threads)\n\n def _use_cuda(self):\n self._cmd = 'eddy_cuda' if self.inputs.use_cuda else 'eddy_openmp'\n\n def _run_interface(self, runtime):\n FSLDIR = os.getenv('FSLDIR', '')\n cmd = self._cmd\n if all((FSLDIR != '', cmd == 'eddy_openmp', not os.path.exists(os.\n path.join(FSLDIR, 'bin', cmd)))):\n self._cmd = 'eddy'\n runtime = super(Eddy, self)._run_interface(runtime)\n self._cmd = cmd\n return runtime\n\n def _format_arg(self, name, spec, value):\n if name == 'in_topup_fieldcoef':\n return spec.argstr % value.split('_fieldcoef')[0]\n if name == 'out_base':\n return spec.argstr % os.path.abspath(value)\n return super(Eddy, self)._format_arg(name, spec, value)\n\n def _list_outputs(self):\n outputs = self.output_spec().get()\n outputs['out_corrected'] = os.path.abspath('%s.nii.gz' % self.\n inputs.out_base)\n outputs['out_parameter'] = os.path.abspath('%s.eddy_parameters' %\n self.inputs.out_base)\n out_rotated_bvecs = os.path.abspath('%s.eddy_rotated_bvecs' % self.\n inputs.out_base)\n out_movement_rms = os.path.abspath('%s.eddy_movement_rms' % self.\n inputs.out_base)\n out_restricted_movement_rms = os.path.abspath(\n '%s.eddy_restricted_movement_rms' % self.inputs.out_base)\n out_shell_alignment_parameters = os.path.abspath(\n '%s.eddy_post_eddy_shell_alignment_parameters' % self.inputs.\n out_base)\n out_outlier_report = os.path.abspath('%s.eddy_outlier_report' %\n self.inputs.out_base)\n if os.path.exists(out_rotated_bvecs):\n outputs['out_rotated_bvecs'] = out_rotated_bvecs\n if os.path.exists(out_movement_rms):\n outputs['out_movement_rms'] = 
out_movement_rms\n if os.path.exists(out_restricted_movement_rms):\n outputs['out_restricted_movement_rms'\n ] = out_restricted_movement_rms\n if os.path.exists(out_shell_alignment_parameters):\n outputs['out_shell_alignment_parameters'\n ] = out_shell_alignment_parameters\n if os.path.exists(out_outlier_report):\n outputs['out_outlier_report'] = out_outlier_report\n return outputs\n",
"step-5": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Aug 11 13:13:53 2018\n\n@author: zhang\n\"\"\"\n'''\nWarp Commands use during diffusion-weighted images preprocessing\n================================================================\ndwidenoise & mrdegibbs from MRTrix3.0; eddy-openmp from FSL\n-------------------------------------------------------------------------\nfor unkonwn reason they are not included after loading relavant interface\n'''\nfrom nipype.interfaces.base import (CommandLine, \n CommandLineInputSpec,\n File, \n TraitedSpec, \n traits,\n isdefined,\n InputMultiPath)\nimport os\n\n# warp the dwidenoise function from MRtrix\nclass DWIdenoiseInputSpec(CommandLineInputSpec):\n in_file = InputMultiPath(\n File(exists=True), \n mandatory=True,\n position=0,\n argstr=\"%s\",\n desc=\"input DWI image\")\n noise = File(\n argstr='-noise %s',\n desc='noise map')\n force = traits.Bool(\n desc='force overwrite of output files', \n position=-1,\n argstr='-force')\n out_file = File(name_template='%s_denoised',\n name_source='in_file',\n keep_extension=True,\n argstr=\"%s\",\n position=1,\n desc=\"the output denoised DWI image\")\n\nclass DWIdenoiseOutputSpec(TraitedSpec):\n out_file = File(desc = \"the output denoised DWI image\", exists = True)\n\nclass DWIdenoise(CommandLine):\n \"\"\"Use MRTrix3 dwidenoise command to denoisie DWI data and estimate the \n noise level based on the optimal threshold for PCA.\n \n For more information, see\n <https://mrtrix.readthedocs.io/en/latest/reference/commands/dwidenoise.html>\n \"\"\"\n _cmd = '/a/software/mrtrix/3.0-rc2/ubuntu-xenial-amd64/bin/dwidenoise'\n input_spec = DWIdenoiseInputSpec\n output_spec = DWIdenoiseOutputSpec\n\n\n# warp the unring function from MRtrix \nclass MRdegibbsInputSpec(CommandLineInputSpec):\n in_file = File(\n desc=\"input DWI image\", \n exists=True, \n mandatory=True,\n position=0,\n argstr=\"%s\")\n force = traits.Bool(\n desc='force overwrite of output files', \n position=-1,\n argstr='-force')\n out_file = File(name_template='%s_unringed',\n name_source='in_file',\n keep_extension=True,\n argstr=\"%s\",\n position=1,\n desc=\"the output unringed DWI image\")\n\nclass MRdegibbsOutputSpec(TraitedSpec):\n out_file = File(desc = \"the output unringed DWI image\", exists = True)\n\nclass MRdegibbs(CommandLine):\n \"\"\"Use MRTrix3 degibbs command for removing the gibbs ringing artefact.\n \n For more information, see\n <https://mrtrix.readthedocs.io/en/latest/reference/commands/mrdegibbs.html>\n \"\"\"\n _cmd = '/a/software/mrtrix/3.0-rc2/ubuntu-xenial-amd64/bin/mrdegibbs'\n input_spec = MRdegibbsInputSpec\n output_spec = MRdegibbsOutputSpec\n\n\n# Wrap FSL eddy (copy from nipype interface)\nclass EddyInputSpec(CommandLineInputSpec):\n in_file = File(\n exists=True,\n mandatory=True,\n argstr='--imain=%s',\n desc=('File containing all the images to estimate '\n 'distortions for'))\n in_mask = File(\n exists=True,\n mandatory=True,\n argstr='--mask=%s',\n desc='Mask to indicate brain')\n in_index = File(\n exists=True,\n mandatory=True,\n argstr='--index=%s',\n desc=('File containing indices for all volumes in --imain '\n 'into --acqp and --topup'))\n in_acqp = File(\n exists=True,\n mandatory=True,\n argstr='--acqp=%s',\n desc='File containing acquisition parameters')\n in_bvec = File(\n exists=True,\n mandatory=True,\n argstr='--bvecs=%s',\n desc=('File containing the b-vectors for all volumes in '\n '--imain'))\n in_bval = File(\n exists=True,\n mandatory=True,\n argstr='--bvals=%s',\n desc=('File 
containing the b-values for all volumes in '\n '--imain'))\n out_base = traits.Str(\n 'eddy_corrected',\n argstr='--out=%s',\n usedefault=True,\n desc=('basename for output (warped) image'))\n session = File(\n exists=True,\n argstr='--session=%s',\n desc=('File containing session indices for all volumes in '\n '--imain'))\n in_topup_fieldcoef = File(\n exists=True,\n argstr=\"--topup=%s\",\n requires=['in_topup_movpar'],\n desc=('topup file containing the field '\n 'coefficients'))\n in_topup_movpar = File(\n exists=True,\n requires=['in_topup_fieldcoef'],\n desc='topup movpar.txt file')\n flm = traits.Enum(\n 'linear',\n 'quadratic',\n 'cubic',\n argstr='--flm=%s',\n desc='First level EC model')\n slm = traits.Enum(\n 'none',\n 'linear',\n 'quadratic',\n argstr='--slm=%s',\n desc='Second level EC model')\n fep = traits.Bool(\n False, argstr='--fep', desc='Fill empty planes in x- or y-directions')\n interp = traits.Enum(\n 'spline',\n 'trilinear',\n argstr='--interp=%s',\n desc='Interpolation model for estimation step')\n nvoxhp = traits.Int(\n 1000, usedefault=True,\n argstr='--nvoxhp=%s',\n desc=('# of voxels used to estimate the '\n 'hyperparameters'))\n fudge_factor = traits.Float(\n 10.0, usedefault=True,\n argstr='--ff=%s',\n desc=('Fudge factor for hyperparameter '\n 'error variance'))\n dont_sep_offs_move = traits.Bool(\n False,\n argstr='--dont_sep_offs_move',\n desc=('Do NOT attempt to separate '\n 'field offset from subject '\n 'movement'))\n dont_peas = traits.Bool(\n False,\n argstr='--dont_peas',\n desc=\"Do NOT perform a post-eddy alignment of \"\n \"shells\")\n fwhm = traits.Float(\n desc=('FWHM for conditioning filter when estimating '\n 'the parameters'),\n argstr='--fwhm=%s')\n niter = traits.Int(5, usedefault=True,\n argstr='--niter=%s', desc='Number of iterations')\n method = traits.Enum(\n 'jac',\n 'lsr',\n argstr='--resamp=%s',\n desc=('Final resampling method (jacobian/least '\n 'squares)'))\n repol = traits.Bool(\n False, argstr='--repol', desc='Detect and replace outlier slices')\n num_threads = traits.Int(\n 1,\n usedefault=True,\n nohash=True,\n desc=\"Number of openmp threads to use\")\n is_shelled = traits.Bool(\n False,\n argstr='--data_is_shelled',\n desc=\"Override internal check to ensure that \"\n \"date are acquired on a set of b-value \"\n \"shells\")\n field = traits.Str(\n argstr='--field=%s',\n desc=\"NonTOPUP fieldmap scaled in Hz - filename has \"\n \"to be provided without an extension. 
TOPUP is \"\n \"strongly recommended\")\n field_mat = File(\n exists=True,\n argstr='--field_mat=%s',\n desc=\"Matrix that specifies the relative locations of \"\n \"the field specified by --field and first volume \"\n \"in file --imain\")\n use_cuda = traits.Bool(False, desc=\"Run eddy using cuda gpu\")\n\n\nclass EddyOutputSpec(TraitedSpec):\n out_corrected = File(\n exists=True, desc='4D image file containing all the corrected volumes')\n out_parameter = File(\n exists=True,\n desc=('text file with parameters definining the field and'\n 'movement for each scan'))\n out_rotated_bvecs = File(\n exists=True, desc='File containing rotated b-values for all volumes')\n out_movement_rms = File(\n exists=True, desc='Summary of the \"total movement\" in each volume')\n out_restricted_movement_rms = File(\n exists=True,\n desc=('Summary of the \"total movement\" in each volume '\n 'disregarding translation in the PE direction'))\n out_shell_alignment_parameters = File(\n exists=True,\n desc=('File containing rigid body movement parameters '\n 'between the different shells as estimated by a '\n 'post-hoc mutual information based registration'))\n out_outlier_report = File(\n exists=True,\n desc=('Text-file with a plain language report on what '\n 'outlier slices eddy has found'))\n\n\nclass Eddy(CommandLine):\n \"\"\"\n Interface for FSL eddy, a tool for estimating and correcting eddy\n currents induced distortions. `User guide\n <http://fsl.fmrib.ox.ac.uk/fsl/fslwiki/Eddy/UsersGuide>`_ and\n `more info regarding acqp file\n <http://fsl.fmrib.ox.ac.uk/fsl/fslwiki/eddy/Faq#How_do_I_know_what_to_put_into_my_--acqp_file>`_.\n Examples\n --------\n >>> from nipype.interfaces.fsl import Eddy\n >>> eddy = Eddy()\n >>> eddy.inputs.in_file = 'epi.nii'\n >>> eddy.inputs.in_mask = 'epi_mask.nii'\n >>> eddy.inputs.in_index = 'epi_index.txt'\n >>> eddy.inputs.in_acqp = 'epi_acqp.txt'\n >>> eddy.inputs.in_bvec = 'bvecs.scheme'\n >>> eddy.inputs.in_bval = 'bvals.scheme'\n >>> eddy.inputs.use_cuda = True\n >>> eddy.cmdline # doctest: +ELLIPSIS\n 'eddy_cuda --ff=10.0 --acqp=epi_acqp.txt --bvals=bvals.scheme \\\n--bvecs=bvecs.scheme --imain=epi.nii --index=epi_index.txt \\\n--mask=epi_mask.nii --niter=5 --nvoxhp=1000 --out=.../eddy_corrected'\n >>> eddy.inputs.use_cuda = False\n >>> eddy.cmdline # doctest: +ELLIPSIS\n 'eddy_openmp --ff=10.0 --acqp=epi_acqp.txt --bvals=bvals.scheme \\\n--bvecs=bvecs.scheme --imain=epi.nii --index=epi_index.txt \\\n--mask=epi_mask.nii --niter=5 --nvoxhp=1000 --out=.../eddy_corrected'\n >>> res = eddy.run() # doctest: +SKIP\n \"\"\"\n _cmd = '/data/pt_life_dti/scripts/life2018/eddy_openmp-5.0.11'\n input_spec = EddyInputSpec\n output_spec = EddyOutputSpec\n\n _num_threads = 1\n\n def __init__(self, **inputs):\n super(Eddy, self).__init__(**inputs)\n self.inputs.on_trait_change(self._num_threads_update, 'num_threads')\n if not isdefined(self.inputs.num_threads):\n self.inputs.num_threads = self._num_threads\n else:\n self._num_threads_update()\n self.inputs.on_trait_change(self._use_cuda, 'use_cuda')\n if isdefined(self.inputs.use_cuda):\n self._use_cuda()\n\n def _num_threads_update(self):\n self._num_threads = self.inputs.num_threads\n if not isdefined(self.inputs.num_threads):\n if 'OMP_NUM_THREADS' in self.inputs.environ:\n del self.inputs.environ['OMP_NUM_THREADS']\n else:\n self.inputs.environ['OMP_NUM_THREADS'] = str(\n self.inputs.num_threads)\n\n def _use_cuda(self):\n self._cmd = 'eddy_cuda' if self.inputs.use_cuda else 'eddy_openmp'\n\n def _run_interface(self, runtime):\n 
# If 'eddy_openmp' is missing, use 'eddy'\n FSLDIR = os.getenv('FSLDIR', '')\n cmd = self._cmd\n if all((FSLDIR != '', cmd == 'eddy_openmp',\n not os.path.exists(os.path.join(FSLDIR, 'bin', cmd)))):\n self._cmd = 'eddy'\n runtime = super(Eddy, self)._run_interface(runtime)\n\n # Restore command to avoid side-effects\n self._cmd = cmd\n return runtime\n\n def _format_arg(self, name, spec, value):\n if name == 'in_topup_fieldcoef':\n return spec.argstr % value.split('_fieldcoef')[0]\n if name == 'out_base':\n return spec.argstr % os.path.abspath(value)\n return super(Eddy, self)._format_arg(name, spec, value)\n\n def _list_outputs(self):\n outputs = self.output_spec().get()\n outputs['out_corrected'] = os.path.abspath(\n '%s.nii.gz' % self.inputs.out_base)\n outputs['out_parameter'] = os.path.abspath(\n '%s.eddy_parameters' % self.inputs.out_base)\n\n # File generation might depend on the version of EDDY\n out_rotated_bvecs = os.path.abspath(\n '%s.eddy_rotated_bvecs' % self.inputs.out_base)\n out_movement_rms = os.path.abspath(\n '%s.eddy_movement_rms' % self.inputs.out_base)\n out_restricted_movement_rms = os.path.abspath(\n '%s.eddy_restricted_movement_rms' % self.inputs.out_base)\n out_shell_alignment_parameters = os.path.abspath(\n '%s.eddy_post_eddy_shell_alignment_parameters' %\n self.inputs.out_base)\n out_outlier_report = os.path.abspath(\n '%s.eddy_outlier_report' % self.inputs.out_base)\n\n if os.path.exists(out_rotated_bvecs):\n outputs['out_rotated_bvecs'] = out_rotated_bvecs\n if os.path.exists(out_movement_rms):\n outputs['out_movement_rms'] = out_movement_rms\n if os.path.exists(out_restricted_movement_rms):\n outputs['out_restricted_movement_rms'] = \\\n out_restricted_movement_rms\n if os.path.exists(out_shell_alignment_parameters):\n outputs['out_shell_alignment_parameters'] = \\\n out_shell_alignment_parameters\n if os.path.exists(out_outlier_report):\n outputs['out_outlier_report'] = out_outlier_report\n\n return outputs\n",
"step-ids": [
20,
22,
24,
26,
29
]
}
|
[
20,
22,
24,
26,
29
] |
"""
Configuration management.
Environment must be set before use.
Call .get() to obtain configuration variable. If the variable does not exist
in the set environment, then
"""
CONFIG_KEY = "config_class"
ENV = {}
class EMPTY:
"""
Signifies that a default value was not set. Should trigger an error if
default is set to EMPTY and an attribute does not exist.
"""
pass
class Config:
"""
Configuration management entity.
Args:
name (str): Name of config environment.
fallback (bool): Indicate if configuration should fallback to base.
"""
no_config_err = "No such config variable {}"
def __init__(self, name, fallback):
from importlib import import_module
from os import listdir
from os.path import dirname
self.config_path = dirname(__file__)
self.name = name
self.fallback = fallback
# List of config modules available
        self.config_modules = set(
            i[:-3]  # drop the ".py" extension; str.strip(".py") would mangle names like "proxy"
            for i in listdir(self.config_path)
            if i.endswith(".py") and i != "__init__.py"
        )
if name not in self.config_modules:
err = "Config environment {} does not exist".format(name)
raise AttributeError(err)
if self.fallback:
# Fallback configuration module.
self.base = import_module("illume.config.base")
# Desired configuration module.
self.module = import_module("illume.config.{}".format(self.name))
def get(self, name, default):
"""Get config value"""
        value = getattr(self.module, name, default)
        # Use identity checks against the EMPTY sentinel: "==" could misbehave
        # for config values with custom __eq__ (e.g. arrays).
        if value is not EMPTY:
            return value
        if not self.fallback:
            raise AttributeError(self.no_config_err.format(name))
        value = getattr(self.base, name, default)
        if value is EMPTY:
            raise AttributeError(self.no_config_err.format(name))
        return value
def setenv(name, fallback=True):
"""Set configuration environment."""
if CONFIG_KEY in ENV:
raise AttributeError("Config environment already set.")
config_class = Config(name, fallback)
ENV[CONFIG_KEY] = config_class
def get(name, default=EMPTY):
"""Get configuration variable."""
config_class = ENV.get(CONFIG_KEY, None)
if config_class is None:
raise AttributeError("Config environment not set.")
return config_class.get(name, default)
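
# Usage sketch (environment name and variable are illustrative; assumes an
# ``illume/config/dev.py`` module exists alongside ``base.py``):
#
#     setenv("dev")                        # select illume.config.dev, falling back to base
#     debug = get("DEBUG", default=False)  # raises AttributeError only if no default anywhere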
|
normal
|
{
"blob_id": "263d2fe43cf8747f20fd51897ba003c9c4cb4280",
"index": 9907,
"step-1": "<mask token>\n\n\nclass EMPTY:\n <mask token>\n pass\n\n\nclass Config:\n \"\"\"\n Configuration management entity.\n\n Args:\n name (str): Name of config environment.\n fallback (bool): Indicate if configuration should fallback to base.\n \"\"\"\n no_config_err = 'No such config variable {}'\n\n def __init__(self, name, fallback):\n from importlib import import_module\n from os import listdir\n from os.path import dirname\n self.config_path = dirname(__file__)\n self.name = name\n self.fallback = fallback\n self.config_modules = set([i.strip('.py') for i in listdir(self.\n config_path) if '.py' in i and i != '__init__.py'])\n if name not in self.config_modules:\n err = 'Config environment {} does not exist'.format(name)\n raise AttributeError(err)\n if self.fallback:\n self.base = import_module('illume.config.base')\n self.module = import_module('illume.config.{}'.format(self.name))\n\n def get(self, name, default):\n \"\"\"Get config value\"\"\"\n value = getattr(self.module, name, default)\n if value != EMPTY:\n return value\n elif value == EMPTY and not self.fallback:\n raise AttributeError(self.no_config_err.format(name))\n elif value == EMPTY and self.fallback:\n value = getattr(self.base, name, default)\n if value == EMPTY:\n raise AttributeError(self.no_config_err.format(name))\n return value\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass EMPTY:\n \"\"\"\n Signifies that a default value was not set. Should trigger an error if\n default is set to EMPTY and an attribute does not exist.\n \"\"\"\n pass\n\n\nclass Config:\n \"\"\"\n Configuration management entity.\n\n Args:\n name (str): Name of config environment.\n fallback (bool): Indicate if configuration should fallback to base.\n \"\"\"\n no_config_err = 'No such config variable {}'\n\n def __init__(self, name, fallback):\n from importlib import import_module\n from os import listdir\n from os.path import dirname\n self.config_path = dirname(__file__)\n self.name = name\n self.fallback = fallback\n self.config_modules = set([i.strip('.py') for i in listdir(self.\n config_path) if '.py' in i and i != '__init__.py'])\n if name not in self.config_modules:\n err = 'Config environment {} does not exist'.format(name)\n raise AttributeError(err)\n if self.fallback:\n self.base = import_module('illume.config.base')\n self.module = import_module('illume.config.{}'.format(self.name))\n\n def get(self, name, default):\n \"\"\"Get config value\"\"\"\n value = getattr(self.module, name, default)\n if value != EMPTY:\n return value\n elif value == EMPTY and not self.fallback:\n raise AttributeError(self.no_config_err.format(name))\n elif value == EMPTY and self.fallback:\n value = getattr(self.base, name, default)\n if value == EMPTY:\n raise AttributeError(self.no_config_err.format(name))\n return value\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass EMPTY:\n \"\"\"\n Signifies that a default value was not set. Should trigger an error if\n default is set to EMPTY and an attribute does not exist.\n \"\"\"\n pass\n\n\nclass Config:\n \"\"\"\n Configuration management entity.\n\n Args:\n name (str): Name of config environment.\n fallback (bool): Indicate if configuration should fallback to base.\n \"\"\"\n no_config_err = 'No such config variable {}'\n\n def __init__(self, name, fallback):\n from importlib import import_module\n from os import listdir\n from os.path import dirname\n self.config_path = dirname(__file__)\n self.name = name\n self.fallback = fallback\n self.config_modules = set([i.strip('.py') for i in listdir(self.\n config_path) if '.py' in i and i != '__init__.py'])\n if name not in self.config_modules:\n err = 'Config environment {} does not exist'.format(name)\n raise AttributeError(err)\n if self.fallback:\n self.base = import_module('illume.config.base')\n self.module = import_module('illume.config.{}'.format(self.name))\n\n def get(self, name, default):\n \"\"\"Get config value\"\"\"\n value = getattr(self.module, name, default)\n if value != EMPTY:\n return value\n elif value == EMPTY and not self.fallback:\n raise AttributeError(self.no_config_err.format(name))\n elif value == EMPTY and self.fallback:\n value = getattr(self.base, name, default)\n if value == EMPTY:\n raise AttributeError(self.no_config_err.format(name))\n return value\n\n\n<mask token>\n\n\ndef get(name, default=EMPTY):\n \"\"\"Get configuration variable.\"\"\"\n config_class = ENV.get(CONFIG_KEY, None)\n if config_class is None:\n raise AttributeError('Config environment not set.')\n return config_class.get(name, default)\n",
"step-4": "<mask token>\n\n\nclass EMPTY:\n \"\"\"\n Signifies that a default value was not set. Should trigger an error if\n default is set to EMPTY and an attribute does not exist.\n \"\"\"\n pass\n\n\nclass Config:\n \"\"\"\n Configuration management entity.\n\n Args:\n name (str): Name of config environment.\n fallback (bool): Indicate if configuration should fallback to base.\n \"\"\"\n no_config_err = 'No such config variable {}'\n\n def __init__(self, name, fallback):\n from importlib import import_module\n from os import listdir\n from os.path import dirname\n self.config_path = dirname(__file__)\n self.name = name\n self.fallback = fallback\n self.config_modules = set([i.strip('.py') for i in listdir(self.\n config_path) if '.py' in i and i != '__init__.py'])\n if name not in self.config_modules:\n err = 'Config environment {} does not exist'.format(name)\n raise AttributeError(err)\n if self.fallback:\n self.base = import_module('illume.config.base')\n self.module = import_module('illume.config.{}'.format(self.name))\n\n def get(self, name, default):\n \"\"\"Get config value\"\"\"\n value = getattr(self.module, name, default)\n if value != EMPTY:\n return value\n elif value == EMPTY and not self.fallback:\n raise AttributeError(self.no_config_err.format(name))\n elif value == EMPTY and self.fallback:\n value = getattr(self.base, name, default)\n if value == EMPTY:\n raise AttributeError(self.no_config_err.format(name))\n return value\n\n\ndef setenv(name, fallback=True):\n \"\"\"Set configuration environment.\"\"\"\n if CONFIG_KEY in ENV:\n raise AttributeError('Config environment already set.')\n config_class = Config(name, fallback)\n ENV[CONFIG_KEY] = config_class\n\n\ndef get(name, default=EMPTY):\n \"\"\"Get configuration variable.\"\"\"\n config_class = ENV.get(CONFIG_KEY, None)\n if config_class is None:\n raise AttributeError('Config environment not set.')\n return config_class.get(name, default)\n",
"step-5": "\"\"\"\nConfiguration management.\n\nEnvironment must be set before use.\n\nCall .get() to obtain configuration variable. If the variable does not exist\nin the set environment, then\n\"\"\"\n\n\nCONFIG_KEY = \"config_class\"\nENV = {}\n\n\nclass EMPTY:\n\n \"\"\"\n Signifies that a default value was not set. Should trigger an error if\n default is set to EMPTY and an attribute does not exist.\n \"\"\"\n\n pass\n\n\nclass Config:\n\n \"\"\"\n Configuration management entity.\n\n Args:\n name (str): Name of config environment.\n fallback (bool): Indicate if configuration should fallback to base.\n \"\"\"\n\n no_config_err = \"No such config variable {}\"\n\n def __init__(self, name, fallback):\n from importlib import import_module\n from os import listdir\n from os.path import dirname\n\n self.config_path = dirname(__file__)\n self.name = name\n self.fallback = fallback\n\n # List of config modules available\n self.config_modules = set([\n i.strip(\".py\")\n for i in listdir(self.config_path)\n if \".py\" in i and i != \"__init__.py\"\n ])\n\n if name not in self.config_modules:\n err = \"Config environment {} does not exist\".format(name)\n\n raise AttributeError(err)\n\n if self.fallback:\n # Fallback configuration module.\n self.base = import_module(\"illume.config.base\")\n\n # Desired configuration module.\n self.module = import_module(\"illume.config.{}\".format(self.name))\n\n def get(self, name, default):\n \"\"\"Get config value\"\"\"\n value = getattr(self.module, name, default)\n\n if value != EMPTY:\n return value\n elif value == EMPTY and not self.fallback:\n raise AttributeError(self.no_config_err.format(name))\n elif value == EMPTY and self.fallback:\n value = getattr(self.base, name, default)\n\n if value == EMPTY:\n raise AttributeError(self.no_config_err.format(name))\n\n return value\n\n\ndef setenv(name, fallback=True):\n \"\"\"Set configuration environment.\"\"\"\n if CONFIG_KEY in ENV:\n raise AttributeError(\"Config environment already set.\")\n\n config_class = Config(name, fallback)\n\n ENV[CONFIG_KEY] = config_class\n\n\ndef get(name, default=EMPTY):\n \"\"\"Get configuration variable.\"\"\"\n config_class = ENV.get(CONFIG_KEY, None)\n\n if config_class is None:\n raise AttributeError(\"Config environment not set.\")\n\n return config_class.get(name, default)\n",
"step-ids": [
6,
7,
8,
9,
11
]
}
|
[
6,
7,
8,
9,
11
] |
import os
from enum import Enum
STAFF_CODE = os.getenv('STAFF_CODE', '20190607')
ADMIN_CODE = os.getenv('ADMIN_CODE', 'nerd-bear')
TEAM_NAMES = (
'밍크고래팀',
'혹등고래팀',
'대왕고래팀',
'향유고래팀',
)
TEAM_COUNT = 3
MAX_TEAM_MEMBER_COUNT = 10
class TIME_CHECK(Enum):
BEFORE_START = 0
DURING_TIME = 1
AFTER_END = 2
|
normal
|
{
"blob_id": "967984444d9e26452226b13f33c5afbc96b5fe2b",
"index": 3176,
"step-1": "<mask token>\n\n\nclass TIME_CHECK(Enum):\n <mask token>\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass TIME_CHECK(Enum):\n BEFORE_START = 0\n DURING_TIME = 1\n AFTER_END = 2\n",
"step-3": "<mask token>\nSTAFF_CODE = os.getenv('STAFF_CODE', '20190607')\nADMIN_CODE = os.getenv('ADMIN_CODE', 'nerd-bear')\nTEAM_NAMES = '밍크고래팀', '혹등고래팀', '대왕고래팀', '향유고래팀'\nTEAM_COUNT = 3\nMAX_TEAM_MEMBER_COUNT = 10\n\n\nclass TIME_CHECK(Enum):\n BEFORE_START = 0\n DURING_TIME = 1\n AFTER_END = 2\n",
"step-4": "import os\nfrom enum import Enum\nSTAFF_CODE = os.getenv('STAFF_CODE', '20190607')\nADMIN_CODE = os.getenv('ADMIN_CODE', 'nerd-bear')\nTEAM_NAMES = '밍크고래팀', '혹등고래팀', '대왕고래팀', '향유고래팀'\nTEAM_COUNT = 3\nMAX_TEAM_MEMBER_COUNT = 10\n\n\nclass TIME_CHECK(Enum):\n BEFORE_START = 0\n DURING_TIME = 1\n AFTER_END = 2\n",
"step-5": "import os\nfrom enum import Enum\n\nSTAFF_CODE = os.getenv('STAFF_CODE', '20190607')\nADMIN_CODE = os.getenv('ADMIN_CODE', 'nerd-bear')\n\nTEAM_NAMES = (\n '밍크고래팀',\n '혹등고래팀',\n '대왕고래팀',\n '향유고래팀',\n)\nTEAM_COUNT = 3\nMAX_TEAM_MEMBER_COUNT = 10\n\n\nclass TIME_CHECK(Enum):\n BEFORE_START = 0\n DURING_TIME = 1\n AFTER_END = 2\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
#!/usr/bin/env python
# coding: utf-8
import sys,pysrt
import urllib2, json
import re
from urlparse import urlparse
import os
from mtranslate import translate
from argparse import ArgumentParser
reload(sys)  # Python 2 shim so implicit str/unicode conversions default to UTF-8
sys.setdefaultencoding('utf8')
#----------------------------------------------------------------------------------------------------------------------------------
def cleanhtml(raw_html):
'''
TODO: refactor this to make it as generic as possible
'''
    cleanr = re.compile('<.*?>')
    cleantext = re.sub(cleanr, '', raw_html)  # drop HTML tags
    cleantext = cleantext.replace('[vacilación]', '...')  # transcriber's hesitation marker
    cleantext = cleantext.replace(u'\xa0', ' ')  # assumed intent: the original replaced ' ' with ' ' (a no-op), likely a mangled non-breaking space
    cleantext = urlparse(cleantext).path  # plain text passes through unchanged; URLs are reduced to their path
    return cleantext
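
# Illustrative behaviour of cleanhtml (constructed example, not from the original source):
#   cleanhtml('<i>hola</i> [vacilación] mundo')  ->  'hola ... mundo'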
#----------------------------------------------------------------------------------------------------------------------------------
def generateSub(args,_subtitle,_filename):
subs = pysrt.from_string(str(_subtitle).decode('utf-8'))
output = args.OUTPUT + _filename
for index in range(len(subs)):
if subs[index].text != '':
if args.VERBOSE:
print "Translating line:" + cleanhtml(subs[index].text)
subs[index].text = translate(cleanhtml(subs[index].text).encode('utf-8'),args.LANG_TO,args.LANG_FROM)
subs.save(output)
#----------------------------------------------------------------------------------------------------------------------------------
def generateSubMedia(args):
subLangURL= 'https://media.upv.es/rest/plugins/admin-plugin-translectures/langs/'
subUrl = 'https://media.upv.es/rest/plugins/admin-plugin-translectures/srt/'
langlist =json.loads(urllib2.urlopen(subLangURL + args.SOURCE).read())
for lang in langlist:
if lang['lang']==args.LANG_FROM:
sub = urllib2.urlopen(subUrl + args.SOURCE +'/' + args.LANG_FROM).read()
generateSub(args,sub,args.SOURCE+'_' + args.LANG_TO.lower() + '.srt')
return 0
#----------------------------------------------------------------------------------------------------------------------------------
def generateSubFile(args,_filename=None):
if _filename is None:
_source = args.SOURCE
else:
_source = _filename
if _source[-4:]=='.srt':
substring = open(_source,'r').read()
generateSub(args,substring,_source.replace('.srt','_' + args.LANG_TO + '.srt'))
else:
print "Incorrect file format"
return -1
#----------------------------------------------------------------------------------------------------------------------------------
def generateSubFolder(args):
    if os.path.isdir(args.SOURCE):
        for root, dirs, files in os.walk(args.SOURCE):
            for f in files:
                if f[-4:] == '.srt':
                    substring = open(os.path.join(root, f), 'r').read()
                    generateSub(args, substring, f.replace('.srt', '_' + args.LANG_TO + '.srt'))
    else:
        print "Source is not a directory"
        return -1
#----------------------------------------------------------------------------------------------------------------------------------
def main():
parser = ArgumentParser(description='Translate subtitle from media id, file or folder', parents=[])
parser.add_argument('-v', '--verbose', action='store_true', dest='VERBOSE', default=False, help='Verbose')
parser.add_argument('-t', '--sourceType', type=str, dest='SOURCE_TYPE', help='source type, pick between media|file|folder')
parser.add_argument('-s', '--source', type=str, dest='SOURCE', help='source of the subtitle/s')
parser.add_argument('-langf', '--langFrom', type=str, dest='LANG_FROM', default='es', help='Language that we want to translate')
parser.add_argument('-langt', '--langTo', type=str, dest='LANG_TO', default='en', help='Language of the output subtitle')
parser.add_argument('-o', '--output', type=str, dest='OUTPUT', default='./', help='Output folder to store the result')
args = parser.parse_args()
if (args.SOURCE_TYPE.lower()=='file'):
try:
generateSubFile(args)
        except Exception:  # avoid a bare except so SystemExit/KeyboardInterrupt still propagate
return -1
elif (args.SOURCE_TYPE.lower()=='folder'):
try:
generateSubFolder(args)
        except Exception:
return -1
elif (args.SOURCE_TYPE.lower()=='media'):
try:
generateSubMedia(args)
        except Exception:
return -1
else:
print "Choose a valid source type"
return 0
#----------------------------------------------------------------------------------------------------------------------------------
if (__name__ == '__main__'):
main()
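
#----------------------------------------------------------------------------------------------------------------------------------
# Example invocations (script name, paths and media id are illustrative):
#   python translate_subs.py -t file   -s lecture.srt -langf es -langt en -o ./out/
#   python translate_subs.py -t folder -s ./subs/     -langf es -langt en
#   python translate_subs.py -t media  -s 12345       -langf es -langt en -v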
|
normal
|
{
"blob_id": "e51c57f4487a3225936d073142f1f770815c0d47",
"index": 7589,
"step-1": "#!/usr/bin/env python\n# coding: utf-8\nimport sys,pysrt\nimport urllib2,urllib,json\nimport re\nfrom urlparse import urlparse\nimport os\nfrom mtranslate import translate\nfrom argparse import ArgumentParser\nreload(sys) \nsys.setdefaultencoding('utf8')\n\n#----------------------------------------------------------------------------------------------------------------------------------\ndef cleanhtml(raw_html):\n '''\n TODO: refactor this to make it as generic as possible\n '''\n cleanr = re.compile('<.*?>')\n cleantext = re.sub(cleanr, '', raw_html)\n cleantext = cleantext.replace('[vacilación]','...')\n cleantext = cleantext.replace(' ',' ')\n cleantext = urlparse(cleantext).path \n return cleantext\n\n#----------------------------------------------------------------------------------------------------------------------------------\ndef generateSub(args,_subtitle,_filename): \n subs = pysrt.from_string(str(_subtitle).decode('utf-8')) \n output = args.OUTPUT + _filename\n #file = pysrt.SubRipFile() \n text = '' \n for index in range(len(subs)): \n if subs[index].text != '': \n if args.VERBOSE:\n print \"Translating line:\" + cleanhtml(subs[index].text) \n subs[index].text = translate(cleanhtml(subs[index].text).encode('utf-8'),args.LANG_TO,args.LANG_FROM) \n subs.save(output)\n\n#----------------------------------------------------------------------------------------------------------------------------------\ndef generateSubMedia(args):\n subLangURL= 'https://media.upv.es/rest/plugins/admin-plugin-translectures/langs/'\n subUrl = 'https://media.upv.es/rest/plugins/admin-plugin-translectures/srt/' \n langlist =json.loads(urllib2.urlopen(subLangURL + args.SOURCE).read()) \n for lang in langlist: \n if lang['lang']==args.LANG_FROM: \n sub = urllib2.urlopen(subUrl + args.SOURCE +'/' + args.LANG_FROM).read() \n generateSub(args,sub,args.SOURCE+'_' + args.LANG_TO.lower() + '.srt') \n return 0\n\n#----------------------------------------------------------------------------------------------------------------------------------\ndef generateSubFile(args,_filename=None): \n if _filename is None:\n _source = args.SOURCE\n else:\n _source = _filename\n if _source[-4:]=='.srt': \n substring = open(_source,'r').read() \n generateSub(args,substring,_source.replace('.srt','_' + args.LANG_TO + '.srt')) \n else:\n print \"Incorrect file format\"\n return -1\n\n#----------------------------------------------------------------------------------------------------------------------------------\ndef generateSubFolder(args): \n _source = args.SOURCE if args.SOURCE[-1:]=='/' else args.SOURCE + '/'\n if os.path.isdir(args.SOURCE):\n for root, dirs, files in os.walk(args.SOURCE): \n for f in files: \n if f[-4:]=='.srt':\n substring = open(root + f if root[-1:]=='/' else root + '/' + f,'r').read() \n generateSub(args,substring,f.replace('.srt','_' + args.LANG_TO + '.srt')) \n else:\n print \"Incorrect file format\"\n return -1\n\n#----------------------------------------------------------------------------------------------------------------------------------\ndef main():\n parser = ArgumentParser(description='Translate subtitle from media id, file or folder', parents=[]) \n parser.add_argument('-v', '--verbose', action='store_true', dest='VERBOSE', default=False, help='Verbose') \n parser.add_argument('-t', '--sourceType', type=str, dest='SOURCE_TYPE', help='source type, pick between media|file|folder')\n parser.add_argument('-s', '--source', type=str, dest='SOURCE', help='source of the subtitle/s')\n 
parser.add_argument('-langf', '--langFrom', type=str, dest='LANG_FROM', default='es', help='Language that we want to translate')\n parser.add_argument('-langt', '--langTo', type=str, dest='LANG_TO', default='en', help='Language of the output subtitle') \n parser.add_argument('-o', '--output', type=str, dest='OUTPUT', default='./', help='Output folder to store the result') \n args = parser.parse_args() \n \n \n if (args.SOURCE_TYPE.lower()=='file'):\n try: \n generateSubFile(args)\n except:\n return -1 \n elif (args.SOURCE_TYPE.lower()=='folder'):\n try:\n generateSubFolder(args)\n except:\n return -1\n elif (args.SOURCE_TYPE.lower()=='media'):\n try: \n generateSubMedia(args)\n except: \n return -1\n else:\n print \"Choose a valid source type\" \n\n return 0\n \n#---------------------------------------------------------------------------------------------------------------------------------- \nif (__name__ == '__main__'):\n main() \n\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
__version__ = "1.2.0"
import hashlib
from collections import Counter
from re import findall
from secrets import choice
from string import ascii_letters, ascii_lowercase, ascii_uppercase
from string import digits as all_digits
from string import punctuation
import requests
def check_password(password):
"""Check a given password against known data breaches
Note:
        This method uses the `Have I Been Pwned <https://haveibeenpwned.com/>`_ Passwords API. Neither the unhashed password nor its full `SHA-1 <https://en.wikipedia.org/wiki/SHA-1>`_ hash ever leaves the device.
Args:
password (str): The password to check
Returns:
int: The number of times the password has been found
"""
sha1 = hashlib.sha1(password.encode("utf-8")).hexdigest()
response = requests.get(f"https://api.pwnedpasswords.com/range/{sha1[:5]}")
hash_suffix_list = [x.split(":") for x in response.text.splitlines(False)]
try:
count = [
count for suffix, count in hash_suffix_list if sha1.endswith(suffix.lower())
][0]
except IndexError:
return 0
return int(count)
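
# Usage sketch (illustrative): only the first five hex characters of the SHA-1
# digest are sent to the HIBP range endpoint, and the suffix comparison happens
# locally, so neither the password nor its full hash leaves the machine.
#
#     >>> check_password("password")  # doctest: +SKIP (network call)
#     # -> a large positive count for common passwords, 0 for unseen ones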
class PasswordRequirements:
"""A set of requirements to check passwords against
Keyword Args:
min_length (int): The minimum length of the password
min_digits (int): The minimum number of digits in the password
min_special (int): The minimum number of special characters in the password
min_alpha (int): The minimum number of alphabetical characters in the password
min_upper (int): The minimum number of uppercase letters in the password
min_lower (int): The minimum number of lowercase letters in the password
check_breaches (bool): Whether to ensure that passwords aren't found in known data breaches (uses :meth:`~passwd.check_password`)
func (function): A function that takes in a password (:class:`str`) and returns a :class:`bool` that must be ``True`` for the password to meet all requirements
"""
def __init__(
self,
*,
min_length=0,
min_digits=0,
min_special=0,
min_alpha=0,
min_upper=0,
min_lower=0,
check_breaches=False,
func=None,
):
self.min_length = min_length
self.min_digits = min_digits
self.min_special = min_special
self.min_alpha = min_alpha
self.min_upper = min_upper
self.min_lower = min_lower
self.check_breaches = check_breaches
self.func = func
def check(self, password):
"""Check a password against the requirements
Args:
password (str): The password to check
Returns:
bool: Whether the password meets all the given requirements
"""
if len(password) < self.min_length:
return False
digits = len(findall(r"\d", password))
if digits < self.min_digits:
return False
special_chars = sum(v for k, v in Counter(password).items() if k in punctuation)
if special_chars < self.min_special:
return False
alpha_chars = sum(v for k, v in Counter(password).items() if k in ascii_letters)
if alpha_chars < self.min_alpha:
return False
upper_chars = sum(
v for k, v in Counter(password).items() if k in ascii_uppercase
)
if upper_chars < self.min_upper:
return False
lower_chars = sum(
v for k, v in Counter(password).items() if k in ascii_lowercase
)
if lower_chars < self.min_lower:
return False
if self.check_breaches and check_password(password):
return False
if self.func and not self.func(password):
return False
return True
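
# Usage sketch (requirement values are illustrative):
#
#     >>> reqs = PasswordRequirements(min_length=12, min_digits=2, min_special=1)
#     >>> reqs.check("hunter2")        # too short
#     False
#     >>> reqs.check("x7#Qm2!vRw9&")   # 12 chars, 3 digits, 3 special chars
#     True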
class PasswordGenerator:
"""A random password generator
Args:
length (int): The length of the password
Keyword Args:
uppercase (bool): Whether to allow uppercase letters in the password
lowercase (bool): Whether to allow lowercase letters in the password
digits (bool): Whether to allow numerical digits in the password
special (bool): Whether to allow special characters in the password
"""
def __init__(
self, length, *, uppercase=True, lowercase=True, digits=True, special=True
):
self.length = length
self.uppercase = uppercase
self.lowercase = lowercase
self.digits = digits
self.special = special
def generate(
self, length=None, uppercase=None, lowercase=None, digits=None, special=None
):
"""Generate a random password
Keyword Args:
length (int): The length of the password
uppercase (bool): Whether to allow uppercase letters in the password
lowercase (bool): Whether to allow lowercase letters in the password
digits (bool): Whether to allow numerical digits in the password
special (bool): Whether to allow special characters in the password
Returns:
str: The freshly generated password
"""
if length is None:
length = self.length
allowed_chars = ""
if uppercase is not None:
allowed_chars += ascii_uppercase if uppercase else ""
elif self.uppercase:
allowed_chars += ascii_uppercase
if lowercase is not None:
allowed_chars += ascii_lowercase if lowercase else ""
elif self.lowercase:
allowed_chars += ascii_lowercase
if digits is not None:
allowed_chars += all_digits if digits else ""
elif self.digits:
allowed_chars += all_digits
if special is not None:
allowed_chars += punctuation if special else ""
elif self.special:
allowed_chars += punctuation
        if not allowed_chars:  # all character classes disabled
            raise ValueError("At least one character type must be allowed")
        return "".join(choice(allowed_chars) for _ in range(length))
def __len__(self):
return self.length if self.length >= 0 else 0
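
# Runnable sketch tying the pieces together (parameter choices are illustrative):
if __name__ == "__main__":
    gen = PasswordGenerator(16)
    reqs = PasswordRequirements(min_length=12, min_digits=1, min_special=1)
    candidate = gen.generate()
    while not reqs.check(candidate):  # regenerate until the requirements pass
        candidate = gen.generate()
    print(candidate)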
|
normal
|
{
"blob_id": "eafe89de10c4187057b0cc1e0e9772f03a576b0d",
"index": 9771,
"step-1": "<mask token>\n\n\nclass PasswordGenerator:\n <mask token>\n\n def __init__(self, length, *, uppercase=True, lowercase=True, digits=\n True, special=True):\n self.length = length\n self.uppercase = uppercase\n self.lowercase = lowercase\n self.digits = digits\n self.special = special\n\n def generate(self, length=None, uppercase=None, lowercase=None, digits=\n None, special=None):\n \"\"\"Generate a random password\n\n Keyword Args:\n length (int): The length of the password\n uppercase (bool): Whether to allow uppercase letters in the password\n lowercase (bool): Whether to allow lowercase letters in the password\n digits (bool): Whether to allow numerical digits in the password\n special (bool): Whether to allow special characters in the password\n\n Returns:\n str: The freshly generated password\n \"\"\"\n if length is None:\n length = self.length\n allowed_chars = ''\n if uppercase is not None:\n allowed_chars += ascii_uppercase if uppercase else ''\n elif self.uppercase:\n allowed_chars += ascii_uppercase\n if lowercase is not None:\n allowed_chars += ascii_lowercase if lowercase else ''\n elif self.lowercase:\n allowed_chars += ascii_lowercase\n if digits is not None:\n allowed_chars += all_digits if digits else ''\n elif self.digits:\n allowed_chars += all_digits\n if special is not None:\n allowed_chars += punctuation if special else ''\n elif self.special:\n allowed_chars += punctuation\n return ''.join(choice(allowed_chars) for _ in range(length))\n\n def __len__(self):\n return self.length if self.length >= 0 else 0\n",
"step-2": "<mask token>\n\n\nclass PasswordRequirements:\n <mask token>\n <mask token>\n <mask token>\n\n\nclass PasswordGenerator:\n \"\"\"A random password generator\n\n Args:\n length (int): The length of the password\n\n Keyword Args:\n uppercase (bool): Whether to allow uppercase letters in the password\n lowercase (bool): Whether to allow lowercase letters in the password\n digits (bool): Whether to allow numerical digits in the password\n special (bool): Whether to allow special characters in the password\n \"\"\"\n\n def __init__(self, length, *, uppercase=True, lowercase=True, digits=\n True, special=True):\n self.length = length\n self.uppercase = uppercase\n self.lowercase = lowercase\n self.digits = digits\n self.special = special\n\n def generate(self, length=None, uppercase=None, lowercase=None, digits=\n None, special=None):\n \"\"\"Generate a random password\n\n Keyword Args:\n length (int): The length of the password\n uppercase (bool): Whether to allow uppercase letters in the password\n lowercase (bool): Whether to allow lowercase letters in the password\n digits (bool): Whether to allow numerical digits in the password\n special (bool): Whether to allow special characters in the password\n\n Returns:\n str: The freshly generated password\n \"\"\"\n if length is None:\n length = self.length\n allowed_chars = ''\n if uppercase is not None:\n allowed_chars += ascii_uppercase if uppercase else ''\n elif self.uppercase:\n allowed_chars += ascii_uppercase\n if lowercase is not None:\n allowed_chars += ascii_lowercase if lowercase else ''\n elif self.lowercase:\n allowed_chars += ascii_lowercase\n if digits is not None:\n allowed_chars += all_digits if digits else ''\n elif self.digits:\n allowed_chars += all_digits\n if special is not None:\n allowed_chars += punctuation if special else ''\n elif self.special:\n allowed_chars += punctuation\n return ''.join(choice(allowed_chars) for _ in range(length))\n\n def __len__(self):\n return self.length if self.length >= 0 else 0\n",
"step-3": "<mask token>\n\n\ndef check_password(password):\n \"\"\"Check a given password against known data breaches\n\n Note:\n This method uses the `Have I Been Pwned <https://haveibeenpwned.com/>`_ Passwords API. The unhashed password nor its full `SHA-1 <https://en.wikipedia.org/wiki/SHA-1>`_ hash never leave the device.\n\n Args:\n password (str): The password to check\n\n Returns:\n int: The number of times the password has been found\n \"\"\"\n sha1 = hashlib.sha1(password.encode('utf-8')).hexdigest()\n response = requests.get(f'https://api.pwnedpasswords.com/range/{sha1[:5]}')\n hash_suffix_list = [x.split(':') for x in response.text.splitlines(False)]\n try:\n count = [count for suffix, count in hash_suffix_list if sha1.\n endswith(suffix.lower())][0]\n except IndexError:\n return 0\n return int(count)\n\n\nclass PasswordRequirements:\n \"\"\"A set of requirements to check passwords against\n\n Keyword Args:\n min_length (int): The minimum length of the password\n min_digits (int): The minimum number of digits in the password\n min_special (int): The minimum number of special characters in the password\n min_alpha (int): The minimum number of alphabetical characters in the password\n min_upper (int): The minimum number of uppercase letters in the password\n min_lower (int): The minimum number of lowercase letters in the password\n check_breaches (bool): Whether to ensure that passwords aren't found in known data breaches (uses :meth:`~passwd.check_password`)\n func (function): A function that takes in a password (:class:`str`) and returns a :class:`bool` that must be ``True`` for the password to meet all requirements\n \"\"\"\n\n def __init__(self, *, min_length=0, min_digits=0, min_special=0,\n min_alpha=0, min_upper=0, min_lower=0, check_breaches=False, func=None\n ):\n self.min_length = min_length\n self.min_digits = min_digits\n self.min_special = min_special\n self.min_alpha = min_alpha\n self.min_upper = min_upper\n self.min_lower = min_lower\n self.check_breaches = check_breaches\n self.func = func\n\n def check(self, password):\n \"\"\"Check a password against the requirements\n\n Args:\n password (str): The password to check\n\n Returns:\n bool: Whether the password meets all the given requirements\n \"\"\"\n if len(password) < self.min_length:\n return False\n digits = len(findall('\\\\d', password))\n if digits < self.min_digits:\n return False\n special_chars = sum(v for k, v in Counter(password).items() if k in\n punctuation)\n if special_chars < self.min_special:\n return False\n alpha_chars = sum(v for k, v in Counter(password).items() if k in\n ascii_letters)\n if alpha_chars < self.min_alpha:\n return False\n upper_chars = sum(v for k, v in Counter(password).items() if k in\n ascii_uppercase)\n if upper_chars < self.min_upper:\n return False\n lower_chars = sum(v for k, v in Counter(password).items() if k in\n ascii_lowercase)\n if lower_chars < self.min_lower:\n return False\n if self.check_breaches and check_password(password):\n return False\n if self.func and not self.func(password):\n return False\n return True\n\n\nclass PasswordGenerator:\n \"\"\"A random password generator\n\n Args:\n length (int): The length of the password\n\n Keyword Args:\n uppercase (bool): Whether to allow uppercase letters in the password\n lowercase (bool): Whether to allow lowercase letters in the password\n digits (bool): Whether to allow numerical digits in the password\n special (bool): Whether to allow special characters in the password\n \"\"\"\n\n def __init__(self, length, 
*, uppercase=True, lowercase=True, digits=\n True, special=True):\n self.length = length\n self.uppercase = uppercase\n self.lowercase = lowercase\n self.digits = digits\n self.special = special\n\n def generate(self, length=None, uppercase=None, lowercase=None, digits=\n None, special=None):\n \"\"\"Generate a random password\n\n Keyword Args:\n length (int): The length of the password\n uppercase (bool): Whether to allow uppercase letters in the password\n lowercase (bool): Whether to allow lowercase letters in the password\n digits (bool): Whether to allow numerical digits in the password\n special (bool): Whether to allow special characters in the password\n\n Returns:\n str: The freshly generated password\n \"\"\"\n if length is None:\n length = self.length\n allowed_chars = ''\n if uppercase is not None:\n allowed_chars += ascii_uppercase if uppercase else ''\n elif self.uppercase:\n allowed_chars += ascii_uppercase\n if lowercase is not None:\n allowed_chars += ascii_lowercase if lowercase else ''\n elif self.lowercase:\n allowed_chars += ascii_lowercase\n if digits is not None:\n allowed_chars += all_digits if digits else ''\n elif self.digits:\n allowed_chars += all_digits\n if special is not None:\n allowed_chars += punctuation if special else ''\n elif self.special:\n allowed_chars += punctuation\n return ''.join(choice(allowed_chars) for _ in range(length))\n\n def __len__(self):\n return self.length if self.length >= 0 else 0\n",
"step-4": "__version__ = '1.2.0'\nimport hashlib\nfrom collections import Counter\nfrom re import findall\nfrom secrets import choice\nfrom string import ascii_letters, ascii_lowercase, ascii_uppercase\nfrom string import digits as all_digits\nfrom string import punctuation\nimport requests\n\n\ndef check_password(password):\n \"\"\"Check a given password against known data breaches\n\n Note:\n This method uses the `Have I Been Pwned <https://haveibeenpwned.com/>`_ Passwords API. The unhashed password nor its full `SHA-1 <https://en.wikipedia.org/wiki/SHA-1>`_ hash never leave the device.\n\n Args:\n password (str): The password to check\n\n Returns:\n int: The number of times the password has been found\n \"\"\"\n sha1 = hashlib.sha1(password.encode('utf-8')).hexdigest()\n response = requests.get(f'https://api.pwnedpasswords.com/range/{sha1[:5]}')\n hash_suffix_list = [x.split(':') for x in response.text.splitlines(False)]\n try:\n count = [count for suffix, count in hash_suffix_list if sha1.\n endswith(suffix.lower())][0]\n except IndexError:\n return 0\n return int(count)\n\n\nclass PasswordRequirements:\n \"\"\"A set of requirements to check passwords against\n\n Keyword Args:\n min_length (int): The minimum length of the password\n min_digits (int): The minimum number of digits in the password\n min_special (int): The minimum number of special characters in the password\n min_alpha (int): The minimum number of alphabetical characters in the password\n min_upper (int): The minimum number of uppercase letters in the password\n min_lower (int): The minimum number of lowercase letters in the password\n check_breaches (bool): Whether to ensure that passwords aren't found in known data breaches (uses :meth:`~passwd.check_password`)\n func (function): A function that takes in a password (:class:`str`) and returns a :class:`bool` that must be ``True`` for the password to meet all requirements\n \"\"\"\n\n def __init__(self, *, min_length=0, min_digits=0, min_special=0,\n min_alpha=0, min_upper=0, min_lower=0, check_breaches=False, func=None\n ):\n self.min_length = min_length\n self.min_digits = min_digits\n self.min_special = min_special\n self.min_alpha = min_alpha\n self.min_upper = min_upper\n self.min_lower = min_lower\n self.check_breaches = check_breaches\n self.func = func\n\n def check(self, password):\n \"\"\"Check a password against the requirements\n\n Args:\n password (str): The password to check\n\n Returns:\n bool: Whether the password meets all the given requirements\n \"\"\"\n if len(password) < self.min_length:\n return False\n digits = len(findall('\\\\d', password))\n if digits < self.min_digits:\n return False\n special_chars = sum(v for k, v in Counter(password).items() if k in\n punctuation)\n if special_chars < self.min_special:\n return False\n alpha_chars = sum(v for k, v in Counter(password).items() if k in\n ascii_letters)\n if alpha_chars < self.min_alpha:\n return False\n upper_chars = sum(v for k, v in Counter(password).items() if k in\n ascii_uppercase)\n if upper_chars < self.min_upper:\n return False\n lower_chars = sum(v for k, v in Counter(password).items() if k in\n ascii_lowercase)\n if lower_chars < self.min_lower:\n return False\n if self.check_breaches and check_password(password):\n return False\n if self.func and not self.func(password):\n return False\n return True\n\n\nclass PasswordGenerator:\n \"\"\"A random password generator\n\n Args:\n length (int): The length of the password\n\n Keyword Args:\n uppercase (bool): Whether to allow uppercase 
letters in the password\n lowercase (bool): Whether to allow lowercase letters in the password\n digits (bool): Whether to allow numerical digits in the password\n special (bool): Whether to allow special characters in the password\n \"\"\"\n\n def __init__(self, length, *, uppercase=True, lowercase=True, digits=\n True, special=True):\n self.length = length\n self.uppercase = uppercase\n self.lowercase = lowercase\n self.digits = digits\n self.special = special\n\n def generate(self, length=None, uppercase=None, lowercase=None, digits=\n None, special=None):\n \"\"\"Generate a random password\n\n Keyword Args:\n length (int): The length of the password\n uppercase (bool): Whether to allow uppercase letters in the password\n lowercase (bool): Whether to allow lowercase letters in the password\n digits (bool): Whether to allow numerical digits in the password\n special (bool): Whether to allow special characters in the password\n\n Returns:\n str: The freshly generated password\n \"\"\"\n if length is None:\n length = self.length\n allowed_chars = ''\n if uppercase is not None:\n allowed_chars += ascii_uppercase if uppercase else ''\n elif self.uppercase:\n allowed_chars += ascii_uppercase\n if lowercase is not None:\n allowed_chars += ascii_lowercase if lowercase else ''\n elif self.lowercase:\n allowed_chars += ascii_lowercase\n if digits is not None:\n allowed_chars += all_digits if digits else ''\n elif self.digits:\n allowed_chars += all_digits\n if special is not None:\n allowed_chars += punctuation if special else ''\n elif self.special:\n allowed_chars += punctuation\n return ''.join(choice(allowed_chars) for _ in range(length))\n\n def __len__(self):\n return self.length if self.length >= 0 else 0\n",
"step-5": "__version__ = \"1.2.0\"\n\nimport hashlib\nfrom collections import Counter\nfrom re import findall\nfrom secrets import choice\nfrom string import ascii_letters, ascii_lowercase, ascii_uppercase\nfrom string import digits as all_digits\nfrom string import punctuation\n\nimport requests\n\n\ndef check_password(password):\n \"\"\"Check a given password against known data breaches\n\n Note:\n This method uses the `Have I Been Pwned <https://haveibeenpwned.com/>`_ Passwords API. The unhashed password nor its full `SHA-1 <https://en.wikipedia.org/wiki/SHA-1>`_ hash never leave the device.\n\n Args:\n password (str): The password to check\n\n Returns:\n int: The number of times the password has been found\n \"\"\"\n\n sha1 = hashlib.sha1(password.encode(\"utf-8\")).hexdigest()\n\n response = requests.get(f\"https://api.pwnedpasswords.com/range/{sha1[:5]}\")\n\n hash_suffix_list = [x.split(\":\") for x in response.text.splitlines(False)]\n\n try:\n count = [\n count for suffix, count in hash_suffix_list if sha1.endswith(suffix.lower())\n ][0]\n except IndexError:\n return 0\n\n return int(count)\n\n\nclass PasswordRequirements:\n \"\"\"A set of requirements to check passwords against\n\n Keyword Args:\n min_length (int): The minimum length of the password\n min_digits (int): The minimum number of digits in the password\n min_special (int): The minimum number of special characters in the password\n min_alpha (int): The minimum number of alphabetical characters in the password\n min_upper (int): The minimum number of uppercase letters in the password\n min_lower (int): The minimum number of lowercase letters in the password\n check_breaches (bool): Whether to ensure that passwords aren't found in known data breaches (uses :meth:`~passwd.check_password`)\n func (function): A function that takes in a password (:class:`str`) and returns a :class:`bool` that must be ``True`` for the password to meet all requirements\n \"\"\"\n\n def __init__(\n self,\n *,\n min_length=0,\n min_digits=0,\n min_special=0,\n min_alpha=0,\n min_upper=0,\n min_lower=0,\n check_breaches=False,\n func=None,\n ):\n self.min_length = min_length\n self.min_digits = min_digits\n self.min_special = min_special\n self.min_alpha = min_alpha\n self.min_upper = min_upper\n self.min_lower = min_lower\n self.check_breaches = check_breaches\n self.func = func\n\n def check(self, password):\n \"\"\"Check a password against the requirements\n\n Args:\n password (str): The password to check\n\n Returns:\n bool: Whether the password meets all the given requirements\n \"\"\"\n\n if len(password) < self.min_length:\n return False\n\n digits = len(findall(r\"\\d\", password))\n if digits < self.min_digits:\n return False\n\n special_chars = sum(v for k, v in Counter(password).items() if k in punctuation)\n if special_chars < self.min_special:\n return False\n\n alpha_chars = sum(v for k, v in Counter(password).items() if k in ascii_letters)\n if alpha_chars < self.min_alpha:\n return False\n\n upper_chars = sum(\n v for k, v in Counter(password).items() if k in ascii_uppercase\n )\n if upper_chars < self.min_upper:\n return False\n\n lower_chars = sum(\n v for k, v in Counter(password).items() if k in ascii_lowercase\n )\n if lower_chars < self.min_lower:\n return False\n\n if self.check_breaches and check_password(password):\n return False\n\n if self.func and not self.func(password):\n return False\n\n return True\n\n\nclass PasswordGenerator:\n \"\"\"A random password generator\n\n Args:\n length (int): The length of the 
password\n\n Keyword Args:\n uppercase (bool): Whether to allow uppercase letters in the password\n lowercase (bool): Whether to allow lowercase letters in the password\n digits (bool): Whether to allow numerical digits in the password\n special (bool): Whether to allow special characters in the password\n \"\"\"\n\n def __init__(\n self, length, *, uppercase=True, lowercase=True, digits=True, special=True\n ):\n self.length = length\n self.uppercase = uppercase\n self.lowercase = lowercase\n self.digits = digits\n self.special = special\n\n def generate(\n self, length=None, uppercase=None, lowercase=None, digits=None, special=None\n ):\n \"\"\"Generate a random password\n\n Keyword Args:\n length (int): The length of the password\n uppercase (bool): Whether to allow uppercase letters in the password\n lowercase (bool): Whether to allow lowercase letters in the password\n digits (bool): Whether to allow numerical digits in the password\n special (bool): Whether to allow special characters in the password\n\n Returns:\n str: The freshly generated password\n \"\"\"\n if length is None:\n length = self.length\n \n allowed_chars = \"\"\n\n if uppercase is not None:\n allowed_chars += ascii_uppercase if uppercase else \"\"\n elif self.uppercase:\n allowed_chars += ascii_uppercase\n\n if lowercase is not None:\n allowed_chars += ascii_lowercase if lowercase else \"\"\n elif self.lowercase:\n allowed_chars += ascii_lowercase\n\n if digits is not None:\n allowed_chars += all_digits if digits else \"\"\n elif self.digits:\n allowed_chars += all_digits\n\n if special is not None:\n allowed_chars += punctuation if special else \"\"\n elif self.special:\n allowed_chars += punctuation\n\n return \"\".join(choice(allowed_chars) for _ in range(length))\n\n def __len__(self):\n return self.length if self.length >= 0 else 0\n",
"step-ids": [
4,
6,
10,
12,
13
]
}
|
[
4,
6,
10,
12,
13
] |
import os
import numpy as np
import pandas as pd
import random
import platform
import subprocess
import shlex
import teradata
from joblib import dump
import shutil
from tqdm import tqdm
def get_session(db, usr, pwd):
    """Open a connection to Teradata and return the session."""
    if platform.system() == 'Windows':
        driver = 'Teradata'
    else:
        driver = 'Teradata Database ODBC Driver 16.20'
    udaExec = teradata.UdaExec(appName='DataLoad', version='0.1', logConsole=False)
    session = udaExec.connect(method='odbc',
                              system=db,    # TD server (from the config file)
                              username=usr, # TD login
                              password=pwd, # TD password
                              driver=driver,
                              charset='UTF8',
                              autoCommit='True',
                              USEREGIONALSETTINGS='N',
                              transactionMode='TERADATA'
                              )
    return session
def sql2df(query, session, chunksize=100000):
""" Функция грузит из терадаты данные в батчах по 100к и склеивает их в одну таблицу """
db = pd.read_sql(query, session, chunksize=chunksize)
data = pd.DataFrame()
for x in tqdm(db):
data = pd.concat([data, x])
return data
def check_config():
    """Write ~/.twbcfg.ini (required by the TPT utilities) if it does not exist yet."""
    path = os.path.expanduser("~")
    config_path = os.path.join(path, ".twbcfg.ini")
    log_path = os.path.join(path, "tmp", "teradata_logs")
    if not os.path.exists(config_path):
        if not os.path.exists(log_path):
            os.makedirs(log_path)  # makedirs, since ~/tmp itself may not exist yet
        config = f'''CheckpointDirectory='{log_path}'
    LogDirectory='{log_path}' '''
        with open(config_path, 'w') as f:
            f.write(config)
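# For reference, the resulting ~/.twbcfg.ini contains two lines of the form
# below (the exact path depends on the user's home directory):
#   CheckpointDirectory='/home/<user>/tmp/teradata_logs'
#   LogDirectory='/home/<user>/tmp/teradata_logs'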
def td_download(query="",
bd="tdsb15.cgs.sbrf.ru",
username="", password="",
fast=False, return_df=False, csv=True,
chunksize=100000):
"""
Функция возвращает данные из ТД: путь к csv или датафрейм.
fast=True - использовать утилиты ТД, False - ODBC;
return_df - вернуть датафрейм;
csv - записать данные в файл при fast=False;
chunksize - размер бача для ODBC;
query должен содержать where, чтобы выгрузить название столбцов из БД
"""
local_seed = str(random.randint(0, 1000000))
query = query.replace("\n", " ")
if not fast:
# Teradata python package
session = get_session(bd, username, password)
frame = sql2df(query, session, chunksize=chunksize)
session.close()
if return_df:
return frame
else:
path_to_file = os.path.join(os.getcwd(), 'data', 'input_' + local_seed)
if csv:
filename = path_to_file + ".csv"
frame.to_csv(filename, sep=';', index=False, encoding="utf8")
return filename
else:
dump(frame, path_to_file)
return path_to_file
else:
# FastLoad
check_config()
        query = query.replace("'", "''")  # prepare the query for FastLoad (escape single quotes)
path_to_folder = os.path.join(os.getcwd(), 'data', 'input_' + local_seed)
if os.path.exists(path_to_folder):
shutil.rmtree(path_to_folder)
os.mkdir(path_to_folder)
else:
os.mkdir(path_to_folder)
path_to_file = os.path.join(path_to_folder, 'dataset.csv')
open(path_to_file, 'w').close()
# Create utility files
txt = '''SourceTdpId = '%s'
,SourceUserName = '%s'
,SourceUserPassword = '%s'
,DDLPrivateLogName = 'ddlprivate.log'
,ExportPrivateLogName = 'exportprivate.log'
,TargetErrorList = ['3807']
,TargetFileName = '%s'
,TargetFormat = 'delimited'
,TargetTextDelimiter = ';'
,TargetOpenMode = 'write'
,SelectStmt = '%s' ''' % (bd, username, password, path_to_file, query)
qtxt = '''USING CHAR SET UTF-8
DEFINE JOB qstart2
(
APPLY TO OPERATOR ($FILE_WRITER)
SELECT * FROM OPERATOR($EXPORT);
);'''
with open(path_to_folder + '/qstart2.txt', 'w+') as f:
f.write(qtxt)
with open(path_to_folder + '/jobvars.txt', 'w+') as f:
f.write(txt)
        # run the TPT export job; the control file written above is qstart2.txt,
        # and the job name is seeded to avoid collisions between parallel runs
        p = subprocess.run(
            shlex.split(f"tbuild -f {path_to_folder}/qstart2.txt -v {path_to_folder}/jobvars.txt -j qstart2_{str(local_seed)}"),
            stdout=subprocess.PIPE, stderr=subprocess.STDOUT
        )
        # recover the column names by re-running the query with "where 1=0"
        query = query.replace("\n", " ").replace("''", "'")
query = query.lower()
query_list = query.split("where")
if len(query_list) == 2:
columns_query = " where 1=0 and ".join(query_list)
session = get_session(bd, username, password)
columns_names = pd.read_sql(columns_query, session).columns.tolist()
session.close()
else:
print("Coudn't load columns names")
columns_names = None
if not return_df:
if columns_names:
with open(path_to_folder + '/columns_names.txt', 'w') as f:
f.write("\n".join(columns_names))
return path_to_file
else:
if columns_names:
frame = pd.read_csv(path_to_file, names=columns_names, delimiter=';')
else:
frame = pd.read_csv(path_to_file, header=None, delimiter=';')
return frame
def py2td(x):
"""Функция вставляет пропуски и корректирует тип данных под ТД"""
x_type = type(x)
if x_type == float:
if x % 1 == 0:
return int(x)
else:
return x
elif x == 'null':
return None
else:
return x
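# For example (a sketch of the intended behaviour): py2td(3.0) -> 3,
# py2td(2.5) -> 2.5, py2td('null') -> None; other values pass through unchanged.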
def td_import(
username="", password="",
bd="tdsb15.cgs.sbrf.ru", tbl_name="",
schema="SBX_RETAIL_MP_PFM",
loadframe=True, df=None, path_to_file=None, fast=False,
batch_size=12000, max_sessions=6, buffersize=524288,
):
"""
Функция записывате данные в ТД через утилиты или ODBC
"""
table = schema + "." + tbl_name
if not fast:
if not loadframe:
            df = pd.read_csv(path_to_file, sep=';', encoding='utf8')
        # insert in batches over ODBC
n_iters = len(df) // batch_size + (len(df) % batch_size > 0)
df_dict = df.to_dict('records')
session = get_session(bd, username, password)
for i in tqdm(range(n_iters), total=n_iters):
session.executemany(
f"INSERT INTO {table} VALUES ({','.join(list('?' * df.shape[1]))})",
[list(row.values()) for row in df_dict[i * batch_size:i * batch_size + batch_size]],
batch=True
)
session.close()
else:
check_config()
local_seed = str(random.randint(0, 1000000))
path_to_folder = os.path.join(os.getcwd(), "data", "output_" + local_seed)
        if os.path.exists(path_to_folder):
            shutil.rmtree(path_to_folder)
        os.mkdir(path_to_folder)  # always recreate the folder after cleanup
if loadframe:
            converted = df.replace(np.nan, '').astype(str)
path_to_file = path_to_folder + '/tmp.csv'
converted.to_csv(path_to_file, index=False, header=False, sep=";", encoding="utf8")
converted_len = converted.apply(lambda x: x.str.encode('utf-8').apply(len)).max().to_dict()
else:
            # column widths are estimated from a 100k-row sample of the file
            converted_len = pd.read_csv(path_to_file, sep=';', dtype="str", header=None, encoding="utf8",
                                        low_memory=False, nrows=100000)
columns_query = f"select * from {table} where 1=0"
session = get_session(bd, username, password)
columns_names = pd.read_sql(columns_query, session).columns.tolist()
session.close()
            shutil.copy(path_to_file, path_to_folder + "/tmp.csv")  # copy the file into the job folder so TPT can read it (could be a move)
converted_len.columns = columns_names
converted_len = converted_len.apply(lambda x: x.str.encode('utf-8').apply(len)).max().to_dict()
        # create an empty temporary table in the same schema
        td_temp_table = table + "_tmp_" + local_seed
session = get_session(bd, username, password)
session.execute(
f"create multiset table {td_temp_table} as {table} with no data no primary index"
)
session.close()
# Create utility file
txt = f"""USING CHARACTER SET UTF8
DEFINE JOB teradata_upload
Description 'Fastload script'
(
DEFINE OPERATOR Load_operator
TYPE LOAD
SCHEMA *
ATTRIBUTES
(
VARCHAR TdPid='{bd}',
VARCHAR UserName='{username}',
VARCHAR UserPassWord='{password}',
VARCHAR TargetTable='{td_temp_table}',
VARCHAR LogTable='{schema}.usr_tpt_log',
VARCHAR DateForm='AnsiDate',
INTEGER MaxSessions={max_sessions}
);
DEFINE SCHEMA Define_Employee_Schema
(
{','.join(f'{key} VARCHAR({max(1, value*2)})' for key, value in converted_len.items())}
);
DEFINE OPERATOR Producer_File_Detail
TYPE DATACONNECTOR PRODUCER
SCHEMA Define_Employee_Schema
ATTRIBUTES
(
VARCHAR DirectoryPath='{path_to_folder}/'
, VARCHAR FileName='tmp.csv'
, VARCHAR TextDelimiter=';'
, VARCHAR QuotedData = 'Optional'
, VARCHAR OpenQuoteMark = '"'
, VARCHAR CloseQuoteMark = '"'
, VARCHAR Format='Delimited'
, VARCHAR OpenMode='Read'
, VARCHAR INDICATORMODE='N'
, INTEGER BUFFERSIZE = {buffersize}
);
APPLY
(
'INSERT INTO {td_temp_table}({','.join(
f'{key}' for key, value in converted_len.items())}) VALUES (:{',:'.join(
f'{key}' for key, value in converted_len.items())});'
)
TO OPERATOR(Load_operator)
SELECT * FROM OPERATOR (Producer_File_Detail);
);"""
with open(path_to_folder + '/load_code.tpt', 'w+') as f:
f.write(txt)
# Start TPT load
p = subprocess.Popen(
shlex.split(f"tbuild -f {path_to_folder}/load_code.tpt -L {path_to_folder}")
)
p.wait()
# Merge
print("Merging in Teradata... \r", end='', flush=True)
session = get_session(bd, username, password)
session.execute(f"insert into {table} sel * from {td_temp_table}")
session.close()
# Drop temporary table
print("Cleaning... \r", end='', flush=True)
session = get_session(bd, username, password)
session.execute(f"drop table {td_temp_table}")
session.close()
# Cleanup
shutil.rmtree(path_to_folder)
print("Done!")
|
normal
|
{
"blob_id": "a05c94ae0ee41cfef5687f741e07a54ae793e40d",
"index": 2183,
"step-1": "<mask token>\n\n\ndef get_session(db, usr, pwd):\n \"\"\"Функция устанавливает соединение с ТД и возвращает сессию\"\"\"\n if platform.system() == 'Windows':\n driver = 'Teradata'\n else:\n driver = 'Teradata Database ODBC Driver 16.20'\n udaExec = teradata.UdaExec(appName='DataLoad', version='0.1',\n logConsole=False)\n session = udaExec.connect(method='odbc', system=db, username=usr,\n password=pwd, driver=driver, charset='UTF8', autoCommit='True',\n USEREGIONALSETTINGS='N', transactionMode='TERADATA')\n return session\n\n\ndef sql2df(query, session, chunksize=100000):\n \"\"\" Функция грузит из терадаты данные в батчах по 100к и склеивает их в одну таблицу \"\"\"\n db = pd.read_sql(query, session, chunksize=chunksize)\n data = pd.DataFrame()\n for x in tqdm(db):\n data = pd.concat([data, x])\n return data\n\n\ndef check_config():\n \"\"\" .twbcfg.ini to root path \"\"\"\n path = os.path.expanduser('~')\n config_path = os.path.join(path, '.twbcfg.ini')\n log_path = os.path.join(path, 'tmp', 'teradata_logs')\n if not os.path.exists(config_path):\n if not os.path.exists(log_path):\n os.mkdir(log_path)\n config = (\n f\"CheckpointDirectory='{log_path}' \\n LogDirectory='{log_path}' \"\n )\n with open(config_path, 'w') as f:\n f.write(config)\n\n\n<mask token>\n\n\ndef py2td(x):\n \"\"\"Функция вставляет пропуски и корректирует тип данных под ТД\"\"\"\n x_type = type(x)\n if x_type == float:\n if x % 1 == 0:\n return int(x)\n else:\n return x\n elif x == 'null':\n return None\n else:\n return x\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef get_session(db, usr, pwd):\n \"\"\"Функция устанавливает соединение с ТД и возвращает сессию\"\"\"\n if platform.system() == 'Windows':\n driver = 'Teradata'\n else:\n driver = 'Teradata Database ODBC Driver 16.20'\n udaExec = teradata.UdaExec(appName='DataLoad', version='0.1',\n logConsole=False)\n session = udaExec.connect(method='odbc', system=db, username=usr,\n password=pwd, driver=driver, charset='UTF8', autoCommit='True',\n USEREGIONALSETTINGS='N', transactionMode='TERADATA')\n return session\n\n\ndef sql2df(query, session, chunksize=100000):\n \"\"\" Функция грузит из терадаты данные в батчах по 100к и склеивает их в одну таблицу \"\"\"\n db = pd.read_sql(query, session, chunksize=chunksize)\n data = pd.DataFrame()\n for x in tqdm(db):\n data = pd.concat([data, x])\n return data\n\n\ndef check_config():\n \"\"\" .twbcfg.ini to root path \"\"\"\n path = os.path.expanduser('~')\n config_path = os.path.join(path, '.twbcfg.ini')\n log_path = os.path.join(path, 'tmp', 'teradata_logs')\n if not os.path.exists(config_path):\n if not os.path.exists(log_path):\n os.mkdir(log_path)\n config = (\n f\"CheckpointDirectory='{log_path}' \\n LogDirectory='{log_path}' \"\n )\n with open(config_path, 'w') as f:\n f.write(config)\n\n\n<mask token>\n\n\ndef py2td(x):\n \"\"\"Функция вставляет пропуски и корректирует тип данных под ТД\"\"\"\n x_type = type(x)\n if x_type == float:\n if x % 1 == 0:\n return int(x)\n else:\n return x\n elif x == 'null':\n return None\n else:\n return x\n\n\ndef td_import(username='', password='', bd='tdsb15.cgs.sbrf.ru', tbl_name=\n '', schema='SBX_RETAIL_MP_PFM', loadframe=True, df=None, path_to_file=\n None, fast=False, batch_size=12000, max_sessions=6, buffersize=524288):\n \"\"\"\n Функция записывате данные в ТД через утилиты или ODBC\n\n \"\"\"\n table = schema + '.' + tbl_name\n if not fast:\n if not loadframe:\n df = pd.read_csv(path_to_file, sep=';', encoding='utf8', index=\n False)\n n_iters = len(df) // batch_size + (len(df) % batch_size > 0)\n df_dict = df.to_dict('records')\n session = get_session(bd, username, password)\n for i in tqdm(range(n_iters), total=n_iters):\n session.executemany(\n f\"INSERT INTO {table} VALUES ({','.join(list('?' 
* df.shape[1]))})\"\n , [list(row.values()) for row in df_dict[i * batch_size:i *\n batch_size + batch_size]], batch=True)\n session.close()\n else:\n check_config()\n local_seed = str(random.randint(0, 1000000))\n path_to_folder = os.path.join(os.getcwd(), 'data', 'output_' +\n local_seed)\n if os.path.exists(path_to_folder):\n shutil.rmtree(path_to_folder)\n else:\n os.mkdir(path_to_folder)\n if loadframe:\n converted = df.replace(np.NaN, '').astype(str)\n path_to_file = path_to_folder + '/tmp.csv'\n converted.to_csv(path_to_file, index=False, header=False, sep=\n ';', encoding='utf8')\n converted_len = converted.apply(lambda x: x.str.encode('utf-8')\n .apply(len)).max().to_dict()\n else:\n converted_len = pd.read_csv(path_to_file, sep=';', dtype='str',\n header=None, encoding='utf8', low_memory=False, nrows=100000)\n columns_query = f'select * from {table} where 1=0'\n session = get_session(bd, username, password)\n columns_names = pd.read_sql(columns_query, session).columns.tolist(\n )\n session.close()\n shutil.copy(path_to_file, path_to_folder + '/tmp.csv')\n converted_len.columns = columns_names\n converted_len = converted_len.apply(lambda x: x.str.encode(\n 'utf-8').apply(len)).max().to_dict()\n td_temp_table = table + '_tmp_' + local_seed\n session = get_session(bd, username, password)\n session.execute(\n f'create multiset table {td_temp_table} as {table} with no data no primary index'\n )\n session.close()\n txt = f\"\"\"USING CHARACTER SET UTF8\n DEFINE JOB teradata_upload\n Description 'Fastload script'\n (\n DEFINE OPERATOR Load_operator\n TYPE LOAD\n SCHEMA *\n ATTRIBUTES\n (\n VARCHAR TdPid='{bd}',\n VARCHAR UserName='{username}',\n VARCHAR UserPassWord='{password}',\n VARCHAR TargetTable='{td_temp_table}',\n VARCHAR LogTable='{schema}.usr_tpt_log',\n VARCHAR DateForm='AnsiDate',\n INTEGER MaxSessions={max_sessions}\n );\n\n DEFINE SCHEMA Define_Employee_Schema\n (\n {','.join(f'{key} VARCHAR({max(1, value * 2)})' for key, value in converted_len.items())} \n );\n\n DEFINE OPERATOR Producer_File_Detail\n TYPE DATACONNECTOR PRODUCER\n SCHEMA Define_Employee_Schema\n ATTRIBUTES\n (\n VARCHAR DirectoryPath='{path_to_folder}/'\n , VARCHAR FileName='tmp.csv'\n , VARCHAR TextDelimiter=';'\n , VARCHAR QuotedData = 'Optional'\n , VARCHAR OpenQuoteMark = '\"'\n , VARCHAR CloseQuoteMark = '\"'\n , VARCHAR Format='Delimited'\n , VARCHAR OpenMode='Read'\n , VARCHAR INDICATORMODE='N'\n , INTEGER BUFFERSIZE = {buffersize}\n );\n\n APPLY\n (\n 'INSERT INTO {td_temp_table}({','.join(f'{key}' for key, value in converted_len.items())}) VALUES (:{',:'.join(f'{key}' for key, value in converted_len.items())});'\n )\n TO OPERATOR(Load_operator)\n\n SELECT * FROM OPERATOR (Producer_File_Detail);\n );\"\"\"\n with open(path_to_folder + '/load_code.tpt', 'w+') as f:\n f.write(txt)\n p = subprocess.Popen(shlex.split(\n f'tbuild -f {path_to_folder}/load_code.tpt -L {path_to_folder}'))\n p.wait()\n print('Merging in Teradata... \\r', end='', flush=True)\n session = get_session(bd, username, password)\n session.execute(f'insert into {table} sel * from {td_temp_table}')\n session.close()\n print('Cleaning... \\r', end='', flush=True)\n session = get_session(bd, username, password)\n session.execute(f'drop table {td_temp_table}')\n session.close()\n shutil.rmtree(path_to_folder)\n print('Done!')\n",
"step-3": "<mask token>\n\n\ndef get_session(db, usr, pwd):\n \"\"\"Функция устанавливает соединение с ТД и возвращает сессию\"\"\"\n if platform.system() == 'Windows':\n driver = 'Teradata'\n else:\n driver = 'Teradata Database ODBC Driver 16.20'\n udaExec = teradata.UdaExec(appName='DataLoad', version='0.1',\n logConsole=False)\n session = udaExec.connect(method='odbc', system=db, username=usr,\n password=pwd, driver=driver, charset='UTF8', autoCommit='True',\n USEREGIONALSETTINGS='N', transactionMode='TERADATA')\n return session\n\n\ndef sql2df(query, session, chunksize=100000):\n \"\"\" Функция грузит из терадаты данные в батчах по 100к и склеивает их в одну таблицу \"\"\"\n db = pd.read_sql(query, session, chunksize=chunksize)\n data = pd.DataFrame()\n for x in tqdm(db):\n data = pd.concat([data, x])\n return data\n\n\ndef check_config():\n \"\"\" .twbcfg.ini to root path \"\"\"\n path = os.path.expanduser('~')\n config_path = os.path.join(path, '.twbcfg.ini')\n log_path = os.path.join(path, 'tmp', 'teradata_logs')\n if not os.path.exists(config_path):\n if not os.path.exists(log_path):\n os.mkdir(log_path)\n config = (\n f\"CheckpointDirectory='{log_path}' \\n LogDirectory='{log_path}' \"\n )\n with open(config_path, 'w') as f:\n f.write(config)\n\n\ndef td_download(query='', bd='tdsb15.cgs.sbrf.ru', username='', password='',\n fast=False, return_df=False, csv=True, chunksize=100000):\n \"\"\"\n Функция возвращает данные из ТД: путь к csv или датафрейм.\n\n fast=True - использовать утилиты ТД, False - ODBC;\n return_df - вернуть датафрейм;\n csv - записать данные в файл при fast=False;\n chunksize - размер бача для ODBC;\n query должен содержать where, чтобы выгрузить название столбцов из БД\n\n \"\"\"\n local_seed = str(random.randint(0, 1000000))\n query = query.replace('\\n', ' ')\n if not fast:\n session = get_session(bd, username, password)\n frame = sql2df(query, session, chunksize=chunksize)\n session.close()\n if return_df:\n return frame\n else:\n path_to_file = os.path.join(os.getcwd(), 'data', 'input_' +\n local_seed)\n if csv:\n filename = path_to_file + '.csv'\n frame.to_csv(filename, sep=';', index=False, encoding='utf8')\n return filename\n else:\n dump(frame, path_to_file)\n return path_to_file\n else:\n check_config()\n query = query.replace(\"'\", \"''\")\n path_to_folder = os.path.join(os.getcwd(), 'data', 'input_' +\n local_seed)\n if os.path.exists(path_to_folder):\n shutil.rmtree(path_to_folder)\n os.mkdir(path_to_folder)\n else:\n os.mkdir(path_to_folder)\n path_to_file = os.path.join(path_to_folder, 'dataset.csv')\n open(path_to_file, 'w').close()\n txt = (\n \"\"\"SourceTdpId = '%s'\n ,SourceUserName = '%s' \n ,SourceUserPassword = '%s'\n ,DDLPrivateLogName = 'ddlprivate.log'\n ,ExportPrivateLogName = 'exportprivate.log'\n ,TargetErrorList = ['3807']\n ,TargetFileName = '%s'\n ,TargetFormat = 'delimited'\n ,TargetTextDelimiter = ';'\n ,TargetOpenMode = 'write'\n ,SelectStmt = '%s' \"\"\"\n % (bd, username, password, path_to_file, query))\n qtxt = \"\"\"USING CHAR SET UTF-8\n DEFINE JOB qstart2\n (\n APPLY TO OPERATOR ($FILE_WRITER)\n SELECT * FROM OPERATOR($EXPORT);\n );\"\"\"\n with open(path_to_folder + '/qstart2.txt', 'w+') as f:\n f.write(qtxt)\n with open(path_to_folder + '/jobvars.txt', 'w+') as f:\n f.write(txt)\n p = subprocess.run(shlex.split(\n f'tbuild -f {path_to_folder}/tdd.txt -v {path_to_folder}/jobvars.txt -j tdd_{str(local_seed)}'\n ), stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n query = query.replace('\\n', ' ').replace(\"''\", 
\"'\")\n query = query.lower()\n query_list = query.split('where')\n if len(query_list) == 2:\n columns_query = ' where 1=0 and '.join(query_list)\n session = get_session(bd, username, password)\n columns_names = pd.read_sql(columns_query, session).columns.tolist(\n )\n session.close()\n else:\n print(\"Coudn't load columns names\")\n columns_names = None\n if not return_df:\n if columns_names:\n with open(path_to_folder + '/columns_names.txt', 'w') as f:\n f.write('\\n'.join(columns_names))\n return path_to_file\n else:\n if columns_names:\n frame = pd.read_csv(path_to_file, names=columns_names,\n delimiter=';')\n else:\n frame = pd.read_csv(path_to_file, header=None, delimiter=';')\n return frame\n\n\ndef py2td(x):\n \"\"\"Функция вставляет пропуски и корректирует тип данных под ТД\"\"\"\n x_type = type(x)\n if x_type == float:\n if x % 1 == 0:\n return int(x)\n else:\n return x\n elif x == 'null':\n return None\n else:\n return x\n\n\ndef td_import(username='', password='', bd='tdsb15.cgs.sbrf.ru', tbl_name=\n '', schema='SBX_RETAIL_MP_PFM', loadframe=True, df=None, path_to_file=\n None, fast=False, batch_size=12000, max_sessions=6, buffersize=524288):\n \"\"\"\n Функция записывате данные в ТД через утилиты или ODBC\n\n \"\"\"\n table = schema + '.' + tbl_name\n if not fast:\n if not loadframe:\n df = pd.read_csv(path_to_file, sep=';', encoding='utf8', index=\n False)\n n_iters = len(df) // batch_size + (len(df) % batch_size > 0)\n df_dict = df.to_dict('records')\n session = get_session(bd, username, password)\n for i in tqdm(range(n_iters), total=n_iters):\n session.executemany(\n f\"INSERT INTO {table} VALUES ({','.join(list('?' * df.shape[1]))})\"\n , [list(row.values()) for row in df_dict[i * batch_size:i *\n batch_size + batch_size]], batch=True)\n session.close()\n else:\n check_config()\n local_seed = str(random.randint(0, 1000000))\n path_to_folder = os.path.join(os.getcwd(), 'data', 'output_' +\n local_seed)\n if os.path.exists(path_to_folder):\n shutil.rmtree(path_to_folder)\n else:\n os.mkdir(path_to_folder)\n if loadframe:\n converted = df.replace(np.NaN, '').astype(str)\n path_to_file = path_to_folder + '/tmp.csv'\n converted.to_csv(path_to_file, index=False, header=False, sep=\n ';', encoding='utf8')\n converted_len = converted.apply(lambda x: x.str.encode('utf-8')\n .apply(len)).max().to_dict()\n else:\n converted_len = pd.read_csv(path_to_file, sep=';', dtype='str',\n header=None, encoding='utf8', low_memory=False, nrows=100000)\n columns_query = f'select * from {table} where 1=0'\n session = get_session(bd, username, password)\n columns_names = pd.read_sql(columns_query, session).columns.tolist(\n )\n session.close()\n shutil.copy(path_to_file, path_to_folder + '/tmp.csv')\n converted_len.columns = columns_names\n converted_len = converted_len.apply(lambda x: x.str.encode(\n 'utf-8').apply(len)).max().to_dict()\n td_temp_table = table + '_tmp_' + local_seed\n session = get_session(bd, username, password)\n session.execute(\n f'create multiset table {td_temp_table} as {table} with no data no primary index'\n )\n session.close()\n txt = f\"\"\"USING CHARACTER SET UTF8\n DEFINE JOB teradata_upload\n Description 'Fastload script'\n (\n DEFINE OPERATOR Load_operator\n TYPE LOAD\n SCHEMA *\n ATTRIBUTES\n (\n VARCHAR TdPid='{bd}',\n VARCHAR UserName='{username}',\n VARCHAR UserPassWord='{password}',\n VARCHAR TargetTable='{td_temp_table}',\n VARCHAR LogTable='{schema}.usr_tpt_log',\n VARCHAR DateForm='AnsiDate',\n INTEGER MaxSessions={max_sessions}\n );\n\n DEFINE SCHEMA 
Define_Employee_Schema\n (\n {','.join(f'{key} VARCHAR({max(1, value * 2)})' for key, value in converted_len.items())} \n );\n\n DEFINE OPERATOR Producer_File_Detail\n TYPE DATACONNECTOR PRODUCER\n SCHEMA Define_Employee_Schema\n ATTRIBUTES\n (\n VARCHAR DirectoryPath='{path_to_folder}/'\n , VARCHAR FileName='tmp.csv'\n , VARCHAR TextDelimiter=';'\n , VARCHAR QuotedData = 'Optional'\n , VARCHAR OpenQuoteMark = '\"'\n , VARCHAR CloseQuoteMark = '\"'\n , VARCHAR Format='Delimited'\n , VARCHAR OpenMode='Read'\n , VARCHAR INDICATORMODE='N'\n , INTEGER BUFFERSIZE = {buffersize}\n );\n\n APPLY\n (\n 'INSERT INTO {td_temp_table}({','.join(f'{key}' for key, value in converted_len.items())}) VALUES (:{',:'.join(f'{key}' for key, value in converted_len.items())});'\n )\n TO OPERATOR(Load_operator)\n\n SELECT * FROM OPERATOR (Producer_File_Detail);\n );\"\"\"\n with open(path_to_folder + '/load_code.tpt', 'w+') as f:\n f.write(txt)\n p = subprocess.Popen(shlex.split(\n f'tbuild -f {path_to_folder}/load_code.tpt -L {path_to_folder}'))\n p.wait()\n print('Merging in Teradata... \\r', end='', flush=True)\n session = get_session(bd, username, password)\n session.execute(f'insert into {table} sel * from {td_temp_table}')\n session.close()\n print('Cleaning... \\r', end='', flush=True)\n session = get_session(bd, username, password)\n session.execute(f'drop table {td_temp_table}')\n session.close()\n shutil.rmtree(path_to_folder)\n print('Done!')\n",
"step-4": "import os\nimport numpy as np\nimport pandas as pd\nimport random\nimport platform\nimport subprocess\nimport shlex\nimport teradata\nfrom joblib import dump\nimport shutil\nfrom tqdm import tqdm\n\n\ndef get_session(db, usr, pwd):\n \"\"\"Функция устанавливает соединение с ТД и возвращает сессию\"\"\"\n if platform.system() == 'Windows':\n driver = 'Teradata'\n else:\n driver = 'Teradata Database ODBC Driver 16.20'\n udaExec = teradata.UdaExec(appName='DataLoad', version='0.1',\n logConsole=False)\n session = udaExec.connect(method='odbc', system=db, username=usr,\n password=pwd, driver=driver, charset='UTF8', autoCommit='True',\n USEREGIONALSETTINGS='N', transactionMode='TERADATA')\n return session\n\n\ndef sql2df(query, session, chunksize=100000):\n \"\"\" Функция грузит из терадаты данные в батчах по 100к и склеивает их в одну таблицу \"\"\"\n db = pd.read_sql(query, session, chunksize=chunksize)\n data = pd.DataFrame()\n for x in tqdm(db):\n data = pd.concat([data, x])\n return data\n\n\ndef check_config():\n \"\"\" .twbcfg.ini to root path \"\"\"\n path = os.path.expanduser('~')\n config_path = os.path.join(path, '.twbcfg.ini')\n log_path = os.path.join(path, 'tmp', 'teradata_logs')\n if not os.path.exists(config_path):\n if not os.path.exists(log_path):\n os.mkdir(log_path)\n config = (\n f\"CheckpointDirectory='{log_path}' \\n LogDirectory='{log_path}' \"\n )\n with open(config_path, 'w') as f:\n f.write(config)\n\n\ndef td_download(query='', bd='tdsb15.cgs.sbrf.ru', username='', password='',\n fast=False, return_df=False, csv=True, chunksize=100000):\n \"\"\"\n Функция возвращает данные из ТД: путь к csv или датафрейм.\n\n fast=True - использовать утилиты ТД, False - ODBC;\n return_df - вернуть датафрейм;\n csv - записать данные в файл при fast=False;\n chunksize - размер бача для ODBC;\n query должен содержать where, чтобы выгрузить название столбцов из БД\n\n \"\"\"\n local_seed = str(random.randint(0, 1000000))\n query = query.replace('\\n', ' ')\n if not fast:\n session = get_session(bd, username, password)\n frame = sql2df(query, session, chunksize=chunksize)\n session.close()\n if return_df:\n return frame\n else:\n path_to_file = os.path.join(os.getcwd(), 'data', 'input_' +\n local_seed)\n if csv:\n filename = path_to_file + '.csv'\n frame.to_csv(filename, sep=';', index=False, encoding='utf8')\n return filename\n else:\n dump(frame, path_to_file)\n return path_to_file\n else:\n check_config()\n query = query.replace(\"'\", \"''\")\n path_to_folder = os.path.join(os.getcwd(), 'data', 'input_' +\n local_seed)\n if os.path.exists(path_to_folder):\n shutil.rmtree(path_to_folder)\n os.mkdir(path_to_folder)\n else:\n os.mkdir(path_to_folder)\n path_to_file = os.path.join(path_to_folder, 'dataset.csv')\n open(path_to_file, 'w').close()\n txt = (\n \"\"\"SourceTdpId = '%s'\n ,SourceUserName = '%s' \n ,SourceUserPassword = '%s'\n ,DDLPrivateLogName = 'ddlprivate.log'\n ,ExportPrivateLogName = 'exportprivate.log'\n ,TargetErrorList = ['3807']\n ,TargetFileName = '%s'\n ,TargetFormat = 'delimited'\n ,TargetTextDelimiter = ';'\n ,TargetOpenMode = 'write'\n ,SelectStmt = '%s' \"\"\"\n % (bd, username, password, path_to_file, query))\n qtxt = \"\"\"USING CHAR SET UTF-8\n DEFINE JOB qstart2\n (\n APPLY TO OPERATOR ($FILE_WRITER)\n SELECT * FROM OPERATOR($EXPORT);\n );\"\"\"\n with open(path_to_folder + '/qstart2.txt', 'w+') as f:\n f.write(qtxt)\n with open(path_to_folder + '/jobvars.txt', 'w+') as f:\n f.write(txt)\n p = subprocess.run(shlex.split(\n f'tbuild -f 
{path_to_folder}/tdd.txt -v {path_to_folder}/jobvars.txt -j tdd_{str(local_seed)}'\n ), stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n query = query.replace('\\n', ' ').replace(\"''\", \"'\")\n query = query.lower()\n query_list = query.split('where')\n if len(query_list) == 2:\n columns_query = ' where 1=0 and '.join(query_list)\n session = get_session(bd, username, password)\n columns_names = pd.read_sql(columns_query, session).columns.tolist(\n )\n session.close()\n else:\n print(\"Coudn't load columns names\")\n columns_names = None\n if not return_df:\n if columns_names:\n with open(path_to_folder + '/columns_names.txt', 'w') as f:\n f.write('\\n'.join(columns_names))\n return path_to_file\n else:\n if columns_names:\n frame = pd.read_csv(path_to_file, names=columns_names,\n delimiter=';')\n else:\n frame = pd.read_csv(path_to_file, header=None, delimiter=';')\n return frame\n\n\ndef py2td(x):\n \"\"\"Функция вставляет пропуски и корректирует тип данных под ТД\"\"\"\n x_type = type(x)\n if x_type == float:\n if x % 1 == 0:\n return int(x)\n else:\n return x\n elif x == 'null':\n return None\n else:\n return x\n\n\ndef td_import(username='', password='', bd='tdsb15.cgs.sbrf.ru', tbl_name=\n '', schema='SBX_RETAIL_MP_PFM', loadframe=True, df=None, path_to_file=\n None, fast=False, batch_size=12000, max_sessions=6, buffersize=524288):\n \"\"\"\n Функция записывате данные в ТД через утилиты или ODBC\n\n \"\"\"\n table = schema + '.' + tbl_name\n if not fast:\n if not loadframe:\n df = pd.read_csv(path_to_file, sep=';', encoding='utf8', index=\n False)\n n_iters = len(df) // batch_size + (len(df) % batch_size > 0)\n df_dict = df.to_dict('records')\n session = get_session(bd, username, password)\n for i in tqdm(range(n_iters), total=n_iters):\n session.executemany(\n f\"INSERT INTO {table} VALUES ({','.join(list('?' 
* df.shape[1]))})\"\n , [list(row.values()) for row in df_dict[i * batch_size:i *\n batch_size + batch_size]], batch=True)\n session.close()\n else:\n check_config()\n local_seed = str(random.randint(0, 1000000))\n path_to_folder = os.path.join(os.getcwd(), 'data', 'output_' +\n local_seed)\n if os.path.exists(path_to_folder):\n shutil.rmtree(path_to_folder)\n else:\n os.mkdir(path_to_folder)\n if loadframe:\n converted = df.replace(np.NaN, '').astype(str)\n path_to_file = path_to_folder + '/tmp.csv'\n converted.to_csv(path_to_file, index=False, header=False, sep=\n ';', encoding='utf8')\n converted_len = converted.apply(lambda x: x.str.encode('utf-8')\n .apply(len)).max().to_dict()\n else:\n converted_len = pd.read_csv(path_to_file, sep=';', dtype='str',\n header=None, encoding='utf8', low_memory=False, nrows=100000)\n columns_query = f'select * from {table} where 1=0'\n session = get_session(bd, username, password)\n columns_names = pd.read_sql(columns_query, session).columns.tolist(\n )\n session.close()\n shutil.copy(path_to_file, path_to_folder + '/tmp.csv')\n converted_len.columns = columns_names\n converted_len = converted_len.apply(lambda x: x.str.encode(\n 'utf-8').apply(len)).max().to_dict()\n td_temp_table = table + '_tmp_' + local_seed\n session = get_session(bd, username, password)\n session.execute(\n f'create multiset table {td_temp_table} as {table} with no data no primary index'\n )\n session.close()\n txt = f\"\"\"USING CHARACTER SET UTF8\n DEFINE JOB teradata_upload\n Description 'Fastload script'\n (\n DEFINE OPERATOR Load_operator\n TYPE LOAD\n SCHEMA *\n ATTRIBUTES\n (\n VARCHAR TdPid='{bd}',\n VARCHAR UserName='{username}',\n VARCHAR UserPassWord='{password}',\n VARCHAR TargetTable='{td_temp_table}',\n VARCHAR LogTable='{schema}.usr_tpt_log',\n VARCHAR DateForm='AnsiDate',\n INTEGER MaxSessions={max_sessions}\n );\n\n DEFINE SCHEMA Define_Employee_Schema\n (\n {','.join(f'{key} VARCHAR({max(1, value * 2)})' for key, value in converted_len.items())} \n );\n\n DEFINE OPERATOR Producer_File_Detail\n TYPE DATACONNECTOR PRODUCER\n SCHEMA Define_Employee_Schema\n ATTRIBUTES\n (\n VARCHAR DirectoryPath='{path_to_folder}/'\n , VARCHAR FileName='tmp.csv'\n , VARCHAR TextDelimiter=';'\n , VARCHAR QuotedData = 'Optional'\n , VARCHAR OpenQuoteMark = '\"'\n , VARCHAR CloseQuoteMark = '\"'\n , VARCHAR Format='Delimited'\n , VARCHAR OpenMode='Read'\n , VARCHAR INDICATORMODE='N'\n , INTEGER BUFFERSIZE = {buffersize}\n );\n\n APPLY\n (\n 'INSERT INTO {td_temp_table}({','.join(f'{key}' for key, value in converted_len.items())}) VALUES (:{',:'.join(f'{key}' for key, value in converted_len.items())});'\n )\n TO OPERATOR(Load_operator)\n\n SELECT * FROM OPERATOR (Producer_File_Detail);\n );\"\"\"\n with open(path_to_folder + '/load_code.tpt', 'w+') as f:\n f.write(txt)\n p = subprocess.Popen(shlex.split(\n f'tbuild -f {path_to_folder}/load_code.tpt -L {path_to_folder}'))\n p.wait()\n print('Merging in Teradata... \\r', end='', flush=True)\n session = get_session(bd, username, password)\n session.execute(f'insert into {table} sel * from {td_temp_table}')\n session.close()\n print('Cleaning... \\r', end='', flush=True)\n session = get_session(bd, username, password)\n session.execute(f'drop table {td_temp_table}')\n session.close()\n shutil.rmtree(path_to_folder)\n print('Done!')\n",
"step-5": "import os\r\nimport numpy as np\r\nimport pandas as pd\r\nimport random\r\nimport platform\r\nimport subprocess\r\nimport shlex\r\nimport teradata\r\nfrom joblib import dump\r\nimport shutil\r\nfrom tqdm import tqdm\r\n\r\n\r\ndef get_session(db, usr, pwd):\r\n \"\"\"Функция устанавливает соединение с ТД и возвращает сессию\"\"\"\r\n\r\n if platform.system() == 'Windows':\r\n driver = 'Teradata'\r\n else:\r\n driver = 'Teradata Database ODBC Driver 16.20'\r\n\r\n udaExec = teradata.UdaExec(appName='DataLoad', version='0.1', logConsole=False)\r\n session = udaExec.connect(method='odbc',\r\n system=db, # Сервер ТД из файла\r\n username=usr, # Логин TD\r\n password=pwd, # Пароль TD\r\n driver = driver,\r\n charset='UTF8',\r\n autoCommit='True',\r\n USEREGIONALSETTINGS='N',\r\n transactionMode = 'TERADATA'\r\n )\r\n return session\r\n\r\n\r\ndef sql2df(query, session, chunksize=100000):\r\n \"\"\" Функция грузит из терадаты данные в батчах по 100к и склеивает их в одну таблицу \"\"\"\r\n db = pd.read_sql(query, session, chunksize=chunksize)\r\n data = pd.DataFrame()\r\n for x in tqdm(db):\r\n data = pd.concat([data, x])\r\n return data\r\n\r\n\r\ndef check_config():\r\n \"\"\" .twbcfg.ini to root path \"\"\"\r\n path = os.path.expanduser(\"~\")\r\n config_path = os.path.join(path, \".twbcfg.ini\")\r\n log_path = os.path.join(path, \"tmp\", \"teradata_logs\")\r\n\r\n if not os.path.exists(config_path):\r\n if not os.path.exists(log_path):\r\n os.mkdir(log_path)\r\n config = f'''CheckpointDirectory='{log_path}' \r\n LogDirectory='{log_path}' '''\r\n with open(config_path, 'w') as f:\r\n f.write(config)\r\n\r\n\r\n\r\ndef td_download(query=\"\",\r\n bd=\"tdsb15.cgs.sbrf.ru\",\r\n username=\"\", password=\"\",\r\n fast=False, return_df=False, csv=True,\r\n chunksize=100000):\r\n \"\"\"\r\n Функция возвращает данные из ТД: путь к csv или датафрейм.\r\n\r\n fast=True - использовать утилиты ТД, False - ODBC;\r\n return_df - вернуть датафрейм;\r\n csv - записать данные в файл при fast=False;\r\n chunksize - размер бача для ODBC;\r\n query должен содержать where, чтобы выгрузить название столбцов из БД\r\n\r\n \"\"\"\r\n local_seed = str(random.randint(0, 1000000))\r\n query = query.replace(\"\\n\", \" \")\r\n\r\n if not fast:\r\n # Teradata python package\r\n session = get_session(bd, username, password)\r\n frame = sql2df(query, session, chunksize=chunksize)\r\n session.close()\r\n if return_df:\r\n return frame\r\n else:\r\n path_to_file = os.path.join(os.getcwd(), 'data', 'input_' + local_seed)\r\n if csv:\r\n filename = path_to_file + \".csv\"\r\n frame.to_csv(filename, sep=';', index=False, encoding=\"utf8\")\r\n return filename\r\n else:\r\n dump(frame, path_to_file)\r\n return path_to_file\r\n else:\r\n # FastLoad\r\n check_config()\r\n query = query.replace(\"'\", \"''\") # prepair query for FastLoad\r\n path_to_folder = os.path.join(os.getcwd(), 'data', 'input_' + local_seed)\r\n\r\n if os.path.exists(path_to_folder):\r\n shutil.rmtree(path_to_folder)\r\n os.mkdir(path_to_folder)\r\n else:\r\n os.mkdir(path_to_folder)\r\n\r\n path_to_file = os.path.join(path_to_folder, 'dataset.csv')\r\n open(path_to_file, 'w').close()\r\n\r\n # Create utility files\r\n txt = '''SourceTdpId = '%s'\r\n ,SourceUserName = '%s' \r\n ,SourceUserPassword = '%s'\r\n ,DDLPrivateLogName = 'ddlprivate.log'\r\n ,ExportPrivateLogName = 'exportprivate.log'\r\n ,TargetErrorList = ['3807']\r\n ,TargetFileName = '%s'\r\n ,TargetFormat = 'delimited'\r\n ,TargetTextDelimiter = ';'\r\n ,TargetOpenMode = 'write'\r\n 
,SelectStmt = '%s' ''' % (bd, username, password, path_to_file, query)\r\n qtxt = '''USING CHAR SET UTF-8\r\n DEFINE JOB qstart2\r\n (\r\n APPLY TO OPERATOR ($FILE_WRITER)\r\n SELECT * FROM OPERATOR($EXPORT);\r\n );'''\r\n with open(path_to_folder + '/qstart2.txt', 'w+') as f:\r\n f.write(qtxt)\r\n with open(path_to_folder + '/jobvars.txt', 'w+') as f:\r\n f.write(txt)\r\n # run FastLoad\r\n# p = subprocess.Popen(\r\n# shlex.split(f\"tbuild -f {path_to_folder}/qstart2.txt -v {path_to_folder}/jobvars.txt -j qstart2\")\r\n# )\r\n# p.wait()\r\n p = subprocess.run(\r\n shlex.split(f\"tbuild -f {path_to_folder}/tdd.txt -v {path_to_folder}/jobvars.txt -j tdd_{str(local_seed)}\"), stdout=subprocess.PIPE, stderr=subprocess.STDOUT\r\n )\r\n\r\n # columns names\r\n query = query.replace(\"\\n\", \" \").replace(\"''\",\"'\")\r\n query = query.lower()\r\n query_list = query.split(\"where\")\r\n if len(query_list) == 2:\r\n columns_query = \" where 1=0 and \".join(query_list)\r\n session = get_session(bd, username, password)\r\n columns_names = pd.read_sql(columns_query, session).columns.tolist()\r\n session.close()\r\n else:\r\n print(\"Coudn't load columns names\")\r\n columns_names = None\r\n\r\n if not return_df:\r\n if columns_names:\r\n with open(path_to_folder + '/columns_names.txt', 'w') as f:\r\n f.write(\"\\n\".join(columns_names))\r\n return path_to_file\r\n else:\r\n if columns_names:\r\n frame = pd.read_csv(path_to_file, names=columns_names, delimiter=';')\r\n else:\r\n frame = pd.read_csv(path_to_file, header=None, delimiter=';')\r\n return frame\r\n\r\n\r\ndef py2td(x):\r\n \"\"\"Функция вставляет пропуски и корректирует тип данных под ТД\"\"\"\r\n x_type = type(x)\r\n if x_type == float:\r\n if x % 1 == 0:\r\n return int(x)\r\n else:\r\n return x\r\n elif x == 'null':\r\n return None\r\n else:\r\n return x\r\n\r\n\r\ndef td_import(\r\n username=\"\", password=\"\",\r\n bd=\"tdsb15.cgs.sbrf.ru\", tbl_name=\"\",\r\n schema=\"SBX_RETAIL_MP_PFM\",\r\n loadframe=True, df=None, path_to_file=None, fast=False,\r\n batch_size=12000, max_sessions=6, buffersize=524288,\r\n):\r\n \"\"\"\r\n Функция записывате данные в ТД через утилиты или ODBC\r\n\r\n \"\"\"\r\n table = schema + \".\" + tbl_name\r\n if not fast:\r\n if not loadframe:\r\n df = pd.read_csv(path_to_file, sep=';', encoding='utf8', index=False)\r\n # insert\r\n n_iters = len(df) // batch_size + (len(df) % batch_size > 0)\r\n df_dict = df.to_dict('records')\r\n session = get_session(bd, username, password)\r\n for i in tqdm(range(n_iters), total=n_iters):\r\n session.executemany(\r\n f\"INSERT INTO {table} VALUES ({','.join(list('?' 
 * df.shape[1]))})\",\r\n [list(row.values()) for row in df_dict[i * batch_size:i * batch_size + batch_size]],\r\n batch=True\r\n )\r\n session.close()\r\n else:\r\n check_config()\r\n local_seed = str(random.randint(0, 1000000))\r\n path_to_folder = os.path.join(os.getcwd(), \"data\", \"output_\" + local_seed)\r\n\r\n if os.path.exists(path_to_folder):\r\n shutil.rmtree(path_to_folder)\r\n else:\r\n os.mkdir(path_to_folder)\r\n\r\n if loadframe:\r\n converted = df.replace(np.NaN, '').astype(str)\r\n path_to_file = path_to_folder + '/tmp.csv'\r\n converted.to_csv(path_to_file, index=False, header=False, sep=\";\", encoding=\"utf8\")\r\n converted_len = converted.apply(lambda x: x.str.encode('utf-8').apply(len)).max().to_dict()\r\n else:\r\n converted_len = pd.read_csv(path_to_file, sep=';', dtype=\"str\", header=None, encoding=\"utf8\",\r\n low_memory=False, nrows=100000)\r\n columns_query = f\"select * from {table} where 1=0\"\r\n session = get_session(bd, username, password)\r\n columns_names = pd.read_sql(columns_query, session).columns.tolist()\r\n session.close()\r\n shutil.copy(path_to_file, path_to_folder + \"/tmp.csv\") # cp file for correct working Change to move&\r\n\r\n converted_len.columns = columns_names\r\n converted_len = converted_len.apply(lambda x: x.str.encode('utf-8').apply(len)).max().to_dict()\r\n\r\n # create empty tmp table\r\n td_temp_table = table + \"_tmp_\" + local_seed # change schema\r\n session = get_session(bd, username, password)\r\n session.execute(\r\n f\"create multiset table {td_temp_table} as {table} with no data no primary index\"\r\n )\r\n session.close()\r\n # Create utility file\r\n txt = f\"\"\"USING CHARACTER SET UTF8\r\n DEFINE JOB teradata_upload\r\n Description 'Fastload script'\r\n (\r\n DEFINE OPERATOR Load_operator\r\n TYPE LOAD\r\n SCHEMA *\r\n ATTRIBUTES\r\n (\r\n VARCHAR TdPid='{bd}',\r\n VARCHAR UserName='{username}',\r\n VARCHAR UserPassWord='{password}',\r\n VARCHAR TargetTable='{td_temp_table}',\r\n VARCHAR LogTable='{schema}.usr_tpt_log',\r\n VARCHAR DateForm='AnsiDate',\r\n INTEGER MaxSessions={max_sessions}\r\n );\r\n\r\n DEFINE SCHEMA Define_Employee_Schema\r\n (\r\n {','.join(f'{key} VARCHAR({max(1, value*2)})' for key, value in converted_len.items())} \r\n );\r\n\r\n DEFINE OPERATOR Producer_File_Detail\r\n TYPE DATACONNECTOR PRODUCER\r\n SCHEMA Define_Employee_Schema\r\n ATTRIBUTES\r\n (\r\n VARCHAR DirectoryPath='{path_to_folder}/'\r\n , VARCHAR FileName='tmp.csv'\r\n , VARCHAR TextDelimiter=';'\r\n , VARCHAR QuotedData = 'Optional'\r\n , VARCHAR OpenQuoteMark = '\"'\r\n , VARCHAR CloseQuoteMark = '\"'\r\n , VARCHAR Format='Delimited'\r\n , VARCHAR OpenMode='Read'\r\n , VARCHAR INDICATORMODE='N'\r\n , INTEGER BUFFERSIZE = {buffersize}\r\n );\r\n\r\n APPLY\r\n (\r\n 'INSERT INTO {td_temp_table}({','.join(\r\n f'{key}' for key, value in converted_len.items())}) VALUES (:{',:'.join(\r\n f'{key}' for key, value in converted_len.items())});'\r\n )\r\n TO OPERATOR(Load_operator)\r\n\r\n SELECT * FROM OPERATOR (Producer_File_Detail);\r\n );\"\"\"\r\n with open(path_to_folder + '/load_code.tpt', 'w+') as f:\r\n f.write(txt)\r\n # Start TPT load\r\n p = subprocess.Popen(\r\n shlex.split(f\"tbuild -f {path_to_folder}/load_code.tpt -L {path_to_folder}\")\r\n )\r\n p.wait()\r\n # Merge\r\n print(\"Merging in Teradata... \\r\", end='', flush=True)\r\n session = get_session(bd, username, password)\r\n session.execute(f\"insert into {table} sel * from {td_temp_table}\")\r\n session.close()\r\n # Drop temporary table\r\n print(\"Cleaning... \\r\", end='', flush=True)\r\n session = get_session(bd, username, password)\r\n session.execute(f\"drop table {td_temp_table}\")\r\n session.close()\r\n # Cleanup\r\n shutil.rmtree(path_to_folder)\r\n print(\"Done!\")\r\n",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
'''It can be seen that the number, 125874, and its double, 251748, contain
exactly the same digits, but in a different order.
Find the smallest positive integer, x, such that 2x, 3x, 4x, 5x, and 6x, contain
the same digits.
'''
import common
import itertools
def digits(x):
return set(int(d) for d in str(x))
common.assertEquals(digits(125874), digits(251748))
def same_digits_as_multiples(x, multiples):
d = digits(x)
# duplicate digits are implicitly forbidden
if len(d) != len(str(x)): return False
for i in multiples:
if d != digits(i*x):
return False
return True
common.assertEquals(True, same_digits_as_multiples(125874, [2]))
common.assertEquals(False, same_digits_as_multiples(123456, [2]))
def euler052():
multiples = range(2,7)
for i in itertools.count(10**5): # solution must have at least 6 digits
if same_digits_as_multiples(i, multiples):
return i
common.submit(euler052(), expected=142857)
|
normal
|
{
"blob_id": "2ec8b9a92f8dd42faf99f0cd569ebf356e12c1d6",
"index": 8042,
"step-1": "<mask token>\n\n\ndef digits(x):\n return set(int(d) for d in str(x))\n\n\n<mask token>\n\n\ndef euler052():\n multiples = range(2, 7)\n for i in itertools.count(10 ** 5):\n if same_digits_as_multiples(i, multiples):\n return i\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef digits(x):\n return set(int(d) for d in str(x))\n\n\n<mask token>\n\n\ndef same_digits_as_multiples(x, multiples):\n d = digits(x)\n if len(d) != len(str(x)):\n return False\n for i in multiples:\n if d != digits(i * x):\n return False\n return True\n\n\n<mask token>\n\n\ndef euler052():\n multiples = range(2, 7)\n for i in itertools.count(10 ** 5):\n if same_digits_as_multiples(i, multiples):\n return i\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef digits(x):\n return set(int(d) for d in str(x))\n\n\ncommon.assertEquals(digits(125874), digits(251748))\n\n\ndef same_digits_as_multiples(x, multiples):\n d = digits(x)\n if len(d) != len(str(x)):\n return False\n for i in multiples:\n if d != digits(i * x):\n return False\n return True\n\n\ncommon.assertEquals(True, same_digits_as_multiples(125874, [2]))\ncommon.assertEquals(False, same_digits_as_multiples(123456, [2]))\n\n\ndef euler052():\n multiples = range(2, 7)\n for i in itertools.count(10 ** 5):\n if same_digits_as_multiples(i, multiples):\n return i\n\n\ncommon.submit(euler052(), expected=142857)\n",
"step-4": "<mask token>\nimport common\nimport itertools\n\n\ndef digits(x):\n return set(int(d) for d in str(x))\n\n\ncommon.assertEquals(digits(125874), digits(251748))\n\n\ndef same_digits_as_multiples(x, multiples):\n d = digits(x)\n if len(d) != len(str(x)):\n return False\n for i in multiples:\n if d != digits(i * x):\n return False\n return True\n\n\ncommon.assertEquals(True, same_digits_as_multiples(125874, [2]))\ncommon.assertEquals(False, same_digits_as_multiples(123456, [2]))\n\n\ndef euler052():\n multiples = range(2, 7)\n for i in itertools.count(10 ** 5):\n if same_digits_as_multiples(i, multiples):\n return i\n\n\ncommon.submit(euler052(), expected=142857)\n",
"step-5": "'''It can be seen that the number, 125874, and its double, 251748, contain\nexactly the same digits, but in a different order.\n\nFind the smallest positive integer, x, such that 2x, 3x, 4x, 5x, and 6x, contain\nthe same digits.\n'''\n\nimport common\nimport itertools\n\ndef digits(x):\n return set(int(d) for d in str(x))\n\ncommon.assertEquals(digits(125874), digits(251748))\n\ndef same_digits_as_multiples(x, multiples):\n d = digits(x)\n\n # duplicate digits are implicitly forbidden\n if len(d) != len(str(x)): return False\n\n for i in multiples:\n if d != digits(i*x):\n return False\n return True\n\ncommon.assertEquals(True, same_digits_as_multiples(125874, [2]))\ncommon.assertEquals(False, same_digits_as_multiples(123456, [2]))\n\ndef euler052():\n multiples = range(2,7)\n for i in itertools.count(10**5): # solution must have at least 6 digits\n if same_digits_as_multiples(i, multiples):\n return i\n\ncommon.submit(euler052(), expected=142857)",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
#!/usr/bin/env python
# coding: utf-8
from __future__ import print_function
import uuid
import cgi
import squareconnect
from squareconnect.rest import ApiException
from squareconnect.apis.transactions_api import TransactionsApi
from squareconnect.apis.locations_api import LocationsApi
from squareconnect.apis.customers_api import CustomersApi
# Create instance of FieldStorage
form = cgi.FieldStorage()
# Get data from fields
nonce = form.getvalue('nonce')
# Get amount data
donation = form.getvalue('amount')
boxChecked = form.getvalue('boxChecked')
firstName = form.getvalue('firstname')
lastName = form.getvalue('lastname')
email = form.getvalue('email')
# The access token to use in all Connect API requests. Use your *sandbox* access
# token if you're just testing things out.
squareconnect.configuration.access_token = 'sandbox-sq0atb-kfvpHvEa9Mz2098Nozk1RQ'
# The ID of the business location to associate processed payments with.
# See [Retrieve your business's locations]
# (https://docs.connect.squareup.com/articles/getting-started/#retrievemerchantprofile)
# for an easy way to get your business's location IDs.
# If you're testing things out, use a sandbox location ID.
location_id = 'CBASEGLb1fOhVH4Uvvi1aY_bOawgAQ'
transactions_api_instance = TransactionsApi()
customers_api_instance = CustomersApi()
# Every payment you process with the SDK must have a unique idempotency key.
# If you're unsure whether a particular payment succeeded, you can reattempt
# it with the same idempotency key without worrying about double charging
# the buyer.
idempotency_key = str(uuid.uuid1())
# Monetary amounts are specified in the smallest unit of the applicable currency.
# This amount is in cents. It's also hard-coded for $1.00, which isn't very useful.
amount = {'amount': int(donation) * 100, 'currency': 'USD'}
customersList = []
# Add a customer to file
if boxChecked == "true":
heading = "Recurring Donation"
customerRequest = {'given_name': firstName, 'family_name': lastName, 'email_address': email}
try:
customerResponse = customers_api_instance.create_customer(customerRequest)
except ApiException as e:
print ("customer creation failed")
print (e)
exit()
customer = customerResponse.customer
customerCardRequest = {'card_nonce': nonce}
try:
customerCardResponse = customers_api_instance.create_customer_card(customer.id, customerCardRequest)
except:
print ("customer card creation failed")
exit()
customerCard = customerCardResponse.card
body = {'customer_id': customer.id, 'customer_card_id': customerCard.id, 'idempotency_key': idempotency_key, 'amount_money': amount}
customersList = customers_api_instance.list_customers()
else:
# To learn more about splitting transactions with additional recipients,
# see the Transactions API documentation on our [developer site]
# (https://docs.connect.squareup.com/payments/transactions/overview#mpt-overview).
heading = "One time Donation"
body = {'idempotency_key': idempotency_key, 'card_nonce': nonce, 'amount_money': amount}
# customersList = Non
# The SDK throws an exception if a Connect endpoint responds with anything besides
# a 200-level HTTP code. This block catches any exceptions that occur from the request.
try:
api_response = transactions_api_instance.charge(location_id, body)
res = api_response.transaction
except ApiException as e:
res = "Exception when calling TransactionApi->charge: {}".format(e)
# Display the result
print ('Content-type:text/html\r\n\r\n')
print ('<html>')
print ('<head>')
print ('<title>Square Payment</title>')
print ('</head>')
print ('<body>')
print ('<h2>Result: </h2>')
print( '<h2>{}</h2>'.format(heading))
print ('<p>{}</p>'.format(res))
if customersList:
print( '<h2>Customers stored on File: </h2>')
for customer in customersList.customers:
print ('<p>{}</p>'.format(customer))
print ('</body>')
print ('</html>')
|
normal
|
{
"blob_id": "bb7910af5334641fd2db7146112afaff7a2e42b9",
"index": 565,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif boxChecked == 'true':\n heading = 'Recurring Donation'\n customerRequest = {'given_name': firstName, 'family_name': lastName,\n 'email_address': email}\n try:\n customerResponse = customers_api_instance.create_customer(\n customerRequest)\n except ApiException as e:\n print('customer creation failed')\n print(e)\n exit()\n customer = customerResponse.customer\n customerCardRequest = {'card_nonce': nonce}\n try:\n customerCardResponse = customers_api_instance.create_customer_card(\n customer.id, customerCardRequest)\n except:\n print('customer card creation failed')\n exit()\n customerCard = customerCardResponse.card\n body = {'customer_id': customer.id, 'customer_card_id': customerCard.id,\n 'idempotency_key': idempotency_key, 'amount_money': amount}\n customersList = customers_api_instance.list_customers()\nelse:\n heading = 'One time Donation'\n body = {'idempotency_key': idempotency_key, 'card_nonce': nonce,\n 'amount_money': amount}\ntry:\n api_response = transactions_api_instance.charge(location_id, body)\n res = api_response.transaction\nexcept ApiException as e:\n res = 'Exception when calling TransactionApi->charge: {}'.format(e)\nprint('Content-type:text/html\\r\\n\\r\\n')\nprint('<html>')\nprint('<head>')\nprint('<title>Square Payment</title>')\nprint('</head>')\nprint('<body>')\nprint('<h2>Result: </h2>')\nprint('<h2>{}</h2>'.format(heading))\nprint('<p>{}</p>'.format(res))\nif customersList:\n print('<h2>Customers stored on File: </h2>')\n for customer in customersList.customers:\n print('<p>{}</p>'.format(customer))\nprint('</body>')\nprint('</html>')\n",
"step-3": "<mask token>\nform = cgi.FieldStorage()\nnonce = form.getvalue('nonce')\ndonation = form.getvalue('amount')\nboxChecked = form.getvalue('boxChecked')\nfirstName = form.getvalue('firstname')\nlastName = form.getvalue('lastname')\nemail = form.getvalue('email')\nsquareconnect.configuration.access_token = (\n 'sandbox-sq0atb-kfvpHvEa9Mz2098Nozk1RQ')\nlocation_id = 'CBASEGLb1fOhVH4Uvvi1aY_bOawgAQ'\ntransactions_api_instance = TransactionsApi()\ncustomers_api_instance = CustomersApi()\nidempotency_key = str(uuid.uuid1())\namount = {'amount': int(donation) * 100, 'currency': 'USD'}\ncustomersList = []\nif boxChecked == 'true':\n heading = 'Recurring Donation'\n customerRequest = {'given_name': firstName, 'family_name': lastName,\n 'email_address': email}\n try:\n customerResponse = customers_api_instance.create_customer(\n customerRequest)\n except ApiException as e:\n print('customer creation failed')\n print(e)\n exit()\n customer = customerResponse.customer\n customerCardRequest = {'card_nonce': nonce}\n try:\n customerCardResponse = customers_api_instance.create_customer_card(\n customer.id, customerCardRequest)\n except:\n print('customer card creation failed')\n exit()\n customerCard = customerCardResponse.card\n body = {'customer_id': customer.id, 'customer_card_id': customerCard.id,\n 'idempotency_key': idempotency_key, 'amount_money': amount}\n customersList = customers_api_instance.list_customers()\nelse:\n heading = 'One time Donation'\n body = {'idempotency_key': idempotency_key, 'card_nonce': nonce,\n 'amount_money': amount}\ntry:\n api_response = transactions_api_instance.charge(location_id, body)\n res = api_response.transaction\nexcept ApiException as e:\n res = 'Exception when calling TransactionApi->charge: {}'.format(e)\nprint('Content-type:text/html\\r\\n\\r\\n')\nprint('<html>')\nprint('<head>')\nprint('<title>Square Payment</title>')\nprint('</head>')\nprint('<body>')\nprint('<h2>Result: </h2>')\nprint('<h2>{}</h2>'.format(heading))\nprint('<p>{}</p>'.format(res))\nif customersList:\n print('<h2>Customers stored on File: </h2>')\n for customer in customersList.customers:\n print('<p>{}</p>'.format(customer))\nprint('</body>')\nprint('</html>')\n",
"step-4": "from __future__ import print_function\nimport uuid\nimport cgi\nimport squareconnect\nfrom squareconnect.rest import ApiException\nfrom squareconnect.apis.transactions_api import TransactionsApi\nfrom squareconnect.apis.locations_api import LocationsApi\nfrom squareconnect.apis.customers_api import CustomersApi\nform = cgi.FieldStorage()\nnonce = form.getvalue('nonce')\ndonation = form.getvalue('amount')\nboxChecked = form.getvalue('boxChecked')\nfirstName = form.getvalue('firstname')\nlastName = form.getvalue('lastname')\nemail = form.getvalue('email')\nsquareconnect.configuration.access_token = (\n 'sandbox-sq0atb-kfvpHvEa9Mz2098Nozk1RQ')\nlocation_id = 'CBASEGLb1fOhVH4Uvvi1aY_bOawgAQ'\ntransactions_api_instance = TransactionsApi()\ncustomers_api_instance = CustomersApi()\nidempotency_key = str(uuid.uuid1())\namount = {'amount': int(donation) * 100, 'currency': 'USD'}\ncustomersList = []\nif boxChecked == 'true':\n heading = 'Recurring Donation'\n customerRequest = {'given_name': firstName, 'family_name': lastName,\n 'email_address': email}\n try:\n customerResponse = customers_api_instance.create_customer(\n customerRequest)\n except ApiException as e:\n print('customer creation failed')\n print(e)\n exit()\n customer = customerResponse.customer\n customerCardRequest = {'card_nonce': nonce}\n try:\n customerCardResponse = customers_api_instance.create_customer_card(\n customer.id, customerCardRequest)\n except:\n print('customer card creation failed')\n exit()\n customerCard = customerCardResponse.card\n body = {'customer_id': customer.id, 'customer_card_id': customerCard.id,\n 'idempotency_key': idempotency_key, 'amount_money': amount}\n customersList = customers_api_instance.list_customers()\nelse:\n heading = 'One time Donation'\n body = {'idempotency_key': idempotency_key, 'card_nonce': nonce,\n 'amount_money': amount}\ntry:\n api_response = transactions_api_instance.charge(location_id, body)\n res = api_response.transaction\nexcept ApiException as e:\n res = 'Exception when calling TransactionApi->charge: {}'.format(e)\nprint('Content-type:text/html\\r\\n\\r\\n')\nprint('<html>')\nprint('<head>')\nprint('<title>Square Payment</title>')\nprint('</head>')\nprint('<body>')\nprint('<h2>Result: </h2>')\nprint('<h2>{}</h2>'.format(heading))\nprint('<p>{}</p>'.format(res))\nif customersList:\n print('<h2>Customers stored on File: </h2>')\n for customer in customersList.customers:\n print('<p>{}</p>'.format(customer))\nprint('</body>')\nprint('</html>')\n",
"step-5": "#!/usr/bin/env python\n# coding: utf-8\nfrom __future__ import print_function\nimport uuid\nimport cgi\n\nimport squareconnect\nfrom squareconnect.rest import ApiException\nfrom squareconnect.apis.transactions_api import TransactionsApi\nfrom squareconnect.apis.locations_api import LocationsApi\nfrom squareconnect.apis.customers_api import CustomersApi\n\n# Create instance of FieldStorage\nform = cgi.FieldStorage()\n\n# Get data from fields\nnonce = form.getvalue('nonce')\n# Get amount data\ndonation = form.getvalue('amount')\n\nboxChecked = form.getvalue('boxChecked')\nfirstName = form.getvalue('firstname')\nlastName = form.getvalue('lastname')\nemail = form.getvalue('email')\n\n\n# The access token to use in all Connect API requests. Use your *sandbox* access\n# token if you're just testing things out.\nsquareconnect.configuration.access_token = 'sandbox-sq0atb-kfvpHvEa9Mz2098Nozk1RQ'\n\n# The ID of the business location to associate processed payments with.\n# See [Retrieve your business's locations]\n# (https://docs.connect.squareup.com/articles/getting-started/#retrievemerchantprofile)\n# for an easy way to get your business's location IDs.\n# If you're testing things out, use a sandbox location ID.\nlocation_id = 'CBASEGLb1fOhVH4Uvvi1aY_bOawgAQ'\n\ntransactions_api_instance = TransactionsApi()\ncustomers_api_instance = CustomersApi()\n\n# Every payment you process with the SDK must have a unique idempotency key.\n# If you're unsure whether a particular payment succeeded, you can reattempt\n# it with the same idempotency key without worrying about double charging\n# the buyer.\nidempotency_key = str(uuid.uuid1())\n\n# Monetary amounts are specified in the smallest unit of the applicable currency.\n# This amount is in cents. It's also hard-coded for $1.00, which isn't very useful.\namount = {'amount': int(donation) * 100, 'currency': 'USD'}\n\ncustomersList = []\n\n# Add a customer to file\nif boxChecked == \"true\": \n\theading = \"Recurring Donation\"\n\tcustomerRequest = {'given_name': firstName, 'family_name': lastName, 'email_address': email}\n\n\ttry:\n\t\tcustomerResponse = customers_api_instance.create_customer(customerRequest)\n\texcept ApiException as e:\n\t\tprint (\"customer creation failed\")\n\t\tprint (e)\n\t\texit()\n\n\tcustomer = customerResponse.customer\n\tcustomerCardRequest = {'card_nonce': nonce}\n\n\ttry:\n\t\tcustomerCardResponse = customers_api_instance.create_customer_card(customer.id, customerCardRequest)\n\texcept:\n\t\tprint (\"customer card creation failed\")\n\t\texit()\n\n\tcustomerCard = customerCardResponse.card\n\n\tbody = {'customer_id': customer.id, 'customer_card_id': customerCard.id, 'idempotency_key': idempotency_key, 'amount_money': amount}\n\tcustomersList = customers_api_instance.list_customers()\nelse:\n\t# To learn more about splitting transactions with additional recipients,\n\t# see the Transactions API documentation on our [developer site]\n\t# (https://docs.connect.squareup.com/payments/transactions/overview#mpt-overview).\n\theading = \"One time Donation\"\n\tbody = {'idempotency_key': idempotency_key, 'card_nonce': nonce, 'amount_money': amount}\n\t# customersList = Non\n\n\n# The SDK throws an exception if a Connect endpoint responds with anything besides\n# a 200-level HTTP code. 
This block catches any exceptions that occur from the request.\ntry:\n api_response = transactions_api_instance.charge(location_id, body)\n res = api_response.transaction\nexcept ApiException as e:\n res = \"Exception when calling TransactionApi->charge: {}\".format(e)\n\n# Display the result\nprint ('Content-type:text/html\\r\\n\\r\\n')\nprint ('<html>')\nprint ('<head>')\nprint ('<title>Square Payment</title>')\nprint ('</head>')\nprint ('<body>')\nprint ('<h2>Result: </h2>')\nprint( '<h2>{}</h2>'.format(heading))\nprint ('<p>{}</p>'.format(res))\nif customersList:\n\tprint( '<h2>Customers stored on File: </h2>')\n\tfor customer in customersList.customers:\n\t\tprint ('<p>{}</p>'.format(customer))\n\nprint ('</body>')\nprint ('</html>')\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
#!/usr/bin/env python3
import csv
import math
def load_data_from_file(filename):
"""
Load that data, my dude(tte)
:param filename: The file from which you want to load data
:return: Time and position data of the file
"""
time = []
position = []
with open(filename, 'r') as original:
time_position = list(csv.reader(original)) # list()
for row in range(1, len(time_position)):
time.append(float(time_position[row][0]))
position.append(float(time_position[row][1]))
return time, position
def greater_than_index(numlist, singnum):
"""
Function takes in a list of ints, compares them to a single int and returns the index value at which the
list encounters a value greater than, or equal to, the value of interest.
:param numlist: The list of ints
:param singnum: The int to compare the list to
:return: The index value of the position >= value of interest
"""
try:
for elem in numlist:
if elem >= singnum:
e_val = numlist.index(elem)
return e_val
except ValueError:
return 'None. Try a value contained within the list.'
def less_than_index(numlist, singnum):
"""
Function takes in a list of ints, compares them to a single int and returns the index value at which the
list encounters a value greater than, or equal to, the value of interest.
:param numlist: The list of ints
:param singnum: The int to compare the list to
:return: The index value of the position >= value of interest
"""
try:
for elem in numlist:
if elem <= singnum:
e_val = numlist.index(elem)
return e_val
except ValueError:
return 'None. Try a value contained within the list.'
def ini_max_fin(pos1):
c_initial = pos1[0]
c_max = max(pos1)
c_final = pos1[-1]
return c_initial, c_max, c_final
def char_ests(time_c, pos_c, c_initial, c_max, c_final):
"""
This function estimates the characteristics of the waveform we're analyzing
:param time_c: A list of time values to determine the time it takes for certain things to occur
:param pos_c: A list of position values to determine the position at certain values of time
:param c_initial: The initial position value of our waveform
:param c_max: The maximum position value of our waveform
:param c_final: The final value of our waveform
:return: Rise time (t_r), Peak time(t_p), % Overshoot(p_os_fix), Settling time (t_s).
"""
# Index values for time statements
maxdex = pos_c.index(c_max)
ten_perc = (c_final + c_initial) * 0.1
tr_10 = greater_than_index(pos_c, ten_perc)
ninety_p = (c_final + c_initial) * 0.9
tr_90 = greater_than_index(pos_c, ninety_p)
# Calculations
t_r = time_c[tr_10] - time_c[tr_90] # Rise time
t_p = time_c[maxdex] # Peak time
# Adjusted %OS eq
p_os_fix = ((c_max - c_final) / (c_final-c_initial)) * 100 # %OS
# two percent calcs
two_perc = (c_final - c_initial) * 0.02
c_thresh_low = c_final - two_perc
c_thresh_high = c_final + two_perc
mcfly = list(reversed(time_c))
beckett = list(reversed(pos_c))
minlist = [less_than_index(beckett, c_thresh_low), greater_than_index(beckett, c_thresh_high)]
t_s = mcfly[min(minlist)] # Settling time
return t_r, t_p, p_os_fix, t_s
def get_system_params(perc_os, settle_t):
"""
:param perc_os: The Overshoot Percentage value from which to calculate things
:param settle_t: The settling time from which to calculate things
:return: The mass (m_spr), spring (k_spr), and damping constants(c_spr)
"""
num_zet = -math.log(perc_os/100)
den_zet = math.sqrt(math.pi**2 + math.log(perc_os/100)**2)
zeta = num_zet/den_zet
omega = 4 / (zeta*settle_t)
m_spr = 1 # Told to assume mass is always 1 (unit)
k_spr = omega**2
c_spr = 2*zeta*omega
return m_spr, k_spr, c_spr
def analyze_data(filename):
"""
:param filename: A name for the csv file to run the resulting operations
:return: A dictionary with some gucci values
"""
backtime, backpos = load_data_from_file(filename)
c_i, c_m, c_f = ini_max_fin(backpos)
t_rise, t_peak, percos, t_set = char_ests(backtime, backpos, c_i, c_m, c_f)
m, k, c = get_system_params(percos, t_set)
dict_party = {'c_initial': c_i, 'c_max': c_m, 'c_final': c_f, 'rise_time': t_rise, 'peak_time': t_peak,
'perc_overshoot': percos, 'settling_time': t_set, 'system_mass': m, 'system_spring': k,
'system_damping': c}
true_dict = {}
for key in sorted(dict_party):
true_dict.update({key: dict_party[key]})
return true_dict
if __name__ == '__main__':
print(analyze_data('data1.csv'))
# print(analyze_data('data2.csv'))
# print(analyze_data('data3.csv'))
# print(analyze_data('data4.csv'))
|
normal
|
{
"blob_id": "4545ce36c4d3df50e263d3323c04c53acb2b50e0",
"index": 7888,
"step-1": "<mask token>\n\n\ndef load_data_from_file(filename):\n \"\"\"\n Load that data, my dude(tte)\n :param filename: The file from which you want to load data\n :return: Time and position data of the file\n \"\"\"\n time = []\n position = []\n with open(filename, 'r') as original:\n time_position = list(csv.reader(original))\n for row in range(1, len(time_position)):\n time.append(float(time_position[row][0]))\n position.append(float(time_position[row][1]))\n return time, position\n\n\ndef greater_than_index(numlist, singnum):\n \"\"\"\n Function takes in a list of ints, compares them to a single int and returns the index value at which the\n list encounters a value greater than, or equal to, the value of interest.\n :param numlist: The list of ints\n :param singnum: The int to compare the list to\n :return: The index value of the position >= value of interest\n \"\"\"\n try:\n for elem in numlist:\n if elem >= singnum:\n e_val = numlist.index(elem)\n return e_val\n except ValueError:\n return 'None. Try a value contained within the list.'\n\n\n<mask token>\n\n\ndef ini_max_fin(pos1):\n c_initial = pos1[0]\n c_max = max(pos1)\n c_final = pos1[-1]\n return c_initial, c_max, c_final\n\n\n<mask token>\n\n\ndef get_system_params(perc_os, settle_t):\n \"\"\"\n :param perc_os: The Overshoot Percentage value from which to calculate things \n :param settle_t: The settling time from which to calculate things\n :return: The mass (m_spr), spring (k_spr), and damping constants(c_spr)\n \"\"\"\n num_zet = -math.log(perc_os / 100)\n den_zet = math.sqrt(math.pi ** 2 + math.log(perc_os / 100) ** 2)\n zeta = num_zet / den_zet\n omega = 4 / (zeta * settle_t)\n m_spr = 1\n k_spr = omega ** 2\n c_spr = 2 * zeta * omega\n return m_spr, k_spr, c_spr\n\n\ndef analyze_data(filename):\n \"\"\"\n :param filename: A name for the csv file to run the resulting operations \n :return: A dictionary with some gucci values\n \"\"\"\n backtime, backpos = load_data_from_file(filename)\n c_i, c_m, c_f = ini_max_fin(backpos)\n t_rise, t_peak, percos, t_set = char_ests(backtime, backpos, c_i, c_m, c_f)\n m, k, c = get_system_params(percos, t_set)\n dict_party = {'c_initial': c_i, 'c_max': c_m, 'c_final': c_f,\n 'rise_time': t_rise, 'peak_time': t_peak, 'perc_overshoot': percos,\n 'settling_time': t_set, 'system_mass': m, 'system_spring': k,\n 'system_damping': c}\n true_dict = {}\n for key in sorted(dict_party):\n true_dict.update({key: dict_party[key]})\n return true_dict\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef load_data_from_file(filename):\n \"\"\"\n Load that data, my dude(tte)\n :param filename: The file from which you want to load data\n :return: Time and position data of the file\n \"\"\"\n time = []\n position = []\n with open(filename, 'r') as original:\n time_position = list(csv.reader(original))\n for row in range(1, len(time_position)):\n time.append(float(time_position[row][0]))\n position.append(float(time_position[row][1]))\n return time, position\n\n\ndef greater_than_index(numlist, singnum):\n \"\"\"\n Function takes in a list of ints, compares them to a single int and returns the index value at which the\n list encounters a value greater than, or equal to, the value of interest.\n :param numlist: The list of ints\n :param singnum: The int to compare the list to\n :return: The index value of the position >= value of interest\n \"\"\"\n try:\n for elem in numlist:\n if elem >= singnum:\n e_val = numlist.index(elem)\n return e_val\n except ValueError:\n return 'None. Try a value contained within the list.'\n\n\n<mask token>\n\n\ndef ini_max_fin(pos1):\n c_initial = pos1[0]\n c_max = max(pos1)\n c_final = pos1[-1]\n return c_initial, c_max, c_final\n\n\ndef char_ests(time_c, pos_c, c_initial, c_max, c_final):\n \"\"\"\n This function estimates the characteristics of the waveform we're analyzing\n :param time_c: A list of time values to determine the time it takes for certain things to occur\n :param pos_c: A list of position values to determine the position at certain values of time\n :param c_initial: The initial position value of our waveform\n :param c_max: The maximum position value of our waveform\n :param c_final: The final value of our waveform\n :return: Rise time (t_r), Peak time(t_p), % Overshoot(p_os_fix), Settling time (t_s).\n \"\"\"\n maxdex = pos_c.index(c_max)\n ten_perc = (c_final + c_initial) * 0.1\n tr_10 = greater_than_index(pos_c, ten_perc)\n ninety_p = (c_final + c_initial) * 0.9\n tr_90 = greater_than_index(pos_c, ninety_p)\n t_r = time_c[tr_10] - time_c[tr_90]\n t_p = time_c[maxdex]\n p_os_fix = (c_max - c_final) / (c_final - c_initial) * 100\n two_perc = (c_final - c_initial) * 0.02\n c_thresh_low = c_final - two_perc\n c_thresh_high = c_final + two_perc\n mcfly = list(reversed(time_c))\n beckett = list(reversed(pos_c))\n minlist = [less_than_index(beckett, c_thresh_low), greater_than_index(\n beckett, c_thresh_high)]\n t_s = mcfly[min(minlist)]\n return t_r, t_p, p_os_fix, t_s\n\n\ndef get_system_params(perc_os, settle_t):\n \"\"\"\n :param perc_os: The Overshoot Percentage value from which to calculate things \n :param settle_t: The settling time from which to calculate things\n :return: The mass (m_spr), spring (k_spr), and damping constants(c_spr)\n \"\"\"\n num_zet = -math.log(perc_os / 100)\n den_zet = math.sqrt(math.pi ** 2 + math.log(perc_os / 100) ** 2)\n zeta = num_zet / den_zet\n omega = 4 / (zeta * settle_t)\n m_spr = 1\n k_spr = omega ** 2\n c_spr = 2 * zeta * omega\n return m_spr, k_spr, c_spr\n\n\ndef analyze_data(filename):\n \"\"\"\n :param filename: A name for the csv file to run the resulting operations \n :return: A dictionary with some gucci values\n \"\"\"\n backtime, backpos = load_data_from_file(filename)\n c_i, c_m, c_f = ini_max_fin(backpos)\n t_rise, t_peak, percos, t_set = char_ests(backtime, backpos, c_i, c_m, c_f)\n m, k, c = get_system_params(percos, t_set)\n dict_party = {'c_initial': c_i, 'c_max': c_m, 'c_final': c_f,\n 'rise_time': t_rise, 'peak_time': t_peak, 'perc_overshoot': percos,\n 
'settling_time': t_set, 'system_mass': m, 'system_spring': k,\n 'system_damping': c}\n true_dict = {}\n for key in sorted(dict_party):\n true_dict.update({key: dict_party[key]})\n return true_dict\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef load_data_from_file(filename):\n \"\"\"\n Load that data, my dude(tte)\n :param filename: The file from which you want to load data\n :return: Time and position data of the file\n \"\"\"\n time = []\n position = []\n with open(filename, 'r') as original:\n time_position = list(csv.reader(original))\n for row in range(1, len(time_position)):\n time.append(float(time_position[row][0]))\n position.append(float(time_position[row][1]))\n return time, position\n\n\ndef greater_than_index(numlist, singnum):\n \"\"\"\n Function takes in a list of ints, compares them to a single int and returns the index value at which the\n list encounters a value greater than, or equal to, the value of interest.\n :param numlist: The list of ints\n :param singnum: The int to compare the list to\n :return: The index value of the position >= value of interest\n \"\"\"\n try:\n for elem in numlist:\n if elem >= singnum:\n e_val = numlist.index(elem)\n return e_val\n except ValueError:\n return 'None. Try a value contained within the list.'\n\n\ndef less_than_index(numlist, singnum):\n \"\"\"\n Function takes in a list of ints, compares them to a single int and returns the index value at which the\n list encounters a value greater than, or equal to, the value of interest.\n :param numlist: The list of ints\n :param singnum: The int to compare the list to\n :return: The index value of the position >= value of interest\n \"\"\"\n try:\n for elem in numlist:\n if elem <= singnum:\n e_val = numlist.index(elem)\n return e_val\n except ValueError:\n return 'None. Try a value contained within the list.'\n\n\ndef ini_max_fin(pos1):\n c_initial = pos1[0]\n c_max = max(pos1)\n c_final = pos1[-1]\n return c_initial, c_max, c_final\n\n\ndef char_ests(time_c, pos_c, c_initial, c_max, c_final):\n \"\"\"\n This function estimates the characteristics of the waveform we're analyzing\n :param time_c: A list of time values to determine the time it takes for certain things to occur\n :param pos_c: A list of position values to determine the position at certain values of time\n :param c_initial: The initial position value of our waveform\n :param c_max: The maximum position value of our waveform\n :param c_final: The final value of our waveform\n :return: Rise time (t_r), Peak time(t_p), % Overshoot(p_os_fix), Settling time (t_s).\n \"\"\"\n maxdex = pos_c.index(c_max)\n ten_perc = (c_final + c_initial) * 0.1\n tr_10 = greater_than_index(pos_c, ten_perc)\n ninety_p = (c_final + c_initial) * 0.9\n tr_90 = greater_than_index(pos_c, ninety_p)\n t_r = time_c[tr_10] - time_c[tr_90]\n t_p = time_c[maxdex]\n p_os_fix = (c_max - c_final) / (c_final - c_initial) * 100\n two_perc = (c_final - c_initial) * 0.02\n c_thresh_low = c_final - two_perc\n c_thresh_high = c_final + two_perc\n mcfly = list(reversed(time_c))\n beckett = list(reversed(pos_c))\n minlist = [less_than_index(beckett, c_thresh_low), greater_than_index(\n beckett, c_thresh_high)]\n t_s = mcfly[min(minlist)]\n return t_r, t_p, p_os_fix, t_s\n\n\ndef get_system_params(perc_os, settle_t):\n \"\"\"\n :param perc_os: The Overshoot Percentage value from which to calculate things \n :param settle_t: The settling time from which to calculate things\n :return: The mass (m_spr), spring (k_spr), and damping constants(c_spr)\n \"\"\"\n num_zet = -math.log(perc_os / 100)\n den_zet = math.sqrt(math.pi ** 2 + math.log(perc_os / 100) ** 2)\n zeta = num_zet / den_zet\n omega = 4 / (zeta * settle_t)\n m_spr = 1\n k_spr = omega ** 2\n c_spr = 2 * zeta * omega\n return 
m_spr, k_spr, c_spr\n\n\ndef analyze_data(filename):\n \"\"\"\n :param filename: A name for the csv file to run the resulting operations \n :return: A dictionary with some gucci values\n \"\"\"\n backtime, backpos = load_data_from_file(filename)\n c_i, c_m, c_f = ini_max_fin(backpos)\n t_rise, t_peak, percos, t_set = char_ests(backtime, backpos, c_i, c_m, c_f)\n m, k, c = get_system_params(percos, t_set)\n dict_party = {'c_initial': c_i, 'c_max': c_m, 'c_final': c_f,\n 'rise_time': t_rise, 'peak_time': t_peak, 'perc_overshoot': percos,\n 'settling_time': t_set, 'system_mass': m, 'system_spring': k,\n 'system_damping': c}\n true_dict = {}\n for key in sorted(dict_party):\n true_dict.update({key: dict_party[key]})\n return true_dict\n\n\nif __name__ == '__main__':\n print(analyze_data('data1.csv'))\n",
"step-4": "import csv\nimport math\n\n\ndef load_data_from_file(filename):\n \"\"\"\n Load that data, my dude(tte)\n :param filename: The file from which you want to load data\n :return: Time and position data of the file\n \"\"\"\n time = []\n position = []\n with open(filename, 'r') as original:\n time_position = list(csv.reader(original))\n for row in range(1, len(time_position)):\n time.append(float(time_position[row][0]))\n position.append(float(time_position[row][1]))\n return time, position\n\n\ndef greater_than_index(numlist, singnum):\n \"\"\"\n Function takes in a list of ints, compares them to a single int and returns the index value at which the\n list encounters a value greater than, or equal to, the value of interest.\n :param numlist: The list of ints\n :param singnum: The int to compare the list to\n :return: The index value of the position >= value of interest\n \"\"\"\n try:\n for elem in numlist:\n if elem >= singnum:\n e_val = numlist.index(elem)\n return e_val\n except ValueError:\n return 'None. Try a value contained within the list.'\n\n\ndef less_than_index(numlist, singnum):\n \"\"\"\n Function takes in a list of ints, compares them to a single int and returns the index value at which the\n list encounters a value greater than, or equal to, the value of interest.\n :param numlist: The list of ints\n :param singnum: The int to compare the list to\n :return: The index value of the position >= value of interest\n \"\"\"\n try:\n for elem in numlist:\n if elem <= singnum:\n e_val = numlist.index(elem)\n return e_val\n except ValueError:\n return 'None. Try a value contained within the list.'\n\n\ndef ini_max_fin(pos1):\n c_initial = pos1[0]\n c_max = max(pos1)\n c_final = pos1[-1]\n return c_initial, c_max, c_final\n\n\ndef char_ests(time_c, pos_c, c_initial, c_max, c_final):\n \"\"\"\n This function estimates the characteristics of the waveform we're analyzing\n :param time_c: A list of time values to determine the time it takes for certain things to occur\n :param pos_c: A list of position values to determine the position at certain values of time\n :param c_initial: The initial position value of our waveform\n :param c_max: The maximum position value of our waveform\n :param c_final: The final value of our waveform\n :return: Rise time (t_r), Peak time(t_p), % Overshoot(p_os_fix), Settling time (t_s).\n \"\"\"\n maxdex = pos_c.index(c_max)\n ten_perc = (c_final + c_initial) * 0.1\n tr_10 = greater_than_index(pos_c, ten_perc)\n ninety_p = (c_final + c_initial) * 0.9\n tr_90 = greater_than_index(pos_c, ninety_p)\n t_r = time_c[tr_10] - time_c[tr_90]\n t_p = time_c[maxdex]\n p_os_fix = (c_max - c_final) / (c_final - c_initial) * 100\n two_perc = (c_final - c_initial) * 0.02\n c_thresh_low = c_final - two_perc\n c_thresh_high = c_final + two_perc\n mcfly = list(reversed(time_c))\n beckett = list(reversed(pos_c))\n minlist = [less_than_index(beckett, c_thresh_low), greater_than_index(\n beckett, c_thresh_high)]\n t_s = mcfly[min(minlist)]\n return t_r, t_p, p_os_fix, t_s\n\n\ndef get_system_params(perc_os, settle_t):\n \"\"\"\n :param perc_os: The Overshoot Percentage value from which to calculate things \n :param settle_t: The settling time from which to calculate things\n :return: The mass (m_spr), spring (k_spr), and damping constants(c_spr)\n \"\"\"\n num_zet = -math.log(perc_os / 100)\n den_zet = math.sqrt(math.pi ** 2 + math.log(perc_os / 100) ** 2)\n zeta = num_zet / den_zet\n omega = 4 / (zeta * settle_t)\n m_spr = 1\n k_spr = omega ** 2\n c_spr = 2 * zeta * 
omega\n return m_spr, k_spr, c_spr\n\n\ndef analyze_data(filename):\n \"\"\"\n :param filename: A name for the csv file to run the resulting operations \n :return: A dictionary with some gucci values\n \"\"\"\n backtime, backpos = load_data_from_file(filename)\n c_i, c_m, c_f = ini_max_fin(backpos)\n t_rise, t_peak, percos, t_set = char_ests(backtime, backpos, c_i, c_m, c_f)\n m, k, c = get_system_params(percos, t_set)\n dict_party = {'c_initial': c_i, 'c_max': c_m, 'c_final': c_f,\n 'rise_time': t_rise, 'peak_time': t_peak, 'perc_overshoot': percos,\n 'settling_time': t_set, 'system_mass': m, 'system_spring': k,\n 'system_damping': c}\n true_dict = {}\n for key in sorted(dict_party):\n true_dict.update({key: dict_party[key]})\n return true_dict\n\n\nif __name__ == '__main__':\n print(analyze_data('data1.csv'))\n",
"step-5": "#!/usr/bin/env python3\r\n\r\nimport csv\r\nimport math\r\n\r\n\r\ndef load_data_from_file(filename):\r\n \"\"\"\r\n Load that data, my dude(tte)\r\n :param filename: The file from which you want to load data\r\n :return: Time and position data of the file\r\n \"\"\"\r\n time = []\r\n position = []\r\n with open(filename, 'r') as original:\r\n time_position = list(csv.reader(original)) # list()\r\n for row in range(1, len(time_position)):\r\n time.append(float(time_position[row][0]))\r\n position.append(float(time_position[row][1]))\r\n\r\n return time, position\r\n\r\n\r\ndef greater_than_index(numlist, singnum):\r\n \"\"\"\r\n Function takes in a list of ints, compares them to a single int and returns the index value at which the\r\n list encounters a value greater than, or equal to, the value of interest.\r\n :param numlist: The list of ints\r\n :param singnum: The int to compare the list to\r\n :return: The index value of the position >= value of interest\r\n \"\"\"\r\n try:\r\n for elem in numlist:\r\n if elem >= singnum:\r\n e_val = numlist.index(elem)\r\n return e_val\r\n except ValueError:\r\n return 'None. Try a value contained within the list.'\r\n\r\n\r\ndef less_than_index(numlist, singnum):\r\n \"\"\"\r\n Function takes in a list of ints, compares them to a single int and returns the index value at which the\r\n list encounters a value greater than, or equal to, the value of interest.\r\n :param numlist: The list of ints\r\n :param singnum: The int to compare the list to\r\n :return: The index value of the position >= value of interest\r\n \"\"\"\r\n try:\r\n for elem in numlist:\r\n if elem <= singnum:\r\n e_val = numlist.index(elem)\r\n return e_val\r\n except ValueError:\r\n return 'None. Try a value contained within the list.'\r\n\r\n\r\ndef ini_max_fin(pos1):\r\n c_initial = pos1[0]\r\n c_max = max(pos1)\r\n c_final = pos1[-1]\r\n return c_initial, c_max, c_final\r\n\r\n\r\ndef char_ests(time_c, pos_c, c_initial, c_max, c_final):\r\n \"\"\"\r\n This function estimates the characteristics of the waveform we're analyzing\r\n :param time_c: A list of time values to determine the time it takes for certain things to occur\r\n :param pos_c: A list of position values to determine the position at certain values of time\r\n :param c_initial: The initial position value of our waveform\r\n :param c_max: The maximum position value of our waveform\r\n :param c_final: The final value of our waveform\r\n :return: Rise time (t_r), Peak time(t_p), % Overshoot(p_os_fix), Settling time (t_s).\r\n \"\"\"\r\n # Index values for time statements\r\n maxdex = pos_c.index(c_max)\r\n ten_perc = (c_final + c_initial) * 0.1\r\n tr_10 = greater_than_index(pos_c, ten_perc)\r\n ninety_p = (c_final + c_initial) * 0.9\r\n tr_90 = greater_than_index(pos_c, ninety_p)\r\n\r\n # Calculations\r\n t_r = time_c[tr_10] - time_c[tr_90] # Rise time\r\n t_p = time_c[maxdex] # Peak time\r\n\r\n # Adjusted %OS eq\r\n p_os_fix = ((c_max - c_final) / (c_final-c_initial)) * 100 # %OS\r\n\r\n # two percent calcs\r\n two_perc = (c_final - c_initial) * 0.02\r\n c_thresh_low = c_final - two_perc\r\n c_thresh_high = c_final + two_perc\r\n mcfly = list(reversed(time_c))\r\n beckett = list(reversed(pos_c))\r\n minlist = [less_than_index(beckett, c_thresh_low), greater_than_index(beckett, c_thresh_high)]\r\n\r\n t_s = mcfly[min(minlist)] # Settling time\r\n\r\n return t_r, t_p, p_os_fix, t_s\r\n\r\n\r\ndef get_system_params(perc_os, settle_t):\r\n \"\"\"\r\n :param perc_os: The Overshoot Percentage value from which 
to calculate things \r\n :param settle_t: The settling time from which to calculate things\r\n :return: The mass (m_spr), spring (k_spr), and damping constants(c_spr)\r\n \"\"\"\r\n\r\n num_zet = -math.log(perc_os/100)\r\n den_zet = math.sqrt(math.pi**2 + math.log(perc_os/100)**2)\r\n zeta = num_zet/den_zet\r\n omega = 4 / (zeta*settle_t)\r\n m_spr = 1 # Told to assume mass is always 1 (unit)\r\n k_spr = omega**2\r\n c_spr = 2*zeta*omega\r\n return m_spr, k_spr, c_spr\r\n\r\n\r\ndef analyze_data(filename):\r\n \"\"\"\r\n :param filename: A name for the csv file to run the resulting operations \r\n :return: A dictionary with some gucci values\r\n \"\"\"\r\n backtime, backpos = load_data_from_file(filename)\r\n c_i, c_m, c_f = ini_max_fin(backpos)\r\n t_rise, t_peak, percos, t_set = char_ests(backtime, backpos, c_i, c_m, c_f)\r\n m, k, c = get_system_params(percos, t_set)\r\n\r\n dict_party = {'c_initial': c_i, 'c_max': c_m, 'c_final': c_f, 'rise_time': t_rise, 'peak_time': t_peak,\r\n 'perc_overshoot': percos, 'settling_time': t_set, 'system_mass': m, 'system_spring': k,\r\n 'system_damping': c}\r\n true_dict = {}\r\n for key in sorted(dict_party):\r\n true_dict.update({key: dict_party[key]})\r\n\r\n return true_dict\r\n\r\n\r\nif __name__ == '__main__':\r\n\r\n print(analyze_data('data1.csv'))\r\n # print(analyze_data('data2.csv'))\r\n # print(analyze_data('data3.csv'))\r\n # print(analyze_data('data4.csv'))\r\n",
"step-ids": [
5,
6,
8,
9,
10
]
}
|
[
5,
6,
8,
9,
10
] |
# -*- coding: utf-8 -*-
"""
Created on Wed Oct 3 16:04:19 2018
@author: khanhle
"""
# Create first network with Keras
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Activation
from keras.utils import np_utils
from keras.layers.convolutional import Convolution2D, ZeroPadding2D
from keras.layers.pooling import MaxPooling2D
from keras.layers.core import Dropout, Flatten
from keras.callbacks import ModelCheckpoint
import numpy as np
from sklearn.metrics import confusion_matrix
# fix random seed for reproducibility
seed = 7
np.random.seed(seed)
print(__doc__)
import h5py
import os
import sys
from keras.models import model_from_json
#define params
trn_file = sys.argv[1]
tst_file = sys.argv[2]
json_file = sys.argv[3]
h5_file = sys.argv[4]
nb_classes = 2
nb_kernels = 3
nb_pools = 2
window_sizes = 19
# load training dataset
dataset = np.loadtxt(trn_file, delimiter=",")
# split into input (X) and output (Y) variables
X = dataset[:,0:window_sizes*20].reshape(len(dataset),1,20,window_sizes)
Y = dataset[:,window_sizes*20]
Y = np_utils.to_categorical(Y,nb_classes)
#print X,Y
#nb_classes = Y.shape[1]
#print nb_classes
# load testing dataset
dataset1 = np.loadtxt(tst_file, delimiter=",")
# split into input (X) and output (Y) variables
X1 = dataset1[:,0:window_sizes*20].reshape(len(dataset1),1,20,window_sizes)
Y1 = dataset1[:,window_sizes*20]
true_labels = np.asarray(Y1)
Y1 = np_utils.to_categorical(Y1,nb_classes)
#print('label : ', Y[i,:])
def cnn_model():
model = Sequential()
model.add(ZeroPadding2D((1,1), input_shape = (1,20,window_sizes)))
model.add(Convolution2D(32, nb_kernels, nb_kernels))
model.add(Activation('relu'))
model.add(MaxPooling2D(strides=(nb_pools, nb_pools), dim_ordering="th"))
# model.add(ZeroPadding2D((1,1)))
# model.add(Convolution2D(32, nb_kernels, nb_kernels, activation='relu'))
# model.add(MaxPooling2D(strides=(nb_pools, nb_pools), dim_ordering="th"))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(64, nb_kernels, nb_kernels, activation='relu'))
# model.add(Activation('relu'))
model.add(MaxPooling2D(strides=(nb_pools, nb_pools), dim_ordering="th"))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(128, nb_kernels, nb_kernels, activation='relu'))
model.add(MaxPooling2D(strides=(nb_pools, nb_pools), dim_ordering="th"))
# model.add(ZeroPadding2D((1,1)))
# model.add(Convolution2D(256, nb_kernels, nb_kernels, activation='relu'))
# model.add(MaxPooling2D(strides=(nb_pools, nb_pools), dim_ordering="th"))
## add the model on top of the convolutional base
#model.add(top_model)
model.add(Flatten())
model.add(Dropout(0.5))
model.add(Dense(128))
#model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Dense(nb_classes))
#model.add(BatchNormalization())
model.add(Activation('softmax'))
# f = open('model_summary.txt','w')
# f.write(str(model.summary()))
# f.close()
#model.compile(loss='categorical_crossentropy', optimizer='adadelta')
# Compile model
model.compile(loss='binary_crossentropy', optimizer='adadelta', metrics=['accuracy'])
return model
#plot_filters(model.layers[0],32,1)
# Fit the model
# save best weights
model = cnn_model()
#plot_model(model, to_file='model.png')
filepath = "weights.best.hdf5"
checkpointer = ModelCheckpoint(filepath, monitor='val_acc', verbose=1, save_best_only=True, save_weights_only=True)
# balance data
model.fit(X, Y, nb_epoch=150, batch_size=10, class_weight = 'auto', validation_data=(X1,Y1), callbacks=[checkpointer])
## evaluate the model
scores = model.evaluate(X1, Y1)
print("%s: %.2f%%" % (model.metrics_names[1], scores[1]*100))
model.load_weights(filepath)
predictions = model.predict_classes(X1)
print(confusion_matrix(true_labels, predictions))
#serialize model to JSON
model_json = model.to_json()
with open(json_file, "w") as json_file:
json_file.write(model_json)
# serialize weights to HDF5
model.save_weights(h5_file)
print("Saved model to disk")
|
normal
|
{
"blob_id": "721f23d2b6109194b8bca54b1cd04263e30cdf24",
"index": 3964,
"step-1": "<mask token>\n\n\ndef cnn_model():\n model = Sequential()\n model.add(ZeroPadding2D((1, 1), input_shape=(1, 20, window_sizes)))\n model.add(Convolution2D(32, nb_kernels, nb_kernels))\n model.add(Activation('relu'))\n model.add(MaxPooling2D(strides=(nb_pools, nb_pools), dim_ordering='th'))\n model.add(ZeroPadding2D((1, 1)))\n model.add(Convolution2D(64, nb_kernels, nb_kernels, activation='relu'))\n model.add(MaxPooling2D(strides=(nb_pools, nb_pools), dim_ordering='th'))\n model.add(ZeroPadding2D((1, 1)))\n model.add(Convolution2D(128, nb_kernels, nb_kernels, activation='relu'))\n model.add(MaxPooling2D(strides=(nb_pools, nb_pools), dim_ordering='th'))\n model.add(Flatten())\n model.add(Dropout(0.5))\n model.add(Dense(128))\n model.add(Activation('relu'))\n model.add(Dense(nb_classes))\n model.add(Activation('softmax'))\n model.compile(loss='binary_crossentropy', optimizer='adadelta', metrics\n =['accuracy'])\n return model\n\n\n<mask token>\n",
"step-2": "<mask token>\nnp.random.seed(seed)\nprint(__doc__)\n<mask token>\n\n\ndef cnn_model():\n model = Sequential()\n model.add(ZeroPadding2D((1, 1), input_shape=(1, 20, window_sizes)))\n model.add(Convolution2D(32, nb_kernels, nb_kernels))\n model.add(Activation('relu'))\n model.add(MaxPooling2D(strides=(nb_pools, nb_pools), dim_ordering='th'))\n model.add(ZeroPadding2D((1, 1)))\n model.add(Convolution2D(64, nb_kernels, nb_kernels, activation='relu'))\n model.add(MaxPooling2D(strides=(nb_pools, nb_pools), dim_ordering='th'))\n model.add(ZeroPadding2D((1, 1)))\n model.add(Convolution2D(128, nb_kernels, nb_kernels, activation='relu'))\n model.add(MaxPooling2D(strides=(nb_pools, nb_pools), dim_ordering='th'))\n model.add(Flatten())\n model.add(Dropout(0.5))\n model.add(Dense(128))\n model.add(Activation('relu'))\n model.add(Dense(nb_classes))\n model.add(Activation('softmax'))\n model.compile(loss='binary_crossentropy', optimizer='adadelta', metrics\n =['accuracy'])\n return model\n\n\n<mask token>\nmodel.fit(X, Y, nb_epoch=150, batch_size=10, class_weight='auto',\n validation_data=(X1, Y1), callbacks=[checkpointer])\n<mask token>\nprint('%s: %.2f%%' % (model.metrics_names[1], scores[1] * 100))\nmodel.load_weights(filepath)\n<mask token>\nprint(confusion_matrix(true_labels, predictions))\n<mask token>\nwith open(json_file, 'w') as json_file:\n json_file.write(model_json)\nmodel.save_weights(h5_file)\nprint('Saved model to disk')\n",
"step-3": "<mask token>\nseed = 7\nnp.random.seed(seed)\nprint(__doc__)\n<mask token>\ntrn_file = sys.argv[1]\ntst_file = sys.argv[2]\njson_file = sys.argv[3]\nh5_file = sys.argv[4]\nnb_classes = 2\nnb_kernels = 3\nnb_pools = 2\nwindow_sizes = 19\ndataset = np.loadtxt(trn_file, delimiter=',')\nX = dataset[:, 0:window_sizes * 20].reshape(len(dataset), 1, 20, window_sizes)\nY = dataset[:, window_sizes * 20]\nY = np_utils.to_categorical(Y, nb_classes)\ndataset1 = np.loadtxt(tst_file, delimiter=',')\nX1 = dataset1[:, 0:window_sizes * 20].reshape(len(dataset1), 1, 20,\n window_sizes)\nY1 = dataset1[:, window_sizes * 20]\ntrue_labels = np.asarray(Y1)\nY1 = np_utils.to_categorical(Y1, nb_classes)\n\n\ndef cnn_model():\n model = Sequential()\n model.add(ZeroPadding2D((1, 1), input_shape=(1, 20, window_sizes)))\n model.add(Convolution2D(32, nb_kernels, nb_kernels))\n model.add(Activation('relu'))\n model.add(MaxPooling2D(strides=(nb_pools, nb_pools), dim_ordering='th'))\n model.add(ZeroPadding2D((1, 1)))\n model.add(Convolution2D(64, nb_kernels, nb_kernels, activation='relu'))\n model.add(MaxPooling2D(strides=(nb_pools, nb_pools), dim_ordering='th'))\n model.add(ZeroPadding2D((1, 1)))\n model.add(Convolution2D(128, nb_kernels, nb_kernels, activation='relu'))\n model.add(MaxPooling2D(strides=(nb_pools, nb_pools), dim_ordering='th'))\n model.add(Flatten())\n model.add(Dropout(0.5))\n model.add(Dense(128))\n model.add(Activation('relu'))\n model.add(Dense(nb_classes))\n model.add(Activation('softmax'))\n model.compile(loss='binary_crossentropy', optimizer='adadelta', metrics\n =['accuracy'])\n return model\n\n\nmodel = cnn_model()\nfilepath = 'weights.best.hdf5'\ncheckpointer = ModelCheckpoint(filepath, monitor='val_acc', verbose=1,\n save_best_only=True, save_weights_only=True)\nmodel.fit(X, Y, nb_epoch=150, batch_size=10, class_weight='auto',\n validation_data=(X1, Y1), callbacks=[checkpointer])\nscores = model.evaluate(X1, Y1)\nprint('%s: %.2f%%' % (model.metrics_names[1], scores[1] * 100))\nmodel.load_weights(filepath)\npredictions = model.predict_classes(X1)\nprint(confusion_matrix(true_labels, predictions))\nmodel_json = model.to_json()\nwith open(json_file, 'w') as json_file:\n json_file.write(model_json)\nmodel.save_weights(h5_file)\nprint('Saved model to disk')\n",
"step-4": "<mask token>\nfrom keras.models import Sequential\nfrom keras.layers import Dense\nfrom keras.layers import Activation\nfrom keras.utils import np_utils\nfrom keras.layers.convolutional import Convolution2D, ZeroPadding2D\nfrom keras.layers.pooling import MaxPooling2D\nfrom keras.layers.core import Dropout, Flatten\nfrom keras.callbacks import ModelCheckpoint\nimport numpy as np\nfrom sklearn.metrics import confusion_matrix\nseed = 7\nnp.random.seed(seed)\nprint(__doc__)\nimport h5py\nimport os\nimport sys\nfrom keras.models import model_from_json\ntrn_file = sys.argv[1]\ntst_file = sys.argv[2]\njson_file = sys.argv[3]\nh5_file = sys.argv[4]\nnb_classes = 2\nnb_kernels = 3\nnb_pools = 2\nwindow_sizes = 19\ndataset = np.loadtxt(trn_file, delimiter=',')\nX = dataset[:, 0:window_sizes * 20].reshape(len(dataset), 1, 20, window_sizes)\nY = dataset[:, window_sizes * 20]\nY = np_utils.to_categorical(Y, nb_classes)\ndataset1 = np.loadtxt(tst_file, delimiter=',')\nX1 = dataset1[:, 0:window_sizes * 20].reshape(len(dataset1), 1, 20,\n window_sizes)\nY1 = dataset1[:, window_sizes * 20]\ntrue_labels = np.asarray(Y1)\nY1 = np_utils.to_categorical(Y1, nb_classes)\n\n\ndef cnn_model():\n model = Sequential()\n model.add(ZeroPadding2D((1, 1), input_shape=(1, 20, window_sizes)))\n model.add(Convolution2D(32, nb_kernels, nb_kernels))\n model.add(Activation('relu'))\n model.add(MaxPooling2D(strides=(nb_pools, nb_pools), dim_ordering='th'))\n model.add(ZeroPadding2D((1, 1)))\n model.add(Convolution2D(64, nb_kernels, nb_kernels, activation='relu'))\n model.add(MaxPooling2D(strides=(nb_pools, nb_pools), dim_ordering='th'))\n model.add(ZeroPadding2D((1, 1)))\n model.add(Convolution2D(128, nb_kernels, nb_kernels, activation='relu'))\n model.add(MaxPooling2D(strides=(nb_pools, nb_pools), dim_ordering='th'))\n model.add(Flatten())\n model.add(Dropout(0.5))\n model.add(Dense(128))\n model.add(Activation('relu'))\n model.add(Dense(nb_classes))\n model.add(Activation('softmax'))\n model.compile(loss='binary_crossentropy', optimizer='adadelta', metrics\n =['accuracy'])\n return model\n\n\nmodel = cnn_model()\nfilepath = 'weights.best.hdf5'\ncheckpointer = ModelCheckpoint(filepath, monitor='val_acc', verbose=1,\n save_best_only=True, save_weights_only=True)\nmodel.fit(X, Y, nb_epoch=150, batch_size=10, class_weight='auto',\n validation_data=(X1, Y1), callbacks=[checkpointer])\nscores = model.evaluate(X1, Y1)\nprint('%s: %.2f%%' % (model.metrics_names[1], scores[1] * 100))\nmodel.load_weights(filepath)\npredictions = model.predict_classes(X1)\nprint(confusion_matrix(true_labels, predictions))\nmodel_json = model.to_json()\nwith open(json_file, 'w') as json_file:\n json_file.write(model_json)\nmodel.save_weights(h5_file)\nprint('Saved model to disk')\n",
"step-5": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Oct 3 16:04:19 2018\r\n\r\n@author: khanhle\r\n\"\"\"\r\n\r\n\r\n\r\n# Create first network with Keras\r\nfrom keras.models import Sequential\r\nfrom keras.layers import Dense\r\nfrom keras.layers import Activation\r\nfrom keras.utils import np_utils\r\nfrom keras.layers.convolutional import Convolution2D, ZeroPadding2D\r\nfrom keras.layers.pooling import MaxPooling2D\r\nfrom keras.layers.core import Dropout, Flatten\r\nfrom keras.callbacks import ModelCheckpoint\r\nimport numpy as np\r\nfrom sklearn.metrics import confusion_matrix\r\n\r\n# fix random seed for reproducibility\r\nseed = 7\r\nnp.random.seed(seed)\r\n\r\nprint(__doc__)\r\n\r\nimport h5py\r\nimport os\r\nimport sys\r\nfrom keras.models import model_from_json\r\n\r\n#define params\r\ntrn_file = sys.argv[1]\r\ntst_file = sys.argv[2]\r\njson_file = sys.argv[3]\r\nh5_file = sys.argv[4]\r\n\r\nnb_classes = 2\r\nnb_kernels = 3\r\nnb_pools = 2\r\nwindow_sizes = 19\r\n\r\n# load training dataset\r\ndataset = np.loadtxt(trn_file, delimiter=\",\")\r\n# split into input (X) and output (Y) variables\r\nX = dataset[:,0:window_sizes*20].reshape(len(dataset),1,20,window_sizes)\r\nY = dataset[:,window_sizes*20]\r\n\r\nY = np_utils.to_categorical(Y,nb_classes)\r\n#print X,Y\r\n#nb_classes = Y.shape[1]\r\n#print nb_classes\r\n\r\n# load testing dataset\r\ndataset1 = np.loadtxt(tst_file, delimiter=\",\")\r\n# split into input (X) and output (Y) variables\r\nX1 = dataset1[:,0:window_sizes*20].reshape(len(dataset1),1,20,window_sizes)\r\nY1 = dataset1[:,window_sizes*20]\r\ntrue_labels = np.asarray(Y1)\r\n\r\nY1 = np_utils.to_categorical(Y1,nb_classes)\r\n#print('label : ', Y[i,:])\r\n\r\ndef cnn_model():\r\n model = Sequential()\r\n\r\n model.add(ZeroPadding2D((1,1), input_shape = (1,20,window_sizes)))\r\n model.add(Convolution2D(32, nb_kernels, nb_kernels))\r\n model.add(Activation('relu'))\r\n model.add(MaxPooling2D(strides=(nb_pools, nb_pools), dim_ordering=\"th\"))\r\n\r\n # model.add(ZeroPadding2D((1,1)))\r\n # model.add(Convolution2D(32, nb_kernels, nb_kernels, activation='relu'))\r\n # model.add(MaxPooling2D(strides=(nb_pools, nb_pools), dim_ordering=\"th\"))\r\n\r\n model.add(ZeroPadding2D((1,1)))\r\n model.add(Convolution2D(64, nb_kernels, nb_kernels, activation='relu'))\r\n # model.add(Activation('relu'))\r\n model.add(MaxPooling2D(strides=(nb_pools, nb_pools), dim_ordering=\"th\"))\r\n\r\n model.add(ZeroPadding2D((1,1)))\r\n model.add(Convolution2D(128, nb_kernels, nb_kernels, activation='relu'))\r\n model.add(MaxPooling2D(strides=(nb_pools, nb_pools), dim_ordering=\"th\"))\r\n\r\n # model.add(ZeroPadding2D((1,1)))\r\n # model.add(Convolution2D(256, nb_kernels, nb_kernels, activation='relu'))\r\n # model.add(MaxPooling2D(strides=(nb_pools, nb_pools), dim_ordering=\"th\"))\r\n\r\n ## add the model on top of the convolutional base\r\n #model.add(top_model)\r\n model.add(Flatten())\r\n model.add(Dropout(0.5))\r\n model.add(Dense(128))\r\n #model.add(BatchNormalization())\r\n model.add(Activation('relu'))\r\n\r\n model.add(Dense(nb_classes))\r\n #model.add(BatchNormalization())\r\n model.add(Activation('softmax'))\r\n\r\n # f = open('model_summary.txt','w')\r\n # f.write(str(model.summary()))\r\n # f.close()\r\n\r\n #model.compile(loss='categorical_crossentropy', optimizer='adadelta')\r\n # Compile model\r\n model.compile(loss='binary_crossentropy', optimizer='adadelta', metrics=['accuracy'])\r\n return model\r\n\r\n#plot_filters(model.layers[0],32,1)\r\n# Fit the model\r\n# save 
best weights\r\nmodel = cnn_model()\r\n#plot_model(model, to_file='model.png')\r\n\r\nfilepath = \"weights.best.hdf5\"\r\ncheckpointer = ModelCheckpoint(filepath, monitor='val_acc', verbose=1, save_best_only=True, save_weights_only=True)\r\n# balance data\r\nmodel.fit(X, Y, nb_epoch=150, batch_size=10, class_weight = 'auto', validation_data=(X1,Y1), callbacks=[checkpointer])\r\n## evaluate the model\r\nscores = model.evaluate(X1, Y1)\r\nprint(\"%s: %.2f%%\" % (model.metrics_names[1], scores[1]*100))\r\n\r\nmodel.load_weights(filepath)\r\npredictions = model.predict_classes(X1)\r\n\r\nprint(confusion_matrix(true_labels, predictions))\r\n\r\n#serialize model to JSON\r\nmodel_json = model.to_json()\r\nwith open(json_file, \"w\") as json_file:\r\n json_file.write(model_json)\r\n# serialize weights to HDF5\r\nmodel.save_weights(h5_file)\r\nprint(\"Saved model to disk\")\r\n\r\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
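The reshape in the CNN record above is the easiest part to misread: each CSV row packs a flattened 20x19 position matrix (20 channels, presumably the 20 amino acids, over a window of 19 residues) followed by the class label. A minimal sketch with synthetic values; the shapes come from the record, the random data is only a stand-in for the real CSV:

import numpy as np

window_sizes = 19
# Four fake samples laid out like the CSV: 19*20 = 380 feature columns + 1 label column.
rows = np.random.rand(4, window_sizes * 20 + 1)
X = rows[:, 0:window_sizes * 20].reshape(len(rows), 1, 20, window_sizes)
Y = rows[:, window_sizes * 20]
print(X.shape, Y.shape)  # (4, 1, 20, 19) (4,) -> (samples, channels, height, width)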
import cv2
import numpy as np
img = cv2.imread('data/j.png', cv2.IMREAD_GRAYSCALE)
kernel = cv2.getStructuringElement(cv2.MORPH_CROSS, (7, 7))
erode = cv2.erode(img, kernel)
contorno = img - erode
cv2.imshow('Original', img)
cv2.imshow('Contorno', contorno)
cv2.waitKey()
cv2.destroyAllWindows()
|
normal
|
{
"blob_id": "809c9ce2b017612bedd1eb889c2b017275ee8b6f",
"index": 1729,
"step-1": "<mask token>\n",
"step-2": "<mask token>\ncv2.imshow('Original', img)\ncv2.imshow('Contorno', contorno)\ncv2.waitKey()\ncv2.destroyAllWindows()\n",
"step-3": "<mask token>\nimg = cv2.imread('data/j.png', cv2.IMREAD_GRAYSCALE)\nkernel = cv2.getStructuringElement(cv2.MORPH_CROSS, (7, 7))\nerode = cv2.erode(img, kernel)\ncontorno = img - erode\ncv2.imshow('Original', img)\ncv2.imshow('Contorno', contorno)\ncv2.waitKey()\ncv2.destroyAllWindows()\n",
"step-4": "import cv2\nimport numpy as np\nimg = cv2.imread('data/j.png', cv2.IMREAD_GRAYSCALE)\nkernel = cv2.getStructuringElement(cv2.MORPH_CROSS, (7, 7))\nerode = cv2.erode(img, kernel)\ncontorno = img - erode\ncv2.imshow('Original', img)\ncv2.imshow('Contorno', contorno)\ncv2.waitKey()\ncv2.destroyAllWindows()\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
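The `img - erode` trick in the record above extracts the internal boundary of the shapes: the pixels that survive in the original but not after erosion. OpenCV also ships the symmetric version (dilation minus erosion) as the morphological gradient. A sketch using a synthetic image so it runs without `data/j.png`:

import cv2
import numpy as np

# A white square on a black background stands in for data/j.png.
img = np.zeros((100, 100), np.uint8)
cv2.rectangle(img, (30, 30), (70, 70), 255, -1)

kernel = cv2.getStructuringElement(cv2.MORPH_CROSS, (7, 7))
inner = img - cv2.erode(img, kernel)                          # internal boundary, as in the record
gradient = cv2.morphologyEx(img, cv2.MORPH_GRADIENT, kernel)  # dilate(img) - erode(img)
print(int(np.count_nonzero(inner)), int(np.count_nonzero(gradient)))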
#list,for replacing element we use {a[0='']}:for adding element we use {append()}
a=['somesh','aakash','sarika','datta','rudra','4mridula']
a[2] = 'nandini'
a.append('sarika')
print(a[2])
print(a)
|
normal
|
{
"blob_id": "5c643dfce9cf7a9f774957ff4819d3be8ac4f1da",
"index": 7376,
"step-1": "<mask token>\n",
"step-2": "<mask token>\na.append('sarika')\nprint(a[2])\nprint(a)\n",
"step-3": "a = ['somesh', 'aakash', 'sarika', 'datta', 'rudra', '4mridula']\na[2] = 'nandini'\na.append('sarika')\nprint(a[2])\nprint(a)\n",
"step-4": "#list,for replacing element we use {a[0='']}:for adding element we use {append()}\r\na=['somesh','aakash','sarika','datta','rudra','4mridula']\r\na[2] = 'nandini'\r\na.append('sarika')\r\nprint(a[2])\r\nprint(a)\r\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
# 4 Pillars of OOP:
# 1. Encapsulation: Encapsulation in Python is the process of wrapping variables and methods into a single entity. In programming, a class is an example that wraps all the variables and methods defined inside it.
# 2. Abstraction: Abstraction in Python is the process of hiding the real implementation of an application from the user and emphasizing only how to use it.
# 3. Inheritance: It is the process of creating a class that can derive or inherit the properties and methods from another class (parent/base).
# 4. Polymorphism: Polymorphism means the ability to take various forms.
# Encapsulation:
# Encapsulation is a process of protecting the data and functionality of a class in a single unit, called an object.
# This mechanism is often used to protect the data of an object from other objects.
# It’s one of the fundamental principles in any programming language that supports object-oriented programming.
# We can protect the variables in the class by marking them as private. We need to add two underscores as a prefix to make a variable private.
# Once we make a variable as private, we can’t access them directly from the objects of that class.
# Now, let’s see how to create private variables:
# eg:
from abc import abstractmethod, ABC
class House:
def __init__(self, wallDynamic):
self.__wall = wallDynamic
# In the above example, wall is a private variable.
# Once a variable is declared as private, the only way to reach it from outside the class is through name mangling.
# In name mangling, an identifier with two leading underscores (and at most one trailing underscore) is
# textually replaced with _ClassName__identifier, where ClassName is the name of the current class and identifier is the private variable.
house = House(1)
# Using name mangling to access private variables
print(house._House__wall) # Output: 1
# To implement proper encapsulation in Python, we need to use setters and getters, as shown below:
class House2:
def setWall(self, dynamicWall):
self.wall = dynamicWall
def getWall(self):
print(self.wall)
# Abstraction:
# Abstraction in OOP is a process of hiding the real implementation of the method by only showing a method signature.
# In Python, we can achieve abstraction using ABC (abstract base class) together with abstract methods.
# ABC is a class from the abc module in Python.
# If we extend any class with ABC and include any abstraction methods,
# then the classes inherited from this class will have to mandatorily implement those abstract methods.
# When we decorate a method with @abstractmethod, it becomes an abstract method in Python (typically left without an implementation).
# If a class declares an abstractmethod but does not inherit from ABC, Python does not enforce it, so implementing the method in subclasses stays optional.
class Vehicle(ABC):
def __init__(self, speed, year):
self.speed = speed
self.year = year
def start(self):
print("Starting engine")
def stop(self):
print("Stopping engine")
@abstractmethod
def drive(self):
pass
class Car(Vehicle):
def __init__(self, canClimbMountains, speed, year):
Vehicle.__init__(self, speed, year)
self.canClimbMountains = canClimbMountains
def drive(self):
print("Car is in drive mode")
# Here, Vehicle is a parent class inherited from ABC. It has an abstract method, drive.
# Car is another class that inherits from Vehicle, so it has to implement the drive method.
|
normal
|
{
"blob_id": "0e4c82d6eb77d2b6357925c9aab516bcc3310a4c",
"index": 140,
"step-1": "<mask token>\n\n\nclass House2:\n <mask token>\n <mask token>\n\n\nclass Vehicle(ABC):\n\n def __init__(self, speed, year):\n self.speed = speed\n self.year = year\n\n def start(self):\n print('Starting engine')\n\n def stop(self):\n print('Stopping engine')\n\n @abstractmethod\n def drive(self):\n pass\n\n\nclass Car(Vehicle):\n\n def __init__(self, canClimbMountains, speed, year):\n Vehicle.__init__(self, speed, year)\n self.canClimbMountains = canClimbMountains\n\n def drive(self):\n print('Car is in drive mode')\n",
"step-2": "<mask token>\n\n\nclass House:\n <mask token>\n\n\n<mask token>\n\n\nclass House2:\n\n def setWall(self, dynamicWall):\n self.wall = dynamicWall\n\n def getWall(self):\n print(self.wall)\n\n\nclass Vehicle(ABC):\n\n def __init__(self, speed, year):\n self.speed = speed\n self.year = year\n\n def start(self):\n print('Starting engine')\n\n def stop(self):\n print('Stopping engine')\n\n @abstractmethod\n def drive(self):\n pass\n\n\nclass Car(Vehicle):\n\n def __init__(self, canClimbMountains, speed, year):\n Vehicle.__init__(self, speed, year)\n self.canClimbMountains = canClimbMountains\n\n def drive(self):\n print('Car is in drive mode')\n",
"step-3": "<mask token>\n\n\nclass House:\n\n def __init__(self, wallDynamic):\n self.__wall = wallDynamic\n\n\n<mask token>\n\n\nclass House2:\n\n def setWall(self, dynamicWall):\n self.wall = dynamicWall\n\n def getWall(self):\n print(self.wall)\n\n\nclass Vehicle(ABC):\n\n def __init__(self, speed, year):\n self.speed = speed\n self.year = year\n\n def start(self):\n print('Starting engine')\n\n def stop(self):\n print('Stopping engine')\n\n @abstractmethod\n def drive(self):\n pass\n\n\nclass Car(Vehicle):\n\n def __init__(self, canClimbMountains, speed, year):\n Vehicle.__init__(self, speed, year)\n self.canClimbMountains = canClimbMountains\n\n def drive(self):\n print('Car is in drive mode')\n",
"step-4": "<mask token>\n\n\nclass House:\n\n def __init__(self, wallDynamic):\n self.__wall = wallDynamic\n\n\n<mask token>\nprint(house._House__wall)\n\n\nclass House2:\n\n def setWall(self, dynamicWall):\n self.wall = dynamicWall\n\n def getWall(self):\n print(self.wall)\n\n\nclass Vehicle(ABC):\n\n def __init__(self, speed, year):\n self.speed = speed\n self.year = year\n\n def start(self):\n print('Starting engine')\n\n def stop(self):\n print('Stopping engine')\n\n @abstractmethod\n def drive(self):\n pass\n\n\nclass Car(Vehicle):\n\n def __init__(self, canClimbMountains, speed, year):\n Vehicle.__init__(self, speed, year)\n self.canClimbMountains = canClimbMountains\n\n def drive(self):\n print('Car is in drive mode')\n",
"step-5": "# 4 Pillars of OOP:\n# 1. Encapsulation: Encapsulation in Python is the process of wrapping up variables and methods into a single entity.In programming, a class is an example that wraps all the variables and methods defined inside it.\n# 2. Abstraction: Abstraction in Python is the process of hiding the real implementation of an application from the user and emphasizing only on usage of it.\n# 3. Inheritance: It is the process of creating a class that can derive or inherit the properties and methods from another class(parent/base).\n# 4. Polymorphism: Polymorphism means the ability to take various forms.\n\n# Encapsulation:\n\n# Encapsulation is a process of protecting the data and functionality of a class in a single unit, called an object.\n# This mechanism is often used to protect the data of an object from other objects.\n# It’s one of the fundamental principles in any programming language that supports object-oriented programming.\n# We can protect the variables in the class by marking them as private. We need to add two underscores as a prefix to make a variable private.\n# Once we make a variable as private, we can’t access them directly from the objects of that class.\n# Now, let’s see how to create private variables:\n\n# eg:\nfrom abc import abstractmethod, ABC\n\n\nclass House:\n\n def __init__(self, wallDynamic):\n self.__wall = wallDynamic\n\n# In the above example, wall is a private variable.\n# Once a variable is declared as private, the only way to access those variables is through name mangling.\n# In the name mangling process, an identifier with two leading underscores and one trailing underscore is\n# textually replaced with _classname__identifier , where class-name is the name of the current class and identifier is the private variable.\n\n\nhouse = House(1)\n\n# Using name mangling to access private variables\nprint(house._House__wall) # OutPut - 1\n\n# To implement proper encapsulation in Python, we need to use setters and getters, as shown below:\n\n\nclass House2:\n\n def setWall(self, dynamicWall):\n self.wall = dynamicWall\n\n def getWall(self):\n print(self.wall)\n\n\n# Abstraction:\n\n# Abstraction in OOP is a process of hiding the real implementation of the method by only showing a method signature.\n# In Python, we can achieve abstraction using ABC(abstraction class) or abstract method.\n# ABC is a class from the abc module in Python.\n# If we extend any class with ABC and include any abstraction methods,\n# then the classes inherited from this class will have to mandatorily implement those abstract methods.\n# When we annotate any method with an abstractmethod keyword, then it is an abstract method in Python(it won’t have any method implementation).\n# If the parent class has abstractmethod and not inherited from an abstract class, then it is optional to implement the abstractmethod .\n\n\nclass Vehicle(ABC):\n def __init__(self, speed, year):\n self.speed = speed\n self.year = year\n\n def start(self):\n print(\"Starting engine\")\n\n def stop(self):\n print(\"Stopping engine\")\n\n @abstractmethod\n def drive(self):\n pass\n\n\nclass Car(Vehicle):\n def __init__(self, canClimbMountains, speed, year):\n Vehicle.__init__(self, speed, year)\n self.canClimbMountains = canClimbMountains\n\n def drive(self):\n print(\"Car is in drive mode\")\n\n\n# Here, Vehicle is a parent inherited from ABC class. It has an abstraction method drive.\n# Car is another class that is inherited from Vehicle, so it had to implement the drive method.\n",
"step-ids": [
9,
12,
13,
14,
17
]
}
|
[
9,
12,
13,
14,
17
] |
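One behaviour the OOP record above describes but never shows: a class that inherits from ABC and still carries an unimplemented @abstractmethod cannot be instantiated. A self-contained sketch, trimmed to the minimum (the constructor arguments from the record are dropped):

from abc import ABC, abstractmethod

class Vehicle(ABC):
    @abstractmethod
    def drive(self):
        pass

class Car(Vehicle):
    def drive(self):
        print("Car is in drive mode")

try:
    Vehicle()  # TypeError: Can't instantiate abstract class Vehicle ...
except TypeError as err:
    print(err)
Car().drive()  # the concrete subclass works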
name = ''
while name != 'your name' and name != 'Your name':
print('Please type your name.')
name = input()
print('Thanks!')
# Equivalent version using an infinite loop and break:
#while True:
#    print('Please type your name.')
#    name = input()
#    if name == 'your name':
#        break
#print('Thanks!')
|
normal
|
{
"blob_id": "f3644b42d1a6c87c6169f8d123dadf6cd209270c",
"index": 2617,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwhile name != 'your name' and name != 'Your name':\n print('Please type your name.')\n name = input()\nprint('Thanks!')\n",
"step-3": "name = ''\nwhile name != 'your name' and name != 'Your name':\n print('Please type your name.')\n name = input()\nprint('Thanks!')\n",
"step-4": "name = ''\nwhile name != 'your name' and name != 'Your name':\n print('Please type your name.')\n name = input()\nprint('Thanks!')\n\n#while 1 == 2 or :\n# print('Type your name')\n# name = input()\n# if name == 'your name':\n# break\n#print('Thanks!')\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
'''
Classes
'''
class Person:
alive = True
'''
Possible Attributes for a Person:
1. Name
2. Age
3. Gender
'''
def __init__(self, name, age, gender):
self.name = name
self.age = age
self.gender = gender
self.salary = 0
def greet(self):
print("Hello ", self.name)
def greetByTime(self, time="Morning"):
print("Hello", self.name, " . ", time)
print("Accessing Static Variable", Person.alive)
p = Person("John", 30, "Male")
print("\n\nAccessing Functions \n\n")
p.greet()
p.greetByTime()
p.greetByTime("Goodnight")
print("\n\nAccessing Variables \n\n")
print(p.name, p.age, p.gender)
|
normal
|
{
"blob_id": "11feb13f38f2484c867a8b3fa525ffecf419dfe5",
"index": 9957,
"step-1": "<mask token>\n\n\nclass Person:\n alive = True\n <mask token>\n\n def __init__(self, name, age, gender):\n self.name = name\n self.age = age\n self.gender = gender\n self.salary = 0\n\n def greet(self):\n print('Hello ', self.name)\n\n def greetByTime(self, time='Morning'):\n print('Hello', self.name, ' . ', time)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Person:\n alive = True\n \"\"\"\n\n Possible Attributes for a Person:\n\n 1. Name\n 2. Age\n 3. Gender\n\n \"\"\"\n\n def __init__(self, name, age, gender):\n self.name = name\n self.age = age\n self.gender = gender\n self.salary = 0\n\n def greet(self):\n print('Hello ', self.name)\n\n def greetByTime(self, time='Morning'):\n print('Hello', self.name, ' . ', time)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Person:\n alive = True\n \"\"\"\n\n Possible Attributes for a Person:\n\n 1. Name\n 2. Age\n 3. Gender\n\n \"\"\"\n\n def __init__(self, name, age, gender):\n self.name = name\n self.age = age\n self.gender = gender\n self.salary = 0\n\n def greet(self):\n print('Hello ', self.name)\n\n def greetByTime(self, time='Morning'):\n print('Hello', self.name, ' . ', time)\n\n\nprint('Accessing Static Variable', Person.alive)\n<mask token>\nprint(\"\"\"\n\nAccessing Functions \n\n\"\"\")\np.greet()\np.greetByTime()\np.greetByTime('Goodnight')\nprint(\"\"\"\n\nAccessing Variables \n\n\"\"\")\nprint(p.name, p.age, p.gender)\n",
"step-4": "<mask token>\n\n\nclass Person:\n alive = True\n \"\"\"\n\n Possible Attributes for a Person:\n\n 1. Name\n 2. Age\n 3. Gender\n\n \"\"\"\n\n def __init__(self, name, age, gender):\n self.name = name\n self.age = age\n self.gender = gender\n self.salary = 0\n\n def greet(self):\n print('Hello ', self.name)\n\n def greetByTime(self, time='Morning'):\n print('Hello', self.name, ' . ', time)\n\n\nprint('Accessing Static Variable', Person.alive)\np = Person('John', 30, 'Male')\nprint(\"\"\"\n\nAccessing Functions \n\n\"\"\")\np.greet()\np.greetByTime()\np.greetByTime('Goodnight')\nprint(\"\"\"\n\nAccessing Variables \n\n\"\"\")\nprint(p.name, p.age, p.gender)\n",
"step-5": "'''\n\nClasses\n\n'''\n\n\nclass Person:\n alive = True\n\n '''\n\n Possible Attributes for a Person:\n\n 1. Name\n 2. Age\n 3. Gender\n\n '''\n\n def __init__(self, name, age, gender):\n self.name = name\n self.age = age\n self.gender = gender\n self.salary = 0\n\n def greet(self):\n print(\"Hello \", self.name)\n\n def greetByTime(self, time=\"Morning\"):\n print(\"Hello\", self.name, \" . \", time)\n\n\nprint(\"Accessing Static Variable\", Person.alive)\np = Person(\"John\", 30, \"Male\")\n\nprint(\"\\n\\nAccessing Functions \\n\\n\")\np.greet()\np.greetByTime()\np.greetByTime(\"Goodnight\")\n\nprint(\"\\n\\nAccessing Variables \\n\\n\")\nprint(p.name, p.age, p.gender)\n",
"step-ids": [
5,
6,
7,
8,
9
]
}
|
[
5,
6,
7,
8,
9
] |
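The `Person.alive` access in the record above is worth isolating: a class attribute is shared by all instances until an assignment on an instance shadows it. A minimal sketch of that rule:

class Flag:
    alive = True              # class attribute, shared by all instances

a, b = Flag(), Flag()
Flag.alive = False            # rebinding on the class is visible through both instances
print(a.alive, b.alive)       # False False
a.alive = True                # assignment on an instance creates a shadowing attribute
print(a.alive, b.alive)       # True False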
import math
def solution(X, Y, D):
# write your code in Python 3.6
    xy = Y - X
    if xy == 0:
        return 0
jumps = math.ceil(xy/D)
return jumps
|
normal
|
{
"blob_id": "bdf819d8a5bc3906febced785c6d95db7dc3a603",
"index": 2376,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef solution(X, Y, D):\n xy = Y - X\n if xy == 0:\n return 0\n jumps = math.ceil(xy / D)\n return jumps\n",
"step-3": "import math\n\n\ndef solution(X, Y, D):\n xy = Y - X\n if xy == 0:\n return 0\n jumps = math.ceil(xy / D)\n return jumps\n",
"step-4": "import math\ndef solution(X, Y, D):\n # write your code in Python 3.6\n xy = Y-X;\n if xy == 0: return 0\n jumps = math.ceil(xy/D)\n return jumps\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
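The record above is the classic minimal-jumps formula: a frog at position X must reach at least Y with fixed jumps of D, so the answer is ceil((Y - X) / D). A quick check with the usual sample values:

import math

def solution(X, Y, D):
    return 0 if Y <= X else math.ceil((Y - X) / D)

print(solution(10, 85, 30))  # 3 -> lands on 40, 70, 100 (>= 85)
print(solution(10, 10, 30))  # 0 -> already at the target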
contador_pares = 0
contador_impares = 0
for i in range(100):
numero = int(input('Digite um valor:'))
if numero % 2 == 0:
contador_pares += 1
else:
contador_impares += 1
print('A quantidade de números pares é igual a:', contador_pares)
print('A quantidade de números ímpares é igual a:', contador_impares)
|
normal
|
{
"blob_id": "03aa33861def30a46de85c5b309878a1180a760f",
"index": 5211,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor i in range(100):\n numero = int(input('Digite um valor:'))\n if numero % 2 == 0:\n contador_pares += 1\n else:\n contador_impares += 1\nprint('A quantidade de números pares é igual a:', contador_pares)\nprint('A quantidade de números ímpares é igual a:', contador_impares)\n",
"step-3": "contador_pares = 0\ncontador_impares = 0\nfor i in range(100):\n numero = int(input('Digite um valor:'))\n if numero % 2 == 0:\n contador_pares += 1\n else:\n contador_impares += 1\nprint('A quantidade de números pares é igual a:', contador_pares)\nprint('A quantidade de números ímpares é igual a:', contador_impares)\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
import art
import random
print(art.guess)
print(art.the)
print(art.number)
print("I'm thinking of a number between 1 and 100")
number = random.randint(1,100)
turns = 0
difficulty = input("Chose a difficulty. 'easy' or 'hard'?\n")
if difficulty == 'easy':
turns +=10
else:
turns +=5
gameover = False
while not gameover:
print(f"You've got {turns} turns left!")
guess = int(input("Guess a number!\n"))
if guess > number:
print("too high!")
turns -= 1
elif guess < number:
print("too low!")
turns -= 1
elif guess == number:
print("Thats it! You Win!")
gameover = True
if turns == 0:
print("You used all your chances!")
print("GAME OVER")
gameover = True
|
normal
|
{
"blob_id": "f2bf4f5b057af1d2362ec8d1472aa76e774be1c7",
"index": 2736,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(art.guess)\nprint(art.the)\nprint(art.number)\nprint(\"I'm thinking of a number between 1 and 100\")\n<mask token>\nif difficulty == 'easy':\n turns += 10\nelse:\n turns += 5\n<mask token>\nwhile not gameover:\n print(f\"You've got {turns} turns left!\")\n guess = int(input('Guess a number!\\n'))\n if guess > number:\n print('too high!')\n turns -= 1\n elif guess < number:\n print('too low!')\n turns -= 1\n elif guess == number:\n print('Thats it! You Win!')\n gameover = True\n if turns == 0:\n print('You used all your chances!')\n print('GAME OVER')\n gameover = True\n",
"step-3": "<mask token>\nprint(art.guess)\nprint(art.the)\nprint(art.number)\nprint(\"I'm thinking of a number between 1 and 100\")\nnumber = random.randint(1, 100)\nturns = 0\ndifficulty = input(\"Chose a difficulty. 'easy' or 'hard'?\\n\")\nif difficulty == 'easy':\n turns += 10\nelse:\n turns += 5\ngameover = False\nwhile not gameover:\n print(f\"You've got {turns} turns left!\")\n guess = int(input('Guess a number!\\n'))\n if guess > number:\n print('too high!')\n turns -= 1\n elif guess < number:\n print('too low!')\n turns -= 1\n elif guess == number:\n print('Thats it! You Win!')\n gameover = True\n if turns == 0:\n print('You used all your chances!')\n print('GAME OVER')\n gameover = True\n",
"step-4": "import art\nimport random\nprint(art.guess)\nprint(art.the)\nprint(art.number)\nprint(\"I'm thinking of a number between 1 and 100\")\nnumber = random.randint(1, 100)\nturns = 0\ndifficulty = input(\"Chose a difficulty. 'easy' or 'hard'?\\n\")\nif difficulty == 'easy':\n turns += 10\nelse:\n turns += 5\ngameover = False\nwhile not gameover:\n print(f\"You've got {turns} turns left!\")\n guess = int(input('Guess a number!\\n'))\n if guess > number:\n print('too high!')\n turns -= 1\n elif guess < number:\n print('too low!')\n turns -= 1\n elif guess == number:\n print('Thats it! You Win!')\n gameover = True\n if turns == 0:\n print('You used all your chances!')\n print('GAME OVER')\n gameover = True\n",
"step-5": "import art\nimport random\n\nprint(art.guess)\nprint(art.the)\nprint(art.number)\nprint(\"I'm thinking of a number between 1 and 100\")\n\nnumber = random.randint(1,100)\nturns = 0\n\ndifficulty = input(\"Chose a difficulty. 'easy' or 'hard'?\\n\")\n\nif difficulty == 'easy':\n turns +=10\nelse:\n turns +=5\n\ngameover = False\n\nwhile not gameover:\n print(f\"You've got {turns} turns left!\")\n guess = int(input(\"Guess a number!\\n\"))\n\n if guess > number:\n print(\"too high!\")\n turns -= 1\n elif guess < number:\n print(\"too low!\")\n turns -= 1\n elif guess == number:\n print(\"Thats it! You Win!\")\n gameover = True\n\n if turns == 0:\n print(\"You used all your chances!\")\n print(\"GAME OVER\")\n gameover = True",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
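The record above imports a local `art` module that the dump does not include; the game only needs three banner strings from it. A hypothetical stub that makes the record runnable (the attribute names are taken from the calls in the record; the banner text itself is a placeholder, not the original ASCII art):

# art.py -- placeholder banners; the original file's ASCII art is not in the dump.
guess = " GUESS "
the = "  THE  "
number = "NUMBER "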
#Homework 2 PyPoll
#The total number of votes cast
#A complete list of candidates who received votes
#The percentage of votes each candidate won
#The total number of votes each candidate won
#The winner of the election based on popular vote.
#First we'll import the os module
# This will allow us to create file paths across operating systems
import os
#currentDirectory = os.getcwd()
# Module for reading CSV files
import csv
csvpath = os.path.join('election_data.csv')
with open('election_data.csv') as csvfile:
# CSV reader specifies delimiter and variable that holds contents
csvreader = csv.reader(csvfile, delimiter=',')
print(csvreader)
|
normal
|
{
"blob_id": "800d87a879987c47f1a66b729932279fc8d4fa38",
"index": 7314,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwith open('election_data.csv') as csvfile:\n csvreader = csv.reader(csvfile, delimiter=',')\n print(csvreader)\n",
"step-3": "<mask token>\ncsvpath = os.path.join('election_data.csv')\nwith open('election_data.csv') as csvfile:\n csvreader = csv.reader(csvfile, delimiter=',')\n print(csvreader)\n",
"step-4": "import os\nimport csv\ncsvpath = os.path.join('election_data.csv')\nwith open('election_data.csv') as csvfile:\n csvreader = csv.reader(csvfile, delimiter=',')\n print(csvreader)\n",
"step-5": " #Homework 2 PyPoll\n #The total number of votes cast\n#A complete list of candidates who received votes\n#The percentage of votes each candidate won\n#The total number of votes each candidate won\n#The winner of the election based on popular vote.\n \n #First we'll import the os module\n# This will allow us to create file paths across operating systems\nimport os\n\n#currentDirectory = os.getcwd()\n\n# Module for reading CSV files\nimport csv\n\ncsvpath = os.path.join('election_data.csv')\n\nwith open('election_data.csv') as csvfile:\n\n # CSV reader specifies delimiter and variable that holds contents\n csvreader = csv.reader(csvfile, delimiter=',')\n\n print(csvreader)\n\n\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
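The comments in the PyPoll record above list four outputs (total votes, candidate list, percentages, winner) that the code never computes. A sketch of the missing tally; it assumes a header row with the candidate name in the third column (the usual `Voter ID, County, Candidate` layout for this exercise, which is an assumption here, not something the record shows):

import csv
import os
from collections import Counter

csvpath = os.path.join('election_data.csv')
with open(csvpath) as csvfile:
    csvreader = csv.reader(csvfile, delimiter=',')
    next(csvreader)                          # skip the assumed header row
    votes = Counter(row[2] for row in csvreader)

total = sum(votes.values())
print(f"Total Votes: {total}")
for candidate, count in votes.most_common():
    print(f"{candidate}: {count / total:.3%} ({count})")
print(f"Winner: {votes.most_common(1)[0][0]}")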
# coding=utf-8
import pyautogui
from xpinyin import Pinyin
rubbish_dic=1
if rubbish_dic==0:
chinese_rubbish=(
u"草泥马",
u"你妈死了",
u"你是不是",
u"低能",
u"人话都听不懂",
u"没家教的狗东西",
)
elif rubbish_dic==1:
rubbish_file=open("rubbish_dic.txt")
chinese_rubbish=rubbish_file.read().splitlines()
rubbish_set=[] # the final pinyin keystroke sequences
p=Pinyin() # used to convert Chinese to pinyin
# switch the target window by clicking
def trans_screen():
pyautogui.doubleClick(492,974)
pyautogui.typewrite(['enter'],0.01)
# convert the Chinese phrases to pinyin
def trans_chinese():
for c_rubbish in chinese_rubbish:
pin=p.get_pinyin(c_rubbish,'')
pin_list=list(pin)
pin_list.append("1")
rubbish_set.append(pin_list)
# send the text
def send_rubbish():
for p_rubbish in rubbish_set:
pyautogui.typewrite(p_rubbish,0.01)
pyautogui.typewrite(['enter'],0.01)
# inspect the current contents of rubbish_set
def chk_rubbish():
for p_dirty in rubbish_set:
print(p_dirty)
if __name__ == "__main__":
trans_chinese()
#chk_rubbish()
trans_screen()
send_rubbish()
|
normal
|
{
"blob_id": "23e673909b2f1eb9a265ce84ad63464e20e99c6a",
"index": 3449,
"step-1": "<mask token>\n\n\ndef trans_screen():\n pyautogui.doubleClick(492, 974)\n pyautogui.typewrite(['enter'], 0.01)\n\n\ndef trans_chinese():\n for c_rubbish in chinese_rubbish:\n pin = p.get_pinyin(c_rubbish, '')\n pin_list = list(pin)\n pin_list.append('1')\n rubbish_set.append(pin_list)\n\n\ndef send_rubbish():\n for p_rubbish in rubbish_set:\n pyautogui.typewrite(p_rubbish, 0.01)\n pyautogui.typewrite(['enter'], 0.01)\n\n\ndef chk_rubbish():\n for p_dirty in rubbish_set:\n print(p_dirty)\n\n\n<mask token>\n",
"step-2": "<mask token>\nif rubbish_dic == 0:\n chinese_rubbish = u'草泥马', u'你妈死了', u'你是不是', u'低能', u'人话都听不懂', u'没家教的狗东西'\nelif rubbish_dic == 1:\n rubbish_file = open('rubbish_dic.txt')\n chinese_rubbish = rubbish_file.read().splitlines()\n<mask token>\n\n\ndef trans_screen():\n pyautogui.doubleClick(492, 974)\n pyautogui.typewrite(['enter'], 0.01)\n\n\ndef trans_chinese():\n for c_rubbish in chinese_rubbish:\n pin = p.get_pinyin(c_rubbish, '')\n pin_list = list(pin)\n pin_list.append('1')\n rubbish_set.append(pin_list)\n\n\ndef send_rubbish():\n for p_rubbish in rubbish_set:\n pyautogui.typewrite(p_rubbish, 0.01)\n pyautogui.typewrite(['enter'], 0.01)\n\n\ndef chk_rubbish():\n for p_dirty in rubbish_set:\n print(p_dirty)\n\n\nif __name__ == '__main__':\n trans_chinese()\n trans_screen()\n send_rubbish()\n",
"step-3": "<mask token>\nrubbish_dic = 1\nif rubbish_dic == 0:\n chinese_rubbish = u'草泥马', u'你妈死了', u'你是不是', u'低能', u'人话都听不懂', u'没家教的狗东西'\nelif rubbish_dic == 1:\n rubbish_file = open('rubbish_dic.txt')\n chinese_rubbish = rubbish_file.read().splitlines()\nrubbish_set = []\np = Pinyin()\n\n\ndef trans_screen():\n pyautogui.doubleClick(492, 974)\n pyautogui.typewrite(['enter'], 0.01)\n\n\ndef trans_chinese():\n for c_rubbish in chinese_rubbish:\n pin = p.get_pinyin(c_rubbish, '')\n pin_list = list(pin)\n pin_list.append('1')\n rubbish_set.append(pin_list)\n\n\ndef send_rubbish():\n for p_rubbish in rubbish_set:\n pyautogui.typewrite(p_rubbish, 0.01)\n pyautogui.typewrite(['enter'], 0.01)\n\n\ndef chk_rubbish():\n for p_dirty in rubbish_set:\n print(p_dirty)\n\n\nif __name__ == '__main__':\n trans_chinese()\n trans_screen()\n send_rubbish()\n",
"step-4": "import pyautogui\nfrom xpinyin import Pinyin\nrubbish_dic = 1\nif rubbish_dic == 0:\n chinese_rubbish = u'草泥马', u'你妈死了', u'你是不是', u'低能', u'人话都听不懂', u'没家教的狗东西'\nelif rubbish_dic == 1:\n rubbish_file = open('rubbish_dic.txt')\n chinese_rubbish = rubbish_file.read().splitlines()\nrubbish_set = []\np = Pinyin()\n\n\ndef trans_screen():\n pyautogui.doubleClick(492, 974)\n pyautogui.typewrite(['enter'], 0.01)\n\n\ndef trans_chinese():\n for c_rubbish in chinese_rubbish:\n pin = p.get_pinyin(c_rubbish, '')\n pin_list = list(pin)\n pin_list.append('1')\n rubbish_set.append(pin_list)\n\n\ndef send_rubbish():\n for p_rubbish in rubbish_set:\n pyautogui.typewrite(p_rubbish, 0.01)\n pyautogui.typewrite(['enter'], 0.01)\n\n\ndef chk_rubbish():\n for p_dirty in rubbish_set:\n print(p_dirty)\n\n\nif __name__ == '__main__':\n trans_chinese()\n trans_screen()\n send_rubbish()\n",
"step-5": "# coding=utf-8\nimport pyautogui\nfrom xpinyin import Pinyin\n\nrubbish_dic=1\n\nif rubbish_dic==0:\n chinese_rubbish=(\n u\"草泥马\",\n u\"你妈死了\",\n u\"你是不是\",\n u\"低能\",\n u\"人话都听不懂\",\n u\"没家教的狗东西\", \n )\nelif rubbish_dic==1:\n rubbish_file=open(\"rubbish_dic.txt\")\n chinese_rubbish=rubbish_file.read().splitlines()\n\n\nrubbish_set=[] #最终的拼音方式\np=Pinyin() #用于转换拼音\n\n#通过点击的方式切屏 \ndef trans_screen():\n pyautogui.doubleClick(492,974)\n pyautogui.typewrite(['enter'],0.01)\n\n#将中文转化成拼音\ndef trans_chinese():\n for c_rubbish in chinese_rubbish:\n pin=p.get_pinyin(c_rubbish,'')\n pin_list=list(pin)\n pin_list.append(\"1\")\n rubbish_set.append(pin_list)\n\n#发送text\ndef send_rubbish(): \n for p_rubbish in rubbish_set:\n pyautogui.typewrite(p_rubbish,0.01)\n pyautogui.typewrite(['enter'],0.01)\n\n#查看当前的rubbish_set内容\ndef chk_rubbish():\n for p_dirty in rubbish_set:\n print(p_dirty)\n\nif __name__ == \"__main__\":\n trans_chinese()\n #chk_rubbish()\n trans_screen()\n send_rubbish()\n",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |