code (string, lengths 13-1.2M) | order_type (string, 1 value) | original_example (dict) | step_ids (list, lengths 1-5)
---|---|---|---|
import datetime
count = 0
for y in xrange(1901,2001):
    for m in xrange(1,13):
        if datetime.date(y,m,1).weekday() == 6:
            count += 1
print count
|
normal
|
{
"blob_id": "7430e17d1c424362399cf09a0c3ecae825d04567",
"index": 2996,
"step-1": "import datetime\ncount = 0\nfor y in xrange(1901,2001):\n\tfor m in xrange(1,13):\n\t\tif datetime.date(y,m,1).weekday() == 6:\n\t\t\tcount += 1\n\nprint count\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
from telegram.ext import Updater, Filters, MessageHandler, PicklePersistence
import telegram
import logging
logging.basicConfig(format='%(asctime)s %(message)s\n',
level=logging.INFO,filename='log.json')
logger = logging.getLogger(__name__)
def main():
    # my_persistence = PicklePersistence(filename="users") #incomment if you need persistence
    # updater = Updater("",persistence=my_persistence,use_context=True)
    updater = Updater("",use_context=True)
    dp = updater.dispatcher
    jobs = updater.job_queue
    dp.add_error_handler(error)
    updater.start_polling()
    updater.idle()
if __name__=="__main__":
    main()
|
normal
|
{
"blob_id": "0a90f29a4e18c2aed23cb31b4239d44d23526327",
"index": 9133,
"step-1": "<mask token>\n\n\ndef main():\n updater = Updater('', use_context=True)\n dp = updater.dispatcher\n jobs = updater.job_queue\n dp.add_error_handler(error)\n updater.start_polling()\n updater.idle()\n\n\n<mask token>\n",
"step-2": "<mask token>\nlogging.basicConfig(format='%(asctime)s %(message)s\\n', level=logging.INFO,\n filename='log.json')\n<mask token>\n\n\ndef main():\n updater = Updater('', use_context=True)\n dp = updater.dispatcher\n jobs = updater.job_queue\n dp.add_error_handler(error)\n updater.start_polling()\n updater.idle()\n\n\nif __name__ == '__main__':\n main()\n",
"step-3": "<mask token>\nlogging.basicConfig(format='%(asctime)s %(message)s\\n', level=logging.INFO,\n filename='log.json')\nlogger = logging.getLogger(__name__)\n\n\ndef main():\n updater = Updater('', use_context=True)\n dp = updater.dispatcher\n jobs = updater.job_queue\n dp.add_error_handler(error)\n updater.start_polling()\n updater.idle()\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "from telegram.ext import Updater, Filters, MessageHandler, PicklePersistence\nimport telegram\nimport logging\nlogging.basicConfig(format='%(asctime)s %(message)s\\n', level=logging.INFO,\n filename='log.json')\nlogger = logging.getLogger(__name__)\n\n\ndef main():\n updater = Updater('', use_context=True)\n dp = updater.dispatcher\n jobs = updater.job_queue\n dp.add_error_handler(error)\n updater.start_polling()\n updater.idle()\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "from telegram.ext import Updater, Filters, MessageHandler, PicklePersistence\nimport telegram\nimport logging\n\n\nlogging.basicConfig(format='%(asctime)s %(message)s\\n',\n level=logging.INFO,filename='log.json')\n\nlogger = logging.getLogger(__name__)\n\n\ndef main():\n\n # my_persistence = PicklePersistence(filename=\"users\") #incomment if you need persistence \n \n # updater = Updater(\"\",persistence=my_persistence,use_context=True)\n updater = Updater(\"\",use_context=True)\n\n dp = updater.dispatcher\n jobs = updater.job_queue\n \n\n \n dp.add_error_handler(error)\n\n updater.start_polling()\n \n updater.idle()\n\n\n\nif __name__==\"__main__\":\n main()",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
def group(arr):
    low, mid, high = 0, 0, len(arr)-1
    while mid <= high:
        print(arr)
        if arr[mid] == 'R' :
            arr[low], arr[mid] = arr[mid], arr[low]
            low += 1
            mid += 1
        elif arr[mid] == 'G':
            mid += 1
        else:
            arr[high], arr[mid] = arr[mid], arr[high]
            high -= 1
    return arr
*arr, = map(str, input("enter the list of R, G, B").split())
print(group(arr))
|
normal
|
{
"blob_id": "8ad47bf292e0046550cc0ef6f6bb75cf179ebd4b",
"index": 7477,
"step-1": "<mask token>\n",
"step-2": "def group(arr):\n low, mid, high = 0, 0, len(arr) - 1\n while mid <= high:\n print(arr)\n if arr[mid] == 'R':\n arr[low], arr[mid] = arr[mid], arr[low]\n low += 1\n mid += 1\n elif arr[mid] == 'G':\n mid += 1\n else:\n arr[high], arr[mid] = arr[mid], arr[high]\n high -= 1\n return arr\n\n\n<mask token>\n",
"step-3": "def group(arr):\n low, mid, high = 0, 0, len(arr) - 1\n while mid <= high:\n print(arr)\n if arr[mid] == 'R':\n arr[low], arr[mid] = arr[mid], arr[low]\n low += 1\n mid += 1\n elif arr[mid] == 'G':\n mid += 1\n else:\n arr[high], arr[mid] = arr[mid], arr[high]\n high -= 1\n return arr\n\n\n<mask token>\nprint(group(arr))\n",
"step-4": "def group(arr):\n low, mid, high = 0, 0, len(arr) - 1\n while mid <= high:\n print(arr)\n if arr[mid] == 'R':\n arr[low], arr[mid] = arr[mid], arr[low]\n low += 1\n mid += 1\n elif arr[mid] == 'G':\n mid += 1\n else:\n arr[high], arr[mid] = arr[mid], arr[high]\n high -= 1\n return arr\n\n\n*arr, = map(str, input('enter the list of R, G, B').split())\nprint(group(arr))\n",
"step-5": "def group(arr):\r\n low, mid, high = 0, 0, len(arr)-1\r\n while mid <= high:\r\n print(arr)\r\n if arr[mid] == 'R' :\r\n arr[low], arr[mid] = arr[mid], arr[low]\r\n low += 1\r\n mid += 1\r\n elif arr[mid] == 'G':\r\n mid += 1\r\n else:\r\n arr[high], arr[mid] = arr[mid], arr[high]\r\n high -= 1\r\n return arr\r\n \r\n*arr, = map(str, input(\"enter the list of R, G, B\").split())\r\n\r\nprint(group(arr))\r\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
n, a, b = map(int, input().split())
cl = list(map(int, input().split()))
for i in range(n):
    if cl[i] == a + b:
        print(i + 1)
|
normal
|
{
"blob_id": "ff081a5ff46ab37dc5a144fb4616c06ef3bca490",
"index": 7286,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor i in range(n):\n if cl[i] == a + b:\n print(i + 1)\n",
"step-3": "n, a, b = map(int, input().split())\ncl = list(map(int, input().split()))\nfor i in range(n):\n if cl[i] == a + b:\n print(i + 1)\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
"""Test an example."""
from . import main
def test_readme_escaping() -> None:
"""Ensure the demo matches expected."""
assert main() == "<div><span>Escaping</span></div>"
|
normal
|
{
"blob_id": "7b459aad399a31f61b8686e1919b38d5538924b8",
"index": 2014,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef test_readme_escaping() ->None:\n \"\"\"Ensure the demo matches expected.\"\"\"\n assert main() == '<div><span>Escaping</span></div>'\n",
"step-3": "<mask token>\nfrom . import main\n\n\ndef test_readme_escaping() ->None:\n \"\"\"Ensure the demo matches expected.\"\"\"\n assert main() == '<div><span>Escaping</span></div>'\n",
"step-4": "\"\"\"Test an example.\"\"\"\nfrom . import main\n\n\ndef test_readme_escaping() -> None:\n \"\"\"Ensure the demo matches expected.\"\"\"\n assert main() == \"<div><span>Escaping</span></div>\"\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
#!/usr/bin/env python
"""
.. module:: convert
:synopsis: used to create info.txt and the <txname>.txt files.
"""
import sys
import os
import argparse
argparser = argparse.ArgumentParser(description =
'create info.txt, txname.txt, twiki.txt and sms.py')
argparser.add_argument ('-utilsPath', '--utilsPath',
help = 'path to the package smodels_utils',\
type = str )
argparser.add_argument ('-smodelsPath', '--smodelsPath',
help = 'path to the package smodels_utils',\
type = str )
args = argparser.parse_args()
if args.utilsPath:
    utilsPath = args.utilsPath
else:
    databaseRoot = '../../../'
    sys.path.append(os.path.abspath(databaseRoot))
    from utilsPath import utilsPath
    utilsPath = databaseRoot + utilsPath
if args.smodelsPath:
    sys.path.append(os.path.abspath(args.smodelsPath))
sys.path.append(os.path.abspath(utilsPath))
from smodels_utils.dataPreparation.inputObjects import MetaInfoInput,DataSetInput
from smodels_utils.dataPreparation.databaseCreation import databaseCreator
from smodels_utils.dataPreparation.massPlaneObjects import x, y, z
#+++++++ global info block ++++++++++++++
info = MetaInfoInput('ATLAS-SUSY-2013-19')
info.comment = 'T2tt UL are from DF channel only, no combined UL map available'
info.sqrts = '8.0'
info.private = False
info.lumi = '20.3'
info.publication = 'http://link.springer.com/article/10.1007/JHEP06(2014)124'
info.url = 'https://atlas.web.cern.ch/Atlas/GROUPS/PHYSICS/PAPERS/SUSY-2013-19/'
info.arxiv = 'http://arxiv.org/abs/1403.4853'
info.prettyName = '2 OS leptons + (b)jets + Etmiss (leptonic/hadronic m_T2)'
info.supersedes = 'ATLAS-CONF-2013-048'
#+++++++ dataset block ++++++++++++++
dataset = DataSetInput('data')
dataset.setInfo(dataType = 'upperLimit', dataId = None)
#+++++++ next txName block ++++++++++++++
T2tt = dataset.addTxName('T2tt')
T2tt.constraint ="[[['t+']],[['t-']]]"
T2tt.conditionDescription ="None"
T2tt.condition ="None"
T2tt.source = "ATLAS"
#+++++++ next mass plane block ++++++++++++++
T2tt = T2tt.addMassPlane(2*[[x, y]])
T2tt.dataUrl = "http://hepdata.cedar.ac.uk/view/ins1286444/d72"
T2tt.histoDataUrl = "https://atlas.web.cern.ch/Atlas/GROUPS/PHYSICS/PAPERS/SUSY-2013-19/figaux_10a.png"
T2tt.figure = "fig 10a"
T2tt.figureUrl = "https://atlas.web.cern.ch/Atlas/GROUPS/PHYSICS/PAPERS/SUSY-2013-19/figaux_10a.png"
T2tt.setSources(dataLabels= ['obsExclusion', 'upperLimits'],
dataFiles= ['orig/exclusionline_T2tt_DF.txt', 'orig/T2tt.txt'],
dataFormats= ['txt', 'txt'])
#+++++++ next txName block ++++++++++++++
T2bbWW = dataset.addTxName('T2bbWW')
T2bbWW.constraint ="[[['b','W+']],[['b','W-']]]"
T2bbWW.conditionDescription ="None"
T2bbWW.condition ="None"
T2bbWW.source = "ATLAS"
#+++++++ next mass plane block ++++++++++++++
T2bbWW = T2bbWW.addMassPlane(2*[[x, y]])
T2bbWW.figure = 'Fig.(aux) 3e'
T2bbWW.figureUrl = 'https://atlas.web.cern.ch/Atlas/GROUPS/PHYSICS/PAPERS/SUSY-2013-19/figaux_03e.png'
T2bbWW.dataUrl = 'http://hepdata.cedar.ac.uk/view/ins1286444/d42'
T2bbWW.setSources(dataLabels= ['obsExclusion', 'upperLimits'],
dataFiles= ['orig/exclusionline_T2bbWW.txt', 'orig/T2bbWW.txt'],
dataFormats= ['txt', 'txt'])
#+++++++ next txName block ++++++++++++++
T6bbWW = dataset.addTxName('T6bbWW')
T6bbWW.checked ="VM"
T6bbWW.constraint ="[[['b'],['W+']],[['b'],['W-']]]"
T6bbWW.conditionDescription ="None"
T6bbWW.condition ="None"
T6bbWW.source = "ATLAS"
T6bbWW.massConstraint = None
T6bbWWoff = dataset.addTxName('T6bbWWoff')
T6bbWWoff.constraint ="22*([[['b'],['l+','nu']],[['b'],['l-','nu']]])"
T6bbWWoff.conditionDescription="[[['b'],['l+','nu']],[['b'],['l-','nu']]] > 2*[[['b'],['e+','nu']],[['b'],['e-','nu']]]"
T6bbWWoff.condition="Cgtr([[['b'],['l+','nu']],[['b'],['l-','nu']]],2*[[['b'],['e+','nu']],[['b'],['e-','nu']]])"
T6bbWWoff.massConstraint = [['dm >= 0.0', 'dm <= 76.0'], ['dm >= 0.0', 'dm <= 76.0']]
T6bbWWoff.source = "ATLAS"
#+++++++ next mass plane block ++++++++++++++
T6bbWWLSP001 = T6bbWW.addMassPlane(2*[[x, y, 1.0]])
T6bbWWLSP001.figure = 'Fig.(aux) 3a'
T6bbWWLSP001.figureUrl = 'https://atlas.web.cern.ch/Atlas/GROUPS/PHYSICS/PAPERS/SUSY-2013-19/figaux_3a.png'
T6bbWWLSP001.dataUrl = 'http://hepdata.cedar.ac.uk/view/ins1286444/d30'
T6bbWWLSP001.setSources(dataLabels= ['obsExclusion', 'upperLimits'],
dataFiles= ['orig/exclusionline_T6bbWWLSP001.txt', 'orig/T6bbWWLSP001.txt'],
dataFormats= ['txt', 'txt'])
T6bbWWoff.addMassPlane(T6bbWWLSP001)
#+++++++ next mass plane block ++++++++++++++
T6bbWWD010 = T6bbWW.addMassPlane(2*[[x, x-10.0, y]])
T6bbWWD010.figure = "fig(aux) 3b"
T6bbWWD010.figureUrl = 'https://atlas.web.cern.ch/Atlas/GROUPS/PHYSICS/PAPERS/SUSY-2013-19/figaux_3b.png'
T6bbWWD010.dataUrl = 'Not defined'
T6bbWWD010.setSources(dataLabels= ['obsExclusion', 'upperLimits'],
dataFiles= ['orig/exclusionline_T6bbWWD10.txt', 'orig/T6bbWWD010.txt'],
dataFormats= ['txt', 'txt'])
T6bbWWoff.addMassPlane(T6bbWWD010)
#+++++++ next mass plane block ++++++++++++++
T6bbWWM1300 = T6bbWW.addMassPlane(2*[[300.0, x, y]])
T6bbWWM1300.figure = 'Fig.(aux) 3c'
T6bbWWM1300.figureUrl = 'https://atlas.web.cern.ch/Atlas/GROUPS/PHYSICS/PAPERS/SUSY-2013-19/fig_16.png'
T6bbWWM1300.dataUrl = 'http://hepdata.cedar.ac.uk/view/ins1286444/d36'
T6bbWWM1300.setSources(dataLabels= ['obsExclusion', 'upperLimits'],
dataFiles= ['orig/exclusionline_T6bbWWM1300.txt', 'orig/T6bbWWM1300.txt'],
dataFormats= ['txt', 'txt'])
T6bbWWoff.addMassPlane(T6bbWWM1300)
#+++++++ next mass plane block ++++++++++++++
T6bbWWC106 = T6bbWW.addMassPlane(2*[[x, 106.0, y]])
T6bbWWC106.figure = 'Fig.(aux) 3f'
T6bbWWC106.figureUrl = 'https://atlas.web.cern.ch/Atlas/GROUPS/PHYSICS/PAPERS/SUSY-2013-19/fig_20.png'
T6bbWWC106.dataUrl = 'http://hepdata.cedar.ac.uk/view/ins1286444/d68'
T6bbWWC106.setSources(dataLabels= ['obsExclusion', 'upperLimits'],
dataFiles= ['orig/exclusionline_T6bbWWC106.txt', 'orig/T6bbWWC106.txt'],
dataFormats= ['txt', 'txt'])
T6bbWWoff.addMassPlane(T6bbWWC106)
#+++++++ next mass plane block ++++++++++++++
T6bbWWx200 = T6bbWW.addMassPlane(2*[[x, y*2.0, y]])
T6bbWWx200.figure = 'Fig.(aux) 3d'
T6bbWWx200.figureUrl = 'https://atlas.web.cern.ch/Atlas/GROUPS/PHYSICS/PAPERS/SUSY-2013-19/fig_17.png'
T6bbWWx200.dataUrl = 'http://hepdata.cedar.ac.uk/view/ins1286444/d39'
T6bbWWx200.setSources(dataLabels= ['obsExclusion', 'upperLimits'],
dataFiles= ['orig/exclusionline_T6bbWWx200.txt', 'orig/T6bbWWx200.txt'],
dataFormats= ['txt', 'txt'])
T6bbWWoff.addMassPlane(T6bbWWx200)
databaseCreator.create()
|
normal
|
{
"blob_id": "c80b31bc154d5c1c8f9fc0ac226295160f2f9473",
"index": 4249,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nargparser.add_argument('-utilsPath', '--utilsPath', help=\n 'path to the package smodels_utils', type=str)\nargparser.add_argument('-smodelsPath', '--smodelsPath', help=\n 'path to the package smodels_utils', type=str)\n<mask token>\nif args.utilsPath:\n utilsPath = args.utilsPath\nelse:\n databaseRoot = '../../../'\n sys.path.append(os.path.abspath(databaseRoot))\n from utilsPath import utilsPath\n utilsPath = databaseRoot + utilsPath\nif args.smodelsPath:\n sys.path.append(os.path.abspath(args.smodelsPath))\nsys.path.append(os.path.abspath(utilsPath))\n<mask token>\ndataset.setInfo(dataType='upperLimit', dataId=None)\n<mask token>\nT2tt.setSources(dataLabels=['obsExclusion', 'upperLimits'], dataFiles=[\n 'orig/exclusionline_T2tt_DF.txt', 'orig/T2tt.txt'], dataFormats=['txt',\n 'txt'])\n<mask token>\nT2bbWW.setSources(dataLabels=['obsExclusion', 'upperLimits'], dataFiles=[\n 'orig/exclusionline_T2bbWW.txt', 'orig/T2bbWW.txt'], dataFormats=['txt',\n 'txt'])\n<mask token>\nT6bbWWLSP001.setSources(dataLabels=['obsExclusion', 'upperLimits'],\n dataFiles=['orig/exclusionline_T6bbWWLSP001.txt',\n 'orig/T6bbWWLSP001.txt'], dataFormats=['txt', 'txt'])\nT6bbWWoff.addMassPlane(T6bbWWLSP001)\n<mask token>\nT6bbWWD010.setSources(dataLabels=['obsExclusion', 'upperLimits'], dataFiles\n =['orig/exclusionline_T6bbWWD10.txt', 'orig/T6bbWWD010.txt'],\n dataFormats=['txt', 'txt'])\nT6bbWWoff.addMassPlane(T6bbWWD010)\n<mask token>\nT6bbWWM1300.setSources(dataLabels=['obsExclusion', 'upperLimits'],\n dataFiles=['orig/exclusionline_T6bbWWM1300.txt', 'orig/T6bbWWM1300.txt'\n ], dataFormats=['txt', 'txt'])\nT6bbWWoff.addMassPlane(T6bbWWM1300)\n<mask token>\nT6bbWWC106.setSources(dataLabels=['obsExclusion', 'upperLimits'], dataFiles\n =['orig/exclusionline_T6bbWWC106.txt', 'orig/T6bbWWC106.txt'],\n dataFormats=['txt', 'txt'])\nT6bbWWoff.addMassPlane(T6bbWWC106)\n<mask token>\nT6bbWWx200.setSources(dataLabels=['obsExclusion', 'upperLimits'], dataFiles\n =['orig/exclusionline_T6bbWWx200.txt', 'orig/T6bbWWx200.txt'],\n dataFormats=['txt', 'txt'])\nT6bbWWoff.addMassPlane(T6bbWWx200)\ndatabaseCreator.create()\n",
"step-3": "<mask token>\nargparser = argparse.ArgumentParser(description=\n 'create info.txt, txname.txt, twiki.txt and sms.py')\nargparser.add_argument('-utilsPath', '--utilsPath', help=\n 'path to the package smodels_utils', type=str)\nargparser.add_argument('-smodelsPath', '--smodelsPath', help=\n 'path to the package smodels_utils', type=str)\nargs = argparser.parse_args()\nif args.utilsPath:\n utilsPath = args.utilsPath\nelse:\n databaseRoot = '../../../'\n sys.path.append(os.path.abspath(databaseRoot))\n from utilsPath import utilsPath\n utilsPath = databaseRoot + utilsPath\nif args.smodelsPath:\n sys.path.append(os.path.abspath(args.smodelsPath))\nsys.path.append(os.path.abspath(utilsPath))\n<mask token>\ninfo = MetaInfoInput('ATLAS-SUSY-2013-19')\ninfo.comment = 'T2tt UL are from DF channel only, no combined UL map available'\ninfo.sqrts = '8.0'\ninfo.private = False\ninfo.lumi = '20.3'\ninfo.publication = 'http://link.springer.com/article/10.1007/JHEP06(2014)124'\ninfo.url = (\n 'https://atlas.web.cern.ch/Atlas/GROUPS/PHYSICS/PAPERS/SUSY-2013-19/')\ninfo.arxiv = 'http://arxiv.org/abs/1403.4853'\ninfo.prettyName = '2 OS leptons + (b)jets + Etmiss (leptonic/hadronic m_T2)'\ninfo.supersedes = 'ATLAS-CONF-2013-048'\ndataset = DataSetInput('data')\ndataset.setInfo(dataType='upperLimit', dataId=None)\nT2tt = dataset.addTxName('T2tt')\nT2tt.constraint = \"[[['t+']],[['t-']]]\"\nT2tt.conditionDescription = 'None'\nT2tt.condition = 'None'\nT2tt.source = 'ATLAS'\nT2tt = T2tt.addMassPlane(2 * [[x, y]])\nT2tt.dataUrl = 'http://hepdata.cedar.ac.uk/view/ins1286444/d72'\nT2tt.histoDataUrl = (\n 'https://atlas.web.cern.ch/Atlas/GROUPS/PHYSICS/PAPERS/SUSY-2013-19/figaux_10a.png'\n )\nT2tt.figure = 'fig 10a'\nT2tt.figureUrl = (\n 'https://atlas.web.cern.ch/Atlas/GROUPS/PHYSICS/PAPERS/SUSY-2013-19/figaux_10a.png'\n )\nT2tt.setSources(dataLabels=['obsExclusion', 'upperLimits'], dataFiles=[\n 'orig/exclusionline_T2tt_DF.txt', 'orig/T2tt.txt'], dataFormats=['txt',\n 'txt'])\nT2bbWW = dataset.addTxName('T2bbWW')\nT2bbWW.constraint = \"[[['b','W+']],[['b','W-']]]\"\nT2bbWW.conditionDescription = 'None'\nT2bbWW.condition = 'None'\nT2bbWW.source = 'ATLAS'\nT2bbWW = T2bbWW.addMassPlane(2 * [[x, y]])\nT2bbWW.figure = 'Fig.(aux) 3e'\nT2bbWW.figureUrl = (\n 'https://atlas.web.cern.ch/Atlas/GROUPS/PHYSICS/PAPERS/SUSY-2013-19/figaux_03e.png'\n )\nT2bbWW.dataUrl = 'http://hepdata.cedar.ac.uk/view/ins1286444/d42'\nT2bbWW.setSources(dataLabels=['obsExclusion', 'upperLimits'], dataFiles=[\n 'orig/exclusionline_T2bbWW.txt', 'orig/T2bbWW.txt'], dataFormats=['txt',\n 'txt'])\nT6bbWW = dataset.addTxName('T6bbWW')\nT6bbWW.checked = 'VM'\nT6bbWW.constraint = \"[[['b'],['W+']],[['b'],['W-']]]\"\nT6bbWW.conditionDescription = 'None'\nT6bbWW.condition = 'None'\nT6bbWW.source = 'ATLAS'\nT6bbWW.massConstraint = None\nT6bbWWoff = dataset.addTxName('T6bbWWoff')\nT6bbWWoff.constraint = \"22*([[['b'],['l+','nu']],[['b'],['l-','nu']]])\"\nT6bbWWoff.conditionDescription = (\n \"[[['b'],['l+','nu']],[['b'],['l-','nu']]] > 2*[[['b'],['e+','nu']],[['b'],['e-','nu']]]\"\n )\nT6bbWWoff.condition = (\n \"Cgtr([[['b'],['l+','nu']],[['b'],['l-','nu']]],2*[[['b'],['e+','nu']],[['b'],['e-','nu']]])\"\n )\nT6bbWWoff.massConstraint = [['dm >= 0.0', 'dm <= 76.0'], ['dm >= 0.0',\n 'dm <= 76.0']]\nT6bbWWoff.source = 'ATLAS'\nT6bbWWLSP001 = T6bbWW.addMassPlane(2 * [[x, y, 1.0]])\nT6bbWWLSP001.figure = 'Fig.(aux) 3a'\nT6bbWWLSP001.figureUrl = (\n 'https://atlas.web.cern.ch/Atlas/GROUPS/PHYSICS/PAPERS/SUSY-2013-19/figaux_3a.png'\n 
)\nT6bbWWLSP001.dataUrl = 'http://hepdata.cedar.ac.uk/view/ins1286444/d30'\nT6bbWWLSP001.setSources(dataLabels=['obsExclusion', 'upperLimits'],\n dataFiles=['orig/exclusionline_T6bbWWLSP001.txt',\n 'orig/T6bbWWLSP001.txt'], dataFormats=['txt', 'txt'])\nT6bbWWoff.addMassPlane(T6bbWWLSP001)\nT6bbWWD010 = T6bbWW.addMassPlane(2 * [[x, x - 10.0, y]])\nT6bbWWD010.figure = 'fig(aux) 3b'\nT6bbWWD010.figureUrl = (\n 'https://atlas.web.cern.ch/Atlas/GROUPS/PHYSICS/PAPERS/SUSY-2013-19/figaux_3b.png'\n )\nT6bbWWD010.dataUrl = 'Not defined'\nT6bbWWD010.setSources(dataLabels=['obsExclusion', 'upperLimits'], dataFiles\n =['orig/exclusionline_T6bbWWD10.txt', 'orig/T6bbWWD010.txt'],\n dataFormats=['txt', 'txt'])\nT6bbWWoff.addMassPlane(T6bbWWD010)\nT6bbWWM1300 = T6bbWW.addMassPlane(2 * [[300.0, x, y]])\nT6bbWWM1300.figure = 'Fig.(aux) 3c'\nT6bbWWM1300.figureUrl = (\n 'https://atlas.web.cern.ch/Atlas/GROUPS/PHYSICS/PAPERS/SUSY-2013-19/fig_16.png'\n )\nT6bbWWM1300.dataUrl = 'http://hepdata.cedar.ac.uk/view/ins1286444/d36'\nT6bbWWM1300.setSources(dataLabels=['obsExclusion', 'upperLimits'],\n dataFiles=['orig/exclusionline_T6bbWWM1300.txt', 'orig/T6bbWWM1300.txt'\n ], dataFormats=['txt', 'txt'])\nT6bbWWoff.addMassPlane(T6bbWWM1300)\nT6bbWWC106 = T6bbWW.addMassPlane(2 * [[x, 106.0, y]])\nT6bbWWC106.figure = 'Fig.(aux) 3f'\nT6bbWWC106.figureUrl = (\n 'https://atlas.web.cern.ch/Atlas/GROUPS/PHYSICS/PAPERS/SUSY-2013-19/fig_20.png'\n )\nT6bbWWC106.dataUrl = 'http://hepdata.cedar.ac.uk/view/ins1286444/d68'\nT6bbWWC106.setSources(dataLabels=['obsExclusion', 'upperLimits'], dataFiles\n =['orig/exclusionline_T6bbWWC106.txt', 'orig/T6bbWWC106.txt'],\n dataFormats=['txt', 'txt'])\nT6bbWWoff.addMassPlane(T6bbWWC106)\nT6bbWWx200 = T6bbWW.addMassPlane(2 * [[x, y * 2.0, y]])\nT6bbWWx200.figure = 'Fig.(aux) 3d'\nT6bbWWx200.figureUrl = (\n 'https://atlas.web.cern.ch/Atlas/GROUPS/PHYSICS/PAPERS/SUSY-2013-19/fig_17.png'\n )\nT6bbWWx200.dataUrl = 'http://hepdata.cedar.ac.uk/view/ins1286444/d39'\nT6bbWWx200.setSources(dataLabels=['obsExclusion', 'upperLimits'], dataFiles\n =['orig/exclusionline_T6bbWWx200.txt', 'orig/T6bbWWx200.txt'],\n dataFormats=['txt', 'txt'])\nT6bbWWoff.addMassPlane(T6bbWWx200)\ndatabaseCreator.create()\n",
"step-4": "<mask token>\nimport sys\nimport os\nimport argparse\nargparser = argparse.ArgumentParser(description=\n 'create info.txt, txname.txt, twiki.txt and sms.py')\nargparser.add_argument('-utilsPath', '--utilsPath', help=\n 'path to the package smodels_utils', type=str)\nargparser.add_argument('-smodelsPath', '--smodelsPath', help=\n 'path to the package smodels_utils', type=str)\nargs = argparser.parse_args()\nif args.utilsPath:\n utilsPath = args.utilsPath\nelse:\n databaseRoot = '../../../'\n sys.path.append(os.path.abspath(databaseRoot))\n from utilsPath import utilsPath\n utilsPath = databaseRoot + utilsPath\nif args.smodelsPath:\n sys.path.append(os.path.abspath(args.smodelsPath))\nsys.path.append(os.path.abspath(utilsPath))\nfrom smodels_utils.dataPreparation.inputObjects import MetaInfoInput, DataSetInput\nfrom smodels_utils.dataPreparation.databaseCreation import databaseCreator\nfrom smodels_utils.dataPreparation.massPlaneObjects import x, y, z\ninfo = MetaInfoInput('ATLAS-SUSY-2013-19')\ninfo.comment = 'T2tt UL are from DF channel only, no combined UL map available'\ninfo.sqrts = '8.0'\ninfo.private = False\ninfo.lumi = '20.3'\ninfo.publication = 'http://link.springer.com/article/10.1007/JHEP06(2014)124'\ninfo.url = (\n 'https://atlas.web.cern.ch/Atlas/GROUPS/PHYSICS/PAPERS/SUSY-2013-19/')\ninfo.arxiv = 'http://arxiv.org/abs/1403.4853'\ninfo.prettyName = '2 OS leptons + (b)jets + Etmiss (leptonic/hadronic m_T2)'\ninfo.supersedes = 'ATLAS-CONF-2013-048'\ndataset = DataSetInput('data')\ndataset.setInfo(dataType='upperLimit', dataId=None)\nT2tt = dataset.addTxName('T2tt')\nT2tt.constraint = \"[[['t+']],[['t-']]]\"\nT2tt.conditionDescription = 'None'\nT2tt.condition = 'None'\nT2tt.source = 'ATLAS'\nT2tt = T2tt.addMassPlane(2 * [[x, y]])\nT2tt.dataUrl = 'http://hepdata.cedar.ac.uk/view/ins1286444/d72'\nT2tt.histoDataUrl = (\n 'https://atlas.web.cern.ch/Atlas/GROUPS/PHYSICS/PAPERS/SUSY-2013-19/figaux_10a.png'\n )\nT2tt.figure = 'fig 10a'\nT2tt.figureUrl = (\n 'https://atlas.web.cern.ch/Atlas/GROUPS/PHYSICS/PAPERS/SUSY-2013-19/figaux_10a.png'\n )\nT2tt.setSources(dataLabels=['obsExclusion', 'upperLimits'], dataFiles=[\n 'orig/exclusionline_T2tt_DF.txt', 'orig/T2tt.txt'], dataFormats=['txt',\n 'txt'])\nT2bbWW = dataset.addTxName('T2bbWW')\nT2bbWW.constraint = \"[[['b','W+']],[['b','W-']]]\"\nT2bbWW.conditionDescription = 'None'\nT2bbWW.condition = 'None'\nT2bbWW.source = 'ATLAS'\nT2bbWW = T2bbWW.addMassPlane(2 * [[x, y]])\nT2bbWW.figure = 'Fig.(aux) 3e'\nT2bbWW.figureUrl = (\n 'https://atlas.web.cern.ch/Atlas/GROUPS/PHYSICS/PAPERS/SUSY-2013-19/figaux_03e.png'\n )\nT2bbWW.dataUrl = 'http://hepdata.cedar.ac.uk/view/ins1286444/d42'\nT2bbWW.setSources(dataLabels=['obsExclusion', 'upperLimits'], dataFiles=[\n 'orig/exclusionline_T2bbWW.txt', 'orig/T2bbWW.txt'], dataFormats=['txt',\n 'txt'])\nT6bbWW = dataset.addTxName('T6bbWW')\nT6bbWW.checked = 'VM'\nT6bbWW.constraint = \"[[['b'],['W+']],[['b'],['W-']]]\"\nT6bbWW.conditionDescription = 'None'\nT6bbWW.condition = 'None'\nT6bbWW.source = 'ATLAS'\nT6bbWW.massConstraint = None\nT6bbWWoff = dataset.addTxName('T6bbWWoff')\nT6bbWWoff.constraint = \"22*([[['b'],['l+','nu']],[['b'],['l-','nu']]])\"\nT6bbWWoff.conditionDescription = (\n \"[[['b'],['l+','nu']],[['b'],['l-','nu']]] > 2*[[['b'],['e+','nu']],[['b'],['e-','nu']]]\"\n )\nT6bbWWoff.condition = (\n \"Cgtr([[['b'],['l+','nu']],[['b'],['l-','nu']]],2*[[['b'],['e+','nu']],[['b'],['e-','nu']]])\"\n )\nT6bbWWoff.massConstraint = [['dm >= 0.0', 'dm <= 76.0'], ['dm >= 0.0',\n 'dm <= 
76.0']]\nT6bbWWoff.source = 'ATLAS'\nT6bbWWLSP001 = T6bbWW.addMassPlane(2 * [[x, y, 1.0]])\nT6bbWWLSP001.figure = 'Fig.(aux) 3a'\nT6bbWWLSP001.figureUrl = (\n 'https://atlas.web.cern.ch/Atlas/GROUPS/PHYSICS/PAPERS/SUSY-2013-19/figaux_3a.png'\n )\nT6bbWWLSP001.dataUrl = 'http://hepdata.cedar.ac.uk/view/ins1286444/d30'\nT6bbWWLSP001.setSources(dataLabels=['obsExclusion', 'upperLimits'],\n dataFiles=['orig/exclusionline_T6bbWWLSP001.txt',\n 'orig/T6bbWWLSP001.txt'], dataFormats=['txt', 'txt'])\nT6bbWWoff.addMassPlane(T6bbWWLSP001)\nT6bbWWD010 = T6bbWW.addMassPlane(2 * [[x, x - 10.0, y]])\nT6bbWWD010.figure = 'fig(aux) 3b'\nT6bbWWD010.figureUrl = (\n 'https://atlas.web.cern.ch/Atlas/GROUPS/PHYSICS/PAPERS/SUSY-2013-19/figaux_3b.png'\n )\nT6bbWWD010.dataUrl = 'Not defined'\nT6bbWWD010.setSources(dataLabels=['obsExclusion', 'upperLimits'], dataFiles\n =['orig/exclusionline_T6bbWWD10.txt', 'orig/T6bbWWD010.txt'],\n dataFormats=['txt', 'txt'])\nT6bbWWoff.addMassPlane(T6bbWWD010)\nT6bbWWM1300 = T6bbWW.addMassPlane(2 * [[300.0, x, y]])\nT6bbWWM1300.figure = 'Fig.(aux) 3c'\nT6bbWWM1300.figureUrl = (\n 'https://atlas.web.cern.ch/Atlas/GROUPS/PHYSICS/PAPERS/SUSY-2013-19/fig_16.png'\n )\nT6bbWWM1300.dataUrl = 'http://hepdata.cedar.ac.uk/view/ins1286444/d36'\nT6bbWWM1300.setSources(dataLabels=['obsExclusion', 'upperLimits'],\n dataFiles=['orig/exclusionline_T6bbWWM1300.txt', 'orig/T6bbWWM1300.txt'\n ], dataFormats=['txt', 'txt'])\nT6bbWWoff.addMassPlane(T6bbWWM1300)\nT6bbWWC106 = T6bbWW.addMassPlane(2 * [[x, 106.0, y]])\nT6bbWWC106.figure = 'Fig.(aux) 3f'\nT6bbWWC106.figureUrl = (\n 'https://atlas.web.cern.ch/Atlas/GROUPS/PHYSICS/PAPERS/SUSY-2013-19/fig_20.png'\n )\nT6bbWWC106.dataUrl = 'http://hepdata.cedar.ac.uk/view/ins1286444/d68'\nT6bbWWC106.setSources(dataLabels=['obsExclusion', 'upperLimits'], dataFiles\n =['orig/exclusionline_T6bbWWC106.txt', 'orig/T6bbWWC106.txt'],\n dataFormats=['txt', 'txt'])\nT6bbWWoff.addMassPlane(T6bbWWC106)\nT6bbWWx200 = T6bbWW.addMassPlane(2 * [[x, y * 2.0, y]])\nT6bbWWx200.figure = 'Fig.(aux) 3d'\nT6bbWWx200.figureUrl = (\n 'https://atlas.web.cern.ch/Atlas/GROUPS/PHYSICS/PAPERS/SUSY-2013-19/fig_17.png'\n )\nT6bbWWx200.dataUrl = 'http://hepdata.cedar.ac.uk/view/ins1286444/d39'\nT6bbWWx200.setSources(dataLabels=['obsExclusion', 'upperLimits'], dataFiles\n =['orig/exclusionline_T6bbWWx200.txt', 'orig/T6bbWWx200.txt'],\n dataFormats=['txt', 'txt'])\nT6bbWWoff.addMassPlane(T6bbWWx200)\ndatabaseCreator.create()\n",
"step-5": "#!/usr/bin/env python\n\n\"\"\"\n.. module:: convert\n :synopsis: used to create info.txt and the <txname>.txt files.\n\n\"\"\"\nimport sys\nimport os\nimport argparse\n\nargparser = argparse.ArgumentParser(description = \n'create info.txt, txname.txt, twiki.txt and sms.py')\nargparser.add_argument ('-utilsPath', '--utilsPath', \nhelp = 'path to the package smodels_utils',\\\ntype = str )\nargparser.add_argument ('-smodelsPath', '--smodelsPath', \nhelp = 'path to the package smodels_utils',\\\ntype = str )\nargs = argparser.parse_args()\n\nif args.utilsPath:\n utilsPath = args.utilsPath\nelse:\n databaseRoot = '../../../'\n sys.path.append(os.path.abspath(databaseRoot))\n from utilsPath import utilsPath\n utilsPath = databaseRoot + utilsPath\nif args.smodelsPath:\n sys.path.append(os.path.abspath(args.smodelsPath))\n\nsys.path.append(os.path.abspath(utilsPath))\nfrom smodels_utils.dataPreparation.inputObjects import MetaInfoInput,DataSetInput\nfrom smodels_utils.dataPreparation.databaseCreation import databaseCreator\nfrom smodels_utils.dataPreparation.massPlaneObjects import x, y, z\n\n\n\n#+++++++ global info block ++++++++++++++\ninfo = MetaInfoInput('ATLAS-SUSY-2013-19')\ninfo.comment = 'T2tt UL are from DF channel only, no combined UL map available'\ninfo.sqrts = '8.0'\ninfo.private = False\ninfo.lumi = '20.3'\ninfo.publication = 'http://link.springer.com/article/10.1007/JHEP06(2014)124'\ninfo.url = 'https://atlas.web.cern.ch/Atlas/GROUPS/PHYSICS/PAPERS/SUSY-2013-19/'\ninfo.arxiv = 'http://arxiv.org/abs/1403.4853'\ninfo.prettyName = '2 OS leptons + (b)jets + Etmiss (leptonic/hadronic m_T2)'\ninfo.supersedes = 'ATLAS-CONF-2013-048'\n\n\n#+++++++ dataset block ++++++++++++++\ndataset = DataSetInput('data')\ndataset.setInfo(dataType = 'upperLimit', dataId = None)\n\n#+++++++ next txName block ++++++++++++++\nT2tt = dataset.addTxName('T2tt')\nT2tt.constraint =\"[[['t+']],[['t-']]]\"\nT2tt.conditionDescription =\"None\"\nT2tt.condition =\"None\"\nT2tt.source = \"ATLAS\"\n#+++++++ next mass plane block ++++++++++++++\nT2tt = T2tt.addMassPlane(2*[[x, y]])\nT2tt.dataUrl = \"http://hepdata.cedar.ac.uk/view/ins1286444/d72\"\nT2tt.histoDataUrl = \"https://atlas.web.cern.ch/Atlas/GROUPS/PHYSICS/PAPERS/SUSY-2013-19/figaux_10a.png\"\nT2tt.figure = \"fig 10a\"\nT2tt.figureUrl = \"https://atlas.web.cern.ch/Atlas/GROUPS/PHYSICS/PAPERS/SUSY-2013-19/figaux_10a.png\"\nT2tt.setSources(dataLabels= ['obsExclusion', 'upperLimits'],\n dataFiles= ['orig/exclusionline_T2tt_DF.txt', 'orig/T2tt.txt'],\n dataFormats= ['txt', 'txt'])\n\n#+++++++ next txName block ++++++++++++++\nT2bbWW = dataset.addTxName('T2bbWW')\nT2bbWW.constraint =\"[[['b','W+']],[['b','W-']]]\"\nT2bbWW.conditionDescription =\"None\"\nT2bbWW.condition =\"None\"\nT2bbWW.source = \"ATLAS\"\n#+++++++ next mass plane block ++++++++++++++\nT2bbWW = T2bbWW.addMassPlane(2*[[x, y]])\nT2bbWW.figure = 'Fig.(aux) 3e'\nT2bbWW.figureUrl = 'https://atlas.web.cern.ch/Atlas/GROUPS/PHYSICS/PAPERS/SUSY-2013-19/figaux_03e.png'\nT2bbWW.dataUrl = 'http://hepdata.cedar.ac.uk/view/ins1286444/d42'\nT2bbWW.setSources(dataLabels= ['obsExclusion', 'upperLimits'],\n dataFiles= ['orig/exclusionline_T2bbWW.txt', 'orig/T2bbWW.txt'],\n dataFormats= ['txt', 'txt'])\n\n#+++++++ next txName block ++++++++++++++\nT6bbWW = dataset.addTxName('T6bbWW')\nT6bbWW.checked =\"VM\"\nT6bbWW.constraint =\"[[['b'],['W+']],[['b'],['W-']]]\"\nT6bbWW.conditionDescription =\"None\"\nT6bbWW.condition =\"None\"\nT6bbWW.source = \"ATLAS\"\nT6bbWW.massConstraint = None\nT6bbWWoff = 
dataset.addTxName('T6bbWWoff')\nT6bbWWoff.constraint =\"22*([[['b'],['l+','nu']],[['b'],['l-','nu']]])\"\nT6bbWWoff.conditionDescription=\"[[['b'],['l+','nu']],[['b'],['l-','nu']]] > 2*[[['b'],['e+','nu']],[['b'],['e-','nu']]]\"\nT6bbWWoff.condition=\"Cgtr([[['b'],['l+','nu']],[['b'],['l-','nu']]],2*[[['b'],['e+','nu']],[['b'],['e-','nu']]])\"\nT6bbWWoff.massConstraint = [['dm >= 0.0', 'dm <= 76.0'], ['dm >= 0.0', 'dm <= 76.0']]\nT6bbWWoff.source = \"ATLAS\"\n#+++++++ next mass plane block ++++++++++++++\nT6bbWWLSP001 = T6bbWW.addMassPlane(2*[[x, y, 1.0]])\nT6bbWWLSP001.figure = 'Fig.(aux) 3a'\nT6bbWWLSP001.figureUrl = 'https://atlas.web.cern.ch/Atlas/GROUPS/PHYSICS/PAPERS/SUSY-2013-19/figaux_3a.png'\nT6bbWWLSP001.dataUrl = 'http://hepdata.cedar.ac.uk/view/ins1286444/d30'\nT6bbWWLSP001.setSources(dataLabels= ['obsExclusion', 'upperLimits'],\n dataFiles= ['orig/exclusionline_T6bbWWLSP001.txt', 'orig/T6bbWWLSP001.txt'],\n dataFormats= ['txt', 'txt'])\nT6bbWWoff.addMassPlane(T6bbWWLSP001)\n#+++++++ next mass plane block ++++++++++++++\nT6bbWWD010 = T6bbWW.addMassPlane(2*[[x, x-10.0, y]])\nT6bbWWD010.figure = \"fig(aux) 3b\"\nT6bbWWD010.figureUrl = 'https://atlas.web.cern.ch/Atlas/GROUPS/PHYSICS/PAPERS/SUSY-2013-19/figaux_3b.png'\nT6bbWWD010.dataUrl = 'Not defined'\nT6bbWWD010.setSources(dataLabels= ['obsExclusion', 'upperLimits'],\n dataFiles= ['orig/exclusionline_T6bbWWD10.txt', 'orig/T6bbWWD010.txt'],\n dataFormats= ['txt', 'txt'])\nT6bbWWoff.addMassPlane(T6bbWWD010)\n#+++++++ next mass plane block ++++++++++++++\nT6bbWWM1300 = T6bbWW.addMassPlane(2*[[300.0, x, y]])\nT6bbWWM1300.figure = 'Fig.(aux) 3c'\nT6bbWWM1300.figureUrl = 'https://atlas.web.cern.ch/Atlas/GROUPS/PHYSICS/PAPERS/SUSY-2013-19/fig_16.png'\nT6bbWWM1300.dataUrl = 'http://hepdata.cedar.ac.uk/view/ins1286444/d36'\nT6bbWWM1300.setSources(dataLabels= ['obsExclusion', 'upperLimits'],\n dataFiles= ['orig/exclusionline_T6bbWWM1300.txt', 'orig/T6bbWWM1300.txt'],\n dataFormats= ['txt', 'txt'])\nT6bbWWoff.addMassPlane(T6bbWWM1300)\n#+++++++ next mass plane block ++++++++++++++\nT6bbWWC106 = T6bbWW.addMassPlane(2*[[x, 106.0, y]])\nT6bbWWC106.figure = 'Fig.(aux) 3f'\nT6bbWWC106.figureUrl = 'https://atlas.web.cern.ch/Atlas/GROUPS/PHYSICS/PAPERS/SUSY-2013-19/fig_20.png'\nT6bbWWC106.dataUrl = 'http://hepdata.cedar.ac.uk/view/ins1286444/d68'\nT6bbWWC106.setSources(dataLabels= ['obsExclusion', 'upperLimits'],\n dataFiles= ['orig/exclusionline_T6bbWWC106.txt', 'orig/T6bbWWC106.txt'],\n dataFormats= ['txt', 'txt'])\nT6bbWWoff.addMassPlane(T6bbWWC106)\n#+++++++ next mass plane block ++++++++++++++\nT6bbWWx200 = T6bbWW.addMassPlane(2*[[x, y*2.0, y]])\nT6bbWWx200.figure = 'Fig.(aux) 3d'\nT6bbWWx200.figureUrl = 'https://atlas.web.cern.ch/Atlas/GROUPS/PHYSICS/PAPERS/SUSY-2013-19/fig_17.png'\nT6bbWWx200.dataUrl = 'http://hepdata.cedar.ac.uk/view/ins1286444/d39'\nT6bbWWx200.setSources(dataLabels= ['obsExclusion', 'upperLimits'],\n dataFiles= ['orig/exclusionline_T6bbWWx200.txt', 'orig/T6bbWWx200.txt'],\n dataFormats= ['txt', 'txt'])\nT6bbWWoff.addMassPlane(T6bbWWx200)\n\n\n\ndatabaseCreator.create()\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import os
import sys
import socket
__target__ = '${EXTERNAL_HOST}'
sources = {}
def process_the_source(fname, dest=None, host_ip=None, verbose=False):
    assert (os.path.exists(fname) and os.path.isfile(fname)), 'Cannot proceed without the fname in process_the_source().'
    the_lines = []
    with open(fname, 'r') as fIn:
        for line in fIn:
            l = line.rstrip()
            l = l.replace(__target__, host_ip)
            the_lines.append(l)
    with open(dest, 'w') as fOut:
        for l in the_lines:
            print(l, file=fOut)
    assert (os.path.exists(dest) and os.path.isfile(dest)), 'Cannot proceed without the dest file in process_the_source().'
if (__name__ == '__main__'):
    is_verbose = True
    root = sys.argv[1]
    host_ip = sys.argv[2]
    assert (len(host_ip) > 0), 'Cannot proceed without the host ip address.'
    assert (os.path.exists(root) and os.path.isdir(root)), 'Cannot proceed without the root in process_the_source().'
    sources['{}/.env'.format(root)] = '{}/code/.env'.format(root)
    if (is_verbose):
        print('BEGIN:')
    for s,d in sources.items():
        if (is_verbose):
            print('{} -> {}'.format(s, d))
        assert os.path.exists(s) and os.path.isfile(s), 'Cannot find "{}" so cannot proceed.'.format(s)
        process_the_source(s, dest=d, host_ip=host_ip, verbose=is_verbose)
    if (is_verbose):
        print('END!!!')
    if (is_verbose):
        print()
        print('Done.')
|
normal
|
{
"blob_id": "d6af9a75fbe8bdf1a81a352cee71ac81fb373b86",
"index": 9926,
"step-1": "<mask token>\n\n\ndef process_the_source(fname, dest=None, host_ip=None, verbose=False):\n assert os.path.exists(fname) and os.path.isfile(fname\n ), 'Cannot proceed without the fname in process_the_source().'\n the_lines = []\n with open(fname, 'r') as fIn:\n for line in fIn:\n l = line.rstrip()\n l = l.replace(__target__, host_ip)\n the_lines.append(l)\n with open(dest, 'w') as fOut:\n for l in the_lines:\n print(l, file=fOut)\n assert os.path.exists(dest) and os.path.isfile(dest\n ), 'Cannot proceed without the dest file in process_the_source().'\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef process_the_source(fname, dest=None, host_ip=None, verbose=False):\n assert os.path.exists(fname) and os.path.isfile(fname\n ), 'Cannot proceed without the fname in process_the_source().'\n the_lines = []\n with open(fname, 'r') as fIn:\n for line in fIn:\n l = line.rstrip()\n l = l.replace(__target__, host_ip)\n the_lines.append(l)\n with open(dest, 'w') as fOut:\n for l in the_lines:\n print(l, file=fOut)\n assert os.path.exists(dest) and os.path.isfile(dest\n ), 'Cannot proceed without the dest file in process_the_source().'\n\n\nif __name__ == '__main__':\n is_verbose = True\n root = sys.argv[1]\n host_ip = sys.argv[2]\n assert len(host_ip) > 0, 'Cannot proceed without the host ip address.'\n assert os.path.exists(root) and os.path.isdir(root\n ), 'Cannot proceed without the root in process_the_source().'\n sources['{}/.env'.format(root)] = '{}/code/.env'.format(root)\n if is_verbose:\n print('BEGIN:')\n for s, d in sources.items():\n if is_verbose:\n print('{} -> {}'.format(s, d))\n assert os.path.exists(s) and os.path.isfile(s\n ), 'Cannot find \"{}\" so cannot proceed.'.format(s)\n process_the_source(s, dest=d, host_ip=host_ip, verbose=is_verbose)\n if is_verbose:\n print('END!!!')\n if is_verbose:\n print()\n print('Done.')\n",
"step-3": "<mask token>\n__target__ = '${EXTERNAL_HOST}'\nsources = {}\n\n\ndef process_the_source(fname, dest=None, host_ip=None, verbose=False):\n assert os.path.exists(fname) and os.path.isfile(fname\n ), 'Cannot proceed without the fname in process_the_source().'\n the_lines = []\n with open(fname, 'r') as fIn:\n for line in fIn:\n l = line.rstrip()\n l = l.replace(__target__, host_ip)\n the_lines.append(l)\n with open(dest, 'w') as fOut:\n for l in the_lines:\n print(l, file=fOut)\n assert os.path.exists(dest) and os.path.isfile(dest\n ), 'Cannot proceed without the dest file in process_the_source().'\n\n\nif __name__ == '__main__':\n is_verbose = True\n root = sys.argv[1]\n host_ip = sys.argv[2]\n assert len(host_ip) > 0, 'Cannot proceed without the host ip address.'\n assert os.path.exists(root) and os.path.isdir(root\n ), 'Cannot proceed without the root in process_the_source().'\n sources['{}/.env'.format(root)] = '{}/code/.env'.format(root)\n if is_verbose:\n print('BEGIN:')\n for s, d in sources.items():\n if is_verbose:\n print('{} -> {}'.format(s, d))\n assert os.path.exists(s) and os.path.isfile(s\n ), 'Cannot find \"{}\" so cannot proceed.'.format(s)\n process_the_source(s, dest=d, host_ip=host_ip, verbose=is_verbose)\n if is_verbose:\n print('END!!!')\n if is_verbose:\n print()\n print('Done.')\n",
"step-4": "import os\nimport sys\nimport socket\n__target__ = '${EXTERNAL_HOST}'\nsources = {}\n\n\ndef process_the_source(fname, dest=None, host_ip=None, verbose=False):\n assert os.path.exists(fname) and os.path.isfile(fname\n ), 'Cannot proceed without the fname in process_the_source().'\n the_lines = []\n with open(fname, 'r') as fIn:\n for line in fIn:\n l = line.rstrip()\n l = l.replace(__target__, host_ip)\n the_lines.append(l)\n with open(dest, 'w') as fOut:\n for l in the_lines:\n print(l, file=fOut)\n assert os.path.exists(dest) and os.path.isfile(dest\n ), 'Cannot proceed without the dest file in process_the_source().'\n\n\nif __name__ == '__main__':\n is_verbose = True\n root = sys.argv[1]\n host_ip = sys.argv[2]\n assert len(host_ip) > 0, 'Cannot proceed without the host ip address.'\n assert os.path.exists(root) and os.path.isdir(root\n ), 'Cannot proceed without the root in process_the_source().'\n sources['{}/.env'.format(root)] = '{}/code/.env'.format(root)\n if is_verbose:\n print('BEGIN:')\n for s, d in sources.items():\n if is_verbose:\n print('{} -> {}'.format(s, d))\n assert os.path.exists(s) and os.path.isfile(s\n ), 'Cannot find \"{}\" so cannot proceed.'.format(s)\n process_the_source(s, dest=d, host_ip=host_ip, verbose=is_verbose)\n if is_verbose:\n print('END!!!')\n if is_verbose:\n print()\n print('Done.')\n",
"step-5": "import os\nimport sys\nimport socket\n\n__target__ = '${EXTERNAL_HOST}'\n\nsources = {}\n\ndef process_the_source(fname, dest=None, host_ip=None, verbose=False):\n assert (os.path.exists(fname) and os.path.isfile(fname)), 'Cannot proceed without the fname in process_the_source().'\n the_lines = []\n with open(fname, 'r') as fIn:\n for line in fIn:\n l = line.rstrip()\n l = l.replace(__target__, host_ip)\n the_lines.append(l)\n with open(dest, 'w') as fOut:\n for l in the_lines:\n print(l, file=fOut)\n assert (os.path.exists(dest) and os.path.isfile(dest)), 'Cannot proceed without the dest file in process_the_source().'\n \n\nif (__name__ == '__main__'):\n is_verbose = True\n root = sys.argv[1]\n host_ip = sys.argv[2]\n assert (len(host_ip) > 0), 'Cannot proceed without the host ip address.'\n\n assert (os.path.exists(root) and os.path.isdir(root)), 'Cannot proceed without the root in process_the_source().'\n sources['{}/.env'.format(root)] = '{}/code/.env'.format(root)\n\n if (is_verbose):\n print('BEGIN:')\n for s,d in sources.items():\n if (is_verbose):\n print('{} -> {}'.format(s, d))\n assert os.path.exists(s) and os.path.isfile(s), 'Cannot find \"{}\" so cannot proceed.'.format(s)\n process_the_source(s, dest=d, host_ip=host_ip, verbose=is_verbose)\n if (is_verbose):\n print('END!!!')\n\n if (is_verbose):\n print()\n print('Done.')\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
import re
# Wordcount: count the occurrences of each word in that phrase.
def word_count(phrase):
    phrase = re.sub(r'\W+|_', ' ', phrase.lower(), flags=re.UNICODE)
    word_list = phrase.split()
    wordfreq = [word_list.count(p) for p in word_list]
    return dict(zip(word_list, wordfreq))
|
normal
|
{
"blob_id": "e12905efa0be7d69e2719c05b40d18c50e7e4b2e",
"index": 4933,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef word_count(phrase):\n phrase = re.sub('\\\\W+|_', ' ', phrase.lower(), flags=re.UNICODE)\n word_list = phrase.split()\n wordfreq = [word_list.count(p) for p in word_list]\n return dict(zip(word_list, wordfreq))\n",
"step-3": "import re\n\n\ndef word_count(phrase):\n phrase = re.sub('\\\\W+|_', ' ', phrase.lower(), flags=re.UNICODE)\n word_list = phrase.split()\n wordfreq = [word_list.count(p) for p in word_list]\n return dict(zip(word_list, wordfreq))\n",
"step-4": "import re\n# Wordcount: count the occurrences of each word in that phrase.\ndef word_count(phrase):\n phrase = re.sub(r'\\W+|_', ' ', phrase.lower(), flags=re.UNICODE)\n word_list = phrase.split()\n wordfreq = [word_list.count(p) for p in word_list]\n return dict(zip(word_list, wordfreq))\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import loops
class Card():
    #to make a card you must type Card("Name of Card")
    def check_cat(self,string):
        if "Cat" in string:
            return True
        return False
    def __init__(self,string):
        self.type = string
        self.cat = self.check_cat(self.type)
        # self.image_back = image_back
        # self.image_front = image_front
    def __str__(self):
        return self.type
    #negates any action, except a defuse
    def nope(self,arr_players,cards,turn_order):
        count = 0
        for i,k in enumerate(arr_players):
            if i != turn_order:
                for i,k in enumerate(k.hand):
                    if k == cards[11]:
                        count += 1
        if count > 0:
            print("A nope card can be played")
            decision = input("Would a player like to play a nope card? (y/n)")
            while decision != "y" and decision != "n":
                decision = input("Would a player like to play a nope card? (y/n) ")
            if decision == "n":
                return False
            elif decision == 'y':
                for i,k in enumerate(arr_players):
                    print(str(i)+"-"+k.name)
                player = int(input("Which player would like to play the nope card?"))
                while (player < 0 or player > len(arr_players)) and players == turn_order:
                    player = int*input("Which player would like to play the nope card?")
                arr_players[player].hand.remove(cards[11])
                return True
        return False
    #makes another player choose a card to give away to current player
    def favor(self,hand,player,arr_players,played_card):
        recipient = loops.phase_of_taking(arr_players,player)
        card_taken = arr_players[recipient].hand.pop(loops.give_card(arr_players,recipient))
        print(card_taken,"was given")
        recipient.hand.remove(card_taken)
        player.hand.append(card_taken)
        return True,False
    #allows a player to steal a card from another player
    def steal(self,hand,player,arr_players,played_card):
        recipient = loops.phase_of_taking(arr_players,player)
        card_stolen = arr_players[recipient].hand.pop(loops.card_stealing(arr_players,recipient))
        print("You stole",card_stolen.type)
        hand.remove(played_card)
        player.hand.append(card_stolen)
        return True,False
    #makes the player skip a turn
    def skip(self,attack,pick):
        print("Your turn has been skipped")
        pick = False
        return pick,attack
    #the player makes the next person take his turn as well, forcing them to take 2 turns
    def attack(self,attack,pick):
        attack = True
        pick = False
        return pick,attack
    #see future draws the top three cards, prints the three cards, and puts the cards back in the correct positions
    def see_future(self,decker):
        if decker.cards_left() < 3:
            for i in range(decker.cards_left()):
                card = decker.draw_top(i)
                print(card.type)
                decker.add_card(card,i)
        else:
            for i in range(3):
                card = decker.draw_top(i)
                print(card.type)
                decker.add_card(card,i)
|
normal
|
{
"blob_id": "3b71ef6c3681b8c5e6aadf2d125c35cbf3a12661",
"index": 6248,
"step-1": "<mask token>\n\n\nclass Card:\n\n def check_cat(self, string):\n if 'Cat' in string:\n return True\n return False\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def steal(self, hand, player, arr_players, played_card):\n recipient = loops.phase_of_taking(arr_players, player)\n card_stolen = arr_players[recipient].hand.pop(loops.card_stealing(\n arr_players, recipient))\n print('You stole', card_stolen.type)\n hand.remove(played_card)\n player.hand.append(card_stolen)\n return True, False\n\n def skip(self, attack, pick):\n print('Your turn has been skipped')\n pick = False\n return pick, attack\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass Card:\n\n def check_cat(self, string):\n if 'Cat' in string:\n return True\n return False\n <mask token>\n\n def __str__(self):\n return self.type\n <mask token>\n\n def favor(self, hand, player, arr_players, played_card):\n recipient = loops.phase_of_taking(arr_players, player)\n card_taken = arr_players[recipient].hand.pop(loops.give_card(\n arr_players, recipient))\n print(card_taken, 'was given')\n recipient.hand.remove(card_taken)\n player.hand.append(card_taken)\n return True, False\n\n def steal(self, hand, player, arr_players, played_card):\n recipient = loops.phase_of_taking(arr_players, player)\n card_stolen = arr_players[recipient].hand.pop(loops.card_stealing(\n arr_players, recipient))\n print('You stole', card_stolen.type)\n hand.remove(played_card)\n player.hand.append(card_stolen)\n return True, False\n\n def skip(self, attack, pick):\n print('Your turn has been skipped')\n pick = False\n return pick, attack\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Card:\n\n def check_cat(self, string):\n if 'Cat' in string:\n return True\n return False\n\n def __init__(self, string):\n self.type = string\n self.cat = self.check_cat(self.type)\n\n def __str__(self):\n return self.type\n\n def nope(self, arr_players, cards, turn_order):\n count = 0\n for i, k in enumerate(arr_players):\n if i != turn_order:\n for i, k in enumerate(k.hand):\n if k == cards[11]:\n count += 1\n if count > 0:\n print('A nope card can be played')\n decision = input('Would a player like to play a nope card? (y/n)')\n while decision != 'y' and decision != 'n':\n decision = input(\n 'Would a player like to play a nope card? (y/n) ')\n if decision == 'n':\n return False\n elif decision == 'y':\n for i, k in enumerate(arr_players):\n print(str(i) + '-' + k.name)\n player = int(input(\n 'Which player would like to play the nope card?'))\n while (player < 0 or player > len(arr_players)\n ) and players == turn_order:\n player = int * input(\n 'Which player would like to play the nope card?')\n arr_players[player].hand.remove(cards[11])\n return True\n return False\n\n def favor(self, hand, player, arr_players, played_card):\n recipient = loops.phase_of_taking(arr_players, player)\n card_taken = arr_players[recipient].hand.pop(loops.give_card(\n arr_players, recipient))\n print(card_taken, 'was given')\n recipient.hand.remove(card_taken)\n player.hand.append(card_taken)\n return True, False\n\n def steal(self, hand, player, arr_players, played_card):\n recipient = loops.phase_of_taking(arr_players, player)\n card_stolen = arr_players[recipient].hand.pop(loops.card_stealing(\n arr_players, recipient))\n print('You stole', card_stolen.type)\n hand.remove(played_card)\n player.hand.append(card_stolen)\n return True, False\n\n def skip(self, attack, pick):\n print('Your turn has been skipped')\n pick = False\n return pick, attack\n <mask token>\n <mask token>\n",
"step-4": "<mask token>\n\n\nclass Card:\n\n def check_cat(self, string):\n if 'Cat' in string:\n return True\n return False\n\n def __init__(self, string):\n self.type = string\n self.cat = self.check_cat(self.type)\n\n def __str__(self):\n return self.type\n\n def nope(self, arr_players, cards, turn_order):\n count = 0\n for i, k in enumerate(arr_players):\n if i != turn_order:\n for i, k in enumerate(k.hand):\n if k == cards[11]:\n count += 1\n if count > 0:\n print('A nope card can be played')\n decision = input('Would a player like to play a nope card? (y/n)')\n while decision != 'y' and decision != 'n':\n decision = input(\n 'Would a player like to play a nope card? (y/n) ')\n if decision == 'n':\n return False\n elif decision == 'y':\n for i, k in enumerate(arr_players):\n print(str(i) + '-' + k.name)\n player = int(input(\n 'Which player would like to play the nope card?'))\n while (player < 0 or player > len(arr_players)\n ) and players == turn_order:\n player = int * input(\n 'Which player would like to play the nope card?')\n arr_players[player].hand.remove(cards[11])\n return True\n return False\n\n def favor(self, hand, player, arr_players, played_card):\n recipient = loops.phase_of_taking(arr_players, player)\n card_taken = arr_players[recipient].hand.pop(loops.give_card(\n arr_players, recipient))\n print(card_taken, 'was given')\n recipient.hand.remove(card_taken)\n player.hand.append(card_taken)\n return True, False\n\n def steal(self, hand, player, arr_players, played_card):\n recipient = loops.phase_of_taking(arr_players, player)\n card_stolen = arr_players[recipient].hand.pop(loops.card_stealing(\n arr_players, recipient))\n print('You stole', card_stolen.type)\n hand.remove(played_card)\n player.hand.append(card_stolen)\n return True, False\n\n def skip(self, attack, pick):\n print('Your turn has been skipped')\n pick = False\n return pick, attack\n\n def attack(self, attack, pick):\n attack = True\n pick = False\n return pick, attack\n\n def see_future(self, decker):\n if decker.cards_left() < 3:\n for i in range(decker.cards_left()):\n card = decker.draw_top(i)\n print(card.type)\n decker.add_card(card, i)\n else:\n for i in range(3):\n card = decker.draw_top(i)\n print(card.type)\n decker.add_card(card, i)\n",
"step-5": "import loops\r\n\r\nclass Card():\r\n #to make a card you must type Card(\"Name of Card\")\r\n def check_cat(self,string):\r\n if \"Cat\" in string:\r\n return True\r\n return False\r\n def __init__(self,string):\r\n self.type = string\r\n self.cat = self.check_cat(self.type)\r\n # self.image_back = image_back\r\n # self.image_front = image_front\r\n def __str__(self):\r\n return self.type\r\n #negates any action, except a defuse\r\n def nope(self,arr_players,cards,turn_order):\r\n count = 0\r\n for i,k in enumerate(arr_players):\r\n if i != turn_order:\r\n for i,k in enumerate(k.hand):\r\n if k == cards[11]:\r\n count += 1\r\n if count > 0:\r\n print(\"A nope card can be played\")\r\n decision = input(\"Would a player like to play a nope card? (y/n)\")\r\n while decision != \"y\" and decision != \"n\":\r\n decision = input(\"Would a player like to play a nope card? (y/n) \")\r\n if decision == \"n\":\r\n return False\r\n elif decision == 'y':\r\n for i,k in enumerate(arr_players):\r\n print(str(i)+\"-\"+k.name)\r\n player = int(input(\"Which player would like to play the nope card?\"))\r\n while (player < 0 or player > len(arr_players)) and players == turn_order:\r\n player = int*input(\"Which player would like to play the nope card?\")\r\n arr_players[player].hand.remove(cards[11])\r\n return True\r\n return False\r\n\r\n #makes another player choose a card to give away to current player\r\n def favor(self,hand,player,arr_players,played_card):\r\n recipient = loops.phase_of_taking(arr_players,player)\r\n card_taken = arr_players[recipient].hand.pop(loops.give_card(arr_players,recipient))\r\n print(card_taken,\"was given\")\r\n recipient.hand.remove(card_taken)\r\n player.hand.append(card_taken)\r\n return True,False\r\n #allows a player to steal a card from another player\r\n def steal(self,hand,player,arr_players,played_card):\r\n recipient = loops.phase_of_taking(arr_players,player)\r\n card_stolen = arr_players[recipient].hand.pop(loops.card_stealing(arr_players,recipient))\r\n print(\"You stole\",card_stolen.type)\r\n hand.remove(played_card)\r\n player.hand.append(card_stolen)\r\n return True,False\r\n #makes the player skip a turn\r\n def skip(self,attack,pick):\r\n print(\"Your turn has been skipped\")\r\n pick = False\r\n return pick,attack\r\n #the player makes the next person take his turn as well, forcing them to take 2 turns\r\n def attack(self,attack,pick):\r\n attack = True\r\n pick = False\r\n return pick,attack\r\n #see future draws the top three cards, prints the three cards, and puts the cards back in the correct positions\r\n def see_future(self,decker):\r\n if decker.cards_left() < 3:\r\n for i in range(decker.cards_left()):\r\n card = decker.draw_top(i)\r\n print(card.type)\r\n decker.add_card(card,i)\r\n else:\r\n for i in range(3):\r\n card = decker.draw_top(i)\r\n print(card.type)\r\n decker.add_card(card,i) ",
"step-ids": [
4,
6,
8,
10,
12
]
}
|
[
4,
6,
8,
10,
12
] |
import numpy as np
from scipy.stats import loguniform
import sys
def generate_parameters(seed):
    np.random.seed(seed)
    out={}
    out['nfeatures'] = np.random.randint(3, 25)
    out['lr'] = float(loguniform.rvs(0.001, 0.01, size=1))
    out['gamma'] = np.random.uniform(0.75, 0.05)
    out['penalty'] = float(loguniform.rvs(0.00001, 0.1, size=1))
    out['batch'] = np.random.choice([32,64])
    return out

if __name__ == '__main__':
    out = generate_parameters(int(sys.argv[1]))
    out_str = '--nfeatures {} --lr {} --gamma {} --penalty {} --batch {}'.format(out['nfeatures'], out['lr'], out['gamma'], out['penalty'], out['batch'])
    print(out_str)
|
normal
|
{
"blob_id": "7571e86be1077ae0f7ae542824cfcaaa2949dc83",
"index": 8731,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef generate_parameters(seed):\n np.random.seed(seed)\n out = {}\n out['nfeatures'] = np.random.randint(3, 25)\n out['lr'] = float(loguniform.rvs(0.001, 0.01, size=1))\n out['gamma'] = np.random.uniform(0.75, 0.05)\n out['penalty'] = float(loguniform.rvs(1e-05, 0.1, size=1))\n out['batch'] = np.random.choice([32, 64])\n return out\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef generate_parameters(seed):\n np.random.seed(seed)\n out = {}\n out['nfeatures'] = np.random.randint(3, 25)\n out['lr'] = float(loguniform.rvs(0.001, 0.01, size=1))\n out['gamma'] = np.random.uniform(0.75, 0.05)\n out['penalty'] = float(loguniform.rvs(1e-05, 0.1, size=1))\n out['batch'] = np.random.choice([32, 64])\n return out\n\n\nif __name__ == '__main__':\n out = generate_parameters(int(sys.argv[1]))\n out_str = ('--nfeatures {} --lr {} --gamma {} --penalty {} --batch {}'.\n format(out['nfeatures'], out['lr'], out['gamma'], out['penalty'],\n out['batch']))\n print(out_str)\n",
"step-4": "import numpy as np\nfrom scipy.stats import loguniform\nimport sys\n\n\ndef generate_parameters(seed):\n np.random.seed(seed)\n out = {}\n out['nfeatures'] = np.random.randint(3, 25)\n out['lr'] = float(loguniform.rvs(0.001, 0.01, size=1))\n out['gamma'] = np.random.uniform(0.75, 0.05)\n out['penalty'] = float(loguniform.rvs(1e-05, 0.1, size=1))\n out['batch'] = np.random.choice([32, 64])\n return out\n\n\nif __name__ == '__main__':\n out = generate_parameters(int(sys.argv[1]))\n out_str = ('--nfeatures {} --lr {} --gamma {} --penalty {} --batch {}'.\n format(out['nfeatures'], out['lr'], out['gamma'], out['penalty'],\n out['batch']))\n print(out_str)\n",
"step-5": "import numpy as np\nfrom scipy.stats import loguniform\nimport sys\n\ndef generate_parameters(seed):\n np.random.seed(seed)\n out={}\n out['nfeatures'] = np.random.randint(3, 25)\n out['lr'] = float(loguniform.rvs(0.001, 0.01, size=1))\n out['gamma'] = np.random.uniform(0.75, 0.05)\n out['penalty'] = float(loguniform.rvs(0.00001, 0.1, size=1))\n out['batch'] = np.random.choice([32,64])\n return out\n\nif __name__ == '__main__':\n out = generate_parameters(int(sys.argv[1]))\n out_str = '--nfeatures {} --lr {} --gamma {} --penalty {} --batch {}'.format(out['nfeatures'], out['lr'], out['gamma'], out['penalty'], out['batch'])\n print(out_str)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from django.db import models
# Create your models here.
class Task(models.Model):
level = models.PositiveSmallIntegerField()
topic = models.CharField(max_length=100)
content = models.TextField()
correct_answer = models.CharField(max_length=50)
class Answer(models.Model):
content = models.TextField()
user = models.CharField(max_length = 100, null = True)
task = models.ForeignKey(
'Task',
on_delete=models.CASCADE,
)
|
normal
|
{
"blob_id": "06e01dce7e2342be994569099ed51d1fe28eea1c",
"index": 5784,
"step-1": "<mask token>\n\n\nclass Answer(models.Model):\n <mask token>\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass Task(models.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\nclass Answer(models.Model):\n content = models.TextField()\n user = models.CharField(max_length=100, null=True)\n task = models.ForeignKey('Task', on_delete=models.CASCADE)\n",
"step-3": "<mask token>\n\n\nclass Task(models.Model):\n level = models.PositiveSmallIntegerField()\n topic = models.CharField(max_length=100)\n content = models.TextField()\n correct_answer = models.CharField(max_length=50)\n\n\nclass Answer(models.Model):\n content = models.TextField()\n user = models.CharField(max_length=100, null=True)\n task = models.ForeignKey('Task', on_delete=models.CASCADE)\n",
"step-4": "from django.db import models\n\n\nclass Task(models.Model):\n level = models.PositiveSmallIntegerField()\n topic = models.CharField(max_length=100)\n content = models.TextField()\n correct_answer = models.CharField(max_length=50)\n\n\nclass Answer(models.Model):\n content = models.TextField()\n user = models.CharField(max_length=100, null=True)\n task = models.ForeignKey('Task', on_delete=models.CASCADE)\n",
"step-5": "from django.db import models\n\n# Create your models here.\nclass Task(models.Model):\n level = models.PositiveSmallIntegerField()\n topic = models.CharField(max_length=100)\n content = models.TextField()\n correct_answer = models.CharField(max_length=50)\n\nclass Answer(models.Model):\n content = models.TextField()\n user = models.CharField(max_length = 100, null = True)\n task = models.ForeignKey(\n 'Task',\n on_delete=models.CASCADE,\n )\n",
"step-ids": [
1,
3,
4,
5,
6
]
}
|
[
1,
3,
4,
5,
6
] |
from util import *
def K_step(x):
if not x.shape:
return S.One
assert len(x.shape) == 1
n = x.shape[0]
if n == 2:
return x[1]
return Piecewise((1, Equal(n, 1)),
(x[1], Equal(n, 2)),
(K(x[:n - 1]) * x[n - 1] + K(x[:n - 2]), True))
K = Function.K(integer=True, eval=K_step, shape=())
@apply
def apply(self):
assert self.is_K
x = self.arg
n = x.shape[0]
n -= 2
assert n > 0
return Equal(self, K(x[:n]) + K(x[:n + 1]) * x[n + 1])
@prove
def prove(Eq):
x = Symbol(integer=True, shape=(oo,))
n = Symbol(integer=True, positive=True)
Eq << apply(K(x[:n + 2]))
Eq << Eq[-1].this.lhs.defun()
if __name__ == '__main__':
run()
# created on 2021-08-18
|
normal
|
{
"blob_id": "b00c07ee3cdba55800c9701b7b8b0e3c9079e9f8",
"index": 6272,
"step-1": "<mask token>\n\n\ndef K_step(x):\n if not x.shape:\n return S.One\n assert len(x.shape) == 1\n n = x.shape[0]\n if n == 2:\n return x[1]\n return Piecewise((1, Equal(n, 1)), (x[1], Equal(n, 2)), (K(x[:n - 1]) *\n x[n - 1] + K(x[:n - 2]), True))\n\n\n<mask token>\n\n\n@apply\ndef apply(self):\n assert self.is_K\n x = self.arg\n n = x.shape[0]\n n -= 2\n assert n > 0\n return Equal(self, K(x[:n]) + K(x[:n + 1]) * x[n + 1])\n\n\n@prove\ndef prove(Eq):\n x = Symbol(integer=True, shape=(oo,))\n n = Symbol(integer=True, positive=True)\n Eq << apply(K(x[:n + 2]))\n Eq << Eq[-1].this.lhs.defun()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef K_step(x):\n if not x.shape:\n return S.One\n assert len(x.shape) == 1\n n = x.shape[0]\n if n == 2:\n return x[1]\n return Piecewise((1, Equal(n, 1)), (x[1], Equal(n, 2)), (K(x[:n - 1]) *\n x[n - 1] + K(x[:n - 2]), True))\n\n\n<mask token>\n\n\n@apply\ndef apply(self):\n assert self.is_K\n x = self.arg\n n = x.shape[0]\n n -= 2\n assert n > 0\n return Equal(self, K(x[:n]) + K(x[:n + 1]) * x[n + 1])\n\n\n@prove\ndef prove(Eq):\n x = Symbol(integer=True, shape=(oo,))\n n = Symbol(integer=True, positive=True)\n Eq << apply(K(x[:n + 2]))\n Eq << Eq[-1].this.lhs.defun()\n\n\nif __name__ == '__main__':\n run()\n",
"step-3": "<mask token>\n\n\ndef K_step(x):\n if not x.shape:\n return S.One\n assert len(x.shape) == 1\n n = x.shape[0]\n if n == 2:\n return x[1]\n return Piecewise((1, Equal(n, 1)), (x[1], Equal(n, 2)), (K(x[:n - 1]) *\n x[n - 1] + K(x[:n - 2]), True))\n\n\nK = Function.K(integer=True, eval=K_step, shape=())\n\n\n@apply\ndef apply(self):\n assert self.is_K\n x = self.arg\n n = x.shape[0]\n n -= 2\n assert n > 0\n return Equal(self, K(x[:n]) + K(x[:n + 1]) * x[n + 1])\n\n\n@prove\ndef prove(Eq):\n x = Symbol(integer=True, shape=(oo,))\n n = Symbol(integer=True, positive=True)\n Eq << apply(K(x[:n + 2]))\n Eq << Eq[-1].this.lhs.defun()\n\n\nif __name__ == '__main__':\n run()\n",
"step-4": "from util import *\n\n\ndef K_step(x):\n if not x.shape:\n return S.One\n assert len(x.shape) == 1\n n = x.shape[0]\n if n == 2:\n return x[1]\n return Piecewise((1, Equal(n, 1)), (x[1], Equal(n, 2)), (K(x[:n - 1]) *\n x[n - 1] + K(x[:n - 2]), True))\n\n\nK = Function.K(integer=True, eval=K_step, shape=())\n\n\n@apply\ndef apply(self):\n assert self.is_K\n x = self.arg\n n = x.shape[0]\n n -= 2\n assert n > 0\n return Equal(self, K(x[:n]) + K(x[:n + 1]) * x[n + 1])\n\n\n@prove\ndef prove(Eq):\n x = Symbol(integer=True, shape=(oo,))\n n = Symbol(integer=True, positive=True)\n Eq << apply(K(x[:n + 2]))\n Eq << Eq[-1].this.lhs.defun()\n\n\nif __name__ == '__main__':\n run()\n",
"step-5": "from util import *\n\n\n\ndef K_step(x):\n if not x.shape:\n return S.One\n assert len(x.shape) == 1\n n = x.shape[0]\n if n == 2:\n return x[1]\n return Piecewise((1, Equal(n, 1)),\n (x[1], Equal(n, 2)),\n (K(x[:n - 1]) * x[n - 1] + K(x[:n - 2]), True))\n\n\nK = Function.K(integer=True, eval=K_step, shape=())\n\n\n@apply\ndef apply(self):\n assert self.is_K\n x = self.arg\n n = x.shape[0]\n n -= 2\n assert n > 0\n\n return Equal(self, K(x[:n]) + K(x[:n + 1]) * x[n + 1])\n\n\n@prove\ndef prove(Eq):\n x = Symbol(integer=True, shape=(oo,))\n n = Symbol(integer=True, positive=True)\n\n Eq << apply(K(x[:n + 2]))\n\n Eq << Eq[-1].this.lhs.defun()\n\n\nif __name__ == '__main__':\n run()\n# created on 2021-08-18\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
from math import *
def eval_loop():
line = input('Please enter a sting')
while True:
if line == 'done':
break
else:
output = eval(line)
print(output)
line = input('Please enter a sting')
eval_loop()
|
normal
|
{
"blob_id": "b0062dde448c450131f578a2afe130ca663f0902",
"index": 2041,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef eval_loop():\n line = input('Please enter a sting')\n while True:\n if line == 'done':\n break\n else:\n output = eval(line)\n print(output)\n line = input('Please enter a sting')\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef eval_loop():\n line = input('Please enter a sting')\n while True:\n if line == 'done':\n break\n else:\n output = eval(line)\n print(output)\n line = input('Please enter a sting')\n\n\neval_loop()\n",
"step-4": "from math import *\n\n\ndef eval_loop():\n line = input('Please enter a sting')\n while True:\n if line == 'done':\n break\n else:\n output = eval(line)\n print(output)\n line = input('Please enter a sting')\n\n\neval_loop()\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
#!/usr/bin/env python3
"""
Python class to access Netonix® WISP Switch WebAPI
** NEITHER THIS CODE NOR THE AUTHOR IS ASSOCIATED WITH NETONIX® IN ANY WAY.**
This is free and unencumbered software released into the public domain.
Anyone is free to copy, modify, publish, use, compile, sell, or
distribute this software, either in source code form or as a compiled
binary, for any purpose, commercial or non-commercial, and by any
means.
In jurisdictions that recognize copyright laws, the author or authors
of this software dedicate any and all copyright interest in the
software to the public domain. We make this dedication for the benefit
of the public at large and to the detriment of our heirs and
successors. We intend this dedication to be an overt act of
relinquishment in perpetuity of all present and future rights to this
software under copyright law.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
For more information, please refer to <http://unlicense.org/>
"""
import requests
from requests.exceptions import Timeout
from copy import deepcopy
import time
import json
try:
from deepdiff import DeepDiff
DIFF = True
except:
DIFF = False
class Netonix():
def __init__(self):
self.ip = None
self.s = None
self.url = {}
self.url["login"] = "/index.php"
self.url["backup"] = "/api/v1/backup"
self.url["config"] = "/api/v1/config"
self.url["apply"] = "/api/v1/apply"
self.url["confirm"] = "/api/v1/applystatus"
self.url["reboot"] = "/api/v1/reboot"
self.url["restore"] = "/api/v1/restore"
self.url["mac"] = "/api/v1/mactable"
self.url["status"] = "/api/v1/status/30sec"
self.url["id"] = "/api/v1/bootid"
self.url["update"] = "/api/v1/uploadfirmware"
self.url["doupdate"] = "/api/v1/upgradefirmware"
self.config = {}
self.orig_config = None
self.mac = {}
self.status = {}
self.id = ""
def _get(self, url, params=None, timeout=15, **kwargs):
full_url = "https://"+self.ip+self.url[url]
return self.s.get(full_url, params=params, timeout=timeout, **kwargs)
def _post(self, url, data=None, json=None, timeout=15, **kwargs):
full_url = "https://"+self.ip+self.url[url]
return self.s.post(
full_url,
data=data,
json=json,
timeout=timeout,
**kwargs
)
@staticmethod
def _merge_by_key(old, new, key="Number", append=True):
for item in new:
found = False
for old_item in old:
if(key not in old_item):
continue
if(old_item[key] != item[key]):
continue
old_item.update(item)
found = True
break
if(found is False):
if(append is True):
old_item.append(new)
else:
raise LookupError()
def open(self, ip, user, password):
self.ip = ip
self.s = requests.session()
self.s.verify = False
data = {}
data["username"] = user
data["password"] = password
r = self._post("login", data)
if("Invalid username or password" in r.text):
raise Exception("Invalid username or password")
def getConfig(self):
r = self._get("config")
result = r.json()
if("Config_Version" in result):
self.config = result
def putConfig(self):
r = self._post("config", json=self.config)
try:
r = self._post("apply")
except Timeout:
pass
self.ip = self.config["IPv4_Address"]
for a in range(5):
try:
r = self._post("confirm")
except Timeout:
continue
break
if(r.status_code != requests.codes.ok):
raise Exception("Config Confirm Request Failed")
# return r.json()
def backup(self, output):
r = self.s.get("https://"+self.ip+self.url["backup"]+"/"+self.ip)
if(r.status_code != requests.codes.ok):
raise Exception("Backup Request Failed")
newFile = open(output, "wb")
newFile.write(r.content)
newFile.close()
def restore(self, i):
raise Exception("the restore method is still untested.")
newFile = open(i, "rb")
data = ""
for a in newFile:
data += a
newFile.close()
r = self._post("restore", data)
print(r.json())
if(r.status_code != requests.codes.ok):
raise Exception("Restore Request Failed")
r = self._get("reboot")
return r.json()
def getMAC(self):
r = self._get("mac", timeout=60)
if(r.status_code != requests.codes.ok):
raise Exception("Action failed")
self.mac = r.json()["MACTable"]
def getID(self):
r = self._get("id", params={"_": time.time()})
if(r.status_code != requests.codes.ok):
raise Exception("Action failed")
self.id = r.json()["BootID"]
def getStatus(self):
if(self.id == ""):
self.getID()
r = self.s.get("https://"+self.ip+self.url["status"]+"?%s&_=%d" % (self.id, time.time()))
if(r.status_code != requests.codes.ok):
raise Exception("Action failed")
self.status = r.json()
def update(self, i):
data = ""
with open(i, mode='rb') as file: # b is important -> binary
data = file.read()
r = self._post("update", data)
if(r.status_code != requests.codes.ok):
raise Exception("Firmware Upload Failed")
r = self._get("doupdate")
if(r.status_code != requests.codes.ok):
raise Exception("Update Request Failed")
def mergeConfig(self, config):
self.orig_config = deepcopy(self.config)
for k, v in config.items():
if(k == "Ports"):
self._merge_by_key(self.config[k], v, key="Number")
continue
if(k == "LACP"):
self._merge_by_key(self.config[k], v, key="Port")
continue
if(k == "VLANs"):
self._merge_by_key(self.config[k], v, key="ID")
continue
if(type(v) is dict):
continue
if(type(v) is list):
self.config[k] += v
continue
self.config[k] = v
def replaceConfig(self, config):
self.orig_config = deepcopy(self.config)
if("Config_Version" in config):
del config["Config_Version"]
self.config.update(config)
def getDiff(self):
if(self.orig_config is None):
return {}
if(DIFF is False):
raise ImportError("Missing DeepDiff Module")
return DeepDiff(
self.orig_config,
self.config,
exclude_paths="root['Config_Version']"
)
if __name__ == '__main__':
import getpass
import urllib3
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
ip = str(input("switch ip:"))
user = str(input("user:"))
pw = getpass.getpass("password:")
n = Netonix()
n.open(ip, user, pw)
n.getMAC()
print(json.dumps(n.mac, indent=4))
n.getMAC()
print(json.dumps(n.mac, indent=4))
|
normal
|
{
"blob_id": "743d261052e4532c1304647501719ad897224b4e",
"index": 8991,
"step-1": "<mask token>\n\n\nclass Netonix:\n <mask token>\n\n def _get(self, url, params=None, timeout=15, **kwargs):\n full_url = 'https://' + self.ip + self.url[url]\n return self.s.get(full_url, params=params, timeout=timeout, **kwargs)\n <mask token>\n\n @staticmethod\n def _merge_by_key(old, new, key='Number', append=True):\n for item in new:\n found = False\n for old_item in old:\n if key not in old_item:\n continue\n if old_item[key] != item[key]:\n continue\n old_item.update(item)\n found = True\n break\n if found is False:\n if append is True:\n old_item.append(new)\n else:\n raise LookupError()\n\n def open(self, ip, user, password):\n self.ip = ip\n self.s = requests.session()\n self.s.verify = False\n data = {}\n data['username'] = user\n data['password'] = password\n r = self._post('login', data)\n if 'Invalid username or password' in r.text:\n raise Exception('Invalid username or password')\n <mask token>\n <mask token>\n <mask token>\n\n def restore(self, i):\n raise Exception('the restore method is still untested.')\n newFile = open(i, 'rb')\n data = ''\n for a in newFile:\n data += a\n newFile.close()\n r = self._post('restore', data)\n print(r.json())\n if r.status_code != requests.codes.ok:\n raise Exception('Restore Request Failed')\n r = self._get('reboot')\n return r.json()\n <mask token>\n <mask token>\n\n def getStatus(self):\n if self.id == '':\n self.getID()\n r = self.s.get('https://' + self.ip + self.url['status'] + \n '?%s&_=%d' % (self.id, time.time()))\n if r.status_code != requests.codes.ok:\n raise Exception('Action failed')\n self.status = r.json()\n\n def update(self, i):\n data = ''\n with open(i, mode='rb') as file:\n data = file.read()\n r = self._post('update', data)\n if r.status_code != requests.codes.ok:\n raise Exception('Firmware Upload Failed')\n r = self._get('doupdate')\n if r.status_code != requests.codes.ok:\n raise Exception('Update Request Failed')\n\n def mergeConfig(self, config):\n self.orig_config = deepcopy(self.config)\n for k, v in config.items():\n if k == 'Ports':\n self._merge_by_key(self.config[k], v, key='Number')\n continue\n if k == 'LACP':\n self._merge_by_key(self.config[k], v, key='Port')\n continue\n if k == 'VLANs':\n self._merge_by_key(self.config[k], v, key='ID')\n continue\n if type(v) is dict:\n continue\n if type(v) is list:\n self.config[k] += v\n continue\n self.config[k] = v\n\n def replaceConfig(self, config):\n self.orig_config = deepcopy(self.config)\n if 'Config_Version' in config:\n del config['Config_Version']\n self.config.update(config)\n <mask token>\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Netonix:\n <mask token>\n\n def _get(self, url, params=None, timeout=15, **kwargs):\n full_url = 'https://' + self.ip + self.url[url]\n return self.s.get(full_url, params=params, timeout=timeout, **kwargs)\n <mask token>\n\n @staticmethod\n def _merge_by_key(old, new, key='Number', append=True):\n for item in new:\n found = False\n for old_item in old:\n if key not in old_item:\n continue\n if old_item[key] != item[key]:\n continue\n old_item.update(item)\n found = True\n break\n if found is False:\n if append is True:\n old_item.append(new)\n else:\n raise LookupError()\n\n def open(self, ip, user, password):\n self.ip = ip\n self.s = requests.session()\n self.s.verify = False\n data = {}\n data['username'] = user\n data['password'] = password\n r = self._post('login', data)\n if 'Invalid username or password' in r.text:\n raise Exception('Invalid username or password')\n\n def getConfig(self):\n r = self._get('config')\n result = r.json()\n if 'Config_Version' in result:\n self.config = result\n <mask token>\n\n def backup(self, output):\n r = self.s.get('https://' + self.ip + self.url['backup'] + '/' +\n self.ip)\n if r.status_code != requests.codes.ok:\n raise Exception('Backup Request Failed')\n newFile = open(output, 'wb')\n newFile.write(r.content)\n newFile.close()\n\n def restore(self, i):\n raise Exception('the restore method is still untested.')\n newFile = open(i, 'rb')\n data = ''\n for a in newFile:\n data += a\n newFile.close()\n r = self._post('restore', data)\n print(r.json())\n if r.status_code != requests.codes.ok:\n raise Exception('Restore Request Failed')\n r = self._get('reboot')\n return r.json()\n <mask token>\n <mask token>\n\n def getStatus(self):\n if self.id == '':\n self.getID()\n r = self.s.get('https://' + self.ip + self.url['status'] + \n '?%s&_=%d' % (self.id, time.time()))\n if r.status_code != requests.codes.ok:\n raise Exception('Action failed')\n self.status = r.json()\n\n def update(self, i):\n data = ''\n with open(i, mode='rb') as file:\n data = file.read()\n r = self._post('update', data)\n if r.status_code != requests.codes.ok:\n raise Exception('Firmware Upload Failed')\n r = self._get('doupdate')\n if r.status_code != requests.codes.ok:\n raise Exception('Update Request Failed')\n\n def mergeConfig(self, config):\n self.orig_config = deepcopy(self.config)\n for k, v in config.items():\n if k == 'Ports':\n self._merge_by_key(self.config[k], v, key='Number')\n continue\n if k == 'LACP':\n self._merge_by_key(self.config[k], v, key='Port')\n continue\n if k == 'VLANs':\n self._merge_by_key(self.config[k], v, key='ID')\n continue\n if type(v) is dict:\n continue\n if type(v) is list:\n self.config[k] += v\n continue\n self.config[k] = v\n\n def replaceConfig(self, config):\n self.orig_config = deepcopy(self.config)\n if 'Config_Version' in config:\n del config['Config_Version']\n self.config.update(config)\n <mask token>\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Netonix:\n <mask token>\n\n def _get(self, url, params=None, timeout=15, **kwargs):\n full_url = 'https://' + self.ip + self.url[url]\n return self.s.get(full_url, params=params, timeout=timeout, **kwargs)\n <mask token>\n\n @staticmethod\n def _merge_by_key(old, new, key='Number', append=True):\n for item in new:\n found = False\n for old_item in old:\n if key not in old_item:\n continue\n if old_item[key] != item[key]:\n continue\n old_item.update(item)\n found = True\n break\n if found is False:\n if append is True:\n old_item.append(new)\n else:\n raise LookupError()\n\n def open(self, ip, user, password):\n self.ip = ip\n self.s = requests.session()\n self.s.verify = False\n data = {}\n data['username'] = user\n data['password'] = password\n r = self._post('login', data)\n if 'Invalid username or password' in r.text:\n raise Exception('Invalid username or password')\n\n def getConfig(self):\n r = self._get('config')\n result = r.json()\n if 'Config_Version' in result:\n self.config = result\n <mask token>\n\n def backup(self, output):\n r = self.s.get('https://' + self.ip + self.url['backup'] + '/' +\n self.ip)\n if r.status_code != requests.codes.ok:\n raise Exception('Backup Request Failed')\n newFile = open(output, 'wb')\n newFile.write(r.content)\n newFile.close()\n\n def restore(self, i):\n raise Exception('the restore method is still untested.')\n newFile = open(i, 'rb')\n data = ''\n for a in newFile:\n data += a\n newFile.close()\n r = self._post('restore', data)\n print(r.json())\n if r.status_code != requests.codes.ok:\n raise Exception('Restore Request Failed')\n r = self._get('reboot')\n return r.json()\n <mask token>\n\n def getID(self):\n r = self._get('id', params={'_': time.time()})\n if r.status_code != requests.codes.ok:\n raise Exception('Action failed')\n self.id = r.json()['BootID']\n\n def getStatus(self):\n if self.id == '':\n self.getID()\n r = self.s.get('https://' + self.ip + self.url['status'] + \n '?%s&_=%d' % (self.id, time.time()))\n if r.status_code != requests.codes.ok:\n raise Exception('Action failed')\n self.status = r.json()\n\n def update(self, i):\n data = ''\n with open(i, mode='rb') as file:\n data = file.read()\n r = self._post('update', data)\n if r.status_code != requests.codes.ok:\n raise Exception('Firmware Upload Failed')\n r = self._get('doupdate')\n if r.status_code != requests.codes.ok:\n raise Exception('Update Request Failed')\n\n def mergeConfig(self, config):\n self.orig_config = deepcopy(self.config)\n for k, v in config.items():\n if k == 'Ports':\n self._merge_by_key(self.config[k], v, key='Number')\n continue\n if k == 'LACP':\n self._merge_by_key(self.config[k], v, key='Port')\n continue\n if k == 'VLANs':\n self._merge_by_key(self.config[k], v, key='ID')\n continue\n if type(v) is dict:\n continue\n if type(v) is list:\n self.config[k] += v\n continue\n self.config[k] = v\n\n def replaceConfig(self, config):\n self.orig_config = deepcopy(self.config)\n if 'Config_Version' in config:\n del config['Config_Version']\n self.config.update(config)\n\n def getDiff(self):\n if self.orig_config is None:\n return {}\n if DIFF is False:\n raise ImportError('Missing DeepDiff Module')\n return DeepDiff(self.orig_config, self.config, exclude_paths=\n \"root['Config_Version']\")\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass Netonix:\n\n def __init__(self):\n self.ip = None\n self.s = None\n self.url = {}\n self.url['login'] = '/index.php'\n self.url['backup'] = '/api/v1/backup'\n self.url['config'] = '/api/v1/config'\n self.url['apply'] = '/api/v1/apply'\n self.url['confirm'] = '/api/v1/applystatus'\n self.url['reboot'] = '/api/v1/reboot'\n self.url['restore'] = '/api/v1/restore'\n self.url['mac'] = '/api/v1/mactable'\n self.url['status'] = '/api/v1/status/30sec'\n self.url['id'] = '/api/v1/bootid'\n self.url['update'] = '/api/v1/uploadfirmware'\n self.url['doupdate'] = '/api/v1/upgradefirmware'\n self.config = {}\n self.orig_config = None\n self.mac = {}\n self.status = {}\n self.id = ''\n\n def _get(self, url, params=None, timeout=15, **kwargs):\n full_url = 'https://' + self.ip + self.url[url]\n return self.s.get(full_url, params=params, timeout=timeout, **kwargs)\n <mask token>\n\n @staticmethod\n def _merge_by_key(old, new, key='Number', append=True):\n for item in new:\n found = False\n for old_item in old:\n if key not in old_item:\n continue\n if old_item[key] != item[key]:\n continue\n old_item.update(item)\n found = True\n break\n if found is False:\n if append is True:\n old_item.append(new)\n else:\n raise LookupError()\n\n def open(self, ip, user, password):\n self.ip = ip\n self.s = requests.session()\n self.s.verify = False\n data = {}\n data['username'] = user\n data['password'] = password\n r = self._post('login', data)\n if 'Invalid username or password' in r.text:\n raise Exception('Invalid username or password')\n\n def getConfig(self):\n r = self._get('config')\n result = r.json()\n if 'Config_Version' in result:\n self.config = result\n <mask token>\n\n def backup(self, output):\n r = self.s.get('https://' + self.ip + self.url['backup'] + '/' +\n self.ip)\n if r.status_code != requests.codes.ok:\n raise Exception('Backup Request Failed')\n newFile = open(output, 'wb')\n newFile.write(r.content)\n newFile.close()\n\n def restore(self, i):\n raise Exception('the restore method is still untested.')\n newFile = open(i, 'rb')\n data = ''\n for a in newFile:\n data += a\n newFile.close()\n r = self._post('restore', data)\n print(r.json())\n if r.status_code != requests.codes.ok:\n raise Exception('Restore Request Failed')\n r = self._get('reboot')\n return r.json()\n <mask token>\n\n def getID(self):\n r = self._get('id', params={'_': time.time()})\n if r.status_code != requests.codes.ok:\n raise Exception('Action failed')\n self.id = r.json()['BootID']\n\n def getStatus(self):\n if self.id == '':\n self.getID()\n r = self.s.get('https://' + self.ip + self.url['status'] + \n '?%s&_=%d' % (self.id, time.time()))\n if r.status_code != requests.codes.ok:\n raise Exception('Action failed')\n self.status = r.json()\n\n def update(self, i):\n data = ''\n with open(i, mode='rb') as file:\n data = file.read()\n r = self._post('update', data)\n if r.status_code != requests.codes.ok:\n raise Exception('Firmware Upload Failed')\n r = self._get('doupdate')\n if r.status_code != requests.codes.ok:\n raise Exception('Update Request Failed')\n\n def mergeConfig(self, config):\n self.orig_config = deepcopy(self.config)\n for k, v in config.items():\n if k == 'Ports':\n self._merge_by_key(self.config[k], v, key='Number')\n continue\n if k == 'LACP':\n self._merge_by_key(self.config[k], v, key='Port')\n continue\n if k == 'VLANs':\n self._merge_by_key(self.config[k], v, key='ID')\n continue\n if type(v) is dict:\n continue\n if type(v) is list:\n self.config[k] += v\n 
continue\n self.config[k] = v\n\n def replaceConfig(self, config):\n self.orig_config = deepcopy(self.config)\n if 'Config_Version' in config:\n del config['Config_Version']\n self.config.update(config)\n\n def getDiff(self):\n if self.orig_config is None:\n return {}\n if DIFF is False:\n raise ImportError('Missing DeepDiff Module')\n return DeepDiff(self.orig_config, self.config, exclude_paths=\n \"root['Config_Version']\")\n\n\n<mask token>\n",
"step-5": "#!/usr/bin/env python3\n\"\"\"\nPython class to access Netonix® WISP Switch WebAPI\n\n** NEITHER THIS CODE NOR THE AUTHOR IS ASSOCIATED WITH NETONIX® IN ANY WAY.**\n\nThis is free and unencumbered software released into the public domain.\n\nAnyone is free to copy, modify, publish, use, compile, sell, or\ndistribute this software, either in source code form or as a compiled\nbinary, for any purpose, commercial or non-commercial, and by any\nmeans.\n\nIn jurisdictions that recognize copyright laws, the author or authors\nof this software dedicate any and all copyright interest in the\nsoftware to the public domain. We make this dedication for the benefit\nof the public at large and to the detriment of our heirs and\nsuccessors. We intend this dedication to be an overt act of\nrelinquishment in perpetuity of all present and future rights to this\nsoftware under copyright law.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,\nEXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\nMERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.\nIN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR\nOTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,\nARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR\nOTHER DEALINGS IN THE SOFTWARE.\n\nFor more information, please refer to <http://unlicense.org/>\n\"\"\"\n\nimport requests\nfrom requests.exceptions import Timeout\nfrom copy import deepcopy\nimport time\nimport json\ntry:\n from deepdiff import DeepDiff\n DIFF = True\nexcept:\n DIFF = False\n\nclass Netonix():\n def __init__(self):\n self.ip = None\n self.s = None\n self.url = {}\n self.url[\"login\"] = \"/index.php\"\n self.url[\"backup\"] = \"/api/v1/backup\"\n self.url[\"config\"] = \"/api/v1/config\"\n self.url[\"apply\"] = \"/api/v1/apply\"\n self.url[\"confirm\"] = \"/api/v1/applystatus\"\n self.url[\"reboot\"] = \"/api/v1/reboot\"\n self.url[\"restore\"] = \"/api/v1/restore\"\n self.url[\"mac\"] = \"/api/v1/mactable\"\n self.url[\"status\"] = \"/api/v1/status/30sec\"\n self.url[\"id\"] = \"/api/v1/bootid\"\n self.url[\"update\"] = \"/api/v1/uploadfirmware\"\n self.url[\"doupdate\"] = \"/api/v1/upgradefirmware\"\n self.config = {}\n self.orig_config = None\n self.mac = {}\n self.status = {}\n self.id = \"\"\n\n def _get(self, url, params=None, timeout=15, **kwargs):\n full_url = \"https://\"+self.ip+self.url[url]\n return self.s.get(full_url, params=params, timeout=timeout, **kwargs)\n\n def _post(self, url, data=None, json=None, timeout=15, **kwargs):\n full_url = \"https://\"+self.ip+self.url[url]\n return self.s.post(\n full_url,\n data=data,\n json=json,\n timeout=timeout,\n **kwargs\n )\n\n @staticmethod\n def _merge_by_key(old, new, key=\"Number\", append=True):\n for item in new:\n found = False\n for old_item in old:\n if(key not in old_item):\n continue\n if(old_item[key] != item[key]):\n continue\n old_item.update(item)\n found = True\n break\n if(found is False):\n if(append is True):\n old_item.append(new)\n else:\n raise LookupError()\n\n def open(self, ip, user, password):\n self.ip = ip\n self.s = requests.session()\n self.s.verify = False\n data = {}\n data[\"username\"] = user\n data[\"password\"] = password\n r = self._post(\"login\", data)\n if(\"Invalid username or password\" in r.text):\n raise Exception(\"Invalid username or password\")\n\n def getConfig(self):\n r = self._get(\"config\")\n result = r.json()\n if(\"Config_Version\" in result):\n self.config = 
result\n\n def putConfig(self):\n r = self._post(\"config\", json=self.config)\n try:\n r = self._post(\"apply\")\n except Timeout:\n pass\n self.ip = self.config[\"IPv4_Address\"]\n for a in range(5):\n try:\n r = self._post(\"confirm\")\n except Timeout:\n continue\n break\n if(r.status_code != requests.codes.ok):\n raise Exception(\"Config Confirm Request Failed\")\n # return r.json()\n\n def backup(self, output):\n r = self.s.get(\"https://\"+self.ip+self.url[\"backup\"]+\"/\"+self.ip)\n if(r.status_code != requests.codes.ok):\n raise Exception(\"Backup Request Failed\")\n newFile = open(output, \"wb\")\n newFile.write(r.content)\n newFile.close()\n\n def restore(self, i):\n raise Exception(\"the restore method is still untested.\")\n newFile = open(i, \"rb\")\n data = \"\"\n for a in newFile:\n data += a\n newFile.close()\n r = self._post(\"restore\", data)\n print(r.json())\n if(r.status_code != requests.codes.ok):\n raise Exception(\"Restore Request Failed\")\n r = self._get(\"reboot\")\n return r.json()\n\n def getMAC(self):\n r = self._get(\"mac\", timeout=60)\n if(r.status_code != requests.codes.ok):\n raise Exception(\"Action failed\")\n self.mac = r.json()[\"MACTable\"]\n\n def getID(self):\n r = self._get(\"id\", params={\"_\": time.time()})\n if(r.status_code != requests.codes.ok):\n raise Exception(\"Action failed\")\n self.id = r.json()[\"BootID\"]\n\n def getStatus(self):\n if(self.id == \"\"):\n self.getID()\n r = self.s.get(\"https://\"+self.ip+self.url[\"status\"]+\"?%s&_=%d\" % (self.id, time.time()))\n if(r.status_code != requests.codes.ok):\n raise Exception(\"Action failed\")\n self.status = r.json()\n\n def update(self, i):\n data = \"\"\n with open(i, mode='rb') as file: # b is important -> binary\n data = file.read()\n r = self._post(\"update\", data)\n if(r.status_code != requests.codes.ok):\n raise Exception(\"Firmware Upload Failed\")\n r = self._get(\"doupdate\")\n if(r.status_code != requests.codes.ok):\n raise Exception(\"Update Request Failed\")\n\n def mergeConfig(self, config):\n self.orig_config = deepcopy(self.config)\n\n for k, v in config.items():\n if(k == \"Ports\"):\n self._merge_by_key(self.config[k], v, key=\"Number\")\n continue\n if(k == \"LACP\"):\n self._merge_by_key(self.config[k], v, key=\"Port\")\n continue\n if(k == \"VLANs\"):\n self._merge_by_key(self.config[k], v, key=\"ID\")\n continue\n if(type(v) is dict):\n continue\n if(type(v) is list):\n self.config[k] += v\n continue\n self.config[k] = v\n\n def replaceConfig(self, config):\n self.orig_config = deepcopy(self.config)\n\n if(\"Config_Version\" in config):\n del config[\"Config_Version\"]\n self.config.update(config)\n\n def getDiff(self):\n if(self.orig_config is None):\n return {}\n if(DIFF is False):\n raise ImportError(\"Missing DeepDiff Module\")\n return DeepDiff(\n self.orig_config,\n self.config,\n exclude_paths=\"root['Config_Version']\"\n )\n\n\nif __name__ == '__main__':\n import getpass\n import urllib3\n\n urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)\n ip = str(input(\"switch ip:\"))\n user = str(input(\"user:\"))\n pw = getpass.getpass(\"password:\")\n n = Netonix()\n n.open(ip, user, pw)\n n.getMAC()\n print(json.dumps(n.mac, indent=4))\n n.getMAC()\n print(json.dumps(n.mac, indent=4))\n",
"step-ids": [
9,
11,
13,
14,
20
]
}
|
[
9,
11,
13,
14,
20
] |
#Importing the libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
#importing the data
dataset=pd.read_csv('Social_Network_Ads.csv')
X=dataset.iloc[:,0:2].values
y=dataset.iloc[:,2].values
#spiliting the data into training data and testing data
from sklearn.model_selection import train_test_split
X_train,X_test,y_train,y_test=train_test_split(X,y)
#feature Scaling to improve the predictions
from sklearn.preprocessing import StandardScaler
sc=StandardScaler()
X_train=sc.fit_transform(X_train)
X_test=sc.transform(X_test)
#training the logistic regression on the model
from sklearn.linear_model import LogisticRegression
log=LogisticRegression()
log.fit(X_train,y_train)
#predicting the new result
log.predict(sc.transform([[45,87000]]))
#predicting the test set results
y_pred=log.predict(X_test)
np.set_printoptions(precision=2)
np.concatenate((y_pred.reshape(len(y_pred),1), y_test.reshape(len(y_test),1)),1)
#confusion matrix
from sklearn.metrics import confusion_matrix
confusion_matrix(y_test,y_pred)
#accuracy score
from sklearn.metrics import accuracy_score
accuracy_score(y_test,y_pred)
|
normal
|
{
"blob_id": "149f8b453786ec54668a55ec349ac157d2b93b5d",
"index": 2397,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nlog.fit(X_train, y_train)\nlog.predict(sc.transform([[45, 87000]]))\n<mask token>\nnp.set_printoptions(precision=2)\nnp.concatenate((y_pred.reshape(len(y_pred), 1), y_test.reshape(len(y_test),\n 1)), 1)\n<mask token>\nconfusion_matrix(y_test, y_pred)\n<mask token>\naccuracy_score(y_test, y_pred)\n",
"step-3": "<mask token>\ndataset = pd.read_csv('Social_Network_Ads.csv')\nX = dataset.iloc[:, 0:2].values\ny = dataset.iloc[:, 2].values\n<mask token>\nX_train, X_test, y_train, y_test = train_test_split(X, y)\n<mask token>\nsc = StandardScaler()\nX_train = sc.fit_transform(X_train)\nX_test = sc.transform(X_test)\n<mask token>\nlog = LogisticRegression()\nlog.fit(X_train, y_train)\nlog.predict(sc.transform([[45, 87000]]))\ny_pred = log.predict(X_test)\nnp.set_printoptions(precision=2)\nnp.concatenate((y_pred.reshape(len(y_pred), 1), y_test.reshape(len(y_test),\n 1)), 1)\n<mask token>\nconfusion_matrix(y_test, y_pred)\n<mask token>\naccuracy_score(y_test, y_pred)\n",
"step-4": "import numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\ndataset = pd.read_csv('Social_Network_Ads.csv')\nX = dataset.iloc[:, 0:2].values\ny = dataset.iloc[:, 2].values\nfrom sklearn.model_selection import train_test_split\nX_train, X_test, y_train, y_test = train_test_split(X, y)\nfrom sklearn.preprocessing import StandardScaler\nsc = StandardScaler()\nX_train = sc.fit_transform(X_train)\nX_test = sc.transform(X_test)\nfrom sklearn.linear_model import LogisticRegression\nlog = LogisticRegression()\nlog.fit(X_train, y_train)\nlog.predict(sc.transform([[45, 87000]]))\ny_pred = log.predict(X_test)\nnp.set_printoptions(precision=2)\nnp.concatenate((y_pred.reshape(len(y_pred), 1), y_test.reshape(len(y_test),\n 1)), 1)\nfrom sklearn.metrics import confusion_matrix\nconfusion_matrix(y_test, y_pred)\nfrom sklearn.metrics import accuracy_score\naccuracy_score(y_test, y_pred)\n",
"step-5": "#Importing the libraries\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport pandas as pd\r\n\r\n#importing the data\r\ndataset=pd.read_csv('Social_Network_Ads.csv')\r\nX=dataset.iloc[:,0:2].values\r\ny=dataset.iloc[:,2].values\r\n\r\n#spiliting the data into training data and testing data\r\nfrom sklearn.model_selection import train_test_split\r\nX_train,X_test,y_train,y_test=train_test_split(X,y)\r\n\r\n#feature Scaling to improve the predictions \r\nfrom sklearn.preprocessing import StandardScaler\r\nsc=StandardScaler()\r\nX_train=sc.fit_transform(X_train)\r\nX_test=sc.transform(X_test)\r\n\r\n#training the logistic regression on the model\r\nfrom sklearn.linear_model import LogisticRegression\r\nlog=LogisticRegression()\r\nlog.fit(X_train,y_train)\r\n\r\n#predicting the new result\r\nlog.predict(sc.transform([[45,87000]]))\r\n\r\n#predicting the test set results\r\ny_pred=log.predict(X_test)\r\nnp.set_printoptions(precision=2)\r\nnp.concatenate((y_pred.reshape(len(y_pred),1), y_test.reshape(len(y_test),1)),1)\r\n\r\n#confusion matrix\r\nfrom sklearn.metrics import confusion_matrix\r\nconfusion_matrix(y_test,y_pred)\r\n\r\n#accuracy score\r\nfrom sklearn.metrics import accuracy_score\r\naccuracy_score(y_test,y_pred)",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import matplotlib.pyplot as plt
class Scatter:
def __init__(self, values, ylabel, title):
self.values = values
self.range = list(range(len(values)))
self.ylabel = ylabel
self.title = title
def plot(self):
fig = plt.figure()
ax = fig.add_axes([0, 0, 1, 1])
ax.scatter(self.range, self.values, color='r', s=1)
ax.set_xlabel('Days')
ax.set_ylabel(self.ylabel)
ax.set_title(self.title)
plt.ylim(0, self.values[-1])
plt.show()
class Pie:
def __init__(self, values, labels, title):
self.style = "fivethirtyeight"
self.values = values
self.labels = labels
self.explode = [0 for i in range(len(values))]
self.title = title
def plot(self):
plt.style.use(self.style)
plt.pie(self.values, labels=self.labels, explode=self.explode, shadow=True,
startangle=90, autopct='%1.1f%%',
wedgeprops={'edgecolor': 'black'})
plt.title(self.title)
plt.tight_layout()
plt.show()
class Column:
pass
|
normal
|
{
"blob_id": "58385a7713a8f88925ced714d25f1522bc7e39d8",
"index": 1181,
"step-1": "<mask token>\n\n\nclass Scatter:\n <mask token>\n <mask token>\n\n\nclass Pie:\n\n def __init__(self, values, labels, title):\n self.style = 'fivethirtyeight'\n self.values = values\n self.labels = labels\n self.explode = [(0) for i in range(len(values))]\n self.title = title\n\n def plot(self):\n plt.style.use(self.style)\n plt.pie(self.values, labels=self.labels, explode=self.explode,\n shadow=True, startangle=90, autopct='%1.1f%%', wedgeprops={\n 'edgecolor': 'black'})\n plt.title(self.title)\n plt.tight_layout()\n plt.show()\n\n\nclass Column:\n pass\n",
"step-2": "<mask token>\n\n\nclass Scatter:\n\n def __init__(self, values, ylabel, title):\n self.values = values\n self.range = list(range(len(values)))\n self.ylabel = ylabel\n self.title = title\n <mask token>\n\n\nclass Pie:\n\n def __init__(self, values, labels, title):\n self.style = 'fivethirtyeight'\n self.values = values\n self.labels = labels\n self.explode = [(0) for i in range(len(values))]\n self.title = title\n\n def plot(self):\n plt.style.use(self.style)\n plt.pie(self.values, labels=self.labels, explode=self.explode,\n shadow=True, startangle=90, autopct='%1.1f%%', wedgeprops={\n 'edgecolor': 'black'})\n plt.title(self.title)\n plt.tight_layout()\n plt.show()\n\n\nclass Column:\n pass\n",
"step-3": "<mask token>\n\n\nclass Scatter:\n\n def __init__(self, values, ylabel, title):\n self.values = values\n self.range = list(range(len(values)))\n self.ylabel = ylabel\n self.title = title\n\n def plot(self):\n fig = plt.figure()\n ax = fig.add_axes([0, 0, 1, 1])\n ax.scatter(self.range, self.values, color='r', s=1)\n ax.set_xlabel('Days')\n ax.set_ylabel(self.ylabel)\n ax.set_title(self.title)\n plt.ylim(0, self.values[-1])\n plt.show()\n\n\nclass Pie:\n\n def __init__(self, values, labels, title):\n self.style = 'fivethirtyeight'\n self.values = values\n self.labels = labels\n self.explode = [(0) for i in range(len(values))]\n self.title = title\n\n def plot(self):\n plt.style.use(self.style)\n plt.pie(self.values, labels=self.labels, explode=self.explode,\n shadow=True, startangle=90, autopct='%1.1f%%', wedgeprops={\n 'edgecolor': 'black'})\n plt.title(self.title)\n plt.tight_layout()\n plt.show()\n\n\nclass Column:\n pass\n",
"step-4": "import matplotlib.pyplot as plt\n\n\nclass Scatter:\n\n def __init__(self, values, ylabel, title):\n self.values = values\n self.range = list(range(len(values)))\n self.ylabel = ylabel\n self.title = title\n\n def plot(self):\n fig = plt.figure()\n ax = fig.add_axes([0, 0, 1, 1])\n ax.scatter(self.range, self.values, color='r', s=1)\n ax.set_xlabel('Days')\n ax.set_ylabel(self.ylabel)\n ax.set_title(self.title)\n plt.ylim(0, self.values[-1])\n plt.show()\n\n\nclass Pie:\n\n def __init__(self, values, labels, title):\n self.style = 'fivethirtyeight'\n self.values = values\n self.labels = labels\n self.explode = [(0) for i in range(len(values))]\n self.title = title\n\n def plot(self):\n plt.style.use(self.style)\n plt.pie(self.values, labels=self.labels, explode=self.explode,\n shadow=True, startangle=90, autopct='%1.1f%%', wedgeprops={\n 'edgecolor': 'black'})\n plt.title(self.title)\n plt.tight_layout()\n plt.show()\n\n\nclass Column:\n pass\n",
"step-5": "import matplotlib.pyplot as plt\r\n\r\n\r\nclass Scatter:\r\n def __init__(self, values, ylabel, title):\r\n self.values = values\r\n self.range = list(range(len(values)))\r\n self.ylabel = ylabel\r\n self.title = title\r\n\r\n def plot(self):\r\n fig = plt.figure()\r\n ax = fig.add_axes([0, 0, 1, 1])\r\n ax.scatter(self.range, self.values, color='r', s=1)\r\n ax.set_xlabel('Days')\r\n ax.set_ylabel(self.ylabel)\r\n ax.set_title(self.title)\r\n plt.ylim(0, self.values[-1])\r\n plt.show()\r\n\r\n\r\nclass Pie:\r\n def __init__(self, values, labels, title):\r\n self.style = \"fivethirtyeight\"\r\n self.values = values\r\n self.labels = labels\r\n self.explode = [0 for i in range(len(values))]\r\n self.title = title\r\n\r\n def plot(self):\r\n plt.style.use(self.style)\r\n\r\n plt.pie(self.values, labels=self.labels, explode=self.explode, shadow=True,\r\n startangle=90, autopct='%1.1f%%',\r\n wedgeprops={'edgecolor': 'black'})\r\n\r\n plt.title(self.title)\r\n plt.tight_layout()\r\n\r\n plt.show()\r\n\r\n\r\nclass Column:\r\n pass",
"step-ids": [
5,
6,
7,
8,
9
]
}
|
[
5,
6,
7,
8,
9
] |
import torch
import torch.nn as nn
class DehazeNet(nn.Module):
def __init__(self, input=16, groups=4):
super(DehazeNet, self).__init__()
self.conv1 = nn.Conv2d(in_channels=3, out_channels=16, kernel_size=5)
self.relu1 = nn.ReLU()
self.conv2 = nn.Conv2d(in_channels=4, out_channels=16, kernel_size=3, padding=1)
self.relu2 = nn.ReLU()
self.conv3 = nn.Conv2d(in_channels=4, out_channels=16, kernel_size=5, padding=2)
self.relu3 = nn.ReLU()
self.conv4 = nn.Conv2d(in_channels=4, out_channels=16, kernel_size=7, padding=3)
self.relu4 = nn.ReLU()
self.maxpool = nn.MaxPool2d(kernel_size=7, stride=1)
self.conv5 = nn.Conv2d(in_channels=48, out_channels=1, kernel_size=6)
def forward(self, x):
#feature extraction
out = self.conv1(x)
out = self.relu1(out)
#maxout
max_1 = torch.max(out[:,0:4,:,:],out[:,4:8,:,:])
max_2 = torch.max(out[:,8:12,:,:],out[:,12:16,:,:])
out = torch.max(max_1,max_2)
#multi-scale Mapping
out1 = self.conv2(out)
out1 = self.relu2(out1)
out2 = self.conv3(out)
out2 = self.relu3(out2)
out3 = self.conv4(out)
out3 = self.relu4(out3)
y = torch.cat((out1,out2,out3), dim=1)
#Local Extremum
y = self.maxpool(y)
#non-linear Regression
y = self.conv5(y)
y = torch.max(y, torch.zeros(y.shape[0],y.shape[1],y.shape[2],y.shape[3]).cuda())
y = torch.min(y, torch.ones(y.shape[0],y.shape[1],y.shape[2],y.shape[3]).cuda())
return y
|
normal
|
{
"blob_id": "a8cf8d0965cb877d50cee403fbc30f27484f4f36",
"index": 8201,
"step-1": "<mask token>\n\n\nclass DehazeNet(nn.Module):\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass DehazeNet(nn.Module):\n\n def __init__(self, input=16, groups=4):\n super(DehazeNet, self).__init__()\n self.conv1 = nn.Conv2d(in_channels=3, out_channels=16, kernel_size=5)\n self.relu1 = nn.ReLU()\n self.conv2 = nn.Conv2d(in_channels=4, out_channels=16, kernel_size=\n 3, padding=1)\n self.relu2 = nn.ReLU()\n self.conv3 = nn.Conv2d(in_channels=4, out_channels=16, kernel_size=\n 5, padding=2)\n self.relu3 = nn.ReLU()\n self.conv4 = nn.Conv2d(in_channels=4, out_channels=16, kernel_size=\n 7, padding=3)\n self.relu4 = nn.ReLU()\n self.maxpool = nn.MaxPool2d(kernel_size=7, stride=1)\n self.conv5 = nn.Conv2d(in_channels=48, out_channels=1, kernel_size=6)\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass DehazeNet(nn.Module):\n\n def __init__(self, input=16, groups=4):\n super(DehazeNet, self).__init__()\n self.conv1 = nn.Conv2d(in_channels=3, out_channels=16, kernel_size=5)\n self.relu1 = nn.ReLU()\n self.conv2 = nn.Conv2d(in_channels=4, out_channels=16, kernel_size=\n 3, padding=1)\n self.relu2 = nn.ReLU()\n self.conv3 = nn.Conv2d(in_channels=4, out_channels=16, kernel_size=\n 5, padding=2)\n self.relu3 = nn.ReLU()\n self.conv4 = nn.Conv2d(in_channels=4, out_channels=16, kernel_size=\n 7, padding=3)\n self.relu4 = nn.ReLU()\n self.maxpool = nn.MaxPool2d(kernel_size=7, stride=1)\n self.conv5 = nn.Conv2d(in_channels=48, out_channels=1, kernel_size=6)\n\n def forward(self, x):\n out = self.conv1(x)\n out = self.relu1(out)\n max_1 = torch.max(out[:, 0:4, :, :], out[:, 4:8, :, :])\n max_2 = torch.max(out[:, 8:12, :, :], out[:, 12:16, :, :])\n out = torch.max(max_1, max_2)\n out1 = self.conv2(out)\n out1 = self.relu2(out1)\n out2 = self.conv3(out)\n out2 = self.relu3(out2)\n out3 = self.conv4(out)\n out3 = self.relu4(out3)\n y = torch.cat((out1, out2, out3), dim=1)\n y = self.maxpool(y)\n y = self.conv5(y)\n y = torch.max(y, torch.zeros(y.shape[0], y.shape[1], y.shape[2], y.\n shape[3]).cuda())\n y = torch.min(y, torch.ones(y.shape[0], y.shape[1], y.shape[2], y.\n shape[3]).cuda())\n return y\n",
"step-4": "import torch\nimport torch.nn as nn\n\n\nclass DehazeNet(nn.Module):\n\n def __init__(self, input=16, groups=4):\n super(DehazeNet, self).__init__()\n self.conv1 = nn.Conv2d(in_channels=3, out_channels=16, kernel_size=5)\n self.relu1 = nn.ReLU()\n self.conv2 = nn.Conv2d(in_channels=4, out_channels=16, kernel_size=\n 3, padding=1)\n self.relu2 = nn.ReLU()\n self.conv3 = nn.Conv2d(in_channels=4, out_channels=16, kernel_size=\n 5, padding=2)\n self.relu3 = nn.ReLU()\n self.conv4 = nn.Conv2d(in_channels=4, out_channels=16, kernel_size=\n 7, padding=3)\n self.relu4 = nn.ReLU()\n self.maxpool = nn.MaxPool2d(kernel_size=7, stride=1)\n self.conv5 = nn.Conv2d(in_channels=48, out_channels=1, kernel_size=6)\n\n def forward(self, x):\n out = self.conv1(x)\n out = self.relu1(out)\n max_1 = torch.max(out[:, 0:4, :, :], out[:, 4:8, :, :])\n max_2 = torch.max(out[:, 8:12, :, :], out[:, 12:16, :, :])\n out = torch.max(max_1, max_2)\n out1 = self.conv2(out)\n out1 = self.relu2(out1)\n out2 = self.conv3(out)\n out2 = self.relu3(out2)\n out3 = self.conv4(out)\n out3 = self.relu4(out3)\n y = torch.cat((out1, out2, out3), dim=1)\n y = self.maxpool(y)\n y = self.conv5(y)\n y = torch.max(y, torch.zeros(y.shape[0], y.shape[1], y.shape[2], y.\n shape[3]).cuda())\n y = torch.min(y, torch.ones(y.shape[0], y.shape[1], y.shape[2], y.\n shape[3]).cuda())\n return y\n",
"step-5": "import torch\nimport torch.nn as nn\n\nclass DehazeNet(nn.Module):\n def __init__(self, input=16, groups=4):\n super(DehazeNet, self).__init__()\n\n self.conv1 = nn.Conv2d(in_channels=3, out_channels=16, kernel_size=5)\n self.relu1 = nn.ReLU()\n\n self.conv2 = nn.Conv2d(in_channels=4, out_channels=16, kernel_size=3, padding=1)\n self.relu2 = nn.ReLU()\n self.conv3 = nn.Conv2d(in_channels=4, out_channels=16, kernel_size=5, padding=2)\n self.relu3 = nn.ReLU()\n self.conv4 = nn.Conv2d(in_channels=4, out_channels=16, kernel_size=7, padding=3)\n self.relu4 = nn.ReLU()\n self.maxpool = nn.MaxPool2d(kernel_size=7, stride=1)\n self.conv5 = nn.Conv2d(in_channels=48, out_channels=1, kernel_size=6)\n \n \n def forward(self, x):\n #feature extraction\n out = self.conv1(x)\n out = self.relu1(out)\n #maxout\n max_1 = torch.max(out[:,0:4,:,:],out[:,4:8,:,:])\n max_2 = torch.max(out[:,8:12,:,:],out[:,12:16,:,:])\n out = torch.max(max_1,max_2)\n\n #multi-scale Mapping\n out1 = self.conv2(out)\n out1 = self.relu2(out1)\n out2 = self.conv3(out)\n out2 = self.relu3(out2)\n out3 = self.conv4(out)\n out3 = self.relu4(out3)\n y = torch.cat((out1,out2,out3), dim=1)\n #Local Extremum\n y = self.maxpool(y)\n #non-linear Regression\n y = self.conv5(y)\n y = torch.max(y, torch.zeros(y.shape[0],y.shape[1],y.shape[2],y.shape[3]).cuda())\n y = torch.min(y, torch.ones(y.shape[0],y.shape[1],y.shape[2],y.shape[3]).cuda())\n return y",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
import spacy
nlp = spacy.load("en_core_web_sm")
text = (
"Chick-fil-A is an American fast food restaurant chain headquartered in "
"the city of College Park, Georgia, specializing in chicken sandwiches."
)
# Disable the tagger and parser
with ____.____(____):
# Process the text
doc = ____
# Print the entities in the doc
print(____)
|
normal
|
{
"blob_id": "6eecf0ff1ad762089db6e9498e906e68b507370c",
"index": 1875,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwith ____.____(____):\n doc = ____\n print(____)\n",
"step-3": "<mask token>\nnlp = spacy.load('en_core_web_sm')\ntext = (\n 'Chick-fil-A is an American fast food restaurant chain headquartered in the city of College Park, Georgia, specializing in chicken sandwiches.'\n )\nwith ____.____(____):\n doc = ____\n print(____)\n",
"step-4": "import spacy\nnlp = spacy.load('en_core_web_sm')\ntext = (\n 'Chick-fil-A is an American fast food restaurant chain headquartered in the city of College Park, Georgia, specializing in chicken sandwiches.'\n )\nwith ____.____(____):\n doc = ____\n print(____)\n",
"step-5": "import spacy\n\nnlp = spacy.load(\"en_core_web_sm\")\ntext = (\n \"Chick-fil-A is an American fast food restaurant chain headquartered in \"\n \"the city of College Park, Georgia, specializing in chicken sandwiches.\"\n)\n\n# Disable the tagger and parser\nwith ____.____(____):\n # Process the text\n doc = ____\n # Print the entities in the doc\n print(____)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import os
import subprocess
import discord
import asyncio
import traceback
import sys
import ast
from discord.ext import commands
# Import Cogs
from cogs.misc import Miscellaneous
from cogs.serversettings import ServerSettings
from cogs.mod import Moderator
from cogs.automod import AutoMod
from cogs.google import Google
# Minigame/Fun Cogs
from cogs.fun import Fun
#from cogs.hangman import Hangman
#from cogs.rps import RockPaperScissors
from otherscipts.helpers import update_presence
from otherscipts.data import Data
TOKEN = os.getenv('SPARTA_TOKEN')
intents = discord.Intents.default()
intents.members = True
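# Resolve the per-server command prefix from the stored server data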
def get_prefix(client, message):
if str(message.guild.id) not in Data.server_data:
Data.server_data[str(message.guild.id)] = Data.create_new_data()
data = Data.server_data[str(message.guild.id)]
return data["prefix"]
PREFIX = get_prefix
bot = commands.Bot(
command_prefix=PREFIX,
description="I am Sparta Bot, a bot for the Official Sparta Gaming Discord server.",
intents=intents,
help_command=None,
case_insensitive=True
)
THEME_COLOR = discord.Colour.blue()
# Add Cogs
bot.add_cog(Miscellaneous(bot, THEME_COLOR))
bot.add_cog(ServerSettings(bot, THEME_COLOR))
bot.add_cog(Moderator(bot, THEME_COLOR))
bot.add_cog(AutoMod(bot, THEME_COLOR))
bot.add_cog(Fun(bot, THEME_COLOR))
bot.add_cog(Google(bot, THEME_COLOR))
#bot.add_cog(Hangman(bot, THEME_COLOR))
#bot.add_cog(RockPaperScissors(bot, THEME_COLOR))
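# Author ID of the previously handled message, used by the AFK check in on_message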
previous_msg_sender_id = None
@bot.event
async def on_ready():
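    # Start the Data auto-update and presence-update background loops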
bot.loop.create_task(Data.auto_update_data())
bot.loop.create_task(update_presence(bot, PREFIX))
print("Bot is ready...")
@bot.event
async def on_guild_join(guild):
log_channel = bot.get_channel(773580297954394162)
await log_channel.send(f"Joined - {guild.name}\nServer ID - {guild.id}\nOwner - {guild.owner}")
@bot.event
async def on_guild_remove(guild):
log_channel = bot.get_channel(773580297954394162)
await log_channel.send(f"Left - {guild.name}\nServer ID - {guild.id}\nOwner - {guild.owner}")
@bot.event
async def on_member_join(member):
guild: discord.Guild = member.guild
channels = guild.channels
if str(guild.id) not in Data.server_data:
Data.server_data[str(guild.id)] = Data.create_new_data()
data = Data.server_data[str(guild.id)]
print(f"{member} has joined {guild} server...")
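    # Give the new member the server's configured auto-join role, if one is set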
join_role = guild.get_role(data["join_role"])
if join_role is not None:
await member.add_roles(join_role)
# Welcome Message
if data["welcome_msg"] is None:
server_wlcm_msg = f"Welcome, {member.mention}, to the Official **{guild.name}** Server"
else:
server_wlcm_msg = data["welcome_msg"]
server_wlcm_msg = server_wlcm_msg.replace(
"[mention]", f"{member.mention}")
# Welcome Channel
wel_channel = None
if data["welcome_channel"] is None:
for channel in channels:
if str(channel).find("welcome") != -1:
wel_channel = channel
break
else:
wel_channel = guild.get_channel(int(data["welcome_channel"]))
try:
await wel_channel.send(server_wlcm_msg)
except AttributeError:
print("DEBUG: No welcome channel has been set or found.")
#Remove welcome channel
@bot.command(name="remove_welcome", aliases=['rwel', 'remwel'])
@commands.has_guild_permissions(manage_guild=True)
async def remove_welcome(ctx):
    if str(ctx.guild.id) not in Data.server_data:
        Data.server_data[str(ctx.guild.id)] = Data.create_new_data()
    Data.server_data[str(ctx.guild.id)]["welcome_channel"] = None
    await ctx.send("This server's welcome channel has been removed")
@bot.event
async def on_member_remove(member):
guild = member.guild
channels = guild.channels
if str(guild.id) not in Data.server_data:
Data.server_data[str(guild.id)] = Data.create_new_data()
data = Data.server_data[str(guild.id)]
print(f"{member} has left the {guild.name}...")
# Leave Message
if data["leave_msg"] is None:
server_leave_msg = f"Goodbye, **{str(member)}**, thank you for staying at **{guild.name}** Server"
else:
server_leave_msg = data["leave_msg"]
server_leave_msg = server_leave_msg.replace("[member]", f"{member}")
# Leave Channel
lv_channel = None
if data["leave_channel"] is None:
for channel in channels:
if str(channel).find("bye") != -1 or str(channel).find("leave") != -1:
lv_channel = channel
break
else:
lv_channel = guild.get_channel(int(data["leave_channel"]))
try:
await lv_channel.send(server_leave_msg)
except AttributeError:
print("DEBUG: No leave channel has been set or found.")
#Remove leave
@bot.command(name="remove_leave", aliases=['rleave', 'remleave'])
@commands.has_guild_permissions(manage_guild=True)
async def remove_leave(ctx):
    if str(ctx.guild.id) not in Data.server_data:
        Data.server_data[str(ctx.guild.id)] = Data.create_new_data()
    Data.server_data[str(ctx.guild.id)]["leave_channel"] = None
    await ctx.send("This server's leave channel has been removed")
@bot.event
async def on_command_error(ctx, error):
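    # Unwrap the original exception, silently ignore unknown commands, and report other errors to the user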
try:
error = error.original
except Exception:
pass
if type(error) is discord.ext.commands.errors.CommandNotFound:
return
elif type(error) is discord.ext.commands.errors.BadArgument:
pass
elif type(error) is discord.ext.commands.errors.MissingRequiredArgument:
pass
elif type(error) is discord.ext.commands.errors.NoPrivateMessage:
pass
elif type(error) is discord.ext.commands.errors.MissingPermissions:
pass
elif type(error) is discord.ext.commands.errors.NotOwner:
pass
elif type(error) is discord.ext.commands.errors.CommandOnCooldown:
pass
elif type(error) is discord.ext.commands.errors.ChannelNotFound:
pass
elif type(error) is discord.ext.commands.errors.BadUnionArgument:
pass
elif type(error) is discord.ext.commands.errors.BotMissingPermissions:
pass
elif type(error) is discord.errors.Forbidden:
error = "I don't have permission to do that!"
else:
print(f"Error {type(error)}: {error}")
traceback.print_exception(
type(error), error, error.__traceback__, file=sys.stderr
)
embed = discord.Embed(
title='Error!',
            description='An unexpected error occurred. Please report this to the dev.',
)
embed.add_field(
name='Error Message:',
value=f"{type(error)}:\n{error}",
inline=False
)
await ctx.send(f"{error}")
# LABEL: Programming Commands
def insert_returns(body):
# insert return stmt if the last expression is a expression statement
if isinstance(body[-1], ast.Expr):
body[-1] = ast.Return(body[-1].value)
ast.fix_missing_locations(body[-1])
# for if statements, we insert returns into the body and the orelse
if isinstance(body[-1], ast.If):
insert_returns(body[-1].body)
insert_returns(body[-1].orelse)
# for with blocks, again we insert returns into the body
if isinstance(body[-1], ast.With):
insert_returns(body[-1].body)
@bot.command(name='eval')
async def eval_fn(ctx, *, cmd):
"""Evaluates input.
    Input is interpreted as newline-separated statements.
If the last statement is an expression, that is the return value.
Usable globals:
- `bot`: the bot instance
- `discord`: the discord module
- `commands`: the discord.ext.commands module
    - `ctx`: the invocation context
- `__import__`: the builtin `__import__` function
Such that `>eval 1 + 1` gives `2` as the result.
    The following invocation will cause the bot to send the text '9'
    to the channel of invocation and return '3' as the result of evaluating
>eval ```
a = 1 + 2
b = a * 2
await ctx.send(a + b)
a
```
"""
if ctx.message.author.id not in [400857098121904149, 733532987794128897]:
await ctx.send("You are not authorized to run this command")
return
fn_name = "_eval_expr"
cmd = cmd.strip("` ")
# add a layer of indentation
cmd = "\n".join(f" {i}" for i in cmd.splitlines())
# wrap in async def body
body = f"async def {fn_name}():\n{cmd}"
parsed = ast.parse(body)
body = parsed.body[0].body
insert_returns(body)
env = {
'bot': ctx.bot,
'discord': discord,
'commands': commands,
'ctx': ctx,
'__import__': __import__
}
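    # compile the wrapped async function into env, then await it and send back the result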
exec(compile(parsed, filename="<ast>", mode="exec"), env)
result = (await eval(f"{fn_name}()", env))
await ctx.send(result)
# LABEL: Debugging Commands
@bot.command(name="data")
async def data(ctx):
is_owner = await bot.is_owner(ctx.author)
if is_owner or ctx.author.id == 733532987794128897: # for real sparta
data_file = discord.File("data.json")
await ctx.send(file=data_file)
@bot.event
async def on_message(message: discord.Message):
global previous_msg_sender_id
if message.author.bot:
return
author: discord.Member = message.author
channel: discord.TextChannel = message.channel
guild: discord.Guild = message.guild
# print(str(author), ": ", message.content)
await bot.process_commands(message)
if str(guild.id) not in Data.server_data:
Data.server_data[str(guild.id)] = Data.create_new_data()
data = Data.server_data[str(guild.id)]
if message.content.replace('!', '') == bot.user.mention:
pre = data["prefix"]
await channel.send(f"The prefix in this server is `{pre}`")
for afk_user_entry in data["afks"]:
afk_user_id = int(afk_user_entry["user"])
afk_reason = afk_user_entry["reason"]
afk_user = guild.get_member(afk_user_id)
if afk_user.id == author.id and afk_user_id == previous_msg_sender_id:
Data.server_data[str(guild.id)]["afks"].remove(afk_user_entry)
await channel.send(f"**{afk_user}** is no longer AFK.")
elif afk_user in message.mentions:
await channel.send(f"**{afk_user}** is currently AFK because **{afk_reason}**.")
if data["pay_respects"] and message.content.strip().lower() == "f":
await channel.send(f"**{author.display_name}** has paid their respects...")
if data["active"] and str(author.id) not in data["users"]:
if not str(channel.id) in data["channels"]:
perms = author.permissions_in(channel)
if not perms.administrator:
if "http://" in message.content or "https://" in message.content:
if len(data["urls"]) > 0:
for url in data["urls"]:
if not url in message.content:
await channel.purge(limit=1)
msg1 = await channel.send(f"{author.mention}, you are not allowed to send links in this channel.")
await asyncio.sleep(2)
await msg1.delete()
else:
await channel.purge(limit=1)
msg2 = await channel.send(f"{author.mention}, you are not allowed to send links in this channel.")
await asyncio.sleep(3)
await msg2.delete()
elif len(message.attachments) > 0:
await channel.purge(limit=1)
msg3 = await channel.send(f"{author.mention}, you are not allowed to send attachments in this channel.")
await asyncio.sleep(3)
await msg3.delete()
previous_msg_sender_id = author.id
bot.run(TOKEN)
|
normal
|
{
"blob_id": "4f9729e396e01cb3d6c9011f79a1ebe618a8e762",
"index": 7787,
"step-1": "<mask token>\n\n\ndef insert_returns(body):\n if isinstance(body[-1], ast.Expr):\n body[-1] = ast.Return(body[-1].value)\n ast.fix_missing_locations(body[-1])\n if isinstance(body[-1], ast.If):\n insert_returns(body[-1].body)\n insert_returns(body[-1].orelse)\n if isinstance(body[-1], ast.With):\n insert_returns(body[-1].body)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef get_prefix(client, message):\n if str(message.guild.id) not in Data.server_data:\n Data.server_data[str(message.guild.id)] = Data.create_new_data()\n data = Data.server_data[str(message.guild.id)]\n return data['prefix']\n\n\n<mask token>\nbot.add_cog(Miscellaneous(bot, THEME_COLOR))\nbot.add_cog(ServerSettings(bot, THEME_COLOR))\nbot.add_cog(Moderator(bot, THEME_COLOR))\nbot.add_cog(AutoMod(bot, THEME_COLOR))\nbot.add_cog(Fun(bot, THEME_COLOR))\nbot.add_cog(Google(bot, THEME_COLOR))\n<mask token>\n\n\[email protected]\nasync def on_ready():\n bot.loop.create_task(Data.auto_update_data())\n bot.loop.create_task(update_presence(bot, PREFIX))\n print('Bot is ready...')\n\n\[email protected]\nasync def on_guild_join(guild):\n log_channel = bot.get_channel(773580297954394162)\n await log_channel.send(\n f'Joined - {guild.name}\\nServer ID - {guild.id}\\nOwner - {guild.owner}'\n )\n\n\[email protected]\nasync def on_guild_remove(guild):\n log_channel = bot.get_channel(773580297954394162)\n await log_channel.send(\n f'Left - {guild.name}\\nServer ID - {guild.id}\\nOwner - {guild.owner}')\n\n\[email protected]\nasync def on_member_join(member):\n guild: discord.Guild = member.guild\n channels = guild.channels\n if str(guild.id) not in Data.server_data:\n Data.server_data[str(guild.id)] = Data.create_new_data()\n data = Data.server_data[str(guild.id)]\n print(f'{member} has joined {guild} server...')\n join_role = guild.get_role(data['join_role'])\n if join_role is not None:\n await member.add_roles(join_role)\n if data['welcome_msg'] is None:\n server_wlcm_msg = (\n f'Welcome, {member.mention}, to the Official **{guild.name}** Server'\n )\n else:\n server_wlcm_msg = data['welcome_msg']\n server_wlcm_msg = server_wlcm_msg.replace('[mention]',\n f'{member.mention}')\n wel_channel = None\n if data['welcome_channel'] is None:\n for channel in channels:\n if str(channel).find('welcome') != -1:\n wel_channel = channel\n break\n else:\n wel_channel = guild.get_channel(int(data['welcome_channel']))\n try:\n await wel_channel.send(server_wlcm_msg)\n except AttributeError:\n print('DEBUG: No welcome channel has been set or found.')\n\n\[email protected](name='remove_welcome', aliases=['rwel', 'remwel'])\[email protected]_guild_permissions(manage_guild=True)\nasync def remove_welcome(ctx, *, channel):\n if str(ctx.guild.id) not in Data.server_data:\n Data.server_data[str(ctx.guild.id)] = Data.create_new_data()\n Data.server_data[str(ctx.guild.id)]['welcome_channel'] = channel\n await ctx.send(\"This server's welcome channel has been removed\")\n\n\[email protected]\nasync def on_member_remove(member):\n guild = member.guild\n channels = guild.channels\n if str(guild.id) not in Data.server_data:\n Data.server_data[str(guild.id)] = Data.create_new_data()\n data = Data.server_data[str(guild.id)]\n print(f'{member} has left the {guild.name}...')\n if data['leave_msg'] is None:\n server_leave_msg = (\n f'Goodbye, **{str(member)}**, thank you for staying at **{guild.name}** Server'\n )\n else:\n server_leave_msg = data['leave_msg']\n server_leave_msg = server_leave_msg.replace('[member]', f'{member}')\n lv_channel = None\n if data['leave_channel'] is None:\n for channel in channels:\n if str(channel).find('bye') != -1 or str(channel).find('leave'\n ) != -1:\n lv_channel = channel\n break\n else:\n lv_channel = guild.get_channel(int(data['leave_channel']))\n try:\n await lv_channel.send(server_leave_msg)\n except AttributeError:\n print('DEBUG: No leave channel has been set or 
found.')\n\n\[email protected](name='remove_leave', aliases=['rleave', 'remleave'])\[email protected]_guild_permissions(manage_guild=True)\nasync def remove_welcome(ctx, *, channel):\n if str(ctx.guild.id) not in Data.server_data:\n Data.server_data[str(ctx.guild.id)] = Data.create_new_data()\n Data.server_data[str(ctx.guild.id)]['leave_channel'] = channel\n await ctx.send(\"This server's leave channel has been Removed\")\n\n\[email protected]\nasync def on_command_error(ctx, error):\n try:\n error = error.original\n except Exception:\n pass\n if type(error) is discord.ext.commands.errors.CommandNotFound:\n return\n elif type(error) is discord.ext.commands.errors.BadArgument:\n pass\n elif type(error) is discord.ext.commands.errors.MissingRequiredArgument:\n pass\n elif type(error) is discord.ext.commands.errors.NoPrivateMessage:\n pass\n elif type(error) is discord.ext.commands.errors.MissingPermissions:\n pass\n elif type(error) is discord.ext.commands.errors.NotOwner:\n pass\n elif type(error) is discord.ext.commands.errors.CommandOnCooldown:\n pass\n elif type(error) is discord.ext.commands.errors.ChannelNotFound:\n pass\n elif type(error) is discord.ext.commands.errors.BadUnionArgument:\n pass\n elif type(error) is discord.ext.commands.errors.BotMissingPermissions:\n pass\n elif type(error) is discord.errors.Forbidden:\n error = \"I don't have permission to do that!\"\n else:\n print(f'Error {type(error)}: {error}')\n traceback.print_exception(type(error), error, error.__traceback__,\n file=sys.stderr)\n embed = discord.Embed(title='Error!', description=\n 'An unexpected error ocurred. Please report this to the dev.'\n )\n embed.add_field(name='Error Message:', value=\n f'{type(error)}:\\n{error}', inline=False)\n await ctx.send(f'{error}')\n\n\ndef insert_returns(body):\n if isinstance(body[-1], ast.Expr):\n body[-1] = ast.Return(body[-1].value)\n ast.fix_missing_locations(body[-1])\n if isinstance(body[-1], ast.If):\n insert_returns(body[-1].body)\n insert_returns(body[-1].orelse)\n if isinstance(body[-1], ast.With):\n insert_returns(body[-1].body)\n\n\[email protected](name='eval')\nasync def eval_fn(ctx, *, cmd):\n \"\"\"Evaluates input.\n Input is interpreted as newline seperated statements.\n If the last statement is an expression, that is the return value.\n Usable globals:\n - `bot`: the bot instance\n - `discord`: the discord module\n - `commands`: the discord.ext.commands module\n - `ctx`: the invokation context\n - `__import__`: the builtin `__import__` function\n Such that `>eval 1 + 1` gives `2` as the result.\n The following invokation will cause the bot to send the text '9'\n to the channel of invokation and return '3' as the result of evaluating\n >eval ```\n a = 1 + 2\n b = a * 2\n await ctx.send(a + b)\n a\n ```\n \"\"\"\n if ctx.message.author.id not in [400857098121904149, 733532987794128897]:\n await ctx.send('You are not authorized to run this command')\n return\n fn_name = '_eval_expr'\n cmd = cmd.strip('` ')\n cmd = '\\n'.join(f' {i}' for i in cmd.splitlines())\n body = f'async def {fn_name}():\\n{cmd}'\n parsed = ast.parse(body)\n body = parsed.body[0].body\n insert_returns(body)\n env = {'bot': ctx.bot, 'discord': discord, 'commands': commands, 'ctx':\n ctx, '__import__': __import__}\n exec(compile(parsed, filename='<ast>', mode='exec'), env)\n result = await eval(f'{fn_name}()', env)\n await ctx.send(result)\n\n\[email protected](name='data')\nasync def data(ctx):\n is_owner = await bot.is_owner(ctx.author)\n if is_owner or ctx.author.id == 
733532987794128897:\n data_file = discord.File('data.json')\n await ctx.send(file=data_file)\n\n\[email protected]\nasync def on_message(message: discord.Message):\n global previous_msg_sender_id\n if message.author.bot:\n return\n author: discord.Member = message.author\n channel: discord.TextChannel = message.channel\n guild: discord.Guild = message.guild\n await bot.process_commands(message)\n if str(guild.id) not in Data.server_data:\n Data.server_data[str(guild.id)] = Data.create_new_data()\n data = Data.server_data[str(guild.id)]\n if message.content.replace('!', '') == bot.user.mention:\n pre = data['prefix']\n await channel.send(f'The prefix in this server is `{pre}`')\n for afk_user_entry in data['afks']:\n afk_user_id = int(afk_user_entry['user'])\n afk_reason = afk_user_entry['reason']\n afk_user = guild.get_member(afk_user_id)\n if afk_user.id == author.id and afk_user_id == previous_msg_sender_id:\n Data.server_data[str(guild.id)]['afks'].remove(afk_user_entry)\n await channel.send(f'**{afk_user}** is no longer AFK.')\n elif afk_user in message.mentions:\n await channel.send(\n f'**{afk_user}** is currently AFK because **{afk_reason}**.')\n if data['pay_respects'] and message.content.strip().lower() == 'f':\n await channel.send(\n f'**{author.display_name}** has paid their respects...')\n if data['active'] and str(author.id) not in data['users']:\n if not str(channel.id) in data['channels']:\n perms = author.permissions_in(channel)\n if not perms.administrator:\n if ('http://' in message.content or 'https://' in message.\n content):\n if len(data['urls']) > 0:\n for url in data['urls']:\n if not url in message.content:\n await channel.purge(limit=1)\n msg1 = await channel.send(\n f'{author.mention}, you are not allowed to send links in this channel.'\n )\n await asyncio.sleep(2)\n await msg1.delete()\n else:\n await channel.purge(limit=1)\n msg2 = await channel.send(\n f'{author.mention}, you are not allowed to send links in this channel.'\n )\n await asyncio.sleep(3)\n await msg2.delete()\n elif len(message.attachments) > 0:\n await channel.purge(limit=1)\n msg3 = await channel.send(\n f'{author.mention}, you are not allowed to send attachments in this channel.'\n )\n await asyncio.sleep(3)\n await msg3.delete()\n previous_msg_sender_id = author.id\n\n\nbot.run(TOKEN)\n",
"step-3": "<mask token>\nTOKEN = os.getenv('SPARTA_TOKEN')\nintents = discord.Intents.default()\nintents.members = True\n\n\ndef get_prefix(client, message):\n if str(message.guild.id) not in Data.server_data:\n Data.server_data[str(message.guild.id)] = Data.create_new_data()\n data = Data.server_data[str(message.guild.id)]\n return data['prefix']\n\n\nPREFIX = get_prefix\nbot = commands.Bot(command_prefix=PREFIX, description=\n 'I am Sparta Bot, a bot for the Official Sparta Gaming Discord server.',\n intents=intents, help_command=None, case_insensitive=True)\nTHEME_COLOR = discord.Colour.blue()\nbot.add_cog(Miscellaneous(bot, THEME_COLOR))\nbot.add_cog(ServerSettings(bot, THEME_COLOR))\nbot.add_cog(Moderator(bot, THEME_COLOR))\nbot.add_cog(AutoMod(bot, THEME_COLOR))\nbot.add_cog(Fun(bot, THEME_COLOR))\nbot.add_cog(Google(bot, THEME_COLOR))\nprevious_msg_sender_id = None\n\n\[email protected]\nasync def on_ready():\n bot.loop.create_task(Data.auto_update_data())\n bot.loop.create_task(update_presence(bot, PREFIX))\n print('Bot is ready...')\n\n\[email protected]\nasync def on_guild_join(guild):\n log_channel = bot.get_channel(773580297954394162)\n await log_channel.send(\n f'Joined - {guild.name}\\nServer ID - {guild.id}\\nOwner - {guild.owner}'\n )\n\n\[email protected]\nasync def on_guild_remove(guild):\n log_channel = bot.get_channel(773580297954394162)\n await log_channel.send(\n f'Left - {guild.name}\\nServer ID - {guild.id}\\nOwner - {guild.owner}')\n\n\[email protected]\nasync def on_member_join(member):\n guild: discord.Guild = member.guild\n channels = guild.channels\n if str(guild.id) not in Data.server_data:\n Data.server_data[str(guild.id)] = Data.create_new_data()\n data = Data.server_data[str(guild.id)]\n print(f'{member} has joined {guild} server...')\n join_role = guild.get_role(data['join_role'])\n if join_role is not None:\n await member.add_roles(join_role)\n if data['welcome_msg'] is None:\n server_wlcm_msg = (\n f'Welcome, {member.mention}, to the Official **{guild.name}** Server'\n )\n else:\n server_wlcm_msg = data['welcome_msg']\n server_wlcm_msg = server_wlcm_msg.replace('[mention]',\n f'{member.mention}')\n wel_channel = None\n if data['welcome_channel'] is None:\n for channel in channels:\n if str(channel).find('welcome') != -1:\n wel_channel = channel\n break\n else:\n wel_channel = guild.get_channel(int(data['welcome_channel']))\n try:\n await wel_channel.send(server_wlcm_msg)\n except AttributeError:\n print('DEBUG: No welcome channel has been set or found.')\n\n\[email protected](name='remove_welcome', aliases=['rwel', 'remwel'])\[email protected]_guild_permissions(manage_guild=True)\nasync def remove_welcome(ctx, *, channel):\n if str(ctx.guild.id) not in Data.server_data:\n Data.server_data[str(ctx.guild.id)] = Data.create_new_data()\n Data.server_data[str(ctx.guild.id)]['welcome_channel'] = channel\n await ctx.send(\"This server's welcome channel has been removed\")\n\n\[email protected]\nasync def on_member_remove(member):\n guild = member.guild\n channels = guild.channels\n if str(guild.id) not in Data.server_data:\n Data.server_data[str(guild.id)] = Data.create_new_data()\n data = Data.server_data[str(guild.id)]\n print(f'{member} has left the {guild.name}...')\n if data['leave_msg'] is None:\n server_leave_msg = (\n f'Goodbye, **{str(member)}**, thank you for staying at **{guild.name}** Server'\n )\n else:\n server_leave_msg = data['leave_msg']\n server_leave_msg = server_leave_msg.replace('[member]', f'{member}')\n lv_channel = None\n if 
data['leave_channel'] is None:\n for channel in channels:\n if str(channel).find('bye') != -1 or str(channel).find('leave'\n ) != -1:\n lv_channel = channel\n break\n else:\n lv_channel = guild.get_channel(int(data['leave_channel']))\n try:\n await lv_channel.send(server_leave_msg)\n except AttributeError:\n print('DEBUG: No leave channel has been set or found.')\n\n\[email protected](name='remove_leave', aliases=['rleave', 'remleave'])\[email protected]_guild_permissions(manage_guild=True)\nasync def remove_welcome(ctx, *, channel):\n if str(ctx.guild.id) not in Data.server_data:\n Data.server_data[str(ctx.guild.id)] = Data.create_new_data()\n Data.server_data[str(ctx.guild.id)]['leave_channel'] = channel\n await ctx.send(\"This server's leave channel has been Removed\")\n\n\[email protected]\nasync def on_command_error(ctx, error):\n try:\n error = error.original\n except Exception:\n pass\n if type(error) is discord.ext.commands.errors.CommandNotFound:\n return\n elif type(error) is discord.ext.commands.errors.BadArgument:\n pass\n elif type(error) is discord.ext.commands.errors.MissingRequiredArgument:\n pass\n elif type(error) is discord.ext.commands.errors.NoPrivateMessage:\n pass\n elif type(error) is discord.ext.commands.errors.MissingPermissions:\n pass\n elif type(error) is discord.ext.commands.errors.NotOwner:\n pass\n elif type(error) is discord.ext.commands.errors.CommandOnCooldown:\n pass\n elif type(error) is discord.ext.commands.errors.ChannelNotFound:\n pass\n elif type(error) is discord.ext.commands.errors.BadUnionArgument:\n pass\n elif type(error) is discord.ext.commands.errors.BotMissingPermissions:\n pass\n elif type(error) is discord.errors.Forbidden:\n error = \"I don't have permission to do that!\"\n else:\n print(f'Error {type(error)}: {error}')\n traceback.print_exception(type(error), error, error.__traceback__,\n file=sys.stderr)\n embed = discord.Embed(title='Error!', description=\n 'An unexpected error ocurred. 
Please report this to the dev.'\n )\n embed.add_field(name='Error Message:', value=\n f'{type(error)}:\\n{error}', inline=False)\n await ctx.send(f'{error}')\n\n\ndef insert_returns(body):\n if isinstance(body[-1], ast.Expr):\n body[-1] = ast.Return(body[-1].value)\n ast.fix_missing_locations(body[-1])\n if isinstance(body[-1], ast.If):\n insert_returns(body[-1].body)\n insert_returns(body[-1].orelse)\n if isinstance(body[-1], ast.With):\n insert_returns(body[-1].body)\n\n\[email protected](name='eval')\nasync def eval_fn(ctx, *, cmd):\n \"\"\"Evaluates input.\n Input is interpreted as newline seperated statements.\n If the last statement is an expression, that is the return value.\n Usable globals:\n - `bot`: the bot instance\n - `discord`: the discord module\n - `commands`: the discord.ext.commands module\n - `ctx`: the invokation context\n - `__import__`: the builtin `__import__` function\n Such that `>eval 1 + 1` gives `2` as the result.\n The following invokation will cause the bot to send the text '9'\n to the channel of invokation and return '3' as the result of evaluating\n >eval ```\n a = 1 + 2\n b = a * 2\n await ctx.send(a + b)\n a\n ```\n \"\"\"\n if ctx.message.author.id not in [400857098121904149, 733532987794128897]:\n await ctx.send('You are not authorized to run this command')\n return\n fn_name = '_eval_expr'\n cmd = cmd.strip('` ')\n cmd = '\\n'.join(f' {i}' for i in cmd.splitlines())\n body = f'async def {fn_name}():\\n{cmd}'\n parsed = ast.parse(body)\n body = parsed.body[0].body\n insert_returns(body)\n env = {'bot': ctx.bot, 'discord': discord, 'commands': commands, 'ctx':\n ctx, '__import__': __import__}\n exec(compile(parsed, filename='<ast>', mode='exec'), env)\n result = await eval(f'{fn_name}()', env)\n await ctx.send(result)\n\n\[email protected](name='data')\nasync def data(ctx):\n is_owner = await bot.is_owner(ctx.author)\n if is_owner or ctx.author.id == 733532987794128897:\n data_file = discord.File('data.json')\n await ctx.send(file=data_file)\n\n\[email protected]\nasync def on_message(message: discord.Message):\n global previous_msg_sender_id\n if message.author.bot:\n return\n author: discord.Member = message.author\n channel: discord.TextChannel = message.channel\n guild: discord.Guild = message.guild\n await bot.process_commands(message)\n if str(guild.id) not in Data.server_data:\n Data.server_data[str(guild.id)] = Data.create_new_data()\n data = Data.server_data[str(guild.id)]\n if message.content.replace('!', '') == bot.user.mention:\n pre = data['prefix']\n await channel.send(f'The prefix in this server is `{pre}`')\n for afk_user_entry in data['afks']:\n afk_user_id = int(afk_user_entry['user'])\n afk_reason = afk_user_entry['reason']\n afk_user = guild.get_member(afk_user_id)\n if afk_user.id == author.id and afk_user_id == previous_msg_sender_id:\n Data.server_data[str(guild.id)]['afks'].remove(afk_user_entry)\n await channel.send(f'**{afk_user}** is no longer AFK.')\n elif afk_user in message.mentions:\n await channel.send(\n f'**{afk_user}** is currently AFK because **{afk_reason}**.')\n if data['pay_respects'] and message.content.strip().lower() == 'f':\n await channel.send(\n f'**{author.display_name}** has paid their respects...')\n if data['active'] and str(author.id) not in data['users']:\n if not str(channel.id) in data['channels']:\n perms = author.permissions_in(channel)\n if not perms.administrator:\n if ('http://' in message.content or 'https://' in message.\n content):\n if len(data['urls']) > 0:\n for url in data['urls']:\n if not 
url in message.content:\n await channel.purge(limit=1)\n msg1 = await channel.send(\n f'{author.mention}, you are not allowed to send links in this channel.'\n )\n await asyncio.sleep(2)\n await msg1.delete()\n else:\n await channel.purge(limit=1)\n msg2 = await channel.send(\n f'{author.mention}, you are not allowed to send links in this channel.'\n )\n await asyncio.sleep(3)\n await msg2.delete()\n elif len(message.attachments) > 0:\n await channel.purge(limit=1)\n msg3 = await channel.send(\n f'{author.mention}, you are not allowed to send attachments in this channel.'\n )\n await asyncio.sleep(3)\n await msg3.delete()\n previous_msg_sender_id = author.id\n\n\nbot.run(TOKEN)\n",
"step-4": "import os\nimport subprocess\nimport discord\nimport asyncio\nimport traceback\nimport sys\nimport ast\nfrom discord.ext import commands\nfrom cogs.misc import Miscellaneous\nfrom cogs.serversettings import ServerSettings\nfrom cogs.mod import Moderator\nfrom cogs.automod import AutoMod\nfrom cogs.google import Google\nfrom cogs.fun import Fun\nfrom otherscipts.helpers import update_presence\nfrom otherscipts.data import Data\nTOKEN = os.getenv('SPARTA_TOKEN')\nintents = discord.Intents.default()\nintents.members = True\n\n\ndef get_prefix(client, message):\n if str(message.guild.id) not in Data.server_data:\n Data.server_data[str(message.guild.id)] = Data.create_new_data()\n data = Data.server_data[str(message.guild.id)]\n return data['prefix']\n\n\nPREFIX = get_prefix\nbot = commands.Bot(command_prefix=PREFIX, description=\n 'I am Sparta Bot, a bot for the Official Sparta Gaming Discord server.',\n intents=intents, help_command=None, case_insensitive=True)\nTHEME_COLOR = discord.Colour.blue()\nbot.add_cog(Miscellaneous(bot, THEME_COLOR))\nbot.add_cog(ServerSettings(bot, THEME_COLOR))\nbot.add_cog(Moderator(bot, THEME_COLOR))\nbot.add_cog(AutoMod(bot, THEME_COLOR))\nbot.add_cog(Fun(bot, THEME_COLOR))\nbot.add_cog(Google(bot, THEME_COLOR))\nprevious_msg_sender_id = None\n\n\[email protected]\nasync def on_ready():\n bot.loop.create_task(Data.auto_update_data())\n bot.loop.create_task(update_presence(bot, PREFIX))\n print('Bot is ready...')\n\n\[email protected]\nasync def on_guild_join(guild):\n log_channel = bot.get_channel(773580297954394162)\n await log_channel.send(\n f'Joined - {guild.name}\\nServer ID - {guild.id}\\nOwner - {guild.owner}'\n )\n\n\[email protected]\nasync def on_guild_remove(guild):\n log_channel = bot.get_channel(773580297954394162)\n await log_channel.send(\n f'Left - {guild.name}\\nServer ID - {guild.id}\\nOwner - {guild.owner}')\n\n\[email protected]\nasync def on_member_join(member):\n guild: discord.Guild = member.guild\n channels = guild.channels\n if str(guild.id) not in Data.server_data:\n Data.server_data[str(guild.id)] = Data.create_new_data()\n data = Data.server_data[str(guild.id)]\n print(f'{member} has joined {guild} server...')\n join_role = guild.get_role(data['join_role'])\n if join_role is not None:\n await member.add_roles(join_role)\n if data['welcome_msg'] is None:\n server_wlcm_msg = (\n f'Welcome, {member.mention}, to the Official **{guild.name}** Server'\n )\n else:\n server_wlcm_msg = data['welcome_msg']\n server_wlcm_msg = server_wlcm_msg.replace('[mention]',\n f'{member.mention}')\n wel_channel = None\n if data['welcome_channel'] is None:\n for channel in channels:\n if str(channel).find('welcome') != -1:\n wel_channel = channel\n break\n else:\n wel_channel = guild.get_channel(int(data['welcome_channel']))\n try:\n await wel_channel.send(server_wlcm_msg)\n except AttributeError:\n print('DEBUG: No welcome channel has been set or found.')\n\n\[email protected](name='remove_welcome', aliases=['rwel', 'remwel'])\[email protected]_guild_permissions(manage_guild=True)\nasync def remove_welcome(ctx, *, channel):\n if str(ctx.guild.id) not in Data.server_data:\n Data.server_data[str(ctx.guild.id)] = Data.create_new_data()\n Data.server_data[str(ctx.guild.id)]['welcome_channel'] = channel\n await ctx.send(\"This server's welcome channel has been removed\")\n\n\[email protected]\nasync def on_member_remove(member):\n guild = member.guild\n channels = guild.channels\n if str(guild.id) not in Data.server_data:\n 
Data.server_data[str(guild.id)] = Data.create_new_data()\n data = Data.server_data[str(guild.id)]\n print(f'{member} has left the {guild.name}...')\n if data['leave_msg'] is None:\n server_leave_msg = (\n f'Goodbye, **{str(member)}**, thank you for staying at **{guild.name}** Server'\n )\n else:\n server_leave_msg = data['leave_msg']\n server_leave_msg = server_leave_msg.replace('[member]', f'{member}')\n lv_channel = None\n if data['leave_channel'] is None:\n for channel in channels:\n if str(channel).find('bye') != -1 or str(channel).find('leave'\n ) != -1:\n lv_channel = channel\n break\n else:\n lv_channel = guild.get_channel(int(data['leave_channel']))\n try:\n await lv_channel.send(server_leave_msg)\n except AttributeError:\n print('DEBUG: No leave channel has been set or found.')\n\n\[email protected](name='remove_leave', aliases=['rleave', 'remleave'])\[email protected]_guild_permissions(manage_guild=True)\nasync def remove_welcome(ctx, *, channel):\n if str(ctx.guild.id) not in Data.server_data:\n Data.server_data[str(ctx.guild.id)] = Data.create_new_data()\n Data.server_data[str(ctx.guild.id)]['leave_channel'] = channel\n await ctx.send(\"This server's leave channel has been Removed\")\n\n\[email protected]\nasync def on_command_error(ctx, error):\n try:\n error = error.original\n except Exception:\n pass\n if type(error) is discord.ext.commands.errors.CommandNotFound:\n return\n elif type(error) is discord.ext.commands.errors.BadArgument:\n pass\n elif type(error) is discord.ext.commands.errors.MissingRequiredArgument:\n pass\n elif type(error) is discord.ext.commands.errors.NoPrivateMessage:\n pass\n elif type(error) is discord.ext.commands.errors.MissingPermissions:\n pass\n elif type(error) is discord.ext.commands.errors.NotOwner:\n pass\n elif type(error) is discord.ext.commands.errors.CommandOnCooldown:\n pass\n elif type(error) is discord.ext.commands.errors.ChannelNotFound:\n pass\n elif type(error) is discord.ext.commands.errors.BadUnionArgument:\n pass\n elif type(error) is discord.ext.commands.errors.BotMissingPermissions:\n pass\n elif type(error) is discord.errors.Forbidden:\n error = \"I don't have permission to do that!\"\n else:\n print(f'Error {type(error)}: {error}')\n traceback.print_exception(type(error), error, error.__traceback__,\n file=sys.stderr)\n embed = discord.Embed(title='Error!', description=\n 'An unexpected error ocurred. 
Please report this to the dev.'\n )\n embed.add_field(name='Error Message:', value=\n f'{type(error)}:\\n{error}', inline=False)\n await ctx.send(f'{error}')\n\n\ndef insert_returns(body):\n if isinstance(body[-1], ast.Expr):\n body[-1] = ast.Return(body[-1].value)\n ast.fix_missing_locations(body[-1])\n if isinstance(body[-1], ast.If):\n insert_returns(body[-1].body)\n insert_returns(body[-1].orelse)\n if isinstance(body[-1], ast.With):\n insert_returns(body[-1].body)\n\n\[email protected](name='eval')\nasync def eval_fn(ctx, *, cmd):\n \"\"\"Evaluates input.\n Input is interpreted as newline seperated statements.\n If the last statement is an expression, that is the return value.\n Usable globals:\n - `bot`: the bot instance\n - `discord`: the discord module\n - `commands`: the discord.ext.commands module\n - `ctx`: the invokation context\n - `__import__`: the builtin `__import__` function\n Such that `>eval 1 + 1` gives `2` as the result.\n The following invokation will cause the bot to send the text '9'\n to the channel of invokation and return '3' as the result of evaluating\n >eval ```\n a = 1 + 2\n b = a * 2\n await ctx.send(a + b)\n a\n ```\n \"\"\"\n if ctx.message.author.id not in [400857098121904149, 733532987794128897]:\n await ctx.send('You are not authorized to run this command')\n return\n fn_name = '_eval_expr'\n cmd = cmd.strip('` ')\n cmd = '\\n'.join(f' {i}' for i in cmd.splitlines())\n body = f'async def {fn_name}():\\n{cmd}'\n parsed = ast.parse(body)\n body = parsed.body[0].body\n insert_returns(body)\n env = {'bot': ctx.bot, 'discord': discord, 'commands': commands, 'ctx':\n ctx, '__import__': __import__}\n exec(compile(parsed, filename='<ast>', mode='exec'), env)\n result = await eval(f'{fn_name}()', env)\n await ctx.send(result)\n\n\[email protected](name='data')\nasync def data(ctx):\n is_owner = await bot.is_owner(ctx.author)\n if is_owner or ctx.author.id == 733532987794128897:\n data_file = discord.File('data.json')\n await ctx.send(file=data_file)\n\n\[email protected]\nasync def on_message(message: discord.Message):\n global previous_msg_sender_id\n if message.author.bot:\n return\n author: discord.Member = message.author\n channel: discord.TextChannel = message.channel\n guild: discord.Guild = message.guild\n await bot.process_commands(message)\n if str(guild.id) not in Data.server_data:\n Data.server_data[str(guild.id)] = Data.create_new_data()\n data = Data.server_data[str(guild.id)]\n if message.content.replace('!', '') == bot.user.mention:\n pre = data['prefix']\n await channel.send(f'The prefix in this server is `{pre}`')\n for afk_user_entry in data['afks']:\n afk_user_id = int(afk_user_entry['user'])\n afk_reason = afk_user_entry['reason']\n afk_user = guild.get_member(afk_user_id)\n if afk_user.id == author.id and afk_user_id == previous_msg_sender_id:\n Data.server_data[str(guild.id)]['afks'].remove(afk_user_entry)\n await channel.send(f'**{afk_user}** is no longer AFK.')\n elif afk_user in message.mentions:\n await channel.send(\n f'**{afk_user}** is currently AFK because **{afk_reason}**.')\n if data['pay_respects'] and message.content.strip().lower() == 'f':\n await channel.send(\n f'**{author.display_name}** has paid their respects...')\n if data['active'] and str(author.id) not in data['users']:\n if not str(channel.id) in data['channels']:\n perms = author.permissions_in(channel)\n if not perms.administrator:\n if ('http://' in message.content or 'https://' in message.\n content):\n if len(data['urls']) > 0:\n for url in data['urls']:\n if not 
url in message.content:\n await channel.purge(limit=1)\n msg1 = await channel.send(\n f'{author.mention}, you are not allowed to send links in this channel.'\n )\n await asyncio.sleep(2)\n await msg1.delete()\n else:\n await channel.purge(limit=1)\n msg2 = await channel.send(\n f'{author.mention}, you are not allowed to send links in this channel.'\n )\n await asyncio.sleep(3)\n await msg2.delete()\n elif len(message.attachments) > 0:\n await channel.purge(limit=1)\n msg3 = await channel.send(\n f'{author.mention}, you are not allowed to send attachments in this channel.'\n )\n await asyncio.sleep(3)\n await msg3.delete()\n previous_msg_sender_id = author.id\n\n\nbot.run(TOKEN)\n",
"step-5": "import os\nimport subprocess\nimport discord\nimport asyncio\nimport traceback\nimport sys\nimport ast\n\nfrom discord.ext import commands\n\n# Import Cogs\nfrom cogs.misc import Miscellaneous\nfrom cogs.serversettings import ServerSettings\nfrom cogs.mod import Moderator\nfrom cogs.automod import AutoMod\nfrom cogs.google import Google\n\n# Minigame/Fun Cogs\nfrom cogs.fun import Fun\n#from cogs.hangman import Hangman\n#from cogs.rps import RockPaperScissors\n\nfrom otherscipts.helpers import update_presence\nfrom otherscipts.data import Data\n\nTOKEN = os.getenv('SPARTA_TOKEN')\n\nintents = discord.Intents.default()\nintents.members = True\n\n\ndef get_prefix(client, message):\n if str(message.guild.id) not in Data.server_data:\n Data.server_data[str(message.guild.id)] = Data.create_new_data()\n\n data = Data.server_data[str(message.guild.id)]\n return data[\"prefix\"]\n\n\nPREFIX = get_prefix\nbot = commands.Bot(\n command_prefix=PREFIX,\n description=\"I am Sparta Bot, a bot for the Official Sparta Gaming Discord server.\",\n intents=intents,\n help_command=None,\n case_insensitive=True\n)\n\nTHEME_COLOR = discord.Colour.blue()\n\n# Add Cogs\nbot.add_cog(Miscellaneous(bot, THEME_COLOR))\nbot.add_cog(ServerSettings(bot, THEME_COLOR))\nbot.add_cog(Moderator(bot, THEME_COLOR))\nbot.add_cog(AutoMod(bot, THEME_COLOR))\nbot.add_cog(Fun(bot, THEME_COLOR))\nbot.add_cog(Google(bot, THEME_COLOR))\n#bot.add_cog(Hangman(bot, THEME_COLOR))\n#bot.add_cog(RockPaperScissors(bot, THEME_COLOR))\n\nprevious_msg_sender_id = None\n\n\[email protected]\nasync def on_ready():\n bot.loop.create_task(Data.auto_update_data())\n bot.loop.create_task(update_presence(bot, PREFIX))\n print(\"Bot is ready...\")\n\[email protected]\nasync def on_guild_join(guild):\n log_channel = bot.get_channel(773580297954394162)\n await log_channel.send(f\"Joined - {guild.name}\\nServer ID - {guild.id}\\nOwner - {guild.owner}\")\[email protected]\nasync def on_guild_remove(guild):\n log_channel = bot.get_channel(773580297954394162)\n await log_channel.send(f\"Left - {guild.name}\\nServer ID - {guild.id}\\nOwner - {guild.owner}\")\n\[email protected]\nasync def on_member_join(member):\n guild: discord.Guild = member.guild\n channels = guild.channels\n\n if str(guild.id) not in Data.server_data:\n Data.server_data[str(guild.id)] = Data.create_new_data()\n data = Data.server_data[str(guild.id)]\n\n print(f\"{member} has joined {guild} server...\")\n\n join_role = guild.get_role(data[\"join_role\"])\n if join_role is not None:\n await member.add_roles(join_role)\n\n # Welcome Message\n if data[\"welcome_msg\"] is None:\n server_wlcm_msg = f\"Welcome, {member.mention}, to the Official **{guild.name}** Server\"\n else:\n server_wlcm_msg = data[\"welcome_msg\"]\n server_wlcm_msg = server_wlcm_msg.replace(\n \"[mention]\", f\"{member.mention}\")\n\n # Welcome Channel\n wel_channel = None\n\n if data[\"welcome_channel\"] is None:\n for channel in channels:\n if str(channel).find(\"welcome\") != -1:\n wel_channel = channel\n break\n else:\n wel_channel = guild.get_channel(int(data[\"welcome_channel\"]))\n\n try:\n await wel_channel.send(server_wlcm_msg)\n except AttributeError:\n print(\"DEBUG: No welcome channel has been set or found.\")\n\n#Remove welcome channel\[email protected](name=\"remove_welcome\", aliases=['rwel', 'remwel'])\[email protected]_guild_permissions(manage_guild=True)\nasync def remove_welcome(ctx, *, channel):\n if str(ctx.guild.id) not in Data.server_data:\n Data.server_data[str(ctx.guild.id)] = 
Data.create_new_data()\n \n Data.server_data[str(ctx.guild.id)][\"welcome_channel\"] = channel\n await ctx.send(\"This server's welcome channel has been removed\")\n\[email protected]\nasync def on_member_remove(member):\n guild = member.guild\n channels = guild.channels\n\n if str(guild.id) not in Data.server_data:\n Data.server_data[str(guild.id)] = Data.create_new_data()\n data = Data.server_data[str(guild.id)]\n\n print(f\"{member} has left the {guild.name}...\")\n\n # Leave Message\n if data[\"leave_msg\"] is None:\n server_leave_msg = f\"Goodbye, **{str(member)}**, thank you for staying at **{guild.name}** Server\"\n else:\n server_leave_msg = data[\"leave_msg\"]\n server_leave_msg = server_leave_msg.replace(\"[member]\", f\"{member}\")\n\n # Leave Channel\n lv_channel = None\n\n if data[\"leave_channel\"] is None:\n for channel in channels:\n if str(channel).find(\"bye\") != -1 or str(channel).find(\"leave\") != -1:\n lv_channel = channel\n break\n else:\n lv_channel = guild.get_channel(int(data[\"leave_channel\"]))\n\n try:\n await lv_channel.send(server_leave_msg)\n except AttributeError:\n print(\"DEBUG: No leave channel has been set or found.\")\n\n\n#Remove leave\[email protected](name=\"remove_leave\", aliases=['rleave', 'remleave'])\[email protected]_guild_permissions(manage_guild=True)\nasync def remove_welcome( ctx, *, channel):\n if str(ctx.guild.id) not in Data.server_data:\n Data.server_data[str(ctx.guild.id)] = Data.create_new_data()\n \n Data.server_data[str(ctx.guild.id)][\"leave_channel\"] = channel\n await ctx.send(\"This server's leave channel has been Removed\")\n\[email protected]\nasync def on_command_error(ctx, error):\n try:\n error = error.original\n except Exception:\n pass\n if type(error) is discord.ext.commands.errors.CommandNotFound:\n return\n elif type(error) is discord.ext.commands.errors.BadArgument:\n pass\n elif type(error) is discord.ext.commands.errors.MissingRequiredArgument:\n pass\n elif type(error) is discord.ext.commands.errors.NoPrivateMessage:\n pass\n elif type(error) is discord.ext.commands.errors.MissingPermissions:\n pass\n elif type(error) is discord.ext.commands.errors.NotOwner:\n pass\n elif type(error) is discord.ext.commands.errors.CommandOnCooldown:\n pass\n elif type(error) is discord.ext.commands.errors.ChannelNotFound:\n pass\n elif type(error) is discord.ext.commands.errors.BadUnionArgument:\n pass\n elif type(error) is discord.ext.commands.errors.BotMissingPermissions:\n pass\n elif type(error) is discord.errors.Forbidden:\n error = \"I don't have permission to do that!\"\n else:\n print(f\"Error {type(error)}: {error}\")\n traceback.print_exception(\n type(error), error, error.__traceback__, file=sys.stderr\n )\n\n embed = discord.Embed(\n title='Error!',\n description='An unexpected error ocurred.\\\n Please report this to the dev.',\n )\n embed.add_field(\n name='Error Message:',\n value=f\"{type(error)}:\\n{error}\",\n inline=False\n )\n await ctx.send(f\"{error}\")\n\n\n# LABEL: Programming Commands\ndef insert_returns(body):\n # insert return stmt if the last expression is a expression statement\n if isinstance(body[-1], ast.Expr):\n body[-1] = ast.Return(body[-1].value)\n ast.fix_missing_locations(body[-1])\n\n # for if statements, we insert returns into the body and the orelse\n if isinstance(body[-1], ast.If):\n insert_returns(body[-1].body)\n insert_returns(body[-1].orelse)\n\n # for with blocks, again we insert returns into the body\n if isinstance(body[-1], ast.With):\n insert_returns(body[-1].body)\n\n\[email 
protected](name='eval')\nasync def eval_fn(ctx, *, cmd):\n \"\"\"Evaluates input.\n Input is interpreted as newline seperated statements.\n If the last statement is an expression, that is the return value.\n Usable globals:\n - `bot`: the bot instance\n - `discord`: the discord module\n - `commands`: the discord.ext.commands module\n - `ctx`: the invokation context\n - `__import__`: the builtin `__import__` function\n Such that `>eval 1 + 1` gives `2` as the result.\n The following invokation will cause the bot to send the text '9'\n to the channel of invokation and return '3' as the result of evaluating\n >eval ```\n a = 1 + 2\n b = a * 2\n await ctx.send(a + b)\n a\n ```\n \"\"\"\n if ctx.message.author.id not in [400857098121904149, 733532987794128897]:\n await ctx.send(\"You are not authorized to run this command\")\n return\n\n fn_name = \"_eval_expr\"\n\n cmd = cmd.strip(\"` \")\n\n # add a layer of indentation\n cmd = \"\\n\".join(f\" {i}\" for i in cmd.splitlines())\n\n # wrap in async def body\n body = f\"async def {fn_name}():\\n{cmd}\"\n\n parsed = ast.parse(body)\n body = parsed.body[0].body\n\n insert_returns(body)\n\n env = {\n 'bot': ctx.bot,\n 'discord': discord,\n 'commands': commands,\n 'ctx': ctx,\n '__import__': __import__\n }\n exec(compile(parsed, filename=\"<ast>\", mode=\"exec\"), env)\n\n result = (await eval(f\"{fn_name}()\", env))\n await ctx.send(result)\n\n\n# LABEL: Debugging Commands\[email protected](name=\"data\")\nasync def data(ctx):\n is_owner = await bot.is_owner(ctx.author)\n if is_owner or ctx.author.id == 733532987794128897: # for real sparta\n data_file = discord.File(\"data.json\")\n await ctx.send(file=data_file)\n\n\[email protected]\nasync def on_message(message: discord.Message):\n global previous_msg_sender_id\n\n if message.author.bot:\n return\n\n author: discord.Member = message.author\n channel: discord.TextChannel = message.channel\n guild: discord.Guild = message.guild\n # print(str(author), \": \", message.content)\n\n await bot.process_commands(message)\n\n if str(guild.id) not in Data.server_data:\n Data.server_data[str(guild.id)] = Data.create_new_data()\n\n data = Data.server_data[str(guild.id)]\n\n if message.content.replace('!', '') == bot.user.mention:\n pre = data[\"prefix\"]\n await channel.send(f\"The prefix in this server is `{pre}`\")\n\n for afk_user_entry in data[\"afks\"]:\n afk_user_id = int(afk_user_entry[\"user\"])\n afk_reason = afk_user_entry[\"reason\"]\n afk_user = guild.get_member(afk_user_id)\n\n if afk_user.id == author.id and afk_user_id == previous_msg_sender_id:\n Data.server_data[str(guild.id)][\"afks\"].remove(afk_user_entry)\n await channel.send(f\"**{afk_user}** is no longer AFK.\")\n\n elif afk_user in message.mentions:\n await channel.send(f\"**{afk_user}** is currently AFK because **{afk_reason}**.\")\n\n if data[\"pay_respects\"] and message.content.strip().lower() == \"f\":\n await channel.send(f\"**{author.display_name}** has paid their respects...\")\n\n if data[\"active\"] and str(author.id) not in data[\"users\"]:\n if not str(channel.id) in data[\"channels\"]:\n perms = author.permissions_in(channel)\n if not perms.administrator:\n if \"http://\" in message.content or \"https://\" in message.content:\n if len(data[\"urls\"]) > 0:\n for url in data[\"urls\"]:\n if not url in message.content:\n await channel.purge(limit=1)\n msg1 = await channel.send(f\"{author.mention}, you are not allowed to send links in this channel.\")\n await asyncio.sleep(2)\n await msg1.delete()\n else:\n await 
channel.purge(limit=1)\n msg2 = await channel.send(f\"{author.mention}, you are not allowed to send links in this channel.\")\n await asyncio.sleep(3)\n await msg2.delete()\n\n elif len(message.attachments) > 0:\n await channel.purge(limit=1)\n msg3 = await channel.send(f\"{author.mention}, you are not allowed to send attachments in this channel.\")\n await asyncio.sleep(3)\n await msg3.delete()\n\n previous_msg_sender_id = author.id\n\n\nbot.run(TOKEN)\n",
"step-ids": [
1,
3,
4,
5,
6
]
}
|
[
1,
3,
4,
5,
6
] |
# Reddit API feed
import praw
import sys
import os
def main():
if os.getenv("REDDIT_CLIENT_ID") is None:
print "Set your Reddit environment variables:"
print "REDDIT_CLIENT_ID and REDDIT_CLIENT_SECRET"
sys.exit()
client_id = os.environ['REDDIT_CLIENT_ID']
client_secret = os.environ['REDDIT_CLIENT_SECRET']
try:
reddit_api = praw.Reddit(client_id = client_id,
client_secret = client_secret,
user_agent = "sentiment")
except:
print "Reddit auth failed."
sys.exit()
sub = raw_input("Subreddit: ")
keyword = raw_input("Keyword: ")
get_posts(keyword, sub, reddit_api)
# currently only dumps top 10 posts from subreddit
# regardless of keyword
def get_posts(keyword, sub, reddit_api):
for post in reddit_api.subreddit(sub).hot(limit=10):
print post.title
if __name__ == '__main__':
main()
|
normal
|
{
"blob_id": "9543992e1b115f83640a07c4d4372be0fb465199",
"index": 3256,
"step-1": "# Reddit API feed\n\nimport praw\nimport sys\nimport os\n\ndef main():\n if os.getenv(\"REDDIT_CLIENT_ID\") is None:\n print \"Set your Reddit environment variables:\"\n print \"REDDIT_CLIENT_ID and REDDIT_CLIENT_SECRET\"\n sys.exit()\n client_id = os.environ['REDDIT_CLIENT_ID']\n client_secret = os.environ['REDDIT_CLIENT_SECRET']\n try:\n reddit_api = praw.Reddit(client_id = client_id,\n client_secret = client_secret,\n user_agent = \"sentiment\")\n except:\n print \"Reddit auth failed.\"\n sys.exit()\n sub = raw_input(\"Subreddit: \")\n keyword = raw_input(\"Keyword: \")\n get_posts(keyword, sub, reddit_api)\n\n# currently only dumps top 10 posts from subreddit\n# regardless of keyword\ndef get_posts(keyword, sub, reddit_api):\n for post in reddit_api.subreddit(sub).hot(limit=10):\n print post.title\n\nif __name__ == '__main__':\n main()\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
#!/usr/bin/python
import os, sys
import csv
import glob
if len(sys.argv)==3:
res_dir = sys.argv[1]
info = sys.argv[2]
else:
print "Incorrect arguments: enter outout directory"
sys.exit(0)
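# map influenza segment names to their standard segment numbers (1-8)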
seg = dict([('PB2','1'), ('PB1','2'), ('PA','3'), ('HA','4'), ('NP','5'), ('NA','6'), ('MP','7'), ('NS','8')])
# Read the summary info file:
info_list = []
with open(info, 'r') as csvfile:
reader = csv.reader(csvfile)
for xi in reader:
print xi
info_list = xi
print info_list
# if one sample or many samples : fixing the list length issue
if len(info_list[0]) < 4 : subtypes = list(set([c[-1] for c in info_list]))
else: subtypes = [info_list[-1],]
# Merge all annotation files of the consensus genome
all_annot = []
assembled_cons = [["Sample Id", "Sample Name", "HA", "NA", "MP", "PB2", "PB1", "PA", "NP", "NS"]]
for sub_type in subtypes:
for x in glob.glob(res_dir + "/Consensus_genome/" + sub_type + "/*csv"):
X = x.split("/")
y = X[-1].replace("-annotation.csv", "")
with open(x, 'rb') as csvfile:
r = csv.reader(csvfile)
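            # per-segment annotation values for this sample, taken from the last CSV column ('-' if missing)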
ha = "-"
na = "-"
mp = "-"
pb2 = "-"
pb1 = "-"
pa = "-"
np = "-"
ns = "-"
for a in r:
if a[0] != "Genome":
print X, a
seg_nam = a[0].split("|")[1]
a.insert(0,y + "." + seg[seg_nam])
all_annot.append(a)
if a[1].split("|")[1] == "HA": ha = a[-1]
if a[1].split("|")[1] == "NA": na = a[-1]
if a[1].split("|")[1] == "MP": mp = a[-1]
if a[1].split("|")[1] == "PB2": pb2 = a[-1]
if a[1].split("|")[1] == "PB1": pb1 = a[-1]
if a[1].split("|")[1] == "PA": pa = a[-1]
if a[1].split("|")[1] == "NP": np = a[-1]
if a[1].split("|")[1] == "NS": ns = a[-1]
else: annot_header = a
assembled_cons.append([y, a[1].split("|")[0], ha, na, mp, pb2, pb1, pa, np, ns])
with open(res_dir + '/' + sub_type + '-ConsensusDetail.csv', 'wb') as f:
writer = csv.writer(f)
annot_header.insert(0,"Sample Id")
all_annot.insert(0,annot_header)
writer.writerows(all_annot)
with open(res_dir + '/' + sub_type + '-ConsensusSummary.csv', 'wb') as f:
writer = csv.writer(f)
writer.writerows(assembled_cons)
# Merge all SNPs called...
merge_snps = []
for sub_type in subtypes:
for x in glob.glob(res_dir + "/Snps/" + sub_type + "/*.vcf"):
X = x.split("/")
y = X[-1].replace("-genome-snps.vcf", "")
with open(x, 'rb') as csvfile:
r = csv.reader(csvfile, delimiter="\t")
for s in r:
if not s[0].startswith("#"):
print s
seg_nam = s[0].split("|")[1]
s.insert(0, y + "." + seg[seg_nam])
merge_snps.append(s)
with open(res_dir + '/' + sub_type + '-SNPs.csv', 'wb') as f:
writer = csv.writer(f)
merge_snps.insert(0, ["Sample Id", "Sample Name", "POS","ID","REF","ALT", "QUAL", "FILTER", "INFO"])
writer.writerows(merge_snps)
|
normal
|
{
"blob_id": "4a2796645f1ab585084be47c8cd984c2945aa38b",
"index": 4270,
"step-1": "#!/usr/bin/python\n\nimport os, sys\nimport csv\nimport glob\n\nif len(sys.argv)==3:\n res_dir = sys.argv[1]\n info = sys.argv[2]\n\nelse:\n print \"Incorrect arguments: enter outout directory\"\n sys.exit(0)\n\nseg = dict([('PB2','1'), ('PB1','2'), ('PA','3'), ('HA','4'), ('NP','5'), ('NA','6'), ('MP','7'), ('NS','8')])\n# Read the summary info file:\ninfo_list = []\nwith open(info, 'r') as csvfile:\n reader = csv.reader(csvfile)\n for xi in reader:\n print xi\n info_list = xi \nprint info_list\n# if one samlple or many samples : fixing the list length issue\nif len(info_list[0]) < 4 : subtypes = list[set([c[-1] for c in info_list])]\nelse: subtypes = [info_list[-1],]\n\n# Merge all Annotation file of the consensus genome\nall_annot = []\nassembled_cons = [[\"Sample Id\", \"Sample Name\", \"HA\", \"NA\", \"MP\", \"PB2\", \"PB1\", \"PA\", \"NP\", \"NS\"]]\n\nfor sub_type in subtypes:\n for x in glob.glob(res_dir + \"/Consensus_genome/\" + sub_type + \"/*csv\"):\n X = x.split(\"/\")\n y = X[-1].replace(\"-annotation.csv\", \"\")\n with open(x, 'rb') as csvfile:\n r = csv.reader(csvfile)\n ha = \"-\"\n na = \"-\"\n mp = \"-\"\n pb2 = \"-\"\n pb1 = \"-\"\n pa = \"-\"\n np = \"-\"\n ns = \"-\" \n for a in r:\n if a[0] != \"Genome\":\n\t print X, a\n seg_nam = a[0].split(\"|\")[1]\n a.insert(0,y + \".\" + seg[seg_nam]) \n all_annot.append(a) \n\t if a[1].split(\"|\")[1] == \"HA\": ha = a[-1]\n if a[1].split(\"|\")[1] == \"NA\": na = a[-1]\n\t if a[1].split(\"|\")[1] == \"MP\": mp = a[-1]\n if a[1].split(\"|\")[1] == \"PB2\": pb2 = a[-1]\t\n\t if a[1].split(\"|\")[1] == \"PB1\": pb1 = a[-1]\n if a[1].split(\"|\")[1] == \"PA\": pa = a[-1]\n\t if a[1].split(\"|\")[1] == \"NP\": np = a[-1]\n if a[1].split(\"|\")[1] == \"NS\": ns = a[-1] \n else: annot_header = a \n assembled_cons.append([y, a[1].split(\"|\")[0], ha, na, mp, pb2, pb1, pa, np, ns]) \t\n\n with open(res_dir + '/' + sub_type + '-ConsensusDetail.csv', 'wb') as f:\n writer = csv.writer(f)\n annot_header.insert(0,\"Sample Id\")\n all_annot.insert(0,annot_header)\n writer.writerows(all_annot) \n \n with open(res_dir + '/' + sub_type + '-ConsensusSummary.csv', 'wb') as f:\n writer = csv.writer(f)\n writer.writerows(assembled_cons) \n\n\n# Merge all SNPs called...\nmerge_snps = []\nfor sub_type in subtypes:\n for x in glob.glob(res_dir + \"/Snps/\" + sub_type + \"/*.vcf\"):\n X = x.split(\"/\")\n y = X[-1].replace(\"-genome-snps.vcf\", \"\")\n with open(x, 'rb') as csvfile:\n r = csv.reader(csvfile, delimiter=\"\\t\")\n for s in r:\n\tif not s[0].startswith(\"#\"):\n\t print s\n seg_nam = s[0].split(\"|\")[1]\n s.insert(0, y + \".\" + seg[seg_nam])\n\t merge_snps.append(s)\n\n with open(res_dir + '/' + sub_type + '-SNPs.csv', 'wb') as f:\n writer = csv.writer(f)\n merge_snps.insert(0, [\"Sample Id\", \"Sample Name\", \"POS\",\"ID\",\"REF\",\"ALT\", \"QUAL\", \"FILTER\", \"INFO\"])\n writer.writerows(merge_snps) \n\n\n\n\n\n\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
def test_logsources_model(self):
"""
    Check that the security log source's model matches the one associated with it
Returns:
"""
log_source = LogSources.objects.get(Model="iptables v1.4.21")
self.assertEqual(log_source.get_model(), "iptables v1.4.21")
|
normal
|
{
"blob_id": "c645461effe288a1959b783473d62ff99ca29547",
"index": 8746,
"step-1": "<mask token>\n",
"step-2": "def test_logsources_model(self):\n \"\"\"\n Comprobacion de que el modelo de la fuente de seguridad coincide con su asociado\n Returns:\n\n \"\"\"\n log_source = LogSources.objects.get(Model='iptables v1.4.21')\n self.assertEqual(log_source.get_model(), 'iptables v1.4.21')\n",
"step-3": "def test_logsources_model(self):\n \"\"\"\n Comprobacion de que el modelo de la fuente de seguridad coincide con su asociado\n Returns:\n\n \"\"\"\n log_source = LogSources.objects.get(Model=\"iptables v1.4.21\")\n self.assertEqual(log_source.get_model(), \"iptables v1.4.21\")\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
#encoding=utf-8
import pytest
from frame_project.实战2.main_page import MainPage
class TestMian:
def test_mian(self):
MainPage().goto_marketpage().goto_search().search()
if __name__ == '__main__':
pytest.main(['test_case.py','-s','-v'])
|
normal
|
{
"blob_id": "e1751cc6f76f56e62cd02d61db65f1c27a4ff1b9",
"index": 7351,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass TestMian:\n\n def test_mian(self):\n MainPage().goto_marketpage().goto_search().search()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass TestMian:\n\n def test_mian(self):\n MainPage().goto_marketpage().goto_search().search()\n\n\nif __name__ == '__main__':\n pytest.main(['test_case.py', '-s', '-v'])\n",
"step-4": "import pytest\nfrom frame_project.实战2.main_page import MainPage\n\n\nclass TestMian:\n\n def test_mian(self):\n MainPage().goto_marketpage().goto_search().search()\n\n\nif __name__ == '__main__':\n pytest.main(['test_case.py', '-s', '-v'])\n",
"step-5": "#encoding=utf-8\nimport pytest\n\nfrom frame_project.实战2.main_page import MainPage\n\n\nclass TestMian:\n def test_mian(self):\n MainPage().goto_marketpage().goto_search().search()\n\nif __name__ == '__main__':\n pytest.main(['test_case.py','-s','-v'])\n",
"step-ids": [
0,
2,
3,
4,
5
]
}
|
[
0,
2,
3,
4,
5
] |
import json
import os
import sys
"""
Course: cmps 4883
Assignment: A03
Date: 2/10/19
Github username: acdczlc
Repo url: https://github.com/acdczlc/4883-SWTools-Conley
Name: Zac Conley
Description:
    Calculates all the stats needed to answer the assignment questions
"""
##############################################################
# MostTeams(dict of off and def players)
# gets player who played for most teams
#
# Params:
# dict of players
# Returns:
# player with most teams
def MostTeams(OffAndDef):
most=[]
count=0 # set comparison
for playerid, playerdata in OffAndDef.items():
if(playerdata['name']!=''): #only get real players
if(len(playerdata['Teams'])>count):
count=len(playerdata['Teams']) #get count
most=[[playerdata['name'],len(playerdata['Teams'])]] # replace with player
elif(len(playerdata['Teams'])==count):
most.append([playerdata['name'],len(playerdata['Teams'])]) # add multiple teams
return most
##############################################################
# MostTeamsOneYear(dict of off and def players)
# gets player who played for most teams in one year
#
# Params:
# dict of players
# Returns:
# player with most teams
def MostTeamsOneYear(OffAndDef):
teams={}
maximum={}
count=0
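    # count holds the highest number of teams seen for any player in a single year so far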
for playerid, playerdata in OffAndDef.items():
if(playerdata['name']!=''):
for years in playerdata: #avoids all keys except years
if(years!='Drops' and years!='NegRushYards' and years!='NegRush' and years!='Teams' and years!='PassForLoss' and years!="name"):
try: #try block to avoid nonplayers
if(len(playerdata[years])>count): # if player has most teams so far
if((len(playerdata[years]) not in teams.keys())):
teams.clear() # delete all previous players
teams[len(playerdata[years])]={}
teams[len(playerdata[years])][playerdata['name']]=years
count=len(playerdata[years])
elif(len(playerdata[years])==count): #multiple players have the same number of teams
                            teams[len(playerdata[years])][playerdata['name']]=years
                    except:
                        pass
return teams
##############################################################
# NegativeRushingYards(dict of off and def players)
# gets player with most negative rushing yards
#
# Params:
# dict of players
# Returns:
# player with most negative rushing yards
def NegativeRushingYards(OffAndDef):
NegRushYds=[]
yds=0
for playerid, playerdata in OffAndDef.items():
if(playerdata['NegRushYards']<yds):
yds=playerdata['NegRushYards']
NegRushYds=[[playerdata['name'],playerdata['NegRushYards']]]
elif(playerdata['NegRushYards']==yds):
NegRushYds.append([playerdata['name'],playerdata['NegRushYards']])
return NegRushYds
##############################################################
# NegativeRushes(dict of off and def players)
# gets player with most negative rushes
#
# Params:
# dict of players
# Returns:
# player with most negative rushes
def NegativeRushes(OffAndDef):
rushes=[]
att=0 #attempts
for player in OffAndDef:
if(OffAndDef[player]['NegRush']>att):
att=OffAndDef[player]['NegRush']
rushes=[[OffAndDef[player]['name'],OffAndDef[player]['NegRush']]]
elif(OffAndDef[player]['NegRush']==att):
rushes.append([OffAndDef[player]['name'],OffAndDef[player]['NegRush']])
return rushes
##############################################################
# MostPassForLoss(dict of off and def players)
# gets player with most passes for a loss
#
# Params:
# dict of players
# Returns:
#     player with most passes for a loss
def MostPassForLoss(OffAndDef):
PassForLoss=[]
att=0 #attempts
for player in OffAndDef:
if(OffAndDef[player]['PassForLoss']>att):
att=OffAndDef[player]['PassForLoss']
PassForLoss=[[OffAndDef[player]['name'],OffAndDef[player]['PassForLoss']]]
elif(OffAndDef[player]['PassForLoss']==att):
PassForLoss.append([OffAndDef[player]['name'],OffAndDef[player]['PassForLoss']])
return PassForLoss
##############################################################
# MostPenalties(dict of team penalties)
# gets team with most penalties
#
# Params:
# dict of teams
# Returns:
#     team with most penalties
def MostPenalties(penalties):
pens=[]
num=0
for teamname,teamdata in penalties.items():
if(teamdata['Penalties']>num):
num=teamdata['Penalties']
pens=[[teamname,teamdata['Penalties']]]
elif (teamdata['Penalties']==num):
pens.append([teamname,teamdata['Penalties']])
return pens
##############################################################
# TeamPenaltyYards(dict of team penalties)
# gets team with most penalty yards
#
# Params:
# dict of teams
# Returns:
# team with most penalty yards
def TeamPenaltyYards(penalties):
pens=[]
num=0
for teamname,teamdata in penalties.items():
if(teamdata['PenaltyYards']>num):
num=teamdata['PenaltyYards']
pens=[[teamname,teamdata['PenaltyYards']]]
elif (teamdata['PenaltyYards']==num):
pens.append([teamname,teamdata['PenaltyYards']])
return pens
##############################################################
# PenaltyWins(dict of team penalties)
# shows correlation between penalties and win/loss record
# 
# Params: 
#     dict of teams
# Returns: 
#     records of the most and least penalized teams
def PenaltyWins(penalties):
x=MostPenalties(penalties) #calls function to get most penalized team
mostPenalized=[]
for temp in x:
        mostPenalized.append(temp[0])
least=penalties[mostPenalized[0]]['Penalties']
mostandleast=[[mostPenalized[0],penalties[mostPenalized[0]]['Wins'],penalties[mostPenalized[0]]['Losses']]] # sets most penalized record
leastTeam=[]
for teamname, teamdata in penalties.items():
if(teamdata['Penalties']<least):
least=teamdata['Penalties']
leastTeam=[[teamname,teamdata['Wins'],teamdata['Losses']]]
elif (teamdata['Penalties']==least):
leastTeam.append([teamname,teamdata['Wins'],teamdata['Losses']])
mostandleast.append(leastTeam[0]) #adds team and record to list at end
return mostandleast
##############################################################
# AverageNumberOfPlays()
# shows average number of plays
#
# Params:
# none
# Returns:
# avg number of plays
def AverageNumberOfPlays():
games=0
plays=0
for filename in os.listdir(os.path.dirname(os.path.abspath(__file__))+'/stats'): # sets path to all stats
with open(os.path.dirname(os.path.abspath(__file__))+"/stats/"+filename,"r") as json_file:
try: #gets all stats and stores each game in a dict
data=json.load(json_file)
except:
pass
else:
for gameid, gamedata in data.items():
if(gameid!="nextupdate"):
games+=1 #increment number of games
for driveid, drivedata in gamedata['drives'].items():
if(driveid!="crntdrv"):
plays+=drivedata['numplays'] #increment number of plays
avgplays=plays/games
return avgplays
##############################################################
# LongestFG(dict of fgs)
# longest field goal
#
# Params:
# dict of fgs
# Returns:
# longest field goal and kicker
def LongestFG(fg):
fgs=[]
length=0 #longest fg
for playerid,playerdata in fg.items():
if(playerdata['Long']>length):
length=playerdata['Long']
fgs=[[playerdata['Name'],playerdata['Long']]]
elif (playerdata['Long']==length):
fgs.append([playerdata['Name'],playerdata['Long']])
return fgs
##############################################################
# MostFG(dict of fgs)
# most made field goals
#
# Params:
# dict of fgs
# Returns:
# most made field goals and kicker
def MostFG(fg):
fgs=[]
count=0 #sets counter to 0
for playerid,playerdata in fg.items():
if(playerdata['FG']>count): #if largest number of fg so far
count=playerdata['FG']
fgs=[[playerdata['Name'],playerdata['FG']]]
elif (playerdata['FG']==count): #if same number of fg
fgs.append([playerdata['Name'],playerdata['FG']])
return fgs
##############################################################
# MostMFG(dict of fgs)
# most missed field goals
#
# Params:
# dict of fgs
# Returns:
# most missed field goals and kicker
def MostMFG(fg):
fgs=[]
count=0 #set counter to 0
for playerid,playerdata in fg.items():
if(playerdata['MFG']>count): #if most misses so far
count=playerdata['MFG']
fgs=[[playerdata['Name'],playerdata['MFG']]]
elif (playerdata['MFG']==count): #if same as most misses
fgs.append([playerdata['Name'],playerdata['MFG']])
return fgs
##############################################################
# MostDrops(dict of players)
# most drops
#
# Params:
# dict of players
# Returns:
# most drops
def MostDrops(OffAndDef):
drops=[]
count=0 #set drops to 0
for player in OffAndDef:
if(OffAndDef[player]['Drops']>count):
count=OffAndDef[player]['Drops']
drops=[[OffAndDef[player]['name'],OffAndDef[player]['Drops']]]
elif(OffAndDef[player]['Drops']==count):
drops.append([OffAndDef[player]['name'],OffAndDef[player]['Drops']])
return drops
path= os.path.dirname(os.path.abspath(__file__)) #set path to current location
f=open(path+'/OffAndDef.json','r') #open separated files
OffAndDef=json.load(f)
f.close()
f=open(path+'/Penalties.json','r')
penalties=json.load(f)
f.close()
f=open(path+'/FG.json','r')
fg=json.load(f)
f.close()
print("\n")
print("Name: Zac Conley")
print("Assignment: A03 - Nfl Stats")
print("Date: 2/10/19")
print("==================================================================================")
print("Question 1: Find the player(s) that played for the most teams.")
playerlist=MostTeams(OffAndDef)
for p in playerlist:
print(str(p[0]) + ": "+ str(p[1]) +" teams\n")
print("==================================================================================")
print("Question 2: Find the player(s) that played for multiple teams in one year.")
ans=MostTeamsOneYear(OffAndDef)
count=0
for numteams in ans.items():
for player in numteams[1].items():
print(player[1]+": " +player[0]+" "+str(numteams[0])+" teams." )
print("")
print("==================================================================================")
print("Question 3: Find the player(s) that had the most yards rushed for a loss.")
ans=NegativeRushingYards(OffAndDef)
for player in ans:
print(player[0]+": "+str(player[1])+" rushing yards.\n")
print("==================================================================================")
print("Question 4: Find the player(s) that had the most rushes for a loss.")
ans=NegativeRushes(OffAndDef)
for player in ans:
print(player[0]+": "+str(player[1])+" negative rushes.\n")
print("==================================================================================")
print("Question 5: Find the player(s) with the most number of passes for a loss.")
ans=MostPassForLoss(OffAndDef)
for player in ans:
print(player[0]+": "+str(player[1])+" negative passes.\n")
temp=[]
print("==================================================================================")
print("Question 6: Find the team with the most penalties.")
ans=MostPenalties(penalties)
for team in ans:
print(str(team[0])+" had "+str(team[1])+" penalties.\n")
print("==================================================================================")
print("Question 7: Find the team with the most yards in penalties.")
ans=TeamPenaltyYards(penalties)
for team in ans:
print(team[0]+": "+str(int(team[1]))+" penalty yards.\n")
print("==================================================================================")
print("Question 8: Find the correlation between most penalized teams and games won / lost.")
ans=PenaltyWins(penalties)
print("Most Penalties: "+ans[0][0]+": "+str(ans[0][1]) +"-" +str(ans[0][2]))
print("Least Penalties: "+ans[1][0]+" : "+str(ans[1][1])+"-" +str(ans[1][2])+"\n")
print("==================================================================================")
print("Question 9: Average number of plays in a game. (This may take up to a minute.)")
ans=AverageNumberOfPlays()
print("On average, there are " +str(ans) +" plays each game. \n")
print("==================================================================================")
print("Question 10: Longest field goal.")
ans=LongestFG(fg)
for player in ans:
print(player[0]+": "+str(player[1])+" yards.\n")
print("==================================================================================")
print("Question 11: Most field goals.")
ans=MostFG(fg)
for player in ans:
print(player[0]+": "+str(player[1])+" FGs.\n")
print("==================================================================================")
print("Question 12: Most missed field goals.")
ans=MostMFG(fg)
for player in ans:
print(player[0]+": "+str(player[1])+" missed FGs.\n")
print("==================================================================================")
print("Question 13: Most dropped passes.")
ans=MostDrops(OffAndDef)
for player in ans:
print(player[0]+": "+str(player[1])+" drops.")
|
normal
|
{
"blob_id": "2a4f57cd0fc1c50cba06c285849432c6f71f28e2",
"index": 2642,
"step-1": "<mask token>\n\n\ndef MostTeams(OffAndDef):\n most = []\n count = 0\n for playerid, playerdata in OffAndDef.items():\n if playerdata['name'] != '':\n if len(playerdata['Teams']) > count:\n count = len(playerdata['Teams'])\n most = [[playerdata['name'], len(playerdata['Teams'])]]\n elif len(playerdata['Teams']) == count:\n most.append([playerdata['name'], len(playerdata['Teams'])])\n return most\n\n\ndef MostTeamsOneYear(OffAndDef):\n teams = {}\n maximum = {}\n count = 0\n for playerid, playerdata in OffAndDef.items():\n if playerdata['name'] != '':\n for years in playerdata:\n if (years != 'Drops' and years != 'NegRushYards' and years !=\n 'NegRush' and years != 'Teams' and years !=\n 'PassForLoss' and years != 'name'):\n try:\n if len(playerdata[years]) > count:\n if len(playerdata[years]) not in teams.keys():\n teams.clear()\n teams[len(playerdata[years])] = {}\n teams[len(playerdata[years])][playerdata['name']\n ] = years\n count = len(playerdata[years])\n elif len(playerdata[years]) == count:\n teams[len(playerdata[years])].append(playerdata\n ['name'], years)\n except:\n pass\n return teams\n\n\n<mask token>\n\n\ndef MostPassForLoss(OffAndDef):\n PassForLoss = []\n att = 0\n for player in OffAndDef:\n if OffAndDef[player]['PassForLoss'] > att:\n att = OffAndDef[player]['PassForLoss']\n PassForLoss = [[OffAndDef[player]['name'], OffAndDef[player][\n 'PassForLoss']]]\n elif OffAndDef[player]['PassForLoss'] == att:\n PassForLoss.append([OffAndDef[player]['name'], OffAndDef[player\n ]['PassForLoss']])\n return PassForLoss\n\n\n<mask token>\n\n\ndef TeamPenaltyYards(penalties):\n pens = []\n num = 0\n for teamname, teamdata in penalties.items():\n if teamdata['PenaltyYards'] > num:\n num = teamdata['PenaltyYards']\n pens = [[teamname, teamdata['PenaltyYards']]]\n elif teamdata['PenaltyYards'] == num:\n pens.append([teamname, teamdata['PenaltyYards']])\n return pens\n\n\n<mask token>\n\n\ndef AverageNumberOfPlays():\n games = 0\n plays = 0\n for filename in os.listdir(os.path.dirname(os.path.abspath(__file__)) +\n '/stats'):\n with open(os.path.dirname(os.path.abspath(__file__)) + '/stats/' +\n filename, 'r') as json_file:\n try:\n data = json.load(json_file)\n except:\n pass\n else:\n for gameid, gamedata in data.items():\n if gameid != 'nextupdate':\n games += 1\n for driveid, drivedata in gamedata['drives'].items():\n if driveid != 'crntdrv':\n plays += drivedata['numplays']\n avgplays = plays / games\n return avgplays\n\n\ndef LongestFG(fg):\n fgs = []\n length = 0\n for playerid, playerdata in fg.items():\n if playerdata['Long'] > length:\n length = playerdata['Long']\n fgs = [[playerdata['Name'], playerdata['Long']]]\n elif playerdata['Long'] == length:\n fgs.append([playerdata['Name'], playerdata['Long']])\n return fgs\n\n\ndef MostFG(fg):\n fgs = []\n count = 0\n for playerid, playerdata in fg.items():\n if playerdata['FG'] > count:\n count = playerdata['FG']\n fgs = [[playerdata['Name'], playerdata['FG']]]\n elif playerdata['FG'] == count:\n fgs.append([playerdata['Name'], playerdata['FG']])\n return fgs\n\n\ndef MostMFG(fg):\n fgs = []\n count = 0\n for playerid, playerdata in fg.items():\n if playerdata['MFG'] > count:\n count = playerdata['MFG']\n fgs = [[playerdata['Name'], playerdata['MFG']]]\n elif playerdata['MFG'] == count:\n fgs.append([playerdata['Name'], playerdata['MFG']])\n return fgs\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef MostTeams(OffAndDef):\n most = []\n count = 0\n for playerid, playerdata in OffAndDef.items():\n if playerdata['name'] != '':\n if len(playerdata['Teams']) > count:\n count = len(playerdata['Teams'])\n most = [[playerdata['name'], len(playerdata['Teams'])]]\n elif len(playerdata['Teams']) == count:\n most.append([playerdata['name'], len(playerdata['Teams'])])\n return most\n\n\ndef MostTeamsOneYear(OffAndDef):\n teams = {}\n maximum = {}\n count = 0\n for playerid, playerdata in OffAndDef.items():\n if playerdata['name'] != '':\n for years in playerdata:\n if (years != 'Drops' and years != 'NegRushYards' and years !=\n 'NegRush' and years != 'Teams' and years !=\n 'PassForLoss' and years != 'name'):\n try:\n if len(playerdata[years]) > count:\n if len(playerdata[years]) not in teams.keys():\n teams.clear()\n teams[len(playerdata[years])] = {}\n teams[len(playerdata[years])][playerdata['name']\n ] = years\n count = len(playerdata[years])\n elif len(playerdata[years]) == count:\n teams[len(playerdata[years])].append(playerdata\n ['name'], years)\n except:\n pass\n return teams\n\n\n<mask token>\n\n\ndef MostPassForLoss(OffAndDef):\n PassForLoss = []\n att = 0\n for player in OffAndDef:\n if OffAndDef[player]['PassForLoss'] > att:\n att = OffAndDef[player]['PassForLoss']\n PassForLoss = [[OffAndDef[player]['name'], OffAndDef[player][\n 'PassForLoss']]]\n elif OffAndDef[player]['PassForLoss'] == att:\n PassForLoss.append([OffAndDef[player]['name'], OffAndDef[player\n ]['PassForLoss']])\n return PassForLoss\n\n\n<mask token>\n\n\ndef TeamPenaltyYards(penalties):\n pens = []\n num = 0\n for teamname, teamdata in penalties.items():\n if teamdata['PenaltyYards'] > num:\n num = teamdata['PenaltyYards']\n pens = [[teamname, teamdata['PenaltyYards']]]\n elif teamdata['PenaltyYards'] == num:\n pens.append([teamname, teamdata['PenaltyYards']])\n return pens\n\n\ndef PenaltyWins(penalties):\n x = MostPenalties(penalties)\n mostPenalized = []\n for temp in x:\n mostPenalized.append(team[0])\n least = penalties[mostPenalized[0]]['Penalties']\n mostandleast = [[mostPenalized[0], penalties[mostPenalized[0]]['Wins'],\n penalties[mostPenalized[0]]['Losses']]]\n leastTeam = []\n for teamname, teamdata in penalties.items():\n if teamdata['Penalties'] < least:\n least = teamdata['Penalties']\n leastTeam = [[teamname, teamdata['Wins'], teamdata['Losses']]]\n elif teamdata['Penalties'] == least:\n leastTeam.append([teamname, teamdata['Wins'], teamdata['Losses']])\n mostandleast.append(leastTeam[0])\n return mostandleast\n\n\ndef AverageNumberOfPlays():\n games = 0\n plays = 0\n for filename in os.listdir(os.path.dirname(os.path.abspath(__file__)) +\n '/stats'):\n with open(os.path.dirname(os.path.abspath(__file__)) + '/stats/' +\n filename, 'r') as json_file:\n try:\n data = json.load(json_file)\n except:\n pass\n else:\n for gameid, gamedata in data.items():\n if gameid != 'nextupdate':\n games += 1\n for driveid, drivedata in gamedata['drives'].items():\n if driveid != 'crntdrv':\n plays += drivedata['numplays']\n avgplays = plays / games\n return avgplays\n\n\ndef LongestFG(fg):\n fgs = []\n length = 0\n for playerid, playerdata in fg.items():\n if playerdata['Long'] > length:\n length = playerdata['Long']\n fgs = [[playerdata['Name'], playerdata['Long']]]\n elif playerdata['Long'] == length:\n fgs.append([playerdata['Name'], playerdata['Long']])\n return fgs\n\n\ndef MostFG(fg):\n fgs = []\n count = 0\n for playerid, playerdata in fg.items():\n if playerdata['FG'] > count:\n 
count = playerdata['FG']\n fgs = [[playerdata['Name'], playerdata['FG']]]\n elif playerdata['FG'] == count:\n fgs.append([playerdata['Name'], playerdata['FG']])\n return fgs\n\n\ndef MostMFG(fg):\n fgs = []\n count = 0\n for playerid, playerdata in fg.items():\n if playerdata['MFG'] > count:\n count = playerdata['MFG']\n fgs = [[playerdata['Name'], playerdata['MFG']]]\n elif playerdata['MFG'] == count:\n fgs.append([playerdata['Name'], playerdata['MFG']])\n return fgs\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef MostTeams(OffAndDef):\n most = []\n count = 0\n for playerid, playerdata in OffAndDef.items():\n if playerdata['name'] != '':\n if len(playerdata['Teams']) > count:\n count = len(playerdata['Teams'])\n most = [[playerdata['name'], len(playerdata['Teams'])]]\n elif len(playerdata['Teams']) == count:\n most.append([playerdata['name'], len(playerdata['Teams'])])\n return most\n\n\ndef MostTeamsOneYear(OffAndDef):\n teams = {}\n maximum = {}\n count = 0\n for playerid, playerdata in OffAndDef.items():\n if playerdata['name'] != '':\n for years in playerdata:\n if (years != 'Drops' and years != 'NegRushYards' and years !=\n 'NegRush' and years != 'Teams' and years !=\n 'PassForLoss' and years != 'name'):\n try:\n if len(playerdata[years]) > count:\n if len(playerdata[years]) not in teams.keys():\n teams.clear()\n teams[len(playerdata[years])] = {}\n teams[len(playerdata[years])][playerdata['name']\n ] = years\n count = len(playerdata[years])\n elif len(playerdata[years]) == count:\n teams[len(playerdata[years])].append(playerdata\n ['name'], years)\n except:\n pass\n return teams\n\n\ndef NegativeRushingYards(OffAndDef):\n NegRushYds = []\n yds = 0\n for playerid, playerdata in OffAndDef.items():\n if playerdata['NegRushYards'] < yds:\n yds = playerdata['NegRushYards']\n NegRushYds = [[playerdata['name'], playerdata['NegRushYards']]]\n elif playerdata['NegRushYards'] == yds:\n NegRushYds.append([playerdata['name'], playerdata['NegRushYards']])\n return NegRushYds\n\n\n<mask token>\n\n\ndef MostPassForLoss(OffAndDef):\n PassForLoss = []\n att = 0\n for player in OffAndDef:\n if OffAndDef[player]['PassForLoss'] > att:\n att = OffAndDef[player]['PassForLoss']\n PassForLoss = [[OffAndDef[player]['name'], OffAndDef[player][\n 'PassForLoss']]]\n elif OffAndDef[player]['PassForLoss'] == att:\n PassForLoss.append([OffAndDef[player]['name'], OffAndDef[player\n ]['PassForLoss']])\n return PassForLoss\n\n\n<mask token>\n\n\ndef TeamPenaltyYards(penalties):\n pens = []\n num = 0\n for teamname, teamdata in penalties.items():\n if teamdata['PenaltyYards'] > num:\n num = teamdata['PenaltyYards']\n pens = [[teamname, teamdata['PenaltyYards']]]\n elif teamdata['PenaltyYards'] == num:\n pens.append([teamname, teamdata['PenaltyYards']])\n return pens\n\n\ndef PenaltyWins(penalties):\n x = MostPenalties(penalties)\n mostPenalized = []\n for temp in x:\n mostPenalized.append(team[0])\n least = penalties[mostPenalized[0]]['Penalties']\n mostandleast = [[mostPenalized[0], penalties[mostPenalized[0]]['Wins'],\n penalties[mostPenalized[0]]['Losses']]]\n leastTeam = []\n for teamname, teamdata in penalties.items():\n if teamdata['Penalties'] < least:\n least = teamdata['Penalties']\n leastTeam = [[teamname, teamdata['Wins'], teamdata['Losses']]]\n elif teamdata['Penalties'] == least:\n leastTeam.append([teamname, teamdata['Wins'], teamdata['Losses']])\n mostandleast.append(leastTeam[0])\n return mostandleast\n\n\ndef AverageNumberOfPlays():\n games = 0\n plays = 0\n for filename in os.listdir(os.path.dirname(os.path.abspath(__file__)) +\n '/stats'):\n with open(os.path.dirname(os.path.abspath(__file__)) + '/stats/' +\n filename, 'r') as json_file:\n try:\n data = json.load(json_file)\n except:\n pass\n else:\n for gameid, gamedata in data.items():\n if gameid != 'nextupdate':\n games += 1\n for driveid, drivedata in gamedata['drives'].items():\n if driveid != 'crntdrv':\n plays += drivedata['numplays']\n avgplays = plays / games\n return avgplays\n\n\ndef LongestFG(fg):\n fgs = []\n 
length = 0\n for playerid, playerdata in fg.items():\n if playerdata['Long'] > length:\n length = playerdata['Long']\n fgs = [[playerdata['Name'], playerdata['Long']]]\n elif playerdata['Long'] == length:\n fgs.append([playerdata['Name'], playerdata['Long']])\n return fgs\n\n\ndef MostFG(fg):\n fgs = []\n count = 0\n for playerid, playerdata in fg.items():\n if playerdata['FG'] > count:\n count = playerdata['FG']\n fgs = [[playerdata['Name'], playerdata['FG']]]\n elif playerdata['FG'] == count:\n fgs.append([playerdata['Name'], playerdata['FG']])\n return fgs\n\n\ndef MostMFG(fg):\n fgs = []\n count = 0\n for playerid, playerdata in fg.items():\n if playerdata['MFG'] > count:\n count = playerdata['MFG']\n fgs = [[playerdata['Name'], playerdata['MFG']]]\n elif playerdata['MFG'] == count:\n fgs.append([playerdata['Name'], playerdata['MFG']])\n return fgs\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\ndef MostTeams(OffAndDef):\n most = []\n count = 0\n for playerid, playerdata in OffAndDef.items():\n if playerdata['name'] != '':\n if len(playerdata['Teams']) > count:\n count = len(playerdata['Teams'])\n most = [[playerdata['name'], len(playerdata['Teams'])]]\n elif len(playerdata['Teams']) == count:\n most.append([playerdata['name'], len(playerdata['Teams'])])\n return most\n\n\ndef MostTeamsOneYear(OffAndDef):\n teams = {}\n maximum = {}\n count = 0\n for playerid, playerdata in OffAndDef.items():\n if playerdata['name'] != '':\n for years in playerdata:\n if (years != 'Drops' and years != 'NegRushYards' and years !=\n 'NegRush' and years != 'Teams' and years !=\n 'PassForLoss' and years != 'name'):\n try:\n if len(playerdata[years]) > count:\n if len(playerdata[years]) not in teams.keys():\n teams.clear()\n teams[len(playerdata[years])] = {}\n teams[len(playerdata[years])][playerdata['name']\n ] = years\n count = len(playerdata[years])\n elif len(playerdata[years]) == count:\n teams[len(playerdata[years])].append(playerdata\n ['name'], years)\n except:\n pass\n return teams\n\n\ndef NegativeRushingYards(OffAndDef):\n NegRushYds = []\n yds = 0\n for playerid, playerdata in OffAndDef.items():\n if playerdata['NegRushYards'] < yds:\n yds = playerdata['NegRushYards']\n NegRushYds = [[playerdata['name'], playerdata['NegRushYards']]]\n elif playerdata['NegRushYards'] == yds:\n NegRushYds.append([playerdata['name'], playerdata['NegRushYards']])\n return NegRushYds\n\n\ndef NegativeRushes(OffAndDef):\n rushes = []\n att = 0\n for player in OffAndDef:\n if OffAndDef[player]['NegRush'] > att:\n att = OffAndDef[player]['NegRush']\n rushes = [[OffAndDef[player]['name'], OffAndDef[player]['NegRush']]\n ]\n elif OffAndDef[player]['NegRush'] == att:\n rushes.append([OffAndDef[player]['name'], OffAndDef[player][\n 'NegRush']])\n return rushes\n\n\ndef MostPassForLoss(OffAndDef):\n PassForLoss = []\n att = 0\n for player in OffAndDef:\n if OffAndDef[player]['PassForLoss'] > att:\n att = OffAndDef[player]['PassForLoss']\n PassForLoss = [[OffAndDef[player]['name'], OffAndDef[player][\n 'PassForLoss']]]\n elif OffAndDef[player]['PassForLoss'] == att:\n PassForLoss.append([OffAndDef[player]['name'], OffAndDef[player\n ]['PassForLoss']])\n return PassForLoss\n\n\ndef MostPenalties(penalties):\n pens = []\n num = 0\n for teamname, teamdata in penalties.items():\n if teamdata['Penalties'] > num:\n num = teamdata['Penalties']\n pens = [[teamname, teamdata['Penalties']]]\n elif teamdata['Penalties'] == num:\n pens.append([teamname, teamdata['Penalties']])\n return pens\n\n\ndef TeamPenaltyYards(penalties):\n pens = []\n num = 0\n for teamname, teamdata in penalties.items():\n if teamdata['PenaltyYards'] > num:\n num = teamdata['PenaltyYards']\n pens = [[teamname, teamdata['PenaltyYards']]]\n elif teamdata['PenaltyYards'] == num:\n pens.append([teamname, teamdata['PenaltyYards']])\n return pens\n\n\ndef PenaltyWins(penalties):\n x = MostPenalties(penalties)\n mostPenalized = []\n for temp in x:\n mostPenalized.append(team[0])\n least = penalties[mostPenalized[0]]['Penalties']\n mostandleast = [[mostPenalized[0], penalties[mostPenalized[0]]['Wins'],\n penalties[mostPenalized[0]]['Losses']]]\n leastTeam = []\n for teamname, teamdata in penalties.items():\n if teamdata['Penalties'] < least:\n least = teamdata['Penalties']\n leastTeam = [[teamname, teamdata['Wins'], teamdata['Losses']]]\n elif teamdata['Penalties'] == least:\n leastTeam.append([teamname, teamdata['Wins'], 
teamdata['Losses']])\n mostandleast.append(leastTeam[0])\n return mostandleast\n\n\ndef AverageNumberOfPlays():\n games = 0\n plays = 0\n for filename in os.listdir(os.path.dirname(os.path.abspath(__file__)) +\n '/stats'):\n with open(os.path.dirname(os.path.abspath(__file__)) + '/stats/' +\n filename, 'r') as json_file:\n try:\n data = json.load(json_file)\n except:\n pass\n else:\n for gameid, gamedata in data.items():\n if gameid != 'nextupdate':\n games += 1\n for driveid, drivedata in gamedata['drives'].items():\n if driveid != 'crntdrv':\n plays += drivedata['numplays']\n avgplays = plays / games\n return avgplays\n\n\ndef LongestFG(fg):\n fgs = []\n length = 0\n for playerid, playerdata in fg.items():\n if playerdata['Long'] > length:\n length = playerdata['Long']\n fgs = [[playerdata['Name'], playerdata['Long']]]\n elif playerdata['Long'] == length:\n fgs.append([playerdata['Name'], playerdata['Long']])\n return fgs\n\n\ndef MostFG(fg):\n fgs = []\n count = 0\n for playerid, playerdata in fg.items():\n if playerdata['FG'] > count:\n count = playerdata['FG']\n fgs = [[playerdata['Name'], playerdata['FG']]]\n elif playerdata['FG'] == count:\n fgs.append([playerdata['Name'], playerdata['FG']])\n return fgs\n\n\ndef MostMFG(fg):\n fgs = []\n count = 0\n for playerid, playerdata in fg.items():\n if playerdata['MFG'] > count:\n count = playerdata['MFG']\n fgs = [[playerdata['Name'], playerdata['MFG']]]\n elif playerdata['MFG'] == count:\n fgs.append([playerdata['Name'], playerdata['MFG']])\n return fgs\n\n\ndef MostDrops(OffAndDef):\n drops = []\n count = 0\n for player in OffAndDef:\n if OffAndDef[player]['Drops'] > count:\n count = OffAndDef[player]['Drops']\n drops = [[OffAndDef[player]['name'], OffAndDef[player]['Drops']]]\n elif OffAndDef[player]['Drops'] == count:\n drops.append([OffAndDef[player]['name'], OffAndDef[player][\n 'Drops']])\n return drops\n\n\npath = os.path.dirname(os.path.abspath(__file__))\nf = open(path + '/OffAndDef.json', 'r')\nOffAndDef = json.load(f)\nf.close()\nf = open(path + '/Penalties.json', 'r')\npenalties = json.load(f)\nf.close()\nf = open(path + '/FG.json', 'r')\nfg = json.load(f)\nf.close()\nprint('\\n')\nprint('Name: Zac Conley')\nprint('Assignment: A03 - Nfl Stats')\nprint('Date: 2/10/19')\nprint(\n '=================================================================================='\n )\nprint('Question 1: Find the player(s) that played for the most teams.')\nplayerlist = MostTeams(OffAndDef)\nfor p in playerlist:\n print(str(p[0]) + ': ' + str(p[1]) + ' teams\\n')\nprint(\n '=================================================================================='\n )\nprint(\n 'Question 2: Find the player(s) that played for multiple teams in one year.'\n )\nans = MostTeamsOneYear(OffAndDef)\ncount = 0\nfor numteams in ans.items():\n for player in numteams[1].items():\n print(player[1] + ': ' + player[0] + ' ' + str(numteams[0]) + ' teams.'\n )\nprint\nprint(\n '=================================================================================='\n )\nprint(\n 'Question 3: Find the player(s) that had the most yards rushed for a loss.'\n )\nans = NegativeRushingYards(OffAndDef)\nfor player in ans:\n print(player[0] + ': ' + str(player[1]) + ' rushing yards.\\n')\nprint(\n '=================================================================================='\n )\nprint('Question 4: Find the player(s) that had the most rushes for a loss.')\nans = NegativeRushes(OffAndDef)\nfor player in ans:\n print(player[0] + ': ' + str(player[1]) + ' negative 
rushes.\\n')\nprint(\n '=================================================================================='\n )\nprint(\n 'Question 5: Find the player(s) with the most number of passes for a loss.'\n )\nans = MostPassForLoss(OffAndDef)\nfor player in ans:\n print(player[0] + ': ' + str(player[1]) + ' negative passes.\\n')\ntemp = []\nprint(\n '=================================================================================='\n )\nprint('Question 6: Find the team with the most penalties.')\nans = MostPenalties(penalties)\nfor team in ans:\n print(str(team[0]) + ' had ' + str(team[1]) + ' penalties.\\n')\nprint(\n '=================================================================================='\n )\nprint('Question 7: Find the team with the most yards in penalties.')\nans = TeamPenaltyYards(penalties)\nfor team in ans:\n print(team[0] + ': ' + str(int(team[1])) + ' penalty yards.\\n')\nprint(\n '=================================================================================='\n )\nprint(\n 'Question 8: Find the correlation between most penalized teams and games won / lost.'\n )\nans = PenaltyWins(penalties)\nprint('Most Penalties: ' + ans[0][0] + ': ' + str(ans[0][1]) + '-' + str(\n ans[0][2]))\nprint('Least Penalties: ' + ans[1][0] + ' : ' + str(ans[1][1]) + '-' + str(\n ans[1][2]) + '\\n')\nprint(\n '=================================================================================='\n )\nprint(\n 'Question 9: Average number of plays in a game. (This may take up to a minute.)'\n )\nans = AverageNumberOfPlays()\nprint('On average, there are ' + str(ans) + ' plays each game. \\n')\nprint(\n '=================================================================================='\n )\nprint('Question 10: Longest field goal.')\nans = LongestFG(fg)\nfor player in ans:\n print(player[0] + ': ' + str(player[1]) + ' yards.\\n')\nprint(\n '=================================================================================='\n )\nprint('Question 11: Most field goals.')\nans = MostFG(fg)\nfor player in ans:\n print(player[0] + ': ' + str(player[1]) + ' FGs.\\n')\nprint(\n '=================================================================================='\n )\nprint('Question 12: Most missed field goals.')\nans = MostMFG(fg)\nfor player in ans:\n print(player[0] + ': ' + str(player[1]) + ' missed FGs.\\n')\nprint(\n '=================================================================================='\n )\nprint('Question 13: Most dropped passes.')\nans = MostDrops(OffAndDef)\nfor player in ans:\n print(player[0] + ': ' + str(player[1]) + ' drops.')\n",
"step-5": "import json\nimport os\nimport sys\n\"\"\"\nCourse: cmps 4883\nAssignemt: A03\nDate: 2/10/19\nGithub username: acdczlc\nRepo url: https://github.com/acdczlc/4883-SWTools-Conley\nName: Zac Conley\nDescription: \n Calculates all stats for questions about stats\n\n\"\"\"\n##############################################################\n# MostTeams(dict of off and def players)\n# gets player who played for most teams\n# \n# Params: \n# dict of players\n# Returns: \n# player with most teams\ndef MostTeams(OffAndDef):\n most=[]\n count=0 # set comparison\n for playerid, playerdata in OffAndDef.items():\n if(playerdata['name']!=''): #only get real players\n if(len(playerdata['Teams'])>count):\n count=len(playerdata['Teams']) #get count\n most=[[playerdata['name'],len(playerdata['Teams'])]] # replace with player\n elif(len(playerdata['Teams'])==count):\n most.append([playerdata['name'],len(playerdata['Teams'])]) # add multiple teams\n return most\n\n##############################################################\n# MostTeamsOneYear(dict of off and def players)\n# gets player who played for most teams in one year\n# \n# Params: \n# dict of players\n# Returns: \n# player with most teams\ndef MostTeamsOneYear(OffAndDef):\n teams={}\n maximum={}\n count=0\n for playerid, playerdata in OffAndDef.items():\n if(playerdata['name']!=''):\n for years in playerdata: #avoids all keys except years \n if(years!='Drops' and years!='NegRushYards' and years!='NegRush' and years!='Teams' and years!='PassForLoss' and years!=\"name\"):\n try: #try block to avoid nonplayers\n if(len(playerdata[years])>count): # if player has most teams so far\n if((len(playerdata[years]) not in teams.keys())): \n teams.clear() # delete all previous players\n teams[len(playerdata[years])]={}\n teams[len(playerdata[years])][playerdata['name']]=years\n count=len(playerdata[years])\n elif(len(playerdata[years])==count): #multiple players have the same number of teams\n teams[len(playerdata[years])].append(playerdata['name'],years)\n except:\n pass\n\n return teams\n##############################################################\n# NegativeRushingYards(dict of off and def players)\n# gets player with most negative rushing yards\n# \n# Params: \n# dict of players\n# Returns: \n# player with most negative rushing yards\ndef NegativeRushingYards(OffAndDef):\n NegRushYds=[]\n yds=0\n for playerid, playerdata in OffAndDef.items():\n if(playerdata['NegRushYards']<yds):\n yds=playerdata['NegRushYards']\n NegRushYds=[[playerdata['name'],playerdata['NegRushYards']]]\n elif(playerdata['NegRushYards']==yds):\n NegRushYds.append([playerdata['name'],playerdata['NegRushYards']])\n return NegRushYds\n##############################################################\n# NegativeRushes(dict of off and def players)\n# gets player with most negative rushes\n# \n# Params: \n# dict of players\n# Returns: \n# player with most negative rushes\ndef NegativeRushes(OffAndDef):\n rushes=[]\n att=0 #attempts\n for player in OffAndDef:\n if(OffAndDef[player]['NegRush']>att):\n att=OffAndDef[player]['NegRush']\n rushes=[[OffAndDef[player]['name'],OffAndDef[player]['NegRush']]]\n elif(OffAndDef[player]['NegRush']==att):\n rushes.append([OffAndDef[player]['name'],OffAndDef[player]['NegRush']])\n return rushes \n##############################################################\n# MostPassForLoss(dict of off and def players)\n# gets player with most negative rushes\n# \n# Params: \n# dict of players\n# Returns: \n# player with most negative rushes\ndef 
MostPassForLoss(OffAndDef):\n PassForLoss=[]\n att=0 #attempts\n for player in OffAndDef:\n if(OffAndDef[player]['PassForLoss']>att):\n att=OffAndDef[player]['PassForLoss']\n PassForLoss=[[OffAndDef[player]['name'],OffAndDef[player]['PassForLoss']]]\n elif(OffAndDef[player]['PassForLoss']==att):\n PassForLoss.append([OffAndDef[player]['name'],OffAndDef[player]['PassForLoss']])\n return PassForLoss \n\n##############################################################\n# MostPenalties(dict of team penalties)\n# gets team with most penalties\n# \n# Params: \n# dict of teams\n# Returns: \n# player with most negative rushes\ndef MostPenalties(penalties):\n pens=[]\n num=0\n for teamname,teamdata in penalties.items():\n if(teamdata['Penalties']>num):\n num=teamdata['Penalties']\n pens=[[teamname,teamdata['Penalties']]]\n elif (teamdata['Penalties']==num):\n pens.append([teamname,teamdata['Penalties']])\n return pens\n \n##############################################################\n# TeamPenaltyYards(dict of team penalties)\n# gets team with most penaltiy yards\n# \n# Params: \n# dict of teams\n# Returns: \n# team with most penalty yards\ndef TeamPenaltyYards(penalties):\n pens=[]\n num=0\n for teamname,teamdata in penalties.items():\n if(teamdata['PenaltyYards']>num):\n num=teamdata['PenaltyYards']\n pens=[[teamname,teamdata['PenaltyYards']]]\n elif (teamdata['PenaltyYards']==num):\n pens.append([teamname,teamdata['PenaltyYards']])\n return pens\n##############################################################\n# PenaltyWins(most penalized team,dict of team penalties)\n# shows correlation between penalty and record\n# \n# Params: \n# dict of teams, most penalized team\n# Returns: \n# team with most penaltys and least\ndef PenaltyWins(penalties):\n x=MostPenalties(penalties) #calls function to get most penalized team\n mostPenalized=[]\n for temp in x:\n mostPenalized.append(team[0])\n least=penalties[mostPenalized[0]]['Penalties']\n mostandleast=[[mostPenalized[0],penalties[mostPenalized[0]]['Wins'],penalties[mostPenalized[0]]['Losses']]] # sets most penalized record\n leastTeam=[]\n for teamname, teamdata in penalties.items():\n if(teamdata['Penalties']<least):\n least=teamdata['Penalties']\n leastTeam=[[teamname,teamdata['Wins'],teamdata['Losses']]]\n elif (teamdata['Penalties']==least):\n leastTeam.append([teamname,teamdata['Wins'],teamdata['Losses']])\n mostandleast.append(leastTeam[0]) #adds team and record to list at end\n return mostandleast\n\n##############################################################\n# AverageNumberOfPlays()\n# shows average number of plays\n# \n# Params: \n# none\n# Returns: \n# avg number of plays\ndef AverageNumberOfPlays():\n games=0\n plays=0\n for filename in os.listdir(os.path.dirname(os.path.abspath(__file__))+'/stats'): # sets path to all stats\n with open(os.path.dirname(os.path.abspath(__file__))+\"/stats/\"+filename,\"r\") as json_file:\n try: #gets all stats and stores each game in a dict\n data=json.load(json_file)\n except:\n pass\n else:\n for gameid, gamedata in data.items(): \n if(gameid!=\"nextupdate\"):\n games+=1 #increment number of games\n for driveid, drivedata in gamedata['drives'].items():\n if(driveid!=\"crntdrv\"):\n plays+=drivedata['numplays'] #increment number of plays\n avgplays=plays/games\n return avgplays\n##############################################################\n# LongestFG(dict of fgs)\n# longest field goal\n# \n# Params: \n# dict of fgs\n# Returns: \n# longest field goal and kicker\ndef LongestFG(fg):\n fgs=[]\n length=0 
#longest fg\n for playerid,playerdata in fg.items():\n if(playerdata['Long']>length):\n length=playerdata['Long']\n fgs=[[playerdata['Name'],playerdata['Long']]]\n elif (playerdata['Long']==length):\n fgs.append([playerdata['Name'],playerdata['Long']])\n return fgs\n##############################################################\n# MostFG(dict of fgs)\n# most made field goals\n# \n# Params: \n# dict of fgs\n# Returns: \n# most made field goals and kicker\ndef MostFG(fg):\n fgs=[]\n count=0 #sets counter to 0\n for playerid,playerdata in fg.items():\n if(playerdata['FG']>count): #if largest number of fg so far\n count=playerdata['FG']\n fgs=[[playerdata['Name'],playerdata['FG']]]\n elif (playerdata['FG']==count): #if same number of fg\n fgs.append([playerdata['Name'],playerdata['FG']])\n return fgs\n##############################################################\n# MostMFG(dict of fgs)\n# most missed field goals\n# \n# Params: \n# dict of fgs\n# Returns: \n# most missed field goals and kicker\ndef MostMFG(fg):\n fgs=[]\n count=0 #set counter to 0\n for playerid,playerdata in fg.items():\n if(playerdata['MFG']>count): #if most misses so far\n count=playerdata['MFG']\n fgs=[[playerdata['Name'],playerdata['MFG']]]\n elif (playerdata['MFG']==count): #if same as most misses\n fgs.append([playerdata['Name'],playerdata['MFG']])\n return fgs\n##############################################################\n# MostDrops(dict of players)\n# most drops\n# \n# Params: \n# dict of players\n# Returns: \n# most drops\ndef MostDrops(OffAndDef):\n drops=[] \n count=0 #set drops to 0\n for player in OffAndDef:\n if(OffAndDef[player]['Drops']>count):\n count=OffAndDef[player]['Drops']\n drops=[[OffAndDef[player]['name'],OffAndDef[player]['Drops']]]\n elif(OffAndDef[player]['Drops']==count):\n drops.append([OffAndDef[player]['name'],OffAndDef[player]['Drops']])\n return drops\n\npath= os.path.dirname(os.path.abspath(__file__)) #set path to current location\nf=open(path+'/OffAndDef.json','r') #open separated files\nOffAndDef=json.load(f)\nf.close()\nf=open(path+'/Penalties.json','r') \npenalties=json.load(f)\nf.close()\nf=open(path+'/FG.json','r')\nfg=json.load(f)\nf.close()\nprint(\"\\n\")\nprint(\"Name: Zac Conley\")\nprint(\"Assignment: A03 - Nfl Stats\")\nprint(\"Date: 2/10/19\")\nprint(\"==================================================================================\")\nprint(\"Question 1: Find the player(s) that played for the most teams.\")\nplayerlist=MostTeams(OffAndDef)\nfor p in playerlist:\n print(str(p[0]) + \": \"+ str(p[1]) +\" teams\\n\")\nprint(\"==================================================================================\")\nprint(\"Question 2: Find the player(s) that played for multiple teams in one year.\")\nans=MostTeamsOneYear(OffAndDef)\ncount=0\nfor numteams in ans.items():\n for player in numteams[1].items():\n print(player[1]+\": \" +player[0]+\" \"+str(numteams[0])+\" teams.\" )\nprint\nprint(\"==================================================================================\")\nprint(\"Question 3: Find the player(s) that had the most yards rushed for a loss.\")\nans=NegativeRushingYards(OffAndDef)\nfor player in ans:\n print(player[0]+\": \"+str(player[1])+\" rushing yards.\\n\")\nprint(\"==================================================================================\")\nprint(\"Question 4: Find the player(s) that had the most rushes for a loss.\")\nans=NegativeRushes(OffAndDef)\nfor player in ans:\n print(player[0]+\": \"+str(player[1])+\" negative 
rushes.\\n\")\nprint(\"==================================================================================\")\nprint(\"Question 5: Find the player(s) with the most number of passes for a loss.\")\nans=MostPassForLoss(OffAndDef)\nfor player in ans:\n print(player[0]+\": \"+str(player[1])+\" negative passes.\\n\")\ntemp=[]\nprint(\"==================================================================================\")\nprint(\"Question 6: Find the team with the most penalties.\")\nans=MostPenalties(penalties)\nfor team in ans:\n print(str(team[0])+\" had \"+str(team[1])+\" penalties.\\n\")\nprint(\"==================================================================================\")\nprint(\"Question 7: Find the team with the most yards in penalties.\")\nans=TeamPenaltyYards(penalties)\nfor team in ans:\n print(team[0]+\": \"+str(int(team[1]))+\" penalty yards.\\n\")\nprint(\"==================================================================================\")\nprint(\"Question 8: Find the correlation between most penalized teams and games won / lost.\")\nans=PenaltyWins(penalties)\nprint(\"Most Penalties: \"+ans[0][0]+\": \"+str(ans[0][1]) +\"-\" +str(ans[0][2]))\nprint(\"Least Penalties: \"+ans[1][0]+\" : \"+str(ans[1][1])+\"-\" +str(ans[1][2])+\"\\n\")\nprint(\"==================================================================================\")\nprint(\"Question 9: Average number of plays in a game. (This may take up to a minute.)\")\nans=AverageNumberOfPlays()\nprint(\"On average, there are \" +str(ans) +\" plays each game. \\n\")\nprint(\"==================================================================================\")\nprint(\"Question 10: Longest field goal.\")\nans=LongestFG(fg)\nfor player in ans:\n print(player[0]+\": \"+str(player[1])+\" yards.\\n\")\nprint(\"==================================================================================\")\nprint(\"Question 11: Most field goals.\")\nans=MostFG(fg)\nfor player in ans:\n print(player[0]+\": \"+str(player[1])+\" FGs.\\n\")\nprint(\"==================================================================================\")\nprint(\"Question 12: Most missed field goals.\")\nans=MostMFG(fg)\nfor player in ans:\n print(player[0]+\": \"+str(player[1])+\" missed FGs.\\n\")\nprint(\"==================================================================================\")\nprint(\"Question 13: Most dropped passes.\")\nans=MostDrops(OffAndDef)\nfor player in ans:\n print(player[0]+\": \"+str(player[1])+\" drops.\")",
"step-ids": [
8,
9,
10,
15,
17
]
}
|
[
8,
9,
10,
15,
17
] |
import swipe
def scheduleMultipoint(driver):
driver.find_element_by_id('com.dentist.android:id/calendarBt').click()
driver.find_element_by_id('com.dentist.android:id/addIb').click()
def time(driver):  # appointment time
    driver.find_element_by_id('com.dentist.android:id/cureHourLl').click()  # appointment time
    driver.find_element_by_name('23:00').click()  # hour
    driver.find_element_by_name('00').click()  # minutes
    driver.find_element_by_name('15分钟').click()  # duration: 15 minutes
    driver.find_element_by_name('完成').click()  # done
def data(driver):  # appointment date
    driver.find_element_by_id('com.dentist.android:id/cureDayLl').click()  # appointment date
driver.find_element_by_name('完成').click()
def patient(driver):  # patient
driver.find_element_by_id('com.dentist.android:id/patientLl').click()
#driver.find_element_by_id('com.dentist.android:id/layout_search').send_keys('总校')
#driver.find_element_by_id('com.dentist.android:id/contactLl').click()
driver.find_element_by_name('总校').click()
driver.find_element_by_name('总校').click()
def site(driver):  # appointment location
    driver.find_element_by_id('com.dentist.android:id/moreLocLl').click()  # choose appointment location
driver.find_element_by_id('com.dentist.android:id/select_city_layout').click()
driver.find_element_by_name('北京市').click()
driver.find_element_by_name('返回').click()
driver.find_element_by_name('欢乐口腔(华贸分院)').click()
def project(driver):  # treatment items
driver.find_element_by_name('牙位/治疗项目').click()
driver.find_element_by_name('修复').click()
driver.find_element_by_name('备牙').click()
driver.find_element_by_name('保存').click()
swipe.swipeUp(driver)
driver.find_element_by_name('发起预约').click()
driver.find_element_by_name('继续保存').click()
def subscribe(driver):
patient(driver)
data(driver)
time(driver)
site(driver)
project(driver)
|
normal
|
{
"blob_id": "02bc97b963b970993fc947cfa41c73230dd4d9e4",
"index": 2649,
"step-1": "<mask token>\n\n\ndef scheduleMultipoint(driver):\n driver.find_element_by_id('com.dentist.android:id/calendarBt').click()\n driver.find_element_by_id('com.dentist.android:id/addIb').click()\n\n\ndef time(driver):\n driver.find_element_by_id('com.dentist.android:id/cureHourLl').click()\n driver.find_element_by_name('23:00').click()\n driver.find_element_by_name('00').click()\n driver.find_element_by_name('15分钟').click()\n driver.find_element_by_name('完成').click()\n\n\ndef data(driver):\n driver.find_element_by_id('com.dentist.android:id/cureDayLl').click()\n driver.find_element_by_name('完成').click()\n\n\ndef patient(driver):\n driver.find_element_by_id('com.dentist.android:id/patientLl').click()\n driver.find_element_by_name('总校').click()\n driver.find_element_by_name('总校').click()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef scheduleMultipoint(driver):\n driver.find_element_by_id('com.dentist.android:id/calendarBt').click()\n driver.find_element_by_id('com.dentist.android:id/addIb').click()\n\n\ndef time(driver):\n driver.find_element_by_id('com.dentist.android:id/cureHourLl').click()\n driver.find_element_by_name('23:00').click()\n driver.find_element_by_name('00').click()\n driver.find_element_by_name('15分钟').click()\n driver.find_element_by_name('完成').click()\n\n\ndef data(driver):\n driver.find_element_by_id('com.dentist.android:id/cureDayLl').click()\n driver.find_element_by_name('完成').click()\n\n\ndef patient(driver):\n driver.find_element_by_id('com.dentist.android:id/patientLl').click()\n driver.find_element_by_name('总校').click()\n driver.find_element_by_name('总校').click()\n\n\ndef site(driver):\n driver.find_element_by_id('com.dentist.android:id/moreLocLl').click()\n driver.find_element_by_id('com.dentist.android:id/select_city_layout'\n ).click()\n driver.find_element_by_name('北京市').click()\n driver.find_element_by_name('返回').click()\n driver.find_element_by_name('欢乐口腔(华贸分院)').click()\n\n\n<mask token>\n\n\ndef subscribe(driver):\n patient(driver)\n data(driver)\n time(driver)\n site(driver)\n project(driver)\n",
"step-3": "<mask token>\n\n\ndef scheduleMultipoint(driver):\n driver.find_element_by_id('com.dentist.android:id/calendarBt').click()\n driver.find_element_by_id('com.dentist.android:id/addIb').click()\n\n\ndef time(driver):\n driver.find_element_by_id('com.dentist.android:id/cureHourLl').click()\n driver.find_element_by_name('23:00').click()\n driver.find_element_by_name('00').click()\n driver.find_element_by_name('15分钟').click()\n driver.find_element_by_name('完成').click()\n\n\ndef data(driver):\n driver.find_element_by_id('com.dentist.android:id/cureDayLl').click()\n driver.find_element_by_name('完成').click()\n\n\ndef patient(driver):\n driver.find_element_by_id('com.dentist.android:id/patientLl').click()\n driver.find_element_by_name('总校').click()\n driver.find_element_by_name('总校').click()\n\n\ndef site(driver):\n driver.find_element_by_id('com.dentist.android:id/moreLocLl').click()\n driver.find_element_by_id('com.dentist.android:id/select_city_layout'\n ).click()\n driver.find_element_by_name('北京市').click()\n driver.find_element_by_name('返回').click()\n driver.find_element_by_name('欢乐口腔(华贸分院)').click()\n\n\ndef project(driver):\n driver.find_element_by_name('牙位/治疗项目').click()\n driver.find_element_by_name('修复').click()\n driver.find_element_by_name('备牙').click()\n driver.find_element_by_name('保存').click()\n swipe.swipeUp(driver)\n driver.find_element_by_name('发起预约').click()\n driver.find_element_by_name('继续保存').click()\n\n\ndef subscribe(driver):\n patient(driver)\n data(driver)\n time(driver)\n site(driver)\n project(driver)\n",
"step-4": "import swipe\n\n\ndef scheduleMultipoint(driver):\n driver.find_element_by_id('com.dentist.android:id/calendarBt').click()\n driver.find_element_by_id('com.dentist.android:id/addIb').click()\n\n\ndef time(driver):\n driver.find_element_by_id('com.dentist.android:id/cureHourLl').click()\n driver.find_element_by_name('23:00').click()\n driver.find_element_by_name('00').click()\n driver.find_element_by_name('15分钟').click()\n driver.find_element_by_name('完成').click()\n\n\ndef data(driver):\n driver.find_element_by_id('com.dentist.android:id/cureDayLl').click()\n driver.find_element_by_name('完成').click()\n\n\ndef patient(driver):\n driver.find_element_by_id('com.dentist.android:id/patientLl').click()\n driver.find_element_by_name('总校').click()\n driver.find_element_by_name('总校').click()\n\n\ndef site(driver):\n driver.find_element_by_id('com.dentist.android:id/moreLocLl').click()\n driver.find_element_by_id('com.dentist.android:id/select_city_layout'\n ).click()\n driver.find_element_by_name('北京市').click()\n driver.find_element_by_name('返回').click()\n driver.find_element_by_name('欢乐口腔(华贸分院)').click()\n\n\ndef project(driver):\n driver.find_element_by_name('牙位/治疗项目').click()\n driver.find_element_by_name('修复').click()\n driver.find_element_by_name('备牙').click()\n driver.find_element_by_name('保存').click()\n swipe.swipeUp(driver)\n driver.find_element_by_name('发起预约').click()\n driver.find_element_by_name('继续保存').click()\n\n\ndef subscribe(driver):\n patient(driver)\n data(driver)\n time(driver)\n site(driver)\n project(driver)\n",
"step-5": "import swipe\ndef scheduleMultipoint(driver):\n driver.find_element_by_id('com.dentist.android:id/calendarBt').click()\n driver.find_element_by_id('com.dentist.android:id/addIb').click()\n\n\ndef time(driver):#就诊时间\n driver.find_element_by_id('com.dentist.android:id/cureHourLl').click()#就诊时间\n driver.find_element_by_name('23:00').click()#时间\n driver.find_element_by_name('00').click()#分钟\n driver.find_element_by_name('15分钟').click()#时长\n driver.find_element_by_name('完成').click()\n\ndef data(driver):#就诊日期\n driver.find_element_by_id('com.dentist.android:id/cureDayLl').click()#就诊日期\n driver.find_element_by_name('完成').click()\n\ndef patient(driver):#患者\n driver.find_element_by_id('com.dentist.android:id/patientLl').click()\n #driver.find_element_by_id('com.dentist.android:id/layout_search').send_keys('总校')\n #driver.find_element_by_id('com.dentist.android:id/contactLl').click()\n driver.find_element_by_name('总校').click()\n driver.find_element_by_name('总校').click()\ndef site(driver):#就诊地点\n driver.find_element_by_id('com.dentist.android:id/moreLocLl').click()#选择就诊地点\n driver.find_element_by_id('com.dentist.android:id/select_city_layout').click()\n driver.find_element_by_name('北京市').click()\n driver.find_element_by_name('返回').click()\n driver.find_element_by_name('欢乐口腔(华贸分院)').click()\ndef project(driver):#治疗项目\n driver.find_element_by_name('牙位/治疗项目').click()\n driver.find_element_by_name('修复').click()\n driver.find_element_by_name('备牙').click()\n driver.find_element_by_name('保存').click()\n swipe.swipeUp(driver)\n driver.find_element_by_name('发起预约').click()\n driver.find_element_by_name('继续保存').click()\n\ndef subscribe(driver):\n patient(driver)\n data(driver)\n time(driver)\n site(driver)\n project(driver)\n\n\n\n",
"step-ids": [
4,
6,
7,
8,
9
]
}
|
[
4,
6,
7,
8,
9
] |
#!/usr/bin/python
import argparse
import os
import pipes
import sys
import rospy
import std_msgs.msg
import actionlib
import time
import datetime
from geometry_msgs.msg import Pose, Point, Quaternion
from actionlib import *
from location_provider.srv import GetLocationList
from std_srvs.srv import Trigger
try:
import rosplan_interface as planner
import state_machine.msg
from name_provider.srv import GetRealName, CreateNewPerson
except:
pass
from constructs import *
from parser import FnHintParser
name = "sprinkles"
tts = rospy.Publisher("tosay", std_msgs.msg.String, queue_size=5)
valid_in = rospy.Publisher("heard", std_msgs.msg.String, queue_size=5)
loc = rospy.ServiceProxy('/get_location_list', GetLocationList)
locations = None
nicegeneric = S('please') | 'will you'
nicepre = nicegeneric | S('go ahead and')
nicepost = nicegeneric | (~S('right') + 'now')
master = FnHintParser()
face_last_seen = None
currently_speaking = False
TIME_TOLERANCE = 5
def voice_callback(msg):
global currently_speaking
if not msg.data:
time.sleep(2)
currently_speaking = msg.data
def face_callback(_):
global face_last_seen
face_last_seen = datetime.datetime.now()
def face_active():
if face_last_seen is None:
return False
active = face_last_seen > datetime.datetime.now() - datetime.timedelta(seconds=TIME_TOLERANCE)
# rospy.loginfo("Face detected? %s" % str(active))
return active
@master.register_fn(keywords='dir')
def go(place):
global currently_speaking
currently_speaking = True
_say("Okay. I will go to the " + str(place))
return planner.gen_predicate('robotat', x=place)
@master.register_fn()
def say(info):
msg = ""
if info == "name":
msg = "I am " + name
elif info in ['identification', 'id']:
msg = "I am a people bot"
elif info in ['hello', 'hi']:
msg = "Hello there."
elif info == "love":
msg = "Not particularly"
elif info == "friend":
msg = "I am a people bot. I am a friend to everyone."
elif info == 'joke':
msg = os.popen('fortune riddles | sed "s/Q://" | sed "s/A://" | tr "\n\t" " "').read()
if msg:
_say(msg)
def _say(msg):
rospy.loginfo("Saying: " + msg)
tts.publish(std_msgs.msg.String(msg))
@master.register_fn()
def halt():
planner.cancel()
_say("Stopping")
@master.register_fn()
def spin(direction='around'):
_say('No')
return
@master.register_fn()
def bring_msg(source, dest):
msg_name = planner.add_instance('message')
planner.add_predicate('has_message', person=source, msg=msg_name)
return planner.gen_predicate('has_message', person=dest, msg=msg_name)
class InteractServer(object):
def __init__(self, name):
self.active = False
self._action_name = name
self.name = None
self.nl_listen = False
self.goals = []
self._feedback = state_machine.msg.interactFeedback()
self._server = SimpleActionServer("interact",
state_machine.msg.interactAction,
execute_cb=self.execute_cb,
auto_start = False)
self._server.start()
rospy.loginfo( "Interact Server started")
def speech_callback(self, topic_data, parser):
rospy.loginfo("============")
rospy.loginfo('%s, speaking: %s' % (topic_data.data, str(currently_speaking)))
if self.active and not currently_speaking and not self.nl_listen and topic_data.data:
valid_in.publish(std_msgs.msg.String(str(topic_data.data)))
rospy.loginfo("Interpreting...")
goal_s = parser.parse_and_run(topic_data.data)
rospy.loginfo("Result: %s", str(goal_s))
if hasattr(goal_s, '__iter__'):
self.goals.extend(goal_s)
elif goal_s is not None:
self.goals.append(goal_s)
rospy.loginfo("============")
def check_name(self, id):
self.name = None
rospy.loginfo("Checking name... %d" % id)
if not hasattr(self, '_name_service'):
self._name_service = rospy.ServiceProxy('/get_real_name', GetRealName)
try:
rospy.wait_for_service('/get_real_name', 10)
except:
rospy.logwarn("Timeout waiting for person db")
return
res = self._name_service(id)
if res.found_name:
self.name = res.name
_say("Hello %s." % self.name)
else:
_say("I do not recognize you.")
self.set_name(id)
def set_name(self, id=None, *args, **kwargs):
if id is None:
id = self.id
if not hasattr(self, '_nl_service'):
self._nl_service = rospy.ServiceProxy('/nl_recognizer/nl_listen', Trigger)
if not hasattr(self, '_mkname_service'):
self._mkname_service = rospy.ServiceProxy('/create_new_person', CreateNewPerson)
try:
rospy.wait_for_service('/nl_recognizer/nl_listen', 10)
except:
rospy.logwarn("Timeout waiting for listener")
return False
try:
rospy.wait_for_service('/create_new_person', 10)
except:
rospy.logwarn("Timeout waiting for person creation")
return False
_say("Please state your name.")
rospy.sleep(4)
self.nl_listen = True
newname = self._nl_service()
self.nl_listen = False
if newname.success:
valid_in.publish(std_msgs.msg.String(str(newname.message)))
self._mkname_service(newname.message, id)
self.name = newname.message
_say("Hello %s." % self.name)
return newname.message
else:
_say("I didn't catch that. Continuing on.")
return False
def execute_cb(self, goal):
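        # Goal callback: greet the recognized person, collect voice commands while a face stays in view, then return the gathered planner goals.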
self.id = goal.personID
#print goal.goal_id
if self.active:
if False and self._server.is_preempt_requested():
rospy.loginfo('%s: Preempted' % self._action_name)
self._server.set_preempted()
self.active = False
return
rospy.loginfo("interacting")
self.active = True
#self._feedback.isInteracting = True
# _say("Hello there.")
self.check_name(goal.personID)
_say("Please say a command")
#self._server.publish_feedback(self._feedback)
time.sleep(3)
while face_active():
time.sleep(.5)
if not self.active:
# We're dead, don't send bad info
return
self.active = False
rospy.loginfo("done interacting")
res = state_machine.msg.interactResult()
res.action = self.goals
self.goals = []
_say('Goodbye')
if res.action:
self._server.set_succeeded(res)
else:
self._server.set_aborted()
def get_cmd():
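    # Build the recognizer grammar: fetch the known location names from the location_provider service and compose the command syntax tree.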
global locations
rospy.wait_for_service('/get_location_list')
locations = loc().output
loc_syntax = (reduce(lambda x, y: x | y, locations, S(locations.pop())))
cmd = ~S('okay') + S(name) + ~nicepre + (
(S('change my name') % 'set_name') |
((S('move') | 'go' | 'drive') % 'go' +
((S('to') + ~S('the') +
loc_syntax % 'place'))) |
(S('stop') | 'halt' | 'exit') % 'halt' |
((S('spin') % 'spin' | S('turn') % 'go') + (S('around') | 'left' | 'right') % 'direction') |
((S('say') | 'tell me' | 'speak' | 'what is' | 'what\'s') % 'say' + ~(S('your') | 'a') +
(S('name') | 'identification' | 'id' | 'hello' | 'hi' | 'joke') % 'info') |
(S("where are you going") % 'where') |
((S('take')|S('bring')|S('give')|S('send')) + ~S('a') + S("message") % 'bring_msg' + ~(S('from') + ~S('the') + loc_syntax % 'source') + S('to') + ~S('the') + loc_syntax % 'dest')
)
return cmd
if __name__ == '__main__':
rospy.init_node('speech_interpreter')
master.register_syntax(get_cmd())
speech_topic = rospy.get_param('~speech_in', '/recognizer/output')
active_topic = rospy.get_param('~active', '/face_finder/closest_face')
planner.init()
srv = InteractServer(rospy.get_name())
master.register_fn(srv.set_name)
textin = rospy.Subscriber(speech_topic, std_msgs.msg.String, callback=srv.speech_callback, callback_args=master)
check_active = rospy.Subscriber(active_topic, rospy.msg.AnyMsg, callback=face_callback)
check_voice = rospy.Subscriber('/is_speaking', std_msgs.msg.Bool, callback=voice_callback)
rospy.spin()
|
normal
|
{
"blob_id": "7a0e7ede263727ef303ba23dff1949c3a7031360",
"index": 7423,
"step-1": "#!/usr/bin/python\nimport argparse\nimport os\nimport pipes\nimport sys\n\nimport rospy\nimport std_msgs.msg\nimport actionlib\nimport time\n\nimport datetime\nfrom geometry_msgs.msg import Pose, Point, Quaternion\nfrom actionlib import *\nfrom location_provider.srv import GetLocationList\nfrom std_srvs.srv import Trigger\n\ntry:\n import rosplan_interface as planner\n import state_machine.msg\n from name_provider.srv import GetRealName, CreateNewPerson\nexcept:\n pass\nfrom constructs import *\nfrom parser import FnHintParser\n\nname = \"sprinkles\"\n\n\ntts = rospy.Publisher(\"tosay\", std_msgs.msg.String, queue_size=5)\nvalid_in = rospy.Publisher(\"heard\", std_msgs.msg.String, queue_size=5)\nloc = rospy.ServiceProxy('/get_location_list', GetLocationList)\nlocations = None\nnicegeneric = S('please') | 'will you'\nnicepre = nicegeneric | S('go ahead and')\nnicepost = nicegeneric | (~S('right') + 'now')\n\nmaster = FnHintParser()\n\nface_last_seen = None\ncurrently_speaking = False\nTIME_TOLERANCE = 5\n\ndef voice_callback(msg):\n global currently_speaking \n if not msg.data:\n\ttime.sleep(2)\n currently_speaking = msg.data\n\ndef face_callback(_):\n global face_last_seen\n face_last_seen = datetime.datetime.now() \n\ndef face_active():\n if face_last_seen is None:\n return False\n active = face_last_seen > datetime.datetime.now() - datetime.timedelta(seconds=TIME_TOLERANCE)\n# rospy.loginfo(\"Face detected? %s\" % str(active))\n return active\n\[email protected]_fn(keywords='dir')\ndef go(place):\n global currently_speaking\n currently_speaking = True\n _say(\"Okay. I will go to the \" + str(place))\n return planner.gen_predicate('robotat', x=place)\n\n\[email protected]_fn()\ndef say(info):\n msg = \"\"\n if info == \"name\":\n msg = \"I am \" + name\n elif info in ['identification', 'id']:\n msg = \"I am a people bot\"\n elif info in ['hello', 'hi']:\n msg = \"Hello there.\"\n elif info == \"love\":\n msg = \"Not particularly\"\n elif info == \"friend\":\n msg = \"I am a people bot. 
I am a friend to everyone.\" \n elif info == 'joke':\n\tmsg = os.popen('fortune riddles | sed \"s/Q://\" | sed \"s/A://\" | tr \"\\n\\t\" \" \"').read()\n\t\n\n if msg:\n _say(msg)\n\ndef _say(msg):\n rospy.loginfo(\"Saying: \" + msg)\n tts.publish(std_msgs.msg.String(msg))\n\n\[email protected]_fn()\ndef halt():\n planner.cancel()\n _say(\"Stopping\")\n\n\[email protected]_fn()\ndef spin(direction='around'):\n _say('No')\n return\n\n\[email protected]_fn()\ndef bring_msg(source, dest):\n msg_name = planner.add_instance('message')\n planner.add_predicate('has_message', person=source, msg=msg_name)\n return planner.gen_predicate('has_message', person=dest, msg=msg_name)\n\n\n\nclass InteractServer(object):\n def __init__(self, name):\n self.active = False\n self._action_name = name\n\tself.name = None\n\tself.nl_listen = False\n self.goals = []\n self._feedback = state_machine.msg.interactFeedback()\n self._server = SimpleActionServer(\"interact\",\n state_machine.msg.interactAction,\n execute_cb=self.execute_cb,\n auto_start = False)\n self._server.start()\n\trospy.loginfo( \"Interact Server started\")\n\n def speech_callback(self, topic_data, parser):\n rospy.loginfo(\"============\")\n rospy.loginfo('%s, speaking: %s' % (topic_data.data, str(currently_speaking)))\n if self.active and not currently_speaking and not self.nl_listen and topic_data.data:\n\t valid_in.publish(std_msgs.msg.String(str(topic_data.data)))\n rospy.loginfo(\"Interpreting...\")\n goal_s = parser.parse_and_run(topic_data.data)\n rospy.loginfo(\"Result: %s\", str(goal_s))\n if hasattr(goal_s, '__iter__'):\n self.goals.extend(goal_s)\n elif goal_s is not None:\n self.goals.append(goal_s)\n rospy.loginfo(\"============\")\n\n def check_name(self, id):\n\tself.name = None\n\trospy.loginfo(\"Checking name... %d\" % id)\n\tif not hasattr(self, '_name_service'):\n\t self._name_service = rospy.ServiceProxy('/get_real_name', GetRealName)\n\ttry:\n\t rospy.wait_for_service('/get_real_name', 10)\n\texcept:\n\t rospy.logwarn(\"Timeout waiting for person db\")\n\t return\n\tres = self._name_service(id) \n\tif res.found_name:\n\t self.name = res.name\n\t _say(\"Hello %s.\" % self.name)\n\telse:\n\t _say(\"I do not recognize you.\")\n\t self.set_name(id)\n\n def set_name(self, id=None, *args, **kwargs):\n\tif id is None:\n\t id = self.id\n\tif not hasattr(self, '_nl_service'): \n\t self._nl_service = rospy.ServiceProxy('/nl_recognizer/nl_listen', Trigger)\n\tif not hasattr(self, '_mkname_service'): \n\t self._mkname_service = rospy.ServiceProxy('/create_new_person', CreateNewPerson)\n\ttry:\n\t rospy.wait_for_service('/nl_recognizer/nl_listen', 10)\n\texcept:\n\t rospy.logwarn(\"Timeout waiting for listener\")\n\t return False\n\ttry:\n\t rospy.wait_for_service('/create_new_person', 10)\n\texcept:\n\t rospy.logwarn(\"Timeout waiting for person creation\")\n\t return False\n\t_say(\"Please state your name.\")\n\trospy.sleep(4)\n\tself.nl_listen = True\n\tnewname = self._nl_service()\n\tself.nl_listen = False\n\tif newname.success:\n\t valid_in.publish(std_msgs.msg.String(str(newname.message)))\n\t self._mkname_service(newname.message, id)\n\t self.name = newname.message\n\t _say(\"Hello %s.\" % self.name)\n\t return newname.message\n\telse:\n\t _say(\"I didn't catch that. 
Continuing on.\")\n\t return False\n\n\n def execute_cb(self, goal):\n\tself.id = goal.personID\n #print goal.goal_id\n if self.active:\n if False and self._server.is_preempt_requested():\n rospy.loginfo('%s: Preempted' % self._action_name)\n self._server.set_preempted()\n self.active = False\n return\n rospy.loginfo(\"interacting\")\n self.active = True\n #self._feedback.isInteracting = True\n\t# _say(\"Hello there.\")\n\tself.check_name(goal.personID)\n\t_say(\"Please say a command\")\n #self._server.publish_feedback(self._feedback)\n time.sleep(3)\n while face_active():\n time.sleep(.5)\n if not self.active:\n # We're dead, don't send bad info\n return\n self.active = False\n\n rospy.loginfo(\"done interacting\")\n res = state_machine.msg.interactResult()\n res.action = self.goals\n self.goals = []\n\t_say('Goodbye')\n if res.action:\n self._server.set_succeeded(res)\n else:\n self._server.set_aborted()\n\ndef get_cmd():\n global locations\n rospy.wait_for_service('/get_location_list')\n locations = loc().output\n loc_syntax = (reduce(lambda x, y: x | y, locations, S(locations.pop())))\n cmd = ~S('okay') + S(name) + ~nicepre + (\n\t(S('change my name') % 'set_name') |\n ((S('move') | 'go' | 'drive') % 'go' +\n ((S('to') + ~S('the') +\n loc_syntax % 'place'))) |\n (S('stop') | 'halt' | 'exit') % 'halt' |\n ((S('spin') % 'spin' | S('turn') % 'go') + (S('around') | 'left' | 'right') % 'direction') |\n ((S('say') | 'tell me' | 'speak' | 'what is' | 'what\\'s') % 'say' + ~(S('your') | 'a') +\n (S('name') | 'identification' | 'id' | 'hello' | 'hi' | 'joke') % 'info') |\n (S(\"where are you going\") % 'where') |\n\t((S('take')|S('bring')|S('give')|S('send')) + ~S('a') + S(\"message\") % 'bring_msg' + ~(S('from') + ~S('the') + loc_syntax % 'source') + S('to') + ~S('the') + loc_syntax % 'dest')\n )\n return cmd\n\n\nif __name__ == '__main__':\n rospy.init_node('speech_interpreter')\n master.register_syntax(get_cmd())\n speech_topic = rospy.get_param('~speech_in', '/recognizer/output')\n active_topic = rospy.get_param('~active', '/face_finder/closest_face')\n planner.init()\n\n srv = InteractServer(rospy.get_name())\n master.register_fn(srv.set_name)\n\n textin = rospy.Subscriber(speech_topic, std_msgs.msg.String, callback=srv.speech_callback, callback_args=master)\n check_active = rospy.Subscriber(active_topic, rospy.msg.AnyMsg, callback=face_callback)\n check_voice = rospy.Subscriber('/is_speaking', std_msgs.msg.Bool, callback=voice_callback)\n\n rospy.spin() \n\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
from django.db import models
# Create your models here.
class Logins(models.Model):
created = models.DateTimeField(auto_now_add=True)
login_addr = models.GenericIPAddressField()
hostname = models.CharField(max_length=200)
|
normal
|
{
"blob_id": "9a55ccf758b4b2cc440153ab3b1f97823863a848",
"index": 165,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Logins(models.Model):\n <mask token>\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Logins(models.Model):\n created = models.DateTimeField(auto_now_add=True)\n login_addr = models.GenericIPAddressField()\n hostname = models.CharField(max_length=200)\n",
"step-4": "from django.db import models\n\n\nclass Logins(models.Model):\n created = models.DateTimeField(auto_now_add=True)\n login_addr = models.GenericIPAddressField()\n hostname = models.CharField(max_length=200)\n",
"step-5": "from django.db import models\n\n# Create your models here.\nclass Logins(models.Model):\n created = models.DateTimeField(auto_now_add=True)\n login_addr = models.GenericIPAddressField()\n hostname = models.CharField(max_length=200)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
#!/usr/bin/python2
#
# Author: Victor Ananjevsky, 2007 - 2010
# based on xdg-menu.py, written by Piotr Zielinski (http://www.cl.cam.ac.uk/~pz215/)
# License: GPL
#
# This script takes names of menu files conforming to the XDG Desktop
# Menu Specification, and outputs their FVWM equivalents to the
# standard output.
#
# http://standards.freedesktop.org/menu-spec/latest/
#
# Requirements:
# pyxdg, pygtk, gnome-menus
#
# Syntax:
# fvwm-xdg-menu.py [-d Menu] menufile1 menufile2 menufile3 ...
#
# Each menufile is an XDG menu description file.
# Icons of menu entries cached in $XDG_CACHE_HOME/fvwm/icons/menu
#
# For menufile name `recent' will be generated menu of recently opened files
#
# -d mean not print headers for toplevel menu (useful in DynamicPopupAction)
#
# Example:
# fvwm-xdg-menu.py /etc/xdg/menus/applications.menu
# fvwm-xdg-menu.py applications
#
import sys
import os
from optparse import OptionParser
import xdg.Menu
from xdg.DesktopEntry import *
from xdg.RecentFiles import *
from xdg.BaseDirectory import xdg_config_dirs, xdg_config_home, xdg_cache_home
import gtk
# fix for correct output of unicode chars without terminal
sys.stdout = codecs.getwriter('utf-8')(sys.stdout)
def cache_icon (icon):
''' cache an icon '''
icon_file = "%s/%s.png" % (cache_path, os.path.basename(icon))
if os.path.exists(icon_file):
return
full_icon = "%s.png" % icon
if os.path.exists(full_icon):
gtk.gdk.pixbuf_new_from_file_at_size(full_icon, options.icon_size, options.icon_size).save(icon_file, 'png')
return
try:
icon_theme.load_icon(icon, options.icon_size, gtk.ICON_LOOKUP_NO_SVG).save(icon_file, "png")
except:
pass
def parse_menu (menu, fvwm_menu = None):
''' parse menu file '''
prefix = "+"
if fvwm_menu == None:
print ''
print 'DestroyMenu "%s"' % menu
print 'AddToMenu "%s"' % menu
else:
print 'DestroyMenu recreate %s' % fvwm_menu
prefix = "AddToMenu %s" % fvwm_menu
for entry in menu.getEntries():
if isinstance(entry, xdg.Menu.Menu):
icon = entry.getIcon()
print u'%s "%s%%menu/folder.png%%" Popup "%s"' % (prefix, entry.getName(), entry)
elif isinstance(entry, xdg.Menu.MenuEntry):
desktop = DesktopEntry(entry.DesktopEntry.getFileName())
icon = desktop.getIcon()
ind = icon.rfind('.')
if ind != -1:
icon = icon[0:ind]
cmd = desktop.getExec().rstrip('%FUfu')
cache_icon(icon)
print u'%s "%s%%menu/%s.png%%" Exec exec %s' % (prefix, desktop.getName(), os.path.basename(icon), cmd)
else:
pass
for entry in menu.getEntries():
if isinstance(entry, xdg.Menu.Menu):
parse_menu(entry)
def parse_recent (fvwm_menu = None):
''' parse recently opened files '''
prefix = "+"
if fvwm_menu == None:
print ''
print 'DestroyMenu "Recent"'
print 'AddToMenu "Recent"'
else:
print 'DestroyMenu recreate %s' % fvwm_menu
prefix="AddToMenu %s" % fvwm_menu
rm = gtk.RecentManager()
for rf in rm.get_items():
print '%s "%s" Exec exec xdg-open "%s"' % (prefix, rf.get_display_name(), rf.get_uri())
# Start
cache_path = "%s/fvwm/menu" % xdg_cache_home
icon_theme = gtk.icon_theme_get_default()
if not os.path.exists(cache_path):
os.makedirs(cache_path)
# Parse commandline
parser = OptionParser()
parser.add_option("-d", "--dynamic", dest="fvwm_menu", default=None, help="Use in DynamicPopupAction", metavar="MENU")
parser.add_option("-i", "--icons", dest="icon_size", default=16, help="Set icons size", metavar="SIZE")
(options, args) = parser.parse_args()
for arg in args:
filename = ""
if os.path.exists(arg) or arg == "recent":
filename = arg
else:
tmpfile = "%s/menus/%s.menu" % (xdg_config_home, arg)
if os.path.exists(tmpfile):
filename = tmpfile
else:
for dir in xdg_config_dirs:
tmpfile = "%s/menus/%s.menu" % (dir, arg)
if os.path.exists(tmpfile):
filename = tmpfile
break
if filename == "":
continue
elif filename == "recent":
parse_recent (options.fvwm_menu)
else:
parse_menu(xdg.Menu.parse(filename), options.fvwm_menu)
|
normal
|
{
"blob_id": "214aadb7b3fc125da12f098bde87fce295349fdf",
"index": 1917,
"step-1": "#!/usr/bin/python2\n#\n# Author: Victor Ananjevsky, 2007 - 2010\n# based on xdg-menu.py, written by Piotr Zielinski (http://www.cl.cam.ac.uk/~pz215/)\n# License: GPL\n#\n# This script takes names of menu files conforming to the XDG Desktop\n# Menu Specification, and outputs their FVWM equivalents to the\n# standard output.\n#\n# http://standards.freedesktop.org/menu-spec/latest/\n#\n# Requirements:\n# pyxdg, pygtk, gnome-menus\n#\n# Syntax:\n# fvwm-xdg-menu.py [-d Menu] menufile1 menufile2 menufile3 ...\n#\n# Each menufile is an XDG menu description file.\n# Icons of menu entries cached in $XDG_CACHE_HOME/fvwm/icons/menu\n#\n# For menufile name `recent' will be generated menu of recently opened files\n#\n# -d mean not print headers for toplevel menu (useful in DynamicPopupAction)\n#\n# Example:\n# fvwm-xdg-menu.py /etc/xdg/menus/applications.menu\n# fvwm-xdg-menu.py applications\n#\n\n\nimport sys\nimport os\nfrom optparse import OptionParser\n\nimport xdg.Menu\nfrom xdg.DesktopEntry import *\nfrom xdg.RecentFiles import *\nfrom xdg.BaseDirectory import xdg_config_dirs, xdg_cache_home\n\nimport gtk\n\n# fix for correct output of unicode chars without terminal\nsys.stdout = codecs.getwriter('utf-8')(sys.stdout)\n\ndef cache_icon (icon):\n ''' cache an icon '''\n icon_file = \"%s/%s.png\" % (cache_path, os.path.basename(icon))\n if os.path.exists(icon_file):\n return\n full_icon = \"%s.png\" % icon\n if os.path.exists(full_icon):\n gtk.gdk.pixbuf_new_from_file_at_size(full_icon, options.icon_size, options.icon_size).save(icon_file, 'png')\n return\n try:\n icon_theme.load_icon(icon, options.icon_size, gtk.ICON_LOOKUP_NO_SVG).save(icon_file, \"png\")\n except:\n pass\n\ndef parse_menu (menu, fvwm_menu = None):\n ''' parse menu file '''\n prefix = \"+\"\n if fvwm_menu == None:\n print ''\n print 'DestroyMenu \"%s\"' % menu\n print 'AddToMenu \"%s\"' % menu\n else:\n print 'DestroyMenu recreate %s' % fvwm_menu\n prefix = \"AddToMenu %s\" % fvwm_menu\n\n for entry in menu.getEntries():\n\tif isinstance(entry, xdg.Menu.Menu):\n icon = entry.getIcon()\n print u'%s \"%s%%menu/folder.png%%\" Popup \"%s\"' % (prefix, entry.getName(), entry)\n\telif isinstance(entry, xdg.Menu.MenuEntry):\n desktop = DesktopEntry(entry.DesktopEntry.getFileName())\n icon = desktop.getIcon()\n ind = icon.rfind('.')\n if ind != -1:\n icon = icon[0:ind]\n cmd = desktop.getExec().rstrip('%FUfu')\n cache_icon(icon)\n print u'%s \"%s%%menu/%s.png%%\" Exec exec %s' % (prefix, desktop.getName(), os.path.basename(icon), cmd)\n\telse:\n\t pass\n\n for entry in menu.getEntries():\n\tif isinstance(entry, xdg.Menu.Menu):\n\t parse_menu(entry)\n\ndef parse_recent (fvwm_menu = None):\n ''' parse recently opened files '''\n prefix = \"+\"\n if fvwm_menu == None:\n print ''\n print 'DestroyMenu \"Recent\"'\n print 'AddToMenu \"Recent\"'\n else:\n print 'DestroyMenu recreate %s' % fvwm_menu\n prefix=\"AddToMenu %s\" % fvwm_menu\n \n rm = gtk.RecentManager()\n for rf in rm.get_items():\n print '%s \"%s\" Exec exec xdg-open \"%s\"' % (prefix, rf.get_display_name(), rf.get_uri())\n\n# Start\n\ncache_path = \"%s/fvwm/menu\" % xdg_cache_home\nicon_theme = gtk.icon_theme_get_default()\n\nif not os.path.exists(cache_path):\n os.makedirs(cache_path)\n\n# Parse commandline\n\nparser = OptionParser()\nparser.add_option(\"-d\", \"--dynamic\", dest=\"fvwm_menu\", default=None, help=\"Use in DynamicPopupAction\", metavar=\"MENU\")\nparser.add_option(\"-i\", \"--icons\", dest=\"icon_size\", default=16, help=\"Set icons size\", 
metavar=\"SIZE\")\n(options, args) = parser.parse_args()\n\nfor arg in args:\n filename = \"\"\n if os.path.exists(arg) or arg == \"recent\":\n filename = arg\n else:\n tmpfile = \"%s/menus/%s.menu\" % (xdg_config_home, arg)\n if os.path.exists(tmpfile):\n filename = tmpfile\n else:\n for dir in xdg_config_dirs:\n tmpfile = \"%s/menus/%s.menu\" % (dir, arg)\n if os.path.exists(tmpfile):\n filename = tmpfile\n break\n if filename == \"\":\n continue\n elif filename == \"recent\":\n parse_recent (options.fvwm_menu)\n else:\n parse_menu(xdg.Menu.parse(filename), options.fvwm_menu)\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
import pygame
import time
from menus import MainMenu
from scenes import TestWorldGen
from scenes import TestAnimation
from scenes import TestLevel2
from scenes import MainGame
import random
class GameManager:
def __init__(self):
self.screen = pygame.display.set_mode((1280, 720),
flags=pygame.FULLSCREEN |
pygame.HWSURFACE |
pygame.DOUBLEBUF) # type: pygame.Surface
self.running = True
self.delta_time = 1
self.active_scene = None
# self.load_scene(MainMenu.MainMenu, (self,))
# self.load_scene(TestWorldGen.TestWorldGen, (self,))
# self.load_scene(TestAnimation.TestAnimation, (self,))
# self.load_scene(TestLevel2.TestLevel, (self, ))
self.load_scene(MainGame.MainGame, (self,))
self.fps_font = pygame.font.Font("game_data/fonts/calling_code.ttf", 14)
        self.pygame_clock = pygame.time.Clock() # type: pygame.time.Clock
self.pygame_clock.tick()
pygame.joystick.init()
self.joystick = [pygame.joystick.Joystick(i) for i in range(pygame.joystick.get_count())]
for joystick in self.joystick:
joystick.init()
random.seed(time.time())
self.player_joy = -1
def __del__(self):
self.exit()
def main_loop(self):
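        # Main loop: pump events, cap the frame rate at 60 FPS, run the active scene, and overlay an FPS counter before flipping the display.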
while self.running:
events = pygame.event.get()
for event in events:
if event.type == pygame.QUIT:
self.exit()
self.delta_time = float(self.pygame_clock.tick(60)) / (10 ** 3)
fps_text = self.fps_font.render("FPS: {}".format(round(1 / self.delta_time)), False, (255, 255, 255))
self.active_scene.main_loop(events)
self.screen.blit(fps_text, (self.screen.get_width() - fps_text.get_width(), 0))
pygame.display.flip()
def load_scene(self, scene_object, scene_parameters):
self.active_scene = scene_object(*scene_parameters)
def exit(self):
self.running = False
|
normal
|
{
"blob_id": "91806afea92587476ac743346b88098b197a033c",
"index": 9706,
"step-1": "<mask token>\n\n\nclass GameManager:\n\n def __init__(self):\n self.screen = pygame.display.set_mode((1280, 720), flags=pygame.\n FULLSCREEN | pygame.HWSURFACE | pygame.DOUBLEBUF)\n self.running = True\n self.delta_time = 1\n self.active_scene = None\n self.load_scene(MainGame.MainGame, (self,))\n self.fps_font = pygame.font.Font('game_data/fonts/calling_code.ttf', 14\n )\n self.pygame_clock = pygame.time.Clock()\n self.pygame_clock.tick()\n pygame.joystick.init()\n self.joystick = [pygame.joystick.Joystick(i) for i in range(pygame.\n joystick.get_count())]\n for joystick in self.joystick:\n joystick.init()\n random.seed(time.time())\n self.player_joy = -1\n <mask token>\n\n def main_loop(self):\n while self.running:\n events = pygame.event.get()\n for event in events:\n if event.type == pygame.QUIT:\n self.exit()\n self.delta_time = float(self.pygame_clock.tick(60)) / 10 ** 3\n fps_text = self.fps_font.render('FPS: {}'.format(round(1 / self\n .delta_time)), False, (255, 255, 255))\n self.active_scene.main_loop(events)\n self.screen.blit(fps_text, (self.screen.get_width() - fps_text.\n get_width(), 0))\n pygame.display.flip()\n\n def load_scene(self, scene_object, scene_parameters):\n self.active_scene = scene_object(*scene_parameters)\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass GameManager:\n\n def __init__(self):\n self.screen = pygame.display.set_mode((1280, 720), flags=pygame.\n FULLSCREEN | pygame.HWSURFACE | pygame.DOUBLEBUF)\n self.running = True\n self.delta_time = 1\n self.active_scene = None\n self.load_scene(MainGame.MainGame, (self,))\n self.fps_font = pygame.font.Font('game_data/fonts/calling_code.ttf', 14\n )\n self.pygame_clock = pygame.time.Clock()\n self.pygame_clock.tick()\n pygame.joystick.init()\n self.joystick = [pygame.joystick.Joystick(i) for i in range(pygame.\n joystick.get_count())]\n for joystick in self.joystick:\n joystick.init()\n random.seed(time.time())\n self.player_joy = -1\n\n def __del__(self):\n self.exit()\n\n def main_loop(self):\n while self.running:\n events = pygame.event.get()\n for event in events:\n if event.type == pygame.QUIT:\n self.exit()\n self.delta_time = float(self.pygame_clock.tick(60)) / 10 ** 3\n fps_text = self.fps_font.render('FPS: {}'.format(round(1 / self\n .delta_time)), False, (255, 255, 255))\n self.active_scene.main_loop(events)\n self.screen.blit(fps_text, (self.screen.get_width() - fps_text.\n get_width(), 0))\n pygame.display.flip()\n\n def load_scene(self, scene_object, scene_parameters):\n self.active_scene = scene_object(*scene_parameters)\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass GameManager:\n\n def __init__(self):\n self.screen = pygame.display.set_mode((1280, 720), flags=pygame.\n FULLSCREEN | pygame.HWSURFACE | pygame.DOUBLEBUF)\n self.running = True\n self.delta_time = 1\n self.active_scene = None\n self.load_scene(MainGame.MainGame, (self,))\n self.fps_font = pygame.font.Font('game_data/fonts/calling_code.ttf', 14\n )\n self.pygame_clock = pygame.time.Clock()\n self.pygame_clock.tick()\n pygame.joystick.init()\n self.joystick = [pygame.joystick.Joystick(i) for i in range(pygame.\n joystick.get_count())]\n for joystick in self.joystick:\n joystick.init()\n random.seed(time.time())\n self.player_joy = -1\n\n def __del__(self):\n self.exit()\n\n def main_loop(self):\n while self.running:\n events = pygame.event.get()\n for event in events:\n if event.type == pygame.QUIT:\n self.exit()\n self.delta_time = float(self.pygame_clock.tick(60)) / 10 ** 3\n fps_text = self.fps_font.render('FPS: {}'.format(round(1 / self\n .delta_time)), False, (255, 255, 255))\n self.active_scene.main_loop(events)\n self.screen.blit(fps_text, (self.screen.get_width() - fps_text.\n get_width(), 0))\n pygame.display.flip()\n\n def load_scene(self, scene_object, scene_parameters):\n self.active_scene = scene_object(*scene_parameters)\n\n def exit(self):\n self.running = False\n",
"step-4": "import pygame\nimport time\nfrom menus import MainMenu\nfrom scenes import TestWorldGen\nfrom scenes import TestAnimation\nfrom scenes import TestLevel2\nfrom scenes import MainGame\nimport random\n\n\nclass GameManager:\n\n def __init__(self):\n self.screen = pygame.display.set_mode((1280, 720), flags=pygame.\n FULLSCREEN | pygame.HWSURFACE | pygame.DOUBLEBUF)\n self.running = True\n self.delta_time = 1\n self.active_scene = None\n self.load_scene(MainGame.MainGame, (self,))\n self.fps_font = pygame.font.Font('game_data/fonts/calling_code.ttf', 14\n )\n self.pygame_clock = pygame.time.Clock()\n self.pygame_clock.tick()\n pygame.joystick.init()\n self.joystick = [pygame.joystick.Joystick(i) for i in range(pygame.\n joystick.get_count())]\n for joystick in self.joystick:\n joystick.init()\n random.seed(time.time())\n self.player_joy = -1\n\n def __del__(self):\n self.exit()\n\n def main_loop(self):\n while self.running:\n events = pygame.event.get()\n for event in events:\n if event.type == pygame.QUIT:\n self.exit()\n self.delta_time = float(self.pygame_clock.tick(60)) / 10 ** 3\n fps_text = self.fps_font.render('FPS: {}'.format(round(1 / self\n .delta_time)), False, (255, 255, 255))\n self.active_scene.main_loop(events)\n self.screen.blit(fps_text, (self.screen.get_width() - fps_text.\n get_width(), 0))\n pygame.display.flip()\n\n def load_scene(self, scene_object, scene_parameters):\n self.active_scene = scene_object(*scene_parameters)\n\n def exit(self):\n self.running = False\n",
"step-5": "import pygame\nimport time\nfrom menus import MainMenu\nfrom scenes import TestWorldGen\nfrom scenes import TestAnimation\nfrom scenes import TestLevel2\nfrom scenes import MainGame\nimport random\n\n\nclass GameManager:\n def __init__(self):\n self.screen = pygame.display.set_mode((1280, 720),\n flags=pygame.FULLSCREEN |\n pygame.HWSURFACE |\n pygame.DOUBLEBUF) # type: pygame.Surface\n\n self.running = True\n\n self.delta_time = 1\n\n self.active_scene = None\n # self.load_scene(MainMenu.MainMenu, (self,))\n # self.load_scene(TestWorldGen.TestWorldGen, (self,))\n # self.load_scene(TestAnimation.TestAnimation, (self,))\n # self.load_scene(TestLevel2.TestLevel, (self, ))\n self.load_scene(MainGame.MainGame, (self,))\n\n self.fps_font = pygame.font.Font(\"game_data/fonts/calling_code.ttf\", 14)\n\n self.pygame_clock = pygame.time.Clock() # type: pygame\n self.pygame_clock.tick()\n pygame.joystick.init()\n self.joystick = [pygame.joystick.Joystick(i) for i in range(pygame.joystick.get_count())]\n for joystick in self.joystick:\n joystick.init()\n\n random.seed(time.time())\n\n self.player_joy = -1\n\n def __del__(self):\n self.exit()\n\n def main_loop(self):\n while self.running:\n events = pygame.event.get()\n for event in events:\n if event.type == pygame.QUIT:\n self.exit()\n\n self.delta_time = float(self.pygame_clock.tick(60)) / (10 ** 3)\n\n fps_text = self.fps_font.render(\"FPS: {}\".format(round(1 / self.delta_time)), False, (255, 255, 255))\n\n self.active_scene.main_loop(events)\n\n self.screen.blit(fps_text, (self.screen.get_width() - fps_text.get_width(), 0))\n\n pygame.display.flip()\n\n def load_scene(self, scene_object, scene_parameters):\n self.active_scene = scene_object(*scene_parameters)\n\n def exit(self):\n self.running = False\n",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
from django.shortcuts import render, get_object_or_404
from post.models import *
from .models import *
from django.core.paginator import EmptyPage, PageNotAnInteger, Paginator
from account.models import Profile
from django.contrib.auth.models import User
from django.db.models import Q
# Create your views here.
def index(request):
posts = Post.objects.order_by('-created_at').filter(status='Published')
# about_us = AboutSite.objects.get(id=1)
paginator = Paginator(posts, 9)
page = request.GET.get('page')
post_listings = paginator.get_page(page)
context = {
'posts': post_listings,
# 'about': about_us
}
return render(request, 'hub/index.html', context)
def about(request):
about_us = get_object_or_404(AboutSite,id=1)
context = {
'about': about_us
}
return render(request, 'hub/about.html', context)
def authors(request):
profiles = Profile.objects.all()
context = {
'profiles': profiles
}
return render(request, 'hub/authors.html', context)
def authorDetail(request, pk):
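	# Show an author's profile together with a paginated list of their published posts.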
author = User.objects.get(username=pk)
profile = Profile.objects.get(user=author)
posts = Post.objects.order_by('-created_at').filter(status='Published', author=author)
paginator = Paginator(posts, 6)
page = request.GET.get('page')
posts_paginated = paginator.get_page(page)
context = {
'author': profile,
'posts': posts_paginated
}
return render(request, 'hub/authorDetail.html', context)
# def search(request):
# queryset_list = Post.objects.order_by('-created_at')
# if 'q' in request.GET:
# query = request.GET['q']
# if query:
# queryset_list = queryset_list.filter(Q(title__icontains=query) | Q(description__icontains=query) | Q(content__icontains=query))
# paginator = Paginator(queryset_list, 1)
# page = request.GET.get('page')
# paginated_result = paginator.get_page(page)
# context = {
# 'posts': paginated_result
# }
# return render(request, 'hub/search.html', context)
|
normal
|
{
"blob_id": "ee3718dee869a58089e897489af2eec3ff72be56",
"index": 3478,
"step-1": "<mask token>\n\n\ndef index(request):\n posts = Post.objects.order_by('-created_at').filter(status='Published')\n paginator = Paginator(posts, 9)\n page = request.GET.get('page')\n post_listings = paginator.get_page(page)\n context = {'posts': post_listings}\n return render(request, 'hub/index.html', context)\n\n\n<mask token>\n\n\ndef authors(request):\n profiles = Profile.objects.all()\n context = {'profiles': profiles}\n return render(request, 'hub/authors.html', context)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef index(request):\n posts = Post.objects.order_by('-created_at').filter(status='Published')\n paginator = Paginator(posts, 9)\n page = request.GET.get('page')\n post_listings = paginator.get_page(page)\n context = {'posts': post_listings}\n return render(request, 'hub/index.html', context)\n\n\n<mask token>\n\n\ndef authors(request):\n profiles = Profile.objects.all()\n context = {'profiles': profiles}\n return render(request, 'hub/authors.html', context)\n\n\ndef authorDetail(request, pk):\n author = User.objects.get(username=pk)\n profile = Profile.objects.get(user=author)\n posts = Post.objects.order_by('-created_at').filter(status='Published',\n author=author)\n paginator = Paginator(posts, 6)\n page = request.GET.get('page')\n posts_paginated = paginator.get_page(page)\n context = {'author': profile, 'posts': posts_paginated}\n return render(request, 'hub/authorDetail.html', context)\n",
"step-3": "<mask token>\n\n\ndef index(request):\n posts = Post.objects.order_by('-created_at').filter(status='Published')\n paginator = Paginator(posts, 9)\n page = request.GET.get('page')\n post_listings = paginator.get_page(page)\n context = {'posts': post_listings}\n return render(request, 'hub/index.html', context)\n\n\ndef about(request):\n about_us = get_object_or_404(AboutSite, id=1)\n context = {'about': about_us}\n return render(request, 'hub/about.html', context)\n\n\ndef authors(request):\n profiles = Profile.objects.all()\n context = {'profiles': profiles}\n return render(request, 'hub/authors.html', context)\n\n\ndef authorDetail(request, pk):\n author = User.objects.get(username=pk)\n profile = Profile.objects.get(user=author)\n posts = Post.objects.order_by('-created_at').filter(status='Published',\n author=author)\n paginator = Paginator(posts, 6)\n page = request.GET.get('page')\n posts_paginated = paginator.get_page(page)\n context = {'author': profile, 'posts': posts_paginated}\n return render(request, 'hub/authorDetail.html', context)\n",
"step-4": "from django.shortcuts import render\nfrom post.models import *\nfrom .models import *\nfrom django.core.paginator import EmptyPage, PageNotAnInteger, Paginator\nfrom account.models import Profile\nfrom django.contrib.auth.models import User\nfrom django.db.models import Q\n\n\ndef index(request):\n posts = Post.objects.order_by('-created_at').filter(status='Published')\n paginator = Paginator(posts, 9)\n page = request.GET.get('page')\n post_listings = paginator.get_page(page)\n context = {'posts': post_listings}\n return render(request, 'hub/index.html', context)\n\n\ndef about(request):\n about_us = get_object_or_404(AboutSite, id=1)\n context = {'about': about_us}\n return render(request, 'hub/about.html', context)\n\n\ndef authors(request):\n profiles = Profile.objects.all()\n context = {'profiles': profiles}\n return render(request, 'hub/authors.html', context)\n\n\ndef authorDetail(request, pk):\n author = User.objects.get(username=pk)\n profile = Profile.objects.get(user=author)\n posts = Post.objects.order_by('-created_at').filter(status='Published',\n author=author)\n paginator = Paginator(posts, 6)\n page = request.GET.get('page')\n posts_paginated = paginator.get_page(page)\n context = {'author': profile, 'posts': posts_paginated}\n return render(request, 'hub/authorDetail.html', context)\n",
"step-5": "from django.shortcuts import render\nfrom post.models import *\nfrom .models import *\nfrom django.core.paginator import EmptyPage, PageNotAnInteger, Paginator\nfrom account.models import Profile\nfrom django.contrib.auth.models import User\nfrom django.db.models import Q\n\n# Create your views here.\ndef index(request):\n\tposts = Post.objects.order_by('-created_at').filter(status='Published')\n\t# about_us = AboutSite.objects.get(id=1)\n\tpaginator = Paginator(posts, 9)\n\tpage = request.GET.get('page')\n\tpost_listings = paginator.get_page(page)\n\tcontext = {\n\t\t'posts': post_listings,\n\t\t# 'about': about_us\n\t}\n\treturn render(request, 'hub/index.html', context)\n\ndef about(request):\n\tabout_us = get_object_or_404(AboutSite,id=1)\n\tcontext = {\n\t\t'about': about_us\n\t}\n\treturn render(request, 'hub/about.html', context)\n\n\ndef authors(request):\n\tprofiles = Profile.objects.all()\n\tcontext = {\n\t\t'profiles': profiles\n\t}\n\treturn render(request, 'hub/authors.html', context)\n\n\ndef authorDetail(request, pk):\n\tauthor = User.objects.get(username=pk)\n\tprofile = Profile.objects.get(user=author)\n\tposts = Post.objects.order_by('-created_at').filter(status='Published', author=author)\n\tpaginator = Paginator(posts, 6)\n\tpage = request.GET.get('page')\n\tposts_paginated = paginator.get_page(page)\n\tcontext = {\n\t\t'author': profile,\n\t\t'posts': posts_paginated\n\t}\n\treturn render(request, 'hub/authorDetail.html', context)\n\n\n# def search(request):\n# \tqueryset_list = Post.objects.order_by('-created_at')\n\n# \tif 'q' in request.GET:\n# \t\tquery = request.GET['q']\n# \t\tif query:\n# \t\t\tqueryset_list = queryset_list.filter(Q(title__icontains=query) | Q(description__icontains=query) | Q(content__icontains=query))\n\n# \tpaginator = Paginator(queryset_list, 1)\n# \tpage = request.GET.get('page')\n# \tpaginated_result = paginator.get_page(page)\n# \tcontext = {\n# \t\t'posts': paginated_result\n# \t}\n# \treturn render(request, 'hub/search.html', context)",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
from django.db import models
from django.contrib.auth.models import AbstractUser
from django.core.validators import MinLengthValidator, MaxLengthValidator, RegexValidator
from pizzaclub.settings import MAX_DNI_LENGTH, MAX_CUIL_LENGTH, PASSWORD_RESET_TIMEOUT
from pizzaclub.settings import MIN_DNI_LENGTH, MIN_CUIL_LENGTH
from pizzaclub.settings import MAX_PHONE_LENGTH, MIN_PHONE_LENGTH
import secrets
import datetime
# Create your models here.
class Address(models.Model):
address = models.CharField(max_length=100, unique=True)
lat = models.DecimalField(max_digits=9, decimal_places=7, default=0)
lon= models.DecimalField(max_digits=9, decimal_places=7, default=0)
elev = models.DecimalField(max_digits=9, decimal_places=2, default=0)
class Meta:
verbose_name_plural = "Address"
def __str__(self):
return self.address
class User(AbstractUser):
'''
Extend the User Django built in model.
Add token data for password reset, ans is_employee flag for Employee Profile.
'''
is_employee = models.BooleanField(default=False)
token = models.CharField(max_length=50)
token_date = models.DateTimeField(auto_now=True)
token_valid = models.BooleanField(default=True)
def is_order_manager(self):
return (self.is_employee and self.is_active) or self.is_superuser
def generate_token(self):
return secrets.token_urlsafe()
def check_token(self, token):
'''
Check token validity for an hour since was generated.
'''
tz = self.token_date.tzinfo
t_now = datetime.datetime.now(tz=tz)
# Check the token time less than hour
dt = t_now - self.token_date
if dt.total_seconds() > PASSWORD_RESET_TIMEOUT:
self.token_valid = False
# Return True if the token is correct and is_valid
res = (token == self.token) and self.token_valid
# Set the token invalid
self.token_valid = False
return res
def save(self, *args, **kwargs):
'''
Until save generate a new token and set valid.
'''
# Generate a token and set valid
self.token = self.generate_token()
self.token_valid = True
super(User, self).save(*args, **kwargs)
class Employee(models.Model):
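    # Employee profile linked one-to-one to a User; save() enforces that the linked user has is_employee=True.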
user = models.OneToOneField(User, on_delete=models.CASCADE, primary_key=True)
dni = models.CharField(
max_length=MAX_DNI_LENGTH,
unique=True,
validators=[
MinLengthValidator(MIN_DNI_LENGTH),
MaxLengthValidator(MAX_DNI_LENGTH),
RegexValidator(regex=r'^\d+$')
])
cuil = models.CharField(
max_length=MAX_CUIL_LENGTH,
unique=True,
validators=[
MinLengthValidator(MIN_CUIL_LENGTH),
MaxLengthValidator(MAX_CUIL_LENGTH),
RegexValidator(regex=r'^\d+$')
])
phone = models.CharField(
max_length=MAX_PHONE_LENGTH,
null=True,
blank=True,
validators=[
            MinLengthValidator(MIN_PHONE_LENGTH),
            MaxLengthValidator(MAX_PHONE_LENGTH),
RegexValidator(regex=r'^\d+$')
])
address = models.ManyToManyField(Address)
def __str__(self):
return self.user.get_full_name()
def save(self, *args, **kwargs):
# Check user is employee
if not self.user.is_employee:
raise TypeError('The User must be an Employee')
# Check validation fields
self.full_clean()
# Save instance
super(Employee, self).save(*args, **kwargs)
class Client(models.Model):
name = models.CharField(max_length=30)
email = models.EmailField()
phone = models.CharField(
max_length=MAX_PHONE_LENGTH,
validators=[
            MinLengthValidator(MIN_PHONE_LENGTH),
            MaxLengthValidator(MAX_PHONE_LENGTH),
RegexValidator(regex=r'^\d+$')
])
address = models.ManyToManyField(Address)
|
normal
|
{
"blob_id": "b7511c156c241accaf1668d83ee0a5263b41af0d",
"index": 3465,
"step-1": "<mask token>\n\n\nclass User(AbstractUser):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def check_token(self, token):\n \"\"\"\n Check token validity for an hour since was generated.\n \"\"\"\n tz = self.token_date.tzinfo\n t_now = datetime.datetime.now(tz=tz)\n dt = t_now - self.token_date\n if dt.total_seconds() > PASSWORD_RESET_TIMEOUT:\n self.token_valid = False\n res = token == self.token and self.token_valid\n self.token_valid = False\n return res\n <mask token>\n\n\nclass Employee(models.Model):\n user = models.OneToOneField(User, on_delete=models.CASCADE, primary_key\n =True)\n dni = models.CharField(max_length=MAX_DNI_LENGTH, unique=True,\n validators=[MinLengthValidator(MIN_DNI_LENGTH), MaxLengthValidator(\n MAX_DNI_LENGTH), RegexValidator(regex='^\\\\d+$')])\n cuil = models.CharField(max_length=MAX_CUIL_LENGTH, unique=True,\n validators=[MinLengthValidator(MIN_CUIL_LENGTH), MaxLengthValidator\n (MAX_CUIL_LENGTH), RegexValidator(regex='^\\\\d+$')])\n phone = models.CharField(max_length=MAX_PHONE_LENGTH, null=True, blank=\n True, validators=[MinLengthValidator(MIN_DNI_LENGTH),\n MaxLengthValidator(MAX_DNI_LENGTH), RegexValidator(regex='^\\\\d+$')])\n address = models.ManyToManyField(Address)\n\n def __str__(self):\n return self.user.get_full_name()\n\n def save(self, *args, **kwargs):\n if not self.user.is_employee:\n raise TypeError('The User must be an Employee')\n self.full_clean()\n super(Employee, self).save(*args, **kwargs)\n\n\nclass Client(models.Model):\n name = models.CharField(max_length=30)\n email = models.EmailField()\n phone = models.CharField(max_length=MAX_PHONE_LENGTH, validators=[\n MinLengthValidator(MIN_DNI_LENGTH), MaxLengthValidator(\n MAX_DNI_LENGTH), RegexValidator(regex='^\\\\d+$')])\n address = models.ManyToManyField(Address)\n",
"step-2": "<mask token>\n\n\nclass User(AbstractUser):\n \"\"\"\n Extend the User Django built in model.\n Add token data for password reset, ans is_employee flag for Employee Profile.\n \"\"\"\n is_employee = models.BooleanField(default=False)\n token = models.CharField(max_length=50)\n token_date = models.DateTimeField(auto_now=True)\n token_valid = models.BooleanField(default=True)\n\n def is_order_manager(self):\n return self.is_employee and self.is_active or self.is_superuser\n\n def generate_token(self):\n return secrets.token_urlsafe()\n\n def check_token(self, token):\n \"\"\"\n Check token validity for an hour since was generated.\n \"\"\"\n tz = self.token_date.tzinfo\n t_now = datetime.datetime.now(tz=tz)\n dt = t_now - self.token_date\n if dt.total_seconds() > PASSWORD_RESET_TIMEOUT:\n self.token_valid = False\n res = token == self.token and self.token_valid\n self.token_valid = False\n return res\n\n def save(self, *args, **kwargs):\n \"\"\"\n Until save generate a new token and set valid.\n \"\"\"\n self.token = self.generate_token()\n self.token_valid = True\n super(User, self).save(*args, **kwargs)\n\n\nclass Employee(models.Model):\n user = models.OneToOneField(User, on_delete=models.CASCADE, primary_key\n =True)\n dni = models.CharField(max_length=MAX_DNI_LENGTH, unique=True,\n validators=[MinLengthValidator(MIN_DNI_LENGTH), MaxLengthValidator(\n MAX_DNI_LENGTH), RegexValidator(regex='^\\\\d+$')])\n cuil = models.CharField(max_length=MAX_CUIL_LENGTH, unique=True,\n validators=[MinLengthValidator(MIN_CUIL_LENGTH), MaxLengthValidator\n (MAX_CUIL_LENGTH), RegexValidator(regex='^\\\\d+$')])\n phone = models.CharField(max_length=MAX_PHONE_LENGTH, null=True, blank=\n True, validators=[MinLengthValidator(MIN_DNI_LENGTH),\n MaxLengthValidator(MAX_DNI_LENGTH), RegexValidator(regex='^\\\\d+$')])\n address = models.ManyToManyField(Address)\n\n def __str__(self):\n return self.user.get_full_name()\n\n def save(self, *args, **kwargs):\n if not self.user.is_employee:\n raise TypeError('The User must be an Employee')\n self.full_clean()\n super(Employee, self).save(*args, **kwargs)\n\n\nclass Client(models.Model):\n name = models.CharField(max_length=30)\n email = models.EmailField()\n phone = models.CharField(max_length=MAX_PHONE_LENGTH, validators=[\n MinLengthValidator(MIN_DNI_LENGTH), MaxLengthValidator(\n MAX_DNI_LENGTH), RegexValidator(regex='^\\\\d+$')])\n address = models.ManyToManyField(Address)\n",
"step-3": "<mask token>\n\n\nclass Address(models.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\n class Meta:\n verbose_name_plural = 'Address'\n\n def __str__(self):\n return self.address\n\n\nclass User(AbstractUser):\n \"\"\"\n Extend the User Django built in model.\n Add token data for password reset, ans is_employee flag for Employee Profile.\n \"\"\"\n is_employee = models.BooleanField(default=False)\n token = models.CharField(max_length=50)\n token_date = models.DateTimeField(auto_now=True)\n token_valid = models.BooleanField(default=True)\n\n def is_order_manager(self):\n return self.is_employee and self.is_active or self.is_superuser\n\n def generate_token(self):\n return secrets.token_urlsafe()\n\n def check_token(self, token):\n \"\"\"\n Check token validity for an hour since was generated.\n \"\"\"\n tz = self.token_date.tzinfo\n t_now = datetime.datetime.now(tz=tz)\n dt = t_now - self.token_date\n if dt.total_seconds() > PASSWORD_RESET_TIMEOUT:\n self.token_valid = False\n res = token == self.token and self.token_valid\n self.token_valid = False\n return res\n\n def save(self, *args, **kwargs):\n \"\"\"\n Until save generate a new token and set valid.\n \"\"\"\n self.token = self.generate_token()\n self.token_valid = True\n super(User, self).save(*args, **kwargs)\n\n\nclass Employee(models.Model):\n user = models.OneToOneField(User, on_delete=models.CASCADE, primary_key\n =True)\n dni = models.CharField(max_length=MAX_DNI_LENGTH, unique=True,\n validators=[MinLengthValidator(MIN_DNI_LENGTH), MaxLengthValidator(\n MAX_DNI_LENGTH), RegexValidator(regex='^\\\\d+$')])\n cuil = models.CharField(max_length=MAX_CUIL_LENGTH, unique=True,\n validators=[MinLengthValidator(MIN_CUIL_LENGTH), MaxLengthValidator\n (MAX_CUIL_LENGTH), RegexValidator(regex='^\\\\d+$')])\n phone = models.CharField(max_length=MAX_PHONE_LENGTH, null=True, blank=\n True, validators=[MinLengthValidator(MIN_DNI_LENGTH),\n MaxLengthValidator(MAX_DNI_LENGTH), RegexValidator(regex='^\\\\d+$')])\n address = models.ManyToManyField(Address)\n\n def __str__(self):\n return self.user.get_full_name()\n\n def save(self, *args, **kwargs):\n if not self.user.is_employee:\n raise TypeError('The User must be an Employee')\n self.full_clean()\n super(Employee, self).save(*args, **kwargs)\n\n\nclass Client(models.Model):\n name = models.CharField(max_length=30)\n email = models.EmailField()\n phone = models.CharField(max_length=MAX_PHONE_LENGTH, validators=[\n MinLengthValidator(MIN_DNI_LENGTH), MaxLengthValidator(\n MAX_DNI_LENGTH), RegexValidator(regex='^\\\\d+$')])\n address = models.ManyToManyField(Address)\n",
"step-4": "<mask token>\n\n\nclass Address(models.Model):\n address = models.CharField(max_length=100, unique=True)\n lat = models.DecimalField(max_digits=9, decimal_places=7, default=0)\n lon = models.DecimalField(max_digits=9, decimal_places=7, default=0)\n elev = models.DecimalField(max_digits=9, decimal_places=2, default=0)\n\n\n class Meta:\n verbose_name_plural = 'Address'\n\n def __str__(self):\n return self.address\n\n\nclass User(AbstractUser):\n \"\"\"\n Extend the User Django built in model.\n Add token data for password reset, ans is_employee flag for Employee Profile.\n \"\"\"\n is_employee = models.BooleanField(default=False)\n token = models.CharField(max_length=50)\n token_date = models.DateTimeField(auto_now=True)\n token_valid = models.BooleanField(default=True)\n\n def is_order_manager(self):\n return self.is_employee and self.is_active or self.is_superuser\n\n def generate_token(self):\n return secrets.token_urlsafe()\n\n def check_token(self, token):\n \"\"\"\n Check token validity for an hour since was generated.\n \"\"\"\n tz = self.token_date.tzinfo\n t_now = datetime.datetime.now(tz=tz)\n dt = t_now - self.token_date\n if dt.total_seconds() > PASSWORD_RESET_TIMEOUT:\n self.token_valid = False\n res = token == self.token and self.token_valid\n self.token_valid = False\n return res\n\n def save(self, *args, **kwargs):\n \"\"\"\n Until save generate a new token and set valid.\n \"\"\"\n self.token = self.generate_token()\n self.token_valid = True\n super(User, self).save(*args, **kwargs)\n\n\nclass Employee(models.Model):\n user = models.OneToOneField(User, on_delete=models.CASCADE, primary_key\n =True)\n dni = models.CharField(max_length=MAX_DNI_LENGTH, unique=True,\n validators=[MinLengthValidator(MIN_DNI_LENGTH), MaxLengthValidator(\n MAX_DNI_LENGTH), RegexValidator(regex='^\\\\d+$')])\n cuil = models.CharField(max_length=MAX_CUIL_LENGTH, unique=True,\n validators=[MinLengthValidator(MIN_CUIL_LENGTH), MaxLengthValidator\n (MAX_CUIL_LENGTH), RegexValidator(regex='^\\\\d+$')])\n phone = models.CharField(max_length=MAX_PHONE_LENGTH, null=True, blank=\n True, validators=[MinLengthValidator(MIN_DNI_LENGTH),\n MaxLengthValidator(MAX_DNI_LENGTH), RegexValidator(regex='^\\\\d+$')])\n address = models.ManyToManyField(Address)\n\n def __str__(self):\n return self.user.get_full_name()\n\n def save(self, *args, **kwargs):\n if not self.user.is_employee:\n raise TypeError('The User must be an Employee')\n self.full_clean()\n super(Employee, self).save(*args, **kwargs)\n\n\nclass Client(models.Model):\n name = models.CharField(max_length=30)\n email = models.EmailField()\n phone = models.CharField(max_length=MAX_PHONE_LENGTH, validators=[\n MinLengthValidator(MIN_DNI_LENGTH), MaxLengthValidator(\n MAX_DNI_LENGTH), RegexValidator(regex='^\\\\d+$')])\n address = models.ManyToManyField(Address)\n",
"step-5": "from django.db import models\nfrom django.contrib.auth.models import AbstractUser\nfrom django.core.validators import MinLengthValidator, MaxLengthValidator, RegexValidator\n\nfrom pizzaclub.settings import MAX_DNI_LENGTH, MAX_CUIL_LENGTH, PASSWORD_RESET_TIMEOUT\nfrom pizzaclub.settings import MIN_DNI_LENGTH, MIN_CUIL_LENGTH\nfrom pizzaclub.settings import MAX_PHONE_LENGTH, MIN_PHONE_LENGTH\n\nimport secrets\nimport datetime\n# Create your models here.\nclass Address(models.Model):\n address = models.CharField(max_length=100, unique=True)\n lat = models.DecimalField(max_digits=9, decimal_places=7, default=0)\n lon= models.DecimalField(max_digits=9, decimal_places=7, default=0)\n elev = models.DecimalField(max_digits=9, decimal_places=2, default=0)\n\n class Meta:\n verbose_name_plural = \"Address\"\n\n def __str__(self):\n return self.address\n\nclass User(AbstractUser):\n '''\n Extend the User Django built in model.\n Add token data for password reset, ans is_employee flag for Employee Profile.\n '''\n is_employee = models.BooleanField(default=False)\n token = models.CharField(max_length=50)\n token_date = models.DateTimeField(auto_now=True)\n token_valid = models.BooleanField(default=True)\n\n def is_order_manager(self):\n return (self.is_employee and self.is_active) or self.is_superuser\n\n def generate_token(self):\n return secrets.token_urlsafe()\n\n def check_token(self, token):\n '''\n Check token validity for an hour since was generated.\n '''\n tz = self.token_date.tzinfo\n t_now = datetime.datetime.now(tz=tz)\n\n # Check the token time less than hour\n dt = t_now - self.token_date\n if dt.total_seconds() > PASSWORD_RESET_TIMEOUT:\n self.token_valid = False\n\n # Return True if the token is correct and is_valid\n res = (token == self.token) and self.token_valid\n \n # Set the token invalid\n self.token_valid = False\n\n return res\n\n def save(self, *args, **kwargs):\n '''\n Until save generate a new token and set valid.\n '''\n # Generate a token and set valid\n self.token = self.generate_token()\n self.token_valid = True\n super(User, self).save(*args, **kwargs)\n\nclass Employee(models.Model):\n user = models.OneToOneField(User, on_delete=models.CASCADE, primary_key=True)\n dni = models.CharField(\n max_length=MAX_DNI_LENGTH,\n unique=True,\n validators=[\n MinLengthValidator(MIN_DNI_LENGTH),\n MaxLengthValidator(MAX_DNI_LENGTH),\n RegexValidator(regex=r'^\\d+$')\n ])\n cuil = models.CharField(\n max_length=MAX_CUIL_LENGTH,\n unique=True,\n validators=[\n MinLengthValidator(MIN_CUIL_LENGTH),\n MaxLengthValidator(MAX_CUIL_LENGTH),\n RegexValidator(regex=r'^\\d+$')\n ])\n phone = models.CharField(\n max_length=MAX_PHONE_LENGTH,\n null=True,\n blank=True,\n validators=[\n MinLengthValidator(MIN_DNI_LENGTH),\n MaxLengthValidator(MAX_DNI_LENGTH),\n RegexValidator(regex=r'^\\d+$')\n ])\n address = models.ManyToManyField(Address)\n\n def __str__(self):\n return self.user.get_full_name()\n \n def save(self, *args, **kwargs):\n # Check user is employee\n if not self.user.is_employee:\n raise TypeError('The User must be an Employee')\n # Check validation fields\n self.full_clean()\n # Save instance\n super(Employee, self).save(*args, **kwargs)\n\nclass Client(models.Model):\n name = models.CharField(max_length=30)\n email = models.EmailField()\n phone = models.CharField(\n max_length=MAX_PHONE_LENGTH,\n validators=[\n MinLengthValidator(MIN_DNI_LENGTH),\n MaxLengthValidator(MAX_DNI_LENGTH),\n RegexValidator(regex=r'^\\d+$')\n ])\n address = models.ManyToManyField(Address)\n\n",
"step-ids": [
8,
13,
15,
16,
18
]
}
|
[
8,
13,
15,
16,
18
] |
from django.db.models import Exists
from django.db.models import OuterRef
from django.db.models import QuerySet
from django.utils import timezone
class ProductQuerySet(QuerySet):
def available(self):
return self.filter(available_in__contains=timezone.now(), category__public=True)
def annotate_subproducts(self):
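        # Annotate each product with has_subproducts, a boolean telling whether any SubProductRelation points at it.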
from .models import SubProductRelation
subproducts = SubProductRelation.objects.filter(
bundle_product=OuterRef("pk"),
)
return self.annotate(
has_subproducts=Exists(subproducts),
)
class OrderQuerySet(QuerySet):
def not_cancelled(self):
return self.filter(cancelled=False)
def open(self):
return self.filter(open__isnull=False)
def paid(self):
return self.filter(paid=True)
def unpaid(self):
return self.filter(paid=False)
def cancelled(self):
return self.filter(cancelled=True)
|
normal
|
{
"blob_id": "3fdf67c3e0e4c3aa8a3fed09102aca0272b5ff4f",
"index": 6938,
"step-1": "<mask token>\n\n\nclass OrderQuerySet(QuerySet):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass ProductQuerySet(QuerySet):\n <mask token>\n <mask token>\n\n\nclass OrderQuerySet(QuerySet):\n\n def not_cancelled(self):\n return self.filter(cancelled=False)\n\n def open(self):\n return self.filter(open__isnull=False)\n\n def paid(self):\n return self.filter(paid=True)\n\n def unpaid(self):\n return self.filter(paid=False)\n\n def cancelled(self):\n return self.filter(cancelled=True)\n",
"step-3": "<mask token>\n\n\nclass ProductQuerySet(QuerySet):\n\n def available(self):\n return self.filter(available_in__contains=timezone.now(),\n category__public=True)\n <mask token>\n\n\nclass OrderQuerySet(QuerySet):\n\n def not_cancelled(self):\n return self.filter(cancelled=False)\n\n def open(self):\n return self.filter(open__isnull=False)\n\n def paid(self):\n return self.filter(paid=True)\n\n def unpaid(self):\n return self.filter(paid=False)\n\n def cancelled(self):\n return self.filter(cancelled=True)\n",
"step-4": "<mask token>\n\n\nclass ProductQuerySet(QuerySet):\n\n def available(self):\n return self.filter(available_in__contains=timezone.now(),\n category__public=True)\n\n def annotate_subproducts(self):\n from .models import SubProductRelation\n subproducts = SubProductRelation.objects.filter(bundle_product=\n OuterRef('pk'))\n return self.annotate(has_subproducts=Exists(subproducts))\n\n\nclass OrderQuerySet(QuerySet):\n\n def not_cancelled(self):\n return self.filter(cancelled=False)\n\n def open(self):\n return self.filter(open__isnull=False)\n\n def paid(self):\n return self.filter(paid=True)\n\n def unpaid(self):\n return self.filter(paid=False)\n\n def cancelled(self):\n return self.filter(cancelled=True)\n",
"step-5": "from django.db.models import Exists\nfrom django.db.models import OuterRef\nfrom django.db.models import QuerySet\nfrom django.utils import timezone\n\n\nclass ProductQuerySet(QuerySet):\n def available(self):\n return self.filter(available_in__contains=timezone.now(), category__public=True)\n\n def annotate_subproducts(self):\n from .models import SubProductRelation\n\n subproducts = SubProductRelation.objects.filter(\n bundle_product=OuterRef(\"pk\"),\n )\n return self.annotate(\n has_subproducts=Exists(subproducts),\n )\n\n\nclass OrderQuerySet(QuerySet):\n def not_cancelled(self):\n return self.filter(cancelled=False)\n\n def open(self):\n return self.filter(open__isnull=False)\n\n def paid(self):\n return self.filter(paid=True)\n\n def unpaid(self):\n return self.filter(paid=False)\n\n def cancelled(self):\n return self.filter(cancelled=True)\n",
"step-ids": [
1,
7,
8,
9,
11
]
}
|
[
1,
7,
8,
9,
11
] |
import json, re, bcrypt, jwt
from datetime import datetime, timedelta
from django.core.exceptions import ObjectDoesNotExist
from django.db.models import Avg
from django.http import JsonResponse
from django.views import View
from room.models import Room, Category, RoomAmenity,Image,Amenity,WishList,DisableDate,AbleTime
from reservation.check import check, check_in, check_out
from user.models import User, Host, Review
from user.utils import LoginRequired
class RoomListView(View):
def get(self,request):
try:
city = request.GET.get('city','')
checkin = request.GET.get('checkin',None)
checkout = request.GET.get('checkout', None)
adult = int(request.GET.get('adult','0'))
child = int(request.GET.get('child','0'))
min_price = request.GET.get('min_price',0)
max_price = request.GET.get('max_price',100000000)
is_refund = True if request.GET.get('is_refund',None) == 'true' else False
is_super = True if request.GET.get('is_super',None) == 'true' else False
room_types = request.GET.getlist('room_type',None)
amenities = request.GET.getlist('amenity',None)
page = int(request.GET.get('page', '1'))
            # filters
list_criteria = {
'city__contains': city,
'price__range' : [min_price,max_price],
'capacity__gte' : adult+child
}
if room_types:
list_criteria['category__name__in'] = room_types
if amenities:
list_criteria['amenity__name__in'] = amenities
if is_super:
list_criteria['host__is_super'] = is_super
if is_refund:
list_criteria['is_refund'] = is_refund
#paginator
size = 10
offset = (page-1) * size
limit = page * size
room_list = Room.objects.filter(**list_criteria)
            # date filter
if checkin and checkout:
room_list = [room for room in room_list if check(room, checkin, checkout)]
if checkin:
room_list = [room for room in room_list if check_in(room, checkin)]
if checkout:
room_list = [room for room in room_list if check_out(room, checkout)]
if not room_list:
return JsonResponse({'message':'NO_ROOM_AVAILABLE'}, status=400)
rating_list = [field.name for field in Review._meta.get_fields() if field.name not in ['id','review_user','review_room','comment']]
room_thumbnail = [{
'room_id' : room.id,
'room_name' : room.name,
'price' : room.price,
'address' : room.city,
'room_type' : room.category.name,
'lat' : room.latitude,
'lng' : room.longtitude,
'image' : [image.url for image in room.image.all()],
'is_super' : room.host.is_super,
'capacity' : int(room.capacity),
'amenity' : [roomamenity.amenity.name for roomamenity in room.roomamenity_set.all()],
'rating' : [{
'category' : category,
'category_rating': Review.objects.filter(review_room=room).aggregate(rate_avg=Avg(category))['rate_avg']
} for category in rating_list
]
} for room in room_list[offset:limit]
]
common_data = len(room_list)
return JsonResponse({'thumbnail': room_thumbnail, 'common':common_data }, status=200)
except KeyError:
return JsonResponse({'message':'KeyError'}, status=400)
class RoomView(View):
def get(self,request, room_id):
try:
room = Room.objects.get(id=room_id)
rating_list = [field.name for field in Review._meta.get_fields() if field.name not in ['id','review_user','review_room','comment']]
room_detail = {
'room_name': room.name,
'address' : room.city,
'price' : room.price,
'room_type': room.category.name,
'image' : [image.url for image in room.image.all()][0],
'is_super' : room.host.is_super,
'host' : room.host.user.last_name + room.host.user.first_name,
'capacity' : room.capacity,
'amenity' : [{
'id' : roomamenity.amenity.id,
'icon' : re.sub('<i class=\\"|\\"></i>', '',roomamenity.amenity.image),
'description': roomamenity.amenity.name
} for roomamenity in room.roomamenity_set.all()
],
'rating' : [{
'category' : category,
'category_rating': int(Review.objects.filter(review_room=room).aggregate(Avg(category)).get(category+'__avg'))
} for category in rating_list
]
}
return JsonResponse({'detail': room_detail}, status=200)
except KeyError:
return JsonResponse({'message':'KeyError'}, status=400)
except Room.DoesNotExist:
return JsonResponse({'message':'NOT_FOUND_ROOM_ID'}, status=400)
class WishListView(View):
@LoginRequired
def post(self, request, room_id):
user = request.user
try:
if WishList.objects.filter(wish_user=user, wish_room_id=room_id).exists():
return JsonResponse({'MESSAGE':'Already Choosen'}, status=400)
WishList.objects.create(
                wish_user_id = user.id,
wish_room_id = room_id
)
return JsonResponse({'MESSAGE':'SUCCESS'}, status=200)
except KeyError:
return JsonResponse({'MESSAGE':'KEY ERROR'}, status=400)
@LoginRequired
def delete(self, request, room_id):
try:
user = request.user
wish = WishList.objects.get(wish_user=user, wish_room_id=room_id)
wish.delete()
return JsonResponse({'MESSAGE':'Delete Success'}, status=200)
except KeyError:
return JsonResponse({'MESSAGE':'KEY ERROR'}, status=400)
except WishList.DoesNotExist:
return JsonResponse({'MESSAGE':'Already not Exist in list'}, status=400)
@LoginRequired
def get(self, request):
try:
user = request.user
wishlists = WishList.objects.filter(wish_user = user)
rating_list = [field.name for field in Review._meta.get_fields() if field.name not in ['id','review_user','review_room','comment']]
if not wishlists:
return JsonResponse({'MESSAGE':'nothing in cart'}, status=400)
result = [{
'room_id' : wishlist.wish_room.id,
'room_name': wishlist.wish_room.name,
'address' : wishlist.wish_room.city,
'price' : wishlist.wish_room.price,
'room_type': wishlist.wish_room.category.name,
'image' : [image.url for image in wishlist.wish_room.image.all()],
'is_super' : wishlist.wish_room.host.is_super,
'capacity' : wishlist.wish_room.capacity,
'lat' : wishlist.wish_room.latitude,
'lng' : wishlist.wish_room.longtitude,
'amenity' : [roomamenity.amenity.name for roomamenity in wishlist.wish_room.roomamenity_set.all()],
'rating' : [{
'category' : category,
'category_rating': Review.objects.filter(review_room=wishlist.wish_room).aggregate(Avg(category)).get(category+'__avg')
} for category in rating_list
]
} for wishlist in wishlists]
return JsonResponse({'result':result}, status=200)
except KeyError:
return JsonResponse({'MESSAGE':'KEY ERROR'}, status=400)
|
normal
|
{
"blob_id": "cc5b22a0246fcc9feaed6a0663095a6003e6cef1",
"index": 6685,
"step-1": "<mask token>\n\n\nclass RoomView(View):\n\n def get(self, request, room_id):\n try:\n room = Room.objects.get(id=room_id)\n rating_list = [field.name for field in Review._meta.get_fields(\n ) if field.name not in ['id', 'review_user', 'review_room',\n 'comment']]\n room_detail = {'room_name': room.name, 'address': room.city,\n 'price': room.price, 'room_type': room.category.name,\n 'image': [image.url for image in room.image.all()][0],\n 'is_super': room.host.is_super, 'host': room.host.user.\n last_name + room.host.user.first_name, 'capacity': room.\n capacity, 'amenity': [{'id': roomamenity.amenity.id, 'icon':\n re.sub('<i class=\\\\\"|\\\\\"></i>', '', roomamenity.amenity.\n image), 'description': roomamenity.amenity.name} for\n roomamenity in room.roomamenity_set.all()], 'rating': [{\n 'category': category, 'category_rating': int(Review.objects\n .filter(review_room=room).aggregate(Avg(category)).get(\n category + '__avg'))} for category in rating_list]}\n return JsonResponse({'detail': room_detail}, status=200)\n except KeyError:\n return JsonResponse({'message': 'KeyError'}, status=400)\n except Room.DoesNotExist:\n return JsonResponse({'message': 'NOT_FOUND_ROOM_ID'}, status=400)\n\n\nclass WishListView(View):\n\n @LoginRequired\n def post(self, request, room_id):\n user = request.user\n try:\n if WishList.objects.filter(wish_user=user, wish_room_id=room_id\n ).exists():\n return JsonResponse({'MESSAGE': 'Already Choosen'}, status=400)\n WishList.objects.create(wish_user_id=1, wish_room_id=room_id)\n return JsonResponse({'MESSAGE': 'SUCCESS'}, status=200)\n except KeyError:\n return JsonResponse({'MESSAGE': 'KEY ERROR'}, status=400)\n\n @LoginRequired\n def delete(self, request, room_id):\n try:\n user = request.user\n wish = WishList.objects.get(wish_user=user, wish_room_id=room_id)\n wish.delete()\n return JsonResponse({'MESSAGE': 'Delete Success'}, status=200)\n except KeyError:\n return JsonResponse({'MESSAGE': 'KEY ERROR'}, status=400)\n except WishList.DoesNotExist:\n return JsonResponse({'MESSAGE': 'Already not Exist in list'},\n status=400)\n\n @LoginRequired\n def get(self, request):\n try:\n user = request.user\n wishlists = WishList.objects.filter(wish_user=user)\n rating_list = [field.name for field in Review._meta.get_fields(\n ) if field.name not in ['id', 'review_user', 'review_room',\n 'comment']]\n if not wishlists:\n return JsonResponse({'MESSAGE': 'nothing in cart'}, status=400)\n result = [{'room_id': wishlist.wish_room.id, 'room_name':\n wishlist.wish_room.name, 'address': wishlist.wish_room.city,\n 'price': wishlist.wish_room.price, 'room_type': wishlist.\n wish_room.category.name, 'image': [image.url for image in\n wishlist.wish_room.image.all()], 'is_super': wishlist.\n wish_room.host.is_super, 'capacity': wishlist.wish_room.\n capacity, 'lat': wishlist.wish_room.latitude, 'lng':\n wishlist.wish_room.longtitude, 'amenity': [roomamenity.\n amenity.name for roomamenity in wishlist.wish_room.\n roomamenity_set.all()], 'rating': [{'category': category,\n 'category_rating': Review.objects.filter(review_room=\n wishlist.wish_room).aggregate(Avg(category)).get(category +\n '__avg')} for category in rating_list]} for wishlist in\n wishlists]\n return JsonResponse({'result': result}, status=200)\n except KeyError:\n return JsonResponse({'MESSAGE': 'KEY ERROR'}, status=400)\n",
"step-2": "<mask token>\n\n\nclass RoomListView(View):\n <mask token>\n\n\nclass RoomView(View):\n\n def get(self, request, room_id):\n try:\n room = Room.objects.get(id=room_id)\n rating_list = [field.name for field in Review._meta.get_fields(\n ) if field.name not in ['id', 'review_user', 'review_room',\n 'comment']]\n room_detail = {'room_name': room.name, 'address': room.city,\n 'price': room.price, 'room_type': room.category.name,\n 'image': [image.url for image in room.image.all()][0],\n 'is_super': room.host.is_super, 'host': room.host.user.\n last_name + room.host.user.first_name, 'capacity': room.\n capacity, 'amenity': [{'id': roomamenity.amenity.id, 'icon':\n re.sub('<i class=\\\\\"|\\\\\"></i>', '', roomamenity.amenity.\n image), 'description': roomamenity.amenity.name} for\n roomamenity in room.roomamenity_set.all()], 'rating': [{\n 'category': category, 'category_rating': int(Review.objects\n .filter(review_room=room).aggregate(Avg(category)).get(\n category + '__avg'))} for category in rating_list]}\n return JsonResponse({'detail': room_detail}, status=200)\n except KeyError:\n return JsonResponse({'message': 'KeyError'}, status=400)\n except Room.DoesNotExist:\n return JsonResponse({'message': 'NOT_FOUND_ROOM_ID'}, status=400)\n\n\nclass WishListView(View):\n\n @LoginRequired\n def post(self, request, room_id):\n user = request.user\n try:\n if WishList.objects.filter(wish_user=user, wish_room_id=room_id\n ).exists():\n return JsonResponse({'MESSAGE': 'Already Choosen'}, status=400)\n WishList.objects.create(wish_user_id=1, wish_room_id=room_id)\n return JsonResponse({'MESSAGE': 'SUCCESS'}, status=200)\n except KeyError:\n return JsonResponse({'MESSAGE': 'KEY ERROR'}, status=400)\n\n @LoginRequired\n def delete(self, request, room_id):\n try:\n user = request.user\n wish = WishList.objects.get(wish_user=user, wish_room_id=room_id)\n wish.delete()\n return JsonResponse({'MESSAGE': 'Delete Success'}, status=200)\n except KeyError:\n return JsonResponse({'MESSAGE': 'KEY ERROR'}, status=400)\n except WishList.DoesNotExist:\n return JsonResponse({'MESSAGE': 'Already not Exist in list'},\n status=400)\n\n @LoginRequired\n def get(self, request):\n try:\n user = request.user\n wishlists = WishList.objects.filter(wish_user=user)\n rating_list = [field.name for field in Review._meta.get_fields(\n ) if field.name not in ['id', 'review_user', 'review_room',\n 'comment']]\n if not wishlists:\n return JsonResponse({'MESSAGE': 'nothing in cart'}, status=400)\n result = [{'room_id': wishlist.wish_room.id, 'room_name':\n wishlist.wish_room.name, 'address': wishlist.wish_room.city,\n 'price': wishlist.wish_room.price, 'room_type': wishlist.\n wish_room.category.name, 'image': [image.url for image in\n wishlist.wish_room.image.all()], 'is_super': wishlist.\n wish_room.host.is_super, 'capacity': wishlist.wish_room.\n capacity, 'lat': wishlist.wish_room.latitude, 'lng':\n wishlist.wish_room.longtitude, 'amenity': [roomamenity.\n amenity.name for roomamenity in wishlist.wish_room.\n roomamenity_set.all()], 'rating': [{'category': category,\n 'category_rating': Review.objects.filter(review_room=\n wishlist.wish_room).aggregate(Avg(category)).get(category +\n '__avg')} for category in rating_list]} for wishlist in\n wishlists]\n return JsonResponse({'result': result}, status=200)\n except KeyError:\n return JsonResponse({'MESSAGE': 'KEY ERROR'}, status=400)\n",
"step-3": "<mask token>\n\n\nclass RoomListView(View):\n\n def get(self, request):\n try:\n city = request.GET.get('city', '')\n checkin = request.GET.get('checkin', None)\n checkout = request.GET.get('checkout', None)\n adult = int(request.GET.get('adult', '0'))\n child = int(request.GET.get('child', '0'))\n min_price = request.GET.get('min_price', 0)\n max_price = request.GET.get('max_price', 100000000)\n is_refund = True if request.GET.get('is_refund', None\n ) == 'true' else False\n is_super = True if request.GET.get('is_super', None\n ) == 'true' else False\n room_types = request.GET.getlist('room_type', None)\n amenities = request.GET.getlist('amenity', None)\n page = int(request.GET.get('page', '1'))\n list_criteria = {'city__contains': city, 'price__range': [\n min_price, max_price], 'capacity__gte': adult + child}\n if room_types:\n list_criteria['category__name__in'] = room_types\n if amenities:\n list_criteria['amenity__name__in'] = amenities\n if is_super:\n list_criteria['host__is_super'] = is_super\n if is_refund:\n list_criteria['is_refund'] = is_refund\n size = 10\n offset = (page - 1) * size\n limit = page * size\n room_list = Room.objects.filter(**list_criteria)\n if checkin and checkout:\n room_list = [room for room in room_list if check(room,\n checkin, checkout)]\n if checkin:\n room_list = [room for room in room_list if check_in(room,\n checkin)]\n if checkout:\n room_list = [room for room in room_list if check_out(room,\n checkout)]\n if not room_list:\n return JsonResponse({'message': 'NO_ROOM_AVAILABLE'},\n status=400)\n rating_list = [field.name for field in Review._meta.get_fields(\n ) if field.name not in ['id', 'review_user', 'review_room',\n 'comment']]\n room_thumbnail = [{'room_id': room.id, 'room_name': room.name,\n 'price': room.price, 'address': room.city, 'room_type':\n room.category.name, 'lat': room.latitude, 'lng': room.\n longtitude, 'image': [image.url for image in room.image.all\n ()], 'is_super': room.host.is_super, 'capacity': int(room.\n capacity), 'amenity': [roomamenity.amenity.name for\n roomamenity in room.roomamenity_set.all()], 'rating': [{\n 'category': category, 'category_rating': Review.objects.\n filter(review_room=room).aggregate(rate_avg=Avg(category))[\n 'rate_avg']} for category in rating_list]} for room in\n room_list[offset:limit]]\n common_data = len(room_list)\n return JsonResponse({'thumbnail': room_thumbnail, 'common':\n common_data}, status=200)\n except KeyError:\n return JsonResponse({'message': 'KeyError'}, status=400)\n\n\nclass RoomView(View):\n\n def get(self, request, room_id):\n try:\n room = Room.objects.get(id=room_id)\n rating_list = [field.name for field in Review._meta.get_fields(\n ) if field.name not in ['id', 'review_user', 'review_room',\n 'comment']]\n room_detail = {'room_name': room.name, 'address': room.city,\n 'price': room.price, 'room_type': room.category.name,\n 'image': [image.url for image in room.image.all()][0],\n 'is_super': room.host.is_super, 'host': room.host.user.\n last_name + room.host.user.first_name, 'capacity': room.\n capacity, 'amenity': [{'id': roomamenity.amenity.id, 'icon':\n re.sub('<i class=\\\\\"|\\\\\"></i>', '', roomamenity.amenity.\n image), 'description': roomamenity.amenity.name} for\n roomamenity in room.roomamenity_set.all()], 'rating': [{\n 'category': category, 'category_rating': int(Review.objects\n .filter(review_room=room).aggregate(Avg(category)).get(\n category + '__avg'))} for category in rating_list]}\n return JsonResponse({'detail': room_detail}, status=200)\n 
except KeyError:\n return JsonResponse({'message': 'KeyError'}, status=400)\n except Room.DoesNotExist:\n return JsonResponse({'message': 'NOT_FOUND_ROOM_ID'}, status=400)\n\n\nclass WishListView(View):\n\n @LoginRequired\n def post(self, request, room_id):\n user = request.user\n try:\n if WishList.objects.filter(wish_user=user, wish_room_id=room_id\n ).exists():\n return JsonResponse({'MESSAGE': 'Already Choosen'}, status=400)\n WishList.objects.create(wish_user_id=1, wish_room_id=room_id)\n return JsonResponse({'MESSAGE': 'SUCCESS'}, status=200)\n except KeyError:\n return JsonResponse({'MESSAGE': 'KEY ERROR'}, status=400)\n\n @LoginRequired\n def delete(self, request, room_id):\n try:\n user = request.user\n wish = WishList.objects.get(wish_user=user, wish_room_id=room_id)\n wish.delete()\n return JsonResponse({'MESSAGE': 'Delete Success'}, status=200)\n except KeyError:\n return JsonResponse({'MESSAGE': 'KEY ERROR'}, status=400)\n except WishList.DoesNotExist:\n return JsonResponse({'MESSAGE': 'Already not Exist in list'},\n status=400)\n\n @LoginRequired\n def get(self, request):\n try:\n user = request.user\n wishlists = WishList.objects.filter(wish_user=user)\n rating_list = [field.name for field in Review._meta.get_fields(\n ) if field.name not in ['id', 'review_user', 'review_room',\n 'comment']]\n if not wishlists:\n return JsonResponse({'MESSAGE': 'nothing in cart'}, status=400)\n result = [{'room_id': wishlist.wish_room.id, 'room_name':\n wishlist.wish_room.name, 'address': wishlist.wish_room.city,\n 'price': wishlist.wish_room.price, 'room_type': wishlist.\n wish_room.category.name, 'image': [image.url for image in\n wishlist.wish_room.image.all()], 'is_super': wishlist.\n wish_room.host.is_super, 'capacity': wishlist.wish_room.\n capacity, 'lat': wishlist.wish_room.latitude, 'lng':\n wishlist.wish_room.longtitude, 'amenity': [roomamenity.\n amenity.name for roomamenity in wishlist.wish_room.\n roomamenity_set.all()], 'rating': [{'category': category,\n 'category_rating': Review.objects.filter(review_room=\n wishlist.wish_room).aggregate(Avg(category)).get(category +\n '__avg')} for category in rating_list]} for wishlist in\n wishlists]\n return JsonResponse({'result': result}, status=200)\n except KeyError:\n return JsonResponse({'MESSAGE': 'KEY ERROR'}, status=400)\n",
"step-4": "import json, re, bcrypt, jwt\nfrom datetime import datetime, timedelta\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.db.models import Avg\nfrom django.http import JsonResponse\nfrom django.views import View\nfrom room.models import Room, Category, RoomAmenity, Image, Amenity, WishList, DisableDate, AbleTime\nfrom reservation.check import check, check_in, check_out\nfrom user.models import User, Host, Review\nfrom user.utils import LoginRequired\n\n\nclass RoomListView(View):\n\n def get(self, request):\n try:\n city = request.GET.get('city', '')\n checkin = request.GET.get('checkin', None)\n checkout = request.GET.get('checkout', None)\n adult = int(request.GET.get('adult', '0'))\n child = int(request.GET.get('child', '0'))\n min_price = request.GET.get('min_price', 0)\n max_price = request.GET.get('max_price', 100000000)\n is_refund = True if request.GET.get('is_refund', None\n ) == 'true' else False\n is_super = True if request.GET.get('is_super', None\n ) == 'true' else False\n room_types = request.GET.getlist('room_type', None)\n amenities = request.GET.getlist('amenity', None)\n page = int(request.GET.get('page', '1'))\n list_criteria = {'city__contains': city, 'price__range': [\n min_price, max_price], 'capacity__gte': adult + child}\n if room_types:\n list_criteria['category__name__in'] = room_types\n if amenities:\n list_criteria['amenity__name__in'] = amenities\n if is_super:\n list_criteria['host__is_super'] = is_super\n if is_refund:\n list_criteria['is_refund'] = is_refund\n size = 10\n offset = (page - 1) * size\n limit = page * size\n room_list = Room.objects.filter(**list_criteria)\n if checkin and checkout:\n room_list = [room for room in room_list if check(room,\n checkin, checkout)]\n if checkin:\n room_list = [room for room in room_list if check_in(room,\n checkin)]\n if checkout:\n room_list = [room for room in room_list if check_out(room,\n checkout)]\n if not room_list:\n return JsonResponse({'message': 'NO_ROOM_AVAILABLE'},\n status=400)\n rating_list = [field.name for field in Review._meta.get_fields(\n ) if field.name not in ['id', 'review_user', 'review_room',\n 'comment']]\n room_thumbnail = [{'room_id': room.id, 'room_name': room.name,\n 'price': room.price, 'address': room.city, 'room_type':\n room.category.name, 'lat': room.latitude, 'lng': room.\n longtitude, 'image': [image.url for image in room.image.all\n ()], 'is_super': room.host.is_super, 'capacity': int(room.\n capacity), 'amenity': [roomamenity.amenity.name for\n roomamenity in room.roomamenity_set.all()], 'rating': [{\n 'category': category, 'category_rating': Review.objects.\n filter(review_room=room).aggregate(rate_avg=Avg(category))[\n 'rate_avg']} for category in rating_list]} for room in\n room_list[offset:limit]]\n common_data = len(room_list)\n return JsonResponse({'thumbnail': room_thumbnail, 'common':\n common_data}, status=200)\n except KeyError:\n return JsonResponse({'message': 'KeyError'}, status=400)\n\n\nclass RoomView(View):\n\n def get(self, request, room_id):\n try:\n room = Room.objects.get(id=room_id)\n rating_list = [field.name for field in Review._meta.get_fields(\n ) if field.name not in ['id', 'review_user', 'review_room',\n 'comment']]\n room_detail = {'room_name': room.name, 'address': room.city,\n 'price': room.price, 'room_type': room.category.name,\n 'image': [image.url for image in room.image.all()][0],\n 'is_super': room.host.is_super, 'host': room.host.user.\n last_name + room.host.user.first_name, 'capacity': room.\n capacity, 
'amenity': [{'id': roomamenity.amenity.id, 'icon':\n re.sub('<i class=\\\\\"|\\\\\"></i>', '', roomamenity.amenity.\n image), 'description': roomamenity.amenity.name} for\n roomamenity in room.roomamenity_set.all()], 'rating': [{\n 'category': category, 'category_rating': int(Review.objects\n .filter(review_room=room).aggregate(Avg(category)).get(\n category + '__avg'))} for category in rating_list]}\n return JsonResponse({'detail': room_detail}, status=200)\n except KeyError:\n return JsonResponse({'message': 'KeyError'}, status=400)\n except Room.DoesNotExist:\n return JsonResponse({'message': 'NOT_FOUND_ROOM_ID'}, status=400)\n\n\nclass WishListView(View):\n\n @LoginRequired\n def post(self, request, room_id):\n user = request.user\n try:\n if WishList.objects.filter(wish_user=user, wish_room_id=room_id\n ).exists():\n return JsonResponse({'MESSAGE': 'Already Choosen'}, status=400)\n WishList.objects.create(wish_user_id=1, wish_room_id=room_id)\n return JsonResponse({'MESSAGE': 'SUCCESS'}, status=200)\n except KeyError:\n return JsonResponse({'MESSAGE': 'KEY ERROR'}, status=400)\n\n @LoginRequired\n def delete(self, request, room_id):\n try:\n user = request.user\n wish = WishList.objects.get(wish_user=user, wish_room_id=room_id)\n wish.delete()\n return JsonResponse({'MESSAGE': 'Delete Success'}, status=200)\n except KeyError:\n return JsonResponse({'MESSAGE': 'KEY ERROR'}, status=400)\n except WishList.DoesNotExist:\n return JsonResponse({'MESSAGE': 'Already not Exist in list'},\n status=400)\n\n @LoginRequired\n def get(self, request):\n try:\n user = request.user\n wishlists = WishList.objects.filter(wish_user=user)\n rating_list = [field.name for field in Review._meta.get_fields(\n ) if field.name not in ['id', 'review_user', 'review_room',\n 'comment']]\n if not wishlists:\n return JsonResponse({'MESSAGE': 'nothing in cart'}, status=400)\n result = [{'room_id': wishlist.wish_room.id, 'room_name':\n wishlist.wish_room.name, 'address': wishlist.wish_room.city,\n 'price': wishlist.wish_room.price, 'room_type': wishlist.\n wish_room.category.name, 'image': [image.url for image in\n wishlist.wish_room.image.all()], 'is_super': wishlist.\n wish_room.host.is_super, 'capacity': wishlist.wish_room.\n capacity, 'lat': wishlist.wish_room.latitude, 'lng':\n wishlist.wish_room.longtitude, 'amenity': [roomamenity.\n amenity.name for roomamenity in wishlist.wish_room.\n roomamenity_set.all()], 'rating': [{'category': category,\n 'category_rating': Review.objects.filter(review_room=\n wishlist.wish_room).aggregate(Avg(category)).get(category +\n '__avg')} for category in rating_list]} for wishlist in\n wishlists]\n return JsonResponse({'result': result}, status=200)\n except KeyError:\n return JsonResponse({'MESSAGE': 'KEY ERROR'}, status=400)\n",
"step-5": "import json, re, bcrypt, jwt\n\nfrom datetime import datetime, timedelta\n\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.db.models import Avg\nfrom django.http import JsonResponse\nfrom django.views import View\nfrom room.models import Room, Category, RoomAmenity,Image,Amenity,WishList,DisableDate,AbleTime\nfrom reservation.check import check, check_in, check_out\nfrom user.models import User, Host, Review\nfrom user.utils import LoginRequired\n\nclass RoomListView(View):\n def get(self,request):\n try: \n city = request.GET.get('city','')\n checkin = request.GET.get('checkin',None)\n checkout = request.GET.get('checkout', None)\n adult = int(request.GET.get('adult','0'))\n child = int(request.GET.get('child','0'))\n min_price = request.GET.get('min_price',0)\n max_price = request.GET.get('max_price',100000000)\n is_refund = True if request.GET.get('is_refund',None) == 'true' else False\n is_super = True if request.GET.get('is_super',None) == 'true' else False\n room_types = request.GET.getlist('room_type',None)\n amenities = request.GET.getlist('amenity',None)\n page = int(request.GET.get('page', '1'))\n \n #필터\n list_criteria = {\n 'city__contains': city,\n 'price__range' : [min_price,max_price],\n 'capacity__gte' : adult+child\n }\n if room_types: \n list_criteria['category__name__in'] = room_types\n if amenities: \n list_criteria['amenity__name__in'] = amenities\n if is_super: \n list_criteria['host__is_super'] = is_super\n if is_refund: \n list_criteria['is_refund'] = is_refund\n\n #paginator\n size = 10\n offset = (page-1) * size\n limit = page * size\n\n room_list = Room.objects.filter(**list_criteria)\n \n #날짜 필터\n if checkin and checkout:\n room_list = [room for room in room_list if check(room, checkin, checkout)]\n if checkin:\n room_list = [room for room in room_list if check_in(room, checkin)]\n if checkout:\n room_list = [room for room in room_list if check_out(room, checkout)]\n if not room_list:\n return JsonResponse({'message':'NO_ROOM_AVAILABLE'}, status=400)\n\n rating_list = [field.name for field in Review._meta.get_fields() if field.name not in ['id','review_user','review_room','comment']]\n \n room_thumbnail = [{\n 'room_id' : room.id,\n 'room_name' : room.name,\n 'price' : room.price,\n 'address' : room.city,\n 'room_type' : room.category.name,\n 'lat' : room.latitude,\n 'lng' : room.longtitude,\n 'image' : [image.url for image in room.image.all()],\n 'is_super' : room.host.is_super,\n 'capacity' : int(room.capacity),\n 'amenity' : [roomamenity.amenity.name for roomamenity in room.roomamenity_set.all()],\n 'rating' : [{\n 'category' : category,\n 'category_rating': Review.objects.filter(review_room=room).aggregate(rate_avg=Avg(category))['rate_avg']\n } for category in rating_list\n ]\n } for room in room_list[offset:limit]\n ]\n\n common_data = len(room_list)\n \n return JsonResponse({'thumbnail': room_thumbnail, 'common':common_data }, status=200)\n\n except KeyError:\n return JsonResponse({'message':'KeyError'}, status=400)\n\nclass RoomView(View):\n def get(self,request, room_id):\n try: \n room = Room.objects.get(id=room_id)\n rating_list = [field.name for field in Review._meta.get_fields() if field.name not in ['id','review_user','review_room','comment']]\n\n room_detail = {\n 'room_name': room.name,\n 'address' : room.city,\n 'price' : room.price,\n 'room_type': room.category.name,\n 'image' : [image.url for image in room.image.all()][0],\n 'is_super' : room.host.is_super,\n 'host' : room.host.user.last_name + 
room.host.user.first_name,\n 'capacity' : room.capacity,\n 'amenity' : [{\n 'id' : roomamenity.amenity.id,\n 'icon' : re.sub('<i class=\\\\\"|\\\\\"></i>', '',roomamenity.amenity.image),\n 'description': roomamenity.amenity.name\n } for roomamenity in room.roomamenity_set.all()\n ],\n 'rating' : [{\n 'category' : category,\n 'category_rating': int(Review.objects.filter(review_room=room).aggregate(Avg(category)).get(category+'__avg'))\n } for category in rating_list\n ]\n }\n \n return JsonResponse({'detail': room_detail}, status=200)\n \n except KeyError:\n return JsonResponse({'message':'KeyError'}, status=400)\n except Room.DoesNotExist:\n return JsonResponse({'message':'NOT_FOUND_ROOM_ID'}, status=400)\n\nclass WishListView(View):\n @LoginRequired\n def post(self, request, room_id):\n user = request.user\n\n try:\n if WishList.objects.filter(wish_user=user, wish_room_id=room_id).exists():\n return JsonResponse({'MESSAGE':'Already Choosen'}, status=400)\n \n WishList.objects.create(\n wish_user_id = 1,\n wish_room_id = room_id\n )\n return JsonResponse({'MESSAGE':'SUCCESS'}, status=200)\n\n except KeyError:\n return JsonResponse({'MESSAGE':'KEY ERROR'}, status=400) \n\n @LoginRequired\n def delete(self, request, room_id):\n try:\n user = request.user\n wish = WishList.objects.get(wish_user=user, wish_room_id=room_id)\n \n wish.delete()\n return JsonResponse({'MESSAGE':'Delete Success'}, status=200)\n \n except KeyError:\n return JsonResponse({'MESSAGE':'KEY ERROR'}, status=400)\n except WishList.DoesNotExist:\n return JsonResponse({'MESSAGE':'Already not Exist in list'}, status=400)\n\n @LoginRequired\n def get(self, request):\n try:\n user = request.user\n wishlists = WishList.objects.filter(wish_user = user)\n rating_list = [field.name for field in Review._meta.get_fields() if field.name not in ['id','review_user','review_room','comment']]\n\n if not wishlists:\n return JsonResponse({'MESSAGE':'nothing in cart'}, status=400)\n\n\n result = [{\n 'room_id' : wishlist.wish_room.id,\n 'room_name': wishlist.wish_room.name,\n 'address' : wishlist.wish_room.city,\n 'price' : wishlist.wish_room.price,\n 'room_type': wishlist.wish_room.category.name,\n 'image' : [image.url for image in wishlist.wish_room.image.all()],\n 'is_super' : wishlist.wish_room.host.is_super,\n 'capacity' : wishlist.wish_room.capacity,\n 'lat' : wishlist.wish_room.latitude,\n 'lng' : wishlist.wish_room.longtitude,\n 'amenity' : [roomamenity.amenity.name for roomamenity in wishlist.wish_room.roomamenity_set.all()],\n 'rating' : [{\n 'category' : category,\n 'category_rating': Review.objects.filter(review_room=wishlist.wish_room).aggregate(Avg(category)).get(category+'__avg')\n } for category in rating_list\n ]\n } for wishlist in wishlists]\n \n return JsonResponse({'result':result}, status=200)\n\n except KeyError:\n return JsonResponse({'MESSAGE':'KEY ERROR'}, status=400)\n",
"step-ids": [
6,
7,
8,
9,
10
]
}
|
[
6,
7,
8,
9,
10
] |
import gc
import sys
import time
import warnings
import multiprocessing
import numpy as np
import pandas as pd
import lightgbm as lgb
from os import path, makedirs
from tqdm import tqdm
from utils import Logger
from datetime import datetime
from sklearn.metrics import roc_auc_score
from sklearn.model_selection import StratifiedKFold
# ======================================================================= Method
def load_dataframe(dataset):
return pd.read_csv(dataset)
def augment(x, y, t=2):
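    # class-wise augmentation: copy rows of each class and, per feature, shuffle the column triple
    # [c, c+200, c+400] across the copies (t rounds for positives, t//2 for negatives)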
xs, xn = [], []
for i in range(t):
mask = y > 0
x1 = x[mask].copy()
for c in range(200):
val = x1[:, [c, c+200, c+400]]
np.random.shuffle(val)
x1[:, [c, c+200, c+400]] = val
xs.append(x1)
for i in range(t//2):
mask = y == 0
x1 = x[mask].copy()
for c in range(200):
val = x1[:, [c, c+200, c+400]]
np.random.shuffle(val)
x1[:, [c, c+200, c+400]] = val
xn.append(x1)
xs = np.vstack(xs)
xn = np.vstack(xn)
ys = np.ones(xs.shape[0])
yn = np.zeros(xn.shape[0])
x = np.vstack([x, xs, xn])
y = np.concatenate([y, ys, yn])
return x, y
# ======================================================================= Main
if __name__ == '__main__':
gc.enable()
pd.set_option('max_rows', None)
pd.set_option('max_columns', None)
warnings.simplefilter('ignore', UserWarning)
# =================================================================== Params
top_folder = './output'
today = datetime.today()
now = today.strftime('%m%d-%H%M')
log_name = now + '.txt'
sys.stdout = Logger(path.join(top_folder, log_name))
seed_np = 1011
np.random.seed(seed_np)
print('numpy seed: {}'.format(seed_np))
# =================================================================== Load Data
start = time.time()
with multiprocessing.Pool() as pool:
train, test = pool.map(load_dataframe, ['./input/train.csv', './input/test.csv'])
# === fake sample
df_test = test.drop(columns=['ID_code']).values
unique_samples = []
unique_count = np.zeros_like(df_test)
for feature in tqdm(range(df_test.shape[1])):
_, index_, count_ = np.unique(df_test[:, feature], return_counts=True, return_index=True)
unique_count[index_[count_ == 1], feature] += 1
idx_score = np.argwhere(np.sum(unique_count, axis=1) > 0)[:, 0]
idx_synthetic = np.argwhere(np.sum(unique_count, axis=1) == 0)[:, 0]
synthetic = test.loc[idx_synthetic]
test = test.loc[idx_score]
raw = pd.concat([train, test], axis=0, sort=False, ignore_index=True)
# ============================== Extra Feature
len_train = len(train)
col_var = list(raw.columns[2:])
# === replace value(frequency=1) to NA
mask = pd.DataFrame(np.ones([raw.shape[0], len(col_var)]), columns=col_var)
for col in tqdm(col_var):
cnt = raw[col].value_counts()
val = cnt[cnt == 1].index
mask.loc[np.isin(raw[col], val), col] = 0
col_repeat = [col + '_repeat_2' for col in col_var]
raw[col_repeat] = raw[col_var][mask.astype(bool)]
# === replace value(frequency=1/2) to NA
mask = pd.DataFrame(np.ones([raw.shape[0], len(col_var)]), columns=col_var)
for col in tqdm(col_var):
cnt = raw[col].value_counts()
val = cnt[np.isin(cnt, [1, 2])].index
mask.loc[np.isin(raw[col], val), col] = 0
col_repeat = [col + '_repeat_3' for col in col_var]
raw[col_repeat] = raw[col_var][mask.astype(bool)]
raw = pd.concat([raw, synthetic], axis=0, sort=False, ignore_index=True)
# === logging
print('data: {}'.format(raw.shape))
print('elapsed time: {:.1f} min'.format((time.time() - start)/60))
# =================================================================== PreProcess
feats = [col for col in raw.columns.values if col not in ['ID_code', 'target']]
# =================================================================== Model
train = raw[:len_train]
test = raw[len_train:].copy()
x_train = train[feats]
y_train = train['target']
x_test = test[feats]
print('trn_x: {}'.format(x_train.shape))
print('x_test: {}'.format(x_test.shape))
param = {
'objective': 'binary',
'boosting': 'gbdt',
'metric': 'auc',
'verbosity': -1,
'n_jobs': 11,
'random_state': 1993,
'learning_rate': 0.01,
'num_leaves': 8,
'max_depth': -1,
'feature_fraction': 0.05,
'bagging_freq': 5,
'bagging_fraction': 0.4,
'min_data_in_leaf': 80,
'min_sum_hessian_in_leaf': 10.0,
}
print('model params:\n{}'.format(pd.Series(list(param.values()), index=list(param.keys()))))
seed_fold = 26
folds = StratifiedKFold(n_splits=5, shuffle=True, random_state=seed_fold)
print('StratifiedKFold seed: {}'.format(seed_fold))
round_max = 30000
round_early_stopping = 3000
print('num_round: {}'.format(round_max))
print('early_stopping_round: {}'.format(round_early_stopping))
# === training
oof = np.zeros(len(x_train))
predictions = np.zeros(len(x_test))
start = time.time()
for fold_, (trn_idx, val_idx) in enumerate(folds.split(x_train.values, y_train.values)):
print("fold n°{}".format(fold_))
trn_x, trn_y = x_train.iloc[trn_idx], y_train.iloc[trn_idx]
val_x, val_y = x_train.iloc[val_idx], y_train.iloc[val_idx]
N = 5
for i in range(N):
X_t, y_t = augment(trn_x.values, trn_y.values)
X_t = pd.DataFrame(X_t, columns=feats)
trn_data = lgb.Dataset(X_t, label=y_t)
val_data = lgb.Dataset(val_x, label=val_y)
evals_result = {}
clf = lgb.train(param,
trn_data,
round_max,
valid_sets=[trn_data, val_data],
early_stopping_rounds=round_early_stopping,
verbose_eval=1000,
evals_result=evals_result)
oof[val_idx] += clf.predict(val_x, num_iteration=clf.best_iteration) / N
predictions += clf.predict(x_test, num_iteration=clf.best_iteration) / folds.n_splits / N
fold_score = roc_auc_score(val_y, oof[val_idx])
print('fold {} auc score: {:.5f}'.format(fold_, fold_score))
cv_score = roc_auc_score(y_train, oof)
print('elapsed time: {:.1f} min'.format((time.time() - start)/60))
print('auc score: {:.5f}'.format(cv_score))
# =================================================================== Saving File
sub_folder = path.join(top_folder, 'cv_' + now + '_' + str(np.round(cv_score, 5)))
makedirs(sub_folder, exist_ok=True)
test['target'] = predictions
test[['ID_code', 'target']].to_csv(path.join(sub_folder, 'submission.csv'), index=False)
raw['oof'] = np.concatenate([oof, predictions], axis=0)
raw[['ID_code', 'oof']].to_csv(path.join(sub_folder, 'oof.csv'), index=False)
|
normal
|
{
"blob_id": "74c875d00c665aabbcad4e23e6059c3445d5e7bd",
"index": 1597,
"step-1": "<mask token>\n\n\ndef load_dataframe(dataset):\n return pd.read_csv(dataset)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef load_dataframe(dataset):\n return pd.read_csv(dataset)\n\n\ndef augment(x, y, t=2):\n xs, xn = [], []\n for i in range(t):\n mask = y > 0\n x1 = x[mask].copy()\n for c in range(200):\n val = x1[:, [c, c + 200, c + 400]]\n np.random.shuffle(val)\n x1[:, [c, c + 200, c + 400]] = val\n xs.append(x1)\n for i in range(t // 2):\n mask = y == 0\n x1 = x[mask].copy()\n for c in range(200):\n val = x1[:, [c, c + 200, c + 400]]\n np.random.shuffle(val)\n x1[:, [c, c + 200, c + 400]] = val\n xn.append(x1)\n xs = np.vstack(xs)\n xn = np.vstack(xn)\n ys = np.ones(xs.shape[0])\n yn = np.zeros(xn.shape[0])\n x = np.vstack([x, xs, xn])\n y = np.concatenate([y, ys, yn])\n return x, y\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef load_dataframe(dataset):\n return pd.read_csv(dataset)\n\n\ndef augment(x, y, t=2):\n xs, xn = [], []\n for i in range(t):\n mask = y > 0\n x1 = x[mask].copy()\n for c in range(200):\n val = x1[:, [c, c + 200, c + 400]]\n np.random.shuffle(val)\n x1[:, [c, c + 200, c + 400]] = val\n xs.append(x1)\n for i in range(t // 2):\n mask = y == 0\n x1 = x[mask].copy()\n for c in range(200):\n val = x1[:, [c, c + 200, c + 400]]\n np.random.shuffle(val)\n x1[:, [c, c + 200, c + 400]] = val\n xn.append(x1)\n xs = np.vstack(xs)\n xn = np.vstack(xn)\n ys = np.ones(xs.shape[0])\n yn = np.zeros(xn.shape[0])\n x = np.vstack([x, xs, xn])\n y = np.concatenate([y, ys, yn])\n return x, y\n\n\nif __name__ == '__main__':\n gc.enable()\n pd.set_option('max_rows', None)\n pd.set_option('max_columns', None)\n warnings.simplefilter('ignore', UserWarning)\n top_folder = './output'\n today = datetime.today()\n now = today.strftime('%m%d-%H%M')\n log_name = now + '.txt'\n sys.stdout = Logger(path.join(top_folder, log_name))\n seed_np = 1011\n np.random.seed(seed_np)\n print('numpy seed: {}'.format(seed_np))\n start = time.time()\n with multiprocessing.Pool() as pool:\n train, test = pool.map(load_dataframe, ['./input/train.csv',\n './input/test.csv'])\n df_test = test.drop(columns=['ID_code']).values\n unique_samples = []\n unique_count = np.zeros_like(df_test)\n for feature in tqdm(range(df_test.shape[1])):\n _, index_, count_ = np.unique(df_test[:, feature], return_counts=\n True, return_index=True)\n unique_count[index_[count_ == 1], feature] += 1\n idx_score = np.argwhere(np.sum(unique_count, axis=1) > 0)[:, 0]\n idx_synthetic = np.argwhere(np.sum(unique_count, axis=1) == 0)[:, 0]\n synthetic = test.loc[idx_synthetic]\n test = test.loc[idx_score]\n raw = pd.concat([train, test], axis=0, sort=False, ignore_index=True)\n len_train = len(train)\n col_var = list(raw.columns[2:])\n mask = pd.DataFrame(np.ones([raw.shape[0], len(col_var)]), columns=col_var)\n for col in tqdm(col_var):\n cnt = raw[col].value_counts()\n val = cnt[cnt == 1].index\n mask.loc[np.isin(raw[col], val), col] = 0\n col_repeat = [(col + '_repeat_2') for col in col_var]\n raw[col_repeat] = raw[col_var][mask.astype(bool)]\n mask = pd.DataFrame(np.ones([raw.shape[0], len(col_var)]), columns=col_var)\n for col in tqdm(col_var):\n cnt = raw[col].value_counts()\n val = cnt[np.isin(cnt, [1, 2])].index\n mask.loc[np.isin(raw[col], val), col] = 0\n col_repeat = [(col + '_repeat_3') for col in col_var]\n raw[col_repeat] = raw[col_var][mask.astype(bool)]\n raw = pd.concat([raw, synthetic], axis=0, sort=False, ignore_index=True)\n print('data: {}'.format(raw.shape))\n print('elapsed time: {:.1f} min'.format((time.time() - start) / 60))\n feats = [col for col in raw.columns.values if col not in ['ID_code',\n 'target']]\n train = raw[:len_train]\n test = raw[len_train:].copy()\n x_train = train[feats]\n y_train = train['target']\n x_test = test[feats]\n print('trn_x: {}'.format(x_train.shape))\n print('x_test: {}'.format(x_test.shape))\n param = {'objective': 'binary', 'boosting': 'gbdt', 'metric': 'auc',\n 'verbosity': -1, 'n_jobs': 11, 'random_state': 1993,\n 'learning_rate': 0.01, 'num_leaves': 8, 'max_depth': -1,\n 'feature_fraction': 0.05, 'bagging_freq': 5, 'bagging_fraction': \n 0.4, 'min_data_in_leaf': 80, 'min_sum_hessian_in_leaf': 10.0}\n print('model params:\\n{}'.format(pd.Series(list(param.values()), index=\n list(param.keys()))))\n seed_fold = 26\n folds = StratifiedKFold(n_splits=5, shuffle=True, 
random_state=seed_fold)\n print('StratifiedKFold seed: {}'.format(seed_fold))\n round_max = 30000\n round_early_stopping = 3000\n print('num_round: {}'.format(round_max))\n print('early_stopping_round: {}'.format(round_early_stopping))\n oof = np.zeros(len(x_train))\n predictions = np.zeros(len(x_test))\n start = time.time()\n for fold_, (trn_idx, val_idx) in enumerate(folds.split(x_train.values,\n y_train.values)):\n print('fold n°{}'.format(fold_))\n trn_x, trn_y = x_train.iloc[trn_idx], y_train.iloc[trn_idx]\n val_x, val_y = x_train.iloc[val_idx], y_train.iloc[val_idx]\n N = 5\n for i in range(N):\n X_t, y_t = augment(trn_x.values, trn_y.values)\n X_t = pd.DataFrame(X_t, columns=feats)\n trn_data = lgb.Dataset(X_t, label=y_t)\n val_data = lgb.Dataset(val_x, label=val_y)\n evals_result = {}\n clf = lgb.train(param, trn_data, round_max, valid_sets=[\n trn_data, val_data], early_stopping_rounds=\n round_early_stopping, verbose_eval=1000, evals_result=\n evals_result)\n oof[val_idx] += clf.predict(val_x, num_iteration=clf.best_iteration\n ) / N\n predictions += clf.predict(x_test, num_iteration=clf.best_iteration\n ) / folds.n_splits / N\n fold_score = roc_auc_score(val_y, oof[val_idx])\n print('fold {} auc score: {:.5f}'.format(fold_, fold_score))\n cv_score = roc_auc_score(y_train, oof)\n print('elapsed time: {:.1f} min'.format((time.time() - start) / 60))\n print('auc score: {:.5f}'.format(cv_score))\n sub_folder = path.join(top_folder, 'cv_' + now + '_' + str(np.round(\n cv_score, 5)))\n makedirs(sub_folder, exist_ok=True)\n test['target'] = predictions\n test[['ID_code', 'target']].to_csv(path.join(sub_folder,\n 'submission.csv'), index=False)\n raw['oof'] = np.concatenate([oof, predictions], axis=0)\n raw[['ID_code', 'oof']].to_csv(path.join(sub_folder, 'oof.csv'), index=\n False)\n",
"step-4": "import gc\nimport sys\nimport time\nimport warnings\nimport multiprocessing\nimport numpy as np\nimport pandas as pd\nimport lightgbm as lgb\nfrom os import path, makedirs\nfrom tqdm import tqdm\nfrom utils import Logger\nfrom datetime import datetime\nfrom sklearn.metrics import roc_auc_score\nfrom sklearn.model_selection import StratifiedKFold\n\n\ndef load_dataframe(dataset):\n return pd.read_csv(dataset)\n\n\ndef augment(x, y, t=2):\n xs, xn = [], []\n for i in range(t):\n mask = y > 0\n x1 = x[mask].copy()\n for c in range(200):\n val = x1[:, [c, c + 200, c + 400]]\n np.random.shuffle(val)\n x1[:, [c, c + 200, c + 400]] = val\n xs.append(x1)\n for i in range(t // 2):\n mask = y == 0\n x1 = x[mask].copy()\n for c in range(200):\n val = x1[:, [c, c + 200, c + 400]]\n np.random.shuffle(val)\n x1[:, [c, c + 200, c + 400]] = val\n xn.append(x1)\n xs = np.vstack(xs)\n xn = np.vstack(xn)\n ys = np.ones(xs.shape[0])\n yn = np.zeros(xn.shape[0])\n x = np.vstack([x, xs, xn])\n y = np.concatenate([y, ys, yn])\n return x, y\n\n\nif __name__ == '__main__':\n gc.enable()\n pd.set_option('max_rows', None)\n pd.set_option('max_columns', None)\n warnings.simplefilter('ignore', UserWarning)\n top_folder = './output'\n today = datetime.today()\n now = today.strftime('%m%d-%H%M')\n log_name = now + '.txt'\n sys.stdout = Logger(path.join(top_folder, log_name))\n seed_np = 1011\n np.random.seed(seed_np)\n print('numpy seed: {}'.format(seed_np))\n start = time.time()\n with multiprocessing.Pool() as pool:\n train, test = pool.map(load_dataframe, ['./input/train.csv',\n './input/test.csv'])\n df_test = test.drop(columns=['ID_code']).values\n unique_samples = []\n unique_count = np.zeros_like(df_test)\n for feature in tqdm(range(df_test.shape[1])):\n _, index_, count_ = np.unique(df_test[:, feature], return_counts=\n True, return_index=True)\n unique_count[index_[count_ == 1], feature] += 1\n idx_score = np.argwhere(np.sum(unique_count, axis=1) > 0)[:, 0]\n idx_synthetic = np.argwhere(np.sum(unique_count, axis=1) == 0)[:, 0]\n synthetic = test.loc[idx_synthetic]\n test = test.loc[idx_score]\n raw = pd.concat([train, test], axis=0, sort=False, ignore_index=True)\n len_train = len(train)\n col_var = list(raw.columns[2:])\n mask = pd.DataFrame(np.ones([raw.shape[0], len(col_var)]), columns=col_var)\n for col in tqdm(col_var):\n cnt = raw[col].value_counts()\n val = cnt[cnt == 1].index\n mask.loc[np.isin(raw[col], val), col] = 0\n col_repeat = [(col + '_repeat_2') for col in col_var]\n raw[col_repeat] = raw[col_var][mask.astype(bool)]\n mask = pd.DataFrame(np.ones([raw.shape[0], len(col_var)]), columns=col_var)\n for col in tqdm(col_var):\n cnt = raw[col].value_counts()\n val = cnt[np.isin(cnt, [1, 2])].index\n mask.loc[np.isin(raw[col], val), col] = 0\n col_repeat = [(col + '_repeat_3') for col in col_var]\n raw[col_repeat] = raw[col_var][mask.astype(bool)]\n raw = pd.concat([raw, synthetic], axis=0, sort=False, ignore_index=True)\n print('data: {}'.format(raw.shape))\n print('elapsed time: {:.1f} min'.format((time.time() - start) / 60))\n feats = [col for col in raw.columns.values if col not in ['ID_code',\n 'target']]\n train = raw[:len_train]\n test = raw[len_train:].copy()\n x_train = train[feats]\n y_train = train['target']\n x_test = test[feats]\n print('trn_x: {}'.format(x_train.shape))\n print('x_test: {}'.format(x_test.shape))\n param = {'objective': 'binary', 'boosting': 'gbdt', 'metric': 'auc',\n 'verbosity': -1, 'n_jobs': 11, 'random_state': 1993,\n 'learning_rate': 0.01, 'num_leaves': 8, 
'max_depth': -1,\n 'feature_fraction': 0.05, 'bagging_freq': 5, 'bagging_fraction': \n 0.4, 'min_data_in_leaf': 80, 'min_sum_hessian_in_leaf': 10.0}\n print('model params:\\n{}'.format(pd.Series(list(param.values()), index=\n list(param.keys()))))\n seed_fold = 26\n folds = StratifiedKFold(n_splits=5, shuffle=True, random_state=seed_fold)\n print('StratifiedKFold seed: {}'.format(seed_fold))\n round_max = 30000\n round_early_stopping = 3000\n print('num_round: {}'.format(round_max))\n print('early_stopping_round: {}'.format(round_early_stopping))\n oof = np.zeros(len(x_train))\n predictions = np.zeros(len(x_test))\n start = time.time()\n for fold_, (trn_idx, val_idx) in enumerate(folds.split(x_train.values,\n y_train.values)):\n print('fold n°{}'.format(fold_))\n trn_x, trn_y = x_train.iloc[trn_idx], y_train.iloc[trn_idx]\n val_x, val_y = x_train.iloc[val_idx], y_train.iloc[val_idx]\n N = 5\n for i in range(N):\n X_t, y_t = augment(trn_x.values, trn_y.values)\n X_t = pd.DataFrame(X_t, columns=feats)\n trn_data = lgb.Dataset(X_t, label=y_t)\n val_data = lgb.Dataset(val_x, label=val_y)\n evals_result = {}\n clf = lgb.train(param, trn_data, round_max, valid_sets=[\n trn_data, val_data], early_stopping_rounds=\n round_early_stopping, verbose_eval=1000, evals_result=\n evals_result)\n oof[val_idx] += clf.predict(val_x, num_iteration=clf.best_iteration\n ) / N\n predictions += clf.predict(x_test, num_iteration=clf.best_iteration\n ) / folds.n_splits / N\n fold_score = roc_auc_score(val_y, oof[val_idx])\n print('fold {} auc score: {:.5f}'.format(fold_, fold_score))\n cv_score = roc_auc_score(y_train, oof)\n print('elapsed time: {:.1f} min'.format((time.time() - start) / 60))\n print('auc score: {:.5f}'.format(cv_score))\n sub_folder = path.join(top_folder, 'cv_' + now + '_' + str(np.round(\n cv_score, 5)))\n makedirs(sub_folder, exist_ok=True)\n test['target'] = predictions\n test[['ID_code', 'target']].to_csv(path.join(sub_folder,\n 'submission.csv'), index=False)\n raw['oof'] = np.concatenate([oof, predictions], axis=0)\n raw[['ID_code', 'oof']].to_csv(path.join(sub_folder, 'oof.csv'), index=\n False)\n",
"step-5": "import gc\r\nimport sys\r\nimport time\r\nimport warnings\r\nimport multiprocessing\r\n\r\nimport numpy as np\r\nimport pandas as pd\r\nimport lightgbm as lgb\r\n\r\nfrom os import path, makedirs\r\nfrom tqdm import tqdm\r\nfrom utils import Logger\r\nfrom datetime import datetime\r\nfrom sklearn.metrics import roc_auc_score\r\nfrom sklearn.model_selection import StratifiedKFold\r\n\r\n\r\n# ======================================================================= Method\r\ndef load_dataframe(dataset):\r\n return pd.read_csv(dataset)\r\n\r\n\r\ndef augment(x, y, t=2):\r\n xs, xn = [], []\r\n for i in range(t):\r\n mask = y > 0\r\n x1 = x[mask].copy()\r\n for c in range(200):\r\n val = x1[:, [c, c+200, c+400]]\r\n np.random.shuffle(val)\r\n x1[:, [c, c+200, c+400]] = val\r\n xs.append(x1)\r\n\r\n for i in range(t//2):\r\n mask = y == 0\r\n x1 = x[mask].copy()\r\n for c in range(200):\r\n val = x1[:, [c, c+200, c+400]]\r\n np.random.shuffle(val)\r\n x1[:, [c, c+200, c+400]] = val\r\n xn.append(x1)\r\n\r\n xs = np.vstack(xs)\r\n xn = np.vstack(xn)\r\n ys = np.ones(xs.shape[0])\r\n yn = np.zeros(xn.shape[0])\r\n x = np.vstack([x, xs, xn])\r\n y = np.concatenate([y, ys, yn])\r\n return x, y\r\n\r\n\r\n# ======================================================================= Main\r\nif __name__ == '__main__':\r\n gc.enable()\r\n pd.set_option('max_rows', None)\r\n pd.set_option('max_columns', None)\r\n warnings.simplefilter('ignore', UserWarning)\r\n\r\n # =================================================================== Params\r\n top_folder = './output'\r\n\r\n today = datetime.today()\r\n now = today.strftime('%m%d-%H%M')\r\n log_name = now + '.txt'\r\n sys.stdout = Logger(path.join(top_folder, log_name))\r\n\r\n seed_np = 1011\r\n np.random.seed(seed_np)\r\n print('numpy seed: {}'.format(seed_np))\r\n\r\n # =================================================================== Load Data\r\n start = time.time()\r\n with multiprocessing.Pool() as pool:\r\n train, test = pool.map(load_dataframe, ['./input/train.csv', './input/test.csv'])\r\n\r\n # === fake sample\r\n df_test = test.drop(columns=['ID_code']).values\r\n\r\n unique_samples = []\r\n unique_count = np.zeros_like(df_test)\r\n for feature in tqdm(range(df_test.shape[1])):\r\n _, index_, count_ = np.unique(df_test[:, feature], return_counts=True, return_index=True)\r\n unique_count[index_[count_ == 1], feature] += 1\r\n\r\n idx_score = np.argwhere(np.sum(unique_count, axis=1) > 0)[:, 0]\r\n idx_synthetic = np.argwhere(np.sum(unique_count, axis=1) == 0)[:, 0]\r\n\r\n synthetic = test.loc[idx_synthetic]\r\n test = test.loc[idx_score]\r\n\r\n raw = pd.concat([train, test], axis=0, sort=False, ignore_index=True)\r\n\r\n # ============================== Extra Feature\r\n len_train = len(train)\r\n col_var = list(raw.columns[2:])\r\n\r\n # === replace value(frequency=1) to NA\r\n mask = pd.DataFrame(np.ones([raw.shape[0], len(col_var)]), columns=col_var)\r\n for col in tqdm(col_var):\r\n cnt = raw[col].value_counts()\r\n val = cnt[cnt == 1].index\r\n mask.loc[np.isin(raw[col], val), col] = 0\r\n col_repeat = [col + '_repeat_2' for col in col_var]\r\n raw[col_repeat] = raw[col_var][mask.astype(bool)]\r\n\r\n # === replace value(frequency=1/2) to NA\r\n mask = pd.DataFrame(np.ones([raw.shape[0], len(col_var)]), columns=col_var)\r\n for col in tqdm(col_var):\r\n cnt = raw[col].value_counts()\r\n val = cnt[np.isin(cnt, [1, 2])].index\r\n mask.loc[np.isin(raw[col], val), col] = 0\r\n col_repeat = [col + '_repeat_3' for col in col_var]\r\n 
raw[col_repeat] = raw[col_var][mask.astype(bool)]\r\n\r\n raw = pd.concat([raw, synthetic], axis=0, sort=False, ignore_index=True)\r\n\r\n # === logging\r\n print('data: {}'.format(raw.shape))\r\n print('elapsed time: {:.1f} min'.format((time.time() - start)/60))\r\n\r\n # =================================================================== PreProcess\r\n feats = [col for col in raw.columns.values if col not in ['ID_code', 'target']]\r\n\r\n # =================================================================== Model\r\n train = raw[:len_train]\r\n test = raw[len_train:].copy()\r\n\r\n x_train = train[feats]\r\n y_train = train['target']\r\n x_test = test[feats]\r\n\r\n print('trn_x: {}'.format(x_train.shape))\r\n print('x_test: {}'.format(x_test.shape))\r\n\r\n param = {\r\n 'objective': 'binary',\r\n 'boosting': 'gbdt',\r\n 'metric': 'auc',\r\n 'verbosity': -1,\r\n 'n_jobs': 11,\r\n 'random_state': 1993,\r\n 'learning_rate': 0.01,\r\n\r\n 'num_leaves': 8,\r\n 'max_depth': -1,\r\n 'feature_fraction': 0.05,\r\n 'bagging_freq': 5,\r\n 'bagging_fraction': 0.4,\r\n 'min_data_in_leaf': 80,\r\n 'min_sum_hessian_in_leaf': 10.0,\r\n }\r\n print('model params:\\n{}'.format(pd.Series(list(param.values()), index=list(param.keys()))))\r\n\r\n seed_fold = 26\r\n folds = StratifiedKFold(n_splits=5, shuffle=True, random_state=seed_fold)\r\n print('StratifiedKFold seed: {}'.format(seed_fold))\r\n\r\n round_max = 30000\r\n round_early_stopping = 3000\r\n print('num_round: {}'.format(round_max))\r\n print('early_stopping_round: {}'.format(round_early_stopping))\r\n\r\n # === training\r\n oof = np.zeros(len(x_train))\r\n predictions = np.zeros(len(x_test))\r\n\r\n start = time.time()\r\n for fold_, (trn_idx, val_idx) in enumerate(folds.split(x_train.values, y_train.values)):\r\n print(\"fold n°{}\".format(fold_))\r\n\r\n trn_x, trn_y = x_train.iloc[trn_idx], y_train.iloc[trn_idx]\r\n val_x, val_y = x_train.iloc[val_idx], y_train.iloc[val_idx]\r\n\r\n N = 5\r\n for i in range(N):\r\n X_t, y_t = augment(trn_x.values, trn_y.values)\r\n X_t = pd.DataFrame(X_t, columns=feats)\r\n\r\n trn_data = lgb.Dataset(X_t, label=y_t)\r\n val_data = lgb.Dataset(val_x, label=val_y)\r\n\r\n evals_result = {}\r\n clf = lgb.train(param,\r\n trn_data,\r\n round_max,\r\n valid_sets=[trn_data, val_data],\r\n early_stopping_rounds=round_early_stopping,\r\n verbose_eval=1000,\r\n evals_result=evals_result)\r\n\r\n oof[val_idx] += clf.predict(val_x, num_iteration=clf.best_iteration) / N\r\n predictions += clf.predict(x_test, num_iteration=clf.best_iteration) / folds.n_splits / N\r\n\r\n fold_score = roc_auc_score(val_y, oof[val_idx])\r\n print('fold {} auc score: {:.5f}'.format(fold_, fold_score))\r\n\r\n cv_score = roc_auc_score(y_train, oof)\r\n print('elapsed time: {:.1f} min'.format((time.time() - start)/60))\r\n print('auc score: {:.5f}'.format(cv_score))\r\n\r\n # =================================================================== Saving File\r\n sub_folder = path.join(top_folder, 'cv_' + now + '_' + str(np.round(cv_score, 5)))\r\n makedirs(sub_folder, exist_ok=True)\r\n\r\n test['target'] = predictions\r\n test[['ID_code', 'target']].to_csv(path.join(sub_folder, 'submission.csv'), index=False)\r\n\r\n raw['oof'] = np.concatenate([oof, predictions], axis=0)\r\n raw[['ID_code', 'oof']].to_csv(path.join(sub_folder, 'oof.csv'), index=False)\r\n\r\n\r\n\r\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
# Generated by Django 3.1.6 on 2021-02-27 23:29
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('RMS', '0001_initial'),
]
operations = [
migrations.RenameField(
model_name='inventorytable',
old_name='Restaurant_ID',
new_name='Restaurant',
),
migrations.RenameField(
model_name='menuitemstable',
old_name='Restaurant_ID',
new_name='Restaurant',
),
migrations.RenameField(
model_name='reciperequirementstable',
old_name='Ingredient_ID',
new_name='Ingredient',
),
migrations.RenameField(
model_name='reciperequirementstable',
old_name='Item_ID',
new_name='Item',
),
migrations.RenameField(
model_name='reciperequirementstable',
old_name='Restaurant_ID',
new_name='Restaurant',
),
migrations.RenameField(
model_name='seatmanagementtable',
old_name='Restaurant_ID',
new_name='Restaurant',
),
]
|
normal
|
{
"blob_id": "ba336094d38a47457198919ce60969144a8fdedb",
"index": 5374,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('RMS', '0001_initial')]\n operations = [migrations.RenameField(model_name='inventorytable',\n old_name='Restaurant_ID', new_name='Restaurant'), migrations.\n RenameField(model_name='menuitemstable', old_name='Restaurant_ID',\n new_name='Restaurant'), migrations.RenameField(model_name=\n 'reciperequirementstable', old_name='Ingredient_ID', new_name=\n 'Ingredient'), migrations.RenameField(model_name=\n 'reciperequirementstable', old_name='Item_ID', new_name='Item'),\n migrations.RenameField(model_name='reciperequirementstable',\n old_name='Restaurant_ID', new_name='Restaurant'), migrations.\n RenameField(model_name='seatmanagementtable', old_name=\n 'Restaurant_ID', new_name='Restaurant')]\n",
"step-4": "from django.db import migrations\n\n\nclass Migration(migrations.Migration):\n dependencies = [('RMS', '0001_initial')]\n operations = [migrations.RenameField(model_name='inventorytable',\n old_name='Restaurant_ID', new_name='Restaurant'), migrations.\n RenameField(model_name='menuitemstable', old_name='Restaurant_ID',\n new_name='Restaurant'), migrations.RenameField(model_name=\n 'reciperequirementstable', old_name='Ingredient_ID', new_name=\n 'Ingredient'), migrations.RenameField(model_name=\n 'reciperequirementstable', old_name='Item_ID', new_name='Item'),\n migrations.RenameField(model_name='reciperequirementstable',\n old_name='Restaurant_ID', new_name='Restaurant'), migrations.\n RenameField(model_name='seatmanagementtable', old_name=\n 'Restaurant_ID', new_name='Restaurant')]\n",
"step-5": "# Generated by Django 3.1.6 on 2021-02-27 23:29\n\nfrom django.db import migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('RMS', '0001_initial'),\n ]\n\n operations = [\n migrations.RenameField(\n model_name='inventorytable',\n old_name='Restaurant_ID',\n new_name='Restaurant',\n ),\n migrations.RenameField(\n model_name='menuitemstable',\n old_name='Restaurant_ID',\n new_name='Restaurant',\n ),\n migrations.RenameField(\n model_name='reciperequirementstable',\n old_name='Ingredient_ID',\n new_name='Ingredient',\n ),\n migrations.RenameField(\n model_name='reciperequirementstable',\n old_name='Item_ID',\n new_name='Item',\n ),\n migrations.RenameField(\n model_name='reciperequirementstable',\n old_name='Restaurant_ID',\n new_name='Restaurant',\n ),\n migrations.RenameField(\n model_name='seatmanagementtable',\n old_name='Restaurant_ID',\n new_name='Restaurant',\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
REGION_LIST = [
'Центральный',
'Северо-Западный',
'Южный',
'Северо-Кавказский',
'Приволжский',
'Уральский',
'Сибирский',
'Дальневосточный',
]
CITY_LIST = {
'Абакан': 7,
'Альметьевск': 5,
'Ангарск': 7,
'Архангельск': 2,
'Астрахань': 3,
'Барнаул': 7,
'Батайск': 3,
'Белгород': 1,
'Бийск': 7,
'Благовещенск': 8,
'Братск': 7,
'Брянск': 1,
'Великий Новгород': 2,
'Владивосток': 8,
'Владикавказ': 4,
'Владимир': 1,
'Волгоград': 3,
'Волжский': 3,
'Вологда': 2,
'Воронеж': 1,
'Грозный': 4,
'Дзержинск': 5,
'Екатеринбург': 6,
'Иваново': 1,
'Ижевск': 5,
'Иркутск': 7,
'Йошкар-Ола': 5,
'Казань': 5,
'Калининград': 2,
'Калуга': 1,
'Кемерово': 7,
'Киров': 5,
'Комсомольск-на-Амуре': 8,
'Кострома': 1,
'Краснодар': 3,
'Красноярск': 7,
'Курган': 6,
'Курск': 1,
'Липецк': 1,
'Магнитогорск': 6,
'Махачкала': 4,
'Миасс': 6,
'Минеральные Воды': 4,
'Москва и Подмосковье': 1,
'Москва': 1,
'Мурманск': 2,
'Набережные Челны': 5,
'Нальчик': 4,
'Нефтекамск': 5,
'Нижневартовск': 6,
'Нижнекамск': 5,
'Нижний Новгород': 5,
'Нижний Тагил': 6,
'Новокузнецк': 7,
'Новомосковск': 1,
'Новороссийск': 3,
'Новосибирск': 7,
'Ноябрьск': 6,
'Обнинск': 1,
'Октябрьский': 5,
'Омск': 7,
'Орел': 1,
'Оренбург': 5,
'Орск': 5,
'Пенза': 5,
'Пермь': 5,
'Петрозаводск': 2,
'Петропавловск-Камчатский': 8,
'Прокопьевск': 7,
'Псков': 2,
'Пятигорск': 4,
'Ростов-на-Дону': 3,
'Рязань': 1,
'Самара': 5,
'Санкт-Петербург': 2,
'Саранск': 5,
'Саратов': 5,
'Севастополь': 3,
'Северодвинск': 2,
'Симферополь': 3,
'Смоленск': 1,
'Сочи': 3,
'Ставрополь': 4,
'Старый Оскол': 1,
'Стерлитамак': 5,
'Сургут': 6,
'Сыктывкар': 2,
'Таганрог': 3,
'Тамбов': 1,
'Тверь': 1,
'Тольятти': 5,
'Томск': 7,
'Тула': 1,
'Тюмень': 6,
'Улан-Удэ': 7,
'Ульяновск': 5,
'Уфа': 5,
'Хабаровск': 8,
'Чебоксары': 5,
'Челябинск': 6,
'Череповец': 2,
'Чита': 7,
'Шахты': 3,
'Энгельс': 5,
'Южно-Сахалинск': 8,
'Якутск': 8,
'Ярославль': 1,
}
|
normal
|
{
"blob_id": "2101299d6f6bfcd4726591fc256317968373ca1f",
"index": 3071,
"step-1": "<mask token>\n",
"step-2": "REGION_LIST = ['Центральный', 'Северо-Западный', 'Южный',\n 'Северо-Кавказский', 'Приволжский', 'Уральский', 'Сибирский',\n 'Дальневосточный']\nCITY_LIST = {'Абакан': 7, 'Альметьевск': 5, 'Ангарск': 7, 'Архангельск': 2,\n 'Астрахань': 3, 'Барнаул': 7, 'Батайск': 3, 'Белгород': 1, 'Бийск': 7,\n 'Благовещенск': 8, 'Братск': 7, 'Брянск': 1, 'Великий Новгород': 2,\n 'Владивосток': 8, 'Владикавказ': 4, 'Владимир': 1, 'Волгоград': 3,\n 'Волжский': 3, 'Вологда': 2, 'Воронеж': 1, 'Грозный': 4, 'Дзержинск': 5,\n 'Екатеринбург': 6, 'Иваново': 1, 'Ижевск': 5, 'Иркутск': 7,\n 'Йошкар-Ола': 5, 'Казань': 5, 'Калининград': 2, 'Калуга': 1, 'Кемерово':\n 7, 'Киров': 5, 'Комсомольск-на-Амуре': 8, 'Кострома': 1, 'Краснодар': 3,\n 'Красноярск': 7, 'Курган': 6, 'Курск': 1, 'Липецк': 1, 'Магнитогорск': \n 6, 'Махачкала': 4, 'Миасс': 6, 'Минеральные Воды': 4,\n 'Москва и Подмосковье': 1, 'Москва': 1, 'Мурманск': 2,\n 'Набережные Челны': 5, 'Нальчик': 4, 'Нефтекамск': 5, 'Нижневартовск': \n 6, 'Нижнекамск': 5, 'Нижний Новгород': 5, 'Нижний Тагил': 6,\n 'Новокузнецк': 7, 'Новомосковск': 1, 'Новороссийск': 3, 'Новосибирск': \n 7, 'Ноябрьск': 6, 'Обнинск': 1, 'Октябрьский': 5, 'Омск': 7, 'Орел': 1,\n 'Оренбург': 5, 'Орск': 5, 'Пенза': 5, 'Пермь': 5, 'Петрозаводск': 2,\n 'Петропавловск-Камчатский': 8, 'Прокопьевск': 7, 'Псков': 2,\n 'Пятигорск': 4, 'Ростов-на-Дону': 3, 'Рязань': 1, 'Самара': 5,\n 'Санкт-Петербург': 2, 'Саранск': 5, 'Саратов': 5, 'Севастополь': 3,\n 'Северодвинск': 2, 'Симферополь': 3, 'Смоленск': 1, 'Сочи': 3,\n 'Ставрополь': 4, 'Старый Оскол': 1, 'Стерлитамак': 5, 'Сургут': 6,\n 'Сыктывкар': 2, 'Таганрог': 3, 'Тамбов': 1, 'Тверь': 1, 'Тольятти': 5,\n 'Томск': 7, 'Тула': 1, 'Тюмень': 6, 'Улан-Удэ': 7, 'Ульяновск': 5,\n 'Уфа': 5, 'Хабаровск': 8, 'Чебоксары': 5, 'Челябинск': 6, 'Череповец': \n 2, 'Чита': 7, 'Шахты': 3, 'Энгельс': 5, 'Южно-Сахалинск': 8, 'Якутск': \n 8, 'Ярославль': 1}\n",
"step-3": "REGION_LIST = [\n 'Центральный',\n 'Северо-Западный',\n 'Южный',\n 'Северо-Кавказский',\n 'Приволжский',\n 'Уральский',\n 'Сибирский',\n 'Дальневосточный',\n]\n\nCITY_LIST = {\n 'Абакан': 7,\n 'Альметьевск': 5,\n 'Ангарск': 7,\n 'Архангельск': 2,\n 'Астрахань': 3,\n 'Барнаул': 7,\n 'Батайск': 3,\n 'Белгород': 1,\n 'Бийск': 7,\n 'Благовещенск': 8,\n 'Братск': 7,\n 'Брянск': 1,\n 'Великий Новгород': 2,\n 'Владивосток': 8,\n 'Владикавказ': 4,\n 'Владимир': 1,\n 'Волгоград': 3,\n 'Волжский': 3,\n 'Вологда': 2,\n 'Воронеж': 1,\n 'Грозный': 4,\n 'Дзержинск': 5,\n 'Екатеринбург': 6,\n 'Иваново': 1,\n 'Ижевск': 5,\n 'Иркутск': 7,\n 'Йошкар-Ола': 5,\n 'Казань': 5,\n 'Калининград': 2,\n 'Калуга': 1,\n 'Кемерово': 7,\n 'Киров': 5,\n 'Комсомольск-на-Амуре': 8,\n 'Кострома': 1,\n 'Краснодар': 3,\n 'Красноярск': 7,\n 'Курган': 6,\n 'Курск': 1,\n 'Липецк': 1,\n 'Магнитогорск': 6,\n 'Махачкала': 4,\n 'Миасс': 6,\n 'Минеральные Воды': 4,\n 'Москва и Подмосковье': 1,\n 'Москва': 1,\n 'Мурманск': 2,\n 'Набережные Челны': 5,\n 'Нальчик': 4,\n 'Нефтекамск': 5,\n 'Нижневартовск': 6,\n 'Нижнекамск': 5,\n 'Нижний Новгород': 5,\n 'Нижний Тагил': 6,\n 'Новокузнецк': 7,\n 'Новомосковск': 1,\n 'Новороссийск': 3,\n 'Новосибирск': 7,\n 'Ноябрьск': 6,\n 'Обнинск': 1,\n 'Октябрьский': 5,\n 'Омск': 7,\n 'Орел': 1,\n 'Оренбург': 5,\n 'Орск': 5,\n 'Пенза': 5,\n 'Пермь': 5,\n 'Петрозаводск': 2,\n 'Петропавловск-Камчатский': 8,\n 'Прокопьевск': 7,\n 'Псков': 2,\n 'Пятигорск': 4,\n 'Ростов-на-Дону': 3,\n 'Рязань': 1,\n 'Самара': 5,\n 'Санкт-Петербург': 2,\n 'Саранск': 5,\n 'Саратов': 5,\n 'Севастополь': 3,\n 'Северодвинск': 2,\n 'Симферополь': 3,\n 'Смоленск': 1,\n 'Сочи': 3,\n 'Ставрополь': 4,\n 'Старый Оскол': 1,\n 'Стерлитамак': 5,\n 'Сургут': 6,\n 'Сыктывкар': 2,\n 'Таганрог': 3,\n 'Тамбов': 1,\n 'Тверь': 1,\n 'Тольятти': 5,\n 'Томск': 7,\n 'Тула': 1,\n 'Тюмень': 6,\n 'Улан-Удэ': 7,\n 'Ульяновск': 5,\n 'Уфа': 5,\n 'Хабаровск': 8,\n 'Чебоксары': 5,\n 'Челябинск': 6,\n 'Череповец': 2,\n 'Чита': 7,\n 'Шахты': 3,\n 'Энгельс': 5,\n 'Южно-Сахалинск': 8,\n 'Якутск': 8,\n 'Ярославль': 1,\n}\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
from sys import exit
from os import stat
file = open("fiunamfs.img","r")
nombre = file.read(8)
file.seek(10)
version = file.read(3)
file.seek(20)
etiqueta = file.read(15)
file.seek(40)
cluster = file.read(5)
file.seek(47)
numero = file.read(2)
file.seek(52)
numeroCompleto = file.read(8)
file.close()
archivos = []
tams = []
clusters = []
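# Searches the directory table for the lowest cluster number that is not in use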
def clusterVacio():
arreAux = []
busca = 1
bandera = True
for i in range(len(clusters)):
clu=clusters[i]
arreAux.append(int(clu[0]))
print(arreAux)
while bandera:
if busca in arreAux:
busca = busca + 1
else:
bandera = False
return busca
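# Refreshes the global archivos/tams/clusters lists from the 64 directory entries of the image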
def tablaArchivos():
global archivos
global tams
global clusters
archivos = []
tams = []
clusters = []
file = open("fiunamfs.img","r+")
file.seek(2048)
for i in range(64):
archivos.append(file.read(15))
tams.append(file.read(8))
clusters.append(file.read(5))
file.seek(file.tell()+36)
file.close()
def info():
print("Nombre del Sistema: " + nombre)
print("Version: " + version)
print("Etiqueta del Volumen: " + etiqueta)
print("Tamano del cluster en bytes: " + cluster)
print("Numero de clusters que mide el directorio: " + numero)
print("Numero de cluster que mide la unidad completa: " + numeroCompleto)
def listar():
file = open("fiunamfs.img","r")
file.seek(2048)
for i in range(64):
name = file.read(15)
if name != 'Xx.xXx.xXx.xXx.':
print(name)
file.seek(file.tell()+49)
file.close()
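# Frees a directory entry by overwriting its name field with the unused marker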
def borrar(archivo):
borrado = False
file = open("fiunamfs.img","r+")
file.seek(2048)
for i in range(64):
name = file.read(15)
aux = name.strip()
if aux == archivo:
file.seek(file.tell()-15)
file.write('Xx.xXx.xXx.xXx.')
borrado = True
file.seek(file.tell()+49)
file.close()
return borrado
def tamaArchivo(path):
si = stat(path).st_size
return si
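# Copies a host file into the image: claims a free directory entry, then writes the data at a free cluster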
def dePcASistema(path, nombre):
posicion =0
actual =0
try:
new = open(path,"r+")
file = open("fiunamfs.img","r+")
file.seek(2048)
bandera = False
tam = stat(path).st_size
while(bandera == False):
name = file.read(15)
if (name == 'Xx.xXx.xXx.xXx.'):
file.seek(file.tell()-15)
file.write(nombre)
actual = file.tell()
print("El archivo fue copiado")
bandera = True
file.seek(file.tell()+49)
file.close()
file = open("fiunamfs.img","r+")
pa = clusterVacio()
inde = 2048*pa
tamano = tamaArchivo(path)
file.seek(inde)
file.write(new.read(tamano))
file.close()
file = open("fiunamfs.img","r+")
file.seek(actual)
        file.write(str(tamano).ljust(8)) # file size field (8 characters)
        file.write(str(pa).ljust(5)) # starting cluster field (5 characters)
file.close()
except:
print("Este archivo no existe")
def deSistemaAPc(archivo,nombre):
    tam = 0
    clu = 0
    file = open("fiunamfs.img","r") # open the image read-only
    file.seek(2048) # skip the superblock
    new = open(archivo,"w") # destination file on the host
    for i in range(64):
        name = file.read(15)
        aux = name.strip()
        if (aux == nombre.strip()):
            tam = int(file.read(8)) # size is stored as text
            clu = int(file.read(5)) # starting cluster is stored as text
            file.seek(file.tell()+36)
        else:
            file.seek(file.tell()+49) # jump to the next 64-byte entry
    file.close()
    aux2 = 2048*clu
    file = open("fiunamfs.img","r")
    file.seek(aux2)
    new.write(file.read(tam))
def nombreArchivo(path):
tam = len(path)
slash = 0
name = ''
name2 = ''
for i in range(tam):
if (path[i] == '/'):
slash = i
for i in range(slash+1,tam):
name = name + path[i]
    ## pad/limit file names to 15 characters
espaces = 15 - len(name)
for i in range (espaces):
name2 = name2 + " "
return name2 + name
if (nombre == "FiUnamFS" and version == "0.7"):
correcto = True
while(correcto):
tablaArchivos()
print("Sistema de Archivos FI Unam FS")
print("1: Listar")
print("2: Copiar archivo")
print("3: Copiar archivo a la computadora")
print("4: Eliminar archivo")
print("5: Desgramentar")
print("6: Mostar informacion del sistema de archivos")
print("7: Salir")
opcion = input("Opcion: ")
if opcion == 6:
info()
elif opcion == 1:
listar()
elif opcion == 4:
archivo = raw_input("Nombre del archivo a borrar: ")
if(borrar(archivo)):
print('El archivo fue borrado')
else:
print('No se encontro el archivo')
elif opcion == 3:
archivo = raw_input("Nombre del archivo a copiar: ")
nombre = nombreArchivo(archivo)
deSistemaAPc(archivo, nombre)
elif opcion == 2:
archivo = raw_input("Nombre del archivo a copiar: ")
nombre = nombreArchivo(archivo)
dePcASistema(archivo, nombre)
elif opcion == 9:
print(archivos)
print(clusters)
print(tams)
elif opcion == 8:
va = clusterVacio()
print (va)
elif opcion == 7:
print("Sistema desmontado")
correcto = False
elif opcion == 5:
print("No se implemento")
else:
print("No se puede abrir el sistema de archivos debido a que no es el archivo correcto o la version correcta. Revise nuevamente que tenga la imagen correcta.")
exit()
|
normal
|
{
"blob_id": "da69fd937153fe2112b9f64411882527274247ef",
"index": 1878,
"step-1": "<mask token>\n\n\ndef clusterVacio():\n arreAux = []\n busca = 1\n bandera = True\n for i in range(len(clusters)):\n clu = clusters[i]\n arreAux.append(int(clu[0]))\n print(arreAux)\n while bandera:\n if busca in arreAux:\n busca = busca + 1\n else:\n bandera = False\n return busca\n\n\ndef tablaArchivos():\n global archivos\n global tams\n global clusters\n archivos = []\n tams = []\n clusters = []\n file = open('fiunamfs.img', 'r+')\n file.seek(2048)\n for i in range(64):\n archivos.append(file.read(15))\n tams.append(file.read(8))\n clusters.append(file.read(5))\n file.seek(file.tell() + 36)\n file.close()\n\n\ndef info():\n print('Nombre del Sistema: ' + nombre)\n print('Version: ' + version)\n print('Etiqueta del Volumen: ' + etiqueta)\n print('Tamano del cluster en bytes: ' + cluster)\n print('Numero de clusters que mide el directorio: ' + numero)\n print('Numero de cluster que mide la unidad completa: ' + numeroCompleto)\n\n\ndef listar():\n file = open('fiunamfs.img', 'r')\n file.seek(2048)\n for i in range(64):\n name = file.read(15)\n if name != 'Xx.xXx.xXx.xXx.':\n print(name)\n file.seek(file.tell() + 49)\n file.close()\n\n\ndef borrar(archivo):\n borrado = False\n file = open('fiunamfs.img', 'r+')\n file.seek(2048)\n for i in range(64):\n name = file.read(15)\n aux = name.strip()\n if aux == archivo:\n file.seek(file.tell() - 15)\n file.write('Xx.xXx.xXx.xXx.')\n borrado = True\n file.seek(file.tell() + 49)\n file.close()\n return borrado\n\n\ndef tamaArchivo(path):\n si = stat(path).st_size\n return si\n\n\n<mask token>\n\n\ndef deSistemaAPc(archivo, nombre):\n tam = 0\n clu = 0\n file = open('fiunamfs.img', 'r')\n file.seek(2048)\n new = open(archivo, 'r+')\n for i in range(64):\n name = file.read(15)\n aux = name.strip()\n if aux == nombre:\n tam = file.read(8)\n clu = file.read(5)\n file.close()\n aux2 = 2048 * clu\n file = open('fiunamfs.img', 'r')\n file.seek(aux2)\n new.write(file.read(tam))\n\n\ndef nombreArchivo(path):\n tam = len(path)\n slash = 0\n name = ''\n name2 = ''\n for i in range(tam):\n if path[i] == '/':\n slash = i\n for i in range(slash + 1, tam):\n name = name + path[i]\n espaces = 15 - len(name)\n for i in range(espaces):\n name2 = name2 + ' '\n return name2 + name\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef clusterVacio():\n arreAux = []\n busca = 1\n bandera = True\n for i in range(len(clusters)):\n clu = clusters[i]\n arreAux.append(int(clu[0]))\n print(arreAux)\n while bandera:\n if busca in arreAux:\n busca = busca + 1\n else:\n bandera = False\n return busca\n\n\ndef tablaArchivos():\n global archivos\n global tams\n global clusters\n archivos = []\n tams = []\n clusters = []\n file = open('fiunamfs.img', 'r+')\n file.seek(2048)\n for i in range(64):\n archivos.append(file.read(15))\n tams.append(file.read(8))\n clusters.append(file.read(5))\n file.seek(file.tell() + 36)\n file.close()\n\n\ndef info():\n print('Nombre del Sistema: ' + nombre)\n print('Version: ' + version)\n print('Etiqueta del Volumen: ' + etiqueta)\n print('Tamano del cluster en bytes: ' + cluster)\n print('Numero de clusters que mide el directorio: ' + numero)\n print('Numero de cluster que mide la unidad completa: ' + numeroCompleto)\n\n\ndef listar():\n file = open('fiunamfs.img', 'r')\n file.seek(2048)\n for i in range(64):\n name = file.read(15)\n if name != 'Xx.xXx.xXx.xXx.':\n print(name)\n file.seek(file.tell() + 49)\n file.close()\n\n\ndef borrar(archivo):\n borrado = False\n file = open('fiunamfs.img', 'r+')\n file.seek(2048)\n for i in range(64):\n name = file.read(15)\n aux = name.strip()\n if aux == archivo:\n file.seek(file.tell() - 15)\n file.write('Xx.xXx.xXx.xXx.')\n borrado = True\n file.seek(file.tell() + 49)\n file.close()\n return borrado\n\n\ndef tamaArchivo(path):\n si = stat(path).st_size\n return si\n\n\ndef dePcASistema(path, nombre):\n posicion = 0\n actual = 0\n try:\n new = open(path, 'r+')\n file = open('fiunamfs.img', 'r+')\n file.seek(2048)\n bandera = False\n tam = stat(path).st_size\n while bandera == False:\n name = file.read(15)\n if name == 'Xx.xXx.xXx.xXx.':\n file.seek(file.tell() - 15)\n file.write(nombre)\n actual = file.tell()\n print('El archivo fue copiado')\n bandera = True\n file.seek(file.tell() + 49)\n file.close()\n file = open('fiunamfs.img', 'r+')\n pa = clusterVacio()\n inde = 2048 * pa\n tamano = tamaArchivo(path)\n file.seek(inde)\n file.write(new.read(tamano))\n file.close()\n file = open('fiunamfs.img', 'r+')\n file.seek(actual)\n file.write(str(pa))\n file.close()\n except:\n print('Este archivo no existe')\n\n\ndef deSistemaAPc(archivo, nombre):\n tam = 0\n clu = 0\n file = open('fiunamfs.img', 'r')\n file.seek(2048)\n new = open(archivo, 'r+')\n for i in range(64):\n name = file.read(15)\n aux = name.strip()\n if aux == nombre:\n tam = file.read(8)\n clu = file.read(5)\n file.close()\n aux2 = 2048 * clu\n file = open('fiunamfs.img', 'r')\n file.seek(aux2)\n new.write(file.read(tam))\n\n\ndef nombreArchivo(path):\n tam = len(path)\n slash = 0\n name = ''\n name2 = ''\n for i in range(tam):\n if path[i] == '/':\n slash = i\n for i in range(slash + 1, tam):\n name = name + path[i]\n espaces = 15 - len(name)\n for i in range(espaces):\n name2 = name2 + ' '\n return name2 + name\n\n\n<mask token>\n",
"step-3": "<mask token>\nfile.seek(10)\n<mask token>\nfile.seek(20)\n<mask token>\nfile.seek(40)\n<mask token>\nfile.seek(47)\n<mask token>\nfile.seek(52)\n<mask token>\nfile.close()\n<mask token>\n\n\ndef clusterVacio():\n arreAux = []\n busca = 1\n bandera = True\n for i in range(len(clusters)):\n clu = clusters[i]\n arreAux.append(int(clu[0]))\n print(arreAux)\n while bandera:\n if busca in arreAux:\n busca = busca + 1\n else:\n bandera = False\n return busca\n\n\ndef tablaArchivos():\n global archivos\n global tams\n global clusters\n archivos = []\n tams = []\n clusters = []\n file = open('fiunamfs.img', 'r+')\n file.seek(2048)\n for i in range(64):\n archivos.append(file.read(15))\n tams.append(file.read(8))\n clusters.append(file.read(5))\n file.seek(file.tell() + 36)\n file.close()\n\n\ndef info():\n print('Nombre del Sistema: ' + nombre)\n print('Version: ' + version)\n print('Etiqueta del Volumen: ' + etiqueta)\n print('Tamano del cluster en bytes: ' + cluster)\n print('Numero de clusters que mide el directorio: ' + numero)\n print('Numero de cluster que mide la unidad completa: ' + numeroCompleto)\n\n\ndef listar():\n file = open('fiunamfs.img', 'r')\n file.seek(2048)\n for i in range(64):\n name = file.read(15)\n if name != 'Xx.xXx.xXx.xXx.':\n print(name)\n file.seek(file.tell() + 49)\n file.close()\n\n\ndef borrar(archivo):\n borrado = False\n file = open('fiunamfs.img', 'r+')\n file.seek(2048)\n for i in range(64):\n name = file.read(15)\n aux = name.strip()\n if aux == archivo:\n file.seek(file.tell() - 15)\n file.write('Xx.xXx.xXx.xXx.')\n borrado = True\n file.seek(file.tell() + 49)\n file.close()\n return borrado\n\n\ndef tamaArchivo(path):\n si = stat(path).st_size\n return si\n\n\ndef dePcASistema(path, nombre):\n posicion = 0\n actual = 0\n try:\n new = open(path, 'r+')\n file = open('fiunamfs.img', 'r+')\n file.seek(2048)\n bandera = False\n tam = stat(path).st_size\n while bandera == False:\n name = file.read(15)\n if name == 'Xx.xXx.xXx.xXx.':\n file.seek(file.tell() - 15)\n file.write(nombre)\n actual = file.tell()\n print('El archivo fue copiado')\n bandera = True\n file.seek(file.tell() + 49)\n file.close()\n file = open('fiunamfs.img', 'r+')\n pa = clusterVacio()\n inde = 2048 * pa\n tamano = tamaArchivo(path)\n file.seek(inde)\n file.write(new.read(tamano))\n file.close()\n file = open('fiunamfs.img', 'r+')\n file.seek(actual)\n file.write(str(pa))\n file.close()\n except:\n print('Este archivo no existe')\n\n\ndef deSistemaAPc(archivo, nombre):\n tam = 0\n clu = 0\n file = open('fiunamfs.img', 'r')\n file.seek(2048)\n new = open(archivo, 'r+')\n for i in range(64):\n name = file.read(15)\n aux = name.strip()\n if aux == nombre:\n tam = file.read(8)\n clu = file.read(5)\n file.close()\n aux2 = 2048 * clu\n file = open('fiunamfs.img', 'r')\n file.seek(aux2)\n new.write(file.read(tam))\n\n\ndef nombreArchivo(path):\n tam = len(path)\n slash = 0\n name = ''\n name2 = ''\n for i in range(tam):\n if path[i] == '/':\n slash = i\n for i in range(slash + 1, tam):\n name = name + path[i]\n espaces = 15 - len(name)\n for i in range(espaces):\n name2 = name2 + ' '\n return name2 + name\n\n\nif nombre == 'FiUnamFS' and version == '0.7':\n correcto = True\n while correcto:\n tablaArchivos()\n print('Sistema de Archivos FI Unam FS')\n print('1: Listar')\n print('2: Copiar archivo')\n print('3: Copiar archivo a la computadora')\n print('4: Eliminar archivo')\n print('5: Desgramentar')\n print('6: Mostar informacion del sistema de archivos')\n print('7: Salir')\n opcion = 
input('Opcion: ')\n if opcion == 6:\n info()\n elif opcion == 1:\n listar()\n elif opcion == 4:\n archivo = raw_input('Nombre del archivo a borrar: ')\n if borrar(archivo):\n print('El archivo fue borrado')\n else:\n print('No se encontro el archivo')\n elif opcion == 3:\n archivo = raw_input('Nombre del archivo a copiar: ')\n nombre = nombreArchivo(archivo)\n deSistemaAPc(archivo, nombre)\n elif opcion == 2:\n archivo = raw_input('Nombre del archivo a copiar: ')\n nombre = nombreArchivo(archivo)\n dePcASistema(archivo, nombre)\n elif opcion == 9:\n print(archivos)\n print(clusters)\n print(tams)\n elif opcion == 8:\n va = clusterVacio()\n print(va)\n elif opcion == 7:\n print('Sistema desmontado')\n correcto = False\n elif opcion == 5:\n print('No se implemento')\nelse:\n print(\n 'No se puede abrir el sistema de archivos debido a que no es el archivo correcto o la version correcta. Revise nuevamente que tenga la imagen correcta.'\n )\n exit()\n",
"step-4": "from sys import exit\nfrom os import stat\nfile = open('fiunamfs.img', 'r')\nnombre = file.read(8)\nfile.seek(10)\nversion = file.read(3)\nfile.seek(20)\netiqueta = file.read(15)\nfile.seek(40)\ncluster = file.read(5)\nfile.seek(47)\nnumero = file.read(2)\nfile.seek(52)\nnumeroCompleto = file.read(8)\nfile.close()\narchivos = []\ntams = []\nclusters = []\n\n\ndef clusterVacio():\n arreAux = []\n busca = 1\n bandera = True\n for i in range(len(clusters)):\n clu = clusters[i]\n arreAux.append(int(clu[0]))\n print(arreAux)\n while bandera:\n if busca in arreAux:\n busca = busca + 1\n else:\n bandera = False\n return busca\n\n\ndef tablaArchivos():\n global archivos\n global tams\n global clusters\n archivos = []\n tams = []\n clusters = []\n file = open('fiunamfs.img', 'r+')\n file.seek(2048)\n for i in range(64):\n archivos.append(file.read(15))\n tams.append(file.read(8))\n clusters.append(file.read(5))\n file.seek(file.tell() + 36)\n file.close()\n\n\ndef info():\n print('Nombre del Sistema: ' + nombre)\n print('Version: ' + version)\n print('Etiqueta del Volumen: ' + etiqueta)\n print('Tamano del cluster en bytes: ' + cluster)\n print('Numero de clusters que mide el directorio: ' + numero)\n print('Numero de cluster que mide la unidad completa: ' + numeroCompleto)\n\n\ndef listar():\n file = open('fiunamfs.img', 'r')\n file.seek(2048)\n for i in range(64):\n name = file.read(15)\n if name != 'Xx.xXx.xXx.xXx.':\n print(name)\n file.seek(file.tell() + 49)\n file.close()\n\n\ndef borrar(archivo):\n borrado = False\n file = open('fiunamfs.img', 'r+')\n file.seek(2048)\n for i in range(64):\n name = file.read(15)\n aux = name.strip()\n if aux == archivo:\n file.seek(file.tell() - 15)\n file.write('Xx.xXx.xXx.xXx.')\n borrado = True\n file.seek(file.tell() + 49)\n file.close()\n return borrado\n\n\ndef tamaArchivo(path):\n si = stat(path).st_size\n return si\n\n\ndef dePcASistema(path, nombre):\n posicion = 0\n actual = 0\n try:\n new = open(path, 'r+')\n file = open('fiunamfs.img', 'r+')\n file.seek(2048)\n bandera = False\n tam = stat(path).st_size\n while bandera == False:\n name = file.read(15)\n if name == 'Xx.xXx.xXx.xXx.':\n file.seek(file.tell() - 15)\n file.write(nombre)\n actual = file.tell()\n print('El archivo fue copiado')\n bandera = True\n file.seek(file.tell() + 49)\n file.close()\n file = open('fiunamfs.img', 'r+')\n pa = clusterVacio()\n inde = 2048 * pa\n tamano = tamaArchivo(path)\n file.seek(inde)\n file.write(new.read(tamano))\n file.close()\n file = open('fiunamfs.img', 'r+')\n file.seek(actual)\n file.write(str(pa))\n file.close()\n except:\n print('Este archivo no existe')\n\n\ndef deSistemaAPc(archivo, nombre):\n tam = 0\n clu = 0\n file = open('fiunamfs.img', 'r')\n file.seek(2048)\n new = open(archivo, 'r+')\n for i in range(64):\n name = file.read(15)\n aux = name.strip()\n if aux == nombre:\n tam = file.read(8)\n clu = file.read(5)\n file.close()\n aux2 = 2048 * clu\n file = open('fiunamfs.img', 'r')\n file.seek(aux2)\n new.write(file.read(tam))\n\n\ndef nombreArchivo(path):\n tam = len(path)\n slash = 0\n name = ''\n name2 = ''\n for i in range(tam):\n if path[i] == '/':\n slash = i\n for i in range(slash + 1, tam):\n name = name + path[i]\n espaces = 15 - len(name)\n for i in range(espaces):\n name2 = name2 + ' '\n return name2 + name\n\n\nif nombre == 'FiUnamFS' and version == '0.7':\n correcto = True\n while correcto:\n tablaArchivos()\n print('Sistema de Archivos FI Unam FS')\n print('1: Listar')\n print('2: Copiar archivo')\n print('3: Copiar 
archivo a la computadora')\n print('4: Eliminar archivo')\n print('5: Desgramentar')\n print('6: Mostar informacion del sistema de archivos')\n print('7: Salir')\n opcion = input('Opcion: ')\n if opcion == 6:\n info()\n elif opcion == 1:\n listar()\n elif opcion == 4:\n archivo = raw_input('Nombre del archivo a borrar: ')\n if borrar(archivo):\n print('El archivo fue borrado')\n else:\n print('No se encontro el archivo')\n elif opcion == 3:\n archivo = raw_input('Nombre del archivo a copiar: ')\n nombre = nombreArchivo(archivo)\n deSistemaAPc(archivo, nombre)\n elif opcion == 2:\n archivo = raw_input('Nombre del archivo a copiar: ')\n nombre = nombreArchivo(archivo)\n dePcASistema(archivo, nombre)\n elif opcion == 9:\n print(archivos)\n print(clusters)\n print(tams)\n elif opcion == 8:\n va = clusterVacio()\n print(va)\n elif opcion == 7:\n print('Sistema desmontado')\n correcto = False\n elif opcion == 5:\n print('No se implemento')\nelse:\n print(\n 'No se puede abrir el sistema de archivos debido a que no es el archivo correcto o la version correcta. Revise nuevamente que tenga la imagen correcta.'\n )\n exit()\n",
"step-5": "from sys import exit\nfrom os import stat\n\nfile = open(\"fiunamfs.img\",\"r\")\nnombre = file.read(8)\nfile.seek(10)\nversion = file.read(3)\nfile.seek(20)\netiqueta = file.read(15)\nfile.seek(40)\ncluster = file.read(5)\nfile.seek(47)\nnumero = file.read(2)\nfile.seek(52)\nnumeroCompleto = file.read(8)\nfile.close()\n\narchivos = []\ntams = []\nclusters = []\n\ndef clusterVacio():\n arreAux = []\n busca = 1\n bandera = True\n for i in range(len(clusters)):\n clu=clusters[i]\n arreAux.append(int(clu[0]))\n print(arreAux)\n while bandera:\n if busca in arreAux:\n busca = busca + 1\n else:\n bandera = False\n return busca \n\ndef tablaArchivos():\n global archivos\n global tams\n global clusters\n archivos = []\n tams = []\n clusters = []\n file = open(\"fiunamfs.img\",\"r+\")\n file.seek(2048)\n for i in range(64):\n archivos.append(file.read(15))\n tams.append(file.read(8))\n clusters.append(file.read(5))\n file.seek(file.tell()+36)\n file.close()\n\ndef info():\n print(\"Nombre del Sistema: \" + nombre)\n print(\"Version: \" + version)\n print(\"Etiqueta del Volumen: \" + etiqueta)\n print(\"Tamano del cluster en bytes: \" + cluster)\n print(\"Numero de clusters que mide el directorio: \" + numero)\n print(\"Numero de cluster que mide la unidad completa: \" + numeroCompleto)\n\ndef listar():\n file = open(\"fiunamfs.img\",\"r\")\n file.seek(2048)\n for i in range(64):\n name = file.read(15)\n if name != 'Xx.xXx.xXx.xXx.':\n print(name)\n file.seek(file.tell()+49)\n file.close()\n\ndef borrar(archivo):\n borrado = False\n file = open(\"fiunamfs.img\",\"r+\")\n file.seek(2048)\n for i in range(64):\n name = file.read(15)\n aux = name.strip()\n if aux == archivo:\n file.seek(file.tell()-15)\n file.write('Xx.xXx.xXx.xXx.')\n borrado = True\n file.seek(file.tell()+49)\n file.close()\n return borrado\n\ndef tamaArchivo(path):\n si = stat(path).st_size\n return si\n\ndef dePcASistema(path, nombre):\n posicion =0\n actual =0\n try:\n new = open(path,\"r+\")\n file = open(\"fiunamfs.img\",\"r+\")\n file.seek(2048)\n bandera = False\n tam = stat(path).st_size\n while(bandera == False):\n name = file.read(15)\n if (name == 'Xx.xXx.xXx.xXx.'):\n file.seek(file.tell()-15)\n file.write(nombre)\n actual = file.tell()\n print(\"El archivo fue copiado\")\n bandera = True\n file.seek(file.tell()+49)\n file.close()\n file = open(\"fiunamfs.img\",\"r+\")\n pa = clusterVacio()\n inde = 2048*pa\n tamano = tamaArchivo(path)\n file.seek(inde)\n file.write(new.read(tamano))\n file.close()\n file = open(\"fiunamfs.img\",\"r+\")\n file.seek(actual)\n file.write(str(pa))\n file.close()\n except:\n print(\"Este archivo no existe\")\n \ndef deSistemaAPc(archivo,nombre):\n tam = 0 \n clu = 0\n file = open(\"fiunamfs.img\",\"r\") #Se abre el archivo en modo solo lectura\n file.seek(2048) #Se salta el superbloque \n new = open(archivo,\"r+\")\n for i in range(64):\n name = file.read(15)\n aux = name.strip()\n if (aux == nombre):\n tam = file.read(8)\n clu = file.read(5)\n file.close()\n aux2 = 2048*clu\n file = open(\"fiunamfs.img\",\"r\")\n file.seek(aux2)\n new.write(file.read(tam))\n \n \ndef nombreArchivo(path):\n tam = len(path)\n slash = 0\n name = ''\n name2 = ''\n for i in range(tam):\n if (path[i] == '/'):\n slash = i\n for i in range(slash+1,tam):\n name = name + path[i]\n ##Agregar funcion de limiar nombres de los archivos a 15 caracteres\n espaces = 15 - len(name)\n for i in range (espaces):\n name2 = name2 + \" \"\n return name2 + name\n \n\n \n\nif (nombre == \"FiUnamFS\" and version == 
\"0.7\"):\n correcto = True\n while(correcto):\n tablaArchivos()\n print(\"Sistema de Archivos FI Unam FS\")\n print(\"1: Listar\")\n print(\"2: Copiar archivo\")\n print(\"3: Copiar archivo a la computadora\")\n print(\"4: Eliminar archivo\")\n print(\"5: Desgramentar\")\n print(\"6: Mostar informacion del sistema de archivos\")\n print(\"7: Salir\")\n opcion = input(\"Opcion: \")\n if opcion == 6:\n info()\n elif opcion == 1:\n listar()\n elif opcion == 4:\n archivo = raw_input(\"Nombre del archivo a borrar: \")\n if(borrar(archivo)):\n print('El archivo fue borrado')\n else:\n print('No se encontro el archivo')\n elif opcion == 3:\n archivo = raw_input(\"Nombre del archivo a copiar: \")\n nombre = nombreArchivo(archivo)\n deSistemaAPc(archivo, nombre)\n elif opcion == 2:\n archivo = raw_input(\"Nombre del archivo a copiar: \")\n nombre = nombreArchivo(archivo)\n dePcASistema(archivo, nombre)\n elif opcion == 9:\n print(archivos)\n print(clusters)\n print(tams)\n elif opcion == 8:\n va = clusterVacio()\n print (va)\n elif opcion == 7:\n print(\"Sistema desmontado\")\n correcto = False\n elif opcion == 5:\n print(\"No se implemento\")\nelse:\n print(\"No se puede abrir el sistema de archivos debido a que no es el archivo correcto o la version correcta. Revise nuevamente que tenga la imagen correcta.\")\n exit()\n\n \n\n\n",
"step-ids": [
8,
9,
10,
12,
13
]
}
|
[
8,
9,
10,
12,
13
] |
"""APP Cloud Connect errors"""
class CCEError(Exception):
pass
class ConfigException(CCEError):
"""Config exception"""
pass
class FuncException(CCEError):
"""Ext function call exception"""
pass
class HTTPError(CCEError):
""" HTTPError raised when HTTP request returned a error."""
def __init__(self, reason=None):
"""
Initialize HTTPError with `response` object and `status`.
"""
self.reason = reason
super(HTTPError, self).__init__(reason)
class StopCCEIteration(CCEError):
"""Exception to exit from the engine iteration."""
pass
class CCESplitError(CCEError):
"""Exception to exit the job in Split Task"""
pass
class QuitJobError(CCEError):
pass
|
normal
|
{
"blob_id": "e2840eb1b0d731d6b0356835ba371d05ba351ff6",
"index": 5323,
"step-1": "<mask token>\n\n\nclass HTTPError(CCEError):\n \"\"\" HTTPError raised when HTTP request returned a error.\"\"\"\n\n def __init__(self, reason=None):\n \"\"\"\n Initialize HTTPError with `response` object and `status`.\n \"\"\"\n self.reason = reason\n super(HTTPError, self).__init__(reason)\n\n\nclass StopCCEIteration(CCEError):\n \"\"\"Exception to exit from the engine iteration.\"\"\"\n pass\n\n\nclass CCESplitError(CCEError):\n \"\"\"Exception to exit the job in Split Task\"\"\"\n pass\n\n\nclass QuitJobError(CCEError):\n pass\n",
"step-2": "<mask token>\n\n\nclass FuncException(CCEError):\n <mask token>\n pass\n\n\nclass HTTPError(CCEError):\n \"\"\" HTTPError raised when HTTP request returned a error.\"\"\"\n\n def __init__(self, reason=None):\n \"\"\"\n Initialize HTTPError with `response` object and `status`.\n \"\"\"\n self.reason = reason\n super(HTTPError, self).__init__(reason)\n\n\nclass StopCCEIteration(CCEError):\n \"\"\"Exception to exit from the engine iteration.\"\"\"\n pass\n\n\nclass CCESplitError(CCEError):\n \"\"\"Exception to exit the job in Split Task\"\"\"\n pass\n\n\nclass QuitJobError(CCEError):\n pass\n",
"step-3": "<mask token>\n\n\nclass FuncException(CCEError):\n \"\"\"Ext function call exception\"\"\"\n pass\n\n\nclass HTTPError(CCEError):\n \"\"\" HTTPError raised when HTTP request returned a error.\"\"\"\n\n def __init__(self, reason=None):\n \"\"\"\n Initialize HTTPError with `response` object and `status`.\n \"\"\"\n self.reason = reason\n super(HTTPError, self).__init__(reason)\n\n\nclass StopCCEIteration(CCEError):\n \"\"\"Exception to exit from the engine iteration.\"\"\"\n pass\n\n\nclass CCESplitError(CCEError):\n \"\"\"Exception to exit the job in Split Task\"\"\"\n pass\n\n\nclass QuitJobError(CCEError):\n pass\n",
"step-4": "<mask token>\n\n\nclass ConfigException(CCEError):\n \"\"\"Config exception\"\"\"\n pass\n\n\nclass FuncException(CCEError):\n \"\"\"Ext function call exception\"\"\"\n pass\n\n\nclass HTTPError(CCEError):\n \"\"\" HTTPError raised when HTTP request returned a error.\"\"\"\n\n def __init__(self, reason=None):\n \"\"\"\n Initialize HTTPError with `response` object and `status`.\n \"\"\"\n self.reason = reason\n super(HTTPError, self).__init__(reason)\n\n\nclass StopCCEIteration(CCEError):\n \"\"\"Exception to exit from the engine iteration.\"\"\"\n pass\n\n\nclass CCESplitError(CCEError):\n \"\"\"Exception to exit the job in Split Task\"\"\"\n pass\n\n\nclass QuitJobError(CCEError):\n pass\n",
"step-5": "\"\"\"APP Cloud Connect errors\"\"\"\n\n\nclass CCEError(Exception):\n pass\n\n\nclass ConfigException(CCEError):\n \"\"\"Config exception\"\"\"\n pass\n\n\nclass FuncException(CCEError):\n \"\"\"Ext function call exception\"\"\"\n pass\n\n\nclass HTTPError(CCEError):\n \"\"\" HTTPError raised when HTTP request returned a error.\"\"\"\n\n def __init__(self, reason=None):\n \"\"\"\n Initialize HTTPError with `response` object and `status`.\n \"\"\"\n self.reason = reason\n super(HTTPError, self).__init__(reason)\n\n\nclass StopCCEIteration(CCEError):\n \"\"\"Exception to exit from the engine iteration.\"\"\"\n pass\n\n\nclass CCESplitError(CCEError):\n \"\"\"Exception to exit the job in Split Task\"\"\"\n pass\n\n\nclass QuitJobError(CCEError):\n pass\n",
"step-ids": [
8,
9,
10,
12,
14
]
}
|
[
8,
9,
10,
12,
14
] |
import setuptools
setuptools.setup(name='cppersist', install_requires=['Eve'])
|
normal
|
{
"blob_id": "4f1956b34ac3b55b2d40220b79816c139b4a2f5c",
"index": 9574,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nsetuptools.setup(name='cppersist', install_requires=['Eve'])\n",
"step-3": "import setuptools\nsetuptools.setup(name='cppersist', install_requires=['Eve'])\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
import os
# Set a single thread per process for numpy with MKL/BLAS
os.environ['MKL_NUM_THREADS'] = '1'
os.environ['OPENBLAS_NUM_THREADS'] = '1'
os.environ['MKL_DEBUG_CPU_TYPE'] = '5'
import numpy as np
from matplotlib import pyplot as plt
from copy import deepcopy
from kadal.optim_tools.MOBO import MOBO
from kadal.surrogate_models.kriging_model import Kriging
from kadal.surrogate_models.supports.initinfo import initkriginfo
from kadal.testcase.analyticalfcn.cases import evaluate
from kadal.misc.sampling.samplingplan import sampling
n_cpu = 1  # number of CPU cores to use when training the Kriging models
def generate_kriging():
# Sampling
nsample = 20
nvar = 2
nobj = 2
lb = -1 * np.ones(shape=[nvar])
ub = 1 * np.ones(shape=[nvar])
sampoption = "halton"
samplenorm, sample = sampling(sampoption, nvar, nsample, result="real",
upbound=ub, lobound=lb)
X = sample
# Evaluate sample
global y
y = evaluate(X, "schaffer")
# Initialize KrigInfo
KrigInfo1 = initkriginfo()
# Set KrigInfo
KrigInfo1["X"] = X
KrigInfo1["y"] = y[:, 0].reshape(-1, 1)
KrigInfo1["problem"] = "schaffer"
KrigInfo1["nrestart"] = 5
KrigInfo1["ub"] = ub
KrigInfo1["lb"] = lb
KrigInfo1["optimizer"] = "lbfgsb"
# Initialize KrigInfo
KrigInfo2 = deepcopy(KrigInfo1)
KrigInfo2['y'] = y[:, 1].reshape(-1, 1)
# Run Kriging
krigobj1 = Kriging(KrigInfo1, standardization=True, standtype='default',
normy=False, trainvar=False)
krigobj1.train(n_cpu=n_cpu)
loocverr1, _ = krigobj1.loocvcalc()
print("LOOCV error of Kriging model: ", loocverr1, "%")
krigobj2 = Kriging(KrigInfo2, standardization=True, standtype='default',
normy=False, trainvar=False)
krigobj2.train(n_cpu=n_cpu)
loocverr2, _ = krigobj2.loocvcalc()
print("LOOCV error of Kriging model: ", loocverr2, "%")
return krigobj1, krigobj2
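# Runs multi-objective Bayesian optimization (EHVI acquisition) on the two Kriging surrogates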
def runopt(krigobj1, krigobj2):
moboInfo = dict()
moboInfo["nup"] = 3
moboInfo["nrestart"] = 10
moboInfo["acquifunc"] = "ehvi"
moboInfo["acquifuncopt"] = "lbfgsb"
Optim = MOBO(moboInfo, [krigobj1, krigobj2], autoupdate=True, multiupdate=5)
xupdate, yupdate, supdate, metricall = Optim.run(disp=True)
return xupdate, yupdate, metricall
if __name__ == '__main__':
krigobj1, krigobj2 = generate_kriging()
xupdate, yupdate, metricall = runopt(krigobj1, krigobj2)
print(metricall)
plt.scatter(y[:, 0], y[:, 1])
plt.scatter(yupdate[:, 0], yupdate[:, 1])
plt.show()
|
normal
|
{
"blob_id": "ba289bcdc0aa7c2ad70dba7fac541900d0b55387",
"index": 7585,
"step-1": "<mask token>\n\n\ndef generate_kriging():\n nsample = 20\n nvar = 2\n nobj = 2\n lb = -1 * np.ones(shape=[nvar])\n ub = 1 * np.ones(shape=[nvar])\n sampoption = 'halton'\n samplenorm, sample = sampling(sampoption, nvar, nsample, result='real',\n upbound=ub, lobound=lb)\n X = sample\n global y\n y = evaluate(X, 'schaffer')\n KrigInfo1 = initkriginfo()\n KrigInfo1['X'] = X\n KrigInfo1['y'] = y[:, 0].reshape(-1, 1)\n KrigInfo1['problem'] = 'schaffer'\n KrigInfo1['nrestart'] = 5\n KrigInfo1['ub'] = ub\n KrigInfo1['lb'] = lb\n KrigInfo1['optimizer'] = 'lbfgsb'\n KrigInfo2 = deepcopy(KrigInfo1)\n KrigInfo2['y'] = y[:, 1].reshape(-1, 1)\n krigobj1 = Kriging(KrigInfo1, standardization=True, standtype='default',\n normy=False, trainvar=False)\n krigobj1.train(n_cpu=n_cpu)\n loocverr1, _ = krigobj1.loocvcalc()\n print('LOOCV error of Kriging model: ', loocverr1, '%')\n krigobj2 = Kriging(KrigInfo2, standardization=True, standtype='default',\n normy=False, trainvar=False)\n krigobj2.train(n_cpu=n_cpu)\n loocverr2, _ = krigobj2.loocvcalc()\n print('LOOCV error of Kriging model: ', loocverr2, '%')\n return krigobj1, krigobj2\n\n\ndef runopt(krigobj1, krigobj2):\n moboInfo = dict()\n moboInfo['nup'] = 3\n moboInfo['nrestart'] = 10\n moboInfo['acquifunc'] = 'ehvi'\n moboInfo['acquifuncopt'] = 'lbfgsb'\n Optim = MOBO(moboInfo, [krigobj1, krigobj2], autoupdate=True, multiupdate=5\n )\n xupdate, yupdate, supdate, metricall = Optim.run(disp=True)\n return xupdate, yupdate, metricall\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef generate_kriging():\n nsample = 20\n nvar = 2\n nobj = 2\n lb = -1 * np.ones(shape=[nvar])\n ub = 1 * np.ones(shape=[nvar])\n sampoption = 'halton'\n samplenorm, sample = sampling(sampoption, nvar, nsample, result='real',\n upbound=ub, lobound=lb)\n X = sample\n global y\n y = evaluate(X, 'schaffer')\n KrigInfo1 = initkriginfo()\n KrigInfo1['X'] = X\n KrigInfo1['y'] = y[:, 0].reshape(-1, 1)\n KrigInfo1['problem'] = 'schaffer'\n KrigInfo1['nrestart'] = 5\n KrigInfo1['ub'] = ub\n KrigInfo1['lb'] = lb\n KrigInfo1['optimizer'] = 'lbfgsb'\n KrigInfo2 = deepcopy(KrigInfo1)\n KrigInfo2['y'] = y[:, 1].reshape(-1, 1)\n krigobj1 = Kriging(KrigInfo1, standardization=True, standtype='default',\n normy=False, trainvar=False)\n krigobj1.train(n_cpu=n_cpu)\n loocverr1, _ = krigobj1.loocvcalc()\n print('LOOCV error of Kriging model: ', loocverr1, '%')\n krigobj2 = Kriging(KrigInfo2, standardization=True, standtype='default',\n normy=False, trainvar=False)\n krigobj2.train(n_cpu=n_cpu)\n loocverr2, _ = krigobj2.loocvcalc()\n print('LOOCV error of Kriging model: ', loocverr2, '%')\n return krigobj1, krigobj2\n\n\ndef runopt(krigobj1, krigobj2):\n moboInfo = dict()\n moboInfo['nup'] = 3\n moboInfo['nrestart'] = 10\n moboInfo['acquifunc'] = 'ehvi'\n moboInfo['acquifuncopt'] = 'lbfgsb'\n Optim = MOBO(moboInfo, [krigobj1, krigobj2], autoupdate=True, multiupdate=5\n )\n xupdate, yupdate, supdate, metricall = Optim.run(disp=True)\n return xupdate, yupdate, metricall\n\n\nif __name__ == '__main__':\n krigobj1, krigobj2 = generate_kriging()\n xupdate, yupdate, metricall = runopt(krigobj1, krigobj2)\n print(metricall)\n plt.scatter(y[:, 0], y[:, 1])\n plt.scatter(yupdate[:, 0], yupdate[:, 1])\n plt.show()\n",
"step-3": "<mask token>\nos.environ['MKL_NUM_THREADS'] = '1'\nos.environ['OPENBLAS_NUM_THREADS'] = '1'\nos.environ['MKL_DEBUG_CPU_TYPE'] = '5'\n<mask token>\n\n\ndef generate_kriging():\n nsample = 20\n nvar = 2\n nobj = 2\n lb = -1 * np.ones(shape=[nvar])\n ub = 1 * np.ones(shape=[nvar])\n sampoption = 'halton'\n samplenorm, sample = sampling(sampoption, nvar, nsample, result='real',\n upbound=ub, lobound=lb)\n X = sample\n global y\n y = evaluate(X, 'schaffer')\n KrigInfo1 = initkriginfo()\n KrigInfo1['X'] = X\n KrigInfo1['y'] = y[:, 0].reshape(-1, 1)\n KrigInfo1['problem'] = 'schaffer'\n KrigInfo1['nrestart'] = 5\n KrigInfo1['ub'] = ub\n KrigInfo1['lb'] = lb\n KrigInfo1['optimizer'] = 'lbfgsb'\n KrigInfo2 = deepcopy(KrigInfo1)\n KrigInfo2['y'] = y[:, 1].reshape(-1, 1)\n krigobj1 = Kriging(KrigInfo1, standardization=True, standtype='default',\n normy=False, trainvar=False)\n krigobj1.train(n_cpu=n_cpu)\n loocverr1, _ = krigobj1.loocvcalc()\n print('LOOCV error of Kriging model: ', loocverr1, '%')\n krigobj2 = Kriging(KrigInfo2, standardization=True, standtype='default',\n normy=False, trainvar=False)\n krigobj2.train(n_cpu=n_cpu)\n loocverr2, _ = krigobj2.loocvcalc()\n print('LOOCV error of Kriging model: ', loocverr2, '%')\n return krigobj1, krigobj2\n\n\ndef runopt(krigobj1, krigobj2):\n moboInfo = dict()\n moboInfo['nup'] = 3\n moboInfo['nrestart'] = 10\n moboInfo['acquifunc'] = 'ehvi'\n moboInfo['acquifuncopt'] = 'lbfgsb'\n Optim = MOBO(moboInfo, [krigobj1, krigobj2], autoupdate=True, multiupdate=5\n )\n xupdate, yupdate, supdate, metricall = Optim.run(disp=True)\n return xupdate, yupdate, metricall\n\n\nif __name__ == '__main__':\n krigobj1, krigobj2 = generate_kriging()\n xupdate, yupdate, metricall = runopt(krigobj1, krigobj2)\n print(metricall)\n plt.scatter(y[:, 0], y[:, 1])\n plt.scatter(yupdate[:, 0], yupdate[:, 1])\n plt.show()\n",
"step-4": "import os\nos.environ['MKL_NUM_THREADS'] = '1'\nos.environ['OPENBLAS_NUM_THREADS'] = '1'\nos.environ['MKL_DEBUG_CPU_TYPE'] = '5'\nimport numpy as np\nfrom matplotlib import pyplot as plt\nfrom copy import deepcopy\nfrom kadal.optim_tools.MOBO import MOBO\nfrom kadal.surrogate_models.kriging_model import Kriging\nfrom kadal.surrogate_models.supports.initinfo import initkriginfo\nfrom kadal.testcase.analyticalfcn.cases import evaluate\nfrom kadal.misc.sampling.samplingplan import sampling\n\n\ndef generate_kriging():\n nsample = 20\n nvar = 2\n nobj = 2\n lb = -1 * np.ones(shape=[nvar])\n ub = 1 * np.ones(shape=[nvar])\n sampoption = 'halton'\n samplenorm, sample = sampling(sampoption, nvar, nsample, result='real',\n upbound=ub, lobound=lb)\n X = sample\n global y\n y = evaluate(X, 'schaffer')\n KrigInfo1 = initkriginfo()\n KrigInfo1['X'] = X\n KrigInfo1['y'] = y[:, 0].reshape(-1, 1)\n KrigInfo1['problem'] = 'schaffer'\n KrigInfo1['nrestart'] = 5\n KrigInfo1['ub'] = ub\n KrigInfo1['lb'] = lb\n KrigInfo1['optimizer'] = 'lbfgsb'\n KrigInfo2 = deepcopy(KrigInfo1)\n KrigInfo2['y'] = y[:, 1].reshape(-1, 1)\n krigobj1 = Kriging(KrigInfo1, standardization=True, standtype='default',\n normy=False, trainvar=False)\n krigobj1.train(n_cpu=n_cpu)\n loocverr1, _ = krigobj1.loocvcalc()\n print('LOOCV error of Kriging model: ', loocverr1, '%')\n krigobj2 = Kriging(KrigInfo2, standardization=True, standtype='default',\n normy=False, trainvar=False)\n krigobj2.train(n_cpu=n_cpu)\n loocverr2, _ = krigobj2.loocvcalc()\n print('LOOCV error of Kriging model: ', loocverr2, '%')\n return krigobj1, krigobj2\n\n\ndef runopt(krigobj1, krigobj2):\n moboInfo = dict()\n moboInfo['nup'] = 3\n moboInfo['nrestart'] = 10\n moboInfo['acquifunc'] = 'ehvi'\n moboInfo['acquifuncopt'] = 'lbfgsb'\n Optim = MOBO(moboInfo, [krigobj1, krigobj2], autoupdate=True, multiupdate=5\n )\n xupdate, yupdate, supdate, metricall = Optim.run(disp=True)\n return xupdate, yupdate, metricall\n\n\nif __name__ == '__main__':\n krigobj1, krigobj2 = generate_kriging()\n xupdate, yupdate, metricall = runopt(krigobj1, krigobj2)\n print(metricall)\n plt.scatter(y[:, 0], y[:, 1])\n plt.scatter(yupdate[:, 0], yupdate[:, 1])\n plt.show()\n",
"step-5": "import os\n# Set a single thread per process for numpy with MKL/BLAS\nos.environ['MKL_NUM_THREADS'] = '1'\nos.environ['OPENBLAS_NUM_THREADS'] = '1'\nos.environ['MKL_DEBUG_CPU_TYPE'] = '5'\n\nimport numpy as np\nfrom matplotlib import pyplot as plt\nfrom copy import deepcopy\n\nfrom kadal.optim_tools.MOBO import MOBO\nfrom kadal.surrogate_models.kriging_model import Kriging\nfrom kadal.surrogate_models.supports.initinfo import initkriginfo\nfrom kadal.testcase.analyticalfcn.cases import evaluate\nfrom kadal.misc.sampling.samplingplan import sampling\n\n\ndef generate_kriging():\n # Sampling\n nsample = 20\n nvar = 2\n nobj = 2\n lb = -1 * np.ones(shape=[nvar])\n ub = 1 * np.ones(shape=[nvar])\n sampoption = \"halton\"\n samplenorm, sample = sampling(sampoption, nvar, nsample, result=\"real\",\n upbound=ub, lobound=lb)\n X = sample\n # Evaluate sample\n global y\n y = evaluate(X, \"schaffer\")\n\n # Initialize KrigInfo\n KrigInfo1 = initkriginfo()\n # Set KrigInfo\n KrigInfo1[\"X\"] = X\n KrigInfo1[\"y\"] = y[:, 0].reshape(-1, 1)\n KrigInfo1[\"problem\"] = \"schaffer\"\n KrigInfo1[\"nrestart\"] = 5\n KrigInfo1[\"ub\"] = ub\n KrigInfo1[\"lb\"] = lb\n KrigInfo1[\"optimizer\"] = \"lbfgsb\"\n\n # Initialize KrigInfo\n KrigInfo2 = deepcopy(KrigInfo1)\n KrigInfo2['y'] = y[:, 1].reshape(-1, 1)\n\n # Run Kriging\n krigobj1 = Kriging(KrigInfo1, standardization=True, standtype='default',\n normy=False, trainvar=False)\n krigobj1.train(n_cpu=n_cpu)\n loocverr1, _ = krigobj1.loocvcalc()\n print(\"LOOCV error of Kriging model: \", loocverr1, \"%\")\n\n krigobj2 = Kriging(KrigInfo2, standardization=True, standtype='default',\n normy=False, trainvar=False)\n krigobj2.train(n_cpu=n_cpu)\n loocverr2, _ = krigobj2.loocvcalc()\n print(\"LOOCV error of Kriging model: \", loocverr2, \"%\")\n\n return krigobj1, krigobj2\n\n\ndef runopt(krigobj1, krigobj2):\n moboInfo = dict()\n moboInfo[\"nup\"] = 3\n moboInfo[\"nrestart\"] = 10\n moboInfo[\"acquifunc\"] = \"ehvi\"\n moboInfo[\"acquifuncopt\"] = \"lbfgsb\"\n\n Optim = MOBO(moboInfo, [krigobj1, krigobj2], autoupdate=True, multiupdate=5)\n xupdate, yupdate, supdate, metricall = Optim.run(disp=True)\n\n return xupdate, yupdate, metricall\n\n\nif __name__ == '__main__':\n krigobj1, krigobj2 = generate_kriging()\n xupdate, yupdate, metricall = runopt(krigobj1, krigobj2)\n\n print(metricall)\n plt.scatter(y[:, 0], y[:, 1])\n plt.scatter(yupdate[:, 0], yupdate[:, 1])\n plt.show()\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
#from __future__ import absolute_import
#import os
from celery import Celery
#from django.conf import settings
#os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'learning.settings')
app = Celery('tasks', broker="redis://localhost")
#app.config_from_object('django.conf:settings')
#app.autodiscover_tasks(lambda: settings.INSTALLED_APPS)
@app.task
def add(x, y):
return x+y
#print('Request:{0!r}'.format(self.request))
|
normal
|
{
"blob_id": "3ef114dd35ef3995ae73bf85bbe38db4fb7045d8",
"index": 7315,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\[email protected]\ndef add(x, y):\n return x + y\n",
"step-3": "<mask token>\napp = Celery('tasks', broker='redis://localhost')\n\n\[email protected]\ndef add(x, y):\n return x + y\n",
"step-4": "from celery import Celery\napp = Celery('tasks', broker='redis://localhost')\n\n\[email protected]\ndef add(x, y):\n return x + y\n",
"step-5": "\n#from __future__ import absolute_import\n#import os\nfrom celery import Celery\n#from django.conf import settings\n\n#os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'learning.settings')\napp = Celery('tasks', broker=\"redis://localhost\")\n\n\n#app.config_from_object('django.conf:settings')\n#app.autodiscover_tasks(lambda: settings.INSTALLED_APPS)\n\n\[email protected]\ndef add(x, y):\n return x+y\n #print('Request:{0!r}'.format(self.request))\n\n\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import json
import boto3
import os
from helper import getEC2Regions, sendDataToSNS, OPTOUT_TAG, SNS_NOTIFICATION_IIAS_EC2
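# Describes every EC2 instance in the region and filters out the ones that opted out via tag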
def getEC2FilteredRegionalInstanceInfo(region):
ec2RegionalClient = boto3.client('ec2', region_name = region)
paginator = ec2RegionalClient.get_paginator('describe_instances')
page_iterator = paginator.paginate()
allEC2Instances = []
for result in page_iterator:
for reservation in result['Reservations']:
for instance in reservation['Instances']:
allEC2Instances.append({'InstanceId': instance['InstanceId'] , 'Tags': instance.get('Tags',[])})
return excludeOptedOutEC2Instances(allEC2Instances)
def isOutputedOutEC2Instance(instanceInfo):
if any( (d['Key'] == '{}'.format(OPTOUT_TAG) and d['Value'] == 'True') for d in instanceInfo['Tags']):
return True
def excludeOptedOutEC2Instances(ec2Instances):
filteredEC2InstanceIdList = []
for instanceInfo in ec2Instances:
if isOutputedOutEC2Instance(instanceInfo):
            print('Excluding instance {}'.format(instanceInfo))
else:
filteredEC2InstanceIdList.append(instanceInfo['InstanceId'])
return filteredEC2InstanceIdList
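# Collects in-scope instance ids per region, skipping regions with no matches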
def gatherEC2Info():
regionList = getEC2Regions()
ec2RegionDict = {}
for region in regionList:
regionalInstances = getEC2FilteredRegionalInstanceInfo(region)
if len(regionalInstances)>0:
ec2RegionDict[region]=regionalInstances
return ec2RegionDict
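# Lambda entry point: publishes the collected EC2 inventory to SNS when it is not empty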
def handler(event, context):
ec2RegionalInfo = gatherEC2Info()
if len(ec2RegionalInfo.keys())!=0:
print('Sending following ec2 info for CW : {}'.format(ec2RegionalInfo))
messageAttributes = {
'notificationFor': {
'DataType': 'String',
'StringValue': SNS_NOTIFICATION_IIAS_EC2
}
}
sendDataToSNS(ec2RegionalInfo,messageAttributes)
else:
print('No new EC2 instances in IIAS scope')
|
normal
|
{
"blob_id": "d5f1601d11eb54e6c3dafab0137ec8f2358bb568",
"index": 4101,
"step-1": "<mask token>\n\n\ndef getEC2FilteredRegionalInstanceInfo(region):\n ec2RegionalClient = boto3.client('ec2', region_name=region)\n paginator = ec2RegionalClient.get_paginator('describe_instances')\n page_iterator = paginator.paginate()\n allEC2Instances = []\n for result in page_iterator:\n for reservation in result['Reservations']:\n for instance in reservation['Instances']:\n allEC2Instances.append({'InstanceId': instance['InstanceId'\n ], 'Tags': instance.get('Tags', [])})\n return excludeOptedOutEC2Instances(allEC2Instances)\n\n\ndef isOutputedOutEC2Instance(instanceInfo):\n if any(d['Key'] == '{}'.format(OPTOUT_TAG) and d['Value'] == 'True' for\n d in instanceInfo['Tags']):\n return True\n\n\n<mask token>\n\n\ndef handler(event, context):\n ec2RegionalInfo = gatherEC2Info()\n if len(ec2RegionalInfo.keys()) != 0:\n print('Sending following ec2 info for CW : {}'.format(ec2RegionalInfo))\n messageAttributes = {'notificationFor': {'DataType': 'String',\n 'StringValue': SNS_NOTIFICATION_IIAS_EC2}}\n sendDataToSNS(ec2RegionalInfo, messageAttributes)\n else:\n print('No new EC2 instances in IIAS scope')\n",
"step-2": "<mask token>\n\n\ndef getEC2FilteredRegionalInstanceInfo(region):\n ec2RegionalClient = boto3.client('ec2', region_name=region)\n paginator = ec2RegionalClient.get_paginator('describe_instances')\n page_iterator = paginator.paginate()\n allEC2Instances = []\n for result in page_iterator:\n for reservation in result['Reservations']:\n for instance in reservation['Instances']:\n allEC2Instances.append({'InstanceId': instance['InstanceId'\n ], 'Tags': instance.get('Tags', [])})\n return excludeOptedOutEC2Instances(allEC2Instances)\n\n\ndef isOutputedOutEC2Instance(instanceInfo):\n if any(d['Key'] == '{}'.format(OPTOUT_TAG) and d['Value'] == 'True' for\n d in instanceInfo['Tags']):\n return True\n\n\n<mask token>\n\n\ndef gatherEC2Info():\n regionList = getEC2Regions()\n ec2RegionDict = {}\n for region in regionList:\n regionalInstances = getEC2FilteredRegionalInstanceInfo(region)\n if len(regionalInstances) > 0:\n ec2RegionDict[region] = regionalInstances\n return ec2RegionDict\n\n\ndef handler(event, context):\n ec2RegionalInfo = gatherEC2Info()\n if len(ec2RegionalInfo.keys()) != 0:\n print('Sending following ec2 info for CW : {}'.format(ec2RegionalInfo))\n messageAttributes = {'notificationFor': {'DataType': 'String',\n 'StringValue': SNS_NOTIFICATION_IIAS_EC2}}\n sendDataToSNS(ec2RegionalInfo, messageAttributes)\n else:\n print('No new EC2 instances in IIAS scope')\n",
"step-3": "<mask token>\n\n\ndef getEC2FilteredRegionalInstanceInfo(region):\n ec2RegionalClient = boto3.client('ec2', region_name=region)\n paginator = ec2RegionalClient.get_paginator('describe_instances')\n page_iterator = paginator.paginate()\n allEC2Instances = []\n for result in page_iterator:\n for reservation in result['Reservations']:\n for instance in reservation['Instances']:\n allEC2Instances.append({'InstanceId': instance['InstanceId'\n ], 'Tags': instance.get('Tags', [])})\n return excludeOptedOutEC2Instances(allEC2Instances)\n\n\ndef isOutputedOutEC2Instance(instanceInfo):\n if any(d['Key'] == '{}'.format(OPTOUT_TAG) and d['Value'] == 'True' for\n d in instanceInfo['Tags']):\n return True\n\n\ndef excludeOptedOutEC2Instances(ec2Instances):\n filteredEC2InstanceIdList = []\n for instanceInfo in ec2Instances:\n if isOutputedOutEC2Instance(instanceInfo):\n print('Exlcuding instance {}'.format(instanceInfo))\n else:\n filteredEC2InstanceIdList.append(instanceInfo['InstanceId'])\n return filteredEC2InstanceIdList\n\n\ndef gatherEC2Info():\n regionList = getEC2Regions()\n ec2RegionDict = {}\n for region in regionList:\n regionalInstances = getEC2FilteredRegionalInstanceInfo(region)\n if len(regionalInstances) > 0:\n ec2RegionDict[region] = regionalInstances\n return ec2RegionDict\n\n\ndef handler(event, context):\n ec2RegionalInfo = gatherEC2Info()\n if len(ec2RegionalInfo.keys()) != 0:\n print('Sending following ec2 info for CW : {}'.format(ec2RegionalInfo))\n messageAttributes = {'notificationFor': {'DataType': 'String',\n 'StringValue': SNS_NOTIFICATION_IIAS_EC2}}\n sendDataToSNS(ec2RegionalInfo, messageAttributes)\n else:\n print('No new EC2 instances in IIAS scope')\n",
"step-4": "import json\nimport boto3\nimport os\nfrom helper import getEC2Regions, sendDataToSNS, OPTOUT_TAG, SNS_NOTIFICATION_IIAS_EC2\n\n\ndef getEC2FilteredRegionalInstanceInfo(region):\n ec2RegionalClient = boto3.client('ec2', region_name=region)\n paginator = ec2RegionalClient.get_paginator('describe_instances')\n page_iterator = paginator.paginate()\n allEC2Instances = []\n for result in page_iterator:\n for reservation in result['Reservations']:\n for instance in reservation['Instances']:\n allEC2Instances.append({'InstanceId': instance['InstanceId'\n ], 'Tags': instance.get('Tags', [])})\n return excludeOptedOutEC2Instances(allEC2Instances)\n\n\ndef isOutputedOutEC2Instance(instanceInfo):\n if any(d['Key'] == '{}'.format(OPTOUT_TAG) and d['Value'] == 'True' for\n d in instanceInfo['Tags']):\n return True\n\n\ndef excludeOptedOutEC2Instances(ec2Instances):\n filteredEC2InstanceIdList = []\n for instanceInfo in ec2Instances:\n if isOutputedOutEC2Instance(instanceInfo):\n print('Exlcuding instance {}'.format(instanceInfo))\n else:\n filteredEC2InstanceIdList.append(instanceInfo['InstanceId'])\n return filteredEC2InstanceIdList\n\n\ndef gatherEC2Info():\n regionList = getEC2Regions()\n ec2RegionDict = {}\n for region in regionList:\n regionalInstances = getEC2FilteredRegionalInstanceInfo(region)\n if len(regionalInstances) > 0:\n ec2RegionDict[region] = regionalInstances\n return ec2RegionDict\n\n\ndef handler(event, context):\n ec2RegionalInfo = gatherEC2Info()\n if len(ec2RegionalInfo.keys()) != 0:\n print('Sending following ec2 info for CW : {}'.format(ec2RegionalInfo))\n messageAttributes = {'notificationFor': {'DataType': 'String',\n 'StringValue': SNS_NOTIFICATION_IIAS_EC2}}\n sendDataToSNS(ec2RegionalInfo, messageAttributes)\n else:\n print('No new EC2 instances in IIAS scope')\n",
"step-5": "import json\nimport boto3\nimport os\nfrom helper import getEC2Regions, sendDataToSNS, OPTOUT_TAG, SNS_NOTIFICATION_IIAS_EC2\n\n\ndef getEC2FilteredRegionalInstanceInfo(region):\n ec2RegionalClient = boto3.client('ec2', region_name = region)\n paginator = ec2RegionalClient.get_paginator('describe_instances')\n page_iterator = paginator.paginate()\n allEC2Instances = []\n for result in page_iterator:\n for reservation in result['Reservations']:\n for instance in reservation['Instances']:\n allEC2Instances.append({'InstanceId': instance['InstanceId'] , 'Tags': instance.get('Tags',[])})\n return excludeOptedOutEC2Instances(allEC2Instances)\n\ndef isOutputedOutEC2Instance(instanceInfo):\n if any( (d['Key'] == '{}'.format(OPTOUT_TAG) and d['Value'] == 'True') for d in instanceInfo['Tags']):\n return True\n\n \ndef excludeOptedOutEC2Instances(ec2Instances):\n filteredEC2InstanceIdList = []\n for instanceInfo in ec2Instances:\n if isOutputedOutEC2Instance(instanceInfo):\n print('Exlcuding instance {}'.format(instanceInfo))\n else:\n filteredEC2InstanceIdList.append(instanceInfo['InstanceId'])\n return filteredEC2InstanceIdList\n \n \ndef gatherEC2Info():\n regionList = getEC2Regions()\n ec2RegionDict = {}\n for region in regionList:\n regionalInstances = getEC2FilteredRegionalInstanceInfo(region)\n if len(regionalInstances)>0:\n ec2RegionDict[region]=regionalInstances\n return ec2RegionDict\n\n\n \ndef handler(event, context):\n ec2RegionalInfo = gatherEC2Info()\n if len(ec2RegionalInfo.keys())!=0:\n print('Sending following ec2 info for CW : {}'.format(ec2RegionalInfo))\n messageAttributes = {\n 'notificationFor': {\n 'DataType': 'String',\n 'StringValue': SNS_NOTIFICATION_IIAS_EC2\n }\n }\n sendDataToSNS(ec2RegionalInfo,messageAttributes)\n else:\n print('No new EC2 instances in IIAS scope')",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
from django.shortcuts import render
# Create your views here.
from django.shortcuts import render_to_response
from post.models import Post
#def ver_un_post(request, idpost):
# post = Post.objects.get(id=idpost)
#
# return render_to_response("post.html",{"post":post,},)
def home(request):
cursos = Curso.objects.order_by("numero")
return render_to_response("home.html",{"posts":posts},)
|
normal
|
{
"blob_id": "bd81f4431699b1750c69b0bbc82f066332349fbd",
"index": 8976,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef home(request):\n cursos = Curso.objects.order_by('numero')\n return render_to_response('home.html', {'posts': posts})\n",
"step-3": "from django.shortcuts import render\nfrom django.shortcuts import render_to_response\nfrom post.models import Post\n\n\ndef home(request):\n cursos = Curso.objects.order_by('numero')\n return render_to_response('home.html', {'posts': posts})\n",
"step-4": "from django.shortcuts import render\n\n# Create your views here.\n\nfrom django.shortcuts import render_to_response\n\nfrom post.models import Post\n\n#def ver_un_post(request, idpost):\n# post = Post.objects.get(id=idpost)\n# \n# return render_to_response(\"post.html\",{\"post\":post,},)\n\ndef home(request):\n cursos = Curso.objects.order_by(\"numero\")\n \n return render_to_response(\"home.html\",{\"posts\":posts},)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import sys
input = sys.stdin.readline
from collections import deque
size, num = map(int, input().split())
position = list(map(int, input().split()))
cnt = 0
nums = []
for k in range(1, size + 1):
nums.append(k)
size = deque(nums)
position = deque(position)
while position != deque([]):
if position[0] == 1:
size.popleft()
position.popleft()
for i in range(len(position)):
position[i] -= 1
else:
right = 0
left = 0
if position[0] <= (len(size) + 2) // 2:
size.rotate(-1)
cnt += 1
for i in range(len(position)):
position[i] -= 1
if position[i] <= 0:
position[i] = len(size)
else:
size.rotate(1)
cnt += 1
for i in range(len(position)):
position[i] += 1
if position[i] > len(size):
position[i] = 1
print(cnt)
|
normal
|
{
"blob_id": "c0c0ed31a09f2b49448bc1f3519aa61daaba20af",
"index": 5023,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor k in range(1, size + 1):\n nums.append(k)\n<mask token>\nwhile position != deque([]):\n if position[0] == 1:\n size.popleft()\n position.popleft()\n for i in range(len(position)):\n position[i] -= 1\n else:\n right = 0\n left = 0\n if position[0] <= (len(size) + 2) // 2:\n size.rotate(-1)\n cnt += 1\n for i in range(len(position)):\n position[i] -= 1\n if position[i] <= 0:\n position[i] = len(size)\n else:\n size.rotate(1)\n cnt += 1\n for i in range(len(position)):\n position[i] += 1\n if position[i] > len(size):\n position[i] = 1\nprint(cnt)\n",
"step-3": "<mask token>\ninput = sys.stdin.readline\n<mask token>\nsize, num = map(int, input().split())\nposition = list(map(int, input().split()))\ncnt = 0\nnums = []\nfor k in range(1, size + 1):\n nums.append(k)\nsize = deque(nums)\nposition = deque(position)\nwhile position != deque([]):\n if position[0] == 1:\n size.popleft()\n position.popleft()\n for i in range(len(position)):\n position[i] -= 1\n else:\n right = 0\n left = 0\n if position[0] <= (len(size) + 2) // 2:\n size.rotate(-1)\n cnt += 1\n for i in range(len(position)):\n position[i] -= 1\n if position[i] <= 0:\n position[i] = len(size)\n else:\n size.rotate(1)\n cnt += 1\n for i in range(len(position)):\n position[i] += 1\n if position[i] > len(size):\n position[i] = 1\nprint(cnt)\n",
"step-4": "import sys\ninput = sys.stdin.readline\nfrom collections import deque\nsize, num = map(int, input().split())\nposition = list(map(int, input().split()))\ncnt = 0\nnums = []\nfor k in range(1, size + 1):\n nums.append(k)\nsize = deque(nums)\nposition = deque(position)\nwhile position != deque([]):\n if position[0] == 1:\n size.popleft()\n position.popleft()\n for i in range(len(position)):\n position[i] -= 1\n else:\n right = 0\n left = 0\n if position[0] <= (len(size) + 2) // 2:\n size.rotate(-1)\n cnt += 1\n for i in range(len(position)):\n position[i] -= 1\n if position[i] <= 0:\n position[i] = len(size)\n else:\n size.rotate(1)\n cnt += 1\n for i in range(len(position)):\n position[i] += 1\n if position[i] > len(size):\n position[i] = 1\nprint(cnt)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
# -*- coding: utf-8 -*-
"""
Created on Tue Sep 4 15:19:49 2018
@author: haoyu
"""
import numpy as np
def train_test_split(X, y, test_ratio = 0.2, seed = None):
'''将数据X和y按照test_ratio分割成X_train,X_test,y_train,y_test'''
assert X.shape[0] == y.shape[0], \
'the size of X must be equal to the size of y'
assert 0.0 <= test_ratio <=1.0, \
'test_ratio must be valid'
if seed:
np.random.seed(seed)
shuffle_indexes = np.random.permutation(len(X))#打乱顺序获得索引
test_size = int(len(X) * test_ratio)
test_indexes = shuffle_indexes[:test_size]
train_indexes = shuffle_indexes[test_size:]
X_train = X[train_indexes]
y_train = y[train_indexes]
X_test = X[test_indexes]
y_test = y[test_indexes]
return X_train, X_test, y_train, y_test
|
normal
|
{
"blob_id": "beda3d13e3dc12f7527f5c5ba8a0eb05c2734fd9",
"index": 6133,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef train_test_split(X, y, test_ratio=0.2, seed=None):\n \"\"\"将数据X和y按照test_ratio分割成X_train,X_test,y_train,y_test\"\"\"\n assert X.shape[0] == y.shape[0\n ], 'the size of X must be equal to the size of y'\n assert 0.0 <= test_ratio <= 1.0, 'test_ratio must be valid'\n if seed:\n np.random.seed(seed)\n shuffle_indexes = np.random.permutation(len(X))\n test_size = int(len(X) * test_ratio)\n test_indexes = shuffle_indexes[:test_size]\n train_indexes = shuffle_indexes[test_size:]\n X_train = X[train_indexes]\n y_train = y[train_indexes]\n X_test = X[test_indexes]\n y_test = y[test_indexes]\n return X_train, X_test, y_train, y_test\n",
"step-3": "<mask token>\nimport numpy as np\n\n\ndef train_test_split(X, y, test_ratio=0.2, seed=None):\n \"\"\"将数据X和y按照test_ratio分割成X_train,X_test,y_train,y_test\"\"\"\n assert X.shape[0] == y.shape[0\n ], 'the size of X must be equal to the size of y'\n assert 0.0 <= test_ratio <= 1.0, 'test_ratio must be valid'\n if seed:\n np.random.seed(seed)\n shuffle_indexes = np.random.permutation(len(X))\n test_size = int(len(X) * test_ratio)\n test_indexes = shuffle_indexes[:test_size]\n train_indexes = shuffle_indexes[test_size:]\n X_train = X[train_indexes]\n y_train = y[train_indexes]\n X_test = X[test_indexes]\n y_test = y[test_indexes]\n return X_train, X_test, y_train, y_test\n",
"step-4": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Sep 4 15:19:49 2018\n\n@author: haoyu\n\"\"\"\nimport numpy as np\n\ndef train_test_split(X, y, test_ratio = 0.2, seed = None):\n '''将数据X和y按照test_ratio分割成X_train,X_test,y_train,y_test'''\n assert X.shape[0] == y.shape[0], \\\n 'the size of X must be equal to the size of y'\n assert 0.0 <= test_ratio <=1.0, \\\n 'test_ratio must be valid'\n \n if seed:\n np.random.seed(seed)\n \n shuffle_indexes = np.random.permutation(len(X))#打乱顺序获得索引\n\n test_size = int(len(X) * test_ratio)\n test_indexes = shuffle_indexes[:test_size]\n train_indexes = shuffle_indexes[test_size:]\n\n X_train = X[train_indexes]\n y_train = y[train_indexes]\n\n X_test = X[test_indexes]\n y_test = y[test_indexes]\n \n return X_train, X_test, y_train, y_test",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import os
import sys
import json
import logging
import argparse
from glob import glob
from pricewatcher.tools import ensure_mkdir
from pricewatcher.parser.f21 import ForeverParser
from pricewatcher.parser.jcrew import JcrewParser
from pricewatcher.utils.load_es import bulk_load_es
BRAND_PARSERS={
'forever21': ForeverParser,
'jcrew': JcrewParser
}
# Set up logging
FORMAT = '[%(asctime)s][%(levelname)s] %(message)s'
logging.basicConfig(format=FORMAT, datefmt='%m-%d-%Y %H:%M:%S')
logging.getLogger().setLevel(logging.INFO)
def date_handler(obj):
return obj.isoformat() if hasattr(obj, 'isoformat') else obj
def run():
parser = argparse.ArgumentParser(description='Process some integers.')
parser.add_argument('--input-base', required=True, help='')
parser.add_argument('--output-base', default='parsed_pages', help='')
parser.add_argument('--datetime', required=True, help='YYYYMMDD')
parser.add_argument('--hour', default='*', help='HH')
parser.add_argument('--brand', default='*', choices=BRAND_PARSERS.keys(), help='')
parser.add_argument('--load-es', action='store_true')
parser.add_argument('--es-host', default='localhost', help='default to localhost')
parser.add_argument('--es-port', default='9200', help='default to 9200')
parser.add_argument('--es-cleanup', action='store_true', help='remove index before loading new data')
args = parser.parse_args()
# Argument parsing
dt_str = args.datetime
hour_str = args.hour
brand_str = args.brand
input_base = args.input_base
output_base = args.output_base
# ES arguments
es_host, es_port = args.es_host, args.es_port
load_es = args.load_es
# Parsing Raw Pages
input_files = glob(os.path.join(input_base, dt_str, hour_str, brand_str, '*', '*', '*'))
for file_path in input_files:
dt_str, hour_str, br, category, sub_category, filename = file_path.split('/')[-6:]
parser = BRAND_PARSERS[brand_str](file_path)
parsed_docs = parser.parse()
if parsed_docs:
doc_list, price_list = parsed_docs
logging.info('[STATUS] parsed %s docs from %s' % (len(doc_list), file_path))
if not load_es:
# Output Result
output_dir = os.path.join(output_base, os.path.join(dt_str, hour_str, br, category))
ensure_mkdir(output_dir)
output_path = os.path.join(output_dir, filename + '.json')
logging.info('[WRITE] output to %s' % output_path)
# Dump Product List
with open(output_path + '.doc', 'w') as ofile:
ofile.write(json.dumps(doc_list, default=date_handler))
with open(output_path + '.price', 'w') as ofile:
ofile.write(json.dumps(price_list, default=date_handler))
else:
#es_index, es_doctype = br, category
logging.info('[LOAD ES] loading to ElasticSearch...')
preprocessed_list = []
for doc in doc_list:
preprocessed_list.append({ "index" : { "_index" : br, "_type" : category, "_id" : doc['product_id'] } })
preprocessed_list.append(doc)
bulk_load_es(es_host, es_port, br, category, preprocessed_list, opt_dict=None)
bulk_load_es(es_host, es_port, br, 'price', price_list)
|
normal
|
{
"blob_id": "2c22f891f30825bcb97987c78a98988ad2a92210",
"index": 385,
"step-1": "<mask token>\n\n\ndef date_handler(obj):\n return obj.isoformat() if hasattr(obj, 'isoformat') else obj\n\n\ndef run():\n parser = argparse.ArgumentParser(description='Process some integers.')\n parser.add_argument('--input-base', required=True, help='')\n parser.add_argument('--output-base', default='parsed_pages', help='')\n parser.add_argument('--datetime', required=True, help='YYYYMMDD')\n parser.add_argument('--hour', default='*', help='HH')\n parser.add_argument('--brand', default='*', choices=BRAND_PARSERS.keys(\n ), help='')\n parser.add_argument('--load-es', action='store_true')\n parser.add_argument('--es-host', default='localhost', help=\n 'default to localhost')\n parser.add_argument('--es-port', default='9200', help='default to 9200')\n parser.add_argument('--es-cleanup', action='store_true', help=\n 'remove index before loading new data')\n args = parser.parse_args()\n dt_str = args.datetime\n hour_str = args.hour\n brand_str = args.brand\n input_base = args.input_base\n output_base = args.output_base\n es_host, es_port = args.es_host, args.es_port\n load_es = args.load_es\n input_files = glob(os.path.join(input_base, dt_str, hour_str, brand_str,\n '*', '*', '*'))\n for file_path in input_files:\n dt_str, hour_str, br, category, sub_category, filename = (file_path\n .split('/')[-6:])\n parser = BRAND_PARSERS[brand_str](file_path)\n parsed_docs = parser.parse()\n if parsed_docs:\n doc_list, price_list = parsed_docs\n logging.info('[STATUS] parsed %s docs from %s' % (len(doc_list),\n file_path))\n if not load_es:\n output_dir = os.path.join(output_base, os.path.join(dt_str,\n hour_str, br, category))\n ensure_mkdir(output_dir)\n output_path = os.path.join(output_dir, filename + '.json')\n logging.info('[WRITE] output to %s' % output_path)\n with open(output_path + '.doc', 'w') as ofile:\n ofile.write(json.dumps(doc_list, default=date_handler))\n with open(output_path + '.price', 'w') as ofile:\n ofile.write(json.dumps(price_list, default=date_handler))\n else:\n logging.info('[LOAD ES] loading to ElasticSearch...')\n preprocessed_list = []\n for doc in doc_list:\n preprocessed_list.append({'index': {'_index': br, '_type':\n category, '_id': doc['product_id']}})\n preprocessed_list.append(doc)\n bulk_load_es(es_host, es_port, br, category, preprocessed_list,\n opt_dict=None)\n bulk_load_es(es_host, es_port, br, 'price', price_list)\n",
"step-2": "<mask token>\nlogging.basicConfig(format=FORMAT, datefmt='%m-%d-%Y %H:%M:%S')\nlogging.getLogger().setLevel(logging.INFO)\n\n\ndef date_handler(obj):\n return obj.isoformat() if hasattr(obj, 'isoformat') else obj\n\n\ndef run():\n parser = argparse.ArgumentParser(description='Process some integers.')\n parser.add_argument('--input-base', required=True, help='')\n parser.add_argument('--output-base', default='parsed_pages', help='')\n parser.add_argument('--datetime', required=True, help='YYYYMMDD')\n parser.add_argument('--hour', default='*', help='HH')\n parser.add_argument('--brand', default='*', choices=BRAND_PARSERS.keys(\n ), help='')\n parser.add_argument('--load-es', action='store_true')\n parser.add_argument('--es-host', default='localhost', help=\n 'default to localhost')\n parser.add_argument('--es-port', default='9200', help='default to 9200')\n parser.add_argument('--es-cleanup', action='store_true', help=\n 'remove index before loading new data')\n args = parser.parse_args()\n dt_str = args.datetime\n hour_str = args.hour\n brand_str = args.brand\n input_base = args.input_base\n output_base = args.output_base\n es_host, es_port = args.es_host, args.es_port\n load_es = args.load_es\n input_files = glob(os.path.join(input_base, dt_str, hour_str, brand_str,\n '*', '*', '*'))\n for file_path in input_files:\n dt_str, hour_str, br, category, sub_category, filename = (file_path\n .split('/')[-6:])\n parser = BRAND_PARSERS[brand_str](file_path)\n parsed_docs = parser.parse()\n if parsed_docs:\n doc_list, price_list = parsed_docs\n logging.info('[STATUS] parsed %s docs from %s' % (len(doc_list),\n file_path))\n if not load_es:\n output_dir = os.path.join(output_base, os.path.join(dt_str,\n hour_str, br, category))\n ensure_mkdir(output_dir)\n output_path = os.path.join(output_dir, filename + '.json')\n logging.info('[WRITE] output to %s' % output_path)\n with open(output_path + '.doc', 'w') as ofile:\n ofile.write(json.dumps(doc_list, default=date_handler))\n with open(output_path + '.price', 'w') as ofile:\n ofile.write(json.dumps(price_list, default=date_handler))\n else:\n logging.info('[LOAD ES] loading to ElasticSearch...')\n preprocessed_list = []\n for doc in doc_list:\n preprocessed_list.append({'index': {'_index': br, '_type':\n category, '_id': doc['product_id']}})\n preprocessed_list.append(doc)\n bulk_load_es(es_host, es_port, br, category, preprocessed_list,\n opt_dict=None)\n bulk_load_es(es_host, es_port, br, 'price', price_list)\n",
"step-3": "<mask token>\nBRAND_PARSERS = {'forever21': ForeverParser, 'jcrew': JcrewParser}\nFORMAT = '[%(asctime)s][%(levelname)s] %(message)s'\nlogging.basicConfig(format=FORMAT, datefmt='%m-%d-%Y %H:%M:%S')\nlogging.getLogger().setLevel(logging.INFO)\n\n\ndef date_handler(obj):\n return obj.isoformat() if hasattr(obj, 'isoformat') else obj\n\n\ndef run():\n parser = argparse.ArgumentParser(description='Process some integers.')\n parser.add_argument('--input-base', required=True, help='')\n parser.add_argument('--output-base', default='parsed_pages', help='')\n parser.add_argument('--datetime', required=True, help='YYYYMMDD')\n parser.add_argument('--hour', default='*', help='HH')\n parser.add_argument('--brand', default='*', choices=BRAND_PARSERS.keys(\n ), help='')\n parser.add_argument('--load-es', action='store_true')\n parser.add_argument('--es-host', default='localhost', help=\n 'default to localhost')\n parser.add_argument('--es-port', default='9200', help='default to 9200')\n parser.add_argument('--es-cleanup', action='store_true', help=\n 'remove index before loading new data')\n args = parser.parse_args()\n dt_str = args.datetime\n hour_str = args.hour\n brand_str = args.brand\n input_base = args.input_base\n output_base = args.output_base\n es_host, es_port = args.es_host, args.es_port\n load_es = args.load_es\n input_files = glob(os.path.join(input_base, dt_str, hour_str, brand_str,\n '*', '*', '*'))\n for file_path in input_files:\n dt_str, hour_str, br, category, sub_category, filename = (file_path\n .split('/')[-6:])\n parser = BRAND_PARSERS[brand_str](file_path)\n parsed_docs = parser.parse()\n if parsed_docs:\n doc_list, price_list = parsed_docs\n logging.info('[STATUS] parsed %s docs from %s' % (len(doc_list),\n file_path))\n if not load_es:\n output_dir = os.path.join(output_base, os.path.join(dt_str,\n hour_str, br, category))\n ensure_mkdir(output_dir)\n output_path = os.path.join(output_dir, filename + '.json')\n logging.info('[WRITE] output to %s' % output_path)\n with open(output_path + '.doc', 'w') as ofile:\n ofile.write(json.dumps(doc_list, default=date_handler))\n with open(output_path + '.price', 'w') as ofile:\n ofile.write(json.dumps(price_list, default=date_handler))\n else:\n logging.info('[LOAD ES] loading to ElasticSearch...')\n preprocessed_list = []\n for doc in doc_list:\n preprocessed_list.append({'index': {'_index': br, '_type':\n category, '_id': doc['product_id']}})\n preprocessed_list.append(doc)\n bulk_load_es(es_host, es_port, br, category, preprocessed_list,\n opt_dict=None)\n bulk_load_es(es_host, es_port, br, 'price', price_list)\n",
"step-4": "import os\nimport sys\nimport json\nimport logging\nimport argparse\nfrom glob import glob\nfrom pricewatcher.tools import ensure_mkdir\nfrom pricewatcher.parser.f21 import ForeverParser\nfrom pricewatcher.parser.jcrew import JcrewParser\nfrom pricewatcher.utils.load_es import bulk_load_es\nBRAND_PARSERS = {'forever21': ForeverParser, 'jcrew': JcrewParser}\nFORMAT = '[%(asctime)s][%(levelname)s] %(message)s'\nlogging.basicConfig(format=FORMAT, datefmt='%m-%d-%Y %H:%M:%S')\nlogging.getLogger().setLevel(logging.INFO)\n\n\ndef date_handler(obj):\n return obj.isoformat() if hasattr(obj, 'isoformat') else obj\n\n\ndef run():\n parser = argparse.ArgumentParser(description='Process some integers.')\n parser.add_argument('--input-base', required=True, help='')\n parser.add_argument('--output-base', default='parsed_pages', help='')\n parser.add_argument('--datetime', required=True, help='YYYYMMDD')\n parser.add_argument('--hour', default='*', help='HH')\n parser.add_argument('--brand', default='*', choices=BRAND_PARSERS.keys(\n ), help='')\n parser.add_argument('--load-es', action='store_true')\n parser.add_argument('--es-host', default='localhost', help=\n 'default to localhost')\n parser.add_argument('--es-port', default='9200', help='default to 9200')\n parser.add_argument('--es-cleanup', action='store_true', help=\n 'remove index before loading new data')\n args = parser.parse_args()\n dt_str = args.datetime\n hour_str = args.hour\n brand_str = args.brand\n input_base = args.input_base\n output_base = args.output_base\n es_host, es_port = args.es_host, args.es_port\n load_es = args.load_es\n input_files = glob(os.path.join(input_base, dt_str, hour_str, brand_str,\n '*', '*', '*'))\n for file_path in input_files:\n dt_str, hour_str, br, category, sub_category, filename = (file_path\n .split('/')[-6:])\n parser = BRAND_PARSERS[brand_str](file_path)\n parsed_docs = parser.parse()\n if parsed_docs:\n doc_list, price_list = parsed_docs\n logging.info('[STATUS] parsed %s docs from %s' % (len(doc_list),\n file_path))\n if not load_es:\n output_dir = os.path.join(output_base, os.path.join(dt_str,\n hour_str, br, category))\n ensure_mkdir(output_dir)\n output_path = os.path.join(output_dir, filename + '.json')\n logging.info('[WRITE] output to %s' % output_path)\n with open(output_path + '.doc', 'w') as ofile:\n ofile.write(json.dumps(doc_list, default=date_handler))\n with open(output_path + '.price', 'w') as ofile:\n ofile.write(json.dumps(price_list, default=date_handler))\n else:\n logging.info('[LOAD ES] loading to ElasticSearch...')\n preprocessed_list = []\n for doc in doc_list:\n preprocessed_list.append({'index': {'_index': br, '_type':\n category, '_id': doc['product_id']}})\n preprocessed_list.append(doc)\n bulk_load_es(es_host, es_port, br, category, preprocessed_list,\n opt_dict=None)\n bulk_load_es(es_host, es_port, br, 'price', price_list)\n",
"step-5": "import os\nimport sys\nimport json\nimport logging\nimport argparse\nfrom glob import glob\n\nfrom pricewatcher.tools import ensure_mkdir\nfrom pricewatcher.parser.f21 import ForeverParser\nfrom pricewatcher.parser.jcrew import JcrewParser\nfrom pricewatcher.utils.load_es import bulk_load_es\n\nBRAND_PARSERS={\n'forever21': ForeverParser, \n'jcrew': JcrewParser\n}\n\n# Set up logging\nFORMAT = '[%(asctime)s][%(levelname)s] %(message)s'\nlogging.basicConfig(format=FORMAT, datefmt='%m-%d-%Y %H:%M:%S')\nlogging.getLogger().setLevel(logging.INFO)\n\ndef date_handler(obj):\n return obj.isoformat() if hasattr(obj, 'isoformat') else obj\n\ndef run():\n parser = argparse.ArgumentParser(description='Process some integers.')\n parser.add_argument('--input-base', required=True, help='')\n parser.add_argument('--output-base', default='parsed_pages', help='')\n parser.add_argument('--datetime', required=True, help='YYYYMMDD')\n parser.add_argument('--hour', default='*', help='HH')\n parser.add_argument('--brand', default='*', choices=BRAND_PARSERS.keys(), help='')\n parser.add_argument('--load-es', action='store_true')\n parser.add_argument('--es-host', default='localhost', help='default to localhost')\n parser.add_argument('--es-port', default='9200', help='default to 9200') \n parser.add_argument('--es-cleanup', action='store_true', help='remove index before loading new data')\n args = parser.parse_args()\n\n # Argument parsing\n dt_str = args.datetime\n hour_str = args.hour\n brand_str = args.brand\n input_base = args.input_base\n output_base = args.output_base\n\n # ES arguments\n es_host, es_port = args.es_host, args.es_port \n load_es = args.load_es\n\n # Parsing Raw Pages\n input_files = glob(os.path.join(input_base, dt_str, hour_str, brand_str, '*', '*', '*')) \n for file_path in input_files: \n dt_str, hour_str, br, category, sub_category, filename = file_path.split('/')[-6:] \n parser = BRAND_PARSERS[brand_str](file_path)\n parsed_docs = parser.parse()\n if parsed_docs:\n doc_list, price_list = parsed_docs\n\n logging.info('[STATUS] parsed %s docs from %s' % (len(doc_list), file_path))\n if not load_es: \n # Output Result \n output_dir = os.path.join(output_base, os.path.join(dt_str, hour_str, br, category))\n ensure_mkdir(output_dir)\n output_path = os.path.join(output_dir, filename + '.json') \n logging.info('[WRITE] output to %s' % output_path)\n # Dump Product List\n with open(output_path + '.doc', 'w') as ofile:\n ofile.write(json.dumps(doc_list, default=date_handler))\n with open(output_path + '.price', 'w') as ofile:\n ofile.write(json.dumps(price_list, default=date_handler))\n else:\n #es_index, es_doctype = br, category \n logging.info('[LOAD ES] loading to ElasticSearch...')\n preprocessed_list = []\n for doc in doc_list:\n preprocessed_list.append({ \"index\" : { \"_index\" : br, \"_type\" : category, \"_id\" : doc['product_id'] } })\n preprocessed_list.append(doc)\n bulk_load_es(es_host, es_port, br, category, preprocessed_list, opt_dict=None)\n bulk_load_es(es_host, es_port, br, 'price', price_list)\n\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
s = input()
st = '>>-->'
st2 = '<--<<'
sch1 = sch2 = 0
i = 0
j = 0
k = -1
while i != -1:
i = s.find(st, j)
if (k != i) and (i != -1):
k = i
sch1 += 1
j += 1
j = 0
i = 0
k = -1
while i != -1:
i = s.find(st2, j)
if (k != i) and (i != -1):
k = i
sch2 += 1
j += 1
print(sch1+sch2)
|
normal
|
{
"blob_id": "c18e452592d53f22858f2307c60aa997b809c3c3",
"index": 4356,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwhile i != -1:\n i = s.find(st, j)\n if k != i and i != -1:\n k = i\n sch1 += 1\n j += 1\n<mask token>\nwhile i != -1:\n i = s.find(st2, j)\n if k != i and i != -1:\n k = i\n sch2 += 1\n j += 1\nprint(sch1 + sch2)\n",
"step-3": "s = input()\nst = '>>-->'\nst2 = '<--<<'\nsch1 = sch2 = 0\ni = 0\nj = 0\nk = -1\nwhile i != -1:\n i = s.find(st, j)\n if k != i and i != -1:\n k = i\n sch1 += 1\n j += 1\nj = 0\ni = 0\nk = -1\nwhile i != -1:\n i = s.find(st2, j)\n if k != i and i != -1:\n k = i\n sch2 += 1\n j += 1\nprint(sch1 + sch2)\n",
"step-4": "s = input()\nst = '>>-->'\nst2 = '<--<<'\nsch1 = sch2 = 0\ni = 0\nj = 0\nk = -1\nwhile i != -1:\n i = s.find(st, j)\n if (k != i) and (i != -1):\n k = i\n sch1 += 1\n j += 1\nj = 0\ni = 0\nk = -1\nwhile i != -1:\n i = s.find(st2, j)\n if (k != i) and (i != -1):\n k = i\n sch2 += 1\n j += 1\nprint(sch1+sch2)",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from ..models import Empleado, Puesto, Tareas
from django.contrib.auth import login, logout
from django.contrib.auth.models import User, Group
from rest_framework.permissions import AllowAny
from rest_framework.response import Response
from rest_framework.views import APIView
from .serializers import EmpleadoSerializer, PuestoSerializer, TareasSerializer, UserSerializer, GroupSerializer
from rest_framework import viewsets
from . import permissions, authenticators
class EmpleadoViewSet(viewsets.ModelViewSet):
#queryset = Empleado.objects.all()
model = Empleado
serializer_class = EmpleadoSerializer
permission_classes = (permissions.IsOwner,)
def pre_save(self, obj):
#add user to object if user is logged in
if isinstance(self.request.user, User):
obj.user = self.request.user
class PuestoViewSet(viewsets.ModelViewSet):
queryset = Puesto.objects.all()
#model = Puesto
serializer_class = PuestoSerializer
permission_classes = (permissions.IsOwner,)
class TareasViewSet(viewsets.ModelViewSet):
queryset = Tareas.objects.all()
serializer_class = TareasSerializer
class UserViewSet(viewsets.ModelViewSet):
queryset = User.objects.all()
model = User
serializer_class = UserSerializer
def get_permissions(self):
#Allow non-authenticated user to create
return (AllowAny() if self.request.method == 'POST'
else permissions.IsStaffOrTargetUser()),
class GroupViewSet(viewsets.ModelViewSet):
queryset = Group.objects.all()
serializer_class = GroupSerializer
class AuthView(APIView):
authentication_classes = (authenticators.QuietBasicAuthentication,)
def post(self, request, *args, **kwargs):
login(request, request.user)
return Response(serializers.UserSerializer(request.user).data)
def delete(self, request, *args, **kwargs):
logout(request)
return Response()
|
normal
|
{
"blob_id": "cce85d8a34fd20c699b7a87d402b34231b0d5dbb",
"index": 3186,
"step-1": "<mask token>\n\n\nclass UserViewSet(viewsets.ModelViewSet):\n queryset = User.objects.all()\n model = User\n serializer_class = UserSerializer\n\n def get_permissions(self):\n return (AllowAny() if self.request.method == 'POST' else\n permissions.IsStaffOrTargetUser(),)\n\n\nclass GroupViewSet(viewsets.ModelViewSet):\n queryset = Group.objects.all()\n serializer_class = GroupSerializer\n\n\nclass AuthView(APIView):\n authentication_classes = authenticators.QuietBasicAuthentication,\n\n def post(self, request, *args, **kwargs):\n login(request, request.user)\n return Response(serializers.UserSerializer(request.user).data)\n\n def delete(self, request, *args, **kwargs):\n logout(request)\n return Response()\n",
"step-2": "<mask token>\n\n\nclass TareasViewSet(viewsets.ModelViewSet):\n queryset = Tareas.objects.all()\n serializer_class = TareasSerializer\n\n\nclass UserViewSet(viewsets.ModelViewSet):\n queryset = User.objects.all()\n model = User\n serializer_class = UserSerializer\n\n def get_permissions(self):\n return (AllowAny() if self.request.method == 'POST' else\n permissions.IsStaffOrTargetUser(),)\n\n\nclass GroupViewSet(viewsets.ModelViewSet):\n queryset = Group.objects.all()\n serializer_class = GroupSerializer\n\n\nclass AuthView(APIView):\n authentication_classes = authenticators.QuietBasicAuthentication,\n\n def post(self, request, *args, **kwargs):\n login(request, request.user)\n return Response(serializers.UserSerializer(request.user).data)\n\n def delete(self, request, *args, **kwargs):\n logout(request)\n return Response()\n",
"step-3": "<mask token>\n\n\nclass EmpleadoViewSet(viewsets.ModelViewSet):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\nclass PuestoViewSet(viewsets.ModelViewSet):\n queryset = Puesto.objects.all()\n serializer_class = PuestoSerializer\n permission_classes = permissions.IsOwner,\n\n\nclass TareasViewSet(viewsets.ModelViewSet):\n queryset = Tareas.objects.all()\n serializer_class = TareasSerializer\n\n\nclass UserViewSet(viewsets.ModelViewSet):\n queryset = User.objects.all()\n model = User\n serializer_class = UserSerializer\n\n def get_permissions(self):\n return (AllowAny() if self.request.method == 'POST' else\n permissions.IsStaffOrTargetUser(),)\n\n\nclass GroupViewSet(viewsets.ModelViewSet):\n queryset = Group.objects.all()\n serializer_class = GroupSerializer\n\n\nclass AuthView(APIView):\n authentication_classes = authenticators.QuietBasicAuthentication,\n\n def post(self, request, *args, **kwargs):\n login(request, request.user)\n return Response(serializers.UserSerializer(request.user).data)\n\n def delete(self, request, *args, **kwargs):\n logout(request)\n return Response()\n",
"step-4": "<mask token>\n\n\nclass EmpleadoViewSet(viewsets.ModelViewSet):\n model = Empleado\n serializer_class = EmpleadoSerializer\n permission_classes = permissions.IsOwner,\n\n def pre_save(self, obj):\n if isinstance(self.request.user, User):\n obj.user = self.request.user\n\n\nclass PuestoViewSet(viewsets.ModelViewSet):\n queryset = Puesto.objects.all()\n serializer_class = PuestoSerializer\n permission_classes = permissions.IsOwner,\n\n\nclass TareasViewSet(viewsets.ModelViewSet):\n queryset = Tareas.objects.all()\n serializer_class = TareasSerializer\n\n\nclass UserViewSet(viewsets.ModelViewSet):\n queryset = User.objects.all()\n model = User\n serializer_class = UserSerializer\n\n def get_permissions(self):\n return (AllowAny() if self.request.method == 'POST' else\n permissions.IsStaffOrTargetUser(),)\n\n\nclass GroupViewSet(viewsets.ModelViewSet):\n queryset = Group.objects.all()\n serializer_class = GroupSerializer\n\n\nclass AuthView(APIView):\n authentication_classes = authenticators.QuietBasicAuthentication,\n\n def post(self, request, *args, **kwargs):\n login(request, request.user)\n return Response(serializers.UserSerializer(request.user).data)\n\n def delete(self, request, *args, **kwargs):\n logout(request)\n return Response()\n",
"step-5": "from ..models import Empleado, Puesto, Tareas\r\nfrom django.contrib.auth import login, logout\r\nfrom django.contrib.auth.models import User, Group\r\nfrom rest_framework.permissions import AllowAny\r\nfrom rest_framework.response import Response\r\nfrom rest_framework.views import APIView\r\nfrom .serializers import EmpleadoSerializer, PuestoSerializer, TareasSerializer, UserSerializer, GroupSerializer\r\nfrom rest_framework import viewsets\r\nfrom . import permissions, authenticators\r\n\r\nclass EmpleadoViewSet(viewsets.ModelViewSet):\r\n\t#queryset = Empleado.objects.all()\r\n\tmodel = Empleado\r\n\tserializer_class = EmpleadoSerializer\r\n\tpermission_classes = (permissions.IsOwner,)\r\n\r\n\tdef pre_save(self, obj):\r\n\t\t#add user to object if user is logged in\r\n\t\tif isinstance(self.request.user, User):\r\n\t\t\tobj.user = self.request.user\r\n\r\nclass PuestoViewSet(viewsets.ModelViewSet):\r\n\tqueryset = Puesto.objects.all()\r\n\t#model = Puesto\r\n\tserializer_class = PuestoSerializer\r\n\tpermission_classes = (permissions.IsOwner,)\r\n\r\nclass TareasViewSet(viewsets.ModelViewSet):\r\n\tqueryset = Tareas.objects.all()\r\n\tserializer_class = TareasSerializer\r\n\r\nclass UserViewSet(viewsets.ModelViewSet):\r\n\tqueryset = User.objects.all()\r\n\tmodel = User\r\n\tserializer_class = UserSerializer\r\n\r\n\tdef get_permissions(self):\r\n\t\t#Allow non-authenticated user to create\r\n\t\treturn (AllowAny() if self.request.method == 'POST'\r\n\t\t\telse permissions.IsStaffOrTargetUser()),\r\n\r\n\r\nclass GroupViewSet(viewsets.ModelViewSet):\r\n\tqueryset = Group.objects.all()\r\n\tserializer_class = GroupSerializer\r\n\r\nclass AuthView(APIView):\r\n\tauthentication_classes = (authenticators.QuietBasicAuthentication,)\r\n\r\n\tdef post(self, request, *args, **kwargs):\r\n\t\tlogin(request, request.user)\r\n\t\treturn Response(serializers.UserSerializer(request.user).data)\r\n\r\n\tdef delete(self, request, *args, **kwargs):\r\n\t\tlogout(request)\r\n\t\treturn Response()\r\n",
"step-ids": [
9,
11,
14,
16,
18
]
}
|
[
9,
11,
14,
16,
18
] |
def gen_metadata(fn):
metadata = {}
lines = open(fn,'r').readlines()
for line in lines:
line = line.rstrip()
if len(line) == 0:
continue
elif line.startswith('#'):
continue
elif line.startswith('%'):
continue
else:
# Special case RingThresh
firstWord = line.split()[0]
if line.startswith('RingThresh'):
if 'RingThresh' not in metadata.keys():
metadata.update({'RingThresh':{}})
strippedline = line.split(firstWord)[1].strip()
secondword = strippedline.split()[0]
metadata['RingThresh'].update({secondword:strippedline.split(secondword)[1].split('#')[0].strip()})
else:
metadata.update({firstWord : line.split(firstWord)[1].split('#')[0].strip()})
return metadata
def SetupPayloads(inp):
flow_input = {
"input": {
"inject_source_endpoint_id": inp['sourceEP'],
"funcx_endpoint_non_compute": inp['sourceNCEP'],
"proc_endpoint_non_compute": inp['procNCEP'],
"inject_source_path": inp['sourcePath'],
"inject_destination_endpoint_id": inp['remoteDataEP'],
"extract_source_endpoint_id": inp['remoteDataEP'],
"funcx_endpoint_compute": inp['funcx_endpoint_compute'],
"inject_destination_path": inp['executePath'],
"extract_source_path": inp['executeResultPath'],
"extract_destination_endpoint_id": inp['destEP'],
"extract_destination_path": inp['resultPath'],
"paramFileName": inp['pfName'],
"startLayerNr": inp['startLayerNr'],
"endLayerNr": inp['endLayerNr'],
"nFrames": inp['nFrames'],
"numProcs": inp['numProcs'],
"numBlocks": inp['numBlocks'],
"timePath": inp['timePath'],
"StartFileNrFirstLayer": inp['startNrFirstLayer'],
"NrFilesPerSweep": inp['nrFilesPerSweep'],
"FileStem": inp['fileStem'],
"SeedFolder": inp['seedFolder'],
"RawFolder": inp['rawFolder'],
"darkFN": inp['darkFN'],
"StartNr": inp['startNr'],
"EndNr": inp['endNr'],
'extract_recursive': False,
'inject_recursive': True,}
}
flow_input['input'].update({
'multipletasks':[{
'startLayerNr':inp['startLayerNr'],
'endLayerNr':inp['endLayerNr'],
'numProcs':inp['numProcs'],
'nFrames':inp['nFrames'],
'numBlocks':inp['numBlocks'],
'blockNr':idx,
'timePath':inp['timePath'],
'FileStem':inp['fileStem'],
'SeedFolder':inp['seedFolder'],
'RawFolder':inp['rawFolder'],
'paramFileName':inp['pfName'],
}
for idx in range(inp['numBlocks'])
]
})
flow_input['input'].update({
'pilot':{
'dataset':f'{inp["sourcePath"]}/{inp["fileStem"]}_Layer_{str(inp["startLayerNr"]).zfill(4)}_Analysis_Time_{inp["timePath"]}/{inp["fileStem"]}_Layer_{str(inp["startLayerNr"]).zfill(4)}_Analysis_Time_{inp["timePath"]}/',
'index':inp['portal_id'],
'project':'hedm',
'source_globus_endpoint':inp['sourceEP'],
}
})
flow_input['input']['pilot'].update({
'metadata':gen_metadata(inp['pfName']),
})
flow_input['input']['pilot']['metadata'].update({
'exp_id':f'{inp["experimentName"]}_{inp["fileStem"]}_{inp["timePath"]}',
})
flow_input['input']['pilot']['metadata'].update({
'time_path':inp["timePath"],
})
flow_input['input']['pilot']['metadata'].update({
'startNr':inp["startNr"],
'endNr':inp["endNr"],
})
return flow_input
|
normal
|
{
"blob_id": "5066c2a5219cf1b233b4985efc7a4eb494b784ca",
"index": 7363,
"step-1": "<mask token>\n",
"step-2": "def gen_metadata(fn):\n metadata = {}\n lines = open(fn, 'r').readlines()\n for line in lines:\n line = line.rstrip()\n if len(line) == 0:\n continue\n elif line.startswith('#'):\n continue\n elif line.startswith('%'):\n continue\n else:\n firstWord = line.split()[0]\n if line.startswith('RingThresh'):\n if 'RingThresh' not in metadata.keys():\n metadata.update({'RingThresh': {}})\n strippedline = line.split(firstWord)[1].strip()\n secondword = strippedline.split()[0]\n metadata['RingThresh'].update({secondword: strippedline.\n split(secondword)[1].split('#')[0].strip()})\n else:\n metadata.update({firstWord: line.split(firstWord)[1].split(\n '#')[0].strip()})\n return metadata\n\n\n<mask token>\n",
"step-3": "def gen_metadata(fn):\n metadata = {}\n lines = open(fn, 'r').readlines()\n for line in lines:\n line = line.rstrip()\n if len(line) == 0:\n continue\n elif line.startswith('#'):\n continue\n elif line.startswith('%'):\n continue\n else:\n firstWord = line.split()[0]\n if line.startswith('RingThresh'):\n if 'RingThresh' not in metadata.keys():\n metadata.update({'RingThresh': {}})\n strippedline = line.split(firstWord)[1].strip()\n secondword = strippedline.split()[0]\n metadata['RingThresh'].update({secondword: strippedline.\n split(secondword)[1].split('#')[0].strip()})\n else:\n metadata.update({firstWord: line.split(firstWord)[1].split(\n '#')[0].strip()})\n return metadata\n\n\ndef SetupPayloads(inp):\n flow_input = {'input': {'inject_source_endpoint_id': inp['sourceEP'],\n 'funcx_endpoint_non_compute': inp['sourceNCEP'],\n 'proc_endpoint_non_compute': inp['procNCEP'], 'inject_source_path':\n inp['sourcePath'], 'inject_destination_endpoint_id': inp[\n 'remoteDataEP'], 'extract_source_endpoint_id': inp['remoteDataEP'],\n 'funcx_endpoint_compute': inp['funcx_endpoint_compute'],\n 'inject_destination_path': inp['executePath'],\n 'extract_source_path': inp['executeResultPath'],\n 'extract_destination_endpoint_id': inp['destEP'],\n 'extract_destination_path': inp['resultPath'], 'paramFileName': inp\n ['pfName'], 'startLayerNr': inp['startLayerNr'], 'endLayerNr': inp[\n 'endLayerNr'], 'nFrames': inp['nFrames'], 'numProcs': inp[\n 'numProcs'], 'numBlocks': inp['numBlocks'], 'timePath': inp[\n 'timePath'], 'StartFileNrFirstLayer': inp['startNrFirstLayer'],\n 'NrFilesPerSweep': inp['nrFilesPerSweep'], 'FileStem': inp[\n 'fileStem'], 'SeedFolder': inp['seedFolder'], 'RawFolder': inp[\n 'rawFolder'], 'darkFN': inp['darkFN'], 'StartNr': inp['startNr'],\n 'EndNr': inp['endNr'], 'extract_recursive': False,\n 'inject_recursive': True}}\n flow_input['input'].update({'multipletasks': [{'startLayerNr': inp[\n 'startLayerNr'], 'endLayerNr': inp['endLayerNr'], 'numProcs': inp[\n 'numProcs'], 'nFrames': inp['nFrames'], 'numBlocks': inp[\n 'numBlocks'], 'blockNr': idx, 'timePath': inp['timePath'],\n 'FileStem': inp['fileStem'], 'SeedFolder': inp['seedFolder'],\n 'RawFolder': inp['rawFolder'], 'paramFileName': inp['pfName']} for\n idx in range(inp['numBlocks'])]})\n flow_input['input'].update({'pilot': {'dataset':\n f\"{inp['sourcePath']}/{inp['fileStem']}_Layer_{str(inp['startLayerNr']).zfill(4)}_Analysis_Time_{inp['timePath']}/{inp['fileStem']}_Layer_{str(inp['startLayerNr']).zfill(4)}_Analysis_Time_{inp['timePath']}/\"\n , 'index': inp['portal_id'], 'project': 'hedm',\n 'source_globus_endpoint': inp['sourceEP']}})\n flow_input['input']['pilot'].update({'metadata': gen_metadata(inp[\n 'pfName'])})\n flow_input['input']['pilot']['metadata'].update({'exp_id':\n f\"{inp['experimentName']}_{inp['fileStem']}_{inp['timePath']}\"})\n flow_input['input']['pilot']['metadata'].update({'time_path': inp[\n 'timePath']})\n flow_input['input']['pilot']['metadata'].update({'startNr': inp[\n 'startNr'], 'endNr': inp['endNr']})\n return flow_input\n",
"step-4": "def gen_metadata(fn):\n\tmetadata = {}\n\tlines = open(fn,'r').readlines()\n\tfor line in lines:\n\t\tline = line.rstrip()\n\t\tif len(line) == 0:\n\t\t\tcontinue\n\t\telif line.startswith('#'):\n\t\t\tcontinue\n\t\telif line.startswith('%'):\n\t\t\tcontinue\n\t\telse:\n\t\t\t# Special case RingThresh\n\t\t\tfirstWord = line.split()[0]\n\t\t\tif line.startswith('RingThresh'):\n\t\t\t\tif 'RingThresh' not in metadata.keys():\n\t\t\t\t\tmetadata.update({'RingThresh':{}})\n\t\t\t\tstrippedline = line.split(firstWord)[1].strip()\n\t\t\t\tsecondword = strippedline.split()[0]\n\t\t\t\tmetadata['RingThresh'].update({secondword:strippedline.split(secondword)[1].split('#')[0].strip()})\n\t\t\telse:\n\t\t\t\tmetadata.update({firstWord : line.split(firstWord)[1].split('#')[0].strip()})\n\treturn metadata\n\ndef SetupPayloads(inp):\n\tflow_input = {\n\t\t\"input\": {\n\t\t\t\"inject_source_endpoint_id\":\t\tinp['sourceEP'],\n\t\t\t\"funcx_endpoint_non_compute\":\t\tinp['sourceNCEP'],\n\t\t\t\"proc_endpoint_non_compute\":\t\tinp['procNCEP'],\n\t\t\t\"inject_source_path\":\t\t\t\tinp['sourcePath'],\n\t\t\t\"inject_destination_endpoint_id\":\tinp['remoteDataEP'],\n\t\t\t\"extract_source_endpoint_id\":\t\tinp['remoteDataEP'],\n\t\t\t\"funcx_endpoint_compute\":\t\t\tinp['funcx_endpoint_compute'],\n\t\t\t\"inject_destination_path\":\t\t\tinp['executePath'],\n\t\t\t\"extract_source_path\":\t\t\t\tinp['executeResultPath'],\n\t\t\t\"extract_destination_endpoint_id\":\tinp['destEP'],\n\t\t\t\"extract_destination_path\":\t\t\tinp['resultPath'],\n\t\t\t\"paramFileName\":\t\t\t\t\tinp['pfName'],\n\t\t\t\"startLayerNr\":\t\t\t\t\t\tinp['startLayerNr'],\n\t\t\t\"endLayerNr\":\t\t\t\t\t\tinp['endLayerNr'],\n\t\t\t\"nFrames\":\t\t\t\t\t\t\tinp['nFrames'],\n\t\t\t\"numProcs\":\t\t\t\t\t\t\tinp['numProcs'],\n\t\t\t\"numBlocks\":\t\t\t\t\t\tinp['numBlocks'],\n\t\t\t\"timePath\":\t\t\t\t\t\t\tinp['timePath'],\n\t\t\t\"StartFileNrFirstLayer\":\t\t\tinp['startNrFirstLayer'],\n\t\t\t\"NrFilesPerSweep\":\t\t\t\t\tinp['nrFilesPerSweep'],\n\t\t\t\"FileStem\":\t\t\t\t\t\t\tinp['fileStem'],\n\t\t\t\"SeedFolder\":\t\t\t\t\t\tinp['seedFolder'],\n\t\t\t\"RawFolder\":\t\t\t\t\t\tinp['rawFolder'],\n\t\t\t\"darkFN\":\t\t\t\t\t\t\tinp['darkFN'],\n\t\t\t\"StartNr\":\t\t\t\t\t\t\tinp['startNr'],\n\t\t\t\"EndNr\":\t\t\t\t\t\t\tinp['endNr'],\n\t\t\t'extract_recursive':\t\t\t\tFalse,\n\t\t\t'inject_recursive':\t\t\t\t\tTrue,}\n\t\t}\n\tflow_input['input'].update({\n\t\t\t'multipletasks':[{\n\t\t\t\t'startLayerNr':inp['startLayerNr'],\n\t\t\t\t'endLayerNr':inp['endLayerNr'],\n\t\t\t\t'numProcs':inp['numProcs'],\n\t\t\t\t'nFrames':inp['nFrames'],\n\t\t\t\t'numBlocks':inp['numBlocks'],\n\t\t\t\t'blockNr':idx,\n\t\t\t\t'timePath':inp['timePath'],\n\t\t\t\t'FileStem':inp['fileStem'],\n\t\t\t\t'SeedFolder':inp['seedFolder'],\n\t\t\t\t'RawFolder':inp['rawFolder'],\n\t\t\t\t'paramFileName':inp['pfName'],\n\t\t\t\t}\n\t\t\tfor idx in 
range(inp['numBlocks'])\n\t\t]\n\t})\n\tflow_input['input'].update({\n\t\t'pilot':{\n\t\t\t'dataset':f'{inp[\"sourcePath\"]}/{inp[\"fileStem\"]}_Layer_{str(inp[\"startLayerNr\"]).zfill(4)}_Analysis_Time_{inp[\"timePath\"]}/{inp[\"fileStem\"]}_Layer_{str(inp[\"startLayerNr\"]).zfill(4)}_Analysis_Time_{inp[\"timePath\"]}/',\n\t\t\t'index':inp['portal_id'],\n\t\t\t'project':'hedm',\n\t\t\t'source_globus_endpoint':inp['sourceEP'],\n\t\t}\n\t})\n\t\n\tflow_input['input']['pilot'].update({\n\t\t'metadata':gen_metadata(inp['pfName']),\n\t})\n\tflow_input['input']['pilot']['metadata'].update({\n\t\t'exp_id':f'{inp[\"experimentName\"]}_{inp[\"fileStem\"]}_{inp[\"timePath\"]}',\n\t})\n\tflow_input['input']['pilot']['metadata'].update({\n\t\t'time_path':inp[\"timePath\"],\n\t})\n\tflow_input['input']['pilot']['metadata'].update({\n\t\t'startNr':inp[\"startNr\"],\n\t\t'endNr':inp[\"endNr\"],\n\t})\n\treturn flow_input\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import sys
def dir_slash():
slash = '/'
if 'win' in sys.platform:
slash = '\\'
return slash
|
normal
|
{
"blob_id": "b12c8d0cb1cd1e48df6246fe3f16467b2db296e0",
"index": 745,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef dir_slash():\n slash = '/'\n if 'win' in sys.platform:\n slash = '\\\\'\n return slash\n",
"step-3": "import sys\n\n\ndef dir_slash():\n slash = '/'\n if 'win' in sys.platform:\n slash = '\\\\'\n return slash\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
n = 1
ip = []
ma = []
l = [0, 0, 0, 0, 0, 0, 0] # a, b, c, d, e, wpm, pr
while n != 0:
a = input().strip().split("~")
n = len(a)
if n == 1:
break
ip.append(a[0])
ma.append(a[1])
for i in ip:
ipn = i.split(".")
try:
if 1 <= int(ipn[0]) <= 126:
p = 0
elif 128 <= int(ipn[0]) <= 191:
p = 1
elif 192 <= int(ipn[0]) <= 223:
p = 2
elif 224 <= int(ipn[0]) <= 239:
p = 3
elif 240 <= int(ipn(0)) <= 255:
p = 4
elif int(ipn[0]) == 0 or 127:
continue
if 0 <= int(ipn[1]) <= 255:
if int(ipn[0]) == 10:
p = 6
elif int(ipn[0]) == 172 and 16 <= int(ipn[1]) <= 31:
p = 6
elif int(ipn[0]) == 192 and int(ipn[1]) == 168:
p = 6
if 0 <= int(ipn[2]) <= 255:
if 0 <= int(ipn[3]) <= 255:
l[p] += 1
else:
l[5] += 1
else:
l[5] += 1
else:
l[5] += 1
except:
l[5] += 1
for m in ma:
mn = m.split(".")
b = bin(int(''.join(mn)))
le = b.find("0")
ri = b.rfind("1")
if le > ri:
l[5] += 1
for o in l:
print(str(o),end=" ")
|
normal
|
{
"blob_id": "4a13f05fbbe598242f5663d27d578d2eb977e103",
"index": 6137,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwhile n != 0:\n a = input().strip().split('~')\n n = len(a)\n if n == 1:\n break\n ip.append(a[0])\n ma.append(a[1])\nfor i in ip:\n ipn = i.split('.')\n try:\n if 1 <= int(ipn[0]) <= 126:\n p = 0\n elif 128 <= int(ipn[0]) <= 191:\n p = 1\n elif 192 <= int(ipn[0]) <= 223:\n p = 2\n elif 224 <= int(ipn[0]) <= 239:\n p = 3\n elif 240 <= int(ipn(0)) <= 255:\n p = 4\n elif int(ipn[0]) == 0 or 127:\n continue\n if 0 <= int(ipn[1]) <= 255:\n if int(ipn[0]) == 10:\n p = 6\n elif int(ipn[0]) == 172 and 16 <= int(ipn[1]) <= 31:\n p = 6\n elif int(ipn[0]) == 192 and int(ipn[1]) == 168:\n p = 6\n if 0 <= int(ipn[2]) <= 255:\n if 0 <= int(ipn[3]) <= 255:\n l[p] += 1\n else:\n l[5] += 1\n else:\n l[5] += 1\n else:\n l[5] += 1\n except:\n l[5] += 1\nfor m in ma:\n mn = m.split('.')\n b = bin(int(''.join(mn)))\n le = b.find('0')\n ri = b.rfind('1')\n if le > ri:\n l[5] += 1\nfor o in l:\n print(str(o), end=' ')\n",
"step-3": "n = 1\nip = []\nma = []\nl = [0, 0, 0, 0, 0, 0, 0]\nwhile n != 0:\n a = input().strip().split('~')\n n = len(a)\n if n == 1:\n break\n ip.append(a[0])\n ma.append(a[1])\nfor i in ip:\n ipn = i.split('.')\n try:\n if 1 <= int(ipn[0]) <= 126:\n p = 0\n elif 128 <= int(ipn[0]) <= 191:\n p = 1\n elif 192 <= int(ipn[0]) <= 223:\n p = 2\n elif 224 <= int(ipn[0]) <= 239:\n p = 3\n elif 240 <= int(ipn(0)) <= 255:\n p = 4\n elif int(ipn[0]) == 0 or 127:\n continue\n if 0 <= int(ipn[1]) <= 255:\n if int(ipn[0]) == 10:\n p = 6\n elif int(ipn[0]) == 172 and 16 <= int(ipn[1]) <= 31:\n p = 6\n elif int(ipn[0]) == 192 and int(ipn[1]) == 168:\n p = 6\n if 0 <= int(ipn[2]) <= 255:\n if 0 <= int(ipn[3]) <= 255:\n l[p] += 1\n else:\n l[5] += 1\n else:\n l[5] += 1\n else:\n l[5] += 1\n except:\n l[5] += 1\nfor m in ma:\n mn = m.split('.')\n b = bin(int(''.join(mn)))\n le = b.find('0')\n ri = b.rfind('1')\n if le > ri:\n l[5] += 1\nfor o in l:\n print(str(o), end=' ')\n",
"step-4": "n = 1\nip = []\nma = []\nl = [0, 0, 0, 0, 0, 0, 0] # a, b, c, d, e, wpm, pr\nwhile n != 0:\n a = input().strip().split(\"~\")\n n = len(a)\n if n == 1:\n break\n ip.append(a[0])\n ma.append(a[1])\n\nfor i in ip:\n ipn = i.split(\".\")\n try:\n if 1 <= int(ipn[0]) <= 126:\n p = 0\n elif 128 <= int(ipn[0]) <= 191:\n p = 1\n elif 192 <= int(ipn[0]) <= 223:\n p = 2\n elif 224 <= int(ipn[0]) <= 239:\n p = 3\n elif 240 <= int(ipn(0)) <= 255:\n p = 4\n elif int(ipn[0]) == 0 or 127:\n continue\n if 0 <= int(ipn[1]) <= 255:\n if int(ipn[0]) == 10:\n p = 6\n elif int(ipn[0]) == 172 and 16 <= int(ipn[1]) <= 31:\n p = 6\n elif int(ipn[0]) == 192 and int(ipn[1]) == 168:\n p = 6\n if 0 <= int(ipn[2]) <= 255:\n if 0 <= int(ipn[3]) <= 255:\n l[p] += 1\n else:\n l[5] += 1\n else:\n l[5] += 1\n else:\n l[5] += 1\n except:\n l[5] += 1\n \nfor m in ma:\n mn = m.split(\".\")\n b = bin(int(''.join(mn)))\n le = b.find(\"0\")\n ri = b.rfind(\"1\")\n if le > ri:\n l[5] += 1\n\nfor o in l:\n print(str(o),end=\" \")",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
#!/usr/bin/env python
# coding: utf-8
import numpy as np
import copy
import sys
def mutate(genotype_in, mut_matrix):
genotype_out = np.zeros(8)
for i in range(8):
rand_vec = np.random.choice(8, size=int(genotype_in[i]), p=mut_matrix[i,:])
genotype_out+=np.bincount(rand_vec, minlength=8)
return(genotype_out)
def propagate(genotype_in, fitness):
genotype_out = np.zeros(8)
pop_size = genotype_in.sum(dtype=int)
freq_vec = fitness*genotype_in
rand_vec = np.random.choice(8, size=pop_size, p=freq_vec/freq_vec.sum())
genotype_out = np.bincount(rand_vec, minlength=8)
return(genotype_out)
def get_mean_fitness(gt, fitness):
return(np.sum(gt*fitness, axis=1))
def get_mean_fitness3(gt, fitness):
return(np.dot(gt, fitness))
def get_gene_freqs(gt):
gene_freq = np.zeros((gt.shape[0], 3))
for i in range(8):
bin_i = np.binary_repr(i, width=3)
for j in range(3):
if bin_i[j] =='1':
gene_freq[:,j] += gt[:,i]
return(gene_freq)
def get_gene_freqs3(gt):
gene_freq = np.zeros((gt.shape[0], gt.shape[1], 3))
for i in range(8):
bin_i = np.binary_repr(i, width=3)
for j in range(3):
if bin_i[j] =='1':
gene_freq[:,:,j] += gt[:,:,i]
return(gene_freq)
def convert_mut(mp):
# convert 2x3 mutation matrix into 8x8
mut_matrix = np.zeros((8,8))
for i in range(8):
bin_i = np.binary_repr(i, width=3)
for j in range(8):
bin_j = np.binary_repr(j, width=3)
p = 1
for k in range(3):
if int(bin_i[k])>int(bin_j[k]):
p*=mp[1,k]
elif int(bin_i[k])<int(bin_j[k]):
p*=mp[0,k]
mut_matrix[i,j] = p
mut_matrix[i,i] = 2-np.sum(mut_matrix[i,:])
return(mut_matrix)
def convert_fitness(fitness):
# convert 2x2x2 fitness matrix to vector
fitness_vec = np.zeros(8)
for i in range(8):
i_bin = np.binary_repr(i, 3)
j = int(i_bin[0])
k = int(i_bin[1])
l = int(i_bin[2])
fitness_vec[i] = fitness[j,k,l]
return(fitness_vec)
def progressbar(it, prefix="", size=60, file=sys.stdout):
count = len(it)
def show(j):
x = int(size*j/count)
file.write("%s[%s%s] %i/%i\r" % (prefix, "#"*x, "."*(size-x), j, count))
file.flush()
show(0)
for i, item in enumerate(it):
yield item
show(i+1)
file.write("\n")
file.flush()
def run_simulation_parallel(n, gt_in, params, label):
np.random.seed()
generations = params['generations']
gt = np.zeros((generations, 8))
mut_prob = convert_mut(params['mut_prob'][label])
fitness = convert_fitness(params['fitness'][label])
gt[0,:] = gt_in
for i in progressbar(range(1, params['generations']), "Repeat "+str(n+1), 40):
gt_mut = mutate(gt[i-1,:], mut_prob)
gt[i,:] = propagate(gt_mut, fitness)
return(gt)
|
normal
|
{
"blob_id": "9065842a8e90c833278547310f027bc63c7a9a47",
"index": 7557,
"step-1": "<mask token>\n\n\ndef mutate(genotype_in, mut_matrix):\n genotype_out = np.zeros(8)\n for i in range(8):\n rand_vec = np.random.choice(8, size=int(genotype_in[i]), p=\n mut_matrix[i, :])\n genotype_out += np.bincount(rand_vec, minlength=8)\n return genotype_out\n\n\n<mask token>\n\n\ndef get_mean_fitness(gt, fitness):\n return np.sum(gt * fitness, axis=1)\n\n\ndef get_mean_fitness3(gt, fitness):\n return np.dot(gt, fitness)\n\n\ndef get_gene_freqs(gt):\n gene_freq = np.zeros((gt.shape[0], 3))\n for i in range(8):\n bin_i = np.binary_repr(i, width=3)\n for j in range(3):\n if bin_i[j] == '1':\n gene_freq[:, j] += gt[:, i]\n return gene_freq\n\n\ndef get_gene_freqs3(gt):\n gene_freq = np.zeros((gt.shape[0], gt.shape[1], 3))\n for i in range(8):\n bin_i = np.binary_repr(i, width=3)\n for j in range(3):\n if bin_i[j] == '1':\n gene_freq[:, :, j] += gt[:, :, i]\n return gene_freq\n\n\n<mask token>\n\n\ndef progressbar(it, prefix='', size=60, file=sys.stdout):\n count = len(it)\n\n def show(j):\n x = int(size * j / count)\n file.write('%s[%s%s] %i/%i\\r' % (prefix, '#' * x, '.' * (size - x),\n j, count))\n file.flush()\n show(0)\n for i, item in enumerate(it):\n yield item\n show(i + 1)\n file.write('\\n')\n file.flush()\n\n\ndef run_simulation_parallel(n, gt_in, params, label):\n np.random.seed()\n generations = params['generations']\n gt = np.zeros((generations, 8))\n mut_prob = convert_mut(params['mut_prob'][label])\n fitness = convert_fitness(params['fitness'][label])\n gt[0, :] = gt_in\n for i in progressbar(range(1, params['generations']), 'Repeat ' + str(n +\n 1), 40):\n gt_mut = mutate(gt[i - 1, :], mut_prob)\n gt[i, :] = propagate(gt_mut, fitness)\n return gt\n",
"step-2": "<mask token>\n\n\ndef mutate(genotype_in, mut_matrix):\n genotype_out = np.zeros(8)\n for i in range(8):\n rand_vec = np.random.choice(8, size=int(genotype_in[i]), p=\n mut_matrix[i, :])\n genotype_out += np.bincount(rand_vec, minlength=8)\n return genotype_out\n\n\n<mask token>\n\n\ndef get_mean_fitness(gt, fitness):\n return np.sum(gt * fitness, axis=1)\n\n\ndef get_mean_fitness3(gt, fitness):\n return np.dot(gt, fitness)\n\n\ndef get_gene_freqs(gt):\n gene_freq = np.zeros((gt.shape[0], 3))\n for i in range(8):\n bin_i = np.binary_repr(i, width=3)\n for j in range(3):\n if bin_i[j] == '1':\n gene_freq[:, j] += gt[:, i]\n return gene_freq\n\n\ndef get_gene_freqs3(gt):\n gene_freq = np.zeros((gt.shape[0], gt.shape[1], 3))\n for i in range(8):\n bin_i = np.binary_repr(i, width=3)\n for j in range(3):\n if bin_i[j] == '1':\n gene_freq[:, :, j] += gt[:, :, i]\n return gene_freq\n\n\n<mask token>\n\n\ndef convert_fitness(fitness):\n fitness_vec = np.zeros(8)\n for i in range(8):\n i_bin = np.binary_repr(i, 3)\n j = int(i_bin[0])\n k = int(i_bin[1])\n l = int(i_bin[2])\n fitness_vec[i] = fitness[j, k, l]\n return fitness_vec\n\n\ndef progressbar(it, prefix='', size=60, file=sys.stdout):\n count = len(it)\n\n def show(j):\n x = int(size * j / count)\n file.write('%s[%s%s] %i/%i\\r' % (prefix, '#' * x, '.' * (size - x),\n j, count))\n file.flush()\n show(0)\n for i, item in enumerate(it):\n yield item\n show(i + 1)\n file.write('\\n')\n file.flush()\n\n\ndef run_simulation_parallel(n, gt_in, params, label):\n np.random.seed()\n generations = params['generations']\n gt = np.zeros((generations, 8))\n mut_prob = convert_mut(params['mut_prob'][label])\n fitness = convert_fitness(params['fitness'][label])\n gt[0, :] = gt_in\n for i in progressbar(range(1, params['generations']), 'Repeat ' + str(n +\n 1), 40):\n gt_mut = mutate(gt[i - 1, :], mut_prob)\n gt[i, :] = propagate(gt_mut, fitness)\n return gt\n",
"step-3": "<mask token>\n\n\ndef mutate(genotype_in, mut_matrix):\n genotype_out = np.zeros(8)\n for i in range(8):\n rand_vec = np.random.choice(8, size=int(genotype_in[i]), p=\n mut_matrix[i, :])\n genotype_out += np.bincount(rand_vec, minlength=8)\n return genotype_out\n\n\ndef propagate(genotype_in, fitness):\n genotype_out = np.zeros(8)\n pop_size = genotype_in.sum(dtype=int)\n freq_vec = fitness * genotype_in\n rand_vec = np.random.choice(8, size=pop_size, p=freq_vec / freq_vec.sum())\n genotype_out = np.bincount(rand_vec, minlength=8)\n return genotype_out\n\n\ndef get_mean_fitness(gt, fitness):\n return np.sum(gt * fitness, axis=1)\n\n\ndef get_mean_fitness3(gt, fitness):\n return np.dot(gt, fitness)\n\n\ndef get_gene_freqs(gt):\n gene_freq = np.zeros((gt.shape[0], 3))\n for i in range(8):\n bin_i = np.binary_repr(i, width=3)\n for j in range(3):\n if bin_i[j] == '1':\n gene_freq[:, j] += gt[:, i]\n return gene_freq\n\n\ndef get_gene_freqs3(gt):\n gene_freq = np.zeros((gt.shape[0], gt.shape[1], 3))\n for i in range(8):\n bin_i = np.binary_repr(i, width=3)\n for j in range(3):\n if bin_i[j] == '1':\n gene_freq[:, :, j] += gt[:, :, i]\n return gene_freq\n\n\ndef convert_mut(mp):\n mut_matrix = np.zeros((8, 8))\n for i in range(8):\n bin_i = np.binary_repr(i, width=3)\n for j in range(8):\n bin_j = np.binary_repr(j, width=3)\n p = 1\n for k in range(3):\n if int(bin_i[k]) > int(bin_j[k]):\n p *= mp[1, k]\n elif int(bin_i[k]) < int(bin_j[k]):\n p *= mp[0, k]\n mut_matrix[i, j] = p\n mut_matrix[i, i] = 2 - np.sum(mut_matrix[i, :])\n return mut_matrix\n\n\ndef convert_fitness(fitness):\n fitness_vec = np.zeros(8)\n for i in range(8):\n i_bin = np.binary_repr(i, 3)\n j = int(i_bin[0])\n k = int(i_bin[1])\n l = int(i_bin[2])\n fitness_vec[i] = fitness[j, k, l]\n return fitness_vec\n\n\ndef progressbar(it, prefix='', size=60, file=sys.stdout):\n count = len(it)\n\n def show(j):\n x = int(size * j / count)\n file.write('%s[%s%s] %i/%i\\r' % (prefix, '#' * x, '.' * (size - x),\n j, count))\n file.flush()\n show(0)\n for i, item in enumerate(it):\n yield item\n show(i + 1)\n file.write('\\n')\n file.flush()\n\n\ndef run_simulation_parallel(n, gt_in, params, label):\n np.random.seed()\n generations = params['generations']\n gt = np.zeros((generations, 8))\n mut_prob = convert_mut(params['mut_prob'][label])\n fitness = convert_fitness(params['fitness'][label])\n gt[0, :] = gt_in\n for i in progressbar(range(1, params['generations']), 'Repeat ' + str(n +\n 1), 40):\n gt_mut = mutate(gt[i - 1, :], mut_prob)\n gt[i, :] = propagate(gt_mut, fitness)\n return gt\n",
"step-4": "import numpy as np\nimport copy\nimport sys\n\n\ndef mutate(genotype_in, mut_matrix):\n genotype_out = np.zeros(8)\n for i in range(8):\n rand_vec = np.random.choice(8, size=int(genotype_in[i]), p=\n mut_matrix[i, :])\n genotype_out += np.bincount(rand_vec, minlength=8)\n return genotype_out\n\n\ndef propagate(genotype_in, fitness):\n genotype_out = np.zeros(8)\n pop_size = genotype_in.sum(dtype=int)\n freq_vec = fitness * genotype_in\n rand_vec = np.random.choice(8, size=pop_size, p=freq_vec / freq_vec.sum())\n genotype_out = np.bincount(rand_vec, minlength=8)\n return genotype_out\n\n\ndef get_mean_fitness(gt, fitness):\n return np.sum(gt * fitness, axis=1)\n\n\ndef get_mean_fitness3(gt, fitness):\n return np.dot(gt, fitness)\n\n\ndef get_gene_freqs(gt):\n gene_freq = np.zeros((gt.shape[0], 3))\n for i in range(8):\n bin_i = np.binary_repr(i, width=3)\n for j in range(3):\n if bin_i[j] == '1':\n gene_freq[:, j] += gt[:, i]\n return gene_freq\n\n\ndef get_gene_freqs3(gt):\n gene_freq = np.zeros((gt.shape[0], gt.shape[1], 3))\n for i in range(8):\n bin_i = np.binary_repr(i, width=3)\n for j in range(3):\n if bin_i[j] == '1':\n gene_freq[:, :, j] += gt[:, :, i]\n return gene_freq\n\n\ndef convert_mut(mp):\n mut_matrix = np.zeros((8, 8))\n for i in range(8):\n bin_i = np.binary_repr(i, width=3)\n for j in range(8):\n bin_j = np.binary_repr(j, width=3)\n p = 1\n for k in range(3):\n if int(bin_i[k]) > int(bin_j[k]):\n p *= mp[1, k]\n elif int(bin_i[k]) < int(bin_j[k]):\n p *= mp[0, k]\n mut_matrix[i, j] = p\n mut_matrix[i, i] = 2 - np.sum(mut_matrix[i, :])\n return mut_matrix\n\n\ndef convert_fitness(fitness):\n fitness_vec = np.zeros(8)\n for i in range(8):\n i_bin = np.binary_repr(i, 3)\n j = int(i_bin[0])\n k = int(i_bin[1])\n l = int(i_bin[2])\n fitness_vec[i] = fitness[j, k, l]\n return fitness_vec\n\n\ndef progressbar(it, prefix='', size=60, file=sys.stdout):\n count = len(it)\n\n def show(j):\n x = int(size * j / count)\n file.write('%s[%s%s] %i/%i\\r' % (prefix, '#' * x, '.' * (size - x),\n j, count))\n file.flush()\n show(0)\n for i, item in enumerate(it):\n yield item\n show(i + 1)\n file.write('\\n')\n file.flush()\n\n\ndef run_simulation_parallel(n, gt_in, params, label):\n np.random.seed()\n generations = params['generations']\n gt = np.zeros((generations, 8))\n mut_prob = convert_mut(params['mut_prob'][label])\n fitness = convert_fitness(params['fitness'][label])\n gt[0, :] = gt_in\n for i in progressbar(range(1, params['generations']), 'Repeat ' + str(n +\n 1), 40):\n gt_mut = mutate(gt[i - 1, :], mut_prob)\n gt[i, :] = propagate(gt_mut, fitness)\n return gt\n",
"step-5": "#!/usr/bin/env python\n# coding: utf-8\n\nimport numpy as np\nimport copy\nimport sys\n\n\ndef mutate(genotype_in, mut_matrix):\n genotype_out = np.zeros(8)\n for i in range(8):\n rand_vec = np.random.choice(8, size=int(genotype_in[i]), p=mut_matrix[i,:])\n genotype_out+=np.bincount(rand_vec, minlength=8)\n return(genotype_out)\n\ndef propagate(genotype_in, fitness):\n genotype_out = np.zeros(8)\n pop_size = genotype_in.sum(dtype=int)\n freq_vec = fitness*genotype_in\n rand_vec = np.random.choice(8, size=pop_size, p=freq_vec/freq_vec.sum())\n genotype_out = np.bincount(rand_vec, minlength=8)\n return(genotype_out)\n\ndef get_mean_fitness(gt, fitness):\n return(np.sum(gt*fitness, axis=1))\n\ndef get_mean_fitness3(gt, fitness):\n return(np.dot(gt, fitness))\n\ndef get_gene_freqs(gt):\n gene_freq = np.zeros((gt.shape[0], 3))\n for i in range(8):\n bin_i = np.binary_repr(i, width=3)\n for j in range(3):\n if bin_i[j] =='1':\n gene_freq[:,j] += gt[:,i]\n return(gene_freq)\n \ndef get_gene_freqs3(gt):\n gene_freq = np.zeros((gt.shape[0], gt.shape[1], 3))\n for i in range(8):\n bin_i = np.binary_repr(i, width=3)\n for j in range(3):\n if bin_i[j] =='1':\n gene_freq[:,:,j] += gt[:,:,i]\n return(gene_freq)\n \ndef convert_mut(mp):\n # convert 2x3 mutation matrix into 8x8\n mut_matrix = np.zeros((8,8))\n for i in range(8):\n bin_i = np.binary_repr(i, width=3)\n for j in range(8):\n bin_j = np.binary_repr(j, width=3) \n p = 1 \n for k in range(3):\n if int(bin_i[k])>int(bin_j[k]):\n p*=mp[1,k]\n elif int(bin_i[k])<int(bin_j[k]):\n p*=mp[0,k]\n mut_matrix[i,j] = p\n mut_matrix[i,i] = 2-np.sum(mut_matrix[i,:])\n return(mut_matrix)\n\ndef convert_fitness(fitness):\n # convert 2x2x2 fitness matrix to vector\n fitness_vec = np.zeros(8)\n for i in range(8):\n i_bin = np.binary_repr(i, 3)\n j = int(i_bin[0])\n k = int(i_bin[1])\n l = int(i_bin[2])\n fitness_vec[i] = fitness[j,k,l]\n return(fitness_vec)\n\ndef progressbar(it, prefix=\"\", size=60, file=sys.stdout):\n count = len(it)\n def show(j):\n x = int(size*j/count)\n file.write(\"%s[%s%s] %i/%i\\r\" % (prefix, \"#\"*x, \".\"*(size-x), j, count))\n file.flush() \n show(0)\n for i, item in enumerate(it):\n yield item\n show(i+1)\n file.write(\"\\n\")\n file.flush()\n \ndef run_simulation_parallel(n, gt_in, params, label):\n np.random.seed()\n generations = params['generations']\n gt = np.zeros((generations, 8))\n mut_prob = convert_mut(params['mut_prob'][label])\n fitness = convert_fitness(params['fitness'][label]) \n gt[0,:] = gt_in\n for i in progressbar(range(1, params['generations']), \"Repeat \"+str(n+1), 40): \n gt_mut = mutate(gt[i-1,:], mut_prob)\n gt[i,:] = propagate(gt_mut, fitness)\n return(gt)",
"step-ids": [
7,
8,
10,
11,
12
]
}
|
[
7,
8,
10,
11,
12
] |
import discord
from collections import Counter
from db import readDB, writeDB
INFO_DB_SUCCESS = 'Database updated successfully!'
ERROR_DB_ERROR = 'Error: Unable to open database for writing'
ERROR_DB_NOT_FOUND = 'Error: Database for specified game does not exist. Check your spelling or use !addgame first.'
ERROR_PLAYER_NOT_FOUND = 'Error: \"%s\" not found in database. Check your spelling or use !addplayer first.'
ERROR_WIN_IN_LOSE = 'Error: \"%s\" already specified as winner.'
ERROR_DUP_LOSER = 'Error: \"%s\" duplicated in losers list'
ERROR_IN_DB = 'Error: \"%s\" is already in the database'
ERROR_SORT_ERROR = 'Error while sorting list. Make sure all players have at least one win or loss.\n'
ERROR_INVALID_SORT = 'Error: Invalid sorting type. Displaying stats as stored.\n'
# desc: function to search a list of lists for a name
# args: name - the name to search the lists for
# searchList - a list of lists to search for a name
# retn: the index of the list containing the name or -1 if not found
def getIndex(name, searchList):
for i in range(0, len(searchList)):
if name in searchList[i]:
return i
return -1
# desc: function to round a number up to a specific increment. for example,
# rounding 11 to the nearest multiple of 2 would result in 12
# args: num - the number to round up
# multiple - the increment to round to
# retn: the rounded number
def roundMultiple(num, multiple):
if num % multiple:
return num + (multiple - (num % multiple))
return num
# desc: function to find duplicate items in a list
# args: inputList - a list to search for duplicates
# retn: a list containing the duplicates
def findDuplicates(inputList):
dupList = [k for k, v in Counter(inputList).items() if v > 1]
return dupList
# desc: function to update the database
# args: msgChannel - the channel the invoking message was sent from
# statsFile - the name of the database file
# winner - a string containing the winner's name
# losers - a list of strings containing the losers' names
# retn: a string indicating success or failure
def incrementStats(msgChannel, statsFile, winner, losers):
# read the database
data = readDB(statsFile)
# return an error if database not found
if data == 0:
return ERROR_DB_NOT_FOUND
rows = data.rows
# check if the winner is actually in the database
if getIndex(winner, rows) < 0:
print('[ERROR] Winner \"%s\" not found in database' % winner)
return (ERROR_PLAYER_NOT_FOUND % winner)
# check if losers are in database
for loser in losers:
# get loser index
loserIndex = getIndex(loser, rows)
# check against winner to see if the name was duplicated
if loser == winner:
print('[ERROR] Winner duplicated in losers field')
return (ERROR_WIN_IN_LOSE % loser)
# check if loser was not found in database
if loserIndex < 0:
print('[ERROR] Loser \"%s\" not found in database' % loser)
return (ERROR_PLAYER_NOT_FOUND % loser)
# check for duplicate losers
dupList = findDuplicates(losers)
if len(dupList) > 0:
print('[ERROR] Duplicate losers found')
return (ERROR_DUP_LOSER % dupList)
# update stats if we found the winner and all losers
# get index, get win count, increment and update
winnerIndex = getIndex(winner, rows)
winnerVal = int(rows[winnerIndex][1])
rows[winnerIndex][1] = str(winnerVal + 1)
# same as winner for each loser
for loser in losers:
loserIndex = getIndex(loser, rows)
loserVal = int(rows[loserIndex][2])
rows[loserIndex][2] = str(loserVal + 1)
# write the new data to the database file
if writeDB(statsFile, data.headers, rows):
return INFO_DB_SUCCESS
else:
print('[INFO] Database not updated')
return ERROR_DB_ERROR
# desc: function to add a player to the database or edit an existing player
# args: msgChannel - the channel the invoking message was sent from
# statsFile - the name of the database file
# player - the name of the player to either add to the db or edit
# editType - either 'ADD' or 'EDIT' or 'REMOVE' - sets type of change happening
# wins - the number of wins to assign the player
# losses - the number of losses to assign the player
# retn: a string indicating success or failure
def editPlayer(msgChannel, statsFile, player, editType, wins='0', losses='0'):
# open up the database
data = readDB(statsFile)
# return an error if database not found
if data == 0:
return ERROR_DB_NOT_FOUND
rows = data.rows
playerIndex = getIndex(player, rows)
    # dispatch on the requested edit type ('ADD', 'EDIT' or 'REMOVE')
if editType == 'ADD':
if playerIndex > -1:
print('[ERROR] \"%s\" already in database' % player)
print('[INFO] Database not updated')
return (ERROR_IN_DB % player)
else:
# add player to list and resort
rows.append([player, wins, losses])
rows.sort(key=lambda name: name[0].capitalize())
# write the new data to the database file
if writeDB(statsFile, data.headers, rows):
print('[INFO] \"%s\" added to database' % player)
return INFO_DB_SUCCESS
else:
print('[INFO] Database not updated')
return ERROR_DB_ERROR
elif editType == 'EDIT':
if playerIndex < 0:
print('[ERROR] \"%s\" not found in database' % player)
print('[INFO] Database not updated')
return (ERROR_PLAYER_NOT_FOUND % player)
else:
rows[playerIndex] = [rows[playerIndex][0], wins, losses]
# write the new data to the database file
if writeDB(statsFile, data.headers, rows):
print('[INFO] %s\'s data changed' % player)
return INFO_DB_SUCCESS
else:
print('[INFO] Database not updated')
return ERROR_DB_ERROR
elif editType == 'REMOVE':
if playerIndex < 0:
print('[ERROR] \"%s\" not found in database' % player)
print('[INFO] Database not updated')
return (ERROR_PLAYER_NOT_FOUND % player)
else:
# delete player from list
            del rows[playerIndex]
# write the new data to the database
if writeDB(statsFile, data.headers, rows):
print('[INFO] \"%s\" removed from database' % player)
return INFO_DB_SUCCESS
else:
print('[INFO] Database not updated')
return ERROR_DB_ERROR
# desc: function to display the stats
# args: msgChannel - the channel the invoking message was sent from
# statsFile - the name of the database file
# sortType - the order in which the results should be sorted.
# options are 'WINRATE', 'WINS', 'LOSSES', or 'NAME'.
# will revert to 'NAME' if invalid
# player - NOT IMPLEMENTED - the player to display stats for
# retn: a string formatted with the database stats
def dumpStats(msgChannel, statsFile, sortType='WINRATE', player='ALL'):
# read database
data = readDB(statsFile)
# return an error if database not found
if data == 0:
return ERROR_DB_NOT_FOUND
rows = data.rows
print('[INFO] Sort type is %s' % sortType)
returnMsg = ''
if sortType == 'WINRATE' or sortType == 'NONE':
# sort data by win rate
try:
rows.sort(key=lambda rate: float(rate[1]) / (float(rate[1]) + float(rate[2])), reverse=True)
except ZeroDivisionError:
print('[ERROR] Tried to divide by zero because of blank player data')
returnMsg = ERROR_SORT_ERROR
elif sortType == 'WINS':
# sort by number of wins and reverse so max is first
rows.sort(key=lambda wins: float(wins[1]), reverse=True)
elif sortType == 'LOSSES':
# sort by number of losses and reverse so max is first
rows.sort(key=lambda losses: float(losses[2]), reverse=True)
elif sortType == 'NAME':
# database is stored sorted by name so dont do anything
pass
else:
print('[ERROR] Invalid sorting type specified. Displaying stats as stored')
returnMsg = ERROR_INVALID_SORT
if player == 'ALL':
# get max player length
maxPlayerLen = 0
for player in rows:
if len(player[0]) > maxPlayerLen:
maxPlayerLen = len(player[0])
# construct a string with all the player info
playerString = ''
        # adjust start spacing depending on whether the longest name is odd or even in length, so names line up with the header pipe
startSpace = 4 if maxPlayerLen % 2 else 3
for player in rows:
playerName = player[0].capitalize().rjust(maxPlayerLen + startSpace)
winCount = player[1].rjust(7)
loseCount = player[2].rjust(9)
# calculate win rate
if float(winCount) <= 0:
winRate = '0'
elif float(loseCount) <= 0:
winRate = ' 100'
else:
winRate = str((float(winCount) / (float(winCount) + float(loseCount))) * 100)
# truncate win rate and create string with player info
winRate = winRate[0:4].rjust(9)
playerString += playerName + winCount + loseCount + winRate + ' %\n'
# calculate padding for name field and create header final strings
namePaddingLen = roundMultiple((maxPlayerLen + 2), 2)
header = ' |' + 'Name'.center(namePaddingLen) + '| Wins | Losses | Win Rate |\n'
divider = ('-' * len(header)) + '\n'
sendString = '```md\n' + header + divider + playerString + '```'
# return the constructed string
if len(returnMsg) > 0:
returnMsg = returnMsg + sendString
return returnMsg
return sendString
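# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): a quick manual check of
# the helpers above. It assumes a game database named 'overwatch' already exists
# (e.g. created through the bot's !addgame flow); the channel argument is unused
# by these helpers, so None is passed.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    print(editPlayer(None, 'overwatch', 'alice', 'ADD'))
    print(editPlayer(None, 'overwatch', 'bob', 'ADD'))
    print(incrementStats(None, 'overwatch', 'alice', ['bob']))
    print(dumpStats(None, 'overwatch', sortType='WINS'))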
|
normal
|
{
"blob_id": "5869669f1e3f648c0ddc68683f0b1d2754b40169",
"index": 8714,
"step-1": "<mask token>\n\n\ndef roundMultiple(num, multiple):\n if num % multiple:\n return num + (multiple - num % multiple)\n return num\n\n\n<mask token>\n\n\ndef incrementStats(msgChannel, statsFile, winner, losers):\n data = readDB(statsFile)\n if data == 0:\n return ERROR_DB_NOT_FOUND\n rows = data.rows\n if getIndex(winner, rows) < 0:\n print('[ERROR] Winner \"%s\" not found in database' % winner)\n return ERROR_PLAYER_NOT_FOUND % winner\n for loser in losers:\n loserIndex = getIndex(loser, rows)\n if loser == winner:\n print('[ERROR] Winner duplicated in losers field')\n return ERROR_WIN_IN_LOSE % loser\n if loserIndex < 0:\n print('[ERROR] Loser \"%s\" not found in database' % loser)\n return ERROR_PLAYER_NOT_FOUND % loser\n dupList = findDuplicates(losers)\n if len(dupList) > 0:\n print('[ERROR] Duplicate losers found')\n return ERROR_DUP_LOSER % dupList\n winnerIndex = getIndex(winner, rows)\n winnerVal = int(rows[winnerIndex][1])\n rows[winnerIndex][1] = str(winnerVal + 1)\n for loser in losers:\n loserIndex = getIndex(loser, rows)\n loserVal = int(rows[loserIndex][2])\n rows[loserIndex][2] = str(loserVal + 1)\n if writeDB(statsFile, data.headers, rows):\n return INFO_DB_SUCCESS\n else:\n print('[INFO] Database not updated')\n return ERROR_DB_ERROR\n\n\ndef editPlayer(msgChannel, statsFile, player, editType, wins='0', losses='0'):\n data = readDB(statsFile)\n if data == 0:\n return ERROR_DB_NOT_FOUND\n rows = data.rows\n playerIndex = getIndex(player, rows)\n if editType == 'ADD':\n if playerIndex > -1:\n print('[ERROR] \"%s\" already in database' % player)\n print('[INFO] Database not updated')\n return ERROR_IN_DB % player\n else:\n rows.append([player, wins, losses])\n rows.sort(key=lambda name: name[0].capitalize())\n if writeDB(statsFile, data.headers, rows):\n print('[INFO] \"%s\" added to database' % player)\n return INFO_DB_SUCCESS\n else:\n print('[INFO] Database not updated')\n return ERROR_DB_ERROR\n elif editType == 'EDIT':\n if playerIndex < 0:\n print('[ERROR] \"%s\" not found in database' % player)\n print('[INFO] Database not updated')\n return ERROR_PLAYER_NOT_FOUND % player\n else:\n rows[playerIndex] = [rows[playerIndex][0], wins, losses]\n if writeDB(statsFile, data.headers, rows):\n print(\"[INFO] %s's data changed\" % player)\n return INFO_DB_SUCCESS\n else:\n print('[INFO] Database not updated')\n return ERROR_DB_ERROR\n elif editType == 'REMOVE':\n if playerIndex < 0:\n print('[ERROR] \"%s\" not found in database' % player)\n print('[INFO] Database not updated')\n return ERROR_PLAYER_NOT_FOUND % player\n else:\n del rows[playerIndex]\n if writeDB(statsFile, data.headers, rows):\n print('[INFO] \"%s\" removed from database' % player)\n return INFO_DB_SUCCESS\n else:\n print('[INFO] Database not updated')\n return ERROR_DB_ERROR\n\n\ndef dumpStats(msgChannel, statsFile, sortType='WINRATE', player='ALL'):\n data = readDB(statsFile)\n if data == 0:\n return ERROR_DB_NOT_FOUND\n rows = data.rows\n print('[INFO] Sort type is %s' % sortType)\n returnMsg = ''\n if sortType == 'WINRATE' or sortType == 'NONE':\n try:\n rows.sort(key=lambda rate: float(rate[1]) / (float(rate[1]) +\n float(rate[2])), reverse=True)\n except ZeroDivisionError:\n print(\n '[ERROR] Tried to divide by zero because of blank player data')\n returnMsg = ERROR_SORT_ERROR\n elif sortType == 'WINS':\n rows.sort(key=lambda wins: float(wins[1]), reverse=True)\n elif sortType == 'LOSSES':\n rows.sort(key=lambda losses: float(losses[2]), reverse=True)\n elif sortType == 'NAME':\n pass\n else:\n 
print(\n '[ERROR] Invalid sorting type specified. Displaying stats as stored'\n )\n returnMsg = ERROR_INVALID_SORT\n if player == 'ALL':\n maxPlayerLen = 0\n for player in rows:\n if len(player[0]) > maxPlayerLen:\n maxPlayerLen = len(player[0])\n playerString = ''\n startSpace = 4 if maxPlayerLen % 2 else 3\n for player in rows:\n playerName = player[0].capitalize().rjust(maxPlayerLen + startSpace\n )\n winCount = player[1].rjust(7)\n loseCount = player[2].rjust(9)\n if float(winCount) <= 0:\n winRate = '0'\n elif float(loseCount) <= 0:\n winRate = ' 100'\n else:\n winRate = str(float(winCount) / (float(winCount) + float(\n loseCount)) * 100)\n winRate = winRate[0:4].rjust(9)\n playerString += (playerName + winCount + loseCount + winRate +\n ' %\\n')\n namePaddingLen = roundMultiple(maxPlayerLen + 2, 2)\n header = ' |' + 'Name'.center(namePaddingLen\n ) + '| Wins | Losses | Win Rate |\\n'\n divider = '-' * len(header) + '\\n'\n sendString = '```md\\n' + header + divider + playerString + '```'\n if len(returnMsg) > 0:\n returnMsg = returnMsg + sendString\n return returnMsg\n return sendString\n",
"step-2": "<mask token>\n\n\ndef roundMultiple(num, multiple):\n if num % multiple:\n return num + (multiple - num % multiple)\n return num\n\n\ndef findDuplicates(inputList):\n dupList = [k for k, v in Counter(inputList).items() if v > 1]\n return dupList\n\n\ndef incrementStats(msgChannel, statsFile, winner, losers):\n data = readDB(statsFile)\n if data == 0:\n return ERROR_DB_NOT_FOUND\n rows = data.rows\n if getIndex(winner, rows) < 0:\n print('[ERROR] Winner \"%s\" not found in database' % winner)\n return ERROR_PLAYER_NOT_FOUND % winner\n for loser in losers:\n loserIndex = getIndex(loser, rows)\n if loser == winner:\n print('[ERROR] Winner duplicated in losers field')\n return ERROR_WIN_IN_LOSE % loser\n if loserIndex < 0:\n print('[ERROR] Loser \"%s\" not found in database' % loser)\n return ERROR_PLAYER_NOT_FOUND % loser\n dupList = findDuplicates(losers)\n if len(dupList) > 0:\n print('[ERROR] Duplicate losers found')\n return ERROR_DUP_LOSER % dupList\n winnerIndex = getIndex(winner, rows)\n winnerVal = int(rows[winnerIndex][1])\n rows[winnerIndex][1] = str(winnerVal + 1)\n for loser in losers:\n loserIndex = getIndex(loser, rows)\n loserVal = int(rows[loserIndex][2])\n rows[loserIndex][2] = str(loserVal + 1)\n if writeDB(statsFile, data.headers, rows):\n return INFO_DB_SUCCESS\n else:\n print('[INFO] Database not updated')\n return ERROR_DB_ERROR\n\n\ndef editPlayer(msgChannel, statsFile, player, editType, wins='0', losses='0'):\n data = readDB(statsFile)\n if data == 0:\n return ERROR_DB_NOT_FOUND\n rows = data.rows\n playerIndex = getIndex(player, rows)\n if editType == 'ADD':\n if playerIndex > -1:\n print('[ERROR] \"%s\" already in database' % player)\n print('[INFO] Database not updated')\n return ERROR_IN_DB % player\n else:\n rows.append([player, wins, losses])\n rows.sort(key=lambda name: name[0].capitalize())\n if writeDB(statsFile, data.headers, rows):\n print('[INFO] \"%s\" added to database' % player)\n return INFO_DB_SUCCESS\n else:\n print('[INFO] Database not updated')\n return ERROR_DB_ERROR\n elif editType == 'EDIT':\n if playerIndex < 0:\n print('[ERROR] \"%s\" not found in database' % player)\n print('[INFO] Database not updated')\n return ERROR_PLAYER_NOT_FOUND % player\n else:\n rows[playerIndex] = [rows[playerIndex][0], wins, losses]\n if writeDB(statsFile, data.headers, rows):\n print(\"[INFO] %s's data changed\" % player)\n return INFO_DB_SUCCESS\n else:\n print('[INFO] Database not updated')\n return ERROR_DB_ERROR\n elif editType == 'REMOVE':\n if playerIndex < 0:\n print('[ERROR] \"%s\" not found in database' % player)\n print('[INFO] Database not updated')\n return ERROR_PLAYER_NOT_FOUND % player\n else:\n del rows[playerIndex]\n if writeDB(statsFile, data.headers, rows):\n print('[INFO] \"%s\" removed from database' % player)\n return INFO_DB_SUCCESS\n else:\n print('[INFO] Database not updated')\n return ERROR_DB_ERROR\n\n\ndef dumpStats(msgChannel, statsFile, sortType='WINRATE', player='ALL'):\n data = readDB(statsFile)\n if data == 0:\n return ERROR_DB_NOT_FOUND\n rows = data.rows\n print('[INFO] Sort type is %s' % sortType)\n returnMsg = ''\n if sortType == 'WINRATE' or sortType == 'NONE':\n try:\n rows.sort(key=lambda rate: float(rate[1]) / (float(rate[1]) +\n float(rate[2])), reverse=True)\n except ZeroDivisionError:\n print(\n '[ERROR] Tried to divide by zero because of blank player data')\n returnMsg = ERROR_SORT_ERROR\n elif sortType == 'WINS':\n rows.sort(key=lambda wins: float(wins[1]), reverse=True)\n elif sortType == 'LOSSES':\n 
rows.sort(key=lambda losses: float(losses[2]), reverse=True)\n elif sortType == 'NAME':\n pass\n else:\n print(\n '[ERROR] Invalid sorting type specified. Displaying stats as stored'\n )\n returnMsg = ERROR_INVALID_SORT\n if player == 'ALL':\n maxPlayerLen = 0\n for player in rows:\n if len(player[0]) > maxPlayerLen:\n maxPlayerLen = len(player[0])\n playerString = ''\n startSpace = 4 if maxPlayerLen % 2 else 3\n for player in rows:\n playerName = player[0].capitalize().rjust(maxPlayerLen + startSpace\n )\n winCount = player[1].rjust(7)\n loseCount = player[2].rjust(9)\n if float(winCount) <= 0:\n winRate = '0'\n elif float(loseCount) <= 0:\n winRate = ' 100'\n else:\n winRate = str(float(winCount) / (float(winCount) + float(\n loseCount)) * 100)\n winRate = winRate[0:4].rjust(9)\n playerString += (playerName + winCount + loseCount + winRate +\n ' %\\n')\n namePaddingLen = roundMultiple(maxPlayerLen + 2, 2)\n header = ' |' + 'Name'.center(namePaddingLen\n ) + '| Wins | Losses | Win Rate |\\n'\n divider = '-' * len(header) + '\\n'\n sendString = '```md\\n' + header + divider + playerString + '```'\n if len(returnMsg) > 0:\n returnMsg = returnMsg + sendString\n return returnMsg\n return sendString\n",
"step-3": "<mask token>\n\n\ndef getIndex(name, searchList):\n for i in range(0, len(searchList)):\n if name in searchList[i]:\n return i\n return -1\n\n\ndef roundMultiple(num, multiple):\n if num % multiple:\n return num + (multiple - num % multiple)\n return num\n\n\ndef findDuplicates(inputList):\n dupList = [k for k, v in Counter(inputList).items() if v > 1]\n return dupList\n\n\ndef incrementStats(msgChannel, statsFile, winner, losers):\n data = readDB(statsFile)\n if data == 0:\n return ERROR_DB_NOT_FOUND\n rows = data.rows\n if getIndex(winner, rows) < 0:\n print('[ERROR] Winner \"%s\" not found in database' % winner)\n return ERROR_PLAYER_NOT_FOUND % winner\n for loser in losers:\n loserIndex = getIndex(loser, rows)\n if loser == winner:\n print('[ERROR] Winner duplicated in losers field')\n return ERROR_WIN_IN_LOSE % loser\n if loserIndex < 0:\n print('[ERROR] Loser \"%s\" not found in database' % loser)\n return ERROR_PLAYER_NOT_FOUND % loser\n dupList = findDuplicates(losers)\n if len(dupList) > 0:\n print('[ERROR] Duplicate losers found')\n return ERROR_DUP_LOSER % dupList\n winnerIndex = getIndex(winner, rows)\n winnerVal = int(rows[winnerIndex][1])\n rows[winnerIndex][1] = str(winnerVal + 1)\n for loser in losers:\n loserIndex = getIndex(loser, rows)\n loserVal = int(rows[loserIndex][2])\n rows[loserIndex][2] = str(loserVal + 1)\n if writeDB(statsFile, data.headers, rows):\n return INFO_DB_SUCCESS\n else:\n print('[INFO] Database not updated')\n return ERROR_DB_ERROR\n\n\ndef editPlayer(msgChannel, statsFile, player, editType, wins='0', losses='0'):\n data = readDB(statsFile)\n if data == 0:\n return ERROR_DB_NOT_FOUND\n rows = data.rows\n playerIndex = getIndex(player, rows)\n if editType == 'ADD':\n if playerIndex > -1:\n print('[ERROR] \"%s\" already in database' % player)\n print('[INFO] Database not updated')\n return ERROR_IN_DB % player\n else:\n rows.append([player, wins, losses])\n rows.sort(key=lambda name: name[0].capitalize())\n if writeDB(statsFile, data.headers, rows):\n print('[INFO] \"%s\" added to database' % player)\n return INFO_DB_SUCCESS\n else:\n print('[INFO] Database not updated')\n return ERROR_DB_ERROR\n elif editType == 'EDIT':\n if playerIndex < 0:\n print('[ERROR] \"%s\" not found in database' % player)\n print('[INFO] Database not updated')\n return ERROR_PLAYER_NOT_FOUND % player\n else:\n rows[playerIndex] = [rows[playerIndex][0], wins, losses]\n if writeDB(statsFile, data.headers, rows):\n print(\"[INFO] %s's data changed\" % player)\n return INFO_DB_SUCCESS\n else:\n print('[INFO] Database not updated')\n return ERROR_DB_ERROR\n elif editType == 'REMOVE':\n if playerIndex < 0:\n print('[ERROR] \"%s\" not found in database' % player)\n print('[INFO] Database not updated')\n return ERROR_PLAYER_NOT_FOUND % player\n else:\n del rows[playerIndex]\n if writeDB(statsFile, data.headers, rows):\n print('[INFO] \"%s\" removed from database' % player)\n return INFO_DB_SUCCESS\n else:\n print('[INFO] Database not updated')\n return ERROR_DB_ERROR\n\n\ndef dumpStats(msgChannel, statsFile, sortType='WINRATE', player='ALL'):\n data = readDB(statsFile)\n if data == 0:\n return ERROR_DB_NOT_FOUND\n rows = data.rows\n print('[INFO] Sort type is %s' % sortType)\n returnMsg = ''\n if sortType == 'WINRATE' or sortType == 'NONE':\n try:\n rows.sort(key=lambda rate: float(rate[1]) / (float(rate[1]) +\n float(rate[2])), reverse=True)\n except ZeroDivisionError:\n print(\n '[ERROR] Tried to divide by zero because of blank player data')\n returnMsg = 
ERROR_SORT_ERROR\n elif sortType == 'WINS':\n rows.sort(key=lambda wins: float(wins[1]), reverse=True)\n elif sortType == 'LOSSES':\n rows.sort(key=lambda losses: float(losses[2]), reverse=True)\n elif sortType == 'NAME':\n pass\n else:\n print(\n '[ERROR] Invalid sorting type specified. Displaying stats as stored'\n )\n returnMsg = ERROR_INVALID_SORT\n if player == 'ALL':\n maxPlayerLen = 0\n for player in rows:\n if len(player[0]) > maxPlayerLen:\n maxPlayerLen = len(player[0])\n playerString = ''\n startSpace = 4 if maxPlayerLen % 2 else 3\n for player in rows:\n playerName = player[0].capitalize().rjust(maxPlayerLen + startSpace\n )\n winCount = player[1].rjust(7)\n loseCount = player[2].rjust(9)\n if float(winCount) <= 0:\n winRate = '0'\n elif float(loseCount) <= 0:\n winRate = ' 100'\n else:\n winRate = str(float(winCount) / (float(winCount) + float(\n loseCount)) * 100)\n winRate = winRate[0:4].rjust(9)\n playerString += (playerName + winCount + loseCount + winRate +\n ' %\\n')\n namePaddingLen = roundMultiple(maxPlayerLen + 2, 2)\n header = ' |' + 'Name'.center(namePaddingLen\n ) + '| Wins | Losses | Win Rate |\\n'\n divider = '-' * len(header) + '\\n'\n sendString = '```md\\n' + header + divider + playerString + '```'\n if len(returnMsg) > 0:\n returnMsg = returnMsg + sendString\n return returnMsg\n return sendString\n",
"step-4": "<mask token>\nINFO_DB_SUCCESS = 'Database updated successfully!'\nERROR_DB_ERROR = 'Error: Unable to open database for writing'\nERROR_DB_NOT_FOUND = (\n 'Error: Database for specified game does not exist. Check your spelling or use !addgame first.'\n )\nERROR_PLAYER_NOT_FOUND = (\n 'Error: \"%s\" not found in database. Check your spelling or use !addplayer first.'\n )\nERROR_WIN_IN_LOSE = 'Error: \"%s\" already specified as winner.'\nERROR_DUP_LOSER = 'Error: \"%s\" duplicated in losers list'\nERROR_IN_DB = 'Error: \"%s\" is already in the database'\nERROR_SORT_ERROR = \"\"\"Error while sorting list. Make sure all players have at least one win or loss.\n\"\"\"\nERROR_INVALID_SORT = (\n 'Error: Invalid sorting type. Displaying stats as stored.\\n')\n\n\ndef getIndex(name, searchList):\n for i in range(0, len(searchList)):\n if name in searchList[i]:\n return i\n return -1\n\n\ndef roundMultiple(num, multiple):\n if num % multiple:\n return num + (multiple - num % multiple)\n return num\n\n\ndef findDuplicates(inputList):\n dupList = [k for k, v in Counter(inputList).items() if v > 1]\n return dupList\n\n\ndef incrementStats(msgChannel, statsFile, winner, losers):\n data = readDB(statsFile)\n if data == 0:\n return ERROR_DB_NOT_FOUND\n rows = data.rows\n if getIndex(winner, rows) < 0:\n print('[ERROR] Winner \"%s\" not found in database' % winner)\n return ERROR_PLAYER_NOT_FOUND % winner\n for loser in losers:\n loserIndex = getIndex(loser, rows)\n if loser == winner:\n print('[ERROR] Winner duplicated in losers field')\n return ERROR_WIN_IN_LOSE % loser\n if loserIndex < 0:\n print('[ERROR] Loser \"%s\" not found in database' % loser)\n return ERROR_PLAYER_NOT_FOUND % loser\n dupList = findDuplicates(losers)\n if len(dupList) > 0:\n print('[ERROR] Duplicate losers found')\n return ERROR_DUP_LOSER % dupList\n winnerIndex = getIndex(winner, rows)\n winnerVal = int(rows[winnerIndex][1])\n rows[winnerIndex][1] = str(winnerVal + 1)\n for loser in losers:\n loserIndex = getIndex(loser, rows)\n loserVal = int(rows[loserIndex][2])\n rows[loserIndex][2] = str(loserVal + 1)\n if writeDB(statsFile, data.headers, rows):\n return INFO_DB_SUCCESS\n else:\n print('[INFO] Database not updated')\n return ERROR_DB_ERROR\n\n\ndef editPlayer(msgChannel, statsFile, player, editType, wins='0', losses='0'):\n data = readDB(statsFile)\n if data == 0:\n return ERROR_DB_NOT_FOUND\n rows = data.rows\n playerIndex = getIndex(player, rows)\n if editType == 'ADD':\n if playerIndex > -1:\n print('[ERROR] \"%s\" already in database' % player)\n print('[INFO] Database not updated')\n return ERROR_IN_DB % player\n else:\n rows.append([player, wins, losses])\n rows.sort(key=lambda name: name[0].capitalize())\n if writeDB(statsFile, data.headers, rows):\n print('[INFO] \"%s\" added to database' % player)\n return INFO_DB_SUCCESS\n else:\n print('[INFO] Database not updated')\n return ERROR_DB_ERROR\n elif editType == 'EDIT':\n if playerIndex < 0:\n print('[ERROR] \"%s\" not found in database' % player)\n print('[INFO] Database not updated')\n return ERROR_PLAYER_NOT_FOUND % player\n else:\n rows[playerIndex] = [rows[playerIndex][0], wins, losses]\n if writeDB(statsFile, data.headers, rows):\n print(\"[INFO] %s's data changed\" % player)\n return INFO_DB_SUCCESS\n else:\n print('[INFO] Database not updated')\n return ERROR_DB_ERROR\n elif editType == 'REMOVE':\n if playerIndex < 0:\n print('[ERROR] \"%s\" not found in database' % player)\n print('[INFO] Database not updated')\n return ERROR_PLAYER_NOT_FOUND % 
player\n else:\n del rows[playerIndex]\n if writeDB(statsFile, data.headers, rows):\n print('[INFO] \"%s\" removed from database' % player)\n return INFO_DB_SUCCESS\n else:\n print('[INFO] Database not updated')\n return ERROR_DB_ERROR\n\n\ndef dumpStats(msgChannel, statsFile, sortType='WINRATE', player='ALL'):\n data = readDB(statsFile)\n if data == 0:\n return ERROR_DB_NOT_FOUND\n rows = data.rows\n print('[INFO] Sort type is %s' % sortType)\n returnMsg = ''\n if sortType == 'WINRATE' or sortType == 'NONE':\n try:\n rows.sort(key=lambda rate: float(rate[1]) / (float(rate[1]) +\n float(rate[2])), reverse=True)\n except ZeroDivisionError:\n print(\n '[ERROR] Tried to divide by zero because of blank player data')\n returnMsg = ERROR_SORT_ERROR\n elif sortType == 'WINS':\n rows.sort(key=lambda wins: float(wins[1]), reverse=True)\n elif sortType == 'LOSSES':\n rows.sort(key=lambda losses: float(losses[2]), reverse=True)\n elif sortType == 'NAME':\n pass\n else:\n print(\n '[ERROR] Invalid sorting type specified. Displaying stats as stored'\n )\n returnMsg = ERROR_INVALID_SORT\n if player == 'ALL':\n maxPlayerLen = 0\n for player in rows:\n if len(player[0]) > maxPlayerLen:\n maxPlayerLen = len(player[0])\n playerString = ''\n startSpace = 4 if maxPlayerLen % 2 else 3\n for player in rows:\n playerName = player[0].capitalize().rjust(maxPlayerLen + startSpace\n )\n winCount = player[1].rjust(7)\n loseCount = player[2].rjust(9)\n if float(winCount) <= 0:\n winRate = '0'\n elif float(loseCount) <= 0:\n winRate = ' 100'\n else:\n winRate = str(float(winCount) / (float(winCount) + float(\n loseCount)) * 100)\n winRate = winRate[0:4].rjust(9)\n playerString += (playerName + winCount + loseCount + winRate +\n ' %\\n')\n namePaddingLen = roundMultiple(maxPlayerLen + 2, 2)\n header = ' |' + 'Name'.center(namePaddingLen\n ) + '| Wins | Losses | Win Rate |\\n'\n divider = '-' * len(header) + '\\n'\n sendString = '```md\\n' + header + divider + playerString + '```'\n if len(returnMsg) > 0:\n returnMsg = returnMsg + sendString\n return returnMsg\n return sendString\n",
"step-5": "import discord\nfrom collections import Counter\nfrom db import readDB, writeDB\n\n\nINFO_DB_SUCCESS = 'Database updated successfully!'\nERROR_DB_ERROR = 'Error: Unable to open database for writing'\nERROR_DB_NOT_FOUND = 'Error: Database for specified game does not exist. Check your spelling or use !addgame first.'\n\nERROR_PLAYER_NOT_FOUND = 'Error: \\\"%s\\\" not found in database. Check your spelling or use !addplayer first.'\nERROR_WIN_IN_LOSE = 'Error: \\\"%s\\\" already specified as winner.'\nERROR_DUP_LOSER = 'Error: \\\"%s\\\" duplicated in losers list'\n\nERROR_IN_DB = 'Error: \\\"%s\\\" is already in the database'\n\nERROR_SORT_ERROR = 'Error while sorting list. Make sure all players have at least one win or loss.\\n'\nERROR_INVALID_SORT = 'Error: Invalid sorting type. Displaying stats as stored.\\n'\n\n\n# desc: function to search a list of lists for a name\n# args: name - the name to search the lists for\n# searchList - a list of lists to search for a name\n# retn: the index of the list containing the name or -1 if not found\ndef getIndex(name, searchList):\n for i in range(0, len(searchList)):\n if name in searchList[i]:\n return i\n return -1\n\n\n# desc: function to round a number up to a specific increment. for example,\n# rounding 11 to the nearest multiple of 2 would result in 12\n# args: num - the number to round up\n# multiple - the increment to round to\n# retn: the rounded number\ndef roundMultiple(num, multiple):\n if num % multiple:\n return num + (multiple - (num % multiple))\n return num\n\n\n# desc: function to find duplicate items in a list\n# args: inputList - a list to search for duplicates\n# retn: a list containing the duplicates\ndef findDuplicates(inputList):\n dupList = [k for k, v in Counter(inputList).items() if v > 1]\n return dupList\n\n\n# desc: function to update the database\n# args: msgChannel - the channel the invoking message was sent from\n# statsFile - the name of the database file\n# winner - a string containing the winner's name\n# losers - a list of strings containing the losers' names\n# retn: a string indicating success or failure\ndef incrementStats(msgChannel, statsFile, winner, losers):\n # read the database\n data = readDB(statsFile)\n # return an error if database not found\n if data == 0:\n return ERROR_DB_NOT_FOUND\n rows = data.rows\n\n # check if the winner is actually in the database\n if getIndex(winner, rows) < 0:\n print('[ERROR] Winner \\\"%s\\\" not found in database' % winner)\n return (ERROR_PLAYER_NOT_FOUND % winner)\n\n # check if losers are in database\n for loser in losers:\n # get loser index\n loserIndex = getIndex(loser, rows)\n\n # check against winner to see if the name was duplicated\n if loser == winner:\n print('[ERROR] Winner duplicated in losers field')\n return (ERROR_WIN_IN_LOSE % loser)\n # check if loser was not found in database\n if loserIndex < 0:\n print('[ERROR] Loser \\\"%s\\\" not found in database' % loser)\n return (ERROR_PLAYER_NOT_FOUND % loser)\n\n # check for duplicate losers\n dupList = findDuplicates(losers)\n if len(dupList) > 0:\n print('[ERROR] Duplicate losers found')\n return (ERROR_DUP_LOSER % dupList)\n\n # update stats if we found the winner and all losers\n # get index, get win count, increment and update\n winnerIndex = getIndex(winner, rows)\n winnerVal = int(rows[winnerIndex][1])\n rows[winnerIndex][1] = str(winnerVal + 1)\n\n # same as winner for each loser\n for loser in losers:\n loserIndex = getIndex(loser, rows)\n loserVal = int(rows[loserIndex][2])\n 
rows[loserIndex][2] = str(loserVal + 1)\n\n # write the new data to the database file\n if writeDB(statsFile, data.headers, rows):\n return INFO_DB_SUCCESS\n else:\n print('[INFO] Database not updated')\n return ERROR_DB_ERROR\n\n\n# desc: function to add a player to the database or edit an existing player\n# args: msgChannel - the channel the invoking message was sent from\n# statsFile - the name of the database file\n# player - the name of the player to either add to the db or edit\n# editType - either 'ADD' or 'EDIT' or 'REMOVE' - sets type of change happening\n# wins - the number of wins to assign the player\n# losses - the number of losses to assign the player\n# retn: a string indicating success or failure\ndef editPlayer(msgChannel, statsFile, player, editType, wins='0', losses='0'):\n # open up the database\n data = readDB(statsFile)\n # return an error if database not found\n if data == 0:\n return ERROR_DB_NOT_FOUND\n rows = data.rows\n playerIndex = getIndex(player, rows)\n\n # check if player is already in database\n if editType == 'ADD':\n if playerIndex > -1:\n print('[ERROR] \\\"%s\\\" already in database' % player)\n print('[INFO] Database not updated')\n return (ERROR_IN_DB % player)\n else:\n # add player to list and resort\n rows.append([player, wins, losses])\n rows.sort(key=lambda name: name[0].capitalize())\n\n # write the new data to the database file\n if writeDB(statsFile, data.headers, rows):\n print('[INFO] \\\"%s\\\" added to database' % player)\n return INFO_DB_SUCCESS\n else:\n print('[INFO] Database not updated')\n return ERROR_DB_ERROR\n elif editType == 'EDIT':\n if playerIndex < 0:\n print('[ERROR] \\\"%s\\\" not found in database' % player)\n print('[INFO] Database not updated')\n return (ERROR_PLAYER_NOT_FOUND % player)\n else:\n rows[playerIndex] = [rows[playerIndex][0], wins, losses]\n\n # write the new data to the database file\n if writeDB(statsFile, data.headers, rows):\n print('[INFO] %s\\'s data changed' % player)\n return INFO_DB_SUCCESS\n else:\n print('[INFO] Database not updated')\n return ERROR_DB_ERROR\n elif editType == 'REMOVE':\n if playerIndex < 0:\n print('[ERROR] \\\"%s\\\" not found in database' % player)\n print('[INFO] Database not updated')\n return (ERROR_PLAYER_NOT_FOUND % player)\n else:\n # delete player from list\n del(rows[playerIndex])\n # write the new data to the database\n if writeDB(statsFile, data.headers, rows):\n print('[INFO] \\\"%s\\\" removed from database' % player)\n return INFO_DB_SUCCESS\n else:\n print('[INFO] Database not updated')\n return ERROR_DB_ERROR\n\n\n# desc: function to display the stats\n# args: msgChannel - the channel the invoking message was sent from\n# statsFile - the name of the database file\n# sortType - the order in which the results should be sorted.\n# options are 'WINRATE', 'WINS', 'LOSSES', or 'NAME'.\n# will revert to 'NAME' if invalid\n# player - NOT IMPLEMENTED - the player to display stats for\n# retn: a string formatted with the database stats\ndef dumpStats(msgChannel, statsFile, sortType='WINRATE', player='ALL'):\n # read database\n data = readDB(statsFile)\n # return an error if database not found\n if data == 0:\n return ERROR_DB_NOT_FOUND\n rows = data.rows\n\n print('[INFO] Sort type is %s' % sortType)\n returnMsg = ''\n if sortType == 'WINRATE' or sortType == 'NONE':\n # sort data by win rate\n try:\n rows.sort(key=lambda rate: float(rate[1]) / (float(rate[1]) + float(rate[2])), reverse=True)\n except ZeroDivisionError:\n print('[ERROR] Tried to divide by zero because of 
blank player data')\n returnMsg = ERROR_SORT_ERROR\n elif sortType == 'WINS':\n # sort by number of wins and reverse so max is first\n rows.sort(key=lambda wins: float(wins[1]), reverse=True)\n elif sortType == 'LOSSES':\n # sort by number of losses and reverse so max is first\n rows.sort(key=lambda losses: float(losses[2]), reverse=True)\n elif sortType == 'NAME':\n # database is stored sorted by name so dont do anything\n pass\n else:\n print('[ERROR] Invalid sorting type specified. Displaying stats as stored')\n returnMsg = ERROR_INVALID_SORT\n\n if player == 'ALL':\n # get max player length\n maxPlayerLen = 0\n for player in rows:\n if len(player[0]) > maxPlayerLen:\n maxPlayerLen = len(player[0])\n\n # construct a string with all the player info\n playerString = ''\n # adjust start spacing if player length is odd or even to align with pipe\n startSpace = 4 if maxPlayerLen % 2 else 3\n for player in rows:\n playerName = player[0].capitalize().rjust(maxPlayerLen + startSpace)\n winCount = player[1].rjust(7)\n loseCount = player[2].rjust(9)\n # calculate win rate\n if float(winCount) <= 0:\n winRate = '0'\n elif float(loseCount) <= 0:\n winRate = ' 100'\n else:\n winRate = str((float(winCount) / (float(winCount) + float(loseCount))) * 100)\n\n # truncate win rate and create string with player info\n winRate = winRate[0:4].rjust(9)\n playerString += playerName + winCount + loseCount + winRate + ' %\\n'\n\n # calculate padding for name field and create header final strings\n namePaddingLen = roundMultiple((maxPlayerLen + 2), 2)\n header = ' |' + 'Name'.center(namePaddingLen) + '| Wins | Losses | Win Rate |\\n'\n divider = ('-' * len(header)) + '\\n'\n sendString = '```md\\n' + header + divider + playerString + '```'\n\n # return the constructed string\n if len(returnMsg) > 0:\n returnMsg = returnMsg + sendString\n return returnMsg\n return sendString\n",
"step-ids": [
4,
5,
6,
7,
9
]
}
|
[
4,
5,
6,
7,
9
] |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This is a collection of monkey patches and workarounds for bugs in
earlier versions of Numpy.
"""
from ...utils import minversion
__all__ = ['NUMPY_LT_1_10_4', 'NUMPY_LT_1_11',
'NUMPY_LT_1_12', 'NUMPY_LT_1_13', 'NUMPY_LT_1_14',
'NUMPY_LT_1_14_1', 'NUMPY_LT_1_14_2']
# TODO: It might also be nice to have aliases to these named for specific
# features/bugs we're checking for (ex:
# astropy.table.table._BROKEN_UNICODE_TABLE_SORT)
NUMPY_LT_1_10_4 = not minversion('numpy', '1.10.4')
NUMPY_LT_1_11 = not minversion('numpy', '1.11.0')
NUMPY_LT_1_12 = not minversion('numpy', '1.12')
NUMPY_LT_1_13 = not minversion('numpy', '1.13')
NUMPY_LT_1_14 = not minversion('numpy', '1.14')
NUMPY_LT_1_14_1 = not minversion('numpy', '1.14.1')
NUMPY_LT_1_14_2 = not minversion('numpy', '1.14.2')
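# Hedged usage sketch (not part of the original module): elsewhere in the package,
# code typically imports one of these flags and branches on it to select a
# version-appropriate code path, for example:
#
#     from ..compat.numpy import NUMPY_LT_1_14
#     if NUMPY_LT_1_14:
#         ...  # fall back to the pre-1.14 behaviour
#
# The exact relative import path is an assumption; it depends on where the
# importing module sits in the package tree.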
|
normal
|
{
"blob_id": "9376d697158faf91f066a88e87d317e79a4d9240",
"index": 6575,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n__all__ = ['NUMPY_LT_1_10_4', 'NUMPY_LT_1_11', 'NUMPY_LT_1_12',\n 'NUMPY_LT_1_13', 'NUMPY_LT_1_14', 'NUMPY_LT_1_14_1', 'NUMPY_LT_1_14_2']\nNUMPY_LT_1_10_4 = not minversion('numpy', '1.10.4')\nNUMPY_LT_1_11 = not minversion('numpy', '1.11.0')\nNUMPY_LT_1_12 = not minversion('numpy', '1.12')\nNUMPY_LT_1_13 = not minversion('numpy', '1.13')\nNUMPY_LT_1_14 = not minversion('numpy', '1.14')\nNUMPY_LT_1_14_1 = not minversion('numpy', '1.14.1')\nNUMPY_LT_1_14_2 = not minversion('numpy', '1.14.2')\n",
"step-3": "<mask token>\nfrom ...utils import minversion\n__all__ = ['NUMPY_LT_1_10_4', 'NUMPY_LT_1_11', 'NUMPY_LT_1_12',\n 'NUMPY_LT_1_13', 'NUMPY_LT_1_14', 'NUMPY_LT_1_14_1', 'NUMPY_LT_1_14_2']\nNUMPY_LT_1_10_4 = not minversion('numpy', '1.10.4')\nNUMPY_LT_1_11 = not minversion('numpy', '1.11.0')\nNUMPY_LT_1_12 = not minversion('numpy', '1.12')\nNUMPY_LT_1_13 = not minversion('numpy', '1.13')\nNUMPY_LT_1_14 = not minversion('numpy', '1.14')\nNUMPY_LT_1_14_1 = not minversion('numpy', '1.14.1')\nNUMPY_LT_1_14_2 = not minversion('numpy', '1.14.2')\n",
"step-4": "# Licensed under a 3-clause BSD style license - see LICENSE.rst\n\"\"\"\nThis is a collection of monkey patches and workarounds for bugs in\nearlier versions of Numpy.\n\"\"\"\nfrom ...utils import minversion\n\n\n__all__ = ['NUMPY_LT_1_10_4', 'NUMPY_LT_1_11',\n 'NUMPY_LT_1_12', 'NUMPY_LT_1_13', 'NUMPY_LT_1_14',\n 'NUMPY_LT_1_14_1', 'NUMPY_LT_1_14_2']\n\n# TODO: It might also be nice to have aliases to these named for specific\n# features/bugs we're checking for (ex:\n# astropy.table.table._BROKEN_UNICODE_TABLE_SORT)\nNUMPY_LT_1_10_4 = not minversion('numpy', '1.10.4')\nNUMPY_LT_1_11 = not minversion('numpy', '1.11.0')\nNUMPY_LT_1_12 = not minversion('numpy', '1.12')\nNUMPY_LT_1_13 = not minversion('numpy', '1.13')\nNUMPY_LT_1_14 = not minversion('numpy', '1.14')\nNUMPY_LT_1_14_1 = not minversion('numpy', '1.14.1')\nNUMPY_LT_1_14_2 = not minversion('numpy', '1.14.2')\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import requests
import json
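# Pretty-print an HTTP response: status code, headers, and the JSON body (falling back to raw text).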
def display_response(rsp):
try:
print("Printing a response.")
print("HTTP status code: ", rsp.status_code)
h = dict(rsp.headers)
print("Response headers: \n", json.dumps(h, indent=2, default=str))
try:
body = rsp.json()
print("JSON body: \n", json.dumps(body, indent=2, default=str))
except Exception as e:
body = rsp.text
print("Text body: \n", body)
except Exception as e:
print("display_response got exception e = ", e)
def test_get_from_hell():
try:
url = "http://127.0.0.1:5000/api/lahman2017/people?children=appearances%2Cbatting&people.nameLast=Williams&batting.yearID=1960&appearances.yearID=1960&fields=people.playerID%2Cpeople.nameLast%2Cpeople.nameFirst%2Cbatting.AB%2Cbatting.H%2Cappearances.G_all"
print("\n test 1, ", url)
result = requests.get(url)
display_response(result)
except Exception as e:
print("POST got exception = ", e)
test_get_from_hell()
|
normal
|
{
"blob_id": "31761b9469cc579c209e070fbe7b71943404a1ff",
"index": 3992,
"step-1": "<mask token>\n\n\ndef test_get_from_hell():\n try:\n url = (\n 'http://127.0.0.1:5000/api/lahman2017/people?children=appearances%2Cbatting&people.nameLast=Williams&batting.yearID=1960&appearances.yearID=1960&fields=people.playerID%2Cpeople.nameLast%2Cpeople.nameFirst%2Cbatting.AB%2Cbatting.H%2Cappearances.G_all'\n )\n print('\\n test 1, ', url)\n result = requests.get(url)\n display_response(result)\n except Exception as e:\n print('POST got exception = ', e)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef display_response(rsp):\n try:\n print('Printing a response.')\n print('HTTP status code: ', rsp.status_code)\n h = dict(rsp.headers)\n print('Response headers: \\n', json.dumps(h, indent=2, default=str))\n try:\n body = rsp.json()\n print('JSON body: \\n', json.dumps(body, indent=2, default=str))\n except Exception as e:\n body = rsp.text\n print('Text body: \\n', body)\n except Exception as e:\n print('display_response got exception e = ', e)\n\n\ndef test_get_from_hell():\n try:\n url = (\n 'http://127.0.0.1:5000/api/lahman2017/people?children=appearances%2Cbatting&people.nameLast=Williams&batting.yearID=1960&appearances.yearID=1960&fields=people.playerID%2Cpeople.nameLast%2Cpeople.nameFirst%2Cbatting.AB%2Cbatting.H%2Cappearances.G_all'\n )\n print('\\n test 1, ', url)\n result = requests.get(url)\n display_response(result)\n except Exception as e:\n print('POST got exception = ', e)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef display_response(rsp):\n try:\n print('Printing a response.')\n print('HTTP status code: ', rsp.status_code)\n h = dict(rsp.headers)\n print('Response headers: \\n', json.dumps(h, indent=2, default=str))\n try:\n body = rsp.json()\n print('JSON body: \\n', json.dumps(body, indent=2, default=str))\n except Exception as e:\n body = rsp.text\n print('Text body: \\n', body)\n except Exception as e:\n print('display_response got exception e = ', e)\n\n\ndef test_get_from_hell():\n try:\n url = (\n 'http://127.0.0.1:5000/api/lahman2017/people?children=appearances%2Cbatting&people.nameLast=Williams&batting.yearID=1960&appearances.yearID=1960&fields=people.playerID%2Cpeople.nameLast%2Cpeople.nameFirst%2Cbatting.AB%2Cbatting.H%2Cappearances.G_all'\n )\n print('\\n test 1, ', url)\n result = requests.get(url)\n display_response(result)\n except Exception as e:\n print('POST got exception = ', e)\n\n\ntest_get_from_hell()\n",
"step-4": "import requests\nimport json\n\n\ndef display_response(rsp):\n try:\n print('Printing a response.')\n print('HTTP status code: ', rsp.status_code)\n h = dict(rsp.headers)\n print('Response headers: \\n', json.dumps(h, indent=2, default=str))\n try:\n body = rsp.json()\n print('JSON body: \\n', json.dumps(body, indent=2, default=str))\n except Exception as e:\n body = rsp.text\n print('Text body: \\n', body)\n except Exception as e:\n print('display_response got exception e = ', e)\n\n\ndef test_get_from_hell():\n try:\n url = (\n 'http://127.0.0.1:5000/api/lahman2017/people?children=appearances%2Cbatting&people.nameLast=Williams&batting.yearID=1960&appearances.yearID=1960&fields=people.playerID%2Cpeople.nameLast%2Cpeople.nameFirst%2Cbatting.AB%2Cbatting.H%2Cappearances.G_all'\n )\n print('\\n test 1, ', url)\n result = requests.get(url)\n display_response(result)\n except Exception as e:\n print('POST got exception = ', e)\n\n\ntest_get_from_hell()\n",
"step-5": "import requests\nimport json\n\ndef display_response(rsp):\n\n try:\n print(\"Printing a response.\")\n print(\"HTTP status code: \", rsp.status_code)\n h = dict(rsp.headers)\n print(\"Response headers: \\n\", json.dumps(h, indent=2, default=str))\n\n try:\n body = rsp.json()\n print(\"JSON body: \\n\", json.dumps(body, indent=2, default=str))\n except Exception as e:\n body = rsp.text\n print(\"Text body: \\n\", body)\n\n except Exception as e:\n print(\"display_response got exception e = \", e)\n\n\ndef test_get_from_hell():\n\n\n try:\n\n\n url = \"http://127.0.0.1:5000/api/lahman2017/people?children=appearances%2Cbatting&people.nameLast=Williams&batting.yearID=1960&appearances.yearID=1960&fields=people.playerID%2Cpeople.nameLast%2Cpeople.nameFirst%2Cbatting.AB%2Cbatting.H%2Cappearances.G_all\"\n print(\"\\n test 1, \", url)\n result = requests.get(url)\n display_response(result)\n\n\n except Exception as e:\n print(\"POST got exception = \", e)\n\n\ntest_get_from_hell()",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
#############################################################################
## Crytek Source File
## Copyright (C) 2013, Crytek Studios
##
## Creator: Christopher Bolte
## Date: Oct 31, 2013
## Description: WAF based build system
#############################################################################
from waflib.Configure import conf
def load_linux_x64_common_settings(v):
"""
Setup all compiler and linker settings shared over all linux_x64 configurations
"""
# Add common linux x64 defines
v['DEFINES'] += [ 'LINUX64' ]
@conf
def load_debug_linux_x64_settings(conf):
"""
Setup all compiler and linker settings shared over all linux_x64 configurations for
the 'debug' configuration
"""
v = conf.env
load_linux_x64_common_settings(v)
@conf
def load_profile_linux_x64_settings(conf):
"""
Setup all compiler and linker settings shared over all linux_x64 configurations for
the 'profile' configuration
"""
v = conf.env
load_linux_x64_common_settings(v)
@conf
def load_performance_linux_x64_settings(conf):
"""
Setup all compiler and linker settings shared over all linux_x64 configurations for
the 'performance' configuration
"""
v = conf.env
load_linux_x64_common_settings(v)
@conf
def load_release_linux_x64_settings(conf):
"""
Setup all compiler and linker settings shared over all linux_x64 configurations for
the 'release' configuration
"""
v = conf.env
load_linux_x64_common_settings(v)
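# Hedged usage sketch (not part of the original file): since @conf registers these
# helpers on the configuration context, a project's wscript would normally call
# them during its configure step, for example:
#
#     def configure(conf):
#         conf.load_debug_linux_x64_settings()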
|
normal
|
{
"blob_id": "5848273a76995825f01df53d6beed534e6f9f9fe",
"index": 8730,
"step-1": "<mask token>\n\n\n@conf\ndef load_debug_linux_x64_settings(conf):\n \"\"\"\n\tSetup all compiler and linker settings shared over all linux_x64 configurations for\n\tthe 'debug' configuration\n\t\"\"\"\n v = conf.env\n load_linux_x64_common_settings(v)\n\n\n@conf\ndef load_profile_linux_x64_settings(conf):\n \"\"\"\n\tSetup all compiler and linker settings shared over all linux_x64 configurations for\n\tthe 'profile' configuration\n\t\"\"\"\n v = conf.env\n load_linux_x64_common_settings(v)\n\n\n<mask token>\n\n\n@conf\ndef load_release_linux_x64_settings(conf):\n \"\"\"\n\tSetup all compiler and linker settings shared over all linux_x64 configurations for\n\tthe 'release' configuration\n\t\"\"\"\n v = conf.env\n load_linux_x64_common_settings(v)\n",
"step-2": "<mask token>\n\n\n@conf\ndef load_debug_linux_x64_settings(conf):\n \"\"\"\n\tSetup all compiler and linker settings shared over all linux_x64 configurations for\n\tthe 'debug' configuration\n\t\"\"\"\n v = conf.env\n load_linux_x64_common_settings(v)\n\n\n@conf\ndef load_profile_linux_x64_settings(conf):\n \"\"\"\n\tSetup all compiler and linker settings shared over all linux_x64 configurations for\n\tthe 'profile' configuration\n\t\"\"\"\n v = conf.env\n load_linux_x64_common_settings(v)\n\n\n@conf\ndef load_performance_linux_x64_settings(conf):\n \"\"\"\n\tSetup all compiler and linker settings shared over all linux_x64 configurations for\n\tthe 'performance' configuration\n\t\"\"\"\n v = conf.env\n load_linux_x64_common_settings(v)\n\n\n@conf\ndef load_release_linux_x64_settings(conf):\n \"\"\"\n\tSetup all compiler and linker settings shared over all linux_x64 configurations for\n\tthe 'release' configuration\n\t\"\"\"\n v = conf.env\n load_linux_x64_common_settings(v)\n",
"step-3": "<mask token>\n\n\ndef load_linux_x64_common_settings(v):\n \"\"\"\n\tSetup all compiler and linker settings shared over all linux_x64 configurations\n\t\"\"\"\n v['DEFINES'] += ['LINUX64']\n\n\n@conf\ndef load_debug_linux_x64_settings(conf):\n \"\"\"\n\tSetup all compiler and linker settings shared over all linux_x64 configurations for\n\tthe 'debug' configuration\n\t\"\"\"\n v = conf.env\n load_linux_x64_common_settings(v)\n\n\n@conf\ndef load_profile_linux_x64_settings(conf):\n \"\"\"\n\tSetup all compiler and linker settings shared over all linux_x64 configurations for\n\tthe 'profile' configuration\n\t\"\"\"\n v = conf.env\n load_linux_x64_common_settings(v)\n\n\n@conf\ndef load_performance_linux_x64_settings(conf):\n \"\"\"\n\tSetup all compiler and linker settings shared over all linux_x64 configurations for\n\tthe 'performance' configuration\n\t\"\"\"\n v = conf.env\n load_linux_x64_common_settings(v)\n\n\n@conf\ndef load_release_linux_x64_settings(conf):\n \"\"\"\n\tSetup all compiler and linker settings shared over all linux_x64 configurations for\n\tthe 'release' configuration\n\t\"\"\"\n v = conf.env\n load_linux_x64_common_settings(v)\n",
"step-4": "from waflib.Configure import conf\n\n\ndef load_linux_x64_common_settings(v):\n \"\"\"\n\tSetup all compiler and linker settings shared over all linux_x64 configurations\n\t\"\"\"\n v['DEFINES'] += ['LINUX64']\n\n\n@conf\ndef load_debug_linux_x64_settings(conf):\n \"\"\"\n\tSetup all compiler and linker settings shared over all linux_x64 configurations for\n\tthe 'debug' configuration\n\t\"\"\"\n v = conf.env\n load_linux_x64_common_settings(v)\n\n\n@conf\ndef load_profile_linux_x64_settings(conf):\n \"\"\"\n\tSetup all compiler and linker settings shared over all linux_x64 configurations for\n\tthe 'profile' configuration\n\t\"\"\"\n v = conf.env\n load_linux_x64_common_settings(v)\n\n\n@conf\ndef load_performance_linux_x64_settings(conf):\n \"\"\"\n\tSetup all compiler and linker settings shared over all linux_x64 configurations for\n\tthe 'performance' configuration\n\t\"\"\"\n v = conf.env\n load_linux_x64_common_settings(v)\n\n\n@conf\ndef load_release_linux_x64_settings(conf):\n \"\"\"\n\tSetup all compiler and linker settings shared over all linux_x64 configurations for\n\tthe 'release' configuration\n\t\"\"\"\n v = conf.env\n load_linux_x64_common_settings(v)\n",
"step-5": "#############################################################################\n## Crytek Source File\n## Copyright (C) 2013, Crytek Studios\n##\n## Creator: Christopher Bolte\n## Date: Oct 31, 2013\n## Description: WAF based build system\n#############################################################################\nfrom waflib.Configure import conf\n\ndef load_linux_x64_common_settings(v):\n\t\"\"\"\n\tSetup all compiler and linker settings shared over all linux_x64 configurations\n\t\"\"\"\n\t\n\t# Add common linux x64 defines\n\tv['DEFINES'] += [ 'LINUX64' ]\t\n\t\n@conf\ndef load_debug_linux_x64_settings(conf):\n\t\"\"\"\n\tSetup all compiler and linker settings shared over all linux_x64 configurations for\n\tthe 'debug' configuration\n\t\"\"\"\n\tv = conf.env\n\tload_linux_x64_common_settings(v)\n\t\n@conf\ndef load_profile_linux_x64_settings(conf):\n\t\"\"\"\n\tSetup all compiler and linker settings shared over all linux_x64 configurations for\n\tthe 'profile' configuration\n\t\"\"\"\n\tv = conf.env\n\tload_linux_x64_common_settings(v)\n\t\n@conf\ndef load_performance_linux_x64_settings(conf):\n\t\"\"\"\n\tSetup all compiler and linker settings shared over all linux_x64 configurations for\n\tthe 'performance' configuration\n\t\"\"\"\n\tv = conf.env\n\tload_linux_x64_common_settings(v)\n\t\n@conf\ndef load_release_linux_x64_settings(conf):\n\t\"\"\"\n\tSetup all compiler and linker settings shared over all linux_x64 configurations for\n\tthe 'release' configuration\n\t\"\"\"\n\tv = conf.env\n\tload_linux_x64_common_settings(v)\n\t",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
from django.contrib import admin
from .models import (AddressLink, Address, Child, Citation,
Configuration, Event, Exclusion, FactType,
Family, Group, Label, LinkAncestry,
Link, MediaLink, Multimedia, Name,
Person, Place, ResearchItem, Research,
Role, Source, SourceTemplate, Url,
Witness)
from . import EXODUS_DB_NAME
from .utils.admin import MultiDBModelAdmin
from .utils.rootsmagic import read_and_pprint_date
class RootsMagicModelAdmin(MultiDBModelAdmin):
using = EXODUS_DB_NAME
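    # Editor's note (assumption): MultiDBModelAdmin is imported above from
    # .utils.admin and is not shown here; the `using` attribute presumably tells
    # it which database alias to route queryset and save operations to -- the
    # RootsMagic database named by EXODUS_DB_NAME.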
class AddressLinkAdmin(RootsMagicModelAdmin):
list_display = [
"id",
"owner_type",
"address",
"owner_id",
"address_number",
"details",
]
class AddressAdmin(RootsMagicModelAdmin):
pass
class ChildAdmin(RootsMagicModelAdmin):
list_display = [
"record_id",
"child",
"family",
"father_relationship",
"mother_relationship",
"child_order",
"is_private",
"father_proof",
"mother_proof",
"note",
]
raw_id_fields = [
'child',
'family',
]
class CitationAdmin(RootsMagicModelAdmin):
list_display = [
"id",
"owner_type",
"source_id",
"owner_id",
"quality",
"is_private",
"comments",
"actual_text",
"reference_number",
"flags",
# "fields",
]
class ConfigurationAdmin(RootsMagicModelAdmin):
pass
class EventAdmin(RootsMagicModelAdmin):
list_display = [
"id",
"event_type",
"owner",
"owner_type",
"owner_id",
"family",
"place",
"site",
# "date",
"pretty_date",
"sort_date",
"is_primary",
"is_private",
"proof",
"status",
"edit_date",
"sentence",
# "details",
# "note",
]
def pretty_date(self, obj):
return read_and_pprint_date(obj.date)
pretty_date.short_description = "Date"
class ExclusionAdmin(RootsMagicModelAdmin):
pass
class FactTypeAdmin(RootsMagicModelAdmin):
list_display = [
"id",
"owner_type",
"name",
"abbreviation",
"gedcom_tag",
"use_value",
"use_date",
"use_place",
"sentence",
"flags",
]
class FamilyAdmin(RootsMagicModelAdmin):
list_display = [
"id",
"father",
"mother",
"child",
"husband_order",
"wife_order",
"is_private",
"proof",
"spouse_label",
"father_label",
"mother_label",
# "note",
]
class GroupAdmin(RootsMagicModelAdmin):
pass
class LabelAdmin(RootsMagicModelAdmin):
pass
class LinkAncestryAdmin(RootsMagicModelAdmin):
pass
class LinkAdmin(RootsMagicModelAdmin):
list_display = [
"id",
"ext_system",
"link_type",
"rootsmagic",
"ext_id",
"modified",
"ext_version",
"ext_date",
"status",
"note",
]
class MediaLinkAdmin(RootsMagicModelAdmin):
list_display = [
"link_id",
"media",
"owner",
"owner_type",
"owner_id",
"is_primary",
"include_1",
"include_2",
"include_3",
"include_4",
"sort_order",
"rectangle_left",
"rectangle_top",
"rectangle_right",
"rectangle_bottom",
"note",
"caption",
"reference_number",
"date",
"sort_date",
# "description",
]
class MultimediaAdmin(RootsMagicModelAdmin):
list_display = [
"id",
"media_type",
"media_path",
"media_file",
"url",
"thumbnail",
"caption",
"reference_number",
# "date",
"pretty_date",
"sort_date",
# "description",
]
def pretty_date(self, obj):
return read_and_pprint_date(obj.date)
pretty_date.short_description = "Date"
class NameAdmin(RootsMagicModelAdmin):
list_display = [
"id",
"owner",
"surname",
"given",
"prefix",
"suffix",
"nickname",
"name_type",
"date",
"sort_date",
"is_primary",
"is_private",
"proof",
"edit_date",
"sentence",
# "note",
"birth_year",
"death_year",
]
class PersonAdmin(RootsMagicModelAdmin):
list_display = [
"id",
'primary_name',
"sex_short",
"edit_date",
"parent",
"spouse",
"color",
"relate_1",
"relate_2",
"flags",
"is_living",
"is_private",
"proof",
"unique_id",
"bookmark",
# "note",
]
class PlaceAdmin(RootsMagicModelAdmin):
list_display = [
"id",
"place_type",
"name",
"abbreviation",
"normalized",
"master_place",
# "latitude",
# "longitude",
"pretty_latlong",
"exact_latituate_longitude",
"note",
]
raw_id_fields = [
"master_place"
]
readonly_fields = [
"pretty_latlong"
]
class ResearchItemAdmin(RootsMagicModelAdmin):
pass
class ResearchAdmin(RootsMagicModelAdmin):
pass
class RoleAdmin(RootsMagicModelAdmin):
list_display = [
"id",
"role_name",
"event_type",
"role_type",
"sentence",
]
class SourceAdmin(RootsMagicModelAdmin):
raw_id_fields = ['template']
class SourceTemplateAdmin(RootsMagicModelAdmin):
pass
class UrlAdmin(RootsMagicModelAdmin):
list_display = [
"id",
"owner_type",
"owner_id",
"link_type",
"name",
"url",
"note",
]
class WitnessAdmin(RootsMagicModelAdmin):
list_display = [
"id",
"event",
"person",
"witness_order",
"role",
"sentence",
"note",
"given",
"surname",
"prefix",
"suffix",
]
admin.site.register(AddressLink, AddressLinkAdmin)
admin.site.register(Address, AddressAdmin)
admin.site.register(Child, ChildAdmin)
admin.site.register(Citation, CitationAdmin)
admin.site.register(Configuration, ConfigurationAdmin)
admin.site.register(Event, EventAdmin)
admin.site.register(Exclusion, ExclusionAdmin)
admin.site.register(FactType, FactTypeAdmin)
admin.site.register(Family, FamilyAdmin)
admin.site.register(Group, GroupAdmin)
admin.site.register(Label, LabelAdmin)
admin.site.register(LinkAncestry, LinkAncestryAdmin)
admin.site.register(Link, LinkAdmin)
admin.site.register(MediaLink, MediaLinkAdmin)
admin.site.register(Multimedia, MultimediaAdmin)
admin.site.register(Name, NameAdmin)
admin.site.register(Person, PersonAdmin)
admin.site.register(Place, PlaceAdmin)
admin.site.register(ResearchItem, ResearchItemAdmin)
admin.site.register(Research, ResearchAdmin)
admin.site.register(Role, RoleAdmin)
admin.site.register(Source, SourceAdmin)
admin.site.register(SourceTemplate, SourceTemplateAdmin)
admin.site.register(Url, UrlAdmin)
admin.site.register(Witness, WitnessAdmin)
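# --- Editor's note (illustrative, not part of the original file) ---
# On Django 1.7+ the explicit registrations above could equivalently be written
# with the @admin.register decorator at each class definition, e.g.:
#
# @admin.register(Witness)
# class WitnessAdmin(RootsMagicModelAdmin):
#     ...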
|
normal
|
{
"blob_id": "b4d48427dddc7c0240cf05c003cbf7b0163279ee",
"index": 9729,
"step-1": "<mask token>\n\n\nclass FactTypeAdmin(RootsMagicModelAdmin):\n list_display = ['id', 'owner_type', 'name', 'abbreviation',\n 'gedcom_tag', 'use_value', 'use_date', 'use_place', 'sentence', 'flags'\n ]\n\n\nclass FamilyAdmin(RootsMagicModelAdmin):\n list_display = ['id', 'father', 'mother', 'child', 'husband_order',\n 'wife_order', 'is_private', 'proof', 'spouse_label', 'father_label',\n 'mother_label']\n\n\nclass GroupAdmin(RootsMagicModelAdmin):\n pass\n\n\nclass LabelAdmin(RootsMagicModelAdmin):\n pass\n\n\nclass LinkAncestryAdmin(RootsMagicModelAdmin):\n pass\n\n\nclass LinkAdmin(RootsMagicModelAdmin):\n list_display = ['id', 'ext_system', 'link_type', 'rootsmagic', 'ext_id',\n 'modified', 'ext_version', 'ext_date', 'status', 'note']\n\n\nclass MediaLinkAdmin(RootsMagicModelAdmin):\n list_display = ['link_id', 'media', 'owner', 'owner_type', 'owner_id',\n 'is_primary', 'include_1', 'include_2', 'include_3', 'include_4',\n 'sort_order', 'rectangle_left', 'rectangle_top', 'rectangle_right',\n 'rectangle_bottom', 'note', 'caption', 'reference_number', 'date',\n 'sort_date']\n\n\nclass MultimediaAdmin(RootsMagicModelAdmin):\n list_display = ['id', 'media_type', 'media_path', 'media_file', 'url',\n 'thumbnail', 'caption', 'reference_number', 'pretty_date', 'sort_date']\n\n def pretty_date(self, obj):\n return read_and_pprint_date(obj.date)\n pretty_date.short_description = 'Date'\n\n\nclass NameAdmin(RootsMagicModelAdmin):\n list_display = ['id', 'owner', 'surname', 'given', 'prefix', 'suffix',\n 'nickname', 'name_type', 'date', 'sort_date', 'is_primary',\n 'is_private', 'proof', 'edit_date', 'sentence', 'birth_year',\n 'death_year']\n\n\nclass PersonAdmin(RootsMagicModelAdmin):\n list_display = ['id', 'primary_name', 'sex_short', 'edit_date',\n 'parent', 'spouse', 'color', 'relate_1', 'relate_2', 'flags',\n 'is_living', 'is_private', 'proof', 'unique_id', 'bookmark']\n\n\nclass PlaceAdmin(RootsMagicModelAdmin):\n list_display = ['id', 'place_type', 'name', 'abbreviation',\n 'normalized', 'master_place', 'pretty_latlong',\n 'exact_latituate_longitude', 'note']\n raw_id_fields = ['master_place']\n readonly_fields = ['pretty_latlong']\n\n\nclass ResearchItemAdmin(RootsMagicModelAdmin):\n pass\n\n\nclass ResearchAdmin(RootsMagicModelAdmin):\n pass\n\n\nclass RoleAdmin(RootsMagicModelAdmin):\n list_display = ['id', 'role_name', 'event_type', 'role_type', 'sentence']\n\n\nclass SourceAdmin(RootsMagicModelAdmin):\n raw_id_fields = ['template']\n\n\nclass SourceTemplateAdmin(RootsMagicModelAdmin):\n pass\n\n\nclass UrlAdmin(RootsMagicModelAdmin):\n list_display = ['id', 'owner_type', 'owner_id', 'link_type', 'name',\n 'url', 'note']\n\n\nclass WitnessAdmin(RootsMagicModelAdmin):\n list_display = ['id', 'event', 'person', 'witness_order', 'role',\n 'sentence', 'note', 'given', 'surname', 'prefix', 'suffix']\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass EventAdmin(RootsMagicModelAdmin):\n list_display = ['id', 'event_type', 'owner', 'owner_type', 'owner_id',\n 'family', 'place', 'site', 'pretty_date', 'sort_date', 'is_primary',\n 'is_private', 'proof', 'status', 'edit_date', 'sentence']\n\n def pretty_date(self, obj):\n return read_and_pprint_date(obj.date)\n pretty_date.short_description = 'Date'\n\n\nclass ExclusionAdmin(RootsMagicModelAdmin):\n pass\n\n\nclass FactTypeAdmin(RootsMagicModelAdmin):\n list_display = ['id', 'owner_type', 'name', 'abbreviation',\n 'gedcom_tag', 'use_value', 'use_date', 'use_place', 'sentence', 'flags'\n ]\n\n\nclass FamilyAdmin(RootsMagicModelAdmin):\n list_display = ['id', 'father', 'mother', 'child', 'husband_order',\n 'wife_order', 'is_private', 'proof', 'spouse_label', 'father_label',\n 'mother_label']\n\n\nclass GroupAdmin(RootsMagicModelAdmin):\n pass\n\n\nclass LabelAdmin(RootsMagicModelAdmin):\n pass\n\n\nclass LinkAncestryAdmin(RootsMagicModelAdmin):\n pass\n\n\nclass LinkAdmin(RootsMagicModelAdmin):\n list_display = ['id', 'ext_system', 'link_type', 'rootsmagic', 'ext_id',\n 'modified', 'ext_version', 'ext_date', 'status', 'note']\n\n\nclass MediaLinkAdmin(RootsMagicModelAdmin):\n list_display = ['link_id', 'media', 'owner', 'owner_type', 'owner_id',\n 'is_primary', 'include_1', 'include_2', 'include_3', 'include_4',\n 'sort_order', 'rectangle_left', 'rectangle_top', 'rectangle_right',\n 'rectangle_bottom', 'note', 'caption', 'reference_number', 'date',\n 'sort_date']\n\n\nclass MultimediaAdmin(RootsMagicModelAdmin):\n list_display = ['id', 'media_type', 'media_path', 'media_file', 'url',\n 'thumbnail', 'caption', 'reference_number', 'pretty_date', 'sort_date']\n\n def pretty_date(self, obj):\n return read_and_pprint_date(obj.date)\n pretty_date.short_description = 'Date'\n\n\nclass NameAdmin(RootsMagicModelAdmin):\n list_display = ['id', 'owner', 'surname', 'given', 'prefix', 'suffix',\n 'nickname', 'name_type', 'date', 'sort_date', 'is_primary',\n 'is_private', 'proof', 'edit_date', 'sentence', 'birth_year',\n 'death_year']\n\n\nclass PersonAdmin(RootsMagicModelAdmin):\n list_display = ['id', 'primary_name', 'sex_short', 'edit_date',\n 'parent', 'spouse', 'color', 'relate_1', 'relate_2', 'flags',\n 'is_living', 'is_private', 'proof', 'unique_id', 'bookmark']\n\n\nclass PlaceAdmin(RootsMagicModelAdmin):\n list_display = ['id', 'place_type', 'name', 'abbreviation',\n 'normalized', 'master_place', 'pretty_latlong',\n 'exact_latituate_longitude', 'note']\n raw_id_fields = ['master_place']\n readonly_fields = ['pretty_latlong']\n\n\nclass ResearchItemAdmin(RootsMagicModelAdmin):\n pass\n\n\nclass ResearchAdmin(RootsMagicModelAdmin):\n pass\n\n\nclass RoleAdmin(RootsMagicModelAdmin):\n list_display = ['id', 'role_name', 'event_type', 'role_type', 'sentence']\n\n\nclass SourceAdmin(RootsMagicModelAdmin):\n raw_id_fields = ['template']\n\n\nclass SourceTemplateAdmin(RootsMagicModelAdmin):\n pass\n\n\nclass UrlAdmin(RootsMagicModelAdmin):\n list_display = ['id', 'owner_type', 'owner_id', 'link_type', 'name',\n 'url', 'note']\n\n\nclass WitnessAdmin(RootsMagicModelAdmin):\n list_display = ['id', 'event', 'person', 'witness_order', 'role',\n 'sentence', 'note', 'given', 'surname', 'prefix', 'suffix']\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass CitationAdmin(RootsMagicModelAdmin):\n list_display = ['id', 'owner_type', 'source_id', 'owner_id', 'quality',\n 'is_private', 'comments', 'actual_text', 'reference_number', 'flags']\n\n\nclass ConfigurationAdmin(RootsMagicModelAdmin):\n pass\n\n\nclass EventAdmin(RootsMagicModelAdmin):\n list_display = ['id', 'event_type', 'owner', 'owner_type', 'owner_id',\n 'family', 'place', 'site', 'pretty_date', 'sort_date', 'is_primary',\n 'is_private', 'proof', 'status', 'edit_date', 'sentence']\n\n def pretty_date(self, obj):\n return read_and_pprint_date(obj.date)\n pretty_date.short_description = 'Date'\n\n\nclass ExclusionAdmin(RootsMagicModelAdmin):\n pass\n\n\nclass FactTypeAdmin(RootsMagicModelAdmin):\n list_display = ['id', 'owner_type', 'name', 'abbreviation',\n 'gedcom_tag', 'use_value', 'use_date', 'use_place', 'sentence', 'flags'\n ]\n\n\nclass FamilyAdmin(RootsMagicModelAdmin):\n list_display = ['id', 'father', 'mother', 'child', 'husband_order',\n 'wife_order', 'is_private', 'proof', 'spouse_label', 'father_label',\n 'mother_label']\n\n\nclass GroupAdmin(RootsMagicModelAdmin):\n pass\n\n\nclass LabelAdmin(RootsMagicModelAdmin):\n pass\n\n\nclass LinkAncestryAdmin(RootsMagicModelAdmin):\n pass\n\n\nclass LinkAdmin(RootsMagicModelAdmin):\n list_display = ['id', 'ext_system', 'link_type', 'rootsmagic', 'ext_id',\n 'modified', 'ext_version', 'ext_date', 'status', 'note']\n\n\nclass MediaLinkAdmin(RootsMagicModelAdmin):\n list_display = ['link_id', 'media', 'owner', 'owner_type', 'owner_id',\n 'is_primary', 'include_1', 'include_2', 'include_3', 'include_4',\n 'sort_order', 'rectangle_left', 'rectangle_top', 'rectangle_right',\n 'rectangle_bottom', 'note', 'caption', 'reference_number', 'date',\n 'sort_date']\n\n\nclass MultimediaAdmin(RootsMagicModelAdmin):\n list_display = ['id', 'media_type', 'media_path', 'media_file', 'url',\n 'thumbnail', 'caption', 'reference_number', 'pretty_date', 'sort_date']\n\n def pretty_date(self, obj):\n return read_and_pprint_date(obj.date)\n pretty_date.short_description = 'Date'\n\n\nclass NameAdmin(RootsMagicModelAdmin):\n list_display = ['id', 'owner', 'surname', 'given', 'prefix', 'suffix',\n 'nickname', 'name_type', 'date', 'sort_date', 'is_primary',\n 'is_private', 'proof', 'edit_date', 'sentence', 'birth_year',\n 'death_year']\n\n\nclass PersonAdmin(RootsMagicModelAdmin):\n list_display = ['id', 'primary_name', 'sex_short', 'edit_date',\n 'parent', 'spouse', 'color', 'relate_1', 'relate_2', 'flags',\n 'is_living', 'is_private', 'proof', 'unique_id', 'bookmark']\n\n\nclass PlaceAdmin(RootsMagicModelAdmin):\n list_display = ['id', 'place_type', 'name', 'abbreviation',\n 'normalized', 'master_place', 'pretty_latlong',\n 'exact_latituate_longitude', 'note']\n raw_id_fields = ['master_place']\n readonly_fields = ['pretty_latlong']\n\n\nclass ResearchItemAdmin(RootsMagicModelAdmin):\n pass\n\n\nclass ResearchAdmin(RootsMagicModelAdmin):\n pass\n\n\nclass RoleAdmin(RootsMagicModelAdmin):\n list_display = ['id', 'role_name', 'event_type', 'role_type', 'sentence']\n\n\nclass SourceAdmin(RootsMagicModelAdmin):\n raw_id_fields = ['template']\n\n\nclass SourceTemplateAdmin(RootsMagicModelAdmin):\n pass\n\n\nclass UrlAdmin(RootsMagicModelAdmin):\n list_display = ['id', 'owner_type', 'owner_id', 'link_type', 'name',\n 'url', 'note']\n\n\nclass WitnessAdmin(RootsMagicModelAdmin):\n list_display = ['id', 'event', 'person', 'witness_order', 'role',\n 'sentence', 'note', 'given', 'surname', 'prefix', 'suffix']\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass ChildAdmin(RootsMagicModelAdmin):\n <mask token>\n <mask token>\n\n\nclass CitationAdmin(RootsMagicModelAdmin):\n list_display = ['id', 'owner_type', 'source_id', 'owner_id', 'quality',\n 'is_private', 'comments', 'actual_text', 'reference_number', 'flags']\n\n\nclass ConfigurationAdmin(RootsMagicModelAdmin):\n pass\n\n\nclass EventAdmin(RootsMagicModelAdmin):\n list_display = ['id', 'event_type', 'owner', 'owner_type', 'owner_id',\n 'family', 'place', 'site', 'pretty_date', 'sort_date', 'is_primary',\n 'is_private', 'proof', 'status', 'edit_date', 'sentence']\n\n def pretty_date(self, obj):\n return read_and_pprint_date(obj.date)\n pretty_date.short_description = 'Date'\n\n\nclass ExclusionAdmin(RootsMagicModelAdmin):\n pass\n\n\nclass FactTypeAdmin(RootsMagicModelAdmin):\n list_display = ['id', 'owner_type', 'name', 'abbreviation',\n 'gedcom_tag', 'use_value', 'use_date', 'use_place', 'sentence', 'flags'\n ]\n\n\nclass FamilyAdmin(RootsMagicModelAdmin):\n list_display = ['id', 'father', 'mother', 'child', 'husband_order',\n 'wife_order', 'is_private', 'proof', 'spouse_label', 'father_label',\n 'mother_label']\n\n\nclass GroupAdmin(RootsMagicModelAdmin):\n pass\n\n\nclass LabelAdmin(RootsMagicModelAdmin):\n pass\n\n\nclass LinkAncestryAdmin(RootsMagicModelAdmin):\n pass\n\n\nclass LinkAdmin(RootsMagicModelAdmin):\n list_display = ['id', 'ext_system', 'link_type', 'rootsmagic', 'ext_id',\n 'modified', 'ext_version', 'ext_date', 'status', 'note']\n\n\nclass MediaLinkAdmin(RootsMagicModelAdmin):\n list_display = ['link_id', 'media', 'owner', 'owner_type', 'owner_id',\n 'is_primary', 'include_1', 'include_2', 'include_3', 'include_4',\n 'sort_order', 'rectangle_left', 'rectangle_top', 'rectangle_right',\n 'rectangle_bottom', 'note', 'caption', 'reference_number', 'date',\n 'sort_date']\n\n\nclass MultimediaAdmin(RootsMagicModelAdmin):\n list_display = ['id', 'media_type', 'media_path', 'media_file', 'url',\n 'thumbnail', 'caption', 'reference_number', 'pretty_date', 'sort_date']\n\n def pretty_date(self, obj):\n return read_and_pprint_date(obj.date)\n pretty_date.short_description = 'Date'\n\n\nclass NameAdmin(RootsMagicModelAdmin):\n list_display = ['id', 'owner', 'surname', 'given', 'prefix', 'suffix',\n 'nickname', 'name_type', 'date', 'sort_date', 'is_primary',\n 'is_private', 'proof', 'edit_date', 'sentence', 'birth_year',\n 'death_year']\n\n\nclass PersonAdmin(RootsMagicModelAdmin):\n list_display = ['id', 'primary_name', 'sex_short', 'edit_date',\n 'parent', 'spouse', 'color', 'relate_1', 'relate_2', 'flags',\n 'is_living', 'is_private', 'proof', 'unique_id', 'bookmark']\n\n\nclass PlaceAdmin(RootsMagicModelAdmin):\n list_display = ['id', 'place_type', 'name', 'abbreviation',\n 'normalized', 'master_place', 'pretty_latlong',\n 'exact_latituate_longitude', 'note']\n raw_id_fields = ['master_place']\n readonly_fields = ['pretty_latlong']\n\n\nclass ResearchItemAdmin(RootsMagicModelAdmin):\n pass\n\n\nclass ResearchAdmin(RootsMagicModelAdmin):\n pass\n\n\nclass RoleAdmin(RootsMagicModelAdmin):\n list_display = ['id', 'role_name', 'event_type', 'role_type', 'sentence']\n\n\nclass SourceAdmin(RootsMagicModelAdmin):\n raw_id_fields = ['template']\n\n\nclass SourceTemplateAdmin(RootsMagicModelAdmin):\n pass\n\n\nclass UrlAdmin(RootsMagicModelAdmin):\n list_display = ['id', 'owner_type', 'owner_id', 'link_type', 'name',\n 'url', 'note']\n\n\nclass WitnessAdmin(RootsMagicModelAdmin):\n list_display = ['id', 'event', 'person', 'witness_order', 'role',\n 
'sentence', 'note', 'given', 'surname', 'prefix', 'suffix']\n\n\n<mask token>\n",
"step-5": "from django.contrib import admin\n\nfrom .models import (AddressLink, Address, Child, Citation,\n Configuration, Event, Exclusion, FactType,\n Family, Group, Label, LinkAncestry,\n Link, MediaLink, Multimedia, Name,\n Person, Place, ResearchItem, Research,\n Role, Source, SourceTemplate, Url,\n Witness)\n\nfrom . import EXODUS_DB_NAME\nfrom .utils.admin import MultiDBModelAdmin\nfrom .utils.rootsmagic import read_and_pprint_date\n\n\nclass RootsMagicModelAdmin(MultiDBModelAdmin):\n using = EXODUS_DB_NAME\n\n\nclass AddressLinkAdmin(RootsMagicModelAdmin):\n list_display = [\n \"id\",\n \"owner_type\",\n \"address\",\n \"owner_id\",\n \"address_number\",\n \"details\",\n ]\n\nclass AddressAdmin(RootsMagicModelAdmin):\n pass\n\nclass ChildAdmin(RootsMagicModelAdmin):\n list_display = [\n \"record_id\",\n \"child\",\n \"family\",\n \"father_relationship\",\n \"mother_relationship\",\n \"child_order\",\n \"is_private\",\n \"father_proof\",\n \"mother_proof\",\n \"note\",\n ]\n raw_id_fields = [\n 'child',\n 'family',\n ]\n\nclass CitationAdmin(RootsMagicModelAdmin):\n list_display = [\n \"id\",\n \"owner_type\",\n \"source_id\",\n \"owner_id\",\n \"quality\",\n \"is_private\",\n \"comments\",\n \"actual_text\",\n \"reference_number\",\n \"flags\",\n # \"fields\",\n ]\n\nclass ConfigurationAdmin(RootsMagicModelAdmin):\n pass\n\nclass EventAdmin(RootsMagicModelAdmin):\n list_display = [\n \"id\",\n \"event_type\",\n \"owner\",\n \"owner_type\",\n \"owner_id\",\n \"family\",\n \"place\",\n \"site\",\n # \"date\",\n \"pretty_date\",\n \"sort_date\",\n \"is_primary\",\n \"is_private\",\n \"proof\",\n \"status\",\n \"edit_date\",\n \"sentence\",\n # \"details\",\n # \"note\",\n ]\n\n def pretty_date(self, obj):\n return read_and_pprint_date(obj.date)\n pretty_date.short_description = \"Date\"\n\nclass ExclusionAdmin(RootsMagicModelAdmin):\n pass\n\nclass FactTypeAdmin(RootsMagicModelAdmin):\n list_display = [\n \"id\",\n \"owner_type\",\n \"name\",\n \"abbreviation\",\n \"gedcom_tag\",\n \"use_value\",\n \"use_date\",\n \"use_place\",\n \"sentence\",\n \"flags\",\n ]\n\nclass FamilyAdmin(RootsMagicModelAdmin):\n list_display = [\n \"id\",\n \"father\",\n \"mother\",\n \"child\",\n \"husband_order\",\n \"wife_order\",\n \"is_private\",\n \"proof\",\n \"spouse_label\",\n \"father_label\",\n \"mother_label\",\n # \"note\",\n ]\n\nclass GroupAdmin(RootsMagicModelAdmin):\n pass\n\nclass LabelAdmin(RootsMagicModelAdmin):\n pass\n\nclass LinkAncestryAdmin(RootsMagicModelAdmin):\n pass\n\nclass LinkAdmin(RootsMagicModelAdmin):\n list_display = [\n \"id\",\n \"ext_system\",\n \"link_type\",\n \"rootsmagic\",\n \"ext_id\",\n \"modified\",\n \"ext_version\",\n \"ext_date\",\n \"status\",\n \"note\",\n ]\n\nclass MediaLinkAdmin(RootsMagicModelAdmin):\n list_display = [\n \"link_id\",\n \"media\",\n \"owner\",\n \"owner_type\",\n \"owner_id\",\n \"is_primary\",\n \"include_1\",\n \"include_2\",\n \"include_3\",\n \"include_4\",\n \"sort_order\",\n \"rectangle_left\",\n \"rectangle_top\",\n \"rectangle_right\",\n \"rectangle_bottom\",\n \"note\",\n \"caption\",\n \"reference_number\",\n \"date\",\n \"sort_date\",\n # \"description\",\n ]\n\nclass MultimediaAdmin(RootsMagicModelAdmin):\n list_display = [\n \"id\",\n \"media_type\",\n \"media_path\",\n \"media_file\",\n \"url\",\n \"thumbnail\",\n \"caption\",\n \"reference_number\",\n # \"date\",\n \"pretty_date\",\n \"sort_date\",\n # \"description\",\n ]\n\n def pretty_date(self, obj):\n return read_and_pprint_date(obj.date)\n 
pretty_date.short_description = \"Date\"\n\nclass NameAdmin(RootsMagicModelAdmin):\n list_display = [\n \"id\",\n \"owner\",\n \"surname\",\n \"given\",\n \"prefix\",\n \"suffix\",\n \"nickname\",\n \"name_type\",\n \"date\",\n \"sort_date\",\n \"is_primary\",\n \"is_private\",\n \"proof\",\n \"edit_date\",\n \"sentence\",\n # \"note\",\n \"birth_year\",\n \"death_year\",\n ]\n\nclass PersonAdmin(RootsMagicModelAdmin):\n list_display = [\n \"id\",\n 'primary_name',\n \"sex_short\",\n \"edit_date\",\n \"parent\",\n \"spouse\",\n \"color\",\n \"relate_1\",\n \"relate_2\",\n \"flags\",\n \"is_living\",\n \"is_private\",\n \"proof\",\n \"unique_id\",\n \"bookmark\",\n # \"note\",\n ]\n\nclass PlaceAdmin(RootsMagicModelAdmin):\n list_display = [\n \"id\",\n \"place_type\",\n \"name\",\n \"abbreviation\",\n \"normalized\",\n \"master_place\",\n # \"latitude\",\n # \"longitude\",\n \"pretty_latlong\",\n \"exact_latituate_longitude\",\n \"note\",\n ]\n raw_id_fields = [\n \"master_place\"\n ]\n readonly_fields = [\n \"pretty_latlong\"\n ]\n\nclass ResearchItemAdmin(RootsMagicModelAdmin):\n pass\n\nclass ResearchAdmin(RootsMagicModelAdmin):\n pass\n\nclass RoleAdmin(RootsMagicModelAdmin):\n list_display = [\n \"id\",\n \"role_name\",\n \"event_type\",\n \"role_type\",\n \"sentence\",\n ]\n\nclass SourceAdmin(RootsMagicModelAdmin):\n raw_id_fields = ['template']\n\nclass SourceTemplateAdmin(RootsMagicModelAdmin):\n pass\n\nclass UrlAdmin(RootsMagicModelAdmin):\n list_display = [\n \"id\",\n \"owner_type\",\n \"owner_id\",\n \"link_type\",\n \"name\",\n \"url\",\n \"note\",\n ]\n\nclass WitnessAdmin(RootsMagicModelAdmin):\n list_display = [\n \"id\",\n \"event\",\n \"person\",\n \"witness_order\",\n \"role\",\n \"sentence\",\n \"note\",\n \"given\",\n \"surname\",\n \"prefix\",\n \"suffix\",\n ]\n\n\n\nadmin.site.register(AddressLink, AddressLinkAdmin)\nadmin.site.register(Address, AddressAdmin)\nadmin.site.register(Child, ChildAdmin)\nadmin.site.register(Citation, CitationAdmin)\nadmin.site.register(Configuration, ConfigurationAdmin)\nadmin.site.register(Event, EventAdmin)\nadmin.site.register(Exclusion, ExclusionAdmin)\nadmin.site.register(FactType, FactTypeAdmin)\nadmin.site.register(Family, FamilyAdmin)\nadmin.site.register(Group, GroupAdmin)\nadmin.site.register(Label, LabelAdmin)\nadmin.site.register(LinkAncestry, LinkAncestryAdmin)\nadmin.site.register(Link, LinkAdmin)\nadmin.site.register(MediaLink, MediaLinkAdmin)\nadmin.site.register(Multimedia, MultimediaAdmin)\nadmin.site.register(Name, NameAdmin)\nadmin.site.register(Person, PersonAdmin)\nadmin.site.register(Place, PlaceAdmin)\nadmin.site.register(ResearchItem, ResearchItemAdmin)\nadmin.site.register(Research, ResearchAdmin)\nadmin.site.register(Role, RoleAdmin)\nadmin.site.register(Source, SourceAdmin)\nadmin.site.register(SourceTemplate, SourceTemplateAdmin)\nadmin.site.register(Url, UrlAdmin)\nadmin.site.register(Witness, WitnessAdmin)\n",
"step-ids": [
31,
35,
38,
39,
48
]
}
|
[
31,
35,
38,
39,
48
] |
# coding=utf-8
"""
PYOPENGL-TOOLBOX UTILS
General purpose functions.
MIT License
Copyright (c) 2015-2019 Pablo Pizarro R.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
# Library imports
from __future__ import print_function
from PyOpenGLtoolbox.geometry import draw_vertex_list
from PyOpenGLtoolbox.mathlib import Point3
import sys as _sys
# noinspection PyPep8Naming
import OpenGL.GL as _gl
# noinspection PyPep8Naming
import OpenGL.GLUT as _glut
# Constants
_UTILS_COLOR_BLACK = [0, 0, 0]
_UTILS_COLOR_WHITE = [1, 1, 1]
_UTILS_ERRS = [False]
def print_gl_error(err_msg):
"""
Prints an OpenGL error to console.
:param err_msg: Error message
:type err_msg: basestring
"""
if len(err_msg) == 0:
return
print('[GL-ERROR] {0}'.format(err_msg), file=_sys.stderr)
# noinspection PyUnresolvedReferences
def create_axes(length, both=False, text=False, font=_glut.GLUT_BITMAP_HELVETICA_18):
"""
Create axes system.
:param length: Axes length
:param both: Both axes
:param text: Show axes names (x,y,z)
:param font: Font
:type length: float, int
:type both: bool
:type text: bool
:type font: int
:return: OpenGL list
"""
if length > 0: # Valid length
        # Create points
x = Point3(length, 0, 0)
y = Point3(0, length, 0)
z = Point3(0, 0, length)
o = Point3()
# Create list
lista = _gl.glGenLists(1)
_gl.glNewList(lista, _gl.GL_COMPILE)
        # Init primitive
_gl.glBegin(_gl.GL_LINES)
_gl.glColor4fv([1, 0, 0, 1])
draw_vertex_list([o, x])
_gl.glColor4fv([0, 1, 0, 1])
draw_vertex_list([o, y])
_gl.glColor4fv([0, 0, 1, 1])
draw_vertex_list([o, z])
if both: # Draw axes in both directions
x = Point3(-length, 0, 0)
y = Point3(0, -length, 0)
z = Point3(0, 0, -length)
_gl.glColor4fv([1, 0, 0, 1])
draw_vertex_list([o, x])
_gl.glColor4fv([0, 1, 0, 1])
draw_vertex_list([o, y])
_gl.glColor4fv([0, 0, 1, 1])
draw_vertex_list([o, z])
# End primitive
_gl.glEnd()
if text: # Draw axes names
draw_text('x', Point3(length + 60, 0, -15), [1, 0, 0], font)
draw_text('y', Point3(0, length + 50, -15), [0, 1, 0], font)
draw_text('z', Point3(+0, +0, length + 50), [0, 0, 1], font)
if both:
draw_text('-x', Point3(-length - 60, 0, -15), [1, 0, 0], font)
draw_text('-y', Point3(0, -length - 70, -15), [0, 1, 0], font)
draw_text('-z', Point3(+0, +0, -length - 80), [0, 0, 1], font)
# Returns list
_gl.glEndList()
return lista
else:
raise Exception('Axes length must be positive, greater than zero')
# noinspection PyUnresolvedReferences
def draw_text(text, pos, color=None, font=_glut.GLUT_BITMAP_TIMES_ROMAN_24, linespace=20):
"""Dibuja un texto en una posicon dada por un punto point3"""
if color is None:
color = _UTILS_COLOR_WHITE
_gl.glColor3fv(color)
if isinstance(pos, Point3):
x = pos.get_x()
y = pos.get_y()
z = pos.get_z()
_gl.glRasterPos3f(x, y, z)
for char in text:
if char == "\n":
y += linespace
_gl.glRasterPos3f(x, y, z)
else:
# noinspection PyBroadException
try:
                    _glut.glutBitmapCharacter(font, ord(char))
except:
if not _UTILS_ERRS[0]:
                        print_gl_error('Actual OpenGL version does not support glutBitmapCharacter function')
_UTILS_ERRS[0] = True
else:
raise Exception('Point must be Point3 type')
def get_rgb_normalized(r, g, b, a=1.0):
"""
Return rgb color normalized (from 0 to 1).
:param r: Red color
:param g: Green color
:param b: Blue color
:param a: Alpha
:type r: float, int
:type g: float, int
:type b: float, int
:type a: float
:return: RGBA tuple
:rtype: tuple
"""
if r <= 1 and g <= 1 and b <= 1:
return r, g, b, a
return r / 255.0, g / 255.0, b / 255.0, a
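# --- Illustrative usage (editor's sketch, not part of the original module) ---
# If any channel is above 1 the values are treated as 0-255 integers and all
# three are divided by 255.0; values already in [0, 1] pass through unchanged:
#
# >>> get_rgb_normalized(255, 128, 0)
# (1.0, 0.50196..., 0.0, 1.0)
# >>> get_rgb_normalized(0.2, 0.4, 0.6)
# (0.2, 0.4, 0.6, 1.0)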
|
normal
|
{
"blob_id": "cffcfa08cd919f93dfe2ab8dc676efc76feafab3",
"index": 2123,
"step-1": "<mask token>\n\n\ndef create_axes(length, both=False, text=False, font=_glut.\n GLUT_BITMAP_HELVETICA_18):\n \"\"\"\n Create axes system.\n\n :param length: Axes length\n :param both: Both axes\n :param text: Show axes names (x,y,z)\n :param font: Font\n :type length: float, int\n :type both: bool\n :type text: bool\n :type font: int\n :return: OpenGL list\n \"\"\"\n if length > 0:\n x = Point3(length, 0, 0)\n y = Point3(0, length, 0)\n z = Point3(0, 0, length)\n o = Point3()\n lista = _gl.glGenLists(1)\n _gl.glNewList(lista, _gl.GL_COMPILE)\n _gl.glBegin(_gl.GL_LINES)\n _gl.glColor4fv([1, 0, 0, 1])\n draw_vertex_list([o, x])\n _gl.glColor4fv([0, 1, 0, 1])\n draw_vertex_list([o, y])\n _gl.glColor4fv([0, 0, 1, 1])\n draw_vertex_list([o, z])\n if both:\n x = Point3(-length, 0, 0)\n y = Point3(0, -length, 0)\n z = Point3(0, 0, -length)\n _gl.glColor4fv([1, 0, 0, 1])\n draw_vertex_list([o, x])\n _gl.glColor4fv([0, 1, 0, 1])\n draw_vertex_list([o, y])\n _gl.glColor4fv([0, 0, 1, 1])\n draw_vertex_list([o, z])\n _gl.glEnd()\n if text:\n draw_text('x', Point3(length + 60, 0, -15), [1, 0, 0], font)\n draw_text('y', Point3(0, length + 50, -15), [0, 1, 0], font)\n draw_text('z', Point3(+0, +0, length + 50), [0, 0, 1], font)\n if both:\n draw_text('-x', Point3(-length - 60, 0, -15), [1, 0, 0], font)\n draw_text('-y', Point3(0, -length - 70, -15), [0, 1, 0], font)\n draw_text('-z', Point3(+0, +0, -length - 80), [0, 0, 1], font)\n _gl.glEndList()\n return lista\n else:\n raise Exception('Axes length must be positive, greater than zero')\n\n\ndef draw_text(text, pos, color=None, font=_glut.GLUT_BITMAP_TIMES_ROMAN_24,\n linespace=20):\n \"\"\"Dibuja un texto en una posicon dada por un punto point3\"\"\"\n if color is None:\n color = _UTILS_COLOR_WHITE\n _gl.glColor3fv(color)\n if isinstance(pos, Point3):\n x = pos.get_x()\n y = pos.get_y()\n z = pos.get_z()\n _gl.glRasterPos3f(x, y, z)\n for char in text:\n if char == '\\n':\n y += linespace\n _gl.glRasterPos3f(x, y, z)\n else:\n try:\n glutBitmapCharacter(font, ord(char))\n except:\n if not _UTILS_ERRS[0]:\n print_gl_error(\n 'Actual OpenGL version doest not support glutBitmapCharacter function'\n )\n _UTILS_ERRS[0] = True\n else:\n raise Exception('Point must be Point3 type')\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef create_axes(length, both=False, text=False, font=_glut.\n GLUT_BITMAP_HELVETICA_18):\n \"\"\"\n Create axes system.\n\n :param length: Axes length\n :param both: Both axes\n :param text: Show axes names (x,y,z)\n :param font: Font\n :type length: float, int\n :type both: bool\n :type text: bool\n :type font: int\n :return: OpenGL list\n \"\"\"\n if length > 0:\n x = Point3(length, 0, 0)\n y = Point3(0, length, 0)\n z = Point3(0, 0, length)\n o = Point3()\n lista = _gl.glGenLists(1)\n _gl.glNewList(lista, _gl.GL_COMPILE)\n _gl.glBegin(_gl.GL_LINES)\n _gl.glColor4fv([1, 0, 0, 1])\n draw_vertex_list([o, x])\n _gl.glColor4fv([0, 1, 0, 1])\n draw_vertex_list([o, y])\n _gl.glColor4fv([0, 0, 1, 1])\n draw_vertex_list([o, z])\n if both:\n x = Point3(-length, 0, 0)\n y = Point3(0, -length, 0)\n z = Point3(0, 0, -length)\n _gl.glColor4fv([1, 0, 0, 1])\n draw_vertex_list([o, x])\n _gl.glColor4fv([0, 1, 0, 1])\n draw_vertex_list([o, y])\n _gl.glColor4fv([0, 0, 1, 1])\n draw_vertex_list([o, z])\n _gl.glEnd()\n if text:\n draw_text('x', Point3(length + 60, 0, -15), [1, 0, 0], font)\n draw_text('y', Point3(0, length + 50, -15), [0, 1, 0], font)\n draw_text('z', Point3(+0, +0, length + 50), [0, 0, 1], font)\n if both:\n draw_text('-x', Point3(-length - 60, 0, -15), [1, 0, 0], font)\n draw_text('-y', Point3(0, -length - 70, -15), [0, 1, 0], font)\n draw_text('-z', Point3(+0, +0, -length - 80), [0, 0, 1], font)\n _gl.glEndList()\n return lista\n else:\n raise Exception('Axes length must be positive, greater than zero')\n\n\ndef draw_text(text, pos, color=None, font=_glut.GLUT_BITMAP_TIMES_ROMAN_24,\n linespace=20):\n \"\"\"Dibuja un texto en una posicon dada por un punto point3\"\"\"\n if color is None:\n color = _UTILS_COLOR_WHITE\n _gl.glColor3fv(color)\n if isinstance(pos, Point3):\n x = pos.get_x()\n y = pos.get_y()\n z = pos.get_z()\n _gl.glRasterPos3f(x, y, z)\n for char in text:\n if char == '\\n':\n y += linespace\n _gl.glRasterPos3f(x, y, z)\n else:\n try:\n glutBitmapCharacter(font, ord(char))\n except:\n if not _UTILS_ERRS[0]:\n print_gl_error(\n 'Actual OpenGL version doest not support glutBitmapCharacter function'\n )\n _UTILS_ERRS[0] = True\n else:\n raise Exception('Point must be Point3 type')\n\n\ndef get_rgb_normalized(r, g, b, a=1.0):\n \"\"\"\n Return rgb color normalized (from 0 to 1).\n\n :param r: Red color\n :param g: Green color\n :param b: Blue color\n :param a: Alpha\n :type r: float, int\n :type g: float, int\n :type b: float, int\n :type a: float\n :return: RGBA tuple\n :rtype: tuple\n \"\"\"\n if r <= 1 and g <= 1 and b <= 1:\n return r, g, b, a\n return r / 255.0, g / 255.0, b / 255.0, a\n",
"step-3": "<mask token>\n_UTILS_COLOR_BLACK = [0, 0, 0]\n_UTILS_COLOR_WHITE = [1, 1, 1]\n_UTILS_ERRS = [False]\n\n\ndef print_gl_error(err_msg):\n \"\"\"\n Prints an OpenGL error to console.\n\n :param err_msg: Error message\n :type err_msg: basestring\n \"\"\"\n if len(err_msg) == 0:\n return\n print('[GL-ERROR] {0}'.format(err_msg), file=_sys.stderr)\n\n\ndef create_axes(length, both=False, text=False, font=_glut.\n GLUT_BITMAP_HELVETICA_18):\n \"\"\"\n Create axes system.\n\n :param length: Axes length\n :param both: Both axes\n :param text: Show axes names (x,y,z)\n :param font: Font\n :type length: float, int\n :type both: bool\n :type text: bool\n :type font: int\n :return: OpenGL list\n \"\"\"\n if length > 0:\n x = Point3(length, 0, 0)\n y = Point3(0, length, 0)\n z = Point3(0, 0, length)\n o = Point3()\n lista = _gl.glGenLists(1)\n _gl.glNewList(lista, _gl.GL_COMPILE)\n _gl.glBegin(_gl.GL_LINES)\n _gl.glColor4fv([1, 0, 0, 1])\n draw_vertex_list([o, x])\n _gl.glColor4fv([0, 1, 0, 1])\n draw_vertex_list([o, y])\n _gl.glColor4fv([0, 0, 1, 1])\n draw_vertex_list([o, z])\n if both:\n x = Point3(-length, 0, 0)\n y = Point3(0, -length, 0)\n z = Point3(0, 0, -length)\n _gl.glColor4fv([1, 0, 0, 1])\n draw_vertex_list([o, x])\n _gl.glColor4fv([0, 1, 0, 1])\n draw_vertex_list([o, y])\n _gl.glColor4fv([0, 0, 1, 1])\n draw_vertex_list([o, z])\n _gl.glEnd()\n if text:\n draw_text('x', Point3(length + 60, 0, -15), [1, 0, 0], font)\n draw_text('y', Point3(0, length + 50, -15), [0, 1, 0], font)\n draw_text('z', Point3(+0, +0, length + 50), [0, 0, 1], font)\n if both:\n draw_text('-x', Point3(-length - 60, 0, -15), [1, 0, 0], font)\n draw_text('-y', Point3(0, -length - 70, -15), [0, 1, 0], font)\n draw_text('-z', Point3(+0, +0, -length - 80), [0, 0, 1], font)\n _gl.glEndList()\n return lista\n else:\n raise Exception('Axes length must be positive, greater than zero')\n\n\ndef draw_text(text, pos, color=None, font=_glut.GLUT_BITMAP_TIMES_ROMAN_24,\n linespace=20):\n \"\"\"Dibuja un texto en una posicon dada por un punto point3\"\"\"\n if color is None:\n color = _UTILS_COLOR_WHITE\n _gl.glColor3fv(color)\n if isinstance(pos, Point3):\n x = pos.get_x()\n y = pos.get_y()\n z = pos.get_z()\n _gl.glRasterPos3f(x, y, z)\n for char in text:\n if char == '\\n':\n y += linespace\n _gl.glRasterPos3f(x, y, z)\n else:\n try:\n glutBitmapCharacter(font, ord(char))\n except:\n if not _UTILS_ERRS[0]:\n print_gl_error(\n 'Actual OpenGL version doest not support glutBitmapCharacter function'\n )\n _UTILS_ERRS[0] = True\n else:\n raise Exception('Point must be Point3 type')\n\n\ndef get_rgb_normalized(r, g, b, a=1.0):\n \"\"\"\n Return rgb color normalized (from 0 to 1).\n\n :param r: Red color\n :param g: Green color\n :param b: Blue color\n :param a: Alpha\n :type r: float, int\n :type g: float, int\n :type b: float, int\n :type a: float\n :return: RGBA tuple\n :rtype: tuple\n \"\"\"\n if r <= 1 and g <= 1 and b <= 1:\n return r, g, b, a\n return r / 255.0, g / 255.0, b / 255.0, a\n",
"step-4": "<mask token>\nfrom __future__ import print_function\nfrom PyOpenGLtoolbox.geometry import draw_vertex_list\nfrom PyOpenGLtoolbox.mathlib import Point3\nimport sys as _sys\nimport OpenGL.GL as _gl\nimport OpenGL.GLUT as _glut\n_UTILS_COLOR_BLACK = [0, 0, 0]\n_UTILS_COLOR_WHITE = [1, 1, 1]\n_UTILS_ERRS = [False]\n\n\ndef print_gl_error(err_msg):\n \"\"\"\n Prints an OpenGL error to console.\n\n :param err_msg: Error message\n :type err_msg: basestring\n \"\"\"\n if len(err_msg) == 0:\n return\n print('[GL-ERROR] {0}'.format(err_msg), file=_sys.stderr)\n\n\ndef create_axes(length, both=False, text=False, font=_glut.\n GLUT_BITMAP_HELVETICA_18):\n \"\"\"\n Create axes system.\n\n :param length: Axes length\n :param both: Both axes\n :param text: Show axes names (x,y,z)\n :param font: Font\n :type length: float, int\n :type both: bool\n :type text: bool\n :type font: int\n :return: OpenGL list\n \"\"\"\n if length > 0:\n x = Point3(length, 0, 0)\n y = Point3(0, length, 0)\n z = Point3(0, 0, length)\n o = Point3()\n lista = _gl.glGenLists(1)\n _gl.glNewList(lista, _gl.GL_COMPILE)\n _gl.glBegin(_gl.GL_LINES)\n _gl.glColor4fv([1, 0, 0, 1])\n draw_vertex_list([o, x])\n _gl.glColor4fv([0, 1, 0, 1])\n draw_vertex_list([o, y])\n _gl.glColor4fv([0, 0, 1, 1])\n draw_vertex_list([o, z])\n if both:\n x = Point3(-length, 0, 0)\n y = Point3(0, -length, 0)\n z = Point3(0, 0, -length)\n _gl.glColor4fv([1, 0, 0, 1])\n draw_vertex_list([o, x])\n _gl.glColor4fv([0, 1, 0, 1])\n draw_vertex_list([o, y])\n _gl.glColor4fv([0, 0, 1, 1])\n draw_vertex_list([o, z])\n _gl.glEnd()\n if text:\n draw_text('x', Point3(length + 60, 0, -15), [1, 0, 0], font)\n draw_text('y', Point3(0, length + 50, -15), [0, 1, 0], font)\n draw_text('z', Point3(+0, +0, length + 50), [0, 0, 1], font)\n if both:\n draw_text('-x', Point3(-length - 60, 0, -15), [1, 0, 0], font)\n draw_text('-y', Point3(0, -length - 70, -15), [0, 1, 0], font)\n draw_text('-z', Point3(+0, +0, -length - 80), [0, 0, 1], font)\n _gl.glEndList()\n return lista\n else:\n raise Exception('Axes length must be positive, greater than zero')\n\n\ndef draw_text(text, pos, color=None, font=_glut.GLUT_BITMAP_TIMES_ROMAN_24,\n linespace=20):\n \"\"\"Dibuja un texto en una posicon dada por un punto point3\"\"\"\n if color is None:\n color = _UTILS_COLOR_WHITE\n _gl.glColor3fv(color)\n if isinstance(pos, Point3):\n x = pos.get_x()\n y = pos.get_y()\n z = pos.get_z()\n _gl.glRasterPos3f(x, y, z)\n for char in text:\n if char == '\\n':\n y += linespace\n _gl.glRasterPos3f(x, y, z)\n else:\n try:\n glutBitmapCharacter(font, ord(char))\n except:\n if not _UTILS_ERRS[0]:\n print_gl_error(\n 'Actual OpenGL version doest not support glutBitmapCharacter function'\n )\n _UTILS_ERRS[0] = True\n else:\n raise Exception('Point must be Point3 type')\n\n\ndef get_rgb_normalized(r, g, b, a=1.0):\n \"\"\"\n Return rgb color normalized (from 0 to 1).\n\n :param r: Red color\n :param g: Green color\n :param b: Blue color\n :param a: Alpha\n :type r: float, int\n :type g: float, int\n :type b: float, int\n :type a: float\n :return: RGBA tuple\n :rtype: tuple\n \"\"\"\n if r <= 1 and g <= 1 and b <= 1:\n return r, g, b, a\n return r / 255.0, g / 255.0, b / 255.0, a\n",
"step-5": "# coding=utf-8\n\"\"\"\nPYOPENGL-TOOLBOX UTILS\nGeneral purpouse functions.\n\nMIT License\nCopyright (c) 2015-2019 Pablo Pizarro R.\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n\"\"\"\n\n# Library imports\nfrom __future__ import print_function\nfrom PyOpenGLtoolbox.geometry import draw_vertex_list\nfrom PyOpenGLtoolbox.mathlib import Point3\nimport sys as _sys\n\n# noinspection PyPep8Naming\nimport OpenGL.GL as _gl\n\n# noinspection PyPep8Naming\nimport OpenGL.GLUT as _glut\n\n# Constants\n_UTILS_COLOR_BLACK = [0, 0, 0]\n_UTILS_COLOR_WHITE = [1, 1, 1]\n_UTILS_ERRS = [False]\n\n\ndef print_gl_error(err_msg):\n \"\"\"\n Prints an OpenGL error to console.\n\n :param err_msg: Error message\n :type err_msg: basestring\n \"\"\"\n if len(err_msg) == 0:\n return\n print('[GL-ERROR] {0}'.format(err_msg), file=_sys.stderr)\n\n\n# noinspection PyUnresolvedReferences\ndef create_axes(length, both=False, text=False, font=_glut.GLUT_BITMAP_HELVETICA_18):\n \"\"\"\n Create axes system.\n\n :param length: Axes length\n :param both: Both axes\n :param text: Show axes names (x,y,z)\n :param font: Font\n :type length: float, int\n :type both: bool\n :type text: bool\n :type font: int\n :return: OpenGL list\n \"\"\"\n if length > 0: # Valid length\n\n # Crate points\n x = Point3(length, 0, 0)\n y = Point3(0, length, 0)\n z = Point3(0, 0, length)\n o = Point3()\n\n # Create list\n lista = _gl.glGenLists(1)\n _gl.glNewList(lista, _gl.GL_COMPILE)\n\n # Init primitve\n _gl.glBegin(_gl.GL_LINES)\n _gl.glColor4fv([1, 0, 0, 1])\n draw_vertex_list([o, x])\n _gl.glColor4fv([0, 1, 0, 1])\n draw_vertex_list([o, y])\n _gl.glColor4fv([0, 0, 1, 1])\n draw_vertex_list([o, z])\n\n if both: # Draw axes in both directions\n x = Point3(-length, 0, 0)\n y = Point3(0, -length, 0)\n z = Point3(0, 0, -length)\n _gl.glColor4fv([1, 0, 0, 1])\n draw_vertex_list([o, x])\n _gl.glColor4fv([0, 1, 0, 1])\n draw_vertex_list([o, y])\n _gl.glColor4fv([0, 0, 1, 1])\n draw_vertex_list([o, z])\n\n # End primitive\n _gl.glEnd()\n\n if text: # Draw axes names\n draw_text('x', Point3(length + 60, 0, -15), [1, 0, 0], font)\n draw_text('y', Point3(0, length + 50, -15), [0, 1, 0], font)\n draw_text('z', Point3(+0, +0, length + 50), [0, 0, 1], font)\n\n if both:\n draw_text('-x', Point3(-length - 60, 0, -15), [1, 0, 0], font)\n draw_text('-y', Point3(0, -length - 70, -15), [0, 1, 0], font)\n draw_text('-z', Point3(+0, +0, -length - 80), [0, 0, 1], font)\n\n # Returns list\n _gl.glEndList()\n return lista\n\n else:\n raise Exception('Axes length must be 
positive, greater than zero')\n\n\n# noinspection PyUnresolvedReferences\ndef draw_text(text, pos, color=None, font=_glut.GLUT_BITMAP_TIMES_ROMAN_24, linespace=20):\n \"\"\"Dibuja un texto en una posicon dada por un punto point3\"\"\"\n if color is None:\n color = _UTILS_COLOR_WHITE\n _gl.glColor3fv(color)\n if isinstance(pos, Point3):\n x = pos.get_x()\n y = pos.get_y()\n z = pos.get_z()\n _gl.glRasterPos3f(x, y, z)\n for char in text:\n if char == \"\\n\":\n y += linespace\n _gl.glRasterPos3f(x, y, z)\n else:\n # noinspection PyBroadException\n try:\n glutBitmapCharacter(font, ord(char))\n except:\n if not _UTILS_ERRS[0]:\n print_gl_error('Actual OpenGL version doest not support glutBitmapCharacter function')\n _UTILS_ERRS[0] = True\n else:\n raise Exception('Point must be Point3 type')\n\n\ndef get_rgb_normalized(r, g, b, a=1.0):\n \"\"\"\n Return rgb color normalized (from 0 to 1).\n\n :param r: Red color\n :param g: Green color\n :param b: Blue color\n :param a: Alpha\n :type r: float, int\n :type g: float, int\n :type b: float, int\n :type a: float\n :return: RGBA tuple\n :rtype: tuple\n \"\"\"\n if r <= 1 and g <= 1 and b <= 1:\n return r, g, b, a\n return r / 255.0, g / 255.0, b / 255.0, a\n",
"step-ids": [
2,
3,
5,
6,
7
]
}
|
[
2,
3,
5,
6,
7
] |
import random
def main():
#print('You rolled a die')
return random.randint(1,6)
if __name__ == "__main__":
main()
|
normal
|
{
"blob_id": "6d92b944ab8503d3635626c0c23021fc2b40dce3",
"index": 5732,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef main():\n return random.randint(1, 6)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef main():\n return random.randint(1, 6)\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "import random\n\n\ndef main():\n return random.randint(1, 6)\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "import random\n\ndef main():\n #print('You rolled a die')\n return random.randint(1,6)\n\nif __name__== \"__main__\":\n main()\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from menu_sun_integration.application.adapters.customer_adapter import CustomerAdapter
from menu_sun_integration.infrastructure.brf.builders.brf_base_builder import BRFBaseBuilder
from menu_sun_integration.infrastructure.brf.translators.brf_customer_translator import BRFCustomerTranslator
class BRFCustomerBuilder(BRFBaseBuilder):
def define_translator(self) ->None:
self._translator = BRFCustomerTranslator()
def build_adapter(self) ->None:
self._adapter = CustomerAdapter(client=self._client, translator=
self._translator)
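# --- Illustrative usage (editor's sketch; BRFBaseBuilder and its caller are not
# shown, so the constructor signature below is an assumption) ---
#
# builder = BRFCustomerBuilder(client=brf_client)  # hypothetical construction
# builder.define_translator()                      # must run before build_adapter()
# builder.build_adapter()                          # wires CustomerAdapter(client, translator)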
|
normal
|
{
"blob_id": "8020bac94de3e68193c9891a628a48c537c5afa0",
"index": 9069,
"step-1": "<mask token>\n\n\nclass BRFCustomerBuilder(BRFBaseBuilder):\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass BRFCustomerBuilder(BRFBaseBuilder):\n\n def define_translator(self) ->None:\n self._translator = BRFCustomerTranslator()\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass BRFCustomerBuilder(BRFBaseBuilder):\n\n def define_translator(self) ->None:\n self._translator = BRFCustomerTranslator()\n\n def build_adapter(self) ->None:\n self._adapter = CustomerAdapter(client=self._client, translator=\n self._translator)\n",
"step-4": "from menu_sun_integration.application.adapters.customer_adapter import CustomerAdapter\nfrom menu_sun_integration.infrastructure.brf.builders.brf_base_builder import BRFBaseBuilder\nfrom menu_sun_integration.infrastructure.brf.translators.brf_customer_translator import BRFCustomerTranslator\n\n\nclass BRFCustomerBuilder(BRFBaseBuilder):\n\n def define_translator(self) ->None:\n self._translator = BRFCustomerTranslator()\n\n def build_adapter(self) ->None:\n self._adapter = CustomerAdapter(client=self._client, translator=\n self._translator)\n",
"step-5": null,
"step-ids": [
1,
2,
3,
4
]
}
|
[
1,
2,
3,
4
] |
"""
Test 1, problem 1.
Authors: David Mutchler, Dave Fisher, Valerie Galluzzi, Amanda Stouder,
their colleagues and Nathan Gupta. March 2016.
""" # DONE: 1. PUT YOUR NAME IN THE ABOVE LINE.
def main():
""" Calls the TEST functions in this module. """
test_problem1a()
test_problem1b()
test_problem1c()
def is_palindrome(n):
"""
What comes in: An non-negative integer n.
What goes out: Returns True if the given integer is a palindrome,
that is, if it reads the same backwards and forwards.
Returns False if the given integer is not a palindrome.
Side effects: None.
Examples:
-- if n is 12344321 this function returns True
-- if n is 121121 this function returns True
-- if n is 372273 this function returns True
-- if n is 88 this function returns True
-- if n is 808 this function returns True
-- if n is 1 this function returns True
-- if n is 6556 this function returns True
-- if n is 6557 this function returns False
-- if n is 228 this function returns False
-- if n is 81 this function returns False
"""
####################################################################
# Ask your instructor for help if you do not understand
# the green doc-string above.
####################################################################
forwards = str(n)
backwards = str(n)[::-1]
return forwards == backwards
# ------------------------------------------------------------------
# Students:
# Do NOT touch the above is_palindrome function
# - it has no TODO.
# Do NOT copy code from this function.
#
# Instead, ** CALL ** this function as needed in the problems below.
# ------------------------------------------------------------------
def is_prime(n):
"""
What comes in: An integer n >= 2.
What goes out: True if the given integer is prime, else False.
Side effects: None.
Examples:
-- is_prime(11) returns True
-- is_prime(12) returns False
-- is_prime(2) returns True
Note: The algorithm used here is simple and clear but slow.
"""
for k in range(2, (n // 2) + 1):
if n % k == 0:
return False
return True
# ------------------------------------------------------------------
# Students:
# Do NOT touch the above is_prime function - it has no TODO.
# Do NOT copy code from this function.
#
# Instead, ** CALL ** this function as needed in the problems below.
# ------------------------------------------------------------------
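    # ------------------------------------------------------------------
    # Editor's note (added, not part of the original exercise):
    # the loop above tests every k up to n // 2, so is_prime runs in
    # O(n) time. A common speed-up is to stop at the square root of n:
    #     for k in range(2, int(n ** 0.5) + 1):
    # Shown only as a comment so the starter code stays unchanged.
    # ------------------------------------------------------------------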
def test_problem1a():
""" Tests the problem1a function. """
# ------------------------------------------------------------------
# DONE: 2. Implement this TEST function.
# It TESTS the problem1a function defined below.
# Include at least ** 5 ** tests.
#
# Use the same 4-step process as for previous TEST functions.
# In particular, include both EXPECTED and ACTUAL results.
# ------------------------------------------------------------------
print()
print('--------------------------------------------------')
print('Testing the problem1a function:')
print('--------------------------------------------------')
expected = 95
answer = problem1a(5, 2)
print('Test 1 expected:', expected)
print(' actual: ', answer)
expected = 1576
answer = problem1a(10, 3)
print('Test 2 expected:', expected)
print(' actual: ', answer)
expected = 32312
answer = problem1a(15, 4)
print('Test 3 expected:', expected)
print(' actual: ', answer)
expected = 639655
answer = problem1a(20, 5)
print('Test 4 expected:', expected)
print(' actual: ', answer)
expected = 13321704
answer = problem1a(25, 6)
print('Test 5 expected:', expected)
print(' actual: ', answer)
# This test takes some time to finish but it does work.
expected = 283359305
answer = problem1a(30, 7)
print('Test 6 expected:', expected)
print(' actual: ', answer)
def problem1a(m, p):
"""
What comes in: Positive integers m and p,
with m >= 2 and (5 raised to the pth power) >= m.
What goes out: Returns the sum of all the integers
between m and (5 raised to the pth power), inclusive,
that are prime.
Side effects: None.
Examples:
-- If m is 11 and p = 2, this function returns 83,
because the sum of the primes
             between 11 and (5 to the 2nd power, i.e. 25) is:
11 + 13 + 17 + 19 + 23, which is 83.
-- If m is 70 and p = 3, this function returns 1025,
because the sum of the primes between 70 and
(5 to the 3rd power, i.e. 125) is:
71 + 73 + 79 + 83 + 89 + 97 + 101 + 103 + 107 + 109 + 113,
which is 1025.
-- If m is 2 and p = 1, this function returns 10,
because the sum of the primes between 2 and
(5 to the 1st power, i.e. 5) is:
2 + 3 + 5, which is 10.
-- If m is 1000 and p = 6,
this function returns 13245677 (trust me!)
"""
# ------------------------------------------------------------------
# DONE: 3. Implement and test this function.
# Note that you should write its TEST function first (above).
#
####################################################################
# IMPORTANT:
# ** For full credit you must appropriately use (call)
# ** the is_prime function that is defined above.
####################################################################
# ------------------------------------------------------------------
tot = 0
for i in range(m, 5 ** p + 1):
        if is_prime(i):
tot = tot + i
return tot
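# ----------------------------------------------------------------------
# A sketch of the same computation written as a generator expression.
# The name problem1a_alt is illustrative only; problem1a above is the
# version the tests exercise.
# ----------------------------------------------------------------------
def problem1a_alt(m, p):
    """ Same result as problem1a, expressed with sum() over a generator. """
    return sum(i for i in range(m, 5 ** p + 1) if is_prime(i))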
def test_problem1b():
""" Tests the problem1b function. """
print()
print('--------------------------------------------------')
print('Testing the problem1b function:')
print('--------------------------------------------------')
####################################################################
# THESE TESTS ARE ALREADY DONE. DO NOT CHANGE THEM.
# You may add more tests if you want,
# but you are not required to do so.
####################################################################
# Test 1:
expected = True
answer = problem1b(17, 2)
print()
print('Test 1 expected:', expected)
print(' actual: ', answer)
if expected != answer:
print(' **** THIS TEST FAILED. ****')
# Test 2:
expected = False
answer = problem1b(18, 2)
print()
print('Test 2 expected:', expected)
print(' actual: ', answer)
if expected != answer:
print(' **** THIS TEST FAILED. ****')
# Test 3:
expected = True
answer = problem1b(85, 3)
print()
print('Test 3 expected:', expected)
print(' actual: ', answer)
if expected != answer:
print(' **** THIS TEST FAILED. ****')
# Test 4:
expected = True
answer = problem1b(89, 3)
print()
print('Test 4 expected:', expected)
print(' actual: ', answer)
if expected != answer:
print(' **** THIS TEST FAILED. ****')
# Test 5:
expected = False
answer = problem1b(90, 3)
print()
print('Test 5 expected:', expected)
print(' actual: ', answer)
if expected != answer:
print(' **** THIS TEST FAILED. ****')
# Test 6:
expected = False
answer = problem1b(449, 4)
print()
print('Test 6 expected:', expected)
print(' actual: ', answer)
if expected != answer:
print(' **** THIS TEST FAILED. ****')
# Test 7:
expected = True
answer = problem1b(450, 4)
print()
print('Test 7 expected:', expected)
print(' actual: ', answer)
if expected != answer:
print(' **** THIS TEST FAILED. ****')
# Test 8:
expected = True
answer = problem1b(457, 4)
print()
print('Test 8 expected:', expected)
print(' actual: ', answer)
if expected != answer:
print(' **** THIS TEST FAILED. ****')
# Test 9:
expected = False
answer = problem1b(458, 4)
print()
print('Test 9 expected:', expected)
print(' actual: ', answer)
if expected != answer:
print(' **** THIS TEST FAILED. ****')
# Test 10:
expected = False
answer = problem1b(569, 5)
print()
print('Test 10 expected:', expected)
print(' actual: ', answer)
if expected != answer:
print(' **** THIS TEST FAILED. ****')
# Test 11:
expected = True
answer = problem1b(570, 5)
print()
print('Test 11 expected:', expected)
print(' actual: ', answer)
if expected != answer:
print(' **** THIS TEST FAILED. ****')
# Test 12:
expected = True
answer = problem1b(571, 5)
print()
print('Test 12 expected:', expected)
print(' actual: ', answer)
if expected != answer:
print(' **** THIS TEST FAILED. ****')
# Test 13:
expected = False
answer = problem1b(572, 5)
print()
print('Test 13 expected:', expected)
print(' actual: ', answer)
if expected != answer:
print(' **** THIS TEST FAILED. ****')
# Test 14:
expected = True
answer = problem1b(15610, 6)
print()
print('Test 14 expected:', expected)
print(' actual: ', answer)
if expected != answer:
print(' **** THIS TEST FAILED. ****')
# Test 15:
expected = False
answer = problem1b(15600, 6)
print()
print('Test 15 expected:', expected)
print(' actual: ', answer)
if expected != answer:
print(' **** THIS TEST FAILED. ****')
# Test 16:
expected = False
answer = problem1b(10000, 6)
print()
print('Test 16 expected:', expected)
print(' actual: ', answer)
if expected != answer:
print(' **** THIS TEST FAILED. ****')
# Test 17:
expected = True
answer = problem1b(5861, 6)
print()
print('Test 17 expected:', expected)
print(' actual: ', answer)
if expected != answer:
print(' **** THIS TEST FAILED. ****')
# Test 18:
expected = False
answer = problem1b(5862, 6)
print()
print('Test 18 expected:', expected)
print(' actual: ', answer)
if expected != answer:
print(' **** THIS TEST FAILED. ****')
def problem1b(m, p):
"""
What comes in: Positive integers m and p,
with m >= 2 and (5 raised to the pth power) >= m.
What goes out: Let X = the sum of all the integers
between m and (5 raised to the pth power), inclusive,
that are prime.
This function returns True if X is prime.
This function returns False if X is NOT a prime.
Side effects: None.
Examples:
-- If m is 17 and p = 2, this function returns True,
because the sum of the primes
between 17 and (5 to the 2nd power, i.e. 25) is:
17 + 19 + 23, which is 59,
and 59 IS prime.
-- If m is 18 and p = 2, this function returns False,
because the sum of the primes
between 18 and (5 to the 2nd power, i.e. 25) is:
19 + 23, which is 42,
and 42 is NOT prime.
-- If m is 85 and p = 3, this function returns True,
because the sum of the primes
between 85 and (5 to the 3rd power, i.e. 125) is:
              89 + 97 + 101 + 103 + 107 + 109 + 113, which is 719,
and 719 IS prime.
"""
# ------------------------------------------------------------------
# DONE: 4. Implement and test this function.
# Tests have been written for you (above).
####################################################################
# IMPORTANT:
# ** For full credit you must appropriately use (call)
# ** the appropriate functions that are defined above
# ** possibly including ones you have written.
####################################################################
# ------------------------------------------------------------------
    # is_prime already returns a boolean, so its result can be returned directly.
    return is_prime(problem1a(m, p))
def test_problem1c():
""" Tests the problem1c function. """
print()
print('--------------------------------------------------')
print('Testing the problem1c function:')
print('--------------------------------------------------')
####################################################################
# THESE TESTS ARE ALREADY DONE. DO NOT CHANGE THEM.
# You may add more tests if you want,
# but you are not required to do so.
####################################################################
# Test 1:
expected = 5 * 10
answer = problem1c(50, 100)
print()
print('Test 1 expected:', expected)
print(' actual: ', answer)
if expected != answer:
print(' **** THIS TEST FAILED. ****')
# Test 2:
expected = 2 * 8
answer = problem1c(23, 53)
print()
print('Test 2 expected:', expected)
print(' actual: ', answer)
if expected != answer:
print(' **** THIS TEST FAILED. ****')
# Test 3:
expected = 2 * 5
answer = problem1c(33, 53)
print()
print('Test 3 expected:', expected)
print(' actual: ', answer)
if expected != answer:
print(' **** THIS TEST FAILED. ****')
# Test 4:
expected = 1 * 0
answer = problem1c(20, 22)
print()
print('Test 4 expected:', expected)
print(' actual: ', answer)
if expected != answer:
print(' **** THIS TEST FAILED. ****')
# Test 5:
expected = 4 * 7
answer = problem1c(101, 131)
print()
print('Test 5 expected:', expected)
print(' actual: ', answer)
if expected != answer:
print(' **** THIS TEST FAILED. ****')
# Test 6:
expected = 2 * 5
answer = problem1c(102, 130)
print()
print('Test 6 expected:', expected)
print(' actual: ', answer)
if expected != answer:
print(' **** THIS TEST FAILED. ****')
# Test 7:
expected = 107 * 168
answer = problem1c(2, 1000)
print()
print('Test 7 expected:', expected)
print(' actual: ', answer)
if expected != answer:
print(' **** THIS TEST FAILED. ****')
# Test 8:
expected = 90 * 1061
answer = problem1c(1000, 10000)
print()
print('Test 8 expected:', expected)
print(' actual: ', answer)
if expected != answer:
print(' **** THIS TEST FAILED. ****')
# Test 9:
expected = 83 * 133
answer = problem1c(101, 929)
print()
print('Test 9 expected:', expected)
print(' actual: ', answer)
if expected != answer:
print(' **** THIS TEST FAILED. ****')
# Test 10:
expected = 83 * 133
answer = problem1c(100, 930)
print()
print('Test 10 expected:', expected)
print(' actual: ', answer)
if expected != answer:
print(' **** THIS TEST FAILED. ****')
# Test 11:
expected = 81 * 131
answer = problem1c(102, 928)
print()
print('Test 11 expected:', expected)
print(' actual: ', answer)
if expected != answer:
print(' **** THIS TEST FAILED. ****')
# Test 12:
expected = 82 * 132
answer = problem1c(101, 928)
print()
print('Test 12 expected:', expected)
print(' actual: ', answer)
if expected != answer:
print(' **** THIS TEST FAILED. ****')
# Test 13:
expected = 82 * 132
answer = problem1c(102, 929)
print()
print('Test 13 expected:', expected)
print(' actual: ', answer)
if expected != answer:
print(' **** THIS TEST FAILED. ****')
# Test 14:
expected = 280 * 2237
answer = problem1c(100, 20000)
print()
print('Test 14 expected:', expected)
print(' actual: ', answer)
if expected != answer:
print(' **** THIS TEST FAILED. ****')
def problem1c(m, n):
"""
What comes in: Positive integers m and n, with m <= n.
What goes out: Returns the product XY where:
-- X is the number of integers from m to n, inclusive,
that are PALINDROMES.
-- Y is the number of integers from m to n, inclusive,
that are PRIME.
Side effects: None.
Examples:
-- If m is 50 and n is 100:
this function returns 5 * 10, which is 50,
because the palindromes between 50 and 100 are:
55 66 77 88 99 [so there are 5 of them]
and the primes between 50 and 100 are:
53 59 61 67 71 73 79 83 89 97
[so there are 10 of them]
-- If m is 23 and n is 53:
this function returns 2 * 8, which is 16,
because the palindromes between 23 and 53 are
33 44 [so there are 2 of them]
and the primes between 23 and 53 are
23 29 31 37 41 43 47 53
[so there are 8 of them]
"""
# ------------------------------------------------------------------
# DONE: 4. Implement and test this function.
# Tests have been written for you (above).
####################################################################
# IMPORTANT:
# ** For full credit you must appropriately use (call)
# ** the appropriate functions that are defined above.
####################################################################
# ------------------------------------------------------------------
    count = 0    # number of palindromes between m and n, inclusive
    count1 = 0   # number of primes between m and n, inclusive
    for i in range(m, n + 1):
        if is_palindrome(i):
            count += 1
        if is_prime(i):
            count1 += 1
    return count * count1
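# ----------------------------------------------------------------------
# A sketch of the same counting done with sum() over boolean tests.
# The name problem1c_alt is illustrative only; problem1c above is the
# version the tests exercise.
# ----------------------------------------------------------------------
def problem1c_alt(m, n):
    """ Same result as problem1c: (number of palindromes) * (number of primes). """
    palindromes = sum(1 for i in range(m, n + 1) if is_palindrome(i))
    primes = sum(1 for i in range(m, n + 1) if is_prime(i))
    return palindromes * primes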
# ----------------------------------------------------------------------
# Calls main to start the ball rolling.
# ----------------------------------------------------------------------
main()
|
normal
|
{
"blob_id": "ca6a9656efe439c9e90f2724e38e652a09e46dae",
"index": 7686,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef is_palindrome(n):\n \"\"\"\n What comes in: An non-negative integer n.\n What goes out: Returns True if the given integer is a palindrome,\n that is, if it reads the same backwards and forwards.\n Returns False if the given integer is not a palindrome.\n Side effects: None.\n Examples:\n -- if n is 12344321 this function returns True\n -- if n is 121121 this function returns True\n -- if n is 372273 this function returns True\n -- if n is 88 this function returns True\n -- if n is 808 this function returns True\n -- if n is 1 this function returns True\n -- if n is 6556 this function returns True\n\n -- if n is 6557 this function returns False\n -- if n is 228 this function returns False\n -- if n is 81 this function returns False\n \"\"\"\n forwards = str(n)\n backwards = str(n)[::-1]\n return forwards == backwards\n\n\n<mask token>\n\n\ndef test_problem1a():\n \"\"\" Tests the problem1a function. \"\"\"\n print()\n print('--------------------------------------------------')\n print('Testing the problem1a function:')\n print('--------------------------------------------------')\n expected = 95\n answer = problem1a(5, 2)\n print('Test 1 expected:', expected)\n print(' actual: ', answer)\n expected = 1576\n answer = problem1a(10, 3)\n print('Test 2 expected:', expected)\n print(' actual: ', answer)\n expected = 32312\n answer = problem1a(15, 4)\n print('Test 3 expected:', expected)\n print(' actual: ', answer)\n expected = 639655\n answer = problem1a(20, 5)\n print('Test 4 expected:', expected)\n print(' actual: ', answer)\n expected = 13321704\n answer = problem1a(25, 6)\n print('Test 5 expected:', expected)\n print(' actual: ', answer)\n expected = 283359305\n answer = problem1a(30, 7)\n print('Test 6 expected:', expected)\n print(' actual: ', answer)\n\n\ndef problem1a(m, p):\n \"\"\"\n What comes in: Positive integers m and p,\n with m >= 2 and (5 raised to the pth power) >= m.\n What goes out: Returns the sum of all the integers\n between m and (5 raised to the pth power), inclusive,\n that are prime.\n Side effects: None.\n Examples:\n -- If m is 11 and p = 2, this function returns 83,\n because the sum of the primes\n between 4 and (5 to the 2nd power, i.e. 25) is:\n 11 + 13 + 17 + 19 + 23, which is 83.\n -- If m is 70 and p = 3, this function returns 1025,\n because the sum of the primes between 70 and\n (5 to the 3rd power, i.e. 125) is:\n 71 + 73 + 79 + 83 + 89 + 97 + 101 + 103 + 107 + 109 + 113,\n which is 1025.\n -- If m is 2 and p = 1, this function returns 10,\n because the sum of the primes between 2 and\n (5 to the 1st power, i.e. 5) is:\n 2 + 3 + 5, which is 10.\n -- If m is 1000 and p = 6,\n this function returns 13245677 (trust me!)\n \"\"\"\n tot = 0\n for i in range(m, 5 ** p + 1):\n if is_prime(i) == True:\n tot = tot + i\n return tot\n\n\ndef test_problem1b():\n \"\"\" Tests the problem1b function. \"\"\"\n print()\n print('--------------------------------------------------')\n print('Testing the problem1b function:')\n print('--------------------------------------------------')\n expected = True\n answer = problem1b(17, 2)\n print()\n print('Test 1 expected:', expected)\n print(' actual: ', answer)\n if expected != answer:\n print(' **** THIS TEST FAILED. ****')\n expected = False\n answer = problem1b(18, 2)\n print()\n print('Test 2 expected:', expected)\n print(' actual: ', answer)\n if expected != answer:\n print(' **** THIS TEST FAILED. 
****')\n expected = True\n answer = problem1b(85, 3)\n print()\n print('Test 3 expected:', expected)\n print(' actual: ', answer)\n if expected != answer:\n print(' **** THIS TEST FAILED. ****')\n expected = True\n answer = problem1b(89, 3)\n print()\n print('Test 4 expected:', expected)\n print(' actual: ', answer)\n if expected != answer:\n print(' **** THIS TEST FAILED. ****')\n expected = False\n answer = problem1b(90, 3)\n print()\n print('Test 5 expected:', expected)\n print(' actual: ', answer)\n if expected != answer:\n print(' **** THIS TEST FAILED. ****')\n expected = False\n answer = problem1b(449, 4)\n print()\n print('Test 6 expected:', expected)\n print(' actual: ', answer)\n if expected != answer:\n print(' **** THIS TEST FAILED. ****')\n expected = True\n answer = problem1b(450, 4)\n print()\n print('Test 7 expected:', expected)\n print(' actual: ', answer)\n if expected != answer:\n print(' **** THIS TEST FAILED. ****')\n expected = True\n answer = problem1b(457, 4)\n print()\n print('Test 8 expected:', expected)\n print(' actual: ', answer)\n if expected != answer:\n print(' **** THIS TEST FAILED. ****')\n expected = False\n answer = problem1b(458, 4)\n print()\n print('Test 9 expected:', expected)\n print(' actual: ', answer)\n if expected != answer:\n print(' **** THIS TEST FAILED. ****')\n expected = False\n answer = problem1b(569, 5)\n print()\n print('Test 10 expected:', expected)\n print(' actual: ', answer)\n if expected != answer:\n print(' **** THIS TEST FAILED. ****')\n expected = True\n answer = problem1b(570, 5)\n print()\n print('Test 11 expected:', expected)\n print(' actual: ', answer)\n if expected != answer:\n print(' **** THIS TEST FAILED. ****')\n expected = True\n answer = problem1b(571, 5)\n print()\n print('Test 12 expected:', expected)\n print(' actual: ', answer)\n if expected != answer:\n print(' **** THIS TEST FAILED. ****')\n expected = False\n answer = problem1b(572, 5)\n print()\n print('Test 13 expected:', expected)\n print(' actual: ', answer)\n if expected != answer:\n print(' **** THIS TEST FAILED. ****')\n expected = True\n answer = problem1b(15610, 6)\n print()\n print('Test 14 expected:', expected)\n print(' actual: ', answer)\n if expected != answer:\n print(' **** THIS TEST FAILED. ****')\n expected = False\n answer = problem1b(15600, 6)\n print()\n print('Test 15 expected:', expected)\n print(' actual: ', answer)\n if expected != answer:\n print(' **** THIS TEST FAILED. ****')\n expected = False\n answer = problem1b(10000, 6)\n print()\n print('Test 16 expected:', expected)\n print(' actual: ', answer)\n if expected != answer:\n print(' **** THIS TEST FAILED. ****')\n expected = True\n answer = problem1b(5861, 6)\n print()\n print('Test 17 expected:', expected)\n print(' actual: ', answer)\n if expected != answer:\n print(' **** THIS TEST FAILED. ****')\n expected = False\n answer = problem1b(5862, 6)\n print()\n print('Test 18 expected:', expected)\n print(' actual: ', answer)\n if expected != answer:\n print(' **** THIS TEST FAILED. 
****')\n\n\n<mask token>\n\n\ndef problem1c(m, n):\n \"\"\"\n What comes in: Positive integers m and n, with m <= n.\n What goes out: Returns the product XY where:\n -- X is the number of integers from m to n, inclusive,\n that are PALINDROMES.\n -- Y is the number of integers from m to n, inclusive,\n that are PRIME.\n Side effects: None.\n Examples:\n -- If m is 50 and n is 100:\n this function returns 5 * 10, which is 50,\n because the palindromes between 50 and 100 are:\n 55 66 77 88 99 [so there are 5 of them]\n and the primes between 50 and 100 are:\n 53 59 61 67 71 73 79 83 89 97\n [so there are 10 of them]\n -- If m is 23 and n is 53:\n this function returns 2 * 8, which is 16,\n because the palindromes between 23 and 53 are\n 33 44 [so there are 2 of them]\n and the primes between 23 and 53 are\n 23 29 31 37 41 43 47 53\n [so there are 8 of them]\n \"\"\"\n count = 0\n count1 = 0\n for i in range(m, n + 1):\n if is_palindrome(i) == True:\n count += 1\n if is_prime(i) == True:\n count1 += 1\n return count * count1\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef main():\n \"\"\" Calls the TEST functions in this module. \"\"\"\n test_problem1a()\n test_problem1b()\n test_problem1c()\n\n\ndef is_palindrome(n):\n \"\"\"\n What comes in: An non-negative integer n.\n What goes out: Returns True if the given integer is a palindrome,\n that is, if it reads the same backwards and forwards.\n Returns False if the given integer is not a palindrome.\n Side effects: None.\n Examples:\n -- if n is 12344321 this function returns True\n -- if n is 121121 this function returns True\n -- if n is 372273 this function returns True\n -- if n is 88 this function returns True\n -- if n is 808 this function returns True\n -- if n is 1 this function returns True\n -- if n is 6556 this function returns True\n\n -- if n is 6557 this function returns False\n -- if n is 228 this function returns False\n -- if n is 81 this function returns False\n \"\"\"\n forwards = str(n)\n backwards = str(n)[::-1]\n return forwards == backwards\n\n\ndef is_prime(n):\n \"\"\"\n What comes in: An integer n >= 2.\n What goes out: True if the given integer is prime, else False.\n Side effects: None.\n Examples:\n -- is_prime(11) returns True\n -- is_prime(12) returns False\n -- is_prime(2) returns True\n Note: The algorithm used here is simple and clear but slow.\n \"\"\"\n for k in range(2, n // 2 + 1):\n if n % k == 0:\n return False\n return True\n\n\ndef test_problem1a():\n \"\"\" Tests the problem1a function. \"\"\"\n print()\n print('--------------------------------------------------')\n print('Testing the problem1a function:')\n print('--------------------------------------------------')\n expected = 95\n answer = problem1a(5, 2)\n print('Test 1 expected:', expected)\n print(' actual: ', answer)\n expected = 1576\n answer = problem1a(10, 3)\n print('Test 2 expected:', expected)\n print(' actual: ', answer)\n expected = 32312\n answer = problem1a(15, 4)\n print('Test 3 expected:', expected)\n print(' actual: ', answer)\n expected = 639655\n answer = problem1a(20, 5)\n print('Test 4 expected:', expected)\n print(' actual: ', answer)\n expected = 13321704\n answer = problem1a(25, 6)\n print('Test 5 expected:', expected)\n print(' actual: ', answer)\n expected = 283359305\n answer = problem1a(30, 7)\n print('Test 6 expected:', expected)\n print(' actual: ', answer)\n\n\ndef problem1a(m, p):\n \"\"\"\n What comes in: Positive integers m and p,\n with m >= 2 and (5 raised to the pth power) >= m.\n What goes out: Returns the sum of all the integers\n between m and (5 raised to the pth power), inclusive,\n that are prime.\n Side effects: None.\n Examples:\n -- If m is 11 and p = 2, this function returns 83,\n because the sum of the primes\n between 4 and (5 to the 2nd power, i.e. 25) is:\n 11 + 13 + 17 + 19 + 23, which is 83.\n -- If m is 70 and p = 3, this function returns 1025,\n because the sum of the primes between 70 and\n (5 to the 3rd power, i.e. 125) is:\n 71 + 73 + 79 + 83 + 89 + 97 + 101 + 103 + 107 + 109 + 113,\n which is 1025.\n -- If m is 2 and p = 1, this function returns 10,\n because the sum of the primes between 2 and\n (5 to the 1st power, i.e. 5) is:\n 2 + 3 + 5, which is 10.\n -- If m is 1000 and p = 6,\n this function returns 13245677 (trust me!)\n \"\"\"\n tot = 0\n for i in range(m, 5 ** p + 1):\n if is_prime(i) == True:\n tot = tot + i\n return tot\n\n\ndef test_problem1b():\n \"\"\" Tests the problem1b function. 
\"\"\"\n print()\n print('--------------------------------------------------')\n print('Testing the problem1b function:')\n print('--------------------------------------------------')\n expected = True\n answer = problem1b(17, 2)\n print()\n print('Test 1 expected:', expected)\n print(' actual: ', answer)\n if expected != answer:\n print(' **** THIS TEST FAILED. ****')\n expected = False\n answer = problem1b(18, 2)\n print()\n print('Test 2 expected:', expected)\n print(' actual: ', answer)\n if expected != answer:\n print(' **** THIS TEST FAILED. ****')\n expected = True\n answer = problem1b(85, 3)\n print()\n print('Test 3 expected:', expected)\n print(' actual: ', answer)\n if expected != answer:\n print(' **** THIS TEST FAILED. ****')\n expected = True\n answer = problem1b(89, 3)\n print()\n print('Test 4 expected:', expected)\n print(' actual: ', answer)\n if expected != answer:\n print(' **** THIS TEST FAILED. ****')\n expected = False\n answer = problem1b(90, 3)\n print()\n print('Test 5 expected:', expected)\n print(' actual: ', answer)\n if expected != answer:\n print(' **** THIS TEST FAILED. ****')\n expected = False\n answer = problem1b(449, 4)\n print()\n print('Test 6 expected:', expected)\n print(' actual: ', answer)\n if expected != answer:\n print(' **** THIS TEST FAILED. ****')\n expected = True\n answer = problem1b(450, 4)\n print()\n print('Test 7 expected:', expected)\n print(' actual: ', answer)\n if expected != answer:\n print(' **** THIS TEST FAILED. ****')\n expected = True\n answer = problem1b(457, 4)\n print()\n print('Test 8 expected:', expected)\n print(' actual: ', answer)\n if expected != answer:\n print(' **** THIS TEST FAILED. ****')\n expected = False\n answer = problem1b(458, 4)\n print()\n print('Test 9 expected:', expected)\n print(' actual: ', answer)\n if expected != answer:\n print(' **** THIS TEST FAILED. ****')\n expected = False\n answer = problem1b(569, 5)\n print()\n print('Test 10 expected:', expected)\n print(' actual: ', answer)\n if expected != answer:\n print(' **** THIS TEST FAILED. ****')\n expected = True\n answer = problem1b(570, 5)\n print()\n print('Test 11 expected:', expected)\n print(' actual: ', answer)\n if expected != answer:\n print(' **** THIS TEST FAILED. ****')\n expected = True\n answer = problem1b(571, 5)\n print()\n print('Test 12 expected:', expected)\n print(' actual: ', answer)\n if expected != answer:\n print(' **** THIS TEST FAILED. ****')\n expected = False\n answer = problem1b(572, 5)\n print()\n print('Test 13 expected:', expected)\n print(' actual: ', answer)\n if expected != answer:\n print(' **** THIS TEST FAILED. ****')\n expected = True\n answer = problem1b(15610, 6)\n print()\n print('Test 14 expected:', expected)\n print(' actual: ', answer)\n if expected != answer:\n print(' **** THIS TEST FAILED. ****')\n expected = False\n answer = problem1b(15600, 6)\n print()\n print('Test 15 expected:', expected)\n print(' actual: ', answer)\n if expected != answer:\n print(' **** THIS TEST FAILED. ****')\n expected = False\n answer = problem1b(10000, 6)\n print()\n print('Test 16 expected:', expected)\n print(' actual: ', answer)\n if expected != answer:\n print(' **** THIS TEST FAILED. ****')\n expected = True\n answer = problem1b(5861, 6)\n print()\n print('Test 17 expected:', expected)\n print(' actual: ', answer)\n if expected != answer:\n print(' **** THIS TEST FAILED. 
****')\n expected = False\n answer = problem1b(5862, 6)\n print()\n print('Test 18 expected:', expected)\n print(' actual: ', answer)\n if expected != answer:\n print(' **** THIS TEST FAILED. ****')\n\n\ndef problem1b(m, p):\n \"\"\"\n What comes in: Positive integers m and p,\n with m >= 2 and (5 raised to the pth power) >= m.\n What goes out: Let X = the sum of all the integers\n between m and (5 raised to the pth power), inclusive,\n that are prime.\n This function returns True if X is prime.\n This function returns False if X is NOT a prime.\n Side effects: None.\n Examples:\n -- If m is 17 and p = 2, this function returns True,\n because the sum of the primes\n between 17 and (5 to the 2nd power, i.e. 25) is:\n 17 + 19 + 23, which is 59,\n and 59 IS prime.\n -- If m is 18 and p = 2, this function returns False,\n because the sum of the primes\n between 18 and (5 to the 2nd power, i.e. 25) is:\n 19 + 23, which is 42,\n and 42 is NOT prime.\n -- If m is 85 and p = 3, this function returns True,\n because the sum of the primes\n between 85 and (5 to the 3rd power, i.e. 125) is:\n 89 + 91 + 97 + 101 + 103 + 107 + 109 + 113, which is 719,\n and 719 IS prime.\n \"\"\"\n if is_prime(problem1a(m, p)) == True:\n return True\n else:\n return False\n\n\ndef test_problem1c():\n \"\"\" Tests the problem1c function. \"\"\"\n print()\n print('--------------------------------------------------')\n print('Testing the problem1c function:')\n print('--------------------------------------------------')\n expected = 5 * 10\n answer = problem1c(50, 100)\n print()\n print('Test 1 expected:', expected)\n print(' actual: ', answer)\n if expected != answer:\n print(' **** THIS TEST FAILED. ****')\n expected = 2 * 8\n answer = problem1c(23, 53)\n print()\n print('Test 2 expected:', expected)\n print(' actual: ', answer)\n if expected != answer:\n print(' **** THIS TEST FAILED. ****')\n expected = 2 * 5\n answer = problem1c(33, 53)\n print()\n print('Test 3 expected:', expected)\n print(' actual: ', answer)\n if expected != answer:\n print(' **** THIS TEST FAILED. ****')\n expected = 1 * 0\n answer = problem1c(20, 22)\n print()\n print('Test 4 expected:', expected)\n print(' actual: ', answer)\n if expected != answer:\n print(' **** THIS TEST FAILED. ****')\n expected = 4 * 7\n answer = problem1c(101, 131)\n print()\n print('Test 5 expected:', expected)\n print(' actual: ', answer)\n if expected != answer:\n print(' **** THIS TEST FAILED. ****')\n expected = 2 * 5\n answer = problem1c(102, 130)\n print()\n print('Test 6 expected:', expected)\n print(' actual: ', answer)\n if expected != answer:\n print(' **** THIS TEST FAILED. ****')\n expected = 107 * 168\n answer = problem1c(2, 1000)\n print()\n print('Test 7 expected:', expected)\n print(' actual: ', answer)\n if expected != answer:\n print(' **** THIS TEST FAILED. ****')\n expected = 90 * 1061\n answer = problem1c(1000, 10000)\n print()\n print('Test 8 expected:', expected)\n print(' actual: ', answer)\n if expected != answer:\n print(' **** THIS TEST FAILED. ****')\n expected = 83 * 133\n answer = problem1c(101, 929)\n print()\n print('Test 9 expected:', expected)\n print(' actual: ', answer)\n if expected != answer:\n print(' **** THIS TEST FAILED. ****')\n expected = 83 * 133\n answer = problem1c(100, 930)\n print()\n print('Test 10 expected:', expected)\n print(' actual: ', answer)\n if expected != answer:\n print(' **** THIS TEST FAILED. 
****')\n expected = 81 * 131\n answer = problem1c(102, 928)\n print()\n print('Test 11 expected:', expected)\n print(' actual: ', answer)\n if expected != answer:\n print(' **** THIS TEST FAILED. ****')\n expected = 82 * 132\n answer = problem1c(101, 928)\n print()\n print('Test 12 expected:', expected)\n print(' actual: ', answer)\n if expected != answer:\n print(' **** THIS TEST FAILED. ****')\n expected = 82 * 132\n answer = problem1c(102, 929)\n print()\n print('Test 13 expected:', expected)\n print(' actual: ', answer)\n if expected != answer:\n print(' **** THIS TEST FAILED. ****')\n expected = 280 * 2237\n answer = problem1c(100, 20000)\n print()\n print('Test 14 expected:', expected)\n print(' actual: ', answer)\n if expected != answer:\n print(' **** THIS TEST FAILED. ****')\n\n\ndef problem1c(m, n):\n \"\"\"\n What comes in: Positive integers m and n, with m <= n.\n What goes out: Returns the product XY where:\n -- X is the number of integers from m to n, inclusive,\n that are PALINDROMES.\n -- Y is the number of integers from m to n, inclusive,\n that are PRIME.\n Side effects: None.\n Examples:\n -- If m is 50 and n is 100:\n this function returns 5 * 10, which is 50,\n because the palindromes between 50 and 100 are:\n 55 66 77 88 99 [so there are 5 of them]\n and the primes between 50 and 100 are:\n 53 59 61 67 71 73 79 83 89 97\n [so there are 10 of them]\n -- If m is 23 and n is 53:\n this function returns 2 * 8, which is 16,\n because the palindromes between 23 and 53 are\n 33 44 [so there are 2 of them]\n and the primes between 23 and 53 are\n 23 29 31 37 41 43 47 53\n [so there are 8 of them]\n \"\"\"\n count = 0\n count1 = 0\n for i in range(m, n + 1):\n if is_palindrome(i) == True:\n count += 1\n if is_prime(i) == True:\n count1 += 1\n return count * count1\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\ndef main():\n \"\"\" Calls the TEST functions in this module. \"\"\"\n test_problem1a()\n test_problem1b()\n test_problem1c()\n\n\ndef is_palindrome(n):\n \"\"\"\n What comes in: An non-negative integer n.\n What goes out: Returns True if the given integer is a palindrome,\n that is, if it reads the same backwards and forwards.\n Returns False if the given integer is not a palindrome.\n Side effects: None.\n Examples:\n -- if n is 12344321 this function returns True\n -- if n is 121121 this function returns True\n -- if n is 372273 this function returns True\n -- if n is 88 this function returns True\n -- if n is 808 this function returns True\n -- if n is 1 this function returns True\n -- if n is 6556 this function returns True\n\n -- if n is 6557 this function returns False\n -- if n is 228 this function returns False\n -- if n is 81 this function returns False\n \"\"\"\n forwards = str(n)\n backwards = str(n)[::-1]\n return forwards == backwards\n\n\ndef is_prime(n):\n \"\"\"\n What comes in: An integer n >= 2.\n What goes out: True if the given integer is prime, else False.\n Side effects: None.\n Examples:\n -- is_prime(11) returns True\n -- is_prime(12) returns False\n -- is_prime(2) returns True\n Note: The algorithm used here is simple and clear but slow.\n \"\"\"\n for k in range(2, n // 2 + 1):\n if n % k == 0:\n return False\n return True\n\n\ndef test_problem1a():\n \"\"\" Tests the problem1a function. \"\"\"\n print()\n print('--------------------------------------------------')\n print('Testing the problem1a function:')\n print('--------------------------------------------------')\n expected = 95\n answer = problem1a(5, 2)\n print('Test 1 expected:', expected)\n print(' actual: ', answer)\n expected = 1576\n answer = problem1a(10, 3)\n print('Test 2 expected:', expected)\n print(' actual: ', answer)\n expected = 32312\n answer = problem1a(15, 4)\n print('Test 3 expected:', expected)\n print(' actual: ', answer)\n expected = 639655\n answer = problem1a(20, 5)\n print('Test 4 expected:', expected)\n print(' actual: ', answer)\n expected = 13321704\n answer = problem1a(25, 6)\n print('Test 5 expected:', expected)\n print(' actual: ', answer)\n expected = 283359305\n answer = problem1a(30, 7)\n print('Test 6 expected:', expected)\n print(' actual: ', answer)\n\n\ndef problem1a(m, p):\n \"\"\"\n What comes in: Positive integers m and p,\n with m >= 2 and (5 raised to the pth power) >= m.\n What goes out: Returns the sum of all the integers\n between m and (5 raised to the pth power), inclusive,\n that are prime.\n Side effects: None.\n Examples:\n -- If m is 11 and p = 2, this function returns 83,\n because the sum of the primes\n between 4 and (5 to the 2nd power, i.e. 25) is:\n 11 + 13 + 17 + 19 + 23, which is 83.\n -- If m is 70 and p = 3, this function returns 1025,\n because the sum of the primes between 70 and\n (5 to the 3rd power, i.e. 125) is:\n 71 + 73 + 79 + 83 + 89 + 97 + 101 + 103 + 107 + 109 + 113,\n which is 1025.\n -- If m is 2 and p = 1, this function returns 10,\n because the sum of the primes between 2 and\n (5 to the 1st power, i.e. 5) is:\n 2 + 3 + 5, which is 10.\n -- If m is 1000 and p = 6,\n this function returns 13245677 (trust me!)\n \"\"\"\n tot = 0\n for i in range(m, 5 ** p + 1):\n if is_prime(i) == True:\n tot = tot + i\n return tot\n\n\ndef test_problem1b():\n \"\"\" Tests the problem1b function. 
\"\"\"\n print()\n print('--------------------------------------------------')\n print('Testing the problem1b function:')\n print('--------------------------------------------------')\n expected = True\n answer = problem1b(17, 2)\n print()\n print('Test 1 expected:', expected)\n print(' actual: ', answer)\n if expected != answer:\n print(' **** THIS TEST FAILED. ****')\n expected = False\n answer = problem1b(18, 2)\n print()\n print('Test 2 expected:', expected)\n print(' actual: ', answer)\n if expected != answer:\n print(' **** THIS TEST FAILED. ****')\n expected = True\n answer = problem1b(85, 3)\n print()\n print('Test 3 expected:', expected)\n print(' actual: ', answer)\n if expected != answer:\n print(' **** THIS TEST FAILED. ****')\n expected = True\n answer = problem1b(89, 3)\n print()\n print('Test 4 expected:', expected)\n print(' actual: ', answer)\n if expected != answer:\n print(' **** THIS TEST FAILED. ****')\n expected = False\n answer = problem1b(90, 3)\n print()\n print('Test 5 expected:', expected)\n print(' actual: ', answer)\n if expected != answer:\n print(' **** THIS TEST FAILED. ****')\n expected = False\n answer = problem1b(449, 4)\n print()\n print('Test 6 expected:', expected)\n print(' actual: ', answer)\n if expected != answer:\n print(' **** THIS TEST FAILED. ****')\n expected = True\n answer = problem1b(450, 4)\n print()\n print('Test 7 expected:', expected)\n print(' actual: ', answer)\n if expected != answer:\n print(' **** THIS TEST FAILED. ****')\n expected = True\n answer = problem1b(457, 4)\n print()\n print('Test 8 expected:', expected)\n print(' actual: ', answer)\n if expected != answer:\n print(' **** THIS TEST FAILED. ****')\n expected = False\n answer = problem1b(458, 4)\n print()\n print('Test 9 expected:', expected)\n print(' actual: ', answer)\n if expected != answer:\n print(' **** THIS TEST FAILED. ****')\n expected = False\n answer = problem1b(569, 5)\n print()\n print('Test 10 expected:', expected)\n print(' actual: ', answer)\n if expected != answer:\n print(' **** THIS TEST FAILED. ****')\n expected = True\n answer = problem1b(570, 5)\n print()\n print('Test 11 expected:', expected)\n print(' actual: ', answer)\n if expected != answer:\n print(' **** THIS TEST FAILED. ****')\n expected = True\n answer = problem1b(571, 5)\n print()\n print('Test 12 expected:', expected)\n print(' actual: ', answer)\n if expected != answer:\n print(' **** THIS TEST FAILED. ****')\n expected = False\n answer = problem1b(572, 5)\n print()\n print('Test 13 expected:', expected)\n print(' actual: ', answer)\n if expected != answer:\n print(' **** THIS TEST FAILED. ****')\n expected = True\n answer = problem1b(15610, 6)\n print()\n print('Test 14 expected:', expected)\n print(' actual: ', answer)\n if expected != answer:\n print(' **** THIS TEST FAILED. ****')\n expected = False\n answer = problem1b(15600, 6)\n print()\n print('Test 15 expected:', expected)\n print(' actual: ', answer)\n if expected != answer:\n print(' **** THIS TEST FAILED. ****')\n expected = False\n answer = problem1b(10000, 6)\n print()\n print('Test 16 expected:', expected)\n print(' actual: ', answer)\n if expected != answer:\n print(' **** THIS TEST FAILED. ****')\n expected = True\n answer = problem1b(5861, 6)\n print()\n print('Test 17 expected:', expected)\n print(' actual: ', answer)\n if expected != answer:\n print(' **** THIS TEST FAILED. 
****')\n expected = False\n answer = problem1b(5862, 6)\n print()\n print('Test 18 expected:', expected)\n print(' actual: ', answer)\n if expected != answer:\n print(' **** THIS TEST FAILED. ****')\n\n\ndef problem1b(m, p):\n \"\"\"\n What comes in: Positive integers m and p,\n with m >= 2 and (5 raised to the pth power) >= m.\n What goes out: Let X = the sum of all the integers\n between m and (5 raised to the pth power), inclusive,\n that are prime.\n This function returns True if X is prime.\n This function returns False if X is NOT a prime.\n Side effects: None.\n Examples:\n -- If m is 17 and p = 2, this function returns True,\n because the sum of the primes\n between 17 and (5 to the 2nd power, i.e. 25) is:\n 17 + 19 + 23, which is 59,\n and 59 IS prime.\n -- If m is 18 and p = 2, this function returns False,\n because the sum of the primes\n between 18 and (5 to the 2nd power, i.e. 25) is:\n 19 + 23, which is 42,\n and 42 is NOT prime.\n -- If m is 85 and p = 3, this function returns True,\n because the sum of the primes\n between 85 and (5 to the 3rd power, i.e. 125) is:\n 89 + 91 + 97 + 101 + 103 + 107 + 109 + 113, which is 719,\n and 719 IS prime.\n \"\"\"\n if is_prime(problem1a(m, p)) == True:\n return True\n else:\n return False\n\n\ndef test_problem1c():\n \"\"\" Tests the problem1c function. \"\"\"\n print()\n print('--------------------------------------------------')\n print('Testing the problem1c function:')\n print('--------------------------------------------------')\n expected = 5 * 10\n answer = problem1c(50, 100)\n print()\n print('Test 1 expected:', expected)\n print(' actual: ', answer)\n if expected != answer:\n print(' **** THIS TEST FAILED. ****')\n expected = 2 * 8\n answer = problem1c(23, 53)\n print()\n print('Test 2 expected:', expected)\n print(' actual: ', answer)\n if expected != answer:\n print(' **** THIS TEST FAILED. ****')\n expected = 2 * 5\n answer = problem1c(33, 53)\n print()\n print('Test 3 expected:', expected)\n print(' actual: ', answer)\n if expected != answer:\n print(' **** THIS TEST FAILED. ****')\n expected = 1 * 0\n answer = problem1c(20, 22)\n print()\n print('Test 4 expected:', expected)\n print(' actual: ', answer)\n if expected != answer:\n print(' **** THIS TEST FAILED. ****')\n expected = 4 * 7\n answer = problem1c(101, 131)\n print()\n print('Test 5 expected:', expected)\n print(' actual: ', answer)\n if expected != answer:\n print(' **** THIS TEST FAILED. ****')\n expected = 2 * 5\n answer = problem1c(102, 130)\n print()\n print('Test 6 expected:', expected)\n print(' actual: ', answer)\n if expected != answer:\n print(' **** THIS TEST FAILED. ****')\n expected = 107 * 168\n answer = problem1c(2, 1000)\n print()\n print('Test 7 expected:', expected)\n print(' actual: ', answer)\n if expected != answer:\n print(' **** THIS TEST FAILED. ****')\n expected = 90 * 1061\n answer = problem1c(1000, 10000)\n print()\n print('Test 8 expected:', expected)\n print(' actual: ', answer)\n if expected != answer:\n print(' **** THIS TEST FAILED. ****')\n expected = 83 * 133\n answer = problem1c(101, 929)\n print()\n print('Test 9 expected:', expected)\n print(' actual: ', answer)\n if expected != answer:\n print(' **** THIS TEST FAILED. ****')\n expected = 83 * 133\n answer = problem1c(100, 930)\n print()\n print('Test 10 expected:', expected)\n print(' actual: ', answer)\n if expected != answer:\n print(' **** THIS TEST FAILED. 
****')\n expected = 81 * 131\n answer = problem1c(102, 928)\n print()\n print('Test 11 expected:', expected)\n print(' actual: ', answer)\n if expected != answer:\n print(' **** THIS TEST FAILED. ****')\n expected = 82 * 132\n answer = problem1c(101, 928)\n print()\n print('Test 12 expected:', expected)\n print(' actual: ', answer)\n if expected != answer:\n print(' **** THIS TEST FAILED. ****')\n expected = 82 * 132\n answer = problem1c(102, 929)\n print()\n print('Test 13 expected:', expected)\n print(' actual: ', answer)\n if expected != answer:\n print(' **** THIS TEST FAILED. ****')\n expected = 280 * 2237\n answer = problem1c(100, 20000)\n print()\n print('Test 14 expected:', expected)\n print(' actual: ', answer)\n if expected != answer:\n print(' **** THIS TEST FAILED. ****')\n\n\ndef problem1c(m, n):\n \"\"\"\n What comes in: Positive integers m and n, with m <= n.\n What goes out: Returns the product XY where:\n -- X is the number of integers from m to n, inclusive,\n that are PALINDROMES.\n -- Y is the number of integers from m to n, inclusive,\n that are PRIME.\n Side effects: None.\n Examples:\n -- If m is 50 and n is 100:\n this function returns 5 * 10, which is 50,\n because the palindromes between 50 and 100 are:\n 55 66 77 88 99 [so there are 5 of them]\n and the primes between 50 and 100 are:\n 53 59 61 67 71 73 79 83 89 97\n [so there are 10 of them]\n -- If m is 23 and n is 53:\n this function returns 2 * 8, which is 16,\n because the palindromes between 23 and 53 are\n 33 44 [so there are 2 of them]\n and the primes between 23 and 53 are\n 23 29 31 37 41 43 47 53\n [so there are 8 of them]\n \"\"\"\n count = 0\n count1 = 0\n for i in range(m, n + 1):\n if is_palindrome(i) == True:\n count += 1\n if is_prime(i) == True:\n count1 += 1\n return count * count1\n\n\nmain()\n",
"step-5": "\"\"\"\nTest 1, problem 1.\n\nAuthors: David Mutchler, Dave Fisher, Valerie Galluzzi, Amanda Stouder,\n their colleagues and Nathan Gupta. March 2016.\n\"\"\" # DONE: 1. PUT YOUR NAME IN THE ABOVE LINE.\n\n\ndef main():\n \"\"\" Calls the TEST functions in this module. \"\"\"\n test_problem1a()\n test_problem1b()\n test_problem1c()\n\n\ndef is_palindrome(n):\n \"\"\"\n What comes in: An non-negative integer n.\n What goes out: Returns True if the given integer is a palindrome,\n that is, if it reads the same backwards and forwards.\n Returns False if the given integer is not a palindrome.\n Side effects: None.\n Examples:\n -- if n is 12344321 this function returns True\n -- if n is 121121 this function returns True\n -- if n is 372273 this function returns True\n -- if n is 88 this function returns True\n -- if n is 808 this function returns True\n -- if n is 1 this function returns True\n -- if n is 6556 this function returns True\n\n -- if n is 6557 this function returns False\n -- if n is 228 this function returns False\n -- if n is 81 this function returns False\n \"\"\"\n ####################################################################\n # Ask your instructor for help if you do not understand\n # the green doc-string above.\n ####################################################################\n forwards = str(n)\n backwards = str(n)[::-1]\n return forwards == backwards\n\n # ------------------------------------------------------------------\n # Students:\n # Do NOT touch the above is_palindrome function\n # - it has no TODO.\n # Do NOT copy code from this function.\n #\n # Instead, ** CALL ** this function as needed in the problems below.\n # ------------------------------------------------------------------\n\n\ndef is_prime(n):\n \"\"\"\n What comes in: An integer n >= 2.\n What goes out: True if the given integer is prime, else False.\n Side effects: None.\n Examples:\n -- is_prime(11) returns True\n -- is_prime(12) returns False\n -- is_prime(2) returns True\n Note: The algorithm used here is simple and clear but slow.\n \"\"\"\n for k in range(2, (n // 2) + 1):\n if n % k == 0:\n return False\n\n return True\n # ------------------------------------------------------------------\n # Students:\n # Do NOT touch the above is_prime function - it has no TODO.\n # Do NOT copy code from this function.\n #\n # Instead, ** CALL ** this function as needed in the problems below.\n # ------------------------------------------------------------------\n\n\ndef test_problem1a():\n \"\"\" Tests the problem1a function. \"\"\"\n # ------------------------------------------------------------------\n # DONE: 2. 
Implement this TEST function.\n # It TESTS the problem1a function defined below.\n # Include at least ** 5 ** tests.\n #\n # Use the same 4-step process as for previous TEST functions.\n # In particular, include both EXPECTED and ACTUAL results.\n # ------------------------------------------------------------------\n print()\n print('--------------------------------------------------')\n print('Testing the problem1a function:')\n print('--------------------------------------------------')\n\n expected = 95\n answer = problem1a(5, 2)\n print('Test 1 expected:', expected)\n print(' actual: ', answer)\n\n expected = 1576\n answer = problem1a(10, 3)\n print('Test 2 expected:', expected)\n print(' actual: ', answer)\n\n expected = 32312\n answer = problem1a(15, 4)\n print('Test 3 expected:', expected)\n print(' actual: ', answer)\n\n expected = 639655\n answer = problem1a(20, 5)\n print('Test 4 expected:', expected)\n print(' actual: ', answer)\n\n expected = 13321704\n answer = problem1a(25, 6)\n print('Test 5 expected:', expected)\n print(' actual: ', answer)\n\n # This test takes some time to finish but it does work.\n expected = 283359305\n answer = problem1a(30, 7)\n print('Test 6 expected:', expected)\n print(' actual: ', answer)\n\n\ndef problem1a(m, p):\n \"\"\"\n What comes in: Positive integers m and p,\n with m >= 2 and (5 raised to the pth power) >= m.\n What goes out: Returns the sum of all the integers\n between m and (5 raised to the pth power), inclusive,\n that are prime.\n Side effects: None.\n Examples:\n -- If m is 11 and p = 2, this function returns 83,\n because the sum of the primes\n between 4 and (5 to the 2nd power, i.e. 25) is:\n 11 + 13 + 17 + 19 + 23, which is 83.\n -- If m is 70 and p = 3, this function returns 1025,\n because the sum of the primes between 70 and\n (5 to the 3rd power, i.e. 125) is:\n 71 + 73 + 79 + 83 + 89 + 97 + 101 + 103 + 107 + 109 + 113,\n which is 1025.\n -- If m is 2 and p = 1, this function returns 10,\n because the sum of the primes between 2 and\n (5 to the 1st power, i.e. 5) is:\n 2 + 3 + 5, which is 10.\n -- If m is 1000 and p = 6,\n this function returns 13245677 (trust me!)\n \"\"\"\n # ------------------------------------------------------------------\n # DONE: 3. Implement and test this function.\n # Note that you should write its TEST function first (above).\n #\n ####################################################################\n # IMPORTANT:\n # ** For full credit you must appropriately use (call)\n # ** the is_prime function that is defined above.\n ####################################################################\n # ------------------------------------------------------------------\n tot = 0\n for i in range(m, 5 ** p + 1):\n if is_prime(i) == True:\n tot = tot + i\n return tot\n\ndef test_problem1b():\n \"\"\" Tests the problem1b function. \"\"\"\n print()\n print('--------------------------------------------------')\n print('Testing the problem1b function:')\n print('--------------------------------------------------')\n\n ####################################################################\n # THESE TESTS ARE ALREADY DONE. DO NOT CHANGE THEM.\n # You may add more tests if you want,\n # but you are not required to do so.\n ####################################################################\n\n # Test 1:\n expected = True\n answer = problem1b(17, 2)\n print()\n print('Test 1 expected:', expected)\n print(' actual: ', answer)\n if expected != answer:\n print(' **** THIS TEST FAILED. 
****')\n\n # Test 2:\n expected = False\n answer = problem1b(18, 2)\n print()\n print('Test 2 expected:', expected)\n print(' actual: ', answer)\n if expected != answer:\n print(' **** THIS TEST FAILED. ****')\n\n # Test 3:\n expected = True\n answer = problem1b(85, 3)\n print()\n print('Test 3 expected:', expected)\n print(' actual: ', answer)\n if expected != answer:\n print(' **** THIS TEST FAILED. ****')\n\n # Test 4:\n expected = True\n answer = problem1b(89, 3)\n print()\n print('Test 4 expected:', expected)\n print(' actual: ', answer)\n if expected != answer:\n print(' **** THIS TEST FAILED. ****')\n\n # Test 5:\n expected = False\n answer = problem1b(90, 3)\n print()\n print('Test 5 expected:', expected)\n print(' actual: ', answer)\n if expected != answer:\n print(' **** THIS TEST FAILED. ****')\n\n # Test 6:\n expected = False\n answer = problem1b(449, 4)\n print()\n print('Test 6 expected:', expected)\n print(' actual: ', answer)\n if expected != answer:\n print(' **** THIS TEST FAILED. ****')\n\n # Test 7:\n expected = True\n answer = problem1b(450, 4)\n print()\n print('Test 7 expected:', expected)\n print(' actual: ', answer)\n if expected != answer:\n print(' **** THIS TEST FAILED. ****')\n\n # Test 8:\n expected = True\n answer = problem1b(457, 4)\n print()\n print('Test 8 expected:', expected)\n print(' actual: ', answer)\n if expected != answer:\n print(' **** THIS TEST FAILED. ****')\n\n # Test 9:\n expected = False\n answer = problem1b(458, 4)\n print()\n print('Test 9 expected:', expected)\n print(' actual: ', answer)\n if expected != answer:\n print(' **** THIS TEST FAILED. ****')\n\n # Test 10:\n expected = False\n answer = problem1b(569, 5)\n print()\n print('Test 10 expected:', expected)\n print(' actual: ', answer)\n if expected != answer:\n print(' **** THIS TEST FAILED. ****')\n\n # Test 11:\n expected = True\n answer = problem1b(570, 5)\n print()\n print('Test 11 expected:', expected)\n print(' actual: ', answer)\n if expected != answer:\n print(' **** THIS TEST FAILED. ****')\n\n # Test 12:\n expected = True\n answer = problem1b(571, 5)\n print()\n print('Test 12 expected:', expected)\n print(' actual: ', answer)\n if expected != answer:\n print(' **** THIS TEST FAILED. ****')\n\n # Test 13:\n expected = False\n answer = problem1b(572, 5)\n print()\n print('Test 13 expected:', expected)\n print(' actual: ', answer)\n if expected != answer:\n print(' **** THIS TEST FAILED. ****')\n\n # Test 14:\n expected = True\n answer = problem1b(15610, 6)\n print()\n print('Test 14 expected:', expected)\n print(' actual: ', answer)\n if expected != answer:\n print(' **** THIS TEST FAILED. ****')\n\n # Test 15:\n expected = False\n answer = problem1b(15600, 6)\n print()\n print('Test 15 expected:', expected)\n print(' actual: ', answer)\n if expected != answer:\n print(' **** THIS TEST FAILED. ****')\n\n # Test 16:\n expected = False\n answer = problem1b(10000, 6)\n print()\n print('Test 16 expected:', expected)\n print(' actual: ', answer)\n if expected != answer:\n print(' **** THIS TEST FAILED. ****')\n\n # Test 17:\n expected = True\n answer = problem1b(5861, 6)\n print()\n print('Test 17 expected:', expected)\n print(' actual: ', answer)\n if expected != answer:\n print(' **** THIS TEST FAILED. ****')\n\n # Test 18:\n expected = False\n answer = problem1b(5862, 6)\n print()\n print('Test 18 expected:', expected)\n print(' actual: ', answer)\n if expected != answer:\n print(' **** THIS TEST FAILED. 
****')\n\n\ndef problem1b(m, p):\n \"\"\"\n What comes in: Positive integers m and p,\n with m >= 2 and (5 raised to the pth power) >= m.\n What goes out: Let X = the sum of all the integers\n between m and (5 raised to the pth power), inclusive,\n that are prime.\n This function returns True if X is prime.\n This function returns False if X is NOT a prime.\n Side effects: None.\n Examples:\n -- If m is 17 and p = 2, this function returns True,\n because the sum of the primes\n between 17 and (5 to the 2nd power, i.e. 25) is:\n 17 + 19 + 23, which is 59,\n and 59 IS prime.\n -- If m is 18 and p = 2, this function returns False,\n because the sum of the primes\n between 18 and (5 to the 2nd power, i.e. 25) is:\n 19 + 23, which is 42,\n and 42 is NOT prime.\n -- If m is 85 and p = 3, this function returns True,\n because the sum of the primes\n between 85 and (5 to the 3rd power, i.e. 125) is:\n 89 + 91 + 97 + 101 + 103 + 107 + 109 + 113, which is 719,\n and 719 IS prime.\n \"\"\"\n # ------------------------------------------------------------------\n # DONE: 4. Implement and test this function.\n # Tests have been written for you (above).\n ####################################################################\n # IMPORTANT:\n # ** For full credit you must appropriately use (call)\n # ** the appropriate functions that are defined above\n # ** possibly including ones you have written.\n ####################################################################\n # ------------------------------------------------------------------\n\n if is_prime(problem1a(m, p)) == True:\n return True\n else:\n return False\n\n\ndef test_problem1c():\n \"\"\" Tests the problem1c function. \"\"\"\n print()\n print('--------------------------------------------------')\n print('Testing the problem1c function:')\n print('--------------------------------------------------')\n\n ####################################################################\n # THESE TESTS ARE ALREADY DONE. DO NOT CHANGE THEM.\n # You may add more tests if you want,\n # but you are not required to do so.\n ####################################################################\n\n # Test 1:\n expected = 5 * 10\n answer = problem1c(50, 100)\n print()\n print('Test 1 expected:', expected)\n print(' actual: ', answer)\n if expected != answer:\n print(' **** THIS TEST FAILED. ****')\n\n # Test 2:\n expected = 2 * 8\n answer = problem1c(23, 53)\n print()\n print('Test 2 expected:', expected)\n print(' actual: ', answer)\n if expected != answer:\n print(' **** THIS TEST FAILED. ****')\n\n # Test 3:\n expected = 2 * 5\n answer = problem1c(33, 53)\n print()\n print('Test 3 expected:', expected)\n print(' actual: ', answer)\n if expected != answer:\n print(' **** THIS TEST FAILED. ****')\n\n # Test 4:\n expected = 1 * 0\n answer = problem1c(20, 22)\n print()\n print('Test 4 expected:', expected)\n print(' actual: ', answer)\n if expected != answer:\n print(' **** THIS TEST FAILED. ****')\n\n # Test 5:\n expected = 4 * 7\n answer = problem1c(101, 131)\n print()\n print('Test 5 expected:', expected)\n print(' actual: ', answer)\n if expected != answer:\n print(' **** THIS TEST FAILED. ****')\n\n # Test 6:\n expected = 2 * 5\n answer = problem1c(102, 130)\n print()\n print('Test 6 expected:', expected)\n print(' actual: ', answer)\n if expected != answer:\n print(' **** THIS TEST FAILED. 
****')\n\n # Test 7:\n expected = 107 * 168\n answer = problem1c(2, 1000)\n print()\n print('Test 7 expected:', expected)\n print(' actual: ', answer)\n if expected != answer:\n print(' **** THIS TEST FAILED. ****')\n\n # Test 8:\n expected = 90 * 1061\n answer = problem1c(1000, 10000)\n print()\n print('Test 8 expected:', expected)\n print(' actual: ', answer)\n if expected != answer:\n print(' **** THIS TEST FAILED. ****')\n\n # Test 9:\n expected = 83 * 133\n answer = problem1c(101, 929)\n print()\n print('Test 9 expected:', expected)\n print(' actual: ', answer)\n if expected != answer:\n print(' **** THIS TEST FAILED. ****')\n\n # Test 10:\n expected = 83 * 133\n answer = problem1c(100, 930)\n print()\n print('Test 10 expected:', expected)\n print(' actual: ', answer)\n if expected != answer:\n print(' **** THIS TEST FAILED. ****')\n\n # Test 11:\n expected = 81 * 131\n answer = problem1c(102, 928)\n print()\n print('Test 11 expected:', expected)\n print(' actual: ', answer)\n if expected != answer:\n print(' **** THIS TEST FAILED. ****')\n\n # Test 12:\n expected = 82 * 132\n answer = problem1c(101, 928)\n print()\n print('Test 12 expected:', expected)\n print(' actual: ', answer)\n if expected != answer:\n print(' **** THIS TEST FAILED. ****')\n\n # Test 13:\n expected = 82 * 132\n answer = problem1c(102, 929)\n print()\n print('Test 13 expected:', expected)\n print(' actual: ', answer)\n if expected != answer:\n print(' **** THIS TEST FAILED. ****')\n\n # Test 14:\n expected = 280 * 2237\n answer = problem1c(100, 20000)\n print()\n print('Test 14 expected:', expected)\n print(' actual: ', answer)\n if expected != answer:\n print(' **** THIS TEST FAILED. ****')\n\n\ndef problem1c(m, n):\n \"\"\"\n What comes in: Positive integers m and n, with m <= n.\n What goes out: Returns the product XY where:\n -- X is the number of integers from m to n, inclusive,\n that are PALINDROMES.\n -- Y is the number of integers from m to n, inclusive,\n that are PRIME.\n Side effects: None.\n Examples:\n -- If m is 50 and n is 100:\n this function returns 5 * 10, which is 50,\n because the palindromes between 50 and 100 are:\n 55 66 77 88 99 [so there are 5 of them]\n and the primes between 50 and 100 are:\n 53 59 61 67 71 73 79 83 89 97\n [so there are 10 of them]\n -- If m is 23 and n is 53:\n this function returns 2 * 8, which is 16,\n because the palindromes between 23 and 53 are\n 33 44 [so there are 2 of them]\n and the primes between 23 and 53 are\n 23 29 31 37 41 43 47 53\n [so there are 8 of them]\n \"\"\"\n # ------------------------------------------------------------------\n # DONE: 4. Implement and test this function.\n # Tests have been written for you (above).\n ####################################################################\n # IMPORTANT:\n # ** For full credit you must appropriately use (call)\n # ** the appropriate functions that are defined above.\n ####################################################################\n # ------------------------------------------------------------------\n count = 0\n count1 = 0\n for i in range(m, n + 1):\n if is_palindrome(i) == True:\n count += 1\n if is_prime(i) == True:\n count1 += 1\n return count * count1\n\n\n# ----------------------------------------------------------------------\n# Calls main to start the ball rolling.\n# ----------------------------------------------------------------------\nmain()\n",
"step-ids": [
0,
5,
9,
10,
11
]
}
|
[
0,
5,
9,
10,
11
] |
# Python dictionary
# one of the most widely used general-purpose types
# a type that maps keys to values
# no ordering, no duplicate keys; entries can be modified and deleted
# {}
# a class abstracts the noun-like and verb-like features of real-world objects, i.e. it is the template from which program instances (objects) are created
# the class is the mould and an instance is the product made from it, hence written as instance = Class()
#temp = {}
#print(type(temp))
dic01 = {'name' : 'seop',
'age' : 48,
'address' : 'seoul',
'birth' : '730919',
'gender' : True}
#print('dic - ',dic01,type(dic01))
#print(dir(dic01)) # has __iter__, so it can be iterated over
# to check whether a key exists
#print('name' in dic01)
# how to add an entry
#dic01['marriage'] = False
#print(dic01)
#dic01['marriage'] = True
#print(dic01)
# checking a value
#print("value check - ",dic01['birth'])
# depending on developer preference, the data can also be managed as a list, with each entry held as a tuple
dic02 = dict( [('name' , 'seop'),
('age' , 48),
('address' , 'seoul'),
('birth','730919'),
('gender' , True)] )
#print("dict built from tuples -",dic02)
# assigning values via keyword arguments
dic03 = dict( name = 'seop',
age = 48,
address = 'seoul',
birth = '730919',
gender = True)
# output
#print('dic03 -',dic03['Name']) # indexing by key; raises an error because of the capitalised 'Name'
#print('dic03 -',dic03.get('Name')) # using get(); there is no 'Name' key, so None is printed
#print('len - ', len(dic03))
# dict_keys (keys), dict_values (values), dict_items (keys and values)
#print('dict_keys -',list(dic03.keys())) # the keys view can be looped over to pull out values, or converted to a list
#print('dict_values -', list(dic03.values())) # the values view works the same way as the keys
#print('dict_items -',list(dic03.items()))
# for key in dic03.keys() :
# print("{0},{1}".format(key,dic03[key]))
# print("key : {0}, value : {1}".format(key,dic03.get(key)))
# for value in dic03.values() :
# print(value)
# tuple packing & unpacking
#t = ('foo','bar','baz','qux') ## declaring with parentheses = packing
#print(type(t))
#(x1,x2,x3,x4) = t ## unpacking into other variables (the parentheses just group things visually); the number of target variables must match the number of values in the tuple
#(x1,x2,x3,x4) = ('foo','bar','baz','qux')
#print(x1,x2,x3,x4)
a, *b, c = (0,1,2,3,4,5) # when the counts do not match, a starred target can absorb the rest; usually only one * is used
#print(a)
#print(b)
#print(c)
#for (key , value) in dic03.items() :
# print("key : {0}, value : {1}".format(key,value))
# deletion: pop(), del
#del dic03['gender']
#print(dic03)
#print('pop -', dic03.pop('birth'))
#print('dic03 - ',dic03)
#dic03.clear()
#print('dic03 - ', dic03)
|
normal
|
{
"blob_id": "d1077107a5cd3a9f489f74b030a698b0521841f3",
"index": 7721,
"step-1": "<mask token>\n",
"step-2": "dic01 = {'name': 'seop', 'age': 48, 'address': 'seoul', 'birth': '730919',\n 'gender': True}\ndic02 = dict([('name', 'seop'), ('age', 48), ('address', 'seoul'), ('birth',\n '730919'), ('gender', True)])\ndic03 = dict(name='seop', age=48, address='seoul', birth='730919', gender=True)\na, *b, c = 0, 1, 2, 3, 4, 5\n",
"step-3": "# 파이썬 딕셔너리\r\n# 범용적으로 가장 많이 사용되는 타입\r\n# key와 value의 대용관계 type\r\n# 순서 X, key 중복 X, 수정 O, 삭제 O\r\n# {}\r\n# class란 실세계(오브젝트)의 명사,동사적인 특징들을 추상화시키는 것, 즉 프로그램 내 인스턴트(객체)를 추출하는 템플릿이다\r\n# class는 틀이고 인스턴스는 틀에의해 만들어지는 결과물.하여 instance.class()로 표현\r\n\r\n#temp = {}\r\n#print(type(temp))\r\n\r\ndic01 = {'name' : 'seop',\r\n 'age' : 48,\r\n 'address' : 'seoul',\r\n 'birth' : '730919',\r\n 'gender' : True}\r\n#print('dic - ',dic01,type(dic01))\r\n#print(dir(dic01)) #iter가 있으므로 순환반복가능\r\n\r\n# key 유무를 판단하기 위해서\r\n#print('name' in dic01)\r\n\r\n# 요소를 추가하는 방법\r\n#dic01['marriage'] = False\r\n#print(dic01)\r\n\r\n#dic01['marriage'] = True\r\n#print(dic01)\r\n\r\n# 데이터 확인\r\n#print(\"데이터 확인 - \",dic01['birth'])\r\n\r\n#개발자성향에 따라 데이터를 리스트하여 관리하기도함, 각각의 값들은 튜플 이용\r\ndic02 = dict( [('name' , 'seop'),\r\n ('age' , 48),\r\n ('address' , 'seoul'),\r\n ('birth','730919'),\r\n ('gender' , True)] )\r\n#print(\"tuple을 이용한 dict 생성 -\",dic02)\r\n\r\n#변수에다 값을 할당하는 방법\r\ndic03 = dict( name = 'seop',\r\n age = 48,\r\n address = 'seoul',\r\n birth = '730919',\r\n gender = True)\r\n\r\n\r\n#출력\r\n#print('dic03 -',dic03['Name']) #키값을 이용, 대소문자 Name때문에 오류남\r\n#print('dic03 -',dic03.get('Name')) #함수를 이용, 해당하는 밸류를 가져오는 것이라 Name에 담긴 값들이 없어서 None을 출력\r\n\r\n#print('len - ', len(dic03))\r\n\r\n# dict_keys(키), dict_values(값), dict_items(키와값)\r\n#print('dict_keys -',list(dic03.keys())) #각각의 키들은 리스트, 루프를 돌려서 값들을 꺼내올 수도 있다, 리스트화 시킬수도 있음\r\n#print('dict_values -', list(dic03.values())) #밸류도 키와 마찬가지로 각각의 값 리스트\r\n#print('dict_items -',list(dic03.items()))\r\n\r\n# for key in dic03.keys() :\r\n # print(\"{0},{1}\".format(key,dic03[key]))\r\n # print(\"key : {0}, value : {1}\".format(key,dic03.get(key)))\r\n# for value in dic03.values() :\r\n# print(value)\r\n\r\n# 튜플 패킹 & 언패킹\r\n#t = ('foo','bar','baz','qux') ##괄호형 쳐주고 선언하는 것,패킹\r\n#print(type(t))\r\n#(x1,x2,x3,x4) = t ##다른변수에 담을때 언패킹해서 담아준다(괄호형은 보기편하게묶은것), 언패킹할때 튜플에 있는 값들의 개수가 담을 변수의 개수에 맞게 선언이 되어있어야함\r\n#(x1,x2,x3,x4) = ('foo','bar','baz','qux')\r\n#print(x1,x2,x3,x4)\r\n\r\na, *b, c = (0,1,2,3,4,5) #언패킹할때 개수가 안맞을때 *를 사용하여 처리할 수도 있음, 보통 *이 하나만 나오는 경우가 많음\r\n#print(a)\r\n#print(b)\r\n#print(c)\r\n\r\n#for (key , value) in dic03.items() :\r\n# print(\"key : {0}, value : {1}\".format(key,value))\r\n\r\n\r\n# 삭제 pop(), del\r\n#del dic03['gender']\r\n#print(dic03)\r\n\r\n#print('pop -', dic03.pop('birth'))\r\n#print('dic03 - ',dic03)\r\n\r\n#dic03.clear()\r\n#print('dic03 - ', dic03)",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
###########Seq_Profile_Blast_Parser_tool################################
import csv
import time
import re
import os
import sys
from collections import Counter
import operator
from fractions import *
import glob
import ntpath
from collections import defaultdict
path = open('config.txt').read().splitlines()[0].split('=')[-1]
rootDir = '.'
blast_files = []
curdir = os.getcwd()
curdir_up = '/'.join(curdir.split('/')[:-1])
for dirName, subdirList, fileList in os.walk(rootDir, topdown = False):
for fname in fileList:
if fname.startswith("S.A"):
fname = os.path.join(dirName, fname)
blast_files.append(fname)
print 'Module1'
print ' step 1.1 : Parsing the input Blastp files'
for blastfiles in blast_files[:]:
if 'Prot' not in blastfiles:
qids=[]
query_lengths = []
counter = 0
seqsas = []
file1 = open(blastfiles,'r').read()
queries = file1.split('Query=')
datas = queries[1:]
for item in datas[:]:
lines = item.split('\n')
qid = item.split()[0]
qids.append(qid)
for line in lines[:]:
if line.startswith('Length='):
query_lengths.append(int(line.split('=')[-1]))
break
for i,data in enumerate(datas[:]):
lines = data.split('\n')
record = False
for line in lines[:]:
if line.startswith(">") :
tmp = line.split(">")
tmp_name = tmp[1]
tmp_name1 = tmp_name.split("[")
tmp_hit = ''.join(tmp_name[0:-1])
if 'Staphylococcus' in line:
record = True
else:
record = False
if line.startswith(" Score") and record:
tmp = line.strip().split()
tmp_score_s = tmp[2]
tmp_score = float(tmp_score_s)
tmp_evalue = float(tmp[7].replace(",",""))
seqsas.append([qids[i],tmp_hit,tmp_score,tmp_evalue])
if line.startswith(" Identities")and counter <len(seqsas) and record:
tmp = line.strip().split()
tmp_id = tmp[3]
tmp_ids = tmp_id.replace('(','').replace(')','').replace('%','').replace(',','')
ids = int(tmp_ids)
tmp_po = tmp[7]
tmp_pos = tmp_po.replace('(','').replace(')','').replace('%','').replace(',','')
pos = int(tmp_pos)
tmp_gap = tmp[11]
tmp_gaps = tmp_gap.replace('(','').replace(')','').replace('%','').replace(',','')
gaps_percent = int(tmp_gaps)
gap_number = int(tmp[10].split('/')[0])
alignment_length = int(tmp[10].split('/')[-1])
coverage_percent = round(float((alignment_length - gap_number))/query_lengths[i] * 100, 2)
seqsas[counter].append(ids)
seqsas[counter].append(pos)
seqsas[counter].append(gaps_percent)
seqsas[counter].append(gap_number)
seqsas[counter].append(alignment_length)
seqsas[counter].append(coverage_percent)
counter+=1
path1 = '%s/RESULT/MODULE1/P1' % curdir_up
if not os.path.exists(path1):
os.makedirs(path1)
file_name = ntpath.basename('blast_out1%s' % blastfiles) + '.txt'
with open(os.path.join(path1,file_name),'w') as out1:
for item in seqsas[:]:
item = '\t'.join([str(x) for x in item])
out1.write('%s\n' %item)
out1.close()
else:
strsas = []
qids=[]
query_lengths = []
counter = 0
file2 = open(blastfiles,'r').read()
queries = file2.split('Query=')
datas = queries[1:]
for item in datas[:]:
lines = item.split('\n')
qid = item.split()[0]
qids.append(qid)
for line in lines[:]:
if line.startswith('Length='):
query_lengths.append(int(line.split('=')[-1]))
break
for i,data in enumerate(datas[:]):
lines = data.split('\n')
record = False
for line in lines[:]:
if line.startswith(">") :
tmp = line.split(">")
tmp_name = tmp[1]
tmp_hit = tmp_name.split("|")[0]
if line.startswith(" Score") :
tmp = line.strip().split()
tmp_score_s = tmp[2]
tmp_score = float(tmp_score_s)
tmp_evalue = float(tmp[7].replace(",",""))
strsas.append([qids[i],tmp_hit,tmp_score,tmp_evalue])
if line.startswith(" Identities") and counter < len(strsas):
tmp = line.strip().split()
tmp_id = tmp[3]
tmp_ids = tmp_id.replace('(','').replace(')','').replace('%','').replace(',','')
ids = int(tmp_ids)
tmp_po = tmp[7]
tmp_pos = tmp_po.replace('(','').replace(')','').replace('%','').replace(',','')
pos = int(tmp_pos)
tmp_gap = tmp[11]
tmp_gaps = tmp_gap.replace('(','').replace(')','').replace('%','').replace(',','')
gaps_percent = int(tmp_gaps)
gap_number_1 = Fraction(tmp[10])
gap_number = int(tmp[10].split('/')[0])
alignment_length = int(tmp[10].split('/')[-1])
coverage_percent = round(float((alignment_length - gap_number))/query_lengths[i] * 100, 2)
strsas[counter].append(ids)
strsas[counter].append(pos)
strsas[counter].append(gaps_percent)
strsas[counter].append(gap_number)
strsas[counter].append(alignment_length)
strsas[counter].append(coverage_percent)
counter +=1
path1 = '%s/RESULT/MODULE1/P1' %curdir_up
if not os.path.exists(path1):
os.makedirs(path1)
prot_file_name = ntpath.basename('prot_blast_out1%s' % blastfiles) + '.txt'
with open(os.path.join(path1,prot_file_name),'w') as out2:
for item in strsas[:]:
item = '\t'.join([str(x) for x in item])
out2.write('%s\n' %item)
out2.close()
def parser2():
os.chdir('%s/RESULT/MODULE1/P1' %curdir_up)
for file1 in glob.glob('*.txt'):
file_s = open(file1).readlines()
prepsas = []
for item in file_s[:]:
item = item.strip().split('\t')
hit = item[1]
e = float(item[3])
ids = int(item[4])
cov = float(item[9])
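                # keep hits with e-value <= 1e-10, percent identity >= 35 and query coverage >= 75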
if e <=1e-10 and ids >= 35 and cov >= 75:
prepsas.append(item)
if len(item) < 10:
print 'not match'
prot_file_name_s = str(file1)
path2 = '%s/RESULT/MODULE1/P2' %curdir_up
if not os.path.exists(path2):
os.makedirs(path2)
with open(os.path.join(path2,prot_file_name_s),'w') as prepsas1:
for hits in prepsas[:]:
hits = '\t'.join([str(x) for x in hits])
prepsas1.write('%s\n' %hits)
prepsas1.close()
def parser3():
os.chdir('%s/RESULT/MODULE1/P2' %curdir_up)
for file2 in glob.glob('*.txt'):
file3 =open(file2).readlines()
d = {}
for filters in file3[:]:
key, value = filters.strip("\n").split("\t")[0],filters.strip("\n").split("\t")[1:]
key = key.strip('\t')
value = [str(x)[0:]for x in value]
if key not in d:
d[key] = [value]
elif key in d and len(d[key]) <= 250:
d[key].append(value)
prot_file_name_s = str(file2)
path2 = '%s/RESULT/MODULE1/P3' %curdir_up
if not os.path.exists(path2):
os.makedirs(path2)
with open(os.path.join(path2,prot_file_name_s),'w') as fp:
for item in d.keys()[:]:
line = item
hits = d[item]
for hit in hits:
hit2 = ','.join(hit)
line += '\t%s' % hit2
fp.write("%s\n" % line)
parser2()
parser3()
|
normal
|
{
"blob_id": "7eb8fe491a88bcfadf2a38eaa158b74b21514a1c",
"index": 8431,
"step-1": "###########Seq_Profile_Blast_Parser_tool################################\nimport csv\nimport time\nimport re\nimport os\nimport sys\nfrom collections import Counter\nimport operator\nfrom fractions import *\nimport glob\nimport ntpath\nfrom collections import defaultdict\n\n\n\npath = open('config.txt').read().splitlines()[0].split('=')[-1]\n\nrootDir = '.'\t\t\nblast_files = []\ncurdir = os.getcwd()\ncurdir_up = '/'.join(curdir.split('/')[:-1])\n\n\nfor dirName, subdirList, fileList in os.walk(rootDir, topdown = False):\n\t\n\tfor fname in fileList:\n\t\t\n\t\tif fname.startswith(\"S.A\"):\n\t\t\t\n\t\t\tfname = os.path.join(dirName, fname)\n\t\t\tblast_files.append(fname)\n\nprint 'Module1'\nprint '\t\tstep 1.1 : Parsing the input Blastp files'\t\n\n\nfor blastfiles in blast_files[:]:\n\n\tif 'Prot' not in blastfiles:\n\t\t\n\t\tqids=[]\n\t\tquery_lengths = []\n\t\tcounter = 0\n\t\tseqsas = []\n\t\tfile1 = open(blastfiles,'r').read()\n\t\tqueries = file1.split('Query=')\n\t\tdatas = queries[1:]\t\n\t\t\n\t\tfor item in datas[:]:\n\t\t\t\n\t\t\t\tlines = item.split('\\n')\n\t\t\t\tqid = item.split()[0]\n\t\t\t\tqids.append(qid)\n\t\t\t\t\n\t\t\t\tfor line in lines[:]:\n\t\t\t\t\t\n\t\t\t\t\tif line.startswith('Length='):\n\t\t\t\t\t\t\n\t\t\t\t\t\tquery_lengths.append(int(line.split('=')[-1]))\n\t\t\t\t\t\tbreak\n\t\t\t\t\t\n\t\tfor i,data in enumerate(datas[:]):\n\t\t\t\n\t\t\tlines = data.split('\\n')\n\t\t\trecord = False\n\t\t\t\n\t\t\tfor line in lines[:]:\n\t\t\t\t\n\t\t\t\tif line.startswith(\">\") :\n\t\t\t\t\t\n\t\t\t\t\ttmp = line.split(\">\")\n\t\t\t\t\ttmp_name = tmp[1]\n\t\t\t\t\ttmp_name1 = tmp_name.split(\"[\")\t\n\t\t\t\t\ttmp_hit = ''.join(tmp_name[0:-1])\n\t\t\t\t\t\n\t\t\t\t\tif 'Staphylococcus' in line:\n\t\t\t\t\t\t\n\t\t\t\t\t\trecord = True\n\t\t\t\t\t\t\n\t\t\t\t\telse:\n\t\t\t\t\t\t\n\t\t\t\t\t\trecord = False\n\t\t\t\t\t\t\n\t\t\t\tif line.startswith(\" Score\") and record:\n\t\t\t\t\t\n\t\t\t\t\ttmp = line.strip().split()\n\t\t\t\t\ttmp_score_s = tmp[2]\n\t\t\t\t\ttmp_score = float(tmp_score_s)\n\t\t\t\t\ttmp_evalue = float(tmp[7].replace(\",\",\"\"))\n\t\t\t\t\tseqsas.append([qids[i],tmp_hit,tmp_score,tmp_evalue])\n\t\t\n\t\t\t\tif line.startswith(\" Identities\")and counter <len(seqsas) and record:\n\t\t\t\t\t\n\t\t\t\t\ttmp = line.strip().split() \n\t\t\t\t\ttmp_id = tmp[3]\n\t\t\t\t\ttmp_ids = tmp_id.replace('(','').replace(')','').replace('%','').replace(',','')\n\t\t\t\t\tids = int(tmp_ids)\n\t\t\t\t\ttmp_po = tmp[7]\n\t\t\t\t\ttmp_pos = tmp_po.replace('(','').replace(')','').replace('%','').replace(',','')\n\t\t\t\t\tpos = int(tmp_pos)\n\t\t\t\t\ttmp_gap = tmp[11]\n\t\t\t\t\ttmp_gaps = tmp_gap.replace('(','').replace(')','').replace('%','').replace(',','')\n\t\t\t\t\tgaps_percent = int(tmp_gaps)\n\t\t\t\t\tgap_number = int(tmp[10].split('/')[0])\n\t\t\t\t\talignment_length = int(tmp[10].split('/')[-1])\n\t\t\t\t\tcoverage_percent = round(float((alignment_length - gap_number))/query_lengths[i] * 100, 2)\n\t\t\t\t\tseqsas[counter].append(ids)\n\t\t\t\t\tseqsas[counter].append(pos)\n\t\t\t\t\tseqsas[counter].append(gaps_percent)\n\t\t\t\t\tseqsas[counter].append(gap_number)\n\t\t\t\t\tseqsas[counter].append(alignment_length)\n\t\t\t\t\tseqsas[counter].append(coverage_percent)\n\t\t\t\t\tcounter+=1\n\t\t\t\t\t\n\t\tpath1 = '%s/RESULT/MODULE1/P1' % curdir_up\n\n\t\tif not os.path.exists(path1):\n\t\t\t\n\t\t\tos.makedirs(path1)\n\t\tfile_name = ntpath.basename('blast_out1%s' % blastfiles) + '.txt'\n\t\t\n\t\twith 
open(os.path.join(path1,file_name),'w') as out1:\n\t\t\t\n\t\t\tfor item in seqsas[:]:\n\t\t\t\t\n\t\t\t\titem = '\\t'.join([str(x) for x in item])\n\t\t\t\tout1.write('%s\\n' %item)\n\t\t\t\t\n\t\t\tout1.close()\n\t\t\t\t\t\n\telse:\n\t\t\n\t\tstrsas = []\n\t\tqids=[]\n\t\tquery_lengths = []\n\t\tcounter = 0\n\t\tfile2 = open(blastfiles,'r').read()\n\t\tqueries = file2.split('Query=')\n\t\tdatas = queries[1:]\t\n\t\t\n\t\tfor item in datas[:]:\n\t\t\t\n\t\t\t\tlines = item.split('\\n')\n\t\t\t\tqid = item.split()[0]\n\t\t\t\tqids.append(qid)\n\t\t\t\t\n\t\t\t\tfor line in lines[:]:\n\t\t\t\t\t\n\t\t\t\t\tif line.startswith('Length='):\n\t\t\t\t\t\t\n\t\t\t\t\t\tquery_lengths.append(int(line.split('=')[-1]))\n\t\t\t\t\t\tbreak\n\t\t\t\t\t\t\n\t\tfor i,data in enumerate(datas[:]):\n\t\t\t\n\t\t\tlines = data.split('\\n')\n\t\t\trecord = False\n\t\t\t\n\t\t\tfor line in lines[:]:\n\t\t\t\t\n\t\t\t\tif line.startswith(\">\") :\n\t\t\t\t\t\n\t\t\t\t\ttmp = line.split(\">\")\n\t\t\t\t\ttmp_name = tmp[1]\n\t\t\t\t\ttmp_hit = tmp_name.split(\"|\")[0]\n\t\t\t\n\t\t\t\t\t\n\t\t\t\tif line.startswith(\" Score\") :\n\t\t\t\t\t\n\t\t\t\t\ttmp = line.strip().split()\n\t\t\t\t\ttmp_score_s = tmp[2]\n\t\t\t\t\ttmp_score = float(tmp_score_s)\n\t\t\t\t\ttmp_evalue = float(tmp[7].replace(\",\",\"\"))\n\t\t\t\t\tstrsas.append([qids[i],tmp_hit,tmp_score,tmp_evalue])\n\t\t\n\t\t\t\tif line.startswith(\" Identities\") and counter < len(strsas):\n\t\t\t\t\t\n\t\t\t\t\ttmp = line.strip().split()\n\t\t\t\t\ttmp_id = tmp[3]\n\t\t\t\t\ttmp_ids = tmp_id.replace('(','').replace(')','').replace('%','').replace(',','')\n\t\t\t\t\tids = int(tmp_ids)\n\t\t\t\t\ttmp_po = tmp[7]\n\t\t\t\t\ttmp_pos = tmp_po.replace('(','').replace(')','').replace('%','').replace(',','')\n\t\t\t\t\tpos = int(tmp_pos)\n\t\t\t\t\ttmp_gap = tmp[11]\n\t\t\t\t\ttmp_gaps = tmp_gap.replace('(','').replace(')','').replace('%','').replace(',','')\n\t\t\t\t\tgaps_percent = int(tmp_gaps)\n\t\t\t\t\tgap_number_1 = Fraction(tmp[10])\n\t\t\t\t\tgap_number = int(tmp[10].split('/')[0])\n\t\t\t\t\talignment_length = int(tmp[10].split('/')[-1])\n\t\t\t\t\tcoverage_percent = round(float((alignment_length - gap_number))/query_lengths[i] * 100, 2)\n\t\t\t\t\tstrsas[counter].append(ids)\n\t\t\t\t\tstrsas[counter].append(pos)\n\t\t\t\t\tstrsas[counter].append(gaps_percent)\n\t\t\t\t\tstrsas[counter].append(gap_number)\n\t\t\t\t\tstrsas[counter].append(alignment_length)\n\t\t\t\t\tstrsas[counter].append(coverage_percent)\n\t\t\t\t\tcounter +=1\n\t\t\t\t\t\n\t\tpath1 = '%s/RESULT/MODULE1/P1' %curdir_up\n\t\t\n\t\tif not os.path.exists(path1):\n\t\t\t\n\t\t\tos.makedirs(path1)\n\t\tprot_file_name = ntpath.basename('prot_blast_out1%s' % blastfiles) + '.txt'\n\t\t\n\t\twith open(os.path.join(path1,prot_file_name),'w') as out2:\n\t\t\t\n\t\t\tfor item in strsas[:]:\n\t\t\t\t\n\t\t\t\titem = '\\t'.join([str(x) for x in item])\n\t\t\t\tout2.write('%s\\n' %item)\n\t\t\t\t\n\t\t\tout2.close()\n\t\t\ndef parser2():\n\t\n\t\tos.chdir('%s/RESULT/MODULE1/P1' %curdir_up)\n\t\t\n\t\tfor file1 in glob.glob('*.txt'):\n\t\t\tfile_s = open(file1).readlines()\n\t\t\tprepsas = []\n\t\t\t\n\t\t\tfor item in file_s[:]:\n\t\t\t\t\n\t\t\t\titem = item.strip().split('\\t')\n\t\t\t\thit = item[1]\n\t\t\t\te = float(item[3])\n\t\t\t\tids = int(item[4])\n\t\t\t\tcov = float(item[9])\n\t\t\t\tif e <=1e-10 and ids >= 35 and cov >= 75:\n\t\t\t\t\t\n\t\t\t\t\tprepsas.append(item)\n\t\t\t\t\t\t\n\t\t\t\tif len(item) < 10:\n\t\t\t\t\t\n\t\t\t\t\tprint 'not match'\n\t\t\t\n\t\t\tprot_file_name_s = 
str(file1) \n\t\t\t\n\t\t\tpath2 = '%s/RESULT/MODULE1/P2' %curdir_up\n\t\t\t\n\t\t\tif not os.path.exists(path2):\n\t\t\t\t\n\t\t\t\tos.makedirs(path2)\n\t\t\n\t\t\twith open(os.path.join(path2,prot_file_name_s),'w') as prepsas1:\n\t\t\t\t\n\t\t\t\tfor hits in prepsas[:]:\n\t\t\t\t\t\n\t\t\t\t\thits = '\\t'.join([str(x) for x in hits])\n\t\t\t\t\tprepsas1.write('%s\\n' %hits)\n\t\t\t\t\t\n\t\t\t\tprepsas1.close()\n\t\t\n\t\t\t\t\ndef parser3():\n\t\n\tos.chdir('%s/RESULT/MODULE1/P2' %curdir_up)\n\t\n\tfor file2 in glob.glob('*.txt'):\n\t\t\n\t\tfile3 =open(file2).readlines()\n\t\td = {}\n\t\t\n\t\tfor filters in file3[:]:\n\t\t\t\n\t\t\tkey, value = filters.strip(\"\\n\").split(\"\\t\")[0],filters.strip(\"\\n\").split(\"\\t\")[1:]\n\t\t\tkey = key.strip('\\t')\n\t\t\tvalue = [str(x)[0:]for x in value]\n\t\t\t\n\t\t\tif key not in d:\n\t\t\t\t\n\t\t\t\td[key] = [value]\n\t\t\t\t\n\t\t\telif key in d and len(d[key]) <= 250:\n\t\t\t\t\n\t\t\t\td[key].append(value)\n\t\t\t\t\n\t\tprot_file_name_s = str(file2) \n\t\t\n\t\tpath2 = '%s/RESULT/MODULE1/P3' %curdir_up\n\t\t\n\t\tif not os.path.exists(path2):\n\t\t\t\n\t\t\tos.makedirs(path2)\t\n\t\t\t\t\n\t\twith open(os.path.join(path2,prot_file_name_s),'w') as fp:\n\t\t\t\n\t\t\tfor item in d.keys()[:]:\n\t\t\t\t\n\t\t\t\tline = item\n\t\t\t\thits = d[item]\n\n\t\t\t\tfor hit in hits:\n\t\t\t\t\t\n\t\t\t\t\thit2 = ','.join(hit)\n\t\t\t\t\tline += '\\t%s' % hit2\n\t\t\t\t\t\n\t\t\t\tfp.write(\"%s\\n\" % line)\n\n\t\nparser2()\nparser3()\n\t\n\t\n\t\n\t\n\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
import multiprocessing
import time
def foo():
time.sleep(0.1)
p = multiprocessing.Process(target=foo)
p.start()
print("process running: ", p, p.is_alive())
p.terminate()
print("process running: ", p, p.is_alive())
p.join()
print("process running: ", p, p.is_alive())
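# exitcode is None while the process is alive, 0 on a clean exit, and the negative of the
# terminating signal number after terminate() (typically -15, i.e. SIGTERM, on Unix)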
print("process exit code:", p.exitcode)
|
normal
|
{
"blob_id": "19aad7d45416e311530aa2ce3e854cf1f65d18f5",
"index": 960,
"step-1": "<mask token>\n\n\ndef foo():\n time.sleep(0.1)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef foo():\n time.sleep(0.1)\n\n\n<mask token>\np.start()\nprint('process running: ', p, p.is_alive())\np.terminate()\nprint('process running: ', p, p.is_alive())\np.join()\nprint('process running: ', p, p.is_alive())\nprint('process exit code:', p.exitcode)\n",
"step-3": "<mask token>\n\n\ndef foo():\n time.sleep(0.1)\n\n\np = multiprocessing.Process(target=foo)\np.start()\nprint('process running: ', p, p.is_alive())\np.terminate()\nprint('process running: ', p, p.is_alive())\np.join()\nprint('process running: ', p, p.is_alive())\nprint('process exit code:', p.exitcode)\n",
"step-4": "import multiprocessing\nimport time\n\n\ndef foo():\n time.sleep(0.1)\n\n\np = multiprocessing.Process(target=foo)\np.start()\nprint('process running: ', p, p.is_alive())\np.terminate()\nprint('process running: ', p, p.is_alive())\np.join()\nprint('process running: ', p, p.is_alive())\nprint('process exit code:', p.exitcode)\n",
"step-5": "import multiprocessing\nimport time\n\n\ndef foo():\n time.sleep(0.1)\n\n\np = multiprocessing.Process(target=foo)\np.start()\nprint(\"process running: \", p, p.is_alive())\np.terminate()\nprint(\"process running: \", p, p.is_alive())\np.join()\nprint(\"process running: \", p, p.is_alive())\nprint(\"process exit code:\", p.exitcode)\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
import itertools
def possibleNumber(digitSet, n):
res = [[]]
pools = [digitSet] * n
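    # res grows one position per pass; after the loop it holds all len(digitSet)**n combinations,
    # the same result itertools.product(digitSet, repeat=n) would give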
# print(pools)
for pool in pools:
# print(res)
res = [ x + [y] for x in res for y in pool]
for prod in res:
yield prod
# def possibleNumber(digitSet, n):
# res = []
# temp = itertools.product(digitSet, repeat = n)
# for item in temp:
# res.append(item)
# return res
res = possibleNumber('23', 5)
for i in res:
print(i)
|
normal
|
{
"blob_id": "fcc6dd61b94d5fa7f088fc75b748d976d1b30fa5",
"index": 1781,
"step-1": "<mask token>\n\n\ndef possibleNumber(digitSet, n):\n res = [[]]\n pools = [digitSet] * n\n for pool in pools:\n res = [(x + [y]) for x in res for y in pool]\n for prod in res:\n yield prod\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef possibleNumber(digitSet, n):\n res = [[]]\n pools = [digitSet] * n\n for pool in pools:\n res = [(x + [y]) for x in res for y in pool]\n for prod in res:\n yield prod\n\n\n<mask token>\nfor i in res:\n print(i)\n",
"step-3": "<mask token>\n\n\ndef possibleNumber(digitSet, n):\n res = [[]]\n pools = [digitSet] * n\n for pool in pools:\n res = [(x + [y]) for x in res for y in pool]\n for prod in res:\n yield prod\n\n\nres = possibleNumber('23', 5)\nfor i in res:\n print(i)\n",
"step-4": "import itertools\n\n\ndef possibleNumber(digitSet, n):\n res = [[]]\n pools = [digitSet] * n\n for pool in pools:\n res = [(x + [y]) for x in res for y in pool]\n for prod in res:\n yield prod\n\n\nres = possibleNumber('23', 5)\nfor i in res:\n print(i)\n",
"step-5": "import itertools\n\ndef possibleNumber(digitSet, n):\n res = [[]]\n\n pools = [digitSet] * n\n # print(pools)\n for pool in pools:\n # print(res)\n res = [ x + [y] for x in res for y in pool]\n for prod in res:\n yield prod\n\n# def possibleNumber(digitSet, n):\n# res = []\n# temp = itertools.product(digitSet, repeat = n)\n# for item in temp:\n# res.append(item)\n# return res\n\nres = possibleNumber('23', 5)\nfor i in res:\n print(i)",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
__version__ = '3.13.7'
|
normal
|
{
"blob_id": "01852f6dbeb78df3098b14d2f0538ad9193ea511",
"index": 9873,
"step-1": "<mask token>\n",
"step-2": "__version__ = '3.13.7'\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
'''Module for generating and plotting networks.'''
from trafpy.generator.src import tools
import copy
import networkx as nx
import matplotlib.pyplot as plt
import json
def gen_arbitrary_network(num_eps,
ep_label=None,
ep_capacity=12500,
num_channels=1,
racks_dict=None,
topology_type=None):
'''Generates an arbitrary network with num_eps nodes labelled as ep_label.
Note that no edges are formed in this network; it is purely for ep name
indexing purposes when using Demand class. This is useful where want to
use the demand class but not necessarily with a carefully crafted networkx
graph that accurately mimics the network you will use for the demands
Args:
num_eps (int): Number of endpoints in network.
ep_label (str,int,float): Endpoint label (e.g. 'server'). All endpoints will have
ep_label appended to the start of their label (e.g. 'server_0', 'server_1', ...).
ep_capacity (int, float): Byte capacity per end point channel.
num_channels (int, float): Number of channels on each link in network.
racks_dict (dict): Mapping of which end points are in which racks. Keys are
rack ids, values are list of end points. If None, assume there is not
clustering/rack system in the network where have different end points
in different clusters/racks.
Returns:
networkx graph: network object
'''
network = nx.Graph()
network.add_nodes_from([node for node in range(num_eps)])
if ep_label is None:
# must be str or not json serialisable
servers = [str(i) for i in range(num_eps)]
else:
servers = [ep_label+'_'+str(i) for i in range(num_eps)]
relabel_mapping = {node: label for node, label in zip(range(num_eps),servers)}
network = nx.relabel_nodes(network, relabel_mapping)
eps = []
for node in list(network.nodes):
try:
if ep_label in node:
eps.append(node)
except TypeError:
# ep_label is None
eps.append(node)
network.graph['endpoints'] = eps
# /= 2 to get max theoretical capacity (number of units which network can transfer per unit time)
max_nw_capacity = (num_eps * ep_capacity * num_channels) / 2
if topology_type is None:
topology_type = 'arbitrary_endpoints_{}_chancap_{}_channels_{}'.format(num_eps, ep_capacity, num_channels)
init_global_network_attrs(network,
max_nw_capacity,
num_channels,
ep_link_capacity=ep_capacity*num_channels,
endpoint_label=ep_label,
node_labels=[ep_label],
racks_dict=racks_dict,
topology_type=topology_type)
return network
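# Example (illustrative): gen_arbitrary_network(num_eps=4, ep_label='server') returns a
# graph whose nodes are 'server_0'..'server_3' with no edges, ready for use with the Demand class.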
def gen_nsfnet_network(ep_label='server',
rack_label='rack',
N=0,
num_channels=2,
server_to_rack_channel_capacity=1,
rack_to_rack_channel_capacity=10,
show_fig=False):
'''Generates the standard 14-node NSFNET topology (a U.S. core network).
Args:
ep_label (str,int,float): Endpoint label (e.g. 'server'). All endpoints will have
ep_label appended to the start of their label (e.g. 'server_0', 'server_1', ...).
N (int): Number of servers per rack. If 0, assume all nodes in nsfnet
are endpoints
num_channels (int,float): Number of channels on each link in network.
server_to_rack_channel_capacity (int,float): Byte capacity per channel
between servers and ToR switch.
rack_to_rack_channel_capacity (int,float): Byte capacity per channel between racks.
show_fig (bool): Whether or not to plot and show fig. If True, will
display fig.
Returns:
networkx graph: network object
'''
channel_names = gen_channel_names(num_channels)
network = nx.Graph()
node_pair_list = [[0,1],
[0,3],
[0,2],
[1,2],
[1,7],
[3,8],
[3,4],
[3,6],
[4,5],
[4,5],
[5,2],
[5,13],
[5,12],
[6,7],
[7,10],
[8,11],
[8,9],
[9,10],
[9,12],
[10,11],
[10,13],
[11,12]]
if N == 0:
# above nodes are all end points
label = ep_label
else:
# above nodes are ToR switch nodes
label = rack_label
for idx in range(len(node_pair_list)):
node_pair_list[idx][0] = label + '_' + str(node_pair_list[idx][0])
node_pair_list[idx][1] = label + '_' + str(node_pair_list[idx][1])
# add 14 nodes
for edge in node_pair_list:
network.add_edge(*tuple(edge))
if N == 0:
# assume all nodes are servers
racks_dict = None
else:
# each of 14 nodes in NSFNET is a ToR switch
i = 0
racks_dict = {rack: [] for rack in range(14)}
for rack in range(14):
for server in range(N):
racks_dict[rack].append(ep_label+'_'+str(i))
network.add_edge(ep_label+'_'+str(i), rack_label+'_'+str(rack))
i += 1
channel_names = gen_channel_names(num_channels)
edges = [edge for edge in network.edges]
add_edges_capacity_attrs(network, edges, channel_names, rack_to_rack_channel_capacity)
    # set global network attrs
network.graph['endpoints'] = get_endpoints(network, ep_label)
# /= 2 to get max theoretical capacity (number of units which network can transfer per unit time)
max_nw_capacity = (len(network.edges) * num_channels * rack_to_rack_channel_capacity) / 2
init_global_network_attrs(network,
max_nw_capacity,
num_channels,
ep_link_capacity=server_to_rack_channel_capacity*num_channels,
endpoint_label=ep_label,
node_labels=[ep_label, rack_label],
topology_type='14_node_nsfnet',
racks_dict=racks_dict)
if show_fig:
plot_network(network, show_fig=True)
return network
def gen_simple_network(ep_label='server',
num_channels=2,
server_to_rack_channel_capacity=500,
show_fig=False):
'''Generates very simple 5-node topology.
Args:
ep_label (str,int,float): Endpoint label (e.g. 'server'). All endpoints will have
ep_label appended to the start of their label (e.g. 'server_0', 'server_1', ...).
num_channels (int,float): Number of channels on each link in network.
        server_to_rack_channel_capacity (int,float): Byte capacity per channel.
show_fig (bool): Whether or not to plot and show fig. If True, will
display fig.
Returns:
networkx graph: network object
'''
network = nx.Graph()
network.add_nodes_from([node for node in range(5)])
network.add_edges_from([(0,1),
(0,2),
(1,2),
(2,4),
(4,3),
(3,1)],weight=1)
servers = [ep_label+'_'+str(i) for i in range(5)]
relabel_mapping = {node: label for node, label in zip(range(5),servers)}
network = nx.relabel_nodes(network, relabel_mapping)
channel_names = gen_channel_names(num_channels)
edges = [edge for edge in network.edges]
add_edges_capacity_attrs(network, edges, channel_names, server_to_rack_channel_capacity)
    # set global network attrs
network.graph['endpoints'] = get_endpoints(network, ep_label)
# /= 2 to get max theoretical capacity (number of units which network can transfer per unit time)
max_nw_capacity = (len(network.edges) * num_channels * server_to_rack_channel_capacity) / 2
init_global_network_attrs(network,
max_nw_capacity,
num_channels,
ep_link_capacity=server_to_rack_channel_capacity*num_channels,
endpoint_label=ep_label,
node_labels=[ep_label],
topology_type='5_node_simple_network')
if show_fig:
plot_network(network, show_fig=True)
return network
def get_endpoints(network, ep_label):
'''Gets list of endpoints of network.
Args:
network (networkx graph): Networkx object.
ep_label (str,int,float): Endpoint label (e.g. 'server'). All endpoints will have
ep_label appended to the start of their label (e.g. 'server_0', 'server_1', ...).
Returns:
eps (list): List of endpoints.
'''
eps = []
for node in list(network.nodes):
if ep_label in node:
eps.append(node)
return eps
def gen_fat_tree(k=4,
L=2,
n=4,
ep_label='server',
rack_label='rack',
edge_label='edge',
aggregate_label='agg',
core_label='core',
num_channels = 2,
server_to_rack_channel_capacity=500,
rack_to_edge_channel_capacity=1000,
edge_to_agg_channel_capacity=1000,
agg_to_core_channel_capacity=2000,
rack_to_core_channel_capacity=2000,
show_fig=False):
'''Generates a perfect fat tree (i.e. all layers have switches with same radix/number of ports).
Top layer is always core (spine) switch layer, bottom layer is always
ToR (leaf) layer.
L must be either 2 (core, ToR) or 4 (core, agg, edge, ToR)
N.B. L=2 is commonly referred to as '2-layer Clos' or 'Clos' or 'spine-leaf' topology
Resource for building (scroll down to summary table with equations):
https://packetpushers.net/demystifying-dcn-topologies-clos-fat-trees-part2/
Another good resource for data centre topologies etc. in general:
https://www.oreilly.com/library/view/bgp-in-the/9781491983416/ch01.html#:~:text=The%20most%20common%20routing%20protocol,single%20data%20center%2C%20as%20well.
Parameters of network:
- number of core (spine) switches = (k/2)^(L/2) (top layer)
- number of edge switches (if L=4) = (k^2)/2
- number of agg switches (if L=4) = (k^2)/2
- number of pods (if L=4) (pod is a group of agg and/or edge switches) = 2*(k/2)^(L-2)
- number of ToR (leaf) switches (racks) = 2*(k/2)^(L-1) (bottom layer)
- number of server-facing ToR 'host' ports = 2*(k/2)^2 (can have multiple servers connected to same host port, & can oversubscribe)
- number of servers = number ToR switches * n
Args:
k (int): Number of ports (links) on each switch (both up and down).
L (int): Number of layers in the fat tree.
        n (int): Number of servers per rack.
ep_label (str,int,float): Endpoint label (e.g. 'server'). All endpoints will have
ep_label appended to the start of their label (e.g. 'server_0', 'server_1', ...).
edge_label (str,int): Label to assign to edge switch nodes
aggregate_label (str,int): Label to assign to edge switch nodes
core_label (str,int): Label to assign to core switch nodes
num_channels (int, float): Number of channels on each link in network
        server_to_rack_channel_capacity (int,float): Byte capacity per channel
edge_to_agg_channel_capacity (int,float): (if L==4) Byte capacity per channel
agg_to_core_channel_capacity (int,float): (if L==4) Byte capacity per channel
rack_to_core_channel_capacity (int,float): (if L==2) Byte capacity per channel
Returns:
networkx graph: network object
'''
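    # Worked example of the formulas above (illustrative): k=4, L=4, n=4 gives
    # (4/2)**(4/2) = 4 core switches, (4**2)/2 = 8 edge and 8 agg switches,
    # 2*(4/2)**(4-1) = 16 ToR switches (racks) and 16*4 = 64 servers.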
if L != 2 and L != 4:
raise Exception('L must be 2 (ToR layer, core layer) or 4 (ToR layer, edge layer, agg layer, core layer), but is {}.'.format(L))
if k % 2 != 0:
raise Exception('k must be even since, in perfect fat tree, have equal number of up and down ports on each switch, but is {}.'.format(k))
channel_names = gen_channel_names(num_channels)
# initialise network nodes
if L == 2:
node_labels = [ep_label, rack_label, core_label]
else:
node_labels = [ep_label, rack_label, edge_label, aggregate_label, core_label]
#num_cores = int((k/2)**(L-1))
#num_cores = int((k/2)**2)
num_cores = int((k/2)**(L/2))
num_aggs = int((k**2)/2)
num_edges = int((k**2)/2)
num_pods = int(2*(k/2)**(L-2))
num_racks = int(2*(k/2)**(L-1))
num_servers = int(num_racks * n)
cores = [core_label+'_'+str(i) for i in range(num_cores)]
aggs = [aggregate_label+'_'+str(i) for i in range(num_aggs)]
edges = [edge_label+'_'+str(i) for i in range(num_edges)]
racks = [rack_label+'_'+str(i) for i in range(num_racks)]
servers = [ep_label+'_'+str(i) for i in range(num_servers)]
# create core and rack layer networks
core_layer = nx.Graph()
rack_layer = nx.Graph()
core_layer.add_nodes_from(cores)
rack_layer.add_nodes_from(racks)
# combine cores and racks into single network
fat_tree_network = nx.compose(core_layer, rack_layer)
if L == 2:
# 2 layers: Core, ToR
# link racks to cores, add link attributes
rack_iterator = iter(racks)
for rack in racks:
core_iterator = iter(cores)
# have k/2 up-ports on each switch
for up_port in range(int(k/2)):
core = next(core_iterator)
fat_tree_network.add_edge(rack, core)
add_edge_capacity_attrs(fat_tree_network,
(rack, core),
channel_names,
rack_to_core_channel_capacity)
else:
# 4 layers: Core, Agg, Edge, ToR. Agg and Edge switches grouped into pods.
# group edges and aggregates into pods
num_pods = int(k)
pods = [[] for i in range(num_pods)]
prev_iter = 0
for pod_iter in range(len(pods)):
curr_iter = int(prev_iter + (k/2))
pods[pod_iter].append(edges[prev_iter:curr_iter])
pods[pod_iter].append(aggs[prev_iter:curr_iter])
prev_iter = curr_iter
# create dict of pod networks
pod_labels = ['pod_'+str(i) for i in range(num_pods)]
pods_dict = {tuple([pod]): nx.Graph() for pod in pod_labels}
for pod_iter in range(num_pods):
key = ('pod_'+str(pod_iter),)
pod_edges = pods[pod_iter][0]
pod_aggs = pods[pod_iter][1]
pods_dict[key].add_nodes_from(pod_edges)
pods_dict[key].add_nodes_from(pod_aggs)
# connect edge and aggregate switches within pod, add link attributes
for pod_edge in pod_edges:
for pod_agg in pod_aggs:
pods_dict[key].add_edge(pod_agg, pod_edge)
add_edge_capacity_attrs(pods_dict[key],
(pod_agg,pod_edge),
channel_names,
edge_to_agg_channel_capacity)
# add pods (agg + edge) layer to fat-tree
pod_networks = list(pods_dict.values())
for pod_iter in range(num_pods):
fat_tree_network = nx.compose(fat_tree_network, pod_networks[pod_iter])
# link aggregate switches in pods to core switches, add link attributes
for pod_iter in range(num_pods):
pod_aggs = pods[pod_iter][1]
core_iterator = iter(cores)
for pod_agg in pod_aggs:
while fat_tree_network.degree[pod_agg] < k:
core = next(core_iterator)
fat_tree_network.add_edge(core, pod_agg)
add_edge_capacity_attrs(fat_tree_network,
(core,pod_agg),
channel_names,
agg_to_core_channel_capacity)
# link edge switches in pods to racks, add link attributes
rack_iterator = iter(racks)
for pod_iter in range(num_pods):
pod_edges = pods[pod_iter][0]
for pod_edge in pod_edges:
while fat_tree_network.degree[pod_edge] < k:
rack = next(rack_iterator)
fat_tree_network.add_edge(pod_edge, rack)
add_edge_capacity_attrs(fat_tree_network,
(pod_edge,rack),
channel_names,
rack_to_edge_channel_capacity)
# link servers to racks, add link attributes
racks_dict = {rack: [] for rack in racks} # track which endpoints in which rack
server_iterator = iter(servers)
for rack in racks:
for _ in range(n):
server = next(server_iterator)
fat_tree_network.add_edge(rack, server)
add_edge_capacity_attrs(fat_tree_network,
(rack, server),
channel_names,
server_to_rack_channel_capacity)
racks_dict[rack].append(server)
# calc total network capacity
# /= 2 to get max theoretical capacity (number of units which network can transfer per unit time)
max_nw_capacity = (num_servers * num_channels * server_to_rack_channel_capacity) / 2
# init global network attrs
fat_tree_network.graph['endpoints'] = servers
init_global_network_attrs(fat_tree_network,
max_nw_capacity,
num_channels,
ep_link_capacity=server_to_rack_channel_capacity*num_channels,
endpoint_label=ep_label,
node_labels=node_labels,
topology_type='fat_tree',
racks_dict=racks_dict)
if show_fig:
plot_network(fat_tree_network, show_fig=True)
return fat_tree_network
def init_global_network_attrs(network,
max_nw_capacity,
num_channels,
ep_link_capacity,
endpoint_label = 'server',
topology_type='unknown',
node_labels=['server'],
racks_dict=None):
'''Initialises the standard global network attributes of a given network.
Args:
network (obj): NetworkX object.
max_nw_capacity (int/float): Maximum rate at which info can be reliably
transmitted over the network (sum of all link capacities).
num_channels (int): Number of channels on each link in network.
topology_type (str): Label of network topology (e.g. 'fat_tree').
node_labels (list): Label classes assigned to network nodes
(e.g. ['server', 'rack', 'edge']).
racks_dict (dict): Which servers/endpoints are in which rack. If None,
assume do not have rack system where have multiple servers in one
rack.
'''
network.graph['endpoint_label'] = endpoint_label
network.graph['num_channels_per_link'] = num_channels
network.graph['ep_link_capacity'] = ep_link_capacity
network.graph['ep_link_port_capacity'] = ep_link_capacity / 2 # all eps have a src & a dst port
network.graph['max_nw_capacity'] = max_nw_capacity
network.graph['curr_nw_capacity_used'] = 0
network.graph['num_active_connections'] = 0
network.graph['total_connections_blocked'] = 0
network.graph['node_labels'] = node_labels
network.graph['topology_type'] = topology_type
network.graph['channel_names'] = gen_channel_names(num_channels)
# ensure racks dict is str so json serialisable
if racks_dict is not None:
_racks_dict = {}
for key, val in racks_dict.items():
_racks_dict[str(key)] = []
for v in val:
_racks_dict[str(key)].append(str(v))
network.graph['rack_to_ep_dict'] = _racks_dict
else:
network.graph['rack_to_ep_dict'] = None
if racks_dict is not None:
# switch racks_dict keys and values to make hashing easier
ep_to_rack_dict = {}
for key, val in _racks_dict.items():
for v in val:
if v not in ep_to_rack_dict.keys():
ep_to_rack_dict[v] = key
network.graph['ep_to_rack_dict'] = ep_to_rack_dict
else:
network.graph['ep_to_rack_dict'] = None
def gen_channel_names(num_channels):
'''Generates channel names for channels on each link in network.'''
channels = [channel+1 for channel in range(num_channels)]
channel_names = ['channel_' + str(channel) for channel in channels]
return channel_names
def add_edge_capacity_attrs(network,
edge,
channel_names,
channel_capacity,
bidirectional_links=True):
'''Adds channels and corresponding max channel bytes to single edge in network.
Args:
        network (networkx graph): Network containing edges to which attrs will
be added.
edge (tuple): Node-node edge pair.
channel_names (list): List of channel names to add to edge.
channel_capacity (int,float): Capacity to allocate to each channel.
bidirectional_links (bool): If True, each link has capacity split equally
between src and dst port. I.e. all links have a src and dst port
which are treated separately to incoming and outgoing traffic to and
from given node (switch or server).
'''
if bidirectional_links:
attrs = {edge:
{'{}_to_{}_port'.format(edge[0], edge[1]):
{'channels':
{channel: channel_capacity/2 for channel in channel_names},
'max_channel_capacity': channel_capacity/2
},
'{}_to_{}_port'.format(edge[1], edge[0]):
{'channels':
{channel: channel_capacity/2 for channel in channel_names},
'max_channel_capacity': channel_capacity/2
}
}
}
else:
attrs = {edge:
{'channels': {channel: channel_capacity for channel in channel_names},
'max_channel_capacity': channel_capacity}}
nx.set_edge_attributes(network, attrs)
def add_edges_capacity_attrs(network,
edges,
channel_names,
channel_capacity,
bidirectional_links=True):
    '''Adds channels & max channel capacities to a list of edges in network.
To access e.g. the edge going from node 0 to node 1 (edge (0, 1)), you
would index the network with network[0][1]
To access e.g. the channel_1 attribute of this particular (0, 1) edge, you
would do network[0][1]['channels']['channel_1']
OR
if bidirectional_links, you do network[0][1]['0_to_1_port']['channels']['channel_1']
or network[0][1]['1_to_0_port']['channels']['channel_1] depending on which direction
of the link you want to access.
Args:
network (networkx graph): Network containing edges to which attrs will
be added.
edges (list): List of node pairs in tuples.
channel_names (list of str): List of channel names to add to edge.
channel_capacity (int, float): Capacity to allocate to each channel.
bidirectional_links (bool): If True, each link has capacity split equally
between src and dst port. I.e. all links have a src and dst port
which are treated separately to incoming and outgoing traffic to and
from given node (switch or server).
'''
if bidirectional_links:
attrs = {edge:
{'{}_to_{}_port'.format(edge[0], edge[1]):
{'channels':
{channel: channel_capacity/2 for channel in channel_names},
'max_channel_capacity': channel_capacity/2
},
'{}_to_{}_port'.format(edge[1], edge[0]):
{'channels':
{channel: channel_capacity/2 for channel in channel_names},
'max_channel_capacity': channel_capacity/2
}
}
for edge in edges}
else:
attrs = {edge:
{'channels':
{channel: channel_capacity for channel in channel_names},
'max_channel_capacity':
channel_capacity
} for edge in edges}
nx.set_edge_attributes(network, attrs)
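# Illustrative access pattern for the attrs set above, assuming bidirectional_links=True and
# an edge ('a', 'b'): network['a']['b']['a_to_b_port']['channels']['channel_1']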
def get_node_type_dict(network, node_types=[]):
'''Gets dict where keys are node types, values are list of nodes for each node type in graph.'''
network_nodes = []
for network_node in network.nodes:
network_nodes.append(network_node)
network_nodes_dict = {node_type: [] for node_type in node_types}
for n in network_nodes:
for node_type in node_types:
if node_type in n:
network_nodes_dict[node_type].append(n)
else:
# not this node type
pass
return network_nodes_dict
def get_fat_tree_positions(net, width_scale=500, height_scale=10):
'''Gets networkx positions of nodes in fat tree network for plotting.'''
pos = {}
node_type_dict = get_node_type_dict(net, net.graph['node_labels'])
node_types = list(node_type_dict.keys())
heights = {} # dict for heigh separation between fat tree layers
widths = {} # dict for width separation between nodes within layers
h = iter([1, 2, 3, 4, 5]) # server, rack, edge, agg, core heights
for node_type in node_types:
heights[node_type] = next(h)
widths[node_type] = 1/(len(node_type_dict[node_type])+1)
idx = 0
for node in node_type_dict[node_type]:
pos[node] = ((idx+1)*widths[node_type]*width_scale,heights[node_type]*height_scale)
idx += 1
return pos
def init_network_node_positions(net):
'''Initialises network node positions for plotting.'''
if net.graph['topology_type'] == 'fat_tree':
pos = get_fat_tree_positions(net)
else:
pos = nx.nx_agraph.graphviz_layout(net, prog='neato')
return pos
def plot_network(network,
draw_node_labels=True,
ep_label='server',
network_node_size=2000,
font_size=30,
linewidths=1,
fig_scale=2,
path_to_save=None,
show_fig=False):
'''Plots networkx graph.
Recognises special fat tree network and applies appropriate node positioning,
labelling, colouring etc.
Args:
network (networkx graph): Network object to be plotted.
draw_node_labels (bool): Whether or not to draw node labels on plot.
ep_label (str,int,float): Endpoint label (e.g. 'server'). All endpoints will have
ep_label appended to the start of their label (e.g. 'server_0', 'server_1', ...).
network_node_size (int,float): Size of plotted nodes.
font_size (int,float): Size of of font of plotted labels etc.
linewidths (int,float): Width of edges in network.
fig_scale (int,float): Scaling factor to apply to plotted network.
path_to_save (str): Path to directory (with file name included) in which
to save generated plot. E.g. path_to_save='data/my_plot'
show_fig (bool): Whether or not to plot and show fig. If True, will
return and display fig.
Returns:
matplotlib.figure.Figure: node distribution plotted as a 2d matrix.
'''
net_node_positions = init_network_node_positions(copy.deepcopy(network))
fig = plt.figure(figsize=[15*fig_scale,15*fig_scale])
# add nodes and edges
pos = {}
network_nodes = []
network_nodes_dict = get_node_type_dict(network, network.graph['node_labels'])
for nodes in list(network_nodes_dict.values()):
for network_node in nodes:
pos[network_node] = net_node_positions[network_node]
# network nodes
node_colours = iter(['#25c44d', '#36a0c7', '#e8b017', '#6115a3', '#160e63']) # server, rack, edge, agg, core
for node_type in network.graph['node_labels']:
nx.draw_networkx_nodes(network,
pos,
nodelist=network_nodes_dict[node_type],
node_size=network_node_size,
node_color=next(node_colours),
linewidths=linewidths,
label=node_type)
if draw_node_labels:
# nodes
nx.draw_networkx_labels(network,
pos,
font_size=font_size,
font_color='k',
font_family='sans-serif',
font_weight='normal',
alpha=1.0)
# fibre links
fibre_links = list(network.edges)
nx.draw_networkx_edges(network,
pos,
edgelist=fibre_links,
edge_color='k',
width=3,
label='Fibre link')
if path_to_save is not None:
tools.pickle_data(path_to_save, fig)
if show_fig:
plt.show()
return fig
if __name__ == '__main__':
#network = gen_simple_network()
#network = gen_nsfnet_network()
    # note: k must be even for gen_fat_tree, and plot_network takes path_to_save/show_fig
    network = gen_fat_tree(k=4)
    plot_network(network, path_to_save='figures/graph/network_graph', show_fig=True)
|
normal
|
{
"blob_id": "4cf2829282cb0a1673e741f78f17ce27a2817ff2",
"index": 651,
"step-1": "<mask token>\n\n\ndef gen_arbitrary_network(num_eps, ep_label=None, ep_capacity=12500,\n num_channels=1, racks_dict=None, topology_type=None):\n \"\"\"Generates an arbitrary network with num_eps nodes labelled as ep_label.\n\n Note that no edges are formed in this network; it is purely for ep name \n indexing purposes when using Demand class. This is useful where want to\n use the demand class but not necessarily with a carefully crafted networkx\n graph that accurately mimics the network you will use for the demands\n\n Args:\n num_eps (int): Number of endpoints in network.\n ep_label (str,int,float): Endpoint label (e.g. 'server'). All endpoints will have\n ep_label appended to the start of their label (e.g. 'server_0', 'server_1', ...).\n ep_capacity (int, float): Byte capacity per end point channel.\n num_channels (int, float): Number of channels on each link in network.\n racks_dict (dict): Mapping of which end points are in which racks. Keys are\n rack ids, values are list of end points. If None, assume there is not\n clustering/rack system in the network where have different end points\n in different clusters/racks.\n\n Returns:\n networkx graph: network object\n\n \"\"\"\n network = nx.Graph()\n network.add_nodes_from([node for node in range(num_eps)])\n if ep_label is None:\n servers = [str(i) for i in range(num_eps)]\n else:\n servers = [(ep_label + '_' + str(i)) for i in range(num_eps)]\n relabel_mapping = {node: label for node, label in zip(range(num_eps),\n servers)}\n network = nx.relabel_nodes(network, relabel_mapping)\n eps = []\n for node in list(network.nodes):\n try:\n if ep_label in node:\n eps.append(node)\n except TypeError:\n eps.append(node)\n network.graph['endpoints'] = eps\n max_nw_capacity = num_eps * ep_capacity * num_channels / 2\n if topology_type is None:\n topology_type = 'arbitrary_endpoints_{}_chancap_{}_channels_{}'.format(\n num_eps, ep_capacity, num_channels)\n init_global_network_attrs(network, max_nw_capacity, num_channels,\n ep_link_capacity=ep_capacity * num_channels, endpoint_label=\n ep_label, node_labels=[ep_label], racks_dict=racks_dict,\n topology_type=topology_type)\n return network\n\n\ndef gen_nsfnet_network(ep_label='server', rack_label='rack', N=0,\n num_channels=2, server_to_rack_channel_capacity=1,\n rack_to_rack_channel_capacity=10, show_fig=False):\n \"\"\"Generates the standard 14-node NSFNET topology (a U.S. core network).\n \n Args:\n ep_label (str,int,float): Endpoint label (e.g. 'server'). All endpoints will have\n ep_label appended to the start of their label (e.g. 'server_0', 'server_1', ...).\n N (int): Number of servers per rack. If 0, assume all nodes in nsfnet\n are endpoints\n num_channels (int,float): Number of channels on each link in network.\n server_to_rack_channel_capacity (int,float): Byte capacity per channel \n between servers and ToR switch.\n rack_to_rack_channel_capacity (int,float): Byte capacity per channel between racks.\n show_fig (bool): Whether or not to plot and show fig. 
If True, will\n display fig.\n\n Returns:\n networkx graph: network object\n\n \"\"\"\n channel_names = gen_channel_names(num_channels)\n network = nx.Graph()\n node_pair_list = [[0, 1], [0, 3], [0, 2], [1, 2], [1, 7], [3, 8], [3, 4\n ], [3, 6], [4, 5], [4, 5], [5, 2], [5, 13], [5, 12], [6, 7], [7, 10\n ], [8, 11], [8, 9], [9, 10], [9, 12], [10, 11], [10, 13], [11, 12]]\n if N == 0:\n label = ep_label\n else:\n label = rack_label\n for idx in range(len(node_pair_list)):\n node_pair_list[idx][0] = label + '_' + str(node_pair_list[idx][0])\n node_pair_list[idx][1] = label + '_' + str(node_pair_list[idx][1])\n for edge in node_pair_list:\n network.add_edge(*tuple(edge))\n if N == 0:\n racks_dict = None\n else:\n i = 0\n racks_dict = {rack: [] for rack in range(14)}\n for rack in range(14):\n for server in range(N):\n racks_dict[rack].append(ep_label + '_' + str(i))\n network.add_edge(ep_label + '_' + str(i), rack_label + '_' +\n str(rack))\n i += 1\n channel_names = gen_channel_names(num_channels)\n edges = [edge for edge in network.edges]\n add_edges_capacity_attrs(network, edges, channel_names,\n rack_to_rack_channel_capacity)\n network.graph['endpoints'] = get_endpoints(network, ep_label)\n max_nw_capacity = len(network.edges\n ) * num_channels * rack_to_rack_channel_capacity / 2\n init_global_network_attrs(network, max_nw_capacity, num_channels,\n ep_link_capacity=server_to_rack_channel_capacity * num_channels,\n endpoint_label=ep_label, node_labels=[ep_label, rack_label],\n topology_type='14_node_nsfnet', racks_dict=racks_dict)\n if show_fig:\n plot_network(network, show_fig=True)\n return network\n\n\ndef gen_simple_network(ep_label='server', num_channels=2,\n server_to_rack_channel_capacity=500, show_fig=False):\n \"\"\"Generates very simple 5-node topology.\n\n Args:\n ep_label (str,int,float): Endpoint label (e.g. 'server'). All endpoints will have\n ep_label appended to the start of their label (e.g. 'server_0', 'server_1', ...).\n num_channels (int,float): Number of channels on each link in network.\n channel_capacity (int,float): Byte capacity per channel.\n show_fig (bool): Whether or not to plot and show fig. If True, will\n display fig.\n\n Returns:\n networkx graph: network object\n\n \"\"\"\n network = nx.Graph()\n network.add_nodes_from([node for node in range(5)])\n network.add_edges_from([(0, 1), (0, 2), (1, 2), (2, 4), (4, 3), (3, 1)],\n weight=1)\n servers = [(ep_label + '_' + str(i)) for i in range(5)]\n relabel_mapping = {node: label for node, label in zip(range(5), servers)}\n network = nx.relabel_nodes(network, relabel_mapping)\n channel_names = gen_channel_names(num_channels)\n edges = [edge for edge in network.edges]\n add_edges_capacity_attrs(network, edges, channel_names,\n server_to_rack_channel_capacity)\n network.graph['endpoints'] = get_endpoints(network, ep_label)\n max_nw_capacity = len(network.edges\n ) * num_channels * server_to_rack_channel_capacity / 2\n init_global_network_attrs(network, max_nw_capacity, num_channels,\n ep_link_capacity=server_to_rack_channel_capacity * num_channels,\n endpoint_label=ep_label, node_labels=[ep_label], topology_type=\n '5_node_simple_network')\n if show_fig:\n plot_network(network, show_fig=True)\n return network\n\n\ndef get_endpoints(network, ep_label):\n \"\"\"Gets list of endpoints of network.\n\n Args:\n network (networkx graph): Networkx object.\n ep_label (str,int,float): Endpoint label (e.g. 'server'). All endpoints will have\n ep_label appended to the start of their label (e.g. 
'server_0', 'server_1', ...).\n\n Returns:\n eps (list): List of endpoints.\n\n \"\"\"\n eps = []\n for node in list(network.nodes):\n if ep_label in node:\n eps.append(node)\n return eps\n\n\ndef gen_fat_tree(k=4, L=2, n=4, ep_label='server', rack_label='rack',\n edge_label='edge', aggregate_label='agg', core_label='core',\n num_channels=2, server_to_rack_channel_capacity=500,\n rack_to_edge_channel_capacity=1000, edge_to_agg_channel_capacity=1000,\n agg_to_core_channel_capacity=2000, rack_to_core_channel_capacity=2000,\n show_fig=False):\n \"\"\"Generates a perfect fat tree (i.e. all layers have switches with same radix/number of ports).\n\n Top layer is always core (spine) switch layer, bottom layer is always\n ToR (leaf) layer.\n\n L must be either 2 (core, ToR) or 4 (core, agg, edge, ToR)\n\n N.B. L=2 is commonly referred to as '2-layer Clos' or 'Clos' or 'spine-leaf' topology\n\n Resource for building (scroll down to summary table with equations):\n\n https://packetpushers.net/demystifying-dcn-topologies-clos-fat-trees-part2/\n\n Another good resource for data centre topologies etc. in general:\n\n https://www.oreilly.com/library/view/bgp-in-the/9781491983416/ch01.html#:~:text=The%20most%20common%20routing%20protocol,single%20data%20center%2C%20as%20well.\n\n Parameters of network:\n\n - number of core (spine) switches = (k/2)^(L/2) (top layer)\n - number of edge switches (if L=4) = (k^2)/2\n - number of agg switches (if L=4) = (k^2)/2\n - number of pods (if L=4) (pod is a group of agg and/or edge switches) = 2*(k/2)^(L-2)\n - number of ToR (leaf) switches (racks) = 2*(k/2)^(L-1) (bottom layer)\n - number of server-facing ToR 'host' ports = 2*(k/2)^2 (can have multiple servers connected to same host port, & can oversubscribe)\n - number of servers = number ToR switches * n\n\n Args:\n k (int): Number of ports (links) on each switch (both up and down).\n L (int): Number of layers in the fat tree.\n n (int): Number of server per rack.\n ep_label (str,int,float): Endpoint label (e.g. 'server'). All endpoints will have\n ep_label appended to the start of their label (e.g. 
'server_0', 'server_1', ...).\n edge_label (str,int): Label to assign to edge switch nodes\n aggregate_label (str,int): Label to assign to edge switch nodes\n core_label (str,int): Label to assign to core switch nodes\n num_channels (int, float): Number of channels on each link in network\n server_to_edge_channel_capacity (int,float): Byte capacity per channel\n edge_to_agg_channel_capacity (int,float): (if L==4) Byte capacity per channel\n agg_to_core_channel_capacity (int,float): (if L==4) Byte capacity per channel\n rack_to_core_channel_capacity (int,float): (if L==2) Byte capacity per channel\n\n Returns:\n networkx graph: network object\n\n \"\"\"\n if L != 2 and L != 4:\n raise Exception(\n 'L must be 2 (ToR layer, core layer) or 4 (ToR layer, edge layer, agg layer, core layer), but is {}.'\n .format(L))\n if k % 2 != 0:\n raise Exception(\n 'k must be even since, in perfect fat tree, have equal number of up and down ports on each switch, but is {}.'\n .format(k))\n channel_names = gen_channel_names(num_channels)\n if L == 2:\n node_labels = [ep_label, rack_label, core_label]\n else:\n node_labels = [ep_label, rack_label, edge_label, aggregate_label,\n core_label]\n num_cores = int((k / 2) ** (L / 2))\n num_aggs = int(k ** 2 / 2)\n num_edges = int(k ** 2 / 2)\n num_pods = int(2 * (k / 2) ** (L - 2))\n num_racks = int(2 * (k / 2) ** (L - 1))\n num_servers = int(num_racks * n)\n cores = [(core_label + '_' + str(i)) for i in range(num_cores)]\n aggs = [(aggregate_label + '_' + str(i)) for i in range(num_aggs)]\n edges = [(edge_label + '_' + str(i)) for i in range(num_edges)]\n racks = [(rack_label + '_' + str(i)) for i in range(num_racks)]\n servers = [(ep_label + '_' + str(i)) for i in range(num_servers)]\n core_layer = nx.Graph()\n rack_layer = nx.Graph()\n core_layer.add_nodes_from(cores)\n rack_layer.add_nodes_from(racks)\n fat_tree_network = nx.compose(core_layer, rack_layer)\n if L == 2:\n rack_iterator = iter(racks)\n for rack in racks:\n core_iterator = iter(cores)\n for up_port in range(int(k / 2)):\n core = next(core_iterator)\n fat_tree_network.add_edge(rack, core)\n add_edge_capacity_attrs(fat_tree_network, (rack, core),\n channel_names, rack_to_core_channel_capacity)\n else:\n num_pods = int(k)\n pods = [[] for i in range(num_pods)]\n prev_iter = 0\n for pod_iter in range(len(pods)):\n curr_iter = int(prev_iter + k / 2)\n pods[pod_iter].append(edges[prev_iter:curr_iter])\n pods[pod_iter].append(aggs[prev_iter:curr_iter])\n prev_iter = curr_iter\n pod_labels = [('pod_' + str(i)) for i in range(num_pods)]\n pods_dict = {tuple([pod]): nx.Graph() for pod in pod_labels}\n for pod_iter in range(num_pods):\n key = 'pod_' + str(pod_iter),\n pod_edges = pods[pod_iter][0]\n pod_aggs = pods[pod_iter][1]\n pods_dict[key].add_nodes_from(pod_edges)\n pods_dict[key].add_nodes_from(pod_aggs)\n for pod_edge in pod_edges:\n for pod_agg in pod_aggs:\n pods_dict[key].add_edge(pod_agg, pod_edge)\n add_edge_capacity_attrs(pods_dict[key], (pod_agg,\n pod_edge), channel_names, edge_to_agg_channel_capacity)\n pod_networks = list(pods_dict.values())\n for pod_iter in range(num_pods):\n fat_tree_network = nx.compose(fat_tree_network, pod_networks[\n pod_iter])\n for pod_iter in range(num_pods):\n pod_aggs = pods[pod_iter][1]\n core_iterator = iter(cores)\n for pod_agg in pod_aggs:\n while fat_tree_network.degree[pod_agg] < k:\n core = next(core_iterator)\n fat_tree_network.add_edge(core, pod_agg)\n add_edge_capacity_attrs(fat_tree_network, (core,\n pod_agg), channel_names, 
agg_to_core_channel_capacity)\n rack_iterator = iter(racks)\n for pod_iter in range(num_pods):\n pod_edges = pods[pod_iter][0]\n for pod_edge in pod_edges:\n while fat_tree_network.degree[pod_edge] < k:\n rack = next(rack_iterator)\n fat_tree_network.add_edge(pod_edge, rack)\n add_edge_capacity_attrs(fat_tree_network, (pod_edge,\n rack), channel_names, rack_to_edge_channel_capacity)\n racks_dict = {rack: [] for rack in racks}\n server_iterator = iter(servers)\n for rack in racks:\n for _ in range(n):\n server = next(server_iterator)\n fat_tree_network.add_edge(rack, server)\n add_edge_capacity_attrs(fat_tree_network, (rack, server),\n channel_names, server_to_rack_channel_capacity)\n racks_dict[rack].append(server)\n max_nw_capacity = (num_servers * num_channels *\n server_to_rack_channel_capacity / 2)\n fat_tree_network.graph['endpoints'] = servers\n init_global_network_attrs(fat_tree_network, max_nw_capacity,\n num_channels, ep_link_capacity=server_to_rack_channel_capacity *\n num_channels, endpoint_label=ep_label, node_labels=node_labels,\n topology_type='fat_tree', racks_dict=racks_dict)\n if show_fig:\n plot_network(fat_tree_network, show_fig=True)\n return fat_tree_network\n\n\ndef init_global_network_attrs(network, max_nw_capacity, num_channels,\n ep_link_capacity, endpoint_label='server', topology_type='unknown',\n node_labels=['server'], racks_dict=None):\n \"\"\"Initialises the standard global network attributes of a given network.\n\n Args:\n network (obj): NetworkX object.\n max_nw_capacity (int/float): Maximum rate at which info can be reliably \n transmitted over the network (sum of all link capacities).\n num_channels (int): Number of channels on each link in network.\n topology_type (str): Label of network topology (e.g. 'fat_tree').\n node_labels (list): Label classes assigned to network nodes \n (e.g. ['server', 'rack', 'edge']).\n racks_dict (dict): Which servers/endpoints are in which rack. 
If None,\n assume do not have rack system where have multiple servers in one\n rack.\n\n \"\"\"\n network.graph['endpoint_label'] = endpoint_label\n network.graph['num_channels_per_link'] = num_channels\n network.graph['ep_link_capacity'] = ep_link_capacity\n network.graph['ep_link_port_capacity'] = ep_link_capacity / 2\n network.graph['max_nw_capacity'] = max_nw_capacity\n network.graph['curr_nw_capacity_used'] = 0\n network.graph['num_active_connections'] = 0\n network.graph['total_connections_blocked'] = 0\n network.graph['node_labels'] = node_labels\n network.graph['topology_type'] = topology_type\n network.graph['channel_names'] = gen_channel_names(num_channels)\n if racks_dict is not None:\n _racks_dict = {}\n for key, val in racks_dict.items():\n _racks_dict[str(key)] = []\n for v in val:\n _racks_dict[str(key)].append(str(v))\n network.graph['rack_to_ep_dict'] = _racks_dict\n else:\n network.graph['rack_to_ep_dict'] = None\n if racks_dict is not None:\n ep_to_rack_dict = {}\n for key, val in _racks_dict.items():\n for v in val:\n if v not in ep_to_rack_dict.keys():\n ep_to_rack_dict[v] = key\n network.graph['ep_to_rack_dict'] = ep_to_rack_dict\n else:\n network.graph['ep_to_rack_dict'] = None\n\n\n<mask token>\n\n\ndef add_edge_capacity_attrs(network, edge, channel_names, channel_capacity,\n bidirectional_links=True):\n \"\"\"Adds channels and corresponding max channel bytes to single edge in network.\n \n Args:\n network (networkx graph): Network containing edges to whiich attrs will\n be added.\n edge (tuple): Node-node edge pair.\n channel_names (list): List of channel names to add to edge.\n channel_capacity (int,float): Capacity to allocate to each channel.\n bidirectional_links (bool): If True, each link has capacity split equally\n between src and dst port. I.e. all links have a src and dst port\n which are treated separately to incoming and outgoing traffic to and\n from given node (switch or server).\n\n \"\"\"\n if bidirectional_links:\n attrs = {edge: {'{}_to_{}_port'.format(edge[0], edge[1]): {\n 'channels': {channel: (channel_capacity / 2) for channel in\n channel_names}, 'max_channel_capacity': channel_capacity / 2},\n '{}_to_{}_port'.format(edge[1], edge[0]): {'channels': {channel:\n (channel_capacity / 2) for channel in channel_names},\n 'max_channel_capacity': channel_capacity / 2}}}\n else:\n attrs = {edge: {'channels': {channel: channel_capacity for channel in\n channel_names}, 'max_channel_capacity': channel_capacity}}\n nx.set_edge_attributes(network, attrs)\n\n\ndef add_edges_capacity_attrs(network, edges, channel_names,\n channel_capacity, bidirectional_links=True):\n \"\"\"Adds channels & max channel capacitys to single edge in network.\n \n To access e.g. the edge going from node 0 to node 1 (edge (0, 1)), you\n would index the network with network[0][1]\n\n To access e.g. 
the channel_1 attribute of this particular (0, 1) edge, you\n would do network[0][1]['channels']['channel_1']\n OR\n if bidirectional_links, you do network[0][1]['0_to_1_port']['channels']['channel_1']\n or network[0][1]['1_to_0_port']['channels']['channel_1] depending on which direction\n of the link you want to access.\n \n Args:\n network (networkx graph): Network containing edges to which attrs will\n be added.\n edges (list): List of node pairs in tuples.\n channel_names (list of str): List of channel names to add to edge.\n channel_capacity (int, float): Capacity to allocate to each channel.\n bidirectional_links (bool): If True, each link has capacity split equally\n between src and dst port. I.e. all links have a src and dst port\n which are treated separately to incoming and outgoing traffic to and\n from given node (switch or server).\n\n \"\"\"\n if bidirectional_links:\n attrs = {edge: {'{}_to_{}_port'.format(edge[0], edge[1]): {\n 'channels': {channel: (channel_capacity / 2) for channel in\n channel_names}, 'max_channel_capacity': channel_capacity / 2},\n '{}_to_{}_port'.format(edge[1], edge[0]): {'channels': {channel:\n (channel_capacity / 2) for channel in channel_names},\n 'max_channel_capacity': channel_capacity / 2}} for edge in edges}\n else:\n attrs = {edge: {'channels': {channel: channel_capacity for channel in\n channel_names}, 'max_channel_capacity': channel_capacity} for\n edge in edges}\n nx.set_edge_attributes(network, attrs)\n\n\ndef get_node_type_dict(network, node_types=[]):\n \"\"\"Gets dict where keys are node types, values are list of nodes for each node type in graph.\"\"\"\n network_nodes = []\n for network_node in network.nodes:\n network_nodes.append(network_node)\n network_nodes_dict = {node_type: [] for node_type in node_types}\n for n in network_nodes:\n for node_type in node_types:\n if node_type in n:\n network_nodes_dict[node_type].append(n)\n else:\n pass\n return network_nodes_dict\n\n\ndef get_fat_tree_positions(net, width_scale=500, height_scale=10):\n \"\"\"Gets networkx positions of nodes in fat tree network for plotting.\"\"\"\n pos = {}\n node_type_dict = get_node_type_dict(net, net.graph['node_labels'])\n node_types = list(node_type_dict.keys())\n heights = {}\n widths = {}\n h = iter([1, 2, 3, 4, 5])\n for node_type in node_types:\n heights[node_type] = next(h)\n widths[node_type] = 1 / (len(node_type_dict[node_type]) + 1)\n idx = 0\n for node in node_type_dict[node_type]:\n pos[node] = (idx + 1) * widths[node_type] * width_scale, heights[\n node_type] * height_scale\n idx += 1\n return pos\n\n\n<mask token>\n\n\ndef plot_network(network, draw_node_labels=True, ep_label='server',\n network_node_size=2000, font_size=30, linewidths=1, fig_scale=2,\n path_to_save=None, show_fig=False):\n \"\"\"Plots networkx graph.\n\n Recognises special fat tree network and applies appropriate node positioning,\n labelling, colouring etc.\n\n Args:\n network (networkx graph): Network object to be plotted.\n draw_node_labels (bool): Whether or not to draw node labels on plot. \n ep_label (str,int,float): Endpoint label (e.g. 'server'). All endpoints will have\n ep_label appended to the start of their label (e.g. 
'server_0', 'server_1', ...).\n network_node_size (int,float): Size of plotted nodes.\n font_size (int,float): Size of of font of plotted labels etc.\n linewidths (int,float): Width of edges in network.\n fig_scale (int,float): Scaling factor to apply to plotted network.\n path_to_save (str): Path to directory (with file name included) in which\n to save generated plot. E.g. path_to_save='data/my_plot'\n show_fig (bool): Whether or not to plot and show fig. If True, will\n return and display fig.\n \n Returns:\n matplotlib.figure.Figure: node distribution plotted as a 2d matrix. \n\n \"\"\"\n net_node_positions = init_network_node_positions(copy.deepcopy(network))\n fig = plt.figure(figsize=[15 * fig_scale, 15 * fig_scale])\n pos = {}\n network_nodes = []\n network_nodes_dict = get_node_type_dict(network, network.graph[\n 'node_labels'])\n for nodes in list(network_nodes_dict.values()):\n for network_node in nodes:\n pos[network_node] = net_node_positions[network_node]\n node_colours = iter(['#25c44d', '#36a0c7', '#e8b017', '#6115a3', '#160e63']\n )\n for node_type in network.graph['node_labels']:\n nx.draw_networkx_nodes(network, pos, nodelist=network_nodes_dict[\n node_type], node_size=network_node_size, node_color=next(\n node_colours), linewidths=linewidths, label=node_type)\n if draw_node_labels:\n nx.draw_networkx_labels(network, pos, font_size=font_size,\n font_color='k', font_family='sans-serif', font_weight='normal',\n alpha=1.0)\n fibre_links = list(network.edges)\n nx.draw_networkx_edges(network, pos, edgelist=fibre_links, edge_color=\n 'k', width=3, label='Fibre link')\n if path_to_save is not None:\n tools.pickle_data(path_to_save, fig)\n if show_fig:\n plt.show()\n return fig\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef gen_arbitrary_network(num_eps, ep_label=None, ep_capacity=12500,\n num_channels=1, racks_dict=None, topology_type=None):\n \"\"\"Generates an arbitrary network with num_eps nodes labelled as ep_label.\n\n Note that no edges are formed in this network; it is purely for ep name \n indexing purposes when using Demand class. This is useful where want to\n use the demand class but not necessarily with a carefully crafted networkx\n graph that accurately mimics the network you will use for the demands\n\n Args:\n num_eps (int): Number of endpoints in network.\n ep_label (str,int,float): Endpoint label (e.g. 'server'). All endpoints will have\n ep_label appended to the start of their label (e.g. 'server_0', 'server_1', ...).\n ep_capacity (int, float): Byte capacity per end point channel.\n num_channels (int, float): Number of channels on each link in network.\n racks_dict (dict): Mapping of which end points are in which racks. Keys are\n rack ids, values are list of end points. If None, assume there is not\n clustering/rack system in the network where have different end points\n in different clusters/racks.\n\n Returns:\n networkx graph: network object\n\n \"\"\"\n network = nx.Graph()\n network.add_nodes_from([node for node in range(num_eps)])\n if ep_label is None:\n servers = [str(i) for i in range(num_eps)]\n else:\n servers = [(ep_label + '_' + str(i)) for i in range(num_eps)]\n relabel_mapping = {node: label for node, label in zip(range(num_eps),\n servers)}\n network = nx.relabel_nodes(network, relabel_mapping)\n eps = []\n for node in list(network.nodes):\n try:\n if ep_label in node:\n eps.append(node)\n except TypeError:\n eps.append(node)\n network.graph['endpoints'] = eps\n max_nw_capacity = num_eps * ep_capacity * num_channels / 2\n if topology_type is None:\n topology_type = 'arbitrary_endpoints_{}_chancap_{}_channels_{}'.format(\n num_eps, ep_capacity, num_channels)\n init_global_network_attrs(network, max_nw_capacity, num_channels,\n ep_link_capacity=ep_capacity * num_channels, endpoint_label=\n ep_label, node_labels=[ep_label], racks_dict=racks_dict,\n topology_type=topology_type)\n return network\n\n\ndef gen_nsfnet_network(ep_label='server', rack_label='rack', N=0,\n num_channels=2, server_to_rack_channel_capacity=1,\n rack_to_rack_channel_capacity=10, show_fig=False):\n \"\"\"Generates the standard 14-node NSFNET topology (a U.S. core network).\n \n Args:\n ep_label (str,int,float): Endpoint label (e.g. 'server'). All endpoints will have\n ep_label appended to the start of their label (e.g. 'server_0', 'server_1', ...).\n N (int): Number of servers per rack. If 0, assume all nodes in nsfnet\n are endpoints\n num_channels (int,float): Number of channels on each link in network.\n server_to_rack_channel_capacity (int,float): Byte capacity per channel \n between servers and ToR switch.\n rack_to_rack_channel_capacity (int,float): Byte capacity per channel between racks.\n show_fig (bool): Whether or not to plot and show fig. 
If True, will\n display fig.\n\n Returns:\n networkx graph: network object\n\n \"\"\"\n channel_names = gen_channel_names(num_channels)\n network = nx.Graph()\n node_pair_list = [[0, 1], [0, 3], [0, 2], [1, 2], [1, 7], [3, 8], [3, 4\n ], [3, 6], [4, 5], [4, 5], [5, 2], [5, 13], [5, 12], [6, 7], [7, 10\n ], [8, 11], [8, 9], [9, 10], [9, 12], [10, 11], [10, 13], [11, 12]]\n if N == 0:\n label = ep_label\n else:\n label = rack_label\n for idx in range(len(node_pair_list)):\n node_pair_list[idx][0] = label + '_' + str(node_pair_list[idx][0])\n node_pair_list[idx][1] = label + '_' + str(node_pair_list[idx][1])\n for edge in node_pair_list:\n network.add_edge(*tuple(edge))\n if N == 0:\n racks_dict = None\n else:\n i = 0\n racks_dict = {rack: [] for rack in range(14)}\n for rack in range(14):\n for server in range(N):\n racks_dict[rack].append(ep_label + '_' + str(i))\n network.add_edge(ep_label + '_' + str(i), rack_label + '_' +\n str(rack))\n i += 1\n channel_names = gen_channel_names(num_channels)\n edges = [edge for edge in network.edges]\n add_edges_capacity_attrs(network, edges, channel_names,\n rack_to_rack_channel_capacity)\n network.graph['endpoints'] = get_endpoints(network, ep_label)\n max_nw_capacity = len(network.edges\n ) * num_channels * rack_to_rack_channel_capacity / 2\n init_global_network_attrs(network, max_nw_capacity, num_channels,\n ep_link_capacity=server_to_rack_channel_capacity * num_channels,\n endpoint_label=ep_label, node_labels=[ep_label, rack_label],\n topology_type='14_node_nsfnet', racks_dict=racks_dict)\n if show_fig:\n plot_network(network, show_fig=True)\n return network\n\n\ndef gen_simple_network(ep_label='server', num_channels=2,\n server_to_rack_channel_capacity=500, show_fig=False):\n \"\"\"Generates very simple 5-node topology.\n\n Args:\n ep_label (str,int,float): Endpoint label (e.g. 'server'). All endpoints will have\n ep_label appended to the start of their label (e.g. 'server_0', 'server_1', ...).\n num_channels (int,float): Number of channels on each link in network.\n channel_capacity (int,float): Byte capacity per channel.\n show_fig (bool): Whether or not to plot and show fig. If True, will\n display fig.\n\n Returns:\n networkx graph: network object\n\n \"\"\"\n network = nx.Graph()\n network.add_nodes_from([node for node in range(5)])\n network.add_edges_from([(0, 1), (0, 2), (1, 2), (2, 4), (4, 3), (3, 1)],\n weight=1)\n servers = [(ep_label + '_' + str(i)) for i in range(5)]\n relabel_mapping = {node: label for node, label in zip(range(5), servers)}\n network = nx.relabel_nodes(network, relabel_mapping)\n channel_names = gen_channel_names(num_channels)\n edges = [edge for edge in network.edges]\n add_edges_capacity_attrs(network, edges, channel_names,\n server_to_rack_channel_capacity)\n network.graph['endpoints'] = get_endpoints(network, ep_label)\n max_nw_capacity = len(network.edges\n ) * num_channels * server_to_rack_channel_capacity / 2\n init_global_network_attrs(network, max_nw_capacity, num_channels,\n ep_link_capacity=server_to_rack_channel_capacity * num_channels,\n endpoint_label=ep_label, node_labels=[ep_label], topology_type=\n '5_node_simple_network')\n if show_fig:\n plot_network(network, show_fig=True)\n return network\n\n\ndef get_endpoints(network, ep_label):\n \"\"\"Gets list of endpoints of network.\n\n Args:\n network (networkx graph): Networkx object.\n ep_label (str,int,float): Endpoint label (e.g. 'server'). All endpoints will have\n ep_label appended to the start of their label (e.g. 
'server_0', 'server_1', ...).\n\n Returns:\n eps (list): List of endpoints.\n\n \"\"\"\n eps = []\n for node in list(network.nodes):\n if ep_label in node:\n eps.append(node)\n return eps\n\n\ndef gen_fat_tree(k=4, L=2, n=4, ep_label='server', rack_label='rack',\n edge_label='edge', aggregate_label='agg', core_label='core',\n num_channels=2, server_to_rack_channel_capacity=500,\n rack_to_edge_channel_capacity=1000, edge_to_agg_channel_capacity=1000,\n agg_to_core_channel_capacity=2000, rack_to_core_channel_capacity=2000,\n show_fig=False):\n \"\"\"Generates a perfect fat tree (i.e. all layers have switches with same radix/number of ports).\n\n Top layer is always core (spine) switch layer, bottom layer is always\n ToR (leaf) layer.\n\n L must be either 2 (core, ToR) or 4 (core, agg, edge, ToR)\n\n N.B. L=2 is commonly referred to as '2-layer Clos' or 'Clos' or 'spine-leaf' topology\n\n Resource for building (scroll down to summary table with equations):\n\n https://packetpushers.net/demystifying-dcn-topologies-clos-fat-trees-part2/\n\n Another good resource for data centre topologies etc. in general:\n\n https://www.oreilly.com/library/view/bgp-in-the/9781491983416/ch01.html#:~:text=The%20most%20common%20routing%20protocol,single%20data%20center%2C%20as%20well.\n\n Parameters of network:\n\n - number of core (spine) switches = (k/2)^(L/2) (top layer)\n - number of edge switches (if L=4) = (k^2)/2\n - number of agg switches (if L=4) = (k^2)/2\n - number of pods (if L=4) (pod is a group of agg and/or edge switches) = 2*(k/2)^(L-2)\n - number of ToR (leaf) switches (racks) = 2*(k/2)^(L-1) (bottom layer)\n - number of server-facing ToR 'host' ports = 2*(k/2)^2 (can have multiple servers connected to same host port, & can oversubscribe)\n - number of servers = number ToR switches * n\n\n Args:\n k (int): Number of ports (links) on each switch (both up and down).\n L (int): Number of layers in the fat tree.\n n (int): Number of server per rack.\n ep_label (str,int,float): Endpoint label (e.g. 'server'). All endpoints will have\n ep_label appended to the start of their label (e.g. 
'server_0', 'server_1', ...).\n edge_label (str,int): Label to assign to edge switch nodes\n aggregate_label (str,int): Label to assign to edge switch nodes\n core_label (str,int): Label to assign to core switch nodes\n num_channels (int, float): Number of channels on each link in network\n server_to_edge_channel_capacity (int,float): Byte capacity per channel\n edge_to_agg_channel_capacity (int,float): (if L==4) Byte capacity per channel\n agg_to_core_channel_capacity (int,float): (if L==4) Byte capacity per channel\n rack_to_core_channel_capacity (int,float): (if L==2) Byte capacity per channel\n\n Returns:\n networkx graph: network object\n\n \"\"\"\n if L != 2 and L != 4:\n raise Exception(\n 'L must be 2 (ToR layer, core layer) or 4 (ToR layer, edge layer, agg layer, core layer), but is {}.'\n .format(L))\n if k % 2 != 0:\n raise Exception(\n 'k must be even since, in perfect fat tree, have equal number of up and down ports on each switch, but is {}.'\n .format(k))\n channel_names = gen_channel_names(num_channels)\n if L == 2:\n node_labels = [ep_label, rack_label, core_label]\n else:\n node_labels = [ep_label, rack_label, edge_label, aggregate_label,\n core_label]\n num_cores = int((k / 2) ** (L / 2))\n num_aggs = int(k ** 2 / 2)\n num_edges = int(k ** 2 / 2)\n num_pods = int(2 * (k / 2) ** (L - 2))\n num_racks = int(2 * (k / 2) ** (L - 1))\n num_servers = int(num_racks * n)\n cores = [(core_label + '_' + str(i)) for i in range(num_cores)]\n aggs = [(aggregate_label + '_' + str(i)) for i in range(num_aggs)]\n edges = [(edge_label + '_' + str(i)) for i in range(num_edges)]\n racks = [(rack_label + '_' + str(i)) for i in range(num_racks)]\n servers = [(ep_label + '_' + str(i)) for i in range(num_servers)]\n core_layer = nx.Graph()\n rack_layer = nx.Graph()\n core_layer.add_nodes_from(cores)\n rack_layer.add_nodes_from(racks)\n fat_tree_network = nx.compose(core_layer, rack_layer)\n if L == 2:\n rack_iterator = iter(racks)\n for rack in racks:\n core_iterator = iter(cores)\n for up_port in range(int(k / 2)):\n core = next(core_iterator)\n fat_tree_network.add_edge(rack, core)\n add_edge_capacity_attrs(fat_tree_network, (rack, core),\n channel_names, rack_to_core_channel_capacity)\n else:\n num_pods = int(k)\n pods = [[] for i in range(num_pods)]\n prev_iter = 0\n for pod_iter in range(len(pods)):\n curr_iter = int(prev_iter + k / 2)\n pods[pod_iter].append(edges[prev_iter:curr_iter])\n pods[pod_iter].append(aggs[prev_iter:curr_iter])\n prev_iter = curr_iter\n pod_labels = [('pod_' + str(i)) for i in range(num_pods)]\n pods_dict = {tuple([pod]): nx.Graph() for pod in pod_labels}\n for pod_iter in range(num_pods):\n key = 'pod_' + str(pod_iter),\n pod_edges = pods[pod_iter][0]\n pod_aggs = pods[pod_iter][1]\n pods_dict[key].add_nodes_from(pod_edges)\n pods_dict[key].add_nodes_from(pod_aggs)\n for pod_edge in pod_edges:\n for pod_agg in pod_aggs:\n pods_dict[key].add_edge(pod_agg, pod_edge)\n add_edge_capacity_attrs(pods_dict[key], (pod_agg,\n pod_edge), channel_names, edge_to_agg_channel_capacity)\n pod_networks = list(pods_dict.values())\n for pod_iter in range(num_pods):\n fat_tree_network = nx.compose(fat_tree_network, pod_networks[\n pod_iter])\n for pod_iter in range(num_pods):\n pod_aggs = pods[pod_iter][1]\n core_iterator = iter(cores)\n for pod_agg in pod_aggs:\n while fat_tree_network.degree[pod_agg] < k:\n core = next(core_iterator)\n fat_tree_network.add_edge(core, pod_agg)\n add_edge_capacity_attrs(fat_tree_network, (core,\n pod_agg), channel_names, 
agg_to_core_channel_capacity)\n rack_iterator = iter(racks)\n for pod_iter in range(num_pods):\n pod_edges = pods[pod_iter][0]\n for pod_edge in pod_edges:\n while fat_tree_network.degree[pod_edge] < k:\n rack = next(rack_iterator)\n fat_tree_network.add_edge(pod_edge, rack)\n add_edge_capacity_attrs(fat_tree_network, (pod_edge,\n rack), channel_names, rack_to_edge_channel_capacity)\n racks_dict = {rack: [] for rack in racks}\n server_iterator = iter(servers)\n for rack in racks:\n for _ in range(n):\n server = next(server_iterator)\n fat_tree_network.add_edge(rack, server)\n add_edge_capacity_attrs(fat_tree_network, (rack, server),\n channel_names, server_to_rack_channel_capacity)\n racks_dict[rack].append(server)\n max_nw_capacity = (num_servers * num_channels *\n server_to_rack_channel_capacity / 2)\n fat_tree_network.graph['endpoints'] = servers\n init_global_network_attrs(fat_tree_network, max_nw_capacity,\n num_channels, ep_link_capacity=server_to_rack_channel_capacity *\n num_channels, endpoint_label=ep_label, node_labels=node_labels,\n topology_type='fat_tree', racks_dict=racks_dict)\n if show_fig:\n plot_network(fat_tree_network, show_fig=True)\n return fat_tree_network\n\n\ndef init_global_network_attrs(network, max_nw_capacity, num_channels,\n ep_link_capacity, endpoint_label='server', topology_type='unknown',\n node_labels=['server'], racks_dict=None):\n \"\"\"Initialises the standard global network attributes of a given network.\n\n Args:\n network (obj): NetworkX object.\n max_nw_capacity (int/float): Maximum rate at which info can be reliably \n transmitted over the network (sum of all link capacities).\n num_channels (int): Number of channels on each link in network.\n topology_type (str): Label of network topology (e.g. 'fat_tree').\n node_labels (list): Label classes assigned to network nodes \n (e.g. ['server', 'rack', 'edge']).\n racks_dict (dict): Which servers/endpoints are in which rack. 
If None,\n assume do not have rack system where have multiple servers in one\n rack.\n\n \"\"\"\n network.graph['endpoint_label'] = endpoint_label\n network.graph['num_channels_per_link'] = num_channels\n network.graph['ep_link_capacity'] = ep_link_capacity\n network.graph['ep_link_port_capacity'] = ep_link_capacity / 2\n network.graph['max_nw_capacity'] = max_nw_capacity\n network.graph['curr_nw_capacity_used'] = 0\n network.graph['num_active_connections'] = 0\n network.graph['total_connections_blocked'] = 0\n network.graph['node_labels'] = node_labels\n network.graph['topology_type'] = topology_type\n network.graph['channel_names'] = gen_channel_names(num_channels)\n if racks_dict is not None:\n _racks_dict = {}\n for key, val in racks_dict.items():\n _racks_dict[str(key)] = []\n for v in val:\n _racks_dict[str(key)].append(str(v))\n network.graph['rack_to_ep_dict'] = _racks_dict\n else:\n network.graph['rack_to_ep_dict'] = None\n if racks_dict is not None:\n ep_to_rack_dict = {}\n for key, val in _racks_dict.items():\n for v in val:\n if v not in ep_to_rack_dict.keys():\n ep_to_rack_dict[v] = key\n network.graph['ep_to_rack_dict'] = ep_to_rack_dict\n else:\n network.graph['ep_to_rack_dict'] = None\n\n\n<mask token>\n\n\ndef add_edge_capacity_attrs(network, edge, channel_names, channel_capacity,\n bidirectional_links=True):\n \"\"\"Adds channels and corresponding max channel bytes to single edge in network.\n \n Args:\n network (networkx graph): Network containing edges to whiich attrs will\n be added.\n edge (tuple): Node-node edge pair.\n channel_names (list): List of channel names to add to edge.\n channel_capacity (int,float): Capacity to allocate to each channel.\n bidirectional_links (bool): If True, each link has capacity split equally\n between src and dst port. I.e. all links have a src and dst port\n which are treated separately to incoming and outgoing traffic to and\n from given node (switch or server).\n\n \"\"\"\n if bidirectional_links:\n attrs = {edge: {'{}_to_{}_port'.format(edge[0], edge[1]): {\n 'channels': {channel: (channel_capacity / 2) for channel in\n channel_names}, 'max_channel_capacity': channel_capacity / 2},\n '{}_to_{}_port'.format(edge[1], edge[0]): {'channels': {channel:\n (channel_capacity / 2) for channel in channel_names},\n 'max_channel_capacity': channel_capacity / 2}}}\n else:\n attrs = {edge: {'channels': {channel: channel_capacity for channel in\n channel_names}, 'max_channel_capacity': channel_capacity}}\n nx.set_edge_attributes(network, attrs)\n\n\ndef add_edges_capacity_attrs(network, edges, channel_names,\n channel_capacity, bidirectional_links=True):\n \"\"\"Adds channels & max channel capacitys to single edge in network.\n \n To access e.g. the edge going from node 0 to node 1 (edge (0, 1)), you\n would index the network with network[0][1]\n\n To access e.g. 
the channel_1 attribute of this particular (0, 1) edge, you\n would do network[0][1]['channels']['channel_1']\n OR\n if bidirectional_links, you do network[0][1]['0_to_1_port']['channels']['channel_1']\n or network[0][1]['1_to_0_port']['channels']['channel_1] depending on which direction\n of the link you want to access.\n \n Args:\n network (networkx graph): Network containing edges to which attrs will\n be added.\n edges (list): List of node pairs in tuples.\n channel_names (list of str): List of channel names to add to edge.\n channel_capacity (int, float): Capacity to allocate to each channel.\n bidirectional_links (bool): If True, each link has capacity split equally\n between src and dst port. I.e. all links have a src and dst port\n which are treated separately to incoming and outgoing traffic to and\n from given node (switch or server).\n\n \"\"\"\n if bidirectional_links:\n attrs = {edge: {'{}_to_{}_port'.format(edge[0], edge[1]): {\n 'channels': {channel: (channel_capacity / 2) for channel in\n channel_names}, 'max_channel_capacity': channel_capacity / 2},\n '{}_to_{}_port'.format(edge[1], edge[0]): {'channels': {channel:\n (channel_capacity / 2) for channel in channel_names},\n 'max_channel_capacity': channel_capacity / 2}} for edge in edges}\n else:\n attrs = {edge: {'channels': {channel: channel_capacity for channel in\n channel_names}, 'max_channel_capacity': channel_capacity} for\n edge in edges}\n nx.set_edge_attributes(network, attrs)\n\n\ndef get_node_type_dict(network, node_types=[]):\n \"\"\"Gets dict where keys are node types, values are list of nodes for each node type in graph.\"\"\"\n network_nodes = []\n for network_node in network.nodes:\n network_nodes.append(network_node)\n network_nodes_dict = {node_type: [] for node_type in node_types}\n for n in network_nodes:\n for node_type in node_types:\n if node_type in n:\n network_nodes_dict[node_type].append(n)\n else:\n pass\n return network_nodes_dict\n\n\ndef get_fat_tree_positions(net, width_scale=500, height_scale=10):\n \"\"\"Gets networkx positions of nodes in fat tree network for plotting.\"\"\"\n pos = {}\n node_type_dict = get_node_type_dict(net, net.graph['node_labels'])\n node_types = list(node_type_dict.keys())\n heights = {}\n widths = {}\n h = iter([1, 2, 3, 4, 5])\n for node_type in node_types:\n heights[node_type] = next(h)\n widths[node_type] = 1 / (len(node_type_dict[node_type]) + 1)\n idx = 0\n for node in node_type_dict[node_type]:\n pos[node] = (idx + 1) * widths[node_type] * width_scale, heights[\n node_type] * height_scale\n idx += 1\n return pos\n\n\ndef init_network_node_positions(net):\n \"\"\"Initialises network node positions for plotting.\"\"\"\n if net.graph['topology_type'] == 'fat_tree':\n pos = get_fat_tree_positions(net)\n else:\n pos = nx.nx_agraph.graphviz_layout(net, prog='neato')\n return pos\n\n\ndef plot_network(network, draw_node_labels=True, ep_label='server',\n network_node_size=2000, font_size=30, linewidths=1, fig_scale=2,\n path_to_save=None, show_fig=False):\n \"\"\"Plots networkx graph.\n\n Recognises special fat tree network and applies appropriate node positioning,\n labelling, colouring etc.\n\n Args:\n network (networkx graph): Network object to be plotted.\n draw_node_labels (bool): Whether or not to draw node labels on plot. \n ep_label (str,int,float): Endpoint label (e.g. 'server'). All endpoints will have\n ep_label appended to the start of their label (e.g. 
'server_0', 'server_1', ...).\n network_node_size (int,float): Size of plotted nodes.\n font_size (int,float): Size of of font of plotted labels etc.\n linewidths (int,float): Width of edges in network.\n fig_scale (int,float): Scaling factor to apply to plotted network.\n path_to_save (str): Path to directory (with file name included) in which\n to save generated plot. E.g. path_to_save='data/my_plot'\n show_fig (bool): Whether or not to plot and show fig. If True, will\n return and display fig.\n \n Returns:\n matplotlib.figure.Figure: node distribution plotted as a 2d matrix. \n\n \"\"\"\n net_node_positions = init_network_node_positions(copy.deepcopy(network))\n fig = plt.figure(figsize=[15 * fig_scale, 15 * fig_scale])\n pos = {}\n network_nodes = []\n network_nodes_dict = get_node_type_dict(network, network.graph[\n 'node_labels'])\n for nodes in list(network_nodes_dict.values()):\n for network_node in nodes:\n pos[network_node] = net_node_positions[network_node]\n node_colours = iter(['#25c44d', '#36a0c7', '#e8b017', '#6115a3', '#160e63']\n )\n for node_type in network.graph['node_labels']:\n nx.draw_networkx_nodes(network, pos, nodelist=network_nodes_dict[\n node_type], node_size=network_node_size, node_color=next(\n node_colours), linewidths=linewidths, label=node_type)\n if draw_node_labels:\n nx.draw_networkx_labels(network, pos, font_size=font_size,\n font_color='k', font_family='sans-serif', font_weight='normal',\n alpha=1.0)\n fibre_links = list(network.edges)\n nx.draw_networkx_edges(network, pos, edgelist=fibre_links, edge_color=\n 'k', width=3, label='Fibre link')\n if path_to_save is not None:\n tools.pickle_data(path_to_save, fig)\n if show_fig:\n plt.show()\n return fig\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef gen_arbitrary_network(num_eps, ep_label=None, ep_capacity=12500,\n num_channels=1, racks_dict=None, topology_type=None):\n \"\"\"Generates an arbitrary network with num_eps nodes labelled as ep_label.\n\n Note that no edges are formed in this network; it is purely for ep name \n indexing purposes when using Demand class. This is useful where want to\n use the demand class but not necessarily with a carefully crafted networkx\n graph that accurately mimics the network you will use for the demands\n\n Args:\n num_eps (int): Number of endpoints in network.\n ep_label (str,int,float): Endpoint label (e.g. 'server'). All endpoints will have\n ep_label appended to the start of their label (e.g. 'server_0', 'server_1', ...).\n ep_capacity (int, float): Byte capacity per end point channel.\n num_channels (int, float): Number of channels on each link in network.\n racks_dict (dict): Mapping of which end points are in which racks. Keys are\n rack ids, values are list of end points. If None, assume there is not\n clustering/rack system in the network where have different end points\n in different clusters/racks.\n\n Returns:\n networkx graph: network object\n\n \"\"\"\n network = nx.Graph()\n network.add_nodes_from([node for node in range(num_eps)])\n if ep_label is None:\n servers = [str(i) for i in range(num_eps)]\n else:\n servers = [(ep_label + '_' + str(i)) for i in range(num_eps)]\n relabel_mapping = {node: label for node, label in zip(range(num_eps),\n servers)}\n network = nx.relabel_nodes(network, relabel_mapping)\n eps = []\n for node in list(network.nodes):\n try:\n if ep_label in node:\n eps.append(node)\n except TypeError:\n eps.append(node)\n network.graph['endpoints'] = eps\n max_nw_capacity = num_eps * ep_capacity * num_channels / 2\n if topology_type is None:\n topology_type = 'arbitrary_endpoints_{}_chancap_{}_channels_{}'.format(\n num_eps, ep_capacity, num_channels)\n init_global_network_attrs(network, max_nw_capacity, num_channels,\n ep_link_capacity=ep_capacity * num_channels, endpoint_label=\n ep_label, node_labels=[ep_label], racks_dict=racks_dict,\n topology_type=topology_type)\n return network\n\n\ndef gen_nsfnet_network(ep_label='server', rack_label='rack', N=0,\n num_channels=2, server_to_rack_channel_capacity=1,\n rack_to_rack_channel_capacity=10, show_fig=False):\n \"\"\"Generates the standard 14-node NSFNET topology (a U.S. core network).\n \n Args:\n ep_label (str,int,float): Endpoint label (e.g. 'server'). All endpoints will have\n ep_label appended to the start of their label (e.g. 'server_0', 'server_1', ...).\n N (int): Number of servers per rack. If 0, assume all nodes in nsfnet\n are endpoints\n num_channels (int,float): Number of channels on each link in network.\n server_to_rack_channel_capacity (int,float): Byte capacity per channel \n between servers and ToR switch.\n rack_to_rack_channel_capacity (int,float): Byte capacity per channel between racks.\n show_fig (bool): Whether or not to plot and show fig. 
If True, will\n display fig.\n\n Returns:\n networkx graph: network object\n\n \"\"\"\n channel_names = gen_channel_names(num_channels)\n network = nx.Graph()\n node_pair_list = [[0, 1], [0, 3], [0, 2], [1, 2], [1, 7], [3, 8], [3, 4\n ], [3, 6], [4, 5], [4, 5], [5, 2], [5, 13], [5, 12], [6, 7], [7, 10\n ], [8, 11], [8, 9], [9, 10], [9, 12], [10, 11], [10, 13], [11, 12]]\n if N == 0:\n label = ep_label\n else:\n label = rack_label\n for idx in range(len(node_pair_list)):\n node_pair_list[idx][0] = label + '_' + str(node_pair_list[idx][0])\n node_pair_list[idx][1] = label + '_' + str(node_pair_list[idx][1])\n for edge in node_pair_list:\n network.add_edge(*tuple(edge))\n if N == 0:\n racks_dict = None\n else:\n i = 0\n racks_dict = {rack: [] for rack in range(14)}\n for rack in range(14):\n for server in range(N):\n racks_dict[rack].append(ep_label + '_' + str(i))\n network.add_edge(ep_label + '_' + str(i), rack_label + '_' +\n str(rack))\n i += 1\n channel_names = gen_channel_names(num_channels)\n edges = [edge for edge in network.edges]\n add_edges_capacity_attrs(network, edges, channel_names,\n rack_to_rack_channel_capacity)\n network.graph['endpoints'] = get_endpoints(network, ep_label)\n max_nw_capacity = len(network.edges\n ) * num_channels * rack_to_rack_channel_capacity / 2\n init_global_network_attrs(network, max_nw_capacity, num_channels,\n ep_link_capacity=server_to_rack_channel_capacity * num_channels,\n endpoint_label=ep_label, node_labels=[ep_label, rack_label],\n topology_type='14_node_nsfnet', racks_dict=racks_dict)\n if show_fig:\n plot_network(network, show_fig=True)\n return network\n\n\ndef gen_simple_network(ep_label='server', num_channels=2,\n server_to_rack_channel_capacity=500, show_fig=False):\n \"\"\"Generates very simple 5-node topology.\n\n Args:\n ep_label (str,int,float): Endpoint label (e.g. 'server'). All endpoints will have\n ep_label appended to the start of their label (e.g. 'server_0', 'server_1', ...).\n num_channels (int,float): Number of channels on each link in network.\n channel_capacity (int,float): Byte capacity per channel.\n show_fig (bool): Whether or not to plot and show fig. If True, will\n display fig.\n\n Returns:\n networkx graph: network object\n\n \"\"\"\n network = nx.Graph()\n network.add_nodes_from([node for node in range(5)])\n network.add_edges_from([(0, 1), (0, 2), (1, 2), (2, 4), (4, 3), (3, 1)],\n weight=1)\n servers = [(ep_label + '_' + str(i)) for i in range(5)]\n relabel_mapping = {node: label for node, label in zip(range(5), servers)}\n network = nx.relabel_nodes(network, relabel_mapping)\n channel_names = gen_channel_names(num_channels)\n edges = [edge for edge in network.edges]\n add_edges_capacity_attrs(network, edges, channel_names,\n server_to_rack_channel_capacity)\n network.graph['endpoints'] = get_endpoints(network, ep_label)\n max_nw_capacity = len(network.edges\n ) * num_channels * server_to_rack_channel_capacity / 2\n init_global_network_attrs(network, max_nw_capacity, num_channels,\n ep_link_capacity=server_to_rack_channel_capacity * num_channels,\n endpoint_label=ep_label, node_labels=[ep_label], topology_type=\n '5_node_simple_network')\n if show_fig:\n plot_network(network, show_fig=True)\n return network\n\n\ndef get_endpoints(network, ep_label):\n \"\"\"Gets list of endpoints of network.\n\n Args:\n network (networkx graph): Networkx object.\n ep_label (str,int,float): Endpoint label (e.g. 'server'). All endpoints will have\n ep_label appended to the start of their label (e.g. 
'server_0', 'server_1', ...).\n\n Returns:\n eps (list): List of endpoints.\n\n \"\"\"\n eps = []\n for node in list(network.nodes):\n if ep_label in node:\n eps.append(node)\n return eps\n\n\ndef gen_fat_tree(k=4, L=2, n=4, ep_label='server', rack_label='rack',\n edge_label='edge', aggregate_label='agg', core_label='core',\n num_channels=2, server_to_rack_channel_capacity=500,\n rack_to_edge_channel_capacity=1000, edge_to_agg_channel_capacity=1000,\n agg_to_core_channel_capacity=2000, rack_to_core_channel_capacity=2000,\n show_fig=False):\n \"\"\"Generates a perfect fat tree (i.e. all layers have switches with same radix/number of ports).\n\n Top layer is always core (spine) switch layer, bottom layer is always\n ToR (leaf) layer.\n\n L must be either 2 (core, ToR) or 4 (core, agg, edge, ToR)\n\n N.B. L=2 is commonly referred to as '2-layer Clos' or 'Clos' or 'spine-leaf' topology\n\n Resource for building (scroll down to summary table with equations):\n\n https://packetpushers.net/demystifying-dcn-topologies-clos-fat-trees-part2/\n\n Another good resource for data centre topologies etc. in general:\n\n https://www.oreilly.com/library/view/bgp-in-the/9781491983416/ch01.html#:~:text=The%20most%20common%20routing%20protocol,single%20data%20center%2C%20as%20well.\n\n Parameters of network:\n\n - number of core (spine) switches = (k/2)^(L/2) (top layer)\n - number of edge switches (if L=4) = (k^2)/2\n - number of agg switches (if L=4) = (k^2)/2\n - number of pods (if L=4) (pod is a group of agg and/or edge switches) = 2*(k/2)^(L-2)\n - number of ToR (leaf) switches (racks) = 2*(k/2)^(L-1) (bottom layer)\n - number of server-facing ToR 'host' ports = 2*(k/2)^2 (can have multiple servers connected to same host port, & can oversubscribe)\n - number of servers = number ToR switches * n\n\n Args:\n k (int): Number of ports (links) on each switch (both up and down).\n L (int): Number of layers in the fat tree.\n n (int): Number of server per rack.\n ep_label (str,int,float): Endpoint label (e.g. 'server'). All endpoints will have\n ep_label appended to the start of their label (e.g. 
'server_0', 'server_1', ...).\n edge_label (str,int): Label to assign to edge switch nodes\n aggregate_label (str,int): Label to assign to edge switch nodes\n core_label (str,int): Label to assign to core switch nodes\n num_channels (int, float): Number of channels on each link in network\n server_to_edge_channel_capacity (int,float): Byte capacity per channel\n edge_to_agg_channel_capacity (int,float): (if L==4) Byte capacity per channel\n agg_to_core_channel_capacity (int,float): (if L==4) Byte capacity per channel\n rack_to_core_channel_capacity (int,float): (if L==2) Byte capacity per channel\n\n Returns:\n networkx graph: network object\n\n \"\"\"\n if L != 2 and L != 4:\n raise Exception(\n 'L must be 2 (ToR layer, core layer) or 4 (ToR layer, edge layer, agg layer, core layer), but is {}.'\n .format(L))\n if k % 2 != 0:\n raise Exception(\n 'k must be even since, in perfect fat tree, have equal number of up and down ports on each switch, but is {}.'\n .format(k))\n channel_names = gen_channel_names(num_channels)\n if L == 2:\n node_labels = [ep_label, rack_label, core_label]\n else:\n node_labels = [ep_label, rack_label, edge_label, aggregate_label,\n core_label]\n num_cores = int((k / 2) ** (L / 2))\n num_aggs = int(k ** 2 / 2)\n num_edges = int(k ** 2 / 2)\n num_pods = int(2 * (k / 2) ** (L - 2))\n num_racks = int(2 * (k / 2) ** (L - 1))\n num_servers = int(num_racks * n)\n cores = [(core_label + '_' + str(i)) for i in range(num_cores)]\n aggs = [(aggregate_label + '_' + str(i)) for i in range(num_aggs)]\n edges = [(edge_label + '_' + str(i)) for i in range(num_edges)]\n racks = [(rack_label + '_' + str(i)) for i in range(num_racks)]\n servers = [(ep_label + '_' + str(i)) for i in range(num_servers)]\n core_layer = nx.Graph()\n rack_layer = nx.Graph()\n core_layer.add_nodes_from(cores)\n rack_layer.add_nodes_from(racks)\n fat_tree_network = nx.compose(core_layer, rack_layer)\n if L == 2:\n rack_iterator = iter(racks)\n for rack in racks:\n core_iterator = iter(cores)\n for up_port in range(int(k / 2)):\n core = next(core_iterator)\n fat_tree_network.add_edge(rack, core)\n add_edge_capacity_attrs(fat_tree_network, (rack, core),\n channel_names, rack_to_core_channel_capacity)\n else:\n num_pods = int(k)\n pods = [[] for i in range(num_pods)]\n prev_iter = 0\n for pod_iter in range(len(pods)):\n curr_iter = int(prev_iter + k / 2)\n pods[pod_iter].append(edges[prev_iter:curr_iter])\n pods[pod_iter].append(aggs[prev_iter:curr_iter])\n prev_iter = curr_iter\n pod_labels = [('pod_' + str(i)) for i in range(num_pods)]\n pods_dict = {tuple([pod]): nx.Graph() for pod in pod_labels}\n for pod_iter in range(num_pods):\n key = 'pod_' + str(pod_iter),\n pod_edges = pods[pod_iter][0]\n pod_aggs = pods[pod_iter][1]\n pods_dict[key].add_nodes_from(pod_edges)\n pods_dict[key].add_nodes_from(pod_aggs)\n for pod_edge in pod_edges:\n for pod_agg in pod_aggs:\n pods_dict[key].add_edge(pod_agg, pod_edge)\n add_edge_capacity_attrs(pods_dict[key], (pod_agg,\n pod_edge), channel_names, edge_to_agg_channel_capacity)\n pod_networks = list(pods_dict.values())\n for pod_iter in range(num_pods):\n fat_tree_network = nx.compose(fat_tree_network, pod_networks[\n pod_iter])\n for pod_iter in range(num_pods):\n pod_aggs = pods[pod_iter][1]\n core_iterator = iter(cores)\n for pod_agg in pod_aggs:\n while fat_tree_network.degree[pod_agg] < k:\n core = next(core_iterator)\n fat_tree_network.add_edge(core, pod_agg)\n add_edge_capacity_attrs(fat_tree_network, (core,\n pod_agg), channel_names, 
agg_to_core_channel_capacity)\n rack_iterator = iter(racks)\n for pod_iter in range(num_pods):\n pod_edges = pods[pod_iter][0]\n for pod_edge in pod_edges:\n while fat_tree_network.degree[pod_edge] < k:\n rack = next(rack_iterator)\n fat_tree_network.add_edge(pod_edge, rack)\n add_edge_capacity_attrs(fat_tree_network, (pod_edge,\n rack), channel_names, rack_to_edge_channel_capacity)\n racks_dict = {rack: [] for rack in racks}\n server_iterator = iter(servers)\n for rack in racks:\n for _ in range(n):\n server = next(server_iterator)\n fat_tree_network.add_edge(rack, server)\n add_edge_capacity_attrs(fat_tree_network, (rack, server),\n channel_names, server_to_rack_channel_capacity)\n racks_dict[rack].append(server)\n max_nw_capacity = (num_servers * num_channels *\n server_to_rack_channel_capacity / 2)\n fat_tree_network.graph['endpoints'] = servers\n init_global_network_attrs(fat_tree_network, max_nw_capacity,\n num_channels, ep_link_capacity=server_to_rack_channel_capacity *\n num_channels, endpoint_label=ep_label, node_labels=node_labels,\n topology_type='fat_tree', racks_dict=racks_dict)\n if show_fig:\n plot_network(fat_tree_network, show_fig=True)\n return fat_tree_network\n\n\ndef init_global_network_attrs(network, max_nw_capacity, num_channels,\n ep_link_capacity, endpoint_label='server', topology_type='unknown',\n node_labels=['server'], racks_dict=None):\n \"\"\"Initialises the standard global network attributes of a given network.\n\n Args:\n network (obj): NetworkX object.\n max_nw_capacity (int/float): Maximum rate at which info can be reliably \n transmitted over the network (sum of all link capacities).\n num_channels (int): Number of channels on each link in network.\n topology_type (str): Label of network topology (e.g. 'fat_tree').\n node_labels (list): Label classes assigned to network nodes \n (e.g. ['server', 'rack', 'edge']).\n racks_dict (dict): Which servers/endpoints are in which rack. 
If None,\n assume do not have rack system where have multiple servers in one\n rack.\n\n \"\"\"\n network.graph['endpoint_label'] = endpoint_label\n network.graph['num_channels_per_link'] = num_channels\n network.graph['ep_link_capacity'] = ep_link_capacity\n network.graph['ep_link_port_capacity'] = ep_link_capacity / 2\n network.graph['max_nw_capacity'] = max_nw_capacity\n network.graph['curr_nw_capacity_used'] = 0\n network.graph['num_active_connections'] = 0\n network.graph['total_connections_blocked'] = 0\n network.graph['node_labels'] = node_labels\n network.graph['topology_type'] = topology_type\n network.graph['channel_names'] = gen_channel_names(num_channels)\n if racks_dict is not None:\n _racks_dict = {}\n for key, val in racks_dict.items():\n _racks_dict[str(key)] = []\n for v in val:\n _racks_dict[str(key)].append(str(v))\n network.graph['rack_to_ep_dict'] = _racks_dict\n else:\n network.graph['rack_to_ep_dict'] = None\n if racks_dict is not None:\n ep_to_rack_dict = {}\n for key, val in _racks_dict.items():\n for v in val:\n if v not in ep_to_rack_dict.keys():\n ep_to_rack_dict[v] = key\n network.graph['ep_to_rack_dict'] = ep_to_rack_dict\n else:\n network.graph['ep_to_rack_dict'] = None\n\n\ndef gen_channel_names(num_channels):\n \"\"\"Generates channel names for channels on each link in network.\"\"\"\n channels = [(channel + 1) for channel in range(num_channels)]\n channel_names = [('channel_' + str(channel)) for channel in channels]\n return channel_names\n\n\ndef add_edge_capacity_attrs(network, edge, channel_names, channel_capacity,\n bidirectional_links=True):\n \"\"\"Adds channels and corresponding max channel bytes to single edge in network.\n \n Args:\n network (networkx graph): Network containing edges to whiich attrs will\n be added.\n edge (tuple): Node-node edge pair.\n channel_names (list): List of channel names to add to edge.\n channel_capacity (int,float): Capacity to allocate to each channel.\n bidirectional_links (bool): If True, each link has capacity split equally\n between src and dst port. I.e. all links have a src and dst port\n which are treated separately to incoming and outgoing traffic to and\n from given node (switch or server).\n\n \"\"\"\n if bidirectional_links:\n attrs = {edge: {'{}_to_{}_port'.format(edge[0], edge[1]): {\n 'channels': {channel: (channel_capacity / 2) for channel in\n channel_names}, 'max_channel_capacity': channel_capacity / 2},\n '{}_to_{}_port'.format(edge[1], edge[0]): {'channels': {channel:\n (channel_capacity / 2) for channel in channel_names},\n 'max_channel_capacity': channel_capacity / 2}}}\n else:\n attrs = {edge: {'channels': {channel: channel_capacity for channel in\n channel_names}, 'max_channel_capacity': channel_capacity}}\n nx.set_edge_attributes(network, attrs)\n\n\ndef add_edges_capacity_attrs(network, edges, channel_names,\n channel_capacity, bidirectional_links=True):\n \"\"\"Adds channels & max channel capacitys to single edge in network.\n \n To access e.g. the edge going from node 0 to node 1 (edge (0, 1)), you\n would index the network with network[0][1]\n\n To access e.g. 
the channel_1 attribute of this particular (0, 1) edge, you\n would do network[0][1]['channels']['channel_1']\n OR\n if bidirectional_links, you do network[0][1]['0_to_1_port']['channels']['channel_1']\n or network[0][1]['1_to_0_port']['channels']['channel_1] depending on which direction\n of the link you want to access.\n \n Args:\n network (networkx graph): Network containing edges to which attrs will\n be added.\n edges (list): List of node pairs in tuples.\n channel_names (list of str): List of channel names to add to edge.\n channel_capacity (int, float): Capacity to allocate to each channel.\n bidirectional_links (bool): If True, each link has capacity split equally\n between src and dst port. I.e. all links have a src and dst port\n which are treated separately to incoming and outgoing traffic to and\n from given node (switch or server).\n\n \"\"\"\n if bidirectional_links:\n attrs = {edge: {'{}_to_{}_port'.format(edge[0], edge[1]): {\n 'channels': {channel: (channel_capacity / 2) for channel in\n channel_names}, 'max_channel_capacity': channel_capacity / 2},\n '{}_to_{}_port'.format(edge[1], edge[0]): {'channels': {channel:\n (channel_capacity / 2) for channel in channel_names},\n 'max_channel_capacity': channel_capacity / 2}} for edge in edges}\n else:\n attrs = {edge: {'channels': {channel: channel_capacity for channel in\n channel_names}, 'max_channel_capacity': channel_capacity} for\n edge in edges}\n nx.set_edge_attributes(network, attrs)\n\n\ndef get_node_type_dict(network, node_types=[]):\n \"\"\"Gets dict where keys are node types, values are list of nodes for each node type in graph.\"\"\"\n network_nodes = []\n for network_node in network.nodes:\n network_nodes.append(network_node)\n network_nodes_dict = {node_type: [] for node_type in node_types}\n for n in network_nodes:\n for node_type in node_types:\n if node_type in n:\n network_nodes_dict[node_type].append(n)\n else:\n pass\n return network_nodes_dict\n\n\ndef get_fat_tree_positions(net, width_scale=500, height_scale=10):\n \"\"\"Gets networkx positions of nodes in fat tree network for plotting.\"\"\"\n pos = {}\n node_type_dict = get_node_type_dict(net, net.graph['node_labels'])\n node_types = list(node_type_dict.keys())\n heights = {}\n widths = {}\n h = iter([1, 2, 3, 4, 5])\n for node_type in node_types:\n heights[node_type] = next(h)\n widths[node_type] = 1 / (len(node_type_dict[node_type]) + 1)\n idx = 0\n for node in node_type_dict[node_type]:\n pos[node] = (idx + 1) * widths[node_type] * width_scale, heights[\n node_type] * height_scale\n idx += 1\n return pos\n\n\ndef init_network_node_positions(net):\n \"\"\"Initialises network node positions for plotting.\"\"\"\n if net.graph['topology_type'] == 'fat_tree':\n pos = get_fat_tree_positions(net)\n else:\n pos = nx.nx_agraph.graphviz_layout(net, prog='neato')\n return pos\n\n\ndef plot_network(network, draw_node_labels=True, ep_label='server',\n network_node_size=2000, font_size=30, linewidths=1, fig_scale=2,\n path_to_save=None, show_fig=False):\n \"\"\"Plots networkx graph.\n\n Recognises special fat tree network and applies appropriate node positioning,\n labelling, colouring etc.\n\n Args:\n network (networkx graph): Network object to be plotted.\n draw_node_labels (bool): Whether or not to draw node labels on plot. \n ep_label (str,int,float): Endpoint label (e.g. 'server'). All endpoints will have\n ep_label appended to the start of their label (e.g. 
'server_0', 'server_1', ...).\n network_node_size (int,float): Size of plotted nodes.\n font_size (int,float): Size of of font of plotted labels etc.\n linewidths (int,float): Width of edges in network.\n fig_scale (int,float): Scaling factor to apply to plotted network.\n path_to_save (str): Path to directory (with file name included) in which\n to save generated plot. E.g. path_to_save='data/my_plot'\n show_fig (bool): Whether or not to plot and show fig. If True, will\n return and display fig.\n \n Returns:\n matplotlib.figure.Figure: node distribution plotted as a 2d matrix. \n\n \"\"\"\n net_node_positions = init_network_node_positions(copy.deepcopy(network))\n fig = plt.figure(figsize=[15 * fig_scale, 15 * fig_scale])\n pos = {}\n network_nodes = []\n network_nodes_dict = get_node_type_dict(network, network.graph[\n 'node_labels'])\n for nodes in list(network_nodes_dict.values()):\n for network_node in nodes:\n pos[network_node] = net_node_positions[network_node]\n node_colours = iter(['#25c44d', '#36a0c7', '#e8b017', '#6115a3', '#160e63']\n )\n for node_type in network.graph['node_labels']:\n nx.draw_networkx_nodes(network, pos, nodelist=network_nodes_dict[\n node_type], node_size=network_node_size, node_color=next(\n node_colours), linewidths=linewidths, label=node_type)\n if draw_node_labels:\n nx.draw_networkx_labels(network, pos, font_size=font_size,\n font_color='k', font_family='sans-serif', font_weight='normal',\n alpha=1.0)\n fibre_links = list(network.edges)\n nx.draw_networkx_edges(network, pos, edgelist=fibre_links, edge_color=\n 'k', width=3, label='Fibre link')\n if path_to_save is not None:\n tools.pickle_data(path_to_save, fig)\n if show_fig:\n plt.show()\n return fig\n\n\n<mask token>\n",
"step-4": "<mask token>\nfrom trafpy.generator.src import tools\nimport copy\nimport networkx as nx\nimport matplotlib.pyplot as plt\nimport json\n\n\ndef gen_arbitrary_network(num_eps, ep_label=None, ep_capacity=12500,\n num_channels=1, racks_dict=None, topology_type=None):\n \"\"\"Generates an arbitrary network with num_eps nodes labelled as ep_label.\n\n Note that no edges are formed in this network; it is purely for ep name \n indexing purposes when using Demand class. This is useful where want to\n use the demand class but not necessarily with a carefully crafted networkx\n graph that accurately mimics the network you will use for the demands\n\n Args:\n num_eps (int): Number of endpoints in network.\n ep_label (str,int,float): Endpoint label (e.g. 'server'). All endpoints will have\n ep_label appended to the start of their label (e.g. 'server_0', 'server_1', ...).\n ep_capacity (int, float): Byte capacity per end point channel.\n num_channels (int, float): Number of channels on each link in network.\n racks_dict (dict): Mapping of which end points are in which racks. Keys are\n rack ids, values are list of end points. If None, assume there is not\n clustering/rack system in the network where have different end points\n in different clusters/racks.\n\n Returns:\n networkx graph: network object\n\n \"\"\"\n network = nx.Graph()\n network.add_nodes_from([node for node in range(num_eps)])\n if ep_label is None:\n servers = [str(i) for i in range(num_eps)]\n else:\n servers = [(ep_label + '_' + str(i)) for i in range(num_eps)]\n relabel_mapping = {node: label for node, label in zip(range(num_eps),\n servers)}\n network = nx.relabel_nodes(network, relabel_mapping)\n eps = []\n for node in list(network.nodes):\n try:\n if ep_label in node:\n eps.append(node)\n except TypeError:\n eps.append(node)\n network.graph['endpoints'] = eps\n max_nw_capacity = num_eps * ep_capacity * num_channels / 2\n if topology_type is None:\n topology_type = 'arbitrary_endpoints_{}_chancap_{}_channels_{}'.format(\n num_eps, ep_capacity, num_channels)\n init_global_network_attrs(network, max_nw_capacity, num_channels,\n ep_link_capacity=ep_capacity * num_channels, endpoint_label=\n ep_label, node_labels=[ep_label], racks_dict=racks_dict,\n topology_type=topology_type)\n return network\n\n\ndef gen_nsfnet_network(ep_label='server', rack_label='rack', N=0,\n num_channels=2, server_to_rack_channel_capacity=1,\n rack_to_rack_channel_capacity=10, show_fig=False):\n \"\"\"Generates the standard 14-node NSFNET topology (a U.S. core network).\n \n Args:\n ep_label (str,int,float): Endpoint label (e.g. 'server'). All endpoints will have\n ep_label appended to the start of their label (e.g. 'server_0', 'server_1', ...).\n N (int): Number of servers per rack. If 0, assume all nodes in nsfnet\n are endpoints\n num_channels (int,float): Number of channels on each link in network.\n server_to_rack_channel_capacity (int,float): Byte capacity per channel \n between servers and ToR switch.\n rack_to_rack_channel_capacity (int,float): Byte capacity per channel between racks.\n show_fig (bool): Whether or not to plot and show fig. 
If True, will\n display fig.\n\n Returns:\n networkx graph: network object\n\n \"\"\"\n channel_names = gen_channel_names(num_channels)\n network = nx.Graph()\n node_pair_list = [[0, 1], [0, 3], [0, 2], [1, 2], [1, 7], [3, 8], [3, 4\n ], [3, 6], [4, 5], [4, 5], [5, 2], [5, 13], [5, 12], [6, 7], [7, 10\n ], [8, 11], [8, 9], [9, 10], [9, 12], [10, 11], [10, 13], [11, 12]]\n if N == 0:\n label = ep_label\n else:\n label = rack_label\n for idx in range(len(node_pair_list)):\n node_pair_list[idx][0] = label + '_' + str(node_pair_list[idx][0])\n node_pair_list[idx][1] = label + '_' + str(node_pair_list[idx][1])\n for edge in node_pair_list:\n network.add_edge(*tuple(edge))\n if N == 0:\n racks_dict = None\n else:\n i = 0\n racks_dict = {rack: [] for rack in range(14)}\n for rack in range(14):\n for server in range(N):\n racks_dict[rack].append(ep_label + '_' + str(i))\n network.add_edge(ep_label + '_' + str(i), rack_label + '_' +\n str(rack))\n i += 1\n channel_names = gen_channel_names(num_channels)\n edges = [edge for edge in network.edges]\n add_edges_capacity_attrs(network, edges, channel_names,\n rack_to_rack_channel_capacity)\n network.graph['endpoints'] = get_endpoints(network, ep_label)\n max_nw_capacity = len(network.edges\n ) * num_channels * rack_to_rack_channel_capacity / 2\n init_global_network_attrs(network, max_nw_capacity, num_channels,\n ep_link_capacity=server_to_rack_channel_capacity * num_channels,\n endpoint_label=ep_label, node_labels=[ep_label, rack_label],\n topology_type='14_node_nsfnet', racks_dict=racks_dict)\n if show_fig:\n plot_network(network, show_fig=True)\n return network\n\n\ndef gen_simple_network(ep_label='server', num_channels=2,\n server_to_rack_channel_capacity=500, show_fig=False):\n \"\"\"Generates very simple 5-node topology.\n\n Args:\n ep_label (str,int,float): Endpoint label (e.g. 'server'). All endpoints will have\n ep_label appended to the start of their label (e.g. 'server_0', 'server_1', ...).\n num_channels (int,float): Number of channels on each link in network.\n channel_capacity (int,float): Byte capacity per channel.\n show_fig (bool): Whether or not to plot and show fig. If True, will\n display fig.\n\n Returns:\n networkx graph: network object\n\n \"\"\"\n network = nx.Graph()\n network.add_nodes_from([node for node in range(5)])\n network.add_edges_from([(0, 1), (0, 2), (1, 2), (2, 4), (4, 3), (3, 1)],\n weight=1)\n servers = [(ep_label + '_' + str(i)) for i in range(5)]\n relabel_mapping = {node: label for node, label in zip(range(5), servers)}\n network = nx.relabel_nodes(network, relabel_mapping)\n channel_names = gen_channel_names(num_channels)\n edges = [edge for edge in network.edges]\n add_edges_capacity_attrs(network, edges, channel_names,\n server_to_rack_channel_capacity)\n network.graph['endpoints'] = get_endpoints(network, ep_label)\n max_nw_capacity = len(network.edges\n ) * num_channels * server_to_rack_channel_capacity / 2\n init_global_network_attrs(network, max_nw_capacity, num_channels,\n ep_link_capacity=server_to_rack_channel_capacity * num_channels,\n endpoint_label=ep_label, node_labels=[ep_label], topology_type=\n '5_node_simple_network')\n if show_fig:\n plot_network(network, show_fig=True)\n return network\n\n\ndef get_endpoints(network, ep_label):\n \"\"\"Gets list of endpoints of network.\n\n Args:\n network (networkx graph): Networkx object.\n ep_label (str,int,float): Endpoint label (e.g. 'server'). All endpoints will have\n ep_label appended to the start of their label (e.g. 
'server_0', 'server_1', ...).\n\n Returns:\n eps (list): List of endpoints.\n\n \"\"\"\n eps = []\n for node in list(network.nodes):\n if ep_label in node:\n eps.append(node)\n return eps\n\n\ndef gen_fat_tree(k=4, L=2, n=4, ep_label='server', rack_label='rack',\n edge_label='edge', aggregate_label='agg', core_label='core',\n num_channels=2, server_to_rack_channel_capacity=500,\n rack_to_edge_channel_capacity=1000, edge_to_agg_channel_capacity=1000,\n agg_to_core_channel_capacity=2000, rack_to_core_channel_capacity=2000,\n show_fig=False):\n \"\"\"Generates a perfect fat tree (i.e. all layers have switches with same radix/number of ports).\n\n Top layer is always core (spine) switch layer, bottom layer is always\n ToR (leaf) layer.\n\n L must be either 2 (core, ToR) or 4 (core, agg, edge, ToR)\n\n N.B. L=2 is commonly referred to as '2-layer Clos' or 'Clos' or 'spine-leaf' topology\n\n Resource for building (scroll down to summary table with equations):\n\n https://packetpushers.net/demystifying-dcn-topologies-clos-fat-trees-part2/\n\n Another good resource for data centre topologies etc. in general:\n\n https://www.oreilly.com/library/view/bgp-in-the/9781491983416/ch01.html#:~:text=The%20most%20common%20routing%20protocol,single%20data%20center%2C%20as%20well.\n\n Parameters of network:\n\n - number of core (spine) switches = (k/2)^(L/2) (top layer)\n - number of edge switches (if L=4) = (k^2)/2\n - number of agg switches (if L=4) = (k^2)/2\n - number of pods (if L=4) (pod is a group of agg and/or edge switches) = 2*(k/2)^(L-2)\n - number of ToR (leaf) switches (racks) = 2*(k/2)^(L-1) (bottom layer)\n - number of server-facing ToR 'host' ports = 2*(k/2)^2 (can have multiple servers connected to same host port, & can oversubscribe)\n - number of servers = number ToR switches * n\n\n Args:\n k (int): Number of ports (links) on each switch (both up and down).\n L (int): Number of layers in the fat tree.\n n (int): Number of server per rack.\n ep_label (str,int,float): Endpoint label (e.g. 'server'). All endpoints will have\n ep_label appended to the start of their label (e.g. 
'server_0', 'server_1', ...).\n edge_label (str,int): Label to assign to edge switch nodes\n aggregate_label (str,int): Label to assign to edge switch nodes\n core_label (str,int): Label to assign to core switch nodes\n num_channels (int, float): Number of channels on each link in network\n server_to_edge_channel_capacity (int,float): Byte capacity per channel\n edge_to_agg_channel_capacity (int,float): (if L==4) Byte capacity per channel\n agg_to_core_channel_capacity (int,float): (if L==4) Byte capacity per channel\n rack_to_core_channel_capacity (int,float): (if L==2) Byte capacity per channel\n\n Returns:\n networkx graph: network object\n\n \"\"\"\n if L != 2 and L != 4:\n raise Exception(\n 'L must be 2 (ToR layer, core layer) or 4 (ToR layer, edge layer, agg layer, core layer), but is {}.'\n .format(L))\n if k % 2 != 0:\n raise Exception(\n 'k must be even since, in perfect fat tree, have equal number of up and down ports on each switch, but is {}.'\n .format(k))\n channel_names = gen_channel_names(num_channels)\n if L == 2:\n node_labels = [ep_label, rack_label, core_label]\n else:\n node_labels = [ep_label, rack_label, edge_label, aggregate_label,\n core_label]\n num_cores = int((k / 2) ** (L / 2))\n num_aggs = int(k ** 2 / 2)\n num_edges = int(k ** 2 / 2)\n num_pods = int(2 * (k / 2) ** (L - 2))\n num_racks = int(2 * (k / 2) ** (L - 1))\n num_servers = int(num_racks * n)\n cores = [(core_label + '_' + str(i)) for i in range(num_cores)]\n aggs = [(aggregate_label + '_' + str(i)) for i in range(num_aggs)]\n edges = [(edge_label + '_' + str(i)) for i in range(num_edges)]\n racks = [(rack_label + '_' + str(i)) for i in range(num_racks)]\n servers = [(ep_label + '_' + str(i)) for i in range(num_servers)]\n core_layer = nx.Graph()\n rack_layer = nx.Graph()\n core_layer.add_nodes_from(cores)\n rack_layer.add_nodes_from(racks)\n fat_tree_network = nx.compose(core_layer, rack_layer)\n if L == 2:\n rack_iterator = iter(racks)\n for rack in racks:\n core_iterator = iter(cores)\n for up_port in range(int(k / 2)):\n core = next(core_iterator)\n fat_tree_network.add_edge(rack, core)\n add_edge_capacity_attrs(fat_tree_network, (rack, core),\n channel_names, rack_to_core_channel_capacity)\n else:\n num_pods = int(k)\n pods = [[] for i in range(num_pods)]\n prev_iter = 0\n for pod_iter in range(len(pods)):\n curr_iter = int(prev_iter + k / 2)\n pods[pod_iter].append(edges[prev_iter:curr_iter])\n pods[pod_iter].append(aggs[prev_iter:curr_iter])\n prev_iter = curr_iter\n pod_labels = [('pod_' + str(i)) for i in range(num_pods)]\n pods_dict = {tuple([pod]): nx.Graph() for pod in pod_labels}\n for pod_iter in range(num_pods):\n key = 'pod_' + str(pod_iter),\n pod_edges = pods[pod_iter][0]\n pod_aggs = pods[pod_iter][1]\n pods_dict[key].add_nodes_from(pod_edges)\n pods_dict[key].add_nodes_from(pod_aggs)\n for pod_edge in pod_edges:\n for pod_agg in pod_aggs:\n pods_dict[key].add_edge(pod_agg, pod_edge)\n add_edge_capacity_attrs(pods_dict[key], (pod_agg,\n pod_edge), channel_names, edge_to_agg_channel_capacity)\n pod_networks = list(pods_dict.values())\n for pod_iter in range(num_pods):\n fat_tree_network = nx.compose(fat_tree_network, pod_networks[\n pod_iter])\n for pod_iter in range(num_pods):\n pod_aggs = pods[pod_iter][1]\n core_iterator = iter(cores)\n for pod_agg in pod_aggs:\n while fat_tree_network.degree[pod_agg] < k:\n core = next(core_iterator)\n fat_tree_network.add_edge(core, pod_agg)\n add_edge_capacity_attrs(fat_tree_network, (core,\n pod_agg), channel_names, 
agg_to_core_channel_capacity)\n rack_iterator = iter(racks)\n for pod_iter in range(num_pods):\n pod_edges = pods[pod_iter][0]\n for pod_edge in pod_edges:\n while fat_tree_network.degree[pod_edge] < k:\n rack = next(rack_iterator)\n fat_tree_network.add_edge(pod_edge, rack)\n add_edge_capacity_attrs(fat_tree_network, (pod_edge,\n rack), channel_names, rack_to_edge_channel_capacity)\n racks_dict = {rack: [] for rack in racks}\n server_iterator = iter(servers)\n for rack in racks:\n for _ in range(n):\n server = next(server_iterator)\n fat_tree_network.add_edge(rack, server)\n add_edge_capacity_attrs(fat_tree_network, (rack, server),\n channel_names, server_to_rack_channel_capacity)\n racks_dict[rack].append(server)\n max_nw_capacity = (num_servers * num_channels *\n server_to_rack_channel_capacity / 2)\n fat_tree_network.graph['endpoints'] = servers\n init_global_network_attrs(fat_tree_network, max_nw_capacity,\n num_channels, ep_link_capacity=server_to_rack_channel_capacity *\n num_channels, endpoint_label=ep_label, node_labels=node_labels,\n topology_type='fat_tree', racks_dict=racks_dict)\n if show_fig:\n plot_network(fat_tree_network, show_fig=True)\n return fat_tree_network\n\n\ndef init_global_network_attrs(network, max_nw_capacity, num_channels,\n ep_link_capacity, endpoint_label='server', topology_type='unknown',\n node_labels=['server'], racks_dict=None):\n \"\"\"Initialises the standard global network attributes of a given network.\n\n Args:\n network (obj): NetworkX object.\n max_nw_capacity (int/float): Maximum rate at which info can be reliably \n transmitted over the network (sum of all link capacities).\n num_channels (int): Number of channels on each link in network.\n topology_type (str): Label of network topology (e.g. 'fat_tree').\n node_labels (list): Label classes assigned to network nodes \n (e.g. ['server', 'rack', 'edge']).\n racks_dict (dict): Which servers/endpoints are in which rack. 
If None,\n assume do not have rack system where have multiple servers in one\n rack.\n\n \"\"\"\n network.graph['endpoint_label'] = endpoint_label\n network.graph['num_channels_per_link'] = num_channels\n network.graph['ep_link_capacity'] = ep_link_capacity\n network.graph['ep_link_port_capacity'] = ep_link_capacity / 2\n network.graph['max_nw_capacity'] = max_nw_capacity\n network.graph['curr_nw_capacity_used'] = 0\n network.graph['num_active_connections'] = 0\n network.graph['total_connections_blocked'] = 0\n network.graph['node_labels'] = node_labels\n network.graph['topology_type'] = topology_type\n network.graph['channel_names'] = gen_channel_names(num_channels)\n if racks_dict is not None:\n _racks_dict = {}\n for key, val in racks_dict.items():\n _racks_dict[str(key)] = []\n for v in val:\n _racks_dict[str(key)].append(str(v))\n network.graph['rack_to_ep_dict'] = _racks_dict\n else:\n network.graph['rack_to_ep_dict'] = None\n if racks_dict is not None:\n ep_to_rack_dict = {}\n for key, val in _racks_dict.items():\n for v in val:\n if v not in ep_to_rack_dict.keys():\n ep_to_rack_dict[v] = key\n network.graph['ep_to_rack_dict'] = ep_to_rack_dict\n else:\n network.graph['ep_to_rack_dict'] = None\n\n\ndef gen_channel_names(num_channels):\n \"\"\"Generates channel names for channels on each link in network.\"\"\"\n channels = [(channel + 1) for channel in range(num_channels)]\n channel_names = [('channel_' + str(channel)) for channel in channels]\n return channel_names\n\n\ndef add_edge_capacity_attrs(network, edge, channel_names, channel_capacity,\n bidirectional_links=True):\n \"\"\"Adds channels and corresponding max channel bytes to single edge in network.\n \n Args:\n network (networkx graph): Network containing edges to whiich attrs will\n be added.\n edge (tuple): Node-node edge pair.\n channel_names (list): List of channel names to add to edge.\n channel_capacity (int,float): Capacity to allocate to each channel.\n bidirectional_links (bool): If True, each link has capacity split equally\n between src and dst port. I.e. all links have a src and dst port\n which are treated separately to incoming and outgoing traffic to and\n from given node (switch or server).\n\n \"\"\"\n if bidirectional_links:\n attrs = {edge: {'{}_to_{}_port'.format(edge[0], edge[1]): {\n 'channels': {channel: (channel_capacity / 2) for channel in\n channel_names}, 'max_channel_capacity': channel_capacity / 2},\n '{}_to_{}_port'.format(edge[1], edge[0]): {'channels': {channel:\n (channel_capacity / 2) for channel in channel_names},\n 'max_channel_capacity': channel_capacity / 2}}}\n else:\n attrs = {edge: {'channels': {channel: channel_capacity for channel in\n channel_names}, 'max_channel_capacity': channel_capacity}}\n nx.set_edge_attributes(network, attrs)\n\n\ndef add_edges_capacity_attrs(network, edges, channel_names,\n channel_capacity, bidirectional_links=True):\n \"\"\"Adds channels & max channel capacitys to single edge in network.\n \n To access e.g. the edge going from node 0 to node 1 (edge (0, 1)), you\n would index the network with network[0][1]\n\n To access e.g. 
the channel_1 attribute of this particular (0, 1) edge, you\n would do network[0][1]['channels']['channel_1']\n OR\n if bidirectional_links, you do network[0][1]['0_to_1_port']['channels']['channel_1']\n or network[0][1]['1_to_0_port']['channels']['channel_1] depending on which direction\n of the link you want to access.\n \n Args:\n network (networkx graph): Network containing edges to which attrs will\n be added.\n edges (list): List of node pairs in tuples.\n channel_names (list of str): List of channel names to add to edge.\n channel_capacity (int, float): Capacity to allocate to each channel.\n bidirectional_links (bool): If True, each link has capacity split equally\n between src and dst port. I.e. all links have a src and dst port\n which are treated separately to incoming and outgoing traffic to and\n from given node (switch or server).\n\n \"\"\"\n if bidirectional_links:\n attrs = {edge: {'{}_to_{}_port'.format(edge[0], edge[1]): {\n 'channels': {channel: (channel_capacity / 2) for channel in\n channel_names}, 'max_channel_capacity': channel_capacity / 2},\n '{}_to_{}_port'.format(edge[1], edge[0]): {'channels': {channel:\n (channel_capacity / 2) for channel in channel_names},\n 'max_channel_capacity': channel_capacity / 2}} for edge in edges}\n else:\n attrs = {edge: {'channels': {channel: channel_capacity for channel in\n channel_names}, 'max_channel_capacity': channel_capacity} for\n edge in edges}\n nx.set_edge_attributes(network, attrs)\n\n\ndef get_node_type_dict(network, node_types=[]):\n \"\"\"Gets dict where keys are node types, values are list of nodes for each node type in graph.\"\"\"\n network_nodes = []\n for network_node in network.nodes:\n network_nodes.append(network_node)\n network_nodes_dict = {node_type: [] for node_type in node_types}\n for n in network_nodes:\n for node_type in node_types:\n if node_type in n:\n network_nodes_dict[node_type].append(n)\n else:\n pass\n return network_nodes_dict\n\n\ndef get_fat_tree_positions(net, width_scale=500, height_scale=10):\n \"\"\"Gets networkx positions of nodes in fat tree network for plotting.\"\"\"\n pos = {}\n node_type_dict = get_node_type_dict(net, net.graph['node_labels'])\n node_types = list(node_type_dict.keys())\n heights = {}\n widths = {}\n h = iter([1, 2, 3, 4, 5])\n for node_type in node_types:\n heights[node_type] = next(h)\n widths[node_type] = 1 / (len(node_type_dict[node_type]) + 1)\n idx = 0\n for node in node_type_dict[node_type]:\n pos[node] = (idx + 1) * widths[node_type] * width_scale, heights[\n node_type] * height_scale\n idx += 1\n return pos\n\n\ndef init_network_node_positions(net):\n \"\"\"Initialises network node positions for plotting.\"\"\"\n if net.graph['topology_type'] == 'fat_tree':\n pos = get_fat_tree_positions(net)\n else:\n pos = nx.nx_agraph.graphviz_layout(net, prog='neato')\n return pos\n\n\ndef plot_network(network, draw_node_labels=True, ep_label='server',\n network_node_size=2000, font_size=30, linewidths=1, fig_scale=2,\n path_to_save=None, show_fig=False):\n \"\"\"Plots networkx graph.\n\n Recognises special fat tree network and applies appropriate node positioning,\n labelling, colouring etc.\n\n Args:\n network (networkx graph): Network object to be plotted.\n draw_node_labels (bool): Whether or not to draw node labels on plot. \n ep_label (str,int,float): Endpoint label (e.g. 'server'). All endpoints will have\n ep_label appended to the start of their label (e.g. 
'server_0', 'server_1', ...).\n network_node_size (int,float): Size of plotted nodes.\n font_size (int,float): Size of of font of plotted labels etc.\n linewidths (int,float): Width of edges in network.\n fig_scale (int,float): Scaling factor to apply to plotted network.\n path_to_save (str): Path to directory (with file name included) in which\n to save generated plot. E.g. path_to_save='data/my_plot'\n show_fig (bool): Whether or not to plot and show fig. If True, will\n return and display fig.\n \n Returns:\n matplotlib.figure.Figure: node distribution plotted as a 2d matrix. \n\n \"\"\"\n net_node_positions = init_network_node_positions(copy.deepcopy(network))\n fig = plt.figure(figsize=[15 * fig_scale, 15 * fig_scale])\n pos = {}\n network_nodes = []\n network_nodes_dict = get_node_type_dict(network, network.graph[\n 'node_labels'])\n for nodes in list(network_nodes_dict.values()):\n for network_node in nodes:\n pos[network_node] = net_node_positions[network_node]\n node_colours = iter(['#25c44d', '#36a0c7', '#e8b017', '#6115a3', '#160e63']\n )\n for node_type in network.graph['node_labels']:\n nx.draw_networkx_nodes(network, pos, nodelist=network_nodes_dict[\n node_type], node_size=network_node_size, node_color=next(\n node_colours), linewidths=linewidths, label=node_type)\n if draw_node_labels:\n nx.draw_networkx_labels(network, pos, font_size=font_size,\n font_color='k', font_family='sans-serif', font_weight='normal',\n alpha=1.0)\n fibre_links = list(network.edges)\n nx.draw_networkx_edges(network, pos, edgelist=fibre_links, edge_color=\n 'k', width=3, label='Fibre link')\n if path_to_save is not None:\n tools.pickle_data(path_to_save, fig)\n if show_fig:\n plt.show()\n return fig\n\n\nif __name__ == '__main__':\n network = gen_fat_tree(k=3)\n plot_network(network, 'figures/graph/', name='network_graph.png',\n with_labels=True)\n",
"step-5": "'''Module for generating and plotting networks.'''\n\nfrom trafpy.generator.src import tools\nimport copy\nimport networkx as nx\nimport matplotlib.pyplot as plt\nimport json\n\n\ndef gen_arbitrary_network(num_eps,\n ep_label=None, \n ep_capacity=12500, \n num_channels=1, \n racks_dict=None,\n topology_type=None):\n '''Generates an arbitrary network with num_eps nodes labelled as ep_label.\n\n Note that no edges are formed in this network; it is purely for ep name \n indexing purposes when using Demand class. This is useful where want to\n use the demand class but not necessarily with a carefully crafted networkx\n graph that accurately mimics the network you will use for the demands\n\n Args:\n num_eps (int): Number of endpoints in network.\n ep_label (str,int,float): Endpoint label (e.g. 'server'). All endpoints will have\n ep_label appended to the start of their label (e.g. 'server_0', 'server_1', ...).\n ep_capacity (int, float): Byte capacity per end point channel.\n num_channels (int, float): Number of channels on each link in network.\n racks_dict (dict): Mapping of which end points are in which racks. Keys are\n rack ids, values are list of end points. If None, assume there is not\n clustering/rack system in the network where have different end points\n in different clusters/racks.\n\n Returns:\n networkx graph: network object\n\n '''\n network = nx.Graph()\n network.add_nodes_from([node for node in range(num_eps)])\n \n if ep_label is None:\n # must be str or not json serialisable\n servers = [str(i) for i in range(num_eps)]\n else:\n servers = [ep_label+'_'+str(i) for i in range(num_eps)]\n relabel_mapping = {node: label for node, label in zip(range(num_eps),servers)}\n network = nx.relabel_nodes(network, relabel_mapping)\n eps = []\n for node in list(network.nodes):\n try:\n if ep_label in node:\n eps.append(node)\n except TypeError:\n # ep_label is None\n eps.append(node)\n network.graph['endpoints'] = eps\n\n # /= 2 to get max theoretical capacity (number of units which network can transfer per unit time)\n max_nw_capacity = (num_eps * ep_capacity * num_channels) / 2\n\n if topology_type is None:\n topology_type = 'arbitrary_endpoints_{}_chancap_{}_channels_{}'.format(num_eps, ep_capacity, num_channels)\n\n init_global_network_attrs(network,\n max_nw_capacity,\n num_channels,\n ep_link_capacity=ep_capacity*num_channels,\n endpoint_label=ep_label,\n node_labels=[ep_label],\n racks_dict=racks_dict,\n topology_type=topology_type)\n \n return network\n\n\n\ndef gen_nsfnet_network(ep_label='server', \n rack_label='rack',\n N=0, \n num_channels=2, \n server_to_rack_channel_capacity=1,\n rack_to_rack_channel_capacity=10,\n show_fig=False):\n '''Generates the standard 14-node NSFNET topology (a U.S. core network).\n \n Args:\n ep_label (str,int,float): Endpoint label (e.g. 'server'). All endpoints will have\n ep_label appended to the start of their label (e.g. 'server_0', 'server_1', ...).\n N (int): Number of servers per rack. If 0, assume all nodes in nsfnet\n are endpoints\n num_channels (int,float): Number of channels on each link in network.\n server_to_rack_channel_capacity (int,float): Byte capacity per channel \n between servers and ToR switch.\n rack_to_rack_channel_capacity (int,float): Byte capacity per channel between racks.\n show_fig (bool): Whether or not to plot and show fig. 
If True, will\n display fig.\n\n Returns:\n networkx graph: network object\n\n '''\n channel_names = gen_channel_names(num_channels)\n network = nx.Graph()\n\n node_pair_list = [[0,1],\n [0,3],\n [0,2],\n [1,2],\n [1,7],\n [3,8],\n [3,4],\n [3,6],\n [4,5],\n [4,5],\n [5,2],\n [5,13],\n [5,12],\n [6,7],\n [7,10],\n [8,11],\n [8,9],\n [9,10],\n [9,12],\n [10,11],\n [10,13],\n [11,12]]\n\n if N == 0:\n # above nodes are all end points\n label = ep_label\n else:\n # above nodes are ToR switch nodes\n label = rack_label\n for idx in range(len(node_pair_list)):\n node_pair_list[idx][0] = label + '_' + str(node_pair_list[idx][0])\n node_pair_list[idx][1] = label + '_' + str(node_pair_list[idx][1])\n\n # add 14 nodes\n for edge in node_pair_list:\n network.add_edge(*tuple(edge))\n\n if N == 0:\n # assume all nodes are servers\n racks_dict = None\n else:\n # each of 14 nodes in NSFNET is a ToR switch\n i = 0\n racks_dict = {rack: [] for rack in range(14)}\n for rack in range(14):\n for server in range(N):\n racks_dict[rack].append(ep_label+'_'+str(i))\n network.add_edge(ep_label+'_'+str(i), rack_label+'_'+str(rack))\n i += 1\n \n channel_names = gen_channel_names(num_channels)\n edges = [edge for edge in network.edges]\n add_edges_capacity_attrs(network, edges, channel_names, rack_to_rack_channel_capacity)\n\n # set gloabl network attrs\n network.graph['endpoints'] = get_endpoints(network, ep_label)\n\n # /= 2 to get max theoretical capacity (number of units which network can transfer per unit time)\n max_nw_capacity = (len(network.edges) * num_channels * rack_to_rack_channel_capacity) / 2\n\n init_global_network_attrs(network, \n max_nw_capacity, \n num_channels, \n ep_link_capacity=server_to_rack_channel_capacity*num_channels,\n endpoint_label=ep_label,\n node_labels=[ep_label, rack_label],\n topology_type='14_node_nsfnet',\n racks_dict=racks_dict)\n if show_fig:\n plot_network(network, show_fig=True)\n \n return network\n\ndef gen_simple_network(ep_label='server', \n num_channels=2, \n server_to_rack_channel_capacity=500,\n show_fig=False):\n '''Generates very simple 5-node topology.\n\n Args:\n ep_label (str,int,float): Endpoint label (e.g. 'server'). All endpoints will have\n ep_label appended to the start of their label (e.g. 'server_0', 'server_1', ...).\n num_channels (int,float): Number of channels on each link in network.\n channel_capacity (int,float): Byte capacity per channel.\n show_fig (bool): Whether or not to plot and show fig. 
If True, will\n display fig.\n\n Returns:\n networkx graph: network object\n\n '''\n network = nx.Graph()\n network.add_nodes_from([node for node in range(5)])\n network.add_edges_from([(0,1),\n (0,2),\n (1,2),\n (2,4),\n (4,3),\n (3,1)],weight=1)\n servers = [ep_label+'_'+str(i) for i in range(5)]\n relabel_mapping = {node: label for node, label in zip(range(5),servers)}\n network = nx.relabel_nodes(network, relabel_mapping)\n\n channel_names = gen_channel_names(num_channels)\n edges = [edge for edge in network.edges]\n add_edges_capacity_attrs(network, edges, channel_names, server_to_rack_channel_capacity)\n\n # set gloabl network attrs\n network.graph['endpoints'] = get_endpoints(network, ep_label)\n\n # /= 2 to get max theoretical capacity (number of units which network can transfer per unit time)\n max_nw_capacity = (len(network.edges) * num_channels * server_to_rack_channel_capacity) / 2\n\n init_global_network_attrs(network, \n max_nw_capacity, \n num_channels, \n ep_link_capacity=server_to_rack_channel_capacity*num_channels,\n endpoint_label=ep_label,\n node_labels=[ep_label],\n topology_type='5_node_simple_network')\n\n if show_fig:\n plot_network(network, show_fig=True)\n\n \n return network\n\ndef get_endpoints(network, ep_label):\n '''Gets list of endpoints of network.\n\n Args:\n network (networkx graph): Networkx object.\n ep_label (str,int,float): Endpoint label (e.g. 'server'). All endpoints will have\n ep_label appended to the start of their label (e.g. 'server_0', 'server_1', ...).\n\n Returns:\n eps (list): List of endpoints.\n\n '''\n eps = []\n for node in list(network.nodes):\n if ep_label in node:\n eps.append(node)\n\n return eps\n\ndef gen_fat_tree(k=4,\n L=2,\n n=4,\n ep_label='server',\n rack_label='rack',\n edge_label='edge',\n aggregate_label='agg',\n core_label='core',\n num_channels = 2,\n server_to_rack_channel_capacity=500,\n rack_to_edge_channel_capacity=1000,\n edge_to_agg_channel_capacity=1000,\n agg_to_core_channel_capacity=2000,\n rack_to_core_channel_capacity=2000,\n show_fig=False):\n '''Generates a perfect fat tree (i.e. all layers have switches with same radix/number of ports).\n\n Top layer is always core (spine) switch layer, bottom layer is always\n ToR (leaf) layer.\n\n L must be either 2 (core, ToR) or 4 (core, agg, edge, ToR)\n\n N.B. L=2 is commonly referred to as '2-layer Clos' or 'Clos' or 'spine-leaf' topology\n\n Resource for building (scroll down to summary table with equations):\n\n https://packetpushers.net/demystifying-dcn-topologies-clos-fat-trees-part2/\n\n Another good resource for data centre topologies etc. in general:\n\n https://www.oreilly.com/library/view/bgp-in-the/9781491983416/ch01.html#:~:text=The%20most%20common%20routing%20protocol,single%20data%20center%2C%20as%20well.\n\n Parameters of network:\n\n - number of core (spine) switches = (k/2)^(L/2) (top layer)\n - number of edge switches (if L=4) = (k^2)/2\n - number of agg switches (if L=4) = (k^2)/2\n - number of pods (if L=4) (pod is a group of agg and/or edge switches) = 2*(k/2)^(L-2)\n - number of ToR (leaf) switches (racks) = 2*(k/2)^(L-1) (bottom layer)\n - number of server-facing ToR 'host' ports = 2*(k/2)^2 (can have multiple servers connected to same host port, & can oversubscribe)\n - number of servers = number ToR switches * n\n\n Args:\n k (int): Number of ports (links) on each switch (both up and down).\n L (int): Number of layers in the fat tree.\n n (int): Number of server per rack.\n ep_label (str,int,float): Endpoint label (e.g. 'server'). 
All endpoints will have\n ep_label appended to the start of their label (e.g. 'server_0', 'server_1', ...).\n edge_label (str,int): Label to assign to edge switch nodes\n aggregate_label (str,int): Label to assign to edge switch nodes\n core_label (str,int): Label to assign to core switch nodes\n num_channels (int, float): Number of channels on each link in network\n server_to_edge_channel_capacity (int,float): Byte capacity per channel\n edge_to_agg_channel_capacity (int,float): (if L==4) Byte capacity per channel\n agg_to_core_channel_capacity (int,float): (if L==4) Byte capacity per channel\n rack_to_core_channel_capacity (int,float): (if L==2) Byte capacity per channel\n\n Returns:\n networkx graph: network object\n\n '''\n if L != 2 and L != 4:\n raise Exception('L must be 2 (ToR layer, core layer) or 4 (ToR layer, edge layer, agg layer, core layer), but is {}.'.format(L))\n if k % 2 != 0:\n raise Exception('k must be even since, in perfect fat tree, have equal number of up and down ports on each switch, but is {}.'.format(k))\n\n channel_names = gen_channel_names(num_channels)\n\n # initialise network nodes\n if L == 2:\n node_labels = [ep_label, rack_label, core_label]\n else:\n node_labels = [ep_label, rack_label, edge_label, aggregate_label, core_label]\n\n #num_cores = int((k/2)**(L-1))\n #num_cores = int((k/2)**2)\n num_cores = int((k/2)**(L/2))\n num_aggs = int((k**2)/2)\n num_edges = int((k**2)/2)\n num_pods = int(2*(k/2)**(L-2))\n num_racks = int(2*(k/2)**(L-1))\n num_servers = int(num_racks * n)\n\n cores = [core_label+'_'+str(i) for i in range(num_cores)]\n aggs = [aggregate_label+'_'+str(i) for i in range(num_aggs)]\n edges = [edge_label+'_'+str(i) for i in range(num_edges)]\n racks = [rack_label+'_'+str(i) for i in range(num_racks)]\n servers = [ep_label+'_'+str(i) for i in range(num_servers)]\n\n # create core and rack layer networks\n core_layer = nx.Graph()\n rack_layer = nx.Graph()\n core_layer.add_nodes_from(cores)\n rack_layer.add_nodes_from(racks)\n\n # combine cores and racks into single network\n fat_tree_network = nx.compose(core_layer, rack_layer)\n \n if L == 2:\n # 2 layers: Core, ToR\n # link racks to cores, add link attributes\n rack_iterator = iter(racks)\n for rack in racks:\n core_iterator = iter(cores)\n # have k/2 up-ports on each switch\n for up_port in range(int(k/2)):\n core = next(core_iterator)\n fat_tree_network.add_edge(rack, core)\n add_edge_capacity_attrs(fat_tree_network,\n (rack, core),\n channel_names,\n rack_to_core_channel_capacity)\n else:\n # 4 layers: Core, Agg, Edge, ToR. 
Agg and Edge switches grouped into pods.\n # group edges and aggregates into pods\n num_pods = int(k)\n pods = [[] for i in range(num_pods)]\n prev_iter = 0\n for pod_iter in range(len(pods)):\n curr_iter = int(prev_iter + (k/2))\n pods[pod_iter].append(edges[prev_iter:curr_iter])\n pods[pod_iter].append(aggs[prev_iter:curr_iter])\n prev_iter = curr_iter\n\n # create dict of pod networks\n pod_labels = ['pod_'+str(i) for i in range(num_pods)]\n pods_dict = {tuple([pod]): nx.Graph() for pod in pod_labels}\n for pod_iter in range(num_pods):\n key = ('pod_'+str(pod_iter),)\n pod_edges = pods[pod_iter][0]\n\n pod_aggs = pods[pod_iter][1]\n pods_dict[key].add_nodes_from(pod_edges)\n pods_dict[key].add_nodes_from(pod_aggs)\n\n # connect edge and aggregate switches within pod, add link attributes\n for pod_edge in pod_edges:\n for pod_agg in pod_aggs:\n pods_dict[key].add_edge(pod_agg, pod_edge)\n add_edge_capacity_attrs(pods_dict[key], \n (pod_agg,pod_edge), \n channel_names, \n edge_to_agg_channel_capacity)\n\n # add pods (agg + edge) layer to fat-tree\n pod_networks = list(pods_dict.values())\n for pod_iter in range(num_pods):\n fat_tree_network = nx.compose(fat_tree_network, pod_networks[pod_iter])\n\n # link aggregate switches in pods to core switches, add link attributes\n for pod_iter in range(num_pods):\n pod_aggs = pods[pod_iter][1]\n core_iterator = iter(cores)\n for pod_agg in pod_aggs:\n while fat_tree_network.degree[pod_agg] < k:\n core = next(core_iterator)\n fat_tree_network.add_edge(core, pod_agg)\n add_edge_capacity_attrs(fat_tree_network,\n (core,pod_agg),\n channel_names,\n agg_to_core_channel_capacity)\n\n # link edge switches in pods to racks, add link attributes\n rack_iterator = iter(racks)\n for pod_iter in range(num_pods):\n pod_edges = pods[pod_iter][0]\n for pod_edge in pod_edges:\n while fat_tree_network.degree[pod_edge] < k:\n rack = next(rack_iterator)\n fat_tree_network.add_edge(pod_edge, rack)\n add_edge_capacity_attrs(fat_tree_network,\n (pod_edge,rack),\n channel_names,\n rack_to_edge_channel_capacity)\n\n # link servers to racks, add link attributes\n racks_dict = {rack: [] for rack in racks} # track which endpoints in which rack\n server_iterator = iter(servers)\n for rack in racks:\n for _ in range(n):\n server = next(server_iterator)\n fat_tree_network.add_edge(rack, server)\n add_edge_capacity_attrs(fat_tree_network,\n (rack, server),\n channel_names,\n server_to_rack_channel_capacity)\n racks_dict[rack].append(server)\n\n # calc total network capacity\n # /= 2 to get max theoretical capacity (number of units which network can transfer per unit time)\n max_nw_capacity = (num_servers * num_channels * server_to_rack_channel_capacity) / 2\n\n\n # init global network attrs\n fat_tree_network.graph['endpoints'] = servers\n init_global_network_attrs(fat_tree_network, \n max_nw_capacity, \n num_channels, \n ep_link_capacity=server_to_rack_channel_capacity*num_channels,\n endpoint_label=ep_label,\n node_labels=node_labels,\n topology_type='fat_tree',\n racks_dict=racks_dict)\n\n if show_fig:\n plot_network(fat_tree_network, show_fig=True)\n\n return fat_tree_network\n\n \n\n\ndef init_global_network_attrs(network, \n max_nw_capacity, \n num_channels, \n ep_link_capacity,\n endpoint_label = 'server',\n topology_type='unknown', \n node_labels=['server'],\n racks_dict=None):\n '''Initialises the standard global network attributes of a given network.\n\n Args:\n network (obj): NetworkX object.\n max_nw_capacity (int/float): Maximum rate at which info can be reliably \n 
transmitted over the network (sum of all link capacities).\n num_channels (int): Number of channels on each link in network.\n topology_type (str): Label of network topology (e.g. 'fat_tree').\n node_labels (list): Label classes assigned to network nodes \n (e.g. ['server', 'rack', 'edge']).\n racks_dict (dict): Which servers/endpoints are in which rack. If None,\n assume do not have rack system where have multiple servers in one\n rack.\n\n '''\n network.graph['endpoint_label'] = endpoint_label\n network.graph['num_channels_per_link'] = num_channels\n network.graph['ep_link_capacity'] = ep_link_capacity\n network.graph['ep_link_port_capacity'] = ep_link_capacity / 2 # all eps have a src & a dst port\n network.graph['max_nw_capacity'] = max_nw_capacity\n network.graph['curr_nw_capacity_used'] = 0\n network.graph['num_active_connections'] = 0\n network.graph['total_connections_blocked'] = 0\n network.graph['node_labels'] = node_labels\n network.graph['topology_type'] = topology_type\n network.graph['channel_names'] = gen_channel_names(num_channels)\n\n # ensure racks dict is str so json serialisable\n if racks_dict is not None:\n _racks_dict = {}\n for key, val in racks_dict.items():\n _racks_dict[str(key)] = []\n for v in val:\n _racks_dict[str(key)].append(str(v))\n network.graph['rack_to_ep_dict'] = _racks_dict\n else:\n network.graph['rack_to_ep_dict'] = None\n\n if racks_dict is not None:\n # switch racks_dict keys and values to make hashing easier\n ep_to_rack_dict = {}\n for key, val in _racks_dict.items():\n for v in val:\n if v not in ep_to_rack_dict.keys():\n ep_to_rack_dict[v] = key\n network.graph['ep_to_rack_dict'] = ep_to_rack_dict\n else:\n network.graph['ep_to_rack_dict'] = None\n\n\ndef gen_channel_names(num_channels):\n '''Generates channel names for channels on each link in network.'''\n channels = [channel+1 for channel in range(num_channels)]\n channel_names = ['channel_' + str(channel) for channel in channels]\n \n return channel_names\n\ndef add_edge_capacity_attrs(network, \n edge, \n channel_names, \n channel_capacity, \n bidirectional_links=True):\n '''Adds channels and corresponding max channel bytes to single edge in network.\n \n Args:\n network (networkx graph): Network containing edges to whiich attrs will\n be added.\n edge (tuple): Node-node edge pair.\n channel_names (list): List of channel names to add to edge.\n channel_capacity (int,float): Capacity to allocate to each channel.\n bidirectional_links (bool): If True, each link has capacity split equally\n between src and dst port. I.e. all links have a src and dst port\n which are treated separately to incoming and outgoing traffic to and\n from given node (switch or server).\n\n '''\n if bidirectional_links:\n attrs = {edge:\n {'{}_to_{}_port'.format(edge[0], edge[1]):\n {'channels':\n {channel: channel_capacity/2 for channel in channel_names},\n 'max_channel_capacity': channel_capacity/2\n },\n '{}_to_{}_port'.format(edge[1], edge[0]):\n {'channels':\n {channel: channel_capacity/2 for channel in channel_names},\n 'max_channel_capacity': channel_capacity/2\n }\n }\n }\n \n else:\n attrs = {edge:\n {'channels': {channel: channel_capacity for channel in channel_names},\n 'max_channel_capacity': channel_capacity}}\n \n nx.set_edge_attributes(network, attrs)\n\n\n\n\ndef add_edges_capacity_attrs(network, \n edges,\n channel_names,\n channel_capacity,\n bidirectional_links=True):\n '''Adds channels & max channel capacitys to single edge in network.\n \n To access e.g. 
the edge going from node 0 to node 1 (edge (0, 1)), you\n would index the network with network[0][1]\n\n To access e.g. the channel_1 attribute of this particular (0, 1) edge, you\n would do network[0][1]['channels']['channel_1']\n OR\n if bidirectional_links, you do network[0][1]['0_to_1_port']['channels']['channel_1']\n or network[0][1]['1_to_0_port']['channels']['channel_1] depending on which direction\n of the link you want to access.\n \n Args:\n network (networkx graph): Network containing edges to which attrs will\n be added.\n edges (list): List of node pairs in tuples.\n channel_names (list of str): List of channel names to add to edge.\n channel_capacity (int, float): Capacity to allocate to each channel.\n bidirectional_links (bool): If True, each link has capacity split equally\n between src and dst port. I.e. all links have a src and dst port\n which are treated separately to incoming and outgoing traffic to and\n from given node (switch or server).\n\n '''\n if bidirectional_links:\n attrs = {edge:\n {'{}_to_{}_port'.format(edge[0], edge[1]):\n {'channels':\n {channel: channel_capacity/2 for channel in channel_names},\n 'max_channel_capacity': channel_capacity/2\n },\n '{}_to_{}_port'.format(edge[1], edge[0]):\n {'channels':\n {channel: channel_capacity/2 for channel in channel_names},\n 'max_channel_capacity': channel_capacity/2\n }\n }\n for edge in edges}\n else:\n attrs = {edge: \n {'channels': \n {channel: channel_capacity for channel in channel_names},\n 'max_channel_capacity': \n channel_capacity\n } for edge in edges}\n\n nx.set_edge_attributes(network, attrs)\n \n\ndef get_node_type_dict(network, node_types=[]):\n '''Gets dict where keys are node types, values are list of nodes for each node type in graph.'''\n network_nodes = []\n for network_node in network.nodes:\n network_nodes.append(network_node)\n network_nodes_dict = {node_type: [] for node_type in node_types}\n for n in network_nodes:\n for node_type in node_types:\n if node_type in n:\n network_nodes_dict[node_type].append(n)\n else:\n # not this node type\n pass\n \n return network_nodes_dict\n\n\ndef get_fat_tree_positions(net, width_scale=500, height_scale=10):\n '''Gets networkx positions of nodes in fat tree network for plotting.'''\n pos = {}\n\n node_type_dict = get_node_type_dict(net, net.graph['node_labels'])\n node_types = list(node_type_dict.keys())\n \n heights = {} # dict for heigh separation between fat tree layers\n widths = {} # dict for width separation between nodes within layers\n h = iter([1, 2, 3, 4, 5]) # server, rack, edge, agg, core heights\n for node_type in node_types: \n heights[node_type] = next(h)\n widths[node_type] = 1/(len(node_type_dict[node_type])+1)\n idx = 0\n for node in node_type_dict[node_type]:\n pos[node] = ((idx+1)*widths[node_type]*width_scale,heights[node_type]*height_scale)\n idx += 1\n\n return pos\n \n\ndef init_network_node_positions(net):\n '''Initialises network node positions for plotting.'''\n if net.graph['topology_type'] == 'fat_tree':\n pos = get_fat_tree_positions(net)\n\n else:\n pos = nx.nx_agraph.graphviz_layout(net, prog='neato')\n \n return pos\n\n\ndef plot_network(network,\n draw_node_labels=True,\n ep_label='server',\n network_node_size=2000,\n font_size=30,\n linewidths=1,\n fig_scale=2,\n path_to_save=None, \n show_fig=False):\n '''Plots networkx graph.\n\n Recognises special fat tree network and applies appropriate node positioning,\n labelling, colouring etc.\n\n Args:\n network (networkx graph): Network object to be plotted.\n 
draw_node_labels (bool): Whether or not to draw node labels on plot. \n ep_label (str,int,float): Endpoint label (e.g. 'server'). All endpoints will have\n ep_label appended to the start of their label (e.g. 'server_0', 'server_1', ...).\n network_node_size (int,float): Size of plotted nodes.\n font_size (int,float): Size of of font of plotted labels etc.\n linewidths (int,float): Width of edges in network.\n fig_scale (int,float): Scaling factor to apply to plotted network.\n path_to_save (str): Path to directory (with file name included) in which\n to save generated plot. E.g. path_to_save='data/my_plot'\n show_fig (bool): Whether or not to plot and show fig. If True, will\n return and display fig.\n \n Returns:\n matplotlib.figure.Figure: node distribution plotted as a 2d matrix. \n\n '''\n \n net_node_positions = init_network_node_positions(copy.deepcopy(network))\n\n fig = plt.figure(figsize=[15*fig_scale,15*fig_scale])\n\n # add nodes and edges\n pos = {}\n network_nodes = []\n network_nodes_dict = get_node_type_dict(network, network.graph['node_labels'])\n for nodes in list(network_nodes_dict.values()):\n for network_node in nodes:\n pos[network_node] = net_node_positions[network_node]\n \n # network nodes\n node_colours = iter(['#25c44d', '#36a0c7', '#e8b017', '#6115a3', '#160e63']) # server, rack, edge, agg, core\n for node_type in network.graph['node_labels']:\n nx.draw_networkx_nodes(network, \n pos, \n nodelist=network_nodes_dict[node_type],\n node_size=network_node_size, \n node_color=next(node_colours), \n linewidths=linewidths, \n label=node_type)\n if draw_node_labels:\n # nodes\n nx.draw_networkx_labels(network, \n pos, \n font_size=font_size, \n font_color='k', \n font_family='sans-serif', \n font_weight='normal', \n alpha=1.0)\n \n # fibre links\n fibre_links = list(network.edges)\n nx.draw_networkx_edges(network, \n pos,\n edgelist=fibre_links,\n edge_color='k',\n width=3,\n label='Fibre link')\n\n\n if path_to_save is not None:\n tools.pickle_data(path_to_save, fig)\n\n if show_fig:\n plt.show()\n\n return fig\n\n\nif __name__ == '__main__':\n #network = gen_simple_network()\n #network = gen_nsfnet_network()\n network = gen_fat_tree(k=3)\n \n plot_network(network, 'figures/graph/',name='network_graph.png',with_labels=True)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n \n\n\n\n",
"step-ids": [
11,
12,
13,
15,
16
]
}
|
[
11,
12,
13,
15,
16
] |
import os

CSRF_ENABLED = True
basedir = os.path.abspath(os.path.dirname(__file__))

# Heroku vs. Local Configs
if os.environ.get('HEROKU') is None:
    # Database path
    SQLALCHEMY_DATABASE_URI = 'sqlite:///' + os.path.join(basedir, 'app.db')
    # CSRF Key
    SECRET_KEY = os.urandom(24)
    # Pocket API
    CONSUMER_KEY = '23571-333bb5dbab872eee6686bf86'
    # News API Credentials
    TROVE_KEY = 'E767C55D-0941-4993-BB3A-1CB81FD2B9E9'
    NYTIMES_SEARCH_KEY = 'b2f1032fbec2cb261c1e153ab6b5a6b8:13:69075429'
else:
    # Database path
    SQLALCHEMY_DATABASE_URI = os.environ['DATABASE_URL']
    # CSRF Key
    SECRET_KEY = os.environ['CSRF_SECRET_KEY']
    # Pocket API
    CONSUMER_KEY = os.environ['POCKET_KEY']
    # News API Credentials
    TROVE_KEY = os.environ['TROVE_KEY']
    NYTIMES_SEARCH_KEY = os.environ['NYTIMES_KEY']

# Path where we store the migration data files
SQLALCHEMY_MIGRATE_REPO = os.path.join(basedir, 'db_repository')
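
# --- Illustrative usage sketch (editor's addition, not part of the original file) ---
# A Flask application would normally consume this module via app.config.from_object.
# The import string 'config' below assumes this file is importable under that name.
if __name__ == '__main__':
    from flask import Flask
    _app = Flask(__name__)
    _app.config.from_object('config')
    print(_app.config['SQLALCHEMY_DATABASE_URI'])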
|
normal
|
{
"blob_id": "0656aba517023c003e837d5ad04daeb364f7fda8",
"index": 4688,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif os.environ.get('HEROKU') is None:\n SQLALCHEMY_DATABASE_URI = 'sqlite:///' + os.path.join(basedir, 'app.db')\n SECRET_KEY = os.urandom(24)\n CONSUMER_KEY = '23571-333bb5dbab872eee6686bf86'\n TROVE_KEY = 'E767C55D-0941-4993-BB3A-1CB81FD2B9E9'\n NYTIMES_SEARCH_KEY = 'b2f1032fbec2cb261c1e153ab6b5a6b8:13:69075429'\nelse:\n SQLALCHEMY_DATABASE_URI = os.environ['DATABASE_URL']\n SECRET_KEY = os.environ['CSRF_SECRET_KEY']\n CONSUMER_KEY = os.environ['POCKET_KEY']\n TROVE_KEY = os.environ['TROVE_KEY']\n NYTIMES_SEARCH_KEY = os.environ['NYTIMES_KEY']\n<mask token>\n",
"step-3": "<mask token>\nCSRF_ENABLED = True\nbasedir = os.path.abspath(os.path.dirname(__file__))\nif os.environ.get('HEROKU') is None:\n SQLALCHEMY_DATABASE_URI = 'sqlite:///' + os.path.join(basedir, 'app.db')\n SECRET_KEY = os.urandom(24)\n CONSUMER_KEY = '23571-333bb5dbab872eee6686bf86'\n TROVE_KEY = 'E767C55D-0941-4993-BB3A-1CB81FD2B9E9'\n NYTIMES_SEARCH_KEY = 'b2f1032fbec2cb261c1e153ab6b5a6b8:13:69075429'\nelse:\n SQLALCHEMY_DATABASE_URI = os.environ['DATABASE_URL']\n SECRET_KEY = os.environ['CSRF_SECRET_KEY']\n CONSUMER_KEY = os.environ['POCKET_KEY']\n TROVE_KEY = os.environ['TROVE_KEY']\n NYTIMES_SEARCH_KEY = os.environ['NYTIMES_KEY']\nSQLALCHEMY_MIGRATE_REPO = os.path.join(basedir, 'db_repository')\n",
"step-4": "import os\nCSRF_ENABLED = True\nbasedir = os.path.abspath(os.path.dirname(__file__))\nif os.environ.get('HEROKU') is None:\n SQLALCHEMY_DATABASE_URI = 'sqlite:///' + os.path.join(basedir, 'app.db')\n SECRET_KEY = os.urandom(24)\n CONSUMER_KEY = '23571-333bb5dbab872eee6686bf86'\n TROVE_KEY = 'E767C55D-0941-4993-BB3A-1CB81FD2B9E9'\n NYTIMES_SEARCH_KEY = 'b2f1032fbec2cb261c1e153ab6b5a6b8:13:69075429'\nelse:\n SQLALCHEMY_DATABASE_URI = os.environ['DATABASE_URL']\n SECRET_KEY = os.environ['CSRF_SECRET_KEY']\n CONSUMER_KEY = os.environ['POCKET_KEY']\n TROVE_KEY = os.environ['TROVE_KEY']\n NYTIMES_SEARCH_KEY = os.environ['NYTIMES_KEY']\nSQLALCHEMY_MIGRATE_REPO = os.path.join(basedir, 'db_repository')\n",
"step-5": "import os\n\nCSRF_ENABLED = True\nbasedir = os.path.abspath(os.path.dirname(__file__))\n\n# Heroku vs. Local Configs\nif os.environ.get('HEROKU') is None:\n # Database path\n SQLALCHEMY_DATABASE_URI = 'sqlite:///' + os.path.join(basedir, 'app.db')\n # CSRF Key\n SECRET_KEY = os.urandom(24)\n # Pocket API\n CONSUMER_KEY = '23571-333bb5dbab872eee6686bf86'\n # News API Credentials\n TROVE_KEY = 'E767C55D-0941-4993-BB3A-1CB81FD2B9E9'\n NYTIMES_SEARCH_KEY = 'b2f1032fbec2cb261c1e153ab6b5a6b8:13:69075429'\nelse:\n # Database path\n SQLALCHEMY_DATABASE_URI = os.environ['DATABASE_URL']\n # CSRF Key\n SECRET_KEY = os.environ['CSRF_SECRET_KEY']\n # Pocket API\n CONSUMER_KEY = os.environ['POCKET_KEY']\n # News API Credentials\n TROVE_KEY = os.environ['TROVE_KEY']\n NYTIMES_SEARCH_KEY = os.environ['NYTIMES_KEY']\n\n# Path where we store the migration data files\nSQLALCHEMY_MIGRATE_REPO = os.path.join(basedir, 'db_repository')\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from django.views import generic

from .models import Project


class IndexView(generic.ListView):
    template_name = "projects/index.html"
    context_object_name = "projectz"

    def get_queryset(self):
        """Return all projects."""
        return Project.objects.all()


class DetailView(generic.DetailView):
    model = Project
    template_name = "projects/detail.html"
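
# Illustrative wiring sketch (editor's addition): these generic class-based views
# are typically exposed through a urls.py using as_view(). The app_name and URL
# patterns below are assumptions for illustration, not part of this repository.
#
#   from django.urls import path
#   from . import views
#
#   app_name = "projects"
#   urlpatterns = [
#       path("", views.IndexView.as_view(), name="index"),
#       path("<int:pk>/", views.DetailView.as_view(), name="detail"),
#   ]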
|
normal
|
{
"blob_id": "23d15c719cd26ea67a032a91a3e73f0d8d3bcfd1",
"index": 6662,
"step-1": "<mask token>\n\n\nclass DetailView(generic.DetailView):\n model = Project\n template_name = 'projects/detail.html'\n",
"step-2": "<mask token>\n\n\nclass IndexView(generic.ListView):\n <mask token>\n <mask token>\n\n def get_queryset(self):\n \"\"\"Return all projects.\"\"\"\n return Project.objects.all()\n\n\nclass DetailView(generic.DetailView):\n model = Project\n template_name = 'projects/detail.html'\n",
"step-3": "<mask token>\n\n\nclass IndexView(generic.ListView):\n template_name = 'projects/index.html'\n context_object_name = 'projectz'\n\n def get_queryset(self):\n \"\"\"Return all projects.\"\"\"\n return Project.objects.all()\n\n\nclass DetailView(generic.DetailView):\n model = Project\n template_name = 'projects/detail.html'\n",
"step-4": "from django.views import generic\nfrom .models import Project\n\n\nclass IndexView(generic.ListView):\n template_name = 'projects/index.html'\n context_object_name = 'projectz'\n\n def get_queryset(self):\n \"\"\"Return all projects.\"\"\"\n return Project.objects.all()\n\n\nclass DetailView(generic.DetailView):\n model = Project\n template_name = 'projects/detail.html'\n",
"step-5": "from django.views import generic\n\nfrom .models import Project\n\n\nclass IndexView(generic.ListView):\n template_name = \"projects/index.html\"\n context_object_name = \"projectz\"\n\n def get_queryset(self):\n \"\"\"Return all projects.\"\"\"\n return Project.objects.all()\n\nclass DetailView(generic.DetailView):\n model = Project\n template_name = \"projects/detail.html\"",
"step-ids": [
2,
4,
5,
6,
7
]
}
|
[
2,
4,
5,
6,
7
] |
from abc import ABC, abstractmethod


class Shape(ABC):  # Shape is a child class of ABC
    @abstractmethod
    def area(self):
        pass

    @abstractmethod
    def perimeter(self):
        pass


class Square(Shape):
    def __init__(self, length):
        self.length = length


square = Square(4)
# this line raises TypeError at runtime: the abstract methods area() and
# perimeter() have not been overridden in the child class, Square
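
# Editor's sketch (not in the original example): a hypothetical subclass that
# overrides both abstract methods, so instantiation succeeds. Note the failing
# Square(4) call above would stop execution before reaching this point if the
# module were run as-is.
class FixedSquare(Shape):
    def __init__(self, length):
        self.length = length

    def area(self):
        return self.length ** 2

    def perimeter(self):
        return 4 * self.length

# FixedSquare(3).area() -> 9, FixedSquare(3).perimeter() -> 12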
|
normal
|
{
"blob_id": "520b9246c3c617b18ca57f31ff51051cc3ff51ca",
"index": 5517,
"step-1": "<mask token>\n\n\nclass Shape(ABC):\n\n @abstractmethod\n def area(self):\n pass\n <mask token>\n\n\nclass Square(Shape):\n\n def __init__(self, length):\n self.length = length\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Shape(ABC):\n\n @abstractmethod\n def area(self):\n pass\n\n @abstractmethod\n def perimeter(self):\n pass\n\n\nclass Square(Shape):\n\n def __init__(self, length):\n self.length = length\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Shape(ABC):\n\n @abstractmethod\n def area(self):\n pass\n\n @abstractmethod\n def perimeter(self):\n pass\n\n\nclass Square(Shape):\n\n def __init__(self, length):\n self.length = length\n\n\nsquare = Square(4)\n",
"step-4": "from abc import ABC, abstractmethod\n\n\nclass Shape(ABC):\n\n @abstractmethod\n def area(self):\n pass\n\n @abstractmethod\n def perimeter(self):\n pass\n\n\nclass Square(Shape):\n\n def __init__(self, length):\n self.length = length\n\n\nsquare = Square(4)\n",
"step-5": "from abc import ABC, abstractmethod\n\n\nclass Shape(ABC): # Shape is a child class of ABC\n @abstractmethod\n def area(self):\n pass\n\n @abstractmethod\n def perimeter(self):\n pass\n\n\nclass Square(Shape):\n def __init__(self, length):\n self.length = length\n\n\nsquare = Square(4)\n# this will code will not compile since abstarct methods have not been\n# defined in the child class, Square\n",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
#encoding:utf-8
from flask import Flask
import config
from flask_rabbitmq import Queue, RabbitMQ
app = Flask(__name__)
app.config.from_object(config)
queue = Queue()
mq = RabbitMQ(app, queue)
from app import demo
|
normal
|
{
"blob_id": "ccf9c389a65d1420e87deec2100e37bccdcb5539",
"index": 6323,
"step-1": "<mask token>\n",
"step-2": "<mask token>\napp.config.from_object(config)\n<mask token>\n",
"step-3": "<mask token>\napp = Flask(__name__)\napp.config.from_object(config)\nqueue = Queue()\nmq = RabbitMQ(app, queue)\n<mask token>\n",
"step-4": "from flask import Flask\nimport config\nfrom flask_rabbitmq import Queue, RabbitMQ\napp = Flask(__name__)\napp.config.from_object(config)\nqueue = Queue()\nmq = RabbitMQ(app, queue)\nfrom app import demo\n",
"step-5": "#encoding:utf-8\nfrom flask import Flask\nimport config\nfrom flask_rabbitmq import Queue, RabbitMQ\n\napp = Flask(__name__)\napp.config.from_object(config)\n\nqueue = Queue()\nmq = RabbitMQ(app, queue)\n\nfrom app import demo\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-06-07 12:30
from __future__ import unicode_literals

from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion


class Migration(migrations.Migration):

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('warhawks', '0012_auto_20180607_1815'),
        ('notification', '0002_auto_20180607_1759'),
    ]

    operations = [
        migrations.CreateModel(
            name='N_lostandfound',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('date', models.DateTimeField(auto_now_add=True)),
                ('message', models.CharField(max_length=100)),
                ('read', models.BooleanField(default=False)),
                ('from_user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='from_user_lost', to=settings.AUTH_USER_MODEL)),
                ('lf', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='warhawks.LostAndFound')),
                ('post', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='warhawks.LFComment')),
                ('to_user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='to_user_lost', to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
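
# Editor's sketch (not part of the generated migration): the models.py definition
# that a migration like this corresponds to. Field names and options are taken
# directly from the CreateModel operation above; the app/module placement is an
# assumption for illustration only.
#
#   from django.conf import settings
#   from django.db import models
#
#   class N_lostandfound(models.Model):
#       date = models.DateTimeField(auto_now_add=True)
#       message = models.CharField(max_length=100)
#       read = models.BooleanField(default=False)
#       from_user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE, related_name='from_user_lost')
#       lf = models.ForeignKey('warhawks.LostAndFound', on_delete=models.CASCADE)
#       post = models.ForeignKey('warhawks.LFComment', on_delete=models.CASCADE)
#       to_user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE, related_name='to_user_lost')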
|
normal
|
{
"blob_id": "c6c13ab24e4907eecf1db4fded28d4fc8126c834",
"index": 1170,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [migrations.swappable_dependency(settings.\n AUTH_USER_MODEL), ('warhawks', '0012_auto_20180607_1815'), (\n 'notification', '0002_auto_20180607_1759')]\n operations = [migrations.CreateModel(name='N_lostandfound', fields=[(\n 'id', models.AutoField(auto_created=True, primary_key=True,\n serialize=False, verbose_name='ID')), ('date', models.DateTimeField\n (auto_now_add=True)), ('message', models.CharField(max_length=100)),\n ('read', models.BooleanField(default=False)), ('from_user', models.\n ForeignKey(on_delete=django.db.models.deletion.CASCADE,\n related_name='from_user_lost', to=settings.AUTH_USER_MODEL)), ('lf',\n models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=\n 'warhawks.LostAndFound')), ('post', models.ForeignKey(on_delete=\n django.db.models.deletion.CASCADE, to='warhawks.LFComment')), (\n 'to_user', models.ForeignKey(on_delete=django.db.models.deletion.\n CASCADE, related_name='to_user_lost', to=settings.AUTH_USER_MODEL))])]\n",
"step-4": "from __future__ import unicode_literals\nfrom django.conf import settings\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n dependencies = [migrations.swappable_dependency(settings.\n AUTH_USER_MODEL), ('warhawks', '0012_auto_20180607_1815'), (\n 'notification', '0002_auto_20180607_1759')]\n operations = [migrations.CreateModel(name='N_lostandfound', fields=[(\n 'id', models.AutoField(auto_created=True, primary_key=True,\n serialize=False, verbose_name='ID')), ('date', models.DateTimeField\n (auto_now_add=True)), ('message', models.CharField(max_length=100)),\n ('read', models.BooleanField(default=False)), ('from_user', models.\n ForeignKey(on_delete=django.db.models.deletion.CASCADE,\n related_name='from_user_lost', to=settings.AUTH_USER_MODEL)), ('lf',\n models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=\n 'warhawks.LostAndFound')), ('post', models.ForeignKey(on_delete=\n django.db.models.deletion.CASCADE, to='warhawks.LFComment')), (\n 'to_user', models.ForeignKey(on_delete=django.db.models.deletion.\n CASCADE, related_name='to_user_lost', to=settings.AUTH_USER_MODEL))])]\n",
"step-5": "# -*- coding: utf-8 -*-\n# Generated by Django 1.11 on 2018-06-07 12:30\nfrom __future__ import unicode_literals\n\nfrom django.conf import settings\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ('warhawks', '0012_auto_20180607_1815'),\n ('notification', '0002_auto_20180607_1759'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='N_lostandfound',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('date', models.DateTimeField(auto_now_add=True)),\n ('message', models.CharField(max_length=100)),\n ('read', models.BooleanField(default=False)),\n ('from_user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='from_user_lost', to=settings.AUTH_USER_MODEL)),\n ('lf', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='warhawks.LostAndFound')),\n ('post', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='warhawks.LFComment')),\n ('to_user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='to_user_lost', to=settings.AUTH_USER_MODEL)),\n ],\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# Robot, Actions and gamepad are assumed to be provided by the robot runtime.
import time

left_motor = 1563872856371375
right_motor = 7567382956378165
servo = 9275392915737265


def autonomous_setup():
    print("Autonomous mode has started!")
    Robot.run(autonomous_actions)


def autonomous_main():
    pass


async def autonomous_actions():
    print("Autonomous action sequence started")
    await Actions.sleep(1.0)
    print("1 second has passed in autonomous mode")


def teleop_setup():
    print("Tele-operated mode has started!")


def move_arm():
    # Raise the arm servo, hold it for two seconds, then lower it.
    # Assumption: the arm is the `servo` device, writes go through
    # Robot.set_value, and the parameter is named "servo0"; the original
    # called Robot.get_value (a read) on left_motor with three arguments.
    Robot.set_value(servo, "servo0", 1)
    time.sleep(2)
    Robot.set_value(servo, "servo0", 0)


def teleop_main():
    # teleop_main is typically invoked repeatedly by the runtime, so each call
    # applies the drive command matching the current gamepad state (the
    # original wrapped each branch in a blocking `while True` loop).
    if gamepad.get_value("r_trigger") > 0.5:
        # move forward
        Robot.set_value(left_motor, "duty_cycle", -1.0)
        Robot.set_value(right_motor, "duty_cycle", -1.0)
    elif gamepad.get_value("l_trigger") > 0.5:
        # move backward
        Robot.set_value(left_motor, "duty_cycle", 1.0)
        Robot.set_value(right_motor, "duty_cycle", 1.0)
    elif 1.0 > gamepad.get_value("joystick_left_y") > 0.75:
        # turn right
        Robot.set_value(left_motor, "duty_cycle", 1.0)
        Robot.set_value(right_motor, "duty_cycle", -1.0)
        time.sleep(1)
    elif 1.0 > gamepad.get_value("joystick_right_y") > 0.75:
        # turn left
        Robot.set_value(left_motor, "duty_cycle", -1.0)
        Robot.set_value(right_motor, "duty_cycle", 1.0)
        time.sleep(1)
    if gamepad.get_value("button_a") == 1:
        move_arm()
|
normal
|
{
"blob_id": "a2d23c05e1ca04d25f5f5012881c4000e6316cb9",
"index": 2504,
"step-1": "left_motor = 1563872856371375\nright_motor = 7567382956378165\nservo = 9275392915737265\n\ndef autonomous_setup():\n print(\"Autonomous mode has started!\")\n Robot.run(autonomous_actions)\n\ndef autonomous_main():\n pass\n\nasync def autonomous_actions():\n print(\"Autonomous action sequence started\")\n await Actions.sleep(1.0)\n print(\"1 second has passed in autonomous mode\")\n\ndef teleop_setup():\n print(\"Tele-operated mode has started!\")\n\ndef move_arm();\n Robot.get_value(left_motor, serv0, 1)\n time.sleep(2)\n Robot.get_value(left_motor, serv0, 0)\n\ndef teleop_main():\n if gamepad.get_value(\"r_trigger\") > 0.5:\n while True:\n # move forward\n Robot.get_value(left_motor, \"duty_cycle\", -1.0)\n Robot.get_value(right_motor, \"duty_cycle\", -1.0)\n else if gamepad.get_value(\"l_trigger\") > 0.5:\n while True:\n # move backward\n Robot.get_value(left_motor, \"duty_cycle\", 1.0)\n Robot.get_value(right_motor, \"duty_cycle\", 1.0)\n else if 1.0 > gamepad.get_value(\"joystick_left_y\") > 0.75\n while True:\n # turn right\n Robot.get_value(left_motor, \"duty_cycle\", 1.0)\n Robot.get_value(right_motor, \"duty_cycle\", -1.0)\n time.sleep(1)\n else if 1.0 > gamepad.get_value(\"joystick_right_y\") > 0.75)\n while True:\n # turn left\n Robot.get_value(left_motor, \"duty_cycle\", -1.0)\n Robot.get_value(right_motor, \"duty_cycle\", 1.0)\n time.sleep(1)\n if gamepad.get_vlue(\"button_a\") == 1:\n move_arm()\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
import socket
import struct
def parsing_ethernet_header(data):
ethernet_header=struct.unpack("!6c6c2s",data)
ether_dest = convert_ethernet_address(ethernet_header[0:6])
ether_src = convert_ethernet_address(ethernet_header[6:12])
ip_header="0x"+ethernet_header[12].hex()
print("=========ethernet header==========")
print("src_mac_address:", ether_src)
print("dest_mac_address:",ether_dest)
print("ip_version",ip_header)
def convert_ethernet_address(data):
ethernet_addr =list()
for i in data:
ethernet_addr.append(i.hex())
ethernet_addr=":".join(ethernet_addr)
return ethernet_addr
def parsing_ip_header(data):
ip_header=struct.unpack("!1c1c2s2s2s1c1c2s4c4c",data)
print("============ip header=============")
ip_ver_len= int(ip_header[0].hex(), 16)
print("ip_version:",ip_ver_len // 16)
print("ip_length:", ip_ver_len % 16)
differ_expli=int(ip_header[1].hex(),16)
print("differentiated_service_codepoint:",differ_expli//16)
print("explicit_congestion_notification:",differ_expli%16)
total_length=int(ip_header[2].hex(),16)
print("total_length:",total_length)
identification=ip_header[3].hex()
print("identification:0x",identification)
flags=ip_header[4].hex()
print("flags:0x",flags)
flags_int=int(ip_header[4].hex(),16)
print(">>>reserved_bit:",flags_int>>15)
print(">>>fragments:",(flags_int>>13)& 0x0001)
print(">>>fragments_offset:",flags_int & 0x1fff)
time_to_live=int(ip_header[5].hex(),16)
print("Time to live:",time_to_live)
protocol=ip_header[6].hex()
print("protocol:0x",protocol)
header_check=ip_header[7].hex()
print("header checksum:0x",header_check)
source_addr=convert_ip_address(ip_header[8:12])
print("source_ip_address:",source_addr)
dest_addr=convert_ip_address(ip_header[12:16])
print("dest_ip_address:",dest_addr)
def ch_UDP_TCP(data):
temp=struct.unpack("1c",data)
result=int(temp[0].hex(),16)
return result
def convert_ip_address(data):
ip_addr=list()
for i in data:
ip_addr.append(str(int(i.hex(),16)) )
ip_addr=".".join(ip_addr)
return ip_addr
def parsing_TCP_header(data):
print("=============tcp header==============")
TCP_header=struct.unpack("!2s2s1I1I2s2s2s2s",data)
src_port=int(TCP_header[0].hex(),16)
print("src_port:",src_port)
dec_port=int(TCP_header[1].hex(),16)
print("dec_port:",dec_port)
seq_num=TCP_header[2]
print("seq_num:",seq_num)
ack_num=TCP_header[3]
print("ack_num:",ack_num)
header_len=(int(TCP_header[4].hex(),16)>>12)&0x000f
print("header_len:",header_len)
flags=int(TCP_header[4].hex(),16)&0x0fff
print("flags:",flags)
reserved=flags>>9
print(">>>reserved",reserved)
nonce=(flags>>8)&0x001
print(">>>nonce:",nonce)
cwr=(flags>>7)&0x001
print(">>>cwr:",cwr)
urgent=(flags>>5)&0x001
print(">>>urgent:",urgent)
ack=(flags>>4)&0x001
print(">>>ack:",ack)
push=(flags>>3)&0x001
print(">>>push:",push)
reset=(flags>>2)&0x001
print(">>>reset:",reset)
syn=(flags>>1)&0x001
print(">>>syn:",syn)
fin=flags&0x001
print(">>>fin:",fin)
window_size=int(TCP_header[5].hex(),16)
print("Window_size_value:",window_size)
checksum=int(TCP_header[6].hex(),16)
print("checksum:",checksum)
urgent_pointer=int(TCP_header[7].hex(),16)
print("urgent_pointer:",urgent_pointer)
def parsing_UDP_header(data):
UDP_header=struct.unpack("2s2s2s2s",data)
print("=============udp_header=============")
src_port=int(UDP_header[0].hex(),16)
print("src_port:",src_port)
dst_port=int(UDP_header[1].hex(),16)
print("dst_port:",dst_port)
leng=int(UDP_header[2].hex(),16)
print("leng:",leng)
header_checksum=UDP_header[3].hex()
print("header_checksum:0x",header_checksum)
recv_socket = socket.socket(socket.AF_PACKET,socket.SOCK_RAW,socket.ntohs(0x0800))
print("<<<<<<Packet Capture Start>>>>>>>")
while True:
data = recv_socket.recvfrom(20000)
parsing_ethernet_header(data[0][0:14])
parsing_ip_header(data[0][14:34])
flag =ch_UDP_TCP(data[0][23:24])
if flag==6:
parsing_TCP_header(data[0][34:54])
elif flag==17:
parsing_UDP_header(data[0][34:42])
|
normal
|
{
"blob_id": "9b715fb95e89804a57ea77a98face673b57220c6",
"index": 4494,
"step-1": "<mask token>\n\n\ndef parsing_ethernet_header(data):\n ethernet_header = struct.unpack('!6c6c2s', data)\n ether_dest = convert_ethernet_address(ethernet_header[0:6])\n ether_src = convert_ethernet_address(ethernet_header[6:12])\n ip_header = '0x' + ethernet_header[12].hex()\n print('=========ethernet header==========')\n print('src_mac_address:', ether_src)\n print('dest_mac_address:', ether_dest)\n print('ip_version', ip_header)\n\n\ndef convert_ethernet_address(data):\n ethernet_addr = list()\n for i in data:\n ethernet_addr.append(i.hex())\n ethernet_addr = ':'.join(ethernet_addr)\n return ethernet_addr\n\n\ndef parsing_ip_header(data):\n ip_header = struct.unpack('!1c1c2s2s2s1c1c2s4c4c', data)\n print('============ip header=============')\n ip_ver_len = int(ip_header[0].hex(), 16)\n print('ip_version:', ip_ver_len // 16)\n print('ip_length:', ip_ver_len % 16)\n differ_expli = int(ip_header[1].hex(), 16)\n print('differentiated_service_codepoint:', differ_expli // 16)\n print('explicit_congestion_notification:', differ_expli % 16)\n total_length = int(ip_header[2].hex(), 16)\n print('total_length:', total_length)\n identification = ip_header[3].hex()\n print('identification:0x', identification)\n flags = ip_header[4].hex()\n print('flags:0x', flags)\n flags_int = int(ip_header[4].hex(), 16)\n print('>>>reserved_bit:', flags_int >> 15)\n print('>>>fragments:', flags_int >> 13 & 1)\n print('>>>fragments_offset:', flags_int & 8191)\n time_to_live = int(ip_header[5].hex(), 16)\n print('Time to live:', time_to_live)\n protocol = ip_header[6].hex()\n print('protocol:0x', protocol)\n header_check = ip_header[7].hex()\n print('header checksum:0x', header_check)\n source_addr = convert_ip_address(ip_header[8:12])\n print('source_ip_address:', source_addr)\n dest_addr = convert_ip_address(ip_header[12:16])\n print('dest_ip_address:', dest_addr)\n\n\ndef ch_UDP_TCP(data):\n temp = struct.unpack('1c', data)\n result = int(temp[0].hex(), 16)\n return result\n\n\ndef convert_ip_address(data):\n ip_addr = list()\n for i in data:\n ip_addr.append(str(int(i.hex(), 16)))\n ip_addr = '.'.join(ip_addr)\n return ip_addr\n\n\ndef parsing_TCP_header(data):\n print('=============tcp header==============')\n TCP_header = struct.unpack('!2s2s1I1I2s2s2s2s', data)\n src_port = int(TCP_header[0].hex(), 16)\n print('src_port:', src_port)\n dec_port = int(TCP_header[1].hex(), 16)\n print('dec_port:', dec_port)\n seq_num = TCP_header[2]\n print('seq_num:', seq_num)\n ack_num = TCP_header[3]\n print('ack_num:', ack_num)\n header_len = int(TCP_header[4].hex(), 16) >> 12 & 15\n print('header_len:', header_len)\n flags = int(TCP_header[4].hex(), 16) & 4095\n print('flags:', flags)\n reserved = flags >> 9\n print('>>>reserved', reserved)\n nonce = flags >> 8 & 1\n print('>>>nonce:', nonce)\n cwr = flags >> 7 & 1\n print('>>>cwr:', cwr)\n urgent = flags >> 5 & 1\n print('>>>urgent:', urgent)\n ack = flags >> 4 & 1\n print('>>>ack:', ack)\n push = flags >> 3 & 1\n print('>>>push:', push)\n reset = flags >> 2 & 1\n print('>>>reset:', reset)\n syn = flags >> 1 & 1\n print('>>>syn:', syn)\n fin = flags & 1\n print('>>>fin:', fin)\n window_size = int(TCP_header[5].hex(), 16)\n print('Window_size_value:', window_size)\n checksum = int(TCP_header[6].hex(), 16)\n print('checksum:', checksum)\n urgent_pointer = int(TCP_header[7].hex(), 16)\n print('urgent_pointer:', urgent_pointer)\n\n\ndef parsing_UDP_header(data):\n UDP_header = struct.unpack('2s2s2s2s', data)\n print('=============udp_header=============')\n src_port = 
int(UDP_header[0].hex(), 16)\n print('src_port:', src_port)\n dst_port = int(UDP_header[1].hex(), 16)\n print('dst_port:', dst_port)\n leng = int(UDP_header[2].hex(), 16)\n print('leng:', leng)\n header_checksum = UDP_header[3].hex()\n print('header_checksum:0x', header_checksum)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef parsing_ethernet_header(data):\n ethernet_header = struct.unpack('!6c6c2s', data)\n ether_dest = convert_ethernet_address(ethernet_header[0:6])\n ether_src = convert_ethernet_address(ethernet_header[6:12])\n ip_header = '0x' + ethernet_header[12].hex()\n print('=========ethernet header==========')\n print('src_mac_address:', ether_src)\n print('dest_mac_address:', ether_dest)\n print('ip_version', ip_header)\n\n\ndef convert_ethernet_address(data):\n ethernet_addr = list()\n for i in data:\n ethernet_addr.append(i.hex())\n ethernet_addr = ':'.join(ethernet_addr)\n return ethernet_addr\n\n\ndef parsing_ip_header(data):\n ip_header = struct.unpack('!1c1c2s2s2s1c1c2s4c4c', data)\n print('============ip header=============')\n ip_ver_len = int(ip_header[0].hex(), 16)\n print('ip_version:', ip_ver_len // 16)\n print('ip_length:', ip_ver_len % 16)\n differ_expli = int(ip_header[1].hex(), 16)\n print('differentiated_service_codepoint:', differ_expli // 16)\n print('explicit_congestion_notification:', differ_expli % 16)\n total_length = int(ip_header[2].hex(), 16)\n print('total_length:', total_length)\n identification = ip_header[3].hex()\n print('identification:0x', identification)\n flags = ip_header[4].hex()\n print('flags:0x', flags)\n flags_int = int(ip_header[4].hex(), 16)\n print('>>>reserved_bit:', flags_int >> 15)\n print('>>>fragments:', flags_int >> 13 & 1)\n print('>>>fragments_offset:', flags_int & 8191)\n time_to_live = int(ip_header[5].hex(), 16)\n print('Time to live:', time_to_live)\n protocol = ip_header[6].hex()\n print('protocol:0x', protocol)\n header_check = ip_header[7].hex()\n print('header checksum:0x', header_check)\n source_addr = convert_ip_address(ip_header[8:12])\n print('source_ip_address:', source_addr)\n dest_addr = convert_ip_address(ip_header[12:16])\n print('dest_ip_address:', dest_addr)\n\n\ndef ch_UDP_TCP(data):\n temp = struct.unpack('1c', data)\n result = int(temp[0].hex(), 16)\n return result\n\n\ndef convert_ip_address(data):\n ip_addr = list()\n for i in data:\n ip_addr.append(str(int(i.hex(), 16)))\n ip_addr = '.'.join(ip_addr)\n return ip_addr\n\n\ndef parsing_TCP_header(data):\n print('=============tcp header==============')\n TCP_header = struct.unpack('!2s2s1I1I2s2s2s2s', data)\n src_port = int(TCP_header[0].hex(), 16)\n print('src_port:', src_port)\n dec_port = int(TCP_header[1].hex(), 16)\n print('dec_port:', dec_port)\n seq_num = TCP_header[2]\n print('seq_num:', seq_num)\n ack_num = TCP_header[3]\n print('ack_num:', ack_num)\n header_len = int(TCP_header[4].hex(), 16) >> 12 & 15\n print('header_len:', header_len)\n flags = int(TCP_header[4].hex(), 16) & 4095\n print('flags:', flags)\n reserved = flags >> 9\n print('>>>reserved', reserved)\n nonce = flags >> 8 & 1\n print('>>>nonce:', nonce)\n cwr = flags >> 7 & 1\n print('>>>cwr:', cwr)\n urgent = flags >> 5 & 1\n print('>>>urgent:', urgent)\n ack = flags >> 4 & 1\n print('>>>ack:', ack)\n push = flags >> 3 & 1\n print('>>>push:', push)\n reset = flags >> 2 & 1\n print('>>>reset:', reset)\n syn = flags >> 1 & 1\n print('>>>syn:', syn)\n fin = flags & 1\n print('>>>fin:', fin)\n window_size = int(TCP_header[5].hex(), 16)\n print('Window_size_value:', window_size)\n checksum = int(TCP_header[6].hex(), 16)\n print('checksum:', checksum)\n urgent_pointer = int(TCP_header[7].hex(), 16)\n print('urgent_pointer:', urgent_pointer)\n\n\ndef parsing_UDP_header(data):\n UDP_header = struct.unpack('2s2s2s2s', data)\n print('=============udp_header=============')\n src_port = 
int(UDP_header[0].hex(), 16)\n print('src_port:', src_port)\n dst_port = int(UDP_header[1].hex(), 16)\n print('dst_port:', dst_port)\n leng = int(UDP_header[2].hex(), 16)\n print('leng:', leng)\n header_checksum = UDP_header[3].hex()\n print('header_checksum:0x', header_checksum)\n\n\n<mask token>\nprint('<<<<<<Packet Capture Start>>>>>>>')\nwhile True:\n data = recv_socket.recvfrom(20000)\n parsing_ethernet_header(data[0][0:14])\n parsing_ip_header(data[0][14:34])\n flag = ch_UDP_TCP(data[0][23:24])\n if flag == 6:\n parsing_TCP_header(data[0][34:54])\n elif flag == 17:\n parsing_UDP_header(data[0][34:42])\n",
"step-3": "<mask token>\n\n\ndef parsing_ethernet_header(data):\n ethernet_header = struct.unpack('!6c6c2s', data)\n ether_dest = convert_ethernet_address(ethernet_header[0:6])\n ether_src = convert_ethernet_address(ethernet_header[6:12])\n ip_header = '0x' + ethernet_header[12].hex()\n print('=========ethernet header==========')\n print('src_mac_address:', ether_src)\n print('dest_mac_address:', ether_dest)\n print('ip_version', ip_header)\n\n\ndef convert_ethernet_address(data):\n ethernet_addr = list()\n for i in data:\n ethernet_addr.append(i.hex())\n ethernet_addr = ':'.join(ethernet_addr)\n return ethernet_addr\n\n\ndef parsing_ip_header(data):\n ip_header = struct.unpack('!1c1c2s2s2s1c1c2s4c4c', data)\n print('============ip header=============')\n ip_ver_len = int(ip_header[0].hex(), 16)\n print('ip_version:', ip_ver_len // 16)\n print('ip_length:', ip_ver_len % 16)\n differ_expli = int(ip_header[1].hex(), 16)\n print('differentiated_service_codepoint:', differ_expli // 16)\n print('explicit_congestion_notification:', differ_expli % 16)\n total_length = int(ip_header[2].hex(), 16)\n print('total_length:', total_length)\n identification = ip_header[3].hex()\n print('identification:0x', identification)\n flags = ip_header[4].hex()\n print('flags:0x', flags)\n flags_int = int(ip_header[4].hex(), 16)\n print('>>>reserved_bit:', flags_int >> 15)\n print('>>>fragments:', flags_int >> 13 & 1)\n print('>>>fragments_offset:', flags_int & 8191)\n time_to_live = int(ip_header[5].hex(), 16)\n print('Time to live:', time_to_live)\n protocol = ip_header[6].hex()\n print('protocol:0x', protocol)\n header_check = ip_header[7].hex()\n print('header checksum:0x', header_check)\n source_addr = convert_ip_address(ip_header[8:12])\n print('source_ip_address:', source_addr)\n dest_addr = convert_ip_address(ip_header[12:16])\n print('dest_ip_address:', dest_addr)\n\n\ndef ch_UDP_TCP(data):\n temp = struct.unpack('1c', data)\n result = int(temp[0].hex(), 16)\n return result\n\n\ndef convert_ip_address(data):\n ip_addr = list()\n for i in data:\n ip_addr.append(str(int(i.hex(), 16)))\n ip_addr = '.'.join(ip_addr)\n return ip_addr\n\n\ndef parsing_TCP_header(data):\n print('=============tcp header==============')\n TCP_header = struct.unpack('!2s2s1I1I2s2s2s2s', data)\n src_port = int(TCP_header[0].hex(), 16)\n print('src_port:', src_port)\n dec_port = int(TCP_header[1].hex(), 16)\n print('dec_port:', dec_port)\n seq_num = TCP_header[2]\n print('seq_num:', seq_num)\n ack_num = TCP_header[3]\n print('ack_num:', ack_num)\n header_len = int(TCP_header[4].hex(), 16) >> 12 & 15\n print('header_len:', header_len)\n flags = int(TCP_header[4].hex(), 16) & 4095\n print('flags:', flags)\n reserved = flags >> 9\n print('>>>reserved', reserved)\n nonce = flags >> 8 & 1\n print('>>>nonce:', nonce)\n cwr = flags >> 7 & 1\n print('>>>cwr:', cwr)\n urgent = flags >> 5 & 1\n print('>>>urgent:', urgent)\n ack = flags >> 4 & 1\n print('>>>ack:', ack)\n push = flags >> 3 & 1\n print('>>>push:', push)\n reset = flags >> 2 & 1\n print('>>>reset:', reset)\n syn = flags >> 1 & 1\n print('>>>syn:', syn)\n fin = flags & 1\n print('>>>fin:', fin)\n window_size = int(TCP_header[5].hex(), 16)\n print('Window_size_value:', window_size)\n checksum = int(TCP_header[6].hex(), 16)\n print('checksum:', checksum)\n urgent_pointer = int(TCP_header[7].hex(), 16)\n print('urgent_pointer:', urgent_pointer)\n\n\ndef parsing_UDP_header(data):\n UDP_header = struct.unpack('2s2s2s2s', data)\n print('=============udp_header=============')\n src_port = 
int(UDP_header[0].hex(), 16)\n print('src_port:', src_port)\n dst_port = int(UDP_header[1].hex(), 16)\n print('dst_port:', dst_port)\n leng = int(UDP_header[2].hex(), 16)\n print('leng:', leng)\n header_checksum = UDP_header[3].hex()\n print('header_checksum:0x', header_checksum)\n\n\nrecv_socket = socket.socket(socket.AF_PACKET, socket.SOCK_RAW, socket.ntohs\n (2048))\nprint('<<<<<<Packet Capture Start>>>>>>>')\nwhile True:\n data = recv_socket.recvfrom(20000)\n parsing_ethernet_header(data[0][0:14])\n parsing_ip_header(data[0][14:34])\n flag = ch_UDP_TCP(data[0][23:24])\n if flag == 6:\n parsing_TCP_header(data[0][34:54])\n elif flag == 17:\n parsing_UDP_header(data[0][34:42])\n",
"step-4": "import socket\nimport struct\n\n\ndef parsing_ethernet_header(data):\n ethernet_header = struct.unpack('!6c6c2s', data)\n ether_dest = convert_ethernet_address(ethernet_header[0:6])\n ether_src = convert_ethernet_address(ethernet_header[6:12])\n ip_header = '0x' + ethernet_header[12].hex()\n print('=========ethernet header==========')\n print('src_mac_address:', ether_src)\n print('dest_mac_address:', ether_dest)\n print('ip_version', ip_header)\n\n\ndef convert_ethernet_address(data):\n ethernet_addr = list()\n for i in data:\n ethernet_addr.append(i.hex())\n ethernet_addr = ':'.join(ethernet_addr)\n return ethernet_addr\n\n\ndef parsing_ip_header(data):\n ip_header = struct.unpack('!1c1c2s2s2s1c1c2s4c4c', data)\n print('============ip header=============')\n ip_ver_len = int(ip_header[0].hex(), 16)\n print('ip_version:', ip_ver_len // 16)\n print('ip_length:', ip_ver_len % 16)\n differ_expli = int(ip_header[1].hex(), 16)\n print('differentiated_service_codepoint:', differ_expli // 16)\n print('explicit_congestion_notification:', differ_expli % 16)\n total_length = int(ip_header[2].hex(), 16)\n print('total_length:', total_length)\n identification = ip_header[3].hex()\n print('identification:0x', identification)\n flags = ip_header[4].hex()\n print('flags:0x', flags)\n flags_int = int(ip_header[4].hex(), 16)\n print('>>>reserved_bit:', flags_int >> 15)\n print('>>>fragments:', flags_int >> 13 & 1)\n print('>>>fragments_offset:', flags_int & 8191)\n time_to_live = int(ip_header[5].hex(), 16)\n print('Time to live:', time_to_live)\n protocol = ip_header[6].hex()\n print('protocol:0x', protocol)\n header_check = ip_header[7].hex()\n print('header checksum:0x', header_check)\n source_addr = convert_ip_address(ip_header[8:12])\n print('source_ip_address:', source_addr)\n dest_addr = convert_ip_address(ip_header[12:16])\n print('dest_ip_address:', dest_addr)\n\n\ndef ch_UDP_TCP(data):\n temp = struct.unpack('1c', data)\n result = int(temp[0].hex(), 16)\n return result\n\n\ndef convert_ip_address(data):\n ip_addr = list()\n for i in data:\n ip_addr.append(str(int(i.hex(), 16)))\n ip_addr = '.'.join(ip_addr)\n return ip_addr\n\n\ndef parsing_TCP_header(data):\n print('=============tcp header==============')\n TCP_header = struct.unpack('!2s2s1I1I2s2s2s2s', data)\n src_port = int(TCP_header[0].hex(), 16)\n print('src_port:', src_port)\n dec_port = int(TCP_header[1].hex(), 16)\n print('dec_port:', dec_port)\n seq_num = TCP_header[2]\n print('seq_num:', seq_num)\n ack_num = TCP_header[3]\n print('ack_num:', ack_num)\n header_len = int(TCP_header[4].hex(), 16) >> 12 & 15\n print('header_len:', header_len)\n flags = int(TCP_header[4].hex(), 16) & 4095\n print('flags:', flags)\n reserved = flags >> 9\n print('>>>reserved', reserved)\n nonce = flags >> 8 & 1\n print('>>>nonce:', nonce)\n cwr = flags >> 7 & 1\n print('>>>cwr:', cwr)\n urgent = flags >> 5 & 1\n print('>>>urgent:', urgent)\n ack = flags >> 4 & 1\n print('>>>ack:', ack)\n push = flags >> 3 & 1\n print('>>>push:', push)\n reset = flags >> 2 & 1\n print('>>>reset:', reset)\n syn = flags >> 1 & 1\n print('>>>syn:', syn)\n fin = flags & 1\n print('>>>fin:', fin)\n window_size = int(TCP_header[5].hex(), 16)\n print('Window_size_value:', window_size)\n checksum = int(TCP_header[6].hex(), 16)\n print('checksum:', checksum)\n urgent_pointer = int(TCP_header[7].hex(), 16)\n print('urgent_pointer:', urgent_pointer)\n\n\ndef parsing_UDP_header(data):\n UDP_header = struct.unpack('2s2s2s2s', data)\n 
print('=============udp_header=============')\n src_port = int(UDP_header[0].hex(), 16)\n print('src_port:', src_port)\n dst_port = int(UDP_header[1].hex(), 16)\n print('dst_port:', dst_port)\n leng = int(UDP_header[2].hex(), 16)\n print('leng:', leng)\n header_checksum = UDP_header[3].hex()\n print('header_checksum:0x', header_checksum)\n\n\nrecv_socket = socket.socket(socket.AF_PACKET, socket.SOCK_RAW, socket.ntohs\n (2048))\nprint('<<<<<<Packet Capture Start>>>>>>>')\nwhile True:\n data = recv_socket.recvfrom(20000)\n parsing_ethernet_header(data[0][0:14])\n parsing_ip_header(data[0][14:34])\n flag = ch_UDP_TCP(data[0][23:24])\n if flag == 6:\n parsing_TCP_header(data[0][34:54])\n elif flag == 17:\n parsing_UDP_header(data[0][34:42])\n",
"step-5": "import socket\nimport struct\n\ndef parsing_ethernet_header(data):\n ethernet_header=struct.unpack(\"!6c6c2s\",data)\n ether_dest = convert_ethernet_address(ethernet_header[0:6])\n ether_src = convert_ethernet_address(ethernet_header[6:12])\n ip_header=\"0x\"+ethernet_header[12].hex()\n\n print(\"=========ethernet header==========\")\n print(\"src_mac_address:\", ether_src)\n print(\"dest_mac_address:\",ether_dest)\n print(\"ip_version\",ip_header)\n\ndef convert_ethernet_address(data):\n ethernet_addr =list()\n for i in data:\n ethernet_addr.append(i.hex())\n ethernet_addr=\":\".join(ethernet_addr)\n return ethernet_addr\n\n\ndef parsing_ip_header(data):\n ip_header=struct.unpack(\"!1c1c2s2s2s1c1c2s4c4c\",data)\n \n print(\"============ip header=============\")\n \n ip_ver_len= int(ip_header[0].hex(), 16)\n print(\"ip_version:\",ip_ver_len // 16)\n print(\"ip_length:\", ip_ver_len % 16)\n\n differ_expli=int(ip_header[1].hex(),16)\n print(\"differentiated_service_codepoint:\",differ_expli//16)\n print(\"explicit_congestion_notification:\",differ_expli%16)\n\n total_length=int(ip_header[2].hex(),16)\n print(\"total_length:\",total_length)\n \n identification=ip_header[3].hex()\n print(\"identification:0x\",identification)\n\n flags=ip_header[4].hex()\n print(\"flags:0x\",flags)\n flags_int=int(ip_header[4].hex(),16)\n print(\">>>reserved_bit:\",flags_int>>15)\n print(\">>>fragments:\",(flags_int>>13)& 0x0001)\n print(\">>>fragments_offset:\",flags_int & 0x1fff)\n\n\n time_to_live=int(ip_header[5].hex(),16)\n print(\"Time to live:\",time_to_live)\n\n protocol=ip_header[6].hex()\n print(\"protocol:0x\",protocol)\n\n header_check=ip_header[7].hex()\n print(\"header checksum:0x\",header_check)\n\n source_addr=convert_ip_address(ip_header[8:12])\n print(\"source_ip_address:\",source_addr)\n\n dest_addr=convert_ip_address(ip_header[12:16])\n print(\"dest_ip_address:\",dest_addr)\n\ndef ch_UDP_TCP(data):\n temp=struct.unpack(\"1c\",data)\n result=int(temp[0].hex(),16)\n return result\n\n\ndef convert_ip_address(data):\n ip_addr=list()\n for i in data:\n ip_addr.append(str(int(i.hex(),16)) ) \n ip_addr=\".\".join(ip_addr)\n return ip_addr\n\ndef parsing_TCP_header(data):\n print(\"=============tcp header==============\")\n TCP_header=struct.unpack(\"!2s2s1I1I2s2s2s2s\",data)\n\n src_port=int(TCP_header[0].hex(),16)\n print(\"src_port:\",src_port)\n\n dec_port=int(TCP_header[1].hex(),16)\n print(\"dec_port:\",dec_port)\n\n seq_num=TCP_header[2]\n print(\"seq_num:\",seq_num)\n\n ack_num=TCP_header[3]\n print(\"ack_num:\",ack_num)\n\n header_len=(int(TCP_header[4].hex(),16)>>12)&0x000f\n print(\"header_len:\",header_len)\n\n flags=int(TCP_header[4].hex(),16)&0x0fff\n print(\"flags:\",flags)\n\n reserved=flags>>9\n print(\">>>reserved\",reserved)\n\n nonce=(flags>>8)&0x001\n print(\">>>nonce:\",nonce)\n\n cwr=(flags>>7)&0x001\n print(\">>>cwr:\",cwr)\n\n urgent=(flags>>5)&0x001\n print(\">>>urgent:\",urgent)\n\n ack=(flags>>4)&0x001\n print(\">>>ack:\",ack)\n\n push=(flags>>3)&0x001\n print(\">>>push:\",push)\n\n reset=(flags>>2)&0x001\n print(\">>>reset:\",reset)\n\n syn=(flags>>1)&0x001\n print(\">>>syn:\",syn)\n\n fin=flags&0x001\n print(\">>>fin:\",fin)\n\n window_size=int(TCP_header[5].hex(),16)\n print(\"Window_size_value:\",window_size)\n\n checksum=int(TCP_header[6].hex(),16)\n print(\"checksum:\",checksum)\n\n urgent_pointer=int(TCP_header[7].hex(),16)\n print(\"urgent_pointer:\",urgent_pointer)\n\ndef parsing_UDP_header(data):\n UDP_header=struct.unpack(\"2s2s2s2s\",data)\n 
print(\"=============udp_header=============\")\n\n src_port=int(UDP_header[0].hex(),16)\n print(\"src_port:\",src_port)\n\n dst_port=int(UDP_header[1].hex(),16)\n print(\"dst_port:\",dst_port)\n\n leng=int(UDP_header[2].hex(),16)\n print(\"leng:\",leng)\n\n header_checksum=UDP_header[3].hex()\n print(\"header_checksum:0x\",header_checksum)\n\n\n\nrecv_socket = socket.socket(socket.AF_PACKET,socket.SOCK_RAW,socket.ntohs(0x0800))\n\nprint(\"<<<<<<Packet Capture Start>>>>>>>\")\n\nwhile True:\n \n data = recv_socket.recvfrom(20000)\n parsing_ethernet_header(data[0][0:14])\n parsing_ip_header(data[0][14:34])\n\n flag =ch_UDP_TCP(data[0][23:24])\n \n if flag==6:\n parsing_TCP_header(data[0][34:54])\n\n elif flag==17:\n parsing_UDP_header(data[0][34:42])\n \n \n",
"step-ids": [
7,
8,
9,
10,
11
]
}
|
[
7,
8,
9,
10,
11
] |
"""social_website URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from django.contrib.auth.views import (password_reset, password_reset_done, password_reset_complete,
password_reset_confirm, password_change, password_change_done)
from django.conf import settings
from django.conf.urls.static import static
from account.views import dashboard
urlpatterns = [
path('admin/', admin.site.urls),
path('account/', include('account.urls'), name='account'),
path('images/', include('images.urls', namespace='images')),
path('password_reset/', password_reset, {'template_name': 'registration/password_reset.html'}, name='password_reset'),
path('password_reset/done/', password_reset_done, name='password_reset_done'),
path('password_reset/confirm/<str:uidb64>/<str:token>/', password_reset_confirm, name='password_reset_confirm'),
path('password_reset/complete/', password_reset_complete, name='password_reset_complete'),
path('password_change/', password_change, name='password_change'),
path('password_change/done/', password_change_done, name='password_change_done'),
path('', dashboard, name='dashboard'),
path('social-auth/', include('social_django.urls', namespace='social')),
path('api/accounts/', include('account.api.urls', namespace='api-accounts')),
path('api/images/', include('images.api.urls', namespace='api-images')),
]
if settings.DEBUG:
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
normal
|
{
"blob_id": "bf1221bc9768cff2edb67e0e5f5cea0ee2dd64e5",
"index": 7740,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif settings.DEBUG:\n urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT\n )\n",
"step-3": "<mask token>\nurlpatterns = [path('admin/', admin.site.urls), path('account/', include(\n 'account.urls'), name='account'), path('images/', include('images.urls',\n namespace='images')), path('password_reset/', password_reset, {\n 'template_name': 'registration/password_reset.html'}, name=\n 'password_reset'), path('password_reset/done/', password_reset_done,\n name='password_reset_done'), path(\n 'password_reset/confirm/<str:uidb64>/<str:token>/',\n password_reset_confirm, name='password_reset_confirm'), path(\n 'password_reset/complete/', password_reset_complete, name=\n 'password_reset_complete'), path('password_change/', password_change,\n name='password_change'), path('password_change/done/',\n password_change_done, name='password_change_done'), path('', dashboard,\n name='dashboard'), path('social-auth/', include('social_django.urls',\n namespace='social')), path('api/accounts/', include('account.api.urls',\n namespace='api-accounts')), path('api/images/', include(\n 'images.api.urls', namespace='api-images'))]\nif settings.DEBUG:\n urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT\n )\n",
"step-4": "<mask token>\nfrom django.contrib import admin\nfrom django.urls import path, include\nfrom django.contrib.auth.views import password_reset, password_reset_done, password_reset_complete, password_reset_confirm, password_change, password_change_done\nfrom django.conf import settings\nfrom django.conf.urls.static import static\nfrom account.views import dashboard\nurlpatterns = [path('admin/', admin.site.urls), path('account/', include(\n 'account.urls'), name='account'), path('images/', include('images.urls',\n namespace='images')), path('password_reset/', password_reset, {\n 'template_name': 'registration/password_reset.html'}, name=\n 'password_reset'), path('password_reset/done/', password_reset_done,\n name='password_reset_done'), path(\n 'password_reset/confirm/<str:uidb64>/<str:token>/',\n password_reset_confirm, name='password_reset_confirm'), path(\n 'password_reset/complete/', password_reset_complete, name=\n 'password_reset_complete'), path('password_change/', password_change,\n name='password_change'), path('password_change/done/',\n password_change_done, name='password_change_done'), path('', dashboard,\n name='dashboard'), path('social-auth/', include('social_django.urls',\n namespace='social')), path('api/accounts/', include('account.api.urls',\n namespace='api-accounts')), path('api/images/', include(\n 'images.api.urls', namespace='api-images'))]\nif settings.DEBUG:\n urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT\n )\n",
"step-5": "\"\"\"social_website URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/2.0/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.contrib import admin\nfrom django.urls import path, include\nfrom django.contrib.auth.views import (password_reset, password_reset_done, password_reset_complete,\n password_reset_confirm, password_change, password_change_done)\nfrom django.conf import settings\nfrom django.conf.urls.static import static\n\nfrom account.views import dashboard\n\nurlpatterns = [\n path('admin/', admin.site.urls),\n path('account/', include('account.urls'), name='account'),\n path('images/', include('images.urls', namespace='images')),\n path('password_reset/', password_reset, {'template_name': 'registration/password_reset.html'}, name='password_reset'),\n path('password_reset/done/', password_reset_done, name='password_reset_done'),\n path('password_reset/confirm/<str:uidb64>/<str:token>/', password_reset_confirm, name='password_reset_confirm'),\n path('password_reset/complete/', password_reset_complete, name='password_reset_complete'),\n path('password_change/', password_change, name='password_change'),\n path('password_change/done/', password_change_done, name='password_change_done'),\n path('', dashboard, name='dashboard'),\n path('social-auth/', include('social_django.urls', namespace='social')),\n path('api/accounts/', include('account.api.urls', namespace='api-accounts')),\n path('api/images/', include('images.api.urls', namespace='api-images')),\n]\n\nif settings.DEBUG:\n urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from types import MappingProxyType
from typing import Any, Dict, Mapping, Type, TypeVar, Union
import yaml
from typing_extensions import Protocol
from mashumaro.serializer.base import DataClassDictMixin
DEFAULT_DICT_PARAMS = {
"use_bytes": False,
"use_enum": False,
"use_datetime": False,
}
EncodedData = Union[str, bytes]
T = TypeVar("T", bound="DataClassYAMLMixin")
class Encoder(Protocol): # pragma no cover
def __call__(self, o, **kwargs) -> EncodedData:
...
class Decoder(Protocol): # pragma no cover
def __call__(self, packed: EncodedData, **kwargs) -> Dict[Any, Any]:
...
class DataClassYAMLMixin(DataClassDictMixin):
def to_yaml(
self: T,
encoder: Encoder = yaml.dump, # type: ignore
dict_params: Mapping = MappingProxyType({}),
**encoder_kwargs,
) -> EncodedData:
return encoder(
self.to_dict(**dict(DEFAULT_DICT_PARAMS, **dict_params)),
**encoder_kwargs,
)
@classmethod
def from_yaml(
cls: Type[T],
data: EncodedData,
decoder: Decoder = yaml.safe_load, # type: ignore
dict_params: Mapping = MappingProxyType({}),
**decoder_kwargs,
) -> T:
return cls.from_dict(
decoder(data, **decoder_kwargs),
**dict(DEFAULT_DICT_PARAMS, **dict_params),
)
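# Usage sketch (added for illustration; `Point` is a hypothetical dataclass,
# not part of the original module):
#
#     from dataclasses import dataclass
#
#     @dataclass
#     class Point(DataClassYAMLMixin):
#         x: int
#         y: int
#
#     Point(1, 2).to_yaml()            # "x: 1\ny: 2\n"
#     Point.from_yaml("x: 1\ny: 2\n")  # Point(x=1, y=2)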
|
normal
|
{
"blob_id": "15edb1c051ccbc6f927c0a859288511f94a3d853",
"index": 986,
"step-1": "<mask token>\n\n\nclass Encoder(Protocol):\n <mask token>\n\n\nclass Decoder(Protocol):\n\n def __call__(self, packed: EncodedData, **kwargs) ->Dict[Any, Any]:\n ...\n\n\nclass DataClassYAMLMixin(DataClassDictMixin):\n\n def to_yaml(self: T, encoder: Encoder=yaml.dump, dict_params: Mapping=\n MappingProxyType({}), **encoder_kwargs) ->EncodedData:\n return encoder(self.to_dict(**dict(DEFAULT_DICT_PARAMS, **\n dict_params)), **encoder_kwargs)\n\n @classmethod\n def from_yaml(cls: Type[T], data: EncodedData, decoder: Decoder=yaml.\n safe_load, dict_params: Mapping=MappingProxyType({}), **decoder_kwargs\n ) ->T:\n return cls.from_dict(decoder(data, **decoder_kwargs), **dict(\n DEFAULT_DICT_PARAMS, **dict_params))\n",
"step-2": "<mask token>\n\n\nclass Encoder(Protocol):\n\n def __call__(self, o, **kwargs) ->EncodedData:\n ...\n\n\nclass Decoder(Protocol):\n\n def __call__(self, packed: EncodedData, **kwargs) ->Dict[Any, Any]:\n ...\n\n\nclass DataClassYAMLMixin(DataClassDictMixin):\n\n def to_yaml(self: T, encoder: Encoder=yaml.dump, dict_params: Mapping=\n MappingProxyType({}), **encoder_kwargs) ->EncodedData:\n return encoder(self.to_dict(**dict(DEFAULT_DICT_PARAMS, **\n dict_params)), **encoder_kwargs)\n\n @classmethod\n def from_yaml(cls: Type[T], data: EncodedData, decoder: Decoder=yaml.\n safe_load, dict_params: Mapping=MappingProxyType({}), **decoder_kwargs\n ) ->T:\n return cls.from_dict(decoder(data, **decoder_kwargs), **dict(\n DEFAULT_DICT_PARAMS, **dict_params))\n",
"step-3": "<mask token>\nDEFAULT_DICT_PARAMS = {'use_bytes': False, 'use_enum': False,\n 'use_datetime': False}\nEncodedData = Union[str, bytes]\nT = TypeVar('T', bound='DataClassYAMLMixin')\n\n\nclass Encoder(Protocol):\n\n def __call__(self, o, **kwargs) ->EncodedData:\n ...\n\n\nclass Decoder(Protocol):\n\n def __call__(self, packed: EncodedData, **kwargs) ->Dict[Any, Any]:\n ...\n\n\nclass DataClassYAMLMixin(DataClassDictMixin):\n\n def to_yaml(self: T, encoder: Encoder=yaml.dump, dict_params: Mapping=\n MappingProxyType({}), **encoder_kwargs) ->EncodedData:\n return encoder(self.to_dict(**dict(DEFAULT_DICT_PARAMS, **\n dict_params)), **encoder_kwargs)\n\n @classmethod\n def from_yaml(cls: Type[T], data: EncodedData, decoder: Decoder=yaml.\n safe_load, dict_params: Mapping=MappingProxyType({}), **decoder_kwargs\n ) ->T:\n return cls.from_dict(decoder(data, **decoder_kwargs), **dict(\n DEFAULT_DICT_PARAMS, **dict_params))\n",
"step-4": "from types import MappingProxyType\nfrom typing import Any, Dict, Mapping, Type, TypeVar, Union\nimport yaml\nfrom typing_extensions import Protocol\nfrom mashumaro.serializer.base import DataClassDictMixin\nDEFAULT_DICT_PARAMS = {'use_bytes': False, 'use_enum': False,\n 'use_datetime': False}\nEncodedData = Union[str, bytes]\nT = TypeVar('T', bound='DataClassYAMLMixin')\n\n\nclass Encoder(Protocol):\n\n def __call__(self, o, **kwargs) ->EncodedData:\n ...\n\n\nclass Decoder(Protocol):\n\n def __call__(self, packed: EncodedData, **kwargs) ->Dict[Any, Any]:\n ...\n\n\nclass DataClassYAMLMixin(DataClassDictMixin):\n\n def to_yaml(self: T, encoder: Encoder=yaml.dump, dict_params: Mapping=\n MappingProxyType({}), **encoder_kwargs) ->EncodedData:\n return encoder(self.to_dict(**dict(DEFAULT_DICT_PARAMS, **\n dict_params)), **encoder_kwargs)\n\n @classmethod\n def from_yaml(cls: Type[T], data: EncodedData, decoder: Decoder=yaml.\n safe_load, dict_params: Mapping=MappingProxyType({}), **decoder_kwargs\n ) ->T:\n return cls.from_dict(decoder(data, **decoder_kwargs), **dict(\n DEFAULT_DICT_PARAMS, **dict_params))\n",
"step-5": "from types import MappingProxyType\nfrom typing import Any, Dict, Mapping, Type, TypeVar, Union\n\nimport yaml\nfrom typing_extensions import Protocol\n\nfrom mashumaro.serializer.base import DataClassDictMixin\n\nDEFAULT_DICT_PARAMS = {\n \"use_bytes\": False,\n \"use_enum\": False,\n \"use_datetime\": False,\n}\nEncodedData = Union[str, bytes]\nT = TypeVar(\"T\", bound=\"DataClassYAMLMixin\")\n\n\nclass Encoder(Protocol): # pragma no cover\n def __call__(self, o, **kwargs) -> EncodedData:\n ...\n\n\nclass Decoder(Protocol): # pragma no cover\n def __call__(self, packed: EncodedData, **kwargs) -> Dict[Any, Any]:\n ...\n\n\nclass DataClassYAMLMixin(DataClassDictMixin):\n def to_yaml(\n self: T,\n encoder: Encoder = yaml.dump, # type: ignore\n dict_params: Mapping = MappingProxyType({}),\n **encoder_kwargs,\n ) -> EncodedData:\n\n return encoder(\n self.to_dict(**dict(DEFAULT_DICT_PARAMS, **dict_params)),\n **encoder_kwargs,\n )\n\n @classmethod\n def from_yaml(\n cls: Type[T],\n data: EncodedData,\n decoder: Decoder = yaml.safe_load, # type: ignore\n dict_params: Mapping = MappingProxyType({}),\n **decoder_kwargs,\n ) -> T:\n return cls.from_dict(\n decoder(data, **decoder_kwargs),\n **dict(DEFAULT_DICT_PARAMS, **dict_params),\n )\n",
"step-ids": [
6,
7,
8,
9,
10
]
}
|
[
6,
7,
8,
9,
10
] |
# pick three names
names = ["Mia", "Francis", "Eva"]
# prompt user for his/her name
print("Please enter your name:")
user_name = input()
if user_name in names:
print("Hi there, {}!".format(user_name))
else:
print("Who goes there?")
|
normal
|
{
"blob_id": "59c33383365d10c108253f7b5a210d40718913a2",
"index": 9653,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint('Please enter your name:')\n<mask token>\nif user_name in names:\n print('Hi there, {}!'.format(user_name))\nelse:\n print('Who goes there?')\n",
"step-3": "names = ['Mia', 'Francis', 'Eva']\nprint('Please enter your name:')\nuser_name = input()\nif user_name in names:\n print('Hi there, {}!'.format(user_name))\nelse:\n print('Who goes there?')\n",
"step-4": "# pick three names\nnames = [\"Mia\", \"Francis\", \"Eva\"]\n\n# propmpt user for his/her name\nprint(\"Please enter your name:\")\nuser_name = input()\nif user_name in names:\n print(\"Hi there, {}!\".format(user_name))\nelse:\n print(\"Who goes there?\")\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import numpy
from scipy.spatial.distance import cosine
def similarity_metric(embedding1: numpy.ndarray, embedding2: numpy.ndarray
) ->float:
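    # cosine() returns NaN when either embedding has zero norm; nan_to_num
    # maps that NaN to 0 so the similarity is always a finite float.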
return numpy.nan_to_num(1 - cosine(embedding1, embedding2), 0)
|
normal
|
{
"blob_id": "ec9f27b4313f72ae6eb7e8280d47de226aeb6bb1",
"index": 2270,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef similarity_metric(embedding1: numpy.ndarray, embedding2: numpy.ndarray\n ) ->float:\n return numpy.nan_to_num(1 - cosine(embedding1, embedding2), 0)\n",
"step-3": "import numpy\nfrom scipy.spatial.distance import cosine\n\n\ndef similarity_metric(embedding1: numpy.ndarray, embedding2: numpy.ndarray\n ) ->float:\n return numpy.nan_to_num(1 - cosine(embedding1, embedding2), 0)\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
from .simulator import SpatialSIRSimulator as Simulator
from .util import Prior
from .util import PriorExperiment
from .util import Truth
from .util import log_likelihood
|
normal
|
{
"blob_id": "4f06eddfac38574a0ae3bdd0ea2ac81291380166",
"index": 9987,
"step-1": "<mask token>\n",
"step-2": "from .simulator import SpatialSIRSimulator as Simulator\nfrom .util import Prior\nfrom .util import PriorExperiment\nfrom .util import Truth\nfrom .util import log_likelihood\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
#!/bin/python3
# TODO: implement the stack O(N) version
'''
Naive: O(N^3), i.e. sum_{k=1...N}( O((N - k + 1) * k) )
for each size N
for each window of size N in the array
traverse the window to find the max
Naive with heap: O(N^2 log N)
for each size N O(N)
traverse array and accumulate window of size N O(N log N)
find max O(1)
DP:
Notice that min(W, p), the min size for window of size W and at position p, is
equal to min(min(W - 1, p), min(W - 1, p + 1)). Therefore, DP with these
tables can reduce the size of the problem to O(W^2) ~= O(N^2). Is this good
enough? No.
Domination windows:
Let us say that i dominates a contiguous range of n values if it's lower than
every other value in that range. This means that i will show up as a window minimum
when considering window sizes of up to size n. We want to find the largest i
such that it dominates other numbers in a window of size n. Now how to find this
efficiently? If we iterate through each i and compare it to its n neighbors,
that will also be O(N^2) time.
Start with lowest number and 1-dimensional flood fill. This will take O(N^2)
time in the worst case though.
However, you don't actually have to perform the flood fill. Instead, we can just
use the coordinates of lower numbers and perform something like binary search
to find the closest coordinates to a given coordinate in O(log N) time.
Overall this means that we iterate through each number, starting from the
lowest, and perform O(log N) time binary searches to find the boundaries over
which this element i dominates. Total time is O(N log N).
'''
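# Worked example (added for illustration, not part of the original notes):
# for [6, 3, 5, 1, 12], the value 1 is smaller than every other element, so
# it dominates a window of size 5; 3 dominates [6, 3, 5] (size 3); 6, 5 and
# 12 dominate only themselves (size 1). Taking, for each window size w, the
# largest value whose dominating window is at least w gives the answers
# 12, 3, 3, 1, 1 for w = 1..5.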
import math
import os
import random
import re
import sys
from collections import defaultdict
from heapq import heappush, heappop
from bisect import insort_left
# Complete the riddle function below.
def riddle(lst):
'''
Holy fuck.
Better summary than above of what's happening:
    Define a value `v` in the list to dominate a range of size `n`, including `v`
itself, if `v` is smaller than all other numbers in this contiguous range.
Define `v`'s "dominating window" to be the largest such range. If `v` has a
    dominating window of size `n`, then it is the minimum of some window of every
    size `w` <= `n`. Therefore, to find the maximum of all such minimum
windows, we only need to find the maximum `v` which dominates a range of size
`n` or greater, for each `n` between 1 and `N`.
To do this, the naive algorithm is to, for each number, flood fill in each
direction until you hit a number smaller than itself. However, we can instead
    start with the smallest number, and keep a list of the indices we have
    already processed, whose values we know are smaller than the number we're processing.
Using binary search, we can find the interval indices in which the current
index lies, and find the bounding interval in O(log N) time. Repeat for each
of `n` numbers for a total time complexity of O(N log N).
Finally, for each window size `w`, find the maximum `v` that dominates a range
of size `n` or larger.
It seems like this is not the best solution though. There is a O(N) solution
using stacks.
'''
max_by_w_size = { w: -float('inf') for w in range(1, len(lst) + 1) }
# note that bounding_indices are indexes into len(lst), not values themselves
bounding_indices = [-1, len(lst)]
sorted_lst = sorted(enumerate(lst), key=lambda x: x[1])
for i, value in sorted_lst:
# note that l_index and r_index are indices to the bounding indices
r_index = bsearch(bounding_indices, i)
l_index = r_index - 1
l_point = bounding_indices[l_index]
r_point = bounding_indices[r_index]
# (l_point + 1, r_point) defines a "dominating window" for `value`
w = r_point - (l_point + 1)
assert w > 0
max_by_w_size[w] = max(max_by_w_size[w], value)
insort_left(bounding_indices, i)
m = -float('inf')
maxes = []
for w in reversed(range(1, len(lst) + 1)):
m = max(m, max_by_w_size[w])
maxes.append(m)
return reversed(maxes)
def bsearch(lst, target):
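    # Binary search over the sorted index list: returns the position of the
    # first element strictly greater than `target`; the caller uses this to
    # find the processed indices that bound `target` on each side.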
i, j = 0, len(lst)
while i < j:
mid = (i + j) // 2
if lst[mid] == target:
return mid + 1 # insert on the right side of the same number, not that it should matter?
elif lst[mid] < target:
i = mid + 1
else:
j = mid
return i
def riddle_dp(arr):
'''
Too slow to pass large test cases. See `riddle`.
'''
N = len(arr)
min_w = {} # dict of (win_size, win_position) to minimum
for i, el in enumerate(arr):
min_w[(1, i)] = el
for w in range(2, len(arr) + 1):
for i in range(N - w + 1):
# print('w, i', w, i)
min_w[(w, i)] = min(min_w[(w - 1, i)], min_w[(w - 1, i + 1)])
# print('min_w', min_w)
return [max(min_w[(w, i)] for i in range(N - w + 1)) for w in range(1, len(arr) + 1)]
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
n = int(input())
arr = list(map(int, input().rstrip().split()))
res = riddle(arr)
fptr.write(' '.join(map(str, res)))
fptr.write('\n')
fptr.close()
|
normal
|
{
"blob_id": "dce7fd0c9ed8e1d433f9131a8d137c8dcca4ac56",
"index": 8307,
"step-1": "<mask token>\n\n\ndef riddle(lst):\n \"\"\"\n Holy fuck.\n\n Better summary than above of what's happening:\n\n Define an value `v` in the list to dominate a range of size `n`, including `v`\n itself, if `v` is smaller than all other numbers in this contiguous range.\n Define `v`'s \"dominating window\" to be the largest such range. If `v` has a\n dominating window of size `n`, then it must show up as a value when we take\n minimums of size `w`. Therefore, to find the maximum of all such minimum\n windows, we only need to find the maximum `v` which dominates a range of size\n `n` or greater, for each `n` between 1 and `N`.\n\n To do this, the naive algorithm is to, for each number, flood fill in each\n direction until you hit a number smaller than itself. However, we can instead\n start with the smallest number, and keep a list of indices which we have\n already processed, that we know is smaller than the number we're processing.\n Using binary search, we can find the interval indices in which the current\n index lies, and find the bounding interval in O(log N) time. Repeat for each\n of `n` numbers for a total time complexity of O(N log N).\n\n Finally, for each window size `w`, find the maximum `v` that dominates a range\n of size `n` or larger.\n\n It seems like this is not the best solution though. There is a O(N) solution\n using stacks.\n \"\"\"\n max_by_w_size = {w: (-float('inf')) for w in range(1, len(lst) + 1)}\n bounding_indices = [-1, len(lst)]\n sorted_lst = sorted(enumerate(lst), key=lambda x: x[1])\n for i, value in sorted_lst:\n r_index = bsearch(bounding_indices, i)\n l_index = r_index - 1\n l_point = bounding_indices[l_index]\n r_point = bounding_indices[r_index]\n w = r_point - (l_point + 1)\n assert w > 0\n max_by_w_size[w] = max(max_by_w_size[w], value)\n insort_left(bounding_indices, i)\n m = -float('inf')\n maxes = []\n for w in reversed(range(1, len(lst) + 1)):\n m = max(m, max_by_w_size[w])\n maxes.append(m)\n return reversed(maxes)\n\n\ndef bsearch(lst, target):\n i, j = 0, len(lst)\n while i < j:\n mid = (i + j) // 2\n if lst[mid] == target:\n return mid + 1\n elif lst[mid] < target:\n i = mid + 1\n else:\n j = mid\n return i\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef riddle(lst):\n \"\"\"\n Holy fuck.\n\n Better summary than above of what's happening:\n\n Define an value `v` in the list to dominate a range of size `n`, including `v`\n itself, if `v` is smaller than all other numbers in this contiguous range.\n Define `v`'s \"dominating window\" to be the largest such range. If `v` has a\n dominating window of size `n`, then it must show up as a value when we take\n minimums of size `w`. Therefore, to find the maximum of all such minimum\n windows, we only need to find the maximum `v` which dominates a range of size\n `n` or greater, for each `n` between 1 and `N`.\n\n To do this, the naive algorithm is to, for each number, flood fill in each\n direction until you hit a number smaller than itself. However, we can instead\n start with the smallest number, and keep a list of indices which we have\n already processed, that we know is smaller than the number we're processing.\n Using binary search, we can find the interval indices in which the current\n index lies, and find the bounding interval in O(log N) time. Repeat for each\n of `n` numbers for a total time complexity of O(N log N).\n\n Finally, for each window size `w`, find the maximum `v` that dominates a range\n of size `n` or larger.\n\n It seems like this is not the best solution though. There is a O(N) solution\n using stacks.\n \"\"\"\n max_by_w_size = {w: (-float('inf')) for w in range(1, len(lst) + 1)}\n bounding_indices = [-1, len(lst)]\n sorted_lst = sorted(enumerate(lst), key=lambda x: x[1])\n for i, value in sorted_lst:\n r_index = bsearch(bounding_indices, i)\n l_index = r_index - 1\n l_point = bounding_indices[l_index]\n r_point = bounding_indices[r_index]\n w = r_point - (l_point + 1)\n assert w > 0\n max_by_w_size[w] = max(max_by_w_size[w], value)\n insort_left(bounding_indices, i)\n m = -float('inf')\n maxes = []\n for w in reversed(range(1, len(lst) + 1)):\n m = max(m, max_by_w_size[w])\n maxes.append(m)\n return reversed(maxes)\n\n\ndef bsearch(lst, target):\n i, j = 0, len(lst)\n while i < j:\n mid = (i + j) // 2\n if lst[mid] == target:\n return mid + 1\n elif lst[mid] < target:\n i = mid + 1\n else:\n j = mid\n return i\n\n\ndef riddle_dp(arr):\n \"\"\"\n Too slow to pass large test cases. See `riddle`.\n \"\"\"\n N = len(arr)\n min_w = {}\n for i, el in enumerate(arr):\n min_w[1, i] = el\n for w in range(2, len(arr) + 1):\n for i in range(N - w + 1):\n min_w[w, i] = min(min_w[w - 1, i], min_w[w - 1, i + 1])\n return [max(min_w[w, i] for i in range(N - w + 1)) for w in range(1, \n len(arr) + 1)]\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef riddle(lst):\n \"\"\"\n Holy fuck.\n\n Better summary than above of what's happening:\n\n Define an value `v` in the list to dominate a range of size `n`, including `v`\n itself, if `v` is smaller than all other numbers in this contiguous range.\n Define `v`'s \"dominating window\" to be the largest such range. If `v` has a\n dominating window of size `n`, then it must show up as a value when we take\n minimums of size `w`. Therefore, to find the maximum of all such minimum\n windows, we only need to find the maximum `v` which dominates a range of size\n `n` or greater, for each `n` between 1 and `N`.\n\n To do this, the naive algorithm is to, for each number, flood fill in each\n direction until you hit a number smaller than itself. However, we can instead\n start with the smallest number, and keep a list of indices which we have\n already processed, that we know is smaller than the number we're processing.\n Using binary search, we can find the interval indices in which the current\n index lies, and find the bounding interval in O(log N) time. Repeat for each\n of `n` numbers for a total time complexity of O(N log N).\n\n Finally, for each window size `w`, find the maximum `v` that dominates a range\n of size `n` or larger.\n\n It seems like this is not the best solution though. There is a O(N) solution\n using stacks.\n \"\"\"\n max_by_w_size = {w: (-float('inf')) for w in range(1, len(lst) + 1)}\n bounding_indices = [-1, len(lst)]\n sorted_lst = sorted(enumerate(lst), key=lambda x: x[1])\n for i, value in sorted_lst:\n r_index = bsearch(bounding_indices, i)\n l_index = r_index - 1\n l_point = bounding_indices[l_index]\n r_point = bounding_indices[r_index]\n w = r_point - (l_point + 1)\n assert w > 0\n max_by_w_size[w] = max(max_by_w_size[w], value)\n insort_left(bounding_indices, i)\n m = -float('inf')\n maxes = []\n for w in reversed(range(1, len(lst) + 1)):\n m = max(m, max_by_w_size[w])\n maxes.append(m)\n return reversed(maxes)\n\n\ndef bsearch(lst, target):\n i, j = 0, len(lst)\n while i < j:\n mid = (i + j) // 2\n if lst[mid] == target:\n return mid + 1\n elif lst[mid] < target:\n i = mid + 1\n else:\n j = mid\n return i\n\n\ndef riddle_dp(arr):\n \"\"\"\n Too slow to pass large test cases. See `riddle`.\n \"\"\"\n N = len(arr)\n min_w = {}\n for i, el in enumerate(arr):\n min_w[1, i] = el\n for w in range(2, len(arr) + 1):\n for i in range(N - w + 1):\n min_w[w, i] = min(min_w[w - 1, i], min_w[w - 1, i + 1])\n return [max(min_w[w, i] for i in range(N - w + 1)) for w in range(1, \n len(arr) + 1)]\n\n\nif __name__ == '__main__':\n fptr = open(os.environ['OUTPUT_PATH'], 'w')\n n = int(input())\n arr = list(map(int, input().rstrip().split()))\n res = riddle(arr)\n fptr.write(' '.join(map(str, res)))\n fptr.write('\\n')\n fptr.close()\n",
"step-4": "<mask token>\nimport math\nimport os\nimport random\nimport re\nimport sys\nfrom collections import defaultdict\nfrom heapq import heappush, heappop\nfrom bisect import insort_left\n\n\ndef riddle(lst):\n \"\"\"\n Holy fuck.\n\n Better summary than above of what's happening:\n\n Define an value `v` in the list to dominate a range of size `n`, including `v`\n itself, if `v` is smaller than all other numbers in this contiguous range.\n Define `v`'s \"dominating window\" to be the largest such range. If `v` has a\n dominating window of size `n`, then it must show up as a value when we take\n minimums of size `w`. Therefore, to find the maximum of all such minimum\n windows, we only need to find the maximum `v` which dominates a range of size\n `n` or greater, for each `n` between 1 and `N`.\n\n To do this, the naive algorithm is to, for each number, flood fill in each\n direction until you hit a number smaller than itself. However, we can instead\n start with the smallest number, and keep a list of indices which we have\n already processed, that we know is smaller than the number we're processing.\n Using binary search, we can find the interval indices in which the current\n index lies, and find the bounding interval in O(log N) time. Repeat for each\n of `n` numbers for a total time complexity of O(N log N).\n\n Finally, for each window size `w`, find the maximum `v` that dominates a range\n of size `n` or larger.\n\n It seems like this is not the best solution though. There is a O(N) solution\n using stacks.\n \"\"\"\n max_by_w_size = {w: (-float('inf')) for w in range(1, len(lst) + 1)}\n bounding_indices = [-1, len(lst)]\n sorted_lst = sorted(enumerate(lst), key=lambda x: x[1])\n for i, value in sorted_lst:\n r_index = bsearch(bounding_indices, i)\n l_index = r_index - 1\n l_point = bounding_indices[l_index]\n r_point = bounding_indices[r_index]\n w = r_point - (l_point + 1)\n assert w > 0\n max_by_w_size[w] = max(max_by_w_size[w], value)\n insort_left(bounding_indices, i)\n m = -float('inf')\n maxes = []\n for w in reversed(range(1, len(lst) + 1)):\n m = max(m, max_by_w_size[w])\n maxes.append(m)\n return reversed(maxes)\n\n\ndef bsearch(lst, target):\n i, j = 0, len(lst)\n while i < j:\n mid = (i + j) // 2\n if lst[mid] == target:\n return mid + 1\n elif lst[mid] < target:\n i = mid + 1\n else:\n j = mid\n return i\n\n\ndef riddle_dp(arr):\n \"\"\"\n Too slow to pass large test cases. See `riddle`.\n \"\"\"\n N = len(arr)\n min_w = {}\n for i, el in enumerate(arr):\n min_w[1, i] = el\n for w in range(2, len(arr) + 1):\n for i in range(N - w + 1):\n min_w[w, i] = min(min_w[w - 1, i], min_w[w - 1, i + 1])\n return [max(min_w[w, i] for i in range(N - w + 1)) for w in range(1, \n len(arr) + 1)]\n\n\nif __name__ == '__main__':\n fptr = open(os.environ['OUTPUT_PATH'], 'w')\n n = int(input())\n arr = list(map(int, input().rstrip().split()))\n res = riddle(arr)\n fptr.write(' '.join(map(str, res)))\n fptr.write('\\n')\n fptr.close()\n",
"step-5": "#!/bin/python3\n\n# TODO: implement the stack O(N) version\n\n'''\nNaive: O(N^3) or sum_{k=1...N}( O(N^2 (N-K)) )\n for each size N\n for each window of size N in the array\n traverse the window to find the max\n\nNaive with heap: O(N^2 log N)\n for each size N O(N)\n traverse array and accumulate window of size N O(N log N)\n find max O(1)\n\nDP:\nNotice that min(W, p), the min size for window of size W and at position p, is\nequal to min(min(W - 1, p), min(W - 1, p + 1)). Therefore, DP with these\ntables can reduce the size of the problem to O(W^2) ~= O(N^2). Is this good\nenough? No.\n\nDomination windows:\nLet us say that i dominates a contiguous range of n values if it's lower than\nall n of its neighboring values. This means that i will show up as a min window\nwhen considering window sizes of up to size n. We want to find the largest i\nsuch that it domaintes other numbers in a window of size n. Now how to find this\nefficiently? If we iterate through each i and compare it to its n neighbors,\nthat will also be O(N^2) time.\n\nStart with lowest number and 1-dimensional flood fill. This will take O(N^2)\ntime in the worst case though.\n\nHowever, you don't actually have to perform the flood fill. Instead, we can just\nuse the coordinates of lower numbers and perform something like binary search\nto find the closest coordinates to a given coordinate in O(log N) time.\n\nOverall this means that we iterate through each number, starting from the\nlowest, and perform O(log N) time binary searches to find the boundaries over\nwhich this element i dominates. Total time is O(N log N).\n'''\n\nimport math\nimport os\nimport random\nimport re\nimport sys\n\nfrom collections import defaultdict\nfrom heapq import heappush, heappop\nfrom bisect import insort_left\n\n# Complete the riddle function below.\ndef riddle(lst):\n '''\n Holy fuck.\n\n Better summary than above of what's happening:\n\n Define an value `v` in the list to dominate a range of size `n`, including `v`\n itself, if `v` is smaller than all other numbers in this contiguous range.\n Define `v`'s \"dominating window\" to be the largest such range. If `v` has a\n dominating window of size `n`, then it must show up as a value when we take\n minimums of size `w`. Therefore, to find the maximum of all such minimum\n windows, we only need to find the maximum `v` which dominates a range of size\n `n` or greater, for each `n` between 1 and `N`.\n\n To do this, the naive algorithm is to, for each number, flood fill in each\n direction until you hit a number smaller than itself. However, we can instead\n start with the smallest number, and keep a list of indices which we have\n already processed, that we know is smaller than the number we're processing.\n Using binary search, we can find the interval indices in which the current\n index lies, and find the bounding interval in O(log N) time. Repeat for each\n of `n` numbers for a total time complexity of O(N log N).\n\n Finally, for each window size `w`, find the maximum `v` that dominates a range\n of size `n` or larger.\n\n It seems like this is not the best solution though. 
There is a O(N) solution\n using stacks.\n '''\n max_by_w_size = { w: -float('inf') for w in range(1, len(lst) + 1) }\n # note that bounding_indices are indexes into len(lst), not values themselves\n bounding_indices = [-1, len(lst)]\n sorted_lst = sorted(enumerate(lst), key=lambda x: x[1])\n for i, value in sorted_lst:\n # note that l_index and r_index are indices to the bounding indices\n r_index = bsearch(bounding_indices, i)\n l_index = r_index - 1\n l_point = bounding_indices[l_index]\n r_point = bounding_indices[r_index]\n # (l_point + 1, r_point) defines a \"dominating window\" for `value`\n w = r_point - (l_point + 1)\n assert w > 0\n max_by_w_size[w] = max(max_by_w_size[w], value)\n insort_left(bounding_indices, i)\n\n m = -float('inf')\n maxes = []\n for w in reversed(range(1, len(lst) + 1)):\n m = max(m, max_by_w_size[w])\n maxes.append(m)\n return reversed(maxes)\n\ndef bsearch(lst, target):\n i, j = 0, len(lst)\n while i < j:\n mid = (i + j) // 2\n if lst[mid] == target:\n return mid + 1 # insert on the right side of the same number, not that it should matter?\n elif lst[mid] < target:\n i = mid + 1\n else:\n j = mid\n return i\n\ndef riddle_dp(arr):\n '''\n Too slow to pass large test cases. See `riddle`.\n '''\n N = len(arr)\n min_w = {} # dict of (win_size, win_position) to minimum\n for i, el in enumerate(arr):\n min_w[(1, i)] = el\n for w in range(2, len(arr) + 1):\n for i in range(N - w + 1):\n # print('w, i', w, i)\n min_w[(w, i)] = min(min_w[(w - 1, i)], min_w[(w - 1, i + 1)])\n # print('min_w', min_w)\n return [max(min_w[(w, i)] for i in range(N - w + 1)) for w in range(1, len(arr) + 1)]\n\nif __name__ == '__main__':\n fptr = open(os.environ['OUTPUT_PATH'], 'w')\n\n n = int(input())\n\n arr = list(map(int, input().rstrip().split()))\n\n res = riddle(arr)\n\n fptr.write(' '.join(map(str, res)))\n fptr.write('\\n')\n\n fptr.close()\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
import numpy as np
def get_train_batches(data_dir='/home/yunhan/batchified'):
"""
return a list or generator of (large) ndarrays,
in order to efficiently utilize GPU
"""
    # todo: read in data that is preprocessed
    # Use batch 1 - 53 as train (60%), 54 - 71 as validation (20%), 72 - 89 as test (20%)
n = 53
idx = np.random.permutation(n)
idx = idx + 1
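    # batch files are expected to be named X1.npy ... X53.npy (and y1.npy ... y53.npy), hence the +1 offset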
for i in range(n):
X = np.load("%s/X%d.npy" % (data_dir, idx[i]))/255.
Y = np.load("%s/y%d.npy" % (data_dir, idx[i])).reshape(-1)
yield X, Y
def get_evaluate_batches(data_dir='/home/yunhan/batchified'):
"""
return a list or generator of (large) ndarrays,
in order to efficiently utilize GPU
"""
# train 3 valid 1
# Use batch 1 - 53 as train (60%), 54 - 71 as validation (20%), 72 - 89 as test (20%)
n = 18
idx = np.random.permutation(n)
idx = idx + 54
for i in range(n):
X = np.load("%s/X%d.npy" % (data_dir, idx[i]))/255.
Y = np.load("%s/y%d.npy" % (data_dir, idx[i])).reshape(-1)
yield X, Y
def get_test_batches(data_dir='/home/yunhan/batchified'):
"""
return a list or generator of (large) ndarrays,
in order to efficiently utilize GPU
"""
# train 3 valid 1
# Use batch 1 - 53 as train (60%), 54 - 71 as validation (20%), 72 - 89 as test (20%)
n = 18
idx = np.random.permutation(n)
idx = idx + 72
for i in range(n):
X = np.load("%s/X%d.npy" % (data_dir, idx[i]))/255.
Y = np.load("%s/y%d.npy" % (data_dir, idx[i])).reshape(-1)
yield X, Y
def get_batches_mono(data_dir):
"""
return a list or generator of (large) ndarrays,
in order to efficiently utilize GPU
"""
X = np.load('/home/yunhan/data_dir/train_x_224.npy')
# X = np.load('train_x_sample.npy')
X = X / 255
# X = np.load('/home/yunhan/data_dir/train_x_224.npy')
Y = np.load('/home/yunhan/data_dir/train_y_224.npy')
# Y = np.load('train_y_sample.npy')
return [(X, Y, 32, 0.2), ]
def get_test_data_batches(data_dir='/home/yunhan/data_dir'):
for i in range(17):
X = np.load("%s/X_%d.npy" % (data_dir, 3000*(i+1)))/255.
yield X
|
normal
|
{
"blob_id": "c04c38d78144b6f5d3e5af4ebe9ce430e882a367",
"index": 8014,
"step-1": "<mask token>\n\n\ndef get_evaluate_batches(data_dir='/home/yunhan/batchified'):\n \"\"\"\n return a list or generator of (large) ndarrays,\n in order to efficiently utilize GPU\n \"\"\"\n n = 18\n idx = np.random.permutation(n)\n idx = idx + 54\n for i in range(n):\n X = np.load('%s/X%d.npy' % (data_dir, idx[i])) / 255.0\n Y = np.load('%s/y%d.npy' % (data_dir, idx[i])).reshape(-1)\n yield X, Y\n\n\ndef get_test_batches(data_dir='/home/yunhan/batchified'):\n \"\"\"\n return a list or generator of (large) ndarrays,\n in order to efficiently utilize GPU\n \"\"\"\n n = 18\n idx = np.random.permutation(n)\n idx = idx + 72\n for i in range(n):\n X = np.load('%s/X%d.npy' % (data_dir, idx[i])) / 255.0\n Y = np.load('%s/y%d.npy' % (data_dir, idx[i])).reshape(-1)\n yield X, Y\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef get_evaluate_batches(data_dir='/home/yunhan/batchified'):\n \"\"\"\n return a list or generator of (large) ndarrays,\n in order to efficiently utilize GPU\n \"\"\"\n n = 18\n idx = np.random.permutation(n)\n idx = idx + 54\n for i in range(n):\n X = np.load('%s/X%d.npy' % (data_dir, idx[i])) / 255.0\n Y = np.load('%s/y%d.npy' % (data_dir, idx[i])).reshape(-1)\n yield X, Y\n\n\ndef get_test_batches(data_dir='/home/yunhan/batchified'):\n \"\"\"\n return a list or generator of (large) ndarrays,\n in order to efficiently utilize GPU\n \"\"\"\n n = 18\n idx = np.random.permutation(n)\n idx = idx + 72\n for i in range(n):\n X = np.load('%s/X%d.npy' % (data_dir, idx[i])) / 255.0\n Y = np.load('%s/y%d.npy' % (data_dir, idx[i])).reshape(-1)\n yield X, Y\n\n\ndef get_batches_mono(data_dir):\n \"\"\"\n return a list or generator of (large) ndarrays,\n in order to efficiently utilize GPU\n \"\"\"\n X = np.load('/home/yunhan/data_dir/train_x_224.npy')\n X = X / 255\n Y = np.load('/home/yunhan/data_dir/train_y_224.npy')\n return [(X, Y, 32, 0.2)]\n\n\ndef get_test_data_batches(data_dir='/home/yunhan/data_dir'):\n for i in range(17):\n X = np.load('%s/X_%d.npy' % (data_dir, 3000 * (i + 1))) / 255.0\n yield X\n",
"step-3": "<mask token>\n\n\ndef get_train_batches(data_dir='/home/yunhan/batchified'):\n \"\"\"\n return a list or generator of (large) ndarrays,\n in order to efficiently utilize GPU\n \"\"\"\n n = 53\n idx = np.random.permutation(n)\n idx = idx + 1\n for i in range(n):\n X = np.load('%s/X%d.npy' % (data_dir, idx[i])) / 255.0\n Y = np.load('%s/y%d.npy' % (data_dir, idx[i])).reshape(-1)\n yield X, Y\n\n\ndef get_evaluate_batches(data_dir='/home/yunhan/batchified'):\n \"\"\"\n return a list or generator of (large) ndarrays,\n in order to efficiently utilize GPU\n \"\"\"\n n = 18\n idx = np.random.permutation(n)\n idx = idx + 54\n for i in range(n):\n X = np.load('%s/X%d.npy' % (data_dir, idx[i])) / 255.0\n Y = np.load('%s/y%d.npy' % (data_dir, idx[i])).reshape(-1)\n yield X, Y\n\n\ndef get_test_batches(data_dir='/home/yunhan/batchified'):\n \"\"\"\n return a list or generator of (large) ndarrays,\n in order to efficiently utilize GPU\n \"\"\"\n n = 18\n idx = np.random.permutation(n)\n idx = idx + 72\n for i in range(n):\n X = np.load('%s/X%d.npy' % (data_dir, idx[i])) / 255.0\n Y = np.load('%s/y%d.npy' % (data_dir, idx[i])).reshape(-1)\n yield X, Y\n\n\ndef get_batches_mono(data_dir):\n \"\"\"\n return a list or generator of (large) ndarrays,\n in order to efficiently utilize GPU\n \"\"\"\n X = np.load('/home/yunhan/data_dir/train_x_224.npy')\n X = X / 255\n Y = np.load('/home/yunhan/data_dir/train_y_224.npy')\n return [(X, Y, 32, 0.2)]\n\n\ndef get_test_data_batches(data_dir='/home/yunhan/data_dir'):\n for i in range(17):\n X = np.load('%s/X_%d.npy' % (data_dir, 3000 * (i + 1))) / 255.0\n yield X\n",
"step-4": "import numpy as np\n\n\ndef get_train_batches(data_dir='/home/yunhan/batchified'):\n \"\"\"\n return a list or generator of (large) ndarrays,\n in order to efficiently utilize GPU\n \"\"\"\n n = 53\n idx = np.random.permutation(n)\n idx = idx + 1\n for i in range(n):\n X = np.load('%s/X%d.npy' % (data_dir, idx[i])) / 255.0\n Y = np.load('%s/y%d.npy' % (data_dir, idx[i])).reshape(-1)\n yield X, Y\n\n\ndef get_evaluate_batches(data_dir='/home/yunhan/batchified'):\n \"\"\"\n return a list or generator of (large) ndarrays,\n in order to efficiently utilize GPU\n \"\"\"\n n = 18\n idx = np.random.permutation(n)\n idx = idx + 54\n for i in range(n):\n X = np.load('%s/X%d.npy' % (data_dir, idx[i])) / 255.0\n Y = np.load('%s/y%d.npy' % (data_dir, idx[i])).reshape(-1)\n yield X, Y\n\n\ndef get_test_batches(data_dir='/home/yunhan/batchified'):\n \"\"\"\n return a list or generator of (large) ndarrays,\n in order to efficiently utilize GPU\n \"\"\"\n n = 18\n idx = np.random.permutation(n)\n idx = idx + 72\n for i in range(n):\n X = np.load('%s/X%d.npy' % (data_dir, idx[i])) / 255.0\n Y = np.load('%s/y%d.npy' % (data_dir, idx[i])).reshape(-1)\n yield X, Y\n\n\ndef get_batches_mono(data_dir):\n \"\"\"\n return a list or generator of (large) ndarrays,\n in order to efficiently utilize GPU\n \"\"\"\n X = np.load('/home/yunhan/data_dir/train_x_224.npy')\n X = X / 255\n Y = np.load('/home/yunhan/data_dir/train_y_224.npy')\n return [(X, Y, 32, 0.2)]\n\n\ndef get_test_data_batches(data_dir='/home/yunhan/data_dir'):\n for i in range(17):\n X = np.load('%s/X_%d.npy' % (data_dir, 3000 * (i + 1))) / 255.0\n yield X\n",
"step-5": "import numpy as np\n\n\ndef get_train_batches(data_dir='/home/yunhan/batchified'):\n \"\"\"\n return a list or generator of (large) ndarrays,\n in order to efficiently utilize GPU\n \"\"\"\n # todo: read in data that is preoprocessed\n # Use batch 1 - 52 as train (60%), 53 - 71 as validation (20%), 72 - 89 as test (20%)\n n = 53\n idx = np.random.permutation(n)\n idx = idx + 1\n for i in range(n):\n X = np.load(\"%s/X%d.npy\" % (data_dir, idx[i]))/255.\n Y = np.load(\"%s/y%d.npy\" % (data_dir, idx[i])).reshape(-1)\n yield X, Y\n\n\ndef get_evaluate_batches(data_dir='/home/yunhan/batchified'):\n \"\"\"\n return a list or generator of (large) ndarrays,\n in order to efficiently utilize GPU\n \"\"\"\n # train 3 valid 1\n # Use batch 1 - 53 as train (60%), 54 - 71 as validation (20%), 72 - 89 as test (20%)\n n = 18\n idx = np.random.permutation(n)\n idx = idx + 54\n for i in range(n):\n X = np.load(\"%s/X%d.npy\" % (data_dir, idx[i]))/255.\n Y = np.load(\"%s/y%d.npy\" % (data_dir, idx[i])).reshape(-1)\n yield X, Y\n\n\ndef get_test_batches(data_dir='/home/yunhan/batchified'):\n \"\"\"\n return a list or generator of (large) ndarrays,\n in order to efficiently utilize GPU\n \"\"\"\n # train 3 valid 1\n # Use batch 1 - 53 as train (60%), 54 - 71 as validation (20%), 72 - 89 as test (20%)\n n = 18\n idx = np.random.permutation(n)\n idx = idx + 72\n for i in range(n):\n X = np.load(\"%s/X%d.npy\" % (data_dir, idx[i]))/255.\n Y = np.load(\"%s/y%d.npy\" % (data_dir, idx[i])).reshape(-1)\n yield X, Y\n\n\ndef get_batches_mono(data_dir):\n \"\"\"\n return a list or generator of (large) ndarrays,\n in order to efficiently utilize GPU\n \"\"\"\n X = np.load('/home/yunhan/data_dir/train_x_224.npy')\n # X = np.load('train_x_sample.npy')\n X = X / 255\n # X = np.load('/home/yunhan/data_dir/train_x_224.npy')\n Y = np.load('/home/yunhan/data_dir/train_y_224.npy')\n # Y = np.load('train_y_sample.npy')\n return [(X, Y, 32, 0.2), ]\n\n\ndef get_test_data_batches(data_dir='/home/yunhan/data_dir'):\n for i in range(17):\n X = np.load(\"%s/X_%d.npy\" % (data_dir, 3000*(i+1)))/255.\n yield X\n",
"step-ids": [
2,
4,
5,
6,
7
]
}
|
[
2,
4,
5,
6,
7
] |
import datetime
import pendulum
import requests
from prefect import task, Flow, Parameter
from prefect.engine.signals import SKIP
from prefect.tasks.notifications.slack_task import SlackTask
from prefect.tasks.secrets import Secret
city = Parameter(name="City", default="San Jose")
api_key = Secret("WEATHER_API_KEY")
@task(max_retries=2, retry_delay=datetime.timedelta(seconds=5))
def pull_forecast(city, api_key):
"""
Extract the 5-day 3-hour forecast for the provided City.
"""
base_url = "http://api.openweathermap.org/data/2.5/forecast?"
url = base_url + "appid=" + api_key + "&q=" + city
r = requests.get(url)
r.raise_for_status()
data = r.json()
return data
@task
def is_raining_tomorrow(data):
"""
    Given a list of hourly forecasts, raises SKIP when there is no rain in
    tomorrow's forecast, so the downstream notification only runs on rainy days.
"""
    tomorrow = pendulum.now("utc").add(days=1).strftime("%Y-%m-%d")
rain = [
w
for forecast in data["list"]
for w in forecast["weather"]
if w["main"] == "Rain" and forecast["dt_txt"].startswith(tomorrow)
]
if not bool(rain):
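        # SKIP marks this task as skipped rather than failed; with Prefect's default
        # skip_on_upstream_skip behaviour the downstream Slack task is skipped as well.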
raise SKIP("There is no rain in the forecast for tomorrow.")
notification = SlackTask(
message="There is rain in the forecast for tomorrow - better take your umbrella out!",
webhook_secret="DAVID_SLACK_URL",
)
with Flow("Umbrella Flow") as flow:
forecast = pull_forecast(city=city, api_key=api_key)
rain = is_raining_tomorrow(forecast)
notification.set_upstream(rain)
|
normal
|
{
"blob_id": "7f52354487f85a0bf1783c8aa76f228ef17e6d6b",
"index": 5119,
"step-1": "<mask token>\n\n\n@task(max_retries=2, retry_delay=datetime.timedelta(seconds=5))\ndef pull_forecast(city, api_key):\n \"\"\"\n Extract the 5-day 3-hour forecast for the provided City.\n \"\"\"\n base_url = 'http://api.openweathermap.org/data/2.5/forecast?'\n url = base_url + 'appid=' + api_key + '&q=' + city\n r = requests.get(url)\n r.raise_for_status()\n data = r.json()\n return data\n\n\n@task\ndef is_raining_tomorrow(data):\n \"\"\"\n Given a list of hourly forecasts, returns a boolean specifying\n whether there is rain in tomorrow's forecast.\n \"\"\"\n pendulum.now('utc').add(days=1).strftime('%Y-%m-%d')\n rain = [w for forecast in data['list'] for w in forecast['weather'] if \n w['main'] == 'Rain' and forecast['dt_txt'].startswith(tomorrow)]\n if not bool(rain):\n raise SKIP('There is no rain in the forecast for tomorrow.')\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\n@task(max_retries=2, retry_delay=datetime.timedelta(seconds=5))\ndef pull_forecast(city, api_key):\n \"\"\"\n Extract the 5-day 3-hour forecast for the provided City.\n \"\"\"\n base_url = 'http://api.openweathermap.org/data/2.5/forecast?'\n url = base_url + 'appid=' + api_key + '&q=' + city\n r = requests.get(url)\n r.raise_for_status()\n data = r.json()\n return data\n\n\n@task\ndef is_raining_tomorrow(data):\n \"\"\"\n Given a list of hourly forecasts, returns a boolean specifying\n whether there is rain in tomorrow's forecast.\n \"\"\"\n pendulum.now('utc').add(days=1).strftime('%Y-%m-%d')\n rain = [w for forecast in data['list'] for w in forecast['weather'] if \n w['main'] == 'Rain' and forecast['dt_txt'].startswith(tomorrow)]\n if not bool(rain):\n raise SKIP('There is no rain in the forecast for tomorrow.')\n\n\n<mask token>\nwith Flow('Umbrella Flow') as flow:\n forecast = pull_forecast(city=city, api_key=api_key)\n rain = is_raining_tomorrow(forecast)\n notification.set_upstream(rain)\n",
"step-3": "<mask token>\ncity = Parameter(name='City', default='San Jose')\napi_key = Secret('WEATHER_API_KEY')\n\n\n@task(max_retries=2, retry_delay=datetime.timedelta(seconds=5))\ndef pull_forecast(city, api_key):\n \"\"\"\n Extract the 5-day 3-hour forecast for the provided City.\n \"\"\"\n base_url = 'http://api.openweathermap.org/data/2.5/forecast?'\n url = base_url + 'appid=' + api_key + '&q=' + city\n r = requests.get(url)\n r.raise_for_status()\n data = r.json()\n return data\n\n\n@task\ndef is_raining_tomorrow(data):\n \"\"\"\n Given a list of hourly forecasts, returns a boolean specifying\n whether there is rain in tomorrow's forecast.\n \"\"\"\n pendulum.now('utc').add(days=1).strftime('%Y-%m-%d')\n rain = [w for forecast in data['list'] for w in forecast['weather'] if \n w['main'] == 'Rain' and forecast['dt_txt'].startswith(tomorrow)]\n if not bool(rain):\n raise SKIP('There is no rain in the forecast for tomorrow.')\n\n\nnotification = SlackTask(message=\n 'There is rain in the forecast for tomorrow - better take your umbrella out!'\n , webhook_secret='DAVID_SLACK_URL')\nwith Flow('Umbrella Flow') as flow:\n forecast = pull_forecast(city=city, api_key=api_key)\n rain = is_raining_tomorrow(forecast)\n notification.set_upstream(rain)\n",
"step-4": "import datetime\nimport pendulum\nimport requests\nfrom prefect import task, Flow, Parameter\nfrom prefect.engine.signals import SKIP\nfrom prefect.tasks.notifications.slack_task import SlackTask\nfrom prefect.tasks.secrets import Secret\ncity = Parameter(name='City', default='San Jose')\napi_key = Secret('WEATHER_API_KEY')\n\n\n@task(max_retries=2, retry_delay=datetime.timedelta(seconds=5))\ndef pull_forecast(city, api_key):\n \"\"\"\n Extract the 5-day 3-hour forecast for the provided City.\n \"\"\"\n base_url = 'http://api.openweathermap.org/data/2.5/forecast?'\n url = base_url + 'appid=' + api_key + '&q=' + city\n r = requests.get(url)\n r.raise_for_status()\n data = r.json()\n return data\n\n\n@task\ndef is_raining_tomorrow(data):\n \"\"\"\n Given a list of hourly forecasts, returns a boolean specifying\n whether there is rain in tomorrow's forecast.\n \"\"\"\n pendulum.now('utc').add(days=1).strftime('%Y-%m-%d')\n rain = [w for forecast in data['list'] for w in forecast['weather'] if \n w['main'] == 'Rain' and forecast['dt_txt'].startswith(tomorrow)]\n if not bool(rain):\n raise SKIP('There is no rain in the forecast for tomorrow.')\n\n\nnotification = SlackTask(message=\n 'There is rain in the forecast for tomorrow - better take your umbrella out!'\n , webhook_secret='DAVID_SLACK_URL')\nwith Flow('Umbrella Flow') as flow:\n forecast = pull_forecast(city=city, api_key=api_key)\n rain = is_raining_tomorrow(forecast)\n notification.set_upstream(rain)\n",
"step-5": "import datetime\nimport pendulum\nimport requests\nfrom prefect import task, Flow, Parameter\nfrom prefect.engine.signals import SKIP\nfrom prefect.tasks.notifications.slack_task import SlackTask\nfrom prefect.tasks.secrets import Secret\n\n\ncity = Parameter(name=\"City\", default=\"San Jose\")\napi_key = Secret(\"WEATHER_API_KEY\")\n\n\n@task(max_retries=2, retry_delay=datetime.timedelta(seconds=5))\ndef pull_forecast(city, api_key):\n \"\"\"\n Extract the 5-day 3-hour forecast for the provided City.\n \"\"\"\n base_url = \"http://api.openweathermap.org/data/2.5/forecast?\"\n url = base_url + \"appid=\" + api_key + \"&q=\" + city\n r = requests.get(url)\n r.raise_for_status()\n data = r.json()\n return data\n\n\n@task\ndef is_raining_tomorrow(data):\n \"\"\"\n Given a list of hourly forecasts, returns a boolean specifying\n whether there is rain in tomorrow's forecast.\n \"\"\"\n pendulum.now(\"utc\").add(days=1).strftime(\"%Y-%m-%d\")\n rain = [\n w\n for forecast in data[\"list\"]\n for w in forecast[\"weather\"]\n if w[\"main\"] == \"Rain\" and forecast[\"dt_txt\"].startswith(tomorrow)\n ]\n if not bool(rain):\n raise SKIP(\"There is no rain in the forecast for tomorrow.\")\n\n\nnotification = SlackTask(\n message=\"There is rain in the forecast for tomorrow - better take your umbrella out!\",\n webhook_secret=\"DAVID_SLACK_URL\",\n)\n\n\nwith Flow(\"Umbrella Flow\") as flow:\n forecast = pull_forecast(city=city, api_key=api_key)\n rain = is_raining_tomorrow(forecast)\n notification.set_upstream(rain)\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
sheik=['a','e','i','o','u','A','E','I','O','U']
s=raw_input()
if(s in sheik):
print('Vowel')
elif(len(s)==1 and s.isalpha()):
print('Consonant')
else:
print('invalid')
|
normal
|
{
"blob_id": "0fb8a9b1073446a62b46a802da69b66e78533c2a",
"index": 7293,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif s in sheik:\n print('Vowel')\nelif s != sheik:\n print('Consonant')\nelse:\n print('invalid')\n",
"step-3": "sheik = ['a', 'e', 'i', 'o', 'u', 'A', 'E', 'I', 'O', 'U']\ns = raw_input()\nif s in sheik:\n print('Vowel')\nelif s != sheik:\n print('Consonant')\nelse:\n print('invalid')\n",
"step-4": "sheik=['a','e','i','o','u','A','E','I','O','U']\ns=raw_input()\nif(s in sheik):\n\tprint('Vowel')\nelif(s!=sheik):\n\tprint('Consonant')\nelse:\n\tprint('invalid')\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
'''
* @Author: Mohammad Fatha.
* @Date: 2021-09-17 19:50
* @Last Modified by: Mohammad Fatha
* @Last Modified time: 2021-09-17 19:55
* @Title: Gambler Game
'''
import random
def gamblerProblem():
"""
Description:
        This function simulates a gambler who starts with a stake and places fair $1 bets
        until he/she goes broke (i.e. has no money), reaches the goal, or runs out of bets.
        It keeps track of the number of times he/she wins and the number of bets made,
        then prints the results of this single run.
"""
stake=int(input("Enter The The Stake Amount:"))
goal=int(input("Enter The Amount You Want To Win:"))
bet_made=int(input("Enter The The Number Of Bets You Want To Make:"))
no_of_times_won=0
no_of_time_lost=0
no_of_bets_made=0
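    # keep betting until the gambler goes broke, reaches the goal, or uses up all bets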
while(stake >= 0 and stake <= goal and no_of_bets_made < bet_made):
no_of_bets_made+=1
gambler_choice=random.randint(0, 1) #generates a random number 0 or 1
if gambler_choice==1: #if the random number generated is 0
no_of_times_won+=1
stake=stake+1
else:
no_of_time_lost+=1
stake=stake-1
    percentage_win = (no_of_times_won/no_of_bets_made)*100
print("Number Of Times Won",no_of_times_won)
print("Percentage Of Win", percentage_win)
print("Percentage Of Loss", 100-percentage_win)
print("Number Of Bets Made", no_of_bets_made)
if __name__ == '__main__':
gamblerProblem()
|
normal
|
{
"blob_id": "68904be892968d4a1d82a59a31b95a8133a30832",
"index": 8790,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef gamblerProblem():\n \"\"\"\n Description:\n This function Simulates a gambler who start with stake and place fair 1 bets until\n he/she goes broke (i.e. has no money) or reach $goal. Keeps track of the number of\n times he/she wins and the number of bets he/she makes. Run the experiment N\n times, averages the results, print the results.\n \"\"\"\n stake = int(input('Enter The The Stake Amount:'))\n goal = int(input('Enter The Amount You Want To Win:'))\n bet_made = int(input('Enter The The Number Of Bets You Want To Make:'))\n no_of_times_won = 0\n no_of_time_lost = 0\n no_of_bets_made = 0\n while stake >= 0 and stake <= goal and no_of_bets_made < bet_made:\n no_of_bets_made += 1\n gambler_choice = random.randint(0, 1)\n if gambler_choice == 1:\n no_of_times_won += 1\n stake = stake + 1\n else:\n no_of_time_lost += 1\n stake = stake - 1\n percentage_win = no_of_times_won / bet_made * 100\n print('Number Of Times Won', no_of_times_won)\n print('Percentage Of Win', percentage_win)\n print('Percentage Of Loss', 100 - percentage_win)\n print('Number Of Bets Made', no_of_bets_made)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef gamblerProblem():\n \"\"\"\n Description:\n This function Simulates a gambler who start with stake and place fair 1 bets until\n he/she goes broke (i.e. has no money) or reach $goal. Keeps track of the number of\n times he/she wins and the number of bets he/she makes. Run the experiment N\n times, averages the results, print the results.\n \"\"\"\n stake = int(input('Enter The The Stake Amount:'))\n goal = int(input('Enter The Amount You Want To Win:'))\n bet_made = int(input('Enter The The Number Of Bets You Want To Make:'))\n no_of_times_won = 0\n no_of_time_lost = 0\n no_of_bets_made = 0\n while stake >= 0 and stake <= goal and no_of_bets_made < bet_made:\n no_of_bets_made += 1\n gambler_choice = random.randint(0, 1)\n if gambler_choice == 1:\n no_of_times_won += 1\n stake = stake + 1\n else:\n no_of_time_lost += 1\n stake = stake - 1\n percentage_win = no_of_times_won / bet_made * 100\n print('Number Of Times Won', no_of_times_won)\n print('Percentage Of Win', percentage_win)\n print('Percentage Of Loss', 100 - percentage_win)\n print('Number Of Bets Made', no_of_bets_made)\n\n\nif __name__ == '__main__':\n gamblerProblem()\n",
"step-4": "<mask token>\nimport random\n\n\ndef gamblerProblem():\n \"\"\"\n Description:\n This function Simulates a gambler who start with stake and place fair 1 bets until\n he/she goes broke (i.e. has no money) or reach $goal. Keeps track of the number of\n times he/she wins and the number of bets he/she makes. Run the experiment N\n times, averages the results, print the results.\n \"\"\"\n stake = int(input('Enter The The Stake Amount:'))\n goal = int(input('Enter The Amount You Want To Win:'))\n bet_made = int(input('Enter The The Number Of Bets You Want To Make:'))\n no_of_times_won = 0\n no_of_time_lost = 0\n no_of_bets_made = 0\n while stake >= 0 and stake <= goal and no_of_bets_made < bet_made:\n no_of_bets_made += 1\n gambler_choice = random.randint(0, 1)\n if gambler_choice == 1:\n no_of_times_won += 1\n stake = stake + 1\n else:\n no_of_time_lost += 1\n stake = stake - 1\n percentage_win = no_of_times_won / bet_made * 100\n print('Number Of Times Won', no_of_times_won)\n print('Percentage Of Win', percentage_win)\n print('Percentage Of Loss', 100 - percentage_win)\n print('Number Of Bets Made', no_of_bets_made)\n\n\nif __name__ == '__main__':\n gamblerProblem()\n",
"step-5": "'''\n* @Author: Mohammad Fatha.\n* @Date: 2021-09-17 19:50 \n* @Last Modified by: Mohammad Fatha\n* @Last Modified time: 2021-09-17 19:55\n* @Title: Gambler Game\n'''\nimport random\n \ndef gamblerProblem():\n \"\"\"\n Description:\n This function Simulates a gambler who start with stake and place fair 1 bets until\n he/she goes broke (i.e. has no money) or reach $goal. Keeps track of the number of\n times he/she wins and the number of bets he/she makes. Run the experiment N\n times, averages the results, print the results.\n \"\"\"\n stake=int(input(\"Enter The The Stake Amount:\"))\n goal=int(input(\"Enter The Amount You Want To Win:\"))\n bet_made=int(input(\"Enter The The Number Of Bets You Want To Make:\"))\n no_of_times_won=0\n no_of_time_lost=0\n no_of_bets_made=0\n\n while(stake >= 0 and stake <= goal and no_of_bets_made < bet_made):\n no_of_bets_made+=1\n gambler_choice=random.randint(0, 1) #generates a random number 0 or 1\n \n if gambler_choice==1: #if the random number generated is 0\n no_of_times_won+=1\n stake=stake+1 \n else:\n no_of_time_lost+=1\n stake=stake-1\n\n percentage_win = (no_of_times_won/bet_made)*100\n print(\"Number Of Times Won\",no_of_times_won)\n print(\"Percentage Of Win\", percentage_win) \n print(\"Percentage Of Loss\", 100-percentage_win)\n print(\"Number Of Bets Made\", no_of_bets_made) \n \n\nif __name__ == '__main__':\n gamblerProblem() ",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
DEFAULT_SERVER_LISTEN_PORT = 2011
DEFAULT_CLIENT_LISTEN_PORT = 2012
import pickle
import socket
from player import Player
from averageddata import *
import zlib
import g
import pygame
from collections import defaultdict
from periodic import Periodic
import random
from projectile import Projectile
TICKTIME = 0.05
class NetCommon:
netEntities = { "player": Player, "projectile":Projectile }
def __init__(self, listenPort):
#Make a UDP socket
self.sock = socket.socket( socket.AF_INET, socket.SOCK_DGRAM )
self.sock.bind( ("0.0.0.0", listenPort) )
self.sock.settimeout(0.01)
self.packetSize = 0
self.t = 0
self.buf = ""
self.packetTimestamps = []
self.packetsPerSecond = 0
self.simulatedLatency = 0
self.simulatedRandomLatencyVal = 0
self.simulatedPacketloss = 0
self.simulatedRandomLatency = 0
self.simulatedPackets = []
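		# the simulated* knobs let update() artificially delay or drop incoming packets
		# so the networking code can be exercised under bad network conditions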
self.packet_outbound_last_id = defaultdict(lambda:0)
self.packet_inbound_last_id = defaultdict(lambda:0)
self.packetloss = defaultdict(lambda:0)
self.ensured_send_packet_ids = defaultdict(lambda:0)
self.ensured_sent_packets = defaultdict(dict)
self.ensured_recv_packet_ids = defaultdict(lambda:-1)
self.ensured_packets_received_early = defaultdict(list)
self.resend_unconfirmed_timer = 0
self.averagedData = AveragedData()
self.netinfotimer = 1.0
self.debug_lines = []
self.periodic = Periodic()
self.periodic.add(self.resendUnconfirmed, 0.5)
def readPacket(self, info, data):
self.averagedData.add(self.t, "packets")
self.averagedData.add(self.t, "packetsize", len(data))
unpacked = pickle.loads(zlib.decompress(data))
addr, port = info
addrportstr = addr + ":" + str(port)
if "ensured_id" in unpacked:
if unpacked["ensured_id"] == self.ensured_recv_packet_ids[addrportstr]+1:
print "recv " + str(unpacked["ensured_id"])
self.ensured_recv_packet_ids[addrportstr] += 1
self.sendReceipt(addr, port, unpacked["ensured_id"])
elif unpacked["ensured_id"] < self.ensured_recv_packet_ids[addrportstr]+1:
print unpacked
print "got ensured packet twice; resending receipt for " + str(unpacked["ensured_id"])
self.sendReceipt(addr, port, unpacked["ensured_id"])
return []
else:
print "got packet " + str(unpacked["ensured_id"]) + " before " + str(self.ensured_recv_packet_ids[addrportstr]+1)
self.ensured_packets_received_early[addrportstr].append(unpacked)
return []
allPackets = []
to_remove = []
self.ensured_packets_received_early[addrportstr].sort(lambda a,b:cmp(a["ensured_id"], b["ensured_id"]))
for p in self.ensured_packets_received_early[addrportstr]:
print "resolving old " + str(p["ensured_id"])
if p["ensured_id"] <= self.ensured_recv_packet_ids[addrportstr]+1:
self.ensured_recv_packet_ids[addrportstr] += 1
self.sendReceipt(addr, port, p["ensured_id"])
allPackets.extend(self.readUnpackedPacket(p, addrportstr))
to_remove.append(p)
for p in to_remove:
self.ensured_packets_received_early[addrportstr].remove(p)
allPackets.extend(self.readUnpackedPacket(unpacked, addrportstr))
return allPackets
def sendReceipt(self, addr, port, q):
self.sendPacket({"type":"confirmReceipt","other_ensured_id":q}, addr, port)
def readUnpackedPacket(self, unpacked, addrportstr):
pid = unpacked["packet_id"]
lid = self.packet_inbound_last_id[addrportstr]
if pid > lid + 1:
self.packetloss[addrportstr] += 1
self.packet_inbound_last_id[addrportstr] = pid
if self.packet_inbound_last_id[addrportstr] > 0:
packetloss = self.packetloss[addrportstr] / float(self.packet_inbound_last_id[addrportstr])
self.averagedData.add(self.t, "packetloss_" + addrportstr, packetloss)
return [unpacked]
def sendPacket(self, data, addr, port):
print "packet: " + data["type"]
addrportstr = addr + ":" + str(port)
data["packet_id"] = self.packet_outbound_last_id[addrportstr]
self.packet_outbound_last_id[addrportstr] += 1
self.sock.sendto(zlib.compress(pickle.dumps(data, 2)), (addr, port))
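	# Reliable delivery on top of UDP: "ensured" packets carry a per-peer sequence id,
	# are cached in ensured_sent_packets until a confirmReceipt arrives, and are resent
	# by resendUnconfirmed(); the receiver buffers out-of-order arrivals and processes
	# them strictly in sequence (see readPacket).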
def sendEnsuredPacket(self, data, addr, port):
addrportstr = addr + ":" + str(port)
ensured_id = self.ensured_send_packet_ids[addrportstr]
print "packet: " + data["type"] + " (ensured id: " + str(ensured_id) + ")"
data["packet_id"] = self.packet_outbound_last_id[addrportstr]
self.packet_outbound_last_id[addrportstr] += 1
data["ensured_id"] = ensured_id
cdata = zlib.compress(pickle.dumps(data, 2))
sent = {
"id":ensured_id,
"data":cdata,
"time":self.t,
"info":(addr,port)
}
self.ensured_sent_packets[addrportstr][ensured_id] = sent
self.sock.sendto(cdata, (addr, port))
self.ensured_send_packet_ids[addrportstr] = ensured_id + 1
def process_confirmReceipt(self, data, game, info):
(addr, port) = info
addrportstr = addr + ":" + str(port)
pending_packets = self.ensured_sent_packets[addrportstr]
pid = data["other_ensured_id"]
print "got receipt for " + str(pid)
if pid in pending_packets:
del pending_packets[pid]
else:
			if pid >= self.ensured_send_packet_ids[addrportstr]:
print "got receipt for packet i haven't sent yet!!"
def update(self, game, dt):
self.game = game
self.t = pygame.time.get_ticks() / 1000.0
self.periodic.update()
self.packetsPerSecond = self.averagedData.get_ct(self.t, "packets", 1.0)
self.packetSize = self.averagedData.get_sum(self.t, "packetsize", 1.0)
allPackets = []
try:
(data, info) = self.sock.recvfrom(4096)
#self.packetSize = len(data)
if self.simulatedPacketloss > 0 and random.random() < self.simulatedPacketloss:
pass
else:
allPackets = self.readPacket(info, data)
except socket.timeout:
pass
except socket.error as err:
#print err
pass
#print self.simulatedPackets
if self.simulatedLatency == 0:
for d in allPackets:
self.process(d, game, info)
else:
off = self.simulatedLatency + self.simulatedRandomLatency * random.random()
self.simulatedPackets.extend( [(d, off, info) for d in allPackets] )
thisFramePackets = [ s for s in self.simulatedPackets if s[1] <= 0]
self.simulatedPackets = [ s for s in self.simulatedPackets if s[1] > 0 ]
for (p, t, info) in thisFramePackets:
self.process(p, game, info)
self.simulatedPackets = [ (s[0], s[1] - dt, s[2]) for s in self.simulatedPackets ]
def resendUnconfirmed(self):
for k,packets in self.ensured_sent_packets.items():
for i,packet in packets.items():
if self.t > packet["time"] + 1.5:
print "resending unreceipted packet: " + str(packet["id"])
self.sock.sendto(packet["data"], packet["info"])
def process(self, data, game, info):
if(hasattr(self, "process_" + data["type"])):
f = getattr(self, "process_" + data["type"])
f(data, game, info)
else:
print("Got packet of type '" + data["type"] + "' but there is no process_" + data["type"] + " method to handle it." )
|
normal
|
{
"blob_id": "b7be9fd366d03068a5d6c3cee703d579b9866fd3",
"index": 7992,
"step-1": "DEFAULT_SERVER_LISTEN_PORT = 2011\nDEFAULT_CLIENT_LISTEN_PORT = 2012\n\nimport pickle\nimport socket\nfrom player import Player\nfrom averageddata import *\nimport zlib\nimport g\nimport pygame\nfrom collections import defaultdict\nfrom periodic import Periodic\nimport random\nfrom projectile import Projectile\n\nTICKTIME = 0.05\n\nclass NetCommon:\n\tnetEntities = { \"player\": Player, \"projectile\":Projectile }\n\tdef __init__(self, listenPort):\n\t\t#Make a UDP socket\n\t\tself.sock = socket.socket( socket.AF_INET, socket.SOCK_DGRAM )\n\t\tself.sock.bind( (\"0.0.0.0\", listenPort) )\n\t\tself.sock.settimeout(0.01)\n\t\tself.packetSize = 0\n\t\tself.t = 0\n\t\t\n\t\tself.buf = \"\"\n\n\t\tself.packetTimestamps = []\n\t\tself.packetsPerSecond = 0\n\t\t\n\t\tself.simulatedLatency = 0\n\t\tself.simulatedRandomLatencyVal = 0\n\t\tself.simulatedPacketloss = 0\n\n\t\tself.simulatedRandomLatency = 0\n\t\tself.simulatedPackets = []\n\n\t\tself.packet_outbound_last_id = defaultdict(lambda:0)\n\t\tself.packet_inbound_last_id = defaultdict(lambda:0)\n\t\tself.packetloss = defaultdict(lambda:0)\n\n\t\tself.ensured_send_packet_ids = defaultdict(lambda:0)\n\t\tself.ensured_sent_packets = defaultdict(dict)\n\t\tself.ensured_recv_packet_ids = defaultdict(lambda:-1)\n\t\tself.ensured_packets_received_early = defaultdict(list)\n\t\tself.resend_unconfirmed_timer = 0\n\n\t\tself.averagedData = AveragedData()\n\n\t\tself.netinfotimer = 1.0\n\n\t\tself.debug_lines = []\n\t\tself.periodic = Periodic()\n\t\tself.periodic.add(self.resendUnconfirmed, 0.5)\n\n\n\tdef readPacket(self, info, data):\n\t\tself.averagedData.add(self.t, \"packets\")\n\t\tself.averagedData.add(self.t, \"packetsize\", len(data))\n\t\tunpacked = pickle.loads(zlib.decompress(data))\n\n\t\taddr, port = info\n\t\taddrportstr = addr + \":\" + str(port)\n\n\t\tif \"ensured_id\" in unpacked:\n\t\t\tif unpacked[\"ensured_id\"] == self.ensured_recv_packet_ids[addrportstr]+1:\n\t\t\t\tprint \"recv \" + str(unpacked[\"ensured_id\"])\n\t\t\t\tself.ensured_recv_packet_ids[addrportstr] += 1\n\t\t\t\tself.sendReceipt(addr, port, unpacked[\"ensured_id\"])\n\t\t\telif unpacked[\"ensured_id\"] < self.ensured_recv_packet_ids[addrportstr]+1:\n\t\t\t\tprint unpacked\n\t\t\t\tprint \"got ensured packet twice; resending receipt for \" + str(unpacked[\"ensured_id\"])\n\t\t\t\tself.sendReceipt(addr, port, unpacked[\"ensured_id\"])\n\t\t\t\treturn []\n\t\t\telse:\n\t\t\t\tprint \"got packet \" + str(unpacked[\"ensured_id\"]) + \" before \" + str(self.ensured_recv_packet_ids[addrportstr]+1)\n\t\t\t\tself.ensured_packets_received_early[addrportstr].append(unpacked)\n\t\t\t\treturn []\n\n\t\tallPackets = []\n\t\tto_remove = []\n\t\tself.ensured_packets_received_early[addrportstr].sort(lambda a,b:cmp(a[\"ensured_id\"], b[\"ensured_id\"]))\n\t\tfor p in self.ensured_packets_received_early[addrportstr]:\n\t\t\tprint \"resolving old \" + str(p[\"ensured_id\"])\n\t\t\tif p[\"ensured_id\"] <= self.ensured_recv_packet_ids[addrportstr]+1:\n\t\t\t\tself.ensured_recv_packet_ids[addrportstr] += 1\n\t\t\t\tself.sendReceipt(addr, port, p[\"ensured_id\"])\n\t\t\t\tallPackets.extend(self.readUnpackedPacket(p, addrportstr))\n\t\t\t\tto_remove.append(p)\n\t\tfor p in to_remove:\n\t\t\tself.ensured_packets_received_early[addrportstr].remove(p)\n\n\t\tallPackets.extend(self.readUnpackedPacket(unpacked, addrportstr))\n\t\treturn allPackets\n\n\tdef sendReceipt(self, addr, port, q):\n\t\tself.sendPacket({\"type\":\"confirmReceipt\",\"other_ensured_id\":q}, addr, 
port)\n\n\tdef readUnpackedPacket(self, unpacked, addrportstr):\n\t\tpid = unpacked[\"packet_id\"]\n\t\tlid = self.packet_inbound_last_id[addrportstr]\n\t\tif pid > lid + 1:\n\t\t\tself.packetloss[addrportstr] += 1\n\t\tself.packet_inbound_last_id[addrportstr] = pid\n\n\t\tif self.packet_inbound_last_id[addrportstr] > 0:\n\t\t\tpacketloss = self.packetloss[addrportstr] / float(self.packet_inbound_last_id[addrportstr])\n\t\t\tself.averagedData.add(self.t, \"packetloss_\" + addrportstr, packetloss)\n\n\t\treturn [unpacked]\n\n\tdef sendPacket(self, data, addr, port):\n\t\tprint \"packet: \" + data[\"type\"]\n\t\taddrportstr = addr + \":\" + str(port)\n\t\tdata[\"packet_id\"] = self.packet_outbound_last_id[addrportstr]\n\t\tself.packet_outbound_last_id[addrportstr] += 1\n\t\tself.sock.sendto(zlib.compress(pickle.dumps(data, 2)), (addr, port))\n\n\tdef sendEnsuredPacket(self, data, addr, port):\n\t\taddrportstr = addr + \":\" + str(port)\t\t\n\t\tensured_id = self.ensured_send_packet_ids[addrportstr]\n\t\tprint \"packet: \" + data[\"type\"] + \" (ensured id: \" + str(ensured_id) + \")\"\n\t\tdata[\"packet_id\"] = self.packet_outbound_last_id[addrportstr]\n\t\tself.packet_outbound_last_id[addrportstr] += 1\t\t\n\t\tdata[\"ensured_id\"] = ensured_id\n\t\tcdata = zlib.compress(pickle.dumps(data, 2))\n\t\tsent = {\n\t\t\t\"id\":ensured_id,\n\t\t\t\"data\":cdata,\n\t\t\t\"time\":self.t,\n\t\t\t\"info\":(addr,port)\n\t\t}\n\t\tself.ensured_sent_packets[addrportstr][ensured_id] = sent\n\t\tself.sock.sendto(cdata, (addr, port))\n\t\tself.ensured_send_packet_ids[addrportstr] = ensured_id + 1\n\n\tdef process_confirmReceipt(self, data, game, info):\n\t\t(addr, port) = info\n\t\taddrportstr = addr + \":\" + str(port)\n\t\tpending_packets = self.ensured_sent_packets[addrportstr]\n\t\tpid = data[\"other_ensured_id\"]\n\t\tprint \"got receipt for \" + str(pid)\n\t\tif pid in pending_packets:\n\t\t\tdel pending_packets[pid]\n\t\telse:\n\t\t\tif pid > self.ensured_send_packet_ids:\n\t\t\t\tprint \"got receipt for packet i haven't sent yet!!\"\n\n\tdef update(self, game, dt):\n\t\tself.game = game\n\t\t\n\t\tself.t = pygame.time.get_ticks() / 1000.0\n\t\tself.periodic.update()\n\n\t\tself.packetsPerSecond = self.averagedData.get_ct(self.t, \"packets\", 1.0)\n\t\tself.packetSize = self.averagedData.get_sum(self.t, \"packetsize\", 1.0)\n\n\t\tallPackets = []\n\t\ttry:\n\t\t\t(data, info) = self.sock.recvfrom(4096)\n\t\t\t#self.packetSize = len(data)\n\t\t\tif self.simulatedPacketloss > 0 and random.random() < self.simulatedPacketloss:\n\t\t\t\tpass\n\t\t\telse:\n\t\t\t\tallPackets = self.readPacket(info, data)\n\t\texcept socket.timeout:\n\t\t\tpass\n\t\texcept socket.error as err:\n\t\t\t#print err\n\t\t\tpass\n\n\t\t#print self.simulatedPackets\n\t\tif self.simulatedLatency == 0:\n\t\t\tfor d in allPackets:\n\t\t\t\tself.process(d, game, info)\n\t\telse:\n\t\t\toff = self.simulatedLatency + self.simulatedRandomLatency * random.random()\n\t\t\tself.simulatedPackets.extend( [(d, off, info) for d in allPackets] )\n\t\t\tthisFramePackets = [ s for s in self.simulatedPackets if s[1] <= 0]\n\t\t\tself.simulatedPackets = [ s for s in self.simulatedPackets if s[1] > 0 ]\n\t\t\tfor (p, t, info) in thisFramePackets:\n\t\t\t\tself.process(p, game, info)\n\t\t\tself.simulatedPackets = [ (s[0], s[1] - dt, s[2]) for s in self.simulatedPackets ]\n\n\n\tdef resendUnconfirmed(self):\n\t\tfor k,packets in self.ensured_sent_packets.items():\n\t\t\tfor i,packet in packets.items():\n\t\t\t\tif self.t > packet[\"time\"] + 
1.5:\n\t\t\t\t\tprint \"resending unreceipted packet: \" + str(packet[\"id\"])\n\t\t\t\t\tself.sock.sendto(packet[\"data\"], packet[\"info\"])\t\t\n\n\tdef process(self, data, game, info):\n\t\tif(hasattr(self, \"process_\" + data[\"type\"])):\n\t\t\tf = getattr(self, \"process_\" + data[\"type\"])\n\t\t\tf(data, game, info)\n\t\telse:\n\t\t\tprint(\"Got packet of type '\" + data[\"type\"] + \"' but there is no process_\" + data[\"type\"] + \" method to handle it.\" )\n\t\t\t",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
'''
Updating UI data from a worker thread (passing data between two threads)
'''
from PyQt5.QtCore import QThread , pyqtSignal, QDateTime
from PyQt5.QtWidgets import QApplication, QDialog, QLineEdit
import time
import sys
class BackendThread(QThread):
update_date = pyqtSignal(str)
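    # Emitted from the worker thread; Qt delivers it to the GUI thread through a
    # queued connection, so the QLineEdit is only ever touched on the main thread.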
def run(self):
while True:
data = QDateTime.currentDateTime()
currentTime = data.toString("yyyy-MM-dd hh:mm:ss")
self.update_date.emit(str(currentTime))
time.sleep(1)
class ThreadUpdateUI(QDialog):
def __init__(self):
QDialog.__init__(self)
self.setWindowTitle('多线程更新UI数据')
self.resize(400,100)
self.input = QLineEdit(self)
self.input.resize(400,100)
self.initUI()
def initUI(self):
self.backend = BackendThread()
self.backend.update_date.connect(self.handleDisplay)
self.backend.start()
def handleDisplay(self,data):
self.input.setText(data)
if __name__ == '__main__':
app = QApplication(sys.argv)
example = ThreadUpdateUI()
example.show()
sys.exit(app.exec_())
|
normal
|
{
"blob_id": "ec625bf57388281b3cbd464459fc3ad1c60b7db9",
"index": 3305,
"step-1": "<mask token>\n\n\nclass BackendThread(QThread):\n <mask token>\n\n def run(self):\n while True:\n data = QDateTime.currentDateTime()\n currentTime = data.toString('yyyy-MM-dd hh:mm:ss')\n self.update_date.emit(str(currentTime))\n time.sleep(1)\n\n\nclass ThreadUpdateUI(QDialog):\n\n def __init__(self):\n QDialog.__init__(self)\n self.setWindowTitle('多线程更新UI数据')\n self.resize(400, 100)\n self.input = QLineEdit(self)\n self.input.resize(400, 100)\n self.initUI()\n\n def initUI(self):\n self.backend = BackendThread()\n self.backend.update_date.connect(self.handleDisplay)\n self.backend.start()\n\n def handleDisplay(self, data):\n self.input.setText(data)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass BackendThread(QThread):\n update_date = pyqtSignal(str)\n\n def run(self):\n while True:\n data = QDateTime.currentDateTime()\n currentTime = data.toString('yyyy-MM-dd hh:mm:ss')\n self.update_date.emit(str(currentTime))\n time.sleep(1)\n\n\nclass ThreadUpdateUI(QDialog):\n\n def __init__(self):\n QDialog.__init__(self)\n self.setWindowTitle('多线程更新UI数据')\n self.resize(400, 100)\n self.input = QLineEdit(self)\n self.input.resize(400, 100)\n self.initUI()\n\n def initUI(self):\n self.backend = BackendThread()\n self.backend.update_date.connect(self.handleDisplay)\n self.backend.start()\n\n def handleDisplay(self, data):\n self.input.setText(data)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass BackendThread(QThread):\n update_date = pyqtSignal(str)\n\n def run(self):\n while True:\n data = QDateTime.currentDateTime()\n currentTime = data.toString('yyyy-MM-dd hh:mm:ss')\n self.update_date.emit(str(currentTime))\n time.sleep(1)\n\n\nclass ThreadUpdateUI(QDialog):\n\n def __init__(self):\n QDialog.__init__(self)\n self.setWindowTitle('多线程更新UI数据')\n self.resize(400, 100)\n self.input = QLineEdit(self)\n self.input.resize(400, 100)\n self.initUI()\n\n def initUI(self):\n self.backend = BackendThread()\n self.backend.update_date.connect(self.handleDisplay)\n self.backend.start()\n\n def handleDisplay(self, data):\n self.input.setText(data)\n\n\nif __name__ == '__main__':\n app = QApplication(sys.argv)\n example = ThreadUpdateUI()\n example.show()\n sys.exit(app.exec_())\n",
"step-4": "<mask token>\nfrom PyQt5.QtCore import QThread, pyqtSignal, QDateTime\nfrom PyQt5.QtWidgets import QApplication, QDialog, QLineEdit\nimport time\nimport sys\n\n\nclass BackendThread(QThread):\n update_date = pyqtSignal(str)\n\n def run(self):\n while True:\n data = QDateTime.currentDateTime()\n currentTime = data.toString('yyyy-MM-dd hh:mm:ss')\n self.update_date.emit(str(currentTime))\n time.sleep(1)\n\n\nclass ThreadUpdateUI(QDialog):\n\n def __init__(self):\n QDialog.__init__(self)\n self.setWindowTitle('多线程更新UI数据')\n self.resize(400, 100)\n self.input = QLineEdit(self)\n self.input.resize(400, 100)\n self.initUI()\n\n def initUI(self):\n self.backend = BackendThread()\n self.backend.update_date.connect(self.handleDisplay)\n self.backend.start()\n\n def handleDisplay(self, data):\n self.input.setText(data)\n\n\nif __name__ == '__main__':\n app = QApplication(sys.argv)\n example = ThreadUpdateUI()\n example.show()\n sys.exit(app.exec_())\n",
"step-5": "'''\n\n多线程更新UI数据(在两个线程中传递数据)\n\n'''\n\nfrom PyQt5.QtCore import QThread , pyqtSignal, QDateTime\nfrom PyQt5.QtWidgets import QApplication, QDialog, QLineEdit\nimport time\nimport sys\n\n\nclass BackendThread(QThread):\n update_date = pyqtSignal(str)\n\n def run(self):\n while True:\n data = QDateTime.currentDateTime()\n currentTime = data.toString(\"yyyy-MM-dd hh:mm:ss\")\n self.update_date.emit(str(currentTime))\n time.sleep(1)\nclass ThreadUpdateUI(QDialog):\n def __init__(self):\n QDialog.__init__(self)\n self.setWindowTitle('多线程更新UI数据')\n self.resize(400,100)\n self.input = QLineEdit(self)\n self.input.resize(400,100)\n\n self.initUI()\n def initUI(self):\n self.backend = BackendThread()\n self.backend.update_date.connect(self.handleDisplay)\n\n self.backend.start()\n\n def handleDisplay(self,data):\n self.input.setText(data)\nif __name__ == '__main__':\n app = QApplication(sys.argv)\n example = ThreadUpdateUI()\n example.show()\n sys.exit(app.exec_())",
"step-ids": [
6,
7,
8,
9,
10
]
}
|
[
6,
7,
8,
9,
10
] |
#!/usr/bin/env python3
# coding=utf-8
# title :paramiko_sftp.py
# description :
# author :JackieTsui
# organization :pytoday.org
# date :1/16/18 9:22 PM
# email :[email protected]
# notes :
# ==================================================
# Import the module needed to run the script
import paramiko
import os,sys,time
jumpip = "192.168.10.1"
jumpuser = "jackie"
jumppass = "123456"
hostname = "192.168.10.2"
user = "root"
password = "654321"
tmpdir = "/tmp"
remotedir = "/data"
localpath = "/home/nginx_access.tar.gz"
tmppath = tmpdir + "/nginx_access.tar.gz"
remotepath = remotedir + "/nginx_access_hd.tar.gz"
port = 22
passinfo = "'s password: "
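# Flow: stage the local archive on the jump host over SFTP, then drive an interactive
# shell on the jump host to scp it on to the target machine, answering the host-key
# and password prompts programmatically.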
paramiko.util.log_to_file('syslogin.log')
t = paramiko.Transport((jumpip, port))
t.connect(username=jumpuser, password=jumppass)
sftp = paramiko.SFTPClient.from_transport(t)
sftp.put(localpath, tmppath)
sftp.close()
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
# invoke_shell() needs an established session, so connect to the jump host first
ssh.connect(jumpip, port=port, username=jumpuser, password=jumppass)
channel = ssh.invoke_shell()
channel.settimeout(10)
buff = ""
resp = ""
channel.send("scp " + tmppath + " " + user + "@" + hostname + ":" + remotepath + "\n")
while not buff.endswith(passinfo):
try:
        resp = channel.recv(9999).decode()  # recv() returns bytes on Python 3
except Exception as e:
print("Error info: " + str(e))
channel.close()
ssh.close()
sys.exit()
buff += resp
if not buff.find("yes/no") == -1:
channel.send("yes\n")
buff = ""
channel.send(password + "\n")
buff = ""
while not buff.endswith("# "):
    resp = channel.recv(9999).decode()
if not resp.find(passinfo) == -1:
print("Error info: Auth failed.")
channel.close()
ssh.close()
sys.exit()
buff += resp
print(buff)
channel.close()
ssh.close()
|
normal
|
{
"blob_id": "64cf6b03fb68be8a23c6e87c8d68d0a42db0eb54",
"index": 6451,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nparamiko.util.log_to_file('syslogin.log')\n<mask token>\nt.connect(username=jumpuser, password=jumppass)\n<mask token>\nsftp.put(localpath, remotepath)\nsftp.close()\n<mask token>\nssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n<mask token>\nchannel.settimeout(10)\n<mask token>\nchannel.send('scp ' + tmppath + ' ' + user + '@' + hostname + ':' +\n remotepath + '\\n')\nwhile not buff.endswith(passinfo):\n try:\n resp = channel.recv(9999)\n except Exception as e:\n print('Error info: ' + str(e))\n channel.close()\n ssh.close()\n sys.exit()\n buff += resp\n if not buff.find('yes/no') == -1:\n channel.send('yes\\n')\n buff = ''\nchannel.send(password + '\\n')\n<mask token>\nwhile not buff.endswith('# '):\n resp = channel.recv(9999)\n if not resp.find(passinfo) == -1:\n print('Error info: Auth failed.')\n channel.close()\n ssh.close()\n sys.exit()\n buff += resp\nprint(buff)\nchannel.close()\nssh.close()\n",
"step-3": "<mask token>\njumpip = '192.168.10.1'\njumpuser = 'jackie'\njumppass = '123456'\nhostname = '192.168.10.2'\nuser = 'root'\npassword = '654321'\ntmpdir = '/tmp'\nremotedir = '/data'\nlocalpath = '/home/nginx_access.tar.gz'\ntmppath = tmpdir + '/nginx_access.tar.gz'\nremotepath = remotedir + '/nginx_access_hd.tar.gz'\nport = 22\npassinfo = \"'s password: \"\nparamiko.util.log_to_file('syslogin.log')\nt = paramiko.Transport((jumpip, port))\nt.connect(username=jumpuser, password=jumppass)\nsftp = paramiko.SFTPClient.from_transport(t)\nsftp.put(localpath, remotepath)\nsftp.close()\nssh = paramiko.SSHClient()\nssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())\nchannel = ssh.invoke_shell()\nchannel.settimeout(10)\nbuff = ''\nresp = ''\nchannel.send('scp ' + tmppath + ' ' + user + '@' + hostname + ':' +\n remotepath + '\\n')\nwhile not buff.endswith(passinfo):\n try:\n resp = channel.recv(9999)\n except Exception as e:\n print('Error info: ' + str(e))\n channel.close()\n ssh.close()\n sys.exit()\n buff += resp\n if not buff.find('yes/no') == -1:\n channel.send('yes\\n')\n buff = ''\nchannel.send(password + '\\n')\nbuff = ''\nwhile not buff.endswith('# '):\n resp = channel.recv(9999)\n if not resp.find(passinfo) == -1:\n print('Error info: Auth failed.')\n channel.close()\n ssh.close()\n sys.exit()\n buff += resp\nprint(buff)\nchannel.close()\nssh.close()\n",
"step-4": "import paramiko\nimport os, sys, time\njumpip = '192.168.10.1'\njumpuser = 'jackie'\njumppass = '123456'\nhostname = '192.168.10.2'\nuser = 'root'\npassword = '654321'\ntmpdir = '/tmp'\nremotedir = '/data'\nlocalpath = '/home/nginx_access.tar.gz'\ntmppath = tmpdir + '/nginx_access.tar.gz'\nremotepath = remotedir + '/nginx_access_hd.tar.gz'\nport = 22\npassinfo = \"'s password: \"\nparamiko.util.log_to_file('syslogin.log')\nt = paramiko.Transport((jumpip, port))\nt.connect(username=jumpuser, password=jumppass)\nsftp = paramiko.SFTPClient.from_transport(t)\nsftp.put(localpath, remotepath)\nsftp.close()\nssh = paramiko.SSHClient()\nssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())\nchannel = ssh.invoke_shell()\nchannel.settimeout(10)\nbuff = ''\nresp = ''\nchannel.send('scp ' + tmppath + ' ' + user + '@' + hostname + ':' +\n remotepath + '\\n')\nwhile not buff.endswith(passinfo):\n try:\n resp = channel.recv(9999)\n except Exception as e:\n print('Error info: ' + str(e))\n channel.close()\n ssh.close()\n sys.exit()\n buff += resp\n if not buff.find('yes/no') == -1:\n channel.send('yes\\n')\n buff = ''\nchannel.send(password + '\\n')\nbuff = ''\nwhile not buff.endswith('# '):\n resp = channel.recv(9999)\n if not resp.find(passinfo) == -1:\n print('Error info: Auth failed.')\n channel.close()\n ssh.close()\n sys.exit()\n buff += resp\nprint(buff)\nchannel.close()\nssh.close()\n",
"step-5": "#!/usr/bin/env python3\n# coding=utf-8\n# title :paramiko_sftp.py\n# description :\n# author :JackieTsui\n# organization :pytoday.org\n# date :1/16/18 9:22 PM\n# email :[email protected]\n# notes :\n# ==================================================\n\n# Import the module needed to run the script\nimport paramiko\nimport os,sys,time\n\n\njumpip = \"192.168.10.1\"\njumpuser = \"jackie\"\njumppass = \"123456\"\nhostname = \"192.168.10.2\"\nuser = \"root\"\npassword = \"654321\"\n\ntmpdir = \"/tmp\"\nremotedir = \"/data\"\nlocalpath = \"/home/nginx_access.tar.gz\"\ntmppath = tmpdir + \"/nginx_access.tar.gz\"\nremotepath = remotedir + \"/nginx_access_hd.tar.gz\"\nport = 22\npassinfo = \"'s password: \"\nparamiko.util.log_to_file('syslogin.log')\n\nt = paramiko.Transport((jumpip, port))\nt.connect(username=jumpuser, password=jumppass)\nsftp = paramiko.SFTPClient.from_transport(t)\nsftp.put(localpath, remotepath)\nsftp.close()\n\nssh = paramiko.SSHClient()\nssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n\nchannel = ssh.invoke_shell()\nchannel.settimeout(10)\n\nbuff = \"\"\nresp = \"\"\nchannel.send(\"scp \" + tmppath + \" \" + user + \"@\" + hostname + \":\" + remotepath + \"\\n\")\nwhile not buff.endswith(passinfo):\n try:\n resp = channel.recv(9999)\n except Exception as e:\n print(\"Error info: \" + str(e))\n channel.close()\n ssh.close()\n sys.exit()\n buff += resp\n if not buff.find(\"yes/no\") == -1:\n channel.send(\"yes\\n\")\n buff = \"\"\n\nchannel.send(password + \"\\n\")\n\nbuff = \"\"\nwhile not buff.endswith(\"# \"):\n resp = channel.recv(9999)\n if not resp.find(passinfo) == -1:\n print(\"Error info: Auth failed.\")\n channel.close()\n ssh.close()\n sys.exit()\n buff += resp\n\nprint(buff)\nchannel.close()\nssh.close()\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import random
import glob
import json
import time
from torch.utils.data import Dataset, DataLoader, SubsetRandomSampler
from SimpleDataLoader import CustomDataset, get_params_from_filename
import numpy as np
from DNN_model import Net
import torch.optim as optim
import torch.nn as nn
import torch
from tqdm import tqdm
from MMS_compute import xpress_solver
import copy
path_to_data = 'Dataset'
def split_to_train_validation(path_to_data):
dataset = CustomDataset(path_to_data)
print(len(dataset))
batch_size = 300
validation_split = 0.2
shuffle_dataset = True
random_seed= 56
dataset_size = len(dataset)
indices = list(range(dataset_size))
split = int(np.floor(validation_split * dataset_size))
if shuffle_dataset :
np.random.seed(random_seed)
np.random.shuffle(indices)
train_indices, val_indices = indices[split:], indices[:split]
print(len(train_indices), len(val_indices))
# Creating PT data samplers and loaders:
train_sampler = SubsetRandomSampler(train_indices)
valid_sampler = SubsetRandomSampler(val_indices)
train_loader = DataLoader(dataset, batch_size=batch_size,
sampler=train_sampler)
validation_loader = DataLoader(dataset, batch_size=batch_size,
sampler=valid_sampler)
print(len(train_loader), len(validation_loader))
return train_loader, validation_loader
train_loader, validation_loader = split_to_train_validation(path_to_data)
net = Net()
loss_func = nn.MSELoss()
# loss_func = nn.L1Loss()
optimizer = optim.Adam(net.parameters(), lr=1e-4)
def compute_loss(dataloader, net):
loss = 0
if torch.cuda.is_available():
net.cuda()
net.eval()
n_batches = 0
with torch.no_grad():
for x, y in dataloader:
n_batches += 1
if torch.cuda.is_available():
x = x.cuda()
y = y.cuda()
pred = net(x)
loss += loss_func(pred, y).item()
loss = loss / n_batches
return loss
n_epochs = 50
pbar = tqdm(range(n_epochs))
validation_loss_vs_epoch = []
if torch.cuda.is_available():
net.cuda()
for epoch in pbar:
if len(validation_loss_vs_epoch) > 1:
print('epoch', epoch, ' val loss:' + '{0:.5f}'.format(validation_loss_vs_epoch[-1]))
net.train() # put the net into "training mode"
for x, y in train_loader:
y = y.to(torch.float32)
if torch.cuda.is_available():
x = x.cuda()
y = y.cuda()
optimizer.zero_grad()
pred = net(x)
loss = loss_func(pred, y)
loss.backward()
optimizer.step()
net.eval() # put the net into evaluation mode
valid_loss = compute_loss(validation_loader, net)
validation_loss_vs_epoch.append(valid_loss)
# n = 5
# m = 50
# max_val = 100
# values = [random.randrange(0, max_val + 1) for _ in range(m)]
# values.sort(reverse=True)
# values += [0]*50
# mms = xpress_solver(values,n)[0]
# sum_vals = sum(values)
# new_values = [val/sum_vals for val in values]
# pred = net(torch.FloatTensor([float(n)]+new_values))
# pred_num = float(pred.data[0])
# print(pred, mms, pred*sum_vals)
# print(pred_num*sum_vals)
def zero_pad(values, max_m):
m = len(values)
values += [0] * (max_m - m)
def solve_with_solver(values_copy, n):
return xpress_solver(values_copy, n)
def solve_with_net(values_copy, n):
start = time.time()
sum_vals = sum(values_copy)
new_values = [val / sum_vals for val in values_copy]
pred = net(torch.FloatTensor([float(n)] + new_values))
pred_num = float(pred.data[0])
final_result = pred_num*sum_vals
end = time.time()
return final_result, end-start
def test_net(path):
max_m = 100
filelist = glob.glob(path + '/*.json')
print(len(filelist))
test_result = dict()
filelist_len = len(filelist)
for count, filename in enumerate(filelist):
n, m, max_val = get_params_from_filename(filename)
data_list_in_file = []
with open(filename) as jsonFile:
data_list_in_file = json.load(jsonFile)
idx = random.randint(0, len(data_list_in_file)-1)
example=data_list_in_file[idx]
values = example[0]["values"]
values_copy = copy.deepcopy(values)
values_copy.sort(reverse=True)
solver_result, solver_time = solve_with_solver(values_copy, n)
zero_pad(values_copy, max_m)
net_result, net_time = solve_with_net(values_copy, n)
test_result[str((n, m, max_val))] = {
'values_idx': idx,
'solver_result': solver_result,
'solver_time':solver_time,
'net_result':net_result,
'net_time':net_time
}
if count % 20 == 0:
print(count, 'out of', filelist_len)
test_result_path = './TestResults/test_results.json'
with open(test_result_path, 'w+') as json_file:
json.dump(test_result, json_file, indent=4)
test_net(path_to_data)
|
normal
|
{
"blob_id": "1f63f9234596787e4859b740d3a7fbfaacc9c0c8",
"index": 9930,
"step-1": "<mask token>\n\n\ndef zero_pad(values, max_m):\n m = len(values)\n values += [0] * (max_m - m)\n\n\ndef solve_with_solver(values_copy, n):\n return xpress_solver(values_copy, n)\n\n\ndef solve_with_net(values_copy, n):\n start = time.time()\n sum_vals = sum(values_copy)\n new_values = [(val / sum_vals) for val in values_copy]\n pred = net(torch.FloatTensor([float(n)] + new_values))\n pred_num = float(pred.data[0])\n final_result = pred_num * sum_vals\n end = time.time()\n return final_result, end - start\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef split_to_train_validation(path_to_data):\n dataset = CustomDataset(path_to_data)\n print(len(dataset))\n batch_size = 300\n validation_split = 0.2\n shuffle_dataset = True\n random_seed = 56\n dataset_size = len(dataset)\n indices = list(range(dataset_size))\n split = int(np.floor(validation_split * dataset_size))\n if shuffle_dataset:\n np.random.seed(random_seed)\n np.random.shuffle(indices)\n train_indices, val_indices = indices[split:], indices[:split]\n print(len(train_indices), len(val_indices))\n train_sampler = SubsetRandomSampler(train_indices)\n valid_sampler = SubsetRandomSampler(val_indices)\n train_loader = DataLoader(dataset, batch_size=batch_size, sampler=\n train_sampler)\n validation_loader = DataLoader(dataset, batch_size=batch_size, sampler=\n valid_sampler)\n print(len(train_loader), len(validation_loader))\n return train_loader, validation_loader\n\n\n<mask token>\n\n\ndef compute_loss(dataloader, net):\n loss = 0\n if torch.cuda.is_available():\n net.cuda()\n net.eval()\n n_batches = 0\n with torch.no_grad():\n for x, y in dataloader:\n n_batches += 1\n if torch.cuda.is_available():\n x = x.cuda()\n y = y.cuda()\n pred = net(x)\n loss += loss_func(pred, y).item()\n loss = loss / n_batches\n return loss\n\n\n<mask token>\n\n\ndef zero_pad(values, max_m):\n m = len(values)\n values += [0] * (max_m - m)\n\n\ndef solve_with_solver(values_copy, n):\n return xpress_solver(values_copy, n)\n\n\ndef solve_with_net(values_copy, n):\n start = time.time()\n sum_vals = sum(values_copy)\n new_values = [(val / sum_vals) for val in values_copy]\n pred = net(torch.FloatTensor([float(n)] + new_values))\n pred_num = float(pred.data[0])\n final_result = pred_num * sum_vals\n end = time.time()\n return final_result, end - start\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef split_to_train_validation(path_to_data):\n dataset = CustomDataset(path_to_data)\n print(len(dataset))\n batch_size = 300\n validation_split = 0.2\n shuffle_dataset = True\n random_seed = 56\n dataset_size = len(dataset)\n indices = list(range(dataset_size))\n split = int(np.floor(validation_split * dataset_size))\n if shuffle_dataset:\n np.random.seed(random_seed)\n np.random.shuffle(indices)\n train_indices, val_indices = indices[split:], indices[:split]\n print(len(train_indices), len(val_indices))\n train_sampler = SubsetRandomSampler(train_indices)\n valid_sampler = SubsetRandomSampler(val_indices)\n train_loader = DataLoader(dataset, batch_size=batch_size, sampler=\n train_sampler)\n validation_loader = DataLoader(dataset, batch_size=batch_size, sampler=\n valid_sampler)\n print(len(train_loader), len(validation_loader))\n return train_loader, validation_loader\n\n\n<mask token>\n\n\ndef compute_loss(dataloader, net):\n loss = 0\n if torch.cuda.is_available():\n net.cuda()\n net.eval()\n n_batches = 0\n with torch.no_grad():\n for x, y in dataloader:\n n_batches += 1\n if torch.cuda.is_available():\n x = x.cuda()\n y = y.cuda()\n pred = net(x)\n loss += loss_func(pred, y).item()\n loss = loss / n_batches\n return loss\n\n\n<mask token>\nif torch.cuda.is_available():\n net.cuda()\nfor epoch in pbar:\n if len(validation_loss_vs_epoch) > 1:\n print('epoch', epoch, ' val loss:' + '{0:.5f}'.format(\n validation_loss_vs_epoch[-1]))\n net.train()\n for x, y in train_loader:\n y = y.to(torch.float32)\n if torch.cuda.is_available():\n x = x.cuda()\n y = y.cuda()\n optimizer.zero_grad()\n pred = net(x)\n loss = loss_func(pred, y)\n loss.backward()\n optimizer.step()\n net.eval()\n valid_loss = compute_loss(validation_loader, net)\n validation_loss_vs_epoch.append(valid_loss)\n\n\ndef zero_pad(values, max_m):\n m = len(values)\n values += [0] * (max_m - m)\n\n\ndef solve_with_solver(values_copy, n):\n return xpress_solver(values_copy, n)\n\n\ndef solve_with_net(values_copy, n):\n start = time.time()\n sum_vals = sum(values_copy)\n new_values = [(val / sum_vals) for val in values_copy]\n pred = net(torch.FloatTensor([float(n)] + new_values))\n pred_num = float(pred.data[0])\n final_result = pred_num * sum_vals\n end = time.time()\n return final_result, end - start\n\n\ndef test_net(path):\n max_m = 100\n filelist = glob.glob(path + '/*.json')\n print(len(filelist))\n test_result = dict()\n filelist_len = len(filelist)\n for count, filename in enumerate(filelist):\n n, m, max_val = get_params_from_filename(filename)\n data_list_in_file = []\n with open(filename) as jsonFile:\n data_list_in_file = json.load(jsonFile)\n idx = random.randint(0, len(data_list_in_file) - 1)\n example = data_list_in_file[idx]\n values = example[0]['values']\n values_copy = copy.deepcopy(values)\n values_copy.sort(reverse=True)\n solver_result, solver_time = solve_with_solver(values_copy, n)\n zero_pad(values_copy, max_m)\n net_result, net_time = solve_with_net(values_copy, n)\n test_result[str((n, m, max_val))] = {'values_idx': idx,\n 'solver_result': solver_result, 'solver_time': solver_time,\n 'net_result': net_result, 'net_time': net_time}\n if count % 20 == 0:\n print(count, 'out of', filelist_len)\n test_result_path = './TestResults/test_results.json'\n with open(test_result_path, 'w+') as json_file:\n json.dump(test_result, json_file, indent=4)\n\n\ntest_net(path_to_data)\n",
"step-4": "import random\nimport glob\nimport json\nimport time\nfrom torch.utils.data import Dataset, DataLoader, SubsetRandomSampler\nfrom SimpleDataLoader import CustomDataset, get_params_from_filename\nimport numpy as np\nfrom DNN_model import Net\nimport torch.optim as optim\nimport torch.nn as nn\nimport torch\nfrom tqdm import tqdm\nfrom MMS_compute import xpress_solver\nimport copy\npath_to_data = 'Dataset'\n\n\ndef split_to_train_validation(path_to_data):\n dataset = CustomDataset(path_to_data)\n print(len(dataset))\n batch_size = 300\n validation_split = 0.2\n shuffle_dataset = True\n random_seed = 56\n dataset_size = len(dataset)\n indices = list(range(dataset_size))\n split = int(np.floor(validation_split * dataset_size))\n if shuffle_dataset:\n np.random.seed(random_seed)\n np.random.shuffle(indices)\n train_indices, val_indices = indices[split:], indices[:split]\n print(len(train_indices), len(val_indices))\n train_sampler = SubsetRandomSampler(train_indices)\n valid_sampler = SubsetRandomSampler(val_indices)\n train_loader = DataLoader(dataset, batch_size=batch_size, sampler=\n train_sampler)\n validation_loader = DataLoader(dataset, batch_size=batch_size, sampler=\n valid_sampler)\n print(len(train_loader), len(validation_loader))\n return train_loader, validation_loader\n\n\ntrain_loader, validation_loader = split_to_train_validation(path_to_data)\nnet = Net()\nloss_func = nn.MSELoss()\noptimizer = optim.Adam(net.parameters(), lr=0.0001)\n\n\ndef compute_loss(dataloader, net):\n loss = 0\n if torch.cuda.is_available():\n net.cuda()\n net.eval()\n n_batches = 0\n with torch.no_grad():\n for x, y in dataloader:\n n_batches += 1\n if torch.cuda.is_available():\n x = x.cuda()\n y = y.cuda()\n pred = net(x)\n loss += loss_func(pred, y).item()\n loss = loss / n_batches\n return loss\n\n\nn_epochs = 50\npbar = tqdm(range(n_epochs))\nvalidation_loss_vs_epoch = []\nif torch.cuda.is_available():\n net.cuda()\nfor epoch in pbar:\n if len(validation_loss_vs_epoch) > 1:\n print('epoch', epoch, ' val loss:' + '{0:.5f}'.format(\n validation_loss_vs_epoch[-1]))\n net.train()\n for x, y in train_loader:\n y = y.to(torch.float32)\n if torch.cuda.is_available():\n x = x.cuda()\n y = y.cuda()\n optimizer.zero_grad()\n pred = net(x)\n loss = loss_func(pred, y)\n loss.backward()\n optimizer.step()\n net.eval()\n valid_loss = compute_loss(validation_loader, net)\n validation_loss_vs_epoch.append(valid_loss)\n\n\ndef zero_pad(values, max_m):\n m = len(values)\n values += [0] * (max_m - m)\n\n\ndef solve_with_solver(values_copy, n):\n return xpress_solver(values_copy, n)\n\n\ndef solve_with_net(values_copy, n):\n start = time.time()\n sum_vals = sum(values_copy)\n new_values = [(val / sum_vals) for val in values_copy]\n pred = net(torch.FloatTensor([float(n)] + new_values))\n pred_num = float(pred.data[0])\n final_result = pred_num * sum_vals\n end = time.time()\n return final_result, end - start\n\n\ndef test_net(path):\n max_m = 100\n filelist = glob.glob(path + '/*.json')\n print(len(filelist))\n test_result = dict()\n filelist_len = len(filelist)\n for count, filename in enumerate(filelist):\n n, m, max_val = get_params_from_filename(filename)\n data_list_in_file = []\n with open(filename) as jsonFile:\n data_list_in_file = json.load(jsonFile)\n idx = random.randint(0, len(data_list_in_file) - 1)\n example = data_list_in_file[idx]\n values = example[0]['values']\n values_copy = copy.deepcopy(values)\n values_copy.sort(reverse=True)\n solver_result, solver_time = solve_with_solver(values_copy, 
n)\n zero_pad(values_copy, max_m)\n net_result, net_time = solve_with_net(values_copy, n)\n test_result[str((n, m, max_val))] = {'values_idx': idx,\n 'solver_result': solver_result, 'solver_time': solver_time,\n 'net_result': net_result, 'net_time': net_time}\n if count % 20 == 0:\n print(count, 'out of', filelist_len)\n test_result_path = './TestResults/test_results.json'\n with open(test_result_path, 'w+') as json_file:\n json.dump(test_result, json_file, indent=4)\n\n\ntest_net(path_to_data)\n",
"step-5": "import random\nimport glob\nimport json\nimport time\n\nfrom torch.utils.data import Dataset, DataLoader, SubsetRandomSampler\nfrom SimpleDataLoader import CustomDataset, get_params_from_filename\nimport numpy as np\nfrom DNN_model import Net\nimport torch.optim as optim\nimport torch.nn as nn\nimport torch\nfrom tqdm import tqdm\nfrom MMS_compute import xpress_solver\nimport copy\n\n\npath_to_data = 'Dataset'\n\ndef split_to_train_validation(path_to_data):\n\n dataset = CustomDataset(path_to_data)\n print(len(dataset))\n\n batch_size = 300\n validation_split = 0.2\n shuffle_dataset = True\n random_seed= 56\n dataset_size = len(dataset)\n indices = list(range(dataset_size))\n split = int(np.floor(validation_split * dataset_size))\n if shuffle_dataset :\n np.random.seed(random_seed)\n np.random.shuffle(indices)\n train_indices, val_indices = indices[split:], indices[:split]\n print(len(train_indices), len(val_indices))\n\n # Creating PT data samplers and loaders:\n train_sampler = SubsetRandomSampler(train_indices)\n valid_sampler = SubsetRandomSampler(val_indices)\n\n train_loader = DataLoader(dataset, batch_size=batch_size,\n sampler=train_sampler)\n validation_loader = DataLoader(dataset, batch_size=batch_size,\n sampler=valid_sampler)\n\n print(len(train_loader), len(validation_loader))\n return train_loader, validation_loader\n\n\ntrain_loader, validation_loader = split_to_train_validation(path_to_data)\n\nnet = Net()\n\n\n\n\n\nloss_func = nn.MSELoss()\n# loss_func = nn.L1Loss()\noptimizer = optim.Adam(net.parameters(), lr=1e-4)\n\n\ndef compute_loss(dataloader, net):\n loss = 0\n\n if torch.cuda.is_available():\n net.cuda()\n net.eval()\n\n n_batches = 0\n with torch.no_grad():\n for x, y in dataloader:\n n_batches += 1\n\n if torch.cuda.is_available():\n x = x.cuda()\n y = y.cuda()\n pred = net(x)\n\n loss += loss_func(pred, y).item()\n\n loss = loss / n_batches\n return loss\n\n\n\n\nn_epochs = 50\n\npbar = tqdm(range(n_epochs))\nvalidation_loss_vs_epoch = []\n\nif torch.cuda.is_available():\n net.cuda()\n\nfor epoch in pbar:\n\n if len(validation_loss_vs_epoch) > 1:\n print('epoch', epoch, ' val loss:' + '{0:.5f}'.format(validation_loss_vs_epoch[-1]))\n\n net.train() # put the net into \"training mode\"\n for x, y in train_loader:\n y = y.to(torch.float32)\n\n if torch.cuda.is_available():\n x = x.cuda()\n y = y.cuda()\n\n optimizer.zero_grad()\n pred = net(x)\n loss = loss_func(pred, y)\n loss.backward()\n optimizer.step()\n\n net.eval() # put the net into evaluation mode\n\n valid_loss = compute_loss(validation_loader, net)\n\n validation_loss_vs_epoch.append(valid_loss)\n\n# n = 5\n# m = 50\n# max_val = 100\n# values = [random.randrange(0, max_val + 1) for _ in range(m)]\n# values.sort(reverse=True)\n# values += [0]*50\n# mms = xpress_solver(values,n)[0]\n# sum_vals = sum(values)\n# new_values = [val/sum_vals for val in values]\n# pred = net(torch.FloatTensor([float(n)]+new_values))\n# pred_num = float(pred.data[0])\n# print(pred, mms, pred*sum_vals)\n# print(pred_num*sum_vals)\n\n\ndef zero_pad(values, max_m):\n m = len(values)\n values += [0] * (max_m - m)\n\n\ndef solve_with_solver(values_copy, n):\n return xpress_solver(values_copy, n)\n\n\n\ndef solve_with_net(values_copy, n):\n start = time.time()\n sum_vals = sum(values_copy)\n new_values = [val / sum_vals for val in values_copy]\n pred = net(torch.FloatTensor([float(n)] + new_values))\n pred_num = float(pred.data[0])\n final_result = pred_num*sum_vals\n end = time.time()\n return final_result, 
end-start\n\ndef test_net(path):\n max_m = 100\n filelist = glob.glob(path + '/*.json')\n print(len(filelist))\n\n test_result = dict()\n filelist_len = len(filelist)\n for count, filename in enumerate(filelist):\n n, m, max_val = get_params_from_filename(filename)\n data_list_in_file = []\n with open(filename) as jsonFile:\n data_list_in_file = json.load(jsonFile)\n idx = random.randint(0, len(data_list_in_file)-1)\n example=data_list_in_file[idx]\n values = example[0][\"values\"]\n values_copy = copy.deepcopy(values)\n values_copy.sort(reverse=True)\n solver_result, solver_time = solve_with_solver(values_copy, n)\n\n zero_pad(values_copy, max_m)\n net_result, net_time = solve_with_net(values_copy, n)\n test_result[str((n, m, max_val))] = {\n 'values_idx': idx,\n 'solver_result': solver_result,\n 'solver_time':solver_time,\n 'net_result':net_result,\n 'net_time':net_time\n }\n if count % 20 == 0:\n print(count, 'out of', filelist_len)\n test_result_path = './TestResults/test_results.json'\n with open(test_result_path, 'w+') as json_file:\n json.dump(test_result, json_file, indent=4)\n\ntest_net(path_to_data)",
"step-ids": [
3,
5,
7,
9,
10
]
}
|
[
3,
5,
7,
9,
10
] |
##################
#Drawing Generic Rest of Board/
##################
def drawBoard(canvas,data):
canvas.create_rectangle(10,10,data.width-10,data.height-10, fill = "dark green")
canvas.create_rectangle(187, 160, 200, 550, fill = "white")
canvas.create_rectangle(187, 160, 561, 173, fill = "white")
canvas.create_rectangle(561, 160, 574, 550, fill = "white")
canvas.create_text(data.width//2, data.height//4, text = ("You've selected Tile "+str(data.tileNumber)), font = "courier 20")
canvas.create_rectangle(50,50, 350,75, fill = "white", width = 4)
canvas.create_text(700, 40, text = "P1", fill = data.p1color)
canvas.create_text(700, 60, text = "P2", fill = data.p2color)
canvas.create_text(700, 80, text = "P3", fill = data.p3color)
canvas.create_text(700, 100, text = "P4", fill = data.p4color)
def drawMiddleTiles(canvas,data):
if data.played!=None:
for tile in data.played:
canvas.create_image(tile[0], tile[1], image = tile[2])
|
normal
|
{
"blob_id": "628e625be86053988cbaa3ddfe55f0538136e24d",
"index": 3599,
"step-1": "<mask token>\n",
"step-2": "def drawBoard(canvas, data):\n canvas.create_rectangle(10, 10, data.width - 10, data.height - 10, fill\n ='dark green')\n canvas.create_rectangle(187, 160, 200, 550, fill='white')\n canvas.create_rectangle(187, 160, 561, 173, fill='white')\n canvas.create_rectangle(561, 160, 574, 550, fill='white')\n canvas.create_text(data.width // 2, data.height // 4, text=\n \"You've selected Tile \" + str(data.tileNumber), font='courier 20')\n canvas.create_rectangle(50, 50, 350, 75, fill='white', width=4)\n canvas.create_text(700, 40, text='P1', fill=data.p1color)\n canvas.create_text(700, 60, text='P2', fill=data.p2color)\n canvas.create_text(700, 80, text='P3', fill=data.p3color)\n canvas.create_text(700, 100, text='P4', fill=data.p4color)\n\n\n<mask token>\n",
"step-3": "def drawBoard(canvas, data):\n canvas.create_rectangle(10, 10, data.width - 10, data.height - 10, fill\n ='dark green')\n canvas.create_rectangle(187, 160, 200, 550, fill='white')\n canvas.create_rectangle(187, 160, 561, 173, fill='white')\n canvas.create_rectangle(561, 160, 574, 550, fill='white')\n canvas.create_text(data.width // 2, data.height // 4, text=\n \"You've selected Tile \" + str(data.tileNumber), font='courier 20')\n canvas.create_rectangle(50, 50, 350, 75, fill='white', width=4)\n canvas.create_text(700, 40, text='P1', fill=data.p1color)\n canvas.create_text(700, 60, text='P2', fill=data.p2color)\n canvas.create_text(700, 80, text='P3', fill=data.p3color)\n canvas.create_text(700, 100, text='P4', fill=data.p4color)\n\n\ndef drawMiddleTiles(canvas, data):\n if data.played != None:\n for tile in data.played:\n canvas.create_image(tile[0], tile[1], image=tile[2])\n",
"step-4": "##################\n#Drawing Generic Rest of Board/\n##################\ndef drawBoard(canvas,data): \n canvas.create_rectangle(10,10,data.width-10,data.height-10, fill = \"dark green\")\n canvas.create_rectangle(187, 160, 200, 550, fill = \"white\") \n canvas.create_rectangle(187, 160, 561, 173, fill = \"white\")\n canvas.create_rectangle(561, 160, 574, 550, fill = \"white\")\n canvas.create_text(data.width//2, data.height//4, text = (\"You've selected Tile \"+str(data.tileNumber)), font = \"courier 20\") \n canvas.create_rectangle(50,50, 350,75, fill = \"white\", width = 4)\n canvas.create_text(700, 40, text = \"P1\", fill = data.p1color)\n canvas.create_text(700, 60, text = \"P2\", fill = data.p2color)\n canvas.create_text(700, 80, text = \"P3\", fill = data.p3color)\n canvas.create_text(700, 100, text = \"P4\", fill = data.p4color)\n \n \ndef drawMiddleTiles(canvas,data): \n if data.played!=None: \n for tile in data.played: \n canvas.create_image(tile[0], tile[1], image = tile[2]) \n\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
def dot_product(a, b):
ans = 0
for i in range(len(a)):
ans += a[i] * b[i]
return ans
n = int(input())
a = sorted(list(map(int, input().split())))
b = sorted(list(map(int, input().split())))
print(dot_product(a, b))
|
normal
|
{
"blob_id": "fc273a286a462cb673edaa2de2ecc6b9ca631004",
"index": 9824,
"step-1": "<mask token>\n",
"step-2": "def dot_product(a, b):\n ans = 0\n for i in range(len(a)):\n ans += a[i] * b[i]\n return ans\n\n\n<mask token>\n",
"step-3": "def dot_product(a, b):\n ans = 0\n for i in range(len(a)):\n ans += a[i] * b[i]\n return ans\n\n\n<mask token>\nprint(dot_product(a, b))\n",
"step-4": "def dot_product(a, b):\n ans = 0\n for i in range(len(a)):\n ans += a[i] * b[i]\n return ans\n\n\nn = int(input())\na = sorted(list(map(int, input().split())))\nb = sorted(list(map(int, input().split())))\nprint(dot_product(a, b))\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from typing import List
class Solution:
def destCity(self, paths: List[List[str]]) ->str:
departCity = set()
destCity = []
for i in paths:
if i[1] not in departCity:
destCity.append(i[1])
if i[0] in destCity:
destCity.remove(i[0])
departCity.add(i[0])
return destCity[0]
|
normal
|
{
"blob_id": "03cc3bf37ea8d971550a89107161005901d842de",
"index": 2514,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Solution:\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Solution:\n\n def destCity(self, paths: List[List[str]]) ->str:\n departCity = set()\n destCity = []\n for i in paths:\n if i[1] not in departCity:\n destCity.append(i[1])\n if i[0] in destCity:\n destCity.remove(i[0])\n departCity.add(i[0])\n return destCity[0]\n",
"step-4": "from typing import List\n\n\nclass Solution:\n\n def destCity(self, paths: List[List[str]]) ->str:\n departCity = set()\n destCity = []\n for i in paths:\n if i[1] not in departCity:\n destCity.append(i[1])\n if i[0] in destCity:\n destCity.remove(i[0])\n departCity.add(i[0])\n return destCity[0]\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import time
inputStr = """crruafyzloguvxwctqmphenbkd
srcjafyzlcguvrwctqmphenbkd
srijafyzlogbpxwctgmphenbkd
zrijafyzloguvxrctqmphendkd
srijabyzloguvowcqqmphenbkd
srijafyzsoguvxwctbmpienbkd
srirtfyzlognvxwctqmphenbkd
srijafyzloguvxwctgmphenbmq
senjafyzloguvxectqmphenbkd
srijafyeloguvxwwtqmphembkd
srijafyzlogurxtctqmpkenbkd
srijafyzlkguvxictqhphenbkd
srijafgzlogunxwctqophenbkd
shijabyzloguvxwctqmqhenbkd
srjoafyzloguvxwctqmphenbwd
srijafyhloguvxwmtqmphenkkd
srijadyzlogwvxwctqmphenbed
brijafyzloguvmwctqmphenhkd
smijafyzlhguvxwctqmphjnbkd
sriqafvzloguvxwctqmpheebkd
srijafyzloguvxwisqmpuenbkd
mrijakyuloguvxwctqmphenbkd
srnfafyzloguvxwctqmphgnbkd
srijadyzloguvxwhfqmphenbkd
srijafhzloguvxwctdmlhenbkd
srijafyzloguvxwcsqmphykbkd
srijafyzlogwvxwatqmphhnbkd
srijafyzlozqvxwctqmphenbku
srijafyzloguvxwcbamphenbgd
srijafyzlfguvxwctqmphzybkd
srijafyzloguqxwetqmphenkkd
srijafyylogubxwttqmphenbkd
srijafyzloguvxzctadphenbkd
srijafyzloguoxwhtqmchenbkd
srijafyzloguvxwcvqmzhenbko
srijnfyzloguvxwctqmchenjkd
srijaryzloggvxwctqzphenbkd
srijafhzleguvxwcxqmphenbkd
ssijafyzllguvxfctqmphenbkd
srijafyzloguvxdctqmfhenbcd
srijafyzloguvxfctqmplynbkd
srijaftzlogavxwcrqmphenbkd
sriwaoyzloguvxwctqmphenbtd
srijahyzlogunxwctqmphenbvd
srjjafyzloguzxwctumphenbkd
nrijafyzlxguvxwctqmphanbkd
srijafezlqguyxwctqmphenbkd
srijafygloguvxwjtqcphenbkd
erijafyzloguvxoctqmnhenbkd
ssijafyzllguvxwbtqmphenbkd
sriaafyzloguvxwctqqphenbkv
frijafyzloguvswctwmphenbkd
srijafyzyogkvxwctqmprenbkd
syijafyzuoguvxwctqmkhenbkd
srijafyzloganxwctqmphenbkf
srijafyzloguvxwftqmxhenbkq
srijafyflogxvxwctqmghenbkd
srijafyzsoguvxwctqmpjenwkd
srujafylloguvxwctqmphenckd
srijafyzlpzuvxwctqmphenbud
srijafyzlogfvxwctqmhhenbwd
srijafjzlogusxwctqmphepbkd
srijlfyzloguvxwctqfphenzkd
srijafyzlogwvxwctqyphenbqd
srijafyzloluvxwctqtphenukd
srizafyzlowuvxwctqmphqnbkd
sritafkzlkguvxwctqmphenbkd
sbijafdzloguvxgctqmphenbkd
crijafyeloguvxwctqmpsenbkd
srijafyvlogulxwctqmphenbkk
srijafyologuvxwctqmehegbkd
siijafyzloguvxwctjmphenbmd
srijafyzlupuvxwctqmpheabkd
srijafyzlogumxwctqqphanbkd
srijxfyzlogujxwcqqmphenbkd
irijafizeoguvxwctqmphenbkd
sgijafyzloguvtwctqmpfenbkd
srijzfyzloguvmwctnmphenbkd
srijafyzwohuvxwctqmthenbkd
srijafyzlhguvxoctqwphenbkd
srgjafyplogxvxwctqmphenbkd
srijafyqlogovxwctqzphenbkd
srijafjzloguvlnvtqmphenbkd
srijafyzooguvxwctqmphenvud
srijafyzgoguvxwctumphgnbkd
srijaffzloguvxwdqqmphenbkd
srijafyzlogugxwctqxphenbkr
srijafyzlogutxwctqmmcenbkd
srifafyzlhguwxwctqmphenbkd
mrimajyzloguvxwctqmphenbkd
sriyafyzloguvxwcthmphejbkd
srieakyzlokuvxwctqmphenbkd
srisafyzloguhxwctqmphecbkd
srijanyzloguvxcctqmxhenbkd
srijafyzypguvxwctqmqhenbkd
sryjtfyzlvguvxwctqmphenbkd
srijafyzlsguvxwctqmqfenbkd
srijafyzlogudxwbtqwphenbkd
srijysyzloguvxwctqmpvenbkd
srijafyzloggvxwjtqmphegbkd
srijgfyzloguvxwctqmbhdnbkd
ssijufyzloguvawctqmphenbkd
skojafyzloguvxwctqmphenbnd
srijafylloguvxwcqqmpienbkd
trioafyzloguvqwctqmphenbkd
srijafydloguvxwctqmpzjnbkd
saijafvzloguvxwcqqmphenbkd
srhjapyzloguvxwctqmbhenbkd
srijafyzlfguvxwcsqmpwenbkd
shijafyzboguvxwctqmphenbmd
srizafysloguvxwrtqmphenbkd
srijafyzloguvxwciqmwhenbkj
qrijafyzloduvxwctqmphenbko
srijefyuloguvxwctqmphenbed
srijafyzlobuvxwctqmphenhbd
srijafyzloxuvxwctqmpheabkq
srijafyzloguvrwctqmghenkkd
sfisafywloguvxwctqmphenbkd
srgjafyzlogurxwctqmphenbkp
srijafhzloguvxwcjqmphenhkd
srijafyylogufxwrtqmphenbkd
srijafyzvoguvxwzkqmphenbkd
sqijafyzloguvxwctqmpheqbxd
srijafyvloguvxwctqzpherbkd
srijufyzloguvxlcsqmphenbkd
srijafykloguvxlccqmphenbkd
srijafyzloguexwcrqmphenzkd
sridifyzloguyxwctqmphenbkd
srijafyzlogfvxwctqlphenbkl
srijafyzlodqdxwctqmphenbkd
srijafyzloruvxactqmphenekd
grijafyzloguvxpctmmphenbkd
srsjakyzloguvxwctqmphvnbkd
srikafyvloguvxwrtqmphenbkd
srijafyzloguvxwctqjpserbkd
jrijafyzloguvxwctqmpgesbkd
swijafyzluguvxwctqmfhenbkd
srijanynlogovxwctqmphenbkd
jrijafyzloguvxwctymphrnbkd
srinafyzloguvewctqmphenbzd
srijakyzloguvxwctqmphcnbka
srijafyhlobuvxwctqmphenbka
srijafyzcogusxwctqmphwnbkd
srijavyzlosuvxwctqmphjnbkd
orijafyzxoguvxwcnqmphenbkd
srijafyzlogcvxwvtqmthenbkd
srijapyzloauvxwctqmphenvkd
srijaflzloguhxwctqmphenbwd
smijafyzlonuvxwctqmphenbkw
jrijafyzloguvxwclqmnhenbkd
srijaqyzloguvqwctqmphenskd
srijasyzloguvxwctqmvhenbku
crijtfyzloguvxwctqmthenbkd
srrkafyzvoguvxwctqmphenbkd
srijatyzloguvewctqmphenbld
srfjafyyloguvnwctqmphenbkd
srijafyzloguvxwctqjpbenbkt
hrijafyzooguvxwctqmphenbld
srijafbzlogscxwctqmphenbkd
srinafyzlogxvxwctqqphenbkd
slijafyzloglvxwctqmphenbdd
srijafyzlogjvxwcsqmphenbld
sryjcfyzloguvewctqmphenbkd
srijafyzloguexwctqmohknbkd
jaijafyzlogevxwctqmphenbkd
srijafbzlogavxwctqmphenbki
srijafozlogpvxwctqmphgnbkd
srijdfyzloguvxwczqmphenbkm
srijafyzlobuvxwctqmphxndkd
mrijifyzlhguvxwctqmphenbkd
srijafyzloguvxbctumphjnbkd
srijafyzloyuvxwptqmphlnbkd
arijafyzloguvxwcsqmohenbkd
srijaftzioguvxwttqmphenbkd
srijafyzlqsuvxwctqmphxnbkd
srijafyzioguvxwctqnphetbkd
prijafbzloguvxdctqmphenbkd
srijaeyzlnguvxwmtqmphenbkd
srijofyzloguvqwctqmphonbkd
srixaryzpoguvxwctqmphenbkd
srijafyzlowuvxwcwhmphenbkd
srijafydloguvxwctqmptenikd
srijqfyzlogtvfwctqmphenbkd
srijafyzloguvxlctqmpvenbgd
srijafyzlbguvxwjtqgphenbkd
srijafyzlohuqxwctqmphenbka
srijafyzroguvxictqmphynbkd
srijafyzloguvxdctjmphenjkd
srijaoczloguvxwctqmphenbjd
srajafhzloguvxwctqmphenbke
srijofyzloduvxwctqmphanbkd
srijafytloguvxwmtnmphenbkd
srijafyzuoguvxwceqmpgenbkd
rrijafyzloyuvxwctqmphlnbkd
srljafyzloguvxictqmohenbkd
srijafyzlogulxwcrqrphenbkd
srajafyzloguvxwctqmphanbke
srijafyzlhguvxwxtqmpheabkd
sxijafyzloggwxwctqmphenbkd
srijafyultguvxwctqmphinbkd
srijafyzloguvtwctqmfhvnbkd
srijafwzloruvxwctquphenbkd
srbjafyzxoguuxwctqmphenbkd
erijafyzlxguvxbctqmphenbkd
srijagyzlojubxwctqmphenbkd
srijafyzloguvxwdtqmchenakd
srijafkzlogukxwctqiphenbkd
mridafyzloguvxwctqmphenrkd
szqjafyzloguvxwctqmpheibkd
srijahyzloguvxwctcmphenekd
srijafyzloguvxwczpuphenbkd
srijafyzcoguvfwctqmphenbkq
qriiafyzloguvxwctqmpheebkd
srijpfyzloguvxlctqmphenokd
srijzfyzlotuvxwcjqmphenbkd
srinafyqloguvxwctfmphenbkd
srijafyzlogjvxpltqmphenbkd
srijafyzlotuvxwutqmphenbtd
sridafyzloguvxwctqmpyenokd
srxjafyzqogyvxwctqmphenbkd
ssijafyzzoguvxwctqmphenbad
srijafrzloguvxwctqmphekpkd
srijafyzlfgrvxactqmphenbkd
srijafyzroguvxwttqmphekbkd
srijefyzloguvxwctqmpqenbrd
srijefycloguvxwctqmchenbkd
srzjafyzloguvxwcqqmphanbkd
srijauyzlhguvxwctqmphenbgd
srijafyzloguvmwvnqmphenbkd
srihafyzloguvlwotqmphenbkd
srigafyzloguvxwctqmphennsd
sriuafzzloguvxwcuqmphenbkd
srijavuzllguvxwctqmphenbkd
srijafjzloguvlnctqmphenbkd
lrirafyzloguvxwctqmphenbld
soijarxzloguvxwctqmphenbkd
srijapyzlnguvxwctqmdhenbkd
srijafyzkogujxmctqmphenbkd
srijafuzloguvxwcsqvphenbkd
srijagyzzoguvxwctqmpvenbkd
srijafyzlovuvxwctqmrhenbxd
srijafyzqoguvxwctwmpienbkd
sxijafyzloguvxwutqmphenlkd
srijafyzlhgzvxwctqmphqnbkd
srijajyzloguvxwcbwmphenbkd
srijazyzloguvxwhtqmphenbkx
srgjafyzloguvvwctqmphdnbkd
rrivafyzloguvxjctqmphenbkd
srijifyzdoguvxwctqmphenbka
hrijafyzloguvxectqmpheybkd"""
startTime = time.time()
inputList = list(map(str, inputStr.splitlines()))
numRepeatsChar = 0
doubleDupes = 0
tripleDupes = 0
for string in inputList:
hasDoubleDupes = False
hasTripleDupes = False
for char in string:
numRepeatsChar = string.count(char)
if numRepeatsChar == 2 and not hasDoubleDupes:
doubleDupes += 1
hasDoubleDupes = True
elif numRepeatsChar == 3 and not hasTripleDupes:
tripleDupes += 1
hasTripleDupes = True
elif hasDoubleDupes and hasTripleDupes:
break
print(doubleDupes)
print(tripleDupes)
checkSum = doubleDupes * tripleDupes
print('Checksum: ' + str(checkSum))
print("%s seconds" % (time.time() - startTime))
|
normal
|
{
"blob_id": "9620479e9ac27c1c7833c9a31b9cb18408b8d361",
"index": 4019,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor string in inputList:\n hasDoubleDupes = False\n hasTripleDupes = False\n for char in string:\n numRepeatsChar = string.count(char)\n if numRepeatsChar == 2 and not hasDoubleDupes:\n doubleDupes += 1\n hasDoubleDupes = True\n elif numRepeatsChar == 3 and not hasTripleDupes:\n tripleDupes += 1\n hasTripleDupes = True\n elif hasDoubleDupes and hasTripleDupes:\n break\n print(doubleDupes)\n print(tripleDupes)\n<mask token>\nprint('Checksum: ' + str(checkSum))\nprint('%s seconds' % (time.time() - startTime))\n",
"step-3": "<mask token>\ninputStr = \"\"\"crruafyzloguvxwctqmphenbkd\nsrcjafyzlcguvrwctqmphenbkd\nsrijafyzlogbpxwctgmphenbkd\nzrijafyzloguvxrctqmphendkd\nsrijabyzloguvowcqqmphenbkd\nsrijafyzsoguvxwctbmpienbkd\nsrirtfyzlognvxwctqmphenbkd\nsrijafyzloguvxwctgmphenbmq\nsenjafyzloguvxectqmphenbkd\nsrijafyeloguvxwwtqmphembkd\nsrijafyzlogurxtctqmpkenbkd\nsrijafyzlkguvxictqhphenbkd\nsrijafgzlogunxwctqophenbkd\nshijabyzloguvxwctqmqhenbkd\nsrjoafyzloguvxwctqmphenbwd\nsrijafyhloguvxwmtqmphenkkd\nsrijadyzlogwvxwctqmphenbed\nbrijafyzloguvmwctqmphenhkd\nsmijafyzlhguvxwctqmphjnbkd\nsriqafvzloguvxwctqmpheebkd\nsrijafyzloguvxwisqmpuenbkd\nmrijakyuloguvxwctqmphenbkd\nsrnfafyzloguvxwctqmphgnbkd\nsrijadyzloguvxwhfqmphenbkd\nsrijafhzloguvxwctdmlhenbkd\nsrijafyzloguvxwcsqmphykbkd\nsrijafyzlogwvxwatqmphhnbkd\nsrijafyzlozqvxwctqmphenbku\nsrijafyzloguvxwcbamphenbgd\nsrijafyzlfguvxwctqmphzybkd\nsrijafyzloguqxwetqmphenkkd\nsrijafyylogubxwttqmphenbkd\nsrijafyzloguvxzctadphenbkd\nsrijafyzloguoxwhtqmchenbkd\nsrijafyzloguvxwcvqmzhenbko\nsrijnfyzloguvxwctqmchenjkd\nsrijaryzloggvxwctqzphenbkd\nsrijafhzleguvxwcxqmphenbkd\nssijafyzllguvxfctqmphenbkd\nsrijafyzloguvxdctqmfhenbcd\nsrijafyzloguvxfctqmplynbkd\nsrijaftzlogavxwcrqmphenbkd\nsriwaoyzloguvxwctqmphenbtd\nsrijahyzlogunxwctqmphenbvd\nsrjjafyzloguzxwctumphenbkd\nnrijafyzlxguvxwctqmphanbkd\nsrijafezlqguyxwctqmphenbkd\nsrijafygloguvxwjtqcphenbkd\nerijafyzloguvxoctqmnhenbkd\nssijafyzllguvxwbtqmphenbkd\nsriaafyzloguvxwctqqphenbkv\nfrijafyzloguvswctwmphenbkd\nsrijafyzyogkvxwctqmprenbkd\nsyijafyzuoguvxwctqmkhenbkd\nsrijafyzloganxwctqmphenbkf\nsrijafyzloguvxwftqmxhenbkq\nsrijafyflogxvxwctqmghenbkd\nsrijafyzsoguvxwctqmpjenwkd\nsrujafylloguvxwctqmphenckd\nsrijafyzlpzuvxwctqmphenbud\nsrijafyzlogfvxwctqmhhenbwd\nsrijafjzlogusxwctqmphepbkd\nsrijlfyzloguvxwctqfphenzkd\nsrijafyzlogwvxwctqyphenbqd\nsrijafyzloluvxwctqtphenukd\nsrizafyzlowuvxwctqmphqnbkd\nsritafkzlkguvxwctqmphenbkd\nsbijafdzloguvxgctqmphenbkd\ncrijafyeloguvxwctqmpsenbkd\nsrijafyvlogulxwctqmphenbkk\nsrijafyologuvxwctqmehegbkd\nsiijafyzloguvxwctjmphenbmd\nsrijafyzlupuvxwctqmpheabkd\nsrijafyzlogumxwctqqphanbkd\nsrijxfyzlogujxwcqqmphenbkd\nirijafizeoguvxwctqmphenbkd\nsgijafyzloguvtwctqmpfenbkd\nsrijzfyzloguvmwctnmphenbkd\nsrijafyzwohuvxwctqmthenbkd\nsrijafyzlhguvxoctqwphenbkd\nsrgjafyplogxvxwctqmphenbkd\nsrijafyqlogovxwctqzphenbkd\nsrijafjzloguvlnvtqmphenbkd\nsrijafyzooguvxwctqmphenvud\nsrijafyzgoguvxwctumphgnbkd\nsrijaffzloguvxwdqqmphenbkd\nsrijafyzlogugxwctqxphenbkr\nsrijafyzlogutxwctqmmcenbkd\nsrifafyzlhguwxwctqmphenbkd\nmrimajyzloguvxwctqmphenbkd\nsriyafyzloguvxwcthmphejbkd\nsrieakyzlokuvxwctqmphenbkd\nsrisafyzloguhxwctqmphecbkd\nsrijanyzloguvxcctqmxhenbkd\nsrijafyzypguvxwctqmqhenbkd\nsryjtfyzlvguvxwctqmphenbkd\nsrijafyzlsguvxwctqmqfenbkd\nsrijafyzlogudxwbtqwphenbkd\nsrijysyzloguvxwctqmpvenbkd\nsrijafyzloggvxwjtqmphegbkd\nsrijgfyzloguvxwctqmbhdnbkd\nssijufyzloguvawctqmphenbkd\nskojafyzloguvxwctqmphenbnd\nsrijafylloguvxwcqqmpienbkd\ntrioafyzloguvqwctqmphenbkd\nsrijafydloguvxwctqmpzjnbkd\nsaijafvzloguvxwcqqmphenbkd\nsrhjapyzloguvxwctqmbhenbkd\nsrijafyzlfguvxwcsqmpwenbkd\nshijafyzboguvxwctqmphenbmd\nsrizafysloguvxwrtqmphenbkd\nsrijafyzloguvxwciqmwhenbkj\nqrijafyzloduvxwctqmphenbko\nsrijefyuloguvxwctqmphenbed\nsrijafyzlobuvxwctqmphenhbd\nsrijafyzloxuvxwctqmpheabkq\nsrijafyzloguvrwctqmghenkkd\nsfisafywloguvxwctqmphenbkd\nsrgjafyzlogurxwctqmphenbkp\nsrijafhzloguvxwcjqmphenhkd\nsrijafyylogufxwrtqmphenbkd\nsrijafyzvoguvxwzkqmphenbkd\nsqijafyzloguvxwctqmpheqbxd\nsrijafyvloguvxwctqzpherbkd\nsrijufyzloguvxlcsqmphenbkd\nsrijafykloguv
xlccqmphenbkd\nsrijafyzloguexwcrqmphenzkd\nsridifyzloguyxwctqmphenbkd\nsrijafyzlogfvxwctqlphenbkl\nsrijafyzlodqdxwctqmphenbkd\nsrijafyzloruvxactqmphenekd\ngrijafyzloguvxpctmmphenbkd\nsrsjakyzloguvxwctqmphvnbkd\nsrikafyvloguvxwrtqmphenbkd\nsrijafyzloguvxwctqjpserbkd\njrijafyzloguvxwctqmpgesbkd\nswijafyzluguvxwctqmfhenbkd\nsrijanynlogovxwctqmphenbkd\njrijafyzloguvxwctymphrnbkd\nsrinafyzloguvewctqmphenbzd\nsrijakyzloguvxwctqmphcnbka\nsrijafyhlobuvxwctqmphenbka\nsrijafyzcogusxwctqmphwnbkd\nsrijavyzlosuvxwctqmphjnbkd\norijafyzxoguvxwcnqmphenbkd\nsrijafyzlogcvxwvtqmthenbkd\nsrijapyzloauvxwctqmphenvkd\nsrijaflzloguhxwctqmphenbwd\nsmijafyzlonuvxwctqmphenbkw\njrijafyzloguvxwclqmnhenbkd\nsrijaqyzloguvqwctqmphenskd\nsrijasyzloguvxwctqmvhenbku\ncrijtfyzloguvxwctqmthenbkd\nsrrkafyzvoguvxwctqmphenbkd\nsrijatyzloguvewctqmphenbld\nsrfjafyyloguvnwctqmphenbkd\nsrijafyzloguvxwctqjpbenbkt\nhrijafyzooguvxwctqmphenbld\nsrijafbzlogscxwctqmphenbkd\nsrinafyzlogxvxwctqqphenbkd\nslijafyzloglvxwctqmphenbdd\nsrijafyzlogjvxwcsqmphenbld\nsryjcfyzloguvewctqmphenbkd\nsrijafyzloguexwctqmohknbkd\njaijafyzlogevxwctqmphenbkd\nsrijafbzlogavxwctqmphenbki\nsrijafozlogpvxwctqmphgnbkd\nsrijdfyzloguvxwczqmphenbkm\nsrijafyzlobuvxwctqmphxndkd\nmrijifyzlhguvxwctqmphenbkd\nsrijafyzloguvxbctumphjnbkd\nsrijafyzloyuvxwptqmphlnbkd\narijafyzloguvxwcsqmohenbkd\nsrijaftzioguvxwttqmphenbkd\nsrijafyzlqsuvxwctqmphxnbkd\nsrijafyzioguvxwctqnphetbkd\nprijafbzloguvxdctqmphenbkd\nsrijaeyzlnguvxwmtqmphenbkd\nsrijofyzloguvqwctqmphonbkd\nsrixaryzpoguvxwctqmphenbkd\nsrijafyzlowuvxwcwhmphenbkd\nsrijafydloguvxwctqmptenikd\nsrijqfyzlogtvfwctqmphenbkd\nsrijafyzloguvxlctqmpvenbgd\nsrijafyzlbguvxwjtqgphenbkd\nsrijafyzlohuqxwctqmphenbka\nsrijafyzroguvxictqmphynbkd\nsrijafyzloguvxdctjmphenjkd\nsrijaoczloguvxwctqmphenbjd\nsrajafhzloguvxwctqmphenbke\nsrijofyzloduvxwctqmphanbkd\nsrijafytloguvxwmtnmphenbkd\nsrijafyzuoguvxwceqmpgenbkd\nrrijafyzloyuvxwctqmphlnbkd\nsrljafyzloguvxictqmohenbkd\nsrijafyzlogulxwcrqrphenbkd\nsrajafyzloguvxwctqmphanbke\nsrijafyzlhguvxwxtqmpheabkd\nsxijafyzloggwxwctqmphenbkd\nsrijafyultguvxwctqmphinbkd\nsrijafyzloguvtwctqmfhvnbkd\nsrijafwzloruvxwctquphenbkd\nsrbjafyzxoguuxwctqmphenbkd\nerijafyzlxguvxbctqmphenbkd\nsrijagyzlojubxwctqmphenbkd\nsrijafyzloguvxwdtqmchenakd\nsrijafkzlogukxwctqiphenbkd\nmridafyzloguvxwctqmphenrkd\nszqjafyzloguvxwctqmpheibkd\nsrijahyzloguvxwctcmphenekd\nsrijafyzloguvxwczpuphenbkd\nsrijafyzcoguvfwctqmphenbkq\nqriiafyzloguvxwctqmpheebkd\nsrijpfyzloguvxlctqmphenokd\nsrijzfyzlotuvxwcjqmphenbkd\nsrinafyqloguvxwctfmphenbkd\nsrijafyzlogjvxpltqmphenbkd\nsrijafyzlotuvxwutqmphenbtd\nsridafyzloguvxwctqmpyenokd\nsrxjafyzqogyvxwctqmphenbkd\nssijafyzzoguvxwctqmphenbad\nsrijafrzloguvxwctqmphekpkd\nsrijafyzlfgrvxactqmphenbkd\nsrijafyzroguvxwttqmphekbkd\nsrijefyzloguvxwctqmpqenbrd\nsrijefycloguvxwctqmchenbkd\nsrzjafyzloguvxwcqqmphanbkd\nsrijauyzlhguvxwctqmphenbgd\nsrijafyzloguvmwvnqmphenbkd\nsrihafyzloguvlwotqmphenbkd\nsrigafyzloguvxwctqmphennsd\nsriuafzzloguvxwcuqmphenbkd\nsrijavuzllguvxwctqmphenbkd\nsrijafjzloguvlnctqmphenbkd\nlrirafyzloguvxwctqmphenbld\nsoijarxzloguvxwctqmphenbkd\nsrijapyzlnguvxwctqmdhenbkd\nsrijafyzkogujxmctqmphenbkd\nsrijafuzloguvxwcsqvphenbkd\nsrijagyzzoguvxwctqmpvenbkd\nsrijafyzlovuvxwctqmrhenbxd\nsrijafyzqoguvxwctwmpienbkd\nsxijafyzloguvxwutqmphenlkd\nsrijafyzlhgzvxwctqmphqnbkd\nsrijajyzloguvxwcbwmphenbkd\nsrijazyzloguvxwhtqmphenbkx\nsrgjafyzloguvvwctqmphdnbkd\nrrivafyzloguvxjctqmphenbkd\nsrijifyzdoguvxwctqmphenbka\nhrijafyzloguvxectqmpheybkd\"\"\"\nstartTime = time.time()\ninputList = list(map(str, 
inputStr.splitlines()))\nnumRepeatsChar = 0\ndoubleDupes = 0\ntripleDupes = 0\nfor string in inputList:\n hasDoubleDupes = False\n hasTripleDupes = False\n for char in string:\n numRepeatsChar = string.count(char)\n if numRepeatsChar == 2 and not hasDoubleDupes:\n doubleDupes += 1\n hasDoubleDupes = True\n elif numRepeatsChar == 3 and not hasTripleDupes:\n tripleDupes += 1\n hasTripleDupes = True\n elif hasDoubleDupes and hasTripleDupes:\n break\n print(doubleDupes)\n print(tripleDupes)\ncheckSum = doubleDupes * tripleDupes\nprint('Checksum: ' + str(checkSum))\nprint('%s seconds' % (time.time() - startTime))\n",
"step-4": "import time\ninputStr = \"\"\"crruafyzloguvxwctqmphenbkd\nsrcjafyzlcguvrwctqmphenbkd\nsrijafyzlogbpxwctgmphenbkd\nzrijafyzloguvxrctqmphendkd\nsrijabyzloguvowcqqmphenbkd\nsrijafyzsoguvxwctbmpienbkd\nsrirtfyzlognvxwctqmphenbkd\nsrijafyzloguvxwctgmphenbmq\nsenjafyzloguvxectqmphenbkd\nsrijafyeloguvxwwtqmphembkd\nsrijafyzlogurxtctqmpkenbkd\nsrijafyzlkguvxictqhphenbkd\nsrijafgzlogunxwctqophenbkd\nshijabyzloguvxwctqmqhenbkd\nsrjoafyzloguvxwctqmphenbwd\nsrijafyhloguvxwmtqmphenkkd\nsrijadyzlogwvxwctqmphenbed\nbrijafyzloguvmwctqmphenhkd\nsmijafyzlhguvxwctqmphjnbkd\nsriqafvzloguvxwctqmpheebkd\nsrijafyzloguvxwisqmpuenbkd\nmrijakyuloguvxwctqmphenbkd\nsrnfafyzloguvxwctqmphgnbkd\nsrijadyzloguvxwhfqmphenbkd\nsrijafhzloguvxwctdmlhenbkd\nsrijafyzloguvxwcsqmphykbkd\nsrijafyzlogwvxwatqmphhnbkd\nsrijafyzlozqvxwctqmphenbku\nsrijafyzloguvxwcbamphenbgd\nsrijafyzlfguvxwctqmphzybkd\nsrijafyzloguqxwetqmphenkkd\nsrijafyylogubxwttqmphenbkd\nsrijafyzloguvxzctadphenbkd\nsrijafyzloguoxwhtqmchenbkd\nsrijafyzloguvxwcvqmzhenbko\nsrijnfyzloguvxwctqmchenjkd\nsrijaryzloggvxwctqzphenbkd\nsrijafhzleguvxwcxqmphenbkd\nssijafyzllguvxfctqmphenbkd\nsrijafyzloguvxdctqmfhenbcd\nsrijafyzloguvxfctqmplynbkd\nsrijaftzlogavxwcrqmphenbkd\nsriwaoyzloguvxwctqmphenbtd\nsrijahyzlogunxwctqmphenbvd\nsrjjafyzloguzxwctumphenbkd\nnrijafyzlxguvxwctqmphanbkd\nsrijafezlqguyxwctqmphenbkd\nsrijafygloguvxwjtqcphenbkd\nerijafyzloguvxoctqmnhenbkd\nssijafyzllguvxwbtqmphenbkd\nsriaafyzloguvxwctqqphenbkv\nfrijafyzloguvswctwmphenbkd\nsrijafyzyogkvxwctqmprenbkd\nsyijafyzuoguvxwctqmkhenbkd\nsrijafyzloganxwctqmphenbkf\nsrijafyzloguvxwftqmxhenbkq\nsrijafyflogxvxwctqmghenbkd\nsrijafyzsoguvxwctqmpjenwkd\nsrujafylloguvxwctqmphenckd\nsrijafyzlpzuvxwctqmphenbud\nsrijafyzlogfvxwctqmhhenbwd\nsrijafjzlogusxwctqmphepbkd\nsrijlfyzloguvxwctqfphenzkd\nsrijafyzlogwvxwctqyphenbqd\nsrijafyzloluvxwctqtphenukd\nsrizafyzlowuvxwctqmphqnbkd\nsritafkzlkguvxwctqmphenbkd\nsbijafdzloguvxgctqmphenbkd\ncrijafyeloguvxwctqmpsenbkd\nsrijafyvlogulxwctqmphenbkk\nsrijafyologuvxwctqmehegbkd\nsiijafyzloguvxwctjmphenbmd\nsrijafyzlupuvxwctqmpheabkd\nsrijafyzlogumxwctqqphanbkd\nsrijxfyzlogujxwcqqmphenbkd\nirijafizeoguvxwctqmphenbkd\nsgijafyzloguvtwctqmpfenbkd\nsrijzfyzloguvmwctnmphenbkd\nsrijafyzwohuvxwctqmthenbkd\nsrijafyzlhguvxoctqwphenbkd\nsrgjafyplogxvxwctqmphenbkd\nsrijafyqlogovxwctqzphenbkd\nsrijafjzloguvlnvtqmphenbkd\nsrijafyzooguvxwctqmphenvud\nsrijafyzgoguvxwctumphgnbkd\nsrijaffzloguvxwdqqmphenbkd\nsrijafyzlogugxwctqxphenbkr\nsrijafyzlogutxwctqmmcenbkd\nsrifafyzlhguwxwctqmphenbkd\nmrimajyzloguvxwctqmphenbkd\nsriyafyzloguvxwcthmphejbkd\nsrieakyzlokuvxwctqmphenbkd\nsrisafyzloguhxwctqmphecbkd\nsrijanyzloguvxcctqmxhenbkd\nsrijafyzypguvxwctqmqhenbkd\nsryjtfyzlvguvxwctqmphenbkd\nsrijafyzlsguvxwctqmqfenbkd\nsrijafyzlogudxwbtqwphenbkd\nsrijysyzloguvxwctqmpvenbkd\nsrijafyzloggvxwjtqmphegbkd\nsrijgfyzloguvxwctqmbhdnbkd\nssijufyzloguvawctqmphenbkd\nskojafyzloguvxwctqmphenbnd\nsrijafylloguvxwcqqmpienbkd\ntrioafyzloguvqwctqmphenbkd\nsrijafydloguvxwctqmpzjnbkd\nsaijafvzloguvxwcqqmphenbkd\nsrhjapyzloguvxwctqmbhenbkd\nsrijafyzlfguvxwcsqmpwenbkd\nshijafyzboguvxwctqmphenbmd\nsrizafysloguvxwrtqmphenbkd\nsrijafyzloguvxwciqmwhenbkj\nqrijafyzloduvxwctqmphenbko\nsrijefyuloguvxwctqmphenbed\nsrijafyzlobuvxwctqmphenhbd\nsrijafyzloxuvxwctqmpheabkq\nsrijafyzloguvrwctqmghenkkd\nsfisafywloguvxwctqmphenbkd\nsrgjafyzlogurxwctqmphenbkp\nsrijafhzloguvxwcjqmphenhkd\nsrijafyylogufxwrtqmphenbkd\nsrijafyzvoguvxwzkqmphenbkd\nsqijafyzloguvxwctqmpheqbxd\nsrijafyvloguvxwctqzpherbkd\nsrijufyzloguvxlcsqmphenbkd\nsrijafykloguvx
lccqmphenbkd\nsrijafyzloguexwcrqmphenzkd\nsridifyzloguyxwctqmphenbkd\nsrijafyzlogfvxwctqlphenbkl\nsrijafyzlodqdxwctqmphenbkd\nsrijafyzloruvxactqmphenekd\ngrijafyzloguvxpctmmphenbkd\nsrsjakyzloguvxwctqmphvnbkd\nsrikafyvloguvxwrtqmphenbkd\nsrijafyzloguvxwctqjpserbkd\njrijafyzloguvxwctqmpgesbkd\nswijafyzluguvxwctqmfhenbkd\nsrijanynlogovxwctqmphenbkd\njrijafyzloguvxwctymphrnbkd\nsrinafyzloguvewctqmphenbzd\nsrijakyzloguvxwctqmphcnbka\nsrijafyhlobuvxwctqmphenbka\nsrijafyzcogusxwctqmphwnbkd\nsrijavyzlosuvxwctqmphjnbkd\norijafyzxoguvxwcnqmphenbkd\nsrijafyzlogcvxwvtqmthenbkd\nsrijapyzloauvxwctqmphenvkd\nsrijaflzloguhxwctqmphenbwd\nsmijafyzlonuvxwctqmphenbkw\njrijafyzloguvxwclqmnhenbkd\nsrijaqyzloguvqwctqmphenskd\nsrijasyzloguvxwctqmvhenbku\ncrijtfyzloguvxwctqmthenbkd\nsrrkafyzvoguvxwctqmphenbkd\nsrijatyzloguvewctqmphenbld\nsrfjafyyloguvnwctqmphenbkd\nsrijafyzloguvxwctqjpbenbkt\nhrijafyzooguvxwctqmphenbld\nsrijafbzlogscxwctqmphenbkd\nsrinafyzlogxvxwctqqphenbkd\nslijafyzloglvxwctqmphenbdd\nsrijafyzlogjvxwcsqmphenbld\nsryjcfyzloguvewctqmphenbkd\nsrijafyzloguexwctqmohknbkd\njaijafyzlogevxwctqmphenbkd\nsrijafbzlogavxwctqmphenbki\nsrijafozlogpvxwctqmphgnbkd\nsrijdfyzloguvxwczqmphenbkm\nsrijafyzlobuvxwctqmphxndkd\nmrijifyzlhguvxwctqmphenbkd\nsrijafyzloguvxbctumphjnbkd\nsrijafyzloyuvxwptqmphlnbkd\narijafyzloguvxwcsqmohenbkd\nsrijaftzioguvxwttqmphenbkd\nsrijafyzlqsuvxwctqmphxnbkd\nsrijafyzioguvxwctqnphetbkd\nprijafbzloguvxdctqmphenbkd\nsrijaeyzlnguvxwmtqmphenbkd\nsrijofyzloguvqwctqmphonbkd\nsrixaryzpoguvxwctqmphenbkd\nsrijafyzlowuvxwcwhmphenbkd\nsrijafydloguvxwctqmptenikd\nsrijqfyzlogtvfwctqmphenbkd\nsrijafyzloguvxlctqmpvenbgd\nsrijafyzlbguvxwjtqgphenbkd\nsrijafyzlohuqxwctqmphenbka\nsrijafyzroguvxictqmphynbkd\nsrijafyzloguvxdctjmphenjkd\nsrijaoczloguvxwctqmphenbjd\nsrajafhzloguvxwctqmphenbke\nsrijofyzloduvxwctqmphanbkd\nsrijafytloguvxwmtnmphenbkd\nsrijafyzuoguvxwceqmpgenbkd\nrrijafyzloyuvxwctqmphlnbkd\nsrljafyzloguvxictqmohenbkd\nsrijafyzlogulxwcrqrphenbkd\nsrajafyzloguvxwctqmphanbke\nsrijafyzlhguvxwxtqmpheabkd\nsxijafyzloggwxwctqmphenbkd\nsrijafyultguvxwctqmphinbkd\nsrijafyzloguvtwctqmfhvnbkd\nsrijafwzloruvxwctquphenbkd\nsrbjafyzxoguuxwctqmphenbkd\nerijafyzlxguvxbctqmphenbkd\nsrijagyzlojubxwctqmphenbkd\nsrijafyzloguvxwdtqmchenakd\nsrijafkzlogukxwctqiphenbkd\nmridafyzloguvxwctqmphenrkd\nszqjafyzloguvxwctqmpheibkd\nsrijahyzloguvxwctcmphenekd\nsrijafyzloguvxwczpuphenbkd\nsrijafyzcoguvfwctqmphenbkq\nqriiafyzloguvxwctqmpheebkd\nsrijpfyzloguvxlctqmphenokd\nsrijzfyzlotuvxwcjqmphenbkd\nsrinafyqloguvxwctfmphenbkd\nsrijafyzlogjvxpltqmphenbkd\nsrijafyzlotuvxwutqmphenbtd\nsridafyzloguvxwctqmpyenokd\nsrxjafyzqogyvxwctqmphenbkd\nssijafyzzoguvxwctqmphenbad\nsrijafrzloguvxwctqmphekpkd\nsrijafyzlfgrvxactqmphenbkd\nsrijafyzroguvxwttqmphekbkd\nsrijefyzloguvxwctqmpqenbrd\nsrijefycloguvxwctqmchenbkd\nsrzjafyzloguvxwcqqmphanbkd\nsrijauyzlhguvxwctqmphenbgd\nsrijafyzloguvmwvnqmphenbkd\nsrihafyzloguvlwotqmphenbkd\nsrigafyzloguvxwctqmphennsd\nsriuafzzloguvxwcuqmphenbkd\nsrijavuzllguvxwctqmphenbkd\nsrijafjzloguvlnctqmphenbkd\nlrirafyzloguvxwctqmphenbld\nsoijarxzloguvxwctqmphenbkd\nsrijapyzlnguvxwctqmdhenbkd\nsrijafyzkogujxmctqmphenbkd\nsrijafuzloguvxwcsqvphenbkd\nsrijagyzzoguvxwctqmpvenbkd\nsrijafyzlovuvxwctqmrhenbxd\nsrijafyzqoguvxwctwmpienbkd\nsxijafyzloguvxwutqmphenlkd\nsrijafyzlhgzvxwctqmphqnbkd\nsrijajyzloguvxwcbwmphenbkd\nsrijazyzloguvxwhtqmphenbkx\nsrgjafyzloguvvwctqmphdnbkd\nrrivafyzloguvxjctqmphenbkd\nsrijifyzdoguvxwctqmphenbka\nhrijafyzloguvxectqmpheybkd\"\"\"\nstartTime = time.time()\ninputList = list(map(str, 
inputStr.splitlines()))\nnumRepeatsChar = 0\ndoubleDupes = 0\ntripleDupes = 0\nfor string in inputList:\n hasDoubleDupes = False\n hasTripleDupes = False\n for char in string:\n numRepeatsChar = string.count(char)\n if numRepeatsChar == 2 and not hasDoubleDupes:\n doubleDupes += 1\n hasDoubleDupes = True\n elif numRepeatsChar == 3 and not hasTripleDupes:\n tripleDupes += 1\n hasTripleDupes = True\n elif hasDoubleDupes and hasTripleDupes:\n break\n print(doubleDupes)\n print(tripleDupes)\ncheckSum = doubleDupes * tripleDupes\nprint('Checksum: ' + str(checkSum))\nprint('%s seconds' % (time.time() - startTime))\n",
"step-5": "import time\n\ninputStr = \"\"\"crruafyzloguvxwctqmphenbkd\nsrcjafyzlcguvrwctqmphenbkd\nsrijafyzlogbpxwctgmphenbkd\nzrijafyzloguvxrctqmphendkd\nsrijabyzloguvowcqqmphenbkd\nsrijafyzsoguvxwctbmpienbkd\nsrirtfyzlognvxwctqmphenbkd\nsrijafyzloguvxwctgmphenbmq\nsenjafyzloguvxectqmphenbkd\nsrijafyeloguvxwwtqmphembkd\nsrijafyzlogurxtctqmpkenbkd\nsrijafyzlkguvxictqhphenbkd\nsrijafgzlogunxwctqophenbkd\nshijabyzloguvxwctqmqhenbkd\nsrjoafyzloguvxwctqmphenbwd\nsrijafyhloguvxwmtqmphenkkd\nsrijadyzlogwvxwctqmphenbed\nbrijafyzloguvmwctqmphenhkd\nsmijafyzlhguvxwctqmphjnbkd\nsriqafvzloguvxwctqmpheebkd\nsrijafyzloguvxwisqmpuenbkd\nmrijakyuloguvxwctqmphenbkd\nsrnfafyzloguvxwctqmphgnbkd\nsrijadyzloguvxwhfqmphenbkd\nsrijafhzloguvxwctdmlhenbkd\nsrijafyzloguvxwcsqmphykbkd\nsrijafyzlogwvxwatqmphhnbkd\nsrijafyzlozqvxwctqmphenbku\nsrijafyzloguvxwcbamphenbgd\nsrijafyzlfguvxwctqmphzybkd\nsrijafyzloguqxwetqmphenkkd\nsrijafyylogubxwttqmphenbkd\nsrijafyzloguvxzctadphenbkd\nsrijafyzloguoxwhtqmchenbkd\nsrijafyzloguvxwcvqmzhenbko\nsrijnfyzloguvxwctqmchenjkd\nsrijaryzloggvxwctqzphenbkd\nsrijafhzleguvxwcxqmphenbkd\nssijafyzllguvxfctqmphenbkd\nsrijafyzloguvxdctqmfhenbcd\nsrijafyzloguvxfctqmplynbkd\nsrijaftzlogavxwcrqmphenbkd\nsriwaoyzloguvxwctqmphenbtd\nsrijahyzlogunxwctqmphenbvd\nsrjjafyzloguzxwctumphenbkd\nnrijafyzlxguvxwctqmphanbkd\nsrijafezlqguyxwctqmphenbkd\nsrijafygloguvxwjtqcphenbkd\nerijafyzloguvxoctqmnhenbkd\nssijafyzllguvxwbtqmphenbkd\nsriaafyzloguvxwctqqphenbkv\nfrijafyzloguvswctwmphenbkd\nsrijafyzyogkvxwctqmprenbkd\nsyijafyzuoguvxwctqmkhenbkd\nsrijafyzloganxwctqmphenbkf\nsrijafyzloguvxwftqmxhenbkq\nsrijafyflogxvxwctqmghenbkd\nsrijafyzsoguvxwctqmpjenwkd\nsrujafylloguvxwctqmphenckd\nsrijafyzlpzuvxwctqmphenbud\nsrijafyzlogfvxwctqmhhenbwd\nsrijafjzlogusxwctqmphepbkd\nsrijlfyzloguvxwctqfphenzkd\nsrijafyzlogwvxwctqyphenbqd\nsrijafyzloluvxwctqtphenukd\nsrizafyzlowuvxwctqmphqnbkd\nsritafkzlkguvxwctqmphenbkd\nsbijafdzloguvxgctqmphenbkd\ncrijafyeloguvxwctqmpsenbkd\nsrijafyvlogulxwctqmphenbkk\nsrijafyologuvxwctqmehegbkd\nsiijafyzloguvxwctjmphenbmd\nsrijafyzlupuvxwctqmpheabkd\nsrijafyzlogumxwctqqphanbkd\nsrijxfyzlogujxwcqqmphenbkd\nirijafizeoguvxwctqmphenbkd\nsgijafyzloguvtwctqmpfenbkd\nsrijzfyzloguvmwctnmphenbkd\nsrijafyzwohuvxwctqmthenbkd\nsrijafyzlhguvxoctqwphenbkd\nsrgjafyplogxvxwctqmphenbkd\nsrijafyqlogovxwctqzphenbkd\nsrijafjzloguvlnvtqmphenbkd\nsrijafyzooguvxwctqmphenvud\nsrijafyzgoguvxwctumphgnbkd\nsrijaffzloguvxwdqqmphenbkd\nsrijafyzlogugxwctqxphenbkr\nsrijafyzlogutxwctqmmcenbkd\nsrifafyzlhguwxwctqmphenbkd\nmrimajyzloguvxwctqmphenbkd\nsriyafyzloguvxwcthmphejbkd\nsrieakyzlokuvxwctqmphenbkd\nsrisafyzloguhxwctqmphecbkd\nsrijanyzloguvxcctqmxhenbkd\nsrijafyzypguvxwctqmqhenbkd\nsryjtfyzlvguvxwctqmphenbkd\nsrijafyzlsguvxwctqmqfenbkd\nsrijafyzlogudxwbtqwphenbkd\nsrijysyzloguvxwctqmpvenbkd\nsrijafyzloggvxwjtqmphegbkd\nsrijgfyzloguvxwctqmbhdnbkd\nssijufyzloguvawctqmphenbkd\nskojafyzloguvxwctqmphenbnd\nsrijafylloguvxwcqqmpienbkd\ntrioafyzloguvqwctqmphenbkd\nsrijafydloguvxwctqmpzjnbkd\nsaijafvzloguvxwcqqmphenbkd\nsrhjapyzloguvxwctqmbhenbkd\nsrijafyzlfguvxwcsqmpwenbkd\nshijafyzboguvxwctqmphenbmd\nsrizafysloguvxwrtqmphenbkd\nsrijafyzloguvxwciqmwhenbkj\nqrijafyzloduvxwctqmphenbko\nsrijefyuloguvxwctqmphenbed\nsrijafyzlobuvxwctqmphenhbd\nsrijafyzloxuvxwctqmpheabkq\nsrijafyzloguvrwctqmghenkkd\nsfisafywloguvxwctqmphenbkd\nsrgjafyzlogurxwctqmphenbkp\nsrijafhzloguvxwcjqmphenhkd\nsrijafyylogufxwrtqmphenbkd\nsrijafyzvoguvxwzkqmphenbkd\nsqijafyzloguvxwctqmpheqbxd\nsrijafyvloguvxwctqzpherbkd\nsrijufyzloguvxlcsqmphenbkd\nsrijafyklogu
vxlccqmphenbkd\nsrijafyzloguexwcrqmphenzkd\nsridifyzloguyxwctqmphenbkd\nsrijafyzlogfvxwctqlphenbkl\nsrijafyzlodqdxwctqmphenbkd\nsrijafyzloruvxactqmphenekd\ngrijafyzloguvxpctmmphenbkd\nsrsjakyzloguvxwctqmphvnbkd\nsrikafyvloguvxwrtqmphenbkd\nsrijafyzloguvxwctqjpserbkd\njrijafyzloguvxwctqmpgesbkd\nswijafyzluguvxwctqmfhenbkd\nsrijanynlogovxwctqmphenbkd\njrijafyzloguvxwctymphrnbkd\nsrinafyzloguvewctqmphenbzd\nsrijakyzloguvxwctqmphcnbka\nsrijafyhlobuvxwctqmphenbka\nsrijafyzcogusxwctqmphwnbkd\nsrijavyzlosuvxwctqmphjnbkd\norijafyzxoguvxwcnqmphenbkd\nsrijafyzlogcvxwvtqmthenbkd\nsrijapyzloauvxwctqmphenvkd\nsrijaflzloguhxwctqmphenbwd\nsmijafyzlonuvxwctqmphenbkw\njrijafyzloguvxwclqmnhenbkd\nsrijaqyzloguvqwctqmphenskd\nsrijasyzloguvxwctqmvhenbku\ncrijtfyzloguvxwctqmthenbkd\nsrrkafyzvoguvxwctqmphenbkd\nsrijatyzloguvewctqmphenbld\nsrfjafyyloguvnwctqmphenbkd\nsrijafyzloguvxwctqjpbenbkt\nhrijafyzooguvxwctqmphenbld\nsrijafbzlogscxwctqmphenbkd\nsrinafyzlogxvxwctqqphenbkd\nslijafyzloglvxwctqmphenbdd\nsrijafyzlogjvxwcsqmphenbld\nsryjcfyzloguvewctqmphenbkd\nsrijafyzloguexwctqmohknbkd\njaijafyzlogevxwctqmphenbkd\nsrijafbzlogavxwctqmphenbki\nsrijafozlogpvxwctqmphgnbkd\nsrijdfyzloguvxwczqmphenbkm\nsrijafyzlobuvxwctqmphxndkd\nmrijifyzlhguvxwctqmphenbkd\nsrijafyzloguvxbctumphjnbkd\nsrijafyzloyuvxwptqmphlnbkd\narijafyzloguvxwcsqmohenbkd\nsrijaftzioguvxwttqmphenbkd\nsrijafyzlqsuvxwctqmphxnbkd\nsrijafyzioguvxwctqnphetbkd\nprijafbzloguvxdctqmphenbkd\nsrijaeyzlnguvxwmtqmphenbkd\nsrijofyzloguvqwctqmphonbkd\nsrixaryzpoguvxwctqmphenbkd\nsrijafyzlowuvxwcwhmphenbkd\nsrijafydloguvxwctqmptenikd\nsrijqfyzlogtvfwctqmphenbkd\nsrijafyzloguvxlctqmpvenbgd\nsrijafyzlbguvxwjtqgphenbkd\nsrijafyzlohuqxwctqmphenbka\nsrijafyzroguvxictqmphynbkd\nsrijafyzloguvxdctjmphenjkd\nsrijaoczloguvxwctqmphenbjd\nsrajafhzloguvxwctqmphenbke\nsrijofyzloduvxwctqmphanbkd\nsrijafytloguvxwmtnmphenbkd\nsrijafyzuoguvxwceqmpgenbkd\nrrijafyzloyuvxwctqmphlnbkd\nsrljafyzloguvxictqmohenbkd\nsrijafyzlogulxwcrqrphenbkd\nsrajafyzloguvxwctqmphanbke\nsrijafyzlhguvxwxtqmpheabkd\nsxijafyzloggwxwctqmphenbkd\nsrijafyultguvxwctqmphinbkd\nsrijafyzloguvtwctqmfhvnbkd\nsrijafwzloruvxwctquphenbkd\nsrbjafyzxoguuxwctqmphenbkd\nerijafyzlxguvxbctqmphenbkd\nsrijagyzlojubxwctqmphenbkd\nsrijafyzloguvxwdtqmchenakd\nsrijafkzlogukxwctqiphenbkd\nmridafyzloguvxwctqmphenrkd\nszqjafyzloguvxwctqmpheibkd\nsrijahyzloguvxwctcmphenekd\nsrijafyzloguvxwczpuphenbkd\nsrijafyzcoguvfwctqmphenbkq\nqriiafyzloguvxwctqmpheebkd\nsrijpfyzloguvxlctqmphenokd\nsrijzfyzlotuvxwcjqmphenbkd\nsrinafyqloguvxwctfmphenbkd\nsrijafyzlogjvxpltqmphenbkd\nsrijafyzlotuvxwutqmphenbtd\nsridafyzloguvxwctqmpyenokd\nsrxjafyzqogyvxwctqmphenbkd\nssijafyzzoguvxwctqmphenbad\nsrijafrzloguvxwctqmphekpkd\nsrijafyzlfgrvxactqmphenbkd\nsrijafyzroguvxwttqmphekbkd\nsrijefyzloguvxwctqmpqenbrd\nsrijefycloguvxwctqmchenbkd\nsrzjafyzloguvxwcqqmphanbkd\nsrijauyzlhguvxwctqmphenbgd\nsrijafyzloguvmwvnqmphenbkd\nsrihafyzloguvlwotqmphenbkd\nsrigafyzloguvxwctqmphennsd\nsriuafzzloguvxwcuqmphenbkd\nsrijavuzllguvxwctqmphenbkd\nsrijafjzloguvlnctqmphenbkd\nlrirafyzloguvxwctqmphenbld\nsoijarxzloguvxwctqmphenbkd\nsrijapyzlnguvxwctqmdhenbkd\nsrijafyzkogujxmctqmphenbkd\nsrijafuzloguvxwcsqvphenbkd\nsrijagyzzoguvxwctqmpvenbkd\nsrijafyzlovuvxwctqmrhenbxd\nsrijafyzqoguvxwctwmpienbkd\nsxijafyzloguvxwutqmphenlkd\nsrijafyzlhgzvxwctqmphqnbkd\nsrijajyzloguvxwcbwmphenbkd\nsrijazyzloguvxwhtqmphenbkx\nsrgjafyzloguvvwctqmphdnbkd\nrrivafyzloguvxjctqmphenbkd\nsrijifyzdoguvxwctqmphenbka\nhrijafyzloguvxectqmpheybkd\"\"\"\n\nstartTime = time.time()\ninputList = list(map(str, 
inputStr.splitlines()))\n\nnumRepeatsChar = 0\ndoubleDupes = 0\ntripleDupes = 0\n\nfor string in inputList:\n hasDoubleDupes = False\n hasTripleDupes = False\n for char in string:\n numRepeatsChar = string.count(char)\n if numRepeatsChar == 2 and not hasDoubleDupes:\n doubleDupes += 1\n hasDoubleDupes = True\n \n elif numRepeatsChar == 3 and not hasTripleDupes:\n tripleDupes += 1\n hasTripleDupes = True\n \n elif hasDoubleDupes and hasTripleDupes:\n break\n\n print(doubleDupes)\n print(tripleDupes)\n\ncheckSum = doubleDupes * tripleDupes\nprint('Checksum: ' + str(checkSum))\n\nprint(\"%s seconds\" % (time.time() - startTime))",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from setuptools import setup, find_packages
setup(name="sk_processor", packages=find_packages())
|
normal
|
{
"blob_id": "de884413dcbd0e89e8bfcf5657fe189156d9a661",
"index": 1837,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nsetup(name='sk_processor', packages=find_packages())\n",
"step-3": "from setuptools import setup, find_packages\nsetup(name='sk_processor', packages=find_packages())\n",
"step-4": "from setuptools import setup, find_packages\n\nsetup(name=\"sk_processor\", packages=find_packages())",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Fetch screen scores with customizable search criteria
that can be tailored to match your own requirements
in tab format
"""
import requests
from core import config as cfg
screen_id = 178
request_url = cfg.BASE_URL + "/screen/" + str(screen_id)
# These parameters can be modified to match any search criteria following
# the rules outlined in the Wiki: https://wiki.thebiogrid.org/doku.php/orcs:webservice
# In this instance, we've chosen to return results in "tab" format with a header, and
# to limit scores in the SCORE.1 column to the range of 0.9 -> 0.98
params = {
"accesskey": cfg.ACCESS_KEY,
"format": "tab",
"header": "yes",
"score1min": 0.9,
"score1max": 0.98
}
r = requests.get( request_url, params = params )
screen = r.text.splitlines( )
row_count = 0
data = {}
for row in screen :
# Skip the header, but you could have also simply turned
# it off with header: "no" as a parameter instead
if row_count == 0 :
row_count = row_count + 1
continue
# Tab files are tab delimited
row = row.split( "\t" )
# create a hash of results by gene identifier
data[row[1]] = row
# Print out data about the genes BRIX1, ASB4, and NOB1
print( data['55299'] )
print( data['51666'] )
print( data['28987'] )
"""
Output as of version 1.0.1:
['178', '55299', 'gene', 'BRIX1', 'BRIX|BXDC2|FLJ11100', '9606', 'Homo sapiens', '0.94239', '0.999965', '-', '-', '-', 'NO', 'BioGRID ORCS']
['178', '51666', 'gene', 'ASB4', 'ASB-4', '9606', 'Homo sapiens', '0.97613', '0.999965', '-', '-', '-', 'NO', 'BioGRID ORCS']
['178', '28987', 'gene', 'NOB1', 'ART-4|MST158|MSTP158|NOB1P|PSMD8BP1', '9606', 'Homo sapiens', '0.96316', '0.999965', '-', '-', '-', 'NO', 'BioGRID ORCS']
"""
|
normal
|
{
"blob_id": "80c6dd1c76b3ac56f34e36f571e8db3927994311",
"index": 8162,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor row in screen:\n if row_count == 0:\n row_count = row_count + 1\n continue\n row = row.split('\\t')\n data[row[1]] = row\nprint(data['55299'])\nprint(data['51666'])\nprint(data['28987'])\n<mask token>\n",
"step-3": "<mask token>\nscreen_id = 178\nrequest_url = cfg.BASE_URL + '/screen/' + str(screen_id)\nparams = {'accesskey': cfg.ACCESS_KEY, 'format': 'tab', 'header': 'yes',\n 'score1min': 0.9, 'score1max': 0.98}\nr = requests.get(request_url, params=params)\nscreen = r.text.splitlines()\nrow_count = 0\ndata = {}\nfor row in screen:\n if row_count == 0:\n row_count = row_count + 1\n continue\n row = row.split('\\t')\n data[row[1]] = row\nprint(data['55299'])\nprint(data['51666'])\nprint(data['28987'])\n<mask token>\n",
"step-4": "<mask token>\nimport requests\nfrom core import config as cfg\nscreen_id = 178\nrequest_url = cfg.BASE_URL + '/screen/' + str(screen_id)\nparams = {'accesskey': cfg.ACCESS_KEY, 'format': 'tab', 'header': 'yes',\n 'score1min': 0.9, 'score1max': 0.98}\nr = requests.get(request_url, params=params)\nscreen = r.text.splitlines()\nrow_count = 0\ndata = {}\nfor row in screen:\n if row_count == 0:\n row_count = row_count + 1\n continue\n row = row.split('\\t')\n data[row[1]] = row\nprint(data['55299'])\nprint(data['51666'])\nprint(data['28987'])\n<mask token>\n",
"step-5": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n\"\"\"\nFetch screen scores with customizable search criteria\nthat can be tailored to match your own requirements\nin tab format\n\"\"\"\n\nimport requests\nfrom core import config as cfg\n\nscreen_id = 178\nrequest_url = cfg.BASE_URL + \"/screen/\" + str(screen_id)\n\n# These parameters can be modified to match any search criteria following\n# the rules outlined in the Wiki: https://wiki.thebiogrid.org/doku.php/orcs:webservice\n# In this instance, we've chosen to return results in \"tab\" format with a header, and \n# to limit scores in the SCORE.1 column to the range of 0.9 -> 0.98\nparams = {\n \"accesskey\": cfg.ACCESS_KEY,\n \"format\": \"tab\",\n \"header\": \"yes\",\n \"score1min\": 0.9,\n \"score1max\": 0.98\n}\n\nr = requests.get( request_url, params = params )\nscreen = r.text.splitlines( )\n\nrow_count = 0\ndata = {}\nfor row in screen :\n\n # Skip the header, but you could have also simply turned\n # it off with header: \"no\" as a parameter instead\n if row_count == 0 :\n row_count = row_count + 1\n continue\n\n # Tab files are tab delimited\n row = row.split( \"\\t\" )\n \n # create a hash of results by gene identifier\n data[row[1]] = row\n\n# Print out data about the genes BRIX1, ASB4, and NOB1\nprint( data['55299'] )\nprint( data['51666'] )\nprint( data['28987'] )\n\n\"\"\" \nOutput as of version 1.0.1:\n['178', '55299', 'gene', 'BRIX1', 'BRIX|BXDC2|FLJ11100', '9606', 'Homo sapiens', '0.94239', '0.999965', '-', '-', '-', 'NO', 'BioGRID ORCS']\n['178', '51666', 'gene', 'ASB4', 'ASB-4', '9606', 'Homo sapiens', '0.97613', '0.999965', '-', '-', '-', 'NO', 'BioGRID ORCS']\n['178', '28987', 'gene', 'NOB1', 'ART-4|MST158|MSTP158|NOB1P|PSMD8BP1', '9606', 'Homo sapiens', '0.96316', '0.999965', '-', '-', '-', 'NO', 'BioGRID ORCS']\n\"\"\"",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from numpy import sqrt
def Schout2ConTank(a, b, d):
# This function converts parameters from Schoutens notation to Cont-Tankov
# notation
## Code
th = d * b / sqrt(a ** 2 - b ** 2)
k = 1 / (d * sqrt(a ** 2 - b ** 2))
s = sqrt(d / sqrt(a ** 2 - b ** 2))
return th, k, s
|
normal
|
{
"blob_id": "4dda122a8c3a2aab62bb202945f6fb9cb73cf772",
"index": 8330,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef Schout2ConTank(a, b, d):\n th = d * b / sqrt(a ** 2 - b ** 2)\n k = 1 / (d * sqrt(a ** 2 - b ** 2))\n s = sqrt(d / sqrt(a ** 2 - b ** 2))\n return th, k, s\n",
"step-3": "from numpy import sqrt\n\n\ndef Schout2ConTank(a, b, d):\n th = d * b / sqrt(a ** 2 - b ** 2)\n k = 1 / (d * sqrt(a ** 2 - b ** 2))\n s = sqrt(d / sqrt(a ** 2 - b ** 2))\n return th, k, s\n",
"step-4": "from numpy import sqrt\n\n\ndef Schout2ConTank(a, b, d):\n # This function converts parameters from Schoutens notation to Cont-Tankov\n # notation\n\n ## Code\n th = d * b / sqrt(a ** 2 - b ** 2)\n k = 1 / (d * sqrt(a ** 2 - b ** 2))\n s = sqrt(d / sqrt(a ** 2 - b ** 2))\n return th, k, s\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Dec 4 20:28:44 2019
@author: nicholustintzaw
"""
####################################################################################################
####################################################################################################
'''
project tite : social pension database - national level
purpose : data migration national social pension data check and summary statistics
developed by : Nicholus Tint Zaw
modified date : 3rd Dec 2019
follow-up action:
'''
####################################################################################################
####################################################################################################
### PLEASE, CHANGE YOUR DIRECTORY BELOW ###
masterdir = r'C:\Users\Age.ing\Dropbox\01_Eligable\_New_QRT_COMBINE_CHECK_Window'
### PLEASE, CHANGE THE CASH TRANSFER BUDGET YEAR QUARTER BELOW ###
qrt = '1st_qrt_2019_2020'
####################################################################################################
####################################################################################################
################ PLEASE, DON'T TOUCH ANY PYTHON CODES BELOW ########################################
####################################################################################################
####################################################################################################
####################################################################################################
### task 1: prepare the directory setting
####################################################################################################
import os
os.chdir(masterdir)
exec(open("01_newqs_directory.py", 'r', encoding="utf8").read())
####################################################################################################
### task 2: combined all completed new quarter files
####################################################################################################
## IN
# 02_new_register
exec(open("02_new_register.py", 'r', encoding="utf8").read())
# 03_moved_in
exec(open("03_moved_in.py", 'r', encoding="utf8").read())
# 04_false_death
exec(open("04_false_death.py", 'r', encoding="utf8").read())
# OUT
# 05_death
exec(open("05_death.py", 'r', encoding="utf8").read())
# 06_moved_out
exec(open("06_moved_out.py", 'r', encoding="utf8").read())
# 07_false_reg
exec(open("07_false_reg.py", 'r', encoding="utf8").read())
# COMBINED REPORT
# State and Region level combined
exec(open("08_combined_report.py", 'r', encoding="utf8").read())
####################################################################################################
|
normal
|
{
"blob_id": "5a2716fc7b4c0a56fbd0de5d45d71fb33320adf0",
"index": 2889,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nos.chdir(masterdir)\nexec(open('01_newqs_directory.py', 'r', encoding='utf8').read())\nexec(open('02_new_register.py', 'r', encoding='utf8').read())\nexec(open('03_moved_in.py', 'r', encoding='utf8').read())\nexec(open('04_false_death.py', 'r', encoding='utf8').read())\nexec(open('05_death.py', 'r', encoding='utf8').read())\nexec(open('06_moved_out.py', 'r', encoding='utf8').read())\nexec(open('07_false_reg.py', 'r', encoding='utf8').read())\nexec(open('08_combined_report.py', 'r', encoding='utf8').read())\n",
"step-3": "<mask token>\nmasterdir = (\n 'C:\\\\Users\\\\Age.ing\\\\Dropbox\\\\01_Eligable\\\\_New_QRT_COMBINE_CHECK_Window')\nqrt = '1st_qrt_2019_2020'\n<mask token>\nos.chdir(masterdir)\nexec(open('01_newqs_directory.py', 'r', encoding='utf8').read())\nexec(open('02_new_register.py', 'r', encoding='utf8').read())\nexec(open('03_moved_in.py', 'r', encoding='utf8').read())\nexec(open('04_false_death.py', 'r', encoding='utf8').read())\nexec(open('05_death.py', 'r', encoding='utf8').read())\nexec(open('06_moved_out.py', 'r', encoding='utf8').read())\nexec(open('07_false_reg.py', 'r', encoding='utf8').read())\nexec(open('08_combined_report.py', 'r', encoding='utf8').read())\n",
"step-4": "<mask token>\nmasterdir = (\n 'C:\\\\Users\\\\Age.ing\\\\Dropbox\\\\01_Eligable\\\\_New_QRT_COMBINE_CHECK_Window')\nqrt = '1st_qrt_2019_2020'\nimport os\nos.chdir(masterdir)\nexec(open('01_newqs_directory.py', 'r', encoding='utf8').read())\nexec(open('02_new_register.py', 'r', encoding='utf8').read())\nexec(open('03_moved_in.py', 'r', encoding='utf8').read())\nexec(open('04_false_death.py', 'r', encoding='utf8').read())\nexec(open('05_death.py', 'r', encoding='utf8').read())\nexec(open('06_moved_out.py', 'r', encoding='utf8').read())\nexec(open('07_false_reg.py', 'r', encoding='utf8').read())\nexec(open('08_combined_report.py', 'r', encoding='utf8').read())\n",
"step-5": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Dec 4 20:28:44 2019\n\n@author: nicholustintzaw\n\"\"\"\n\n\n####################################################################################################\n####################################################################################################\n'''\nproject tite : social pension database - national level\npurpose : data migration national social pension data check and summary statistics\ndeveloped by : Nicholus Tint Zaw \nmodified date : 3rd Dec 2019\n\nfollow-up action:\n \n'''\n####################################################################################################\n####################################################################################################\n\n\n### PLEASE, CHANGE YOUR DIRECTORY BELOW ###\nmasterdir = r'C:\\Users\\Age.ing\\Dropbox\\01_Eligable\\_New_QRT_COMBINE_CHECK_Window'\n\n\n### PLEASE, CHANGE THE CASH TRANSFER BUDGET YEAR QUARTER BELOW ###\nqrt = '1st_qrt_2019_2020'\n\n\n\n\n####################################################################################################\n####################################################################################################\n################ PLEASE, DON'T TOUCH ANY PYTHON CODES BELOW ########################################\n####################################################################################################\n####################################################################################################\n\n\n\n\n####################################################################################################\n### task 1: prepare the directory setting\n####################################################################################################\n\nimport os\nos.chdir(masterdir)\n\nexec(open(\"01_newqs_directory.py\", 'r', encoding=\"utf8\").read())\n\n\n\n####################################################################################################\n### task 2: combined all completed new quarter files\n####################################################################################################\n\n \n## IN\n\n# 02_new_register\nexec(open(\"02_new_register.py\", 'r', encoding=\"utf8\").read())\n\n# 03_moved_in\nexec(open(\"03_moved_in.py\", 'r', encoding=\"utf8\").read())\n\n# 04_false_death\nexec(open(\"04_false_death.py\", 'r', encoding=\"utf8\").read())\n\n\n\n# OUT\n# 05_death\nexec(open(\"05_death.py\", 'r', encoding=\"utf8\").read())\n\n# 06_moved_out\nexec(open(\"06_moved_out.py\", 'r', encoding=\"utf8\").read())\n\n# 07_false_reg\nexec(open(\"07_false_reg.py\", 'r', encoding=\"utf8\").read())\n\n\n# COMBINED REPORT\n# State and Region level combined\nexec(open(\"08_combined_report.py\", 'r', encoding=\"utf8\").read())\n\n\n####################################################################################################\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import requests
import time
import urllib
import argparse
from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from fake_useragent import UserAgent
from multiprocessing import Pool
from lxml.html import fromstring
import os, sys
#text = 'chowchowbaby'
#url='https://www.google.co.kr/search?q=' + text + '&source=lnms&tbm=isch&sa=X&ved=0ahUKEwiF2fPLn7zdAhUBEbwKHSLWBowQ_AUICigB&biw=809&bih=868&dpr=1.13'
def search(url):
#Create a browser
browser=webdriver.Chrome(executable_path='C:\\Users\\inaee\\Downloads\\chromedriver_win32\\chromedriver.exe')
#Open the link
browser.get(url)
time.sleep(1)
element=browser.find_element_by_tag_name("body")
#Scroll down
for i in range(30):
element.send_keys(Keys.PAGE_DOWN)
time.sleep(0.2)
browser.find_element_by_id("smb").click()
for i in range(50):
element.send_keys(Keys.PAGE_DOWN)
time.sleep(0.2)
time.sleep(1)
#Get page source and close the browser
source=browser.page_source
browser.close()
return source
def download_image(link):
# Use a random user agent header
headers = {"User-Agent": ua.random}
# Get the image link
try:
r = requests.get("https://www.google.com" + link.get("href"), headers=headers)
except:
print("Cannot get link.")
title = str(fromstring(r.content).findtext(".//title"))
link = title.split(" ")[-1]
# Download the image
print("At : " + os.getcwd() + ", Downloading from " + link)
try:
if link.split(".")[-1] == ('jpg' or 'png' or 'jpeg'):
urllib.request.urlretrieve(link, link.split("/")[-1])
except:
pass
if __name__ == "__main__":
# parse command line options
parser = argparse.ArgumentParser()
parser.add_argument("keyword", help="the keyword to search")
args = parser.parse_args()
# set stack limit
sys.setrecursionlimit(100000000)
# get user input and search on google
query = args.keyword
#query = input("Enter the name you want to search")
url = "https://www.google.com/search?as_st=y&tbm=isch&as_q=" + query + \
"&as_epq=&as_oq=&as_eq=&cr=&as_sitesearch=&safe=images&tbs=isz:lt,islt:svga,itp:photo,ift:jpg"
source = search(url)
# Parse the page source and download pics
soup = BeautifulSoup(str(source), "html.parser")
ua = UserAgent()
# check directory and create if necessary
if not os.path.isdir(args.keyword):
os.makedirs(args.keyword)
os.chdir(str(os.getcwd()) + "/" + str(args.keyword))
# get the links
links = soup.find_all("a", class_="rg_l")
# open some processes to download
with Pool() as pool:
pool.map(download_image, links)
# 검색어
#search = 'chowchowbaby'
#url='https://www.google.co.kr/search?q=' + search + '&source=lnms&tbm=isch&sa=X&ved=0ahUKEwiF2fPLn7zdAhUBEbwKHSLWBowQ_AUICigB&biw=809&bih=868&dpr=1.13'
# url
#driver = webdriver.Chrome(executable_path="C:\\Users\\inaee\\Downloads\\chromedriver_win32\\chromedriver.exe")
#driver.get(url)
#driver.implicitly_wait(2)
#num_of_pagedowns = 50
#elem = driver.find_element_by_xpath('/html/body')
#i = 0
#count = 1
#img = driver.find_elements_by_tag_name("img")
#while i < num_of_pagedowns:
#for item in img:
# if(count>0 and count<502):
# elem.send_keys(Keys.DOWN)
# time.sleep(1)
# full_name = "C:\\Program Files\\Python35\\강아지크롤러\\chowchowbaby\\" + str(count) + "_chowchowbaby.jpg"
# try:
# urllib.request.urlretrieve(item.get_attribute('src'), full_name)
# tfp=open(full_name,url)
# print(item.get_attribute('src')[:30] + " : ")
# except:
# urllib.request.urlretrieve(item.get_attribute('data-src'), full_name)
# tfp=open(full_name,url)
# print(item.get_attribute('data-src')[:30] + " : ")
# count = count+1
# i =i+1
#driver.Quit()
#print("Done.")
|
normal
|
{
"blob_id": "142a2ba3ec2f6b35f4339ed9fffe7357c1a85fa0",
"index": 219,
"step-1": "<mask token>\n\n\ndef search(url):\n browser = webdriver.Chrome(executable_path=\n 'C:\\\\Users\\\\inaee\\\\Downloads\\\\chromedriver_win32\\\\chromedriver.exe')\n browser.get(url)\n time.sleep(1)\n element = browser.find_element_by_tag_name('body')\n for i in range(30):\n element.send_keys(Keys.PAGE_DOWN)\n time.sleep(0.2)\n browser.find_element_by_id('smb').click()\n for i in range(50):\n element.send_keys(Keys.PAGE_DOWN)\n time.sleep(0.2)\n time.sleep(1)\n source = browser.page_source\n browser.close()\n return source\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef search(url):\n browser = webdriver.Chrome(executable_path=\n 'C:\\\\Users\\\\inaee\\\\Downloads\\\\chromedriver_win32\\\\chromedriver.exe')\n browser.get(url)\n time.sleep(1)\n element = browser.find_element_by_tag_name('body')\n for i in range(30):\n element.send_keys(Keys.PAGE_DOWN)\n time.sleep(0.2)\n browser.find_element_by_id('smb').click()\n for i in range(50):\n element.send_keys(Keys.PAGE_DOWN)\n time.sleep(0.2)\n time.sleep(1)\n source = browser.page_source\n browser.close()\n return source\n\n\ndef download_image(link):\n headers = {'User-Agent': ua.random}\n try:\n r = requests.get('https://www.google.com' + link.get('href'),\n headers=headers)\n except:\n print('Cannot get link.')\n title = str(fromstring(r.content).findtext('.//title'))\n link = title.split(' ')[-1]\n print('At : ' + os.getcwd() + ', Downloading from ' + link)\n try:\n if link.split('.')[-1] == ('jpg' or 'png' or 'jpeg'):\n urllib.request.urlretrieve(link, link.split('/')[-1])\n except:\n pass\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef search(url):\n browser = webdriver.Chrome(executable_path=\n 'C:\\\\Users\\\\inaee\\\\Downloads\\\\chromedriver_win32\\\\chromedriver.exe')\n browser.get(url)\n time.sleep(1)\n element = browser.find_element_by_tag_name('body')\n for i in range(30):\n element.send_keys(Keys.PAGE_DOWN)\n time.sleep(0.2)\n browser.find_element_by_id('smb').click()\n for i in range(50):\n element.send_keys(Keys.PAGE_DOWN)\n time.sleep(0.2)\n time.sleep(1)\n source = browser.page_source\n browser.close()\n return source\n\n\ndef download_image(link):\n headers = {'User-Agent': ua.random}\n try:\n r = requests.get('https://www.google.com' + link.get('href'),\n headers=headers)\n except:\n print('Cannot get link.')\n title = str(fromstring(r.content).findtext('.//title'))\n link = title.split(' ')[-1]\n print('At : ' + os.getcwd() + ', Downloading from ' + link)\n try:\n if link.split('.')[-1] == ('jpg' or 'png' or 'jpeg'):\n urllib.request.urlretrieve(link, link.split('/')[-1])\n except:\n pass\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('keyword', help='the keyword to search')\n args = parser.parse_args()\n sys.setrecursionlimit(100000000)\n query = args.keyword\n url = ('https://www.google.com/search?as_st=y&tbm=isch&as_q=' + query +\n '&as_epq=&as_oq=&as_eq=&cr=&as_sitesearch=&safe=images&tbs=isz:lt,islt:svga,itp:photo,ift:jpg'\n )\n source = search(url)\n soup = BeautifulSoup(str(source), 'html.parser')\n ua = UserAgent()\n if not os.path.isdir(args.keyword):\n os.makedirs(args.keyword)\n os.chdir(str(os.getcwd()) + '/' + str(args.keyword))\n links = soup.find_all('a', class_='rg_l')\n with Pool() as pool:\n pool.map(download_image, links)\n",
"step-4": "import requests\nimport time\nimport urllib\nimport argparse\nfrom bs4 import BeautifulSoup\nfrom selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\nfrom fake_useragent import UserAgent\nfrom multiprocessing import Pool\nfrom lxml.html import fromstring\nimport os, sys\n\n\ndef search(url):\n browser = webdriver.Chrome(executable_path=\n 'C:\\\\Users\\\\inaee\\\\Downloads\\\\chromedriver_win32\\\\chromedriver.exe')\n browser.get(url)\n time.sleep(1)\n element = browser.find_element_by_tag_name('body')\n for i in range(30):\n element.send_keys(Keys.PAGE_DOWN)\n time.sleep(0.2)\n browser.find_element_by_id('smb').click()\n for i in range(50):\n element.send_keys(Keys.PAGE_DOWN)\n time.sleep(0.2)\n time.sleep(1)\n source = browser.page_source\n browser.close()\n return source\n\n\ndef download_image(link):\n headers = {'User-Agent': ua.random}\n try:\n r = requests.get('https://www.google.com' + link.get('href'),\n headers=headers)\n except:\n print('Cannot get link.')\n title = str(fromstring(r.content).findtext('.//title'))\n link = title.split(' ')[-1]\n print('At : ' + os.getcwd() + ', Downloading from ' + link)\n try:\n if link.split('.')[-1] == ('jpg' or 'png' or 'jpeg'):\n urllib.request.urlretrieve(link, link.split('/')[-1])\n except:\n pass\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('keyword', help='the keyword to search')\n args = parser.parse_args()\n sys.setrecursionlimit(100000000)\n query = args.keyword\n url = ('https://www.google.com/search?as_st=y&tbm=isch&as_q=' + query +\n '&as_epq=&as_oq=&as_eq=&cr=&as_sitesearch=&safe=images&tbs=isz:lt,islt:svga,itp:photo,ift:jpg'\n )\n source = search(url)\n soup = BeautifulSoup(str(source), 'html.parser')\n ua = UserAgent()\n if not os.path.isdir(args.keyword):\n os.makedirs(args.keyword)\n os.chdir(str(os.getcwd()) + '/' + str(args.keyword))\n links = soup.find_all('a', class_='rg_l')\n with Pool() as pool:\n pool.map(download_image, links)\n",
"step-5": "import requests\nimport time\nimport urllib\nimport argparse\nfrom bs4 import BeautifulSoup\nfrom selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\nfrom fake_useragent import UserAgent\nfrom multiprocessing import Pool\nfrom lxml.html import fromstring\nimport os, sys\n\n#text = 'chowchowbaby'\n#url='https://www.google.co.kr/search?q=' + text + '&source=lnms&tbm=isch&sa=X&ved=0ahUKEwiF2fPLn7zdAhUBEbwKHSLWBowQ_AUICigB&biw=809&bih=868&dpr=1.13'\n\ndef search(url):\n #Create a browser\n browser=webdriver.Chrome(executable_path='C:\\\\Users\\\\inaee\\\\Downloads\\\\chromedriver_win32\\\\chromedriver.exe')\n\n #Open the link\n browser.get(url)\n time.sleep(1)\n\n element=browser.find_element_by_tag_name(\"body\")\n\n #Scroll down\n for i in range(30):\n element.send_keys(Keys.PAGE_DOWN)\n time.sleep(0.2)\n\n browser.find_element_by_id(\"smb\").click()\n\n for i in range(50):\n element.send_keys(Keys.PAGE_DOWN)\n time.sleep(0.2)\n\n time.sleep(1)\n\n #Get page source and close the browser\n source=browser.page_source\n browser.close()\n\n return source\n\n\ndef download_image(link):\n # Use a random user agent header\n headers = {\"User-Agent\": ua.random}\n\n # Get the image link\n try:\n r = requests.get(\"https://www.google.com\" + link.get(\"href\"), headers=headers)\n except:\n print(\"Cannot get link.\")\n title = str(fromstring(r.content).findtext(\".//title\"))\n link = title.split(\" \")[-1]\n\n # Download the image\n print(\"At : \" + os.getcwd() + \", Downloading from \" + link)\n try:\n if link.split(\".\")[-1] == ('jpg' or 'png' or 'jpeg'):\n\n urllib.request.urlretrieve(link, link.split(\"/\")[-1])\n except:\n pass\n\n\nif __name__ == \"__main__\":\n # parse command line options\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\"keyword\", help=\"the keyword to search\")\n args = parser.parse_args()\n\n # set stack limit\n sys.setrecursionlimit(100000000)\n\n # get user input and search on google\n query = args.keyword\n\n\n #query = input(\"Enter the name you want to search\")\n\n\n\n url = \"https://www.google.com/search?as_st=y&tbm=isch&as_q=\" + query + \\\n \"&as_epq=&as_oq=&as_eq=&cr=&as_sitesearch=&safe=images&tbs=isz:lt,islt:svga,itp:photo,ift:jpg\"\n source = search(url)\n\n # Parse the page source and download pics\n soup = BeautifulSoup(str(source), \"html.parser\")\n ua = UserAgent()\n\n # check directory and create if necessary\n if not os.path.isdir(args.keyword):\n os.makedirs(args.keyword)\n\n os.chdir(str(os.getcwd()) + \"/\" + str(args.keyword))\n # get the links\n links = soup.find_all(\"a\", class_=\"rg_l\")\n\n # open some processes to download\n with Pool() as pool:\n pool.map(download_image, links)\n \n\n\n\n\n\n# 검색어\n#search = 'chowchowbaby'\n#url='https://www.google.co.kr/search?q=' + search + '&source=lnms&tbm=isch&sa=X&ved=0ahUKEwiF2fPLn7zdAhUBEbwKHSLWBowQ_AUICigB&biw=809&bih=868&dpr=1.13'\n# url\n#driver = webdriver.Chrome(executable_path=\"C:\\\\Users\\\\inaee\\\\Downloads\\\\chromedriver_win32\\\\chromedriver.exe\")\n#driver.get(url)\n#driver.implicitly_wait(2)\n\n\n#num_of_pagedowns = 50\n#elem = driver.find_element_by_xpath('/html/body') \n\n#i = 0\n#count = 1\n#img = driver.find_elements_by_tag_name(\"img\")\n\n#while i < num_of_pagedowns:\n#for item in img:\n# if(count>0 and count<502):\n# elem.send_keys(Keys.DOWN)\n# time.sleep(1)\n# full_name = \"C:\\\\Program Files\\\\Python35\\\\강아지크롤러\\\\chowchowbaby\\\\\" + str(count) + \"_chowchowbaby.jpg\"\n# try:\n# 
urllib.request.urlretrieve(item.get_attribute('src'), full_name)\n# tfp=open(full_name,url)\n# print(item.get_attribute('src')[:30] + \" : \")\n# except:\n# urllib.request.urlretrieve(item.get_attribute('data-src'), full_name)\n# tfp=open(full_name,url)\n# print(item.get_attribute('data-src')[:30] + \" : \")\n# count = count+1\n# i =i+1\n\n \n#driver.Quit()\n#print(\"Done.\")\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
__author__ = 'alexglenday'
def group(list_df: list, df_col_index: int=0, seaborn_context: str='poster'):
sns.set_context(seaborn_context)
df_labels = []
for df in list_df:
df_labels.append(df.columns[df_col_index])
df_all = pd.DataFrame({label: df.iloc[:, df_col_index] for df, label in
zip(list_df, df_labels)})
df_all.plot()
def individual(list_df: list, seaborn_context: str='poster'):
sns.set_context(seaborn_context)
for df in list_df:
df.plot()
|
normal
|
{
"blob_id": "d2632461fcdc39509610b96d43dd1ec42dae362f",
"index": 5229,
"step-1": "<mask token>\n\n\ndef individual(list_df: list, seaborn_context: str='poster'):\n sns.set_context(seaborn_context)\n for df in list_df:\n df.plot()\n",
"step-2": "<mask token>\n\n\ndef group(list_df: list, df_col_index: int=0, seaborn_context: str='poster'):\n sns.set_context(seaborn_context)\n df_labels = []\n for df in list_df:\n df_labels.append(df.columns[df_col_index])\n df_all = pd.DataFrame({label: df.iloc[:, df_col_index] for df, label in\n zip(list_df, df_labels)})\n df_all.plot()\n\n\ndef individual(list_df: list, seaborn_context: str='poster'):\n sns.set_context(seaborn_context)\n for df in list_df:\n df.plot()\n",
"step-3": "<mask token>\n__author__ = 'alexglenday'\n\n\ndef group(list_df: list, df_col_index: int=0, seaborn_context: str='poster'):\n sns.set_context(seaborn_context)\n df_labels = []\n for df in list_df:\n df_labels.append(df.columns[df_col_index])\n df_all = pd.DataFrame({label: df.iloc[:, df_col_index] for df, label in\n zip(list_df, df_labels)})\n df_all.plot()\n\n\ndef individual(list_df: list, seaborn_context: str='poster'):\n sns.set_context(seaborn_context)\n for df in list_df:\n df.plot()\n",
"step-4": "import pandas as pd\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n__author__ = 'alexglenday'\n\n\ndef group(list_df: list, df_col_index: int=0, seaborn_context: str='poster'):\n sns.set_context(seaborn_context)\n df_labels = []\n for df in list_df:\n df_labels.append(df.columns[df_col_index])\n df_all = pd.DataFrame({label: df.iloc[:, df_col_index] for df, label in\n zip(list_df, df_labels)})\n df_all.plot()\n\n\ndef individual(list_df: list, seaborn_context: str='poster'):\n sns.set_context(seaborn_context)\n for df in list_df:\n df.plot()\n",
"step-5": null,
"step-ids": [
1,
2,
3,
4
]
}
|
[
1,
2,
3,
4
] |
import contextlib
import datetime
import fnmatch
import os
import os.path
import re
import subprocess
import sys
import click
import dataset
def get_cmd_output(cmd):
"""Run a command in shell, and return the Unicode output."""
try:
data = subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as ex:
data = ex.output
try:
data = data.decode("utf-8")
except UnicodeDecodeError:
data = data.decode("latin1")
return data
def load_commits(db, repo_name):
"""Load the commits from the current directory repo."""
SEP = "-=:=-=:=-=:=-=:=-=:=-=:=-=:=-"
GITLOG = f"git log --no-merges --format='format:date: %aI%nhash: %H%nauth: %aE%nname: %aN%nsubj: %s%n%b%n{SEP}'"
SHORT_LINES = 5
# $ git log --format="format:---------------------%ndate: %aI%nhash: %H%nauth: %aE%nname: %aN%nsubj: %s%n%b"
# ---------------------
# date: 2021-04-21T16:13:23-04:00
# hash: efa13ff1d2fb3d8b2ddee8be0868ae60f9bc35a6
# auth: [email protected]
# name: Julia Eskew
# subj: fix: TNL-8233: Change exception raised at problem creation failure from generic exception to LoncapaProblemError. (#27361)
# Raising this specific exception will cause the failure to be handled more gracefully by problem rescoring code.
# ---------------------
# date: 2021-04-15T21:36:47-04:00
# hash: a1fe3d58dc112bd975f1237baaee787ba22929f1
# auth: [email protected]
# name: Albert (AJ) St. Aubin
# subj: [bug] Corrected issue where program dash showed incorrect completed count
# [MICROBA-1163]
#
# This change will correct an issue in the Program Dashboard where a user
# would see a course as completed, but not see their Certificate because
# it was not available to them yet.
# ---------------------
with db:
commit_table = db["commits"]
log = get_cmd_output(GITLOG)
for i, commit in enumerate(log.split(SEP + "\n")):
if commit:
lines = commit.split("\n", maxsplit=SHORT_LINES)
row = {"repo": repo_name}
for line in lines[:SHORT_LINES]:
key, val = line.split(": ", maxsplit=1)
row[key] = val
row["body"] = lines[SHORT_LINES].strip()
analyze_commit(row)
commit_table.insert(row)
STRICT = r"""(?x)
^
(?P<label>build|chore|docs|feat|fix|perf|refactor|revert|style|test|temp)
(?P<breaking>!?):\s
(?P<subjtext>.+)
$
"""
LAX = r"""(?xi)
^
(?P<label>\w+)
(?:\(\w+\))?
(?P<breaking>!?):\s
(?P<subjtext>.+)
$
"""
def analyze_commit(row):
row["conventional"] = row["lax"] = False
m = re.search(STRICT, row["subj"])
if m:
row["conventional"] = True
else:
m = re.search(LAX, row["subj"])
if m:
row["lax"] = True
if m:
row["label"] = m["label"]
row["breaking"] = bool(m["breaking"])
row["subjtext"] = m["subjtext"]
row["bodylines"] = len(row["body"].splitlines())
@contextlib.contextmanager
def change_dir(new_dir):
"""Change directory, and then change back.
Use as a context manager, it will give you the new directory, and later
restore the old one.
"""
old_dir = os.getcwd()
os.chdir(new_dir)
try:
yield os.getcwd()
finally:
os.chdir(old_dir)
@click.command(help="Collect stats about commits in local git repos")
@click.option("--db", "dbfile", default="commits.db", help="SQLite database file to write to")
@click.option("--ignore", multiple=True, help="Repos to ignore")
@click.option("--require", help="A file that must exist to process the repo")
@click.argument("repos", nargs=-1)
def main(dbfile, ignore, require, repos):
db = dataset.connect("sqlite:///" + dbfile)
for repo in repos:
if any(fnmatch.fnmatch(repo, pat) for pat in ignore):
print(f"Ignoring {repo}")
continue
if require is not None:
if not os.path.exists(os.path.join(repo, require)):
print(f"Skipping {repo}")
continue
print(repo)
with change_dir(repo) as repo_dir:
repo_name = "/".join(repo_dir.split("/")[-2:])
load_commits(db, repo_name)
if __name__ == "__main__":
main()
# then:
# gittreeif nedbat/meta/installed python /src/ghorg/commitstats.py /src/ghorg/commits.db
#
# in sqlite:
# select strftime("%Y%W", date, "weekday 0") as yw, count(*) total, sum(conventional) as con from commits group by yw;
# select yw, total, con, cast((con*100.0)/total as integer) pctcon from (select strftime("%Y%W", date, "weekday 0") as yw, count(*) total, sum(conventional) as con from commits group by yw);
"""
select
weekend, total, con, cast((con*100.0)/total as integer) pctcon, bod, cast((bod*100.0)/total as integer) pctbod
from (
select
strftime("%Y%m%d", date, "weekday 0") as weekend,
count(*) total,
sum(conventional) as con, sum(bodylines > 0) as bod
from commits where repo = "edx/edx-platform" group by weekend
)
where weekend > '202009';
"""
|
normal
|
{
"blob_id": "16446c2c5612a14d4364cbefb949da0b473f7454",
"index": 7934,
"step-1": "<mask token>\n\n\ndef analyze_commit(row):\n row['conventional'] = row['lax'] = False\n m = re.search(STRICT, row['subj'])\n if m:\n row['conventional'] = True\n else:\n m = re.search(LAX, row['subj'])\n if m:\n row['lax'] = True\n if m:\n row['label'] = m['label']\n row['breaking'] = bool(m['breaking'])\n row['subjtext'] = m['subjtext']\n row['bodylines'] = len(row['body'].splitlines())\n\n\n<mask token>\n\n\[email protected](help='Collect stats about commits in local git repos')\[email protected]('--db', 'dbfile', default='commits.db', help=\n 'SQLite database file to write to')\[email protected]('--ignore', multiple=True, help='Repos to ignore')\[email protected]('--require', help='A file that must exist to process the repo')\[email protected]('repos', nargs=-1)\ndef main(dbfile, ignore, require, repos):\n db = dataset.connect('sqlite:///' + dbfile)\n for repo in repos:\n if any(fnmatch.fnmatch(repo, pat) for pat in ignore):\n print(f'Ignoring {repo}')\n continue\n if require is not None:\n if not os.path.exists(os.path.join(repo, require)):\n print(f'Skipping {repo}')\n continue\n print(repo)\n with change_dir(repo) as repo_dir:\n repo_name = '/'.join(repo_dir.split('/')[-2:])\n load_commits(db, repo_name)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef get_cmd_output(cmd):\n \"\"\"Run a command in shell, and return the Unicode output.\"\"\"\n try:\n data = subprocess.check_output(cmd, shell=True, stderr=subprocess.\n STDOUT)\n except subprocess.CalledProcessError as ex:\n data = ex.output\n try:\n data = data.decode('utf-8')\n except UnicodeDecodeError:\n data = data.decode('latin1')\n return data\n\n\ndef load_commits(db, repo_name):\n \"\"\"Load the commits from the current directory repo.\"\"\"\n SEP = '-=:=-=:=-=:=-=:=-=:=-=:=-=:=-'\n GITLOG = (\n f\"git log --no-merges --format='format:date: %aI%nhash: %H%nauth: %aE%nname: %aN%nsubj: %s%n%b%n{SEP}'\"\n )\n SHORT_LINES = 5\n with db:\n commit_table = db['commits']\n log = get_cmd_output(GITLOG)\n for i, commit in enumerate(log.split(SEP + '\\n')):\n if commit:\n lines = commit.split('\\n', maxsplit=SHORT_LINES)\n row = {'repo': repo_name}\n for line in lines[:SHORT_LINES]:\n key, val = line.split(': ', maxsplit=1)\n row[key] = val\n row['body'] = lines[SHORT_LINES].strip()\n analyze_commit(row)\n commit_table.insert(row)\n\n\n<mask token>\n\n\ndef analyze_commit(row):\n row['conventional'] = row['lax'] = False\n m = re.search(STRICT, row['subj'])\n if m:\n row['conventional'] = True\n else:\n m = re.search(LAX, row['subj'])\n if m:\n row['lax'] = True\n if m:\n row['label'] = m['label']\n row['breaking'] = bool(m['breaking'])\n row['subjtext'] = m['subjtext']\n row['bodylines'] = len(row['body'].splitlines())\n\n\[email protected]\ndef change_dir(new_dir):\n \"\"\"Change directory, and then change back.\n\n Use as a context manager, it will give you the new directory, and later\n restore the old one.\n\n \"\"\"\n old_dir = os.getcwd()\n os.chdir(new_dir)\n try:\n yield os.getcwd()\n finally:\n os.chdir(old_dir)\n\n\[email protected](help='Collect stats about commits in local git repos')\[email protected]('--db', 'dbfile', default='commits.db', help=\n 'SQLite database file to write to')\[email protected]('--ignore', multiple=True, help='Repos to ignore')\[email protected]('--require', help='A file that must exist to process the repo')\[email protected]('repos', nargs=-1)\ndef main(dbfile, ignore, require, repos):\n db = dataset.connect('sqlite:///' + dbfile)\n for repo in repos:\n if any(fnmatch.fnmatch(repo, pat) for pat in ignore):\n print(f'Ignoring {repo}')\n continue\n if require is not None:\n if not os.path.exists(os.path.join(repo, require)):\n print(f'Skipping {repo}')\n continue\n print(repo)\n with change_dir(repo) as repo_dir:\n repo_name = '/'.join(repo_dir.split('/')[-2:])\n load_commits(db, repo_name)\n\n\nif __name__ == '__main__':\n main()\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef get_cmd_output(cmd):\n \"\"\"Run a command in shell, and return the Unicode output.\"\"\"\n try:\n data = subprocess.check_output(cmd, shell=True, stderr=subprocess.\n STDOUT)\n except subprocess.CalledProcessError as ex:\n data = ex.output\n try:\n data = data.decode('utf-8')\n except UnicodeDecodeError:\n data = data.decode('latin1')\n return data\n\n\ndef load_commits(db, repo_name):\n \"\"\"Load the commits from the current directory repo.\"\"\"\n SEP = '-=:=-=:=-=:=-=:=-=:=-=:=-=:=-'\n GITLOG = (\n f\"git log --no-merges --format='format:date: %aI%nhash: %H%nauth: %aE%nname: %aN%nsubj: %s%n%b%n{SEP}'\"\n )\n SHORT_LINES = 5\n with db:\n commit_table = db['commits']\n log = get_cmd_output(GITLOG)\n for i, commit in enumerate(log.split(SEP + '\\n')):\n if commit:\n lines = commit.split('\\n', maxsplit=SHORT_LINES)\n row = {'repo': repo_name}\n for line in lines[:SHORT_LINES]:\n key, val = line.split(': ', maxsplit=1)\n row[key] = val\n row['body'] = lines[SHORT_LINES].strip()\n analyze_commit(row)\n commit_table.insert(row)\n\n\nSTRICT = \"\"\"(?x)\n ^\n (?P<label>build|chore|docs|feat|fix|perf|refactor|revert|style|test|temp)\n (?P<breaking>!?):\\\\s\n (?P<subjtext>.+)\n $\n \"\"\"\nLAX = \"\"\"(?xi)\n ^\n (?P<label>\\\\w+)\n (?:\\\\(\\\\w+\\\\))?\n (?P<breaking>!?):\\\\s\n (?P<subjtext>.+)\n $\n \"\"\"\n\n\ndef analyze_commit(row):\n row['conventional'] = row['lax'] = False\n m = re.search(STRICT, row['subj'])\n if m:\n row['conventional'] = True\n else:\n m = re.search(LAX, row['subj'])\n if m:\n row['lax'] = True\n if m:\n row['label'] = m['label']\n row['breaking'] = bool(m['breaking'])\n row['subjtext'] = m['subjtext']\n row['bodylines'] = len(row['body'].splitlines())\n\n\[email protected]\ndef change_dir(new_dir):\n \"\"\"Change directory, and then change back.\n\n Use as a context manager, it will give you the new directory, and later\n restore the old one.\n\n \"\"\"\n old_dir = os.getcwd()\n os.chdir(new_dir)\n try:\n yield os.getcwd()\n finally:\n os.chdir(old_dir)\n\n\[email protected](help='Collect stats about commits in local git repos')\[email protected]('--db', 'dbfile', default='commits.db', help=\n 'SQLite database file to write to')\[email protected]('--ignore', multiple=True, help='Repos to ignore')\[email protected]('--require', help='A file that must exist to process the repo')\[email protected]('repos', nargs=-1)\ndef main(dbfile, ignore, require, repos):\n db = dataset.connect('sqlite:///' + dbfile)\n for repo in repos:\n if any(fnmatch.fnmatch(repo, pat) for pat in ignore):\n print(f'Ignoring {repo}')\n continue\n if require is not None:\n if not os.path.exists(os.path.join(repo, require)):\n print(f'Skipping {repo}')\n continue\n print(repo)\n with change_dir(repo) as repo_dir:\n repo_name = '/'.join(repo_dir.split('/')[-2:])\n load_commits(db, repo_name)\n\n\nif __name__ == '__main__':\n main()\n<mask token>\n",
"step-4": "import contextlib\nimport datetime\nimport fnmatch\nimport os\nimport os.path\nimport re\nimport subprocess\nimport sys\nimport click\nimport dataset\n\n\ndef get_cmd_output(cmd):\n \"\"\"Run a command in shell, and return the Unicode output.\"\"\"\n try:\n data = subprocess.check_output(cmd, shell=True, stderr=subprocess.\n STDOUT)\n except subprocess.CalledProcessError as ex:\n data = ex.output\n try:\n data = data.decode('utf-8')\n except UnicodeDecodeError:\n data = data.decode('latin1')\n return data\n\n\ndef load_commits(db, repo_name):\n \"\"\"Load the commits from the current directory repo.\"\"\"\n SEP = '-=:=-=:=-=:=-=:=-=:=-=:=-=:=-'\n GITLOG = (\n f\"git log --no-merges --format='format:date: %aI%nhash: %H%nauth: %aE%nname: %aN%nsubj: %s%n%b%n{SEP}'\"\n )\n SHORT_LINES = 5\n with db:\n commit_table = db['commits']\n log = get_cmd_output(GITLOG)\n for i, commit in enumerate(log.split(SEP + '\\n')):\n if commit:\n lines = commit.split('\\n', maxsplit=SHORT_LINES)\n row = {'repo': repo_name}\n for line in lines[:SHORT_LINES]:\n key, val = line.split(': ', maxsplit=1)\n row[key] = val\n row['body'] = lines[SHORT_LINES].strip()\n analyze_commit(row)\n commit_table.insert(row)\n\n\nSTRICT = \"\"\"(?x)\n ^\n (?P<label>build|chore|docs|feat|fix|perf|refactor|revert|style|test|temp)\n (?P<breaking>!?):\\\\s\n (?P<subjtext>.+)\n $\n \"\"\"\nLAX = \"\"\"(?xi)\n ^\n (?P<label>\\\\w+)\n (?:\\\\(\\\\w+\\\\))?\n (?P<breaking>!?):\\\\s\n (?P<subjtext>.+)\n $\n \"\"\"\n\n\ndef analyze_commit(row):\n row['conventional'] = row['lax'] = False\n m = re.search(STRICT, row['subj'])\n if m:\n row['conventional'] = True\n else:\n m = re.search(LAX, row['subj'])\n if m:\n row['lax'] = True\n if m:\n row['label'] = m['label']\n row['breaking'] = bool(m['breaking'])\n row['subjtext'] = m['subjtext']\n row['bodylines'] = len(row['body'].splitlines())\n\n\[email protected]\ndef change_dir(new_dir):\n \"\"\"Change directory, and then change back.\n\n Use as a context manager, it will give you the new directory, and later\n restore the old one.\n\n \"\"\"\n old_dir = os.getcwd()\n os.chdir(new_dir)\n try:\n yield os.getcwd()\n finally:\n os.chdir(old_dir)\n\n\[email protected](help='Collect stats about commits in local git repos')\[email protected]('--db', 'dbfile', default='commits.db', help=\n 'SQLite database file to write to')\[email protected]('--ignore', multiple=True, help='Repos to ignore')\[email protected]('--require', help='A file that must exist to process the repo')\[email protected]('repos', nargs=-1)\ndef main(dbfile, ignore, require, repos):\n db = dataset.connect('sqlite:///' + dbfile)\n for repo in repos:\n if any(fnmatch.fnmatch(repo, pat) for pat in ignore):\n print(f'Ignoring {repo}')\n continue\n if require is not None:\n if not os.path.exists(os.path.join(repo, require)):\n print(f'Skipping {repo}')\n continue\n print(repo)\n with change_dir(repo) as repo_dir:\n repo_name = '/'.join(repo_dir.split('/')[-2:])\n load_commits(db, repo_name)\n\n\nif __name__ == '__main__':\n main()\n<mask token>\n",
"step-5": "import contextlib\nimport datetime\nimport fnmatch\nimport os\nimport os.path\nimport re\nimport subprocess\nimport sys\n\nimport click\nimport dataset\n\ndef get_cmd_output(cmd):\n \"\"\"Run a command in shell, and return the Unicode output.\"\"\"\n try:\n data = subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT)\n except subprocess.CalledProcessError as ex:\n data = ex.output\n try:\n data = data.decode(\"utf-8\")\n except UnicodeDecodeError:\n data = data.decode(\"latin1\")\n return data\n\ndef load_commits(db, repo_name):\n \"\"\"Load the commits from the current directory repo.\"\"\"\n\n SEP = \"-=:=-=:=-=:=-=:=-=:=-=:=-=:=-\"\n GITLOG = f\"git log --no-merges --format='format:date: %aI%nhash: %H%nauth: %aE%nname: %aN%nsubj: %s%n%b%n{SEP}'\"\n SHORT_LINES = 5\n\n # $ git log --format=\"format:---------------------%ndate: %aI%nhash: %H%nauth: %aE%nname: %aN%nsubj: %s%n%b\"\n # ---------------------\n # date: 2021-04-21T16:13:23-04:00\n # hash: efa13ff1d2fb3d8b2ddee8be0868ae60f9bc35a6\n # auth: [email protected]\n # name: Julia Eskew\n # subj: fix: TNL-8233: Change exception raised at problem creation failure from generic exception to LoncapaProblemError. (#27361)\n # Raising this specific exception will cause the failure to be handled more gracefully by problem rescoring code.\n # ---------------------\n # date: 2021-04-15T21:36:47-04:00\n # hash: a1fe3d58dc112bd975f1237baaee787ba22929f1\n # auth: [email protected]\n # name: Albert (AJ) St. Aubin\n # subj: [bug] Corrected issue where program dash showed incorrect completed count\n # [MICROBA-1163]\n # \n # This change will correct an issue in the Program Dashboard where a user\n # would see a course as completed, but not see their Certificate because\n # it was not available to them yet.\n # ---------------------\n\n with db:\n commit_table = db[\"commits\"]\n\n log = get_cmd_output(GITLOG)\n for i, commit in enumerate(log.split(SEP + \"\\n\")):\n if commit:\n lines = commit.split(\"\\n\", maxsplit=SHORT_LINES)\n row = {\"repo\": repo_name}\n for line in lines[:SHORT_LINES]:\n key, val = line.split(\": \", maxsplit=1)\n row[key] = val\n row[\"body\"] = lines[SHORT_LINES].strip()\n analyze_commit(row)\n commit_table.insert(row)\n\nSTRICT = r\"\"\"(?x)\n ^\n (?P<label>build|chore|docs|feat|fix|perf|refactor|revert|style|test|temp)\n (?P<breaking>!?):\\s\n (?P<subjtext>.+)\n $\n \"\"\"\n\nLAX = r\"\"\"(?xi)\n ^\n (?P<label>\\w+)\n (?:\\(\\w+\\))?\n (?P<breaking>!?):\\s\n (?P<subjtext>.+)\n $\n \"\"\"\n\ndef analyze_commit(row):\n row[\"conventional\"] = row[\"lax\"] = False\n m = re.search(STRICT, row[\"subj\"])\n if m:\n row[\"conventional\"] = True\n else:\n m = re.search(LAX, row[\"subj\"])\n if m:\n row[\"lax\"] = True\n if m:\n row[\"label\"] = m[\"label\"]\n row[\"breaking\"] = bool(m[\"breaking\"])\n row[\"subjtext\"] = m[\"subjtext\"]\n row[\"bodylines\"] = len(row[\"body\"].splitlines())\n\[email protected]\ndef change_dir(new_dir):\n \"\"\"Change directory, and then change back.\n\n Use as a context manager, it will give you the new directory, and later\n restore the old one.\n\n \"\"\"\n old_dir = os.getcwd()\n os.chdir(new_dir)\n try:\n yield os.getcwd()\n finally:\n os.chdir(old_dir)\n\[email protected](help=\"Collect stats about commits in local git repos\")\[email protected](\"--db\", \"dbfile\", default=\"commits.db\", help=\"SQLite database file to write to\")\[email protected](\"--ignore\", multiple=True, help=\"Repos to ignore\")\[email protected](\"--require\", help=\"A file that must exist 
to process the repo\")\[email protected](\"repos\", nargs=-1)\ndef main(dbfile, ignore, require, repos):\n db = dataset.connect(\"sqlite:///\" + dbfile)\n for repo in repos:\n if any(fnmatch.fnmatch(repo, pat) for pat in ignore):\n print(f\"Ignoring {repo}\")\n continue\n if require is not None:\n if not os.path.exists(os.path.join(repo, require)):\n print(f\"Skipping {repo}\")\n continue\n print(repo)\n with change_dir(repo) as repo_dir:\n repo_name = \"/\".join(repo_dir.split(\"/\")[-2:])\n load_commits(db, repo_name)\n\nif __name__ == \"__main__\":\n main()\n\n# then:\n# gittreeif nedbat/meta/installed python /src/ghorg/commitstats.py /src/ghorg/commits.db\n#\n# in sqlite:\n# select strftime(\"%Y%W\", date, \"weekday 0\") as yw, count(*) total, sum(conventional) as con from commits group by yw;\n# select yw, total, con, cast((con*100.0)/total as integer) pctcon from (select strftime(\"%Y%W\", date, \"weekday 0\") as yw, count(*) total, sum(conventional) as con from commits group by yw);\n\n\"\"\"\n select\n weekend, total, con, cast((con*100.0)/total as integer) pctcon, bod, cast((bod*100.0)/total as integer) pctbod\n from (\n select\n strftime(\"%Y%m%d\", date, \"weekday 0\") as weekend,\n count(*) total,\n sum(conventional) as con, sum(bodylines > 0) as bod\n from commits where repo = \"edx/edx-platform\" group by weekend\n )\n where weekend > '202009';\n\"\"\"\n",
"step-ids": [
2,
6,
7,
8,
9
]
}
|
[
2,
6,
7,
8,
9
] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Sep 14 09:54:28 2020
@author: rushirajsinhparmar
"""
import matplotlib.pyplot as plt
from skimage import io
import numpy as np
from skimage.filters import threshold_otsu
import cv2
img = io.imread("texture.png", as_gray=True)
##################################################
#Variance - not a great way to quantify texture
from scipy import ndimage
k=7
img_mean = ndimage.uniform_filter(img, (k, k))
img_sqr_mean = ndimage.uniform_filter(img**2, (k, k))
img_var = img_sqr_mean - img_mean**2
plt.imshow(img_var, cmap='gray')
#######################################################
#GABOR - A great filter for texture but usually efficient
#if we know exact parameters. Good choice for generating features
#for machine learning
ksize=45
theta=np.pi/2
kernel = cv2.getGaborKernel((ksize, ksize), 5.0, theta, 10.0, 0.9, 0, ktype=cv2.CV_32F)
filtered_image = cv2.filter2D(img, cv2.CV_8UC3, kernel)
plt.imshow(filtered_image, cmap='gray')
###########################################################
#Entropy
#Entropy quantifies disorder.
#Since cell region has high variation in pixel values the entropy would be
#higher compared to scratch region
from skimage.filters.rank import entropy
from skimage.morphology import disk
entropy_img = entropy(img, disk(15))
plt.imshow(entropy_img)
#use otsu to threshold high vs low entropy regions.
plt.hist(entropy_img.flat, bins=100, range=(0,7)) #.flat returns the flattened numpy array (1D)
thresh = threshold_otsu(entropy_img)
#binarize the entropy image
binary = entropy_img <= thresh
plt.imshow(binary)
#Sum all pixels in the scratch region (values =1)
scratch_area = np.sum(binary == 1)
print("Scratched area is: ", scratch_area, "Square pixels")
scale = 0.45 # microns/pixel
print("Scratched area in sq. microns is: ", scratch_area*((scale)**2), "Square pixels")
|
normal
|
{
"blob_id": "ab6c3d3c6faa2d1fe5e064dbdebd8904b9434f15",
"index": 5214,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nplt.imshow(img_var, cmap='gray')\n<mask token>\nplt.imshow(filtered_image, cmap='gray')\n<mask token>\nplt.imshow(entropy_img)\nplt.hist(entropy_img.flat, bins=100, range=(0, 7))\n<mask token>\nplt.imshow(binary)\n<mask token>\nprint('Scratched area is: ', scratch_area, 'Square pixels')\n<mask token>\nprint('Scratched area in sq. microns is: ', scratch_area * scale ** 2,\n 'Square pixels')\n",
"step-3": "<mask token>\nimg = io.imread('texture.png', as_gray=True)\n<mask token>\nk = 7\nimg_mean = ndimage.uniform_filter(img, (k, k))\nimg_sqr_mean = ndimage.uniform_filter(img ** 2, (k, k))\nimg_var = img_sqr_mean - img_mean ** 2\nplt.imshow(img_var, cmap='gray')\nksize = 45\ntheta = np.pi / 2\nkernel = cv2.getGaborKernel((ksize, ksize), 5.0, theta, 10.0, 0.9, 0, ktype\n =cv2.CV_32F)\nfiltered_image = cv2.filter2D(img, cv2.CV_8UC3, kernel)\nplt.imshow(filtered_image, cmap='gray')\n<mask token>\nentropy_img = entropy(img, disk(15))\nplt.imshow(entropy_img)\nplt.hist(entropy_img.flat, bins=100, range=(0, 7))\nthresh = threshold_otsu(entropy_img)\nbinary = entropy_img <= thresh\nplt.imshow(binary)\nscratch_area = np.sum(binary == 1)\nprint('Scratched area is: ', scratch_area, 'Square pixels')\nscale = 0.45\nprint('Scratched area in sq. microns is: ', scratch_area * scale ** 2,\n 'Square pixels')\n",
"step-4": "<mask token>\nimport matplotlib.pyplot as plt\nfrom skimage import io\nimport numpy as np\nfrom skimage.filters import threshold_otsu\nimport cv2\nimg = io.imread('texture.png', as_gray=True)\nfrom scipy import ndimage\nk = 7\nimg_mean = ndimage.uniform_filter(img, (k, k))\nimg_sqr_mean = ndimage.uniform_filter(img ** 2, (k, k))\nimg_var = img_sqr_mean - img_mean ** 2\nplt.imshow(img_var, cmap='gray')\nksize = 45\ntheta = np.pi / 2\nkernel = cv2.getGaborKernel((ksize, ksize), 5.0, theta, 10.0, 0.9, 0, ktype\n =cv2.CV_32F)\nfiltered_image = cv2.filter2D(img, cv2.CV_8UC3, kernel)\nplt.imshow(filtered_image, cmap='gray')\nfrom skimage.filters.rank import entropy\nfrom skimage.morphology import disk\nentropy_img = entropy(img, disk(15))\nplt.imshow(entropy_img)\nplt.hist(entropy_img.flat, bins=100, range=(0, 7))\nthresh = threshold_otsu(entropy_img)\nbinary = entropy_img <= thresh\nplt.imshow(binary)\nscratch_area = np.sum(binary == 1)\nprint('Scratched area is: ', scratch_area, 'Square pixels')\nscale = 0.45\nprint('Scratched area in sq. microns is: ', scratch_area * scale ** 2,\n 'Square pixels')\n",
"step-5": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Sep 14 09:54:28 2020\n\n@author: rushirajsinhparmar\n\"\"\"\n\nimport matplotlib.pyplot as plt\nfrom skimage import io\n\nimport numpy as np\nfrom skimage.filters import threshold_otsu\nimport cv2\n\nimg = io.imread(\"texture.png\", as_gray=True)\n\n##################################################\n#Variance - not a great way to quantify texture\nfrom scipy import ndimage \nk=7\nimg_mean = ndimage.uniform_filter(img, (k, k))\nimg_sqr_mean = ndimage.uniform_filter(img**2, (k, k))\nimg_var = img_sqr_mean - img_mean**2\nplt.imshow(img_var, cmap='gray')\n\n#######################################################\n#GABOR - A great filter for texture but usually efficient\n#if we know exact parameters. Good choice for generating features\n#for machine learning\n\nksize=45\ntheta=np.pi/2\nkernel = cv2.getGaborKernel((ksize, ksize), 5.0, theta, 10.0, 0.9, 0, ktype=cv2.CV_32F)\nfiltered_image = cv2.filter2D(img, cv2.CV_8UC3, kernel)\nplt.imshow(filtered_image, cmap='gray')\n\n###########################################################\n#Entropy\n#Entropy quantifies disorder.\n#Since cell region has high variation in pixel values the entropy would be\n#higher compared to scratch region\nfrom skimage.filters.rank import entropy\nfrom skimage.morphology import disk\nentropy_img = entropy(img, disk(15))\nplt.imshow(entropy_img) \n\n#use otsu to threshold high vs low entropy regions.\nplt.hist(entropy_img.flat, bins=100, range=(0,7)) #.flat returns the flattened numpy array (1D)\n\nthresh = threshold_otsu(entropy_img) \n\n#binarize the entropy image \nbinary = entropy_img <= thresh\nplt.imshow(binary)\n\n#Sum all pixels in the scratch region (values =1)\nscratch_area = np.sum(binary == 1)\nprint(\"Scratched area is: \", scratch_area, \"Square pixels\")\n\nscale = 0.45 # microns/pixel\nprint(\"Scratched area in sq. microns is: \", scratch_area*((scale)**2), \"Square pixels\")",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |