code (string, lengths 13-1.2M) | order_type (string, 1 class) | original_example (dict) | step_ids (list, lengths 1-5) |
---|---|---|---|
import FWCore.ParameterSet.Config as cms
import FWCore.ParameterSet.VarParsing as VarParsing
options = VarParsing.VarParsing()
options.register(
'file','',VarParsing.VarParsing.multiplicity.singleton,
VarParsing.VarParsing.varType.string,
'File path for storing output')
options.parseArguments()
file_path = options.file
#print file_path
process = cms.Process("RawAnalyzer")
process.load("FWCore.MessageService.MessageLogger_cfi")
process.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(-1) ) # -1 means run on all events
#default is HcalTBSource but you can change to PoolSource if you like
#process.source = cms.Source("HcalTBSource",
process.source = cms.Source("PoolSource",
# replace 'myfile.root' with the source file you want to use
fileNames = cms.untracked.vstring(
#'root://eoscms//eos/cms/store/data/Run2015B/SingleMuon/RAW/v1/000/251/162/00000/0050EEC0-AD25-E511-9A32-02163E011962.root'
# 'file:/afs/cern.ch/user/d/drew/USC_223708.root'
# 'file:/afs/cern.ch/user/d/drew/USC_223495.root' #HO pedestal
# '/store/group/comm_hcal/LS1/USC_223495.root' #HO pedestal, local
# '/store/group/comm_hcal/LS1/USC_222759.root'
# '/store/group/comm_hcal/LS1/USC_223775.root'
#            '/store/group/comm_hcal/LS1/USC_224285.root' #not sure, taken 31/7/2014
# '/store/group/comm_hcal/LS1/USC_224625.root'
'root://eoscms//eos/cms/store/data/Run2015B/HcalNZS/RAW/v1/000/251/244/00000/46080143-C025-E511-9CB7-02163E014166.root'
#'root://eoscms//eos/cms/store/data/Run2015B/HcalNZS/RAW/v1/000/251/883/00000/20C23681-852B-E511-9FBC-02163E01413E.root'
#'root://eoscms//eos/cms/store/data/Run2015B/HcalNZS/RAW/v1/000/251/883/00000/369E8A59-802B-E511-B85E-02163E01259F.root'
#'root://eoscms//eos/cms/store/data/Run2015B/HcalNZS/RAW/v1/000/251/883/00000/488F97C1-8F2B-E511-86B8-02163E0144D2.root'
#'root://eoscms//eos/cms/store/data/Run2015B/HcalNZS/RAW/v1/000/251/883/00000/FAE69354-7E2B-E511-80D7-02163E0125C8.root'
)
)
process.analyzer = cms.EDAnalyzer('RawAnalyzer',
debugit = cms.untracked.bool(False),
outputFile = cms.untracked.string(file_path),
badevlist = cms.vint32(
153647285, 152905909, 153143477, 153217205, 151718625, 153024693, 150641153, 151460577,
152364043, 152889525, 153151669, 151148928, 153471157, 149944833, 151407329, 152529024,
150403585, 151124352, 152368139, 152451200, 152950965, 153135285, 154125042, 154268402,
152261643, 150718977, 152737973, 153409717, 153800866, 151321313, 152910005, 153348277,
154002162, 149846529, 150489601, 150526465, 151370465, 152959157, 153262261, 153916146,
150202881, 152750261, 153004213),
modval = cms.untracked.int32(112)
)
process.TFileService = cms.Service("TFileService",fileName = cms.string("RawAnalyzer.root") )
process.MessageLogger.cerr.FwkReport.reportEvery = 2000 #type out every <n> events
process.p = cms.Path(process.analyzer)
| normal |
{
"blob_id": "6aff61ce5cef537e6b1b19e382d8bf80e3a61693",
"index": 1423,
"step-1": "<mask token>\n",
"step-2": "<mask token>\noptions.register('file', '', VarParsing.VarParsing.multiplicity.singleton,\n VarParsing.VarParsing.varType.string, 'File path for storing output')\noptions.parseArguments()\n<mask token>\nprocess.load('FWCore.MessageService.MessageLogger_cfi')\n<mask token>\n",
"step-3": "<mask token>\noptions = VarParsing.VarParsing()\noptions.register('file', '', VarParsing.VarParsing.multiplicity.singleton,\n VarParsing.VarParsing.varType.string, 'File path for storing output')\noptions.parseArguments()\nfile_path = options.file\nprocess = cms.Process('RawAnalyzer')\nprocess.load('FWCore.MessageService.MessageLogger_cfi')\nprocess.maxEvents = cms.untracked.PSet(input=cms.untracked.int32(-1))\nprocess.source = cms.Source('PoolSource', fileNames=cms.untracked.vstring(\n 'root://eoscms//eos/cms/store/data/Run2015B/HcalNZS/RAW/v1/000/251/244/00000/46080143-C025-E511-9CB7-02163E014166.root'\n ))\nprocess.analyzer = cms.EDAnalyzer('RawAnalyzer', debugit=cms.untracked.bool\n (False), outputFile=cms.untracked.string(file_path), badevlist=cms.\n vint32(153647285, 152905909, 153143477, 153217205, 151718625, 153024693,\n 150641153, 151460577, 152364043, 152889525, 153151669, 151148928, \n 153471157, 149944833, 151407329, 152529024, 150403585, 151124352, \n 152368139, 152451200, 152950965, 153135285, 154125042, 154268402, \n 152261643, 150718977, 152737973, 153409717, 153800866, 151321313, \n 152910005, 153348277, 154002162, 149846529, 150489601, 150526465, \n 151370465, 152959157, 153262261, 153916146, 150202881, 152750261, \n 153004213), modval=cms.untracked.int32(112))\nprocess.TFileService = cms.Service('TFileService', fileName=cms.string(\n 'RawAnalyzer.root'))\nprocess.MessageLogger.cerr.FwkReport.reportEvery = 2000\nprocess.p = cms.Path(process.analyzer)\n",
"step-4": "import FWCore.ParameterSet.Config as cms\nimport FWCore.ParameterSet.VarParsing as VarParsing\noptions = VarParsing.VarParsing()\noptions.register('file', '', VarParsing.VarParsing.multiplicity.singleton,\n VarParsing.VarParsing.varType.string, 'File path for storing output')\noptions.parseArguments()\nfile_path = options.file\nprocess = cms.Process('RawAnalyzer')\nprocess.load('FWCore.MessageService.MessageLogger_cfi')\nprocess.maxEvents = cms.untracked.PSet(input=cms.untracked.int32(-1))\nprocess.source = cms.Source('PoolSource', fileNames=cms.untracked.vstring(\n 'root://eoscms//eos/cms/store/data/Run2015B/HcalNZS/RAW/v1/000/251/244/00000/46080143-C025-E511-9CB7-02163E014166.root'\n ))\nprocess.analyzer = cms.EDAnalyzer('RawAnalyzer', debugit=cms.untracked.bool\n (False), outputFile=cms.untracked.string(file_path), badevlist=cms.\n vint32(153647285, 152905909, 153143477, 153217205, 151718625, 153024693,\n 150641153, 151460577, 152364043, 152889525, 153151669, 151148928, \n 153471157, 149944833, 151407329, 152529024, 150403585, 151124352, \n 152368139, 152451200, 152950965, 153135285, 154125042, 154268402, \n 152261643, 150718977, 152737973, 153409717, 153800866, 151321313, \n 152910005, 153348277, 154002162, 149846529, 150489601, 150526465, \n 151370465, 152959157, 153262261, 153916146, 150202881, 152750261, \n 153004213), modval=cms.untracked.int32(112))\nprocess.TFileService = cms.Service('TFileService', fileName=cms.string(\n 'RawAnalyzer.root'))\nprocess.MessageLogger.cerr.FwkReport.reportEvery = 2000\nprocess.p = cms.Path(process.analyzer)\n",
"step-5": "import FWCore.ParameterSet.Config as cms\nimport FWCore.ParameterSet.VarParsing as VarParsing\noptions = VarParsing.VarParsing()\noptions.register(\n\t'file','',VarParsing.VarParsing.multiplicity.singleton,\n\tVarParsing.VarParsing.varType.string,\n\t'File path for storing output')\noptions.parseArguments()\nfile_path = options.file\n#print file_path\n\nprocess = cms.Process(\"RawAnalyzer\")\n\nprocess.load(\"FWCore.MessageService.MessageLogger_cfi\")\nprocess.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(-1) ) # -1 means run on all events\n\n#default is HcalTBSource but you can change to PoolSource if you like\n#process.source = cms.Source(\"HcalTBSource\",\nprocess.source = cms.Source(\"PoolSource\",\n # replace 'myfile.root' with the source file you want to use\n fileNames = cms.untracked.vstring(\n#'root://eoscms//eos/cms/store/data/Run2015B/SingleMuon/RAW/v1/000/251/162/00000/0050EEC0-AD25-E511-9A32-02163E011962.root'\n# 'file:/afs/cern.ch/user/d/drew/USC_223708.root'\n# 'file:/afs/cern.ch/user/d/drew/USC_223495.root' #HO pedestal\n# '/store/group/comm_hcal/LS1/USC_223495.root' #HO pedestal, local\n# '/store/group/comm_hcal/LS1/USC_222759.root'\n# '/store/group/comm_hcal/LS1/USC_223775.root'\n#\t '/store/group/comm_hcal/LS1/USC_224285.root' #not sure, takend 31/7/2014\n# '/store/group/comm_hcal/LS1/USC_224625.root'\n'root://eoscms//eos/cms/store/data/Run2015B/HcalNZS/RAW/v1/000/251/244/00000/46080143-C025-E511-9CB7-02163E014166.root'\n#'root://eoscms//eos/cms/store/data/Run2015B/HcalNZS/RAW/v1/000/251/883/00000/20C23681-852B-E511-9FBC-02163E01413E.root'\n#'root://eoscms//eos/cms/store/data/Run2015B/HcalNZS/RAW/v1/000/251/883/00000/369E8A59-802B-E511-B85E-02163E01259F.root'\n#'root://eoscms//eos/cms/store/data/Run2015B/HcalNZS/RAW/v1/000/251/883/00000/488F97C1-8F2B-E511-86B8-02163E0144D2.root'\n#'root://eoscms//eos/cms/store/data/Run2015B/HcalNZS/RAW/v1/000/251/883/00000/FAE69354-7E2B-E511-80D7-02163E0125C8.root'\n )\n)\n\nprocess.analyzer = cms.EDAnalyzer('RawAnalyzer',\n\tdebugit = cms.untracked.bool(False),\n\toutputFile = cms.untracked.string(file_path),\n\tbadevlist = cms.vint32(\n\t153647285,\t152905909,\t153143477,\t153217205,\t151718625,\t153024693,\t150641153,\t151460577,\n\t152364043,\t152889525,\t153151669,\t151148928,\t153471157,\t149944833,\t151407329,\t152529024,\n\t150403585,\t151124352,\t152368139,\t152451200,\t152950965,\t153135285,\t154125042,\t154268402,\n\t152261643,\t150718977,\t152737973,\t153409717,\t153800866,\t151321313,\t152910005,\t153348277,\n\t154002162,\t149846529,\t150489601,\t150526465,\t151370465,\t152959157,\t153262261,\t153916146,\n\t150202881,\t152750261, 153004213),\n\tmodval = cms.untracked.int32(112)\n)\nprocess.TFileService = cms.Service(\"TFileService\",fileName = cms.string(\"RawAnalyzer.root\") )\nprocess.MessageLogger.cerr.FwkReport.reportEvery = 2000 #type out ever <n> events\nprocess.p = cms.Path(process.analyzer)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
| [0, 1, 2, 3, 4] |
# Write a function that determines whether any permutation of a string is a palindrome
def palinPerm(str):
# Create empty set
charSet = set()
# Loop through string, if character does not exist in set, add it. If it does, remove it.
for c in str:
if c not in charSet:
charSet.add(c)
else:
charSet.remove(c)
# The final set should either have 1 element or none
return len(charSet) == 1 or len(charSet) == 0
response = "It is a palinPerm" if palinPerm("dadadad") else "No, not a palinPerm"
print(response)
# Time Complexity: O(N)
| normal |
{
"blob_id": "04487dce97231a7be2bf3b164e93f0ea4d01ba05",
"index": 1160,
"step-1": "<mask token>\n",
"step-2": "def palinPerm(str):\n charSet = set()\n for c in str:\n if c not in charSet:\n charSet.add(c)\n else:\n charSet.remove(c)\n return len(charSet) == 1 or len(charSet) == 0\n\n\n<mask token>\n",
"step-3": "def palinPerm(str):\n charSet = set()\n for c in str:\n if c not in charSet:\n charSet.add(c)\n else:\n charSet.remove(c)\n return len(charSet) == 1 or len(charSet) == 0\n\n\n<mask token>\nprint(response)\n",
"step-4": "def palinPerm(str):\n charSet = set()\n for c in str:\n if c not in charSet:\n charSet.add(c)\n else:\n charSet.remove(c)\n return len(charSet) == 1 or len(charSet) == 0\n\n\nresponse = 'It is a palinPerm' if palinPerm('dadadad'\n ) else 'No, not a palinPerm'\nprint(response)\n",
"step-5": "# Write function that determines if a string a palindrome off of any permutation\ndef palinPerm(str):\n # Create empty set\n charSet = set()\n\n # Loop through string, if character does not exist in set, add it. If it does, remove it.\n for c in str:\n if c not in charSet:\n charSet.add(c)\n else:\n charSet.remove(c)\n\n # The final set should either have 1 element or none\n return len(charSet) == 1 or len(charSet) == 0\n\n\nresponse = \"It is a palinPerm\" if palinPerm(\"dadadad\") else \"No, not a palinPerm\"\nprint(response)\n\n# Time Complexity: O(N)",
"step-ids": [
0,
1,
2,
3,
4
]
}
| [0, 1, 2, 3, 4] |
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
import os
import platform
import subprocess
# try to import the json module; if that fails, use simplejson instead.
try:
import json
except ImportError:
import simplejson as json
# if your server uses an FQDN, you can suppress the domain; just change the variable below to your domain.
my_domain = 'localdomain'
# checks if operating system is Linux.
if platform.system() == 'Linux':
    # subprocess function; pass an operating system command as the k variable.
def SubprocessPopen(k):
devnull = open(os.devnull, 'w')
proc = subprocess.Popen([k], stdout=subprocess.PIPE, shell=True, stderr=devnull)
x = proc.communicate()[0]
return x.strip()
# display hostname
def display_hostname():
x = platform.node()
return x.replace(my_domain, '').replace('.', '').lower()
    # in my case the first 3 letters of the hostname indicate the site location; change if you want.
def display_site():
sites = ('SNE', 'RJO', 'BFC')
x = platform.node()
site = x.upper()[:3]
if site in sites:
return site
else:
return ''
    # display operating system release.
def display_release():
k = "lsb_release -d | awk -F':' '{{print $2}}'"
return (SubprocessPopen(k.strip()))
# display the hardware serial number.
def display_hw_serialnumber():
k = "dmidecode -s system-serial-number | egrep -v '^#'"
return (SubprocessPopen(k.strip()))
# display hardware vendor.
def display_hw_vendor():
k = "dmidecode -s system-manufacturer | egrep -v '^#'"
return (SubprocessPopen(k.strip()))
# display hardware model.
def display_hw_model():
k = "dmidecode -s system-product-name | egrep -v '^#'"
return SubprocessPopen(k.strip())
# display fibre channel id wwpn.
def display_fc_wwpn():
k = "cat /sys/class/fc_host/host*/port_name|xargs"
return SubprocessPopen(k.strip().replace('0x', ''))
# display ipv4 address.
def display_ipaddr():
k = "ip addr show | egrep inet | awk '{{print $2}}' | awk -F'/' '{{print $1}}' | egrep -v '^127|::'|xargs"
return SubprocessPopen(k.strip())
# display EMC storage id.
def display_frame():
k = "powermt display ports | awk '{{print $1}}' | egrep '^[A-Z]+{2}[0-9]|[0-9]' | sort -u|xargs"
return SubprocessPopen(k.strip())
# display total memory in MB.
def display_memory():
k = "egrep MemTotal /proc/meminfo | awk -F':' '{{print $2}}' | awk '{{print int($1/1024)}}'"
return SubprocessPopen(k) + " MB"
# display cpu info, physical and cores.
def display_cpu():
k = "model=$(lscpu | egrep ^'Model name' | awk -F\: '{{print$2}}')\n" \
"socket=$(lscpu | egrep ^'Socket' | awk -F\: '{{print$2}}')\n" \
"cpu=$(lscpu | egrep ^'CPU\(' | awk -F\: '{{print$2}}')\n" \
"core=$(lscpu | egrep ^'Core' | awk -F\: '{{print$2}}')\n" \
"echo ""$model / $socket Socket\\(s\\) / $cpu CPU\\(s\\) / $core Core\\(s\\) per Socket"""
return SubprocessPopen(k)
    # display information about Veritas InfoScale and Cluster Server.
def display_cluster():
k = "/opt/VRTSvcs/bin/haclus -state | awk '{{print $1}}' | tail -n1"
return SubprocessPopen(k)
# display the list of cluster nodes.
def display_clusternodes():
k = "/opt/VRTSvcs/bin/hasys -list"
return SubprocessPopen(k)
# display the name of Oracle instances.
def display_db():
k = "ps -ef | grep pmon | awk -F\_ '{{print $3}}' | egrep -v '^$|\+ASM'"
return SubprocessPopen(k)
# print all information on the screen.
print(
"server_name: {0:s} \n"
"server_release: {1:s} \n"
"server_site: {2:s} \n"
"server_vendor: {3:s} \n"
"server_model: {4:s} \n"
"server_serial: {5:s} \n"
"server_cpu: {6:s} \n"
"server_memory: {7:s} \n"
"server_ip: {8:s} \n"
"server_cluster: {9:s} \n"
"server_clusternodes: {10:s} \n"
"server_frame: {11:s} \n"
"server_wwpn: {12:s} \n"
"server_db: {13:s}".format(display_hostname(), display_release(), display_site(), display_hw_vendor(), display_hw_model(),
display_hw_serialnumber(),
display_cpu(), display_memory(), display_ipaddr(), display_cluster(), display_clusternodes(),
display_frame(),
display_fc_wwpn(), display_db()))
# create a dict to export info to sqlite db.
hadouken = {'server_name': display_hostname(), 'server_release': display_release(), 'server_site': display_site(),
'server_vendor': display_hw_vendor(), 'server_model': display_hw_model(),
'server_serial': display_hw_serialnumber(), 'server_cpu': display_cpu(), 'server_memory': display_memory(),
'server_ip': display_ipaddr(), 'server_cluster': display_cluster(), 'server_clusternodes': display_clusternodes(),
'server_frame': display_frame(), 'server_wwpn': display_fc_wwpn(), 'server_db': display_db()}
# export hadouken info to be loaded into sqlite3 using db.py..
hadouken_file = '/var/tmp/%s.json' % display_hostname()
fp = open(hadouken_file, 'w')
json.dump(hadouken, fp)
else:
    # if the operating system is not Linux, sorry.
print("OS not supported.")
| normal |
{
"blob_id": "de819a72ab659b50620fad2296027cb9f4d3e4c0",
"index": 5048,
"step-1": "<mask token>\n",
"step-2": "<mask token>\ntry:\n import json\nexcept ImportError:\n import simplejson as json\n<mask token>\nif platform.system() == 'Linux':\n\n def SubprocessPopen(k):\n devnull = open(os.devnull, 'w')\n proc = subprocess.Popen([k], stdout=subprocess.PIPE, shell=True,\n stderr=devnull)\n x = proc.communicate()[0]\n return x.strip()\n\n def display_hostname():\n x = platform.node()\n return x.replace(my_domain, '').replace('.', '').lower()\n\n def display_site():\n sites = 'SNE', 'RJO', 'BFC'\n x = platform.node()\n site = x.upper()[:3]\n if site in sites:\n return site\n else:\n return ''\n\n def display_release():\n k = \"lsb_release -d | awk -F':' '{{print $2}}'\"\n return SubprocessPopen(k.strip())\n\n def display_hw_serialnumber():\n k = \"dmidecode -s system-serial-number | egrep -v '^#'\"\n return SubprocessPopen(k.strip())\n\n def display_hw_vendor():\n k = \"dmidecode -s system-manufacturer | egrep -v '^#'\"\n return SubprocessPopen(k.strip())\n\n def display_hw_model():\n k = \"dmidecode -s system-product-name | egrep -v '^#'\"\n return SubprocessPopen(k.strip())\n\n def display_fc_wwpn():\n k = 'cat /sys/class/fc_host/host*/port_name|xargs'\n return SubprocessPopen(k.strip().replace('0x', ''))\n\n def display_ipaddr():\n k = (\n \"ip addr show | egrep inet | awk '{{print $2}}' | awk -F'/' '{{print $1}}' | egrep -v '^127|::'|xargs\"\n )\n return SubprocessPopen(k.strip())\n\n def display_frame():\n k = (\n \"powermt display ports | awk '{{print $1}}' | egrep '^[A-Z]+{2}[0-9]|[0-9]' | sort -u|xargs\"\n )\n return SubprocessPopen(k.strip())\n\n def display_memory():\n k = (\n \"egrep MemTotal /proc/meminfo | awk -F':' '{{print $2}}' | awk '{{print int($1/1024)}}'\"\n )\n return SubprocessPopen(k) + ' MB'\n\n def display_cpu():\n k = \"\"\"model=$(lscpu | egrep ^'Model name' | awk -F\\\\: '{{print$2}}')\nsocket=$(lscpu | egrep ^'Socket' | awk -F\\\\: '{{print$2}}')\ncpu=$(lscpu | egrep ^'CPU\\\\(' | awk -F\\\\: '{{print$2}}')\ncore=$(lscpu | egrep ^'Core' | awk -F\\\\: '{{print$2}}')\necho $model / $socket Socket\\\\(s\\\\) / $cpu CPU\\\\(s\\\\) / $core Core\\\\(s\\\\) per Socket\"\"\"\n return SubprocessPopen(k)\n\n def display_cluster():\n k = \"/opt/VRTSvcs/bin/haclus -state | awk '{{print $1}}' | tail -n1\"\n return SubprocessPopen(k)\n\n def display_clusternodes():\n k = '/opt/VRTSvcs/bin/hasys -list'\n return SubprocessPopen(k)\n\n def display_db():\n k = (\n \"ps -ef | grep pmon | awk -F\\\\_ '{{print $3}}' | egrep -v '^$|\\\\+ASM'\"\n )\n return SubprocessPopen(k)\n print(\n \"\"\"server_name: {0:s} \nserver_release: {1:s} \nserver_site: {2:s} \nserver_vendor: {3:s} \nserver_model: {4:s} \nserver_serial: {5:s} \nserver_cpu: {6:s} \nserver_memory: {7:s} \nserver_ip: {8:s} \nserver_cluster: {9:s} \nserver_clusternodes: {10:s} \nserver_frame: {11:s} \nserver_wwpn: {12:s} \nserver_db: {13:s}\"\"\"\n .format(display_hostname(), display_release(), display_site(),\n display_hw_vendor(), display_hw_model(), display_hw_serialnumber(),\n display_cpu(), display_memory(), display_ipaddr(), display_cluster(\n ), display_clusternodes(), display_frame(), display_fc_wwpn(),\n display_db()))\n hadouken = {'server_name': display_hostname(), 'server_release':\n display_release(), 'server_site': display_site(), 'server_vendor':\n display_hw_vendor(), 'server_model': display_hw_model(),\n 'server_serial': display_hw_serialnumber(), 'server_cpu':\n display_cpu(), 'server_memory': display_memory(), 'server_ip':\n display_ipaddr(), 'server_cluster': display_cluster(),\n 'server_clusternodes': 
display_clusternodes(), 'server_frame':\n display_frame(), 'server_wwpn': display_fc_wwpn(), 'server_db':\n display_db()}\n hadouken_file = '/var/tmp/%s.json' % display_hostname()\n fp = open(hadouken_file, 'w')\n json.dump(hadouken, fp)\nelse:\n print('OS not supported.')\n",
"step-3": "<mask token>\ntry:\n import json\nexcept ImportError:\n import simplejson as json\nmy_domain = 'localdomain'\nif platform.system() == 'Linux':\n\n def SubprocessPopen(k):\n devnull = open(os.devnull, 'w')\n proc = subprocess.Popen([k], stdout=subprocess.PIPE, shell=True,\n stderr=devnull)\n x = proc.communicate()[0]\n return x.strip()\n\n def display_hostname():\n x = platform.node()\n return x.replace(my_domain, '').replace('.', '').lower()\n\n def display_site():\n sites = 'SNE', 'RJO', 'BFC'\n x = platform.node()\n site = x.upper()[:3]\n if site in sites:\n return site\n else:\n return ''\n\n def display_release():\n k = \"lsb_release -d | awk -F':' '{{print $2}}'\"\n return SubprocessPopen(k.strip())\n\n def display_hw_serialnumber():\n k = \"dmidecode -s system-serial-number | egrep -v '^#'\"\n return SubprocessPopen(k.strip())\n\n def display_hw_vendor():\n k = \"dmidecode -s system-manufacturer | egrep -v '^#'\"\n return SubprocessPopen(k.strip())\n\n def display_hw_model():\n k = \"dmidecode -s system-product-name | egrep -v '^#'\"\n return SubprocessPopen(k.strip())\n\n def display_fc_wwpn():\n k = 'cat /sys/class/fc_host/host*/port_name|xargs'\n return SubprocessPopen(k.strip().replace('0x', ''))\n\n def display_ipaddr():\n k = (\n \"ip addr show | egrep inet | awk '{{print $2}}' | awk -F'/' '{{print $1}}' | egrep -v '^127|::'|xargs\"\n )\n return SubprocessPopen(k.strip())\n\n def display_frame():\n k = (\n \"powermt display ports | awk '{{print $1}}' | egrep '^[A-Z]+{2}[0-9]|[0-9]' | sort -u|xargs\"\n )\n return SubprocessPopen(k.strip())\n\n def display_memory():\n k = (\n \"egrep MemTotal /proc/meminfo | awk -F':' '{{print $2}}' | awk '{{print int($1/1024)}}'\"\n )\n return SubprocessPopen(k) + ' MB'\n\n def display_cpu():\n k = \"\"\"model=$(lscpu | egrep ^'Model name' | awk -F\\\\: '{{print$2}}')\nsocket=$(lscpu | egrep ^'Socket' | awk -F\\\\: '{{print$2}}')\ncpu=$(lscpu | egrep ^'CPU\\\\(' | awk -F\\\\: '{{print$2}}')\ncore=$(lscpu | egrep ^'Core' | awk -F\\\\: '{{print$2}}')\necho $model / $socket Socket\\\\(s\\\\) / $cpu CPU\\\\(s\\\\) / $core Core\\\\(s\\\\) per Socket\"\"\"\n return SubprocessPopen(k)\n\n def display_cluster():\n k = \"/opt/VRTSvcs/bin/haclus -state | awk '{{print $1}}' | tail -n1\"\n return SubprocessPopen(k)\n\n def display_clusternodes():\n k = '/opt/VRTSvcs/bin/hasys -list'\n return SubprocessPopen(k)\n\n def display_db():\n k = (\n \"ps -ef | grep pmon | awk -F\\\\_ '{{print $3}}' | egrep -v '^$|\\\\+ASM'\"\n )\n return SubprocessPopen(k)\n print(\n \"\"\"server_name: {0:s} \nserver_release: {1:s} \nserver_site: {2:s} \nserver_vendor: {3:s} \nserver_model: {4:s} \nserver_serial: {5:s} \nserver_cpu: {6:s} \nserver_memory: {7:s} \nserver_ip: {8:s} \nserver_cluster: {9:s} \nserver_clusternodes: {10:s} \nserver_frame: {11:s} \nserver_wwpn: {12:s} \nserver_db: {13:s}\"\"\"\n .format(display_hostname(), display_release(), display_site(),\n display_hw_vendor(), display_hw_model(), display_hw_serialnumber(),\n display_cpu(), display_memory(), display_ipaddr(), display_cluster(\n ), display_clusternodes(), display_frame(), display_fc_wwpn(),\n display_db()))\n hadouken = {'server_name': display_hostname(), 'server_release':\n display_release(), 'server_site': display_site(), 'server_vendor':\n display_hw_vendor(), 'server_model': display_hw_model(),\n 'server_serial': display_hw_serialnumber(), 'server_cpu':\n display_cpu(), 'server_memory': display_memory(), 'server_ip':\n display_ipaddr(), 'server_cluster': display_cluster(),\n 
'server_clusternodes': display_clusternodes(), 'server_frame':\n display_frame(), 'server_wwpn': display_fc_wwpn(), 'server_db':\n display_db()}\n hadouken_file = '/var/tmp/%s.json' % display_hostname()\n fp = open(hadouken_file, 'w')\n json.dump(hadouken, fp)\nelse:\n print('OS not supported.')\n",
"step-4": "import os\nimport platform\nimport subprocess\ntry:\n import json\nexcept ImportError:\n import simplejson as json\nmy_domain = 'localdomain'\nif platform.system() == 'Linux':\n\n def SubprocessPopen(k):\n devnull = open(os.devnull, 'w')\n proc = subprocess.Popen([k], stdout=subprocess.PIPE, shell=True,\n stderr=devnull)\n x = proc.communicate()[0]\n return x.strip()\n\n def display_hostname():\n x = platform.node()\n return x.replace(my_domain, '').replace('.', '').lower()\n\n def display_site():\n sites = 'SNE', 'RJO', 'BFC'\n x = platform.node()\n site = x.upper()[:3]\n if site in sites:\n return site\n else:\n return ''\n\n def display_release():\n k = \"lsb_release -d | awk -F':' '{{print $2}}'\"\n return SubprocessPopen(k.strip())\n\n def display_hw_serialnumber():\n k = \"dmidecode -s system-serial-number | egrep -v '^#'\"\n return SubprocessPopen(k.strip())\n\n def display_hw_vendor():\n k = \"dmidecode -s system-manufacturer | egrep -v '^#'\"\n return SubprocessPopen(k.strip())\n\n def display_hw_model():\n k = \"dmidecode -s system-product-name | egrep -v '^#'\"\n return SubprocessPopen(k.strip())\n\n def display_fc_wwpn():\n k = 'cat /sys/class/fc_host/host*/port_name|xargs'\n return SubprocessPopen(k.strip().replace('0x', ''))\n\n def display_ipaddr():\n k = (\n \"ip addr show | egrep inet | awk '{{print $2}}' | awk -F'/' '{{print $1}}' | egrep -v '^127|::'|xargs\"\n )\n return SubprocessPopen(k.strip())\n\n def display_frame():\n k = (\n \"powermt display ports | awk '{{print $1}}' | egrep '^[A-Z]+{2}[0-9]|[0-9]' | sort -u|xargs\"\n )\n return SubprocessPopen(k.strip())\n\n def display_memory():\n k = (\n \"egrep MemTotal /proc/meminfo | awk -F':' '{{print $2}}' | awk '{{print int($1/1024)}}'\"\n )\n return SubprocessPopen(k) + ' MB'\n\n def display_cpu():\n k = \"\"\"model=$(lscpu | egrep ^'Model name' | awk -F\\\\: '{{print$2}}')\nsocket=$(lscpu | egrep ^'Socket' | awk -F\\\\: '{{print$2}}')\ncpu=$(lscpu | egrep ^'CPU\\\\(' | awk -F\\\\: '{{print$2}}')\ncore=$(lscpu | egrep ^'Core' | awk -F\\\\: '{{print$2}}')\necho $model / $socket Socket\\\\(s\\\\) / $cpu CPU\\\\(s\\\\) / $core Core\\\\(s\\\\) per Socket\"\"\"\n return SubprocessPopen(k)\n\n def display_cluster():\n k = \"/opt/VRTSvcs/bin/haclus -state | awk '{{print $1}}' | tail -n1\"\n return SubprocessPopen(k)\n\n def display_clusternodes():\n k = '/opt/VRTSvcs/bin/hasys -list'\n return SubprocessPopen(k)\n\n def display_db():\n k = (\n \"ps -ef | grep pmon | awk -F\\\\_ '{{print $3}}' | egrep -v '^$|\\\\+ASM'\"\n )\n return SubprocessPopen(k)\n print(\n \"\"\"server_name: {0:s} \nserver_release: {1:s} \nserver_site: {2:s} \nserver_vendor: {3:s} \nserver_model: {4:s} \nserver_serial: {5:s} \nserver_cpu: {6:s} \nserver_memory: {7:s} \nserver_ip: {8:s} \nserver_cluster: {9:s} \nserver_clusternodes: {10:s} \nserver_frame: {11:s} \nserver_wwpn: {12:s} \nserver_db: {13:s}\"\"\"\n .format(display_hostname(), display_release(), display_site(),\n display_hw_vendor(), display_hw_model(), display_hw_serialnumber(),\n display_cpu(), display_memory(), display_ipaddr(), display_cluster(\n ), display_clusternodes(), display_frame(), display_fc_wwpn(),\n display_db()))\n hadouken = {'server_name': display_hostname(), 'server_release':\n display_release(), 'server_site': display_site(), 'server_vendor':\n display_hw_vendor(), 'server_model': display_hw_model(),\n 'server_serial': display_hw_serialnumber(), 'server_cpu':\n display_cpu(), 'server_memory': display_memory(), 'server_ip':\n display_ipaddr(), 'server_cluster': 
display_cluster(),\n 'server_clusternodes': display_clusternodes(), 'server_frame':\n display_frame(), 'server_wwpn': display_fc_wwpn(), 'server_db':\n display_db()}\n hadouken_file = '/var/tmp/%s.json' % display_hostname()\n fp = open(hadouken_file, 'w')\n json.dump(hadouken, fp)\nelse:\n print('OS not supported.')\n",
"step-5": "#!/usr/bin/env python\n# -*- coding: UTF-8 -*-\n\nimport os\nimport platform\nimport subprocess\n\n# try to import json module, if got an error use simplejson instead of json.\ntry:\n import json\nexcept ImportError:\n import simplejson as json\n\n# if your server uses fqdn, you can suppress the domain, just change the bellow variable to your domain.\nmy_domain = 'localdomain'\n\n# checks if operating system is Linux.\nif platform.system() == 'Linux':\n # subprocess funciton, pass a operation system command as k variable.\n def SubprocessPopen(k):\n devnull = open(os.devnull, 'w')\n proc = subprocess.Popen([k], stdout=subprocess.PIPE, shell=True, stderr=devnull)\n x = proc.communicate()[0]\n return x.strip()\n\n # display hostname\n def display_hostname():\n x = platform.node()\n return x.replace(my_domain, '').replace('.', '').lower()\n\n # in my case the first 3 letters of the hostname indicates the site location, change if you want.\n def display_site():\n sites = ('SNE', 'RJO', 'BFC')\n x = platform.node()\n site = x.upper()[:3]\n if site in sites:\n return site\n else:\n return ''\n\n # display operation system release.\n def display_release():\n k = \"lsb_release -d | awk -F':' '{{print $2}}'\"\n return (SubprocessPopen(k.strip()))\n\n # display the hardware serial number.\n def display_hw_serialnumber():\n k = \"dmidecode -s system-serial-number | egrep -v '^#'\"\n return (SubprocessPopen(k.strip()))\n\n # display hardware vendor.\n def display_hw_vendor():\n k = \"dmidecode -s system-manufacturer | egrep -v '^#'\"\n return (SubprocessPopen(k.strip()))\n\n # display hardware model.\n def display_hw_model():\n k = \"dmidecode -s system-product-name | egrep -v '^#'\"\n return SubprocessPopen(k.strip())\n\n # display fibre channel id wwpn.\n def display_fc_wwpn():\n k = \"cat /sys/class/fc_host/host*/port_name|xargs\"\n return SubprocessPopen(k.strip().replace('0x', ''))\n\n # display ipv4 address.\n def display_ipaddr():\n k = \"ip addr show | egrep inet | awk '{{print $2}}' | awk -F'/' '{{print $1}}' | egrep -v '^127|::'|xargs\"\n return SubprocessPopen(k.strip())\n\n # display EMC storage id.\n def display_frame():\n k = \"powermt display ports | awk '{{print $1}}' | egrep '^[A-Z]+{2}[0-9]|[0-9]' | sort -u|xargs\"\n return SubprocessPopen(k.strip())\n\n # display total memory in MB.\n def display_memory():\n k = \"egrep MemTotal /proc/meminfo | awk -F':' '{{print $2}}' | awk '{{print int($1/1024)}}'\"\n return SubprocessPopen(k) + \" MB\"\n\n # display cpu info, physical and cores.\n def display_cpu():\n k = \"model=$(lscpu | egrep ^'Model name' | awk -F\\: '{{print$2}}')\\n\" \\\n \"socket=$(lscpu | egrep ^'Socket' | awk -F\\: '{{print$2}}')\\n\" \\\n \"cpu=$(lscpu | egrep ^'CPU\\(' | awk -F\\: '{{print$2}}')\\n\" \\\n \"core=$(lscpu | egrep ^'Core' | awk -F\\: '{{print$2}}')\\n\" \\\n \"echo \"\"$model / $socket Socket\\\\(s\\\\) / $cpu CPU\\\\(s\\\\) / $core Core\\\\(s\\\\) per Socket\"\"\"\n return SubprocessPopen(k)\n\n # display information about Veritas InforScale and Cluster Server.\n def display_cluster():\n k = \"/opt/VRTSvcs/bin/haclus -state | awk '{{print $1}}' | tail -n1\"\n return SubprocessPopen(k)\n\n # display the list of cluster nodes.\n def display_clusternodes():\n k = \"/opt/VRTSvcs/bin/hasys -list\"\n return SubprocessPopen(k)\n\n # display the name of Oracle instances.\n def display_db():\n k = \"ps -ef | grep pmon | awk -F\\_ '{{print $3}}' | egrep -v '^$|\\+ASM'\"\n return SubprocessPopen(k)\n\n # print all information on the screen.\n print(\n 
\"server_name: {0:s} \\n\"\n \"server_release: {1:s} \\n\"\n \"server_site: {2:s} \\n\"\n \"server_vendor: {3:s} \\n\"\n \"server_model: {4:s} \\n\"\n \"server_serial: {5:s} \\n\"\n \"server_cpu: {6:s} \\n\"\n \"server_memory: {7:s} \\n\"\n \"server_ip: {8:s} \\n\"\n \"server_cluster: {9:s} \\n\"\n \"server_clusternodes: {10:s} \\n\"\n \"server_frame: {11:s} \\n\"\n \"server_wwpn: {12:s} \\n\"\n \"server_db: {13:s}\".format(display_hostname(), display_release(), display_site(), display_hw_vendor(), display_hw_model(),\n display_hw_serialnumber(),\n display_cpu(), display_memory(), display_ipaddr(), display_cluster(), display_clusternodes(),\n display_frame(),\n display_fc_wwpn(), display_db()))\n\n # create a dict to export info to sqlite db.\n hadouken = {'server_name': display_hostname(), 'server_release': display_release(), 'server_site': display_site(),\n 'server_vendor': display_hw_vendor(), 'server_model': display_hw_model(),\n 'server_serial': display_hw_serialnumber(), 'server_cpu': display_cpu(), 'server_memory': display_memory(),\n 'server_ip': display_ipaddr(), 'server_cluster': display_cluster(), 'server_clusternodes': display_clusternodes(),\n 'server_frame': display_frame(), 'server_wwpn': display_fc_wwpn(), 'server_db': display_db()}\n\n # export hadouken info to be loaded into sqlite3 using db.py..\n hadouken_file = '/var/tmp/%s.json' % display_hostname()\n fp = open(hadouken_file, 'w')\n json.dump(hadouken, fp)\n\nelse:\n # if the operation system is not Linux, sorry.\n print(\"OS not supported.\")\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
| [0, 1, 2, 3, 4] |
"""game"""
def get_word_score(word_1, n_1):
"""string"""
# import string
# key = list(string.ascii_lowercase)
# value = []
# x=1
sum_1 = 0
# for i in range(0, 26):
# value.append(x)
# x+=1
# dictionary_ = dict(zip(key, value))
# print(dictionary_)
dictionary_ = {'a': 1, 'b': 3, 'c': 3, 'd': 2, 'e': 1, 'f': 4, 'g': 2, 'h': 4, 'i': 1, 'j': 8, 'k': 5, 'l': 1, 'm': 3, 'n': 1, 'o': 1, 'p': 3, 'q': 10, 'r': 1, 's': 1, 't': 1, 'u': 1, 'v': 4, 'w': 4, 'x': 8, 'y': 4, 'z': 10}
length_1 = len(word_1)
# if length_1 <= n_1:
for i in word_1:
if i in dictionary_.keys():
sum_1 = sum_1 + dictionary_[i]
sum_1 = sum_1*length_1
if n_1 == length_1:
sum_1 += 50
return sum_1
    # print("wrong inputs")
def main():
'''
Main function for the given problem
'''
data = input()
data = data.split(" ")
print(get_word_score(data[0], int(data[1])))
if __name__ == "__main__":
main()
| normal |
{
"blob_id": "325708d5e8b71bad4806b59f3f86a737c1baef8d",
"index": 3976,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef get_word_score(word_1, n_1):\n \"\"\"string\"\"\"\n sum_1 = 0\n dictionary_ = {'a': 1, 'b': 3, 'c': 3, 'd': 2, 'e': 1, 'f': 4, 'g': 2,\n 'h': 4, 'i': 1, 'j': 8, 'k': 5, 'l': 1, 'm': 3, 'n': 1, 'o': 1, 'p':\n 3, 'q': 10, 'r': 1, 's': 1, 't': 1, 'u': 1, 'v': 4, 'w': 4, 'x': 8,\n 'y': 4, 'z': 10}\n length_1 = len(word_1)\n for i in word_1:\n if i in dictionary_.keys():\n sum_1 = sum_1 + dictionary_[i]\n sum_1 = sum_1 * length_1\n if n_1 == length_1:\n sum_1 += 50\n return sum_1\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef get_word_score(word_1, n_1):\n \"\"\"string\"\"\"\n sum_1 = 0\n dictionary_ = {'a': 1, 'b': 3, 'c': 3, 'd': 2, 'e': 1, 'f': 4, 'g': 2,\n 'h': 4, 'i': 1, 'j': 8, 'k': 5, 'l': 1, 'm': 3, 'n': 1, 'o': 1, 'p':\n 3, 'q': 10, 'r': 1, 's': 1, 't': 1, 'u': 1, 'v': 4, 'w': 4, 'x': 8,\n 'y': 4, 'z': 10}\n length_1 = len(word_1)\n for i in word_1:\n if i in dictionary_.keys():\n sum_1 = sum_1 + dictionary_[i]\n sum_1 = sum_1 * length_1\n if n_1 == length_1:\n sum_1 += 50\n return sum_1\n\n\ndef main():\n \"\"\"\n Main function for the given problem\n \"\"\"\n data = input()\n data = data.split(' ')\n print(get_word_score(data[0], int(data[1])))\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\ndef get_word_score(word_1, n_1):\n \"\"\"string\"\"\"\n sum_1 = 0\n dictionary_ = {'a': 1, 'b': 3, 'c': 3, 'd': 2, 'e': 1, 'f': 4, 'g': 2,\n 'h': 4, 'i': 1, 'j': 8, 'k': 5, 'l': 1, 'm': 3, 'n': 1, 'o': 1, 'p':\n 3, 'q': 10, 'r': 1, 's': 1, 't': 1, 'u': 1, 'v': 4, 'w': 4, 'x': 8,\n 'y': 4, 'z': 10}\n length_1 = len(word_1)\n for i in word_1:\n if i in dictionary_.keys():\n sum_1 = sum_1 + dictionary_[i]\n sum_1 = sum_1 * length_1\n if n_1 == length_1:\n sum_1 += 50\n return sum_1\n\n\ndef main():\n \"\"\"\n Main function for the given problem\n \"\"\"\n data = input()\n data = data.split(' ')\n print(get_word_score(data[0], int(data[1])))\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "\"\"\"game\"\"\"\ndef get_word_score(word_1, n_1):\n \"\"\"string\"\"\"\n # import string\n # key = list(string.ascii_lowercase)\n # value = []\n # x=1\n sum_1 = 0\n # for i in range(0, 26):\n # value.append(x)\n # x+=1\n # dictionary_ = dict(zip(key, value))\n # print(dictionary_)\n dictionary_ = {'a': 1, 'b': 3, 'c': 3, 'd': 2, 'e': 1, 'f': 4, 'g': 2, 'h': 4, 'i': 1, 'j': 8, 'k': 5, 'l': 1, 'm': 3, 'n': 1, 'o': 1, 'p': 3, 'q': 10, 'r': 1, 's': 1, 't': 1, 'u': 1, 'v': 4, 'w': 4, 'x': 8, 'y': 4, 'z': 10}\n length_1 = len(word_1)\n # if length_1 <= n_1:\n for i in word_1:\n if i in dictionary_.keys():\n sum_1 = sum_1 + dictionary_[i]\n sum_1 = sum_1*length_1\n if n_1 == length_1:\n sum_1 += 50\n return sum_1\n # print(\"worng inputs\")\ndef main():\n '''\n Main function for the given problem\n '''\n data = input()\n data = data.split(\" \")\n print(get_word_score(data[0], int(data[1])))\nif __name__ == \"__main__\":\n main()\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
| [0, 1, 2, 3, 4] |
# coding=utf-8
import re
import traceback
from pesto_common.config.configer import Configer
from pesto_common.log.logger_factory import LoggerFactory
from pesto_orm.core.base import db_config
from pesto_orm.core.executor import ExecutorFactory
from pesto_orm.core.model import BaseModel
from pesto_orm.core.repository import BaseRepository
from pesto_orm.dialect.base import DefaultDialect
logger = LoggerFactory.get_logger('dialect.mysql.domain')
class MySQLDialect(DefaultDialect):
def get_db_type(self):
return 'mysql'
def paginate_with(self, sql, page_number, page_size):
if page_number == 1 and page_size == 1:
if re.match(DefaultDialect.select_single_pattern, sql) is not None:
return sql
offset = page_size * (page_number - 1)
return '%s LIMIT %d OFFSET %d' % (sql, page_size, offset)
db_type = Configer.get('db.type')
if db_type == 'mysql':
import mysql.connector as connector
db_config['target'] = connector
db_config['use_pure'] = True
from mysql.connector.conversion import MySQLConverter
class NumpyMySQLConverter(MySQLConverter):
''' A mysql.connector Converter that handles Numpy types '''
def _float32_to_mysql(self, value):
return float(value)
def _float64_to_mysql(self, value):
return float(value)
def _int32_to_mysql(self, value):
return int(value)
def _int64_to_mysql(self, value):
return int(value)
db_config['converter_class'] = NumpyMySQLConverter
mysqlExecutor = ExecutorFactory.get_executor(db_config=db_config)
mysqlDialect = MySQLDialect()
class MysqlBaseModel(BaseModel):
def __init__(self, db_name=None, table_name=None, table_alias=None, primary_key='id'):
super(MysqlBaseModel, self).__init__(db_name, table_name, table_alias, primary_key)
def get_dialect(self):
return mysqlDialect
def get_executor(self):
return mysqlExecutor
class MysqlBaseRepository(BaseRepository):
def __init__(self, model_class=None):
super(MysqlBaseRepository, self).__init__(model_class)
def get_dialect(self):
return mysqlDialect
def get_executor(self):
return mysqlExecutor
def transaction(rollback_exceptions=[]):
def wrap(func):
        def handle(result, **kwargs): # actually executes the original method.
func = kwargs['func']
args = kwargs['args']
kwargs = kwargs['kwargs']
return_value = func(*args, **kwargs)
logger.info('Transaction method: ' + func.__name__)
result.append(return_value)
def to_do(*args, **kwargs):
new_kwargs = {'func': func, 'args': args, 'kwargs': kwargs}
result = []
try:
mysqlExecutor.begin_transaction()
handle(result, **new_kwargs)
mysqlExecutor.commit_transaction()
except Exception as e:
if len(rollback_exceptions) == 0 or e.__class__ in rollback_exceptions:
mysqlExecutor.rollback_transaction()
logger.error('Method execute error. method: ' + str(func.__name__) + ', error:' + traceback.format_exc() + ', transaction roll back.')
else:
mysqlExecutor.commit_transaction()
raise e
finally:
mysqlExecutor.close_transaction()
return to_do
return wrap
| normal |
{
"blob_id": "a68de7555fdab06014fd562e7db29ca2da03f443",
"index": 8240,
"step-1": "<mask token>\n\n\nclass MysqlBaseModel(BaseModel):\n\n def __init__(self, db_name=None, table_name=None, table_alias=None,\n primary_key='id'):\n super(MysqlBaseModel, self).__init__(db_name, table_name,\n table_alias, primary_key)\n <mask token>\n\n def get_executor(self):\n return mysqlExecutor\n\n\nclass MysqlBaseRepository(BaseRepository):\n\n def __init__(self, model_class=None):\n super(MysqlBaseRepository, self).__init__(model_class)\n\n def get_dialect(self):\n return mysqlDialect\n\n def get_executor(self):\n return mysqlExecutor\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass MySQLDialect(DefaultDialect):\n\n def get_db_type(self):\n return 'mysql'\n\n def paginate_with(self, sql, page_number, page_size):\n if page_number == 1 and page_size == 1:\n if re.match(DefaultDialect.select_single_pattern, sql) is not None:\n return sql\n offset = page_size * (page_number - 1)\n return '%s LIMIT %d OFFSET %d' % (sql, page_size, offset)\n\n\n<mask token>\n\n\nclass MysqlBaseModel(BaseModel):\n\n def __init__(self, db_name=None, table_name=None, table_alias=None,\n primary_key='id'):\n super(MysqlBaseModel, self).__init__(db_name, table_name,\n table_alias, primary_key)\n\n def get_dialect(self):\n return mysqlDialect\n\n def get_executor(self):\n return mysqlExecutor\n\n\nclass MysqlBaseRepository(BaseRepository):\n\n def __init__(self, model_class=None):\n super(MysqlBaseRepository, self).__init__(model_class)\n\n def get_dialect(self):\n return mysqlDialect\n\n def get_executor(self):\n return mysqlExecutor\n\n\ndef transaction(rollback_exceptions=[]):\n\n def wrap(func):\n\n def handle(result, **kwargs):\n func = kwargs['func']\n args = kwargs['args']\n kwargs = kwargs['kwargs']\n return_value = func(*args, **kwargs)\n logger.info('Transaction method: ' + func.__name__)\n result.append(return_value)\n\n def to_do(*args, **kwargs):\n new_kwargs = {'func': func, 'args': args, 'kwargs': kwargs}\n result = []\n try:\n mysqlExecutor.begin_transaction()\n handle(result, **new_kwargs)\n mysqlExecutor.commit_transaction()\n except Exception as e:\n if len(rollback_exceptions\n ) == 0 or e.__class__ in rollback_exceptions:\n mysqlExecutor.rollback_transaction()\n logger.error('Method execute error. method: ' + str(\n func.__name__) + ', error:' + traceback.format_exc\n () + ', transaction roll back.')\n else:\n mysqlExecutor.commit_transaction()\n raise e\n finally:\n mysqlExecutor.close_transaction()\n return to_do\n return wrap\n",
"step-3": "<mask token>\n\n\nclass MySQLDialect(DefaultDialect):\n\n def get_db_type(self):\n return 'mysql'\n\n def paginate_with(self, sql, page_number, page_size):\n if page_number == 1 and page_size == 1:\n if re.match(DefaultDialect.select_single_pattern, sql) is not None:\n return sql\n offset = page_size * (page_number - 1)\n return '%s LIMIT %d OFFSET %d' % (sql, page_size, offset)\n\n\n<mask token>\nif db_type == 'mysql':\n import mysql.connector as connector\n db_config['target'] = connector\n db_config['use_pure'] = True\n from mysql.connector.conversion import MySQLConverter\n\n\n class NumpyMySQLConverter(MySQLConverter):\n \"\"\" A mysql.connector Converter that handles Numpy types \"\"\"\n\n def _float32_to_mysql(self, value):\n return float(value)\n\n def _float64_to_mysql(self, value):\n return float(value)\n\n def _int32_to_mysql(self, value):\n return int(value)\n\n def _int64_to_mysql(self, value):\n return int(value)\n db_config['converter_class'] = NumpyMySQLConverter\n mysqlExecutor = ExecutorFactory.get_executor(db_config=db_config)\n mysqlDialect = MySQLDialect()\n\n\nclass MysqlBaseModel(BaseModel):\n\n def __init__(self, db_name=None, table_name=None, table_alias=None,\n primary_key='id'):\n super(MysqlBaseModel, self).__init__(db_name, table_name,\n table_alias, primary_key)\n\n def get_dialect(self):\n return mysqlDialect\n\n def get_executor(self):\n return mysqlExecutor\n\n\nclass MysqlBaseRepository(BaseRepository):\n\n def __init__(self, model_class=None):\n super(MysqlBaseRepository, self).__init__(model_class)\n\n def get_dialect(self):\n return mysqlDialect\n\n def get_executor(self):\n return mysqlExecutor\n\n\ndef transaction(rollback_exceptions=[]):\n\n def wrap(func):\n\n def handle(result, **kwargs):\n func = kwargs['func']\n args = kwargs['args']\n kwargs = kwargs['kwargs']\n return_value = func(*args, **kwargs)\n logger.info('Transaction method: ' + func.__name__)\n result.append(return_value)\n\n def to_do(*args, **kwargs):\n new_kwargs = {'func': func, 'args': args, 'kwargs': kwargs}\n result = []\n try:\n mysqlExecutor.begin_transaction()\n handle(result, **new_kwargs)\n mysqlExecutor.commit_transaction()\n except Exception as e:\n if len(rollback_exceptions\n ) == 0 or e.__class__ in rollback_exceptions:\n mysqlExecutor.rollback_transaction()\n logger.error('Method execute error. method: ' + str(\n func.__name__) + ', error:' + traceback.format_exc\n () + ', transaction roll back.')\n else:\n mysqlExecutor.commit_transaction()\n raise e\n finally:\n mysqlExecutor.close_transaction()\n return to_do\n return wrap\n",
"step-4": "import re\nimport traceback\nfrom pesto_common.config.configer import Configer\nfrom pesto_common.log.logger_factory import LoggerFactory\nfrom pesto_orm.core.base import db_config\nfrom pesto_orm.core.executor import ExecutorFactory\nfrom pesto_orm.core.model import BaseModel\nfrom pesto_orm.core.repository import BaseRepository\nfrom pesto_orm.dialect.base import DefaultDialect\nlogger = LoggerFactory.get_logger('dialect.mysql.domain')\n\n\nclass MySQLDialect(DefaultDialect):\n\n def get_db_type(self):\n return 'mysql'\n\n def paginate_with(self, sql, page_number, page_size):\n if page_number == 1 and page_size == 1:\n if re.match(DefaultDialect.select_single_pattern, sql) is not None:\n return sql\n offset = page_size * (page_number - 1)\n return '%s LIMIT %d OFFSET %d' % (sql, page_size, offset)\n\n\ndb_type = Configer.get('db.type')\nif db_type == 'mysql':\n import mysql.connector as connector\n db_config['target'] = connector\n db_config['use_pure'] = True\n from mysql.connector.conversion import MySQLConverter\n\n\n class NumpyMySQLConverter(MySQLConverter):\n \"\"\" A mysql.connector Converter that handles Numpy types \"\"\"\n\n def _float32_to_mysql(self, value):\n return float(value)\n\n def _float64_to_mysql(self, value):\n return float(value)\n\n def _int32_to_mysql(self, value):\n return int(value)\n\n def _int64_to_mysql(self, value):\n return int(value)\n db_config['converter_class'] = NumpyMySQLConverter\n mysqlExecutor = ExecutorFactory.get_executor(db_config=db_config)\n mysqlDialect = MySQLDialect()\n\n\nclass MysqlBaseModel(BaseModel):\n\n def __init__(self, db_name=None, table_name=None, table_alias=None,\n primary_key='id'):\n super(MysqlBaseModel, self).__init__(db_name, table_name,\n table_alias, primary_key)\n\n def get_dialect(self):\n return mysqlDialect\n\n def get_executor(self):\n return mysqlExecutor\n\n\nclass MysqlBaseRepository(BaseRepository):\n\n def __init__(self, model_class=None):\n super(MysqlBaseRepository, self).__init__(model_class)\n\n def get_dialect(self):\n return mysqlDialect\n\n def get_executor(self):\n return mysqlExecutor\n\n\ndef transaction(rollback_exceptions=[]):\n\n def wrap(func):\n\n def handle(result, **kwargs):\n func = kwargs['func']\n args = kwargs['args']\n kwargs = kwargs['kwargs']\n return_value = func(*args, **kwargs)\n logger.info('Transaction method: ' + func.__name__)\n result.append(return_value)\n\n def to_do(*args, **kwargs):\n new_kwargs = {'func': func, 'args': args, 'kwargs': kwargs}\n result = []\n try:\n mysqlExecutor.begin_transaction()\n handle(result, **new_kwargs)\n mysqlExecutor.commit_transaction()\n except Exception as e:\n if len(rollback_exceptions\n ) == 0 or e.__class__ in rollback_exceptions:\n mysqlExecutor.rollback_transaction()\n logger.error('Method execute error. method: ' + str(\n func.__name__) + ', error:' + traceback.format_exc\n () + ', transaction roll back.')\n else:\n mysqlExecutor.commit_transaction()\n raise e\n finally:\n mysqlExecutor.close_transaction()\n return to_do\n return wrap\n",
"step-5": "# coding=utf-8\nimport re\nimport traceback\n\nfrom pesto_common.config.configer import Configer\nfrom pesto_common.log.logger_factory import LoggerFactory\nfrom pesto_orm.core.base import db_config\nfrom pesto_orm.core.executor import ExecutorFactory\nfrom pesto_orm.core.model import BaseModel\nfrom pesto_orm.core.repository import BaseRepository\nfrom pesto_orm.dialect.base import DefaultDialect\n\nlogger = LoggerFactory.get_logger('dialect.mysql.domain')\n\n\nclass MySQLDialect(DefaultDialect):\n\n def get_db_type(self):\n return 'mysql'\n\n def paginate_with(self, sql, page_number, page_size):\n if page_number == 1 and page_size == 1:\n if re.match(DefaultDialect.select_single_pattern, sql) is not None:\n return sql\n\n offset = page_size * (page_number - 1)\n return '%s LIMIT %d OFFSET %d' % (sql, page_size, offset)\n\n\ndb_type = Configer.get('db.type')\nif db_type == 'mysql':\n import mysql.connector as connector\n\n db_config['target'] = connector\n db_config['use_pure'] = True\n\n from mysql.connector.conversion import MySQLConverter\n\n\n class NumpyMySQLConverter(MySQLConverter):\n ''' A mysql.connector Converter that handles Numpy types '''\n\n def _float32_to_mysql(self, value):\n return float(value)\n\n def _float64_to_mysql(self, value):\n return float(value)\n\n def _int32_to_mysql(self, value):\n return int(value)\n\n def _int64_to_mysql(self, value):\n return int(value)\n\n\n db_config['converter_class'] = NumpyMySQLConverter\n\n mysqlExecutor = ExecutorFactory.get_executor(db_config=db_config)\n\n mysqlDialect = MySQLDialect()\n\n\nclass MysqlBaseModel(BaseModel):\n\n def __init__(self, db_name=None, table_name=None, table_alias=None, primary_key='id'):\n super(MysqlBaseModel, self).__init__(db_name, table_name, table_alias, primary_key)\n\n def get_dialect(self):\n return mysqlDialect\n\n def get_executor(self):\n return mysqlExecutor\n\n\nclass MysqlBaseRepository(BaseRepository):\n\n def __init__(self, model_class=None):\n super(MysqlBaseRepository, self).__init__(model_class)\n\n def get_dialect(self):\n return mysqlDialect\n\n def get_executor(self):\n return mysqlExecutor\n\n\ndef transaction(rollback_exceptions=[]):\n def wrap(func):\n def handle(result, **kwargs): # 真实执行原方法.\n func = kwargs['func']\n args = kwargs['args']\n kwargs = kwargs['kwargs']\n return_value = func(*args, **kwargs)\n logger.info('Transaction method: ' + func.__name__)\n result.append(return_value)\n\n def to_do(*args, **kwargs):\n new_kwargs = {'func': func, 'args': args, 'kwargs': kwargs}\n\n result = []\n try:\n mysqlExecutor.begin_transaction()\n handle(result, **new_kwargs)\n mysqlExecutor.commit_transaction()\n except Exception as e:\n\n if len(rollback_exceptions) == 0 or e.__class__ in rollback_exceptions:\n mysqlExecutor.rollback_transaction()\n logger.error('Method execute error. method: ' + str(func.__name__) + ', error:' + traceback.format_exc() + ', transaction roll back.')\n else:\n mysqlExecutor.commit_transaction()\n raise e\n finally:\n mysqlExecutor.close_transaction()\n\n return to_do\n\n return wrap\n",
"step-ids": [
7,
12,
13,
15,
16
]
}
| [7, 12, 13, 15, 16] |
#!/usr/bin/env python3
#coding=utf8
from __future__ import (division,absolute_import,print_function,unicode_literals)
import argparse, csv, sys,subprocess,time
NR_THREAD=20
def shell(cmd):
subprocess.call(cmd,shell=True)
print("Done! {0}.".format(cmd))
start=time.time()
cmd = 'mkdir FTRL/tmp -p'
shell(cmd)
cmd = 'mkdir FTRL/data -p'
shell(cmd)
#cmd = 'FTRL/ensamble/ensamble.py -s {nr_thread} -f 5 ffmData/Filter100/click_train.ffm ffmData/Filter100/click_test.ffm FTRL/data/click_train_out.txt FTRL/data/click_test_out.txt '.format(nr_thread=NR_THREAD)
#shell(cmd)
cmd = 'FTRL/FTRLStarter.py ffmData/filter100/split_train.ffm ffmData/filter100/split_test.ffm FTRL/tmp/split_test_cv.out ffmData/filter100/click_test.ffm FTRL/tmp/click_test_cv.out'
shell(cmd)
cmd='util/map.py FTRL/tmp/split_test_cv.out data/split_test.csv'
shell(cmd)
print('time used = {0:.0f}'.format(time.time()-start))
| normal |
{
"blob_id": "2a0172641c48c47f048bf5e9f1889b29abbb0b7c",
"index": 767,
"step-1": "<mask token>\n\n\ndef shell(cmd):\n subprocess.call(cmd, shell=True)\n print('Done! {0}.'.format(cmd))\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef shell(cmd):\n subprocess.call(cmd, shell=True)\n print('Done! {0}.'.format(cmd))\n\n\n<mask token>\nshell(cmd)\n<mask token>\nshell(cmd)\n<mask token>\nshell(cmd)\n<mask token>\nshell(cmd)\nprint('time used = {0:.0f}'.format(time.time() - start))\n",
"step-3": "<mask token>\nNR_THREAD = 20\n\n\ndef shell(cmd):\n subprocess.call(cmd, shell=True)\n print('Done! {0}.'.format(cmd))\n\n\nstart = time.time()\ncmd = 'mkdir FTRL/tmp -p'\nshell(cmd)\ncmd = 'mkdir FTRL/data -p'\nshell(cmd)\ncmd = (\n 'FTRL/FTRLStarter.py ffmData/filter100/split_train.ffm ffmData/filter100/split_test.ffm FTRL/tmp/split_test_cv.out ffmData/filter100/click_test.ffm FTRL/tmp/click_test_cv.out'\n )\nshell(cmd)\ncmd = 'util/map.py FTRL/tmp/split_test_cv.out data/split_test.csv'\nshell(cmd)\nprint('time used = {0:.0f}'.format(time.time() - start))\n",
"step-4": "from __future__ import division, absolute_import, print_function, unicode_literals\nimport argparse, csv, sys, subprocess, time\nNR_THREAD = 20\n\n\ndef shell(cmd):\n subprocess.call(cmd, shell=True)\n print('Done! {0}.'.format(cmd))\n\n\nstart = time.time()\ncmd = 'mkdir FTRL/tmp -p'\nshell(cmd)\ncmd = 'mkdir FTRL/data -p'\nshell(cmd)\ncmd = (\n 'FTRL/FTRLStarter.py ffmData/filter100/split_train.ffm ffmData/filter100/split_test.ffm FTRL/tmp/split_test_cv.out ffmData/filter100/click_test.ffm FTRL/tmp/click_test_cv.out'\n )\nshell(cmd)\ncmd = 'util/map.py FTRL/tmp/split_test_cv.out data/split_test.csv'\nshell(cmd)\nprint('time used = {0:.0f}'.format(time.time() - start))\n",
"step-5": "#!/usr/bin/env python3\n#coding=utf8\nfrom __future__ import (division,absolute_import,print_function,unicode_literals)\nimport argparse, csv, sys,subprocess,time\n\nNR_THREAD=20\ndef shell(cmd):\n subprocess.call(cmd,shell=True)\n print(\"Done! {0}.\".format(cmd))\n\nstart=time.time()\n\ncmd = 'mkdir FTRL/tmp -p'\nshell(cmd)\n\ncmd = 'mkdir FTRL/data -p'\nshell(cmd)\n\n#cmd = 'FTRL/ensamble/ensamble.py -s {nr_thread} -f 5 ffmData/Filter100/click_train.ffm ffmData/Filter100/click_test.ffm FTRL/data/click_train_out.txt FTRL/data/click_test_out.txt '.format(nr_thread=NR_THREAD)\n#shell(cmd)\n\ncmd = 'FTRL/FTRLStarter.py ffmData/filter100/split_train.ffm ffmData/filter100/split_test.ffm FTRL/tmp/split_test_cv.out ffmData/filter100/click_test.ffm FTRL/tmp/click_test_cv.out'\nshell(cmd)\n\ncmd='util/map.py FTRL/tmp/split_test_cv.out data/split_test.csv'\nshell(cmd)\n\nprint('time used = {0:.0f}'.format(time.time()-start))\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
| [1, 2, 3, 4, 5] |
from flask import Blueprint, current_app
logging = Blueprint("logging", __name__)
@logging.route("/debug/")
def debug():
current_app.logger.debug("some debug message")
return ""
@logging.route("/warning/")
def warning():
current_app.logger.warning("some warning message")
return ""
@logging.route("/error/")
def error():
current_app.logger.error("some error message")
return ""
| normal |
{
"blob_id": "7c2d57a8368eb8d1699364c60e98766e66f01569",
"index": 4659,
"step-1": "<mask token>\n\n\[email protected]('/debug/')\ndef debug():\n current_app.logger.debug('some debug message')\n return ''\n\n\n<mask token>\n\n\[email protected]('/error/')\ndef error():\n current_app.logger.error('some error message')\n return ''\n",
"step-2": "<mask token>\n\n\[email protected]('/debug/')\ndef debug():\n current_app.logger.debug('some debug message')\n return ''\n\n\[email protected]('/warning/')\ndef warning():\n current_app.logger.warning('some warning message')\n return ''\n\n\[email protected]('/error/')\ndef error():\n current_app.logger.error('some error message')\n return ''\n",
"step-3": "<mask token>\nlogging = Blueprint('logging', __name__)\n\n\[email protected]('/debug/')\ndef debug():\n current_app.logger.debug('some debug message')\n return ''\n\n\[email protected]('/warning/')\ndef warning():\n current_app.logger.warning('some warning message')\n return ''\n\n\[email protected]('/error/')\ndef error():\n current_app.logger.error('some error message')\n return ''\n",
"step-4": "from flask import Blueprint, current_app\nlogging = Blueprint('logging', __name__)\n\n\[email protected]('/debug/')\ndef debug():\n current_app.logger.debug('some debug message')\n return ''\n\n\[email protected]('/warning/')\ndef warning():\n current_app.logger.warning('some warning message')\n return ''\n\n\[email protected]('/error/')\ndef error():\n current_app.logger.error('some error message')\n return ''\n",
"step-5": "\nfrom flask import Blueprint, current_app\n\n\nlogging = Blueprint(\"logging\", __name__)\n\n\[email protected](\"/debug/\")\ndef debug():\n\n current_app.logger.debug(\"some debug message\")\n return \"\"\n\n\[email protected](\"/warning/\")\ndef warning():\n\n current_app.logger.warning(\"some warning message\")\n return \"\"\n\n\[email protected](\"/error/\")\ndef error():\n\n current_app.logger.error(\"some error message\")\n return \"\"\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
import turtle
def distance(x1, y1, x2, y2):
return ((x1 - x2) * (x1 - x2) + (y1 - y2) * (y1 - y2)) ** 0.5
x1, y1 = eval(input("Enter x1 and y1 for point 1: "))
x2, y2 = eval(input("Enter x2 and y2 for point 2: "))
distanceBetweenPoints = distance(x1, y1, x2, y2)
turtle.penup()
turtle.goto(x1, y1)
turtle.pendown()
turtle.write("Point 1")
turtle.goto(x2, y2)
turtle.write("Point 2")
#Center of line
turtle.penup()
turtle.goto((x1 + x2) / 2, (y1 + y2) / 2)
turtle.write("Distance")
turtle.done()
|
normal
|
{
"blob_id": "9f8065dfdfe07985244e18d92b59e1c045388a72",
"index": 2557,
"step-1": "<mask token>\n\n\ndef distance(x1, y1, x2, y2):\n return ((x1 - x2) * (x1 - x2) + (y1 - y2) * (y1 - y2)) ** 0.5\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef distance(x1, y1, x2, y2):\n return ((x1 - x2) * (x1 - x2) + (y1 - y2) * (y1 - y2)) ** 0.5\n\n\n<mask token>\nturtle.penup()\nturtle.goto(x1, y1)\nturtle.pendown()\nturtle.write('Point 1')\nturtle.goto(x2, y2)\nturtle.write('Point 2')\nturtle.penup()\nturtle.goto((x1 + x2) / 2, (y1 + y2) / 2)\nturtle.write('Distance')\nturtle.done()\n",
"step-3": "<mask token>\n\n\ndef distance(x1, y1, x2, y2):\n return ((x1 - x2) * (x1 - x2) + (y1 - y2) * (y1 - y2)) ** 0.5\n\n\nx1, y1 = eval(input('Enter x1 and y1 for point 1: '))\nx2, y2 = eval(input('Enter x2 and y2 for point 2: '))\ndistanceBetweenPoints = distance(x1, y1, x2, y2)\nturtle.penup()\nturtle.goto(x1, y1)\nturtle.pendown()\nturtle.write('Point 1')\nturtle.goto(x2, y2)\nturtle.write('Point 2')\nturtle.penup()\nturtle.goto((x1 + x2) / 2, (y1 + y2) / 2)\nturtle.write('Distance')\nturtle.done()\n",
"step-4": "import turtle\n\n\ndef distance(x1, y1, x2, y2):\n return ((x1 - x2) * (x1 - x2) + (y1 - y2) * (y1 - y2)) ** 0.5\n\n\nx1, y1 = eval(input('Enter x1 and y1 for point 1: '))\nx2, y2 = eval(input('Enter x2 and y2 for point 2: '))\ndistanceBetweenPoints = distance(x1, y1, x2, y2)\nturtle.penup()\nturtle.goto(x1, y1)\nturtle.pendown()\nturtle.write('Point 1')\nturtle.goto(x2, y2)\nturtle.write('Point 2')\nturtle.penup()\nturtle.goto((x1 + x2) / 2, (y1 + y2) / 2)\nturtle.write('Distance')\nturtle.done()\n",
"step-5": "import turtle\n\ndef distance(x1, y1, x2, y2):\n return ((x1 - x2) * (x1 - x2) + (y1 - y2) * (y1 - y2)) ** 0.5\n\nx1, y1 = eval(input(\"Enter x1 and y1 for point 1: \"))\nx2, y2 = eval(input(\"Enter x2 and y2 for point 2: \"))\n\ndistanceBetweenPoints = distance(x1, y1, x2, y2)\n\nturtle.penup()\nturtle.goto(x1, y1)\nturtle.pendown()\nturtle.write(\"Point 1\")\nturtle.goto(x2, y2)\nturtle.write(\"Point 2\")\n\n#Center of line\nturtle.penup()\nturtle.goto((x1 + x2) / 2, (y1 + y2) / 2)\nturtle.write(\"Distance\")\n\n\n\nturtle.done()",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
from django.db import models
from colorfield.fields import ColorField
from api import settings
from os.path import splitext
from datetime import datetime, timedelta
from PIL import Image
def saveTaskPhoto(instance,filename):
taskId = instance.id
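    # NOTE: for a brand-new Task the primary key may still be None when upload_to runs,
    # so the stored file name can come out as tasks/task_None.<ext>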
name,ext = splitext(filename)
return f'tasks/task_{taskId}{ext}'
class Task(models.Model):
difficulty = models.IntegerField(default=0)
category = models.SmallIntegerField(default=0)
user = models.ForeignKey(settings.AUTH_USER_MODEL,on_delete=models.CASCADE)
title = models.CharField(max_length=150)
    repeat = models.IntegerField() # Set when a task is created and will not change
frequency = models.IntegerField() # number of days
duration = models.IntegerField(null=True,blank=True) # number of minutes
thumbnail = models.ImageField(null=True,blank=True,upload_to=saveTaskPhoto)
description = models.TextField(null=True,blank=True)
begin = models.DateField(auto_now_add=True)
lastBegin = models.DateTimeField(null=True,blank=True)
    done = models.IntegerField(default=0) # how many times the task has been done
class Meta:
unique_together = ('title', 'user',)
def save(self, *args, **kwargs):
super(Task, self).save(*args, **kwargs)
if(self.thumbnail):
image = Image.open(self.thumbnail.path)
output_size = (50, 50)
image.thumbnail(output_size)
image.save(self.thumbnail.path)
@property
def points(self):
        # difficulty is a plain IntegerField, so use its integer value directly
        return self.done * self.difficulty
@property
def finish(self):
return self.done == self.repeat
@property
def predictedEnd(self):
today = datetime.today()
delta = (self.repeat - self.done)*self.frequency
end = today + timedelta(days=delta)
return end.date()
@property
def state(self):
if self.lastBegin is None :
return "to do"
now = datetime.now()
delta = now - self.lastBegin
if delta.days >= self.frequency :
return "to do"
else :
if self.duration and self.lastBegin + timedelta(minutes=self.duration) > now :
return "doing"
else :
return "done"
def __str__(self):
return f'{self.title} / {self.id}'
|
normal
|
{
"blob_id": "e59bd92a94399d4a81687fc5e52e9ae04b9de768",
"index": 7472,
"step-1": "<mask token>\n\n\nclass Task(models.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\n class Meta:\n unique_together = 'title', 'user'\n\n def save(self, *args, **kwargs):\n super(Task, self).save(*args, **kwargs)\n if self.thumbnail:\n image = Image.open(self.thumbnail.path)\n output_size = 50, 50\n image.thumbnail(output_size)\n image.save(self.thumbnail.path)\n\n @property\n def points(self):\n return self.done * self.difficulty.points\n\n @property\n def finish(self):\n return self.done == self.repeat\n\n @property\n def predictedEnd(self):\n today = datetime.today()\n delta = (self.repeat - self.done) * self.frequency\n end = today + timedelta(days=delta)\n return end.date()\n\n @property\n def state(self):\n if self.lastBegin is None:\n return 'to do'\n now = datetime.now()\n delta = now - self.lastBegin\n if delta.days >= self.frequency:\n return 'to do'\n elif self.duration and self.lastBegin + timedelta(minutes=self.duration\n ) > now:\n return 'doing'\n else:\n return 'done'\n\n def __str__(self):\n return f'{self.title} / {self.id}'\n",
"step-2": "<mask token>\n\n\nclass Task(models.Model):\n difficulty = models.IntegerField(default=0)\n category = models.SmallIntegerField(default=0)\n user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE\n )\n title = models.CharField(max_length=150)\n repeat = models.IntegerField()\n frequency = models.IntegerField()\n duration = models.IntegerField(null=True, blank=True)\n thumbnail = models.ImageField(null=True, blank=True, upload_to=\n saveTaskPhoto)\n description = models.TextField(null=True, blank=True)\n begin = models.DateField(auto_now_add=True)\n lastBegin = models.DateTimeField(null=True, blank=True)\n done = models.IntegerField(default=0)\n\n\n class Meta:\n unique_together = 'title', 'user'\n\n def save(self, *args, **kwargs):\n super(Task, self).save(*args, **kwargs)\n if self.thumbnail:\n image = Image.open(self.thumbnail.path)\n output_size = 50, 50\n image.thumbnail(output_size)\n image.save(self.thumbnail.path)\n\n @property\n def points(self):\n return self.done * self.difficulty.points\n\n @property\n def finish(self):\n return self.done == self.repeat\n\n @property\n def predictedEnd(self):\n today = datetime.today()\n delta = (self.repeat - self.done) * self.frequency\n end = today + timedelta(days=delta)\n return end.date()\n\n @property\n def state(self):\n if self.lastBegin is None:\n return 'to do'\n now = datetime.now()\n delta = now - self.lastBegin\n if delta.days >= self.frequency:\n return 'to do'\n elif self.duration and self.lastBegin + timedelta(minutes=self.duration\n ) > now:\n return 'doing'\n else:\n return 'done'\n\n def __str__(self):\n return f'{self.title} / {self.id}'\n",
"step-3": "<mask token>\n\n\ndef saveTaskPhoto(instance, filename):\n taskId = instance.id\n name, ext = splitext(filename)\n return f'tasks/task_{taskId}{ext}'\n\n\nclass Task(models.Model):\n difficulty = models.IntegerField(default=0)\n category = models.SmallIntegerField(default=0)\n user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE\n )\n title = models.CharField(max_length=150)\n repeat = models.IntegerField()\n frequency = models.IntegerField()\n duration = models.IntegerField(null=True, blank=True)\n thumbnail = models.ImageField(null=True, blank=True, upload_to=\n saveTaskPhoto)\n description = models.TextField(null=True, blank=True)\n begin = models.DateField(auto_now_add=True)\n lastBegin = models.DateTimeField(null=True, blank=True)\n done = models.IntegerField(default=0)\n\n\n class Meta:\n unique_together = 'title', 'user'\n\n def save(self, *args, **kwargs):\n super(Task, self).save(*args, **kwargs)\n if self.thumbnail:\n image = Image.open(self.thumbnail.path)\n output_size = 50, 50\n image.thumbnail(output_size)\n image.save(self.thumbnail.path)\n\n @property\n def points(self):\n return self.done * self.difficulty.points\n\n @property\n def finish(self):\n return self.done == self.repeat\n\n @property\n def predictedEnd(self):\n today = datetime.today()\n delta = (self.repeat - self.done) * self.frequency\n end = today + timedelta(days=delta)\n return end.date()\n\n @property\n def state(self):\n if self.lastBegin is None:\n return 'to do'\n now = datetime.now()\n delta = now - self.lastBegin\n if delta.days >= self.frequency:\n return 'to do'\n elif self.duration and self.lastBegin + timedelta(minutes=self.duration\n ) > now:\n return 'doing'\n else:\n return 'done'\n\n def __str__(self):\n return f'{self.title} / {self.id}'\n",
"step-4": "from django.db import models\nfrom colorfield.fields import ColorField\nfrom api import settings\nfrom os.path import splitext\nfrom datetime import datetime, timedelta\nfrom PIL import Image\n\n\ndef saveTaskPhoto(instance, filename):\n taskId = instance.id\n name, ext = splitext(filename)\n return f'tasks/task_{taskId}{ext}'\n\n\nclass Task(models.Model):\n difficulty = models.IntegerField(default=0)\n category = models.SmallIntegerField(default=0)\n user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE\n )\n title = models.CharField(max_length=150)\n repeat = models.IntegerField()\n frequency = models.IntegerField()\n duration = models.IntegerField(null=True, blank=True)\n thumbnail = models.ImageField(null=True, blank=True, upload_to=\n saveTaskPhoto)\n description = models.TextField(null=True, blank=True)\n begin = models.DateField(auto_now_add=True)\n lastBegin = models.DateTimeField(null=True, blank=True)\n done = models.IntegerField(default=0)\n\n\n class Meta:\n unique_together = 'title', 'user'\n\n def save(self, *args, **kwargs):\n super(Task, self).save(*args, **kwargs)\n if self.thumbnail:\n image = Image.open(self.thumbnail.path)\n output_size = 50, 50\n image.thumbnail(output_size)\n image.save(self.thumbnail.path)\n\n @property\n def points(self):\n return self.done * self.difficulty.points\n\n @property\n def finish(self):\n return self.done == self.repeat\n\n @property\n def predictedEnd(self):\n today = datetime.today()\n delta = (self.repeat - self.done) * self.frequency\n end = today + timedelta(days=delta)\n return end.date()\n\n @property\n def state(self):\n if self.lastBegin is None:\n return 'to do'\n now = datetime.now()\n delta = now - self.lastBegin\n if delta.days >= self.frequency:\n return 'to do'\n elif self.duration and self.lastBegin + timedelta(minutes=self.duration\n ) > now:\n return 'doing'\n else:\n return 'done'\n\n def __str__(self):\n return f'{self.title} / {self.id}'\n",
"step-5": "from django.db import models\nfrom colorfield.fields import ColorField\nfrom api import settings\nfrom os.path import splitext\nfrom datetime import datetime, timedelta\nfrom PIL import Image\n\n\ndef saveTaskPhoto(instance,filename):\n taskId = instance.id\n name,ext = splitext(filename)\n return f'tasks/task_{taskId}{ext}'\n \nclass Task(models.Model):\n difficulty = models.IntegerField(default=0)\n category = models.SmallIntegerField(default=0)\n user = models.ForeignKey(settings.AUTH_USER_MODEL,on_delete=models.CASCADE)\n title = models.CharField(max_length=150)\n repeat = models.IntegerField() # Set when a tasks is created and will not channge\n frequency = models.IntegerField() # number of days\n duration = models.IntegerField(null=True,blank=True) # number of minutes\n thumbnail = models.ImageField(null=True,blank=True,upload_to=saveTaskPhoto) \n description = models.TextField(null=True,blank=True)\n begin = models.DateField(auto_now_add=True)\n lastBegin = models.DateTimeField(null=True,blank=True)\n done = models.IntegerField(default=0) # how many time the tasks has been done\n\n class Meta:\n unique_together = ('title', 'user',)\n\n def save(self, *args, **kwargs):\n super(Task, self).save(*args, **kwargs)\n if(self.thumbnail):\n image = Image.open(self.thumbnail.path)\n output_size = (50, 50) \n image.thumbnail(output_size)\n image.save(self.thumbnail.path)\n\n @property\n def points(self):\n return self.done * self.difficulty.points\n \n @property\n def finish(self):\n return self.done == self.repeat\n \n @property\n def predictedEnd(self):\n today = datetime.today()\n delta = (self.repeat - self.done)*self.frequency\n end = today + timedelta(days=delta)\n return end.date()\n \n @property\n def state(self):\n if self.lastBegin is None :\n return \"to do\"\n now = datetime.now()\n delta = now - self.lastBegin\n if delta.days >= self.frequency :\n return \"to do\"\n else :\n if self.duration and self.lastBegin + timedelta(minutes=self.duration) > now :\n return \"doing\"\n else :\n return \"done\"\n \n def __str__(self):\n return f'{self.title} / {self.id}'\n\n\n",
"step-ids": [
7,
8,
9,
10,
11
]
}
|
[
7,
8,
9,
10,
11
] |
from rest_framework.generics import ListCreateAPIView, RetrieveUpdateDestroyAPIView
from .serializers import ConcertSerializer
from .models import Concert
from .permissions import IsOwnerOrReadOnly
class ConcertList(ListCreateAPIView):
queryset = Concert.objects.all()
serializer_class = ConcertSerializer
class ConcertDetail(RetrieveUpdateDestroyAPIView):
permission_classes = (IsOwnerOrReadOnly,)
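    # only the detail view gets the owner-or-read-only restriction; ConcertList above keeps the project's default permissions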
queryset = Concert.objects.all()
serializer_class = ConcertSerializer
|
normal
|
{
"blob_id": "74ad2ec2cd7cd683a773b0affde4ab0b150d74c5",
"index": 4780,
"step-1": "<mask token>\n\n\nclass ConcertDetail(RetrieveUpdateDestroyAPIView):\n permission_classes = IsOwnerOrReadOnly,\n queryset = Concert.objects.all()\n serializer_class = ConcertSerializer\n",
"step-2": "<mask token>\n\n\nclass ConcertList(ListCreateAPIView):\n <mask token>\n <mask token>\n\n\nclass ConcertDetail(RetrieveUpdateDestroyAPIView):\n permission_classes = IsOwnerOrReadOnly,\n queryset = Concert.objects.all()\n serializer_class = ConcertSerializer\n",
"step-3": "<mask token>\n\n\nclass ConcertList(ListCreateAPIView):\n queryset = Concert.objects.all()\n serializer_class = ConcertSerializer\n\n\nclass ConcertDetail(RetrieveUpdateDestroyAPIView):\n permission_classes = IsOwnerOrReadOnly,\n queryset = Concert.objects.all()\n serializer_class = ConcertSerializer\n",
"step-4": "from rest_framework.generics import ListCreateAPIView, RetrieveUpdateDestroyAPIView\nfrom .serializers import ConcertSerializer\nfrom .models import Concert\nfrom .permissions import IsOwnerOrReadOnly\n\n\nclass ConcertList(ListCreateAPIView):\n queryset = Concert.objects.all()\n serializer_class = ConcertSerializer\n\n\nclass ConcertDetail(RetrieveUpdateDestroyAPIView):\n permission_classes = IsOwnerOrReadOnly,\n queryset = Concert.objects.all()\n serializer_class = ConcertSerializer\n",
"step-5": "from rest_framework.generics import ListCreateAPIView, RetrieveUpdateDestroyAPIView\nfrom .serializers import ConcertSerializer\nfrom .models import Concert\nfrom .permissions import IsOwnerOrReadOnly\n\nclass ConcertList(ListCreateAPIView):\n queryset = Concert.objects.all()\n serializer_class = ConcertSerializer\n\n\nclass ConcertDetail(RetrieveUpdateDestroyAPIView):\n permission_classes = (IsOwnerOrReadOnly,)\n queryset = Concert.objects.all()\n serializer_class = ConcertSerializer\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
import pytest
import torch
from homura.utils.containers import Map, TensorTuple
def test_map():
map = Map(a=1, b=2)
map["c"] = 3
for k, v in map.items():
assert map[k] == getattr(map, k)
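    # Map is expected to reject attribute names that would shadow dict-style methods (raises KeyError)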
for k in ["update", "keys", "items", "values", "clear", "copy", "get", "pop"]:
with pytest.raises(KeyError):
setattr(map, k, 1)
def test_tensortuple():
a = torch.randn(3, 3), torch.randn(3, 3)
t = TensorTuple(a)
assert t[0].dtype == torch.float32
assert t.to(torch.int32)[0].dtype == torch.int32
|
normal
|
{
"blob_id": "c70b4ff26abe3d85e41bfc7a32cf6e1ce4c48d07",
"index": 6291,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef test_tensortuple():\n a = torch.randn(3, 3), torch.randn(3, 3)\n t = TensorTuple(a)\n assert t[0].dtype == torch.float32\n assert t.to(torch.int32)[0].dtype == torch.int32\n",
"step-3": "<mask token>\n\n\ndef test_map():\n map = Map(a=1, b=2)\n map['c'] = 3\n for k, v in map.items():\n assert map[k] == getattr(map, k)\n for k in ['update', 'keys', 'items', 'values', 'clear', 'copy', 'get',\n 'pop']:\n with pytest.raises(KeyError):\n setattr(map, k, 1)\n\n\ndef test_tensortuple():\n a = torch.randn(3, 3), torch.randn(3, 3)\n t = TensorTuple(a)\n assert t[0].dtype == torch.float32\n assert t.to(torch.int32)[0].dtype == torch.int32\n",
"step-4": "import pytest\nimport torch\nfrom homura.utils.containers import Map, TensorTuple\n\n\ndef test_map():\n map = Map(a=1, b=2)\n map['c'] = 3\n for k, v in map.items():\n assert map[k] == getattr(map, k)\n for k in ['update', 'keys', 'items', 'values', 'clear', 'copy', 'get',\n 'pop']:\n with pytest.raises(KeyError):\n setattr(map, k, 1)\n\n\ndef test_tensortuple():\n a = torch.randn(3, 3), torch.randn(3, 3)\n t = TensorTuple(a)\n assert t[0].dtype == torch.float32\n assert t.to(torch.int32)[0].dtype == torch.int32\n",
"step-5": "import pytest\nimport torch\n\nfrom homura.utils.containers import Map, TensorTuple\n\n\ndef test_map():\n map = Map(a=1, b=2)\n map[\"c\"] = 3\n for k, v in map.items():\n assert map[k] == getattr(map, k)\n\n for k in [\"update\", \"keys\", \"items\", \"values\", \"clear\", \"copy\", \"get\", \"pop\"]:\n with pytest.raises(KeyError):\n setattr(map, k, 1)\n\n\ndef test_tensortuple():\n a = torch.randn(3, 3), torch.randn(3, 3)\n t = TensorTuple(a)\n assert t[0].dtype == torch.float32\n\n assert t.to(torch.int32)[0].dtype == torch.int32\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# Given a positive integer n, generate a square matrix containing all the elements
# from 1 to n^2, arranged in clockwise spiral order.
#
# DEMO:
# Input: 3
# Output:
# [
# [ 1, 2, 3 ],
# [ 8, 9, 4 ],
# [ 7, 6, 5 ]
# ]
class Solution:
def generateMatrix(self, n):
"""
        Same idea as problem 54 (Spiral Matrix); pay attention to the boundaries...
:type n: int
:rtype: List[List[int]]
"""
array = [[0 for _ in range(n)] for _ in range(n)]
top = left = 0
bottom = right = n - 1
cur_num = 1
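        # fill one ring per pass: top row left-to-right, right column (corners excluded),
        # bottom row right-to-left, left column bottom-to-top, then shrink all four bounds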
while left <= right and top <= bottom:
for index in range(left, right + 1):
array[top][index] = cur_num
cur_num += 1
for index in range(top + 1, bottom):
array[index][right] = cur_num
cur_num += 1
if top < bottom:
for index in range(right, left - 1, -1):
array[bottom][index] = cur_num
cur_num += 1
if left < right:
for index in range(bottom - 1, top, -1):
array[index][left] = cur_num
cur_num += 1
left += 1
right -= 1
top += 1
bottom -= 1
return array
|
normal
|
{
"blob_id": "f6bfb055e1c1750702580fc9c9295b8528218910",
"index": 7416,
"step-1": "<mask token>\n",
"step-2": "class Solution:\n <mask token>\n",
"step-3": "class Solution:\n\n def generateMatrix(self, n):\n \"\"\"\n 与 54 思路类似,注意边界...\n :type n: int\n :rtype: List[List[int]]\n \"\"\"\n array = [[(0) for _ in range(n)] for _ in range(n)]\n top = left = 0\n bottom = right = n - 1\n cur_num = 1\n while left <= right and top <= bottom:\n for index in range(left, right + 1):\n array[top][index] = cur_num\n cur_num += 1\n for index in range(top + 1, bottom):\n array[index][right] = cur_num\n cur_num += 1\n if top < bottom:\n for index in range(right, left - 1, -1):\n array[bottom][index] = cur_num\n cur_num += 1\n if left < right:\n for index in range(bottom - 1, top, -1):\n array[index][left] = cur_num\n cur_num += 1\n left += 1\n right -= 1\n top += 1\n bottom -= 1\n return array\n",
"step-4": "# 给定一个正整数 n,生成一个包含 1 到 n2 所有元素,且元素按顺时针顺序螺旋排列的正方形矩阵。\n#\n# DEMO:\n# 输入: 3\n# 输出:\n# [\n# [ 1, 2, 3 ],\n# [ 8, 9, 4 ],\n# [ 7, 6, 5 ]\n# ]\n\nclass Solution:\n def generateMatrix(self, n):\n \"\"\"\n 与 54 思路类似,注意边界...\n :type n: int\n :rtype: List[List[int]]\n \"\"\"\n array = [[0 for _ in range(n)] for _ in range(n)]\n top = left = 0\n bottom = right = n - 1\n cur_num = 1\n while left <= right and top <= bottom:\n for index in range(left, right + 1):\n array[top][index] = cur_num\n cur_num += 1\n for index in range(top + 1, bottom):\n array[index][right] = cur_num\n cur_num += 1\n if top < bottom:\n for index in range(right, left - 1, -1):\n array[bottom][index] = cur_num\n cur_num += 1\n if left < right:\n for index in range(bottom - 1, top, -1):\n array[index][left] = cur_num\n cur_num += 1\n left += 1\n right -= 1\n top += 1\n bottom -= 1\n return array\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
# CS 5010 Project
# Team Metro
# Test the data cleaning
import unittest
from cleaning_data import dfClean # import the dataframe we created after cleaning the data
class DataTypesTestCase(unittest.TestCase):
# we will test that each column has the correct data type
    # note that there is a strange occurrence seen below when converting to a pandas dataframe
def test_is_holiday_a_string(self):
holiday = dfClean.iloc[4908,0]
self.assertTrue(isinstance(holiday, str))
def test_is_temperature_a_float(self):
temp = dfClean.iloc[4908,1]
self.assertTrue(isinstance(temp, float))
def test_is_rain_a_float(self):
rain = dfClean.iloc[4908,2]
self.assertTrue(isinstance(rain, float))
def test_is_snow_a_float(self):
snow = dfClean.iloc[4908,3]
self.assertTrue(isinstance(snow, float))
def test_is_clouds_an_int(self):
clouds = dfClean.iloc[4908,4]
self.assertEqual(str(type(clouds)), "<class 'numpy.int64'>")
# pandas converts all of the ints in the list to numpy.int64
# could not figure out how to avoid this
def test_is_weather_main_a_string(self):
weather = dfClean.iloc[4908,5]
self.assertTrue(isinstance(weather, str))
def test_is_weather_descrip_a_string(self):
weather = dfClean.iloc[4908,6]
self.assertTrue(isinstance(weather, str))
def test_is_date_time_a_string(self):
dateTime = dfClean.iloc[4908,7]
self.assertTrue(isinstance(dateTime, str))
def test_is_traffic_an_int(self):
traffic = dfClean.iloc[4908,8]
self.assertEqual(str(type(traffic)), "<class 'numpy.int64'>")
def test_is_month_an_int(self):
month = dfClean.iloc[4908,9]
self.assertEqual(str(type(month)), "<class 'numpy.int64'>")
def test_is_day_an_int(self):
day = dfClean.iloc[4908,10]
self.assertEqual(str(type(day)), "<class 'numpy.int64'>")
def test_is_year_an_int(self):
year = dfClean.iloc[4908,11]
self.assertEqual(str(type(year)), "<class 'numpy.int64'>")
def test_is_hour_an_int(self):
hour = dfClean.iloc[4908,12]
self.assertEqual(str(type(hour)), "<class 'numpy.int64'>")
class DateTimeFormatTestCase(unittest.TestCase):
def test_does_month_have_two_digits(self):
i = 0
booln = True
while i < len(dfClean):
if dfClean.iloc[i,7][2] != "/":
booln = False
i += 1
self.assertTrue(booln)
# make sure that every data point has a two digit month
# in cleaning, 0 should have been added to make it two digits
def test_does_day_have_two_digits(self):
i = 0
booln = True
while i < len(dfClean):
if dfClean.iloc[i,7][5] != "/":
booln = False
i += 1
self.assertTrue(booln)
        # all days in the date/time string should have two digits after cleaning
def test_does_year_have_four_digits(self):
i = 0
booln = True
while i < len(dfClean):
if dfClean.iloc[i,7][6:8] != "20":
booln = False
i += 1
self.assertTrue(booln)
# all years should be in the form 20xx in the date/time string
def test_does_hour_have_two_digits(self):
i = 0
booln = True # since we already tested all of the other cleaning items on the date/time string
while i < len(dfClean): # we can check the hour by checking the length of the whole string
if len(dfClean.iloc[i,7]) != 16: # all in column should have the form "mm/dd/yyyy hh:00"
booln = False
i += 1
self.assertTrue(booln)
# in cleaning, 0 should have been added to make a one digit hour (0-9) two digits (00-09)
# without the other tests this would be a way to check all in one test but would not
# tell us what part of the cleaning on the date/time string did not work correctly
class AppendColumnsTestCase(unittest.TestCase):
# we will check that each of the four new columns (month, day, year, and hour)
    # was appended correctly to the dataset
def test_is_month_column_appending_correctly(self):
i = 0
booln = True
while i < len(dfClean):
if int(dfClean.iloc[i,9]) != int(dfClean.iloc[i,7][:2]):
booln = False
i += 1
self.assertTrue(booln)
# we check that the month in the month column matches that in the original date/time column
def test_is_day_column_apending_correctly(self):
i = 0
booln = True
while i < len(dfClean):
if int(dfClean.iloc[i,10]) != int(dfClean.iloc[i,7][3:5]):
booln = False
i += 1
self.assertTrue(booln)
# we check that the day in the day column matches that in the original date/time column
def test_is_year_column_apending_correctly(self):
i = 0
booln = True
while i < len(dfClean):
if int(dfClean.iloc[i,11]) != int(dfClean.iloc[i,7][6:10]):
booln = False
i += 1
self.assertTrue(booln)
# we check that the year in the year column matches that in the original date/time column
def test_is_hour_column_apending_correctly(self):
i = 0
booln = True
while i < len(dfClean):
if int(dfClean.iloc[i,12]) != int(dfClean.iloc[i,7][11:13]):
booln = False
i += 1
self.assertTrue(booln)
# we check that the hour in the hour column matches that in the original date/time column
class HolidayTestCase(unittest.TestCase):
# we test that every hour of the same day has a consistent holiday
def test_are_all_hours_correct_holiday(self):
i = 0
booln = True
hol = "None"
while i < len(dfClean):
if dfClean.iloc[i,12] == 0:
hol = dfClean.iloc[i,0]
else:
if dfClean.iloc[i,0] != hol:
booln = False
i += 1
self.assertTrue(booln)
class UniqueDataPointsTestCase(unittest.TestCase):
# this test ensures that no two data points have the exact same date and hour
def test_are_all_datetimes_unique(self):
i = 1
booln = True
while i < len(dfClean):
if dfClean.iloc[i,7] == dfClean.iloc[i-1,7]:
booln = False
i += 1
self.assertTrue(booln)
class TemperatureConversionTestCase(unittest.TestCase):
# we test that the temperature was converted to Fahrenheit
# note that since we overrode the original temperature, we simply check for
# outlier that would make sense as Kelvin values but not Fahrenheit values
# This how we discovered there were some missing temperatures input as 0 Kelvin
# because they converted to -450 Fahrenheit
def test_is_temp_converting_from_kelvin_to_F(self):
i = 1
booln = True
while i < len(dfClean):
if (dfClean.iloc[i,1] > 120) | (dfClean.iloc[i,1] < -50):
booln = False
i += 1
self.assertTrue(booln)
if __name__ == '__main__':
unittest.main()
|
normal
|
{
"blob_id": "9d0727970c760a9a8123c5c07359ba5c538cea3c",
"index": 5926,
"step-1": "<mask token>\n\n\nclass DataTypesTestCase(unittest.TestCase):\n <mask token>\n <mask token>\n\n def test_is_rain_a_float(self):\n rain = dfClean.iloc[4908, 2]\n self.assertTrue(isinstance(rain, float))\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\nclass DateTimeFormatTestCase(unittest.TestCase):\n\n def test_does_month_have_two_digits(self):\n i = 0\n booln = True\n while i < len(dfClean):\n if dfClean.iloc[i, 7][2] != '/':\n booln = False\n i += 1\n self.assertTrue(booln)\n\n def test_does_day_have_two_digits(self):\n i = 0\n booln = True\n while i < len(dfClean):\n if dfClean.iloc[i, 7][5] != '/':\n booln = False\n i += 1\n self.assertTrue(booln)\n\n def test_does_year_have_four_digits(self):\n i = 0\n booln = True\n while i < len(dfClean):\n if dfClean.iloc[i, 7][6:8] != '20':\n booln = False\n i += 1\n self.assertTrue(booln)\n\n def test_does_hour_have_two_digits(self):\n i = 0\n booln = True\n while i < len(dfClean):\n if len(dfClean.iloc[i, 7]) != 16:\n booln = False\n i += 1\n self.assertTrue(booln)\n\n\nclass AppendColumnsTestCase(unittest.TestCase):\n\n def test_is_month_column_appending_correctly(self):\n i = 0\n booln = True\n while i < len(dfClean):\n if int(dfClean.iloc[i, 9]) != int(dfClean.iloc[i, 7][:2]):\n booln = False\n i += 1\n self.assertTrue(booln)\n\n def test_is_day_column_apending_correctly(self):\n i = 0\n booln = True\n while i < len(dfClean):\n if int(dfClean.iloc[i, 10]) != int(dfClean.iloc[i, 7][3:5]):\n booln = False\n i += 1\n self.assertTrue(booln)\n\n def test_is_year_column_apending_correctly(self):\n i = 0\n booln = True\n while i < len(dfClean):\n if int(dfClean.iloc[i, 11]) != int(dfClean.iloc[i, 7][6:10]):\n booln = False\n i += 1\n self.assertTrue(booln)\n\n def test_is_hour_column_apending_correctly(self):\n i = 0\n booln = True\n while i < len(dfClean):\n if int(dfClean.iloc[i, 12]) != int(dfClean.iloc[i, 7][11:13]):\n booln = False\n i += 1\n self.assertTrue(booln)\n\n\nclass HolidayTestCase(unittest.TestCase):\n\n def test_are_all_hours_correct_holiday(self):\n i = 0\n booln = True\n hol = 'None'\n while i < len(dfClean):\n if dfClean.iloc[i, 12] == 0:\n hol = dfClean.iloc[i, 0]\n elif dfClean.iloc[i, 0] != hol:\n booln = False\n i += 1\n self.assertTrue(booln)\n\n\nclass UniqueDataPointsTestCase(unittest.TestCase):\n\n def test_are_all_datetimes_unique(self):\n i = 1\n booln = True\n while i < len(dfClean):\n if dfClean.iloc[i, 7] == dfClean.iloc[i - 1, 7]:\n booln = False\n i += 1\n self.assertTrue(booln)\n\n\nclass TemperatureConversionTestCase(unittest.TestCase):\n\n def test_is_temp_converting_from_kelvin_to_F(self):\n i = 1\n booln = True\n while i < len(dfClean):\n if (dfClean.iloc[i, 1] > 120) | (dfClean.iloc[i, 1] < -50):\n booln = False\n i += 1\n self.assertTrue(booln)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass DataTypesTestCase(unittest.TestCase):\n <mask token>\n <mask token>\n\n def test_is_rain_a_float(self):\n rain = dfClean.iloc[4908, 2]\n self.assertTrue(isinstance(rain, float))\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def test_is_hour_an_int(self):\n hour = dfClean.iloc[4908, 12]\n self.assertEqual(str(type(hour)), \"<class 'numpy.int64'>\")\n\n\nclass DateTimeFormatTestCase(unittest.TestCase):\n\n def test_does_month_have_two_digits(self):\n i = 0\n booln = True\n while i < len(dfClean):\n if dfClean.iloc[i, 7][2] != '/':\n booln = False\n i += 1\n self.assertTrue(booln)\n\n def test_does_day_have_two_digits(self):\n i = 0\n booln = True\n while i < len(dfClean):\n if dfClean.iloc[i, 7][5] != '/':\n booln = False\n i += 1\n self.assertTrue(booln)\n\n def test_does_year_have_four_digits(self):\n i = 0\n booln = True\n while i < len(dfClean):\n if dfClean.iloc[i, 7][6:8] != '20':\n booln = False\n i += 1\n self.assertTrue(booln)\n\n def test_does_hour_have_two_digits(self):\n i = 0\n booln = True\n while i < len(dfClean):\n if len(dfClean.iloc[i, 7]) != 16:\n booln = False\n i += 1\n self.assertTrue(booln)\n\n\nclass AppendColumnsTestCase(unittest.TestCase):\n\n def test_is_month_column_appending_correctly(self):\n i = 0\n booln = True\n while i < len(dfClean):\n if int(dfClean.iloc[i, 9]) != int(dfClean.iloc[i, 7][:2]):\n booln = False\n i += 1\n self.assertTrue(booln)\n\n def test_is_day_column_apending_correctly(self):\n i = 0\n booln = True\n while i < len(dfClean):\n if int(dfClean.iloc[i, 10]) != int(dfClean.iloc[i, 7][3:5]):\n booln = False\n i += 1\n self.assertTrue(booln)\n\n def test_is_year_column_apending_correctly(self):\n i = 0\n booln = True\n while i < len(dfClean):\n if int(dfClean.iloc[i, 11]) != int(dfClean.iloc[i, 7][6:10]):\n booln = False\n i += 1\n self.assertTrue(booln)\n\n def test_is_hour_column_apending_correctly(self):\n i = 0\n booln = True\n while i < len(dfClean):\n if int(dfClean.iloc[i, 12]) != int(dfClean.iloc[i, 7][11:13]):\n booln = False\n i += 1\n self.assertTrue(booln)\n\n\nclass HolidayTestCase(unittest.TestCase):\n\n def test_are_all_hours_correct_holiday(self):\n i = 0\n booln = True\n hol = 'None'\n while i < len(dfClean):\n if dfClean.iloc[i, 12] == 0:\n hol = dfClean.iloc[i, 0]\n elif dfClean.iloc[i, 0] != hol:\n booln = False\n i += 1\n self.assertTrue(booln)\n\n\nclass UniqueDataPointsTestCase(unittest.TestCase):\n\n def test_are_all_datetimes_unique(self):\n i = 1\n booln = True\n while i < len(dfClean):\n if dfClean.iloc[i, 7] == dfClean.iloc[i - 1, 7]:\n booln = False\n i += 1\n self.assertTrue(booln)\n\n\nclass TemperatureConversionTestCase(unittest.TestCase):\n\n def test_is_temp_converting_from_kelvin_to_F(self):\n i = 1\n booln = True\n while i < len(dfClean):\n if (dfClean.iloc[i, 1] > 120) | (dfClean.iloc[i, 1] < -50):\n booln = False\n i += 1\n self.assertTrue(booln)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass DataTypesTestCase(unittest.TestCase):\n <mask token>\n\n def test_is_temperature_a_float(self):\n temp = dfClean.iloc[4908, 1]\n self.assertTrue(isinstance(temp, float))\n\n def test_is_rain_a_float(self):\n rain = dfClean.iloc[4908, 2]\n self.assertTrue(isinstance(rain, float))\n\n def test_is_snow_a_float(self):\n snow = dfClean.iloc[4908, 3]\n self.assertTrue(isinstance(snow, float))\n\n def test_is_clouds_an_int(self):\n clouds = dfClean.iloc[4908, 4]\n self.assertEqual(str(type(clouds)), \"<class 'numpy.int64'>\")\n\n def test_is_weather_main_a_string(self):\n weather = dfClean.iloc[4908, 5]\n self.assertTrue(isinstance(weather, str))\n\n def test_is_weather_descrip_a_string(self):\n weather = dfClean.iloc[4908, 6]\n self.assertTrue(isinstance(weather, str))\n\n def test_is_date_time_a_string(self):\n dateTime = dfClean.iloc[4908, 7]\n self.assertTrue(isinstance(dateTime, str))\n\n def test_is_traffic_an_int(self):\n traffic = dfClean.iloc[4908, 8]\n self.assertEqual(str(type(traffic)), \"<class 'numpy.int64'>\")\n\n def test_is_month_an_int(self):\n month = dfClean.iloc[4908, 9]\n self.assertEqual(str(type(month)), \"<class 'numpy.int64'>\")\n\n def test_is_day_an_int(self):\n day = dfClean.iloc[4908, 10]\n self.assertEqual(str(type(day)), \"<class 'numpy.int64'>\")\n\n def test_is_year_an_int(self):\n year = dfClean.iloc[4908, 11]\n self.assertEqual(str(type(year)), \"<class 'numpy.int64'>\")\n\n def test_is_hour_an_int(self):\n hour = dfClean.iloc[4908, 12]\n self.assertEqual(str(type(hour)), \"<class 'numpy.int64'>\")\n\n\nclass DateTimeFormatTestCase(unittest.TestCase):\n\n def test_does_month_have_two_digits(self):\n i = 0\n booln = True\n while i < len(dfClean):\n if dfClean.iloc[i, 7][2] != '/':\n booln = False\n i += 1\n self.assertTrue(booln)\n\n def test_does_day_have_two_digits(self):\n i = 0\n booln = True\n while i < len(dfClean):\n if dfClean.iloc[i, 7][5] != '/':\n booln = False\n i += 1\n self.assertTrue(booln)\n\n def test_does_year_have_four_digits(self):\n i = 0\n booln = True\n while i < len(dfClean):\n if dfClean.iloc[i, 7][6:8] != '20':\n booln = False\n i += 1\n self.assertTrue(booln)\n\n def test_does_hour_have_two_digits(self):\n i = 0\n booln = True\n while i < len(dfClean):\n if len(dfClean.iloc[i, 7]) != 16:\n booln = False\n i += 1\n self.assertTrue(booln)\n\n\nclass AppendColumnsTestCase(unittest.TestCase):\n\n def test_is_month_column_appending_correctly(self):\n i = 0\n booln = True\n while i < len(dfClean):\n if int(dfClean.iloc[i, 9]) != int(dfClean.iloc[i, 7][:2]):\n booln = False\n i += 1\n self.assertTrue(booln)\n\n def test_is_day_column_apending_correctly(self):\n i = 0\n booln = True\n while i < len(dfClean):\n if int(dfClean.iloc[i, 10]) != int(dfClean.iloc[i, 7][3:5]):\n booln = False\n i += 1\n self.assertTrue(booln)\n\n def test_is_year_column_apending_correctly(self):\n i = 0\n booln = True\n while i < len(dfClean):\n if int(dfClean.iloc[i, 11]) != int(dfClean.iloc[i, 7][6:10]):\n booln = False\n i += 1\n self.assertTrue(booln)\n\n def test_is_hour_column_apending_correctly(self):\n i = 0\n booln = True\n while i < len(dfClean):\n if int(dfClean.iloc[i, 12]) != int(dfClean.iloc[i, 7][11:13]):\n booln = False\n i += 1\n self.assertTrue(booln)\n\n\nclass HolidayTestCase(unittest.TestCase):\n\n def test_are_all_hours_correct_holiday(self):\n i = 0\n booln = True\n hol = 'None'\n while i < len(dfClean):\n if dfClean.iloc[i, 12] == 0:\n hol = dfClean.iloc[i, 0]\n elif dfClean.iloc[i, 0] != hol:\n booln = 
False\n i += 1\n self.assertTrue(booln)\n\n\nclass UniqueDataPointsTestCase(unittest.TestCase):\n\n def test_are_all_datetimes_unique(self):\n i = 1\n booln = True\n while i < len(dfClean):\n if dfClean.iloc[i, 7] == dfClean.iloc[i - 1, 7]:\n booln = False\n i += 1\n self.assertTrue(booln)\n\n\nclass TemperatureConversionTestCase(unittest.TestCase):\n\n def test_is_temp_converting_from_kelvin_to_F(self):\n i = 1\n booln = True\n while i < len(dfClean):\n if (dfClean.iloc[i, 1] > 120) | (dfClean.iloc[i, 1] < -50):\n booln = False\n i += 1\n self.assertTrue(booln)\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass DataTypesTestCase(unittest.TestCase):\n\n def test_is_holiday_a_string(self):\n holiday = dfClean.iloc[4908, 0]\n self.assertTrue(isinstance(holiday, str))\n\n def test_is_temperature_a_float(self):\n temp = dfClean.iloc[4908, 1]\n self.assertTrue(isinstance(temp, float))\n\n def test_is_rain_a_float(self):\n rain = dfClean.iloc[4908, 2]\n self.assertTrue(isinstance(rain, float))\n\n def test_is_snow_a_float(self):\n snow = dfClean.iloc[4908, 3]\n self.assertTrue(isinstance(snow, float))\n\n def test_is_clouds_an_int(self):\n clouds = dfClean.iloc[4908, 4]\n self.assertEqual(str(type(clouds)), \"<class 'numpy.int64'>\")\n\n def test_is_weather_main_a_string(self):\n weather = dfClean.iloc[4908, 5]\n self.assertTrue(isinstance(weather, str))\n\n def test_is_weather_descrip_a_string(self):\n weather = dfClean.iloc[4908, 6]\n self.assertTrue(isinstance(weather, str))\n\n def test_is_date_time_a_string(self):\n dateTime = dfClean.iloc[4908, 7]\n self.assertTrue(isinstance(dateTime, str))\n\n def test_is_traffic_an_int(self):\n traffic = dfClean.iloc[4908, 8]\n self.assertEqual(str(type(traffic)), \"<class 'numpy.int64'>\")\n\n def test_is_month_an_int(self):\n month = dfClean.iloc[4908, 9]\n self.assertEqual(str(type(month)), \"<class 'numpy.int64'>\")\n\n def test_is_day_an_int(self):\n day = dfClean.iloc[4908, 10]\n self.assertEqual(str(type(day)), \"<class 'numpy.int64'>\")\n\n def test_is_year_an_int(self):\n year = dfClean.iloc[4908, 11]\n self.assertEqual(str(type(year)), \"<class 'numpy.int64'>\")\n\n def test_is_hour_an_int(self):\n hour = dfClean.iloc[4908, 12]\n self.assertEqual(str(type(hour)), \"<class 'numpy.int64'>\")\n\n\nclass DateTimeFormatTestCase(unittest.TestCase):\n\n def test_does_month_have_two_digits(self):\n i = 0\n booln = True\n while i < len(dfClean):\n if dfClean.iloc[i, 7][2] != '/':\n booln = False\n i += 1\n self.assertTrue(booln)\n\n def test_does_day_have_two_digits(self):\n i = 0\n booln = True\n while i < len(dfClean):\n if dfClean.iloc[i, 7][5] != '/':\n booln = False\n i += 1\n self.assertTrue(booln)\n\n def test_does_year_have_four_digits(self):\n i = 0\n booln = True\n while i < len(dfClean):\n if dfClean.iloc[i, 7][6:8] != '20':\n booln = False\n i += 1\n self.assertTrue(booln)\n\n def test_does_hour_have_two_digits(self):\n i = 0\n booln = True\n while i < len(dfClean):\n if len(dfClean.iloc[i, 7]) != 16:\n booln = False\n i += 1\n self.assertTrue(booln)\n\n\nclass AppendColumnsTestCase(unittest.TestCase):\n\n def test_is_month_column_appending_correctly(self):\n i = 0\n booln = True\n while i < len(dfClean):\n if int(dfClean.iloc[i, 9]) != int(dfClean.iloc[i, 7][:2]):\n booln = False\n i += 1\n self.assertTrue(booln)\n\n def test_is_day_column_apending_correctly(self):\n i = 0\n booln = True\n while i < len(dfClean):\n if int(dfClean.iloc[i, 10]) != int(dfClean.iloc[i, 7][3:5]):\n booln = False\n i += 1\n self.assertTrue(booln)\n\n def test_is_year_column_apending_correctly(self):\n i = 0\n booln = True\n while i < len(dfClean):\n if int(dfClean.iloc[i, 11]) != int(dfClean.iloc[i, 7][6:10]):\n booln = False\n i += 1\n self.assertTrue(booln)\n\n def test_is_hour_column_apending_correctly(self):\n i = 0\n booln = True\n while i < len(dfClean):\n if int(dfClean.iloc[i, 12]) != int(dfClean.iloc[i, 7][11:13]):\n booln = False\n i += 1\n self.assertTrue(booln)\n\n\nclass HolidayTestCase(unittest.TestCase):\n\n def test_are_all_hours_correct_holiday(self):\n i = 0\n booln = True\n hol = 'None'\n while i < len(dfClean):\n if 
dfClean.iloc[i, 12] == 0:\n hol = dfClean.iloc[i, 0]\n elif dfClean.iloc[i, 0] != hol:\n booln = False\n i += 1\n self.assertTrue(booln)\n\n\nclass UniqueDataPointsTestCase(unittest.TestCase):\n\n def test_are_all_datetimes_unique(self):\n i = 1\n booln = True\n while i < len(dfClean):\n if dfClean.iloc[i, 7] == dfClean.iloc[i - 1, 7]:\n booln = False\n i += 1\n self.assertTrue(booln)\n\n\nclass TemperatureConversionTestCase(unittest.TestCase):\n\n def test_is_temp_converting_from_kelvin_to_F(self):\n i = 1\n booln = True\n while i < len(dfClean):\n if (dfClean.iloc[i, 1] > 120) | (dfClean.iloc[i, 1] < -50):\n booln = False\n i += 1\n self.assertTrue(booln)\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"step-5": "# CS 5010 Project \n\n# Team Metro\n\n# Test the data cleaning\n\nimport unittest\nfrom cleaning_data import dfClean # import the dataframe we created after cleaning the data\n\n\nclass DataTypesTestCase(unittest.TestCase):\n\n # we will test that each column has the correct data type\n # note that there is a strange occurence seen below when converting to a pandas dataframe\n\n def test_is_holiday_a_string(self):\n holiday = dfClean.iloc[4908,0]\n self.assertTrue(isinstance(holiday, str))\n \n def test_is_temperature_a_float(self):\n temp = dfClean.iloc[4908,1]\n self.assertTrue(isinstance(temp, float))\n \n def test_is_rain_a_float(self):\n rain = dfClean.iloc[4908,2]\n self.assertTrue(isinstance(rain, float))\n\n def test_is_snow_a_float(self):\n snow = dfClean.iloc[4908,3]\n self.assertTrue(isinstance(snow, float))\n\n def test_is_clouds_an_int(self):\n clouds = dfClean.iloc[4908,4]\n self.assertEqual(str(type(clouds)), \"<class 'numpy.int64'>\")\n # pandas converts all of the ints in the list to numpy.int64 \n # could not figure out how to avoid this\n\n def test_is_weather_main_a_string(self):\n weather = dfClean.iloc[4908,5]\n self.assertTrue(isinstance(weather, str))\n \n def test_is_weather_descrip_a_string(self):\n weather = dfClean.iloc[4908,6]\n self.assertTrue(isinstance(weather, str))\n\n def test_is_date_time_a_string(self):\n dateTime = dfClean.iloc[4908,7]\n self.assertTrue(isinstance(dateTime, str))\n\n def test_is_traffic_an_int(self):\n traffic = dfClean.iloc[4908,8]\n self.assertEqual(str(type(traffic)), \"<class 'numpy.int64'>\")\n\n def test_is_month_an_int(self):\n month = dfClean.iloc[4908,9]\n self.assertEqual(str(type(month)), \"<class 'numpy.int64'>\")\n\n def test_is_day_an_int(self):\n day = dfClean.iloc[4908,10]\n self.assertEqual(str(type(day)), \"<class 'numpy.int64'>\")\n\n def test_is_year_an_int(self):\n year = dfClean.iloc[4908,11]\n self.assertEqual(str(type(year)), \"<class 'numpy.int64'>\")\n \n def test_is_hour_an_int(self):\n hour = dfClean.iloc[4908,12]\n self.assertEqual(str(type(hour)), \"<class 'numpy.int64'>\")\n\n \n\n\nclass DateTimeFormatTestCase(unittest.TestCase):\n def test_does_month_have_two_digits(self):\n i = 0 \n booln = True\n while i < len(dfClean):\n if dfClean.iloc[i,7][2] != \"/\":\n booln = False\n i += 1\n self.assertTrue(booln)\n # make sure that every data point has a two digit month\n # in cleaning, 0 should have been added to make it two digits\n \n def test_does_day_have_two_digits(self):\n i = 0 \n booln = True\n while i < len(dfClean):\n if dfClean.iloc[i,7][5] != \"/\":\n booln = False\n i += 1\n self.assertTrue(booln)\n # all months in the date/time string should have two digits after cleaning\n\n def test_does_year_have_four_digits(self):\n i = 0 \n booln = True\n while i < len(dfClean):\n if dfClean.iloc[i,7][6:8] != \"20\":\n booln = False\n i += 1\n self.assertTrue(booln)\n # all years should be in the form 20xx in the date/time string\n \n def test_does_hour_have_two_digits(self):\n i = 0\n booln = True # since we already tested all of the other cleaning items on the date/time string\n while i < len(dfClean): # we can check the hour by checking the length of the whole string\n if len(dfClean.iloc[i,7]) != 16: # all in column should have the form \"mm/dd/yyyy hh:00\"\n booln = False\n i += 1\n self.assertTrue(booln) \n # in cleaning, 0 should have been added to make a one digit hour (0-9) two digits (00-09)\n # without the other tests this would be a way to check all in one test but would not\n # tell 
us what part of the cleaning on the date/time string did not work correctly\n\n\nclass AppendColumnsTestCase(unittest.TestCase):\n # we will check that each of the four new columns (month, day, year, and hour)\n # appended correctly to the dataset\n def test_is_month_column_appending_correctly(self):\n i = 0\n booln = True\n while i < len(dfClean):\n if int(dfClean.iloc[i,9]) != int(dfClean.iloc[i,7][:2]):\n booln = False\n i += 1\n self.assertTrue(booln)\n # we check that the month in the month column matches that in the original date/time column\n \n def test_is_day_column_apending_correctly(self):\n i = 0\n booln = True\n while i < len(dfClean):\n if int(dfClean.iloc[i,10]) != int(dfClean.iloc[i,7][3:5]):\n booln = False\n i += 1\n self.assertTrue(booln)\n # we check that the day in the day column matches that in the original date/time column\n\n def test_is_year_column_apending_correctly(self):\n i = 0\n booln = True\n while i < len(dfClean):\n if int(dfClean.iloc[i,11]) != int(dfClean.iloc[i,7][6:10]):\n booln = False\n i += 1\n self.assertTrue(booln)\n # we check that the year in the year column matches that in the original date/time column\n\n\n def test_is_hour_column_apending_correctly(self):\n i = 0\n booln = True\n while i < len(dfClean):\n if int(dfClean.iloc[i,12]) != int(dfClean.iloc[i,7][11:13]):\n booln = False\n i += 1\n self.assertTrue(booln)\n # we check that the hour in the hour column matches that in the original date/time column\n \n\nclass HolidayTestCase(unittest.TestCase):\n # we test that every hour of the same day has a consistent holiday\n def test_are_all_hours_correct_holiday(self):\n i = 0\n booln = True\n hol = \"None\"\n while i < len(dfClean):\n if dfClean.iloc[i,12] == 0:\n hol = dfClean.iloc[i,0]\n else:\n if dfClean.iloc[i,0] != hol:\n booln = False\n i += 1\n self.assertTrue(booln)\n\n\nclass UniqueDataPointsTestCase(unittest.TestCase):\n # this test ensures that no two data points have the exact same date and hour\n def test_are_all_datetimes_unique(self):\n i = 1\n booln = True\n while i < len(dfClean):\n if dfClean.iloc[i,7] == dfClean.iloc[i-1,7]:\n booln = False\n i += 1\n self.assertTrue(booln)\n \n\nclass TemperatureConversionTestCase(unittest.TestCase):\n # we test that the temperature was converted to Fahrenheit\n # note that since we overrode the original temperature, we simply check for \n # outlier that would make sense as Kelvin values but not Fahrenheit values\n # This how we discovered there were some missing temperatures input as 0 Kelvin\n # because they converted to -450 Fahrenheit\n def test_is_temp_converting_from_kelvin_to_F(self):\n i = 1\n booln = True\n while i < len(dfClean):\n if (dfClean.iloc[i,1] > 120) | (dfClean.iloc[i,1] < -50):\n booln = False\n i += 1\n self.assertTrue(booln)\n\nif __name__ == '__main__': \n unittest.main() ",
"step-ids": [
18,
19,
29,
31,
33
]
}
|
[
18,
19,
29,
31,
33
] |
class Mood(object):
GENERIC = 1
HIGH_TEMP = 2
LOW_TEMP = 3
HIGH_DUST = 4
LOW_DUST = 5
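    # note: decision() below only returns the temperature-based moods; the dust constants are defined but unused here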
def decision(self, data):
temp = float(data)
if temp <= 10:
return self.LOW_TEMP
if temp > 30:
return self.HIGH_TEMP
        if 10 < temp <= 30:
return self.GENERIC
|
normal
|
{
"blob_id": "511016b9cd54f6824360d609ede233b9cc3e4447",
"index": 7564,
"step-1": "<mask token>\n",
"step-2": "class Mood(object):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n",
"step-3": "class Mood(object):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def decision(self, data):\n temp = float(data)\n if temp <= 10:\n return self.LOW_TEMP\n if temp > 30:\n return self.HIGH_TEMP\n if 10 < temp <= 30:\n return self.GENERIC\n",
"step-4": "class Mood(object):\n GENERIC = 1\n HIGH_TEMP = 2\n LOW_TEMP = 3\n HIGH_DUST = 4\n LOW_DUST = 5\n\n def decision(self, data):\n temp = float(data)\n if temp <= 10:\n return self.LOW_TEMP\n if temp > 30:\n return self.HIGH_TEMP\n if 10 < temp <= 30:\n return self.GENERIC\n",
"step-5": "class Mood(object):\n\n GENERIC = 1\n HIGH_TEMP = 2\n LOW_TEMP = 3\n HIGH_DUST = 4\n LOW_DUST = 5\n\n def decision(self, data):\n temp = float(data)\n\n if temp <= 10:\n return self.LOW_TEMP\n\n if temp > 30:\n return self.HIGH_TEMP\n\n if (10 < temp <=30):\n return self.GENERIC\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import pandas as pd
import random
import math
# takes 2 row Series and calculates the Euclidean distance between them
def euclidean_dist(a: pd.Series, b: pd.Series):
diff = a.sub(other=b)
squares = diff ** 2
dist = 0
for feature_distance in squares:
if not math.isnan(feature_distance):
dist += feature_distance
return math.sqrt(dist)
# takes copy of dataframe; returns initialized centroid array
def choose_centroids(data_copy: pd.DataFrame):
new_centroids = []
    # picks k initial centroids: the first at random, the rest spread out by farthest-point selection
for i in range(0, k):
distance_scores = []
        # after the first centroid, pick the point that lies furthest from the centroids chosen so far;
        # the first centroid is chosen at random
if i != 0:
for j in new_centroids:
distances = []
                # for each existing centroid j, measure its distance to every remaining point;
                # the point furthest from any existing centroid becomes the next centroid
for row in data_copy.iterrows():
distances.append((euclidean_dist(j, row[1]), row[0]))
distances.sort()
distance_scores.append(distances[-1])
distance_scores.sort()
centroid_index = distance_scores[-1][1]
else:
centroid_index = random.randrange(num_rows)
# drops centroid from copied dataframe to avoid duplicates
data_copy.drop(labels=centroid_index, axis=0, inplace=True)
# appends the newly selected centroid to the list
new_centroids.append(data.iloc[centroid_index])
return new_centroids
def assign_centroids():
cluster_ids = [] # array for storing column output
    cluster_dict = {} # dict for mapping centroid row labels (e.g. 89, 102, 34, ...) to cluster IDs 0, 1, ..., k-1
counter = 0
for i in centroids:
if i.name is None:
i.name = counter
cluster_dict[i.name] = counter
counter += 1 # crude way of assigning centroid IDs
for row in data.iterrows():
distances = []
for j in centroids:
dist = euclidean_dist(row[1], j)
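            # a distance of exactly 0 is skipped, so a row identical to a centroid is assigned to its next-nearest centroid instead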
if dist != 0:
distances.append((dist, j.name))
distances.sort()
cluster_ids.append(cluster_dict[distances[0][1]])
# inserts cluster assignment column;
# if column already exists, catches exception and removes the column before insertion
try:
data.insert(6, "ClusterID", cluster_ids)
except ValueError:
data.drop(columns="ClusterID", axis=1, inplace=True)
data.insert(6, "ClusterID", cluster_ids)
except IndexError:
data.drop(columns="ClusterID", axis=1, inplace=True)
data.insert(6, "ClusterID", cluster_ids)
return cluster_ids
def recalculate_clusters():
# for k centroids, take the mean of all values belonging to the centroid and make that point the new centroid
for i in range(0, k):
cluster = pd.DataFrame()
for item in data.iterrows():
if item[1].loc['ClusterID'] == i:
cluster = cluster.append(other=item[1])
centroids[i] = cluster.mean()
data = pd.read_csv("data/fire_data_2011.csv")
# uses a dict to convert from tree genus i.e. "Pinu", "Pice",... to 0, 1,...
counter = 0
tree_count_dict = {}
for i in data.iterrows():
try:
tree_count_dict[i[1]["tree_genus"]]
except KeyError:
tree_count_dict[i[1]["tree_genus"]] = counter
counter += 1
data = data.copy().replace(to_replace=tree_count_dict)
print(data)
k = 7
num_rows = data.iloc[-1].name # gets label of the last row to figure out how many instances are in the data
# pass a temporary copy of the data so selected rows can be dropped, avoiding duplicate centroids
centroids = choose_centroids(data.copy())
cluster_assignments = []
unchanged_iteration_count = 0
for iterations in range(0, 100):
print("Clustering Progress: [", iterations + 1, "/ 100 ]")
# update previous cluster assignments; reassign cluster IDs and recalculate centroids
previous_assignments = cluster_assignments.copy()
cluster_assignments = assign_centroids()
recalculate_clusters()
# checks if cluster assignments have changed from one iteration to another
if previous_assignments == cluster_assignments and len(previous_assignments) > 0:
unchanged_iteration_count += 1
else:
unchanged_iteration_count = 0
# if cluster assignments haven't changed in 3 iterations, break from loop and exit
if unchanged_iteration_count > 3:
print("Exiting early: cluster assignments haven't changed in 3 iterations")
break
print("\nCluster Counts ( k =", k, "):")
for i in range(0, k):
print("Cluster", i + 1, ": ", cluster_assignments.count(i))
print("\n\n", data)
data.to_csv("./data/fire_data_2011_clustered.csv")
|
normal
|
{
"blob_id": "46b51f46f6ed73e3b9dc2f759535ba71facd2aae",
"index": 5712,
"step-1": "<mask token>\n\n\ndef euclidean_dist(a: pd.Series, b: pd.Series):\n diff = a.sub(other=b)\n squares = diff ** 2\n dist = 0\n for feature_distance in squares:\n if not math.isnan(feature_distance):\n dist += feature_distance\n return math.sqrt(dist)\n\n\ndef choose_centroids(data_copy: pd.DataFrame):\n new_centroids = []\n for i in range(0, k):\n distance_scores = []\n if i != 0:\n for j in new_centroids:\n distances = []\n for row in data_copy.iterrows():\n distances.append((euclidean_dist(j, row[1]), row[0]))\n distances.sort()\n distance_scores.append(distances[-1])\n distance_scores.sort()\n centroid_index = distance_scores[-1][1]\n else:\n centroid_index = random.randrange(num_rows)\n data_copy.drop(labels=centroid_index, axis=0, inplace=True)\n new_centroids.append(data.iloc[centroid_index])\n return new_centroids\n\n\ndef assign_centroids():\n cluster_ids = []\n cluster_dict = {}\n counter = 0\n for i in centroids:\n if i.name is None:\n i.name = counter\n cluster_dict[i.name] = counter\n counter += 1\n for row in data.iterrows():\n distances = []\n for j in centroids:\n dist = euclidean_dist(row[1], j)\n if dist != 0:\n distances.append((dist, j.name))\n distances.sort()\n cluster_ids.append(cluster_dict[distances[0][1]])\n try:\n data.insert(6, 'ClusterID', cluster_ids)\n except ValueError:\n data.drop(columns='ClusterID', axis=1, inplace=True)\n data.insert(6, 'ClusterID', cluster_ids)\n except IndexError:\n data.drop(columns='ClusterID', axis=1, inplace=True)\n data.insert(6, 'ClusterID', cluster_ids)\n return cluster_ids\n\n\ndef recalculate_clusters():\n for i in range(0, k):\n cluster = pd.DataFrame()\n for item in data.iterrows():\n if item[1].loc['ClusterID'] == i:\n cluster = cluster.append(other=item[1])\n centroids[i] = cluster.mean()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef euclidean_dist(a: pd.Series, b: pd.Series):\n diff = a.sub(other=b)\n squares = diff ** 2\n dist = 0\n for feature_distance in squares:\n if not math.isnan(feature_distance):\n dist += feature_distance\n return math.sqrt(dist)\n\n\ndef choose_centroids(data_copy: pd.DataFrame):\n new_centroids = []\n for i in range(0, k):\n distance_scores = []\n if i != 0:\n for j in new_centroids:\n distances = []\n for row in data_copy.iterrows():\n distances.append((euclidean_dist(j, row[1]), row[0]))\n distances.sort()\n distance_scores.append(distances[-1])\n distance_scores.sort()\n centroid_index = distance_scores[-1][1]\n else:\n centroid_index = random.randrange(num_rows)\n data_copy.drop(labels=centroid_index, axis=0, inplace=True)\n new_centroids.append(data.iloc[centroid_index])\n return new_centroids\n\n\ndef assign_centroids():\n cluster_ids = []\n cluster_dict = {}\n counter = 0\n for i in centroids:\n if i.name is None:\n i.name = counter\n cluster_dict[i.name] = counter\n counter += 1\n for row in data.iterrows():\n distances = []\n for j in centroids:\n dist = euclidean_dist(row[1], j)\n if dist != 0:\n distances.append((dist, j.name))\n distances.sort()\n cluster_ids.append(cluster_dict[distances[0][1]])\n try:\n data.insert(6, 'ClusterID', cluster_ids)\n except ValueError:\n data.drop(columns='ClusterID', axis=1, inplace=True)\n data.insert(6, 'ClusterID', cluster_ids)\n except IndexError:\n data.drop(columns='ClusterID', axis=1, inplace=True)\n data.insert(6, 'ClusterID', cluster_ids)\n return cluster_ids\n\n\ndef recalculate_clusters():\n for i in range(0, k):\n cluster = pd.DataFrame()\n for item in data.iterrows():\n if item[1].loc['ClusterID'] == i:\n cluster = cluster.append(other=item[1])\n centroids[i] = cluster.mean()\n\n\n<mask token>\nfor i in data.iterrows():\n try:\n tree_count_dict[i[1]['tree_genus']]\n except KeyError:\n tree_count_dict[i[1]['tree_genus']] = counter\n counter += 1\n<mask token>\nprint(data)\n<mask token>\nfor iterations in range(0, 100):\n print('Clustering Progress: [', iterations + 1, '/ 100 ]')\n previous_assignments = cluster_assignments.copy()\n cluster_assignments = assign_centroids()\n recalculate_clusters()\n if previous_assignments == cluster_assignments and len(previous_assignments\n ) > 0:\n unchanged_iteration_count += 1\n else:\n unchanged_iteration_count = 0\n if unchanged_iteration_count > 3:\n print(\n \"Exiting early: cluster assignments haven't changed in 3 iterations\"\n )\n break\nprint(\"\"\"\nCluster Counts ( k =\"\"\", k, '):')\nfor i in range(0, k):\n print('Cluster', i + 1, ': ', cluster_assignments.count(i))\nprint('\\n\\n', data)\ndata.to_csv('./data/fire_data_2011_clustered.csv')\n",
"step-3": "<mask token>\n\n\ndef euclidean_dist(a: pd.Series, b: pd.Series):\n diff = a.sub(other=b)\n squares = diff ** 2\n dist = 0\n for feature_distance in squares:\n if not math.isnan(feature_distance):\n dist += feature_distance\n return math.sqrt(dist)\n\n\ndef choose_centroids(data_copy: pd.DataFrame):\n new_centroids = []\n for i in range(0, k):\n distance_scores = []\n if i != 0:\n for j in new_centroids:\n distances = []\n for row in data_copy.iterrows():\n distances.append((euclidean_dist(j, row[1]), row[0]))\n distances.sort()\n distance_scores.append(distances[-1])\n distance_scores.sort()\n centroid_index = distance_scores[-1][1]\n else:\n centroid_index = random.randrange(num_rows)\n data_copy.drop(labels=centroid_index, axis=0, inplace=True)\n new_centroids.append(data.iloc[centroid_index])\n return new_centroids\n\n\ndef assign_centroids():\n cluster_ids = []\n cluster_dict = {}\n counter = 0\n for i in centroids:\n if i.name is None:\n i.name = counter\n cluster_dict[i.name] = counter\n counter += 1\n for row in data.iterrows():\n distances = []\n for j in centroids:\n dist = euclidean_dist(row[1], j)\n if dist != 0:\n distances.append((dist, j.name))\n distances.sort()\n cluster_ids.append(cluster_dict[distances[0][1]])\n try:\n data.insert(6, 'ClusterID', cluster_ids)\n except ValueError:\n data.drop(columns='ClusterID', axis=1, inplace=True)\n data.insert(6, 'ClusterID', cluster_ids)\n except IndexError:\n data.drop(columns='ClusterID', axis=1, inplace=True)\n data.insert(6, 'ClusterID', cluster_ids)\n return cluster_ids\n\n\ndef recalculate_clusters():\n for i in range(0, k):\n cluster = pd.DataFrame()\n for item in data.iterrows():\n if item[1].loc['ClusterID'] == i:\n cluster = cluster.append(other=item[1])\n centroids[i] = cluster.mean()\n\n\ndata = pd.read_csv('data/fire_data_2011.csv')\ncounter = 0\ntree_count_dict = {}\nfor i in data.iterrows():\n try:\n tree_count_dict[i[1]['tree_genus']]\n except KeyError:\n tree_count_dict[i[1]['tree_genus']] = counter\n counter += 1\ndata = data.copy().replace(to_replace=tree_count_dict)\nprint(data)\nk = 7\nnum_rows = data.iloc[-1].name\ncentroids = choose_centroids(data.copy())\ncluster_assignments = []\nunchanged_iteration_count = 0\nfor iterations in range(0, 100):\n print('Clustering Progress: [', iterations + 1, '/ 100 ]')\n previous_assignments = cluster_assignments.copy()\n cluster_assignments = assign_centroids()\n recalculate_clusters()\n if previous_assignments == cluster_assignments and len(previous_assignments\n ) > 0:\n unchanged_iteration_count += 1\n else:\n unchanged_iteration_count = 0\n if unchanged_iteration_count > 3:\n print(\n \"Exiting early: cluster assignments haven't changed in 3 iterations\"\n )\n break\nprint(\"\"\"\nCluster Counts ( k =\"\"\", k, '):')\nfor i in range(0, k):\n print('Cluster', i + 1, ': ', cluster_assignments.count(i))\nprint('\\n\\n', data)\ndata.to_csv('./data/fire_data_2011_clustered.csv')\n",
"step-4": "import pandas as pd\nimport random\nimport math\n\n\ndef euclidean_dist(a: pd.Series, b: pd.Series):\n diff = a.sub(other=b)\n squares = diff ** 2\n dist = 0\n for feature_distance in squares:\n if not math.isnan(feature_distance):\n dist += feature_distance\n return math.sqrt(dist)\n\n\ndef choose_centroids(data_copy: pd.DataFrame):\n new_centroids = []\n for i in range(0, k):\n distance_scores = []\n if i != 0:\n for j in new_centroids:\n distances = []\n for row in data_copy.iterrows():\n distances.append((euclidean_dist(j, row[1]), row[0]))\n distances.sort()\n distance_scores.append(distances[-1])\n distance_scores.sort()\n centroid_index = distance_scores[-1][1]\n else:\n centroid_index = random.randrange(num_rows)\n data_copy.drop(labels=centroid_index, axis=0, inplace=True)\n new_centroids.append(data.iloc[centroid_index])\n return new_centroids\n\n\ndef assign_centroids():\n cluster_ids = []\n cluster_dict = {}\n counter = 0\n for i in centroids:\n if i.name is None:\n i.name = counter\n cluster_dict[i.name] = counter\n counter += 1\n for row in data.iterrows():\n distances = []\n for j in centroids:\n dist = euclidean_dist(row[1], j)\n if dist != 0:\n distances.append((dist, j.name))\n distances.sort()\n cluster_ids.append(cluster_dict[distances[0][1]])\n try:\n data.insert(6, 'ClusterID', cluster_ids)\n except ValueError:\n data.drop(columns='ClusterID', axis=1, inplace=True)\n data.insert(6, 'ClusterID', cluster_ids)\n except IndexError:\n data.drop(columns='ClusterID', axis=1, inplace=True)\n data.insert(6, 'ClusterID', cluster_ids)\n return cluster_ids\n\n\ndef recalculate_clusters():\n for i in range(0, k):\n cluster = pd.DataFrame()\n for item in data.iterrows():\n if item[1].loc['ClusterID'] == i:\n cluster = cluster.append(other=item[1])\n centroids[i] = cluster.mean()\n\n\ndata = pd.read_csv('data/fire_data_2011.csv')\ncounter = 0\ntree_count_dict = {}\nfor i in data.iterrows():\n try:\n tree_count_dict[i[1]['tree_genus']]\n except KeyError:\n tree_count_dict[i[1]['tree_genus']] = counter\n counter += 1\ndata = data.copy().replace(to_replace=tree_count_dict)\nprint(data)\nk = 7\nnum_rows = data.iloc[-1].name\ncentroids = choose_centroids(data.copy())\ncluster_assignments = []\nunchanged_iteration_count = 0\nfor iterations in range(0, 100):\n print('Clustering Progress: [', iterations + 1, '/ 100 ]')\n previous_assignments = cluster_assignments.copy()\n cluster_assignments = assign_centroids()\n recalculate_clusters()\n if previous_assignments == cluster_assignments and len(previous_assignments\n ) > 0:\n unchanged_iteration_count += 1\n else:\n unchanged_iteration_count = 0\n if unchanged_iteration_count > 3:\n print(\n \"Exiting early: cluster assignments haven't changed in 3 iterations\"\n )\n break\nprint(\"\"\"\nCluster Counts ( k =\"\"\", k, '):')\nfor i in range(0, k):\n print('Cluster', i + 1, ': ', cluster_assignments.count(i))\nprint('\\n\\n', data)\ndata.to_csv('./data/fire_data_2011_clustered.csv')\n",
"step-5": "import pandas as pd\nimport random\nimport math\n\n\n# takes 2 row series and calculates the distances between them\ndef euclidean_dist(a: pd.Series, b: pd.Series):\n diff = a.sub(other=b)\n squares = diff ** 2\n dist = 0\n\n for feature_distance in squares:\n if not math.isnan(feature_distance):\n dist += feature_distance\n\n return math.sqrt(dist)\n\n\n# takes copy of dataframe; returns initialized centroid array\ndef choose_centroids(data_copy: pd.DataFrame):\n new_centroids = []\n\n # randomly picks k centroids\n for i in range(0, k):\n distance_scores = []\n\n # picks furthest centroid from each other if the first one has been picked; else picks a random initial point\n if i != 0:\n for j in new_centroids:\n distances = []\n\n # for j existing centroids, compare to all other points and selects from all of j for next centroid\n for row in data_copy.iterrows():\n distances.append((euclidean_dist(j, row[1]), row[0]))\n\n distances.sort()\n distance_scores.append(distances[-1])\n\n distance_scores.sort()\n centroid_index = distance_scores[-1][1]\n\n else:\n centroid_index = random.randrange(num_rows)\n\n # drops centroid from copied dataframe to avoid duplicates\n data_copy.drop(labels=centroid_index, axis=0, inplace=True)\n\n # appends the newly selected centroid to the list\n new_centroids.append(data.iloc[centroid_index])\n\n return new_centroids\n\n\ndef assign_centroids():\n cluster_ids = [] # array for storing column output\n cluster_dict = {} # dict for mapping centroid IDs (i.e. 89, 102, 34, etc.) to (0, 1, 2, ..., k)\n counter = 0\n\n for i in centroids:\n if i.name is None:\n i.name = counter\n cluster_dict[i.name] = counter\n counter += 1 # crude way of assigning centroid IDs\n\n for row in data.iterrows():\n distances = []\n\n for j in centroids:\n dist = euclidean_dist(row[1], j)\n if dist != 0:\n distances.append((dist, j.name))\n\n distances.sort()\n cluster_ids.append(cluster_dict[distances[0][1]])\n\n # inserts cluster assignment column;\n # if column already exists, catches exception and removes the column before insertion\n try:\n data.insert(6, \"ClusterID\", cluster_ids)\n except ValueError:\n data.drop(columns=\"ClusterID\", axis=1, inplace=True)\n data.insert(6, \"ClusterID\", cluster_ids)\n except IndexError:\n data.drop(columns=\"ClusterID\", axis=1, inplace=True)\n data.insert(6, \"ClusterID\", cluster_ids)\n return cluster_ids\n\n\ndef recalculate_clusters():\n # for k centroids, take the mean of all values belonging to the centroid and make that point the new centroid\n for i in range(0, k):\n cluster = pd.DataFrame()\n for item in data.iterrows():\n if item[1].loc['ClusterID'] == i:\n cluster = cluster.append(other=item[1])\n centroids[i] = cluster.mean()\n\n\ndata = pd.read_csv(\"data/fire_data_2011.csv\")\n\n# uses a dict to convert from tree genus i.e. \"Pinu\", \"Pice\",... 
to 0, 1,...\ncounter = 0\ntree_count_dict = {}\nfor i in data.iterrows():\n try:\n tree_count_dict[i[1][\"tree_genus\"]]\n except KeyError:\n tree_count_dict[i[1][\"tree_genus\"]] = counter\n counter += 1\n\ndata = data.copy().replace(to_replace=tree_count_dict)\nprint(data)\n\nk = 7\nnum_rows = data.iloc[-1].name # gets label of the last row to figure out how many instances are in the data\n\n# giving temporary copy of data so selected values can be removed so there aren't duplicate centroids\ncentroids = choose_centroids(data.copy())\n\ncluster_assignments = []\nunchanged_iteration_count = 0\n\nfor iterations in range(0, 100):\n print(\"Clustering Progress: [\", iterations + 1, \"/ 100 ]\")\n\n # update previous cluster assignments; reassign cluster IDs and recalculate centroids\n previous_assignments = cluster_assignments.copy()\n cluster_assignments = assign_centroids()\n recalculate_clusters()\n\n # checks if cluster assignments have changed from one iteration to another\n if previous_assignments == cluster_assignments and len(previous_assignments) > 0:\n unchanged_iteration_count += 1\n else:\n unchanged_iteration_count = 0\n\n # if cluster assignments haven't changed in 3 iterations, break from loop and exit\n if unchanged_iteration_count > 3:\n print(\"Exiting early: cluster assignments haven't changed in 3 iterations\")\n break\n\nprint(\"\\nCluster Counts ( k =\", k, \"):\")\nfor i in range(0, k):\n print(\"Cluster\", i + 1, \": \", cluster_assignments.count(i))\n\nprint(\"\\n\\n\", data)\n\ndata.to_csv(\"./data/fire_data_2011_clustered.csv\")\n",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
from django.shortcuts import render
from .. login.models import *
def user(request):
context = {
"users" : User.objects.all(),
"user_level" : User.objects.get(id = request.session['user_id'])
}
return render(request, 'dashboard/user.html', context)
def admin(request):
context = {
"users" : User.objects.all(),
"user_level" : User.objects.get(id = request.session['user_id'])
}
return render(request, 'dashboard/admin.html', context)
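# --- Illustrative wiring (editor's addition, not part of the original app) ---
# A sketch of a urls.py that could expose the two views above; the route
# strings and the relative import are assumptions made only for this example.
#
# from django.urls import path
# from . import views
#
# urlpatterns = [
#     path('dashboard/user', views.user),
#     path('dashboard/admin', views.admin),
# ]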
|
normal
|
{
"blob_id": "3d737d0ee9c3af1f8ebe4c6998ad30fa34f42856",
"index": 570,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef user(request):\n context = {'users': User.objects.all(), 'user_level': User.objects.get(\n id=request.session['user_id'])}\n return render(request, 'dashboard/user.html', context)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef user(request):\n context = {'users': User.objects.all(), 'user_level': User.objects.get(\n id=request.session['user_id'])}\n return render(request, 'dashboard/user.html', context)\n\n\ndef admin(request):\n context = {'users': User.objects.all(), 'user_level': User.objects.get(\n id=request.session['user_id'])}\n return render(request, 'dashboard/admin.html', context)\n",
"step-4": "from django.shortcuts import render\nfrom ..login.models import *\n\n\ndef user(request):\n context = {'users': User.objects.all(), 'user_level': User.objects.get(\n id=request.session['user_id'])}\n return render(request, 'dashboard/user.html', context)\n\n\ndef admin(request):\n context = {'users': User.objects.all(), 'user_level': User.objects.get(\n id=request.session['user_id'])}\n return render(request, 'dashboard/admin.html', context)\n",
"step-5": "from django.shortcuts import render\nfrom .. login.models import *\n\ndef user(request):\n context = {\n \"users\" : User.objects.all(),\n \"user_level\" : User.objects.get(id = request.session['user_id'])\n }\n return render(request, 'dashboard/user.html', context)\n\ndef admin(request):\n context = {\n \"users\" : User.objects.all(),\n \"user_level\" : User.objects.get(id = request.session['user_id'])\n }\n return render(request, 'dashboard/admin.html', context)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import os
import argparse
from data.downloader import *
from data.utils import *
from data.danmaku import *
from utils import *
key = '03fc8eb101b091fb'
parser = argparse.ArgumentParser(description='Download Video From Bilibili')
parser.add_argument('-d', type=str, help='dataset')
parser.add_argument('-o', type=str, default='dataset', help='output directory')
parser.add_argument('-f', type=str, default='mp4', help='format')
parser.add_argument('-c', type=str, default='', help='country')
parser.add_argument('-q', type=int, default=0, help='quality')
parser.add_argument('-i', action='store_true', default=False, help=
'ignore download')
args = parser.parse_args()
cookie = dict()
cookie['DedeUserID'] = '347368229'
cookie['DedeUserID__ckMd5'] = '6e02ca142544e64c'
cookie['sid'] = 'ii8ca1k2'
cookie['SESSDATA'] = '1d13f39c%2C1544246349%2Cc62b611b'
aids, attr = download_list(os.path.join('list', args.d + '.txt'), os.path.
join(args.o, args.d, 'video'), **cookie, ignore=args.i, quality=args.q,
debug=True)
print('[*] Video Download Finished')
infos = dict()
for aid in aids:
extra = dict()
if 'ep' in aid:
epid = aid
aid = attr['aid']
fn = os.path.join(args.o, args.d, 'video', '{}.{}'.format(epid, args.f)
)
page = int(epid[2:]) - int(attr['base'][2:]) + 1
info = GetVideoInfo(aid.strip('av'), key, 1)
else:
fn = os.path.join(args.o, args.d, 'video', '{}.{}'.format(aid, args.f))
info = GetVideoInfo(aid.strip('av'), key)
extra['danmaku'] = request_danmaku(cid=info.cid)
if 'country' in attr:
extra['country'] = attr['country']
extra['complete'] = False
else:
capture = get_capture(fn)
print('[*] Capture : {}'.format(fn))
        extra['duration'] = get_duration(capture=capture)
extra['nframes'] = get_nframes(capture=capture)
extra['fps'] = get_fps(capture=capture)
extra['boundary'] = get_boundary(fn, capture, extra['nframes'],
'hecate')
extra['positions'] = get_positions(extra['nframes'])
extra['fpsegment'] = get_fpsegment(extra['boundary'])
extra['score'] = get_score(**extra)
extra['summary'] = get_summary(**extra)
extra['complete'] = True
for k, v in extra.items():
setattr(info, k, v)
infos[aid] = info
save_pickle(infos, '{}.info'.format(args.d))
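# --- Illustrative invocation (editor's addition) ---
# The script is driven entirely by argparse; a run might look like the line
# below. The script filename and the list name 'mylist' are placeholders, and
# list/mylist.txt would have to exist for the download step to find anything.
#
#   python download_bilibili.py -d mylist -o dataset -f mp4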
|
normal
|
{
"blob_id": "479411727de14e8032b6d01cdb844632111af688",
"index": 5275,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nparser.add_argument('-d', type=str, help='dataset')\nparser.add_argument('-o', type=str, default='dataset', help='output directory')\nparser.add_argument('-f', type=str, default='mp4', help='format')\nparser.add_argument('-c', type=str, default='', help='country')\nparser.add_argument('-q', type=int, default=0, help='quality')\nparser.add_argument('-i', action='store_true', default=False, help=\n 'ignore download')\n<mask token>\nprint('[*] Video Download Finished')\n<mask token>\nfor aid in aids:\n extra = dict()\n if 'ep' in aid:\n epid = aid\n aid = attr['aid']\n fn = os.path.join(args.o, args.d, 'video', '{}.{}'.format(epid, args.f)\n )\n page = int(epid[2:]) - int(attr['base'][2:]) + 1\n info = GetVideoInfo(aid.strip('av'), key, 1)\n else:\n fn = os.path.join(args.o, args.d, 'video', '{}.{}'.format(aid, args.f))\n info = GetVideoInfo(aid.strip('av'), key)\n extra['danmaku'] = request_danmaku(cid=info.cid)\n if 'country' in attr:\n extra['country'] = attr['country']\n extra['complete'] = False\n else:\n capture = get_capture(fn)\n print('[*] Capture : {}'.format(fn))\n extra['duration'] = get_duration(capture=capture)\n extra['duration'] = get_duration(capture=capture)\n extra['nframes'] = get_nframes(capture=capture)\n extra['fps'] = get_fps(capture=capture)\n extra['boundary'] = get_boundary(fn, capture, extra['nframes'],\n 'hecate')\n extra['positions'] = get_positions(extra['nframes'])\n extra['fpsegment'] = get_fpsegment(extra['boundary'])\n extra['score'] = get_score(**extra)\n extra['summary'] = get_summary(**extra)\n extra['complete'] = True\n for k, v in extra.items():\n setattr(info, k, v)\n infos[aid] = info\nsave_pickle(infos, '{}.info'.format(args.d))\n",
"step-3": "<mask token>\nkey = '03fc8eb101b091fb'\nparser = argparse.ArgumentParser(description='Download Video From Bilibili')\nparser.add_argument('-d', type=str, help='dataset')\nparser.add_argument('-o', type=str, default='dataset', help='output directory')\nparser.add_argument('-f', type=str, default='mp4', help='format')\nparser.add_argument('-c', type=str, default='', help='country')\nparser.add_argument('-q', type=int, default=0, help='quality')\nparser.add_argument('-i', action='store_true', default=False, help=\n 'ignore download')\nargs = parser.parse_args()\ncookie = dict()\ncookie['DedeUserID'] = '347368229'\ncookie['DedeUserID__ckMd5'] = '6e02ca142544e64c'\ncookie['sid'] = 'ii8ca1k2'\ncookie['SESSDATA'] = '1d13f39c%2C1544246349%2Cc62b611b'\naids, attr = download_list(os.path.join('list', args.d + '.txt'), os.path.\n join(args.o, args.d, 'video'), **cookie, ignore=args.i, quality=args.q,\n debug=True)\nprint('[*] Video Download Finished')\ninfos = dict()\nfor aid in aids:\n extra = dict()\n if 'ep' in aid:\n epid = aid\n aid = attr['aid']\n fn = os.path.join(args.o, args.d, 'video', '{}.{}'.format(epid, args.f)\n )\n page = int(epid[2:]) - int(attr['base'][2:]) + 1\n info = GetVideoInfo(aid.strip('av'), key, 1)\n else:\n fn = os.path.join(args.o, args.d, 'video', '{}.{}'.format(aid, args.f))\n info = GetVideoInfo(aid.strip('av'), key)\n extra['danmaku'] = request_danmaku(cid=info.cid)\n if 'country' in attr:\n extra['country'] = attr['country']\n extra['complete'] = False\n else:\n capture = get_capture(fn)\n print('[*] Capture : {}'.format(fn))\n extra['duration'] = get_duration(capture=capture)\n extra['duration'] = get_duration(capture=capture)\n extra['nframes'] = get_nframes(capture=capture)\n extra['fps'] = get_fps(capture=capture)\n extra['boundary'] = get_boundary(fn, capture, extra['nframes'],\n 'hecate')\n extra['positions'] = get_positions(extra['nframes'])\n extra['fpsegment'] = get_fpsegment(extra['boundary'])\n extra['score'] = get_score(**extra)\n extra['summary'] = get_summary(**extra)\n extra['complete'] = True\n for k, v in extra.items():\n setattr(info, k, v)\n infos[aid] = info\nsave_pickle(infos, '{}.info'.format(args.d))\n",
"step-4": "import os\nimport argparse\nfrom data.downloader import *\nfrom data.utils import *\nfrom data.danmaku import *\nfrom utils import *\nkey = '03fc8eb101b091fb'\nparser = argparse.ArgumentParser(description='Download Video From Bilibili')\nparser.add_argument('-d', type=str, help='dataset')\nparser.add_argument('-o', type=str, default='dataset', help='output directory')\nparser.add_argument('-f', type=str, default='mp4', help='format')\nparser.add_argument('-c', type=str, default='', help='country')\nparser.add_argument('-q', type=int, default=0, help='quality')\nparser.add_argument('-i', action='store_true', default=False, help=\n 'ignore download')\nargs = parser.parse_args()\ncookie = dict()\ncookie['DedeUserID'] = '347368229'\ncookie['DedeUserID__ckMd5'] = '6e02ca142544e64c'\ncookie['sid'] = 'ii8ca1k2'\ncookie['SESSDATA'] = '1d13f39c%2C1544246349%2Cc62b611b'\naids, attr = download_list(os.path.join('list', args.d + '.txt'), os.path.\n join(args.o, args.d, 'video'), **cookie, ignore=args.i, quality=args.q,\n debug=True)\nprint('[*] Video Download Finished')\ninfos = dict()\nfor aid in aids:\n extra = dict()\n if 'ep' in aid:\n epid = aid\n aid = attr['aid']\n fn = os.path.join(args.o, args.d, 'video', '{}.{}'.format(epid, args.f)\n )\n page = int(epid[2:]) - int(attr['base'][2:]) + 1\n info = GetVideoInfo(aid.strip('av'), key, 1)\n else:\n fn = os.path.join(args.o, args.d, 'video', '{}.{}'.format(aid, args.f))\n info = GetVideoInfo(aid.strip('av'), key)\n extra['danmaku'] = request_danmaku(cid=info.cid)\n if 'country' in attr:\n extra['country'] = attr['country']\n extra['complete'] = False\n else:\n capture = get_capture(fn)\n print('[*] Capture : {}'.format(fn))\n extra['duration'] = get_duration(capture=capture)\n extra['duration'] = get_duration(capture=capture)\n extra['nframes'] = get_nframes(capture=capture)\n extra['fps'] = get_fps(capture=capture)\n extra['boundary'] = get_boundary(fn, capture, extra['nframes'],\n 'hecate')\n extra['positions'] = get_positions(extra['nframes'])\n extra['fpsegment'] = get_fpsegment(extra['boundary'])\n extra['score'] = get_score(**extra)\n extra['summary'] = get_summary(**extra)\n extra['complete'] = True\n for k, v in extra.items():\n setattr(info, k, v)\n infos[aid] = info\nsave_pickle(infos, '{}.info'.format(args.d))\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from django.apps import AppConfig
class TimestechConfig(AppConfig):
name = 'TimesTech'
|
normal
|
{
"blob_id": "94f50e371ef65e86d0d2d40a3ed16946f8811be3",
"index": 2601,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass TimestechConfig(AppConfig):\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass TimestechConfig(AppConfig):\n name = 'TimesTech'\n",
"step-4": "from django.apps import AppConfig\n\n\nclass TimestechConfig(AppConfig):\n name = 'TimesTech'\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
DEBUG = True
SQLALCHEMY_DATABASE_URI = "postgresql://username:password@IPOrDomain/databasename"
SQLALCHEMY_TRACK_MODIFICATIONS = True
DATABASE_CONNECT_OPTIONS = {}
THREADS_PER_PAGE = 2
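# --- Illustrative usage (editor's addition, not part of the original config) ---
# A minimal sketch of how a settings module like this is typically pulled into
# a Flask app; the module name 'config' is an assumption for the example.
#
# from flask import Flask
# app = Flask(__name__)
# app.config.from_object('config')
# print(app.config['SQLALCHEMY_DATABASE_URI'])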
|
normal
|
{
"blob_id": "a1b0e72b62abc89d5292f199ec5b6193b544e271",
"index": 7813,
"step-1": "<mask token>\n",
"step-2": "DEBUG = True\nSQLALCHEMY_DATABASE_URI = (\n 'postgresql://username:password@IPOrDomain/databasename')\nSQLALCHEMY_TRACK_MODIFICATIONS = True\nDATABASE_CONNECT_OPTIONS = {}\nTHREADS_PER_PAGE = 2\n",
"step-3": "DEBUG = True\nSQLALCHEMY_DATABASE_URI = \"postgresql://username:password@IPOrDomain/databasename\"\n\nSQLALCHEMY_TRACK_MODIFICATIONS = True\nDATABASE_CONNECT_OPTIONS = {}\nTHREADS_PER_PAGE = 2\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
###
# This Python module contains commented out classifiers that I will no longer
# be using
###
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import BaggingClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.neighbors import KNeighborsClassifier
# Using Decision trees
# dt = DecisionTreeClassifier(max_depth=None)
# dt.fit(X_train_cv, y_train)
# print("DT Accuracy = " + str(dt.score(X_dev_cv, y_dev)))
# Using AdaBoost (takes too long)
# clf = DecisionTreeClassifier()
# ada = AdaBoostClassifier(clf)
# ada.fit(X_train_cv, y_train)
# print("ADA accuracy = " + str(ada.score(X_dev_cv, y_dev)))
# Using Bagging as a classifier with KNN
# clf = KNeighborsClassifier(n_neighbors=10)
# bag = BaggingClassifier(clf, max_features=0.5, max_samples=0.5)
# bag.fit(X_top10_train, y_top10_train)
# print("Bag accuracy = " + str(bag.score(X_top10_dev, y_top10_dev)))
# Using a random forest classifier
# rforest = RandomForestClassifier(max_depth=10000)
# rforest.fit(X_train_cv, y_train)
# print("Random Forest accuracy = " + str(rforest.score(X_dev_cv, y_dev)))
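# --- Illustrative usage (editor's addition) ---
# A self-contained sketch exercising one of the classifiers kept above; the
# synthetic data stands in for the X/y splits referenced in the comments,
# which are not defined in this module.
if __name__ == '__main__':
    from sklearn.datasets import make_classification
    from sklearn.model_selection import train_test_split
    X, y = make_classification(n_samples=500, n_features=20, random_state=0)
    X_train, X_dev, y_train, y_dev = train_test_split(X, y, random_state=0)
    bag = BaggingClassifier(KNeighborsClassifier(n_neighbors=10),
                            max_features=0.5, max_samples=0.5)
    bag.fit(X_train, y_train)
    print("Bag accuracy = " + str(bag.score(X_dev, y_dev)))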
|
normal
|
{
"blob_id": "5029f3e2000c25d6044f93201c698773e310d452",
"index": 3391,
"step-1": "<mask token>\n",
"step-2": "from sklearn.tree import DecisionTreeClassifier\nfrom sklearn.ensemble import BaggingClassifier\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.neighbors import KNeighborsClassifier\n",
"step-3": "###\n# This Python module contains commented out classifiers that I will no longer\n# be using\n###\n\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.ensemble import BaggingClassifier\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.neighbors import KNeighborsClassifier\n\n\n# Using Decision trees\n# dt = DecisionTreeClassifier(max_depth=None)\n# dt.fit(X_train_cv, y_train)\n# print(\"DT Accuracy = \" + str(dt.score(X_dev_cv, y_dev)))\n\n# Using AdaBoost (takes too long)\n# clf = DecisionTreeClassifier()\n# ada = AdaBoostClassifier(clf)\n# ada.fit(X_train_cv, y_train)\n# print(\"ADA accuracy = \" + str(ada.score(X_dev_cv, y_dev)))\n\n# Using Bagging as a classifier with KNN\n# clf = KNeighborsClassifier(n_neighbors=10)\n# bag = BaggingClassifier(clf, max_features=0.5, max_samples=0.5)\n# bag.fit(X_top10_train, y_top10_train)\n# print(\"Bag accuracy = \" + str(bag.score(X_top10_dev, y_top10_dev)))\n\n# Using a random forest classifier\n# rforest = RandomForestClassifier(max_depth=10000)\n# rforest.fit(X_train_cv, y_train)\n# print(\"Random Forest accuracy = \" + str(rforest.score(X_dev_cv, y_dev)))\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
from typing import Union, Tuple
import numpy as np
from dispim import Volume
def extract_3d(data: np.ndarray, center: np.ndarray, half_size: int):
"""
Extract an area around a point in a 3d numpy array, zero padded as necessary such that the specified point is at the
center
:param data: The numpy array to extract from
:param center: The point around which to extract
    :param half_size: The half-size of the extracted area (full size is half_size*2+1, where the center point is
    center)
:return: The extracted area
"""
# FIXME: Doesn't always return the expected shape
imax = np.clip(center + half_size + 1, 0, data.shape).astype(np.int)
imin = np.clip(center - half_size, 0, data.shape).astype(np.int)
subvol = data[imin[0]:imax[0], imin[1]:imax[1], imin[2]:imax[2]]
max_missing = ((center + half_size + 1) - imax).astype(np.int)
min_missing = (imin - (center - half_size)).astype(np.int)
return np.pad(subvol, [(min_missing[i], max_missing[i]) for i in range(3)], mode='constant')
def crop_view(data: np.ndarray, crop: Union[float, Tuple[float, float, float]], center_crop: bool = True):
"""
Get a cropped view of a 3d numpy array (does not modify the input)
:param data: The numpy array to crop
:param crop: The percentage to crop in each dimension
:param center_crop: If True, the crop is centered around the middle of the volume, otherwise, the crop expands from
(0, 0, 0)
:return: The cropped view
"""
if type(crop) == float or type(crop) == int:
if crop > 0.99999:
return data
icropx = 1 - crop
icropy = 1 - crop
icropz = 1 - crop
else:
icropx = 1 - crop[0]
icropy = 1 - crop[1]
icropz = 1 - crop[2]
w, h, l = data.shape
if center_crop:
view = data[int(w / 2 * icropx):int(-w / 2 * icropx),
int(h / 2 * icropy):int(-h / 2 * icropy),
int(l / 2 * icropz):int(-l / 2 * icropz)]
else:
view = data[:int(w * (1 - icropx)), :int(h * (1 - icropy)), :int(l * (1 - icropz))]
return view
def plot_ortho_overlayed(vol_a: Volume, vol_b: Volume, axis=2, pixel_size: float = 1.0) -> None:
"""
Plot two axis-reduced volumes overlayed as two channels (red and green), taking into account the spacing of both volumes
:param vol_a: The first volume to plot (red)
:param vol_b: The second volume to plot (green)
:param axis: The axis along which both volumes will be reduced
    :param pixel_size: The size of a pixel, relative to the spacing of the volumes
"""
from scipy.ndimage.interpolation import zoom
import matplotlib.pyplot as plt
vol_a_zoomed = np.mean(zoom(vol_a, np.array(vol_a.spacing) * pixel_size), axis=axis)
vol_b_zoomed = np.mean(zoom(vol_b, np.array(vol_b.spacing) * pixel_size), axis=axis)
b_channel = np.zeros_like(vol_a_zoomed)
max_val = max(vol_a_zoomed.max(), vol_b_zoomed.max())
min_val = min(vol_a_zoomed.min(), vol_b_zoomed.min())
vol_a_zoomed = (vol_a_zoomed - min_val) / (max_val - min_val)
vol_b_zoomed = (vol_b_zoomed - min_val) / (max_val - min_val)
plt.imshow(np.stack([vol_a_zoomed, vol_b_zoomed, b_channel], axis=2))
plt.show()
def show_ipv(data: np.ndarray):
"""
Show a 3d visualization of 3d numpy array
:param data: The numpy array to show
:return: The ipyvolume figure
"""
import ipyvolume as ipv
return ipv.quickvolshow(data)
def threshold_otsu(image: np.ndarray, nbins: int = 256, ignore: int = 0) -> float:
"""
Compute the Otsu threshold for a numpy array, without taking into account empty areas
:param image: The volume to compute the threshold for
:param nbins: The number of bins used
:param ignore: The value to ignore
:return: The Otsu threshold
"""
from skimage.filters.thresholding import histogram
# Check if the image is multi-colored or not
if image.min() == image.max():
raise ValueError("threshold_otsu is expected to work with images "
"having more than one color. The input image seems "
"to have just one color {0}.".format(image.min()))
img_flat = image.ravel()
img_flat = img_flat[img_flat != ignore]
hist, bin_centers = histogram(img_flat, nbins)
hist = hist.astype(float)
# class probabilities for all possible thresholds
weight1 = np.cumsum(hist)
weight2 = np.cumsum(hist[::-1])[::-1]
# class means for all possible thresholds
mean1 = np.cumsum(hist * bin_centers) / weight1
mean2 = (np.cumsum((hist * bin_centers)[::-1]) / weight2[::-1])[::-1]
# Clip ends to align class 1 and class 2 variables:
# The last value of `weight1`/`mean1` should pair with zero values in
# `weight2`/`mean2`, which do not exist.
variance12 = weight1[:-1] * weight2[1:] * (mean1[:-1] - mean2[1:]) ** 2
idx = np.argmax(variance12)
threshold = bin_centers[:-1][idx]
return threshold
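# --- Illustrative usage (editor's addition) ---
# A minimal sketch exercising the pure-numpy helpers above on a random volume;
# shapes and values are arbitrary and only meant to show the expected calls.
# (Note: extract_3d uses np.int, which recent NumPy releases have removed, so
# this demo assumes a NumPy version contemporary with the module.)
if __name__ == '__main__':
    rng = np.random.RandomState(0)
    vol = rng.rand(64, 64, 64)
    patch = extract_3d(vol, center=np.array([5, 32, 60]), half_size=8)
    # nominally (17, 17, 17); the FIXME above notes this is not always exact
    print('extracted patch shape:', patch.shape)
    cropped = crop_view(vol, 0.5)
    print('cropped view shape:', cropped.shape)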
|
normal
|
{
"blob_id": "26f486131bdf514cd8e41f75d414fe647eaf1140",
"index": 9243,
"step-1": "<mask token>\n\n\ndef extract_3d(data: np.ndarray, center: np.ndarray, half_size: int):\n \"\"\"\n Extract an area around a point in a 3d numpy array, zero padded as necessary such that the specified point is at the\n center\n\n :param data: The numpy array to extract from\n :param center: The point around which to extract\n :param half_size: The half-size of the extracted area (full size is half_size*2+1, where the th center point is\n center)\n :return: The extracted area\n \"\"\"\n imax = np.clip(center + half_size + 1, 0, data.shape).astype(np.int)\n imin = np.clip(center - half_size, 0, data.shape).astype(np.int)\n subvol = data[imin[0]:imax[0], imin[1]:imax[1], imin[2]:imax[2]]\n max_missing = (center + half_size + 1 - imax).astype(np.int)\n min_missing = (imin - (center - half_size)).astype(np.int)\n return np.pad(subvol, [(min_missing[i], max_missing[i]) for i in range(\n 3)], mode='constant')\n\n\ndef crop_view(data: np.ndarray, crop: Union[float, Tuple[float, float,\n float]], center_crop: bool=True):\n \"\"\"\n Get a cropped view of a 3d numpy array (does not modify the input)\n\n :param data: The numpy array to crop\n :param crop: The percentage to crop in each dimension\n :param center_crop: If True, the crop is centered around the middle of the volume, otherwise, the crop expands from\n (0, 0, 0)\n :return: The cropped view\n \"\"\"\n if type(crop) == float or type(crop) == int:\n if crop > 0.99999:\n return data\n icropx = 1 - crop\n icropy = 1 - crop\n icropz = 1 - crop\n else:\n icropx = 1 - crop[0]\n icropy = 1 - crop[1]\n icropz = 1 - crop[2]\n w, h, l = data.shape\n if center_crop:\n view = data[int(w / 2 * icropx):int(-w / 2 * icropx), int(h / 2 *\n icropy):int(-h / 2 * icropy), int(l / 2 * icropz):int(-l / 2 *\n icropz)]\n else:\n view = data[:int(w * (1 - icropx)), :int(h * (1 - icropy)), :int(l *\n (1 - icropz))]\n return view\n\n\n<mask token>\n\n\ndef threshold_otsu(image: np.ndarray, nbins: int=256, ignore: int=0) ->float:\n \"\"\"\n Compute the Otsu threshold for a numpy array, without taking into account empty areas\n\n :param image: The volume to compute the threshold for\n :param nbins: The number of bins used\n :param ignore: The value to ignore\n :return: The Otsu threshold\n \"\"\"\n from skimage.filters.thresholding import histogram\n if image.min() == image.max():\n raise ValueError(\n 'threshold_otsu is expected to work with images having more than one color. The input image seems to have just one color {0}.'\n .format(image.min()))\n img_flat = image.ravel()\n img_flat = img_flat[img_flat != ignore]\n hist, bin_centers = histogram(img_flat, nbins)\n hist = hist.astype(float)\n weight1 = np.cumsum(hist)\n weight2 = np.cumsum(hist[::-1])[::-1]\n mean1 = np.cumsum(hist * bin_centers) / weight1\n mean2 = (np.cumsum((hist * bin_centers)[::-1]) / weight2[::-1])[::-1]\n variance12 = weight1[:-1] * weight2[1:] * (mean1[:-1] - mean2[1:]) ** 2\n idx = np.argmax(variance12)\n threshold = bin_centers[:-1][idx]\n return threshold\n",
"step-2": "<mask token>\n\n\ndef extract_3d(data: np.ndarray, center: np.ndarray, half_size: int):\n \"\"\"\n Extract an area around a point in a 3d numpy array, zero padded as necessary such that the specified point is at the\n center\n\n :param data: The numpy array to extract from\n :param center: The point around which to extract\n :param half_size: The half-size of the extracted area (full size is half_size*2+1, where the th center point is\n center)\n :return: The extracted area\n \"\"\"\n imax = np.clip(center + half_size + 1, 0, data.shape).astype(np.int)\n imin = np.clip(center - half_size, 0, data.shape).astype(np.int)\n subvol = data[imin[0]:imax[0], imin[1]:imax[1], imin[2]:imax[2]]\n max_missing = (center + half_size + 1 - imax).astype(np.int)\n min_missing = (imin - (center - half_size)).astype(np.int)\n return np.pad(subvol, [(min_missing[i], max_missing[i]) for i in range(\n 3)], mode='constant')\n\n\ndef crop_view(data: np.ndarray, crop: Union[float, Tuple[float, float,\n float]], center_crop: bool=True):\n \"\"\"\n Get a cropped view of a 3d numpy array (does not modify the input)\n\n :param data: The numpy array to crop\n :param crop: The percentage to crop in each dimension\n :param center_crop: If True, the crop is centered around the middle of the volume, otherwise, the crop expands from\n (0, 0, 0)\n :return: The cropped view\n \"\"\"\n if type(crop) == float or type(crop) == int:\n if crop > 0.99999:\n return data\n icropx = 1 - crop\n icropy = 1 - crop\n icropz = 1 - crop\n else:\n icropx = 1 - crop[0]\n icropy = 1 - crop[1]\n icropz = 1 - crop[2]\n w, h, l = data.shape\n if center_crop:\n view = data[int(w / 2 * icropx):int(-w / 2 * icropx), int(h / 2 *\n icropy):int(-h / 2 * icropy), int(l / 2 * icropz):int(-l / 2 *\n icropz)]\n else:\n view = data[:int(w * (1 - icropx)), :int(h * (1 - icropy)), :int(l *\n (1 - icropz))]\n return view\n\n\ndef plot_ortho_overlayed(vol_a: Volume, vol_b: Volume, axis=2, pixel_size:\n float=1.0) ->None:\n \"\"\"\n Plot two axis-reduced volumes overlayed as two channels (red and green), taking into account the spacing of both volumes\n\n :param vol_a: The first volume to plot (red)\n :param vol_b: The second volume to plot (green)\n :param axis: The axis along which both volumes will be reduced\n :param pixel_size: The size of a pixel, relative to the spacing of the the volumes\n \"\"\"\n from scipy.ndimage.interpolation import zoom\n import matplotlib.pyplot as plt\n vol_a_zoomed = np.mean(zoom(vol_a, np.array(vol_a.spacing) * pixel_size\n ), axis=axis)\n vol_b_zoomed = np.mean(zoom(vol_b, np.array(vol_b.spacing) * pixel_size\n ), axis=axis)\n b_channel = np.zeros_like(vol_a_zoomed)\n max_val = max(vol_a_zoomed.max(), vol_b_zoomed.max())\n min_val = min(vol_a_zoomed.min(), vol_b_zoomed.min())\n vol_a_zoomed = (vol_a_zoomed - min_val) / (max_val - min_val)\n vol_b_zoomed = (vol_b_zoomed - min_val) / (max_val - min_val)\n plt.imshow(np.stack([vol_a_zoomed, vol_b_zoomed, b_channel], axis=2))\n plt.show()\n\n\n<mask token>\n\n\ndef threshold_otsu(image: np.ndarray, nbins: int=256, ignore: int=0) ->float:\n \"\"\"\n Compute the Otsu threshold for a numpy array, without taking into account empty areas\n\n :param image: The volume to compute the threshold for\n :param nbins: The number of bins used\n :param ignore: The value to ignore\n :return: The Otsu threshold\n \"\"\"\n from skimage.filters.thresholding import histogram\n if image.min() == image.max():\n raise ValueError(\n 'threshold_otsu is expected to work with images having more 
than one color. The input image seems to have just one color {0}.'\n .format(image.min()))\n img_flat = image.ravel()\n img_flat = img_flat[img_flat != ignore]\n hist, bin_centers = histogram(img_flat, nbins)\n hist = hist.astype(float)\n weight1 = np.cumsum(hist)\n weight2 = np.cumsum(hist[::-1])[::-1]\n mean1 = np.cumsum(hist * bin_centers) / weight1\n mean2 = (np.cumsum((hist * bin_centers)[::-1]) / weight2[::-1])[::-1]\n variance12 = weight1[:-1] * weight2[1:] * (mean1[:-1] - mean2[1:]) ** 2\n idx = np.argmax(variance12)\n threshold = bin_centers[:-1][idx]\n return threshold\n",
"step-3": "<mask token>\n\n\ndef extract_3d(data: np.ndarray, center: np.ndarray, half_size: int):\n \"\"\"\n Extract an area around a point in a 3d numpy array, zero padded as necessary such that the specified point is at the\n center\n\n :param data: The numpy array to extract from\n :param center: The point around which to extract\n :param half_size: The half-size of the extracted area (full size is half_size*2+1, where the th center point is\n center)\n :return: The extracted area\n \"\"\"\n imax = np.clip(center + half_size + 1, 0, data.shape).astype(np.int)\n imin = np.clip(center - half_size, 0, data.shape).astype(np.int)\n subvol = data[imin[0]:imax[0], imin[1]:imax[1], imin[2]:imax[2]]\n max_missing = (center + half_size + 1 - imax).astype(np.int)\n min_missing = (imin - (center - half_size)).astype(np.int)\n return np.pad(subvol, [(min_missing[i], max_missing[i]) for i in range(\n 3)], mode='constant')\n\n\ndef crop_view(data: np.ndarray, crop: Union[float, Tuple[float, float,\n float]], center_crop: bool=True):\n \"\"\"\n Get a cropped view of a 3d numpy array (does not modify the input)\n\n :param data: The numpy array to crop\n :param crop: The percentage to crop in each dimension\n :param center_crop: If True, the crop is centered around the middle of the volume, otherwise, the crop expands from\n (0, 0, 0)\n :return: The cropped view\n \"\"\"\n if type(crop) == float or type(crop) == int:\n if crop > 0.99999:\n return data\n icropx = 1 - crop\n icropy = 1 - crop\n icropz = 1 - crop\n else:\n icropx = 1 - crop[0]\n icropy = 1 - crop[1]\n icropz = 1 - crop[2]\n w, h, l = data.shape\n if center_crop:\n view = data[int(w / 2 * icropx):int(-w / 2 * icropx), int(h / 2 *\n icropy):int(-h / 2 * icropy), int(l / 2 * icropz):int(-l / 2 *\n icropz)]\n else:\n view = data[:int(w * (1 - icropx)), :int(h * (1 - icropy)), :int(l *\n (1 - icropz))]\n return view\n\n\ndef plot_ortho_overlayed(vol_a: Volume, vol_b: Volume, axis=2, pixel_size:\n float=1.0) ->None:\n \"\"\"\n Plot two axis-reduced volumes overlayed as two channels (red and green), taking into account the spacing of both volumes\n\n :param vol_a: The first volume to plot (red)\n :param vol_b: The second volume to plot (green)\n :param axis: The axis along which both volumes will be reduced\n :param pixel_size: The size of a pixel, relative to the spacing of the the volumes\n \"\"\"\n from scipy.ndimage.interpolation import zoom\n import matplotlib.pyplot as plt\n vol_a_zoomed = np.mean(zoom(vol_a, np.array(vol_a.spacing) * pixel_size\n ), axis=axis)\n vol_b_zoomed = np.mean(zoom(vol_b, np.array(vol_b.spacing) * pixel_size\n ), axis=axis)\n b_channel = np.zeros_like(vol_a_zoomed)\n max_val = max(vol_a_zoomed.max(), vol_b_zoomed.max())\n min_val = min(vol_a_zoomed.min(), vol_b_zoomed.min())\n vol_a_zoomed = (vol_a_zoomed - min_val) / (max_val - min_val)\n vol_b_zoomed = (vol_b_zoomed - min_val) / (max_val - min_val)\n plt.imshow(np.stack([vol_a_zoomed, vol_b_zoomed, b_channel], axis=2))\n plt.show()\n\n\ndef show_ipv(data: np.ndarray):\n \"\"\"\n Show a 3d visualization of 3d numpy array\n :param data: The numpy array to show\n :return: The ipyvolume figure\n \"\"\"\n import ipyvolume as ipv\n return ipv.quickvolshow(data)\n\n\ndef threshold_otsu(image: np.ndarray, nbins: int=256, ignore: int=0) ->float:\n \"\"\"\n Compute the Otsu threshold for a numpy array, without taking into account empty areas\n\n :param image: The volume to compute the threshold for\n :param nbins: The number of bins used\n :param ignore: The value to 
ignore\n :return: The Otsu threshold\n \"\"\"\n from skimage.filters.thresholding import histogram\n if image.min() == image.max():\n raise ValueError(\n 'threshold_otsu is expected to work with images having more than one color. The input image seems to have just one color {0}.'\n .format(image.min()))\n img_flat = image.ravel()\n img_flat = img_flat[img_flat != ignore]\n hist, bin_centers = histogram(img_flat, nbins)\n hist = hist.astype(float)\n weight1 = np.cumsum(hist)\n weight2 = np.cumsum(hist[::-1])[::-1]\n mean1 = np.cumsum(hist * bin_centers) / weight1\n mean2 = (np.cumsum((hist * bin_centers)[::-1]) / weight2[::-1])[::-1]\n variance12 = weight1[:-1] * weight2[1:] * (mean1[:-1] - mean2[1:]) ** 2\n idx = np.argmax(variance12)\n threshold = bin_centers[:-1][idx]\n return threshold\n",
"step-4": "from typing import Union, Tuple\nimport numpy as np\nfrom dispim import Volume\n\n\ndef extract_3d(data: np.ndarray, center: np.ndarray, half_size: int):\n \"\"\"\n Extract an area around a point in a 3d numpy array, zero padded as necessary such that the specified point is at the\n center\n\n :param data: The numpy array to extract from\n :param center: The point around which to extract\n :param half_size: The half-size of the extracted area (full size is half_size*2+1, where the th center point is\n center)\n :return: The extracted area\n \"\"\"\n imax = np.clip(center + half_size + 1, 0, data.shape).astype(np.int)\n imin = np.clip(center - half_size, 0, data.shape).astype(np.int)\n subvol = data[imin[0]:imax[0], imin[1]:imax[1], imin[2]:imax[2]]\n max_missing = (center + half_size + 1 - imax).astype(np.int)\n min_missing = (imin - (center - half_size)).astype(np.int)\n return np.pad(subvol, [(min_missing[i], max_missing[i]) for i in range(\n 3)], mode='constant')\n\n\ndef crop_view(data: np.ndarray, crop: Union[float, Tuple[float, float,\n float]], center_crop: bool=True):\n \"\"\"\n Get a cropped view of a 3d numpy array (does not modify the input)\n\n :param data: The numpy array to crop\n :param crop: The percentage to crop in each dimension\n :param center_crop: If True, the crop is centered around the middle of the volume, otherwise, the crop expands from\n (0, 0, 0)\n :return: The cropped view\n \"\"\"\n if type(crop) == float or type(crop) == int:\n if crop > 0.99999:\n return data\n icropx = 1 - crop\n icropy = 1 - crop\n icropz = 1 - crop\n else:\n icropx = 1 - crop[0]\n icropy = 1 - crop[1]\n icropz = 1 - crop[2]\n w, h, l = data.shape\n if center_crop:\n view = data[int(w / 2 * icropx):int(-w / 2 * icropx), int(h / 2 *\n icropy):int(-h / 2 * icropy), int(l / 2 * icropz):int(-l / 2 *\n icropz)]\n else:\n view = data[:int(w * (1 - icropx)), :int(h * (1 - icropy)), :int(l *\n (1 - icropz))]\n return view\n\n\ndef plot_ortho_overlayed(vol_a: Volume, vol_b: Volume, axis=2, pixel_size:\n float=1.0) ->None:\n \"\"\"\n Plot two axis-reduced volumes overlayed as two channels (red and green), taking into account the spacing of both volumes\n\n :param vol_a: The first volume to plot (red)\n :param vol_b: The second volume to plot (green)\n :param axis: The axis along which both volumes will be reduced\n :param pixel_size: The size of a pixel, relative to the spacing of the the volumes\n \"\"\"\n from scipy.ndimage.interpolation import zoom\n import matplotlib.pyplot as plt\n vol_a_zoomed = np.mean(zoom(vol_a, np.array(vol_a.spacing) * pixel_size\n ), axis=axis)\n vol_b_zoomed = np.mean(zoom(vol_b, np.array(vol_b.spacing) * pixel_size\n ), axis=axis)\n b_channel = np.zeros_like(vol_a_zoomed)\n max_val = max(vol_a_zoomed.max(), vol_b_zoomed.max())\n min_val = min(vol_a_zoomed.min(), vol_b_zoomed.min())\n vol_a_zoomed = (vol_a_zoomed - min_val) / (max_val - min_val)\n vol_b_zoomed = (vol_b_zoomed - min_val) / (max_val - min_val)\n plt.imshow(np.stack([vol_a_zoomed, vol_b_zoomed, b_channel], axis=2))\n plt.show()\n\n\ndef show_ipv(data: np.ndarray):\n \"\"\"\n Show a 3d visualization of 3d numpy array\n :param data: The numpy array to show\n :return: The ipyvolume figure\n \"\"\"\n import ipyvolume as ipv\n return ipv.quickvolshow(data)\n\n\ndef threshold_otsu(image: np.ndarray, nbins: int=256, ignore: int=0) ->float:\n \"\"\"\n Compute the Otsu threshold for a numpy array, without taking into account empty areas\n\n :param image: The volume to compute the threshold for\n :param 
nbins: The number of bins used\n :param ignore: The value to ignore\n :return: The Otsu threshold\n \"\"\"\n from skimage.filters.thresholding import histogram\n if image.min() == image.max():\n raise ValueError(\n 'threshold_otsu is expected to work with images having more than one color. The input image seems to have just one color {0}.'\n .format(image.min()))\n img_flat = image.ravel()\n img_flat = img_flat[img_flat != ignore]\n hist, bin_centers = histogram(img_flat, nbins)\n hist = hist.astype(float)\n weight1 = np.cumsum(hist)\n weight2 = np.cumsum(hist[::-1])[::-1]\n mean1 = np.cumsum(hist * bin_centers) / weight1\n mean2 = (np.cumsum((hist * bin_centers)[::-1]) / weight2[::-1])[::-1]\n variance12 = weight1[:-1] * weight2[1:] * (mean1[:-1] - mean2[1:]) ** 2\n idx = np.argmax(variance12)\n threshold = bin_centers[:-1][idx]\n return threshold\n",
"step-5": "from typing import Union, Tuple\n\nimport numpy as np\n\nfrom dispim import Volume\n\n\ndef extract_3d(data: np.ndarray, center: np.ndarray, half_size: int):\n \"\"\"\n Extract an area around a point in a 3d numpy array, zero padded as necessary such that the specified point is at the\n center\n\n :param data: The numpy array to extract from\n :param center: The point around which to extract\n :param half_size: The half-size of the extracted area (full size is half_size*2+1, where the th center point is\n center)\n :return: The extracted area\n \"\"\"\n # FIXME: Doesn't always return the expected shape\n imax = np.clip(center + half_size + 1, 0, data.shape).astype(np.int)\n imin = np.clip(center - half_size, 0, data.shape).astype(np.int)\n\n subvol = data[imin[0]:imax[0], imin[1]:imax[1], imin[2]:imax[2]]\n\n max_missing = ((center + half_size + 1) - imax).astype(np.int)\n min_missing = (imin - (center - half_size)).astype(np.int)\n\n return np.pad(subvol, [(min_missing[i], max_missing[i]) for i in range(3)], mode='constant')\n\n\ndef crop_view(data: np.ndarray, crop: Union[float, Tuple[float, float, float]], center_crop: bool = True):\n \"\"\"\n Get a cropped view of a 3d numpy array (does not modify the input)\n\n :param data: The numpy array to crop\n :param crop: The percentage to crop in each dimension\n :param center_crop: If True, the crop is centered around the middle of the volume, otherwise, the crop expands from\n (0, 0, 0)\n :return: The cropped view\n \"\"\"\n if type(crop) == float or type(crop) == int:\n if crop > 0.99999:\n return data\n icropx = 1 - crop\n icropy = 1 - crop\n icropz = 1 - crop\n else:\n icropx = 1 - crop[0]\n icropy = 1 - crop[1]\n icropz = 1 - crop[2]\n\n w, h, l = data.shape\n\n if center_crop:\n view = data[int(w / 2 * icropx):int(-w / 2 * icropx),\n int(h / 2 * icropy):int(-h / 2 * icropy),\n int(l / 2 * icropz):int(-l / 2 * icropz)]\n else:\n view = data[:int(w * (1 - icropx)), :int(h * (1 - icropy)), :int(l * (1 - icropz))]\n\n return view\n\n\ndef plot_ortho_overlayed(vol_a: Volume, vol_b: Volume, axis=2, pixel_size: float = 1.0) -> None:\n \"\"\"\n Plot two axis-reduced volumes overlayed as two channels (red and green), taking into account the spacing of both volumes\n\n :param vol_a: The first volume to plot (red)\n :param vol_b: The second volume to plot (green)\n :param axis: The axis along which both volumes will be reduced\n :param pixel_size: The size of a pixel, relative to the spacing of the the volumes\n \"\"\"\n from scipy.ndimage.interpolation import zoom\n import matplotlib.pyplot as plt\n\n vol_a_zoomed = np.mean(zoom(vol_a, np.array(vol_a.spacing) * pixel_size), axis=axis)\n vol_b_zoomed = np.mean(zoom(vol_b, np.array(vol_b.spacing) * pixel_size), axis=axis)\n b_channel = np.zeros_like(vol_a_zoomed)\n\n max_val = max(vol_a_zoomed.max(), vol_b_zoomed.max())\n min_val = min(vol_a_zoomed.min(), vol_b_zoomed.min())\n\n vol_a_zoomed = (vol_a_zoomed - min_val) / (max_val - min_val)\n vol_b_zoomed = (vol_b_zoomed - min_val) / (max_val - min_val)\n\n plt.imshow(np.stack([vol_a_zoomed, vol_b_zoomed, b_channel], axis=2))\n plt.show()\n\n\ndef show_ipv(data: np.ndarray):\n \"\"\"\n Show a 3d visualization of 3d numpy array\n :param data: The numpy array to show\n :return: The ipyvolume figure\n \"\"\"\n import ipyvolume as ipv\n return ipv.quickvolshow(data)\n\n\ndef threshold_otsu(image: np.ndarray, nbins: int = 256, ignore: int = 0) -> float:\n \"\"\"\n Compute the Otsu threshold for a numpy array, without taking into account empty 
areas\n\n :param image: The volume to compute the threshold for\n :param nbins: The number of bins used\n :param ignore: The value to ignore\n :return: The Otsu threshold\n \"\"\"\n from skimage.filters.thresholding import histogram\n # Check if the image is multi-colored or not\n if image.min() == image.max():\n raise ValueError(\"threshold_otsu is expected to work with images \"\n \"having more than one color. The input image seems \"\n \"to have just one color {0}.\".format(image.min()))\n\n img_flat = image.ravel()\n img_flat = img_flat[img_flat != ignore]\n hist, bin_centers = histogram(img_flat, nbins)\n hist = hist.astype(float)\n\n # class probabilities for all possible thresholds\n weight1 = np.cumsum(hist)\n weight2 = np.cumsum(hist[::-1])[::-1]\n # class means for all possible thresholds\n mean1 = np.cumsum(hist * bin_centers) / weight1\n mean2 = (np.cumsum((hist * bin_centers)[::-1]) / weight2[::-1])[::-1]\n\n # Clip ends to align class 1 and class 2 variables:\n # The last value of `weight1`/`mean1` should pair with zero values in\n # `weight2`/`mean2`, which do not exist.\n variance12 = weight1[:-1] * weight2[1:] * (mean1[:-1] - mean2[1:]) ** 2\n\n idx = np.argmax(variance12)\n threshold = bin_centers[:-1][idx]\n return threshold\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
from dai_imports import*
from obj_utils import*
import utils
class my_image_csv_dataset(Dataset):
def __init__(self, data_dir, data, transforms_ = None, obj = False,
minorities = None, diffs = None, bal_tfms = None):
self.data_dir = data_dir
self.data = data
self.transforms_ = transforms_
self.tfms = None
self.obj = obj
self.minorities = minorities
self.diffs = diffs
self.bal_tfms = bal_tfms
        assert transforms_ is not None, 'Please pass some transforms.'
def __len__(self):
return len(self.data)
def __getitem__(self, index):
img_path = os.path.join(self.data_dir,self.data.iloc[index, 0])
img = Image.open(img_path)
img = img.convert('RGB')
img = torchvision.transforms.functional.to_grayscale(img,num_output_channels=3)
y = self.data.iloc[index, 1]
if self.minorities and self.bal_tfms:
if y in self.minorities:
if hasattr(self.bal_tfms,'transforms'):
for tr in self.bal_tfms.transforms:
tr.p = self.diffs[y]
l = [self.bal_tfms]
l.extend(self.transforms_)
self.tfms = transforms.Compose(l)
else:
for t in self.bal_tfms:
t.p = self.diffs[y]
self.transforms_[1:1] = self.bal_tfms
self.tfms = transforms.Compose(self.transforms_)
# print(self.tfms)
else:
self.tfms = transforms.Compose(self.transforms_)
else:
self.tfms = transforms.Compose(self.transforms_)
x = self.tfms(img)
if self.obj:
s = x.size()[1]
if isinstance(s,tuple):
s = s[0]
row_scale = s/img.size[0]
col_scale = s/img.size[1]
y = rescale_bbox(y,row_scale,col_scale)
y.squeeze_()
y2 = self.data.iloc[index, 2]
y = (y,y2)
return (x,y)
class my_image_folder(DatasetFolder):
def __init__(self, root, transform=None, target_transform=None,
loader=default_loader, minorities=None, diffs = None, bal_tfms=None, tta_tfms = None):
super(my_image_folder, self).__init__(root, loader, IMG_EXTENSIONS,
transform=transform,
target_transform=target_transform)
self.imgs = self.samples
self.minorities = minorities
self.diffs = diffs
self.bal_tfms = bal_tfms
self.tta_tfms = tta_tfms
self.tfms = None
def __getitem__(self,index):
path, target = self.samples[index]
sample = self.loader(path)
if self.transform:
if self.minorities and self.bal_tfms:
if target in self.minorities:
if hasattr(self.bal_tfms,'transforms'):
for tr in self.bal_tfms.transforms:
tr.p = self.diffs[target]
l = [self.bal_tfms]
l.extend(self.transform)
self.tfms = transforms.Compose(l)
else:
for t in self.bal_tfms:
t.p = self.diffs[target]
self.tfms = transforms.Compose(self.bal_tfms + self.transform )
else:
self.tfms = transforms.Compose(self.transform)
elif self.tta_tfms:
self.tfms = self.tta_tfms
else:
self.tfms = transforms.Compose(self.transform)
sample = self.tfms(sample)
if self.target_transform:
target = self.target_transform(target)
return sample, target
def extract_data(dt):
x = []
y = []
for a,b in dt:
x.append(a)
y.append(b)
return x,y
def listdir_fullpath(d):
return [os.path.join(d, f) for f in os.listdir(d)]
def get_minorities(df,thresh=0.8):
c = df.iloc[:,1].value_counts()
lc = list(c)
max_count = lc[0]
diffs = [1-(x/max_count) for x in lc]
diffs = dict((k,v) for k,v in zip(c.keys(),diffs))
minorities = [c.keys()[x] for x,y in enumerate(lc) if y < (thresh*max_count)]
return minorities,diffs
def csv_from_path(path, img_dest):
path = Path(path)
img_dest = Path(img_dest)
labels_paths = list(path.iterdir())
tr_images = []
tr_labels = []
for l in labels_paths:
if l.is_dir():
for i in list(l.iterdir()):
if i.suffix in IMG_EXTENSIONS:
name = i.name
label = l.name
new_name = '{}_{}'.format(path.name,name)
new_path = img_dest/new_name
# print(new_path)
os.rename(i,new_path)
tr_images.append(new_name)
tr_labels.append(label)
# os.rmdir(l)
tr_img_label = {'Img':tr_images, 'Label': tr_labels}
csv = pd.DataFrame(tr_img_label,columns=['Img','Label'])
csv = csv.sample(frac=1).reset_index(drop=True)
return csv
def add_extension(a,e):
a = [x+e for x in a]
return a
def one_hot(targets, multi = False):
if multi:
binerizer = MultiLabelBinarizer()
dai_1hot = binerizer.fit_transform(targets)
else:
binerizer = LabelBinarizer()
dai_1hot = binerizer.fit_transform(targets)
return dai_1hot,binerizer.classes_
def get_index(arr,a):
for i in range(len(arr)):
if sum(arr[i] == a) == len(a):
return i
return False
def rescale_bbox(bb,row_scale,col_scale):
bb = bb.reshape((-1,4))
for b in bb:
r1,c1,r2,c2 = b
b[0] = int(np.round(r1*col_scale))
b[1] = int(np.round(c1*row_scale))
b[2] = int(np.round(r2*col_scale))
b[3] = int(np.round(c2*row_scale))
# bb = torch.tensor([bb_hw(b) for b in bb.reshape(-1,4)])
# for b in bb:
# r1,c1,r2,c2 = b
# b[0] = int(np.round(r1*row_scale))
# b[1] = int(np.round(c1*col_scale))
# b[2] = int(np.round(r2*row_scale))
# b[3] = int(np.round(c2*col_scale))
# if(sum(b)) == 1:
# b[0],b[1],b[2],b[3] = 0,0,0,0
bb = bb.reshape((1,-1))
return bb
def get_img_stats(dataset,sz):
size = int(len(dataset)*sz)
i = 0
imgs = []
for img,_ in dataset:
# print(img.size())
if i > size:
break
imgs.append(img)
i+=1
imgs_ = torch.stack(imgs,dim=3)
imgs_ = imgs_.view(3,-1)
imgs_mean = imgs_.mean(dim=1)
imgs_std = imgs_.std(dim=1)
return imgs_mean,imgs_std
def split_df(train_df,test_size = 0.15):
try:
train_df,val_df = train_test_split(train_df,test_size = test_size,random_state = 2,stratify = train_df.iloc[:,1])
except:
train_df,val_df = train_test_split(train_df,test_size = test_size,random_state = 2)
train_df = train_df.reset_index(drop = True)
val_df = val_df.reset_index(drop = True)
return train_df,val_df
def save_obj(obj, path):
with open(path, 'wb') as f:
pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)
def load_obj(path):
with open(path, 'rb') as f:
return pickle.load(f)
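# --- Illustrative usage of the helpers above (editor's addition) ---
# A tiny sketch on toy data; it assumes the star imports at the top of this
# module provide pandas (pd) and the sklearn binarizers, as the helpers
# themselves already do.
if __name__ == '__main__':
    toy = pd.DataFrame({'Img': ['a.jpg', 'b.jpg', 'c.jpg', 'd.jpg', 'e.jpg'],
                        'Label': ['cat', 'cat', 'cat', 'cat', 'dog']})
    minorities, diffs = get_minorities(toy, thresh=0.8)
    print(minorities, diffs)        # ['dog'] {'cat': 0.0, 'dog': 0.75}
    targets_1hot, classes = one_hot(list(toy['Label']))
    print(classes)                  # ['cat' 'dog']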
class DataProcessor:
def __init__(self, data_path = None, train_csv = None, val_csv = None, reg = False,
tr_name = 'train', val_name = 'val', test_name = 'test', extension = None, setup_data = True):
print('+------------------------------------+')
print('| Dream AI |')
print('+------------------------------------+')
print()
self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
self.data_path,self.train_csv,self.val_csv,self.reg,self.tr_name,self.val_name,self.test_name,self.extension = (data_path,train_csv,
val_csv,reg,tr_name,val_name,test_name,extension)
self.obj = False
self.multi_label = False
if setup_data:
self.set_up_data()
def set_up_data(self,split_size = 0.15):
data_path,train_csv,val_csv,tr_name,val_name,test_name = (self.data_path,self.train_csv,self.val_csv,self.tr_name,self.val_name,self.test_name)
# check if paths given and also set paths
if not data_path:
data_path = os.getcwd() + '/'
tr_path = os.path.join(data_path,tr_name)
val_path = os.path.join(data_path,val_name)
test_path = os.path.join(data_path,test_name)
        if os.path.exists(os.path.join(data_path,tr_name+'.csv')):
            train_csv = tr_name+'.csv'
        # if os.path.exists(os.path.join(data_path,val_name+'.csv')):
        #     val_csv = val_name+'.csv'
        # if os.path.exists(os.path.join(data_path,test_name+'.csv')):
        #     test_csv = test_name+'.csv'
        test_csv = None # keep defined; only set when the csvs are built from the image folders below
        # paths to csv
        if not train_csv:
            print('No training csv found, building csvs from the image folders.')
            train_csv,val_csv,test_csv = self.data_from_paths_to_csv(data_path,tr_path,val_path,test_path)
        train_csv_path = os.path.join(data_path,train_csv)
train_df = pd.read_csv(train_csv_path)
if 'Unnamed: 0' in train_df.columns:
            train_df = train_df.drop('Unnamed: 0', axis=1)
if len(train_df.columns) > 2:
self.obj = True
img_names = [str(x) for x in list(train_df.iloc[:,0])]
if self.extension:
img_names = add_extension(img_names,self.extension)
if val_csv:
val_csv_path = os.path.join(data_path,val_csv)
val_df = pd.read_csv(val_csv_path)
val_targets = list(map(str,list(val_df.iloc[:,1])))
if test_csv:
test_csv_path = os.path.join(data_path,test_csv)
test_df = pd.read_csv(test_csv_path)
test_targets = list(map(str,list(test_df.iloc[:,1])))
targets = list(map(str,list(train_df.iloc[:,1])))
lengths = [len(t) for t in [s.split() for s in targets]]
self.target_lengths = lengths
split_targets = [t.split() for t in targets]
if self.obj:
print('\nObject Detection\n')
# bounding boxes
int_targets = [list(map(float,x)) for x in split_targets]
zero_targets = np.zeros((len(targets),max(lengths)),dtype=int)
for i,t in enumerate(zero_targets):
t[len(t)-len(int_targets[i]):] = int_targets[i]
zero_targets[i] = t
train_df.iloc[:,1] = [torch.from_numpy(z).type(torch.FloatTensor) for z in zero_targets]
# one-hot classes
obj_targets = list(map(str,list(train_df.iloc[:,2])))
obj_split_targets = [t.split() for t in obj_targets]
try:
obj_split_targets = [list(map(int,x)) for x in obj_split_targets]
except:
pass
dai_onehot,onehot_classes = one_hot(obj_split_targets,True)
# train_df['one_hot'] = [torch.from_numpy(x).type(torch.FloatTensor) for x in dai_onehot]
# class indexes
c_names = list(onehot_classes)
class_idx = [[c_names.index(i) for i in c] for c in obj_split_targets]
zero_idx = np.zeros((len(targets),max(lengths)//4),dtype=int)
# print(zero_idx.shape)
for i,t in enumerate(zero_idx):
# temp_l = len(class_idx[i])
# if temp_l > 90:
# print(i,temp_l)
t[len(t)-len(class_idx[i]):] = class_idx[i]
zero_idx[i] = t
train_df.iloc[:,2] = [torch.from_numpy(z).type(torch.LongTensor) for z in zero_idx]
self.data_dir,self.num_classes,self.class_names = data_path,len(onehot_classes),onehot_classes
# self.set_up_object_detection([4,2,1],[0.7, 1., 1.3],[(1.,1.), (1.,0.5), (0.5,1.)])
elif self.reg:
print('\nRegression\n')
int_targets = [list(map(int,x)) for x in split_targets]
zero_targets = np.zeros((len(targets),max(lengths)),dtype=int)
for i,t in enumerate(zero_targets):
t[len(t)-len(int_targets[i]):] = int_targets[i]
zero_targets[i] = t
train_df.iloc[:,1] = [torch.from_numpy(z).type(torch.FloatTensor) for z in zero_targets]
self.data_dir,self.num_classes,self.class_names = data_path, max(lengths),np.unique(zero_targets,axis=1)
elif lengths[1:] != lengths[:-1]:
self.multi_label = True
print('\nMulti-label Classification\n')
try:
split_targets = [list(map(int,x)) for x in split_targets]
except:
pass
dai_onehot,onehot_classes = one_hot(split_targets,self.multi_label)
train_df.iloc[:,1] = [torch.from_numpy(x).type(torch.FloatTensor) for x in dai_onehot]
self.data_dir,self.num_classes,self.class_names = data_path,len(onehot_classes),onehot_classes
else:
print('\nSingle-label Classification\n')
unique_targets = list(np.unique(targets))
target_ids = [unique_targets.index(x) for x in targets]
train_df.iloc[:,1] = target_ids
if val_csv:
target_ids = [unique_targets.index(x) for x in val_targets]
val_df.iloc[:,1] = target_ids
if test_csv:
target_ids = [unique_targets.index(x) for x in test_targets]
test_df.iloc[:,1] = target_ids
self.data_dir,self.num_classes,self.class_names = data_path,len(unique_targets),unique_targets
# self.models_path = os.path.join(self.data_dir, 'models')
# os.makedirs(self.models_path,exist_ok=True)
if not val_csv:
train_df,val_df = split_df(train_df,split_size)
if not test_csv:
val_df,test_df = split_df(val_df,split_size)
tr_images = [str(x) for x in list(train_df.iloc[:,0])]
val_images = [str(x) for x in list(val_df.iloc[:,0])]
test_images = [str(x) for x in list(test_df.iloc[:,0])]
if self.extension:
tr_images = add_extension(tr_images,self.extension)
val_images = add_extension(val_images,self.extension)
test_images = add_extension(test_images,self.extension)
train_df.iloc[:,0] = tr_images
val_df.iloc[:,0] = val_images
test_df.iloc[:,0] = test_images
train_df.to_csv(os.path.join(data_path,'train.csv'),index=False)
val_df.to_csv(os.path.join(data_path,'val.csv'),index=False)
test_df.to_csv(os.path.join(data_path,'test.csv'),index=False)
self.minorities,self.class_diffs = None,None
        if (not self.obj) and (not self.multi_label): # minorities only make sense for plain classification labels
self.minorities,self.class_diffs = get_minorities(train_df)
self.data_dfs = {self.tr_name:train_df, self.val_name:val_df, self.test_name:test_df}
data_dict = {'data_dfs':self.data_dfs,'data_dir':self.data_dir,'num_classes':self.num_classes,'class_names':self.class_names,
'minorities':self.minorities,'class_diffs':self.class_diffs,'obj':self.obj,'multi_label':self.multi_label}
# save_obj(data_dict,os.path.join(self.data_dir,'data_dict.pkl'))
self.data_dict = data_dict
return data_dict
    def data_from_paths_to_csv(self,data_path,tr_path,val_path = None,test_path = None):
        # Build (Img,Label) csvs from the image folders. Always return a 3-tuple
        # (train_csv, val_csv, test_csv) so the caller can unpack it directly;
        # an entry is None when the corresponding folder does not exist.
        train_df = csv_from_path(tr_path,tr_path)
        train_df.to_csv(os.path.join(data_path,self.tr_name+'.csv'),index=False)
        train_csv,val_csv,test_csv = self.tr_name+'.csv',None,None
        if val_path is not None and os.path.exists(val_path):
            val_df = csv_from_path(val_path,tr_path)
            val_df.to_csv(os.path.join(data_path,self.val_name+'.csv'),index=False)
            val_csv = self.val_name+'.csv'
        if test_path is not None and os.path.exists(test_path):
            test_df = csv_from_path(test_path,tr_path)
            test_df.to_csv(os.path.join(data_path,self.test_name+'.csv'),index=False)
            test_csv = self.test_name+'.csv'
        return train_csv,val_csv,test_csv
def get_data(self, data_dict = None, s = (224,224), dataset = my_image_csv_dataset, bs = 32, balance = False, tfms = None,
bal_tfms = None, tta = False, num_workers = 4, stats_percentage = 0.6):
self.image_size = s
if not data_dict:
data_dict = self.data_dict
data_dfs,data_dir,minorities,class_diffs,obj,multi_label = (data_dict['data_dfs'],data_dict['data_dir'],data_dict['minorities'],
data_dict['class_diffs'],data_dict['obj'],data_dict['multi_label'])
if obj or multi_label:
balance = False
if tta:
tta_tfms = {self.tr_name: transforms.Compose(
[
# transforms.TenCrop(s),
transforms.FiveCrop(s[0]),
transforms.Lambda(lambda crops:torch.stack([transforms.ToTensor()(crop) for crop in crops])),
transforms.Lambda(lambda crops:torch.stack(
[transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])(crop) for crop in crops]))
]),
self.val_name: transforms.Compose(
[
# transforms.TenCrop(s),
transforms.FiveCrop(s[0]),
transforms.Lambda(lambda crops:torch.stack([transforms.ToTensor()(crop) for crop in crops])),
transforms.Lambda(lambda crops:torch.stack(
[transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])(crop) for crop in crops]))
]),
self.test_name: transforms.Compose(
[
# transforms.TenCrop(s),
transforms.FiveCrop(s[0]),
transforms.Lambda(lambda crops:torch.stack([transforms.ToTensor()(crop) for crop in crops])),
transforms.Lambda(lambda crops:torch.stack(
[transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])(crop) for crop in crops]))
])}
# tta_tfms = {self.tr_name: transforms.Compose([
# transforms.Resize(s),
# transforms.ToTensor(),
# transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
# ]),
# self.val_name: transforms.Compose([
# transforms.Resize(s),
# transforms.ToTensor(),
# transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
# ]) }
else:
tta_tfms = None
if not bal_tfms:
bal_tfms = { self.tr_name: [transforms.RandomHorizontalFlip()],
self.val_name: None,
self.test_name: None
}
else:
bal_tfms = {self.tr_name: bal_tfms, self.val_name: None, self.test_name: None}
        # The detection and non-detection branches both resize to `s`;
        # RandomResizedCrop is kept as a commented alternative.
        # resize_transform = transforms.RandomResizedCrop(s[0])
        resize_transform = transforms.Resize(s)
if not tfms:
tfms = [
resize_transform,
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
]
else:
tfms_temp = [
resize_transform,
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
]
tfms_temp[1:1] = tfms
tfms = tfms_temp
print(tfms)
data_transforms = {
self.tr_name: tfms,
self.val_name: [
# transforms.Resize(s[0]+50),
# transforms.CenterCrop(s[0]),
transforms.Resize(s),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
],
self.test_name: [
# transforms.Resize(s[0]+50),
# transforms.CenterCrop(s[0]),
transforms.Resize(s),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
]
}
temp_tfms = [resize_transform, transforms.ToTensor()]
temp_dataset = dataset(os.path.join(data_dir,self.tr_name),data_dfs[self.tr_name],temp_tfms)
self.img_mean,self.img_std = get_img_stats(temp_dataset,stats_percentage)
data_transforms[self.tr_name][-1].mean,data_transforms[self.tr_name][-1].std = self.img_mean,self.img_std
data_transforms[self.val_name][-1].mean,data_transforms[self.val_name][-1].std = self.img_mean,self.img_std
data_transforms[self.test_name][-1].mean,data_transforms[self.test_name][-1].std = self.img_mean,self.img_std
if balance:
image_datasets = {x: dataset(os.path.join(data_dir,self.tr_name),data_dfs[x],
data_transforms[x],obj,minorities,class_diffs,bal_tfms[x])
for x in [self.tr_name, self.val_name, self.test_name]}
else:
image_datasets = {x: dataset(os.path.join(data_dir,self.tr_name),data_dfs[x],
data_transforms[x],obj)
for x in [self.tr_name, self.val_name, self.test_name]}
dataloaders = {x: torch.utils.data.DataLoader(image_datasets[x], batch_size=bs,
shuffle=True, num_workers=num_workers)
for x in [self.tr_name, self.val_name, self.test_name]}
dataset_sizes = {x: len(image_datasets[x]) for x in [self.tr_name, self.val_name, self.test_name]}
self.image_datasets,self.dataloaders,self.dataset_sizes = (image_datasets,dataloaders,
dataset_sizes)
return image_datasets,dataloaders,dataset_sizes
def imshow(self,inp, title=None):
"""Imshow for Tensor."""
inp = self.denorm_img(inp)
plt.imshow(inp)
if title:
plt.title(title)
plt.pause(0.001)
def denorm_img(self,inp,calculate = False):
inp = inp.numpy().transpose((1, 2, 0))
if calculate:
mean = np.mean(inp)
std = np.std(inp)
else:
mean = self.img_mean.numpy()
std = self.img_std.numpy()
inp = std * inp + mean
inp = np.clip(inp, 0, 1)
return inp
def show_data(self,folder_name = 'train', size = (64,64), bs = 5):
        self.get_data(s = size, bs = bs)
batch = next(iter(self.dataloaders[folder_name]))
inputs, classes = batch[0],batch[1]
out = torchvision.utils.make_grid(inputs)
if self.reg:
print(classes)
self.imshow(out, title=[x for x in classes])
elif self.multi_label:
self.imshow(out, title=[self.class_names[np.nonzero(x.type(torch.LongTensor))] for x in classes])
else:
self.imshow(out, title=[self.class_names[x] for x in classes])
# def set_up_object_detection(self,anc_grids,anc_zooms,anc_ratios,num_colr = 12):
# # print('Would you like to give your own values for anchor_grids, anchor_zooms,and anchor_ratios? The default values are: {}, {} and {}'
# # .format(anc_grids,anc_zooms,anc_ratios))
# # print('If so, you may call the function "set_up_object_detection" with your own paramteres.')
# cmap = get_cmap(num_colr)
# self.colr_list = [cmap(float(x)) for x in range(num_colr)]
# self.num_colr = num_colr
# self.create_anchors(anc_grids,anc_zooms,anc_ratios)
# self.custom_head = SSD_MultiHead(self.k,self.num_classes,0.45,-4.)
# self.loss_f = FocalLoss(self.num_classes)
# def create_anchors(self,anc_grids,anc_zooms,anc_ratios):
# anchor_scales = [(anz*i,anz*j) for anz in anc_zooms for (i,j) in anc_ratios]
# k = len(anchor_scales)
# anc_offsets = [1/(o*2) for o in anc_grids]
# anc_x = np.concatenate([np.repeat(np.linspace(ao, 1-ao, ag), ag)
# for ao,ag in zip(anc_offsets,anc_grids)])
# anc_y = np.concatenate([np.tile(np.linspace(ao, 1-ao, ag), ag)
# for ao,ag in zip(anc_offsets,anc_grids)])
# anc_ctrs = np.repeat(np.stack([anc_x,anc_y], axis=1), k, axis=0)
# anc_sizes = np.concatenate([np.array([[o/ag,p/ag] for i in range(ag*ag) for o,p in anchor_scales])
# for ag in anc_grids])
# grid_sizes = torch.tensor(np.concatenate([np.array(
# [ 1/ag for i in range(ag*ag) for o,p in anchor_scales])
# for ag in anc_grids])).float().unsqueeze(1).to(self.device)
# anchors = torch.tensor(np.concatenate([anc_ctrs, anc_sizes], axis=1)).float().to(self.device)
# anchor_cnr = hw2corners(anchors[:,:2], anchors[:,2:])
# self.anchors,self.anchor_cnr,self.grid_sizes,self.k = anchors,anchor_cnr,grid_sizes,k
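# Illustrative end-to-end sketch (hypothetical paths and sizes, not part of the
# original module): build the csvs, create the datasets/dataloaders and preview a
# batch. Assumes `data/` contains a `train/` folder with one sub-folder per class.
#
# >>> dp = DataProcessor(data_path='data/', tr_name='train', val_name='val', test_name='test')
# >>> image_datasets, dataloaders, sizes = dp.get_data(s=(224, 224), bs=32)
# >>> dp.show_data(folder_name='train', size=(64, 64), bs=5)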
|
normal
|
{
"blob_id": "5b8c95354f8b27eff8226ace52ab9e97f98ae217",
"index": 80,
"step-1": "<mask token>\n\n\nclass my_image_csv_dataset(Dataset):\n\n def __init__(self, data_dir, data, transforms_=None, obj=False,\n minorities=None, diffs=None, bal_tfms=None):\n self.data_dir = data_dir\n self.data = data\n self.transforms_ = transforms_\n self.tfms = None\n self.obj = obj\n self.minorities = minorities\n self.diffs = diffs\n self.bal_tfms = bal_tfms\n assert transforms_ is not None, print('Please pass some transforms.')\n\n def __len__(self):\n return len(self.data)\n\n def __getitem__(self, index):\n img_path = os.path.join(self.data_dir, self.data.iloc[index, 0])\n img = Image.open(img_path)\n img = img.convert('RGB')\n img = torchvision.transforms.functional.to_grayscale(img,\n num_output_channels=3)\n y = self.data.iloc[index, 1]\n if self.minorities and self.bal_tfms:\n if y in self.minorities:\n if hasattr(self.bal_tfms, 'transforms'):\n for tr in self.bal_tfms.transforms:\n tr.p = self.diffs[y]\n l = [self.bal_tfms]\n l.extend(self.transforms_)\n self.tfms = transforms.Compose(l)\n else:\n for t in self.bal_tfms:\n t.p = self.diffs[y]\n self.transforms_[1:1] = self.bal_tfms\n self.tfms = transforms.Compose(self.transforms_)\n else:\n self.tfms = transforms.Compose(self.transforms_)\n else:\n self.tfms = transforms.Compose(self.transforms_)\n x = self.tfms(img)\n if self.obj:\n s = x.size()[1]\n if isinstance(s, tuple):\n s = s[0]\n row_scale = s / img.size[0]\n col_scale = s / img.size[1]\n y = rescale_bbox(y, row_scale, col_scale)\n y.squeeze_()\n y2 = self.data.iloc[index, 2]\n y = y, y2\n return x, y\n\n\nclass my_image_folder(DatasetFolder):\n\n def __init__(self, root, transform=None, target_transform=None, loader=\n default_loader, minorities=None, diffs=None, bal_tfms=None,\n tta_tfms=None):\n super(my_image_folder, self).__init__(root, loader, IMG_EXTENSIONS,\n transform=transform, target_transform=target_transform)\n self.imgs = self.samples\n self.minorities = minorities\n self.diffs = diffs\n self.bal_tfms = bal_tfms\n self.tta_tfms = tta_tfms\n self.tfms = None\n\n def __getitem__(self, index):\n path, target = self.samples[index]\n sample = self.loader(path)\n if self.transform:\n if self.minorities and self.bal_tfms:\n if target in self.minorities:\n if hasattr(self.bal_tfms, 'transforms'):\n for tr in self.bal_tfms.transforms:\n tr.p = self.diffs[target]\n l = [self.bal_tfms]\n l.extend(self.transform)\n self.tfms = transforms.Compose(l)\n else:\n for t in self.bal_tfms:\n t.p = self.diffs[target]\n self.tfms = transforms.Compose(self.bal_tfms + self\n .transform)\n else:\n self.tfms = transforms.Compose(self.transform)\n elif self.tta_tfms:\n self.tfms = self.tta_tfms\n else:\n self.tfms = transforms.Compose(self.transform)\n sample = self.tfms(sample)\n if self.target_transform:\n target = self.target_transform(target)\n return sample, target\n\n\n<mask token>\n\n\nclass DataProcessor:\n\n def __init__(self, data_path=None, train_csv=None, val_csv=None, reg=\n False, tr_name='train', val_name='val', test_name='test', extension\n =None, setup_data=True):\n print('+------------------------------------+')\n print('| Dream AI |')\n print('+------------------------------------+')\n print()\n self.device = torch.device('cuda:0' if torch.cuda.is_available() else\n 'cpu')\n (self.data_path, self.train_csv, self.val_csv, self.reg, self.\n tr_name, self.val_name, self.test_name, self.extension) = (\n data_path, train_csv, val_csv, reg, tr_name, val_name,\n test_name, extension)\n self.obj = False\n self.multi_label = False\n if setup_data:\n 
self.set_up_data()\n\n def set_up_data(self, split_size=0.15):\n data_path, train_csv, val_csv, tr_name, val_name, test_name = (self\n .data_path, self.train_csv, self.val_csv, self.tr_name, self.\n val_name, self.test_name)\n if not data_path:\n data_path = os.getcwd() + '/'\n tr_path = os.path.join(data_path, tr_name)\n val_path = os.path.join(data_path, val_name)\n test_path = os.path.join(data_path, test_name)\n if os.path.exists(os.path.join(data_path, tr_name + '.csv')):\n train_csv = tr_name + '.csv'\n if not train_csv:\n print('no')\n train_csv, val_csv, test_csv = self.data_from_paths_to_csv(\n data_path, tr_path, val_path, test_path)\n train_csv_path = os.path.join(data_path, train_csv)\n train_df = pd.read_csv(train_csv_path)\n if 'Unnamed: 0' in train_df.columns:\n train_df = train_df.drop('Unnamed: 0', 1)\n if len(train_df.columns) > 2:\n self.obj = True\n img_names = [str(x) for x in list(train_df.iloc[:, 0])]\n if self.extension:\n img_names = add_extension(img_names, self.extension)\n if val_csv:\n val_csv_path = os.path.join(data_path, val_csv)\n val_df = pd.read_csv(val_csv_path)\n val_targets = list(map(str, list(val_df.iloc[:, 1])))\n if test_csv:\n test_csv_path = os.path.join(data_path, test_csv)\n test_df = pd.read_csv(test_csv_path)\n test_targets = list(map(str, list(test_df.iloc[:, 1])))\n targets = list(map(str, list(train_df.iloc[:, 1])))\n lengths = [len(t) for t in [s.split() for s in targets]]\n self.target_lengths = lengths\n split_targets = [t.split() for t in targets]\n if self.obj:\n print('\\nObject Detection\\n')\n int_targets = [list(map(float, x)) for x in split_targets]\n zero_targets = np.zeros((len(targets), max(lengths)), dtype=int)\n for i, t in enumerate(zero_targets):\n t[len(t) - len(int_targets[i]):] = int_targets[i]\n zero_targets[i] = t\n train_df.iloc[:, 1] = [torch.from_numpy(z).type(torch.\n FloatTensor) for z in zero_targets]\n obj_targets = list(map(str, list(train_df.iloc[:, 2])))\n obj_split_targets = [t.split() for t in obj_targets]\n try:\n obj_split_targets = [list(map(int, x)) for x in\n obj_split_targets]\n except:\n pass\n dai_onehot, onehot_classes = one_hot(obj_split_targets, True)\n c_names = list(onehot_classes)\n class_idx = [[c_names.index(i) for i in c] for c in\n obj_split_targets]\n zero_idx = np.zeros((len(targets), max(lengths) // 4), dtype=int)\n for i, t in enumerate(zero_idx):\n t[len(t) - len(class_idx[i]):] = class_idx[i]\n zero_idx[i] = t\n train_df.iloc[:, 2] = [torch.from_numpy(z).type(torch.\n LongTensor) for z in zero_idx]\n self.data_dir, self.num_classes, self.class_names = data_path, len(\n onehot_classes), onehot_classes\n elif self.reg:\n print('\\nRegression\\n')\n int_targets = [list(map(int, x)) for x in split_targets]\n zero_targets = np.zeros((len(targets), max(lengths)), dtype=int)\n for i, t in enumerate(zero_targets):\n t[len(t) - len(int_targets[i]):] = int_targets[i]\n zero_targets[i] = t\n train_df.iloc[:, 1] = [torch.from_numpy(z).type(torch.\n FloatTensor) for z in zero_targets]\n self.data_dir, self.num_classes, self.class_names = data_path, max(\n lengths), np.unique(zero_targets, axis=1)\n elif lengths[1:] != lengths[:-1]:\n self.multi_label = True\n print('\\nMulti-label Classification\\n')\n try:\n split_targets = [list(map(int, x)) for x in split_targets]\n except:\n pass\n dai_onehot, onehot_classes = one_hot(split_targets, self.\n multi_label)\n train_df.iloc[:, 1] = [torch.from_numpy(x).type(torch.\n FloatTensor) for x in dai_onehot]\n self.data_dir, self.num_classes, 
self.class_names = data_path, len(\n onehot_classes), onehot_classes\n else:\n print('\\nSingle-label Classification\\n')\n unique_targets = list(np.unique(targets))\n target_ids = [unique_targets.index(x) for x in targets]\n train_df.iloc[:, 1] = target_ids\n if val_csv:\n target_ids = [unique_targets.index(x) for x in val_targets]\n val_df.iloc[:, 1] = target_ids\n if test_csv:\n target_ids = [unique_targets.index(x) for x in test_targets]\n test_df.iloc[:, 1] = target_ids\n self.data_dir, self.num_classes, self.class_names = data_path, len(\n unique_targets), unique_targets\n if not val_csv:\n train_df, val_df = split_df(train_df, split_size)\n if not test_csv:\n val_df, test_df = split_df(val_df, split_size)\n tr_images = [str(x) for x in list(train_df.iloc[:, 0])]\n val_images = [str(x) for x in list(val_df.iloc[:, 0])]\n test_images = [str(x) for x in list(test_df.iloc[:, 0])]\n if self.extension:\n tr_images = add_extension(tr_images, self.extension)\n val_images = add_extension(val_images, self.extension)\n test_images = add_extension(test_images, self.extension)\n train_df.iloc[:, 0] = tr_images\n val_df.iloc[:, 0] = val_images\n test_df.iloc[:, 0] = test_images\n train_df.to_csv(os.path.join(data_path, 'train.csv'), index=False)\n val_df.to_csv(os.path.join(data_path, 'val.csv'), index=False)\n test_df.to_csv(os.path.join(data_path, 'test.csv'), index=False)\n self.minorities, self.class_diffs = None, None\n if not self.obj or not self.multi_label:\n self.minorities, self.class_diffs = get_minorities(train_df)\n self.data_dfs = {self.tr_name: train_df, self.val_name: val_df,\n self.test_name: test_df}\n data_dict = {'data_dfs': self.data_dfs, 'data_dir': self.data_dir,\n 'num_classes': self.num_classes, 'class_names': self.\n class_names, 'minorities': self.minorities, 'class_diffs': self\n .class_diffs, 'obj': self.obj, 'multi_label': self.multi_label}\n self.data_dict = data_dict\n return data_dict\n\n def data_from_paths_to_csv(self, data_path, tr_path, val_path=None,\n test_path=None):\n train_df = csv_from_path(tr_path, tr_path)\n train_df.to_csv(os.path.join(data_path, self.tr_name + '.csv'),\n index=False)\n ret = self.tr_name + '.csv', None\n if val_path is not None:\n val_exists = os.path.exists(val_path)\n if val_exists:\n val_df = csv_from_path(val_path, tr_path)\n val_df.to_csv(os.path.join(data_path, self.val_name +\n '.csv'), index=False)\n ret = self.tr_name + '.csv', self.val_name + '.csv'\n if test_path is not None:\n test_exists = os.path.exists(test_path)\n if test_exists:\n test_df = csv_from_path(test_path, tr_path)\n test_df.to_csv(os.path.join(data_path, self.test_name +\n '.csv'), index=False)\n ret = (self.tr_name + '.csv', self.val_name + '.csv', self.\n test_name + '.csv')\n return ret\n\n def get_data(self, data_dict=None, s=(224, 224), dataset=\n my_image_csv_dataset, bs=32, balance=False, tfms=None, bal_tfms=\n None, tta=False, num_workers=4, stats_percentage=0.6):\n self.image_size = s\n if not data_dict:\n data_dict = self.data_dict\n data_dfs, data_dir, minorities, class_diffs, obj, multi_label = (\n data_dict['data_dfs'], data_dict['data_dir'], data_dict[\n 'minorities'], data_dict['class_diffs'], data_dict['obj'],\n data_dict['multi_label'])\n if obj or multi_label:\n balance = False\n if tta:\n tta_tfms = {self.tr_name: transforms.Compose([transforms.\n FiveCrop(s[0]), transforms.Lambda(lambda crops: torch.stack\n ([transforms.ToTensor()(crop) for crop in crops])),\n transforms.Lambda(lambda crops: torch.stack([transforms.\n Normalize([0.485, 
0.456, 0.406], [0.229, 0.224, 0.225])(\n crop) for crop in crops]))]), self.val_name: transforms.\n Compose([transforms.FiveCrop(s[0]), transforms.Lambda(lambda\n crops: torch.stack([transforms.ToTensor()(crop) for crop in\n crops])), transforms.Lambda(lambda crops: torch.stack([\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, \n 0.225])(crop) for crop in crops]))]), self.test_name:\n transforms.Compose([transforms.FiveCrop(s[0]), transforms.\n Lambda(lambda crops: torch.stack([transforms.ToTensor()(\n crop) for crop in crops])), transforms.Lambda(lambda crops:\n torch.stack([transforms.Normalize([0.485, 0.456, 0.406], [\n 0.229, 0.224, 0.225])(crop) for crop in crops]))])}\n else:\n tta_tfms = None\n if not bal_tfms:\n bal_tfms = {self.tr_name: [transforms.RandomHorizontalFlip()],\n self.val_name: None, self.test_name: None}\n else:\n bal_tfms = {self.tr_name: bal_tfms, self.val_name: None, self.\n test_name: None}\n if obj:\n resize_transform = transforms.Resize(s)\n else:\n resize_transform = transforms.Resize(s)\n if not tfms:\n tfms = [resize_transform, transforms.ToTensor(), transforms.\n Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])]\n else:\n tfms_temp = [resize_transform, transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, \n 0.225])]\n tfms_temp[1:1] = tfms\n tfms = tfms_temp\n print(tfms)\n data_transforms = {self.tr_name: tfms, self.val_name: [transforms.\n Resize(s), transforms.ToTensor(), transforms.Normalize([0.485, \n 0.456, 0.406], [0.229, 0.224, 0.225])], self.test_name: [\n transforms.Resize(s), transforms.ToTensor(), transforms.\n Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])]}\n temp_tfms = [resize_transform, transforms.ToTensor()]\n temp_dataset = dataset(os.path.join(data_dir, self.tr_name),\n data_dfs[self.tr_name], temp_tfms)\n self.img_mean, self.img_std = get_img_stats(temp_dataset,\n stats_percentage)\n data_transforms[self.tr_name][-1].mean, data_transforms[self.tr_name][\n -1].std = self.img_mean, self.img_std\n data_transforms[self.val_name][-1].mean, data_transforms[self.val_name\n ][-1].std = self.img_mean, self.img_std\n data_transforms[self.test_name][-1].mean, data_transforms[self.\n test_name][-1].std = self.img_mean, self.img_std\n if balance:\n image_datasets = {x: dataset(os.path.join(data_dir, self.\n tr_name), data_dfs[x], data_transforms[x], obj, minorities,\n class_diffs, bal_tfms[x]) for x in [self.tr_name, self.\n val_name, self.test_name]}\n else:\n image_datasets = {x: dataset(os.path.join(data_dir, self.\n tr_name), data_dfs[x], data_transforms[x], obj) for x in [\n self.tr_name, self.val_name, self.test_name]}\n dataloaders = {x: torch.utils.data.DataLoader(image_datasets[x],\n batch_size=bs, shuffle=True, num_workers=num_workers) for x in\n [self.tr_name, self.val_name, self.test_name]}\n dataset_sizes = {x: len(image_datasets[x]) for x in [self.tr_name,\n self.val_name, self.test_name]}\n self.image_datasets, self.dataloaders, self.dataset_sizes = (\n image_datasets, dataloaders, dataset_sizes)\n return image_datasets, dataloaders, dataset_sizes\n\n def imshow(self, inp, title=None):\n \"\"\"Imshow for Tensor.\"\"\"\n inp = self.denorm_img(inp)\n plt.imshow(inp)\n if title:\n plt.title(title)\n plt.pause(0.001)\n\n def denorm_img(self, inp, calculate=False):\n inp = inp.numpy().transpose((1, 2, 0))\n if calculate:\n mean = np.mean(inp)\n std = np.std(inp)\n else:\n mean = self.img_mean.numpy()\n std = self.img_std.numpy()\n inp = std * inp + mean\n inp = np.clip(inp, 0, 1)\n 
return inp\n\n def show_data(self, folder_name='train', size=(64, 64), bs=5):\n self.get_data(size, bs)\n batch = next(iter(self.dataloaders[folder_name]))\n inputs, classes = batch[0], batch[1]\n out = torchvision.utils.make_grid(inputs)\n if self.reg:\n print(classes)\n self.imshow(out, title=[x for x in classes])\n elif self.multi_label:\n self.imshow(out, title=[self.class_names[np.nonzero(x.type(\n torch.LongTensor))] for x in classes])\n else:\n self.imshow(out, title=[self.class_names[x] for x in classes])\n",
"step-2": "<mask token>\n\n\nclass my_image_csv_dataset(Dataset):\n\n def __init__(self, data_dir, data, transforms_=None, obj=False,\n minorities=None, diffs=None, bal_tfms=None):\n self.data_dir = data_dir\n self.data = data\n self.transforms_ = transforms_\n self.tfms = None\n self.obj = obj\n self.minorities = minorities\n self.diffs = diffs\n self.bal_tfms = bal_tfms\n assert transforms_ is not None, print('Please pass some transforms.')\n\n def __len__(self):\n return len(self.data)\n\n def __getitem__(self, index):\n img_path = os.path.join(self.data_dir, self.data.iloc[index, 0])\n img = Image.open(img_path)\n img = img.convert('RGB')\n img = torchvision.transforms.functional.to_grayscale(img,\n num_output_channels=3)\n y = self.data.iloc[index, 1]\n if self.minorities and self.bal_tfms:\n if y in self.minorities:\n if hasattr(self.bal_tfms, 'transforms'):\n for tr in self.bal_tfms.transforms:\n tr.p = self.diffs[y]\n l = [self.bal_tfms]\n l.extend(self.transforms_)\n self.tfms = transforms.Compose(l)\n else:\n for t in self.bal_tfms:\n t.p = self.diffs[y]\n self.transforms_[1:1] = self.bal_tfms\n self.tfms = transforms.Compose(self.transforms_)\n else:\n self.tfms = transforms.Compose(self.transforms_)\n else:\n self.tfms = transforms.Compose(self.transforms_)\n x = self.tfms(img)\n if self.obj:\n s = x.size()[1]\n if isinstance(s, tuple):\n s = s[0]\n row_scale = s / img.size[0]\n col_scale = s / img.size[1]\n y = rescale_bbox(y, row_scale, col_scale)\n y.squeeze_()\n y2 = self.data.iloc[index, 2]\n y = y, y2\n return x, y\n\n\nclass my_image_folder(DatasetFolder):\n\n def __init__(self, root, transform=None, target_transform=None, loader=\n default_loader, minorities=None, diffs=None, bal_tfms=None,\n tta_tfms=None):\n super(my_image_folder, self).__init__(root, loader, IMG_EXTENSIONS,\n transform=transform, target_transform=target_transform)\n self.imgs = self.samples\n self.minorities = minorities\n self.diffs = diffs\n self.bal_tfms = bal_tfms\n self.tta_tfms = tta_tfms\n self.tfms = None\n\n def __getitem__(self, index):\n path, target = self.samples[index]\n sample = self.loader(path)\n if self.transform:\n if self.minorities and self.bal_tfms:\n if target in self.minorities:\n if hasattr(self.bal_tfms, 'transforms'):\n for tr in self.bal_tfms.transforms:\n tr.p = self.diffs[target]\n l = [self.bal_tfms]\n l.extend(self.transform)\n self.tfms = transforms.Compose(l)\n else:\n for t in self.bal_tfms:\n t.p = self.diffs[target]\n self.tfms = transforms.Compose(self.bal_tfms + self\n .transform)\n else:\n self.tfms = transforms.Compose(self.transform)\n elif self.tta_tfms:\n self.tfms = self.tta_tfms\n else:\n self.tfms = transforms.Compose(self.transform)\n sample = self.tfms(sample)\n if self.target_transform:\n target = self.target_transform(target)\n return sample, target\n\n\n<mask token>\n\n\ndef rescale_bbox(bb, row_scale, col_scale):\n bb = bb.reshape((-1, 4))\n for b in bb:\n r1, c1, r2, c2 = b\n b[0] = int(np.round(r1 * col_scale))\n b[1] = int(np.round(c1 * row_scale))\n b[2] = int(np.round(r2 * col_scale))\n b[3] = int(np.round(c2 * row_scale))\n bb = bb.reshape((1, -1))\n return bb\n\n\n<mask token>\n\n\nclass DataProcessor:\n\n def __init__(self, data_path=None, train_csv=None, val_csv=None, reg=\n False, tr_name='train', val_name='val', test_name='test', extension\n =None, setup_data=True):\n print('+------------------------------------+')\n print('| Dream AI |')\n print('+------------------------------------+')\n print()\n self.device = torch.device('cuda:0' if 
torch.cuda.is_available() else\n 'cpu')\n (self.data_path, self.train_csv, self.val_csv, self.reg, self.\n tr_name, self.val_name, self.test_name, self.extension) = (\n data_path, train_csv, val_csv, reg, tr_name, val_name,\n test_name, extension)\n self.obj = False\n self.multi_label = False\n if setup_data:\n self.set_up_data()\n\n def set_up_data(self, split_size=0.15):\n data_path, train_csv, val_csv, tr_name, val_name, test_name = (self\n .data_path, self.train_csv, self.val_csv, self.tr_name, self.\n val_name, self.test_name)\n if not data_path:\n data_path = os.getcwd() + '/'\n tr_path = os.path.join(data_path, tr_name)\n val_path = os.path.join(data_path, val_name)\n test_path = os.path.join(data_path, test_name)\n if os.path.exists(os.path.join(data_path, tr_name + '.csv')):\n train_csv = tr_name + '.csv'\n if not train_csv:\n print('no')\n train_csv, val_csv, test_csv = self.data_from_paths_to_csv(\n data_path, tr_path, val_path, test_path)\n train_csv_path = os.path.join(data_path, train_csv)\n train_df = pd.read_csv(train_csv_path)\n if 'Unnamed: 0' in train_df.columns:\n train_df = train_df.drop('Unnamed: 0', 1)\n if len(train_df.columns) > 2:\n self.obj = True\n img_names = [str(x) for x in list(train_df.iloc[:, 0])]\n if self.extension:\n img_names = add_extension(img_names, self.extension)\n if val_csv:\n val_csv_path = os.path.join(data_path, val_csv)\n val_df = pd.read_csv(val_csv_path)\n val_targets = list(map(str, list(val_df.iloc[:, 1])))\n if test_csv:\n test_csv_path = os.path.join(data_path, test_csv)\n test_df = pd.read_csv(test_csv_path)\n test_targets = list(map(str, list(test_df.iloc[:, 1])))\n targets = list(map(str, list(train_df.iloc[:, 1])))\n lengths = [len(t) for t in [s.split() for s in targets]]\n self.target_lengths = lengths\n split_targets = [t.split() for t in targets]\n if self.obj:\n print('\\nObject Detection\\n')\n int_targets = [list(map(float, x)) for x in split_targets]\n zero_targets = np.zeros((len(targets), max(lengths)), dtype=int)\n for i, t in enumerate(zero_targets):\n t[len(t) - len(int_targets[i]):] = int_targets[i]\n zero_targets[i] = t\n train_df.iloc[:, 1] = [torch.from_numpy(z).type(torch.\n FloatTensor) for z in zero_targets]\n obj_targets = list(map(str, list(train_df.iloc[:, 2])))\n obj_split_targets = [t.split() for t in obj_targets]\n try:\n obj_split_targets = [list(map(int, x)) for x in\n obj_split_targets]\n except:\n pass\n dai_onehot, onehot_classes = one_hot(obj_split_targets, True)\n c_names = list(onehot_classes)\n class_idx = [[c_names.index(i) for i in c] for c in\n obj_split_targets]\n zero_idx = np.zeros((len(targets), max(lengths) // 4), dtype=int)\n for i, t in enumerate(zero_idx):\n t[len(t) - len(class_idx[i]):] = class_idx[i]\n zero_idx[i] = t\n train_df.iloc[:, 2] = [torch.from_numpy(z).type(torch.\n LongTensor) for z in zero_idx]\n self.data_dir, self.num_classes, self.class_names = data_path, len(\n onehot_classes), onehot_classes\n elif self.reg:\n print('\\nRegression\\n')\n int_targets = [list(map(int, x)) for x in split_targets]\n zero_targets = np.zeros((len(targets), max(lengths)), dtype=int)\n for i, t in enumerate(zero_targets):\n t[len(t) - len(int_targets[i]):] = int_targets[i]\n zero_targets[i] = t\n train_df.iloc[:, 1] = [torch.from_numpy(z).type(torch.\n FloatTensor) for z in zero_targets]\n self.data_dir, self.num_classes, self.class_names = data_path, max(\n lengths), np.unique(zero_targets, axis=1)\n elif lengths[1:] != lengths[:-1]:\n self.multi_label = True\n print('\\nMulti-label 
Classification\\n')\n try:\n split_targets = [list(map(int, x)) for x in split_targets]\n except:\n pass\n dai_onehot, onehot_classes = one_hot(split_targets, self.\n multi_label)\n train_df.iloc[:, 1] = [torch.from_numpy(x).type(torch.\n FloatTensor) for x in dai_onehot]\n self.data_dir, self.num_classes, self.class_names = data_path, len(\n onehot_classes), onehot_classes\n else:\n print('\\nSingle-label Classification\\n')\n unique_targets = list(np.unique(targets))\n target_ids = [unique_targets.index(x) for x in targets]\n train_df.iloc[:, 1] = target_ids\n if val_csv:\n target_ids = [unique_targets.index(x) for x in val_targets]\n val_df.iloc[:, 1] = target_ids\n if test_csv:\n target_ids = [unique_targets.index(x) for x in test_targets]\n test_df.iloc[:, 1] = target_ids\n self.data_dir, self.num_classes, self.class_names = data_path, len(\n unique_targets), unique_targets\n if not val_csv:\n train_df, val_df = split_df(train_df, split_size)\n if not test_csv:\n val_df, test_df = split_df(val_df, split_size)\n tr_images = [str(x) for x in list(train_df.iloc[:, 0])]\n val_images = [str(x) for x in list(val_df.iloc[:, 0])]\n test_images = [str(x) for x in list(test_df.iloc[:, 0])]\n if self.extension:\n tr_images = add_extension(tr_images, self.extension)\n val_images = add_extension(val_images, self.extension)\n test_images = add_extension(test_images, self.extension)\n train_df.iloc[:, 0] = tr_images\n val_df.iloc[:, 0] = val_images\n test_df.iloc[:, 0] = test_images\n train_df.to_csv(os.path.join(data_path, 'train.csv'), index=False)\n val_df.to_csv(os.path.join(data_path, 'val.csv'), index=False)\n test_df.to_csv(os.path.join(data_path, 'test.csv'), index=False)\n self.minorities, self.class_diffs = None, None\n if not self.obj or not self.multi_label:\n self.minorities, self.class_diffs = get_minorities(train_df)\n self.data_dfs = {self.tr_name: train_df, self.val_name: val_df,\n self.test_name: test_df}\n data_dict = {'data_dfs': self.data_dfs, 'data_dir': self.data_dir,\n 'num_classes': self.num_classes, 'class_names': self.\n class_names, 'minorities': self.minorities, 'class_diffs': self\n .class_diffs, 'obj': self.obj, 'multi_label': self.multi_label}\n self.data_dict = data_dict\n return data_dict\n\n def data_from_paths_to_csv(self, data_path, tr_path, val_path=None,\n test_path=None):\n train_df = csv_from_path(tr_path, tr_path)\n train_df.to_csv(os.path.join(data_path, self.tr_name + '.csv'),\n index=False)\n ret = self.tr_name + '.csv', None\n if val_path is not None:\n val_exists = os.path.exists(val_path)\n if val_exists:\n val_df = csv_from_path(val_path, tr_path)\n val_df.to_csv(os.path.join(data_path, self.val_name +\n '.csv'), index=False)\n ret = self.tr_name + '.csv', self.val_name + '.csv'\n if test_path is not None:\n test_exists = os.path.exists(test_path)\n if test_exists:\n test_df = csv_from_path(test_path, tr_path)\n test_df.to_csv(os.path.join(data_path, self.test_name +\n '.csv'), index=False)\n ret = (self.tr_name + '.csv', self.val_name + '.csv', self.\n test_name + '.csv')\n return ret\n\n def get_data(self, data_dict=None, s=(224, 224), dataset=\n my_image_csv_dataset, bs=32, balance=False, tfms=None, bal_tfms=\n None, tta=False, num_workers=4, stats_percentage=0.6):\n self.image_size = s\n if not data_dict:\n data_dict = self.data_dict\n data_dfs, data_dir, minorities, class_diffs, obj, multi_label = (\n data_dict['data_dfs'], data_dict['data_dir'], data_dict[\n 'minorities'], data_dict['class_diffs'], data_dict['obj'],\n data_dict['multi_label'])\n 
if obj or multi_label:\n balance = False\n if tta:\n tta_tfms = {self.tr_name: transforms.Compose([transforms.\n FiveCrop(s[0]), transforms.Lambda(lambda crops: torch.stack\n ([transforms.ToTensor()(crop) for crop in crops])),\n transforms.Lambda(lambda crops: torch.stack([transforms.\n Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])(\n crop) for crop in crops]))]), self.val_name: transforms.\n Compose([transforms.FiveCrop(s[0]), transforms.Lambda(lambda\n crops: torch.stack([transforms.ToTensor()(crop) for crop in\n crops])), transforms.Lambda(lambda crops: torch.stack([\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, \n 0.225])(crop) for crop in crops]))]), self.test_name:\n transforms.Compose([transforms.FiveCrop(s[0]), transforms.\n Lambda(lambda crops: torch.stack([transforms.ToTensor()(\n crop) for crop in crops])), transforms.Lambda(lambda crops:\n torch.stack([transforms.Normalize([0.485, 0.456, 0.406], [\n 0.229, 0.224, 0.225])(crop) for crop in crops]))])}\n else:\n tta_tfms = None\n if not bal_tfms:\n bal_tfms = {self.tr_name: [transforms.RandomHorizontalFlip()],\n self.val_name: None, self.test_name: None}\n else:\n bal_tfms = {self.tr_name: bal_tfms, self.val_name: None, self.\n test_name: None}\n if obj:\n resize_transform = transforms.Resize(s)\n else:\n resize_transform = transforms.Resize(s)\n if not tfms:\n tfms = [resize_transform, transforms.ToTensor(), transforms.\n Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])]\n else:\n tfms_temp = [resize_transform, transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, \n 0.225])]\n tfms_temp[1:1] = tfms\n tfms = tfms_temp\n print(tfms)\n data_transforms = {self.tr_name: tfms, self.val_name: [transforms.\n Resize(s), transforms.ToTensor(), transforms.Normalize([0.485, \n 0.456, 0.406], [0.229, 0.224, 0.225])], self.test_name: [\n transforms.Resize(s), transforms.ToTensor(), transforms.\n Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])]}\n temp_tfms = [resize_transform, transforms.ToTensor()]\n temp_dataset = dataset(os.path.join(data_dir, self.tr_name),\n data_dfs[self.tr_name], temp_tfms)\n self.img_mean, self.img_std = get_img_stats(temp_dataset,\n stats_percentage)\n data_transforms[self.tr_name][-1].mean, data_transforms[self.tr_name][\n -1].std = self.img_mean, self.img_std\n data_transforms[self.val_name][-1].mean, data_transforms[self.val_name\n ][-1].std = self.img_mean, self.img_std\n data_transforms[self.test_name][-1].mean, data_transforms[self.\n test_name][-1].std = self.img_mean, self.img_std\n if balance:\n image_datasets = {x: dataset(os.path.join(data_dir, self.\n tr_name), data_dfs[x], data_transforms[x], obj, minorities,\n class_diffs, bal_tfms[x]) for x in [self.tr_name, self.\n val_name, self.test_name]}\n else:\n image_datasets = {x: dataset(os.path.join(data_dir, self.\n tr_name), data_dfs[x], data_transforms[x], obj) for x in [\n self.tr_name, self.val_name, self.test_name]}\n dataloaders = {x: torch.utils.data.DataLoader(image_datasets[x],\n batch_size=bs, shuffle=True, num_workers=num_workers) for x in\n [self.tr_name, self.val_name, self.test_name]}\n dataset_sizes = {x: len(image_datasets[x]) for x in [self.tr_name,\n self.val_name, self.test_name]}\n self.image_datasets, self.dataloaders, self.dataset_sizes = (\n image_datasets, dataloaders, dataset_sizes)\n return image_datasets, dataloaders, dataset_sizes\n\n def imshow(self, inp, title=None):\n \"\"\"Imshow for Tensor.\"\"\"\n inp = self.denorm_img(inp)\n plt.imshow(inp)\n if 
title:\n plt.title(title)\n plt.pause(0.001)\n\n def denorm_img(self, inp, calculate=False):\n inp = inp.numpy().transpose((1, 2, 0))\n if calculate:\n mean = np.mean(inp)\n std = np.std(inp)\n else:\n mean = self.img_mean.numpy()\n std = self.img_std.numpy()\n inp = std * inp + mean\n inp = np.clip(inp, 0, 1)\n return inp\n\n def show_data(self, folder_name='train', size=(64, 64), bs=5):\n self.get_data(size, bs)\n batch = next(iter(self.dataloaders[folder_name]))\n inputs, classes = batch[0], batch[1]\n out = torchvision.utils.make_grid(inputs)\n if self.reg:\n print(classes)\n self.imshow(out, title=[x for x in classes])\n elif self.multi_label:\n self.imshow(out, title=[self.class_names[np.nonzero(x.type(\n torch.LongTensor))] for x in classes])\n else:\n self.imshow(out, title=[self.class_names[x] for x in classes])\n",
"step-3": "<mask token>\n\n\nclass my_image_csv_dataset(Dataset):\n\n def __init__(self, data_dir, data, transforms_=None, obj=False,\n minorities=None, diffs=None, bal_tfms=None):\n self.data_dir = data_dir\n self.data = data\n self.transforms_ = transforms_\n self.tfms = None\n self.obj = obj\n self.minorities = minorities\n self.diffs = diffs\n self.bal_tfms = bal_tfms\n assert transforms_ is not None, print('Please pass some transforms.')\n\n def __len__(self):\n return len(self.data)\n\n def __getitem__(self, index):\n img_path = os.path.join(self.data_dir, self.data.iloc[index, 0])\n img = Image.open(img_path)\n img = img.convert('RGB')\n img = torchvision.transforms.functional.to_grayscale(img,\n num_output_channels=3)\n y = self.data.iloc[index, 1]\n if self.minorities and self.bal_tfms:\n if y in self.minorities:\n if hasattr(self.bal_tfms, 'transforms'):\n for tr in self.bal_tfms.transforms:\n tr.p = self.diffs[y]\n l = [self.bal_tfms]\n l.extend(self.transforms_)\n self.tfms = transforms.Compose(l)\n else:\n for t in self.bal_tfms:\n t.p = self.diffs[y]\n self.transforms_[1:1] = self.bal_tfms\n self.tfms = transforms.Compose(self.transforms_)\n else:\n self.tfms = transforms.Compose(self.transforms_)\n else:\n self.tfms = transforms.Compose(self.transforms_)\n x = self.tfms(img)\n if self.obj:\n s = x.size()[1]\n if isinstance(s, tuple):\n s = s[0]\n row_scale = s / img.size[0]\n col_scale = s / img.size[1]\n y = rescale_bbox(y, row_scale, col_scale)\n y.squeeze_()\n y2 = self.data.iloc[index, 2]\n y = y, y2\n return x, y\n\n\nclass my_image_folder(DatasetFolder):\n\n def __init__(self, root, transform=None, target_transform=None, loader=\n default_loader, minorities=None, diffs=None, bal_tfms=None,\n tta_tfms=None):\n super(my_image_folder, self).__init__(root, loader, IMG_EXTENSIONS,\n transform=transform, target_transform=target_transform)\n self.imgs = self.samples\n self.minorities = minorities\n self.diffs = diffs\n self.bal_tfms = bal_tfms\n self.tta_tfms = tta_tfms\n self.tfms = None\n\n def __getitem__(self, index):\n path, target = self.samples[index]\n sample = self.loader(path)\n if self.transform:\n if self.minorities and self.bal_tfms:\n if target in self.minorities:\n if hasattr(self.bal_tfms, 'transforms'):\n for tr in self.bal_tfms.transforms:\n tr.p = self.diffs[target]\n l = [self.bal_tfms]\n l.extend(self.transform)\n self.tfms = transforms.Compose(l)\n else:\n for t in self.bal_tfms:\n t.p = self.diffs[target]\n self.tfms = transforms.Compose(self.bal_tfms + self\n .transform)\n else:\n self.tfms = transforms.Compose(self.transform)\n elif self.tta_tfms:\n self.tfms = self.tta_tfms\n else:\n self.tfms = transforms.Compose(self.transform)\n sample = self.tfms(sample)\n if self.target_transform:\n target = self.target_transform(target)\n return sample, target\n\n\n<mask token>\n\n\ndef get_index(arr, a):\n for i in range(len(arr)):\n if sum(arr[i] == a) == len(a):\n return i\n return False\n\n\ndef rescale_bbox(bb, row_scale, col_scale):\n bb = bb.reshape((-1, 4))\n for b in bb:\n r1, c1, r2, c2 = b\n b[0] = int(np.round(r1 * col_scale))\n b[1] = int(np.round(c1 * row_scale))\n b[2] = int(np.round(r2 * col_scale))\n b[3] = int(np.round(c2 * row_scale))\n bb = bb.reshape((1, -1))\n return bb\n\n\ndef get_img_stats(dataset, sz):\n size = int(len(dataset) * sz)\n i = 0\n imgs = []\n for img, _ in dataset:\n if i > size:\n break\n imgs.append(img)\n i += 1\n imgs_ = torch.stack(imgs, dim=3)\n imgs_ = imgs_.view(3, -1)\n imgs_mean = imgs_.mean(dim=1)\n imgs_std = 
imgs_.std(dim=1)\n return imgs_mean, imgs_std\n\n\n<mask token>\n\n\ndef save_obj(obj, path):\n with open(path, 'wb') as f:\n pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)\n\n\n<mask token>\n\n\nclass DataProcessor:\n\n def __init__(self, data_path=None, train_csv=None, val_csv=None, reg=\n False, tr_name='train', val_name='val', test_name='test', extension\n =None, setup_data=True):\n print('+------------------------------------+')\n print('| Dream AI |')\n print('+------------------------------------+')\n print()\n self.device = torch.device('cuda:0' if torch.cuda.is_available() else\n 'cpu')\n (self.data_path, self.train_csv, self.val_csv, self.reg, self.\n tr_name, self.val_name, self.test_name, self.extension) = (\n data_path, train_csv, val_csv, reg, tr_name, val_name,\n test_name, extension)\n self.obj = False\n self.multi_label = False\n if setup_data:\n self.set_up_data()\n\n def set_up_data(self, split_size=0.15):\n data_path, train_csv, val_csv, tr_name, val_name, test_name = (self\n .data_path, self.train_csv, self.val_csv, self.tr_name, self.\n val_name, self.test_name)\n if not data_path:\n data_path = os.getcwd() + '/'\n tr_path = os.path.join(data_path, tr_name)\n val_path = os.path.join(data_path, val_name)\n test_path = os.path.join(data_path, test_name)\n if os.path.exists(os.path.join(data_path, tr_name + '.csv')):\n train_csv = tr_name + '.csv'\n if not train_csv:\n print('no')\n train_csv, val_csv, test_csv = self.data_from_paths_to_csv(\n data_path, tr_path, val_path, test_path)\n train_csv_path = os.path.join(data_path, train_csv)\n train_df = pd.read_csv(train_csv_path)\n if 'Unnamed: 0' in train_df.columns:\n train_df = train_df.drop('Unnamed: 0', 1)\n if len(train_df.columns) > 2:\n self.obj = True\n img_names = [str(x) for x in list(train_df.iloc[:, 0])]\n if self.extension:\n img_names = add_extension(img_names, self.extension)\n if val_csv:\n val_csv_path = os.path.join(data_path, val_csv)\n val_df = pd.read_csv(val_csv_path)\n val_targets = list(map(str, list(val_df.iloc[:, 1])))\n if test_csv:\n test_csv_path = os.path.join(data_path, test_csv)\n test_df = pd.read_csv(test_csv_path)\n test_targets = list(map(str, list(test_df.iloc[:, 1])))\n targets = list(map(str, list(train_df.iloc[:, 1])))\n lengths = [len(t) for t in [s.split() for s in targets]]\n self.target_lengths = lengths\n split_targets = [t.split() for t in targets]\n if self.obj:\n print('\\nObject Detection\\n')\n int_targets = [list(map(float, x)) for x in split_targets]\n zero_targets = np.zeros((len(targets), max(lengths)), dtype=int)\n for i, t in enumerate(zero_targets):\n t[len(t) - len(int_targets[i]):] = int_targets[i]\n zero_targets[i] = t\n train_df.iloc[:, 1] = [torch.from_numpy(z).type(torch.\n FloatTensor) for z in zero_targets]\n obj_targets = list(map(str, list(train_df.iloc[:, 2])))\n obj_split_targets = [t.split() for t in obj_targets]\n try:\n obj_split_targets = [list(map(int, x)) for x in\n obj_split_targets]\n except:\n pass\n dai_onehot, onehot_classes = one_hot(obj_split_targets, True)\n c_names = list(onehot_classes)\n class_idx = [[c_names.index(i) for i in c] for c in\n obj_split_targets]\n zero_idx = np.zeros((len(targets), max(lengths) // 4), dtype=int)\n for i, t in enumerate(zero_idx):\n t[len(t) - len(class_idx[i]):] = class_idx[i]\n zero_idx[i] = t\n train_df.iloc[:, 2] = [torch.from_numpy(z).type(torch.\n LongTensor) for z in zero_idx]\n self.data_dir, self.num_classes, self.class_names = data_path, len(\n onehot_classes), onehot_classes\n elif self.reg:\n 
print('\\nRegression\\n')\n int_targets = [list(map(int, x)) for x in split_targets]\n zero_targets = np.zeros((len(targets), max(lengths)), dtype=int)\n for i, t in enumerate(zero_targets):\n t[len(t) - len(int_targets[i]):] = int_targets[i]\n zero_targets[i] = t\n train_df.iloc[:, 1] = [torch.from_numpy(z).type(torch.\n FloatTensor) for z in zero_targets]\n self.data_dir, self.num_classes, self.class_names = data_path, max(\n lengths), np.unique(zero_targets, axis=1)\n elif lengths[1:] != lengths[:-1]:\n self.multi_label = True\n print('\\nMulti-label Classification\\n')\n try:\n split_targets = [list(map(int, x)) for x in split_targets]\n except:\n pass\n dai_onehot, onehot_classes = one_hot(split_targets, self.\n multi_label)\n train_df.iloc[:, 1] = [torch.from_numpy(x).type(torch.\n FloatTensor) for x in dai_onehot]\n self.data_dir, self.num_classes, self.class_names = data_path, len(\n onehot_classes), onehot_classes\n else:\n print('\\nSingle-label Classification\\n')\n unique_targets = list(np.unique(targets))\n target_ids = [unique_targets.index(x) for x in targets]\n train_df.iloc[:, 1] = target_ids\n if val_csv:\n target_ids = [unique_targets.index(x) for x in val_targets]\n val_df.iloc[:, 1] = target_ids\n if test_csv:\n target_ids = [unique_targets.index(x) for x in test_targets]\n test_df.iloc[:, 1] = target_ids\n self.data_dir, self.num_classes, self.class_names = data_path, len(\n unique_targets), unique_targets\n if not val_csv:\n train_df, val_df = split_df(train_df, split_size)\n if not test_csv:\n val_df, test_df = split_df(val_df, split_size)\n tr_images = [str(x) for x in list(train_df.iloc[:, 0])]\n val_images = [str(x) for x in list(val_df.iloc[:, 0])]\n test_images = [str(x) for x in list(test_df.iloc[:, 0])]\n if self.extension:\n tr_images = add_extension(tr_images, self.extension)\n val_images = add_extension(val_images, self.extension)\n test_images = add_extension(test_images, self.extension)\n train_df.iloc[:, 0] = tr_images\n val_df.iloc[:, 0] = val_images\n test_df.iloc[:, 0] = test_images\n train_df.to_csv(os.path.join(data_path, 'train.csv'), index=False)\n val_df.to_csv(os.path.join(data_path, 'val.csv'), index=False)\n test_df.to_csv(os.path.join(data_path, 'test.csv'), index=False)\n self.minorities, self.class_diffs = None, None\n if not self.obj or not self.multi_label:\n self.minorities, self.class_diffs = get_minorities(train_df)\n self.data_dfs = {self.tr_name: train_df, self.val_name: val_df,\n self.test_name: test_df}\n data_dict = {'data_dfs': self.data_dfs, 'data_dir': self.data_dir,\n 'num_classes': self.num_classes, 'class_names': self.\n class_names, 'minorities': self.minorities, 'class_diffs': self\n .class_diffs, 'obj': self.obj, 'multi_label': self.multi_label}\n self.data_dict = data_dict\n return data_dict\n\n def data_from_paths_to_csv(self, data_path, tr_path, val_path=None,\n test_path=None):\n train_df = csv_from_path(tr_path, tr_path)\n train_df.to_csv(os.path.join(data_path, self.tr_name + '.csv'),\n index=False)\n ret = self.tr_name + '.csv', None\n if val_path is not None:\n val_exists = os.path.exists(val_path)\n if val_exists:\n val_df = csv_from_path(val_path, tr_path)\n val_df.to_csv(os.path.join(data_path, self.val_name +\n '.csv'), index=False)\n ret = self.tr_name + '.csv', self.val_name + '.csv'\n if test_path is not None:\n test_exists = os.path.exists(test_path)\n if test_exists:\n test_df = csv_from_path(test_path, tr_path)\n test_df.to_csv(os.path.join(data_path, self.test_name +\n '.csv'), index=False)\n ret = 
(self.tr_name + '.csv', self.val_name + '.csv', self.\n test_name + '.csv')\n return ret\n\n def get_data(self, data_dict=None, s=(224, 224), dataset=\n my_image_csv_dataset, bs=32, balance=False, tfms=None, bal_tfms=\n None, tta=False, num_workers=4, stats_percentage=0.6):\n self.image_size = s\n if not data_dict:\n data_dict = self.data_dict\n data_dfs, data_dir, minorities, class_diffs, obj, multi_label = (\n data_dict['data_dfs'], data_dict['data_dir'], data_dict[\n 'minorities'], data_dict['class_diffs'], data_dict['obj'],\n data_dict['multi_label'])\n if obj or multi_label:\n balance = False\n if tta:\n tta_tfms = {self.tr_name: transforms.Compose([transforms.\n FiveCrop(s[0]), transforms.Lambda(lambda crops: torch.stack\n ([transforms.ToTensor()(crop) for crop in crops])),\n transforms.Lambda(lambda crops: torch.stack([transforms.\n Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])(\n crop) for crop in crops]))]), self.val_name: transforms.\n Compose([transforms.FiveCrop(s[0]), transforms.Lambda(lambda\n crops: torch.stack([transforms.ToTensor()(crop) for crop in\n crops])), transforms.Lambda(lambda crops: torch.stack([\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, \n 0.225])(crop) for crop in crops]))]), self.test_name:\n transforms.Compose([transforms.FiveCrop(s[0]), transforms.\n Lambda(lambda crops: torch.stack([transforms.ToTensor()(\n crop) for crop in crops])), transforms.Lambda(lambda crops:\n torch.stack([transforms.Normalize([0.485, 0.456, 0.406], [\n 0.229, 0.224, 0.225])(crop) for crop in crops]))])}\n else:\n tta_tfms = None\n if not bal_tfms:\n bal_tfms = {self.tr_name: [transforms.RandomHorizontalFlip()],\n self.val_name: None, self.test_name: None}\n else:\n bal_tfms = {self.tr_name: bal_tfms, self.val_name: None, self.\n test_name: None}\n if obj:\n resize_transform = transforms.Resize(s)\n else:\n resize_transform = transforms.Resize(s)\n if not tfms:\n tfms = [resize_transform, transforms.ToTensor(), transforms.\n Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])]\n else:\n tfms_temp = [resize_transform, transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, \n 0.225])]\n tfms_temp[1:1] = tfms\n tfms = tfms_temp\n print(tfms)\n data_transforms = {self.tr_name: tfms, self.val_name: [transforms.\n Resize(s), transforms.ToTensor(), transforms.Normalize([0.485, \n 0.456, 0.406], [0.229, 0.224, 0.225])], self.test_name: [\n transforms.Resize(s), transforms.ToTensor(), transforms.\n Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])]}\n temp_tfms = [resize_transform, transforms.ToTensor()]\n temp_dataset = dataset(os.path.join(data_dir, self.tr_name),\n data_dfs[self.tr_name], temp_tfms)\n self.img_mean, self.img_std = get_img_stats(temp_dataset,\n stats_percentage)\n data_transforms[self.tr_name][-1].mean, data_transforms[self.tr_name][\n -1].std = self.img_mean, self.img_std\n data_transforms[self.val_name][-1].mean, data_transforms[self.val_name\n ][-1].std = self.img_mean, self.img_std\n data_transforms[self.test_name][-1].mean, data_transforms[self.\n test_name][-1].std = self.img_mean, self.img_std\n if balance:\n image_datasets = {x: dataset(os.path.join(data_dir, self.\n tr_name), data_dfs[x], data_transforms[x], obj, minorities,\n class_diffs, bal_tfms[x]) for x in [self.tr_name, self.\n val_name, self.test_name]}\n else:\n image_datasets = {x: dataset(os.path.join(data_dir, self.\n tr_name), data_dfs[x], data_transforms[x], obj) for x in [\n self.tr_name, self.val_name, self.test_name]}\n dataloaders 
= {x: torch.utils.data.DataLoader(image_datasets[x],\n batch_size=bs, shuffle=True, num_workers=num_workers) for x in\n [self.tr_name, self.val_name, self.test_name]}\n dataset_sizes = {x: len(image_datasets[x]) for x in [self.tr_name,\n self.val_name, self.test_name]}\n self.image_datasets, self.dataloaders, self.dataset_sizes = (\n image_datasets, dataloaders, dataset_sizes)\n return image_datasets, dataloaders, dataset_sizes\n\n def imshow(self, inp, title=None):\n \"\"\"Imshow for Tensor.\"\"\"\n inp = self.denorm_img(inp)\n plt.imshow(inp)\n if title:\n plt.title(title)\n plt.pause(0.001)\n\n def denorm_img(self, inp, calculate=False):\n inp = inp.numpy().transpose((1, 2, 0))\n if calculate:\n mean = np.mean(inp)\n std = np.std(inp)\n else:\n mean = self.img_mean.numpy()\n std = self.img_std.numpy()\n inp = std * inp + mean\n inp = np.clip(inp, 0, 1)\n return inp\n\n def show_data(self, folder_name='train', size=(64, 64), bs=5):\n self.get_data(size, bs)\n batch = next(iter(self.dataloaders[folder_name]))\n inputs, classes = batch[0], batch[1]\n out = torchvision.utils.make_grid(inputs)\n if self.reg:\n print(classes)\n self.imshow(out, title=[x for x in classes])\n elif self.multi_label:\n self.imshow(out, title=[self.class_names[np.nonzero(x.type(\n torch.LongTensor))] for x in classes])\n else:\n self.imshow(out, title=[self.class_names[x] for x in classes])\n",
"step-4": "<mask token>\n\n\nclass my_image_csv_dataset(Dataset):\n\n def __init__(self, data_dir, data, transforms_=None, obj=False,\n minorities=None, diffs=None, bal_tfms=None):\n self.data_dir = data_dir\n self.data = data\n self.transforms_ = transforms_\n self.tfms = None\n self.obj = obj\n self.minorities = minorities\n self.diffs = diffs\n self.bal_tfms = bal_tfms\n assert transforms_ is not None, print('Please pass some transforms.')\n\n def __len__(self):\n return len(self.data)\n\n def __getitem__(self, index):\n img_path = os.path.join(self.data_dir, self.data.iloc[index, 0])\n img = Image.open(img_path)\n img = img.convert('RGB')\n img = torchvision.transforms.functional.to_grayscale(img,\n num_output_channels=3)\n y = self.data.iloc[index, 1]\n if self.minorities and self.bal_tfms:\n if y in self.minorities:\n if hasattr(self.bal_tfms, 'transforms'):\n for tr in self.bal_tfms.transforms:\n tr.p = self.diffs[y]\n l = [self.bal_tfms]\n l.extend(self.transforms_)\n self.tfms = transforms.Compose(l)\n else:\n for t in self.bal_tfms:\n t.p = self.diffs[y]\n self.transforms_[1:1] = self.bal_tfms\n self.tfms = transforms.Compose(self.transforms_)\n else:\n self.tfms = transforms.Compose(self.transforms_)\n else:\n self.tfms = transforms.Compose(self.transforms_)\n x = self.tfms(img)\n if self.obj:\n s = x.size()[1]\n if isinstance(s, tuple):\n s = s[0]\n row_scale = s / img.size[0]\n col_scale = s / img.size[1]\n y = rescale_bbox(y, row_scale, col_scale)\n y.squeeze_()\n y2 = self.data.iloc[index, 2]\n y = y, y2\n return x, y\n\n\nclass my_image_folder(DatasetFolder):\n\n def __init__(self, root, transform=None, target_transform=None, loader=\n default_loader, minorities=None, diffs=None, bal_tfms=None,\n tta_tfms=None):\n super(my_image_folder, self).__init__(root, loader, IMG_EXTENSIONS,\n transform=transform, target_transform=target_transform)\n self.imgs = self.samples\n self.minorities = minorities\n self.diffs = diffs\n self.bal_tfms = bal_tfms\n self.tta_tfms = tta_tfms\n self.tfms = None\n\n def __getitem__(self, index):\n path, target = self.samples[index]\n sample = self.loader(path)\n if self.transform:\n if self.minorities and self.bal_tfms:\n if target in self.minorities:\n if hasattr(self.bal_tfms, 'transforms'):\n for tr in self.bal_tfms.transforms:\n tr.p = self.diffs[target]\n l = [self.bal_tfms]\n l.extend(self.transform)\n self.tfms = transforms.Compose(l)\n else:\n for t in self.bal_tfms:\n t.p = self.diffs[target]\n self.tfms = transforms.Compose(self.bal_tfms + self\n .transform)\n else:\n self.tfms = transforms.Compose(self.transform)\n elif self.tta_tfms:\n self.tfms = self.tta_tfms\n else:\n self.tfms = transforms.Compose(self.transform)\n sample = self.tfms(sample)\n if self.target_transform:\n target = self.target_transform(target)\n return sample, target\n\n\n<mask token>\n\n\ndef listdir_fullpath(d):\n return [os.path.join(d, f) for f in os.listdir(d)]\n\n\n<mask token>\n\n\ndef csv_from_path(path, img_dest):\n path = Path(path)\n img_dest = Path(img_dest)\n labels_paths = list(path.iterdir())\n tr_images = []\n tr_labels = []\n for l in labels_paths:\n if l.is_dir():\n for i in list(l.iterdir()):\n if i.suffix in IMG_EXTENSIONS:\n name = i.name\n label = l.name\n new_name = '{}_{}'.format(path.name, name)\n new_path = img_dest / new_name\n os.rename(i, new_path)\n tr_images.append(new_name)\n tr_labels.append(label)\n tr_img_label = {'Img': tr_images, 'Label': tr_labels}\n csv = pd.DataFrame(tr_img_label, columns=['Img', 'Label'])\n csv = 
csv.sample(frac=1).reset_index(drop=True)\n return csv\n\n\ndef add_extension(a, e):\n a = [(x + e) for x in a]\n return a\n\n\ndef one_hot(targets, multi=False):\n if multi:\n binerizer = MultiLabelBinarizer()\n dai_1hot = binerizer.fit_transform(targets)\n else:\n binerizer = LabelBinarizer()\n dai_1hot = binerizer.fit_transform(targets)\n return dai_1hot, binerizer.classes_\n\n\ndef get_index(arr, a):\n for i in range(len(arr)):\n if sum(arr[i] == a) == len(a):\n return i\n return False\n\n\ndef rescale_bbox(bb, row_scale, col_scale):\n bb = bb.reshape((-1, 4))\n for b in bb:\n r1, c1, r2, c2 = b\n b[0] = int(np.round(r1 * col_scale))\n b[1] = int(np.round(c1 * row_scale))\n b[2] = int(np.round(r2 * col_scale))\n b[3] = int(np.round(c2 * row_scale))\n bb = bb.reshape((1, -1))\n return bb\n\n\ndef get_img_stats(dataset, sz):\n size = int(len(dataset) * sz)\n i = 0\n imgs = []\n for img, _ in dataset:\n if i > size:\n break\n imgs.append(img)\n i += 1\n imgs_ = torch.stack(imgs, dim=3)\n imgs_ = imgs_.view(3, -1)\n imgs_mean = imgs_.mean(dim=1)\n imgs_std = imgs_.std(dim=1)\n return imgs_mean, imgs_std\n\n\ndef split_df(train_df, test_size=0.15):\n try:\n train_df, val_df = train_test_split(train_df, test_size=test_size,\n random_state=2, stratify=train_df.iloc[:, 1])\n except:\n train_df, val_df = train_test_split(train_df, test_size=test_size,\n random_state=2)\n train_df = train_df.reset_index(drop=True)\n val_df = val_df.reset_index(drop=True)\n return train_df, val_df\n\n\ndef save_obj(obj, path):\n with open(path, 'wb') as f:\n pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)\n\n\ndef load_obj(path):\n with open(path, 'rb') as f:\n return pickle.load(f)\n\n\nclass DataProcessor:\n\n def __init__(self, data_path=None, train_csv=None, val_csv=None, reg=\n False, tr_name='train', val_name='val', test_name='test', extension\n =None, setup_data=True):\n print('+------------------------------------+')\n print('| Dream AI |')\n print('+------------------------------------+')\n print()\n self.device = torch.device('cuda:0' if torch.cuda.is_available() else\n 'cpu')\n (self.data_path, self.train_csv, self.val_csv, self.reg, self.\n tr_name, self.val_name, self.test_name, self.extension) = (\n data_path, train_csv, val_csv, reg, tr_name, val_name,\n test_name, extension)\n self.obj = False\n self.multi_label = False\n if setup_data:\n self.set_up_data()\n\n def set_up_data(self, split_size=0.15):\n data_path, train_csv, val_csv, tr_name, val_name, test_name = (self\n .data_path, self.train_csv, self.val_csv, self.tr_name, self.\n val_name, self.test_name)\n if not data_path:\n data_path = os.getcwd() + '/'\n tr_path = os.path.join(data_path, tr_name)\n val_path = os.path.join(data_path, val_name)\n test_path = os.path.join(data_path, test_name)\n if os.path.exists(os.path.join(data_path, tr_name + '.csv')):\n train_csv = tr_name + '.csv'\n if not train_csv:\n print('no')\n train_csv, val_csv, test_csv = self.data_from_paths_to_csv(\n data_path, tr_path, val_path, test_path)\n train_csv_path = os.path.join(data_path, train_csv)\n train_df = pd.read_csv(train_csv_path)\n if 'Unnamed: 0' in train_df.columns:\n train_df = train_df.drop('Unnamed: 0', 1)\n if len(train_df.columns) > 2:\n self.obj = True\n img_names = [str(x) for x in list(train_df.iloc[:, 0])]\n if self.extension:\n img_names = add_extension(img_names, self.extension)\n if val_csv:\n val_csv_path = os.path.join(data_path, val_csv)\n val_df = pd.read_csv(val_csv_path)\n val_targets = list(map(str, list(val_df.iloc[:, 1])))\n if 
test_csv:\n test_csv_path = os.path.join(data_path, test_csv)\n test_df = pd.read_csv(test_csv_path)\n test_targets = list(map(str, list(test_df.iloc[:, 1])))\n targets = list(map(str, list(train_df.iloc[:, 1])))\n lengths = [len(t) for t in [s.split() for s in targets]]\n self.target_lengths = lengths\n split_targets = [t.split() for t in targets]\n if self.obj:\n print('\\nObject Detection\\n')\n int_targets = [list(map(float, x)) for x in split_targets]\n zero_targets = np.zeros((len(targets), max(lengths)), dtype=int)\n for i, t in enumerate(zero_targets):\n t[len(t) - len(int_targets[i]):] = int_targets[i]\n zero_targets[i] = t\n train_df.iloc[:, 1] = [torch.from_numpy(z).type(torch.\n FloatTensor) for z in zero_targets]\n obj_targets = list(map(str, list(train_df.iloc[:, 2])))\n obj_split_targets = [t.split() for t in obj_targets]\n try:\n obj_split_targets = [list(map(int, x)) for x in\n obj_split_targets]\n except:\n pass\n dai_onehot, onehot_classes = one_hot(obj_split_targets, True)\n c_names = list(onehot_classes)\n class_idx = [[c_names.index(i) for i in c] for c in\n obj_split_targets]\n zero_idx = np.zeros((len(targets), max(lengths) // 4), dtype=int)\n for i, t in enumerate(zero_idx):\n t[len(t) - len(class_idx[i]):] = class_idx[i]\n zero_idx[i] = t\n train_df.iloc[:, 2] = [torch.from_numpy(z).type(torch.\n LongTensor) for z in zero_idx]\n self.data_dir, self.num_classes, self.class_names = data_path, len(\n onehot_classes), onehot_classes\n elif self.reg:\n print('\\nRegression\\n')\n int_targets = [list(map(int, x)) for x in split_targets]\n zero_targets = np.zeros((len(targets), max(lengths)), dtype=int)\n for i, t in enumerate(zero_targets):\n t[len(t) - len(int_targets[i]):] = int_targets[i]\n zero_targets[i] = t\n train_df.iloc[:, 1] = [torch.from_numpy(z).type(torch.\n FloatTensor) for z in zero_targets]\n self.data_dir, self.num_classes, self.class_names = data_path, max(\n lengths), np.unique(zero_targets, axis=1)\n elif lengths[1:] != lengths[:-1]:\n self.multi_label = True\n print('\\nMulti-label Classification\\n')\n try:\n split_targets = [list(map(int, x)) for x in split_targets]\n except:\n pass\n dai_onehot, onehot_classes = one_hot(split_targets, self.\n multi_label)\n train_df.iloc[:, 1] = [torch.from_numpy(x).type(torch.\n FloatTensor) for x in dai_onehot]\n self.data_dir, self.num_classes, self.class_names = data_path, len(\n onehot_classes), onehot_classes\n else:\n print('\\nSingle-label Classification\\n')\n unique_targets = list(np.unique(targets))\n target_ids = [unique_targets.index(x) for x in targets]\n train_df.iloc[:, 1] = target_ids\n if val_csv:\n target_ids = [unique_targets.index(x) for x in val_targets]\n val_df.iloc[:, 1] = target_ids\n if test_csv:\n target_ids = [unique_targets.index(x) for x in test_targets]\n test_df.iloc[:, 1] = target_ids\n self.data_dir, self.num_classes, self.class_names = data_path, len(\n unique_targets), unique_targets\n if not val_csv:\n train_df, val_df = split_df(train_df, split_size)\n if not test_csv:\n val_df, test_df = split_df(val_df, split_size)\n tr_images = [str(x) for x in list(train_df.iloc[:, 0])]\n val_images = [str(x) for x in list(val_df.iloc[:, 0])]\n test_images = [str(x) for x in list(test_df.iloc[:, 0])]\n if self.extension:\n tr_images = add_extension(tr_images, self.extension)\n val_images = add_extension(val_images, self.extension)\n test_images = add_extension(test_images, self.extension)\n train_df.iloc[:, 0] = tr_images\n val_df.iloc[:, 0] = val_images\n test_df.iloc[:, 0] = 
test_images\n train_df.to_csv(os.path.join(data_path, 'train.csv'), index=False)\n val_df.to_csv(os.path.join(data_path, 'val.csv'), index=False)\n test_df.to_csv(os.path.join(data_path, 'test.csv'), index=False)\n self.minorities, self.class_diffs = None, None\n if not self.obj or not self.multi_label:\n self.minorities, self.class_diffs = get_minorities(train_df)\n self.data_dfs = {self.tr_name: train_df, self.val_name: val_df,\n self.test_name: test_df}\n data_dict = {'data_dfs': self.data_dfs, 'data_dir': self.data_dir,\n 'num_classes': self.num_classes, 'class_names': self.\n class_names, 'minorities': self.minorities, 'class_diffs': self\n .class_diffs, 'obj': self.obj, 'multi_label': self.multi_label}\n self.data_dict = data_dict\n return data_dict\n\n def data_from_paths_to_csv(self, data_path, tr_path, val_path=None,\n test_path=None):\n train_df = csv_from_path(tr_path, tr_path)\n train_df.to_csv(os.path.join(data_path, self.tr_name + '.csv'),\n index=False)\n ret = self.tr_name + '.csv', None\n if val_path is not None:\n val_exists = os.path.exists(val_path)\n if val_exists:\n val_df = csv_from_path(val_path, tr_path)\n val_df.to_csv(os.path.join(data_path, self.val_name +\n '.csv'), index=False)\n ret = self.tr_name + '.csv', self.val_name + '.csv'\n if test_path is not None:\n test_exists = os.path.exists(test_path)\n if test_exists:\n test_df = csv_from_path(test_path, tr_path)\n test_df.to_csv(os.path.join(data_path, self.test_name +\n '.csv'), index=False)\n ret = (self.tr_name + '.csv', self.val_name + '.csv', self.\n test_name + '.csv')\n return ret\n\n def get_data(self, data_dict=None, s=(224, 224), dataset=\n my_image_csv_dataset, bs=32, balance=False, tfms=None, bal_tfms=\n None, tta=False, num_workers=4, stats_percentage=0.6):\n self.image_size = s\n if not data_dict:\n data_dict = self.data_dict\n data_dfs, data_dir, minorities, class_diffs, obj, multi_label = (\n data_dict['data_dfs'], data_dict['data_dir'], data_dict[\n 'minorities'], data_dict['class_diffs'], data_dict['obj'],\n data_dict['multi_label'])\n if obj or multi_label:\n balance = False\n if tta:\n tta_tfms = {self.tr_name: transforms.Compose([transforms.\n FiveCrop(s[0]), transforms.Lambda(lambda crops: torch.stack\n ([transforms.ToTensor()(crop) for crop in crops])),\n transforms.Lambda(lambda crops: torch.stack([transforms.\n Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])(\n crop) for crop in crops]))]), self.val_name: transforms.\n Compose([transforms.FiveCrop(s[0]), transforms.Lambda(lambda\n crops: torch.stack([transforms.ToTensor()(crop) for crop in\n crops])), transforms.Lambda(lambda crops: torch.stack([\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, \n 0.225])(crop) for crop in crops]))]), self.test_name:\n transforms.Compose([transforms.FiveCrop(s[0]), transforms.\n Lambda(lambda crops: torch.stack([transforms.ToTensor()(\n crop) for crop in crops])), transforms.Lambda(lambda crops:\n torch.stack([transforms.Normalize([0.485, 0.456, 0.406], [\n 0.229, 0.224, 0.225])(crop) for crop in crops]))])}\n else:\n tta_tfms = None\n if not bal_tfms:\n bal_tfms = {self.tr_name: [transforms.RandomHorizontalFlip()],\n self.val_name: None, self.test_name: None}\n else:\n bal_tfms = {self.tr_name: bal_tfms, self.val_name: None, self.\n test_name: None}\n if obj:\n resize_transform = transforms.Resize(s)\n else:\n resize_transform = transforms.Resize(s)\n if not tfms:\n tfms = [resize_transform, transforms.ToTensor(), transforms.\n Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 
0.225])]\n else:\n tfms_temp = [resize_transform, transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, \n 0.225])]\n tfms_temp[1:1] = tfms\n tfms = tfms_temp\n print(tfms)\n data_transforms = {self.tr_name: tfms, self.val_name: [transforms.\n Resize(s), transforms.ToTensor(), transforms.Normalize([0.485, \n 0.456, 0.406], [0.229, 0.224, 0.225])], self.test_name: [\n transforms.Resize(s), transforms.ToTensor(), transforms.\n Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])]}\n temp_tfms = [resize_transform, transforms.ToTensor()]\n temp_dataset = dataset(os.path.join(data_dir, self.tr_name),\n data_dfs[self.tr_name], temp_tfms)\n self.img_mean, self.img_std = get_img_stats(temp_dataset,\n stats_percentage)\n data_transforms[self.tr_name][-1].mean, data_transforms[self.tr_name][\n -1].std = self.img_mean, self.img_std\n data_transforms[self.val_name][-1].mean, data_transforms[self.val_name\n ][-1].std = self.img_mean, self.img_std\n data_transforms[self.test_name][-1].mean, data_transforms[self.\n test_name][-1].std = self.img_mean, self.img_std\n if balance:\n image_datasets = {x: dataset(os.path.join(data_dir, self.\n tr_name), data_dfs[x], data_transforms[x], obj, minorities,\n class_diffs, bal_tfms[x]) for x in [self.tr_name, self.\n val_name, self.test_name]}\n else:\n image_datasets = {x: dataset(os.path.join(data_dir, self.\n tr_name), data_dfs[x], data_transforms[x], obj) for x in [\n self.tr_name, self.val_name, self.test_name]}\n dataloaders = {x: torch.utils.data.DataLoader(image_datasets[x],\n batch_size=bs, shuffle=True, num_workers=num_workers) for x in\n [self.tr_name, self.val_name, self.test_name]}\n dataset_sizes = {x: len(image_datasets[x]) for x in [self.tr_name,\n self.val_name, self.test_name]}\n self.image_datasets, self.dataloaders, self.dataset_sizes = (\n image_datasets, dataloaders, dataset_sizes)\n return image_datasets, dataloaders, dataset_sizes\n\n def imshow(self, inp, title=None):\n \"\"\"Imshow for Tensor.\"\"\"\n inp = self.denorm_img(inp)\n plt.imshow(inp)\n if title:\n plt.title(title)\n plt.pause(0.001)\n\n def denorm_img(self, inp, calculate=False):\n inp = inp.numpy().transpose((1, 2, 0))\n if calculate:\n mean = np.mean(inp)\n std = np.std(inp)\n else:\n mean = self.img_mean.numpy()\n std = self.img_std.numpy()\n inp = std * inp + mean\n inp = np.clip(inp, 0, 1)\n return inp\n\n def show_data(self, folder_name='train', size=(64, 64), bs=5):\n self.get_data(size, bs)\n batch = next(iter(self.dataloaders[folder_name]))\n inputs, classes = batch[0], batch[1]\n out = torchvision.utils.make_grid(inputs)\n if self.reg:\n print(classes)\n self.imshow(out, title=[x for x in classes])\n elif self.multi_label:\n self.imshow(out, title=[self.class_names[np.nonzero(x.type(\n torch.LongTensor))] for x in classes])\n else:\n self.imshow(out, title=[self.class_names[x] for x in classes])\n",
"step-5": "from dai_imports import*\nfrom obj_utils import*\nimport utils\n\nclass my_image_csv_dataset(Dataset):\n \n def __init__(self, data_dir, data, transforms_ = None, obj = False,\n minorities = None, diffs = None, bal_tfms = None):\n \n self.data_dir = data_dir\n self.data = data\n self.transforms_ = transforms_\n self.tfms = None\n self.obj = obj\n self.minorities = minorities\n self.diffs = diffs\n self.bal_tfms = bal_tfms\n assert transforms_ is not None, print('Please pass some transforms.')\n \n def __len__(self):\n return len(self.data)\n \n def __getitem__(self, index):\n img_path = os.path.join(self.data_dir,self.data.iloc[index, 0])\n img = Image.open(img_path)\n img = img.convert('RGB')\n\n img = torchvision.transforms.functional.to_grayscale(img,num_output_channels=3)\n\n y = self.data.iloc[index, 1] \n if self.minorities and self.bal_tfms:\n if y in self.minorities:\n if hasattr(self.bal_tfms,'transforms'):\n for tr in self.bal_tfms.transforms:\n tr.p = self.diffs[y]\n l = [self.bal_tfms]\n l.extend(self.transforms_)\n self.tfms = transforms.Compose(l) \n else: \n for t in self.bal_tfms:\n t.p = self.diffs[y]\n self.transforms_[1:1] = self.bal_tfms \n self.tfms = transforms.Compose(self.transforms_)\n # print(self.tfms)\n else:\n self.tfms = transforms.Compose(self.transforms_)\n else: \n self.tfms = transforms.Compose(self.transforms_) \n x = self.tfms(img)\n if self.obj:\n s = x.size()[1]\n if isinstance(s,tuple):\n s = s[0]\n row_scale = s/img.size[0]\n col_scale = s/img.size[1]\n y = rescale_bbox(y,row_scale,col_scale)\n y.squeeze_()\n y2 = self.data.iloc[index, 2]\n y = (y,y2)\n return (x,y)\n\n\nclass my_image_folder(DatasetFolder):\n \n def __init__(self, root, transform=None, target_transform=None,\n loader=default_loader, minorities=None, diffs = None, bal_tfms=None, tta_tfms = None):\n \n super(my_image_folder, self).__init__(root, loader, IMG_EXTENSIONS,\n transform=transform,\n target_transform=target_transform)\n self.imgs = self.samples\n self.minorities = minorities\n self.diffs = diffs\n self.bal_tfms = bal_tfms\n self.tta_tfms = tta_tfms\n self.tfms = None\n\n def __getitem__(self,index):\n \n path, target = self.samples[index] \n sample = self.loader(path)\n if self.transform:\n if self.minorities and self.bal_tfms:\n if target in self.minorities:\n if hasattr(self.bal_tfms,'transforms'):\n for tr in self.bal_tfms.transforms:\n tr.p = self.diffs[target]\n l = [self.bal_tfms]\n l.extend(self.transform)\n self.tfms = transforms.Compose(l) \n else: \n for t in self.bal_tfms:\n t.p = self.diffs[target]\n self.tfms = transforms.Compose(self.bal_tfms + self.transform )\n else:\n self.tfms = transforms.Compose(self.transform)\n elif self.tta_tfms:\n self.tfms = self.tta_tfms\n else: \n self.tfms = transforms.Compose(self.transform)\n sample = self.tfms(sample)\n if self.target_transform:\n target = self.target_transform(target)\n return sample, target\n\ndef extract_data(dt):\n\n x = []\n y = []\n for a,b in dt:\n x.append(a)\n y.append(b)\n return x,y\n\ndef listdir_fullpath(d):\n return [os.path.join(d, f) for f in os.listdir(d)] \n\ndef get_minorities(df,thresh=0.8):\n\n c = df.iloc[:,1].value_counts()\n lc = list(c)\n max_count = lc[0]\n diffs = [1-(x/max_count) for x in lc]\n diffs = dict((k,v) for k,v in zip(c.keys(),diffs))\n minorities = [c.keys()[x] for x,y in enumerate(lc) if y < (thresh*max_count)]\n return minorities,diffs\n\ndef csv_from_path(path, img_dest):\n\n path = Path(path)\n img_dest = Path(img_dest)\n labels_paths = 
list(path.iterdir())\n tr_images = []\n tr_labels = []\n for l in labels_paths:\n if l.is_dir():\n for i in list(l.iterdir()):\n if i.suffix in IMG_EXTENSIONS:\n name = i.name\n label = l.name\n new_name = '{}_{}'.format(path.name,name)\n new_path = img_dest/new_name\n# print(new_path)\n os.rename(i,new_path)\n tr_images.append(new_name)\n tr_labels.append(label) \n # os.rmdir(l)\n tr_img_label = {'Img':tr_images, 'Label': tr_labels}\n csv = pd.DataFrame(tr_img_label,columns=['Img','Label'])\n csv = csv.sample(frac=1).reset_index(drop=True)\n return csv\n\ndef add_extension(a,e):\n a = [x+e for x in a]\n return a\n\ndef one_hot(targets, multi = False):\n if multi:\n binerizer = MultiLabelBinarizer()\n dai_1hot = binerizer.fit_transform(targets)\n else:\n binerizer = LabelBinarizer()\n dai_1hot = binerizer.fit_transform(targets)\n return dai_1hot,binerizer.classes_\n\ndef get_index(arr,a):\n for i in range(len(arr)):\n if sum(arr[i] == a) == len(a):\n return i\n return False\n\ndef rescale_bbox(bb,row_scale,col_scale):\n bb = bb.reshape((-1,4))\n for b in bb:\n r1,c1,r2,c2 = b\n b[0] = int(np.round(r1*col_scale))\n b[1] = int(np.round(c1*row_scale))\n b[2] = int(np.round(r2*col_scale))\n b[3] = int(np.round(c2*row_scale))\n\n # bb = torch.tensor([bb_hw(b) for b in bb.reshape(-1,4)])\n # for b in bb:\n # r1,c1,r2,c2 = b\n # b[0] = int(np.round(r1*row_scale))\n # b[1] = int(np.round(c1*col_scale))\n # b[2] = int(np.round(r2*row_scale))\n # b[3] = int(np.round(c2*col_scale))\n # if(sum(b)) == 1:\n # b[0],b[1],b[2],b[3] = 0,0,0,0\n\n bb = bb.reshape((1,-1)) \n return bb\n\ndef get_img_stats(dataset,sz):\n\n size = int(len(dataset)*sz)\n i = 0\n imgs = []\n for img,_ in dataset:\n # print(img.size())\n if i > size:\n break\n imgs.append(img)\n i+=1\n imgs_ = torch.stack(imgs,dim=3)\n imgs_ = imgs_.view(3,-1)\n imgs_mean = imgs_.mean(dim=1)\n imgs_std = imgs_.std(dim=1)\n return imgs_mean,imgs_std\n\ndef split_df(train_df,test_size = 0.15):\n try: \n train_df,val_df = train_test_split(train_df,test_size = test_size,random_state = 2,stratify = train_df.iloc[:,1])\n except:\n train_df,val_df = train_test_split(train_df,test_size = test_size,random_state = 2)\n train_df = train_df.reset_index(drop = True)\n val_df = val_df.reset_index(drop = True)\n return train_df,val_df \n\ndef save_obj(obj, path):\n with open(path, 'wb') as f:\n pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)\n\ndef load_obj(path):\n with open(path, 'rb') as f:\n return pickle.load(f)\n\nclass DataProcessor:\n \n def __init__(self, data_path = None, train_csv = None, val_csv = None, reg = False,\n tr_name = 'train', val_name = 'val', test_name = 'test', extension = None, setup_data = True):\n \n print('+------------------------------------+')\n print('| Dream AI |')\n print('+------------------------------------+')\n print()\n \n self.device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n \n self.data_path,self.train_csv,self.val_csv,self.reg,self.tr_name,self.val_name,self.test_name,self.extension = (data_path,train_csv,\n val_csv,reg,tr_name,val_name,test_name,extension)\n \n self.obj = False\n self.multi_label = False\n \n if setup_data:\n self.set_up_data()\n \n def set_up_data(self,split_size = 0.15):\n\n data_path,train_csv,val_csv,tr_name,val_name,test_name = (self.data_path,self.train_csv,self.val_csv,self.tr_name,self.val_name,self.test_name)\n\n # check if paths given and also set paths\n \n if not data_path:\n data_path = os.getcwd() + '/'\n tr_path = os.path.join(data_path,tr_name)\n val_path = 
os.path.join(data_path,val_name)\n test_path = os.path.join(data_path,test_name)\n\n if os.path.exists(os.path.join(data_path,tr_name+'.csv')):\n train_csv = tr_name+'.csv'\n # if os.path.exists(os.path.join(data_path,val_name+'.csv')):\n # val_csv = val_name+'.csv'\n # if os.path.exists(os.path.join(data_path,test_name+'.csv')):\n # test_csv = test_name+'.csv' \n\n # paths to csv\n\n if not train_csv:\n print('no')\n train_csv,val_csv,test_csv = self.data_from_paths_to_csv(data_path,tr_path,val_path,test_path)\n\n train_csv_path = os.path.join(data_path,train_csv)\n train_df = pd.read_csv(train_csv_path)\n if 'Unnamed: 0' in train_df.columns:\n train_df = train_df.drop('Unnamed: 0', 1)\n if len(train_df.columns) > 2:\n self.obj = True \n img_names = [str(x) for x in list(train_df.iloc[:,0])]\n if self.extension:\n img_names = add_extension(img_names,self.extension)\n if val_csv:\n val_csv_path = os.path.join(data_path,val_csv)\n val_df = pd.read_csv(val_csv_path)\n val_targets = list(map(str,list(val_df.iloc[:,1])))\n if test_csv:\n test_csv_path = os.path.join(data_path,test_csv)\n test_df = pd.read_csv(test_csv_path)\n test_targets = list(map(str,list(test_df.iloc[:,1]))) \n targets = list(map(str,list(train_df.iloc[:,1])))\n lengths = [len(t) for t in [s.split() for s in targets]]\n self.target_lengths = lengths\n split_targets = [t.split() for t in targets]\n if self.obj:\n print('\\nObject Detection\\n')\n\n # bounding boxes\n\n int_targets = [list(map(float,x)) for x in split_targets]\n zero_targets = np.zeros((len(targets),max(lengths)),dtype=int)\n for i,t in enumerate(zero_targets):\n t[len(t)-len(int_targets[i]):] = int_targets[i]\n zero_targets[i] = t\n train_df.iloc[:,1] = [torch.from_numpy(z).type(torch.FloatTensor) for z in zero_targets]\n\n # one-hot classes\n\n obj_targets = list(map(str,list(train_df.iloc[:,2])))\n obj_split_targets = [t.split() for t in obj_targets]\n try:\n obj_split_targets = [list(map(int,x)) for x in obj_split_targets]\n except:\n pass\n dai_onehot,onehot_classes = one_hot(obj_split_targets,True)\n # train_df['one_hot'] = [torch.from_numpy(x).type(torch.FloatTensor) for x in dai_onehot]\n\n # class indexes\n\n c_names = list(onehot_classes)\n class_idx = [[c_names.index(i) for i in c] for c in obj_split_targets]\n zero_idx = np.zeros((len(targets),max(lengths)//4),dtype=int)\n # print(zero_idx.shape)\n for i,t in enumerate(zero_idx):\n # temp_l = len(class_idx[i])\n # if temp_l > 90:\n # print(i,temp_l)\n t[len(t)-len(class_idx[i]):] = class_idx[i]\n zero_idx[i] = t\n train_df.iloc[:,2] = [torch.from_numpy(z).type(torch.LongTensor) for z in zero_idx]\n self.data_dir,self.num_classes,self.class_names = data_path,len(onehot_classes),onehot_classes\n # self.set_up_object_detection([4,2,1],[0.7, 1., 1.3],[(1.,1.), (1.,0.5), (0.5,1.)])\n\n elif self.reg:\n print('\\nRegression\\n')\n int_targets = [list(map(int,x)) for x in split_targets]\n zero_targets = np.zeros((len(targets),max(lengths)),dtype=int)\n for i,t in enumerate(zero_targets):\n t[len(t)-len(int_targets[i]):] = int_targets[i]\n zero_targets[i] = t\n train_df.iloc[:,1] = [torch.from_numpy(z).type(torch.FloatTensor) for z in zero_targets]\n self.data_dir,self.num_classes,self.class_names = data_path, max(lengths),np.unique(zero_targets,axis=1)\n elif lengths[1:] != lengths[:-1]:\n self.multi_label = True\n print('\\nMulti-label Classification\\n')\n try:\n split_targets = [list(map(int,x)) for x in split_targets]\n except:\n pass\n dai_onehot,onehot_classes = 
one_hot(split_targets,self.multi_label)\n train_df.iloc[:,1] = [torch.from_numpy(x).type(torch.FloatTensor) for x in dai_onehot]\n self.data_dir,self.num_classes,self.class_names = data_path,len(onehot_classes),onehot_classes\n else:\n print('\\nSingle-label Classification\\n')\n unique_targets = list(np.unique(targets))\n target_ids = [unique_targets.index(x) for x in targets]\n train_df.iloc[:,1] = target_ids\n if val_csv:\n target_ids = [unique_targets.index(x) for x in val_targets]\n val_df.iloc[:,1] = target_ids\n if test_csv:\n target_ids = [unique_targets.index(x) for x in test_targets]\n test_df.iloc[:,1] = target_ids \n self.data_dir,self.num_classes,self.class_names = data_path,len(unique_targets),unique_targets\n\n # self.models_path = os.path.join(self.data_dir, 'models')\n # os.makedirs(self.models_path,exist_ok=True)\n\n if not val_csv:\n train_df,val_df = split_df(train_df,split_size)\n if not test_csv: \n val_df,test_df = split_df(val_df,split_size)\n tr_images = [str(x) for x in list(train_df.iloc[:,0])]\n val_images = [str(x) for x in list(val_df.iloc[:,0])]\n test_images = [str(x) for x in list(test_df.iloc[:,0])]\n if self.extension:\n tr_images = add_extension(tr_images,self.extension)\n val_images = add_extension(val_images,self.extension)\n test_images = add_extension(test_images,self.extension)\n train_df.iloc[:,0] = tr_images\n val_df.iloc[:,0] = val_images\n test_df.iloc[:,0] = test_images\n train_df.to_csv(os.path.join(data_path,'train.csv'),index=False)\n val_df.to_csv(os.path.join(data_path,'val.csv'),index=False)\n test_df.to_csv(os.path.join(data_path,'test.csv'),index=False)\n self.minorities,self.class_diffs = None,None\n if (not self.obj) or (not self.multi_label):\n self.minorities,self.class_diffs = get_minorities(train_df)\n self.data_dfs = {self.tr_name:train_df, self.val_name:val_df, self.test_name:test_df}\n data_dict = {'data_dfs':self.data_dfs,'data_dir':self.data_dir,'num_classes':self.num_classes,'class_names':self.class_names,\n 'minorities':self.minorities,'class_diffs':self.class_diffs,'obj':self.obj,'multi_label':self.multi_label}\n # save_obj(data_dict,os.path.join(self.data_dir,'data_dict.pkl'))\n self.data_dict = data_dict\n return data_dict\n\n def data_from_paths_to_csv(self,data_path,tr_path,val_path = None,test_path = None):\n \n train_df = csv_from_path(tr_path,tr_path)\n train_df.to_csv(os.path.join(data_path,self.tr_name+'.csv'),index=False)\n ret = (self.tr_name+'.csv',None)\n if val_path is not None:\n val_exists = os.path.exists(val_path)\n if val_exists:\n val_df = csv_from_path(val_path,tr_path)\n val_df.to_csv(os.path.join(data_path,self.val_name+'.csv'),index=False)\n ret = (self.tr_name+'.csv',self.val_name+'.csv')\n if test_path is not None:\n test_exists = os.path.exists(test_path)\n if test_exists:\n test_df = csv_from_path(test_path,tr_path)\n test_df.to_csv(os.path.join(data_path,self.test_name+'.csv'),index=False)\n ret = (self.tr_name+'.csv',self.val_name+'.csv',self.test_name+'.csv') \n return ret\n \n def get_data(self, data_dict = None, s = (224,224), dataset = my_image_csv_dataset, bs = 32, balance = False, tfms = None,\n bal_tfms = None, tta = False, num_workers = 4, stats_percentage = 0.6):\n \n self.image_size = s\n if not data_dict:\n data_dict = self.data_dict\n data_dfs,data_dir,minorities,class_diffs,obj,multi_label = (data_dict['data_dfs'],data_dict['data_dir'],data_dict['minorities'],\n data_dict['class_diffs'],data_dict['obj'],data_dict['multi_label'])\n if obj or multi_label:\n balance = False \n if 
tta:\n tta_tfms = {self.tr_name: transforms.Compose( \n [\n# transforms.TenCrop(s),\n transforms.FiveCrop(s[0]), \n transforms.Lambda(lambda crops:torch.stack([transforms.ToTensor()(crop) for crop in crops])),\n transforms.Lambda(lambda crops:torch.stack(\n [transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])(crop) for crop in crops]))\n \n ]),\n self.val_name: transforms.Compose(\n [\n# transforms.TenCrop(s),\n transforms.FiveCrop(s[0]),\n transforms.Lambda(lambda crops:torch.stack([transforms.ToTensor()(crop) for crop in crops])),\n transforms.Lambda(lambda crops:torch.stack(\n [transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])(crop) for crop in crops]))\n ]),\n self.test_name: transforms.Compose(\n [\n# transforms.TenCrop(s),\n transforms.FiveCrop(s[0]),\n transforms.Lambda(lambda crops:torch.stack([transforms.ToTensor()(crop) for crop in crops])),\n transforms.Lambda(lambda crops:torch.stack(\n [transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])(crop) for crop in crops]))\n ])}\n# tta_tfms = {self.tr_name: transforms.Compose([\n# transforms.Resize(s),\n# transforms.ToTensor(),\n# transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n# ]),\n# self.val_name: transforms.Compose([\n# transforms.Resize(s), \n# transforms.ToTensor(),\n# transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n# ]) }\n \n else:\n tta_tfms = None\n \n if not bal_tfms:\n bal_tfms = { self.tr_name: [transforms.RandomHorizontalFlip()],\n \n self.val_name: None,\n self.test_name: None \n }\n else:\n bal_tfms = {self.tr_name: bal_tfms, self.val_name: None, self.test_name: None}\n if obj:\n resize_transform = transforms.Resize(s)\n else:\n # resize_transform = transforms.RandomResizedCrop(s[0])\n resize_transform = transforms.Resize(s)\n if not tfms:\n tfms = [\n resize_transform,\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n ]\n else:\n \n tfms_temp = [\n resize_transform,\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n ]\n tfms_temp[1:1] = tfms\n tfms = tfms_temp\n print(tfms)\n \n data_transforms = {\n self.tr_name: tfms,\n self.val_name: [\n # transforms.Resize(s[0]+50),\n # transforms.CenterCrop(s[0]),\n transforms.Resize(s),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n ],\n self.test_name: [\n # transforms.Resize(s[0]+50),\n # transforms.CenterCrop(s[0]),\n transforms.Resize(s),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n ]\n }\n\n temp_tfms = [resize_transform, transforms.ToTensor()]\n temp_dataset = dataset(os.path.join(data_dir,self.tr_name),data_dfs[self.tr_name],temp_tfms)\n self.img_mean,self.img_std = get_img_stats(temp_dataset,stats_percentage)\n data_transforms[self.tr_name][-1].mean,data_transforms[self.tr_name][-1].std = self.img_mean,self.img_std\n data_transforms[self.val_name][-1].mean,data_transforms[self.val_name][-1].std = self.img_mean,self.img_std\n data_transforms[self.test_name][-1].mean,data_transforms[self.test_name][-1].std = self.img_mean,self.img_std\n\n if balance:\n image_datasets = {x: dataset(os.path.join(data_dir,self.tr_name),data_dfs[x],\n data_transforms[x],obj,minorities,class_diffs,bal_tfms[x])\n for x in [self.tr_name, self.val_name, self.test_name]} \n else:\n image_datasets = {x: dataset(os.path.join(data_dir,self.tr_name),data_dfs[x],\n data_transforms[x],obj)\n for x in [self.tr_name, self.val_name, 
self.test_name]}\n \n dataloaders = {x: torch.utils.data.DataLoader(image_datasets[x], batch_size=bs,\n shuffle=True, num_workers=num_workers)\n for x in [self.tr_name, self.val_name, self.test_name]}\n dataset_sizes = {x: len(image_datasets[x]) for x in [self.tr_name, self.val_name, self.test_name]}\n \n self.image_datasets,self.dataloaders,self.dataset_sizes = (image_datasets,dataloaders,\n dataset_sizes)\n \n return image_datasets,dataloaders,dataset_sizes\n\n def imshow(self,inp, title=None):\n \n \"\"\"Imshow for Tensor.\"\"\"\n inp = self.denorm_img(inp)\n plt.imshow(inp)\n if title:\n plt.title(title)\n plt.pause(0.001)\n\n def denorm_img(self,inp,calculate = False):\n\n inp = inp.numpy().transpose((1, 2, 0))\n if calculate:\n mean = np.mean(inp)\n std = np.std(inp)\n else: \n mean = self.img_mean.numpy()\n std = self.img_std.numpy()\n inp = std * inp + mean\n inp = np.clip(inp, 0, 1)\n return inp \n \n def show_data(self,folder_name = 'train', size = (64,64), bs = 5):\n \n self.get_data(size,bs)\n batch = next(iter(self.dataloaders[folder_name]))\n inputs, classes = batch[0],batch[1]\n out = torchvision.utils.make_grid(inputs)\n if self.reg:\n print(classes)\n self.imshow(out, title=[x for x in classes]) \n elif self.multi_label:\n self.imshow(out, title=[self.class_names[np.nonzero(x.type(torch.LongTensor))] for x in classes]) \n else: \n self.imshow(out, title=[self.class_names[x] for x in classes])\n\n # def set_up_object_detection(self,anc_grids,anc_zooms,anc_ratios,num_colr = 12):\n\n # # print('Would you like to give your own values for anchor_grids, anchor_zooms,and anchor_ratios? The default values are: {}, {} and {}'\n # # .format(anc_grids,anc_zooms,anc_ratios))\n # # print('If so, you may call the function \"set_up_object_detection\" with your own paramteres.')\n\n # cmap = get_cmap(num_colr)\n # self.colr_list = [cmap(float(x)) for x in range(num_colr)]\n # self.num_colr = num_colr\n # self.create_anchors(anc_grids,anc_zooms,anc_ratios)\n # self.custom_head = SSD_MultiHead(self.k,self.num_classes,0.45,-4.)\n # self.loss_f = FocalLoss(self.num_classes)\n\n # def create_anchors(self,anc_grids,anc_zooms,anc_ratios):\n \n # anchor_scales = [(anz*i,anz*j) for anz in anc_zooms for (i,j) in anc_ratios]\n # k = len(anchor_scales)\n # anc_offsets = [1/(o*2) for o in anc_grids]\n # anc_x = np.concatenate([np.repeat(np.linspace(ao, 1-ao, ag), ag)\n # for ao,ag in zip(anc_offsets,anc_grids)])\n # anc_y = np.concatenate([np.tile(np.linspace(ao, 1-ao, ag), ag)\n # for ao,ag in zip(anc_offsets,anc_grids)])\n # anc_ctrs = np.repeat(np.stack([anc_x,anc_y], axis=1), k, axis=0)\n # anc_sizes = np.concatenate([np.array([[o/ag,p/ag] for i in range(ag*ag) for o,p in anchor_scales])\n # for ag in anc_grids])\n # grid_sizes = torch.tensor(np.concatenate([np.array(\n # [ 1/ag for i in range(ag*ag) for o,p in anchor_scales])\n # for ag in anc_grids])).float().unsqueeze(1).to(self.device)\n # anchors = torch.tensor(np.concatenate([anc_ctrs, anc_sizes], axis=1)).float().to(self.device)\n # anchor_cnr = hw2corners(anchors[:,:2], anchors[:,2:])\n # self.anchors,self.anchor_cnr,self.grid_sizes,self.k = anchors,anchor_cnr,grid_sizes,k \n\n\n\n\n\n\n\n\n",
"step-ids": [
15,
16,
19,
25,
29
]
}
|
[
15,
16,
19,
25,
29
] |
import matplotlib.pyplot as plt
from shapely.geometry import MultiLineString, Polygon
mls = MultiLineString([[(0, 1), (5, 1)], [(1, 2), (1, 0)]])
p = Polygon([(0.5, 0.5), (0.5, 1.5), (2, 1.5), (2, 0.5)])
results = mls.intersection(p)
plt.subplot(1, 2, 1)
for ls in mls:
plt.plot(*ls.xy)
plt.plot(*p.boundary.xy, "-.k")
plt.xlim([0, 5])
plt.ylim([0, 2])
plt.subplot(1, 2, 2)
for ls in results:
plt.plot(*ls.xy)
plt.xlim([0, 5])
plt.ylim([0, 2])
plt.show()
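# Note (added): iterating a MultiLineString (or the intersection result) directly, as above,
# works on the Shapely 1.x series this example assumes; on Shapely 2.x multi-part geometries
# are no longer iterable and the .geoms accessor is used instead, e.g.
#   for ls in mls.geoms: plt.plot(*ls.xy)
#   for ls in results.geoms: plt.plot(*ls.xy)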
|
normal
|
{
"blob_id": "9096ed4b68d2bef92df7db98589e744ddf3efad0",
"index": 350,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nplt.subplot(1, 2, 1)\nfor ls in mls:\n plt.plot(*ls.xy)\nplt.plot(*p.boundary.xy, '-.k')\nplt.xlim([0, 5])\nplt.ylim([0, 2])\nplt.subplot(1, 2, 2)\nfor ls in results:\n plt.plot(*ls.xy)\nplt.xlim([0, 5])\nplt.ylim([0, 2])\nplt.show()\n",
"step-3": "<mask token>\nmls = MultiLineString([[(0, 1), (5, 1)], [(1, 2), (1, 0)]])\np = Polygon([(0.5, 0.5), (0.5, 1.5), (2, 1.5), (2, 0.5)])\nresults = mls.intersection(p)\nplt.subplot(1, 2, 1)\nfor ls in mls:\n plt.plot(*ls.xy)\nplt.plot(*p.boundary.xy, '-.k')\nplt.xlim([0, 5])\nplt.ylim([0, 2])\nplt.subplot(1, 2, 2)\nfor ls in results:\n plt.plot(*ls.xy)\nplt.xlim([0, 5])\nplt.ylim([0, 2])\nplt.show()\n",
"step-4": "import matplotlib.pyplot as plt\nfrom shapely.geometry import MultiLineString, Polygon\nmls = MultiLineString([[(0, 1), (5, 1)], [(1, 2), (1, 0)]])\np = Polygon([(0.5, 0.5), (0.5, 1.5), (2, 1.5), (2, 0.5)])\nresults = mls.intersection(p)\nplt.subplot(1, 2, 1)\nfor ls in mls:\n plt.plot(*ls.xy)\nplt.plot(*p.boundary.xy, '-.k')\nplt.xlim([0, 5])\nplt.ylim([0, 2])\nplt.subplot(1, 2, 2)\nfor ls in results:\n plt.plot(*ls.xy)\nplt.xlim([0, 5])\nplt.ylim([0, 2])\nplt.show()\n",
"step-5": "import matplotlib.pyplot as plt\nfrom shapely.geometry import MultiLineString, Polygon\n\nmls = MultiLineString([[(0, 1), (5, 1)], [(1, 2), (1, 0)]])\np = Polygon([(0.5, 0.5), (0.5, 1.5), (2, 1.5), (2, 0.5)])\nresults = mls.intersection(p)\n\nplt.subplot(1, 2, 1)\nfor ls in mls:\n plt.plot(*ls.xy)\nplt.plot(*p.boundary.xy, \"-.k\")\nplt.xlim([0, 5])\nplt.ylim([0, 2])\n\nplt.subplot(1, 2, 2)\nfor ls in results:\n plt.plot(*ls.xy)\nplt.xlim([0, 5])\nplt.ylim([0, 2])\n\nplt.show()\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# Generated by Django 2.2.2 on 2019-10-19 14:09
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('account', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='account',
name='phone_number',
field=models.CharField(max_length=15, verbose_name='phone number'),
),
]
|
normal
|
{
"blob_id": "7d25a8eb61b6fb9069616745c2b68fd3ceeca9fb",
"index": 6600,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('account', '0001_initial')]\n operations = [migrations.AlterField(model_name='account', name=\n 'phone_number', field=models.CharField(max_length=15, verbose_name=\n 'phone number'))]\n",
"step-4": "from django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n dependencies = [('account', '0001_initial')]\n operations = [migrations.AlterField(model_name='account', name=\n 'phone_number', field=models.CharField(max_length=15, verbose_name=\n 'phone number'))]\n",
"step-5": "# Generated by Django 2.2.2 on 2019-10-19 14:09\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('account', '0001_initial'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='account',\n name='phone_number',\n field=models.CharField(max_length=15, verbose_name='phone number'),\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# Generated by Django 3.2.2 on 2021-05-07 08:01
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='teams',
fields=[
('id', models.AutoField(primary_key=True, serialize=False)),
('name', models.CharField(max_length=50)),
('discipline', models.CharField(max_length=50)),
('amount', models.IntegerField()),
],
options={
'ordering': ['id'],
'unique_together': {('name', 'discipline', 'amount')},
},
),
]
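# --- Added note (assumption, not generated by Django) ---
# Roughly the models.py definition that would produce this migration:
#
#   from django.db import models
#
#   class teams(models.Model):
#       id = models.AutoField(primary_key=True)
#       name = models.CharField(max_length=50)
#       discipline = models.CharField(max_length=50)
#       amount = models.IntegerField()
#
#       class Meta:
#           ordering = ['id']
#           unique_together = {('name', 'discipline', 'amount')}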
|
normal
|
{
"blob_id": "e72962b644fab148741eb1c528d48ada45a43e51",
"index": 3978,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n initial = True\n dependencies = []\n operations = [migrations.CreateModel(name='teams', fields=[('id',\n models.AutoField(primary_key=True, serialize=False)), ('name',\n models.CharField(max_length=50)), ('discipline', models.CharField(\n max_length=50)), ('amount', models.IntegerField())], options={\n 'ordering': ['id'], 'unique_together': {('name', 'discipline',\n 'amount')}})]\n",
"step-4": "from django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n initial = True\n dependencies = []\n operations = [migrations.CreateModel(name='teams', fields=[('id',\n models.AutoField(primary_key=True, serialize=False)), ('name',\n models.CharField(max_length=50)), ('discipline', models.CharField(\n max_length=50)), ('amount', models.IntegerField())], options={\n 'ordering': ['id'], 'unique_together': {('name', 'discipline',\n 'amount')}})]\n",
"step-5": "# Generated by Django 3.2.2 on 2021-05-07 08:01\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='teams',\n fields=[\n ('id', models.AutoField(primary_key=True, serialize=False)),\n ('name', models.CharField(max_length=50)),\n ('discipline', models.CharField(max_length=50)),\n ('amount', models.IntegerField()),\n ],\n options={\n 'ordering': ['id'],\n 'unique_together': {('name', 'discipline', 'amount')},\n },\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import os
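# NOTE (added): this script is written for Python 2 (it mixes bare print statements such as
# `print "Jumlah = ", x+y` with print() calls, and uses raw_input at the end), so it must be
# run with a Python 2 interpreter; under Python 3 the bare print statements are syntax errors.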
pil = 'y'
while(pil=='y'):
os.system("cls")
print("===============================")
print("== KALKULATOR SEDERHANA ==")
print("===============================")
print("MENU-UTAMA : ")
print("1 Penjumlahan")
print("2 Pengurangan")
print("3 Perkalian")
print("4 Pembagian")
def penjumlahan ():
print("PENJUMLAHAN DUA BUAH BILANGAN")
print("=============================")
x = float(input ("Bilangan pertama: "))
y = float(input ("Bilangan kedua : "))
print("-----------------------------")
print "Jumlah = ", x+y
def pengurangan ():
print("PENGURANGAN DUA BUAH BILANGAN")
print("=============================")
x = float(input("Bilangan pertama: "))
y = float(input("Bilangan kedua : "))
print("-----------------------------")
print "Jumlah = ", x-y
def perkalian ():
print("PERKALIAN DUA BUAH BILANGAN")
print("===========================")
x = float(input("Bilangan pertama: "))
y = float(input("Bilangan kedua : "))
print("---------------------------")
print "Jumlah = ", x*y
def pembagian ():
print("PEMBAGIAN DUA BUAH BILANGAN")
print("===========================")
x = float(input("Bilangan pertama: "))
y = float(input("Bilangan kedua : "))
print("---------------------------")
print "Jumlah = ", x/y
pilihan = int(input("Masukkan pilihan Anda(1,2,3, dan 4): "))
if (pilihan==1):
penjumlahan ()
elif (pilihan==2):
pengurangan ()
elif (pilihan==3):
perkalian ()
elif (pilihan==4):
pembagian ()
else:
print("Pilihan Anda salah")
pil = raw_input("ulang KALKULATOR lagi? (y): ")
|
normal
|
{
"blob_id": "9e7dee9c0fd4cd290f4710649ffc4a94fedf0358",
"index": 356,
"step-1": "import os\npil = 'y'\nwhile(pil=='y'):\n os.system(\"cls\")\n print(\"===============================\")\n print(\"== KALKULATOR SEDERHANA ==\")\n print(\"===============================\")\n print(\"MENU-UTAMA : \")\n print(\"1 Penjumlahan\")\n print(\"2 Pengurangan\")\n print(\"3 Perkalian\")\n print(\"4 Pembagian\")\n\n def penjumlahan ():\n print(\"PENJUMLAHAN DUA BUAH BILANGAN\")\n print(\"=============================\")\n x = float(input (\"Bilangan pertama: \"))\n y = float(input (\"Bilangan kedua : \"))\n print(\"-----------------------------\")\n print \"Jumlah = \", x+y\n def pengurangan ():\n print(\"PENGURANGAN DUA BUAH BILANGAN\")\n print(\"=============================\")\n x = float(input(\"Bilangan pertama: \"))\n y = float(input(\"Bilangan kedua : \"))\n print(\"-----------------------------\")\n print \"Jumlah = \", x-y\n def perkalian ():\n print(\"PERKALIAN DUA BUAH BILANGAN\")\n print(\"===========================\")\n x = float(input(\"Bilangan pertama: \"))\n y = float(input(\"Bilangan kedua : \"))\n print(\"---------------------------\")\n print \"Jumlah = \", x*y\n def pembagian ():\n print(\"PEMBAGIAN DUA BUAH BILANGAN\")\n print(\"===========================\")\n x = float(input(\"Bilangan pertama: \"))\n y = float(input(\"Bilangan kedua : \"))\n print(\"---------------------------\")\n print \"Jumlah = \", x/y\n pilihan = int(input(\"Masukkan pilihan Anda(1,2,3, dan 4): \"))\n if (pilihan==1):\n penjumlahan ()\n elif (pilihan==2):\n pengurangan ()\n elif (pilihan==3):\n perkalian ()\n elif (pilihan==4):\n pembagian ()\n else:\n print(\"Pilihan Anda salah\")\n pil = raw_input(\"ulang KALKULATOR lagi? (y): \")\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
import matplotlib.pyplot as plt
import numpy as np
import scipy.io as scio
import estimateGaussian as eg
import multivariateGaussian as mvg
import visualizeFit as vf
import selectThreshold as st
plt.ion()
# np.set_printoptions(formatter={'float': '{: 0.6f}'.format})
'''Part 1: Load the example dataset'''

# Start with a small two-feature dataset so the anomaly detection is easy to visualize.
# The features are the latency and throughput of a set of machines; the goal is to
# find machines that may be behaving anomalously.

print('Visualizing example dataset for outlier detection.')

data = scio.loadmat('ex8data1.mat')
X = data['X']  # training-set feature matrix
Xval = data['Xval']  # validation-set feature matrix
yval = data['yval'].flatten()  # validation-set labels: anomalous / normal

# Visualize the example training set
plt.figure()
plt.scatter(X[:, 0], X[:, 1], c='b', marker='x', s=15, linewidth=1)
plt.axis([0, 30, 0, 30])
plt.xlabel('Latency (ms)')  # x1: latency
plt.ylabel('Throughput (mb/s')  # x2: throughput
input('Program paused. Press ENTER to continue')
'''Part 2: Estimate the distribution of the training set'''
# Assume each feature of the dataset follows a Gaussian distribution

print('Visualizing Gaussian fit.')

# Parameter estimation
mu, sigma2 = eg.estimate_gaussian(X)

# Compute the probability density of each training example
p = mvg.multivariate_gaussian(X, mu, sigma2)
# Visualize the fitted distribution as a contour plot
vf.visualize_fit(X, mu, sigma2)
plt.xlabel('Latency (ms)')
plt.ylabel('Throughput (mb/s')
input('Program paused. Press ENTER to continue')
'''Part 3: Use the validation set to pick the best probability threshold'''
pval = mvg.multivariate_gaussian(Xval, mu, sigma2)  # density of each validation sample under the fitted distribution

epsilon, f1 = st.select_threshold(yval, pval)  # choose a suitable probability threshold
print('Best epsilon found using cross-validation: {:0.4e}'.format(epsilon))
print('Best F1 on Cross Validation Set: {:0.6f}'.format(f1))
print('(you should see a value epsilon of about 8.99e-05 and F1 of about 0.875)')
# Mark the anomalies found in the training set
outliers = np.where(p < epsilon)
plt.scatter(X[outliers, 0], X[outliers, 1], marker='o', facecolors='none', edgecolors='r')
input('Program paused. Press ENTER to continue')
'''Part 4: Anomaly detection on a larger dataset (many features)'''
data = scio.loadmat('ex8data2.mat')
X = data['X']  # training-set feature matrix
Xval = data['Xval']  # validation-set feature matrix
yval = data['yval'].flatten()  # validation-set labels: 1 = anomalous, 0 = normal

# Parameter estimation
mu, sigma2 = eg.estimate_gaussian(X)

# Compute the probability density of each training example
p = mvg.multivariate_gaussian(X, mu, sigma2)

# Density of each validation sample
pval = mvg.multivariate_gaussian(Xval, mu, sigma2)

# Choose the best threshold
epsilon, f1 = st.select_threshold(yval, pval)

# Sanity-check the results
print('Best epsilon found using cross-validation: {:0.4e}'.format(epsilon))
print('Best F1 on Cross Validation Set: {:0.6f}'.format(f1))
print('# Outliers found: {}'.format(np.sum(np.less(p, epsilon))))  # number of anomalous training samples
print('(you should see a value epsilon of about 1.38e-18, F1 of about 0.615, and 117 outliers)')
input('ex8 Finished. Press ENTER to exit')
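# --- Added sketch (not part of the original exercise files) ---
# The helper modules imported at the top (estimateGaussian, multivariateGaussian,
# visualizeFit, selectThreshold) are local files that are not included in this dump.
# Under the usual assumptions of this exercise (an independent Gaussian per feature),
# the two estimation helpers can be sketched roughly as below; the names, signatures
# and details here are illustrative assumptions, not the original implementations.
def _estimate_gaussian_sketch(X):
    # per-feature mean and (population) variance
    return X.mean(axis=0), X.var(axis=0)

def _multivariate_gaussian_sketch(X, mu, sigma2):
    # density of each row of X under a Gaussian with diagonal covariance diag(sigma2)
    k = mu.size
    cov = np.diag(sigma2)
    diff = X - mu
    quad = np.sum(diff @ np.linalg.inv(cov) * diff, axis=1)
    return (2 * np.pi) ** (-k / 2) * np.linalg.det(cov) ** (-0.5) * np.exp(-0.5 * quad)
# select_threshold scans candidate epsilons between min(pval) and max(pval) and keeps
# the one with the best F1 score against the labelled validation set.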
|
normal
|
{
"blob_id": "de6b9961e0572338c87802314e7ae3cded5168b4",
"index": 487,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nplt.ion()\n<mask token>\nprint('Visualizing example dataset for outlier detection.')\n<mask token>\nplt.figure()\nplt.scatter(X[:, 0], X[:, 1], c='b', marker='x', s=15, linewidth=1)\nplt.axis([0, 30, 0, 30])\nplt.xlabel('Latency (ms)')\nplt.ylabel('Throughput (mb/s')\ninput('Program paused. Press ENTER to continue')\n<mask token>\nprint('Visualizing Gaussian fit.')\n<mask token>\nvf.visualize_fit(X, mu, sigma2)\nplt.xlabel('Latency (ms)')\nplt.ylabel('Throughput (mb/s')\ninput('Program paused. Press ENTER to continue')\n<mask token>\nprint('Best epsilon found using cross-validation: {:0.4e}'.format(epsilon))\nprint('Best F1 on Cross Validation Set: {:0.6f}'.format(f1))\nprint(\n '(you should see a value epsilon of about 8.99e-05 and F1 of about 0.875)')\n<mask token>\nplt.scatter(X[outliers, 0], X[outliers, 1], marker='o', facecolors='none',\n edgecolors='r')\ninput('Program paused. Press ENTER to continue')\n<mask token>\nprint('Best epsilon found using cross-validation: {:0.4e}'.format(epsilon))\nprint('Best F1 on Cross Validation Set: {:0.6f}'.format(f1))\nprint('# Outliers found: {}'.format(np.sum(np.less(p, epsilon))))\nprint(\n '(you should see a value epsilon of about 1.38e-18, F1 of about 0.615, and 117 outliers)'\n )\ninput('ex8 Finished. Press ENTER to exit')\n",
"step-3": "<mask token>\nplt.ion()\n<mask token>\nprint('Visualizing example dataset for outlier detection.')\ndata = scio.loadmat('ex8data1.mat')\nX = data['X']\nXval = data['Xval']\nyval = data['yval'].flatten()\nplt.figure()\nplt.scatter(X[:, 0], X[:, 1], c='b', marker='x', s=15, linewidth=1)\nplt.axis([0, 30, 0, 30])\nplt.xlabel('Latency (ms)')\nplt.ylabel('Throughput (mb/s')\ninput('Program paused. Press ENTER to continue')\n<mask token>\nprint('Visualizing Gaussian fit.')\nmu, sigma2 = eg.estimate_gaussian(X)\np = mvg.multivariate_gaussian(X, mu, sigma2)\nvf.visualize_fit(X, mu, sigma2)\nplt.xlabel('Latency (ms)')\nplt.ylabel('Throughput (mb/s')\ninput('Program paused. Press ENTER to continue')\n<mask token>\npval = mvg.multivariate_gaussian(Xval, mu, sigma2)\nepsilon, f1 = st.select_threshold(yval, pval)\nprint('Best epsilon found using cross-validation: {:0.4e}'.format(epsilon))\nprint('Best F1 on Cross Validation Set: {:0.6f}'.format(f1))\nprint(\n '(you should see a value epsilon of about 8.99e-05 and F1 of about 0.875)')\noutliers = np.where(p < epsilon)\nplt.scatter(X[outliers, 0], X[outliers, 1], marker='o', facecolors='none',\n edgecolors='r')\ninput('Program paused. Press ENTER to continue')\n<mask token>\ndata = scio.loadmat('ex8data2.mat')\nX = data['X']\nXval = data['Xval']\nyval = data['yval'].flatten()\nmu, sigma2 = eg.estimate_gaussian(X)\np = mvg.multivariate_gaussian(X, mu, sigma2)\npval = mvg.multivariate_gaussian(Xval, mu, sigma2)\nepsilon, f1 = st.select_threshold(yval, pval)\nprint('Best epsilon found using cross-validation: {:0.4e}'.format(epsilon))\nprint('Best F1 on Cross Validation Set: {:0.6f}'.format(f1))\nprint('# Outliers found: {}'.format(np.sum(np.less(p, epsilon))))\nprint(\n '(you should see a value epsilon of about 1.38e-18, F1 of about 0.615, and 117 outliers)'\n )\ninput('ex8 Finished. Press ENTER to exit')\n",
"step-4": "import matplotlib.pyplot as plt\nimport numpy as np\nimport scipy.io as scio\nimport estimateGaussian as eg\nimport multivariateGaussian as mvg\nimport visualizeFit as vf\nimport selectThreshold as st\nplt.ion()\n<mask token>\nprint('Visualizing example dataset for outlier detection.')\ndata = scio.loadmat('ex8data1.mat')\nX = data['X']\nXval = data['Xval']\nyval = data['yval'].flatten()\nplt.figure()\nplt.scatter(X[:, 0], X[:, 1], c='b', marker='x', s=15, linewidth=1)\nplt.axis([0, 30, 0, 30])\nplt.xlabel('Latency (ms)')\nplt.ylabel('Throughput (mb/s')\ninput('Program paused. Press ENTER to continue')\n<mask token>\nprint('Visualizing Gaussian fit.')\nmu, sigma2 = eg.estimate_gaussian(X)\np = mvg.multivariate_gaussian(X, mu, sigma2)\nvf.visualize_fit(X, mu, sigma2)\nplt.xlabel('Latency (ms)')\nplt.ylabel('Throughput (mb/s')\ninput('Program paused. Press ENTER to continue')\n<mask token>\npval = mvg.multivariate_gaussian(Xval, mu, sigma2)\nepsilon, f1 = st.select_threshold(yval, pval)\nprint('Best epsilon found using cross-validation: {:0.4e}'.format(epsilon))\nprint('Best F1 on Cross Validation Set: {:0.6f}'.format(f1))\nprint(\n '(you should see a value epsilon of about 8.99e-05 and F1 of about 0.875)')\noutliers = np.where(p < epsilon)\nplt.scatter(X[outliers, 0], X[outliers, 1], marker='o', facecolors='none',\n edgecolors='r')\ninput('Program paused. Press ENTER to continue')\n<mask token>\ndata = scio.loadmat('ex8data2.mat')\nX = data['X']\nXval = data['Xval']\nyval = data['yval'].flatten()\nmu, sigma2 = eg.estimate_gaussian(X)\np = mvg.multivariate_gaussian(X, mu, sigma2)\npval = mvg.multivariate_gaussian(Xval, mu, sigma2)\nepsilon, f1 = st.select_threshold(yval, pval)\nprint('Best epsilon found using cross-validation: {:0.4e}'.format(epsilon))\nprint('Best F1 on Cross Validation Set: {:0.6f}'.format(f1))\nprint('# Outliers found: {}'.format(np.sum(np.less(p, epsilon))))\nprint(\n '(you should see a value epsilon of about 1.38e-18, F1 of about 0.615, and 117 outliers)'\n )\ninput('ex8 Finished. Press ENTER to exit')\n",
"step-5": "import matplotlib.pyplot as plt\nimport numpy as np\nimport scipy.io as scio\n\nimport estimateGaussian as eg\nimport multivariateGaussian as mvg\nimport visualizeFit as vf\nimport selectThreshold as st\n\nplt.ion()\n# np.set_printoptions(formatter={'float': '{: 0.6f}'.format})\n\n'''第1部分 加载示例数据集'''\n\n#先通过一个小数据集进行异常检测 便于可视化\n\n# 数据集包含两个特征 \n# 一些机器的等待时间和吞吐量 实验目的找出其中可能有异常的机器\n\n\nprint('Visualizing example dataset for outlier detection.')\n\n\ndata = scio.loadmat('ex8data1.mat')\nX = data['X']#训练集样本特征矩阵\nXval = data['Xval'] #验证集样本特征矩阵\nyval = data['yval'].flatten() #验证集样本标签 异常/正常 \n\n# 可视化样例训练集\nplt.figure()\nplt.scatter(X[:, 0], X[:, 1], c='b', marker='x', s=15, linewidth=1)\nplt.axis([0, 30, 0, 30])\nplt.xlabel('Latency (ms)') #x1等待时间\nplt.ylabel('Throughput (mb/s') #x2吞吐量\n\n\ninput('Program paused. Press ENTER to continue')\n\n'''第2部分 估计训练集的分布'''\n# 假设数据集的各个特征服从高斯分布\n\nprint('Visualizing Gaussian fit.')\n\n# 参数估计 \nmu, sigma2 = eg.estimate_gaussian(X)\n\n# 计算训练集的概率分布\np = mvg.multivariate_gaussian(X, mu, sigma2)\n#可视化训练集的概率分布 画出等高线图\nvf.visualize_fit(X, mu, sigma2)\nplt.xlabel('Latency (ms)')\nplt.ylabel('Throughput (mb/s')\n\ninput('Program paused. Press ENTER to continue')\n\n'''第3部分 基于验证集 得到一个最好的概率分布阈值'''\npval = mvg.multivariate_gaussian(Xval, mu, sigma2) #根据训练集的概率分布 得到验证集样本的概率\n\nepsilon, f1 = st.select_threshold(yval, pval) #选择合适的概率阈值\nprint('Best epsilon found using cross-validation: {:0.4e}'.format(epsilon))\nprint('Best F1 on Cross Validation Set: {:0.6f}'.format(f1))\nprint('(you should see a value epsilon of about 8.99e-05 and F1 of about 0.875)')\n\n# 标出训练集中的异常值\noutliers = np.where(p < epsilon)\nplt.scatter(X[outliers, 0], X[outliers, 1], marker='o', facecolors='none', edgecolors='r')\n\ninput('Program paused. Press ENTER to continue')\n\n\n'''第4部分 基于大数据集 进行异常检测(特征数很多)'''\ndata = scio.loadmat('ex8data2.mat')\nX = data['X'] #训练集样本特征矩阵\nXval = data['Xval'] #验证集样本特征矩阵\nyval = data['yval'].flatten() #验证集样本标签 1异常 0正常\n\n#参数估计\nmu, sigma2 = eg.estimate_gaussian(X)\n\n# 计算训练集的概率分布\np = mvg.multivariate_gaussian(X, mu, sigma2)\n\n# 得到验证集每个样本的概率\npval = mvg.multivariate_gaussian(Xval, mu, sigma2)\n\n# 选择一个最好的阈值\nepsilon, f1 = st.select_threshold(yval, pval)\n\n#验证程序正确性\nprint('Best epsilon found using cross-validation: {:0.4e}'.format(epsilon))\nprint('Best F1 on Cross Validation Set: {:0.6f}'.format(f1))\nprint('# Outliers found: {}'.format(np.sum(np.less(p, epsilon)))) #训练集上的异常样本数量\nprint('(you should see a value epsilon of about 1.38e-18, F1 of about 0.615, and 117 outliers)')\n\ninput('ex8 Finished. Press ENTER to exit')\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# Multiple Linear Regression
# To set the working directory, save this .py file in the same folder as the 50_Startups.csv file
# and then press the Run button. This will automatically set the working directory.
# Importing the libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
dataset = pd.read_csv('50_Startups.csv')
# iloc is integer-location based [rows, columns]; ':' means all rows, ':-1' means all columns except the last one
X = dataset.iloc[:, :-1].values
# In Python, indexes start from 0 (in R they start from 1)
y = dataset.iloc[:, 4].values
# Categorical Data
# Encoding Independent Data
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
labelencoder_X = LabelEncoder()
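# Column 3 is the categorical 'State' column (assuming the standard 50_Startups.csv column order: R&D Spend, Administration, Marketing Spend, State)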
X[:,3] = labelencoder_X.fit_transform(X[:,3])
onehotencoder = OneHotEncoder(categorical_features= [3])
X = onehotencoder.fit_transform(X).toarray()
# Avoiding Dummy Variable Trap
X = X[:, 1:]
# This keeps columns 1 to the end, dropping the first dummy column.
#Splitting the dataset into Training set and Test set
from sklearn.cross_validation import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X,y, test_size = 0.2, random_state =0)
# Feature Scaling
# For a multi-line comment use triple quotes; the block below is therefore not executed
"""from sklearn.preprocessing import StandardScaler
sc_X = StandardScaler()
X_train = sc_X.fit_transform(X_train)
X_test = sc_X.transform(X_test)"""
# Fitting Multiple Linear Regression to the Training set
from sklearn.linear_model import LinearRegression
regressor = LinearRegression()
regressor.fit(X_train, y_train)
# Predicting the Test set results
y_pred = regressor.predict(X_test)
# Building the optimal model using Backward Elimination
import statsmodels.formula.api as sm
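# statsmodels' OLS does not add an intercept automatically, so prepend a column of ones to act as the constant (b0) term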
X = np.append(arr = np.ones((50,1)).astype(int), values = X, axis = 1)
X_opt = X[:, [0,1,2,3,4,5]]
regressor_OLS = sm.OLS(endog = y, exog = X_opt).fit()
regressor_OLS.summary()
# Omit the variable whose p-value is above the significance level (SL = 0.05)
X_opt = X[:, [0,1,3,4,5]]
regressor_OLS = sm.OLS(endog = y, exog = X_opt).fit()
regressor_OLS.summary()
# Omit the variables until you have P < SL
X_opt = X[:, [0,3,4,5]]
regressor_OLS = sm.OLS(endog = y, exog = X_opt).fit()
regressor_OLS.summary()
X_opt = X[:, [0,3,5]]
regressor_OLS = sm.OLS(endog = y, exog = X_opt).fit()
regressor_OLS.summary()
X_opt = X[:, [0,3]]
regressor_OLS = sm.OLS(endog = y, exog = X_opt).fit()
regressor_OLS.summary()
# End of Backward Elimination Algorithm
# I would like to visualize the performance of R&D vs Profit scale
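# A minimal sketch of that visualization (assuming the standard 50_Startups.csv
# column names 'R&D Spend' and 'Profit'; adjust these if your copy of the file differs)
plt.scatter(dataset['R&D Spend'], dataset['Profit'], color = 'blue')
plt.title('R&D Spend vs Profit')
plt.xlabel('R&D Spend')
plt.ylabel('Profit')
plt.show()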
|
normal
|
{
"blob_id": "4d722975b4ffc1bbfe7591e6ceccc758f67a5599",
"index": 6920,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nregressor.fit(X_train, y_train)\n<mask token>\nregressor_OLS.summary()\n<mask token>\nregressor_OLS.summary()\n<mask token>\nregressor_OLS.summary()\n<mask token>\nregressor_OLS.summary()\n<mask token>\nregressor_OLS.summary()\n",
"step-3": "<mask token>\ndataset = pd.read_csv('50_Startups.csv')\nX = dataset.iloc[:, :-1].values\ny = dataset.iloc[:, 4].values\n<mask token>\nlabelencoder_X = LabelEncoder()\nX[:, 3] = labelencoder_X.fit_transform(X[:, 3])\nonehotencoder = OneHotEncoder(categorical_features=[3])\nX = onehotencoder.fit_transform(X).toarray()\nX = X[:, 1:]\n<mask token>\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2,\n random_state=0)\n<mask token>\nregressor = LinearRegression()\nregressor.fit(X_train, y_train)\ny_pred = regressor.predict(X_test)\n<mask token>\nX = np.append(arr=np.ones((50, 1)).astype(int), values=X, axis=1)\nX_opt = X[:, [0, 1, 2, 3, 4, 5]]\nregressor_OLS = sm.OLS(endog=y, exog=X_opt).fit()\nregressor_OLS.summary()\nX_opt = X[:, [0, 1, 3, 4, 5]]\nregressor_OLS = sm.OLS(endog=y, exog=X_opt).fit()\nregressor_OLS.summary()\nX_opt = X[:, [0, 3, 4, 5]]\nregressor_OLS = sm.OLS(endog=y, exog=X_opt).fit()\nregressor_OLS.summary()\nX_opt = X[:, [0, 3, 5]]\nregressor_OLS = sm.OLS(endog=y, exog=X_opt).fit()\nregressor_OLS.summary()\nX_opt = X[:, [0, 3]]\nregressor_OLS = sm.OLS(endog=y, exog=X_opt).fit()\nregressor_OLS.summary()\n",
"step-4": "import numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\ndataset = pd.read_csv('50_Startups.csv')\nX = dataset.iloc[:, :-1].values\ny = dataset.iloc[:, 4].values\nfrom sklearn.preprocessing import LabelEncoder, OneHotEncoder\nlabelencoder_X = LabelEncoder()\nX[:, 3] = labelencoder_X.fit_transform(X[:, 3])\nonehotencoder = OneHotEncoder(categorical_features=[3])\nX = onehotencoder.fit_transform(X).toarray()\nX = X[:, 1:]\nfrom sklearn.cross_validation import train_test_split\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2,\n random_state=0)\n<mask token>\nfrom sklearn.linear_model import LinearRegression\nregressor = LinearRegression()\nregressor.fit(X_train, y_train)\ny_pred = regressor.predict(X_test)\nimport statsmodels.formula.api as sm\nX = np.append(arr=np.ones((50, 1)).astype(int), values=X, axis=1)\nX_opt = X[:, [0, 1, 2, 3, 4, 5]]\nregressor_OLS = sm.OLS(endog=y, exog=X_opt).fit()\nregressor_OLS.summary()\nX_opt = X[:, [0, 1, 3, 4, 5]]\nregressor_OLS = sm.OLS(endog=y, exog=X_opt).fit()\nregressor_OLS.summary()\nX_opt = X[:, [0, 3, 4, 5]]\nregressor_OLS = sm.OLS(endog=y, exog=X_opt).fit()\nregressor_OLS.summary()\nX_opt = X[:, [0, 3, 5]]\nregressor_OLS = sm.OLS(endog=y, exog=X_opt).fit()\nregressor_OLS.summary()\nX_opt = X[:, [0, 3]]\nregressor_OLS = sm.OLS(endog=y, exog=X_opt).fit()\nregressor_OLS.summary()\n",
"step-5": "# Multiple Linear Regression\n# To set the working directory save this .py file where we have the Data.csv file \n# and then press the Run button. This will automatically set the working directory.\n# Importing the data from preprocessing data\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd \n\ndataset = pd.read_csv('50_Startups.csv')\n\n# iloc integer location based [rows, columns] : means all rows :-1 all columns except last one\nX = dataset.iloc[:, :-1].values\n\n# In python indexes are started from 0 and R starts from 1\ny = dataset.iloc[:, 4].values\n\n# Categorical Data\n# Encoding Independent Data\nfrom sklearn.preprocessing import LabelEncoder, OneHotEncoder\nlabelencoder_X = LabelEncoder()\nX[:,3] = labelencoder_X.fit_transform(X[:,3])\nonehotencoder = OneHotEncoder(categorical_features= [3])\nX = onehotencoder.fit_transform(X).toarray()\n\n# Avoiding Dummy Variable Trap\nX = X[:, 1:] \n#In the above thing it The above column will start from 1 to end.\n\n#Splitting the dataset into Training set and Test set\nfrom sklearn.cross_validation import train_test_split\nX_train, X_test, y_train, y_test = train_test_split(X,y, test_size = 0.2, random_state =0)\n\n# Feature Scaling\n# For multi-comment line use \"\"\" This will not be executed \"\"\" \n\"\"\"from sklearn.preprocessing import StandardScaler\nsc_X = StandardScaler()\nX_train = sc_X.fit_transform(X_train)\nX_test = sc_X.transform(X_test)\"\"\"\n\n# Fitting Multiple Linear Regression to the Training set\nfrom sklearn.linear_model import LinearRegression\nregressor = LinearRegression()\nregressor.fit(X_train, y_train)\n\n# Predicting the Test set results\ny_pred = regressor.predict(X_test)\n\n# Building the model using Backword Elimination\nimport statsmodels.formula.api as sm\nX = np.append(arr = np.ones((50,1)).astype(int), values = X, axis = 1)\nX_opt = X[:, [0,1,2,3,4,5]]\nregressor_OLS = sm.OLS(endog = y, exog = X_opt).fit()\nregressor_OLS.summary()\n\n# Omit the variables which have prob more than .95\nX_opt = X[:, [0,1,3,4,5]]\nregressor_OLS = sm.OLS(endog = y, exog = X_opt).fit()\nregressor_OLS.summary()\n\n# Omit the variables until you have P < SL\nX_opt = X[:, [0,3,4,5]]\nregressor_OLS = sm.OLS(endog = y, exog = X_opt).fit()\nregressor_OLS.summary()\n\nX_opt = X[:, [0,3,5]]\nregressor_OLS = sm.OLS(endog = y, exog = X_opt).fit()\nregressor_OLS.summary()\n\nX_opt = X[:, [0,3]]\nregressor_OLS = sm.OLS(endog = y, exog = X_opt).fit()\nregressor_OLS.summary()\n\n# End of Backward ELimination Algorithm\n\n# I would like to visualize the performance of R&D vs Profit scale\n\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import datetime
import hashlib
import json
from flask import Flask, jsonify, request
import requests
from uuid import uuid4
from urllib.parse import urlparse
from Crypto.PublicKey import RSA
# Part 1 - Building a Blockchain
class Blockchain:
#chain (empty list), farmer_details (empty list), nodes (set); create_block builds the genesis block
def __init__(self):
self.chain = []
self.farmer_details = []
self.create_block(proof = 1, previous_hash = '0')
self.nodes = set()
#It creates a dictionary block which contains index (length of chain + 1), timestamp (from the datetime module),
#proof (passed as parameter), previous_hash (passed as parameter),
#farmer_details (from self) and appends this block to the chain.
def create_block(self, proof, previous_hash):
block = {'index': len(self.chain) + 1,
'timestamp': str(datetime.datetime.now()),
'proof': proof,
'previous_hash': previous_hash,
'farmer_details': self.farmer_details}
self.farmer_details = []
self.chain.append(block)
return block
#It returns the last block of the chain.
def get_previous_block(self):
return self.chain[-1]
#It runs a loop and checks if the hash of (new proof^2 - previous proof^2) contains 4 leading zeroes.
#If yes, it returns the new proof; otherwise it increments the new proof by 1 and iterates again.
def proof_of_work(self, previous_proof):
new_proof = 1
check_proof = False
while check_proof is False:
hash_operation = hashlib.sha256(str(new_proof**2 - previous_proof**2).encode()).hexdigest()
if hash_operation[:4] == '0000':
check_proof = True
else:
new_proof += 1
return new_proof
#- It returns the hash of the block using sha256
def hash(self, block):
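        # json.dumps with sort_keys=True gives a deterministic serialization, so the same block always hashes to the same value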
encoded_block = json.dumps(block, sort_keys = True).encode()
return hashlib.sha256(encoded_block).hexdigest()
#It iterates from 0 to the chain length and checks if each block's previous_hash matches the hash returned by the hash function,
#then it checks whether the hash of (current block's proof^2 - previous block's proof^2) contains 4 leading zeroes or not.
# if not, the chain is not valid.
def is_chain_valid(self, chain):
previous_block = chain[0]
block_index = 1
while block_index < len(chain):
block = chain[block_index]
if block['previous_hash'] != self.hash(previous_block):
return False
previous_proof = previous_block['proof']
proof = block['proof']
hash_operation = hashlib.sha256(str(proof**2 - previous_proof**2).encode()).hexdigest()
if hash_operation[:4] != '0000':
return False
previous_block = block
block_index += 1
return True
#- It creates the private key using RSA.generate(1024), then creates the public key,
# hash_of_transaction (the hash of the concatenation of the hashes of name, crop_name, quantity and rate),
#data (the hash of the transaction converted to an int),
#signature (computed as data ** privatekey.d mod privatekey.n, i.e. an RSA signature over the transaction hash).
# Then it appends a dictionary containing all this information in hashed form to the list farmer_details
#and returns the index of the new block.
def add_farmerdetails(self, name, crop_name, quantity,rate):
privatekey = RSA.generate(1024)
publickey = privatekey.publickey()
hash_of_transaction=hashlib.sha256((hashlib.sha256(name.encode()).hexdigest()+hashlib.sha256(crop_name.encode()).hexdigest()+hashlib.sha256(str(quantity).encode()).hexdigest()+hashlib.sha256(str(rate).encode()).hexdigest()).encode()).hexdigest()
data=int(hash_of_transaction,16)
signature=pow(data,privatekey.d,privatekey.n)
self.farmer_details.append({'name_of_farmer': hashlib.sha256(name.encode()).hexdigest(),
'crop_name': hashlib.sha256(crop_name.encode()).hexdigest(),
'quantity_inkg': hashlib.sha256(str(quantity).encode()).hexdigest(),
'rate_perkg': hashlib.sha256(str(rate).encode()).hexdigest(),
'hash_of_transaction': hash_of_transaction,
'signature': signature
})
previous_block = self.get_previous_block()
return previous_block['index'] + 1
#It parses the address with urlparse and adds its netloc to the set nodes.
def add_node(self, address):
parsed_url = urlparse(address)
self.nodes.add(parsed_url.netloc)
#It accesses all the nodes in the set nodes, queries each one's chain length using get_chain (described below)
# and replaces the current chain with the longest valid chain among all the nodes.
def replace_chain(self):
network = self.nodes
longest_chain = None
max_length = len(self.chain)
for node in network:
response = requests.get(f'http://{node}/get_chain')
if response.status_code == 200:
length = response.json()['length']
chain = response.json()['chain']
if length > max_length and self.is_chain_valid(chain):
max_length = length
longest_chain = chain
if longest_chain:
self.chain = longest_chain
return True
return False
# Part 2 - Mining our Blockchain
# Creating a Web App
app = Flask(__name__)
# Creating an address for the node on Port 5001
node_address = str(uuid4()).replace('-', '')
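# Note: node_address is only needed when crediting a mining reward; it is unused below because the add_transaction call in mine_block is commented out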
# Creating a Blockchain
blockchain = Blockchain()
# Mining a new block
#- It accesses the previous block by calling the function get_previous_block(),
#then accesses the previous proof via previous_block['proof'],
#then it creates a new proof by using the function proof_of_work(previous_proof),
#then it finds the hash of the previous block by using the function blockchain.hash(previous_block),
# then calls the function create_block(proof, previous_hash), then finds the hash of this block.
# It creates a response containing all the details of the new block, jsonifies it and returns it.
@app.route('/mine_block', methods = ['GET'])
def mine_block():
previous_block = blockchain.get_previous_block()
previous_proof = previous_block['proof']
proof = blockchain.proof_of_work(previous_proof)
previous_hash = blockchain.hash(previous_block)
#blockchain.add_transaction(sender = node_address, receiver = 'Hadelin', amount = 1)
block = blockchain.create_block(proof, previous_hash)
current_block=blockchain.get_previous_block()
current_hash=blockchain.hash(current_block)
response = {'message': 'Congratulations, you just mined a block!',
'index': block['index'],
'timestamp': block['timestamp'],
'proof': block['proof'],
'previous_hash': block['previous_hash'],
'farmer': block['farmer_details'],
'current_hash': current_hash}
return jsonify(response), 200
# Getting the full Blockchain
#- It creates an empty list chain_till_now, then iterates over all the blocks in the blockchain and finds each block's hash,
#then checks whether the list farmer_details is empty or not;
#if it is empty it appends a dictionary containing the current block's index, timestamp, proof, previous_hash, current_hash and farmer_details.
# If the farmer_details list is not empty, it first finds the length of the list farmer_details,
#then it iterates over the length of the list farmer_details and concatenates the hash_of_transaction
# contained within each dictionary of the list farmer_details. Then it creates the hash of this concatenation. This is the merged hash.
# Then it creates a dictionary containing the merged hash, index, timestamp, proof, previous_hash, farmer_details and current hash.
# Then, it appends this dictionary to the list chain_till_now.
# It then creates the response containing chain_till_now and the length of the blockchain, jsonifies it and returns it.
@app.route('/print_chain',methods=['GET'])
def print_chain():
chain_till_now =[]
for xblock in blockchain.chain:
xcurrent_hash=blockchain.hash(xblock)
if len(xblock['farmer_details'])==0:
chain_till_now.append({'index': xblock['index'],
'timestamp': xblock['timestamp'],
'proof': xblock['proof'],
'previous_hash': xblock['previous_hash'],
'farmer': xblock['farmer_details'],
'current_hash': xcurrent_hash})
else:
l=len(xblock['farmer_details'])
sum=""
l-=1
while(l>=0):
sum=xblock['farmer_details'][l]['hash_of_transaction']+sum
l-=1
chain_till_now.append({'Merged_hash': hashlib.sha256(sum.encode()).hexdigest(),
'index': xblock['index'],
'timestamp': xblock['timestamp'],
'proof': xblock['proof'],
'previous_hash': xblock['previous_hash'],
'farmer': xblock['farmer_details'],
'current_hash': xcurrent_hash})
response = {'chain': chain_till_now,
'length': len(blockchain.chain)}
return jsonify(response), 200
#- It creates the response containing blockchain.chain and its length, jsonifies it and returns it.
@app.route('/get_chain', methods = ['GET'])
def get_chain():
response = {'chain': blockchain.chain,
'length': len(blockchain.chain)}
return jsonify(response), 200
# Checking if the Blockchain is valid
#- It calls the function is_chain_valid and returns a string as response based on whether the chain is valid or not.
@app.route('/is_valid', methods = ['GET'])
def is_valid():
is_valid = blockchain.is_chain_valid(blockchain.chain)
if is_valid:
response = {'message': 'All good. The Blockchain is valid.'}
else:
response = {'message': 'Houston, we have a problem. The Blockchain is not valid.'}
return jsonify(response), 200
# Adding a new transaction to the Blockchain
#It takes the input in JSON format and checks that all the farmer keys (name_of_farmer, crop_name, quantity_inkg, rate_perkg) are present in the JSON file.
#If not, it returns that some elements are missing;
# otherwise it calls the function add_farmerdetails, passing the farmer details from the JSON file as parameters, and
#returns the index of the block to which these details will be added.
@app.route('/add_farmerdetails', methods = ['POST'])
def add_farmer_details():
json = request.get_json()
farmer_keys = ['name_of_farmer', 'crop_name', 'quantity_inkg','rate_perkg']
if not all(key in json for key in farmer_keys):
return 'Some elements of the farmer_details are missing', 400
index = blockchain.add_farmerdetails(json['name_of_farmer'], json['crop_name'], json['quantity_inkg'], json['rate_perkg'])
response = {'message': f'These details will be added to Block {index}'}
return jsonify(response), 201
# Part 3 - Decentralizing our Blockchain
# Connecting new nodes
#It takes a JSON file as the request and first checks whether it contains any nodes.
# If it contains nodes, it calls the function blockchain.add_node for each of them.
#Then it returns the list of blockchain.nodes as the response.
@app.route('/connect_node', methods = ['POST'])
def connect_node():
json = request.get_json()
nodes = json.get('nodes')
if nodes is None:
return "No node", 400
for node in nodes:
blockchain.add_node(node)
response = {'message': 'All the nodes are now connected. The puspesh Blockchain now contains the following nodes:',
'total_nodes': list(blockchain.nodes)}
return jsonify(response), 201
# Replacing the chain by the longest chain if needed
#- It calls the function blockchain.replace_chain. If the chain is replaced,
#it returns the response with a message that the nodes had different chains so the chain was replaced by the longest one, along with blockchain.chain.
# Otherwise it returns the response with a message that all is good and the chain is the largest one, with blockchain.chain.
#Then it jsonifies the response and returns it.
@app.route('/replace_chain', methods = ['GET'])
def replace_chain():
is_chain_replaced = blockchain.replace_chain()
if is_chain_replaced:
response = {'message': 'The nodes had different chains so the chain was replaced by the longest one.',
'new_chain': blockchain.chain}
else:
response = {'message': 'All good. The chain is the largest one.',
'actual_chain': blockchain.chain}
return jsonify(response), 200
# Running the app
app.run(host = '0.0.0.0', port = 5001)
|
normal
|
{
"blob_id": "f8c222b1a84a092a3388cb801a88495bc227b1d5",
"index": 9748,
"step-1": "<mask token>\n\n\nclass Blockchain:\n\n def __init__(self):\n self.chain = []\n self.farmer_details = []\n self.create_block(proof=1, previous_hash='0')\n self.nodes = set()\n\n def create_block(self, proof, previous_hash):\n block = {'index': len(self.chain) + 1, 'timestamp': str(datetime.\n datetime.now()), 'proof': proof, 'previous_hash': previous_hash,\n 'farmer_details': self.farmer_details}\n self.farmer_details = []\n self.chain.append(block)\n return block\n\n def get_previous_block(self):\n return self.chain[-1]\n\n def proof_of_work(self, previous_proof):\n new_proof = 1\n check_proof = False\n while check_proof is False:\n hash_operation = hashlib.sha256(str(new_proof ** 2 - \n previous_proof ** 2).encode()).hexdigest()\n if hash_operation[:4] == '0000':\n check_proof = True\n else:\n new_proof += 1\n return new_proof\n\n def hash(self, block):\n encoded_block = json.dumps(block, sort_keys=True).encode()\n return hashlib.sha256(encoded_block).hexdigest()\n\n def is_chain_valid(self, chain):\n previous_block = chain[0]\n block_index = 1\n while block_index < len(chain):\n block = chain[block_index]\n if block['previous_hash'] != self.hash(previous_block):\n return False\n previous_proof = previous_block['proof']\n proof = block['proof']\n hash_operation = hashlib.sha256(str(proof ** 2 - previous_proof **\n 2).encode()).hexdigest()\n if hash_operation[:4] != '0000':\n return False\n previous_block = block\n block_index += 1\n return True\n\n def add_farmerdetails(self, name, crop_name, quantity, rate):\n privatekey = RSA.generate(1024)\n publickey = privatekey.publickey()\n hash_of_transaction = hashlib.sha256((hashlib.sha256(name.encode())\n .hexdigest() + hashlib.sha256(crop_name.encode()).hexdigest() +\n hashlib.sha256(str(quantity).encode()).hexdigest() + hashlib.\n sha256(str(rate).encode()).hexdigest()).encode()).hexdigest()\n data = int(hash_of_transaction, 16)\n signature = pow(data, privatekey.d, privatekey.n)\n self.farmer_details.append({'name_of_farmer': hashlib.sha256(name.\n encode()).hexdigest(), 'crop_name': hashlib.sha256(crop_name.\n encode()).hexdigest(), 'quantity_inkg': hashlib.sha256(str(\n quantity).encode()).hexdigest(), 'rate_perkg': hashlib.sha256(\n str(rate).encode()).hexdigest(), 'hash_of_transaction':\n hash_of_transaction, 'signature': signature})\n previous_block = self.get_previous_block()\n return previous_block['index'] + 1\n\n def add_node(self, address):\n parsed_url = urlparse(address)\n self.nodes.add(parsed_url.netloc)\n\n def replace_chain(self):\n network = self.nodes\n longest_chain = None\n max_length = len(self.chain)\n for node in network:\n response = requests.get(f'http://{node}/get_chain')\n if response.status_code == 200:\n length = response.json()['length']\n chain = response.json()['chain']\n if length > max_length and self.is_chain_valid(chain):\n max_length = length\n longest_chain = chain\n if longest_chain:\n self.chain = longest_chain\n return True\n return False\n\n\n<mask token>\n\n\[email protected]('/mine_block', methods=['GET'])\ndef mine_block():\n previous_block = blockchain.get_previous_block()\n previous_proof = previous_block['proof']\n proof = blockchain.proof_of_work(previous_proof)\n previous_hash = blockchain.hash(previous_block)\n block = blockchain.create_block(proof, previous_hash)\n current_block = blockchain.get_previous_block()\n current_hash = blockchain.hash(current_block)\n response = {'message': 'Congratulations, you just mined a block!',\n 'index': block['index'], 'timestamp': 
block['timestamp'], 'proof':\n block['proof'], 'previous_hash': block['previous_hash'], 'farmer':\n block['farmer_details'], 'current_hash': current_hash}\n return jsonify(response), 200\n\n\n<mask token>\n\n\[email protected]('/is_valid', methods=['GET'])\ndef is_valid():\n is_valid = blockchain.is_chain_valid(blockchain.chain)\n if is_valid:\n response = {'message': 'All good. The Blockchain is valid.'}\n else:\n response = {'message':\n 'Houston, we have a problem. The Blockchain is not valid.'}\n return jsonify(response), 200\n\n\[email protected]('/add_farmerdetails', methods=['POST'])\ndef add_farmer_details():\n json = request.get_json()\n farmer_keys = ['name_of_farmer', 'crop_name', 'quantity_inkg', 'rate_perkg'\n ]\n if not all(key in json for key in farmer_keys):\n return 'Some elements of the farmer_details are missing', 400\n index = blockchain.add_farmerdetails(json['name_of_farmer'], json[\n 'crop_name'], json['quantity_inkg'], json['rate_perkg'])\n response = {'message': f'These details will be added to Block {index}'}\n return jsonify(response), 201\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Blockchain:\n\n def __init__(self):\n self.chain = []\n self.farmer_details = []\n self.create_block(proof=1, previous_hash='0')\n self.nodes = set()\n\n def create_block(self, proof, previous_hash):\n block = {'index': len(self.chain) + 1, 'timestamp': str(datetime.\n datetime.now()), 'proof': proof, 'previous_hash': previous_hash,\n 'farmer_details': self.farmer_details}\n self.farmer_details = []\n self.chain.append(block)\n return block\n\n def get_previous_block(self):\n return self.chain[-1]\n\n def proof_of_work(self, previous_proof):\n new_proof = 1\n check_proof = False\n while check_proof is False:\n hash_operation = hashlib.sha256(str(new_proof ** 2 - \n previous_proof ** 2).encode()).hexdigest()\n if hash_operation[:4] == '0000':\n check_proof = True\n else:\n new_proof += 1\n return new_proof\n\n def hash(self, block):\n encoded_block = json.dumps(block, sort_keys=True).encode()\n return hashlib.sha256(encoded_block).hexdigest()\n\n def is_chain_valid(self, chain):\n previous_block = chain[0]\n block_index = 1\n while block_index < len(chain):\n block = chain[block_index]\n if block['previous_hash'] != self.hash(previous_block):\n return False\n previous_proof = previous_block['proof']\n proof = block['proof']\n hash_operation = hashlib.sha256(str(proof ** 2 - previous_proof **\n 2).encode()).hexdigest()\n if hash_operation[:4] != '0000':\n return False\n previous_block = block\n block_index += 1\n return True\n\n def add_farmerdetails(self, name, crop_name, quantity, rate):\n privatekey = RSA.generate(1024)\n publickey = privatekey.publickey()\n hash_of_transaction = hashlib.sha256((hashlib.sha256(name.encode())\n .hexdigest() + hashlib.sha256(crop_name.encode()).hexdigest() +\n hashlib.sha256(str(quantity).encode()).hexdigest() + hashlib.\n sha256(str(rate).encode()).hexdigest()).encode()).hexdigest()\n data = int(hash_of_transaction, 16)\n signature = pow(data, privatekey.d, privatekey.n)\n self.farmer_details.append({'name_of_farmer': hashlib.sha256(name.\n encode()).hexdigest(), 'crop_name': hashlib.sha256(crop_name.\n encode()).hexdigest(), 'quantity_inkg': hashlib.sha256(str(\n quantity).encode()).hexdigest(), 'rate_perkg': hashlib.sha256(\n str(rate).encode()).hexdigest(), 'hash_of_transaction':\n hash_of_transaction, 'signature': signature})\n previous_block = self.get_previous_block()\n return previous_block['index'] + 1\n\n def add_node(self, address):\n parsed_url = urlparse(address)\n self.nodes.add(parsed_url.netloc)\n\n def replace_chain(self):\n network = self.nodes\n longest_chain = None\n max_length = len(self.chain)\n for node in network:\n response = requests.get(f'http://{node}/get_chain')\n if response.status_code == 200:\n length = response.json()['length']\n chain = response.json()['chain']\n if length > max_length and self.is_chain_valid(chain):\n max_length = length\n longest_chain = chain\n if longest_chain:\n self.chain = longest_chain\n return True\n return False\n\n\n<mask token>\n\n\[email protected]('/mine_block', methods=['GET'])\ndef mine_block():\n previous_block = blockchain.get_previous_block()\n previous_proof = previous_block['proof']\n proof = blockchain.proof_of_work(previous_proof)\n previous_hash = blockchain.hash(previous_block)\n block = blockchain.create_block(proof, previous_hash)\n current_block = blockchain.get_previous_block()\n current_hash = blockchain.hash(current_block)\n response = {'message': 'Congratulations, you just mined a block!',\n 'index': block['index'], 'timestamp': 
block['timestamp'], 'proof':\n block['proof'], 'previous_hash': block['previous_hash'], 'farmer':\n block['farmer_details'], 'current_hash': current_hash}\n return jsonify(response), 200\n\n\[email protected]('/print_chain', methods=['GET'])\ndef print_chain():\n chain_till_now = []\n for xblock in blockchain.chain:\n xcurrent_hash = blockchain.hash(xblock)\n if len(xblock['farmer_details']) == 0:\n chain_till_now.append({'index': xblock['index'], 'timestamp':\n xblock['timestamp'], 'proof': xblock['proof'],\n 'previous_hash': xblock['previous_hash'], 'farmer': xblock[\n 'farmer_details'], 'current_hash': xcurrent_hash})\n else:\n l = len(xblock['farmer_details'])\n sum = ''\n l -= 1\n while l >= 0:\n sum = xblock['farmer_details'][l]['hash_of_transaction'] + sum\n l -= 1\n chain_till_now.append({'Merged_hash': hashlib.sha256(sum.encode\n ()).hexdigest(), 'index': xblock['index'], 'timestamp':\n xblock['timestamp'], 'proof': xblock['proof'],\n 'previous_hash': xblock['previous_hash'], 'farmer': xblock[\n 'farmer_details'], 'current_hash': xcurrent_hash})\n response = {'chain': chain_till_now, 'length': len(blockchain.chain)}\n return jsonify(response), 200\n\n\n<mask token>\n\n\[email protected]('/is_valid', methods=['GET'])\ndef is_valid():\n is_valid = blockchain.is_chain_valid(blockchain.chain)\n if is_valid:\n response = {'message': 'All good. The Blockchain is valid.'}\n else:\n response = {'message':\n 'Houston, we have a problem. The Blockchain is not valid.'}\n return jsonify(response), 200\n\n\[email protected]('/add_farmerdetails', methods=['POST'])\ndef add_farmer_details():\n json = request.get_json()\n farmer_keys = ['name_of_farmer', 'crop_name', 'quantity_inkg', 'rate_perkg'\n ]\n if not all(key in json for key in farmer_keys):\n return 'Some elements of the farmer_details are missing', 400\n index = blockchain.add_farmerdetails(json['name_of_farmer'], json[\n 'crop_name'], json['quantity_inkg'], json['rate_perkg'])\n response = {'message': f'These details will be added to Block {index}'}\n return jsonify(response), 201\n\n\n<mask token>\n\n\[email protected]('/replace_chain', methods=['GET'])\ndef replace_chain():\n is_chain_replaced = blockchain.replace_chain()\n if is_chain_replaced:\n response = {'message':\n 'The nodes had different chains so the chain was replaced by the longest one.'\n , 'new_chain': blockchain.chain}\n else:\n response = {'message': 'All good. The chain is the largest one.',\n 'actual_chain': blockchain.chain}\n return jsonify(response), 200\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Blockchain:\n\n def __init__(self):\n self.chain = []\n self.farmer_details = []\n self.create_block(proof=1, previous_hash='0')\n self.nodes = set()\n\n def create_block(self, proof, previous_hash):\n block = {'index': len(self.chain) + 1, 'timestamp': str(datetime.\n datetime.now()), 'proof': proof, 'previous_hash': previous_hash,\n 'farmer_details': self.farmer_details}\n self.farmer_details = []\n self.chain.append(block)\n return block\n\n def get_previous_block(self):\n return self.chain[-1]\n\n def proof_of_work(self, previous_proof):\n new_proof = 1\n check_proof = False\n while check_proof is False:\n hash_operation = hashlib.sha256(str(new_proof ** 2 - \n previous_proof ** 2).encode()).hexdigest()\n if hash_operation[:4] == '0000':\n check_proof = True\n else:\n new_proof += 1\n return new_proof\n\n def hash(self, block):\n encoded_block = json.dumps(block, sort_keys=True).encode()\n return hashlib.sha256(encoded_block).hexdigest()\n\n def is_chain_valid(self, chain):\n previous_block = chain[0]\n block_index = 1\n while block_index < len(chain):\n block = chain[block_index]\n if block['previous_hash'] != self.hash(previous_block):\n return False\n previous_proof = previous_block['proof']\n proof = block['proof']\n hash_operation = hashlib.sha256(str(proof ** 2 - previous_proof **\n 2).encode()).hexdigest()\n if hash_operation[:4] != '0000':\n return False\n previous_block = block\n block_index += 1\n return True\n\n def add_farmerdetails(self, name, crop_name, quantity, rate):\n privatekey = RSA.generate(1024)\n publickey = privatekey.publickey()\n hash_of_transaction = hashlib.sha256((hashlib.sha256(name.encode())\n .hexdigest() + hashlib.sha256(crop_name.encode()).hexdigest() +\n hashlib.sha256(str(quantity).encode()).hexdigest() + hashlib.\n sha256(str(rate).encode()).hexdigest()).encode()).hexdigest()\n data = int(hash_of_transaction, 16)\n signature = pow(data, privatekey.d, privatekey.n)\n self.farmer_details.append({'name_of_farmer': hashlib.sha256(name.\n encode()).hexdigest(), 'crop_name': hashlib.sha256(crop_name.\n encode()).hexdigest(), 'quantity_inkg': hashlib.sha256(str(\n quantity).encode()).hexdigest(), 'rate_perkg': hashlib.sha256(\n str(rate).encode()).hexdigest(), 'hash_of_transaction':\n hash_of_transaction, 'signature': signature})\n previous_block = self.get_previous_block()\n return previous_block['index'] + 1\n\n def add_node(self, address):\n parsed_url = urlparse(address)\n self.nodes.add(parsed_url.netloc)\n\n def replace_chain(self):\n network = self.nodes\n longest_chain = None\n max_length = len(self.chain)\n for node in network:\n response = requests.get(f'http://{node}/get_chain')\n if response.status_code == 200:\n length = response.json()['length']\n chain = response.json()['chain']\n if length > max_length and self.is_chain_valid(chain):\n max_length = length\n longest_chain = chain\n if longest_chain:\n self.chain = longest_chain\n return True\n return False\n\n\n<mask token>\n\n\[email protected]('/mine_block', methods=['GET'])\ndef mine_block():\n previous_block = blockchain.get_previous_block()\n previous_proof = previous_block['proof']\n proof = blockchain.proof_of_work(previous_proof)\n previous_hash = blockchain.hash(previous_block)\n block = blockchain.create_block(proof, previous_hash)\n current_block = blockchain.get_previous_block()\n current_hash = blockchain.hash(current_block)\n response = {'message': 'Congratulations, you just mined a block!',\n 'index': block['index'], 'timestamp': 
block['timestamp'], 'proof':\n block['proof'], 'previous_hash': block['previous_hash'], 'farmer':\n block['farmer_details'], 'current_hash': current_hash}\n return jsonify(response), 200\n\n\[email protected]('/print_chain', methods=['GET'])\ndef print_chain():\n chain_till_now = []\n for xblock in blockchain.chain:\n xcurrent_hash = blockchain.hash(xblock)\n if len(xblock['farmer_details']) == 0:\n chain_till_now.append({'index': xblock['index'], 'timestamp':\n xblock['timestamp'], 'proof': xblock['proof'],\n 'previous_hash': xblock['previous_hash'], 'farmer': xblock[\n 'farmer_details'], 'current_hash': xcurrent_hash})\n else:\n l = len(xblock['farmer_details'])\n sum = ''\n l -= 1\n while l >= 0:\n sum = xblock['farmer_details'][l]['hash_of_transaction'] + sum\n l -= 1\n chain_till_now.append({'Merged_hash': hashlib.sha256(sum.encode\n ()).hexdigest(), 'index': xblock['index'], 'timestamp':\n xblock['timestamp'], 'proof': xblock['proof'],\n 'previous_hash': xblock['previous_hash'], 'farmer': xblock[\n 'farmer_details'], 'current_hash': xcurrent_hash})\n response = {'chain': chain_till_now, 'length': len(blockchain.chain)}\n return jsonify(response), 200\n\n\n<mask token>\n\n\[email protected]('/is_valid', methods=['GET'])\ndef is_valid():\n is_valid = blockchain.is_chain_valid(blockchain.chain)\n if is_valid:\n response = {'message': 'All good. The Blockchain is valid.'}\n else:\n response = {'message':\n 'Houston, we have a problem. The Blockchain is not valid.'}\n return jsonify(response), 200\n\n\[email protected]('/add_farmerdetails', methods=['POST'])\ndef add_farmer_details():\n json = request.get_json()\n farmer_keys = ['name_of_farmer', 'crop_name', 'quantity_inkg', 'rate_perkg'\n ]\n if not all(key in json for key in farmer_keys):\n return 'Some elements of the farmer_details are missing', 400\n index = blockchain.add_farmerdetails(json['name_of_farmer'], json[\n 'crop_name'], json['quantity_inkg'], json['rate_perkg'])\n response = {'message': f'These details will be added to Block {index}'}\n return jsonify(response), 201\n\n\[email protected]('/connect_node', methods=['POST'])\ndef connect_node():\n json = request.get_json()\n nodes = json.get('nodes')\n if nodes is None:\n return 'No node', 400\n for node in nodes:\n blockchain.add_node(node)\n response = {'message':\n 'All the nodes are now connected. The puspesh Blockchain now contains the following nodes:'\n , 'total_nodes': list(blockchain.nodes)}\n return jsonify(response), 201\n\n\[email protected]('/replace_chain', methods=['GET'])\ndef replace_chain():\n is_chain_replaced = blockchain.replace_chain()\n if is_chain_replaced:\n response = {'message':\n 'The nodes had different chains so the chain was replaced by the longest one.'\n , 'new_chain': blockchain.chain}\n else:\n response = {'message': 'All good. The chain is the largest one.',\n 'actual_chain': blockchain.chain}\n return jsonify(response), 200\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass Blockchain:\n\n def __init__(self):\n self.chain = []\n self.farmer_details = []\n self.create_block(proof=1, previous_hash='0')\n self.nodes = set()\n\n def create_block(self, proof, previous_hash):\n block = {'index': len(self.chain) + 1, 'timestamp': str(datetime.\n datetime.now()), 'proof': proof, 'previous_hash': previous_hash,\n 'farmer_details': self.farmer_details}\n self.farmer_details = []\n self.chain.append(block)\n return block\n\n def get_previous_block(self):\n return self.chain[-1]\n\n def proof_of_work(self, previous_proof):\n new_proof = 1\n check_proof = False\n while check_proof is False:\n hash_operation = hashlib.sha256(str(new_proof ** 2 - \n previous_proof ** 2).encode()).hexdigest()\n if hash_operation[:4] == '0000':\n check_proof = True\n else:\n new_proof += 1\n return new_proof\n\n def hash(self, block):\n encoded_block = json.dumps(block, sort_keys=True).encode()\n return hashlib.sha256(encoded_block).hexdigest()\n\n def is_chain_valid(self, chain):\n previous_block = chain[0]\n block_index = 1\n while block_index < len(chain):\n block = chain[block_index]\n if block['previous_hash'] != self.hash(previous_block):\n return False\n previous_proof = previous_block['proof']\n proof = block['proof']\n hash_operation = hashlib.sha256(str(proof ** 2 - previous_proof **\n 2).encode()).hexdigest()\n if hash_operation[:4] != '0000':\n return False\n previous_block = block\n block_index += 1\n return True\n\n def add_farmerdetails(self, name, crop_name, quantity, rate):\n privatekey = RSA.generate(1024)\n publickey = privatekey.publickey()\n hash_of_transaction = hashlib.sha256((hashlib.sha256(name.encode())\n .hexdigest() + hashlib.sha256(crop_name.encode()).hexdigest() +\n hashlib.sha256(str(quantity).encode()).hexdigest() + hashlib.\n sha256(str(rate).encode()).hexdigest()).encode()).hexdigest()\n data = int(hash_of_transaction, 16)\n signature = pow(data, privatekey.d, privatekey.n)\n self.farmer_details.append({'name_of_farmer': hashlib.sha256(name.\n encode()).hexdigest(), 'crop_name': hashlib.sha256(crop_name.\n encode()).hexdigest(), 'quantity_inkg': hashlib.sha256(str(\n quantity).encode()).hexdigest(), 'rate_perkg': hashlib.sha256(\n str(rate).encode()).hexdigest(), 'hash_of_transaction':\n hash_of_transaction, 'signature': signature})\n previous_block = self.get_previous_block()\n return previous_block['index'] + 1\n\n def add_node(self, address):\n parsed_url = urlparse(address)\n self.nodes.add(parsed_url.netloc)\n\n def replace_chain(self):\n network = self.nodes\n longest_chain = None\n max_length = len(self.chain)\n for node in network:\n response = requests.get(f'http://{node}/get_chain')\n if response.status_code == 200:\n length = response.json()['length']\n chain = response.json()['chain']\n if length > max_length and self.is_chain_valid(chain):\n max_length = length\n longest_chain = chain\n if longest_chain:\n self.chain = longest_chain\n return True\n return False\n\n\n<mask token>\n\n\[email protected]('/mine_block', methods=['GET'])\ndef mine_block():\n previous_block = blockchain.get_previous_block()\n previous_proof = previous_block['proof']\n proof = blockchain.proof_of_work(previous_proof)\n previous_hash = blockchain.hash(previous_block)\n block = blockchain.create_block(proof, previous_hash)\n current_block = blockchain.get_previous_block()\n current_hash = blockchain.hash(current_block)\n response = {'message': 'Congratulations, you just mined a block!',\n 'index': block['index'], 'timestamp': 
block['timestamp'], 'proof':\n block['proof'], 'previous_hash': block['previous_hash'], 'farmer':\n block['farmer_details'], 'current_hash': current_hash}\n return jsonify(response), 200\n\n\[email protected]('/print_chain', methods=['GET'])\ndef print_chain():\n chain_till_now = []\n for xblock in blockchain.chain:\n xcurrent_hash = blockchain.hash(xblock)\n if len(xblock['farmer_details']) == 0:\n chain_till_now.append({'index': xblock['index'], 'timestamp':\n xblock['timestamp'], 'proof': xblock['proof'],\n 'previous_hash': xblock['previous_hash'], 'farmer': xblock[\n 'farmer_details'], 'current_hash': xcurrent_hash})\n else:\n l = len(xblock['farmer_details'])\n sum = ''\n l -= 1\n while l >= 0:\n sum = xblock['farmer_details'][l]['hash_of_transaction'] + sum\n l -= 1\n chain_till_now.append({'Merged_hash': hashlib.sha256(sum.encode\n ()).hexdigest(), 'index': xblock['index'], 'timestamp':\n xblock['timestamp'], 'proof': xblock['proof'],\n 'previous_hash': xblock['previous_hash'], 'farmer': xblock[\n 'farmer_details'], 'current_hash': xcurrent_hash})\n response = {'chain': chain_till_now, 'length': len(blockchain.chain)}\n return jsonify(response), 200\n\n\[email protected]('/get_chain', methods=['GET'])\ndef get_chain():\n response = {'chain': blockchain.chain, 'length': len(blockchain.chain)}\n return jsonify(response), 200\n\n\[email protected]('/is_valid', methods=['GET'])\ndef is_valid():\n is_valid = blockchain.is_chain_valid(blockchain.chain)\n if is_valid:\n response = {'message': 'All good. The Blockchain is valid.'}\n else:\n response = {'message':\n 'Houston, we have a problem. The Blockchain is not valid.'}\n return jsonify(response), 200\n\n\[email protected]('/add_farmerdetails', methods=['POST'])\ndef add_farmer_details():\n json = request.get_json()\n farmer_keys = ['name_of_farmer', 'crop_name', 'quantity_inkg', 'rate_perkg'\n ]\n if not all(key in json for key in farmer_keys):\n return 'Some elements of the farmer_details are missing', 400\n index = blockchain.add_farmerdetails(json['name_of_farmer'], json[\n 'crop_name'], json['quantity_inkg'], json['rate_perkg'])\n response = {'message': f'These details will be added to Block {index}'}\n return jsonify(response), 201\n\n\[email protected]('/connect_node', methods=['POST'])\ndef connect_node():\n json = request.get_json()\n nodes = json.get('nodes')\n if nodes is None:\n return 'No node', 400\n for node in nodes:\n blockchain.add_node(node)\n response = {'message':\n 'All the nodes are now connected. The puspesh Blockchain now contains the following nodes:'\n , 'total_nodes': list(blockchain.nodes)}\n return jsonify(response), 201\n\n\[email protected]('/replace_chain', methods=['GET'])\ndef replace_chain():\n is_chain_replaced = blockchain.replace_chain()\n if is_chain_replaced:\n response = {'message':\n 'The nodes had different chains so the chain was replaced by the longest one.'\n , 'new_chain': blockchain.chain}\n else:\n response = {'message': 'All good. The chain is the largest one.',\n 'actual_chain': blockchain.chain}\n return jsonify(response), 200\n\n\napp.run(host='0.0.0.0', port=5001)\n",
"step-5": "\r\nimport datetime\r\nimport hashlib\r\nimport json\r\nfrom flask import Flask, jsonify, request\r\nimport requests\r\nfrom uuid import uuid4\r\nfrom urllib.parse import urlparse\r\nfrom Crypto.PublicKey import RSA\r\n\r\n# Part 1 - Building a Blockchain\r\n\r\nclass Blockchain:\r\n#chain(emptylist) , farmer_details(emptylist), nodes(set), create_block(function to create the genesis block)\r\n def __init__(self):\r\n self.chain = []\r\n self.farmer_details = []\r\n self.create_block(proof = 1, previous_hash = '0')\r\n self.nodes = set()\r\n#It creates a dictionary block which contains index(length of chain+1),timestamp( by using the module datetime),\r\n#Proof( passes as parameter),previous_hash(passed as parameter),\r\n#Farmer_details(from self) and append this to the chain.\r\n \r\n def create_block(self, proof, previous_hash):\r\n block = {'index': len(self.chain) + 1,\r\n 'timestamp': str(datetime.datetime.now()),\r\n 'proof': proof,\r\n 'previous_hash': previous_hash,\r\n 'farmer_details': self.farmer_details}\r\n self.farmer_details = []\r\n self.chain.append(block)\r\n return block\r\n#It returns the last block of the chain.\r\n def get_previous_block(self):\r\n return self.chain[-1]\r\n#It runs a lop and check if hash of new proof^2- previous proof^2 contains 4 leading zeroes. \r\n#if yes,then it returns the new proof otherwise increment the new proof by 1 and iterates again.\r\n def proof_of_work(self, previous_proof):\r\n new_proof = 1\r\n check_proof = False\r\n while check_proof is False:\r\n hash_operation = hashlib.sha256(str(new_proof**2 - previous_proof**2).encode()).hexdigest()\r\n if hash_operation[:4] == '0000':\r\n check_proof = True\r\n else:\r\n new_proof += 1\r\n return new_proof\r\n#- It returns the hash of the block using sha256 \r\n def hash(self, block):\r\n encoded_block = json.dumps(block, sort_keys = True).encode()\r\n return hashlib.sha256(encoded_block).hexdigest()\r\n#It iterates a loop from 0 to chain length and check if hash of the block is same as returned by the hash function, \r\n#then it checks if hash of the proof of current block^2-proof of previous block^2 contains 4 leading zeroes or not.\r\n# if no, then chain is not valid. \r\n def is_chain_valid(self, chain):\r\n previous_block = chain[0]\r\n block_index = 1\r\n while block_index < len(chain):\r\n block = chain[block_index]\r\n if block['previous_hash'] != self.hash(previous_block):\r\n return False\r\n previous_proof = previous_block['proof']\r\n proof = block['proof']\r\n hash_operation = hashlib.sha256(str(proof**2 - previous_proof**2).encode()).hexdigest()\r\n if hash_operation[:4] != '0000':\r\n return False\r\n previous_block = block\r\n block_index += 1\r\n return True\r\n#- It creates the private key using the RSA.generate(1024),then creates the public key,\r\n# hash of transaction(it is the hash of the sum of hashes of the name,crop_name,quantity,rate),\r\n#data( it is the hash of the transaction in the int form),\r\n#signature( it is created by raising the data to the power of privatekey.d%privatekey.n).\r\n# Then it append a dictionary containing all these information in the hash format to the chain farmer_details \r\n#and returns the index of the new block. 
\r\n def add_farmerdetails(self, name, crop_name, quantity,rate):\r\n privatekey = RSA.generate(1024) \r\n publickey = privatekey.publickey() \r\n hash_of_transaction=hashlib.sha256((hashlib.sha256(name.encode()).hexdigest()+hashlib.sha256(crop_name.encode()).hexdigest()+hashlib.sha256(str(quantity).encode()).hexdigest()+hashlib.sha256(str(rate).encode()).hexdigest()).encode()).hexdigest()\r\n data=int(hash_of_transaction,16)\r\n signature=pow(data,privatekey.d,privatekey.n)\r\n self.farmer_details.append({'name_of_farmer': hashlib.sha256(name.encode()).hexdigest(),\r\n 'crop_name': hashlib.sha256(crop_name.encode()).hexdigest(),\r\n 'quantity_inkg': hashlib.sha256(str(quantity).encode()).hexdigest(),\r\n 'rate_perkg': hashlib.sha256(str(rate).encode()).hexdigest(),\r\n 'hash_of_transaction': hash_of_transaction,\r\n 'signature': signature\r\n })\r\n previous_block = self.get_previous_block()\r\n return previous_block['index'] + 1\r\n#It takes the url using urlparse of the address and then adds this to the set nodes in the self.\r\n def add_node(self, address):\r\n parsed_url = urlparse(address)\r\n self.nodes.add(parsed_url.netloc)\r\n#It access all the nodes in the set nodes and then iterates a loop to get their chain length using get_chain (to be described)\r\n# and replaces the current chain with the longest chain of all the nodes. \r\n def replace_chain(self):\r\n network = self.nodes\r\n longest_chain = None\r\n max_length = len(self.chain)\r\n for node in network:\r\n response = requests.get(f'http://{node}/get_chain')\r\n if response.status_code == 200:\r\n length = response.json()['length']\r\n chain = response.json()['chain']\r\n if length > max_length and self.is_chain_valid(chain):\r\n max_length = length\r\n longest_chain = chain\r\n if longest_chain:\r\n self.chain = longest_chain\r\n return True\r\n return False\r\n\r\n# Part 2 - Mining our Blockchain\r\n\r\n# Creating a Web App\r\napp = Flask(__name__)\r\n\r\n# Creating an address for the node on Port 5001\r\nnode_address = str(uuid4()).replace('-', '')\r\n\r\n# Creating a Blockchain\r\nblockchain = Blockchain()\r\n\r\n# Mining a new block\r\n#- It access the previous block by calling the function get_previous_block(), \r\n#then access the previous proof by previous_block[‘proof’],\r\n#then it creates a new proof by using the function proof_of_work(‘previous_proof’), \r\n#then it finds the hash of the previous block by using the function blockchain.hash(previous_block),\r\n# then calls the function create_block( proof,previous_hash),then finds the hash of this block.\r\n# It creates a response containing all the details of the new block,jsonify it and returns it.\r\[email protected]('/mine_block', methods = ['GET'])\r\ndef mine_block():\r\n previous_block = blockchain.get_previous_block()\r\n previous_proof = previous_block['proof']\r\n proof = blockchain.proof_of_work(previous_proof)\r\n previous_hash = blockchain.hash(previous_block)\r\n #blockchain.add_transaction(sender = node_address, receiver = 'Hadelin', amount = 1)\r\n block = blockchain.create_block(proof, previous_hash)\r\n current_block=blockchain.get_previous_block()\r\n current_hash=blockchain.hash(current_block)\r\n response = {'message': 'Congratulations, you just mined a block!',\r\n 'index': block['index'],\r\n 'timestamp': block['timestamp'],\r\n 'proof': block['proof'],\r\n 'previous_hash': block['previous_hash'],\r\n 'farmer': block['farmer_details'],\r\n 'current_hash': current_hash}\r\n return jsonify(response), 200\r\n\r\n# Getting the full 
Blockchain\r\n#- It creates an empty list chain_till_now, then iterates over all the blocks in the blockchain and find it’s hash \r\n#then check if the list farmer_details is empty or not, \r\n#if it is empty then it appends a dictionary containing the current block’s index,timestamp,proof,previous_hash, current_hash, farmer_details.\r\n# If the farmer_details list is not empty then it first finds the length of the list farmer_details \r\n#then it iterates over the length of the list farmer_details and appends the hash of transaction \r\n# contained within the dictionary of the list farmer_details. Then it creates the hash of this appended hash. This is the merged hash.\r\n# Then it creates a dictionary containing merged hash,index,timestamp,proof,previous_hash,farmer_details and current hash.\r\n# Then, it appends this dictionary to the list chain till now.\r\n# It then creates the response containing the chain till now and length of the blockchain,jasonifies it and returns it. \r\n\r\[email protected]('/print_chain',methods=['GET'])\r\ndef print_chain():\r\n chain_till_now =[]\r\n for xblock in blockchain.chain:\r\n xcurrent_hash=blockchain.hash(xblock) \r\n if len(xblock['farmer_details'])==0:\r\n chain_till_now.append({'index': xblock['index'],\r\n 'timestamp': xblock['timestamp'],\r\n 'proof': xblock['proof'],\r\n 'previous_hash': xblock['previous_hash'],\r\n 'farmer': xblock['farmer_details'],\r\n 'current_hash': xcurrent_hash})\r\n else:\r\n l=len(xblock['farmer_details'])\r\n sum=\"\"\r\n l-=1\r\n while(l>=0):\r\n sum=xblock['farmer_details'][l]['hash_of_transaction']+sum\r\n l-=1\r\n chain_till_now.append({'Merged_hash': hashlib.sha256(sum.encode()).hexdigest(),\r\n 'index': xblock['index'],\r\n 'timestamp': xblock['timestamp'],\r\n 'proof': xblock['proof'],\r\n 'previous_hash': xblock['previous_hash'],\r\n 'farmer': xblock['farmer_details'],\r\n 'current_hash': xcurrent_hash}) \r\n response = {'chain': chain_till_now,\r\n 'length': len(blockchain.chain)}\r\n return jsonify(response), 200\r\n\r\n#- It creats the response containing the blockchain.chain and its length,jasonifies it and returns it. \r\[email protected]('/get_chain', methods = ['GET'])\r\ndef get_chain():\r\n response = {'chain': blockchain.chain,\r\n 'length': len(blockchain.chain)}\r\n return jsonify(response), 200\r\n\r\n# Checking if the Blockchain is valid\r\n#- It calls the function is_chain_valid and returns a string as response based on whether the chain is valid or not.\r\[email protected]('/is_valid', methods = ['GET'])\r\ndef is_valid():\r\n is_valid = blockchain.is_chain_valid(blockchain.chain)\r\n if is_valid:\r\n response = {'message': 'All good. The Blockchain is valid.'}\r\n else:\r\n response = {'message': 'Houston, we have a problem. The Blockchain is not valid.'}\r\n return jsonify(response), 200\r\n\r\n# Adding a new transaction to the Blockchain\r\n#It takes the input in Jason format and checks if all the keys in the farmer keys(name_of_farmer,crop_name,quantity_inkg, rate_perkg) are available in the json file. 
\r\n#If no, It returns that some elements are missing\r\n# otherwise it calls the function add_farmer_details by passing the farmer details in the json file as parameter and \r\n#returns the index of the block in which these details will be added.\r\[email protected]('/add_farmerdetails', methods = ['POST'])\r\ndef add_farmer_details():\r\n json = request.get_json()\r\n farmer_keys = ['name_of_farmer', 'crop_name', 'quantity_inkg','rate_perkg']\r\n if not all(key in json for key in farmer_keys):\r\n return 'Some elements of the farmer_details are missing', 400\r\n index = blockchain.add_farmerdetails(json['name_of_farmer'], json['crop_name'], json['quantity_inkg'], json['rate_perkg'])\r\n response = {'message': f'These details will be added to Block {index}'}\r\n return jsonify(response), 201\r\n\r\n# Part 3 - Decentralizing our Blockchain\r\n\r\n# Connecting new nodes\r\n#It takes a Jason file as request and first check if it contains any node or not.\r\n# If it contains the nodes then it calls the function blockchain.add_node .\r\n#Then it returns the list of blockchain.nodes as response.\r\[email protected]('/connect_node', methods = ['POST'])\r\ndef connect_node():\r\n json = request.get_json()\r\n nodes = json.get('nodes')\r\n if nodes is None:\r\n return \"No node\", 400\r\n for node in nodes:\r\n blockchain.add_node(node)\r\n response = {'message': 'All the nodes are now connected. The puspesh Blockchain now contains the following nodes:',\r\n 'total_nodes': list(blockchain.nodes)}\r\n return jsonify(response), 201\r\n\r\n# Replacing the chain by the longest chain if needed\r\n#- It calls the function blockcain.replace_chain. If the chain is replaced \r\n#it returns the response with a message that the nodes has the different chains so the chain has been replaced by the longest chain alongwith the blockchain.chain.\r\n# Otherwise it returns the response with a message all good the chain is the longest one with the blockchain.chain .\r\n#then it jsonify the response and returns it.\r\[email protected]('/replace_chain', methods = ['GET'])\r\ndef replace_chain():\r\n is_chain_replaced = blockchain.replace_chain()\r\n if is_chain_replaced:\r\n response = {'message': 'The nodes had different chains so the chain was replaced by the longest one.',\r\n 'new_chain': blockchain.chain}\r\n else:\r\n response = {'message': 'All good. The chain is the largest one.',\r\n 'actual_chain': blockchain.chain}\r\n return jsonify(response), 200\r\n\r\n# Running the app\r\napp.run(host = '0.0.0.0', port = 5001)\r\n",
"step-ids": [
13,
15,
16,
18,
21
]
}
|
[
13,
15,
16,
18,
21
] |
def find_happy_number(num):
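    # Floyd's cycle detection: the sequence of digit-square sums either reaches 1 (happy) or falls into a repeating cycle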
slow, fast = num, num
while True:
slow = find_square_sum(slow) # move one step
fast = find_square_sum(find_square_sum(fast)) # move two steps
if slow == fast: # found the cycle
break
return slow == 1 # see if the cycle is stuck on the number '1'
def find_square_sum(num):
_sum = 0
while (num > 0):
digit = num % 10
_sum += digit * digit
num //= 10
return _sum
print(find_happy_number(23))
print(find_happy_number(12))
|
normal
|
{
"blob_id": "60b5e515c7275bfa0f79e22f54302a578c2f7b79",
"index": 728,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef find_square_sum(num):\n _sum = 0\n while num > 0:\n digit = num % 10\n _sum += digit * digit\n num //= 10\n return _sum\n\n\n<mask token>\n",
"step-3": "def find_happy_number(num):\n slow, fast = num, num\n while True:\n slow = find_square_sum(slow)\n fast = find_square_sum(find_square_sum(fast))\n if slow == fast:\n break\n return slow == 1\n\n\ndef find_square_sum(num):\n _sum = 0\n while num > 0:\n digit = num % 10\n _sum += digit * digit\n num //= 10\n return _sum\n\n\n<mask token>\n",
"step-4": "def find_happy_number(num):\n slow, fast = num, num\n while True:\n slow = find_square_sum(slow)\n fast = find_square_sum(find_square_sum(fast))\n if slow == fast:\n break\n return slow == 1\n\n\ndef find_square_sum(num):\n _sum = 0\n while num > 0:\n digit = num % 10\n _sum += digit * digit\n num //= 10\n return _sum\n\n\nprint(find_happy_number(23))\nprint(find_happy_number(12))\n",
"step-5": "def find_happy_number(num):\n slow, fast = num, num\n while True:\n slow = find_square_sum(slow) # move one step\n fast = find_square_sum(find_square_sum(fast)) # move two steps\n if slow == fast: # found the cycle\n break\n return slow == 1 # see if the cycle is stuck on the number '1'\n\n\ndef find_square_sum(num):\n _sum = 0\n while (num > 0):\n digit = num % 10\n _sum += digit * digit\n num //= 10\n return _sum\n\nprint(find_happy_number(23)) \nprint(find_happy_number(12))",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# This package will contain the spiders of your Scrapy project
#
# Please refer to the documentation for information on how to create and manage
# your spiders.
import datetime
import scrapy
from ScrapyProject.items import ScrapyItem
class ThalesSpider(scrapy.Spider):
#item_id = ScrapyItem()
name = 'thales'
    allowed_domains = ['www.thalesgroup.com']  # Scrapy expects bare domains here, not full URLs
start_urls = [('https://www.thalesgroup.com/fr/search-everything/all/propulsion?page=%d' %i ) for i in range(0,30)]
def parse(self, response):
# iterate entries
for entry in response.css('div.big__list__item__info'):
#retrieve info for our current post
item = ScrapyItem()
item['source'] = 'thales'
item['date'] = 'NotAvalaible'
item['brief'] = entry.css('div.field__item even::text').extract_first()
item['url'] = entry.css('a::attr(href)').extract_first()
item['title'] = entry.css('a::text').extract_first()
# check time
now = datetime.datetime.now()
item['tstamp'] = now
print(item)
yield item
|
normal
|
{
"blob_id": "fd1b871c5cf79874acf8d5c4f1f73f7a381e23f7",
"index": 8278,
"step-1": "<mask token>\n\n\nclass ThalesSpider(scrapy.Spider):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass ThalesSpider(scrapy.Spider):\n <mask token>\n <mask token>\n <mask token>\n\n def parse(self, response):\n for entry in response.css('div.big__list__item__info'):\n item = ScrapyItem()\n item['source'] = 'thales'\n item['date'] = 'NotAvalaible'\n item['brief'] = entry.css('div.field__item even::text'\n ).extract_first()\n item['url'] = entry.css('a::attr(href)').extract_first()\n item['title'] = entry.css('a::text').extract_first()\n now = datetime.datetime.now()\n item['tstamp'] = now\n print(item)\n yield item\n",
"step-3": "<mask token>\n\n\nclass ThalesSpider(scrapy.Spider):\n name = 'thales'\n allowed_domains = ['https://www.thalesgroup.com']\n start_urls = [(\n 'https://www.thalesgroup.com/fr/search-everything/all/propulsion?page=%d'\n % i) for i in range(0, 30)]\n\n def parse(self, response):\n for entry in response.css('div.big__list__item__info'):\n item = ScrapyItem()\n item['source'] = 'thales'\n item['date'] = 'NotAvalaible'\n item['brief'] = entry.css('div.field__item even::text'\n ).extract_first()\n item['url'] = entry.css('a::attr(href)').extract_first()\n item['title'] = entry.css('a::text').extract_first()\n now = datetime.datetime.now()\n item['tstamp'] = now\n print(item)\n yield item\n",
"step-4": "import datetime\nimport scrapy\nfrom ScrapyProject.items import ScrapyItem\n\n\nclass ThalesSpider(scrapy.Spider):\n name = 'thales'\n allowed_domains = ['https://www.thalesgroup.com']\n start_urls = [(\n 'https://www.thalesgroup.com/fr/search-everything/all/propulsion?page=%d'\n % i) for i in range(0, 30)]\n\n def parse(self, response):\n for entry in response.css('div.big__list__item__info'):\n item = ScrapyItem()\n item['source'] = 'thales'\n item['date'] = 'NotAvalaible'\n item['brief'] = entry.css('div.field__item even::text'\n ).extract_first()\n item['url'] = entry.css('a::attr(href)').extract_first()\n item['title'] = entry.css('a::text').extract_first()\n now = datetime.datetime.now()\n item['tstamp'] = now\n print(item)\n yield item\n",
"step-5": "# This package will contain the spiders of your Scrapy project\n#\n# Please refer to the documentation for information on how to create and manage\n# your spiders.\n\nimport datetime\nimport scrapy\nfrom ScrapyProject.items import ScrapyItem\n\nclass ThalesSpider(scrapy.Spider):\n\t#item_id = ScrapyItem()\n\tname = 'thales'\n\n\n\tallowed_domains = ['https://www.thalesgroup.com']\n\n\tstart_urls = [('https://www.thalesgroup.com/fr/search-everything/all/propulsion?page=%d' %i ) for i in range(0,30)]\n\n\tdef parse(self, response):\n # iterate entries\n\n\n\t\tfor entry in response.css('div.big__list__item__info'):\n\n #retrieve info for our current post\n\t\t\titem = ScrapyItem()\n\n\t\t\titem['source'] = 'thales'\n\t\t\titem['date'] = 'NotAvalaible'\n\t\t\titem['brief'] = entry.css('div.field__item even::text').extract_first()\n\t\t\titem['url'] = entry.css('a::attr(href)').extract_first()\n\t\t\titem['title'] = entry.css('a::text').extract_first()\n\n\t\t\t# check time\n\t\t\tnow = datetime.datetime.now()\n\t\t\titem['tstamp'] = now\n\n\t\t\tprint(item)\n\n\t\t\tyield item\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
from django import forms
from .models import User,Profile
from django.contrib.auth.forms import UserCreationForm
class ProfileForm(forms.ModelForm):
''' Form for the profile '''
class Meta:
model = Profile
exclude = ('user',) ## we will create the user with the signals
class SignUpForm(UserCreationForm):
    ''' Sign up form derived from the UserCreationForm;
     the email and password are required, not the username '''
class Meta:
model = User
fields = ('email','password1','password2')
|
normal
|
{
"blob_id": "7c3569c43d27ba605c0dba420690e18d7f849965",
"index": 7372,
"step-1": "<mask token>\n\n\nclass SignUpForm(UserCreationForm):\n \"\"\" Sign up form fetching form the User creation form\n and the email and password is necessary not the user \"\"\"\n\n\n class Meta:\n model = User\n fields = 'email', 'password1', 'password2'\n",
"step-2": "<mask token>\n\n\nclass ProfileForm(forms.ModelForm):\n <mask token>\n\n\n class Meta:\n model = Profile\n exclude = 'user',\n\n\nclass SignUpForm(UserCreationForm):\n \"\"\" Sign up form fetching form the User creation form\n and the email and password is necessary not the user \"\"\"\n\n\n class Meta:\n model = User\n fields = 'email', 'password1', 'password2'\n",
"step-3": "<mask token>\n\n\nclass ProfileForm(forms.ModelForm):\n \"\"\" Form for the profile \"\"\"\n\n\n class Meta:\n model = Profile\n exclude = 'user',\n\n\nclass SignUpForm(UserCreationForm):\n \"\"\" Sign up form fetching form the User creation form\n and the email and password is necessary not the user \"\"\"\n\n\n class Meta:\n model = User\n fields = 'email', 'password1', 'password2'\n",
"step-4": "from django import forms\nfrom .models import User, Profile\nfrom django.contrib.auth.forms import UserCreationForm\n\n\nclass ProfileForm(forms.ModelForm):\n \"\"\" Form for the profile \"\"\"\n\n\n class Meta:\n model = Profile\n exclude = 'user',\n\n\nclass SignUpForm(UserCreationForm):\n \"\"\" Sign up form fetching form the User creation form\n and the email and password is necessary not the user \"\"\"\n\n\n class Meta:\n model = User\n fields = 'email', 'password1', 'password2'\n",
"step-5": "from django import forms\nfrom .models import User,Profile\nfrom django.contrib.auth.forms import UserCreationForm\n\n\nclass ProfileForm(forms.ModelForm):\n ''' Form for the profile '''\n class Meta:\n model = Profile\n exclude = ('user',) ## we will create the user with the signals\n\n\n\n\nclass SignUpForm(UserCreationForm):\n ''' Sign up form fetching form the User creation form\n and the email and password is necessary not the user '''\n class Meta:\n model = User\n fields = ('email','password1','password2')\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
"""
Neuraxle Tensorflow V1 Utility classes
=========================================
Neuraxle utility classes for tensorflow v1.
..
Copyright 2019, Neuraxio Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import tensorflow as tf
from neuraxle.base import BaseSaver, BaseStep, ExecutionContext
from neuraxle.hyperparams.space import HyperparameterSamples, HyperparameterSpace
from neuraxle_tensorflow.tensorflow import BaseTensorflowModelStep
class TensorflowV1ModelStep(BaseTensorflowModelStep):
"""
Base class for tensorflow 1 steps.
It uses :class:`TensorflowV1StepSaver` for saving the model.
.. seealso::
`Using the saved model format <https://www.tensorflow.org/guide/checkpoint>`_,
:class:`~neuraxle.base.BaseStep`
"""
HYPERPARAMS = HyperparameterSamples({})
HYPERPARAMS_SPACE = HyperparameterSpace({})
def __init__(
self,
create_graph,
create_loss,
create_optimizer,
create_feed_dict=None,
data_inputs_dtype=None,
expected_outputs_dtype=None,
variable_scope=None,
has_expected_outputs=True,
print_loss=False,
print_func=None
):
BaseTensorflowModelStep.__init__(
self,
create_model=create_graph,
create_loss=create_loss,
create_optimizer=create_optimizer,
create_inputs=create_feed_dict,
data_inputs_dtype=data_inputs_dtype,
expected_outputs_dtype=expected_outputs_dtype,
step_saver=TensorflowV1StepSaver(),
print_loss=print_loss,
print_func=print_func
)
if variable_scope is None:
variable_scope = self.name
self.variable_scope = variable_scope
self.has_expected_outputs = has_expected_outputs
self.create_feed_dict = create_feed_dict
def setup(self, context: ExecutionContext) -> BaseStep:
"""
Setup tensorflow 1 graph, and session using a variable scope.
:return: self
:rtype: BaseStep
"""
if self.is_initialized:
return self
self.graph = tf.Graph()
with self.graph.as_default():
with tf.variable_scope(self.variable_scope, reuse=tf.AUTO_REUSE):
self.session = tf.Session(config=tf.ConfigProto(log_device_placement=True), graph=self.graph)
model = self.create_model(self, context)
if not isinstance(model, tuple):
tf.identity(model, name='output')
else:
tf.identity(model[0], name='output')
tf.identity(model[1], name='inference_output')
tf.identity(self.create_loss(self), name='loss')
self.create_optimizer(self, context).minimize(self['loss'], name='optimizer')
init = tf.global_variables_initializer()
self.session.run(init)
        self.is_initialized = True
        return self
def teardown(self) -> BaseStep:
"""
Close session on teardown.
:return:
"""
if self.session is not None:
self.session.close()
self.is_initialized = False
return self
def strip(self):
"""
        Strip tensorflow 1 properties from the step to make the step serializable.
:return: stripped step
:rtype: BaseStep
"""
self.graph = None
self.session = None
return self
def fit(self, data_inputs, expected_outputs=None) -> 'BaseStep':
with tf.variable_scope(self.variable_scope, reuse=tf.AUTO_REUSE):
return self.fit_model(data_inputs, expected_outputs)
def fit_model(self, data_inputs, expected_outputs=None) -> BaseStep:
"""
Fit tensorflow model using the variable scope.
:param data_inputs: data inputs
:param expected_outputs: expected outputs to fit on
:return: fitted self
:rtype: BaseStep
"""
feed_dict = {
self['data_inputs']: data_inputs
}
if self.has_expected_outputs:
feed_dict.update({
self['expected_outputs']: expected_outputs
})
if self.create_inputs is not None:
additional_feed_dict_arguments = self.create_inputs(self, data_inputs, expected_outputs)
feed_dict.update(additional_feed_dict_arguments)
results = self.session.run([self['optimizer'], self['loss']], feed_dict=feed_dict)
loss = results[1]
self.add_new_loss(loss)
return self
def transform(self, data_inputs, expected_outputs=None) -> 'BaseStep':
with tf.variable_scope(self.variable_scope, reuse=tf.AUTO_REUSE):
return self.transform_model(data_inputs)
def transform_model(self, data_inputs):
"""
Transform tensorflow model using the variable scope.
:param data_inputs:
:return:
"""
inference_output_name = self._get_inference_output_name()
feed_dict = {
self['data_inputs']: data_inputs
}
results = self.session.run([self[inference_output_name], self['loss']], feed_dict=feed_dict)
self.add_new_loss(results[1], test_only=True)
return results[0]
def _get_inference_output_name(self):
"""
Return the output tensor name for inference (transform).
In create_graph, the user can return a tuple of two elements : the output tensor for training, and the output tensor for inference.
:return:
"""
inference_output_name = 'output'
if len(self['inference_output'].get_shape().as_list()) > 0:
inference_output_name = 'inference_output'
return inference_output_name
def __getitem__(self, item):
"""
Get a graph tensor by name using get item.
:param item: tensor name
:type item: str
:return: tensor
:rtype: tf.Tensor
"""
if ":" in item:
split = item.split(":")
tensor_name = split[0]
device = split[1]
else:
tensor_name = item
device = "0"
try:
result = self.graph.get_tensor_by_name("{0}/{1}:{2}".format(self.variable_scope, tensor_name, device))
except KeyError:
result = None
if result is None:
try:
result = self.graph.get_operation_by_name("{0}/{1}".format(self.variable_scope, tensor_name))
except KeyError:
result = tf.get_variable(tensor_name, [])
return result
class TensorflowV1StepSaver(BaseSaver):
"""
Step saver for a tensorflow Session using tf.train.Saver().
It saves, or restores the tf.Session() checkpoint at the context path using the step name as file name.
.. seealso::
`Using the saved model format <https://www.tensorflow.org/guide/saved_model>`_,
:class:`~neuraxle.base.BaseSaver`
"""
def save_step(self, step: 'TensorflowV1ModelStep', context: 'ExecutionContext') -> 'BaseStep':
"""
Save a step that is using tf.train.Saver().
:param step: step to save
:type step: BaseStep
:param context: execution context to save from
:type context: ExecutionContext
:return: saved step
"""
with step.graph.as_default():
saver = tf.train.Saver()
saver.save(step.session, self._get_saved_model_path(context, step))
step.strip()
return step
def load_step(self, step: 'TensorflowV1ModelStep', context: 'ExecutionContext') -> 'BaseStep':
"""
Load a step that is using tensorflow using tf.train.Saver().
:param step: step to load
:type step: BaseStep
:param context: execution context to load from
:type context: ExecutionContext
:return: loaded step
"""
step.is_initialized = False
step.setup(context)
with step.graph.as_default():
saver = tf.train.Saver()
saver.restore(step.session, self._get_saved_model_path(context, step))
return step
def can_load(self, step: 'TensorflowV1ModelStep', context: 'ExecutionContext'):
"""
Returns whether or not we can load.
:param step: step to load
:type step: BaseStep
:param context: execution context to load from
:type context: ExecutionContext
:return: loaded step
"""
meta_exists = os.path.exists(os.path.join(context.get_path(), "{0}.ckpt.meta".format(step.get_name())))
index_exists = os.path.exists(os.path.join(context.get_path(), "{0}.ckpt.index".format(step.get_name())))
return meta_exists and index_exists
def _get_saved_model_path(self, context: ExecutionContext, step: BaseStep):
"""
Returns the saved model path using the given execution context, and step name.
:param step: step to load
:type step: BaseStep
:param context: execution context to load from
:type context: ExecutionContext
:return: loaded step
"""
return os.path.join(context.get_path(), "{0}.ckpt".format(step.get_name()))
|
normal
|
{
"blob_id": "76a22408bb423d9a5bc5bc007decdbc7c6cc98f7",
"index": 8397,
"step-1": "<mask token>\n\n\nclass TensorflowV1ModelStep(BaseTensorflowModelStep):\n <mask token>\n <mask token>\n <mask token>\n\n def __init__(self, create_graph, create_loss, create_optimizer,\n create_feed_dict=None, data_inputs_dtype=None,\n expected_outputs_dtype=None, variable_scope=None,\n has_expected_outputs=True, print_loss=False, print_func=None):\n BaseTensorflowModelStep.__init__(self, create_model=create_graph,\n create_loss=create_loss, create_optimizer=create_optimizer,\n create_inputs=create_feed_dict, data_inputs_dtype=\n data_inputs_dtype, expected_outputs_dtype=\n expected_outputs_dtype, step_saver=TensorflowV1StepSaver(),\n print_loss=print_loss, print_func=print_func)\n if variable_scope is None:\n variable_scope = self.name\n self.variable_scope = variable_scope\n self.has_expected_outputs = has_expected_outputs\n self.create_feed_dict = create_feed_dict\n\n def setup(self, context: ExecutionContext) ->BaseStep:\n \"\"\"\n Setup tensorflow 1 graph, and session using a variable scope.\n\n :return: self\n :rtype: BaseStep\n \"\"\"\n if self.is_initialized:\n return self\n self.graph = tf.Graph()\n with self.graph.as_default():\n with tf.variable_scope(self.variable_scope, reuse=tf.AUTO_REUSE):\n self.session = tf.Session(config=tf.ConfigProto(\n log_device_placement=True), graph=self.graph)\n model = self.create_model(self, context)\n if not isinstance(model, tuple):\n tf.identity(model, name='output')\n else:\n tf.identity(model[0], name='output')\n tf.identity(model[1], name='inference_output')\n tf.identity(self.create_loss(self), name='loss')\n self.create_optimizer(self, context).minimize(self['loss'],\n name='optimizer')\n init = tf.global_variables_initializer()\n self.session.run(init)\n self.is_initialized = True\n\n def teardown(self) ->BaseStep:\n \"\"\"\n Close session on teardown.\n\n :return:\n \"\"\"\n if self.session is not None:\n self.session.close()\n self.is_initialized = False\n return self\n\n def strip(self):\n \"\"\"\n Strip tensorflow 1 properties from to step to make the step serializable.\n\n :return: stripped step\n :rtype: BaseStep\n \"\"\"\n self.graph = None\n self.session = None\n return self\n <mask token>\n\n def fit_model(self, data_inputs, expected_outputs=None) ->BaseStep:\n \"\"\"\n Fit tensorflow model using the variable scope.\n\n :param data_inputs: data inputs\n :param expected_outputs: expected outputs to fit on\n :return: fitted self\n :rtype: BaseStep\n \"\"\"\n feed_dict = {self['data_inputs']: data_inputs}\n if self.has_expected_outputs:\n feed_dict.update({self['expected_outputs']: expected_outputs})\n if self.create_inputs is not None:\n additional_feed_dict_arguments = self.create_inputs(self,\n data_inputs, expected_outputs)\n feed_dict.update(additional_feed_dict_arguments)\n results = self.session.run([self['optimizer'], self['loss']],\n feed_dict=feed_dict)\n loss = results[1]\n self.add_new_loss(loss)\n return self\n\n def transform(self, data_inputs, expected_outputs=None) ->'BaseStep':\n with tf.variable_scope(self.variable_scope, reuse=tf.AUTO_REUSE):\n return self.transform_model(data_inputs)\n <mask token>\n <mask token>\n <mask token>\n\n\nclass TensorflowV1StepSaver(BaseSaver):\n \"\"\"\n Step saver for a tensorflow Session using tf.train.Saver().\n It saves, or restores the tf.Session() checkpoint at the context path using the step name as file name.\n\n .. 
seealso::\n `Using the saved model format <https://www.tensorflow.org/guide/saved_model>`_,\n :class:`~neuraxle.base.BaseSaver`\n \"\"\"\n\n def save_step(self, step: 'TensorflowV1ModelStep', context:\n 'ExecutionContext') ->'BaseStep':\n \"\"\"\n Save a step that is using tf.train.Saver().\n :param step: step to save\n :type step: BaseStep\n :param context: execution context to save from\n :type context: ExecutionContext\n :return: saved step\n \"\"\"\n with step.graph.as_default():\n saver = tf.train.Saver()\n saver.save(step.session, self._get_saved_model_path(context, step))\n step.strip()\n return step\n\n def load_step(self, step: 'TensorflowV1ModelStep', context:\n 'ExecutionContext') ->'BaseStep':\n \"\"\"\n Load a step that is using tensorflow using tf.train.Saver().\n :param step: step to load\n :type step: BaseStep\n :param context: execution context to load from\n :type context: ExecutionContext\n :return: loaded step\n \"\"\"\n step.is_initialized = False\n step.setup(context)\n with step.graph.as_default():\n saver = tf.train.Saver()\n saver.restore(step.session, self._get_saved_model_path(context,\n step))\n return step\n\n def can_load(self, step: 'TensorflowV1ModelStep', context:\n 'ExecutionContext'):\n \"\"\"\n Returns whether or not we can load.\n :param step: step to load\n :type step: BaseStep\n :param context: execution context to load from\n :type context: ExecutionContext\n :return: loaded step\n \"\"\"\n meta_exists = os.path.exists(os.path.join(context.get_path(),\n '{0}.ckpt.meta'.format(step.get_name())))\n index_exists = os.path.exists(os.path.join(context.get_path(),\n '{0}.ckpt.index'.format(step.get_name())))\n return meta_exists and index_exists\n\n def _get_saved_model_path(self, context: ExecutionContext, step: BaseStep):\n \"\"\"\n Returns the saved model path using the given execution context, and step name.\n :param step: step to load\n :type step: BaseStep\n :param context: execution context to load from\n :type context: ExecutionContext\n :return: loaded step\n \"\"\"\n return os.path.join(context.get_path(), '{0}.ckpt'.format(step.\n get_name()))\n",
"step-2": "<mask token>\n\n\nclass TensorflowV1ModelStep(BaseTensorflowModelStep):\n <mask token>\n <mask token>\n <mask token>\n\n def __init__(self, create_graph, create_loss, create_optimizer,\n create_feed_dict=None, data_inputs_dtype=None,\n expected_outputs_dtype=None, variable_scope=None,\n has_expected_outputs=True, print_loss=False, print_func=None):\n BaseTensorflowModelStep.__init__(self, create_model=create_graph,\n create_loss=create_loss, create_optimizer=create_optimizer,\n create_inputs=create_feed_dict, data_inputs_dtype=\n data_inputs_dtype, expected_outputs_dtype=\n expected_outputs_dtype, step_saver=TensorflowV1StepSaver(),\n print_loss=print_loss, print_func=print_func)\n if variable_scope is None:\n variable_scope = self.name\n self.variable_scope = variable_scope\n self.has_expected_outputs = has_expected_outputs\n self.create_feed_dict = create_feed_dict\n\n def setup(self, context: ExecutionContext) ->BaseStep:\n \"\"\"\n Setup tensorflow 1 graph, and session using a variable scope.\n\n :return: self\n :rtype: BaseStep\n \"\"\"\n if self.is_initialized:\n return self\n self.graph = tf.Graph()\n with self.graph.as_default():\n with tf.variable_scope(self.variable_scope, reuse=tf.AUTO_REUSE):\n self.session = tf.Session(config=tf.ConfigProto(\n log_device_placement=True), graph=self.graph)\n model = self.create_model(self, context)\n if not isinstance(model, tuple):\n tf.identity(model, name='output')\n else:\n tf.identity(model[0], name='output')\n tf.identity(model[1], name='inference_output')\n tf.identity(self.create_loss(self), name='loss')\n self.create_optimizer(self, context).minimize(self['loss'],\n name='optimizer')\n init = tf.global_variables_initializer()\n self.session.run(init)\n self.is_initialized = True\n\n def teardown(self) ->BaseStep:\n \"\"\"\n Close session on teardown.\n\n :return:\n \"\"\"\n if self.session is not None:\n self.session.close()\n self.is_initialized = False\n return self\n\n def strip(self):\n \"\"\"\n Strip tensorflow 1 properties from to step to make the step serializable.\n\n :return: stripped step\n :rtype: BaseStep\n \"\"\"\n self.graph = None\n self.session = None\n return self\n\n def fit(self, data_inputs, expected_outputs=None) ->'BaseStep':\n with tf.variable_scope(self.variable_scope, reuse=tf.AUTO_REUSE):\n return self.fit_model(data_inputs, expected_outputs)\n\n def fit_model(self, data_inputs, expected_outputs=None) ->BaseStep:\n \"\"\"\n Fit tensorflow model using the variable scope.\n\n :param data_inputs: data inputs\n :param expected_outputs: expected outputs to fit on\n :return: fitted self\n :rtype: BaseStep\n \"\"\"\n feed_dict = {self['data_inputs']: data_inputs}\n if self.has_expected_outputs:\n feed_dict.update({self['expected_outputs']: expected_outputs})\n if self.create_inputs is not None:\n additional_feed_dict_arguments = self.create_inputs(self,\n data_inputs, expected_outputs)\n feed_dict.update(additional_feed_dict_arguments)\n results = self.session.run([self['optimizer'], self['loss']],\n feed_dict=feed_dict)\n loss = results[1]\n self.add_new_loss(loss)\n return self\n\n def transform(self, data_inputs, expected_outputs=None) ->'BaseStep':\n with tf.variable_scope(self.variable_scope, reuse=tf.AUTO_REUSE):\n return self.transform_model(data_inputs)\n\n def transform_model(self, data_inputs):\n \"\"\"\n Transform tensorflow model using the variable scope.\n\n :param data_inputs:\n :return:\n \"\"\"\n inference_output_name = self._get_inference_output_name()\n feed_dict = {self['data_inputs']: 
data_inputs}\n results = self.session.run([self[inference_output_name], self[\n 'loss']], feed_dict=feed_dict)\n self.add_new_loss(results[1], test_only=True)\n return results[0]\n <mask token>\n <mask token>\n\n\nclass TensorflowV1StepSaver(BaseSaver):\n \"\"\"\n Step saver for a tensorflow Session using tf.train.Saver().\n It saves, or restores the tf.Session() checkpoint at the context path using the step name as file name.\n\n .. seealso::\n `Using the saved model format <https://www.tensorflow.org/guide/saved_model>`_,\n :class:`~neuraxle.base.BaseSaver`\n \"\"\"\n\n def save_step(self, step: 'TensorflowV1ModelStep', context:\n 'ExecutionContext') ->'BaseStep':\n \"\"\"\n Save a step that is using tf.train.Saver().\n :param step: step to save\n :type step: BaseStep\n :param context: execution context to save from\n :type context: ExecutionContext\n :return: saved step\n \"\"\"\n with step.graph.as_default():\n saver = tf.train.Saver()\n saver.save(step.session, self._get_saved_model_path(context, step))\n step.strip()\n return step\n\n def load_step(self, step: 'TensorflowV1ModelStep', context:\n 'ExecutionContext') ->'BaseStep':\n \"\"\"\n Load a step that is using tensorflow using tf.train.Saver().\n :param step: step to load\n :type step: BaseStep\n :param context: execution context to load from\n :type context: ExecutionContext\n :return: loaded step\n \"\"\"\n step.is_initialized = False\n step.setup(context)\n with step.graph.as_default():\n saver = tf.train.Saver()\n saver.restore(step.session, self._get_saved_model_path(context,\n step))\n return step\n\n def can_load(self, step: 'TensorflowV1ModelStep', context:\n 'ExecutionContext'):\n \"\"\"\n Returns whether or not we can load.\n :param step: step to load\n :type step: BaseStep\n :param context: execution context to load from\n :type context: ExecutionContext\n :return: loaded step\n \"\"\"\n meta_exists = os.path.exists(os.path.join(context.get_path(),\n '{0}.ckpt.meta'.format(step.get_name())))\n index_exists = os.path.exists(os.path.join(context.get_path(),\n '{0}.ckpt.index'.format(step.get_name())))\n return meta_exists and index_exists\n\n def _get_saved_model_path(self, context: ExecutionContext, step: BaseStep):\n \"\"\"\n Returns the saved model path using the given execution context, and step name.\n :param step: step to load\n :type step: BaseStep\n :param context: execution context to load from\n :type context: ExecutionContext\n :return: loaded step\n \"\"\"\n return os.path.join(context.get_path(), '{0}.ckpt'.format(step.\n get_name()))\n",
"step-3": "<mask token>\n\n\nclass TensorflowV1ModelStep(BaseTensorflowModelStep):\n <mask token>\n <mask token>\n <mask token>\n\n def __init__(self, create_graph, create_loss, create_optimizer,\n create_feed_dict=None, data_inputs_dtype=None,\n expected_outputs_dtype=None, variable_scope=None,\n has_expected_outputs=True, print_loss=False, print_func=None):\n BaseTensorflowModelStep.__init__(self, create_model=create_graph,\n create_loss=create_loss, create_optimizer=create_optimizer,\n create_inputs=create_feed_dict, data_inputs_dtype=\n data_inputs_dtype, expected_outputs_dtype=\n expected_outputs_dtype, step_saver=TensorflowV1StepSaver(),\n print_loss=print_loss, print_func=print_func)\n if variable_scope is None:\n variable_scope = self.name\n self.variable_scope = variable_scope\n self.has_expected_outputs = has_expected_outputs\n self.create_feed_dict = create_feed_dict\n\n def setup(self, context: ExecutionContext) ->BaseStep:\n \"\"\"\n Setup tensorflow 1 graph, and session using a variable scope.\n\n :return: self\n :rtype: BaseStep\n \"\"\"\n if self.is_initialized:\n return self\n self.graph = tf.Graph()\n with self.graph.as_default():\n with tf.variable_scope(self.variable_scope, reuse=tf.AUTO_REUSE):\n self.session = tf.Session(config=tf.ConfigProto(\n log_device_placement=True), graph=self.graph)\n model = self.create_model(self, context)\n if not isinstance(model, tuple):\n tf.identity(model, name='output')\n else:\n tf.identity(model[0], name='output')\n tf.identity(model[1], name='inference_output')\n tf.identity(self.create_loss(self), name='loss')\n self.create_optimizer(self, context).minimize(self['loss'],\n name='optimizer')\n init = tf.global_variables_initializer()\n self.session.run(init)\n self.is_initialized = True\n\n def teardown(self) ->BaseStep:\n \"\"\"\n Close session on teardown.\n\n :return:\n \"\"\"\n if self.session is not None:\n self.session.close()\n self.is_initialized = False\n return self\n\n def strip(self):\n \"\"\"\n Strip tensorflow 1 properties from to step to make the step serializable.\n\n :return: stripped step\n :rtype: BaseStep\n \"\"\"\n self.graph = None\n self.session = None\n return self\n\n def fit(self, data_inputs, expected_outputs=None) ->'BaseStep':\n with tf.variable_scope(self.variable_scope, reuse=tf.AUTO_REUSE):\n return self.fit_model(data_inputs, expected_outputs)\n\n def fit_model(self, data_inputs, expected_outputs=None) ->BaseStep:\n \"\"\"\n Fit tensorflow model using the variable scope.\n\n :param data_inputs: data inputs\n :param expected_outputs: expected outputs to fit on\n :return: fitted self\n :rtype: BaseStep\n \"\"\"\n feed_dict = {self['data_inputs']: data_inputs}\n if self.has_expected_outputs:\n feed_dict.update({self['expected_outputs']: expected_outputs})\n if self.create_inputs is not None:\n additional_feed_dict_arguments = self.create_inputs(self,\n data_inputs, expected_outputs)\n feed_dict.update(additional_feed_dict_arguments)\n results = self.session.run([self['optimizer'], self['loss']],\n feed_dict=feed_dict)\n loss = results[1]\n self.add_new_loss(loss)\n return self\n\n def transform(self, data_inputs, expected_outputs=None) ->'BaseStep':\n with tf.variable_scope(self.variable_scope, reuse=tf.AUTO_REUSE):\n return self.transform_model(data_inputs)\n\n def transform_model(self, data_inputs):\n \"\"\"\n Transform tensorflow model using the variable scope.\n\n :param data_inputs:\n :return:\n \"\"\"\n inference_output_name = self._get_inference_output_name()\n feed_dict = {self['data_inputs']: 
data_inputs}\n results = self.session.run([self[inference_output_name], self[\n 'loss']], feed_dict=feed_dict)\n self.add_new_loss(results[1], test_only=True)\n return results[0]\n\n def _get_inference_output_name(self):\n \"\"\"\n Return the output tensor name for inference (transform).\n In create_graph, the user can return a tuple of two elements : the output tensor for training, and the output tensor for inference.\n\n :return:\n \"\"\"\n inference_output_name = 'output'\n if len(self['inference_output'].get_shape().as_list()) > 0:\n inference_output_name = 'inference_output'\n return inference_output_name\n <mask token>\n\n\nclass TensorflowV1StepSaver(BaseSaver):\n \"\"\"\n Step saver for a tensorflow Session using tf.train.Saver().\n It saves, or restores the tf.Session() checkpoint at the context path using the step name as file name.\n\n .. seealso::\n `Using the saved model format <https://www.tensorflow.org/guide/saved_model>`_,\n :class:`~neuraxle.base.BaseSaver`\n \"\"\"\n\n def save_step(self, step: 'TensorflowV1ModelStep', context:\n 'ExecutionContext') ->'BaseStep':\n \"\"\"\n Save a step that is using tf.train.Saver().\n :param step: step to save\n :type step: BaseStep\n :param context: execution context to save from\n :type context: ExecutionContext\n :return: saved step\n \"\"\"\n with step.graph.as_default():\n saver = tf.train.Saver()\n saver.save(step.session, self._get_saved_model_path(context, step))\n step.strip()\n return step\n\n def load_step(self, step: 'TensorflowV1ModelStep', context:\n 'ExecutionContext') ->'BaseStep':\n \"\"\"\n Load a step that is using tensorflow using tf.train.Saver().\n :param step: step to load\n :type step: BaseStep\n :param context: execution context to load from\n :type context: ExecutionContext\n :return: loaded step\n \"\"\"\n step.is_initialized = False\n step.setup(context)\n with step.graph.as_default():\n saver = tf.train.Saver()\n saver.restore(step.session, self._get_saved_model_path(context,\n step))\n return step\n\n def can_load(self, step: 'TensorflowV1ModelStep', context:\n 'ExecutionContext'):\n \"\"\"\n Returns whether or not we can load.\n :param step: step to load\n :type step: BaseStep\n :param context: execution context to load from\n :type context: ExecutionContext\n :return: loaded step\n \"\"\"\n meta_exists = os.path.exists(os.path.join(context.get_path(),\n '{0}.ckpt.meta'.format(step.get_name())))\n index_exists = os.path.exists(os.path.join(context.get_path(),\n '{0}.ckpt.index'.format(step.get_name())))\n return meta_exists and index_exists\n\n def _get_saved_model_path(self, context: ExecutionContext, step: BaseStep):\n \"\"\"\n Returns the saved model path using the given execution context, and step name.\n :param step: step to load\n :type step: BaseStep\n :param context: execution context to load from\n :type context: ExecutionContext\n :return: loaded step\n \"\"\"\n return os.path.join(context.get_path(), '{0}.ckpt'.format(step.\n get_name()))\n",
"step-4": "<mask token>\n\n\nclass TensorflowV1ModelStep(BaseTensorflowModelStep):\n \"\"\"\n Base class for tensorflow 1 steps.\n It uses :class:`TensorflowV1StepSaver` for saving the model.\n\n .. seealso::\n `Using the saved model format <https://www.tensorflow.org/guide/checkpoint>`_,\n :class:`~neuraxle.base.BaseStep`\n \"\"\"\n HYPERPARAMS = HyperparameterSamples({})\n HYPERPARAMS_SPACE = HyperparameterSpace({})\n\n def __init__(self, create_graph, create_loss, create_optimizer,\n create_feed_dict=None, data_inputs_dtype=None,\n expected_outputs_dtype=None, variable_scope=None,\n has_expected_outputs=True, print_loss=False, print_func=None):\n BaseTensorflowModelStep.__init__(self, create_model=create_graph,\n create_loss=create_loss, create_optimizer=create_optimizer,\n create_inputs=create_feed_dict, data_inputs_dtype=\n data_inputs_dtype, expected_outputs_dtype=\n expected_outputs_dtype, step_saver=TensorflowV1StepSaver(),\n print_loss=print_loss, print_func=print_func)\n if variable_scope is None:\n variable_scope = self.name\n self.variable_scope = variable_scope\n self.has_expected_outputs = has_expected_outputs\n self.create_feed_dict = create_feed_dict\n\n def setup(self, context: ExecutionContext) ->BaseStep:\n \"\"\"\n Setup tensorflow 1 graph, and session using a variable scope.\n\n :return: self\n :rtype: BaseStep\n \"\"\"\n if self.is_initialized:\n return self\n self.graph = tf.Graph()\n with self.graph.as_default():\n with tf.variable_scope(self.variable_scope, reuse=tf.AUTO_REUSE):\n self.session = tf.Session(config=tf.ConfigProto(\n log_device_placement=True), graph=self.graph)\n model = self.create_model(self, context)\n if not isinstance(model, tuple):\n tf.identity(model, name='output')\n else:\n tf.identity(model[0], name='output')\n tf.identity(model[1], name='inference_output')\n tf.identity(self.create_loss(self), name='loss')\n self.create_optimizer(self, context).minimize(self['loss'],\n name='optimizer')\n init = tf.global_variables_initializer()\n self.session.run(init)\n self.is_initialized = True\n\n def teardown(self) ->BaseStep:\n \"\"\"\n Close session on teardown.\n\n :return:\n \"\"\"\n if self.session is not None:\n self.session.close()\n self.is_initialized = False\n return self\n\n def strip(self):\n \"\"\"\n Strip tensorflow 1 properties from to step to make the step serializable.\n\n :return: stripped step\n :rtype: BaseStep\n \"\"\"\n self.graph = None\n self.session = None\n return self\n\n def fit(self, data_inputs, expected_outputs=None) ->'BaseStep':\n with tf.variable_scope(self.variable_scope, reuse=tf.AUTO_REUSE):\n return self.fit_model(data_inputs, expected_outputs)\n\n def fit_model(self, data_inputs, expected_outputs=None) ->BaseStep:\n \"\"\"\n Fit tensorflow model using the variable scope.\n\n :param data_inputs: data inputs\n :param expected_outputs: expected outputs to fit on\n :return: fitted self\n :rtype: BaseStep\n \"\"\"\n feed_dict = {self['data_inputs']: data_inputs}\n if self.has_expected_outputs:\n feed_dict.update({self['expected_outputs']: expected_outputs})\n if self.create_inputs is not None:\n additional_feed_dict_arguments = self.create_inputs(self,\n data_inputs, expected_outputs)\n feed_dict.update(additional_feed_dict_arguments)\n results = self.session.run([self['optimizer'], self['loss']],\n feed_dict=feed_dict)\n loss = results[1]\n self.add_new_loss(loss)\n return self\n\n def transform(self, data_inputs, expected_outputs=None) ->'BaseStep':\n with tf.variable_scope(self.variable_scope, 
reuse=tf.AUTO_REUSE):\n return self.transform_model(data_inputs)\n\n def transform_model(self, data_inputs):\n \"\"\"\n Transform tensorflow model using the variable scope.\n\n :param data_inputs:\n :return:\n \"\"\"\n inference_output_name = self._get_inference_output_name()\n feed_dict = {self['data_inputs']: data_inputs}\n results = self.session.run([self[inference_output_name], self[\n 'loss']], feed_dict=feed_dict)\n self.add_new_loss(results[1], test_only=True)\n return results[0]\n\n def _get_inference_output_name(self):\n \"\"\"\n Return the output tensor name for inference (transform).\n In create_graph, the user can return a tuple of two elements : the output tensor for training, and the output tensor for inference.\n\n :return:\n \"\"\"\n inference_output_name = 'output'\n if len(self['inference_output'].get_shape().as_list()) > 0:\n inference_output_name = 'inference_output'\n return inference_output_name\n\n def __getitem__(self, item):\n \"\"\"\n Get a graph tensor by name using get item.\n\n :param item: tensor name\n :type item: str\n\n :return: tensor\n :rtype: tf.Tensor\n \"\"\"\n if ':' in item:\n split = item.split(':')\n tensor_name = split[0]\n device = split[1]\n else:\n tensor_name = item\n device = '0'\n try:\n result = self.graph.get_tensor_by_name('{0}/{1}:{2}'.format(\n self.variable_scope, tensor_name, device))\n except KeyError:\n result = None\n if result is None:\n try:\n result = self.graph.get_operation_by_name('{0}/{1}'.format(\n self.variable_scope, tensor_name))\n except KeyError:\n result = tf.get_variable(tensor_name, [])\n return result\n\n\nclass TensorflowV1StepSaver(BaseSaver):\n \"\"\"\n Step saver for a tensorflow Session using tf.train.Saver().\n It saves, or restores the tf.Session() checkpoint at the context path using the step name as file name.\n\n .. 
seealso::\n `Using the saved model format <https://www.tensorflow.org/guide/saved_model>`_,\n :class:`~neuraxle.base.BaseSaver`\n \"\"\"\n\n def save_step(self, step: 'TensorflowV1ModelStep', context:\n 'ExecutionContext') ->'BaseStep':\n \"\"\"\n Save a step that is using tf.train.Saver().\n :param step: step to save\n :type step: BaseStep\n :param context: execution context to save from\n :type context: ExecutionContext\n :return: saved step\n \"\"\"\n with step.graph.as_default():\n saver = tf.train.Saver()\n saver.save(step.session, self._get_saved_model_path(context, step))\n step.strip()\n return step\n\n def load_step(self, step: 'TensorflowV1ModelStep', context:\n 'ExecutionContext') ->'BaseStep':\n \"\"\"\n Load a step that is using tensorflow using tf.train.Saver().\n :param step: step to load\n :type step: BaseStep\n :param context: execution context to load from\n :type context: ExecutionContext\n :return: loaded step\n \"\"\"\n step.is_initialized = False\n step.setup(context)\n with step.graph.as_default():\n saver = tf.train.Saver()\n saver.restore(step.session, self._get_saved_model_path(context,\n step))\n return step\n\n def can_load(self, step: 'TensorflowV1ModelStep', context:\n 'ExecutionContext'):\n \"\"\"\n Returns whether or not we can load.\n :param step: step to load\n :type step: BaseStep\n :param context: execution context to load from\n :type context: ExecutionContext\n :return: loaded step\n \"\"\"\n meta_exists = os.path.exists(os.path.join(context.get_path(),\n '{0}.ckpt.meta'.format(step.get_name())))\n index_exists = os.path.exists(os.path.join(context.get_path(),\n '{0}.ckpt.index'.format(step.get_name())))\n return meta_exists and index_exists\n\n def _get_saved_model_path(self, context: ExecutionContext, step: BaseStep):\n \"\"\"\n Returns the saved model path using the given execution context, and step name.\n :param step: step to load\n :type step: BaseStep\n :param context: execution context to load from\n :type context: ExecutionContext\n :return: loaded step\n \"\"\"\n return os.path.join(context.get_path(), '{0}.ckpt'.format(step.\n get_name()))\n",
"step-5": "\"\"\"\nNeuraxle Tensorflow V1 Utility classes\n=========================================\nNeuraxle utility classes for tensorflow v1.\n\n..\n Copyright 2019, Neuraxio Inc.\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n http://www.apache.org/licenses/LICENSE-2.0\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n\n\"\"\"\nimport os\n\nimport tensorflow as tf\nfrom neuraxle.base import BaseSaver, BaseStep, ExecutionContext\nfrom neuraxle.hyperparams.space import HyperparameterSamples, HyperparameterSpace\n\nfrom neuraxle_tensorflow.tensorflow import BaseTensorflowModelStep\n\n\nclass TensorflowV1ModelStep(BaseTensorflowModelStep):\n \"\"\"\n Base class for tensorflow 1 steps.\n It uses :class:`TensorflowV1StepSaver` for saving the model.\n\n .. seealso::\n `Using the saved model format <https://www.tensorflow.org/guide/checkpoint>`_,\n :class:`~neuraxle.base.BaseStep`\n \"\"\"\n HYPERPARAMS = HyperparameterSamples({})\n HYPERPARAMS_SPACE = HyperparameterSpace({})\n\n def __init__(\n self,\n create_graph,\n create_loss,\n create_optimizer,\n create_feed_dict=None,\n data_inputs_dtype=None,\n expected_outputs_dtype=None,\n variable_scope=None,\n has_expected_outputs=True,\n print_loss=False,\n print_func=None\n ):\n BaseTensorflowModelStep.__init__(\n self,\n create_model=create_graph,\n create_loss=create_loss,\n create_optimizer=create_optimizer,\n create_inputs=create_feed_dict,\n data_inputs_dtype=data_inputs_dtype,\n expected_outputs_dtype=expected_outputs_dtype,\n step_saver=TensorflowV1StepSaver(),\n print_loss=print_loss,\n print_func=print_func\n )\n\n if variable_scope is None:\n variable_scope = self.name\n self.variable_scope = variable_scope\n self.has_expected_outputs = has_expected_outputs\n self.create_feed_dict = create_feed_dict\n\n def setup(self, context: ExecutionContext) -> BaseStep:\n \"\"\"\n Setup tensorflow 1 graph, and session using a variable scope.\n\n :return: self\n :rtype: BaseStep\n \"\"\"\n if self.is_initialized:\n return self\n\n self.graph = tf.Graph()\n with self.graph.as_default():\n with tf.variable_scope(self.variable_scope, reuse=tf.AUTO_REUSE):\n self.session = tf.Session(config=tf.ConfigProto(log_device_placement=True), graph=self.graph)\n\n model = self.create_model(self, context)\n if not isinstance(model, tuple):\n tf.identity(model, name='output')\n else:\n tf.identity(model[0], name='output')\n tf.identity(model[1], name='inference_output')\n\n tf.identity(self.create_loss(self), name='loss')\n self.create_optimizer(self, context).minimize(self['loss'], name='optimizer')\n\n init = tf.global_variables_initializer()\n self.session.run(init)\n self.is_initialized = True\n\n def teardown(self) -> BaseStep:\n \"\"\"\n Close session on teardown.\n\n :return:\n \"\"\"\n if self.session is not None:\n self.session.close()\n self.is_initialized = False\n\n return self\n\n def strip(self):\n \"\"\"\n Strip tensorflow 1 properties from to step to make the step serializable.\n\n :return: stripped step\n :rtype: BaseStep\n \"\"\"\n self.graph = None\n self.session = None\n\n return self\n\n def fit(self, data_inputs, expected_outputs=None) -> 
'BaseStep':\n with tf.variable_scope(self.variable_scope, reuse=tf.AUTO_REUSE):\n return self.fit_model(data_inputs, expected_outputs)\n\n def fit_model(self, data_inputs, expected_outputs=None) -> BaseStep:\n \"\"\"\n Fit tensorflow model using the variable scope.\n\n :param data_inputs: data inputs\n :param expected_outputs: expected outputs to fit on\n :return: fitted self\n :rtype: BaseStep\n \"\"\"\n feed_dict = {\n self['data_inputs']: data_inputs\n }\n\n if self.has_expected_outputs:\n feed_dict.update({\n self['expected_outputs']: expected_outputs\n })\n\n if self.create_inputs is not None:\n additional_feed_dict_arguments = self.create_inputs(self, data_inputs, expected_outputs)\n feed_dict.update(additional_feed_dict_arguments)\n\n results = self.session.run([self['optimizer'], self['loss']], feed_dict=feed_dict)\n\n loss = results[1]\n self.add_new_loss(loss)\n\n return self\n\n def transform(self, data_inputs, expected_outputs=None) -> 'BaseStep':\n with tf.variable_scope(self.variable_scope, reuse=tf.AUTO_REUSE):\n return self.transform_model(data_inputs)\n\n def transform_model(self, data_inputs):\n \"\"\"\n Transform tensorflow model using the variable scope.\n\n :param data_inputs:\n :return:\n \"\"\"\n inference_output_name = self._get_inference_output_name()\n\n feed_dict = {\n self['data_inputs']: data_inputs\n }\n\n results = self.session.run([self[inference_output_name], self['loss']], feed_dict=feed_dict)\n self.add_new_loss(results[1], test_only=True)\n\n return results[0]\n\n def _get_inference_output_name(self):\n \"\"\"\n Return the output tensor name for inference (transform).\n In create_graph, the user can return a tuple of two elements : the output tensor for training, and the output tensor for inference.\n\n :return:\n \"\"\"\n inference_output_name = 'output'\n if len(self['inference_output'].get_shape().as_list()) > 0:\n inference_output_name = 'inference_output'\n\n return inference_output_name\n\n def __getitem__(self, item):\n \"\"\"\n Get a graph tensor by name using get item.\n\n :param item: tensor name\n :type item: str\n\n :return: tensor\n :rtype: tf.Tensor\n \"\"\"\n if \":\" in item:\n split = item.split(\":\")\n tensor_name = split[0]\n device = split[1]\n else:\n tensor_name = item\n device = \"0\"\n\n try:\n result = self.graph.get_tensor_by_name(\"{0}/{1}:{2}\".format(self.variable_scope, tensor_name, device))\n except KeyError:\n result = None\n\n if result is None:\n try:\n result = self.graph.get_operation_by_name(\"{0}/{1}\".format(self.variable_scope, tensor_name))\n except KeyError:\n result = tf.get_variable(tensor_name, [])\n\n return result\n\n\nclass TensorflowV1StepSaver(BaseSaver):\n \"\"\"\n Step saver for a tensorflow Session using tf.train.Saver().\n It saves, or restores the tf.Session() checkpoint at the context path using the step name as file name.\n\n .. 
seealso::\n `Using the saved model format <https://www.tensorflow.org/guide/saved_model>`_,\n :class:`~neuraxle.base.BaseSaver`\n \"\"\"\n\n def save_step(self, step: 'TensorflowV1ModelStep', context: 'ExecutionContext') -> 'BaseStep':\n \"\"\"\n Save a step that is using tf.train.Saver().\n :param step: step to save\n :type step: BaseStep\n :param context: execution context to save from\n :type context: ExecutionContext\n :return: saved step\n \"\"\"\n with step.graph.as_default():\n saver = tf.train.Saver()\n saver.save(step.session, self._get_saved_model_path(context, step))\n step.strip()\n\n return step\n\n def load_step(self, step: 'TensorflowV1ModelStep', context: 'ExecutionContext') -> 'BaseStep':\n \"\"\"\n Load a step that is using tensorflow using tf.train.Saver().\n :param step: step to load\n :type step: BaseStep\n :param context: execution context to load from\n :type context: ExecutionContext\n :return: loaded step\n \"\"\"\n step.is_initialized = False\n step.setup(context)\n\n with step.graph.as_default():\n saver = tf.train.Saver()\n saver.restore(step.session, self._get_saved_model_path(context, step))\n\n return step\n\n def can_load(self, step: 'TensorflowV1ModelStep', context: 'ExecutionContext'):\n \"\"\"\n Returns whether or not we can load.\n :param step: step to load\n :type step: BaseStep\n :param context: execution context to load from\n :type context: ExecutionContext\n :return: loaded step\n \"\"\"\n meta_exists = os.path.exists(os.path.join(context.get_path(), \"{0}.ckpt.meta\".format(step.get_name())))\n index_exists = os.path.exists(os.path.join(context.get_path(), \"{0}.ckpt.index\".format(step.get_name())))\n\n return meta_exists and index_exists\n\n def _get_saved_model_path(self, context: ExecutionContext, step: BaseStep):\n \"\"\"\n Returns the saved model path using the given execution context, and step name.\n :param step: step to load\n :type step: BaseStep\n :param context: execution context to load from\n :type context: ExecutionContext\n :return: loaded step\n \"\"\"\n return os.path.join(context.get_path(), \"{0}.ckpt\".format(step.get_name()))\n",
"step-ids": [
13,
15,
16,
19,
21
]
}
|
[
13,
15,
16,
19,
21
] |
import pygame
import sys
import time
import random
from snake_gym.envs.modules import *
from pygame.locals import *
import numpy as np
class SnakeGame(object):
def __init__(self):
self.screen = pygame.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT),
0, 32)
self.surface = pygame.Surface(self.screen.get_size())
self.surface = self.surface.convert()
self.surface.fill((255, 255, 255))
self.clock = pygame.time.Clock()
self.fps = 60
self.done = False
pygame.key.set_repeat(1, 40)
self.screen.blit(self.surface, (0, 0))
pygame.init()
self.fpsClock = pygame.time.Clock()
self.snake = Snake()
self.apple = Apple()
def reset(self):
return SnakeGame._get_image(self.surface)
def step(self, key):
length = self.snake.length
for event in pygame.event.get():
if event.type == QUIT:
pygame.quit()
self.done = True
act = [UP, DOWN, LEFT, RIGHT]
self.snake.point(act[key])
self.surface.fill((255, 255, 255))
try:
self.snake.move()
except SnakeException:
self.done = True
if self.done:
state = SnakeGame._get_image(self.surface)
return state, length, self.done, {}
check_eat(self.snake, self.apple)
self.snake.draw(self.surface)
self.apple.draw(self.surface)
font = pygame.font.Font(None, 36)
text = font.render(str(self.snake.length), 1, (10, 10, 10))
text_pos = text.get_rect()
text_pos.centerx = 20
self.surface.blit(text, text_pos)
self.screen.blit(self.surface, (0, 0))
state = SnakeGame._get_image(self.surface)
pygame.display.flip()
pygame.display.update()
self.fpsClock.tick(self.fps + self.snake.length / 3)
return state, self.snake.length, False, {}
@staticmethod
def _get_image(surface):
ret = list(map(lambda x: list(x), np.zeros((SCREEN_HEIGHT,
SCREEN_WIDTH))))
for j in range(SCREEN_HEIGHT):
for k in range(SCREEN_WIDTH):
ret[j][k] = surface.get_at((k, j))
return np.array(ret)
|
normal
|
{
"blob_id": "6d61df9ac072100d01a1ce3cf7b4c056f66a163c",
"index": 502,
"step-1": "<mask token>\n\n\nclass SnakeGame(object):\n <mask token>\n\n def reset(self):\n return SnakeGame._get_image(self.surface)\n\n def step(self, key):\n length = self.snake.length\n for event in pygame.event.get():\n if event.type == QUIT:\n pygame.quit()\n self.done = True\n act = [UP, DOWN, LEFT, RIGHT]\n self.snake.point(act[key])\n self.surface.fill((255, 255, 255))\n try:\n self.snake.move()\n except SnakeException:\n self.done = True\n if self.done:\n state = SnakeGame._get_image(self.surface)\n return state, length, self.done, {}\n check_eat(self.snake, self.apple)\n self.snake.draw(self.surface)\n self.apple.draw(self.surface)\n font = pygame.font.Font(None, 36)\n text = font.render(str(self.snake.length), 1, (10, 10, 10))\n text_pos = text.get_rect()\n text_pos.centerx = 20\n self.surface.blit(text, text_pos)\n self.screen.blit(self.surface, (0, 0))\n state = SnakeGame._get_image(self.surface)\n pygame.display.flip()\n pygame.display.update()\n self.fpsClock.tick(self.fps + self.snake.length / 3)\n return state, self.snake.length, False, {}\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass SnakeGame(object):\n <mask token>\n\n def reset(self):\n return SnakeGame._get_image(self.surface)\n\n def step(self, key):\n length = self.snake.length\n for event in pygame.event.get():\n if event.type == QUIT:\n pygame.quit()\n self.done = True\n act = [UP, DOWN, LEFT, RIGHT]\n self.snake.point(act[key])\n self.surface.fill((255, 255, 255))\n try:\n self.snake.move()\n except SnakeException:\n self.done = True\n if self.done:\n state = SnakeGame._get_image(self.surface)\n return state, length, self.done, {}\n check_eat(self.snake, self.apple)\n self.snake.draw(self.surface)\n self.apple.draw(self.surface)\n font = pygame.font.Font(None, 36)\n text = font.render(str(self.snake.length), 1, (10, 10, 10))\n text_pos = text.get_rect()\n text_pos.centerx = 20\n self.surface.blit(text, text_pos)\n self.screen.blit(self.surface, (0, 0))\n state = SnakeGame._get_image(self.surface)\n pygame.display.flip()\n pygame.display.update()\n self.fpsClock.tick(self.fps + self.snake.length / 3)\n return state, self.snake.length, False, {}\n\n @staticmethod\n def _get_image(surface):\n ret = list(map(lambda x: list(x), np.zeros((SCREEN_HEIGHT,\n SCREEN_WIDTH))))\n for j in range(SCREEN_HEIGHT):\n for k in range(SCREEN_WIDTH):\n ret[j][k] = surface.get_at((k, j))\n return np.array(ret)\n",
"step-3": "<mask token>\n\n\nclass SnakeGame(object):\n\n def __init__(self):\n self.screen = pygame.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT),\n 0, 32)\n self.surface = pygame.Surface(self.screen.get_size())\n self.surface = self.surface.convert()\n self.surface.fill((255, 255, 255))\n self.clock = pygame.time.Clock()\n self.fps = 60\n self.done = False\n pygame.key.set_repeat(1, 40)\n self.screen.blit(self.surface, (0, 0))\n pygame.init()\n self.fpsClock = pygame.time.Clock()\n self.snake = Snake()\n self.apple = Apple()\n\n def reset(self):\n return SnakeGame._get_image(self.surface)\n\n def step(self, key):\n length = self.snake.length\n for event in pygame.event.get():\n if event.type == QUIT:\n pygame.quit()\n self.done = True\n act = [UP, DOWN, LEFT, RIGHT]\n self.snake.point(act[key])\n self.surface.fill((255, 255, 255))\n try:\n self.snake.move()\n except SnakeException:\n self.done = True\n if self.done:\n state = SnakeGame._get_image(self.surface)\n return state, length, self.done, {}\n check_eat(self.snake, self.apple)\n self.snake.draw(self.surface)\n self.apple.draw(self.surface)\n font = pygame.font.Font(None, 36)\n text = font.render(str(self.snake.length), 1, (10, 10, 10))\n text_pos = text.get_rect()\n text_pos.centerx = 20\n self.surface.blit(text, text_pos)\n self.screen.blit(self.surface, (0, 0))\n state = SnakeGame._get_image(self.surface)\n pygame.display.flip()\n pygame.display.update()\n self.fpsClock.tick(self.fps + self.snake.length / 3)\n return state, self.snake.length, False, {}\n\n @staticmethod\n def _get_image(surface):\n ret = list(map(lambda x: list(x), np.zeros((SCREEN_HEIGHT,\n SCREEN_WIDTH))))\n for j in range(SCREEN_HEIGHT):\n for k in range(SCREEN_WIDTH):\n ret[j][k] = surface.get_at((k, j))\n return np.array(ret)\n",
"step-4": "import pygame\nimport sys\nimport time\nimport random\nfrom snake_gym.envs.modules import *\nfrom pygame.locals import *\nimport numpy as np\n\n\nclass SnakeGame(object):\n\n def __init__(self):\n self.screen = pygame.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT),\n 0, 32)\n self.surface = pygame.Surface(self.screen.get_size())\n self.surface = self.surface.convert()\n self.surface.fill((255, 255, 255))\n self.clock = pygame.time.Clock()\n self.fps = 60\n self.done = False\n pygame.key.set_repeat(1, 40)\n self.screen.blit(self.surface, (0, 0))\n pygame.init()\n self.fpsClock = pygame.time.Clock()\n self.snake = Snake()\n self.apple = Apple()\n\n def reset(self):\n return SnakeGame._get_image(self.surface)\n\n def step(self, key):\n length = self.snake.length\n for event in pygame.event.get():\n if event.type == QUIT:\n pygame.quit()\n self.done = True\n act = [UP, DOWN, LEFT, RIGHT]\n self.snake.point(act[key])\n self.surface.fill((255, 255, 255))\n try:\n self.snake.move()\n except SnakeException:\n self.done = True\n if self.done:\n state = SnakeGame._get_image(self.surface)\n return state, length, self.done, {}\n check_eat(self.snake, self.apple)\n self.snake.draw(self.surface)\n self.apple.draw(self.surface)\n font = pygame.font.Font(None, 36)\n text = font.render(str(self.snake.length), 1, (10, 10, 10))\n text_pos = text.get_rect()\n text_pos.centerx = 20\n self.surface.blit(text, text_pos)\n self.screen.blit(self.surface, (0, 0))\n state = SnakeGame._get_image(self.surface)\n pygame.display.flip()\n pygame.display.update()\n self.fpsClock.tick(self.fps + self.snake.length / 3)\n return state, self.snake.length, False, {}\n\n @staticmethod\n def _get_image(surface):\n ret = list(map(lambda x: list(x), np.zeros((SCREEN_HEIGHT,\n SCREEN_WIDTH))))\n for j in range(SCREEN_HEIGHT):\n for k in range(SCREEN_WIDTH):\n ret[j][k] = surface.get_at((k, j))\n return np.array(ret)\n",
"step-5": null,
"step-ids": [
3,
4,
5,
6
]
}
|
[
3,
4,
5,
6
] |
from django.http import JsonResponse
from django.shortcuts import render
from phone_number_parser.forms import TextForm
import re
def parse_text(request):
###########################################################################
#
# Parse Text is the lone view for this project. A GET request renders a
# form with one textarea field. A POST of this form passes the text via an
# ajax call in the field 'the_text'. The text is parsed using REGEX for
# phone numbers and passed back as a JSON object.
# See main.js for the ajax request and success callback function.
#
###########################################################################
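    # e.g. "Call (555) 123-4567 or 555.765.4321" -> ["(555) 123-4567", "(555) 765-4321"]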
if request.method == 'POST':
text = request.POST.get('the_text')
phone_number_list = []
matches = re.findall(r'\(?(\d{3})\)?[\.\-]?\s*(\d{3})\s*[\.\-]?\s*(\d{4})', text)
for match in matches:
phone_number_list.append('({}) {}-{}'.format(match[0], match[1], match[2]))
response_data = {'phone_number_list': phone_number_list}
return JsonResponse(response_data)
else:
form = TextForm()
return render(request, 'phone_number_parser/index.html', {'form': form})
|
normal
|
{
"blob_id": "d27a7ca04e12d50aca5a9f9db199102dbeb4e9f1",
"index": 7678,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef parse_text(request):\n if request.method == 'POST':\n text = request.POST.get('the_text')\n phone_number_list = []\n matches = re.findall(\n '\\\\(?(\\\\d{3})\\\\)?[\\\\.\\\\-]?\\\\s*(\\\\d{3})\\\\s*[\\\\.\\\\-]?\\\\s*(\\\\d{4})',\n text)\n for match in matches:\n phone_number_list.append('({}) {}-{}'.format(match[0], match[1],\n match[2]))\n response_data = {'phone_number_list': phone_number_list}\n return JsonResponse(response_data)\n else:\n form = TextForm()\n return render(request, 'phone_number_parser/index.html', {'form': form}\n )\n",
"step-3": "from django.http import JsonResponse\nfrom django.shortcuts import render\nfrom phone_number_parser.forms import TextForm\nimport re\n\n\ndef parse_text(request):\n if request.method == 'POST':\n text = request.POST.get('the_text')\n phone_number_list = []\n matches = re.findall(\n '\\\\(?(\\\\d{3})\\\\)?[\\\\.\\\\-]?\\\\s*(\\\\d{3})\\\\s*[\\\\.\\\\-]?\\\\s*(\\\\d{4})',\n text)\n for match in matches:\n phone_number_list.append('({}) {}-{}'.format(match[0], match[1],\n match[2]))\n response_data = {'phone_number_list': phone_number_list}\n return JsonResponse(response_data)\n else:\n form = TextForm()\n return render(request, 'phone_number_parser/index.html', {'form': form}\n )\n",
"step-4": "from django.http import JsonResponse\nfrom django.shortcuts import render\nfrom phone_number_parser.forms import TextForm\nimport re\n\n\ndef parse_text(request):\n ###########################################################################\n #\n # Parse Text is the lone view for this project. A GET request renders a\n # form with one textarea field. A POST of this form passes the text via an\n # ajax call in the field 'the_text'. The text is parsed using REGEX for\n # phone numbers and passed back as a JSON object.\n # See main.js for the ajax request and success callback function.\n #\n ###########################################################################\n\n if request.method == 'POST':\n text = request.POST.get('the_text')\n phone_number_list = []\n matches = re.findall(r'\\(?(\\d{3})\\)?[\\.\\-]?\\s*(\\d{3})\\s*[\\.\\-]?\\s*(\\d{4})', text)\n for match in matches:\n phone_number_list.append('({}) {}-{}'.format(match[0], match[1], match[2]))\n\n response_data = {'phone_number_list': phone_number_list}\n\n return JsonResponse(response_data)\n\n else:\n form = TextForm()\n\n return render(request, 'phone_number_parser/index.html', {'form': form})\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from datetime import date
def diff_in_date(first, second):
value = str(second - first)
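    # str() of a date difference looks like "9 days, 0:00:00"; keep only the day count before the comma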
if value.__contains__(','):
generated_sum = value.split(',')
return generated_sum[0]
else:
return value
first_date = date(2014, 7, 2)
second_date = date(2014, 7, 11)
current_date = date.today()
val = diff_in_date(first_date, second_date)
print(val)
newVal = diff_in_date(second_date, current_date)
print(newVal)
|
normal
|
{
"blob_id": "9b6d30a40bafa0e9e4760843d6a2f750f0f88a57",
"index": 6106,
"step-1": "<mask token>\n\n\ndef diff_in_date(first, second):\n value = str(second - first)\n if value.__contains__(','):\n generated_sum = value.split(',')\n return generated_sum[0]\n else:\n return value\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef diff_in_date(first, second):\n value = str(second - first)\n if value.__contains__(','):\n generated_sum = value.split(',')\n return generated_sum[0]\n else:\n return value\n\n\n<mask token>\nprint(val)\n<mask token>\nprint(newVal)\n",
"step-3": "<mask token>\n\n\ndef diff_in_date(first, second):\n value = str(second - first)\n if value.__contains__(','):\n generated_sum = value.split(',')\n return generated_sum[0]\n else:\n return value\n\n\nfirst_date = date(2014, 7, 2)\nsecond_date = date(2014, 7, 11)\ncurrent_date = date.today()\nval = diff_in_date(first_date, second_date)\nprint(val)\nnewVal = diff_in_date(second_date, current_date)\nprint(newVal)\n",
"step-4": "from datetime import date\n\n\ndef diff_in_date(first, second):\n value = str(second - first)\n if value.__contains__(','):\n generated_sum = value.split(',')\n return generated_sum[0]\n else:\n return value\n\n\nfirst_date = date(2014, 7, 2)\nsecond_date = date(2014, 7, 11)\ncurrent_date = date.today()\nval = diff_in_date(first_date, second_date)\nprint(val)\nnewVal = diff_in_date(second_date, current_date)\nprint(newVal)\n",
"step-5": null,
"step-ids": [
1,
2,
3,
4
]
}
|
[
1,
2,
3,
4
] |
import requests
import json
import datetime
from bs4 import BeautifulSoup
from pymongo import MongoClient, UpdateOne
import sys
#usage: python freesound_crawler.py [from_page] [to_page]
SOUND_URL = "https://freesound.org/apiv2/sounds/"
SEARCH_URL = "https://freesound.org/apiv2/search/text/"
AUTORIZE_URL = "https://freesound.org/apiv2/oauth2/authorize"
#freesound account information
from freesound_account_info import *
#mongo db information
from mongodb_info import *
error = []
MAX_PAGE = 24086
#connect to mongodb, return None if the connection fails
def getDB():
try:
client = MongoClient('mongodb://%s:%s@%s:%s/edudata' % (MONGO_USER, MONGO_PASSWORD, MONGO_HOST, MONGO_PORT))
client.server_info()
db = client.edudata
return db.freesound
except Exception as e:
print "Unexpected error:", e
return None
#send request with access token
def sendRequest(url, token):
try:
header = {'Authorization' : "Bearer " + token};
res = requests.get(url, headers = header);
return json.loads( res.text )
except Exception as e:
print "Failed to send request(" , url, "):", e
error.append({'url':url, 'type':'send request'})
return None
def getMaxPage(token):
data = sendRequest(SEARCH_URL,token)
try:
return data['count']/ 15 + 1
except:
print ("Failed to update max page")
return MAX_PAGE
#get sound info with access token
def getSoundInfo( sound_id, token ):
try:
data = {}
sound_data = sendRequest(SOUND_URL + str(sound_id), token)
if sound_data is None:
raise Exception('json is none')
data['_id'] = sound_data[ 'id' ];
data['url'] = sound_data[ 'url' ];
data['title'] = sound_data[ 'name' ];
data['creator'] = sound_data[ 'username' ];
data['createdate'] = sound_data[ 'created' ];
data['description'] = sound_data[ 'description' ];
data['download_url'] = sound_data['download']
data['keyword'] = []
for tag in sound_data[ 'tags' ]:
data['keyword'].append(tag)
data['previews'] = []
for i in sound_data['previews'].keys():
data['previews'].append({i:sound_data['previews'][i]})
data['type'] = sound_data[ 'type' ];
data['bitrate'] = sound_data[ 'bitrate' ];
data['channels'] = sound_data[ 'channels' ];
data['downlaod'] = sound_data[ 'num_downloads' ];
data['license'] = sound_data[ 'license' ];
data['filesize'] = sound_data[ 'filesize' ];
return data;
except Exception as e:
print "Error occurs while getting sound info", sound_id, ": ", sys.exc_info()
print sound_data
return None
#execute queries
def insertDB( db, query):
if query is not None:
result = db.bulk_write(query, ordered = False)
print result.bulk_api_result
def crawling(token, db, page=1, page_to = MAX_PAGE):
header = {'Authorization' : "Bearer " + token};
print "From page", page, "to page", page_to
for i in range(page, page_to + 1):
if i > MAX_PAGE:
print "Meet max page", MAX_PAGE
break;
url = SEARCH_URL + "?page=" + str(i)
list_data = sendRequest(url, token)
try:
update_queries = []
for d in list_data['results']:
data = getSoundInfo( d['id'], token);
if data is None:
error.append({'id': d['id']});
continue
print data
cuurent_time = datetime.datetime.utcnow();
data['update_at'] = cuurent_time
update_queries.append(UpdateOne({'_id':data['_id']}, {'$set': data, '$setOnInsert':{'created_at':cuurent_time}},True))
if db is not None:
insertDB(db, update_queries)
print "Page", i, "is Done"
except Exception as e:
print "Error in page", i, ":", e
error.append({'Exception':e, 'type':'parse data', 'data':list_data})
print list_data
page += 1
if __name__ == '__main__':
db = getDB();
if db is None:
print "No db connected"
exit()
ACCESS_TOKEN = getAccessToken();
if ACCESS_TOKEN is None:
print "Can't get access token"
exit()
MAX_PAGE = getMaxPage(ACCESS_TOKEN)
from_page = 1
to_page = MAX_PAGE
if len(sys.argv) > 1:
from_page = int(sys.argv[1])
if len(sys.argv) > 2:
to_page = int(sys.argv[2])
crawling(ACCESS_TOKEN, db, from_page, to_page)
print "Error log: ",error
|
normal
|
{
"blob_id": "2294dc21ede759e755e51471705fa8ef784528a7",
"index": 8707,
"step-1": "import requests\nimport json\nimport datetime\nfrom bs4 import BeautifulSoup\nfrom pymongo import MongoClient, UpdateOne\nimport sys\n\n#usage: python freesound_crawler.py [from_page] [to_page]\n\nSOUND_URL = \"https://freesound.org/apiv2/sounds/\"\nSEARCH_URL = \"https://freesound.org/apiv2/search/text/\"\nAUTORIZE_URL = \"https://freesound.org/apiv2/oauth2/authorize\"\n\n#freesound account imformation\nfrom freesound_account_info import * \n\n#mongo db imformation\nfrom mongodb_info import * \n\nerror = []\nMAX_PAGE = 24086\n\n#connect to mongodb, return None if connection failure\ndef getDB():\n\ttry:\n\t\tclient = MongoClient('mongodb://%s:%s@%s:%s/edudata' % (MONGO_USER, MONGO_PASSWORD, MONGO_HOST, MONGO_PORT))\n\t\tclient.server_info()\n\t\tdb = client.edudata\n\t\treturn db.freesound\n\texcept Exception as e:\n\t\tprint \"Unexpected error:\", e\n\t\treturn None\n\n\n\n#send request with access token\ndef sendRequest(url, token):\n\ttry:\n\t\theader = {'Authorization' : \"Bearer \" + token};\n\t\tres = requests.get(url, headers = header);\n\t\treturn json.loads( res.text )\n\texcept Exception as e:\n\t\tprint \"Failed to send request(\" , url, \"):\", e\n\t\terror.append({'url':url, 'type':'send request'})\n\t\treturn None\n\ndef getMaxPage(token):\n\tdata = sendRequest(SEARCH_URL,token)\n\ttry:\n\t\treturn data['count']/ 15 + 1\n\texcept:\n\t\tprint (\"Failed to update max page\")\n\t\treturn MAX_PAGE\n\n#get sound info with access token\ndef getSoundInfo( sound_id, token ):\n\ttry:\n\t\tdata = {}\n\t\tsound_data = sendRequest(SOUND_URL + str(sound_id), token)\n\t\tif sound_data is None:\n\t\t\traise Exception('json is none')\n\t\tdata['_id'] = sound_data[ 'id' ];\n\t\tdata['url'] = sound_data[ 'url' ];\n\t\tdata['title'] = sound_data[ 'name' ];\n\t\tdata['creator'] = sound_data[ 'username' ];\n\t\tdata['createdate'] = sound_data[ 'created' ];\n\t\tdata['description'] = sound_data[ 'description' ];\n\t\tdata['download_url'] = sound_data['download']\n\n\t\tdata['keyword'] = []\n\t\tfor tag in sound_data[ 'tags' ]:\n\t\t\tdata['keyword'].append(tag)\n\t\tdata['previews'] = []\n\t\tfor i in sound_data['previews'].keys():\n\t\t\tdata['previews'].append({i:sound_data['previews'][i]})\n\t\t\t\n\t\tdata['type'] = sound_data[ 'type' ];\n\t\tdata['bitrate'] = sound_data[ 'bitrate' ];\n\t\tdata['channels'] = sound_data[ 'channels' ];\n\t\tdata['downlaod'] = sound_data[ 'num_downloads' ];\n\t\tdata['license'] = sound_data[ 'license' ];\n\t\tdata['filesize'] = sound_data[ 'filesize' ];\n\t\treturn data;\n\texcept Exception as e:\n\t\tprint \"Error occurs while getting sound info\", sound_id, \": \", sys.exc_info()\n\t\tprint sound_data\n\t\treturn None\n\n#execute queries \ndef insertDB( db, query):\n\tif query is not None:\n\t\tresult = db.bulk_write(query, ordered = False)\n\t\tprint result.bulk_api_result\n\n\ndef crawling(token, db, page=1, page_to = MAX_PAGE):\n\theader = {'Authorization' : \"Bearer \" + token};\n\n\tprint \"From page\", page, \"to page\", page_to\n\tfor i in range(page, page_to + 1):\n\t\tif i > MAX_PAGE:\n\t\t\tprint \"Meet max page\", MAX_PAGE\n\t\t\tbreak;\n\t\turl = SEARCH_URL + \"?page=\" + str(i)\n\t\tlist_data = sendRequest(url, token)\n\t\t\n\t\ttry:\n\t\t\tupdate_queries = []\n\t\t\tfor d in list_data['results']:\n\t\t\t\tdata = getSoundInfo( d['id'], token);\n\t\t\t\tif data is None:\n\t\t\t\t\terror.append({'id': d['id']});\n\t\t\t\t\tcontinue\n\t\t\t\tprint data\n\t\t\t\tcuurent_time = datetime.datetime.utcnow();\n\t\t\t\tdata['update_at'] = 
cuurent_time\n\t\t\t\tupdate_queries.append(UpdateOne({'_id':data['_id']}, {'$set': data, '$setOnInsert':{'created_at':cuurent_time}},True))\n\t\t\tif db is not None:\n\t\t\t\tinsertDB(db, update_queries)\n\n\t\t\tprint \"Page\", i, \"is Done\"\n\t\texcept Exception as e:\n\t\t\tprint \"Error in page\", i, \":\", e\n\t\t\terror.append({'Exception':e, 'type':'parse data', 'data':list_data})\n\t\t\tprint list_data\n\t\tpage += 1\n\n\nif __name__ == '__main__':\n\tdb = getDB();\n\tif db is None:\n\t\tprint \"No db connected\"\n\t\texit()\n\tACCESS_TOKEN = getAccessToken();\n\tif ACCESS_TOKEN is None:\n\t\tprint \"Can't get access token\"\n\t\texit()\n\n\tMAX_PAGE = getMaxPage(ACCESS_TOKEN)\n\tfrom_page = 1\n\tto_page = MAX_PAGE\n\tif len(sys.argv) > 1:\n\t\tfrom_page = int(sys.argv[1])\n\tif len(sys.argv) > 2:\n\t\tto_page = int(sys.argv[2])\n\tcrawling(ACCESS_TOKEN, db, from_page, to_page)\n\tprint \"Error log: \",error",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
from django.contrib import admin
from .models import Predictions
@admin.register(Predictions)
class PredictionsAdmin(admin.ModelAdmin):
pass
|
normal
|
{
"blob_id": "bab78e8a88f9a26cc13fe0c301f82880cee2b680",
"index": 965,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\[email protected](Predictions)\nclass PredictionsAdmin(admin.ModelAdmin):\n pass\n",
"step-3": "from django.contrib import admin\nfrom .models import Predictions\n\n\[email protected](Predictions)\nclass PredictionsAdmin(admin.ModelAdmin):\n pass\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
#!/usr/bin/env python3
import logging
import datetime
import os
import time
import json
import prod
import secret
from logging.handlers import RotatingFileHandler
import requests
import sns
from kafka import KafkaProducer
logger = logging.getLogger()
logger.setLevel('INFO')
log_path = os.path.basename(__file__).split('.')[0] + '.log'
handler = RotatingFileHandler(
log_path, maxBytes=1000000, backupCount=5)
formatter = logging.Formatter(
"[%(asctime)s] {%(pathname)s:%(lineno)d} %(levelname)s - %(message)s")
handler.setLevel(logging.DEBUG)
handler.setFormatter(formatter)
logger.addHandler(handler)
class Producer():
def __init__(self, topic):
kafka_uname = os.environ['KAFKA_USERNAME']
kafka_pwd = os.environ['KAFKA_PASSWORD']
kafka_hosts = os.environ['KAFKA_HOSTS']
ssl_truststore_file = '/opt/scripts/ca-cert.cer'
self.topic_name = topic
self.producer = KafkaProducer(
bootstrap_servers=kafka_hosts,
acks=1,
compression_type='snappy',
retries=5,
linger_ms=200,
batch_size=1000,
request_timeout_ms=100000,
sasl_plain_username=kafka_uname,
sasl_plain_password=kafka_pwd,
security_protocol="SASL_SSL",
sasl_mechanism="PLAIN",
# sasl_mechanism="SCRAM-SHA-512",
ssl_cafile=ssl_truststore_file,
api_version=(0, 10, 1)
)
def produce_message(self, message):
self.producer.send(self.topic_name, message)
def close(self):
self.producer.flush()
self.producer.close()
logger.info('closed')
def set_creds():
secrets = secret.get_secret(
'ngsiem-aca-kafka-config', ['username', 'password', 'kafka_hosts'])
os.environ['KAFKA_USERNAME'] = secrets['username']
os.environ['KAFKA_PASSWORD'] = secrets['password']
os.environ['KAFKA_HOSTS'] = secrets["kafka_hosts"]
def run_kafka_producer_job(logs, topic_name):
set_creds()
producer = Producer(topic=topic_name)
logger.info('producer created')
try:
for l in logs:
to_send = json.dumps(l)
producer.produce_message(to_send.encode())
except Exception as e:
logger.info(f'Error gathering the file or producing to Kafka: {str(e)}')
raise e
finally:
producer.close()
def pull_pp_trap_logs(minutes_before):
logger.info('retrieving secrets for pp_trap')
current_time = datetime.datetime.utcnow()
if minutes_before > 0:
current_time = current_time - \
datetime.timedelta(minutes=minutes_before)
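    # build a 5-minute window [now-20m, now-15m], relative to the (possibly shifted-back) current time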
fifteen_minutes_ago = (current_time - datetime.timedelta(minutes=15)).strftime('%Y-%m-%dT%H:%M:%S.%f')[:-4] + "Z"
twenty_minutes_ago = (current_time - datetime.timedelta(minutes=20)).strftime('%Y-%m-%dT%H:%M:%S.%f')[:-4] + "Z"
qs = {"created_after": twenty_minutes_ago, "created_before": fifteen_minutes_ago, "expand_events": "false"}
try:
r = requests.get('https://10.47.172.28/api/incidents', params=qs,
headers={'Authorization': prod.pp_trap_api_key}, verify=False)
print(r.status_code)
json_object = r.json()
print(json_object)
return json_object
except Exception as e:
sns.generate_sns("proofpoint_trap")
logger.error(f"Error for TRAP API call: {str(e)}")
if __name__ == "__main__":
minutes_before = 0 * 60
minutes_before_file = os.path.join(os.getcwd(), 'minutes_before')
if os.path.exists(minutes_before_file):
with open(minutes_before_file, 'r') as minutes_file:
line = minutes_file.readline()
line = line.strip()
minutes_before = int(line)
while True:
"""
        Query the TRAP API (JSON format) for a window starting minutes_before ago,
        send the returned logs to Kafka, then reduce minutes_before and repeat.
        Once the window catches up to now - 20 minutes, keep running the job
        once every 5 minutes.
"""
logger.info(f'minutes before: {minutes_before}')
if minutes_before <= 0:
logger.info('waiting for 5 minutes')
time.sleep(300)
logger.info('TRAP query started')
logs = pull_pp_trap_logs(minutes_before)
logger.info('TRAP query finished')
minutes_before = minutes_before - 5
if logs:
logger.info('TRAP_produce started')
run_kafka_producer_job(logs, 'test_log_security_proofpoint.trap_weekly')
logger.info('TRAP_produce finished')
else:
logger.info("No logs for TRAP call.")
with open(minutes_before_file, 'w') as minutes_file:
minutes_before = 0 if minutes_before < 0 else minutes_before
minutes_file.write(str(minutes_before))
|
normal
|
{
"blob_id": "283b93437072f0fd75d75dab733ecab05dc9e1f3",
"index": 3872,
"step-1": "<mask token>\n\n\nclass Producer:\n\n def __init__(self, topic):\n kafka_uname = os.environ['KAFKA_USERNAME']\n kafka_pwd = os.environ['KAFKA_PASSWORD']\n kafka_hosts = os.environ['KAFKA_HOSTS']\n ssl_truststore_file = '/opt/scripts/ca-cert.cer'\n self.topic_name = topic\n self.producer = KafkaProducer(bootstrap_servers=kafka_hosts, acks=1,\n compression_type='snappy', retries=5, linger_ms=200, batch_size\n =1000, request_timeout_ms=100000, sasl_plain_username=\n kafka_uname, sasl_plain_password=kafka_pwd, security_protocol=\n 'SASL_SSL', sasl_mechanism='PLAIN', ssl_cafile=\n ssl_truststore_file, api_version=(0, 10, 1))\n\n def produce_message(self, message):\n self.producer.send(self.topic_name, message)\n\n def close(self):\n self.producer.flush()\n self.producer.close()\n logger.info('closed')\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Producer:\n\n def __init__(self, topic):\n kafka_uname = os.environ['KAFKA_USERNAME']\n kafka_pwd = os.environ['KAFKA_PASSWORD']\n kafka_hosts = os.environ['KAFKA_HOSTS']\n ssl_truststore_file = '/opt/scripts/ca-cert.cer'\n self.topic_name = topic\n self.producer = KafkaProducer(bootstrap_servers=kafka_hosts, acks=1,\n compression_type='snappy', retries=5, linger_ms=200, batch_size\n =1000, request_timeout_ms=100000, sasl_plain_username=\n kafka_uname, sasl_plain_password=kafka_pwd, security_protocol=\n 'SASL_SSL', sasl_mechanism='PLAIN', ssl_cafile=\n ssl_truststore_file, api_version=(0, 10, 1))\n\n def produce_message(self, message):\n self.producer.send(self.topic_name, message)\n\n def close(self):\n self.producer.flush()\n self.producer.close()\n logger.info('closed')\n\n\ndef set_creds():\n secrets = secret.get_secret('ngsiem-aca-kafka-config', ['username',\n 'password', 'kafka_hosts'])\n os.environ['KAFKA_USERNAME'] = secrets['username']\n os.environ['KAFKA_PASSWORD'] = secrets['password']\n os.environ['KAFKA_HOSTS'] = secrets['kafka_hosts']\n\n\ndef run_kafka_producer_job(logs, topic_name):\n set_creds()\n producer = Producer(topic=topic_name)\n logger.info('producer created')\n try:\n for l in logs:\n to_send = json.dumps(l)\n producer.produce_message(to_send.encode())\n except Exception as e:\n logger.info(f'Error gathering the file or producing to Kafka: {str(e)}'\n )\n raise e\n finally:\n producer.close()\n\n\ndef pull_pp_trap_logs(minutes_before):\n logger.info('retrieving secrets for pp_trap')\n current_time = datetime.datetime.utcnow()\n if minutes_before > 0:\n current_time = current_time - datetime.timedelta(minutes=minutes_before\n )\n fifteen_minutes_ago = (current_time - datetime.timedelta(minutes=15)\n ).strftime('%Y-%m-%dT%H:%M:%S.%f')[:-4] + 'Z'\n twenty_minutes_ago = (current_time - datetime.timedelta(minutes=20)\n ).strftime('%Y-%m-%dT%H:%M:%S.%f')[:-4] + 'Z'\n qs = {'created_after': twenty_minutes_ago, 'created_before':\n fifteen_minutes_ago, 'expand_events': 'false'}\n try:\n r = requests.get('https://10.47.172.28/api/incidents', params=qs,\n headers={'Authorization': prod.pp_trap_api_key}, verify=False)\n print(r.status_code)\n json_object = r.json()\n print(json_object)\n return json_object\n except Exception as e:\n sns.generate_sns('proofpoint_trap')\n logger.error(f'Error for TRAP API call: {str(e)}')\n\n\n<mask token>\n",
"step-3": "<mask token>\nlogger.setLevel('INFO')\n<mask token>\nhandler.setLevel(logging.DEBUG)\nhandler.setFormatter(formatter)\nlogger.addHandler(handler)\n\n\nclass Producer:\n\n def __init__(self, topic):\n kafka_uname = os.environ['KAFKA_USERNAME']\n kafka_pwd = os.environ['KAFKA_PASSWORD']\n kafka_hosts = os.environ['KAFKA_HOSTS']\n ssl_truststore_file = '/opt/scripts/ca-cert.cer'\n self.topic_name = topic\n self.producer = KafkaProducer(bootstrap_servers=kafka_hosts, acks=1,\n compression_type='snappy', retries=5, linger_ms=200, batch_size\n =1000, request_timeout_ms=100000, sasl_plain_username=\n kafka_uname, sasl_plain_password=kafka_pwd, security_protocol=\n 'SASL_SSL', sasl_mechanism='PLAIN', ssl_cafile=\n ssl_truststore_file, api_version=(0, 10, 1))\n\n def produce_message(self, message):\n self.producer.send(self.topic_name, message)\n\n def close(self):\n self.producer.flush()\n self.producer.close()\n logger.info('closed')\n\n\ndef set_creds():\n secrets = secret.get_secret('ngsiem-aca-kafka-config', ['username',\n 'password', 'kafka_hosts'])\n os.environ['KAFKA_USERNAME'] = secrets['username']\n os.environ['KAFKA_PASSWORD'] = secrets['password']\n os.environ['KAFKA_HOSTS'] = secrets['kafka_hosts']\n\n\ndef run_kafka_producer_job(logs, topic_name):\n set_creds()\n producer = Producer(topic=topic_name)\n logger.info('producer created')\n try:\n for l in logs:\n to_send = json.dumps(l)\n producer.produce_message(to_send.encode())\n except Exception as e:\n logger.info(f'Error gathering the file or producing to Kafka: {str(e)}'\n )\n raise e\n finally:\n producer.close()\n\n\ndef pull_pp_trap_logs(minutes_before):\n logger.info('retrieving secrets for pp_trap')\n current_time = datetime.datetime.utcnow()\n if minutes_before > 0:\n current_time = current_time - datetime.timedelta(minutes=minutes_before\n )\n fifteen_minutes_ago = (current_time - datetime.timedelta(minutes=15)\n ).strftime('%Y-%m-%dT%H:%M:%S.%f')[:-4] + 'Z'\n twenty_minutes_ago = (current_time - datetime.timedelta(minutes=20)\n ).strftime('%Y-%m-%dT%H:%M:%S.%f')[:-4] + 'Z'\n qs = {'created_after': twenty_minutes_ago, 'created_before':\n fifteen_minutes_ago, 'expand_events': 'false'}\n try:\n r = requests.get('https://10.47.172.28/api/incidents', params=qs,\n headers={'Authorization': prod.pp_trap_api_key}, verify=False)\n print(r.status_code)\n json_object = r.json()\n print(json_object)\n return json_object\n except Exception as e:\n sns.generate_sns('proofpoint_trap')\n logger.error(f'Error for TRAP API call: {str(e)}')\n\n\nif __name__ == '__main__':\n minutes_before = 0 * 60\n minutes_before_file = os.path.join(os.getcwd(), 'minutes_before')\n if os.path.exists(minutes_before_file):\n with open(minutes_before_file, 'r') as minutes_file:\n line = minutes_file.readline()\n line = line.strip()\n minutes_before = int(line)\n while True:\n \"\"\"\n Query TRAP API (JSON format) starting from minutes_before\n send logs to kafka\n reduce minutes_before in next iteration and repeat\n when iteration reaches now -20 minutes\n run the job once every 5 minutes\n \"\"\"\n logger.info(f'minutes before: {minutes_before}')\n if minutes_before <= 0:\n logger.info('waiting for 5 minutes')\n time.sleep(300)\n logger.info('TRAP query started')\n logs = pull_pp_trap_logs(minutes_before)\n logger.info('TRAP query finished')\n minutes_before = minutes_before - 5\n if logs:\n logger.info('TRAP_produce started')\n run_kafka_producer_job(logs,\n 'test_log_security_proofpoint.trap_weekly')\n logger.info('TRAP_produce finished')\n 
else:\n logger.info('No logs for TRAP call.')\n with open(minutes_before_file, 'w') as minutes_file:\n minutes_before = 0 if minutes_before < 0 else minutes_before\n minutes_file.write(str(minutes_before))\n",
"step-4": "<mask token>\nlogger = logging.getLogger()\nlogger.setLevel('INFO')\nlog_path = os.path.basename(__file__).split('.')[0] + '.log'\nhandler = RotatingFileHandler(log_path, maxBytes=1000000, backupCount=5)\nformatter = logging.Formatter(\n '[%(asctime)s] {%(pathname)s:%(lineno)d} %(levelname)s - %(message)s')\nhandler.setLevel(logging.DEBUG)\nhandler.setFormatter(formatter)\nlogger.addHandler(handler)\n\n\nclass Producer:\n\n def __init__(self, topic):\n kafka_uname = os.environ['KAFKA_USERNAME']\n kafka_pwd = os.environ['KAFKA_PASSWORD']\n kafka_hosts = os.environ['KAFKA_HOSTS']\n ssl_truststore_file = '/opt/scripts/ca-cert.cer'\n self.topic_name = topic\n self.producer = KafkaProducer(bootstrap_servers=kafka_hosts, acks=1,\n compression_type='snappy', retries=5, linger_ms=200, batch_size\n =1000, request_timeout_ms=100000, sasl_plain_username=\n kafka_uname, sasl_plain_password=kafka_pwd, security_protocol=\n 'SASL_SSL', sasl_mechanism='PLAIN', ssl_cafile=\n ssl_truststore_file, api_version=(0, 10, 1))\n\n def produce_message(self, message):\n self.producer.send(self.topic_name, message)\n\n def close(self):\n self.producer.flush()\n self.producer.close()\n logger.info('closed')\n\n\ndef set_creds():\n secrets = secret.get_secret('ngsiem-aca-kafka-config', ['username',\n 'password', 'kafka_hosts'])\n os.environ['KAFKA_USERNAME'] = secrets['username']\n os.environ['KAFKA_PASSWORD'] = secrets['password']\n os.environ['KAFKA_HOSTS'] = secrets['kafka_hosts']\n\n\ndef run_kafka_producer_job(logs, topic_name):\n set_creds()\n producer = Producer(topic=topic_name)\n logger.info('producer created')\n try:\n for l in logs:\n to_send = json.dumps(l)\n producer.produce_message(to_send.encode())\n except Exception as e:\n logger.info(f'Error gathering the file or producing to Kafka: {str(e)}'\n )\n raise e\n finally:\n producer.close()\n\n\ndef pull_pp_trap_logs(minutes_before):\n logger.info('retrieving secrets for pp_trap')\n current_time = datetime.datetime.utcnow()\n if minutes_before > 0:\n current_time = current_time - datetime.timedelta(minutes=minutes_before\n )\n fifteen_minutes_ago = (current_time - datetime.timedelta(minutes=15)\n ).strftime('%Y-%m-%dT%H:%M:%S.%f')[:-4] + 'Z'\n twenty_minutes_ago = (current_time - datetime.timedelta(minutes=20)\n ).strftime('%Y-%m-%dT%H:%M:%S.%f')[:-4] + 'Z'\n qs = {'created_after': twenty_minutes_ago, 'created_before':\n fifteen_minutes_ago, 'expand_events': 'false'}\n try:\n r = requests.get('https://10.47.172.28/api/incidents', params=qs,\n headers={'Authorization': prod.pp_trap_api_key}, verify=False)\n print(r.status_code)\n json_object = r.json()\n print(json_object)\n return json_object\n except Exception as e:\n sns.generate_sns('proofpoint_trap')\n logger.error(f'Error for TRAP API call: {str(e)}')\n\n\nif __name__ == '__main__':\n minutes_before = 0 * 60\n minutes_before_file = os.path.join(os.getcwd(), 'minutes_before')\n if os.path.exists(minutes_before_file):\n with open(minutes_before_file, 'r') as minutes_file:\n line = minutes_file.readline()\n line = line.strip()\n minutes_before = int(line)\n while True:\n \"\"\"\n Query TRAP API (JSON format) starting from minutes_before\n send logs to kafka\n reduce minutes_before in next iteration and repeat\n when iteration reaches now -20 minutes\n run the job once every 5 minutes\n \"\"\"\n logger.info(f'minutes before: {minutes_before}')\n if minutes_before <= 0:\n logger.info('waiting for 5 minutes')\n time.sleep(300)\n logger.info('TRAP query started')\n logs = 
pull_pp_trap_logs(minutes_before)\n logger.info('TRAP query finished')\n minutes_before = minutes_before - 5\n if logs:\n logger.info('TRAP_produce started')\n run_kafka_producer_job(logs,\n 'test_log_security_proofpoint.trap_weekly')\n logger.info('TRAP_produce finished')\n else:\n logger.info('No logs for TRAP call.')\n with open(minutes_before_file, 'w') as minutes_file:\n minutes_before = 0 if minutes_before < 0 else minutes_before\n minutes_file.write(str(minutes_before))\n",
"step-5": "#!/usr/bin/env python3\nimport logging\nimport datetime\nimport os\nimport time\nimport json\n\nimport prod\nimport secret\nfrom logging.handlers import RotatingFileHandler\nimport requests\nimport sns\nfrom kafka import KafkaProducer\n\nlogger = logging.getLogger()\nlogger.setLevel('INFO')\nlog_path = os.path.basename(__file__).split('.')[0] + '.log'\n\nhandler = RotatingFileHandler(\n log_path, maxBytes=1000000, backupCount=5)\nformatter = logging.Formatter(\n \"[%(asctime)s] {%(pathname)s:%(lineno)d} %(levelname)s - %(message)s\")\nhandler.setLevel(logging.DEBUG)\nhandler.setFormatter(formatter)\nlogger.addHandler(handler)\n\n\nclass Producer():\n def __init__(self, topic):\n kafka_uname = os.environ['KAFKA_USERNAME']\n kafka_pwd = os.environ['KAFKA_PASSWORD']\n kafka_hosts = os.environ['KAFKA_HOSTS']\n ssl_truststore_file = '/opt/scripts/ca-cert.cer'\n\n self.topic_name = topic\n\n self.producer = KafkaProducer(\n bootstrap_servers=kafka_hosts,\n acks=1,\n compression_type='snappy',\n retries=5,\n linger_ms=200,\n batch_size=1000,\n request_timeout_ms=100000,\n sasl_plain_username=kafka_uname,\n sasl_plain_password=kafka_pwd,\n security_protocol=\"SASL_SSL\",\n sasl_mechanism=\"PLAIN\",\n # sasl_mechanism=\"SCRAM-SHA-512\",\n ssl_cafile=ssl_truststore_file,\n api_version=(0, 10, 1)\n )\n\n def produce_message(self, message):\n self.producer.send(self.topic_name, message)\n\n def close(self):\n self.producer.flush()\n self.producer.close()\n logger.info('closed')\n\n\ndef set_creds():\n secrets = secret.get_secret(\n 'ngsiem-aca-kafka-config', ['username', 'password', 'kafka_hosts'])\n os.environ['KAFKA_USERNAME'] = secrets['username']\n os.environ['KAFKA_PASSWORD'] = secrets['password']\n os.environ['KAFKA_HOSTS'] = secrets[\"kafka_hosts\"]\n\n\ndef run_kafka_producer_job(logs, topic_name):\n set_creds()\n producer = Producer(topic=topic_name)\n logger.info('producer created')\n try:\n for l in logs:\n to_send = json.dumps(l)\n producer.produce_message(to_send.encode())\n except Exception as e:\n logger.info(f'Error gathering the file or producing to Kafka: {str(e)}')\n raise e\n\n finally:\n producer.close()\n\n\ndef pull_pp_trap_logs(minutes_before):\n logger.info('retrieving secrets for pp_trap')\n current_time = datetime.datetime.utcnow()\n if minutes_before > 0:\n current_time = current_time - \\\n datetime.timedelta(minutes=minutes_before)\n\n fifteen_minutes_ago = (current_time - datetime.timedelta(minutes=15)).strftime('%Y-%m-%dT%H:%M:%S.%f')[:-4] + \"Z\"\n twenty_minutes_ago = (current_time - datetime.timedelta(minutes=20)).strftime('%Y-%m-%dT%H:%M:%S.%f')[:-4] + \"Z\"\n\n qs = {\"created_after\": twenty_minutes_ago, \"created_before\": fifteen_minutes_ago, \"expand_events\": \"false\"}\n try:\n r = requests.get('https://10.47.172.28/api/incidents', params=qs,\n headers={'Authorization': prod.pp_trap_api_key}, verify=False)\n print(r.status_code)\n\n json_object = r.json()\n print(json_object)\n return json_object\n\n except Exception as e:\n sns.generate_sns(\"proofpoint_trap\")\n logger.error(f\"Error for TRAP API call: {str(e)}\")\n\n\nif __name__ == \"__main__\":\n minutes_before = 0 * 60\n minutes_before_file = os.path.join(os.getcwd(), 'minutes_before')\n if os.path.exists(minutes_before_file):\n with open(minutes_before_file, 'r') as minutes_file:\n line = minutes_file.readline()\n line = line.strip()\n minutes_before = int(line)\n\n while True:\n \"\"\"\n Query TRAP API (JSON format) starting from minutes_before\n send logs to kafka\n reduce minutes_before 
in next iteration and repeat\n when iteration reaches now -20 minutes\n run the job once every 5 minutes\n \"\"\"\n logger.info(f'minutes before: {minutes_before}')\n if minutes_before <= 0:\n logger.info('waiting for 5 minutes')\n time.sleep(300)\n\n logger.info('TRAP query started')\n logs = pull_pp_trap_logs(minutes_before)\n logger.info('TRAP query finished')\n minutes_before = minutes_before - 5\n\n if logs:\n logger.info('TRAP_produce started')\n run_kafka_producer_job(logs, 'test_log_security_proofpoint.trap_weekly')\n logger.info('TRAP_produce finished')\n else:\n logger.info(\"No logs for TRAP call.\")\n with open(minutes_before_file, 'w') as minutes_file:\n minutes_before = 0 if minutes_before < 0 else minutes_before\n minutes_file.write(str(minutes_before))",
"step-ids": [
4,
7,
8,
9,
11
]
}
|
[
4,
7,
8,
9,
11
] |
from django import forms
class ListingForm(forms.Form):
text = forms.CharField(
max_length=50,
widget=forms.TextInput(
attrs={"class": "form-control", "placeholder": "Things to Buy"}
),
)
|
normal
|
{
"blob_id": "3f23a50f44ba17c9b0241a4e3b0e939afeb1f5f0",
"index": 3092,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass ListingForm(forms.Form):\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass ListingForm(forms.Form):\n text = forms.CharField(max_length=50, widget=forms.TextInput(attrs={\n 'class': 'form-control', 'placeholder': 'Things to Buy'}))\n",
"step-4": "from django import forms\n\n\nclass ListingForm(forms.Form):\n text = forms.CharField(max_length=50, widget=forms.TextInput(attrs={\n 'class': 'form-control', 'placeholder': 'Things to Buy'}))\n",
"step-5": "from django import forms\n\n\nclass ListingForm(forms.Form):\n text = forms.CharField(\n max_length=50,\n widget=forms.TextInput(\n attrs={\"class\": \"form-control\", \"placeholder\": \"Things to Buy\"}\n ),\n )\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from numpy import array
import xspec as xs
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import Grid
from spectralTools.step import Step
class xspecView(object):
def __init__(self):
#xs.Plot.device="/xs"
xs.Plot.xAxis='keV'
self.swift = []
self.nai=[]
self.bgo=[]
def LoadSwiftPHAs(self,phaFiles):
'''
Load The Swift PHAs in time order
'''
for pha in phaFiles:
s = xs.Spectrum(pha)
s.ignore("**-15. 150.-**")
cnts = sum(s.values)
self.swift.append(cnts)
def LoadNaiPHAs(self,phaFiles):
'''
Load The GBM NaI PHAs in time order
'''
for pha in phaFiles:
s = xs.Spectrum(pha)
s.ignore("**-8. 1999..-**")
cnts = sum(s.values)
self.nai.append(cnts)
def LoadBGOPHAs(self,phaFiles):
'''
Load The GBM BGO PHAs in time order
'''
for pha in phaFiles:
s = xs.Spectrum(pha)
s.ignore("**-250. 10000.-**")
cnts = sum(s.values)
self.bgo.append(cnts)
def SetTimeBins(self,starts,stops):
self.tBins = array(zip(starts,stops))
def PlotLC(self):
fig = plt.figure(1)
grid = Grid(fig,111,nrows_ncols = (3,1), axes_pad=0.,direction='column')
Step(grid[0],self.tBins,self.swift,'r',1.)
Step(grid[1],self.tBins,self.nai,'b',1.)
Step(grid[2],self.tBins,self.bgo,'g',1.)
|
normal
|
{
"blob_id": "ba34bae7849ad97f939c1a7cb91461269cd58b64",
"index": 8994,
"step-1": "<mask token>\n\n\nclass xspecView(object):\n <mask token>\n\n def LoadSwiftPHAs(self, phaFiles):\n \"\"\"\n Load The Swift PHAs in time order\n\n \"\"\"\n for pha in phaFiles:\n s = xs.Spectrum(pha)\n s.ignore('**-15. 150.-**')\n cnts = sum(s.values)\n self.swift.append(cnts)\n\n def LoadNaiPHAs(self, phaFiles):\n \"\"\"\n Load The GBM NaI PHAs in time order\n\n \"\"\"\n for pha in phaFiles:\n s = xs.Spectrum(pha)\n s.ignore('**-8. 1999..-**')\n cnts = sum(s.values)\n self.nai.append(cnts)\n <mask token>\n\n def SetTimeBins(self, starts, stops):\n self.tBins = array(zip(starts, stops))\n\n def PlotLC(self):\n fig = plt.figure(1)\n grid = Grid(fig, 111, nrows_ncols=(3, 1), axes_pad=0.0, direction=\n 'column')\n Step(grid[0], self.tBins, self.swift, 'r', 1.0)\n Step(grid[1], self.tBins, self.nai, 'b', 1.0)\n Step(grid[2], self.tBins, self.bgo, 'g', 1.0)\n",
"step-2": "<mask token>\n\n\nclass xspecView(object):\n <mask token>\n\n def LoadSwiftPHAs(self, phaFiles):\n \"\"\"\n Load The Swift PHAs in time order\n\n \"\"\"\n for pha in phaFiles:\n s = xs.Spectrum(pha)\n s.ignore('**-15. 150.-**')\n cnts = sum(s.values)\n self.swift.append(cnts)\n\n def LoadNaiPHAs(self, phaFiles):\n \"\"\"\n Load The GBM NaI PHAs in time order\n\n \"\"\"\n for pha in phaFiles:\n s = xs.Spectrum(pha)\n s.ignore('**-8. 1999..-**')\n cnts = sum(s.values)\n self.nai.append(cnts)\n\n def LoadBGOPHAs(self, phaFiles):\n \"\"\"\n Load The GBM BGO PHAs in time order\n\n \"\"\"\n for pha in phaFiles:\n s = xs.Spectrum(pha)\n s.ignore('**-250. 10000.-**')\n cnts = sum(s.values)\n self.bgo.append(cnts)\n\n def SetTimeBins(self, starts, stops):\n self.tBins = array(zip(starts, stops))\n\n def PlotLC(self):\n fig = plt.figure(1)\n grid = Grid(fig, 111, nrows_ncols=(3, 1), axes_pad=0.0, direction=\n 'column')\n Step(grid[0], self.tBins, self.swift, 'r', 1.0)\n Step(grid[1], self.tBins, self.nai, 'b', 1.0)\n Step(grid[2], self.tBins, self.bgo, 'g', 1.0)\n",
"step-3": "<mask token>\n\n\nclass xspecView(object):\n\n def __init__(self):\n xs.Plot.xAxis = 'keV'\n self.swift = []\n self.nai = []\n self.bgo = []\n\n def LoadSwiftPHAs(self, phaFiles):\n \"\"\"\n Load The Swift PHAs in time order\n\n \"\"\"\n for pha in phaFiles:\n s = xs.Spectrum(pha)\n s.ignore('**-15. 150.-**')\n cnts = sum(s.values)\n self.swift.append(cnts)\n\n def LoadNaiPHAs(self, phaFiles):\n \"\"\"\n Load The GBM NaI PHAs in time order\n\n \"\"\"\n for pha in phaFiles:\n s = xs.Spectrum(pha)\n s.ignore('**-8. 1999..-**')\n cnts = sum(s.values)\n self.nai.append(cnts)\n\n def LoadBGOPHAs(self, phaFiles):\n \"\"\"\n Load The GBM BGO PHAs in time order\n\n \"\"\"\n for pha in phaFiles:\n s = xs.Spectrum(pha)\n s.ignore('**-250. 10000.-**')\n cnts = sum(s.values)\n self.bgo.append(cnts)\n\n def SetTimeBins(self, starts, stops):\n self.tBins = array(zip(starts, stops))\n\n def PlotLC(self):\n fig = plt.figure(1)\n grid = Grid(fig, 111, nrows_ncols=(3, 1), axes_pad=0.0, direction=\n 'column')\n Step(grid[0], self.tBins, self.swift, 'r', 1.0)\n Step(grid[1], self.tBins, self.nai, 'b', 1.0)\n Step(grid[2], self.tBins, self.bgo, 'g', 1.0)\n",
"step-4": "from numpy import array\nimport xspec as xs\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.axes_grid1 import Grid\nfrom spectralTools.step import Step\n\n\nclass xspecView(object):\n\n def __init__(self):\n xs.Plot.xAxis = 'keV'\n self.swift = []\n self.nai = []\n self.bgo = []\n\n def LoadSwiftPHAs(self, phaFiles):\n \"\"\"\n Load The Swift PHAs in time order\n\n \"\"\"\n for pha in phaFiles:\n s = xs.Spectrum(pha)\n s.ignore('**-15. 150.-**')\n cnts = sum(s.values)\n self.swift.append(cnts)\n\n def LoadNaiPHAs(self, phaFiles):\n \"\"\"\n Load The GBM NaI PHAs in time order\n\n \"\"\"\n for pha in phaFiles:\n s = xs.Spectrum(pha)\n s.ignore('**-8. 1999..-**')\n cnts = sum(s.values)\n self.nai.append(cnts)\n\n def LoadBGOPHAs(self, phaFiles):\n \"\"\"\n Load The GBM BGO PHAs in time order\n\n \"\"\"\n for pha in phaFiles:\n s = xs.Spectrum(pha)\n s.ignore('**-250. 10000.-**')\n cnts = sum(s.values)\n self.bgo.append(cnts)\n\n def SetTimeBins(self, starts, stops):\n self.tBins = array(zip(starts, stops))\n\n def PlotLC(self):\n fig = plt.figure(1)\n grid = Grid(fig, 111, nrows_ncols=(3, 1), axes_pad=0.0, direction=\n 'column')\n Step(grid[0], self.tBins, self.swift, 'r', 1.0)\n Step(grid[1], self.tBins, self.nai, 'b', 1.0)\n Step(grid[2], self.tBins, self.bgo, 'g', 1.0)\n",
"step-5": "from numpy import array\nimport xspec as xs \nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.axes_grid1 import Grid\nfrom spectralTools.step import Step\n\n\n\nclass xspecView(object):\n\n\n def __init__(self):\n\n #xs.Plot.device=\"/xs\"\n xs.Plot.xAxis='keV'\n\n self.swift = []\n self.nai=[]\n self.bgo=[]\n\n def LoadSwiftPHAs(self,phaFiles):\n '''\n Load The Swift PHAs in time order\n\n '''\n for pha in phaFiles:\n\n s = xs.Spectrum(pha)\n s.ignore(\"**-15. 150.-**\")\n\n cnts = sum(s.values)\n\n\n self.swift.append(cnts)\n\n\n def LoadNaiPHAs(self,phaFiles):\n '''\n Load The GBM NaI PHAs in time order\n\n '''\n for pha in phaFiles:\n\n s = xs.Spectrum(pha)\n s.ignore(\"**-8. 1999..-**\")\n cnts = sum(s.values)\n\n self.nai.append(cnts)\n\n\n def LoadBGOPHAs(self,phaFiles):\n '''\n Load The GBM BGO PHAs in time order\n\n '''\n for pha in phaFiles:\n\n s = xs.Spectrum(pha)\n s.ignore(\"**-250. 10000.-**\")\n cnts = sum(s.values)\n\n self.bgo.append(cnts)\n \n\n\n def SetTimeBins(self,starts,stops):\n\n self.tBins = array(zip(starts,stops))\n\n \n\n def PlotLC(self):\n\n fig = plt.figure(1)\n\n grid = Grid(fig,111,nrows_ncols = (3,1), axes_pad=0.,direction='column')\n \n Step(grid[0],self.tBins,self.swift,'r',1.)\n\n Step(grid[1],self.tBins,self.nai,'b',1.)\n\n Step(grid[2],self.tBins,self.bgo,'g',1.)\n \n\n \n \n",
"step-ids": [
5,
6,
7,
8,
9
]
}
|
[
5,
6,
7,
8,
9
] |
import numpy as np
from global_module.implementation_module import Autoencoder
from global_module.implementation_module import Reader
import tensorflow as tf
from global_module.settings_module import ParamsClass, Directory, Dictionary
import random
import sys
import time
class Test:
def __init__(self):
self.iter_test = 0
def run_epoch(self, session, min_loss, model_obj, reader, input, writer):
global epoch_combined_loss, step
params = model_obj.params
epoch_combined_loss = 0.0
output_file = open(model_obj.dir_obj.log_emb_path + '/latent_representation.csv', 'w')
for step, curr_input in enumerate(reader.data_iterator(input)):
feed_dict = {model_obj.input: curr_input}
total_loss, latent_rep, summary_test = session.run([model_obj.loss, model_obj.rep, model_obj.merged_summary_test], feed_dict=feed_dict)
epoch_combined_loss += total_loss
self.iter_test += 1
if self.iter_test % params.log_step == 0 and params.log:
writer.add_summary(summary_test, self.iter_test)
for each_rep in latent_rep:
output_file.write(' '.join(str(x) for x in each_rep).strip() + '\n')
epoch_combined_loss /= step
output_file.close()
return epoch_combined_loss, min_loss
def run_test(self):
global test_writer
mode_test = 'TE'
# test object
params_test = ParamsClass(mode=mode_test)
dir_test = Directory(mode_test)
test_reader = Reader(params_test)
test_instances = test_reader.read_image_data(dir_test.data_filename)
random.seed(4321)
global_min_loss = sys.float_info.max
print('***** INITIALIZING TF GRAPH *****')
with tf.Graph().as_default(), tf.Session() as session:
with tf.variable_scope("model"):
test_obj = Autoencoder(params_test, dir_test)
model_saver = tf.train.Saver()
model_saver.restore(session, test_obj.dir_obj.test_model)
if params_test.log:
test_writer = tf.summary.FileWriter(dir_test.log_path + '/test')
print('**** TF GRAPH INITIALIZED ****')
start_time = time.time()
test_loss, _, = self.run_epoch(session, global_min_loss, test_obj, test_reader, test_instances, test_writer)
print("Epoch: %d Test loss: %.4f" % (1, test_loss))
curr_time = time.time()
print('1 epoch run takes ' + str((curr_time - start_time) / 60) + ' minutes.')
if params_test.log:
test_writer.close()
|
normal
|
{
"blob_id": "e008f9b11a9b7480e9fb53391870809d6dea5497",
"index": 3953,
"step-1": "<mask token>\n\n\nclass Test:\n\n def __init__(self):\n self.iter_test = 0\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass Test:\n\n def __init__(self):\n self.iter_test = 0\n\n def run_epoch(self, session, min_loss, model_obj, reader, input, writer):\n global epoch_combined_loss, step\n params = model_obj.params\n epoch_combined_loss = 0.0\n output_file = open(model_obj.dir_obj.log_emb_path +\n '/latent_representation.csv', 'w')\n for step, curr_input in enumerate(reader.data_iterator(input)):\n feed_dict = {model_obj.input: curr_input}\n total_loss, latent_rep, summary_test = session.run([model_obj.\n loss, model_obj.rep, model_obj.merged_summary_test],\n feed_dict=feed_dict)\n epoch_combined_loss += total_loss\n self.iter_test += 1\n if self.iter_test % params.log_step == 0 and params.log:\n writer.add_summary(summary_test, self.iter_test)\n for each_rep in latent_rep:\n output_file.write(' '.join(str(x) for x in each_rep).strip(\n ) + '\\n')\n epoch_combined_loss /= step\n output_file.close()\n return epoch_combined_loss, min_loss\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Test:\n\n def __init__(self):\n self.iter_test = 0\n\n def run_epoch(self, session, min_loss, model_obj, reader, input, writer):\n global epoch_combined_loss, step\n params = model_obj.params\n epoch_combined_loss = 0.0\n output_file = open(model_obj.dir_obj.log_emb_path +\n '/latent_representation.csv', 'w')\n for step, curr_input in enumerate(reader.data_iterator(input)):\n feed_dict = {model_obj.input: curr_input}\n total_loss, latent_rep, summary_test = session.run([model_obj.\n loss, model_obj.rep, model_obj.merged_summary_test],\n feed_dict=feed_dict)\n epoch_combined_loss += total_loss\n self.iter_test += 1\n if self.iter_test % params.log_step == 0 and params.log:\n writer.add_summary(summary_test, self.iter_test)\n for each_rep in latent_rep:\n output_file.write(' '.join(str(x) for x in each_rep).strip(\n ) + '\\n')\n epoch_combined_loss /= step\n output_file.close()\n return epoch_combined_loss, min_loss\n\n def run_test(self):\n global test_writer\n mode_test = 'TE'\n params_test = ParamsClass(mode=mode_test)\n dir_test = Directory(mode_test)\n test_reader = Reader(params_test)\n test_instances = test_reader.read_image_data(dir_test.data_filename)\n random.seed(4321)\n global_min_loss = sys.float_info.max\n print('***** INITIALIZING TF GRAPH *****')\n with tf.Graph().as_default(), tf.Session() as session:\n with tf.variable_scope('model'):\n test_obj = Autoencoder(params_test, dir_test)\n model_saver = tf.train.Saver()\n model_saver.restore(session, test_obj.dir_obj.test_model)\n if params_test.log:\n test_writer = tf.summary.FileWriter(dir_test.log_path + '/test'\n )\n print('**** TF GRAPH INITIALIZED ****')\n start_time = time.time()\n test_loss, _ = self.run_epoch(session, global_min_loss,\n test_obj, test_reader, test_instances, test_writer)\n print('Epoch: %d Test loss: %.4f' % (1, test_loss))\n curr_time = time.time()\n print('1 epoch run takes ' + str((curr_time - start_time) / 60) +\n ' minutes.')\n if params_test.log:\n test_writer.close()\n",
"step-4": "import numpy as np\nfrom global_module.implementation_module import Autoencoder\nfrom global_module.implementation_module import Reader\nimport tensorflow as tf\nfrom global_module.settings_module import ParamsClass, Directory, Dictionary\nimport random\nimport sys\nimport time\n\n\nclass Test:\n\n def __init__(self):\n self.iter_test = 0\n\n def run_epoch(self, session, min_loss, model_obj, reader, input, writer):\n global epoch_combined_loss, step\n params = model_obj.params\n epoch_combined_loss = 0.0\n output_file = open(model_obj.dir_obj.log_emb_path +\n '/latent_representation.csv', 'w')\n for step, curr_input in enumerate(reader.data_iterator(input)):\n feed_dict = {model_obj.input: curr_input}\n total_loss, latent_rep, summary_test = session.run([model_obj.\n loss, model_obj.rep, model_obj.merged_summary_test],\n feed_dict=feed_dict)\n epoch_combined_loss += total_loss\n self.iter_test += 1\n if self.iter_test % params.log_step == 0 and params.log:\n writer.add_summary(summary_test, self.iter_test)\n for each_rep in latent_rep:\n output_file.write(' '.join(str(x) for x in each_rep).strip(\n ) + '\\n')\n epoch_combined_loss /= step\n output_file.close()\n return epoch_combined_loss, min_loss\n\n def run_test(self):\n global test_writer\n mode_test = 'TE'\n params_test = ParamsClass(mode=mode_test)\n dir_test = Directory(mode_test)\n test_reader = Reader(params_test)\n test_instances = test_reader.read_image_data(dir_test.data_filename)\n random.seed(4321)\n global_min_loss = sys.float_info.max\n print('***** INITIALIZING TF GRAPH *****')\n with tf.Graph().as_default(), tf.Session() as session:\n with tf.variable_scope('model'):\n test_obj = Autoencoder(params_test, dir_test)\n model_saver = tf.train.Saver()\n model_saver.restore(session, test_obj.dir_obj.test_model)\n if params_test.log:\n test_writer = tf.summary.FileWriter(dir_test.log_path + '/test'\n )\n print('**** TF GRAPH INITIALIZED ****')\n start_time = time.time()\n test_loss, _ = self.run_epoch(session, global_min_loss,\n test_obj, test_reader, test_instances, test_writer)\n print('Epoch: %d Test loss: %.4f' % (1, test_loss))\n curr_time = time.time()\n print('1 epoch run takes ' + str((curr_time - start_time) / 60) +\n ' minutes.')\n if params_test.log:\n test_writer.close()\n",
"step-5": "import numpy as np\nfrom global_module.implementation_module import Autoencoder\nfrom global_module.implementation_module import Reader\nimport tensorflow as tf\nfrom global_module.settings_module import ParamsClass, Directory, Dictionary\nimport random\nimport sys\nimport time\n\n\nclass Test:\n def __init__(self):\n self.iter_test = 0\n\n def run_epoch(self, session, min_loss, model_obj, reader, input, writer):\n global epoch_combined_loss, step\n params = model_obj.params\n epoch_combined_loss = 0.0\n\n output_file = open(model_obj.dir_obj.log_emb_path + '/latent_representation.csv', 'w')\n\n for step, curr_input in enumerate(reader.data_iterator(input)):\n feed_dict = {model_obj.input: curr_input}\n total_loss, latent_rep, summary_test = session.run([model_obj.loss, model_obj.rep, model_obj.merged_summary_test], feed_dict=feed_dict)\n\n epoch_combined_loss += total_loss\n\n self.iter_test += 1\n if self.iter_test % params.log_step == 0 and params.log:\n writer.add_summary(summary_test, self.iter_test)\n\n for each_rep in latent_rep:\n output_file.write(' '.join(str(x) for x in each_rep).strip() + '\\n')\n\n epoch_combined_loss /= step\n output_file.close()\n return epoch_combined_loss, min_loss\n\n def run_test(self):\n global test_writer\n mode_test = 'TE'\n\n # test object\n params_test = ParamsClass(mode=mode_test)\n dir_test = Directory(mode_test)\n test_reader = Reader(params_test)\n test_instances = test_reader.read_image_data(dir_test.data_filename)\n\n random.seed(4321)\n\n global_min_loss = sys.float_info.max\n\n print('***** INITIALIZING TF GRAPH *****')\n\n with tf.Graph().as_default(), tf.Session() as session:\n with tf.variable_scope(\"model\"):\n test_obj = Autoencoder(params_test, dir_test)\n\n model_saver = tf.train.Saver()\n model_saver.restore(session, test_obj.dir_obj.test_model)\n\n if params_test.log:\n test_writer = tf.summary.FileWriter(dir_test.log_path + '/test')\n\n print('**** TF GRAPH INITIALIZED ****')\n\n start_time = time.time()\n\n test_loss, _, = self.run_epoch(session, global_min_loss, test_obj, test_reader, test_instances, test_writer)\n print(\"Epoch: %d Test loss: %.4f\" % (1, test_loss))\n\n curr_time = time.time()\n print('1 epoch run takes ' + str((curr_time - start_time) / 60) + ' minutes.')\n\n if params_test.log:\n test_writer.close()\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
import re
def password_validation(password):
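    # requires at least one lowercase letter, one uppercase letter and one digit; letters/digits only, minimum length 6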
return bool(re.search("^(?=.*[a-z])(?=.*[A-Z])(?=.*\d)[a-zA-Z\d]{6,}$", password))
|
normal
|
{
"blob_id": "d44c76ff7e94bea6e03324c45d139602c724c7be",
"index": 2539,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef password_validation(password):\n return bool(re.search(\n '^(?=.*[a-z])(?=.*[A-Z])(?=.*\\\\d)[a-zA-Z\\\\d]{6,}$', password))\n",
"step-3": "import re\n\n\ndef password_validation(password):\n return bool(re.search(\n '^(?=.*[a-z])(?=.*[A-Z])(?=.*\\\\d)[a-zA-Z\\\\d]{6,}$', password))\n",
"step-4": "import re\n\ndef password_validation(password):\n return bool(re.search(\"^(?=.*[a-z])(?=.*[A-Z])(?=.*\\d)[a-zA-Z\\d]{6,}$\", password))",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from selenium import webdriver
import time
import math
def calc(x):
return str(math.log(abs(12*math.sin(int(x)))))
try:
br = webdriver.Chrome();
lk = 'http://suninjuly.github.io/get_attribute.html'
br.get(lk)
# collect the values from the page
treasure=br.find_element_by_id('treasure')
valuex = treasure.get_attribute('valuex')
radio_button = br.find_element_by_id('robotsRule')
check_box = br.find_element_by_id('robotCheckbox')
input_text = br.find_element_by_id('answer')
button = br.find_element_by_css_selector('button.btn')
# fill in and submit the answer
answer = calc(valuex)
check_box.click()
radio_button.click()
input_text.send_keys(answer)
button.click()
finally:
time.sleep(10)
br.quit()
|
normal
|
{
"blob_id": "2a92c47231b75a441660fed80a9bce9a35695af5",
"index": 1222,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef calc(x):\n return str(math.log(abs(12 * math.sin(int(x)))))\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef calc(x):\n return str(math.log(abs(12 * math.sin(int(x)))))\n\n\ntry:\n br = webdriver.Chrome()\n lk = 'http://suninjuly.github.io/get_attribute.html'\n br.get(lk)\n treasure = br.find_element_by_id('treasure')\n valuex = treasure.get_attribute('valuex')\n radio_button = br.find_element_by_id('robotsRule')\n check_box = br.find_element_by_id('robotCheckbox')\n input_text = br.find_element_by_id('answer')\n button = br.find_element_by_css_selector('button.btn')\n answer = calc(valuex)\n check_box.click()\n radio_button.click()\n input_text.send_keys(answer)\n button.click()\nfinally:\n time.sleep(10)\n br.quit()\n",
"step-4": "from selenium import webdriver\nimport time\nimport math\n\n\ndef calc(x):\n return str(math.log(abs(12 * math.sin(int(x)))))\n\n\ntry:\n br = webdriver.Chrome()\n lk = 'http://suninjuly.github.io/get_attribute.html'\n br.get(lk)\n treasure = br.find_element_by_id('treasure')\n valuex = treasure.get_attribute('valuex')\n radio_button = br.find_element_by_id('robotsRule')\n check_box = br.find_element_by_id('robotCheckbox')\n input_text = br.find_element_by_id('answer')\n button = br.find_element_by_css_selector('button.btn')\n answer = calc(valuex)\n check_box.click()\n radio_button.click()\n input_text.send_keys(answer)\n button.click()\nfinally:\n time.sleep(10)\n br.quit()\n",
"step-5": "from selenium import webdriver\nimport time\nimport math\n\ndef calc(x):\n\treturn str(math.log(abs(12*math.sin(int(x)))))\n\n\ntry:\n\tbr = webdriver.Chrome();\n\tlk = 'http://suninjuly.github.io/get_attribute.html'\n\tbr.get(lk)\n\n#собираю\n\ttreasure=br.find_element_by_id('treasure')\n\tvaluex = treasure.get_attribute('valuex')\n\tradio_button = br.find_element_by_id('robotsRule')\n\tcheck_box = br.find_element_by_id('robotCheckbox')\n\tinput_text = br.find_element_by_id('answer')\n\tbutton = br.find_element_by_css_selector('button.btn')\t\n#раздаю\n\tanswer = calc(valuex)\n\tcheck_box.click()\n\tradio_button.click()\n\tinput_text.send_keys(answer)\n\tbutton.click()\n\t\nfinally:\n\ttime.sleep(10)\n\tbr.quit()",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import pandas as pd
import numpy as np
import sys
def avg (x):
return [sum(x[i])/row for i in range(col)]
def sd (x):
return [np.std(x[i]) for i in range(col)]
def cov (x, md_x):
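    # population covariance between columns i and j (note: the body uses the global `data`, not the x argument)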
cov_xy=[[0 for r in range(col)] for c in range(col)]
for i in range(col):
for j in range (col):
for k in range (row):
cov_xy[i][j]+=((data[i][k]-md_x[i])*(data[j][k]-md_x[j]))/(row)
return(cov_xy)
def cor (cov, sd_x):
cor_xy=[[0 for r in range(col)] for c in range(col)]
for i in range(col):
for j in range (col):
cor_xy[i][j] = cov[i][j]/(sd_x[i]*sd_x[j])
print("cov= ",cov[i][j],"sd i", sd_x[i], " sd k", sd_x[j],"cov/sd", cov[i][j]/(sd_x[i]*sd_x[j]))
return(cor_xy)
if __name__ == "__main__":
argv=sys.argv[:]
if len(argv)<2:
print("1 argument required. Provide data file name")
sys.exit(0)
data=pd.read_csv(argv[1],header= None)
row=data.shape[0]
col=data.shape[1]
print("** dataset dimensions **")
print(row)
print(col)
mean=avg(data)
stdev=sd(data)
print(stdev)
covar=cov(data, mean)
correl=cor(covar, stdev)
print("---------CORRELATION MATRIX---------")
print(correl)
|
normal
|
{
"blob_id": "ad3c5ed3d6a9aa83e69f53d3fec845e8e2b1c9c6",
"index": 883,
"step-1": "<mask token>\n\n\ndef avg(x):\n return [(sum(x[i]) / row) for i in range(col)]\n\n\n<mask token>\n\n\ndef cov(x, md_x):\n cov_xy = [[(0) for r in range(col)] for c in range(col)]\n for i in range(col):\n for j in range(col):\n for k in range(row):\n cov_xy[i][j] += (data[i][k] - md_x[i]) * (data[j][k] - md_x[j]\n ) / row\n return cov_xy\n\n\ndef cor(cov, sd_x):\n cor_xy = [[(0) for r in range(col)] for c in range(col)]\n for i in range(col):\n for j in range(col):\n cor_xy[i][j] = cov[i][j] / (sd_x[i] * sd_x[j])\n print('cov= ', cov[i][j], 'sd i', sd_x[i], ' sd k', sd_x[j],\n 'cov/sd', cov[i][j] / (sd_x[i] * sd_x[j]))\n return cor_xy\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef avg(x):\n return [(sum(x[i]) / row) for i in range(col)]\n\n\ndef sd(x):\n return [np.std(x[i]) for i in range(col)]\n\n\ndef cov(x, md_x):\n cov_xy = [[(0) for r in range(col)] for c in range(col)]\n for i in range(col):\n for j in range(col):\n for k in range(row):\n cov_xy[i][j] += (data[i][k] - md_x[i]) * (data[j][k] - md_x[j]\n ) / row\n return cov_xy\n\n\ndef cor(cov, sd_x):\n cor_xy = [[(0) for r in range(col)] for c in range(col)]\n for i in range(col):\n for j in range(col):\n cor_xy[i][j] = cov[i][j] / (sd_x[i] * sd_x[j])\n print('cov= ', cov[i][j], 'sd i', sd_x[i], ' sd k', sd_x[j],\n 'cov/sd', cov[i][j] / (sd_x[i] * sd_x[j]))\n return cor_xy\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef avg(x):\n return [(sum(x[i]) / row) for i in range(col)]\n\n\ndef sd(x):\n return [np.std(x[i]) for i in range(col)]\n\n\ndef cov(x, md_x):\n cov_xy = [[(0) for r in range(col)] for c in range(col)]\n for i in range(col):\n for j in range(col):\n for k in range(row):\n cov_xy[i][j] += (data[i][k] - md_x[i]) * (data[j][k] - md_x[j]\n ) / row\n return cov_xy\n\n\ndef cor(cov, sd_x):\n cor_xy = [[(0) for r in range(col)] for c in range(col)]\n for i in range(col):\n for j in range(col):\n cor_xy[i][j] = cov[i][j] / (sd_x[i] * sd_x[j])\n print('cov= ', cov[i][j], 'sd i', sd_x[i], ' sd k', sd_x[j],\n 'cov/sd', cov[i][j] / (sd_x[i] * sd_x[j]))\n return cor_xy\n\n\nif __name__ == '__main__':\n argv = sys.argv[:]\n if len(argv) < 2:\n print('1 argument required. Provide data file name')\n sys.exit(0)\n data = pd.read_csv(argv[1], header=None)\n row = data.shape[0]\n col = data.shape[1]\n print('** dataset dimensions **')\n print(row)\n print(col)\n mean = avg(data)\n stdev = sd(data)\n print(stdev)\n covar = cov(data, mean)\n correl = cor(covar, stdev)\n print('---------CORRELATION MATRIX---------')\n print(correl)\n",
"step-4": "import pandas as pd\nimport numpy as np\nimport sys\n\n\ndef avg(x):\n return [(sum(x[i]) / row) for i in range(col)]\n\n\ndef sd(x):\n return [np.std(x[i]) for i in range(col)]\n\n\ndef cov(x, md_x):\n cov_xy = [[(0) for r in range(col)] for c in range(col)]\n for i in range(col):\n for j in range(col):\n for k in range(row):\n cov_xy[i][j] += (data[i][k] - md_x[i]) * (data[j][k] - md_x[j]\n ) / row\n return cov_xy\n\n\ndef cor(cov, sd_x):\n cor_xy = [[(0) for r in range(col)] for c in range(col)]\n for i in range(col):\n for j in range(col):\n cor_xy[i][j] = cov[i][j] / (sd_x[i] * sd_x[j])\n print('cov= ', cov[i][j], 'sd i', sd_x[i], ' sd k', sd_x[j],\n 'cov/sd', cov[i][j] / (sd_x[i] * sd_x[j]))\n return cor_xy\n\n\nif __name__ == '__main__':\n argv = sys.argv[:]\n if len(argv) < 2:\n print('1 argument required. Provide data file name')\n sys.exit(0)\n data = pd.read_csv(argv[1], header=None)\n row = data.shape[0]\n col = data.shape[1]\n print('** dataset dimensions **')\n print(row)\n print(col)\n mean = avg(data)\n stdev = sd(data)\n print(stdev)\n covar = cov(data, mean)\n correl = cor(covar, stdev)\n print('---------CORRELATION MATRIX---------')\n print(correl)\n",
"step-5": "import pandas as pd\nimport numpy as np\nimport sys\n\ndef avg (x):\n return [sum(x[i])/row for i in range(col)]\n\ndef sd (x):\n return [np.std(x[i]) for i in range(col)]\n\ndef cov (x, md_x):\n cov_xy=[[0 for r in range(col)] for c in range(col)]\n for i in range(col):\n for j in range (col):\n for k in range (row):\n cov_xy[i][j]+=((data[i][k]-md_x[i])*(data[j][k]-md_x[j]))/(row)\n return(cov_xy)\n\ndef cor (cov, sd_x):\n cor_xy=[[0 for r in range(col)] for c in range(col)]\n for i in range(col):\n for j in range (col):\n cor_xy[i][j] = cov[i][j]/(sd_x[i]*sd_x[j])\n print(\"cov= \",cov[i][j],\"sd i\", sd_x[i], \" sd k\", sd_x[j],\"cov/sd\", cov[i][j]/(sd_x[i]*sd_x[j]))\n return(cor_xy)\n\n\nif __name__ == \"__main__\":\n \n argv=sys.argv[:]\n \n if len(argv)<2:\n print(\"1 argument required. Provide data file name\")\n sys.exit(0)\n \n data=pd.read_csv(argv[1],header= None)\n row=data.shape[0]\n col=data.shape[1]\n print(\"** dataset dimensions **\")\n print(row)\n print(col)\n mean=avg(data)\n stdev=sd(data)\n print(stdev)\n \n covar=cov(data, mean)\n correl=cor(covar, stdev)\n print(\"---------CORRELATION MATRIX---------\")\n print(correl)\n \n\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
# https://leetcode.com/problems/wiggle-subsequence/
#
# algorithms
# Medium (36.9%)
# Total Accepted: 43,722
# Total Submissions: 118,490
# beats 100.0% of python submissions
class Solution(object):
def wiggleMaxLength(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
length = len(nums)
if length < 2:
return length
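        # dp[i][0]: length of the longest wiggle subsequence within nums[:i + 1] whose last step is up
        # dp[i][1]: the same, but whose last step is down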
dp = [[0] * 2 for _ in xrange(length)]
dp[0] = [1, 1]
for i in xrange(1, length):
if nums[i] > nums[i - 1]:
dp[i][0] += dp[i - 1][1] + 1
dp[i][1] = dp[i - 1][1]
elif nums[i] < nums[i - 1]:
dp[i][1] += dp[i - 1][0] + 1
dp[i][0] = dp[i - 1][0]
else:
dp[i] = dp[i - 1]
return max(dp[-1])
|
normal
|
{
"blob_id": "6c1f7b8e71760cac443a06f68f5f6ee3c2151e50",
"index": 8170,
"step-1": "<mask token>\n",
"step-2": "class Solution(object):\n <mask token>\n",
"step-3": "class Solution(object):\n\n def wiggleMaxLength(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: int\n \"\"\"\n length = len(nums)\n if length < 2:\n return length\n dp = [([0] * 2) for _ in xrange(length)]\n dp[0] = [1, 1]\n for i in xrange(1, length):\n if nums[i] > nums[i - 1]:\n dp[i][0] += dp[i - 1][1] + 1\n dp[i][1] = dp[i - 1][1]\n elif nums[i] < nums[i - 1]:\n dp[i][1] += dp[i - 1][0] + 1\n dp[i][0] = dp[i - 1][0]\n else:\n dp[i] = dp[i - 1]\n return max(dp[-1])\n",
"step-4": "# https://leetcode.com/problems/wiggle-subsequence/\n#\n# algorithms\n# Medium (36.9%)\n# Total Accepted: 43,722\n# Total Submissions: 118,490\n# beats 100.0% of python submissions\n\n\nclass Solution(object):\n def wiggleMaxLength(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: int\n \"\"\"\n length = len(nums)\n\n if length < 2:\n return length\n\n dp = [[0] * 2 for _ in xrange(length)]\n dp[0] = [1, 1]\n\n for i in xrange(1, length):\n if nums[i] > nums[i - 1]:\n dp[i][0] += dp[i - 1][1] + 1\n dp[i][1] = dp[i - 1][1]\n elif nums[i] < nums[i - 1]:\n dp[i][1] += dp[i - 1][0] + 1\n dp[i][0] = dp[i - 1][0]\n else:\n dp[i] = dp[i - 1]\n\n return max(dp[-1])\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from bs4 import BeautifulSoup
import requests
res = requests.get('http://quotes.toscrape.com/')
#print(res.content)
#proper ordered printing
#print(res.text)
#lxml -> parser library
soup = BeautifulSoup(res.text , 'lxml')
#print(soup)
quote = soup.find_all('div',{'class' : 'quote'})
with open('Quotes.txt','w') as ff:
for q in quote:
msg = q.find('span',{'class' : 'text'})
print(msg.text)
ff.write(msg.text)
author = q.find('small',{'class' : 'author'})
print(author.text)
ff.write("\n")
ff.write(author.text)
print()
ff.write("\n\n")
|
normal
|
{
"blob_id": "777c08876a2de803fc95de937d9e921044545ef8",
"index": 3674,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwith open('Quotes.txt', 'w') as ff:\n for q in quote:\n msg = q.find('span', {'class': 'text'})\n print(msg.text)\n ff.write(msg.text)\n author = q.find('small', {'class': 'author'})\n print(author.text)\n ff.write('\\n')\n ff.write(author.text)\n print()\n ff.write('\\n\\n')\n",
"step-3": "<mask token>\nres = requests.get('http://quotes.toscrape.com/')\nsoup = BeautifulSoup(res.text, 'lxml')\nquote = soup.find_all('div', {'class': 'quote'})\nwith open('Quotes.txt', 'w') as ff:\n for q in quote:\n msg = q.find('span', {'class': 'text'})\n print(msg.text)\n ff.write(msg.text)\n author = q.find('small', {'class': 'author'})\n print(author.text)\n ff.write('\\n')\n ff.write(author.text)\n print()\n ff.write('\\n\\n')\n",
"step-4": "from bs4 import BeautifulSoup\nimport requests\nres = requests.get('http://quotes.toscrape.com/')\nsoup = BeautifulSoup(res.text, 'lxml')\nquote = soup.find_all('div', {'class': 'quote'})\nwith open('Quotes.txt', 'w') as ff:\n for q in quote:\n msg = q.find('span', {'class': 'text'})\n print(msg.text)\n ff.write(msg.text)\n author = q.find('small', {'class': 'author'})\n print(author.text)\n ff.write('\\n')\n ff.write(author.text)\n print()\n ff.write('\\n\\n')\n",
"step-5": "from bs4 import BeautifulSoup\r\nimport requests\r\n\r\nres = requests.get('http://quotes.toscrape.com/')\r\n#print(res.content)\r\n#proper ordered printing\r\n#print(res.text)\r\n#lxml -> parser library\r\nsoup = BeautifulSoup(res.text , 'lxml')\r\n#print(soup)\r\n\r\nquote = soup.find_all('div',{'class' : 'quote'})\r\nwith open('Quotes.txt','w') as ff:\r\n for q in quote:\r\n msg = q.find('span',{'class' : 'text'})\r\n print(msg.text)\r\n ff.write(msg.text)\r\n author = q.find('small',{'class' : 'author'})\r\n print(author.text)\r\n ff.write(\"\\n\")\r\n ff.write(author.text)\r\n print()\r\n ff.write(\"\\n\\n\")\r\n\r\n\r\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# Python Exercise 20: The same teacher from challenge 19 wants to draw the order in which students present their assignments. Write a program that reads the names of the four students and shows the drawn order.
import random
aluno1 = input('Primeiro aluno: ')
aluno2 = input('Segundo aluno: ')
aluno3 = input('Terceiro aluno: ')
aluno4 = input('Quarto aluno: ')
listaAlunos = [aluno1, aluno2, aluno3, aluno4]
# shuffle randomizes the order of the items in the list
random.shuffle(listaAlunos)
print('A ordem de apresentação será ', listaAlunos)
|
normal
|
{
"blob_id": "445bb8ad8dadd207a3546f4623de583fc47a2910",
"index": 2180,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nrandom.shuffle(listaAlunos)\nprint('A ordem de apresentação será ', listaAlunos)\n",
"step-3": "<mask token>\naluno1 = input('Primeiro aluno: ')\naluno2 = input('Segundo aluno: ')\naluno3 = input('Terceiro aluno: ')\naluno4 = input('Quarto aluno: ')\nlistaAlunos = [aluno1, aluno2, aluno3, aluno4]\nrandom.shuffle(listaAlunos)\nprint('A ordem de apresentação será ', listaAlunos)\n",
"step-4": "import random\naluno1 = input('Primeiro aluno: ')\naluno2 = input('Segundo aluno: ')\naluno3 = input('Terceiro aluno: ')\naluno4 = input('Quarto aluno: ')\nlistaAlunos = [aluno1, aluno2, aluno3, aluno4]\nrandom.shuffle(listaAlunos)\nprint('A ordem de apresentação será ', listaAlunos)\n",
"step-5": "# Exercício Python 20: O mesmo professor do desafio 19 quer sortear a ordem de apresentação de trabalhos dos alunos. Faça um programa que leia o nome dos quatro alunos e mostre a ordem sorteada.\r\nimport random\r\n\r\naluno1 = input('Primeiro aluno: ')\r\naluno2 = input('Segundo aluno: ')\r\naluno3 = input('Terceiro aluno: ')\r\naluno4 = input('Quarto aluno: ')\r\nlistaAlunos = [aluno1, aluno2, aluno3, aluno4]\r\n# o shuffle embaralha os dados da lista\r\nrandom.shuffle(listaAlunos)\r\nprint('A ordem de apresentação será ', listaAlunos)\r\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
#!/usr/bin/env python
# coding: utf-8
# Predicting Surviving the Sinking of the Titanic
# -----------------------------------------------
#
#
# This represents my first attempt at training up some classifiers for the titanic dataset.
# In[ ]:
# data analysis and wrangling
import pandas as pd
import numpy as np
import random as rnd
# visualization
import matplotlib.pyplot as plt
import seaborn as sns
get_ipython().magic(u'matplotlib inline')
sns.set_style("whitegrid")
# machine learning
from sklearn.model_selection import cross_val_score
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC, LinearSVC
from sklearn.ensemble import RandomForestClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.linear_model import Perceptron
from sklearn.linear_model import SGDClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import AdaBoostClassifier
from sklearn.neural_network import MLPClassifier
# In[ ]:
# get titanic & test csv files as a DataFrame
train_df = pd.read_csv("../input/train.csv")
test_df = pd.read_csv("../input/test.csv")
combine = [train_df, test_df]
# # Data exploration #
#
# First get some summary statistics about the datasets.
# In[ ]:
# view column labels
print(train_df.columns.values)
# In[ ]:
# preview the data
train_df.head()
# Now transpose the first few rows in order to see all attributes more easily as row labels.
# In[ ]:
train_df.head(3).T
# In[ ]:
# missing values, data types
train_df.info()
print('-'*40)
test_df.info()
# The above info shows that columns (from training data) with missing/empty values are:
#
# - Age (177 missing values)
# - Cabin (687 missing values)
# - Embarked (2 missing values)
# In[ ]:
# describe numeric columns
train_df.describe()
# In the training dataset there are 891 passengers with an overall survival rate of 38.4%.
# The oldest person is 80 years and the youngest is 5 months (0.42*12). The average fare is 32.20 dollars but the median fare is 14.45. This suggests outliers at the upper end of the fare, and indeed the maximum fare is $512.33.
# In[ ]:
# describe categorical columns
train_df.describe(include=['O'])
# In[ ]:
# just for fun, examine the records of ten year olds (there are only two)
train_df[train_df.Age == 10].stack()
# # Detailed data investigation #
#
# A closer look at each of the attributes (columns) and their relationship to survival.
# ##Sex##
#
# Sex is a *nominal* attribute with two categories (i.e. it is dichotomous). Let's plot some counts and survival rates by sex. Note that survival values are 0/1, thus rates can be be calculated simply via the mean survive value.
# In[ ]:
# count passengers by sex
plt.subplot(211) # 3 digit convenience notation for arguments (last digit represents plot number)
sns.countplot(x='Sex', data=train_df, palette='Greens_d')
# survival rate by sex
# note that barplot plots mean() on y by default
plt.subplot(212)
sns.barplot(x='Sex', y='Survived', data=train_df, palette='Greens_d')
# **Observations:**
#
# - Many more males than females
# - Survival rate of females much greater than males
#
# Let's get the actual numbers below using pandas.
# In[ ]:
# count passengers by sex
train_df.groupby('Sex').size()
# In[ ]:
# survival rates by sex
train_df.groupby(['Sex'])['Survived'].mean().sort_values()
# Thus, 18.9% of males (from the training set) survived compared to 74.2% of females.
# ##Passenger class##
#
# Passenger class (Pclass) is an *ordinal* attribute with three categories, 1, 2 and 3. The three categories have an order (representing socioeconomic status) but although the categories are given numeric labels, this attribute *is not* numeric! To see this, consider that 3rd class = 1st + 2nd class is a nonsense. This will be important later when we construct features. Again, let's plot some counts and survival rates.
# In[ ]:
# size of groups in passenger class
plt.subplots(figsize=(8,6))
plt.subplot(211)
sns.countplot(x='Pclass', data=train_df, palette='Purples_d') # _d = dark palette
# survival rate by sex
plt.subplot(212)
sns.barplot(x='Pclass', y='Survived', data=train_df, palette='Purples_d')
# **Observations:**
#
# - Three classes
# - Most passengers travelled by 3rd class (more than half; see below)
# - Survival rate increases with class
#
# Again, let's get the actual numbers below using pandas.
# In[ ]:
# count passengers by passenger class
train_df.groupby(['Pclass']).size()
# In[ ]:
# survival rates by passenger class
train_df.groupby(['Pclass'])['Survived'].mean().sort_values(ascending=False)
# ##Age##
#
# Age is a *ratio* attribute (it is properly numeric, see [Types of data measurement scales][1]). Ages < 1 are recorded as fractions of a year (e.g. 0.42 ≈ 5 months).
#
#
# [1]: http://www.mymarketresearchmethods.com/types-of-data-nominal-ordinal-interval-ratio/
# In[ ]:
# count the number of passengers for first 25 ages
train_df.groupby('Age').size().head(25)
# another way to do the above
#train_df['Age'].value_counts().sort_index().head(25)
# In[ ]:
# convert ages to ints
age = train_df[['Age','Survived']].dropna() # returns a copy with blanks removed
age['Age'] = age['Age'].astype(int) # floors floats
# count passengers by age (smoothed via gaussian kernels)
plt.subplots(figsize=(18,6))
plt.subplot(311)
sns.kdeplot(age['Age'], shade=True, cut=0)
# count passengers by age (no smoothing)
plt.subplot(312)
sns.countplot(x='Age', data=age, palette='GnBu_d')
# survival rates by age
plt.subplot(313)
sns.barplot(x='Age', y='Survived', data=age, ci=None, palette='Oranges_d') # takes mean by default
# Observations:
#
# - Under 16s tend to have the highest survival rates
# - Very high survival rates at 53, 63 and 80
# - Survival of over 16s is fairly noisy. Possible that survival might increase with age.
# ## Survival by age group and sex ##
#
# Now let's look at survival by age groups *and* sex to see if any patterns become clearer.
# In[ ]:
# bin age into groups
train_df['AgeGroup'] = pd.cut(train_df['Age'],[0,4,15,25,35,45,65,100])
test_df['AgeGroup'] = pd.cut(test_df['Age'],[0,4,15,25,35,45,65,100])
# survival by age group
train_df.groupby('AgeGroup')['Survived'].mean()
# In[ ]:
# survival by age group and sex
train_df[['Survived','AgeGroup', 'Sex']].groupby(['Sex', 'AgeGroup']).mean()
# In[ ]:
# count passengers by age group and sex
sns.factorplot(x='AgeGroup', col='Sex', data=train_df, kind='count')
# survival by age group and sex
sns.factorplot(x='AgeGroup', y='Survived', col='Sex', data=train_df, kind='bar')
# The relationship between survival and age group looks very different for males and females:
#
# - Males: survival rates *decrease* with age within (0, 25] and within (25, 100]. That is, younger boys fare better than older boys and younger men survive more than older men.
# - Females: no obvious relationship between surviving and age. In particular, girls and baby girls do not fare better than women; in fact, girls (4, 15] have the *lowest* survival rates of females.
#
# A feature space containing (child, man, woman) would do a decent job of representing this relationship to survivability.
#
# Non-linear classifiers (e.g. decision trees, multi-layer neural nets, nearest neighbour) applied to both sex and age group might do even better because of the noticeable relationship between survivability and age group for males.
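# 
# As a hedged illustration (added here, not part of the original analysis), the cell below derives such a (child, man, woman) category from Sex and Age. The under-16 cutoff and the name person_type are assumptions for the sketch; the result is only inspected, not used as a feature later.
# In[ ]:
# hypothetical sketch: a child/man/woman category (the 16-year cutoff is an assumption)
# Age still contains NaNs at this point; rows with a missing age fall back to man/woman
def person_type(row):
    if pd.notnull(row['Age']) and row['Age'] < 16:
        return 'child'
    return 'woman' if row['Sex'] == 'female' else 'man'
person = train_df.apply(person_type, axis=1)
print(pd.crosstab(person, train_df['Survived'], normalize='index'))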
# ## Family Size##
#
# We create a new feature, FamilySize, that sums Parch and SibSp. This will enable us to drop Parch and SibSp from the datasets.
# In[ ]:
# calculate family size
train_df['FamilySize'] = train_df['SibSp'] + train_df['Parch'] + 1
test_df['FamilySize'] = test_df['SibSp'] + test_df['Parch'] + 1
# count passengers by age group and sex
plt.subplot(211)
sns.countplot(x='FamilySize', data=train_df)
# survival by age group and sex
plt.subplot(212)
sns.barplot(x='FamilySize', y='Survived', data=train_df)
# Survival increases with family size, until families of size 4. Family sizes of 5 and above have reduced survival.
# Deck
# ----
#
# Cabin might conceivably be related to survival, but unfortunately most values are missing. Nevertheless, by way of an exercise, we will extract the feature, Deck, from Cabin by taking the first character of the label and analyze survival rates by deck.
# In[ ]:
# deck is the first letter of cabin
train_df['Deck'] = train_df['Cabin'].dropna().apply(lambda x: str(x)[0])
train_df[['PassengerId','Name', 'Cabin', 'Deck']].head(2).T
# In[ ]:
# count passengers by the deck their cabin is on
plt.subplots(figsize=(8,6))
plt.subplot(211)
sns.countplot(x='Deck', data=train_df)
# survival rate by deck
plt.subplot(212)
sns.barplot(x='Deck', y='Survived', data=train_df)
# ## Other attributes ##
# For this first attempt, I am ignoring the attributes below as they seem unlikely to be related to survival:
#
# - PassengerId
# - Name (however, extracting titles from names might be informative)
# - Ticket
# - Fare (could be related to socioeconomic status but we already have a class attribute)
# - Embarked
# # Data wrangling - Age group#
#
# Fill missing age group values. We don't want to drop them as this would lose many rows. Instead, we will randomly generate age groups according to the frequency that they occur in the data. We will calculate the frequency separately for males and females.
# In[ ]:
# number of males/females without an age
def get_na(dataset):
na_males = dataset[dataset.Sex == 'male'].loc[:,'AgeGroup'].isnull().sum()
na_females = dataset[dataset.Sex == 'female'].loc[:,'AgeGroup'].isnull().sum()
return {'male': na_males, 'female': na_females}
# number of males and females by age group
def get_counts(dataset):
return dataset.groupby(['Sex', 'AgeGroup']).size()
# randomly generate a list of age groups based on age group frequency (for each sex separately)
def generate_age_groups(num, freq):
age_groups = {}
for sex in ['male','female']:
relfreq = freq[sex] / freq[sex].sum()
age_groups[sex] = np.random.choice(freq[sex].index, size=num[sex], replace=True, p=relfreq)
return age_groups
# insert the new age group values
def insert_age_group_values(dataset, age_groups):
for sex in ['male','female']:
tmp = pd.DataFrame(dataset[(dataset.Sex == sex) & dataset.Age.isnull()]) # filter on sex and null ages
tmp['AgeGroup'] = age_groups[sex] # index age group values
dataset = dataset.combine_first(tmp) # uses tmp to fill holes
return dataset
# fill holes for train_df
na = get_na(train_df)
counts = get_counts(train_df)
counts['female']
age_groups = generate_age_groups(na, counts)
age_groups['female']
train_df = insert_age_group_values(train_df, age_groups)
train_df.info() # check all nulls have been filled
print('-'*40)
# repeat for test_df
na = get_na(test_df)
counts = get_counts(train_df) # reuse the frequencies taken over the training data as it is larger
age_groups = generate_age_groups(na, counts)
test_df = insert_age_group_values(test_df, age_groups)
test_df.info() # check all nulls have been filled
# # Feature engineering #
#
# Now that we've explored the data let's create some features:
#
# - **Sex:** Convert to a single binary feature, Female. No need to create a feature for Male, that would be redundant.
# - **Pclass:** Convert to two binary features, PClass_1 and PClass_2. Similar to Male above, having a PClass_3 would be redundant.
# - **Age group:** The age attribute binned using separators [0, 4, 15, 25, 35, 45, 65, 100]. Convert to a number of binary features, one for each age group.
# - **Family size:** The sum of SibSp and Parch plus 1.
# In[ ]:
# Sex -> Female
# training set
dummy = pd.get_dummies(train_df['Sex'])
dummy.columns = ['Female','Male']
train_df = train_df.join(dummy['Female'])
# test set
dummy = pd.get_dummies(test_df['Sex'])
dummy.columns = ['Female','Male']
test_df = test_df.join(dummy['Female'])
train_df[['Name', 'Sex', 'Female']].head(2).T
#train_df.columns
# In[ ]:
# Pclass -> PClass_1, PClass_2
# training set
dummy = pd.get_dummies(train_df['Pclass'])
dummy.columns = ['PClass_1','PClass_2','PClass_3']
train_df = train_df.join(dummy[['PClass_1', 'PClass_2']])
# test set
dummy = pd.get_dummies(test_df['Pclass'])
dummy.columns = ['PClass_1','PClass_2','PClass_3']
test_df = test_df.join(dummy[['PClass_1', 'PClass_2']])
train_df[['Name', 'Pclass', 'PClass_1', 'PClass_2']].head(2).T
#train_df.columns
# In[ ]:
# AgeGroup -> binary features
# training set
dummy = pd.get_dummies(train_df['AgeGroup'])
dummy.columns = ['Ages_4','Ages_15','Ages_25','Ages_35','Ages_45','Ages_65','Ages_100']
train_df = train_df.join(dummy)
# test set
dummy = pd.get_dummies(test_df['AgeGroup'])
dummy.columns = ['Ages_4','Ages_15','Ages_25','Ages_35','Ages_45','Ages_65','Ages_100']
test_df = test_df.join(dummy)
# ## Experimental features ##
# Some additional features to explore.
# In[ ]:
# Fare
# there is a single missing "Fare" value
test_df['Fare'].fillna(test_df['Fare'].median(), inplace=True)
# convert from float to int (floor)
#train_df['Fare'] = train_df['Fare'].astype(int)
#test_df['Fare'] = test_df['Fare'].astype(int)
# In[ ]:
# Embarked -> PortC, PortQ
# Fill missing values with the most occurred value
print(train_df.groupby('Embarked').size().sort_values())
train_df['Embarked'] = train_df['Embarked'].fillna('S')
# training set
dummy = pd.get_dummies(train_df['Embarked'])
#dummy.columns
dummy.columns = ['Port_C','Port_Q','Port_S']
#train_df = train_df.join(dummy[['Port_C','Port_Q']])
# test set
dummy = pd.get_dummies(test_df['Embarked'])
dummy.columns = ['Port_C','Port_Q','Port_S']
#test_df = test_df.join(dummy[['Port_C','Port_Q']])
# ## Dropping attributes ##
# Drop unused attributes to avoid detecting spurious relationships.
# In[ ]:
# drop the attributes that will be unused
train_df.drop(['PassengerId', 'Pclass', 'Name', 'Sex', 'Age',
'SibSp', 'Parch', 'Ticket', 'Cabin', 'Fare',
'Embarked', 'Deck', 'AgeGroup'], axis=1, inplace=True)
test_df.drop(['Pclass', 'Name', 'Sex', 'Age',
'SibSp', 'Parch', 'Ticket', 'Cabin', 'Fare',
'Embarked', 'AgeGroup'], axis=1, inplace=True)
train_df.head(10).T
# The sample above shows the features and their values for the first ten training examples.
# # Modeling #
#
# Our task is a binary classification problem: we want to formulate a relationship that predicts an output (Survived or not) from engineered features (Sex, Age group, Family size...). This type of learning is supervised learning, since a model will be trained on a dataset containing pairs of inputs and outputs.
#
# Suitable methods for performing classification include:
#
# - Logistic Regression*
# - Perceptron*
# - Support Vector Machines (SVMs)*
# - Naive Bayes classifier*
# - KNN or k-Nearest Neighbors
# - Decision Tree
# - Random Forrest
# - Artificial neural network
# - Relevance Vector Machine
#
# The methods marked * either discover linear classification boundaries (logistic regression, perceptron, and SVMs if using linear kernels) or assume conditional independence between the features (naive Bayes) and thus are not expected to perform as well (see the section above on the relationship between survival, age group and sex).
# ## Training data ##
# Let's use cross validation to perform the evaluation. This method will give a reasonable indication of predictive accuracy as evaluation will take place on data that is not seen during training. The package **`sklearn.model_selection`** includes support for cross validation.
# In[ ]:
# split the datasets into matched input and ouput pairs
X_train = train_df.drop("Survived", axis=1) # X = inputs
Y_train = train_df["Survived"] # Y = outputs
X_test = test_df.drop("PassengerId", axis=1).copy()
X_train.shape, Y_train.shape, X_test.shape
# Model fitting
# ----------
# (Some of this section is based on [this titanic tutorial][1].)
#
# Logistic Regression is a useful model to run early in the workflow. Logistic regression measures the relationship between the categorical dependent variable (feature) and one or more independent variables (features) by estimating probabilities using a logistic function, which is the cumulative logistic distribution. See [Logistic regression on Wikipedia][2].
#
# Note the confidence score generated by the model based on our training dataset.
#
#
# [1]: https://www.kaggle.com/startupsci/titanic/titanic-data-science-solutions
# [2]: https://en.wikipedia.org/wiki/Logistic_regression
# In[ ]:
# Logistic Regression
logreg = LogisticRegression()
scores = cross_val_score(logreg, X_train, Y_train, cv=10)
acc_log = round(scores.mean() * 100, 2)
acc_log
#Y_pred = logreg.predict(X_test)
# We can use Logistic Regression to validate our assumptions and decisions for feature creation and completion. This can be done by calculating the coefficients of the features in the decision function.
# Positive coefficients increase the log-odds of the response (and thus increase the probability), and negative coefficients decrease the log-odds of the response (and thus decrease the probability).
# In[ ]:
logreg.fit(X_train, Y_train)
coeff_df = pd.DataFrame(train_df.columns.delete(0))
coeff_df.columns = ['Feature']
coeff_df["Correlation"] = pd.Series(logreg.coef_[0])
coeff_df.sort_values(by='Correlation', ascending=False)
# In[ ]:
# Gaussian Naive Bayes
gaussian = GaussianNB()
scores = cross_val_score(gaussian, X_train, Y_train, cv=10)
acc_gaussian = round(scores.mean() * 100, 2)
acc_gaussian
# In[ ]:
# Perceptron (a single layer neural net)
perceptron = Perceptron()
scores = cross_val_score(perceptron, X_train, Y_train, cv=10)
acc_perceptron = round(scores.mean() * 100, 2)
acc_perceptron
# In[ ]:
# Neural Network (a multi layer neural net)
neural_net = MLPClassifier()
scores = cross_val_score(neural_net, X_train, Y_train, cv=10)
acc_neural_net = round(scores.mean() * 100, 2)
acc_neural_net
# In[ ]:
# Stochastic Gradient Descent
sgd = SGDClassifier()
scores = cross_val_score(sgd, X_train, Y_train, cv=10)
acc_sgd = round(scores.mean() * 100, 2)
acc_sgd
# In[ ]:
# Linear SVC
linear_svc = LinearSVC()
scores = cross_val_score(linear_svc, X_train, Y_train, cv=10)
acc_linear_svc = round(scores.mean() * 100, 2)
acc_linear_svc
# In[ ]:
# Support Vector Machine
svc = SVC() # uses an rbf kernel by default (i.e. can discover non-linear boundaries)
scores = cross_val_score(svc, X_train, Y_train, cv=10)
acc_svc = round(scores.mean() * 100, 2)
acc_svc
# In[ ]:
# Decision Tree
decision_tree = DecisionTreeClassifier()
scores = cross_val_score(decision_tree, X_train, Y_train, cv=10)
acc_decision_tree = round(scores.mean() * 100, 2)
acc_decision_tree
# In[ ]:
# Random Forest - an ensemble model
random_forest = RandomForestClassifier(n_estimators=100)
scores = cross_val_score(random_forest, X_train, Y_train, cv=10)
acc_random_forest = round(scores.mean() * 100, 2)
acc_random_forest
# In[ ]:
# AdaBoost - an ensemble method
ada_boost = AdaBoostClassifier(n_estimators=100)
scores = cross_val_score(ada_boost, X_train, Y_train, cv=10)
acc_ada_boost = round(scores.mean() * 100, 2)
acc_ada_boost
# In[ ]:
# k-Nearest Neighbors - a non-parametric method
knn = KNeighborsClassifier(n_neighbors = 5)
scores = cross_val_score(knn, X_train, Y_train, cv=10)
acc_knn = round(scores.mean() * 100, 2)
acc_knn
# Model evaluation
# ----------------
#
# We now rank the models and choose a high performing one for our problem. The Support Vector Machine consistently tops the chart.
#
# Decision Tree and Random Forest also both score high, but we prefer Random Forest as it avoids overfitting to the training set better than a decision tree and is therefore likely to perform better on the test dataset.
# In[ ]:
models = pd.DataFrame({
'Model': ['Support Vector Machine', 'kNN', 'Logistic Regression',
'Random Forest', 'Naive Bayes', 'Perceptron',
'Stochastic Gradient Descent', 'Linear SVC',
'Decision Tree', 'AdaBoost', 'Neural Network'],
'Score': [acc_svc, acc_knn, acc_log,
acc_random_forest, acc_gaussian, acc_perceptron,
acc_sgd, acc_linear_svc, acc_decision_tree,
acc_ada_boost, acc_neural_net]})
models.sort_values(by='Score', ascending=False)
# In[ ]:
# using random forest for submission
random_forest.fit(X_train, Y_train)
Y_pred = random_forest.predict(X_test)
submission = pd.DataFrame({
"PassengerId": test_df["PassengerId"],
"Survived": Y_pred
})
submission.to_csv('titanic_submission_1.csv', index=False)
#pd.set_option('display.max_rows', len(submission))
#submission
# Use cross validation to assess predictive accuracy
# --------------------------------------------------
#
# We can easily improve the above scores by evaluating on the training data (compare the random forest scores above and below). However, scores produced like this are not truly indicative of predictive accuracy and should be avoided. To see why, consider that a classifier that simply memorizes each input and output pair will score perfectly but be unable to generalise to other examples.
#
# In[ ]:
# Random Forest : scoring on training data
random_forest = RandomForestClassifier(n_estimators=100)
random_forest.fit(X_train, Y_train)
acc_random_forest = round(random_forest.score(X_train, Y_train) * 100, 2)
acc_random_forest
# What next?
# -------------------------------
#
# **_More feature exploration:_**
# Including *Fare* significantly increases the best accuracy to about 92% when *Fare* is floored and 94% otherwise. Additionally including *Embarked* brings it up to 95%. It may be worth investigating whether any relationship between these attributes and survival can be detected, especially for *Fare*.
#
# Other possibilities for features include *Deck* and *Title*, which can be extracted from *Cabin* and *Name* respectively.
#
# Could also try two or more overlapping binnings for age groups (e.g. bins as defined by cutting on [0,4,15,25,35,45,65,100] and [10,20,30,40,55,100]). If going down this path, focus on introducing extra bins for age groups that contain many passengers and have a steeper gradient on the survival curve (such as for the twenties, e.g. cut on [10,20,30]).
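# 
# A hedged sketch of two of these ideas (Title extraction and an alternative age binning), added for illustration: the regex and the bin edges below are assumptions, and because Name and Age were dropped from train_df above, the sketch re-reads the raw training file.
# In[ ]:
raw_df = pd.read_csv("../input/train.csv")
# Title is the token between the surname and the period, e.g. "Braund, Mr. Owen Harris" -> "Mr"
raw_df['Title'] = raw_df['Name'].str.extract(r' ([A-Za-z]+)\.', expand=False)
print(raw_df['Title'].value_counts().head())
# a second, overlapping age binning alongside the [0,4,15,25,35,45,65,100] cut used earlier
raw_df['AgeGroupAlt'] = pd.cut(raw_df['Age'], [10, 20, 30, 40, 55, 100])
print(raw_df.groupby('AgeGroupAlt')['Survived'].mean())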
#
# **_Refitting:_**
# Most of the models above used their default parameters. Choose a few promising models and attempt to optimize their (hyper-)parameters. The sklearn library used above offers a couple of ways to do this automatically (via grid search and cross-validated models, see [Model selection][1] and [Tuning the hyper-parameters of an estimator][2]).
#
#
# [1]: http://scikit-learn.org/stable/tutorial/statistical_inference/model_selection.html
# [2]: http://scikit-learn.org/stable/modules/grid_search.html#grid-search
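# 
# Below is a minimal, hedged sketch of the grid-search idea (added for illustration; the parameter grid values are assumptions, not tuned recommendations).
# In[ ]:
from sklearn.model_selection import GridSearchCV
# illustrative grid only; a real search would use a wider, problem-informed range
param_grid = {'n_estimators': [50, 100, 200], 'max_depth': [None, 4, 8]}
grid = GridSearchCV(RandomForestClassifier(), param_grid, cv=10)
grid.fit(X_train, Y_train)
print(grid.best_params_, round(grid.best_score_ * 100, 2))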
|
normal
|
{
"blob_id": "05f143e28ff9c7397376ad598529c1dfb7528ee3",
"index": 7269,
"step-1": "<mask token>\n\n\ndef get_na(dataset):\n na_males = dataset[dataset.Sex == 'male'].loc[:, 'AgeGroup'].isnull().sum()\n na_females = dataset[dataset.Sex == 'female'].loc[:, 'AgeGroup'].isnull(\n ).sum()\n return {'male': na_males, 'female': na_females}\n\n\ndef get_counts(dataset):\n return dataset.groupby(['Sex', 'AgeGroup']).size()\n\n\ndef generate_age_groups(num, freq):\n age_groups = {}\n for sex in ['male', 'female']:\n relfreq = freq[sex] / freq[sex].sum()\n age_groups[sex] = np.random.choice(freq[sex].index, size=num[sex],\n replace=True, p=relfreq)\n return age_groups\n\n\ndef insert_age_group_values(dataset, age_groups):\n for sex in ['male', 'female']:\n tmp = pd.DataFrame(dataset[(dataset.Sex == sex) & dataset.Age.isnull()]\n )\n tmp['AgeGroup'] = age_groups[sex]\n dataset = dataset.combine_first(tmp)\n return dataset\n\n\n<mask token>\n",
"step-2": "<mask token>\nget_ipython().magic(u'matplotlib inline')\nsns.set_style('whitegrid')\n<mask token>\nprint(train_df.columns.values)\ntrain_df.head()\ntrain_df.head(3).T\ntrain_df.info()\nprint('-' * 40)\ntest_df.info()\ntrain_df.describe()\ntrain_df.describe(include=['O'])\ntrain_df[train_df.Age == 10].stack()\nplt.subplot(211)\nsns.countplot(x='Sex', data=train_df, palette='Greens_d')\nplt.subplot(212)\nsns.barplot(x='Sex', y='Survived', data=train_df, palette='Greens_d')\ntrain_df.groupby('Sex').size()\ntrain_df.groupby(['Sex'])['Survived'].mean().sort_values()\nplt.subplots(figsize=(8, 6))\nplt.subplot(211)\nsns.countplot(x='Pclass', data=train_df, palette='Purples_d')\nplt.subplot(212)\nsns.barplot(x='Pclass', y='Survived', data=train_df, palette='Purples_d')\ntrain_df.groupby(['Pclass']).size()\ntrain_df.groupby(['Pclass'])['Survived'].mean().sort_values(ascending=False)\ntrain_df.groupby('Age').size().head(25)\n<mask token>\nplt.subplots(figsize=(18, 6))\nplt.subplot(311)\nsns.kdeplot(age['Age'], shade=True, cut=0)\nplt.subplot(312)\nsns.countplot(x='Age', data=age, palette='GnBu_d')\nplt.subplot(313)\nsns.barplot(x='Age', y='Survived', data=age, ci=None, palette='Oranges_d')\n<mask token>\ntrain_df.groupby('AgeGroup')['Survived'].mean()\ntrain_df[['Survived', 'AgeGroup', 'Sex']].groupby(['Sex', 'AgeGroup']).mean()\nsns.factorplot(x='AgeGroup', col='Sex', data=train_df, kind='count')\nsns.factorplot(x='AgeGroup', y='Survived', col='Sex', data=train_df, kind='bar'\n )\n<mask token>\nplt.subplot(211)\nsns.countplot(x='FamilySize', data=train_df)\nplt.subplot(212)\nsns.barplot(x='FamilySize', y='Survived', data=train_df)\n<mask token>\ntrain_df[['PassengerId', 'Name', 'Cabin', 'Deck']].head(2).T\nplt.subplots(figsize=(8, 6))\nplt.subplot(211)\nsns.countplot(x='Deck', data=train_df)\nplt.subplot(212)\nsns.barplot(x='Deck', y='Survived', data=train_df)\n\n\ndef get_na(dataset):\n na_males = dataset[dataset.Sex == 'male'].loc[:, 'AgeGroup'].isnull().sum()\n na_females = dataset[dataset.Sex == 'female'].loc[:, 'AgeGroup'].isnull(\n ).sum()\n return {'male': na_males, 'female': na_females}\n\n\ndef get_counts(dataset):\n return dataset.groupby(['Sex', 'AgeGroup']).size()\n\n\ndef generate_age_groups(num, freq):\n age_groups = {}\n for sex in ['male', 'female']:\n relfreq = freq[sex] / freq[sex].sum()\n age_groups[sex] = np.random.choice(freq[sex].index, size=num[sex],\n replace=True, p=relfreq)\n return age_groups\n\n\ndef insert_age_group_values(dataset, age_groups):\n for sex in ['male', 'female']:\n tmp = pd.DataFrame(dataset[(dataset.Sex == sex) & dataset.Age.isnull()]\n )\n tmp['AgeGroup'] = age_groups[sex]\n dataset = dataset.combine_first(tmp)\n return dataset\n\n\n<mask token>\ncounts['female']\n<mask token>\nage_groups['female']\n<mask token>\ntrain_df.info()\nprint('-' * 40)\n<mask token>\ntest_df.info()\n<mask token>\ntrain_df[['Name', 'Sex', 'Female']].head(2).T\n<mask token>\ntrain_df[['Name', 'Pclass', 'PClass_1', 'PClass_2']].head(2).T\n<mask token>\ntest_df['Fare'].fillna(test_df['Fare'].median(), inplace=True)\nprint(train_df.groupby('Embarked').size().sort_values())\n<mask token>\ntrain_df.drop(['PassengerId', 'Pclass', 'Name', 'Sex', 'Age', 'SibSp',\n 'Parch', 'Ticket', 'Cabin', 'Fare', 'Embarked', 'Deck', 'AgeGroup'],\n axis=1, inplace=True)\ntest_df.drop(['Pclass', 'Name', 'Sex', 'Age', 'SibSp', 'Parch', 'Ticket',\n 'Cabin', 'Fare', 'Embarked', 'AgeGroup'], axis=1, inplace=True)\ntrain_df.head(10).T\n<mask token>\nX_train.shape, Y_train.shape, 
X_test.shape\n<mask token>\nacc_log\nlogreg.fit(X_train, Y_train)\n<mask token>\ncoeff_df.sort_values(by='Correlation', ascending=False)\n<mask token>\nacc_gaussian\n<mask token>\nacc_perceptron\n<mask token>\nacc_neural_net\n<mask token>\nacc_sgd\n<mask token>\nacc_linear_svc\n<mask token>\nacc_svc\n<mask token>\nacc_decision_tree\n<mask token>\nacc_random_forest\n<mask token>\nacc_ada_boost\n<mask token>\nacc_knn\n<mask token>\nmodels.sort_values(by='Score', ascending=False)\nrandom_forest.fit(X_train, Y_train)\n<mask token>\nsubmission.to_csv('titanic_submission_1.csv', index=False)\n<mask token>\nrandom_forest.fit(X_train, Y_train)\n<mask token>\nacc_random_forest\n",
"step-3": "<mask token>\nget_ipython().magic(u'matplotlib inline')\nsns.set_style('whitegrid')\n<mask token>\ntrain_df = pd.read_csv('../input/train.csv')\ntest_df = pd.read_csv('../input/test.csv')\ncombine = [train_df, test_df]\nprint(train_df.columns.values)\ntrain_df.head()\ntrain_df.head(3).T\ntrain_df.info()\nprint('-' * 40)\ntest_df.info()\ntrain_df.describe()\ntrain_df.describe(include=['O'])\ntrain_df[train_df.Age == 10].stack()\nplt.subplot(211)\nsns.countplot(x='Sex', data=train_df, palette='Greens_d')\nplt.subplot(212)\nsns.barplot(x='Sex', y='Survived', data=train_df, palette='Greens_d')\ntrain_df.groupby('Sex').size()\ntrain_df.groupby(['Sex'])['Survived'].mean().sort_values()\nplt.subplots(figsize=(8, 6))\nplt.subplot(211)\nsns.countplot(x='Pclass', data=train_df, palette='Purples_d')\nplt.subplot(212)\nsns.barplot(x='Pclass', y='Survived', data=train_df, palette='Purples_d')\ntrain_df.groupby(['Pclass']).size()\ntrain_df.groupby(['Pclass'])['Survived'].mean().sort_values(ascending=False)\ntrain_df.groupby('Age').size().head(25)\nage = train_df[['Age', 'Survived']].dropna()\nage['Age'] = age['Age'].astype(int)\nplt.subplots(figsize=(18, 6))\nplt.subplot(311)\nsns.kdeplot(age['Age'], shade=True, cut=0)\nplt.subplot(312)\nsns.countplot(x='Age', data=age, palette='GnBu_d')\nplt.subplot(313)\nsns.barplot(x='Age', y='Survived', data=age, ci=None, palette='Oranges_d')\ntrain_df['AgeGroup'] = pd.cut(train_df['Age'], [0, 4, 15, 25, 35, 45, 65, 100])\ntest_df['AgeGroup'] = pd.cut(test_df['Age'], [0, 4, 15, 25, 35, 45, 65, 100])\ntrain_df.groupby('AgeGroup')['Survived'].mean()\ntrain_df[['Survived', 'AgeGroup', 'Sex']].groupby(['Sex', 'AgeGroup']).mean()\nsns.factorplot(x='AgeGroup', col='Sex', data=train_df, kind='count')\nsns.factorplot(x='AgeGroup', y='Survived', col='Sex', data=train_df, kind='bar'\n )\ntrain_df['FamilySize'] = train_df['SibSp'] + train_df['Parch'] + 1\ntest_df['FamilySize'] = test_df['SibSp'] + test_df['Parch'] + 1\nplt.subplot(211)\nsns.countplot(x='FamilySize', data=train_df)\nplt.subplot(212)\nsns.barplot(x='FamilySize', y='Survived', data=train_df)\ntrain_df['Deck'] = train_df['Cabin'].dropna().apply(lambda x: str(x)[0])\ntrain_df[['PassengerId', 'Name', 'Cabin', 'Deck']].head(2).T\nplt.subplots(figsize=(8, 6))\nplt.subplot(211)\nsns.countplot(x='Deck', data=train_df)\nplt.subplot(212)\nsns.barplot(x='Deck', y='Survived', data=train_df)\n\n\ndef get_na(dataset):\n na_males = dataset[dataset.Sex == 'male'].loc[:, 'AgeGroup'].isnull().sum()\n na_females = dataset[dataset.Sex == 'female'].loc[:, 'AgeGroup'].isnull(\n ).sum()\n return {'male': na_males, 'female': na_females}\n\n\ndef get_counts(dataset):\n return dataset.groupby(['Sex', 'AgeGroup']).size()\n\n\ndef generate_age_groups(num, freq):\n age_groups = {}\n for sex in ['male', 'female']:\n relfreq = freq[sex] / freq[sex].sum()\n age_groups[sex] = np.random.choice(freq[sex].index, size=num[sex],\n replace=True, p=relfreq)\n return age_groups\n\n\ndef insert_age_group_values(dataset, age_groups):\n for sex in ['male', 'female']:\n tmp = pd.DataFrame(dataset[(dataset.Sex == sex) & dataset.Age.isnull()]\n )\n tmp['AgeGroup'] = age_groups[sex]\n dataset = dataset.combine_first(tmp)\n return dataset\n\n\nna = get_na(train_df)\ncounts = get_counts(train_df)\ncounts['female']\nage_groups = generate_age_groups(na, counts)\nage_groups['female']\ntrain_df = insert_age_group_values(train_df, age_groups)\ntrain_df.info()\nprint('-' * 40)\nna = get_na(test_df)\ncounts = get_counts(train_df)\nage_groups = 
generate_age_groups(na, counts)\ntest_df = insert_age_group_values(test_df, age_groups)\ntest_df.info()\ndummy = pd.get_dummies(train_df['Sex'])\ndummy.columns = ['Female', 'Male']\ntrain_df = train_df.join(dummy['Female'])\ndummy = pd.get_dummies(test_df['Sex'])\ndummy.columns = ['Female', 'Male']\ntest_df = test_df.join(dummy['Female'])\ntrain_df[['Name', 'Sex', 'Female']].head(2).T\ndummy = pd.get_dummies(train_df['Pclass'])\ndummy.columns = ['PClass_1', 'PClass_2', 'PClass_3']\ntrain_df = train_df.join(dummy[['PClass_1', 'PClass_2']])\ndummy = pd.get_dummies(test_df['Pclass'])\ndummy.columns = ['PClass_1', 'PClass_2', 'PClass_3']\ntest_df = test_df.join(dummy[['PClass_1', 'PClass_2']])\ntrain_df[['Name', 'Pclass', 'PClass_1', 'PClass_2']].head(2).T\ndummy = pd.get_dummies(train_df['AgeGroup'])\ndummy.columns = ['Ages_4', 'Ages_15', 'Ages_25', 'Ages_35', 'Ages_45',\n 'Ages_65', 'Ages_100']\ntrain_df = train_df.join(dummy)\ndummy = pd.get_dummies(test_df['AgeGroup'])\ndummy.columns = ['Ages_4', 'Ages_15', 'Ages_25', 'Ages_35', 'Ages_45',\n 'Ages_65', 'Ages_100']\ntest_df = test_df.join(dummy)\ntest_df['Fare'].fillna(test_df['Fare'].median(), inplace=True)\nprint(train_df.groupby('Embarked').size().sort_values())\ntrain_df['Embarked'] = train_df['Embarked'].fillna('S')\ndummy = pd.get_dummies(train_df['Embarked'])\ndummy.columns = ['Port_C', 'Port_Q', 'Port_S']\ndummy = pd.get_dummies(test_df['Embarked'])\ndummy.columns = ['Port_C', 'Port_Q', 'Port_S']\ntrain_df.drop(['PassengerId', 'Pclass', 'Name', 'Sex', 'Age', 'SibSp',\n 'Parch', 'Ticket', 'Cabin', 'Fare', 'Embarked', 'Deck', 'AgeGroup'],\n axis=1, inplace=True)\ntest_df.drop(['Pclass', 'Name', 'Sex', 'Age', 'SibSp', 'Parch', 'Ticket',\n 'Cabin', 'Fare', 'Embarked', 'AgeGroup'], axis=1, inplace=True)\ntrain_df.head(10).T\nX_train = train_df.drop('Survived', axis=1)\nY_train = train_df['Survived']\nX_test = test_df.drop('PassengerId', axis=1).copy()\nX_train.shape, Y_train.shape, X_test.shape\nlogreg = LogisticRegression()\nscores = cross_val_score(logreg, X_train, Y_train, cv=10)\nacc_log = round(scores.mean() * 100, 2)\nacc_log\nlogreg.fit(X_train, Y_train)\ncoeff_df = pd.DataFrame(train_df.columns.delete(0))\ncoeff_df.columns = ['Feature']\ncoeff_df['Correlation'] = pd.Series(logreg.coef_[0])\ncoeff_df.sort_values(by='Correlation', ascending=False)\ngaussian = GaussianNB()\nscores = cross_val_score(gaussian, X_train, Y_train, cv=10)\nacc_gaussian = round(scores.mean() * 100, 2)\nacc_gaussian\nperceptron = Perceptron()\nscores = cross_val_score(perceptron, X_train, Y_train, cv=10)\nacc_perceptron = round(scores.mean() * 100, 2)\nacc_perceptron\nneural_net = MLPClassifier()\nscores = cross_val_score(neural_net, X_train, Y_train, cv=10)\nacc_neural_net = round(scores.mean() * 100, 2)\nacc_neural_net\nsgd = SGDClassifier()\nscores = cross_val_score(sgd, X_train, Y_train, cv=10)\nacc_sgd = round(scores.mean() * 100, 2)\nacc_sgd\nlinear_svc = LinearSVC()\nscores = cross_val_score(linear_svc, X_train, Y_train, cv=10)\nacc_linear_svc = round(scores.mean() * 100, 2)\nacc_linear_svc\nsvc = SVC()\nscores = cross_val_score(svc, X_train, Y_train, cv=10)\nacc_svc = round(scores.mean() * 100, 2)\nacc_svc\ndecision_tree = DecisionTreeClassifier()\nscores = cross_val_score(decision_tree, X_train, Y_train, cv=10)\nacc_decision_tree = round(scores.mean() * 100, 2)\nacc_decision_tree\nrandom_forest = RandomForestClassifier(n_estimators=100)\nscores = cross_val_score(random_forest, X_train, Y_train, cv=10)\nacc_random_forest = round(scores.mean() * 100, 
2)\nacc_random_forest\nada_boost = AdaBoostClassifier(n_estimators=100)\nscores = cross_val_score(ada_boost, X_train, Y_train, cv=10)\nacc_ada_boost = round(scores.mean() * 100, 2)\nacc_ada_boost\nknn = KNeighborsClassifier(n_neighbors=5)\nscores = cross_val_score(knn, X_train, Y_train, cv=10)\nacc_knn = round(scores.mean() * 100, 2)\nacc_knn\nmodels = pd.DataFrame({'Model': ['Support Vector Machine', 'kNN',\n 'Logistic Regression', 'Random Forest', 'Naive Bayes', 'Perceptron',\n 'Stochastic Gradient Descent', 'Linear SVC', 'Decision Tree',\n 'AdaBoost', 'Neural Network'], 'Score': [acc_svc, acc_knn, acc_log,\n acc_random_forest, acc_gaussian, acc_perceptron, acc_sgd,\n acc_linear_svc, acc_decision_tree, acc_ada_boost, acc_neural_net]})\nmodels.sort_values(by='Score', ascending=False)\nrandom_forest.fit(X_train, Y_train)\nY_pred = random_forest.predict(X_test)\nsubmission = pd.DataFrame({'PassengerId': test_df['PassengerId'],\n 'Survived': Y_pred})\nsubmission.to_csv('titanic_submission_1.csv', index=False)\nrandom_forest = RandomForestClassifier(n_estimators=100)\nrandom_forest.fit(X_train, Y_train)\nacc_random_forest = round(random_forest.score(X_train, Y_train) * 100, 2)\nacc_random_forest\n",
"step-4": "import pandas as pd\nimport numpy as np\nimport random as rnd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nget_ipython().magic(u'matplotlib inline')\nsns.set_style('whitegrid')\nfrom sklearn.model_selection import cross_val_score\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.svm import SVC, LinearSVC\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.naive_bayes import GaussianNB\nfrom sklearn.linear_model import Perceptron\nfrom sklearn.linear_model import SGDClassifier\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.ensemble import AdaBoostClassifier\nfrom sklearn.neural_network import MLPClassifier\ntrain_df = pd.read_csv('../input/train.csv')\ntest_df = pd.read_csv('../input/test.csv')\ncombine = [train_df, test_df]\nprint(train_df.columns.values)\ntrain_df.head()\ntrain_df.head(3).T\ntrain_df.info()\nprint('-' * 40)\ntest_df.info()\ntrain_df.describe()\ntrain_df.describe(include=['O'])\ntrain_df[train_df.Age == 10].stack()\nplt.subplot(211)\nsns.countplot(x='Sex', data=train_df, palette='Greens_d')\nplt.subplot(212)\nsns.barplot(x='Sex', y='Survived', data=train_df, palette='Greens_d')\ntrain_df.groupby('Sex').size()\ntrain_df.groupby(['Sex'])['Survived'].mean().sort_values()\nplt.subplots(figsize=(8, 6))\nplt.subplot(211)\nsns.countplot(x='Pclass', data=train_df, palette='Purples_d')\nplt.subplot(212)\nsns.barplot(x='Pclass', y='Survived', data=train_df, palette='Purples_d')\ntrain_df.groupby(['Pclass']).size()\ntrain_df.groupby(['Pclass'])['Survived'].mean().sort_values(ascending=False)\ntrain_df.groupby('Age').size().head(25)\nage = train_df[['Age', 'Survived']].dropna()\nage['Age'] = age['Age'].astype(int)\nplt.subplots(figsize=(18, 6))\nplt.subplot(311)\nsns.kdeplot(age['Age'], shade=True, cut=0)\nplt.subplot(312)\nsns.countplot(x='Age', data=age, palette='GnBu_d')\nplt.subplot(313)\nsns.barplot(x='Age', y='Survived', data=age, ci=None, palette='Oranges_d')\ntrain_df['AgeGroup'] = pd.cut(train_df['Age'], [0, 4, 15, 25, 35, 45, 65, 100])\ntest_df['AgeGroup'] = pd.cut(test_df['Age'], [0, 4, 15, 25, 35, 45, 65, 100])\ntrain_df.groupby('AgeGroup')['Survived'].mean()\ntrain_df[['Survived', 'AgeGroup', 'Sex']].groupby(['Sex', 'AgeGroup']).mean()\nsns.factorplot(x='AgeGroup', col='Sex', data=train_df, kind='count')\nsns.factorplot(x='AgeGroup', y='Survived', col='Sex', data=train_df, kind='bar'\n )\ntrain_df['FamilySize'] = train_df['SibSp'] + train_df['Parch'] + 1\ntest_df['FamilySize'] = test_df['SibSp'] + test_df['Parch'] + 1\nplt.subplot(211)\nsns.countplot(x='FamilySize', data=train_df)\nplt.subplot(212)\nsns.barplot(x='FamilySize', y='Survived', data=train_df)\ntrain_df['Deck'] = train_df['Cabin'].dropna().apply(lambda x: str(x)[0])\ntrain_df[['PassengerId', 'Name', 'Cabin', 'Deck']].head(2).T\nplt.subplots(figsize=(8, 6))\nplt.subplot(211)\nsns.countplot(x='Deck', data=train_df)\nplt.subplot(212)\nsns.barplot(x='Deck', y='Survived', data=train_df)\n\n\ndef get_na(dataset):\n na_males = dataset[dataset.Sex == 'male'].loc[:, 'AgeGroup'].isnull().sum()\n na_females = dataset[dataset.Sex == 'female'].loc[:, 'AgeGroup'].isnull(\n ).sum()\n return {'male': na_males, 'female': na_females}\n\n\ndef get_counts(dataset):\n return dataset.groupby(['Sex', 'AgeGroup']).size()\n\n\ndef generate_age_groups(num, freq):\n age_groups = {}\n for sex in ['male', 'female']:\n relfreq = freq[sex] / freq[sex].sum()\n age_groups[sex] = np.random.choice(freq[sex].index, 
size=num[sex],\n replace=True, p=relfreq)\n return age_groups\n\n\ndef insert_age_group_values(dataset, age_groups):\n for sex in ['male', 'female']:\n tmp = pd.DataFrame(dataset[(dataset.Sex == sex) & dataset.Age.isnull()]\n )\n tmp['AgeGroup'] = age_groups[sex]\n dataset = dataset.combine_first(tmp)\n return dataset\n\n\nna = get_na(train_df)\ncounts = get_counts(train_df)\ncounts['female']\nage_groups = generate_age_groups(na, counts)\nage_groups['female']\ntrain_df = insert_age_group_values(train_df, age_groups)\ntrain_df.info()\nprint('-' * 40)\nna = get_na(test_df)\ncounts = get_counts(train_df)\nage_groups = generate_age_groups(na, counts)\ntest_df = insert_age_group_values(test_df, age_groups)\ntest_df.info()\ndummy = pd.get_dummies(train_df['Sex'])\ndummy.columns = ['Female', 'Male']\ntrain_df = train_df.join(dummy['Female'])\ndummy = pd.get_dummies(test_df['Sex'])\ndummy.columns = ['Female', 'Male']\ntest_df = test_df.join(dummy['Female'])\ntrain_df[['Name', 'Sex', 'Female']].head(2).T\ndummy = pd.get_dummies(train_df['Pclass'])\ndummy.columns = ['PClass_1', 'PClass_2', 'PClass_3']\ntrain_df = train_df.join(dummy[['PClass_1', 'PClass_2']])\ndummy = pd.get_dummies(test_df['Pclass'])\ndummy.columns = ['PClass_1', 'PClass_2', 'PClass_3']\ntest_df = test_df.join(dummy[['PClass_1', 'PClass_2']])\ntrain_df[['Name', 'Pclass', 'PClass_1', 'PClass_2']].head(2).T\ndummy = pd.get_dummies(train_df['AgeGroup'])\ndummy.columns = ['Ages_4', 'Ages_15', 'Ages_25', 'Ages_35', 'Ages_45',\n 'Ages_65', 'Ages_100']\ntrain_df = train_df.join(dummy)\ndummy = pd.get_dummies(test_df['AgeGroup'])\ndummy.columns = ['Ages_4', 'Ages_15', 'Ages_25', 'Ages_35', 'Ages_45',\n 'Ages_65', 'Ages_100']\ntest_df = test_df.join(dummy)\ntest_df['Fare'].fillna(test_df['Fare'].median(), inplace=True)\nprint(train_df.groupby('Embarked').size().sort_values())\ntrain_df['Embarked'] = train_df['Embarked'].fillna('S')\ndummy = pd.get_dummies(train_df['Embarked'])\ndummy.columns = ['Port_C', 'Port_Q', 'Port_S']\ndummy = pd.get_dummies(test_df['Embarked'])\ndummy.columns = ['Port_C', 'Port_Q', 'Port_S']\ntrain_df.drop(['PassengerId', 'Pclass', 'Name', 'Sex', 'Age', 'SibSp',\n 'Parch', 'Ticket', 'Cabin', 'Fare', 'Embarked', 'Deck', 'AgeGroup'],\n axis=1, inplace=True)\ntest_df.drop(['Pclass', 'Name', 'Sex', 'Age', 'SibSp', 'Parch', 'Ticket',\n 'Cabin', 'Fare', 'Embarked', 'AgeGroup'], axis=1, inplace=True)\ntrain_df.head(10).T\nX_train = train_df.drop('Survived', axis=1)\nY_train = train_df['Survived']\nX_test = test_df.drop('PassengerId', axis=1).copy()\nX_train.shape, Y_train.shape, X_test.shape\nlogreg = LogisticRegression()\nscores = cross_val_score(logreg, X_train, Y_train, cv=10)\nacc_log = round(scores.mean() * 100, 2)\nacc_log\nlogreg.fit(X_train, Y_train)\ncoeff_df = pd.DataFrame(train_df.columns.delete(0))\ncoeff_df.columns = ['Feature']\ncoeff_df['Correlation'] = pd.Series(logreg.coef_[0])\ncoeff_df.sort_values(by='Correlation', ascending=False)\ngaussian = GaussianNB()\nscores = cross_val_score(gaussian, X_train, Y_train, cv=10)\nacc_gaussian = round(scores.mean() * 100, 2)\nacc_gaussian\nperceptron = Perceptron()\nscores = cross_val_score(perceptron, X_train, Y_train, cv=10)\nacc_perceptron = round(scores.mean() * 100, 2)\nacc_perceptron\nneural_net = MLPClassifier()\nscores = cross_val_score(neural_net, X_train, Y_train, cv=10)\nacc_neural_net = round(scores.mean() * 100, 2)\nacc_neural_net\nsgd = SGDClassifier()\nscores = cross_val_score(sgd, X_train, Y_train, cv=10)\nacc_sgd = round(scores.mean() * 100, 
2)\nacc_sgd\nlinear_svc = LinearSVC()\nscores = cross_val_score(linear_svc, X_train, Y_train, cv=10)\nacc_linear_svc = round(scores.mean() * 100, 2)\nacc_linear_svc\nsvc = SVC()\nscores = cross_val_score(svc, X_train, Y_train, cv=10)\nacc_svc = round(scores.mean() * 100, 2)\nacc_svc\ndecision_tree = DecisionTreeClassifier()\nscores = cross_val_score(decision_tree, X_train, Y_train, cv=10)\nacc_decision_tree = round(scores.mean() * 100, 2)\nacc_decision_tree\nrandom_forest = RandomForestClassifier(n_estimators=100)\nscores = cross_val_score(random_forest, X_train, Y_train, cv=10)\nacc_random_forest = round(scores.mean() * 100, 2)\nacc_random_forest\nada_boost = AdaBoostClassifier(n_estimators=100)\nscores = cross_val_score(ada_boost, X_train, Y_train, cv=10)\nacc_ada_boost = round(scores.mean() * 100, 2)\nacc_ada_boost\nknn = KNeighborsClassifier(n_neighbors=5)\nscores = cross_val_score(knn, X_train, Y_train, cv=10)\nacc_knn = round(scores.mean() * 100, 2)\nacc_knn\nmodels = pd.DataFrame({'Model': ['Support Vector Machine', 'kNN',\n 'Logistic Regression', 'Random Forest', 'Naive Bayes', 'Perceptron',\n 'Stochastic Gradient Descent', 'Linear SVC', 'Decision Tree',\n 'AdaBoost', 'Neural Network'], 'Score': [acc_svc, acc_knn, acc_log,\n acc_random_forest, acc_gaussian, acc_perceptron, acc_sgd,\n acc_linear_svc, acc_decision_tree, acc_ada_boost, acc_neural_net]})\nmodels.sort_values(by='Score', ascending=False)\nrandom_forest.fit(X_train, Y_train)\nY_pred = random_forest.predict(X_test)\nsubmission = pd.DataFrame({'PassengerId': test_df['PassengerId'],\n 'Survived': Y_pred})\nsubmission.to_csv('titanic_submission_1.csv', index=False)\nrandom_forest = RandomForestClassifier(n_estimators=100)\nrandom_forest.fit(X_train, Y_train)\nacc_random_forest = round(random_forest.score(X_train, Y_train) * 100, 2)\nacc_random_forest\n",
"step-5": "#!/usr/bin/env python\n# coding: utf-8\n\n# Predicting Surviving the Sinking of the Titanic\n# -----------------------------------------------\n# \n# \n# This represents my first attempt at training up some classifiers for the titanic dataset.\n\n# In[ ]:\n\n\n# data analysis and wrangling\nimport pandas as pd\nimport numpy as np\nimport random as rnd\n\n# visualization\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nget_ipython().magic(u'matplotlib inline')\nsns.set_style(\"whitegrid\")\n\n# machine learning\nfrom sklearn.model_selection import cross_val_score\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.svm import SVC, LinearSVC\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.naive_bayes import GaussianNB\nfrom sklearn.linear_model import Perceptron\nfrom sklearn.linear_model import SGDClassifier\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.ensemble import AdaBoostClassifier\nfrom sklearn.neural_network import MLPClassifier\n\n\n# In[ ]:\n\n\n# get titanic & test csv files as a DataFrame\ntrain_df = pd.read_csv(\"../input/train.csv\")\ntest_df = pd.read_csv(\"../input/test.csv\")\ncombine = [train_df, test_df]\n\n\n# # Data exploration #\n# \n# First get some summary statistics about the datasets.\n\n# In[ ]:\n\n\n# view column labels\nprint(train_df.columns.values)\n\n\n# In[ ]:\n\n\n# preview the data\ntrain_df.head()\n\n\n# Now transpose the first few rows in order to see all attributes more easily as row labels.\n\n# In[ ]:\n\n\ntrain_df.head(3).T\n\n\n# In[ ]:\n\n\n# missing values, data types\ntrain_df.info()\nprint('-'*40)\ntest_df.info()\n\n\n# The above info shows that columns (from training data) with missing/empty values are:\n# \n# - Age (177 missing values)\n# - Cabin (687 missing values)\n# - Embarked (2 missing values)\n\n# In[ ]:\n\n\n# describe numeric columns\ntrain_df.describe()\n\n\n# In the training dataset there are 891 passengers with an overall survival rate of 38.4%.\n# The oldest person is 80 years and the youngest is 5 months (0.42*12). The average fare is 32.20 dollars but the median fare is 14.45. This suggests outliers at the upper end of the fare, and indeed the maximum fare is $512.33.\n\n# In[ ]:\n\n\n# describe categorical columns\ntrain_df.describe(include=['O'])\n\n\n# In[ ]:\n\n\n# just for fun, examine the records of ten year olds (there are only two) \ntrain_df[train_df.Age == 10].stack()\n\n\n# # Detailed data investigation #\n# \n# A closer look at each of the attributes (columns) and their relationship to survival.\n\n# ##Sex##\n# \n# Sex is a *nominal* attribute with two categories (i.e. it is dichotomous). Let's plot some counts and survival rates by sex. 
Note that survival values are 0/1, thus rates can be be calculated simply via the mean survive value.\n\n# In[ ]:\n\n\n# count passengers by sex\nplt.subplot(211) # 3 digit convenience notation for arguments (last digit represents plot number)\nsns.countplot(x='Sex', data=train_df, palette='Greens_d')\n\n# survival rate by sex\n# note that barplot plots mean() on y by default\nplt.subplot(212)\nsns.barplot(x='Sex', y='Survived', data=train_df, palette='Greens_d') \n\n\n# **Observations:**\n# \n# - Many more males than females\n# - Survival rate of females much greater than males\n# \n# Let's get the actual numbers below using pandas.\n\n# In[ ]:\n\n\n# count passengers by sex\ntrain_df.groupby('Sex').size()\n\n\n# In[ ]:\n\n\n# survival rates by sex\ntrain_df.groupby(['Sex'])['Survived'].mean().sort_values()\n\n\n# Thus, 18.9% of males (from the training set) survived compared to 74.2% of females.\n\n# ##Passenger class##\n# \n# Passenger class (Pclass) is an *ordinal* attribute with three categories, 1, 2 and 3. The three categories have an order (representing socioeconomic status) but although the categories are given numeric labels, this attribute *is not* numeric! To see this, consider that 3rd class = 1st + 2nd class is a nonsense. This will be important later when we construct features. Again, let's plot some counts and survival rates.\n\n# In[ ]:\n\n\n# size of groups in passenger class\nplt.subplots(figsize=(8,6))\nplt.subplot(211) \nsns.countplot(x='Pclass', data=train_df, palette='Purples_d') # _d = dark palette\n\n# survival rate by sex\nplt.subplot(212)\nsns.barplot(x='Pclass', y='Survived', data=train_df, palette='Purples_d') \n\n\n# **Observations:**\n# \n# - Three classes\n# - Most passengers travelled by 3rd class (more than half; see below)\n# - Survival rate increases with class\n# \n# Again, let's get the actual numbers below using pandas.\n\n# In[ ]:\n\n\n# count passengers by passenger class\ntrain_df.groupby(['Pclass']).size()\n\n\n# In[ ]:\n\n\n# survival rates by passenger class\ntrain_df.groupby(['Pclass'])['Survived'].mean().sort_values(ascending=False)\n\n\n# ##Age##\n# \n# Age is a *ratio* attribute (it is properly numeric, see [Types of data measurement scales][1]). Ages < 1 indicate age in months.\n# \n# \n# [1]: http://www.mymarketresearchmethods.com/types-of-data-nominal-ordinal-interval-ratio/\n\n# In[ ]:\n\n\n# count the number of passengers for first 25 ages\ntrain_df.groupby('Age').size().head(25)\n\n# another way to do the above\n#train_df['Age'].value_counts().sort_index().head(25) \n\n\n# In[ ]:\n\n\n# convert ages to ints\nage = train_df[['Age','Survived']].dropna() # returns a copy with blanks removed\nage['Age'] = age['Age'].astype(int) # floors floats\n\n# count passengers by age (smoothed via gaussian kernels)\nplt.subplots(figsize=(18,6))\nplt.subplot(311)\nsns.kdeplot(age['Age'], shade=True, cut=0)\n\n# count passengers by age (no smoothing)\nplt.subplot(312)\nsns.countplot(x='Age', data=age, palette='GnBu_d')\n\n# survival rates by age\nplt.subplot(313)\nsns.barplot(x='Age', y='Survived', data=age, ci=None, palette='Oranges_d') # takes mean by default\n\n\n# Observations:\n# \n# - Under 16s tend to have the highest survival rates\n# - Very high survival rates at 53, 63 and 80\n# - Survival of over 16s is fairly noisy. 
Possible that survival might increase with age.\n\n# ## Survival by age group and sex ##\n# \n# Now let's look at survival by age groups *and* sex to see if any patterns become clearer.\n\n# In[ ]:\n\n\n# bin age into groups\ntrain_df['AgeGroup'] = pd.cut(train_df['Age'],[0,4,15,25,35,45,65,100])\ntest_df['AgeGroup'] = pd.cut(test_df['Age'],[0,4,15,25,35,45,65,100])\n\n# survival by age group\ntrain_df.groupby('AgeGroup')['Survived'].mean()\n\n\n# In[ ]:\n\n\n# survival by age group and sex\ntrain_df[['Survived','AgeGroup', 'Sex']].groupby(['Sex', 'AgeGroup']).mean()\n\n\n# In[ ]:\n\n\n# count passengers by age group and sex\nsns.factorplot(x='AgeGroup', col='Sex', data=train_df, kind='count')\n\n# survival by age group and sex\nsns.factorplot(x='AgeGroup', y='Survived', col='Sex', data=train_df, kind='bar')\n\n\n# The relationship between survival and age group looks very different for males and females:\n# \n# - Males: survival rates increase *inversely* with age for (0, 25] and (25, 100). That is, younger boys fare better than older boys and younger men survive more than older men. \n# - Females: no obvious relationship between surviving and age. In particular, girls and baby girls do not fare better than women; in fact, girls (4, 15] have the *lowest* survival rates of females. \n# \n# A feature space containing (child, man, woman) would do a decent job of representing this relationship to survivability. \n# \n# Non-linear classifiers (e.g. decision trees, multi-layer nn, nearest neighbour) applied to both sex and age group might do even better because of the noticeable relationship between survivability and age group for males. \n\n# ## Family Size##\n# \n# We create a new feature, FamilySize, that sums Parch and SibSp. This will enable us to drop Parch and SibSp from the datasets.\n\n# In[ ]:\n\n\n# calculate family size\ntrain_df['FamilySize'] = train_df['SibSp'] + train_df['Parch'] + 1\ntest_df['FamilySize'] = test_df['SibSp'] + test_df['Parch'] + 1\n\n# count passengers by age group and sex\nplt.subplot(211)\nsns.countplot(x='FamilySize', data=train_df)\n\n# survival by age group and sex\nplt.subplot(212)\nsns.barplot(x='FamilySize', y='Survived', data=train_df)\n\n\n# Survival increases with family size, until families of size 4. Family sizes of 5 and above have reduced survival.\n\n# Deck\n# ----\n# \n# Cabin might be conceivably be related to survival, but unfortunately most values are missing. Nevertheless, by way of an exercise, we will extract the feature, Deck, from cabin by taking the first character of the label and analyze survival rates by deck.\n\n# In[ ]:\n\n\n# deck is the first letter of cabin\ntrain_df['Deck'] = train_df['Cabin'].dropna().apply(lambda x: str(x)[0])\ntrain_df[['PassengerId','Name', 'Cabin', 'Deck']].head(2).T\n\n\n# In[ ]:\n\n\n# count passengers by the deck their cabin is on\nplt.subplots(figsize=(8,6))\nplt.subplot(211) \nsns.countplot(x='Deck', data=train_df)\n\n# survival rate by deck\nplt.subplot(212)\nsns.barplot(x='Deck', y='Survived', data=train_df) \n\n\n# ## Other attributes ##\n# For this first attempt, I am ignoring the attributes below as they seem unlikely to be related to survival:\n# \n# - PassengerId\n# - Name (however, extracting titles from names might be informative)\n# - Ticket\n# - Fare (could be related to socioeconomic status but we already have a class attribute)\n# - Embarked\n\n# # Data wrangling - Age group#\n# \n# Fill missing age group values. We don't want to drop them as this would lose many rows. 
Instead, we will randomly generate age groups according to the frequency that they occur in the data. We will calculate the frequency separately for males and females.\n\n# In[ ]:\n\n\n# number of males/females without an age\ndef get_na(dataset):\n na_males = dataset[dataset.Sex == 'male'].loc[:,'AgeGroup'].isnull().sum()\n na_females = dataset[dataset.Sex == 'female'].loc[:,'AgeGroup'].isnull().sum()\n return {'male': na_males, 'female': na_females}\n\n# number of males and females by age group\ndef get_counts(dataset):\n return dataset.groupby(['Sex', 'AgeGroup']).size()\n\n# randomly generate a list of age groups based on age group frequency (for each sex separately) \ndef generate_age_groups(num, freq):\n age_groups = {}\n for sex in ['male','female']:\n relfreq = freq[sex] / freq[sex].sum()\n age_groups[sex] = np.random.choice(freq[sex].index, size=num[sex], replace=True, p=relfreq) \n return age_groups\n\n# insert the new age group values\ndef insert_age_group_values(dataset, age_groups):\n for sex in ['male','female']:\n tmp = pd.DataFrame(dataset[(dataset.Sex == sex) & dataset.Age.isnull()]) # filter on sex and null ages \n tmp['AgeGroup'] = age_groups[sex] # index age group values\n dataset = dataset.combine_first(tmp) # uses tmp to fill holes\n return dataset\n\n# fill holes for train_df\nna = get_na(train_df)\ncounts = get_counts(train_df)\ncounts['female']\nage_groups = generate_age_groups(na, counts)\nage_groups['female']\ntrain_df = insert_age_group_values(train_df, age_groups)\ntrain_df.info() # check all nulls have been filled \nprint('-'*40)\n\n# repeat for test_df\nna = get_na(test_df)\ncounts = get_counts(train_df) # reuse the frequencies taken over the training data as it is larger\nage_groups = generate_age_groups(na, counts)\ntest_df = insert_age_group_values(test_df, age_groups)\ntest_df.info() # check all nulls have been filled \n\n\n# # Feature engineering #\n# \n# Now that we've explored the data let's create some features:\n# \n# - **Sex:** Convert to a single binary feature, Female. No need to create a feature for Male, that would be redundant.\n# - **Pclass:** Convert to two binary features, PClass_1 and PClass_2. Similar to Male above, having a PClass_3 would be redundant.\n# - **Age group:** The age attribute binned using separators [0, 4, 15, 25, 35, 45, 65, 100]. 
Convert to a number of binary features, one for each age group.\n# - **Family size:** The sum of SibSp and Parch plus 1.\n\n# In[ ]:\n\n\n# Sex -> Female\n\n# training set\ndummy = pd.get_dummies(train_df['Sex'])\ndummy.columns = ['Female','Male']\ntrain_df = train_df.join(dummy['Female'])\n\n# test set\ndummy = pd.get_dummies(test_df['Sex'])\ndummy.columns = ['Female','Male']\ntest_df = test_df.join(dummy['Female'])\n\ntrain_df[['Name', 'Sex', 'Female']].head(2).T\n#train_df.columns\n\n\n# In[ ]:\n\n\n# Pclass -> PClass_1, PClass_2\n\n# training set\ndummy = pd.get_dummies(train_df['Pclass'])\ndummy.columns = ['PClass_1','PClass_2','PClass_3']\ntrain_df = train_df.join(dummy[['PClass_1', 'PClass_2']])\n\n# test set\ndummy = pd.get_dummies(test_df['Pclass'])\ndummy.columns = ['PClass_1','PClass_2','PClass_3']\ntest_df = test_df.join(dummy[['PClass_1', 'PClass_2']])\n\ntrain_df[['Name', 'Pclass', 'PClass_1', 'PClass_2']].head(2).T\n#train_df.columns\n\n\n# In[ ]:\n\n\n# AgeGroup -> binary features\n\n# training set\ndummy = pd.get_dummies(train_df['AgeGroup'])\ndummy.columns = ['Ages_4','Ages_15','Ages_25','Ages_35','Ages_45','Ages_65','Ages_100']\ntrain_df = train_df.join(dummy)\n\n# test set\ndummy = pd.get_dummies(test_df['AgeGroup'])\ndummy.columns = ['Ages_4','Ages_15','Ages_25','Ages_35','Ages_45','Ages_65','Ages_100']\ntest_df = test_df.join(dummy)\n\n\n# ## Experimental features ##\n# Some additional features to explore.\n\n# In[ ]:\n\n\n# Fare\n\n# there is a single missing \"Fare\" value\ntest_df['Fare'].fillna(test_df['Fare'].median(), inplace=True)\n\n# convert from float to int (floor)\n#train_df['Fare'] = train_df['Fare'].astype(int)\n#test_df['Fare'] = test_df['Fare'].astype(int)\n\n\n# In[ ]:\n\n\n# Embarked -> PortC, PortQ\n\n# Fill missing values with the most occurred value\nprint(train_df.groupby('Embarked').size().sort_values())\ntrain_df['Embarked'] = train_df['Embarked'].fillna('S')\n\n# training set\ndummy = pd.get_dummies(train_df['Embarked'])\n#dummy.columns\ndummy.columns = ['Port_C','Port_Q','Port_S']\n#train_df = train_df.join(dummy[['Port_C','Port_Q']])\n\n# test set\ndummy = pd.get_dummies(test_df['Embarked'])\ndummy.columns = ['Port_C','Port_Q','Port_S']\n#test_df = test_df.join(dummy[['Port_C','Port_Q']])\n\n\n# ## Dropping attributes ##\n# Drop unused attributes to avoid detecting spurious relationships.\n\n# In[ ]:\n\n\n# drop the attributes that will be unused\ntrain_df.drop(['PassengerId', 'Pclass', 'Name', 'Sex', 'Age', \n 'SibSp', 'Parch', 'Ticket', 'Cabin', 'Fare', \n 'Embarked', 'Deck', 'AgeGroup'], axis=1, inplace=True)\n\ntest_df.drop(['Pclass', 'Name', 'Sex', 'Age', \n 'SibSp', 'Parch', 'Ticket', 'Cabin', 'Fare',\n 'Embarked', 'AgeGroup'], axis=1, inplace=True)\n\ntrain_df.head(10).T\n\n\n# The sample above shows the features and their values for the first ten training examples.\n\n# # Modeling #\n# \n# Our task is a binary classification problem: we want to formulate a relationship that predicts an output (Survived or not) from engineered features (Sex, Age group, Family size...). This is type of learning is supervised learning, since a model will be trained on a dataset containing pairs of inputs and outputs. 
\n# \n# Suitable methods for performing classification include:\n# \n# - Logistic Regression*\n# - Perceptron*\n# - Support Vector Machines (SVMs)* \n# - Naive Bayes classifier* \n# - KNN or k-Nearest Neighbors\n# - Decision Tree\n# - Random Forrest\n# - Artificial neural network\n# - Relevance Vector Machine\n# \n# The methods marked * either discover linear classification boundaries (logistic regression, perceptron, and SVMs if using linear kernels) or assume no relationship between features (naive bayes) and thus are not expected to perform as well (see the section above on the relationship between survival, age group and sex).\n\n# ## Training data ##\n# Let's use cross validation to perform the evaluation. This method will give a reasonable indication of predictive accuracy as evaluation will take place on data that is not seen during training. The package **`sklearn.model_selection`** includes support for cross validation.\n\n# In[ ]:\n\n\n# split the datasets into matched input and ouput pairs\nX_train = train_df.drop(\"Survived\", axis=1) # X = inputs\nY_train = train_df[\"Survived\"] # Y = outputs\nX_test = test_df.drop(\"PassengerId\", axis=1).copy()\nX_train.shape, Y_train.shape, X_test.shape\n\n\n# Model fitting\n# ----------\n# (Some of this section is based on [this titanic tutorial][1].)\n# \n# Logistic Regression is a useful model to run early in the workflow. Logistic regression measures the relationship between the categorical dependent variable (feature) and one or more independent variables (features) by estimating probabilities using a logistic function, which is the cumulative logistic distribution. See [Logistic regression on Wikipedia][2].\n# \n# Note the confidence score generated by the model based on our training dataset.\n# \n# \n# [1]: https://www.kaggle.com/startupsci/titanic/titanic-data-science-solutions\n# [2]: https://en.wikipedia.org/wiki/Logistic_regression\n\n# In[ ]:\n\n\n# Logistic Regression\n\nlogreg = LogisticRegression()\nscores = cross_val_score(logreg, X_train, Y_train, cv=10)\nacc_log = round(scores.mean() * 100, 2)\nacc_log\n#Y_pred = logreg.predict(X_test)\n\n\n# We can use Logistic Regression to validate our assumptions and decisions for feature creating and completing goals. 
This can be done by calculating the coefficient of the features in the decision function.\n# Positive coefficients increase the log-odds of the response (and thus increase the probability), and negative coefficients decrease the log-odds of the response (and thus decrease the probability).\n\n# In[ ]:\n\n\nlogreg.fit(X_train, Y_train)\ncoeff_df = pd.DataFrame(train_df.columns.delete(0))\ncoeff_df.columns = ['Feature']\ncoeff_df[\"Correlation\"] = pd.Series(logreg.coef_[0])\n\ncoeff_df.sort_values(by='Correlation', ascending=False)\n\n\n# In[ ]:\n\n\n# Gaussian Naive Bayes\n\ngaussian = GaussianNB()\nscores = cross_val_score(gaussian, X_train, Y_train, cv=10)\nacc_gaussian = round(scores.mean() * 100, 2)\nacc_gaussian\n\n\n# In[ ]:\n\n\n# Perceptron (a single layer neural net)\n\nperceptron = Perceptron()\nscores = cross_val_score(perceptron, X_train, Y_train, cv=10)\nacc_perceptron = round(scores.mean() * 100, 2)\nacc_perceptron\n\n\n# In[ ]:\n\n\n# Neural Network (a multi layer neural net)\n\nneural_net = MLPClassifier()\nscores = cross_val_score(neural_net, X_train, Y_train, cv=10)\nacc_neural_net = round(scores.mean() * 100, 2)\nacc_neural_net\n\n\n# In[ ]:\n\n\n# Stochastic Gradient Descent\n\nsgd = SGDClassifier()\nscores = cross_val_score(sgd, X_train, Y_train, cv=10)\nacc_sgd = round(scores.mean() * 100, 2)\nacc_sgd\n\n\n# In[ ]:\n\n\n# Linear SVC\n\nlinear_svc = LinearSVC()\nscores = cross_val_score(linear_svc, X_train, Y_train, cv=10)\nacc_linear_svc = round(scores.mean() * 100, 2)\nacc_linear_svc\n\n\n# In[ ]:\n\n\n# Support Vector Machine\n\nsvc = SVC() # uses a rbf kernel by default (i.e. can discover non-linear boundaries)\nscores = cross_val_score(svc, X_train, Y_train, cv=10)\nacc_svc = round(scores.mean() * 100, 2)\nacc_svc\n\n\n# In[ ]:\n\n\n# Decision Tree\n\ndecision_tree = DecisionTreeClassifier()\nscores = cross_val_score(decision_tree, X_train, Y_train, cv=10)\nacc_decision_tree = round(scores.mean() * 100, 2)\nacc_decision_tree\n\n\n# In[ ]:\n\n\n# Random Forest - an ensemble model\n\nrandom_forest = RandomForestClassifier(n_estimators=100)\nscores = cross_val_score(random_forest, X_train, Y_train, cv=10)\nacc_random_forest = round(scores.mean() * 100, 2)\nacc_random_forest\n\n\n# In[ ]:\n\n\n# AdaBoost - an ensemble method\n\nada_boost = AdaBoostClassifier(n_estimators=100)\nscores = cross_val_score(ada_boost, X_train, Y_train, cv=10)\nacc_ada_boost = round(scores.mean() * 100, 2)\nacc_ada_boost\n\n\n# In[ ]:\n\n\n# k-Nearest Neighbors - a non-parametric method\n\nknn = KNeighborsClassifier(n_neighbors = 5)\nscores = cross_val_score(knn, X_train, Y_train, cv=10)\nacc_knn = round(scores.mean() * 100, 2)\nacc_knn\n\n\n# Model evaluation\n# ----------------\n# \n# We now rank the models and choose a high performing one for our problem. The Support Vector Machine consistently tops the chart. 
\n# \n# Decision Tree and Random Forest also both score high, but we prefer Random Forest as it avoids overfitting to the training set better than a decision tree and is therefore likely to perform better on the test dataset.\n\n# In[ ]:\n\n\nmodels = pd.DataFrame({\n 'Model': ['Support Vector Machine', 'kNN', 'Logistic Regression', \n 'Random Forest', 'Naive Bayes', 'Perceptron', \n 'Stochastic Gradient Descent', 'Linear SVC', \n 'Decision Tree', 'AdaBoost', 'Neural Network'],\n 'Score': [acc_svc, acc_knn, acc_log, \n acc_random_forest, acc_gaussian, acc_perceptron, \n acc_sgd, acc_linear_svc, acc_decision_tree, \n acc_ada_boost, acc_neural_net]})\nmodels.sort_values(by='Score', ascending=False)\n\n\n# In[ ]:\n\n\n# using random forest for submission\nrandom_forest.fit(X_train, Y_train)\nY_pred = random_forest.predict(X_test)\n\nsubmission = pd.DataFrame({\n \"PassengerId\": test_df[\"PassengerId\"],\n \"Survived\": Y_pred\n })\nsubmission.to_csv('titanic_submission_1.csv', index=False)\n#pd.set_option('display.max_rows', len(submission))\n#submission\n\n\n# Use cross validation to assess predictive accuracy\n# --------------------------------------------------\n# \n# We can easily improve the above scores by evaluating on the training data (compare the random forest scores above and below). However, scores produced like this are not truly indicative of predictive accuracy and should be avoided. To see why, consider that a classifier that simply memorizes each input and output pair will score perfectly but be unable to generalise to other examples. \n# \n\n# In[ ]:\n\n\n# Random Forest : scoring on training data\n\nrandom_forest = RandomForestClassifier(n_estimators=100)\nrandom_forest.fit(X_train, Y_train)\nacc_random_forest = round(random_forest.score(X_train, Y_train) * 100, 2)\nacc_random_forest\n\n\n# What next? \n# -------------------------------\n# \n# **_More feature exploration:_**\n# Including *Fare* significantly increases the best accuracy to about 92% when *fare* is floored and 94% otherwise. Additionally including *Embarked* brings it up to 95%. It may worth be investigating if any relationship between these attributes and survival can be detected, especially for *fare*.\n# \n# Other possibilities for features include *Deck* and *Title*, which can be extracted from *Cabin* and *Name* respectively.\n# \n# Could also try two or more overlapping binnings for age groups (e.g. bins as defined by cutting on [0,4,15,25,35,45,65,100] and [10,20,30,40,55,100]). If going down this path, focus on introducing extra bins for age groups that contain many passengers and have a steeper gradient on the survival curve (such as for the twenties, e.g. cut on [10,20,30]).\n# \n# **_Refitting:_**\n# Most of the models above used their default parameters. Choose a few promising models and attempt to optimize their (hyper-)parameters. The sklearn library used above offers a couple of ways to do this automatically (via grid search and cross-validated models, see [Model selection][1] and [Tuning the hyper-parameters of an estimator][2]).\n# \n# \n# [1]: http://scikit-learn.org/stable/tutorial/statistical_inference/model_selection.html\n# [2]: http://scikit-learn.org/stable/modules/grid_search.html#grid-search\n",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
##############################################################################
#
# Copyright (c) 2005 Nexedi SARL and Contributors. All Rights Reserved.
# Yoshinori Okuji <[email protected]>
# Christophe Dumez <[email protected]>
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsibility of assessing all potential
# consequences resulting from its eventual inadequacies and bugs
# End users who are looking for a ready-to-use solution with commercial
# guarantees and support are strongly advised to contract a Free Software
# Service Company
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
##############################################################################
"""
Provide a feature not present into difflib, which is generate a colored diff
from a diff file/string.
This code is original form ERP5VCS and was moved to here for be used in
general ERP5.
XXX The organisation of DiffUtils should be reviewed and reorganised in a tool
if a general tool want to be provided.
"""
import os, re
from xml.sax.saxutils import escape
NBSP = ' '
NBSP_TAB = NBSP*8
NO_DIFF_COLOR = 'white'
MODIFIED_DIFF_COLOR = 'rgb(253, 228, 6);'#light orange
DELETED_DIFF_COLOR = 'rgb(253, 117, 74);'#light red
ADDITION_DIFF_COLOR = 'rgb(83, 253, 74);'#light green
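# Typical usage (a minimal sketch, not part of the original module; it assumes
# `raw_diff` holds an svn-style unified diff string):
#
#   diff_file = DiffFile(raw_diff)
#   html = diff_file.toHTML()                    # colored HTML table
#   blocks = diff_file.getModifiedBlockList()    # [(CodeBlock, (old, new)), ...]
#
# A runnable example is sketched in the __main__ block at the end of this file.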
class DiffFile(object):
"""
# Members :
- path : path of the modified file
- children : sub codes modified
- old_revision
- new_revision
"""
def __init__(self, raw_diff):
self.children = []
self.binary = raw_diff and '@@' not in raw_diff
if self.binary or not raw_diff:
return
self.header = raw_diff.split('@@')[0][:-1]
# Getting file path in header
self.path = self.header.split('====')[0][:-1].strip()
# Getting revisions in header
for line in self.header.splitlines():
if line.startswith('--- '):
tmp = re.search('\\([^)]+\\)$', line)
if tmp is not None:
self.old_revision = tmp.string[tmp.start():tmp.end()][1:-1].strip()
else:
self.old_revision = line.replace("--- ", "")
if line.startswith('+++ '):
tmp = re.search('\\([^)]+\\)$', line)
if tmp is not None:
self.new_revision = tmp.string[tmp.start():tmp.end()][1:-1].strip()
else:
self.new_revision = line.replace("+++ ", "")
# Splitting the body from the header
self.body = os.linesep.join(raw_diff.strip().splitlines()[3:])
if not self.body.startswith('@@'):
self.body = os.linesep.join(raw_diff.strip().splitlines()[4:])
# Now splitting modifications
first = True
tmp = []
for line in self.body.splitlines():
if line:
if line.startswith('@@') and not first:
self.children.append(CodeBlock(os.linesep.join(tmp)))
tmp = [line, ]
else:
first = False
tmp.append(line)
self.children.append(CodeBlock(os.linesep.join(tmp)))
def __nonzero__(self):
return self.binary or bool(self.children)
def __len__(self):
return len(self.children)
toHTML__roles__ = None # public
def toHTML(self):
""" return HTML diff
"""
# Adding header of the table
if self.binary:
return '<b>Folder or binary file or just no changes!</b><br/><br/><br/>'
if not self:
return ''
html_list = []
html_list.append('''
<table style="text-align: left; width: 100%%; border: 0;" cellpadding="0" cellspacing="0">
<tbody>
<tr>
<td style="background-color: grey; text-align: center; font-weight: bold;">%s</td>
<td style="background-color: black; width: 2px;"></td>
<td style="background-color: grey; text-align: center; font-weight: bold;">%s</td>
</tr>''' % (self.old_revision, self.new_revision))
header_color = 'grey'
child_html_text = '''<tr><td style="background-color: %(headcolor)s">
</td><td style="background-color: black; width: 2px;"></td>
<td style="background-color: %(headcolor)s"> </td></tr><tr>
<td style="background-color: rgb(68, 132, 255);font-weight: bold;">Line %(oldline)s</td>
<td style="background-color: black; width: 2px;"></td>
<td style="background-color: rgb(68, 132, 255);font-weight: bold;">Line %(newline)s</td>
</tr>'''
for child in self.children:
# Adding line number of the modification
html_list.append( child_html_text % {'headcolor':header_color, 'oldline':child.old_line, 'newline':child.new_line} )
header_color = 'white'
# Adding diff of the modification
old_code_list = child.getOldCodeList()
new_code_list = child.getNewCodeList()
i = 0
for old_line_tuple in old_code_list:
new_line_tuple = new_code_list[i]
new_line = new_line_tuple[0] or ' '
old_line = old_line_tuple[0] or ' '
i += 1
html_list.append( '''<tr style="font-family: monospace">
<td style="background-color: %s">%s</td>
<td style="background-color: black; width: 2px;"></td>
<td style="background-color: %s">%s</td>
</tr>'''%(old_line_tuple[1],
escape(old_line).replace(' ', NBSP).replace('\t', NBSP_TAB),
new_line_tuple[1],
escape(new_line).replace(' ', NBSP).replace('\t', NBSP_TAB))
)
html_list.append('''</tbody></table><br/>''')
return '\n'.join(html_list)
def getModifiedBlockList(self):
"""
Return a list of modified blocks
List contains tuples (block object : (old_modified_code, new_modified_code))
"""
if self.binary:
return []
block_list = []
for child in self.children:
old_line_list = [line.strip() for line, color in child.getOldCodeList()
if line is not None and color in (MODIFIED_DIFF_COLOR,
DELETED_DIFF_COLOR)]
new_line_list = [line.strip() for line, color in child.getNewCodeList()
if line is not None and color in (MODIFIED_DIFF_COLOR,
ADDITION_DIFF_COLOR)]
if old_line_list or new_line_list:
block_list.append((child,(old_line_list, new_line_list)))
return block_list
class CodeBlock:
"""
A code block contains several SubCodeBlocks
Members :
- old_line : line in old code (before modif)
   - new_line : line in new code (after modif)
Methods :
- getOldCodeList() : return code before modif
- getNewCodeList() : return code after modif
Note: the code returned is a list of tuples (code line, background color)
"""
def __init__(self, raw_diff):
# Splitting body and header
self.body = os.linesep.join(raw_diff.splitlines()[1:])
self.header = raw_diff.splitlines()[0]
# Getting modifications lines
    tmp = re.search(r'^@@ -\d+', self.header)
self.old_line = tmp.string[tmp.start():tmp.end()][4:]
    tmp = re.search(r'\+\d+', self.header)
self.new_line = tmp.string[tmp.start():tmp.end()][1:]
# Splitting modifications in SubCodeBlocks
in_modif = False
self.children = []
tmp = []
for line in self.body.splitlines():
if line:
if (line.startswith('+') or line.startswith('-')):
if in_modif:
tmp.append(line)
else:
self.children.append(SubCodeBlock(os.linesep.join(tmp)))
tmp = [line, ]
in_modif = True
else:
if in_modif:
self.children.append(SubCodeBlock(os.linesep.join(tmp)))
tmp = [line, ]
in_modif = False
else:
tmp.append(line)
self.children.append(SubCodeBlock(os.linesep.join(tmp)))
def getOldCodeList(self):
""" Return code before modification
"""
tmp = []
for child in self.children:
tmp.extend(child.getOldCodeList())
return tmp
def getNewCodeList(self):
""" Return code after modification
"""
tmp = []
for child in self.children:
tmp.extend(child.getNewCodeList())
return tmp
class SubCodeBlock:
""" a SubCodeBlock contain 0 or 1 modification (not more)
"""
def __init__(self, code):
self.body = code
self.modification = self._getModif()
self.old_code_length = self._getOldCodeLength()
self.new_code_length = self._getNewCodeLength()
# Choosing background color
if self.modification == 'none':
self.color = NO_DIFF_COLOR
elif self.modification == 'change':
self.color = MODIFIED_DIFF_COLOR
elif self.modification == 'deletion':
self.color = DELETED_DIFF_COLOR
else: # addition
self.color = ADDITION_DIFF_COLOR
def _getModif(self):
""" Return type of modification :
addition, deletion, none
"""
nb_plus = 0
nb_minus = 0
for line in self.body.splitlines():
if line.startswith("-"):
nb_minus -= 1
elif line.startswith("+"):
nb_plus += 1
if (nb_plus == 0 and nb_minus == 0):
return 'none'
if (nb_minus == 0):
return 'addition'
if (nb_plus == 0):
return 'deletion'
return 'change'
def _getOldCodeLength(self):
""" Private function to return old code length
"""
nb_lines = 0
for line in self.body.splitlines():
if not line.startswith("+"):
nb_lines += 1
return nb_lines
def _getNewCodeLength(self):
""" Private function to return new code length
"""
nb_lines = 0
for line in self.body.splitlines():
if not line.startswith("-"):
nb_lines += 1
return nb_lines
def getOldCodeList(self):
""" Return code before modification
"""
if self.modification == 'none':
old_code = [(x, 'white') for x in self.body.splitlines()]
elif self.modification == 'change':
old_code = [self._getOldCodeList(x) for x in self.body.splitlines() \
if self._getOldCodeList(x)[0]]
# we want old_code_list and new_code_list to have the same length
if(self.old_code_length < self.new_code_length):
filling = [(None, self.color)] * (self.new_code_length - \
self.old_code_length)
old_code.extend(filling)
else: # deletion or addition
old_code = [self._getOldCodeList(x) for x in self.body.splitlines()]
return old_code
def _getOldCodeList(self, line):
""" Private function to return code before modification
"""
if line.startswith('+'):
return (None, self.color)
if line.startswith('-'):
return (' ' + line[1:], self.color)
return (line, self.color)
def getNewCodeList(self):
""" Return code after modification
"""
if self.modification == 'none':
new_code = [(x, 'white') for x in self.body.splitlines()]
elif self.modification == 'change':
new_code = [self._getNewCodeList(x) for x in self.body.splitlines() \
if self._getNewCodeList(x)[0]]
# we want old_code_list and new_code_list to have the same length
if(self.new_code_length < self.old_code_length):
filling = [(None, self.color)] * (self.old_code_length - \
self.new_code_length)
new_code.extend(filling)
else: # deletion or addition
new_code = [self._getNewCodeList(x) for x in self.body.splitlines()]
return new_code
def _getNewCodeList(self, line):
""" Private function to return code after modification
"""
if line.startswith('-'):
return (None, self.color)
if line.startswith('+'):
return (' ' + line[1:], self.color)
return (line, self.color)
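
# A minimal, self-contained usage example (an assumed sketch, not part of the
# original module): it builds an svn-style unified diff string by hand, parses
# it with DiffFile and prints the HTML rendering and the modified blocks.
if __name__ == '__main__':
  sample_diff = '\n'.join([
    'Index: example.py',  # hypothetical file name, for illustration only
    '===================================================================',
    '--- example.py\t(revision 1)',
    '+++ example.py\t(revision 2)',
    '@@ -1,3 +1,3 @@',
    ' first line',
    '-old middle line',
    '+new middle line',
    ' last line',
  ])
  diff_file = DiffFile(sample_diff)
  print(diff_file.toHTML())
  for block, (old_lines, new_lines) in diff_file.getModifiedBlockList():
    print('%s -> %s' % (old_lines, new_lines))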
|
normal
|
{
"blob_id": "ffb6379f2f2611fd8aa73f3a3c15fed4550d348f",
"index": 5920,
"step-1": "<mask token>\n\n\nclass CodeBlock:\n <mask token>\n\n def __init__(self, raw_diff):\n self.body = os.linesep.join(raw_diff.splitlines()[1:])\n self.header = raw_diff.splitlines()[0]\n tmp = re.search('^@@ -\\\\d+', self.header)\n self.old_line = tmp.string[tmp.start():tmp.end()][4:]\n tmp = re.search('\\\\+\\\\d+', self.header)\n self.new_line = tmp.string[tmp.start():tmp.end()][1:]\n in_modif = False\n self.children = []\n tmp = []\n for line in self.body.splitlines():\n if line:\n if line.startswith('+') or line.startswith('-'):\n if in_modif:\n tmp.append(line)\n else:\n self.children.append(SubCodeBlock(os.linesep.join(tmp))\n )\n tmp = [line]\n in_modif = True\n elif in_modif:\n self.children.append(SubCodeBlock(os.linesep.join(tmp)))\n tmp = [line]\n in_modif = False\n else:\n tmp.append(line)\n self.children.append(SubCodeBlock(os.linesep.join(tmp)))\n <mask token>\n\n def getNewCodeList(self):\n \"\"\" Return code after modification\n \"\"\"\n tmp = []\n for child in self.children:\n tmp.extend(child.getNewCodeList())\n return tmp\n\n\nclass SubCodeBlock:\n \"\"\" a SubCodeBlock contain 0 or 1 modification (not more)\n \"\"\"\n\n def __init__(self, code):\n self.body = code\n self.modification = self._getModif()\n self.old_code_length = self._getOldCodeLength()\n self.new_code_length = self._getNewCodeLength()\n if self.modification == 'none':\n self.color = NO_DIFF_COLOR\n elif self.modification == 'change':\n self.color = MODIFIED_DIFF_COLOR\n elif self.modification == 'deletion':\n self.color = DELETED_DIFF_COLOR\n else:\n self.color = ADDITION_DIFF_COLOR\n\n def _getModif(self):\n \"\"\" Return type of modification :\n addition, deletion, none\n \"\"\"\n nb_plus = 0\n nb_minus = 0\n for line in self.body.splitlines():\n if line.startswith('-'):\n nb_minus -= 1\n elif line.startswith('+'):\n nb_plus += 1\n if nb_plus == 0 and nb_minus == 0:\n return 'none'\n if nb_minus == 0:\n return 'addition'\n if nb_plus == 0:\n return 'deletion'\n return 'change'\n\n def _getOldCodeLength(self):\n \"\"\" Private function to return old code length\n \"\"\"\n nb_lines = 0\n for line in self.body.splitlines():\n if not line.startswith('+'):\n nb_lines += 1\n return nb_lines\n\n def _getNewCodeLength(self):\n \"\"\" Private function to return new code length\n \"\"\"\n nb_lines = 0\n for line in self.body.splitlines():\n if not line.startswith('-'):\n nb_lines += 1\n return nb_lines\n\n def getOldCodeList(self):\n \"\"\" Return code before modification\n \"\"\"\n if self.modification == 'none':\n old_code = [(x, 'white') for x in self.body.splitlines()]\n elif self.modification == 'change':\n old_code = [self._getOldCodeList(x) for x in self.body.\n splitlines() if self._getOldCodeList(x)[0]]\n if self.old_code_length < self.new_code_length:\n filling = [(None, self.color)] * (self.new_code_length -\n self.old_code_length)\n old_code.extend(filling)\n else:\n old_code = [self._getOldCodeList(x) for x in self.body.splitlines()\n ]\n return old_code\n\n def _getOldCodeList(self, line):\n \"\"\" Private function to return code before modification\n \"\"\"\n if line.startswith('+'):\n return None, self.color\n if line.startswith('-'):\n return ' ' + line[1:], self.color\n return line, self.color\n\n def getNewCodeList(self):\n \"\"\" Return code after modification\n \"\"\"\n if self.modification == 'none':\n new_code = [(x, 'white') for x in self.body.splitlines()]\n elif self.modification == 'change':\n new_code = [self._getNewCodeList(x) for x in self.body.\n splitlines() if 
self._getNewCodeList(x)[0]]\n if self.new_code_length < self.old_code_length:\n filling = [(None, self.color)] * (self.old_code_length -\n self.new_code_length)\n new_code.extend(filling)\n else:\n new_code = [self._getNewCodeList(x) for x in self.body.splitlines()\n ]\n return new_code\n\n def _getNewCodeList(self, line):\n \"\"\" Private function to return code after modification\n \"\"\"\n if line.startswith('-'):\n return None, self.color\n if line.startswith('+'):\n return ' ' + line[1:], self.color\n return line, self.color\n",
"step-2": "<mask token>\n\n\nclass DiffFile(object):\n <mask token>\n <mask token>\n <mask token>\n\n def __len__(self):\n return len(self.children)\n <mask token>\n\n def toHTML(self):\n \"\"\" return HTML diff\n \"\"\"\n if self.binary:\n return (\n '<b>Folder or binary file or just no changes!</b><br/><br/><br/>'\n )\n if not self:\n return ''\n html_list = []\n html_list.append(\n \"\"\"\n <table style=\"text-align: left; width: 100%%; border: 0;\" cellpadding=\"0\" cellspacing=\"0\">\n <tbody>\n <tr>\n <td style=\"background-color: grey; text-align: center; font-weight: bold;\">%s</td>\n <td style=\"background-color: black; width: 2px;\"></td>\n <td style=\"background-color: grey; text-align: center; font-weight: bold;\">%s</td>\n </tr>\"\"\"\n % (self.old_revision, self.new_revision))\n header_color = 'grey'\n child_html_text = \"\"\"<tr><td style=\"background-color: %(headcolor)s\">\n </td><td style=\"background-color: black; width: 2px;\"></td>\n <td style=\"background-color: %(headcolor)s\"> </td></tr><tr>\n <td style=\"background-color: rgb(68, 132, 255);font-weight: bold;\">Line %(oldline)s</td>\n <td style=\"background-color: black; width: 2px;\"></td>\n <td style=\"background-color: rgb(68, 132, 255);font-weight: bold;\">Line %(newline)s</td>\n </tr>\"\"\"\n for child in self.children:\n html_list.append(child_html_text % {'headcolor': header_color,\n 'oldline': child.old_line, 'newline': child.new_line})\n header_color = 'white'\n old_code_list = child.getOldCodeList()\n new_code_list = child.getNewCodeList()\n i = 0\n for old_line_tuple in old_code_list:\n new_line_tuple = new_code_list[i]\n new_line = new_line_tuple[0] or ' '\n old_line = old_line_tuple[0] or ' '\n i += 1\n html_list.append(\n \"\"\"<tr style=\"font-family: monospace\">\n <td style=\"background-color: %s\">%s</td>\n <td style=\"background-color: black; width: 2px;\"></td>\n <td style=\"background-color: %s\">%s</td>\n </tr>\"\"\"\n % (old_line_tuple[1], escape(old_line).replace(' ',\n NBSP).replace('\\t', NBSP_TAB), new_line_tuple[1],\n escape(new_line).replace(' ', NBSP).replace('\\t',\n NBSP_TAB)))\n html_list.append('</tbody></table><br/>')\n return '\\n'.join(html_list)\n\n def getModifiedBlockList(self):\n \"\"\"\n Return a list of modified blocks\n List contains tuples (block object : (old_modified_code, new_modified_code))\n \"\"\"\n if self.binary:\n return []\n block_list = []\n for child in self.children:\n old_line_list = [line.strip() for line, color in child.\n getOldCodeList() if line is not None and color in (\n MODIFIED_DIFF_COLOR, DELETED_DIFF_COLOR)]\n new_line_list = [line.strip() for line, color in child.\n getNewCodeList() if line is not None and color in (\n MODIFIED_DIFF_COLOR, ADDITION_DIFF_COLOR)]\n if old_line_list or new_line_list:\n block_list.append((child, (old_line_list, new_line_list)))\n return block_list\n\n\nclass CodeBlock:\n \"\"\"\n A code block contains several SubCodeBlocks\n Members :\n - old_line : line in old code (before modif)\n - new line : line in new code (after modif)\n\n Methods :\n - getOldCodeList() : return code before modif\n - getNewCodeList() : return code after modif\n Note: the code returned is a list of tuples (code line, background color)\n \"\"\"\n\n def __init__(self, raw_diff):\n self.body = os.linesep.join(raw_diff.splitlines()[1:])\n self.header = raw_diff.splitlines()[0]\n tmp = re.search('^@@ -\\\\d+', self.header)\n self.old_line = tmp.string[tmp.start():tmp.end()][4:]\n tmp = re.search('\\\\+\\\\d+', self.header)\n self.new_line = 
tmp.string[tmp.start():tmp.end()][1:]\n in_modif = False\n self.children = []\n tmp = []\n for line in self.body.splitlines():\n if line:\n if line.startswith('+') or line.startswith('-'):\n if in_modif:\n tmp.append(line)\n else:\n self.children.append(SubCodeBlock(os.linesep.join(tmp))\n )\n tmp = [line]\n in_modif = True\n elif in_modif:\n self.children.append(SubCodeBlock(os.linesep.join(tmp)))\n tmp = [line]\n in_modif = False\n else:\n tmp.append(line)\n self.children.append(SubCodeBlock(os.linesep.join(tmp)))\n\n def getOldCodeList(self):\n \"\"\" Return code before modification\n \"\"\"\n tmp = []\n for child in self.children:\n tmp.extend(child.getOldCodeList())\n return tmp\n\n def getNewCodeList(self):\n \"\"\" Return code after modification\n \"\"\"\n tmp = []\n for child in self.children:\n tmp.extend(child.getNewCodeList())\n return tmp\n\n\nclass SubCodeBlock:\n \"\"\" a SubCodeBlock contain 0 or 1 modification (not more)\n \"\"\"\n\n def __init__(self, code):\n self.body = code\n self.modification = self._getModif()\n self.old_code_length = self._getOldCodeLength()\n self.new_code_length = self._getNewCodeLength()\n if self.modification == 'none':\n self.color = NO_DIFF_COLOR\n elif self.modification == 'change':\n self.color = MODIFIED_DIFF_COLOR\n elif self.modification == 'deletion':\n self.color = DELETED_DIFF_COLOR\n else:\n self.color = ADDITION_DIFF_COLOR\n\n def _getModif(self):\n \"\"\" Return type of modification :\n addition, deletion, none\n \"\"\"\n nb_plus = 0\n nb_minus = 0\n for line in self.body.splitlines():\n if line.startswith('-'):\n nb_minus -= 1\n elif line.startswith('+'):\n nb_plus += 1\n if nb_plus == 0 and nb_minus == 0:\n return 'none'\n if nb_minus == 0:\n return 'addition'\n if nb_plus == 0:\n return 'deletion'\n return 'change'\n\n def _getOldCodeLength(self):\n \"\"\" Private function to return old code length\n \"\"\"\n nb_lines = 0\n for line in self.body.splitlines():\n if not line.startswith('+'):\n nb_lines += 1\n return nb_lines\n\n def _getNewCodeLength(self):\n \"\"\" Private function to return new code length\n \"\"\"\n nb_lines = 0\n for line in self.body.splitlines():\n if not line.startswith('-'):\n nb_lines += 1\n return nb_lines\n\n def getOldCodeList(self):\n \"\"\" Return code before modification\n \"\"\"\n if self.modification == 'none':\n old_code = [(x, 'white') for x in self.body.splitlines()]\n elif self.modification == 'change':\n old_code = [self._getOldCodeList(x) for x in self.body.\n splitlines() if self._getOldCodeList(x)[0]]\n if self.old_code_length < self.new_code_length:\n filling = [(None, self.color)] * (self.new_code_length -\n self.old_code_length)\n old_code.extend(filling)\n else:\n old_code = [self._getOldCodeList(x) for x in self.body.splitlines()\n ]\n return old_code\n\n def _getOldCodeList(self, line):\n \"\"\" Private function to return code before modification\n \"\"\"\n if line.startswith('+'):\n return None, self.color\n if line.startswith('-'):\n return ' ' + line[1:], self.color\n return line, self.color\n\n def getNewCodeList(self):\n \"\"\" Return code after modification\n \"\"\"\n if self.modification == 'none':\n new_code = [(x, 'white') for x in self.body.splitlines()]\n elif self.modification == 'change':\n new_code = [self._getNewCodeList(x) for x in self.body.\n splitlines() if self._getNewCodeList(x)[0]]\n if self.new_code_length < self.old_code_length:\n filling = [(None, self.color)] * (self.old_code_length -\n self.new_code_length)\n new_code.extend(filling)\n else:\n new_code = 
[self._getNewCodeList(x) for x in self.body.splitlines()\n ]\n return new_code\n\n def _getNewCodeList(self, line):\n \"\"\" Private function to return code after modification\n \"\"\"\n if line.startswith('-'):\n return None, self.color\n if line.startswith('+'):\n return ' ' + line[1:], self.color\n return line, self.color\n",
"step-3": "<mask token>\n\n\nclass DiffFile(object):\n <mask token>\n\n def __init__(self, raw_diff):\n self.children = []\n self.binary = raw_diff and '@@' not in raw_diff\n if self.binary or not raw_diff:\n return\n self.header = raw_diff.split('@@')[0][:-1]\n self.path = self.header.split('====')[0][:-1].strip()\n for line in self.header.splitlines():\n if line.startswith('--- '):\n tmp = re.search('\\\\([^)]+\\\\)$', line)\n if tmp is not None:\n self.old_revision = tmp.string[tmp.start():tmp.end()][1:-1\n ].strip()\n else:\n self.old_revision = line.replace('--- ', '')\n if line.startswith('+++ '):\n tmp = re.search('\\\\([^)]+\\\\)$', line)\n if tmp is not None:\n self.new_revision = tmp.string[tmp.start():tmp.end()][1:-1\n ].strip()\n else:\n self.new_revision = line.replace('+++ ', '')\n self.body = os.linesep.join(raw_diff.strip().splitlines()[3:])\n if not self.body.startswith('@@'):\n self.body = os.linesep.join(raw_diff.strip().splitlines()[4:])\n first = True\n tmp = []\n for line in self.body.splitlines():\n if line:\n if line.startswith('@@') and not first:\n self.children.append(CodeBlock(os.linesep.join(tmp)))\n tmp = [line]\n else:\n first = False\n tmp.append(line)\n self.children.append(CodeBlock(os.linesep.join(tmp)))\n\n def __nonzero__(self):\n return self.binary or bool(self.children)\n\n def __len__(self):\n return len(self.children)\n toHTML__roles__ = None\n\n def toHTML(self):\n \"\"\" return HTML diff\n \"\"\"\n if self.binary:\n return (\n '<b>Folder or binary file or just no changes!</b><br/><br/><br/>'\n )\n if not self:\n return ''\n html_list = []\n html_list.append(\n \"\"\"\n <table style=\"text-align: left; width: 100%%; border: 0;\" cellpadding=\"0\" cellspacing=\"0\">\n <tbody>\n <tr>\n <td style=\"background-color: grey; text-align: center; font-weight: bold;\">%s</td>\n <td style=\"background-color: black; width: 2px;\"></td>\n <td style=\"background-color: grey; text-align: center; font-weight: bold;\">%s</td>\n </tr>\"\"\"\n % (self.old_revision, self.new_revision))\n header_color = 'grey'\n child_html_text = \"\"\"<tr><td style=\"background-color: %(headcolor)s\">\n </td><td style=\"background-color: black; width: 2px;\"></td>\n <td style=\"background-color: %(headcolor)s\"> </td></tr><tr>\n <td style=\"background-color: rgb(68, 132, 255);font-weight: bold;\">Line %(oldline)s</td>\n <td style=\"background-color: black; width: 2px;\"></td>\n <td style=\"background-color: rgb(68, 132, 255);font-weight: bold;\">Line %(newline)s</td>\n </tr>\"\"\"\n for child in self.children:\n html_list.append(child_html_text % {'headcolor': header_color,\n 'oldline': child.old_line, 'newline': child.new_line})\n header_color = 'white'\n old_code_list = child.getOldCodeList()\n new_code_list = child.getNewCodeList()\n i = 0\n for old_line_tuple in old_code_list:\n new_line_tuple = new_code_list[i]\n new_line = new_line_tuple[0] or ' '\n old_line = old_line_tuple[0] or ' '\n i += 1\n html_list.append(\n \"\"\"<tr style=\"font-family: monospace\">\n <td style=\"background-color: %s\">%s</td>\n <td style=\"background-color: black; width: 2px;\"></td>\n <td style=\"background-color: %s\">%s</td>\n </tr>\"\"\"\n % (old_line_tuple[1], escape(old_line).replace(' ',\n NBSP).replace('\\t', NBSP_TAB), new_line_tuple[1],\n escape(new_line).replace(' ', NBSP).replace('\\t',\n NBSP_TAB)))\n html_list.append('</tbody></table><br/>')\n return '\\n'.join(html_list)\n\n def getModifiedBlockList(self):\n \"\"\"\n Return a list of modified blocks\n List contains tuples (block object 
: (old_modified_code, new_modified_code))\n \"\"\"\n if self.binary:\n return []\n block_list = []\n for child in self.children:\n old_line_list = [line.strip() for line, color in child.\n getOldCodeList() if line is not None and color in (\n MODIFIED_DIFF_COLOR, DELETED_DIFF_COLOR)]\n new_line_list = [line.strip() for line, color in child.\n getNewCodeList() if line is not None and color in (\n MODIFIED_DIFF_COLOR, ADDITION_DIFF_COLOR)]\n if old_line_list or new_line_list:\n block_list.append((child, (old_line_list, new_line_list)))\n return block_list\n\n\nclass CodeBlock:\n \"\"\"\n A code block contains several SubCodeBlocks\n Members :\n - old_line : line in old code (before modif)\n - new line : line in new code (after modif)\n\n Methods :\n - getOldCodeList() : return code before modif\n - getNewCodeList() : return code after modif\n Note: the code returned is a list of tuples (code line, background color)\n \"\"\"\n\n def __init__(self, raw_diff):\n self.body = os.linesep.join(raw_diff.splitlines()[1:])\n self.header = raw_diff.splitlines()[0]\n tmp = re.search('^@@ -\\\\d+', self.header)\n self.old_line = tmp.string[tmp.start():tmp.end()][4:]\n tmp = re.search('\\\\+\\\\d+', self.header)\n self.new_line = tmp.string[tmp.start():tmp.end()][1:]\n in_modif = False\n self.children = []\n tmp = []\n for line in self.body.splitlines():\n if line:\n if line.startswith('+') or line.startswith('-'):\n if in_modif:\n tmp.append(line)\n else:\n self.children.append(SubCodeBlock(os.linesep.join(tmp))\n )\n tmp = [line]\n in_modif = True\n elif in_modif:\n self.children.append(SubCodeBlock(os.linesep.join(tmp)))\n tmp = [line]\n in_modif = False\n else:\n tmp.append(line)\n self.children.append(SubCodeBlock(os.linesep.join(tmp)))\n\n def getOldCodeList(self):\n \"\"\" Return code before modification\n \"\"\"\n tmp = []\n for child in self.children:\n tmp.extend(child.getOldCodeList())\n return tmp\n\n def getNewCodeList(self):\n \"\"\" Return code after modification\n \"\"\"\n tmp = []\n for child in self.children:\n tmp.extend(child.getNewCodeList())\n return tmp\n\n\nclass SubCodeBlock:\n \"\"\" a SubCodeBlock contain 0 or 1 modification (not more)\n \"\"\"\n\n def __init__(self, code):\n self.body = code\n self.modification = self._getModif()\n self.old_code_length = self._getOldCodeLength()\n self.new_code_length = self._getNewCodeLength()\n if self.modification == 'none':\n self.color = NO_DIFF_COLOR\n elif self.modification == 'change':\n self.color = MODIFIED_DIFF_COLOR\n elif self.modification == 'deletion':\n self.color = DELETED_DIFF_COLOR\n else:\n self.color = ADDITION_DIFF_COLOR\n\n def _getModif(self):\n \"\"\" Return type of modification :\n addition, deletion, none\n \"\"\"\n nb_plus = 0\n nb_minus = 0\n for line in self.body.splitlines():\n if line.startswith('-'):\n nb_minus -= 1\n elif line.startswith('+'):\n nb_plus += 1\n if nb_plus == 0 and nb_minus == 0:\n return 'none'\n if nb_minus == 0:\n return 'addition'\n if nb_plus == 0:\n return 'deletion'\n return 'change'\n\n def _getOldCodeLength(self):\n \"\"\" Private function to return old code length\n \"\"\"\n nb_lines = 0\n for line in self.body.splitlines():\n if not line.startswith('+'):\n nb_lines += 1\n return nb_lines\n\n def _getNewCodeLength(self):\n \"\"\" Private function to return new code length\n \"\"\"\n nb_lines = 0\n for line in self.body.splitlines():\n if not line.startswith('-'):\n nb_lines += 1\n return nb_lines\n\n def getOldCodeList(self):\n \"\"\" Return code before modification\n \"\"\"\n if 
self.modification == 'none':\n old_code = [(x, 'white') for x in self.body.splitlines()]\n elif self.modification == 'change':\n old_code = [self._getOldCodeList(x) for x in self.body.\n splitlines() if self._getOldCodeList(x)[0]]\n if self.old_code_length < self.new_code_length:\n filling = [(None, self.color)] * (self.new_code_length -\n self.old_code_length)\n old_code.extend(filling)\n else:\n old_code = [self._getOldCodeList(x) for x in self.body.splitlines()\n ]\n return old_code\n\n def _getOldCodeList(self, line):\n \"\"\" Private function to return code before modification\n \"\"\"\n if line.startswith('+'):\n return None, self.color\n if line.startswith('-'):\n return ' ' + line[1:], self.color\n return line, self.color\n\n def getNewCodeList(self):\n \"\"\" Return code after modification\n \"\"\"\n if self.modification == 'none':\n new_code = [(x, 'white') for x in self.body.splitlines()]\n elif self.modification == 'change':\n new_code = [self._getNewCodeList(x) for x in self.body.\n splitlines() if self._getNewCodeList(x)[0]]\n if self.new_code_length < self.old_code_length:\n filling = [(None, self.color)] * (self.old_code_length -\n self.new_code_length)\n new_code.extend(filling)\n else:\n new_code = [self._getNewCodeList(x) for x in self.body.splitlines()\n ]\n return new_code\n\n def _getNewCodeList(self, line):\n \"\"\" Private function to return code after modification\n \"\"\"\n if line.startswith('-'):\n return None, self.color\n if line.startswith('+'):\n return ' ' + line[1:], self.color\n return line, self.color\n",
"step-4": "<mask token>\nNBSP = ' '\nNBSP_TAB = NBSP * 8\nNO_DIFF_COLOR = 'white'\nMODIFIED_DIFF_COLOR = 'rgb(253, 228, 6);'\nDELETED_DIFF_COLOR = 'rgb(253, 117, 74);'\nADDITION_DIFF_COLOR = 'rgb(83, 253, 74);'\n\n\nclass DiffFile(object):\n \"\"\"\n # Members :\n - path : path of the modified file\n - children : sub codes modified\n - old_revision\n - new_revision\n \"\"\"\n\n def __init__(self, raw_diff):\n self.children = []\n self.binary = raw_diff and '@@' not in raw_diff\n if self.binary or not raw_diff:\n return\n self.header = raw_diff.split('@@')[0][:-1]\n self.path = self.header.split('====')[0][:-1].strip()\n for line in self.header.splitlines():\n if line.startswith('--- '):\n tmp = re.search('\\\\([^)]+\\\\)$', line)\n if tmp is not None:\n self.old_revision = tmp.string[tmp.start():tmp.end()][1:-1\n ].strip()\n else:\n self.old_revision = line.replace('--- ', '')\n if line.startswith('+++ '):\n tmp = re.search('\\\\([^)]+\\\\)$', line)\n if tmp is not None:\n self.new_revision = tmp.string[tmp.start():tmp.end()][1:-1\n ].strip()\n else:\n self.new_revision = line.replace('+++ ', '')\n self.body = os.linesep.join(raw_diff.strip().splitlines()[3:])\n if not self.body.startswith('@@'):\n self.body = os.linesep.join(raw_diff.strip().splitlines()[4:])\n first = True\n tmp = []\n for line in self.body.splitlines():\n if line:\n if line.startswith('@@') and not first:\n self.children.append(CodeBlock(os.linesep.join(tmp)))\n tmp = [line]\n else:\n first = False\n tmp.append(line)\n self.children.append(CodeBlock(os.linesep.join(tmp)))\n\n def __nonzero__(self):\n return self.binary or bool(self.children)\n\n def __len__(self):\n return len(self.children)\n toHTML__roles__ = None\n\n def toHTML(self):\n \"\"\" return HTML diff\n \"\"\"\n if self.binary:\n return (\n '<b>Folder or binary file or just no changes!</b><br/><br/><br/>'\n )\n if not self:\n return ''\n html_list = []\n html_list.append(\n \"\"\"\n <table style=\"text-align: left; width: 100%%; border: 0;\" cellpadding=\"0\" cellspacing=\"0\">\n <tbody>\n <tr>\n <td style=\"background-color: grey; text-align: center; font-weight: bold;\">%s</td>\n <td style=\"background-color: black; width: 2px;\"></td>\n <td style=\"background-color: grey; text-align: center; font-weight: bold;\">%s</td>\n </tr>\"\"\"\n % (self.old_revision, self.new_revision))\n header_color = 'grey'\n child_html_text = \"\"\"<tr><td style=\"background-color: %(headcolor)s\">\n </td><td style=\"background-color: black; width: 2px;\"></td>\n <td style=\"background-color: %(headcolor)s\"> </td></tr><tr>\n <td style=\"background-color: rgb(68, 132, 255);font-weight: bold;\">Line %(oldline)s</td>\n <td style=\"background-color: black; width: 2px;\"></td>\n <td style=\"background-color: rgb(68, 132, 255);font-weight: bold;\">Line %(newline)s</td>\n </tr>\"\"\"\n for child in self.children:\n html_list.append(child_html_text % {'headcolor': header_color,\n 'oldline': child.old_line, 'newline': child.new_line})\n header_color = 'white'\n old_code_list = child.getOldCodeList()\n new_code_list = child.getNewCodeList()\n i = 0\n for old_line_tuple in old_code_list:\n new_line_tuple = new_code_list[i]\n new_line = new_line_tuple[0] or ' '\n old_line = old_line_tuple[0] or ' '\n i += 1\n html_list.append(\n \"\"\"<tr style=\"font-family: monospace\">\n <td style=\"background-color: %s\">%s</td>\n <td style=\"background-color: black; width: 2px;\"></td>\n <td style=\"background-color: %s\">%s</td>\n </tr>\"\"\"\n % (old_line_tuple[1], escape(old_line).replace(' ',\n 
NBSP).replace('\\t', NBSP_TAB), new_line_tuple[1],\n escape(new_line).replace(' ', NBSP).replace('\\t',\n NBSP_TAB)))\n html_list.append('</tbody></table><br/>')\n return '\\n'.join(html_list)\n\n def getModifiedBlockList(self):\n \"\"\"\n Return a list of modified blocks\n List contains tuples (block object : (old_modified_code, new_modified_code))\n \"\"\"\n if self.binary:\n return []\n block_list = []\n for child in self.children:\n old_line_list = [line.strip() for line, color in child.\n getOldCodeList() if line is not None and color in (\n MODIFIED_DIFF_COLOR, DELETED_DIFF_COLOR)]\n new_line_list = [line.strip() for line, color in child.\n getNewCodeList() if line is not None and color in (\n MODIFIED_DIFF_COLOR, ADDITION_DIFF_COLOR)]\n if old_line_list or new_line_list:\n block_list.append((child, (old_line_list, new_line_list)))\n return block_list\n\n\nclass CodeBlock:\n \"\"\"\n A code block contains several SubCodeBlocks\n Members :\n - old_line : line in old code (before modif)\n - new line : line in new code (after modif)\n\n Methods :\n - getOldCodeList() : return code before modif\n - getNewCodeList() : return code after modif\n Note: the code returned is a list of tuples (code line, background color)\n \"\"\"\n\n def __init__(self, raw_diff):\n self.body = os.linesep.join(raw_diff.splitlines()[1:])\n self.header = raw_diff.splitlines()[0]\n tmp = re.search('^@@ -\\\\d+', self.header)\n self.old_line = tmp.string[tmp.start():tmp.end()][4:]\n tmp = re.search('\\\\+\\\\d+', self.header)\n self.new_line = tmp.string[tmp.start():tmp.end()][1:]\n in_modif = False\n self.children = []\n tmp = []\n for line in self.body.splitlines():\n if line:\n if line.startswith('+') or line.startswith('-'):\n if in_modif:\n tmp.append(line)\n else:\n self.children.append(SubCodeBlock(os.linesep.join(tmp))\n )\n tmp = [line]\n in_modif = True\n elif in_modif:\n self.children.append(SubCodeBlock(os.linesep.join(tmp)))\n tmp = [line]\n in_modif = False\n else:\n tmp.append(line)\n self.children.append(SubCodeBlock(os.linesep.join(tmp)))\n\n def getOldCodeList(self):\n \"\"\" Return code before modification\n \"\"\"\n tmp = []\n for child in self.children:\n tmp.extend(child.getOldCodeList())\n return tmp\n\n def getNewCodeList(self):\n \"\"\" Return code after modification\n \"\"\"\n tmp = []\n for child in self.children:\n tmp.extend(child.getNewCodeList())\n return tmp\n\n\nclass SubCodeBlock:\n \"\"\" a SubCodeBlock contain 0 or 1 modification (not more)\n \"\"\"\n\n def __init__(self, code):\n self.body = code\n self.modification = self._getModif()\n self.old_code_length = self._getOldCodeLength()\n self.new_code_length = self._getNewCodeLength()\n if self.modification == 'none':\n self.color = NO_DIFF_COLOR\n elif self.modification == 'change':\n self.color = MODIFIED_DIFF_COLOR\n elif self.modification == 'deletion':\n self.color = DELETED_DIFF_COLOR\n else:\n self.color = ADDITION_DIFF_COLOR\n\n def _getModif(self):\n \"\"\" Return type of modification :\n addition, deletion, none\n \"\"\"\n nb_plus = 0\n nb_minus = 0\n for line in self.body.splitlines():\n if line.startswith('-'):\n nb_minus -= 1\n elif line.startswith('+'):\n nb_plus += 1\n if nb_plus == 0 and nb_minus == 0:\n return 'none'\n if nb_minus == 0:\n return 'addition'\n if nb_plus == 0:\n return 'deletion'\n return 'change'\n\n def _getOldCodeLength(self):\n \"\"\" Private function to return old code length\n \"\"\"\n nb_lines = 0\n for line in self.body.splitlines():\n if not line.startswith('+'):\n nb_lines += 1\n return 
nb_lines\n\n def _getNewCodeLength(self):\n \"\"\" Private function to return new code length\n \"\"\"\n nb_lines = 0\n for line in self.body.splitlines():\n if not line.startswith('-'):\n nb_lines += 1\n return nb_lines\n\n def getOldCodeList(self):\n \"\"\" Return code before modification\n \"\"\"\n if self.modification == 'none':\n old_code = [(x, 'white') for x in self.body.splitlines()]\n elif self.modification == 'change':\n old_code = [self._getOldCodeList(x) for x in self.body.\n splitlines() if self._getOldCodeList(x)[0]]\n if self.old_code_length < self.new_code_length:\n filling = [(None, self.color)] * (self.new_code_length -\n self.old_code_length)\n old_code.extend(filling)\n else:\n old_code = [self._getOldCodeList(x) for x in self.body.splitlines()\n ]\n return old_code\n\n def _getOldCodeList(self, line):\n \"\"\" Private function to return code before modification\n \"\"\"\n if line.startswith('+'):\n return None, self.color\n if line.startswith('-'):\n return ' ' + line[1:], self.color\n return line, self.color\n\n def getNewCodeList(self):\n \"\"\" Return code after modification\n \"\"\"\n if self.modification == 'none':\n new_code = [(x, 'white') for x in self.body.splitlines()]\n elif self.modification == 'change':\n new_code = [self._getNewCodeList(x) for x in self.body.\n splitlines() if self._getNewCodeList(x)[0]]\n if self.new_code_length < self.old_code_length:\n filling = [(None, self.color)] * (self.old_code_length -\n self.new_code_length)\n new_code.extend(filling)\n else:\n new_code = [self._getNewCodeList(x) for x in self.body.splitlines()\n ]\n return new_code\n\n def _getNewCodeList(self, line):\n \"\"\" Private function to return code after modification\n \"\"\"\n if line.startswith('-'):\n return None, self.color\n if line.startswith('+'):\n return ' ' + line[1:], self.color\n return line, self.color\n",
"step-5": "##############################################################################\n#\n# Copyright (c) 2005 Nexedi SARL and Contributors. All Rights Reserved.\n# Yoshinori Okuji <[email protected]>\n# Christophe Dumez <[email protected]>\n#\n# WARNING: This program as such is intended to be used by professional\n# programmers who take the whole responsability of assessing all potential\n# consequences resulting from its eventual inadequacies and bugs\n# End users who are looking for a ready-to-use solution with commercial\n# garantees and support are strongly adviced to contract a Free Software\n# Service Company\n#\n# This program is Free Software; you can redistribute it and/or\n# modify it under the terms of the GNU General Public License\n# as published by the Free Software Foundation; either version 2\n# of the License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, write to the Free Software\n# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.\n#\n##############################################################################\n\n\"\"\"\n Provide a feature not present into difflib, which is generate a colored diff\n from a diff file/string.\n\n This code is original form ERP5VCS and was moved to here for be used in\n general ERP5.\n\n XXX The organisation of DiffUtils should be reviewed and reorganised in a tool\n if a general tool want to be provided.\n\"\"\"\nimport os, re\nfrom xml.sax.saxutils import escape\n\nNBSP = ' '\nNBSP_TAB = NBSP*8\nNO_DIFF_COLOR = 'white'\nMODIFIED_DIFF_COLOR = 'rgb(253, 228, 6);'#light orange\nDELETED_DIFF_COLOR = 'rgb(253, 117, 74);'#light red\nADDITION_DIFF_COLOR = 'rgb(83, 253, 74);'#light green\n\nclass DiffFile(object):\n \"\"\"\n # Members :\n - path : path of the modified file\n - children : sub codes modified\n - old_revision\n - new_revision\n \"\"\"\n\n def __init__(self, raw_diff):\n self.children = []\n self.binary = raw_diff and '@@' not in raw_diff\n if self.binary or not raw_diff:\n return\n self.header = raw_diff.split('@@')[0][:-1]\n # Getting file path in header\n self.path = self.header.split('====')[0][:-1].strip()\n # Getting revisions in header\n for line in self.header.splitlines():\n if line.startswith('--- '):\n tmp = re.search('\\\\([^)]+\\\\)$', line)\n if tmp is not None:\n self.old_revision = tmp.string[tmp.start():tmp.end()][1:-1].strip()\n else:\n self.old_revision = line.replace(\"--- \", \"\")\n if line.startswith('+++ '):\n tmp = re.search('\\\\([^)]+\\\\)$', line)\n if tmp is not None:\n self.new_revision = tmp.string[tmp.start():tmp.end()][1:-1].strip()\n else:\n self.new_revision = line.replace(\"+++ \", \"\")\n # Splitting the body from the header\n self.body = os.linesep.join(raw_diff.strip().splitlines()[3:])\n if not self.body.startswith('@@'):\n self.body = os.linesep.join(raw_diff.strip().splitlines()[4:])\n # Now splitting modifications\n first = True\n tmp = []\n for line in self.body.splitlines():\n if line:\n if line.startswith('@@') and not first:\n self.children.append(CodeBlock(os.linesep.join(tmp)))\n tmp = [line, ]\n else:\n first = False\n tmp.append(line)\n self.children.append(CodeBlock(os.linesep.join(tmp)))\n\n def 
__nonzero__(self):\n return self.binary or bool(self.children)\n\n def __len__(self):\n return len(self.children)\n\n toHTML__roles__ = None # public\n def toHTML(self):\n \"\"\" return HTML diff\n \"\"\"\n # Adding header of the table\n if self.binary:\n return '<b>Folder or binary file or just no changes!</b><br/><br/><br/>'\n\n if not self:\n return ''\n\n html_list = []\n html_list.append('''\n <table style=\"text-align: left; width: 100%%; border: 0;\" cellpadding=\"0\" cellspacing=\"0\">\n <tbody>\n <tr>\n <td style=\"background-color: grey; text-align: center; font-weight: bold;\">%s</td>\n <td style=\"background-color: black; width: 2px;\"></td>\n <td style=\"background-color: grey; text-align: center; font-weight: bold;\">%s</td>\n </tr>''' % (self.old_revision, self.new_revision))\n header_color = 'grey'\n child_html_text = '''<tr><td style=\"background-color: %(headcolor)s\">\n </td><td style=\"background-color: black; width: 2px;\"></td>\n <td style=\"background-color: %(headcolor)s\"> </td></tr><tr>\n <td style=\"background-color: rgb(68, 132, 255);font-weight: bold;\">Line %(oldline)s</td>\n <td style=\"background-color: black; width: 2px;\"></td>\n <td style=\"background-color: rgb(68, 132, 255);font-weight: bold;\">Line %(newline)s</td>\n </tr>'''\n for child in self.children:\n # Adding line number of the modification\n html_list.append( child_html_text % {'headcolor':header_color, 'oldline':child.old_line, 'newline':child.new_line} )\n header_color = 'white'\n # Adding diff of the modification\n old_code_list = child.getOldCodeList()\n new_code_list = child.getNewCodeList()\n i = 0\n for old_line_tuple in old_code_list:\n new_line_tuple = new_code_list[i]\n new_line = new_line_tuple[0] or ' '\n old_line = old_line_tuple[0] or ' '\n i += 1\n html_list.append( '''<tr style=\"font-family: monospace\">\n <td style=\"background-color: %s\">%s</td>\n <td style=\"background-color: black; width: 2px;\"></td>\n <td style=\"background-color: %s\">%s</td>\n </tr>'''%(old_line_tuple[1],\n escape(old_line).replace(' ', NBSP).replace('\\t', NBSP_TAB),\n new_line_tuple[1],\n escape(new_line).replace(' ', NBSP).replace('\\t', NBSP_TAB))\n )\n html_list.append('''</tbody></table><br/>''')\n return '\\n'.join(html_list)\n\n def getModifiedBlockList(self):\n \"\"\"\n Return a list of modified blocks\n List contains tuples (block object : (old_modified_code, new_modified_code))\n \"\"\"\n if self.binary:\n return []\n block_list = []\n for child in self.children:\n old_line_list = [line.strip() for line, color in child.getOldCodeList()\n if line is not None and color in (MODIFIED_DIFF_COLOR,\n DELETED_DIFF_COLOR)]\n new_line_list = [line.strip() for line, color in child.getNewCodeList()\n if line is not None and color in (MODIFIED_DIFF_COLOR,\n ADDITION_DIFF_COLOR)]\n if old_line_list or new_line_list:\n block_list.append((child,(old_line_list, new_line_list)))\n return block_list\n\n\nclass CodeBlock:\n \"\"\"\n A code block contains several SubCodeBlocks\n Members :\n - old_line : line in old code (before modif)\n - new line : line in new code (after modif)\n\n Methods :\n - getOldCodeList() : return code before modif\n - getNewCodeList() : return code after modif\n Note: the code returned is a list of tuples (code line, background color)\n \"\"\"\n\n def __init__(self, raw_diff):\n # Splitting body and header\n self.body = os.linesep.join(raw_diff.splitlines()[1:])\n self.header = raw_diff.splitlines()[0]\n # Getting modifications lines\n tmp = re.search('^@@ -\\d+', self.header)\n 
self.old_line = tmp.string[tmp.start():tmp.end()][4:]\n tmp = re.search('\\+\\d+', self.header)\n self.new_line = tmp.string[tmp.start():tmp.end()][1:]\n # Splitting modifications in SubCodeBlocks\n in_modif = False\n self.children = []\n tmp = []\n for line in self.body.splitlines():\n if line:\n if (line.startswith('+') or line.startswith('-')):\n if in_modif:\n tmp.append(line)\n else:\n self.children.append(SubCodeBlock(os.linesep.join(tmp)))\n tmp = [line, ]\n in_modif = True\n else:\n if in_modif:\n self.children.append(SubCodeBlock(os.linesep.join(tmp)))\n tmp = [line, ]\n in_modif = False\n else:\n tmp.append(line)\n self.children.append(SubCodeBlock(os.linesep.join(tmp)))\n\n def getOldCodeList(self):\n \"\"\" Return code before modification\n \"\"\"\n tmp = []\n for child in self.children:\n tmp.extend(child.getOldCodeList())\n return tmp\n\n def getNewCodeList(self):\n \"\"\" Return code after modification\n \"\"\"\n tmp = []\n for child in self.children:\n tmp.extend(child.getNewCodeList())\n return tmp\n\nclass SubCodeBlock:\n \"\"\" a SubCodeBlock contain 0 or 1 modification (not more)\n \"\"\"\n def __init__(self, code):\n self.body = code\n self.modification = self._getModif()\n self.old_code_length = self._getOldCodeLength()\n self.new_code_length = self._getNewCodeLength()\n # Choosing background color\n if self.modification == 'none':\n self.color = NO_DIFF_COLOR\n elif self.modification == 'change':\n self.color = MODIFIED_DIFF_COLOR\n elif self.modification == 'deletion':\n self.color = DELETED_DIFF_COLOR\n else: # addition\n self.color = ADDITION_DIFF_COLOR\n\n def _getModif(self):\n \"\"\" Return type of modification :\n addition, deletion, none\n \"\"\"\n nb_plus = 0\n nb_minus = 0\n for line in self.body.splitlines():\n if line.startswith(\"-\"):\n nb_minus -= 1\n elif line.startswith(\"+\"):\n nb_plus += 1\n if (nb_plus == 0 and nb_minus == 0):\n return 'none'\n if (nb_minus == 0):\n return 'addition'\n if (nb_plus == 0):\n return 'deletion'\n return 'change'\n\n def _getOldCodeLength(self):\n \"\"\" Private function to return old code length\n \"\"\"\n nb_lines = 0\n for line in self.body.splitlines():\n if not line.startswith(\"+\"):\n nb_lines += 1\n return nb_lines\n\n def _getNewCodeLength(self):\n \"\"\" Private function to return new code length\n \"\"\"\n nb_lines = 0\n for line in self.body.splitlines():\n if not line.startswith(\"-\"):\n nb_lines += 1\n return nb_lines\n\n def getOldCodeList(self):\n \"\"\" Return code before modification\n \"\"\"\n if self.modification == 'none':\n old_code = [(x, 'white') for x in self.body.splitlines()]\n elif self.modification == 'change':\n old_code = [self._getOldCodeList(x) for x in self.body.splitlines() \\\n if self._getOldCodeList(x)[0]]\n # we want old_code_list and new_code_list to have the same length\n if(self.old_code_length < self.new_code_length):\n filling = [(None, self.color)] * (self.new_code_length - \\\n self.old_code_length)\n old_code.extend(filling)\n else: # deletion or addition\n old_code = [self._getOldCodeList(x) for x in self.body.splitlines()]\n return old_code\n\n def _getOldCodeList(self, line):\n \"\"\" Private function to return code before modification\n \"\"\"\n if line.startswith('+'):\n return (None, self.color)\n if line.startswith('-'):\n return (' ' + line[1:], self.color)\n return (line, self.color)\n\n def getNewCodeList(self):\n \"\"\" Return code after modification\n \"\"\"\n if self.modification == 'none':\n new_code = [(x, 'white') for x in self.body.splitlines()]\n elif 
self.modification == 'change':\n new_code = [self._getNewCodeList(x) for x in self.body.splitlines() \\\n if self._getNewCodeList(x)[0]]\n # we want old_code_list and new_code_list to have the same length\n if(self.new_code_length < self.old_code_length):\n filling = [(None, self.color)] * (self.old_code_length - \\\n self.new_code_length)\n new_code.extend(filling)\n else: # deletion or addition\n new_code = [self._getNewCodeList(x) for x in self.body.splitlines()]\n return new_code\n\n def _getNewCodeList(self, line):\n \"\"\" Private function to return code after modification\n \"\"\"\n if line.startswith('-'):\n return (None, self.color)\n if line.startswith('+'):\n return (' ' + line[1:], self.color)\n return (line, self.color)\n",
"step-ids": [
13,
19,
22,
24,
26
]
}
|
[
13,
19,
22,
24,
26
] |
# coding: utf-8
'''
Created on 2013-7-8
@author: huqiming
'''
import json
import re
import urllib2
'''
Picture story content
'''
class ts_content:
'''
    Picture story title
'''
title = ''
'''
    Picture story date
'''
date = ''
'''
    Picture story paragraphs
'''
parts = []
def __str__(self):
return 'parts: ' + str(self.parts)
'''
Picture story paragraph
'''
class ts_content_part(json.JSONEncoder):
'''
    Paragraph title
'''
title = ''
'''
    Sub-items of the paragraph
'''
items = []
def __str__(self):
return 'title: ' + self.title + ' items: ' + str(self.items)
class ts_content_part_item(json.JSONEncoder):
txt_info = ''
img_url = ''
def __init__(self, txt, img):
if txt :
self.txt_info = txt
if img :
self.img_url = img
def __str__(self):
return 'info: ' + self.txt_info + ' img: ' + self.img_url
def parse_content(url):
# print(url)
page = urllib2.urlopen(url)
html = page.read()
source = html.decode('GBK')
parts = perform_parse_content(source)
result = ts_content()
result.parts = parts;
return result
def perform_parse_content(source):
li = re.finditer(ur'<P>\u3010\d*\u3011.*?</P>', source)
i = 0
index = []
res = []
for m in li:
title = m.group()
part = ts_content_part()
part.title = remove_tags(title)
res.append(part)
pos = m.start()
index.append(pos)
if(i > 0):
part_source = source[index[i - 1]:pos]
res_item = parse_content_part(part_source)
res[i - 1].items = res_item
i += 1
part_source = source[pos:source.index('<P> </P>')]
res_item = parse_content_part(part_source)
res[i - 1].items = res_item
return res
def parse_content_part(source):
li = re.finditer(r'<(P|DIV)>.*?</(P|DIV)>', source)
res = []
for m in li:
item = m.group()
img = parse_img_src(item)
txt = remove_tags(item)
res_item = ts_content_part_item(txt, img)
# print(res_item)
res.append(res_item)
return res
def parse_img_src(source):
m = re.search(r'<IMG.*?>', source)
if m:
img_tag = m.group()
img_m = re.search(r'src=".*?"', img_tag)
if img_m:
src = img_m.group()
src = src[5:-1]
return src
def remove_tags(source):
p = re.compile(r"(<.*?>|</.*?>|<|/>| )")
return p.sub('', source)
# res = parse('http://www.dapenti.com/blog/more.asp?name=xilei&id=79405')
# from ts_json import json_encode
# ss = json_encode().encode(res)
# print(ss)
|
normal
|
{
"blob_id": "094f482ec6d36dfaed7e908bc445e6e015ec409d",
"index": 2718,
"step-1": "# coding: utf-8\r\n'''\r\nCreated on 2013-7-8\r\n@author: huqiming\r\n'''\r\nimport json\r\nimport re\r\nimport urllib2\r\n'''\r\n图说内容\r\n'''\r\nclass ts_content:\r\n '''\r\n 图说标题\r\n '''\r\n title = ''\r\n '''\r\n 图说日期\r\n '''\r\n date = ''\r\n '''\r\n 图说段落\r\n '''\r\n parts = []\r\n def __str__(self):\r\n return 'parts: ' + str(self.parts)\r\n\r\n'''\r\n图说段落\r\n'''\r\nclass ts_content_part(json.JSONEncoder):\r\n '''\r\n 段落标题\r\n '''\r\n title = ''\r\n '''\r\n 段落的子内容\r\n '''\r\n items = []\r\n def __str__(self):\r\n return 'title: ' + self.title + ' items: ' + str(self.items)\r\n\r\nclass ts_content_part_item(json.JSONEncoder):\r\n txt_info = ''\r\n img_url = ''\r\n \r\n def __init__(self, txt, img):\r\n if txt :\r\n self.txt_info = txt\r\n if img : \r\n self.img_url = img\r\n \r\n def __str__(self):\r\n return 'info: ' + self.txt_info + ' img: ' + self.img_url\r\n \r\ndef parse_content(url):\r\n# print(url)\r\n page = urllib2.urlopen(url)\r\n html = page.read()\r\n source = html.decode('GBK')\r\n \r\n parts = perform_parse_content(source)\r\n result = ts_content()\r\n \r\n result.parts = parts;\r\n return result\r\n\r\ndef perform_parse_content(source):\r\n li = re.finditer(ur'<P>\\u3010\\d*\\u3011.*?</P>', source)\r\n i = 0\r\n\r\n index = []\r\n res = []\r\n for m in li:\r\n title = m.group()\r\n part = ts_content_part()\r\n part.title = remove_tags(title)\r\n res.append(part)\r\n \r\n pos = m.start()\r\n index.append(pos)\r\n \r\n if(i > 0):\r\n part_source = source[index[i - 1]:pos]\r\n res_item = parse_content_part(part_source)\r\n res[i - 1].items = res_item\r\n i += 1\r\n \r\n part_source = source[pos:source.index('<P> </P>')]\r\n res_item = parse_content_part(part_source)\r\n res[i - 1].items = res_item\r\n \r\n return res\r\n\r\ndef parse_content_part(source):\r\n li = re.finditer(r'<(P|DIV)>.*?</(P|DIV)>', source)\r\n res = []\r\n for m in li:\r\n item = m.group()\r\n img = parse_img_src(item)\r\n txt = remove_tags(item)\r\n res_item = ts_content_part_item(txt, img)\r\n# print(res_item)\r\n res.append(res_item)\r\n \r\n return res\r\n\r\ndef parse_img_src(source):\r\n m = re.search(r'<IMG.*?>', source)\r\n if m:\r\n img_tag = m.group()\r\n img_m = re.search(r'src=\".*?\"', img_tag)\r\n if img_m:\r\n src = img_m.group()\r\n src = src[5:-1]\r\n return src\r\n\r\ndef remove_tags(source):\r\n p = re.compile(r\"(<.*?>|</.*?>|<|/>| )\")\r\n return p.sub('', source)\r\n\r\n# res = parse('http://www.dapenti.com/blog/more.asp?name=xilei&id=79405')\r\n# from ts_json import json_encode\r\n# ss = json_encode().encode(res)\r\n# print(ss)\r\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
#! /usr/bin/python
# -*- coding: utf-8 -*-
import subprocess
import sys
import pdb
argvs = sys.argv
if len(argvs) != 2:
print "Please input 1 argument"
quit()
searchWord = argvs[1]
cmd1 = "ls -a /etc/"
p1 = subprocess.Popen(cmd1.strip().split(" "), stdout=subprocess.PIPE)
stdout_data, stderr_data = p1.communicate()
p1.stdout.close()
if stderr_data != None:
print "Error", stderr_data
quit()
filelist = stdout_data.strip().split("\n")
for file in filelist:
if file.find(searchWord) != -1:
print file
|
normal
|
{
"blob_id": "c12d45644098aef5c042a62095eeae5829d70f45",
"index": 7641,
"step-1": "#! /usr/bin/python\n# encode:utf-8\nimport subprocess\nimport sys\nimport pdb\n\nargvs = sys.argv\nif len(argvs) != 2:\n print \"Please input 1 argument\"\n quit()\n\nsearchWord = argvs[1]\n\ncmd1 = \"ls -a /etc/\"\np1 = subprocess.Popen(cmd1.strip().split(\" \"), stdout=subprocess.PIPE)\nstdout_data, stderr_data = p1.communicate()\np1.stdout.close()\n\nif stderr_data != None:\n print \"Error\", stderr_data\n quit()\n \nfilelist = stdout_data.strip().split(\"\\n\")\nfor file in filelist:\n if file.find(searchWord) != -1:\n print file\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
import surname_common as sc
from sklearn.utils import shuffle
import glob
import os
import re
import pprint
import pandas as pd
import unicodedata
import string
def unicode_to_ascii(s):
return ''.join(
c for c in unicodedata.normalize('NFD', s)
if unicodedata.category(c) != 'Mn' and c in sc.ALL_LETTERS
)
def load_surnames():
df_surnames = pd.DataFrame()
list_ = []
for filename in glob.glob('data/names/*.txt'):
m = re.match(r'(.*)\/(.*?)\.txt', filename)
category = m.group(2)
df = pd.read_csv(filename,names=['surname'])
df['category'] = category
list_.append(df)
df_surnames = pd.concat(list_)
df_surnames['normalized'] = df_surnames['surname'].apply(lambda x: unicode_to_ascii(x))
series_categories = df_surnames.groupby(['category'])['category'].count()
df_categories = pd.DataFrame({
'category':series_categories.index,
'freq':series_categories.tolist(),
'index':range(0,len(series_categories))
})
return df_surnames, df_categories
def save_df_surnames_as_pickle():
df_surnames, df_categories = load_surnames()
# train test split
df = shuffle(df_surnames, random_state=sc.RANDOM_STATE)
train_cnt = int(df['surname'].count()*sc.TRAIN_TEST_RATIO)
train = df[0:train_cnt]
test = df[train_cnt+1:]
# save as pickle
df_surnames.to_pickle('data/pickles/df_surnames.pickle',compression='bz2')
df_categories.to_pickle('data/pickles/df_categories.pickle',compression='bz2')
train.to_pickle('data/pickles/train.pickle',compression='bz2')
test.to_pickle('data/pickles/test.pickle',compression='bz2')
# train test stat
t1 = train.groupby(['category']).count().drop(['normalized'],axis=1)
t2 = test.groupby(['category']).count().drop(['normalized'],axis=1)
t1.columns = ['surname_train']
t2.columns = ['surname_test']
tt = pd.DataFrame(pd.merge(t1, t2, left_index=True, right_index=True))
tt['ratio'] = tt['surname_train'] / (tt['surname_train'] + tt['surname_test'])
tt.to_pickle('data/pickles/train_test_stat.pickle',compression='bz2')
return tt
|
normal
|
{
"blob_id": "db46fbfb1acd855eebb5c9f557d70038b84e812d",
"index": 8573,
"step-1": "<mask token>\n\n\ndef save_df_surnames_as_pickle():\n df_surnames, df_categories = load_surnames()\n df = shuffle(df_surnames, random_state=sc.RANDOM_STATE)\n train_cnt = int(df['surname'].count() * sc.TRAIN_TEST_RATIO)\n train = df[0:train_cnt]\n test = df[train_cnt + 1:]\n df_surnames.to_pickle('data/pickles/df_surnames.pickle', compression='bz2')\n df_categories.to_pickle('data/pickles/df_categories.pickle',\n compression='bz2')\n train.to_pickle('data/pickles/train.pickle', compression='bz2')\n test.to_pickle('data/pickles/test.pickle', compression='bz2')\n t1 = train.groupby(['category']).count().drop(['normalized'], axis=1)\n t2 = test.groupby(['category']).count().drop(['normalized'], axis=1)\n t1.columns = ['surname_train']\n t2.columns = ['surname_test']\n tt = pd.DataFrame(pd.merge(t1, t2, left_index=True, right_index=True))\n tt['ratio'] = tt['surname_train'] / (tt['surname_train'] + tt[\n 'surname_test'])\n tt.to_pickle('data/pickles/train_test_stat.pickle', compression='bz2')\n return tt\n",
"step-2": "<mask token>\n\n\ndef unicode_to_ascii(s):\n return ''.join(c for c in unicodedata.normalize('NFD', s) if \n unicodedata.category(c) != 'Mn' and c in sc.ALL_LETTERS)\n\n\n<mask token>\n\n\ndef save_df_surnames_as_pickle():\n df_surnames, df_categories = load_surnames()\n df = shuffle(df_surnames, random_state=sc.RANDOM_STATE)\n train_cnt = int(df['surname'].count() * sc.TRAIN_TEST_RATIO)\n train = df[0:train_cnt]\n test = df[train_cnt + 1:]\n df_surnames.to_pickle('data/pickles/df_surnames.pickle', compression='bz2')\n df_categories.to_pickle('data/pickles/df_categories.pickle',\n compression='bz2')\n train.to_pickle('data/pickles/train.pickle', compression='bz2')\n test.to_pickle('data/pickles/test.pickle', compression='bz2')\n t1 = train.groupby(['category']).count().drop(['normalized'], axis=1)\n t2 = test.groupby(['category']).count().drop(['normalized'], axis=1)\n t1.columns = ['surname_train']\n t2.columns = ['surname_test']\n tt = pd.DataFrame(pd.merge(t1, t2, left_index=True, right_index=True))\n tt['ratio'] = tt['surname_train'] / (tt['surname_train'] + tt[\n 'surname_test'])\n tt.to_pickle('data/pickles/train_test_stat.pickle', compression='bz2')\n return tt\n",
"step-3": "<mask token>\n\n\ndef unicode_to_ascii(s):\n return ''.join(c for c in unicodedata.normalize('NFD', s) if \n unicodedata.category(c) != 'Mn' and c in sc.ALL_LETTERS)\n\n\ndef load_surnames():\n df_surnames = pd.DataFrame()\n list_ = []\n for filename in glob.glob('data/names/*.txt'):\n m = re.match('(.*)\\\\/(.*?)\\\\.txt', filename)\n category = m.group(2)\n df = pd.read_csv(filename, names=['surname'])\n df['category'] = category\n list_.append(df)\n df_surnames = pd.concat(list_)\n df_surnames['normalized'] = df_surnames['surname'].apply(lambda x:\n unicode_to_ascii(x))\n series_categories = df_surnames.groupby(['category'])['category'].count()\n df_categories = pd.DataFrame({'category': series_categories.index,\n 'freq': series_categories.tolist(), 'index': range(0, len(\n series_categories))})\n return df_surnames, df_categories\n\n\ndef save_df_surnames_as_pickle():\n df_surnames, df_categories = load_surnames()\n df = shuffle(df_surnames, random_state=sc.RANDOM_STATE)\n train_cnt = int(df['surname'].count() * sc.TRAIN_TEST_RATIO)\n train = df[0:train_cnt]\n test = df[train_cnt + 1:]\n df_surnames.to_pickle('data/pickles/df_surnames.pickle', compression='bz2')\n df_categories.to_pickle('data/pickles/df_categories.pickle',\n compression='bz2')\n train.to_pickle('data/pickles/train.pickle', compression='bz2')\n test.to_pickle('data/pickles/test.pickle', compression='bz2')\n t1 = train.groupby(['category']).count().drop(['normalized'], axis=1)\n t2 = test.groupby(['category']).count().drop(['normalized'], axis=1)\n t1.columns = ['surname_train']\n t2.columns = ['surname_test']\n tt = pd.DataFrame(pd.merge(t1, t2, left_index=True, right_index=True))\n tt['ratio'] = tt['surname_train'] / (tt['surname_train'] + tt[\n 'surname_test'])\n tt.to_pickle('data/pickles/train_test_stat.pickle', compression='bz2')\n return tt\n",
"step-4": "import surname_common as sc\nfrom sklearn.utils import shuffle\nimport glob\nimport os\nimport re\nimport pprint\nimport pandas as pd\nimport unicodedata\nimport string\n\n\ndef unicode_to_ascii(s):\n return ''.join(c for c in unicodedata.normalize('NFD', s) if \n unicodedata.category(c) != 'Mn' and c in sc.ALL_LETTERS)\n\n\ndef load_surnames():\n df_surnames = pd.DataFrame()\n list_ = []\n for filename in glob.glob('data/names/*.txt'):\n m = re.match('(.*)\\\\/(.*?)\\\\.txt', filename)\n category = m.group(2)\n df = pd.read_csv(filename, names=['surname'])\n df['category'] = category\n list_.append(df)\n df_surnames = pd.concat(list_)\n df_surnames['normalized'] = df_surnames['surname'].apply(lambda x:\n unicode_to_ascii(x))\n series_categories = df_surnames.groupby(['category'])['category'].count()\n df_categories = pd.DataFrame({'category': series_categories.index,\n 'freq': series_categories.tolist(), 'index': range(0, len(\n series_categories))})\n return df_surnames, df_categories\n\n\ndef save_df_surnames_as_pickle():\n df_surnames, df_categories = load_surnames()\n df = shuffle(df_surnames, random_state=sc.RANDOM_STATE)\n train_cnt = int(df['surname'].count() * sc.TRAIN_TEST_RATIO)\n train = df[0:train_cnt]\n test = df[train_cnt + 1:]\n df_surnames.to_pickle('data/pickles/df_surnames.pickle', compression='bz2')\n df_categories.to_pickle('data/pickles/df_categories.pickle',\n compression='bz2')\n train.to_pickle('data/pickles/train.pickle', compression='bz2')\n test.to_pickle('data/pickles/test.pickle', compression='bz2')\n t1 = train.groupby(['category']).count().drop(['normalized'], axis=1)\n t2 = test.groupby(['category']).count().drop(['normalized'], axis=1)\n t1.columns = ['surname_train']\n t2.columns = ['surname_test']\n tt = pd.DataFrame(pd.merge(t1, t2, left_index=True, right_index=True))\n tt['ratio'] = tt['surname_train'] / (tt['surname_train'] + tt[\n 'surname_test'])\n tt.to_pickle('data/pickles/train_test_stat.pickle', compression='bz2')\n return tt\n",
"step-5": "import surname_common as sc\nfrom sklearn.utils import shuffle\nimport glob\nimport os\nimport re\nimport pprint\nimport pandas as pd\nimport unicodedata\nimport string\n\n\ndef unicode_to_ascii(s):\n return ''.join(\n c for c in unicodedata.normalize('NFD', s)\n if unicodedata.category(c) != 'Mn' and c in sc.ALL_LETTERS\n )\n\ndef load_surnames():\n df_surnames = pd.DataFrame()\n list_ = []\n\n for filename in glob.glob('data/names/*.txt'):\n m = re.match(r'(.*)\\/(.*?)\\.txt', filename)\n category = m.group(2)\n df = pd.read_csv(filename,names=['surname'])\n df['category'] = category\n list_.append(df)\n df_surnames = pd.concat(list_) \n df_surnames['normalized'] = df_surnames['surname'].apply(lambda x: unicode_to_ascii(x))\n \n series_categories = df_surnames.groupby(['category'])['category'].count()\n df_categories = pd.DataFrame({\n 'category':series_categories.index, \n 'freq':series_categories.tolist(), \n 'index':range(0,len(series_categories))\n })\n \n return df_surnames, df_categories\n\ndef save_df_surnames_as_pickle():\n df_surnames, df_categories = load_surnames()\n # train test split\n df = shuffle(df_surnames, random_state=sc.RANDOM_STATE)\n train_cnt = int(df['surname'].count()*sc.TRAIN_TEST_RATIO)\n train = df[0:train_cnt]\n test = df[train_cnt+1:]\n # save as pickle\n df_surnames.to_pickle('data/pickles/df_surnames.pickle',compression='bz2')\n df_categories.to_pickle('data/pickles/df_categories.pickle',compression='bz2')\n train.to_pickle('data/pickles/train.pickle',compression='bz2')\n test.to_pickle('data/pickles/test.pickle',compression='bz2')\n # train test stat \n t1 = train.groupby(['category']).count().drop(['normalized'],axis=1)\n t2 = test.groupby(['category']).count().drop(['normalized'],axis=1)\n t1.columns = ['surname_train']\n t2.columns = ['surname_test']\n tt = pd.DataFrame(pd.merge(t1, t2, left_index=True, right_index=True))\n tt['ratio'] = tt['surname_train'] / (tt['surname_train'] + tt['surname_test'])\n tt.to_pickle('data/pickles/train_test_stat.pickle',compression='bz2')\n return tt",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
from django import forms
from django.forms import ModelForm, fields, widgets
from .models import NewsStory
class StoryForm(ModelForm):
class Meta:
model = NewsStory
fields = ['title' , 'pub_date' , 'content']
widgets = {
'pub_date': forms.DateInput(format=('%m/%d/%Y'), attrs={'class':'form-control', 'placeholder':'select a date', 'type':'date'}),
}
|
normal
|
{
"blob_id": "47a5ddcea2f6d8ce80793192d26c98ccc0e0340d",
"index": 1771,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass StoryForm(ModelForm):\n\n\n class Meta:\n model = NewsStory\n fields = ['title', 'pub_date', 'content']\n widgets = {'pub_date': forms.DateInput(format='%m/%d/%Y', attrs={\n 'class': 'form-control', 'placeholder': 'select a date', 'type':\n 'date'})}\n",
"step-3": "from django import forms\nfrom django.forms import ModelForm, fields, widgets\nfrom .models import NewsStory\n\n\nclass StoryForm(ModelForm):\n\n\n class Meta:\n model = NewsStory\n fields = ['title', 'pub_date', 'content']\n widgets = {'pub_date': forms.DateInput(format='%m/%d/%Y', attrs={\n 'class': 'form-control', 'placeholder': 'select a date', 'type':\n 'date'})}\n",
"step-4": "from django import forms\nfrom django.forms import ModelForm, fields, widgets\nfrom .models import NewsStory\n\nclass StoryForm(ModelForm):\n class Meta:\n model = NewsStory\n fields = ['title' , 'pub_date' , 'content']\n widgets = {\n 'pub_date': forms.DateInput(format=('%m/%d/%Y'), attrs={'class':'form-control', 'placeholder':'select a date', 'type':'date'}),\n\n }\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from django.apps import AppConfig
from django.utils.translation import gettext_lazy as _
class StravaAuthConfig(AppConfig):
name = "strava.contrib.strava_django"
verbose_name = _("Strava Auth")
def ready(self):
pass
|
normal
|
{
"blob_id": "9e43eb3c3ab3be4e695dbc80aa005332b8d8a4ec",
"index": 9515,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass StravaAuthConfig(AppConfig):\n <mask token>\n <mask token>\n\n def ready(self):\n pass\n",
"step-3": "<mask token>\n\n\nclass StravaAuthConfig(AppConfig):\n name = 'strava.contrib.strava_django'\n verbose_name = _('Strava Auth')\n\n def ready(self):\n pass\n",
"step-4": "from django.apps import AppConfig\nfrom django.utils.translation import gettext_lazy as _\n\n\nclass StravaAuthConfig(AppConfig):\n name = 'strava.contrib.strava_django'\n verbose_name = _('Strava Auth')\n\n def ready(self):\n pass\n",
"step-5": "from django.apps import AppConfig\nfrom django.utils.translation import gettext_lazy as _\n\n\nclass StravaAuthConfig(AppConfig):\n name = \"strava.contrib.strava_django\"\n verbose_name = _(\"Strava Auth\")\n\n def ready(self):\n pass\n",
"step-ids": [
0,
2,
3,
4,
5
]
}
|
[
0,
2,
3,
4,
5
] |
from keras.layers import Dense, Activation, Dropout
from keras.utils.visualize_util import plot
from keras.models import Sequential
from emotions import FER2013Dataset
_deep_models = {}
def deep_model(model_name):
def wrapper(cls):
_deep_models[model_name] = cls
return cls
return wrapper
def get_model(model_name):
if model_name not in _deep_models:
available_models = ", ".join(_deep_models.keys())
raise ValueError(
"Model '%s' not found. Available models are: %s"
% (model_name, available_models))
return _deep_models[model_name]
def init_model(name, *args, **kwargs):
return get_model(name)(*args, **kwargs)
class DeepModel:
image_size = 48
n_pixels = image_size ** 2
n_classes = len(FER2013Dataset.VERBOSE_EMOTION)
def __init__(self, *args, **kwargs):
self.model = None
@property
def name(self):
return self.__class__.__name__
def build(self, **params):
raise NotImplementedError()
def show_structure(self, filename=None):
if not filename:
filename = self.name + '.png'
plot(self.model, to_file=filename)
@deep_model('trivial')
class DummyModel(DeepModel):
def build(self, **params):
model = Sequential()
model.add(Dense(self.n_pixels, input_dim=self.n_pixels, init='normal'))
model.add(Activation('relu'))
model.add(Dense(self.n_classes, input_dim=self.n_pixels, init='normal'))
model.add(Activation('softmax'))
self.model = model
return model
@deep_model('simple')
class SimpleFeedforwardModel(DeepModel):
def build(self, init='normal', optimizer='adam', activation='relu',
output_activation='sigmoid'):
model = Sequential()
model.add(Dense(self.n_pixels, input_dim=self.n_pixels, init=init))
model.add(Activation(activation))
model.add(Dense(self.n_pixels * 2, init=init))
model.add(Activation(activation))
model.add(Dense(self.n_classes, init=init))
model.add(Activation(output_activation))
self.model = model
return model
@deep_model('dropout')
class DropoutFeedforwardModel(DeepModel):
def build(self, init='normal', optimizer='adam', activation='relu',
output_activation='sigmoid', dropout=0.2):
model = Sequential()
model.add(Dense(self.n_pixels, input_dim=self.n_pixels, init=init))
model.add(Activation(activation))
model.add(Dense(self.n_pixels * 2, init=init))
model.add(Activation(activation))
model.add(Dropout(dropout))
model.add(Dense(self.n_pixels * 4, init=init))
model.add(Activation(activation))
model.add(Dropout(dropout))
model.add(Dense(self.n_classes, init=init))
model.add(Activation(output_activation))
self.model = model
return model
|
normal
|
{
"blob_id": "36257340ebbc6bd2c7fa5995511b2c859f58f8e5",
"index": 3232,
"step-1": "<mask token>\n\n\nclass DeepModel:\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\n@deep_model('trivial')\nclass DummyModel(DeepModel):\n\n def build(self, **params):\n model = Sequential()\n model.add(Dense(self.n_pixels, input_dim=self.n_pixels, init='normal'))\n model.add(Activation('relu'))\n model.add(Dense(self.n_classes, input_dim=self.n_pixels, init='normal')\n )\n model.add(Activation('softmax'))\n self.model = model\n return model\n\n\n@deep_model('simple')\nclass SimpleFeedforwardModel(DeepModel):\n\n def build(self, init='normal', optimizer='adam', activation='relu',\n output_activation='sigmoid'):\n model = Sequential()\n model.add(Dense(self.n_pixels, input_dim=self.n_pixels, init=init))\n model.add(Activation(activation))\n model.add(Dense(self.n_pixels * 2, init=init))\n model.add(Activation(activation))\n model.add(Dense(self.n_classes, init=init))\n model.add(Activation(output_activation))\n self.model = model\n return model\n\n\n@deep_model('dropout')\nclass DropoutFeedforwardModel(DeepModel):\n\n def build(self, init='normal', optimizer='adam', activation='relu',\n output_activation='sigmoid', dropout=0.2):\n model = Sequential()\n model.add(Dense(self.n_pixels, input_dim=self.n_pixels, init=init))\n model.add(Activation(activation))\n model.add(Dense(self.n_pixels * 2, init=init))\n model.add(Activation(activation))\n model.add(Dropout(dropout))\n model.add(Dense(self.n_pixels * 4, init=init))\n model.add(Activation(activation))\n model.add(Dropout(dropout))\n model.add(Dense(self.n_classes, init=init))\n model.add(Activation(output_activation))\n self.model = model\n return model\n",
"step-2": "<mask token>\n\n\nclass DeepModel:\n image_size = 48\n n_pixels = image_size ** 2\n n_classes = len(FER2013Dataset.VERBOSE_EMOTION)\n\n def __init__(self, *args, **kwargs):\n self.model = None\n\n @property\n def name(self):\n return self.__class__.__name__\n\n def build(self, **params):\n raise NotImplementedError()\n\n def show_structure(self, filename=None):\n if not filename:\n filename = self.name + '.png'\n plot(self.model, to_file=filename)\n\n\n@deep_model('trivial')\nclass DummyModel(DeepModel):\n\n def build(self, **params):\n model = Sequential()\n model.add(Dense(self.n_pixels, input_dim=self.n_pixels, init='normal'))\n model.add(Activation('relu'))\n model.add(Dense(self.n_classes, input_dim=self.n_pixels, init='normal')\n )\n model.add(Activation('softmax'))\n self.model = model\n return model\n\n\n@deep_model('simple')\nclass SimpleFeedforwardModel(DeepModel):\n\n def build(self, init='normal', optimizer='adam', activation='relu',\n output_activation='sigmoid'):\n model = Sequential()\n model.add(Dense(self.n_pixels, input_dim=self.n_pixels, init=init))\n model.add(Activation(activation))\n model.add(Dense(self.n_pixels * 2, init=init))\n model.add(Activation(activation))\n model.add(Dense(self.n_classes, init=init))\n model.add(Activation(output_activation))\n self.model = model\n return model\n\n\n@deep_model('dropout')\nclass DropoutFeedforwardModel(DeepModel):\n\n def build(self, init='normal', optimizer='adam', activation='relu',\n output_activation='sigmoid', dropout=0.2):\n model = Sequential()\n model.add(Dense(self.n_pixels, input_dim=self.n_pixels, init=init))\n model.add(Activation(activation))\n model.add(Dense(self.n_pixels * 2, init=init))\n model.add(Activation(activation))\n model.add(Dropout(dropout))\n model.add(Dense(self.n_pixels * 4, init=init))\n model.add(Activation(activation))\n model.add(Dropout(dropout))\n model.add(Dense(self.n_classes, init=init))\n model.add(Activation(output_activation))\n self.model = model\n return model\n",
"step-3": "<mask token>\n\n\ndef get_model(model_name):\n if model_name not in _deep_models:\n available_models = ', '.join(_deep_models.keys())\n raise ValueError(\"Model '%s' not found. Available models are: %s\" %\n (model_name, available_models))\n return _deep_models[model_name]\n\n\n<mask token>\n\n\nclass DeepModel:\n image_size = 48\n n_pixels = image_size ** 2\n n_classes = len(FER2013Dataset.VERBOSE_EMOTION)\n\n def __init__(self, *args, **kwargs):\n self.model = None\n\n @property\n def name(self):\n return self.__class__.__name__\n\n def build(self, **params):\n raise NotImplementedError()\n\n def show_structure(self, filename=None):\n if not filename:\n filename = self.name + '.png'\n plot(self.model, to_file=filename)\n\n\n@deep_model('trivial')\nclass DummyModel(DeepModel):\n\n def build(self, **params):\n model = Sequential()\n model.add(Dense(self.n_pixels, input_dim=self.n_pixels, init='normal'))\n model.add(Activation('relu'))\n model.add(Dense(self.n_classes, input_dim=self.n_pixels, init='normal')\n )\n model.add(Activation('softmax'))\n self.model = model\n return model\n\n\n@deep_model('simple')\nclass SimpleFeedforwardModel(DeepModel):\n\n def build(self, init='normal', optimizer='adam', activation='relu',\n output_activation='sigmoid'):\n model = Sequential()\n model.add(Dense(self.n_pixels, input_dim=self.n_pixels, init=init))\n model.add(Activation(activation))\n model.add(Dense(self.n_pixels * 2, init=init))\n model.add(Activation(activation))\n model.add(Dense(self.n_classes, init=init))\n model.add(Activation(output_activation))\n self.model = model\n return model\n\n\n@deep_model('dropout')\nclass DropoutFeedforwardModel(DeepModel):\n\n def build(self, init='normal', optimizer='adam', activation='relu',\n output_activation='sigmoid', dropout=0.2):\n model = Sequential()\n model.add(Dense(self.n_pixels, input_dim=self.n_pixels, init=init))\n model.add(Activation(activation))\n model.add(Dense(self.n_pixels * 2, init=init))\n model.add(Activation(activation))\n model.add(Dropout(dropout))\n model.add(Dense(self.n_pixels * 4, init=init))\n model.add(Activation(activation))\n model.add(Dropout(dropout))\n model.add(Dense(self.n_classes, init=init))\n model.add(Activation(output_activation))\n self.model = model\n return model\n",
"step-4": "<mask token>\n\n\ndef get_model(model_name):\n if model_name not in _deep_models:\n available_models = ', '.join(_deep_models.keys())\n raise ValueError(\"Model '%s' not found. Available models are: %s\" %\n (model_name, available_models))\n return _deep_models[model_name]\n\n\ndef init_model(name, *args, **kwargs):\n return get_model(name)(*args, **kwargs)\n\n\nclass DeepModel:\n image_size = 48\n n_pixels = image_size ** 2\n n_classes = len(FER2013Dataset.VERBOSE_EMOTION)\n\n def __init__(self, *args, **kwargs):\n self.model = None\n\n @property\n def name(self):\n return self.__class__.__name__\n\n def build(self, **params):\n raise NotImplementedError()\n\n def show_structure(self, filename=None):\n if not filename:\n filename = self.name + '.png'\n plot(self.model, to_file=filename)\n\n\n@deep_model('trivial')\nclass DummyModel(DeepModel):\n\n def build(self, **params):\n model = Sequential()\n model.add(Dense(self.n_pixels, input_dim=self.n_pixels, init='normal'))\n model.add(Activation('relu'))\n model.add(Dense(self.n_classes, input_dim=self.n_pixels, init='normal')\n )\n model.add(Activation('softmax'))\n self.model = model\n return model\n\n\n@deep_model('simple')\nclass SimpleFeedforwardModel(DeepModel):\n\n def build(self, init='normal', optimizer='adam', activation='relu',\n output_activation='sigmoid'):\n model = Sequential()\n model.add(Dense(self.n_pixels, input_dim=self.n_pixels, init=init))\n model.add(Activation(activation))\n model.add(Dense(self.n_pixels * 2, init=init))\n model.add(Activation(activation))\n model.add(Dense(self.n_classes, init=init))\n model.add(Activation(output_activation))\n self.model = model\n return model\n\n\n@deep_model('dropout')\nclass DropoutFeedforwardModel(DeepModel):\n\n def build(self, init='normal', optimizer='adam', activation='relu',\n output_activation='sigmoid', dropout=0.2):\n model = Sequential()\n model.add(Dense(self.n_pixels, input_dim=self.n_pixels, init=init))\n model.add(Activation(activation))\n model.add(Dense(self.n_pixels * 2, init=init))\n model.add(Activation(activation))\n model.add(Dropout(dropout))\n model.add(Dense(self.n_pixels * 4, init=init))\n model.add(Activation(activation))\n model.add(Dropout(dropout))\n model.add(Dense(self.n_classes, init=init))\n model.add(Activation(output_activation))\n self.model = model\n return model\n",
"step-5": "from keras.layers import Dense, Activation, Dropout\nfrom keras.utils.visualize_util import plot\nfrom keras.models import Sequential\n\nfrom emotions import FER2013Dataset\n\n\n_deep_models = {}\n\n\ndef deep_model(model_name):\n def wrapper(cls):\n _deep_models[model_name] = cls\n return cls\n return wrapper\n\n\ndef get_model(model_name):\n if model_name not in _deep_models:\n available_models = \", \".join(_deep_models.keys())\n raise ValueError(\n \"Model '%s' not found. Available models are: %s\"\n % (model_name, available_models))\n return _deep_models[model_name]\n\n\ndef init_model(name, *args, **kwargs):\n return get_model(name)(*args, **kwargs)\n\n\nclass DeepModel:\n\n image_size = 48\n n_pixels = image_size ** 2\n n_classes = len(FER2013Dataset.VERBOSE_EMOTION)\n\n def __init__(self, *args, **kwargs):\n self.model = None\n\n @property\n def name(self):\n return self.__class__.__name__\n\n def build(self, **params):\n raise NotImplementedError()\n\n def show_structure(self, filename=None):\n if not filename:\n filename = self.name + '.png'\n plot(self.model, to_file=filename)\n\n\n@deep_model('trivial')\nclass DummyModel(DeepModel):\n\n def build(self, **params):\n model = Sequential()\n model.add(Dense(self.n_pixels, input_dim=self.n_pixels, init='normal'))\n model.add(Activation('relu'))\n model.add(Dense(self.n_classes, input_dim=self.n_pixels, init='normal'))\n model.add(Activation('softmax'))\n self.model = model\n return model\n\n\n@deep_model('simple')\nclass SimpleFeedforwardModel(DeepModel):\n\n def build(self, init='normal', optimizer='adam', activation='relu',\n\n output_activation='sigmoid'):\n\n model = Sequential()\n model.add(Dense(self.n_pixels, input_dim=self.n_pixels, init=init))\n model.add(Activation(activation))\n model.add(Dense(self.n_pixels * 2, init=init))\n model.add(Activation(activation))\n model.add(Dense(self.n_classes, init=init))\n model.add(Activation(output_activation))\n self.model = model\n\n return model\n\n\n@deep_model('dropout')\nclass DropoutFeedforwardModel(DeepModel):\n\n def build(self, init='normal', optimizer='adam', activation='relu',\n output_activation='sigmoid', dropout=0.2):\n\n model = Sequential()\n model.add(Dense(self.n_pixels, input_dim=self.n_pixels, init=init))\n model.add(Activation(activation))\n model.add(Dense(self.n_pixels * 2, init=init))\n model.add(Activation(activation))\n model.add(Dropout(dropout))\n model.add(Dense(self.n_pixels * 4, init=init))\n model.add(Activation(activation))\n model.add(Dropout(dropout))\n model.add(Dense(self.n_classes, init=init))\n model.add(Activation(output_activation))\n self.model = model\n\n return model\n",
"step-ids": [
7,
12,
13,
14,
18
]
}
|
[
7,
12,
13,
14,
18
] |
from typing import List, Tuple
from unittest import TestCase
from solutions.python.common.timing import decompose, parse_decomposed_duration, format_duration
class TestTiming(TestCase):
def test_decompose_ns(self):
# Given
duration: int = 234
# When
decomposition: List[Tuple[int, str]] = decompose(duration)
# Then
expected_decomposition: List[Tuple[int, str]] = [(234, 'ns')]
self.assertListEqual(expected_decomposition, decomposition)
def test_decompose_us(self):
# Given
duration: int = 23456
# When
decomposition: List[Tuple[int, str]] = decompose(duration)
# Then
expected_decomposition: List[Tuple[int, str]] = [(23, 'μs'), (456, 'ns')]
self.assertListEqual(expected_decomposition, decomposition)
def test_decompose_ms(self):
# Given
duration: int = 1023456
# When
decomposition: List[Tuple[int, str]] = decompose(duration)
# Then
expected_decomposition: List[Tuple[int, str]] = [(1, 'ms'), (23, 'μs'), (456, 'ns')]
self.assertListEqual(expected_decomposition, decomposition)
def test_decompose_s(self):
# Given
duration: int = 45001023456
# When
decomposition: List[Tuple[int, str]] = decompose(duration)
# Then
expected_decomposition: List[Tuple[int, str]] = [(45, 's'), (1, 'ms'), (23, 'μs'), (456, 'ns')]
self.assertListEqual(expected_decomposition, decomposition)
def test_decompose_min(self):
# Given
duration: int = 65001023456
# When
decomposition: List[Tuple[int, str]] = decompose(duration)
# Then
expected_decomposition: List[Tuple[int, str]] = [(1, 'min'), (5, 's'), (1, 'ms'), (23, 'μs'), (456, 'ns')]
self.assertListEqual(expected_decomposition, decomposition)
def test_decompose_h(self):
# Given
duration: int = 7995125885088
# When
decomposition: List[Tuple[int, str]] = decompose(duration)
# Then
expected_decomposition: List[Tuple[int, str]] = [(2, 'h'), (13, 'min'), (15, 's'),
(125, 'ms'), (885, 'μs'), (88, 'ns')]
self.assertListEqual(expected_decomposition, decomposition)
def test_parse_decomposed_duration_ns(self):
# Given
decomposition: List[Tuple[int, str]] = [(234, 'ns')]
# When
parsed_duration: str = parse_decomposed_duration(decomposition)
# Then
expected_parsed_duration: str = '234 ns'
self.assertEqual(expected_parsed_duration, parsed_duration)
def test_parse_decomposed_duration_us(self):
# Given
decomposition: List[Tuple[int, str]] = [(23, 'μs'), (456, 'ns')]
# When
parsed_duration: str = parse_decomposed_duration(decomposition)
# Then
expected_parsed_duration: str = '23.456 μs'
self.assertEqual(expected_parsed_duration, parsed_duration)
def test_parse_decomposed_duration_ms(self):
# Given
decomposition: List[Tuple[int, str]] = [(1, 'ms'), (23, 'μs'), (456, 'ns')]
# When
parsed_duration: str = parse_decomposed_duration(decomposition)
# Then
expected_parsed_duration: str = '1.023 ms'
self.assertEqual(expected_parsed_duration, parsed_duration)
def test_parse_decomposed_duration_s(self):
# Given
decomposition: List[Tuple[int, str]] = [(45, 's'), (1, 'ms'), (23, 'μs'), (456, 'ns')]
# When
parsed_duration: str = parse_decomposed_duration(decomposition)
# Then
expected_parsed_duration: str = '45.001 s'
self.assertEqual(expected_parsed_duration, parsed_duration)
def test_parse_decomposed_duration_min(self):
# Given
decomposition: List[Tuple[int, str]] = [(1, 'min'), (5, 's'), (1, 'ms'), (23, 'μs'), (456, 'ns')]
# When
parsed_duration: str = parse_decomposed_duration(decomposition)
# Then
expected_parsed_duration: str = '1 min 5 s'
self.assertEqual(expected_parsed_duration, parsed_duration)
def test_parse_decomposed_duration_h(self):
# Given
decomposition: List[Tuple[int, str]] = [(2, 'h'), (13, 'min'), (15, 's'), (125, 'ms'), (885, 'μs'), (88, 'ns')]
# When
parsed_duration: str = parse_decomposed_duration(decomposition)
# Then
expected_parsed_duration: str = '2 h 13 min'
self.assertEqual(expected_parsed_duration, parsed_duration)
def test_format_duration_h(self):
# Given
duration_ns: int = 7995125885088
# When
formatted_duration: str = format_duration(duration_ns)
# Then
expected_formatted_duration: str = '2 h 13 min'
self.assertEqual(expected_formatted_duration, formatted_duration)
def test_format_duration_us(self):
# Given
duration_ns: int = 23456
# When
formatted_duration: str = format_duration(duration_ns)
# Then
expected_formatted_duration: str = '23.456 μs'
self.assertEqual(expected_formatted_duration, formatted_duration)
|
normal
|
{
"blob_id": "afecbb46a98fbf6b5c26f5b6c8026aec035fadf1",
"index": 6696,
"step-1": "<mask token>\n\n\nclass TestTiming(TestCase):\n\n def test_decompose_ns(self):\n duration: int = 234\n decomposition: List[Tuple[int, str]] = decompose(duration)\n expected_decomposition: List[Tuple[int, str]] = [(234, 'ns')]\n self.assertListEqual(expected_decomposition, decomposition)\n <mask token>\n\n def test_decompose_ms(self):\n duration: int = 1023456\n decomposition: List[Tuple[int, str]] = decompose(duration)\n expected_decomposition: List[Tuple[int, str]] = [(1, 'ms'), (23,\n 'μs'), (456, 'ns')]\n self.assertListEqual(expected_decomposition, decomposition)\n <mask token>\n <mask token>\n <mask token>\n\n def test_parse_decomposed_duration_ns(self):\n decomposition: List[Tuple[int, str]] = [(234, 'ns')]\n parsed_duration: str = parse_decomposed_duration(decomposition)\n expected_parsed_duration: str = '234 ns'\n self.assertEqual(expected_parsed_duration, parsed_duration)\n <mask token>\n\n def test_parse_decomposed_duration_ms(self):\n decomposition: List[Tuple[int, str]] = [(1, 'ms'), (23, 'μs'), (456,\n 'ns')]\n parsed_duration: str = parse_decomposed_duration(decomposition)\n expected_parsed_duration: str = '1.023 ms'\n self.assertEqual(expected_parsed_duration, parsed_duration)\n <mask token>\n <mask token>\n\n def test_parse_decomposed_duration_h(self):\n decomposition: List[Tuple[int, str]] = [(2, 'h'), (13, 'min'), (15,\n 's'), (125, 'ms'), (885, 'μs'), (88, 'ns')]\n parsed_duration: str = parse_decomposed_duration(decomposition)\n expected_parsed_duration: str = '2 h 13 min'\n self.assertEqual(expected_parsed_duration, parsed_duration)\n\n def test_format_duration_h(self):\n duration_ns: int = 7995125885088\n formatted_duration: str = format_duration(duration_ns)\n expected_formatted_duration: str = '2 h 13 min'\n self.assertEqual(expected_formatted_duration, formatted_duration)\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass TestTiming(TestCase):\n\n def test_decompose_ns(self):\n duration: int = 234\n decomposition: List[Tuple[int, str]] = decompose(duration)\n expected_decomposition: List[Tuple[int, str]] = [(234, 'ns')]\n self.assertListEqual(expected_decomposition, decomposition)\n <mask token>\n\n def test_decompose_ms(self):\n duration: int = 1023456\n decomposition: List[Tuple[int, str]] = decompose(duration)\n expected_decomposition: List[Tuple[int, str]] = [(1, 'ms'), (23,\n 'μs'), (456, 'ns')]\n self.assertListEqual(expected_decomposition, decomposition)\n\n def test_decompose_s(self):\n duration: int = 45001023456\n decomposition: List[Tuple[int, str]] = decompose(duration)\n expected_decomposition: List[Tuple[int, str]] = [(45, 's'), (1,\n 'ms'), (23, 'μs'), (456, 'ns')]\n self.assertListEqual(expected_decomposition, decomposition)\n\n def test_decompose_min(self):\n duration: int = 65001023456\n decomposition: List[Tuple[int, str]] = decompose(duration)\n expected_decomposition: List[Tuple[int, str]] = [(1, 'min'), (5,\n 's'), (1, 'ms'), (23, 'μs'), (456, 'ns')]\n self.assertListEqual(expected_decomposition, decomposition)\n\n def test_decompose_h(self):\n duration: int = 7995125885088\n decomposition: List[Tuple[int, str]] = decompose(duration)\n expected_decomposition: List[Tuple[int, str]] = [(2, 'h'), (13,\n 'min'), (15, 's'), (125, 'ms'), (885, 'μs'), (88, 'ns')]\n self.assertListEqual(expected_decomposition, decomposition)\n\n def test_parse_decomposed_duration_ns(self):\n decomposition: List[Tuple[int, str]] = [(234, 'ns')]\n parsed_duration: str = parse_decomposed_duration(decomposition)\n expected_parsed_duration: str = '234 ns'\n self.assertEqual(expected_parsed_duration, parsed_duration)\n <mask token>\n\n def test_parse_decomposed_duration_ms(self):\n decomposition: List[Tuple[int, str]] = [(1, 'ms'), (23, 'μs'), (456,\n 'ns')]\n parsed_duration: str = parse_decomposed_duration(decomposition)\n expected_parsed_duration: str = '1.023 ms'\n self.assertEqual(expected_parsed_duration, parsed_duration)\n\n def test_parse_decomposed_duration_s(self):\n decomposition: List[Tuple[int, str]] = [(45, 's'), (1, 'ms'), (23,\n 'μs'), (456, 'ns')]\n parsed_duration: str = parse_decomposed_duration(decomposition)\n expected_parsed_duration: str = '45.001 s'\n self.assertEqual(expected_parsed_duration, parsed_duration)\n <mask token>\n\n def test_parse_decomposed_duration_h(self):\n decomposition: List[Tuple[int, str]] = [(2, 'h'), (13, 'min'), (15,\n 's'), (125, 'ms'), (885, 'μs'), (88, 'ns')]\n parsed_duration: str = parse_decomposed_duration(decomposition)\n expected_parsed_duration: str = '2 h 13 min'\n self.assertEqual(expected_parsed_duration, parsed_duration)\n\n def test_format_duration_h(self):\n duration_ns: int = 7995125885088\n formatted_duration: str = format_duration(duration_ns)\n expected_formatted_duration: str = '2 h 13 min'\n self.assertEqual(expected_formatted_duration, formatted_duration)\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass TestTiming(TestCase):\n\n def test_decompose_ns(self):\n duration: int = 234\n decomposition: List[Tuple[int, str]] = decompose(duration)\n expected_decomposition: List[Tuple[int, str]] = [(234, 'ns')]\n self.assertListEqual(expected_decomposition, decomposition)\n <mask token>\n\n def test_decompose_ms(self):\n duration: int = 1023456\n decomposition: List[Tuple[int, str]] = decompose(duration)\n expected_decomposition: List[Tuple[int, str]] = [(1, 'ms'), (23,\n 'μs'), (456, 'ns')]\n self.assertListEqual(expected_decomposition, decomposition)\n\n def test_decompose_s(self):\n duration: int = 45001023456\n decomposition: List[Tuple[int, str]] = decompose(duration)\n expected_decomposition: List[Tuple[int, str]] = [(45, 's'), (1,\n 'ms'), (23, 'μs'), (456, 'ns')]\n self.assertListEqual(expected_decomposition, decomposition)\n\n def test_decompose_min(self):\n duration: int = 65001023456\n decomposition: List[Tuple[int, str]] = decompose(duration)\n expected_decomposition: List[Tuple[int, str]] = [(1, 'min'), (5,\n 's'), (1, 'ms'), (23, 'μs'), (456, 'ns')]\n self.assertListEqual(expected_decomposition, decomposition)\n\n def test_decompose_h(self):\n duration: int = 7995125885088\n decomposition: List[Tuple[int, str]] = decompose(duration)\n expected_decomposition: List[Tuple[int, str]] = [(2, 'h'), (13,\n 'min'), (15, 's'), (125, 'ms'), (885, 'μs'), (88, 'ns')]\n self.assertListEqual(expected_decomposition, decomposition)\n\n def test_parse_decomposed_duration_ns(self):\n decomposition: List[Tuple[int, str]] = [(234, 'ns')]\n parsed_duration: str = parse_decomposed_duration(decomposition)\n expected_parsed_duration: str = '234 ns'\n self.assertEqual(expected_parsed_duration, parsed_duration)\n <mask token>\n\n def test_parse_decomposed_duration_ms(self):\n decomposition: List[Tuple[int, str]] = [(1, 'ms'), (23, 'μs'), (456,\n 'ns')]\n parsed_duration: str = parse_decomposed_duration(decomposition)\n expected_parsed_duration: str = '1.023 ms'\n self.assertEqual(expected_parsed_duration, parsed_duration)\n\n def test_parse_decomposed_duration_s(self):\n decomposition: List[Tuple[int, str]] = [(45, 's'), (1, 'ms'), (23,\n 'μs'), (456, 'ns')]\n parsed_duration: str = parse_decomposed_duration(decomposition)\n expected_parsed_duration: str = '45.001 s'\n self.assertEqual(expected_parsed_duration, parsed_duration)\n\n def test_parse_decomposed_duration_min(self):\n decomposition: List[Tuple[int, str]] = [(1, 'min'), (5, 's'), (1,\n 'ms'), (23, 'μs'), (456, 'ns')]\n parsed_duration: str = parse_decomposed_duration(decomposition)\n expected_parsed_duration: str = '1 min 5 s'\n self.assertEqual(expected_parsed_duration, parsed_duration)\n\n def test_parse_decomposed_duration_h(self):\n decomposition: List[Tuple[int, str]] = [(2, 'h'), (13, 'min'), (15,\n 's'), (125, 'ms'), (885, 'μs'), (88, 'ns')]\n parsed_duration: str = parse_decomposed_duration(decomposition)\n expected_parsed_duration: str = '2 h 13 min'\n self.assertEqual(expected_parsed_duration, parsed_duration)\n\n def test_format_duration_h(self):\n duration_ns: int = 7995125885088\n formatted_duration: str = format_duration(duration_ns)\n expected_formatted_duration: str = '2 h 13 min'\n self.assertEqual(expected_formatted_duration, formatted_duration)\n <mask token>\n",
"step-4": "from typing import List, Tuple\nfrom unittest import TestCase\nfrom solutions.python.common.timing import decompose, parse_decomposed_duration, format_duration\n\n\nclass TestTiming(TestCase):\n\n def test_decompose_ns(self):\n duration: int = 234\n decomposition: List[Tuple[int, str]] = decompose(duration)\n expected_decomposition: List[Tuple[int, str]] = [(234, 'ns')]\n self.assertListEqual(expected_decomposition, decomposition)\n\n def test_decompose_us(self):\n duration: int = 23456\n decomposition: List[Tuple[int, str]] = decompose(duration)\n expected_decomposition: List[Tuple[int, str]] = [(23, 'μs'), (456,\n 'ns')]\n self.assertListEqual(expected_decomposition, decomposition)\n\n def test_decompose_ms(self):\n duration: int = 1023456\n decomposition: List[Tuple[int, str]] = decompose(duration)\n expected_decomposition: List[Tuple[int, str]] = [(1, 'ms'), (23,\n 'μs'), (456, 'ns')]\n self.assertListEqual(expected_decomposition, decomposition)\n\n def test_decompose_s(self):\n duration: int = 45001023456\n decomposition: List[Tuple[int, str]] = decompose(duration)\n expected_decomposition: List[Tuple[int, str]] = [(45, 's'), (1,\n 'ms'), (23, 'μs'), (456, 'ns')]\n self.assertListEqual(expected_decomposition, decomposition)\n\n def test_decompose_min(self):\n duration: int = 65001023456\n decomposition: List[Tuple[int, str]] = decompose(duration)\n expected_decomposition: List[Tuple[int, str]] = [(1, 'min'), (5,\n 's'), (1, 'ms'), (23, 'μs'), (456, 'ns')]\n self.assertListEqual(expected_decomposition, decomposition)\n\n def test_decompose_h(self):\n duration: int = 7995125885088\n decomposition: List[Tuple[int, str]] = decompose(duration)\n expected_decomposition: List[Tuple[int, str]] = [(2, 'h'), (13,\n 'min'), (15, 's'), (125, 'ms'), (885, 'μs'), (88, 'ns')]\n self.assertListEqual(expected_decomposition, decomposition)\n\n def test_parse_decomposed_duration_ns(self):\n decomposition: List[Tuple[int, str]] = [(234, 'ns')]\n parsed_duration: str = parse_decomposed_duration(decomposition)\n expected_parsed_duration: str = '234 ns'\n self.assertEqual(expected_parsed_duration, parsed_duration)\n\n def test_parse_decomposed_duration_us(self):\n decomposition: List[Tuple[int, str]] = [(23, 'μs'), (456, 'ns')]\n parsed_duration: str = parse_decomposed_duration(decomposition)\n expected_parsed_duration: str = '23.456 μs'\n self.assertEqual(expected_parsed_duration, parsed_duration)\n\n def test_parse_decomposed_duration_ms(self):\n decomposition: List[Tuple[int, str]] = [(1, 'ms'), (23, 'μs'), (456,\n 'ns')]\n parsed_duration: str = parse_decomposed_duration(decomposition)\n expected_parsed_duration: str = '1.023 ms'\n self.assertEqual(expected_parsed_duration, parsed_duration)\n\n def test_parse_decomposed_duration_s(self):\n decomposition: List[Tuple[int, str]] = [(45, 's'), (1, 'ms'), (23,\n 'μs'), (456, 'ns')]\n parsed_duration: str = parse_decomposed_duration(decomposition)\n expected_parsed_duration: str = '45.001 s'\n self.assertEqual(expected_parsed_duration, parsed_duration)\n\n def test_parse_decomposed_duration_min(self):\n decomposition: List[Tuple[int, str]] = [(1, 'min'), (5, 's'), (1,\n 'ms'), (23, 'μs'), (456, 'ns')]\n parsed_duration: str = parse_decomposed_duration(decomposition)\n expected_parsed_duration: str = '1 min 5 s'\n self.assertEqual(expected_parsed_duration, parsed_duration)\n\n def test_parse_decomposed_duration_h(self):\n decomposition: List[Tuple[int, str]] = [(2, 'h'), (13, 'min'), (15,\n 's'), (125, 'ms'), (885, 'μs'), (88, 'ns')]\n 
parsed_duration: str = parse_decomposed_duration(decomposition)\n expected_parsed_duration: str = '2 h 13 min'\n self.assertEqual(expected_parsed_duration, parsed_duration)\n\n def test_format_duration_h(self):\n duration_ns: int = 7995125885088\n formatted_duration: str = format_duration(duration_ns)\n expected_formatted_duration: str = '2 h 13 min'\n self.assertEqual(expected_formatted_duration, formatted_duration)\n\n def test_format_duration_us(self):\n duration_ns: int = 23456\n formatted_duration: str = format_duration(duration_ns)\n expected_formatted_duration: str = '23.456 μs'\n self.assertEqual(expected_formatted_duration, formatted_duration)\n",
"step-5": "from typing import List, Tuple\nfrom unittest import TestCase\n\nfrom solutions.python.common.timing import decompose, parse_decomposed_duration, format_duration\n\n\nclass TestTiming(TestCase):\n\n def test_decompose_ns(self):\n # Given\n duration: int = 234\n\n # When\n decomposition: List[Tuple[int, str]] = decompose(duration)\n\n # Then\n expected_decomposition: List[Tuple[int, str]] = [(234, 'ns')]\n self.assertListEqual(expected_decomposition, decomposition)\n\n def test_decompose_us(self):\n # Given\n duration: int = 23456\n\n # When\n decomposition: List[Tuple[int, str]] = decompose(duration)\n\n # Then\n expected_decomposition: List[Tuple[int, str]] = [(23, 'μs'), (456, 'ns')]\n self.assertListEqual(expected_decomposition, decomposition)\n\n def test_decompose_ms(self):\n # Given\n duration: int = 1023456\n\n # When\n decomposition: List[Tuple[int, str]] = decompose(duration)\n\n # Then\n expected_decomposition: List[Tuple[int, str]] = [(1, 'ms'), (23, 'μs'), (456, 'ns')]\n self.assertListEqual(expected_decomposition, decomposition)\n\n def test_decompose_s(self):\n # Given\n duration: int = 45001023456\n\n # When\n decomposition: List[Tuple[int, str]] = decompose(duration)\n\n # Then\n expected_decomposition: List[Tuple[int, str]] = [(45, 's'), (1, 'ms'), (23, 'μs'), (456, 'ns')]\n self.assertListEqual(expected_decomposition, decomposition)\n\n def test_decompose_min(self):\n # Given\n duration: int = 65001023456\n\n # When\n decomposition: List[Tuple[int, str]] = decompose(duration)\n\n # Then\n expected_decomposition: List[Tuple[int, str]] = [(1, 'min'), (5, 's'), (1, 'ms'), (23, 'μs'), (456, 'ns')]\n self.assertListEqual(expected_decomposition, decomposition)\n\n def test_decompose_h(self):\n # Given\n duration: int = 7995125885088\n\n # When\n decomposition: List[Tuple[int, str]] = decompose(duration)\n\n # Then\n expected_decomposition: List[Tuple[int, str]] = [(2, 'h'), (13, 'min'), (15, 's'),\n (125, 'ms'), (885, 'μs'), (88, 'ns')]\n self.assertListEqual(expected_decomposition, decomposition)\n\n def test_parse_decomposed_duration_ns(self):\n # Given\n decomposition: List[Tuple[int, str]] = [(234, 'ns')]\n\n # When\n parsed_duration: str = parse_decomposed_duration(decomposition)\n\n # Then\n expected_parsed_duration: str = '234 ns'\n self.assertEqual(expected_parsed_duration, parsed_duration)\n\n def test_parse_decomposed_duration_us(self):\n # Given\n decomposition: List[Tuple[int, str]] = [(23, 'μs'), (456, 'ns')]\n\n # When\n parsed_duration: str = parse_decomposed_duration(decomposition)\n\n # Then\n expected_parsed_duration: str = '23.456 μs'\n self.assertEqual(expected_parsed_duration, parsed_duration)\n\n def test_parse_decomposed_duration_ms(self):\n # Given\n decomposition: List[Tuple[int, str]] = [(1, 'ms'), (23, 'μs'), (456, 'ns')]\n\n # When\n parsed_duration: str = parse_decomposed_duration(decomposition)\n\n # Then\n expected_parsed_duration: str = '1.023 ms'\n self.assertEqual(expected_parsed_duration, parsed_duration)\n\n def test_parse_decomposed_duration_s(self):\n # Given\n decomposition: List[Tuple[int, str]] = [(45, 's'), (1, 'ms'), (23, 'μs'), (456, 'ns')]\n\n # When\n parsed_duration: str = parse_decomposed_duration(decomposition)\n\n # Then\n expected_parsed_duration: str = '45.001 s'\n self.assertEqual(expected_parsed_duration, parsed_duration)\n\n def test_parse_decomposed_duration_min(self):\n # Given\n decomposition: List[Tuple[int, str]] = [(1, 'min'), (5, 's'), (1, 'ms'), (23, 'μs'), (456, 'ns')]\n\n # When\n parsed_duration: str = 
parse_decomposed_duration(decomposition)\n\n # Then\n expected_parsed_duration: str = '1 min 5 s'\n self.assertEqual(expected_parsed_duration, parsed_duration)\n\n def test_parse_decomposed_duration_h(self):\n # Given\n decomposition: List[Tuple[int, str]] = [(2, 'h'), (13, 'min'), (15, 's'), (125, 'ms'), (885, 'μs'), (88, 'ns')]\n\n # When\n parsed_duration: str = parse_decomposed_duration(decomposition)\n\n # Then\n expected_parsed_duration: str = '2 h 13 min'\n self.assertEqual(expected_parsed_duration, parsed_duration)\n\n def test_format_duration_h(self):\n # Given\n duration_ns: int = 7995125885088\n\n # When\n formatted_duration: str = format_duration(duration_ns)\n\n # Then\n expected_formatted_duration: str = '2 h 13 min'\n self.assertEqual(expected_formatted_duration, formatted_duration)\n\n def test_format_duration_us(self):\n # Given\n duration_ns: int = 23456\n\n # When\n formatted_duration: str = format_duration(duration_ns)\n\n # Then\n expected_formatted_duration: str = '23.456 μs'\n self.assertEqual(expected_formatted_duration, formatted_duration)\n",
"step-ids": [
7,
11,
12,
16,
17
]
}
|
[
7,
11,
12,
16,
17
] |
'''
Unit test for `redi.create_summary_report()`
'''
import unittest
import os
import sys
from lxml import etree
from StringIO import StringIO
import time
import redi
file_dir = os.path.dirname(os.path.realpath(__file__))
goal_dir = os.path.join(file_dir, "../")
proj_root = os.path.abspath(goal_dir)+'/'
DEFAULT_DATA_DIRECTORY = os.getcwd()
class TestCreateSummaryReport(unittest.TestCase):
def setUp(self):
redi.configure_logging(DEFAULT_DATA_DIRECTORY)
self.test_report_params = {
'project': 'hcvtarget-uf',
'report_file_path': proj_root + 'config/report.xml',
'redcap_uri': 'https://hostname.org'}
self.test_report_data = {
'total_subjects': 5,
'form_details': {
'Total_chemistry_Forms': 22,
'Total_cbc_Forms': 53
},
'subject_details': {
'60': {'cbc_Forms': 1, 'chemistry_Forms': 1},
'61': {'cbc_Forms': 2, 'chemistry_Forms': 1},
'63': {'cbc_Forms': 11, 'chemistry_Forms': 4},
'59': {'cbc_Forms': 39, 'chemistry_Forms': 16}
},
'errors' : [],
}
self.specimen_taken_time_summary = {'total': 15, 'blank': 3}
self.test_alert_summary = {
'multiple_values_alert': [
'This is multiple values alert 1',
'This is multiple values alert 2',
'This is multiple values alert 3'],
'max_event_alert': [
'This is max event alert 1',
'This is max event alert 2',
'This is max event alert 3']
}
self.expected_xml = '''
<report>
<header>
<project>hcvtarget-uf</project>
<date>'''+time.strftime("%m/%d/%Y")+'''</date>
<redcapServerAddress>https://hostname.org</redcapServerAddress>
</header>
<summary>
<subjectCount>5</subjectCount>
<forms>
<form>
<form_name>Total_cbc_Forms</form_name>
<form_count>53</form_count>
</form>
<form>
<form_name>Total_chemistry_Forms</form_name>
<form_count>22</form_count>
</form>
</forms>
</summary>
<alerts>
<tooManyForms>
<eventAlert>
<message>This is max event alert 1</message>
</eventAlert>
<eventAlert>
<message>This is max event alert 2</message>
</eventAlert>
<eventAlert>
<message>This is max event alert 3</message>
</eventAlert>
</tooManyForms>
<tooManyValues>
<valuesAlert>
<message>This is multiple values alert 1</message>
</valuesAlert>
<valuesAlert>
<message>This is multiple values alert 2</message>
</valuesAlert>
<valuesAlert><message>This is multiple values alert 3</message>
</valuesAlert></tooManyValues>
</alerts>
<subjectsDetails>
<Subject><ID>59</ID>
<forms>
<form>
<form_name>cbc_Forms</form_name>
<form_count>39</form_count>
</form>
<form>
<form_name>chemistry_Forms</form_name>
<form_count>16</form_count>
</form>
</forms>
</Subject>
<Subject>
<ID>60</ID>
<forms>
<form>
<form_name>cbc_Forms</form_name>
<form_count>1</form_count></form>
<form>
<form_name>chemistry_Forms</form_name>
<form_count>1</form_count>
</form>
</forms>
</Subject>
<Subject><ID>61</ID>
<forms>
<form>
<form_name>cbc_Forms</form_name>
<form_count>2</form_count>
</form>
<form>
<form_name>chemistry_Forms</form_name>
<form_count>1</form_count>
</form>
</forms>
</Subject>
<Subject>
<ID>63</ID>
<forms>
<form>
<form_name>cbc_Forms</form_name>
<form_count>11</form_count>
</form>
<form>
<form_name>chemistry_Forms</form_name>
<form_count>4</form_count>
</form>
</forms>
</Subject>
</subjectsDetails>
<errors/>
<summaryOfSpecimenTakenTimes>
<total>15</total>
<blank>3</blank>
<percent>20.0</percent>
</summaryOfSpecimenTakenTimes>
</report>'''
self.schema_str = StringIO('''\
<xs:schema attributeFormDefault="unqualified" elementFormDefault="qualified" xmlns:xs="http://www.w3.org/2001/XMLSchema">
<xs:element name="report">
<xs:complexType>
<xs:sequence>
<xs:element name="header">
<xs:complexType>
<xs:sequence>
<xs:element type="xs:string" name="project"/>
<xs:element type="xs:string" name="date"/>
<xs:element type="xs:string" name="redcapServerAddress"/>
</xs:sequence>
</xs:complexType>
</xs:element>
<xs:element name="summary">
<xs:complexType>
<xs:sequence>
<xs:element type="xs:byte" name="subjectCount"/>
<xs:element name="forms">
<xs:complexType>
<xs:sequence>
<xs:element name="form" maxOccurs="unbounded" minOccurs="0">
<xs:complexType>
<xs:sequence>
<xs:element type="xs:string" name="form_name"/>
<xs:element type="xs:byte" name="form_count"/>
</xs:sequence>
</xs:complexType>
</xs:element>
</xs:sequence>
</xs:complexType>
</xs:element>
</xs:sequence>
</xs:complexType>
</xs:element>
<xs:element name="alerts">
<xs:complexType>
<xs:sequence>
<xs:element name="tooManyForms">
<xs:complexType>
<xs:sequence>
<xs:element name="eventAlert" maxOccurs="unbounded" minOccurs="0">
<xs:complexType>
<xs:sequence>
<xs:element type="xs:string" name="message"/>
</xs:sequence>
</xs:complexType>
</xs:element>
</xs:sequence>
</xs:complexType>
</xs:element>
<xs:element name="tooManyValues">
<xs:complexType>
<xs:sequence>
<xs:element name="valuesAlert" maxOccurs="unbounded" minOccurs="0">
<xs:complexType>
<xs:sequence>
<xs:element type="xs:string" name="message"/>
</xs:sequence>
</xs:complexType>
</xs:element>
</xs:sequence>
</xs:complexType>
</xs:element>
</xs:sequence>
</xs:complexType>
</xs:element>
<xs:element name="subjectsDetails">
<xs:complexType>
<xs:sequence>
<xs:element name="Subject" maxOccurs="unbounded" minOccurs="0">
<xs:complexType>
<xs:sequence>
<xs:element type="xs:byte" name="ID"/>
<xs:element name="forms">
<xs:complexType>
<xs:sequence>
<xs:element name="form" maxOccurs="unbounded" minOccurs="0">
<xs:complexType>
<xs:sequence>
<xs:element type="xs:string" name="form_name"/>
<xs:element type="xs:byte" name="form_count"/>
</xs:sequence>
</xs:complexType>
</xs:element>
</xs:sequence>
</xs:complexType>
</xs:element>
</xs:sequence>
</xs:complexType>
</xs:element>
</xs:sequence>
</xs:complexType>
</xs:element>
<xs:element name="errors">
</xs:element>
<xs:element name="summaryOfSpecimenTakenTimes">
<xs:complexType>
<xs:sequence>
<xs:element type="xs:byte" name="total"/>
<xs:element type="xs:byte" name="blank"/>
<xs:element type="xs:float" name="percent"/>
</xs:sequence>
</xs:complexType>
</xs:element>
</xs:sequence>
</xs:complexType>
</xs:element>
</xs:schema>''')
return
def test_create_summary_report(self):
sys.path.append('config')
self.newpath = proj_root+'config'
self.configFolderCreatedNow = False
if not os.path.exists(self.newpath):
self.configFolderCreatedNow = True
os.makedirs(self.newpath)
result = redi.create_summary_report(\
self.test_report_params, \
self.test_report_data, \
self.test_alert_summary, \
self.specimen_taken_time_summary)
result_string = etree.tostring(result)
#print result_string
xmlschema_doc = etree.parse(self.schema_str)
xml_schema = etree.XMLSchema(xmlschema_doc)
# validate the xml against the xsd schema
self.assertEqual(xml_schema.validate(result), True)
# validate the actual data in xml but strip the white space first
parser = etree.XMLParser(remove_blank_text=True)
clean_tree = etree.XML(self.expected_xml, parser=parser)
self.expected_xml = etree.tostring(clean_tree)
self.assertEqual(self.expected_xml, result_string)
def tearDown(self):
# delete the created xml file
with open(proj_root + 'config/report.xml'):
os.remove(proj_root + 'config/report.xml')
if self.configFolderCreatedNow:
os.rmdir(self.newpath)
return
if __name__ == '__main__':
unittest.main()
|
normal
|
{
"blob_id": "f9dd21aac7915b9bbf91eeffb5fd58ffdb43c6c3",
"index": 5857,
"step-1": "<mask token>\n\n\nclass TestCreateSummaryReport(unittest.TestCase):\n\n def setUp(self):\n redi.configure_logging(DEFAULT_DATA_DIRECTORY)\n self.test_report_params = {'project': 'hcvtarget-uf',\n 'report_file_path': proj_root + 'config/report.xml',\n 'redcap_uri': 'https://hostname.org'}\n self.test_report_data = {'total_subjects': 5, 'form_details': {\n 'Total_chemistry_Forms': 22, 'Total_cbc_Forms': 53},\n 'subject_details': {'60': {'cbc_Forms': 1, 'chemistry_Forms': 1\n }, '61': {'cbc_Forms': 2, 'chemistry_Forms': 1}, '63': {\n 'cbc_Forms': 11, 'chemistry_Forms': 4}, '59': {'cbc_Forms': 39,\n 'chemistry_Forms': 16}}, 'errors': []}\n self.specimen_taken_time_summary = {'total': 15, 'blank': 3}\n self.test_alert_summary = {'multiple_values_alert': [\n 'This is multiple values alert 1',\n 'This is multiple values alert 2',\n 'This is multiple values alert 3'], 'max_event_alert': [\n 'This is max event alert 1', 'This is max event alert 2',\n 'This is max event alert 3']}\n self.expected_xml = \"\"\"\n<report>\n <header>\n <project>hcvtarget-uf</project>\n <date>\"\"\" + time.strftime('%m/%d/%Y') + \"\"\"</date>\n <redcapServerAddress>https://hostname.org</redcapServerAddress>\n </header>\n <summary>\n <subjectCount>5</subjectCount>\n <forms>\n <form>\n <form_name>Total_cbc_Forms</form_name>\n <form_count>53</form_count>\n </form>\n <form>\n <form_name>Total_chemistry_Forms</form_name>\n <form_count>22</form_count>\n </form>\n </forms>\n </summary>\n <alerts>\n <tooManyForms>\n <eventAlert>\n <message>This is max event alert 1</message>\n </eventAlert>\n <eventAlert>\n <message>This is max event alert 2</message>\n </eventAlert>\n <eventAlert>\n <message>This is max event alert 3</message>\n </eventAlert>\n </tooManyForms>\n <tooManyValues>\n <valuesAlert>\n <message>This is multiple values alert 1</message>\n </valuesAlert>\n <valuesAlert>\n <message>This is multiple values alert 2</message>\n </valuesAlert>\n <valuesAlert><message>This is multiple values alert 3</message>\n </valuesAlert></tooManyValues>\n </alerts>\n <subjectsDetails>\n <Subject><ID>59</ID>\n <forms>\n <form>\n <form_name>cbc_Forms</form_name>\n <form_count>39</form_count>\n </form>\n <form>\n <form_name>chemistry_Forms</form_name>\n <form_count>16</form_count>\n </form>\n </forms>\n </Subject>\n <Subject>\n <ID>60</ID>\n <forms>\n <form>\n <form_name>cbc_Forms</form_name>\n <form_count>1</form_count></form>\n <form>\n <form_name>chemistry_Forms</form_name>\n <form_count>1</form_count>\n </form>\n </forms>\n </Subject>\n <Subject><ID>61</ID>\n <forms>\n <form>\n <form_name>cbc_Forms</form_name>\n <form_count>2</form_count>\n </form>\n <form>\n <form_name>chemistry_Forms</form_name>\n <form_count>1</form_count>\n </form>\n </forms>\n </Subject>\n <Subject>\n <ID>63</ID>\n <forms>\n <form>\n <form_name>cbc_Forms</form_name>\n <form_count>11</form_count>\n </form>\n <form>\n <form_name>chemistry_Forms</form_name>\n <form_count>4</form_count>\n </form>\n </forms>\n </Subject>\n </subjectsDetails>\n <errors/>\n <summaryOfSpecimenTakenTimes>\n <total>15</total>\n <blank>3</blank>\n <percent>20.0</percent>\n </summaryOfSpecimenTakenTimes>\n</report>\"\"\"\n self.schema_str = StringIO(\n \"\"\" <xs:schema attributeFormDefault=\"unqualified\" elementFormDefault=\"qualified\" xmlns:xs=\"http://www.w3.org/2001/XMLSchema\">\n <xs:element name=\"report\">\n <xs:complexType>\n <xs:sequence>\n <xs:element name=\"header\">\n <xs:complexType>\n <xs:sequence>\n <xs:element type=\"xs:string\" name=\"project\"/>\n <xs:element 
type=\"xs:string\" name=\"date\"/>\n <xs:element type=\"xs:string\" name=\"redcapServerAddress\"/>\n </xs:sequence>\n </xs:complexType>\n </xs:element>\n <xs:element name=\"summary\">\n <xs:complexType>\n <xs:sequence>\n <xs:element type=\"xs:byte\" name=\"subjectCount\"/>\n <xs:element name=\"forms\">\n <xs:complexType>\n <xs:sequence>\n <xs:element name=\"form\" maxOccurs=\"unbounded\" minOccurs=\"0\">\n <xs:complexType>\n <xs:sequence>\n <xs:element type=\"xs:string\" name=\"form_name\"/>\n <xs:element type=\"xs:byte\" name=\"form_count\"/>\n </xs:sequence>\n </xs:complexType>\n </xs:element>\n </xs:sequence>\n </xs:complexType>\n </xs:element>\n </xs:sequence>\n </xs:complexType>\n </xs:element>\n <xs:element name=\"alerts\">\n <xs:complexType>\n\n <xs:sequence>\n <xs:element name=\"tooManyForms\">\n <xs:complexType>\n <xs:sequence>\n <xs:element name=\"eventAlert\" maxOccurs=\"unbounded\" minOccurs=\"0\">\n <xs:complexType>\n <xs:sequence>\n <xs:element type=\"xs:string\" name=\"message\"/>\n </xs:sequence>\n </xs:complexType>\n </xs:element>\n </xs:sequence>\n </xs:complexType>\n </xs:element>\n\n <xs:element name=\"tooManyValues\">\n <xs:complexType>\n <xs:sequence>\n <xs:element name=\"valuesAlert\" maxOccurs=\"unbounded\" minOccurs=\"0\">\n <xs:complexType>\n <xs:sequence>\n <xs:element type=\"xs:string\" name=\"message\"/>\n </xs:sequence>\n </xs:complexType>\n </xs:element>\n </xs:sequence>\n </xs:complexType>\n </xs:element>\n </xs:sequence>\n </xs:complexType>\n </xs:element>\n <xs:element name=\"subjectsDetails\">\n <xs:complexType>\n <xs:sequence>\n <xs:element name=\"Subject\" maxOccurs=\"unbounded\" minOccurs=\"0\">\n <xs:complexType>\n <xs:sequence>\n <xs:element type=\"xs:byte\" name=\"ID\"/>\n <xs:element name=\"forms\">\n <xs:complexType>\n <xs:sequence>\n <xs:element name=\"form\" maxOccurs=\"unbounded\" minOccurs=\"0\">\n <xs:complexType>\n <xs:sequence>\n <xs:element type=\"xs:string\" name=\"form_name\"/>\n <xs:element type=\"xs:byte\" name=\"form_count\"/>\n </xs:sequence>\n </xs:complexType>\n </xs:element>\n </xs:sequence>\n </xs:complexType>\n </xs:element>\n </xs:sequence>\n </xs:complexType>\n </xs:element>\n </xs:sequence>\n </xs:complexType>\n </xs:element>\n <xs:element name=\"errors\">\n </xs:element>\n <xs:element name=\"summaryOfSpecimenTakenTimes\">\n <xs:complexType>\n <xs:sequence>\n <xs:element type=\"xs:byte\" name=\"total\"/>\n <xs:element type=\"xs:byte\" name=\"blank\"/>\n <xs:element type=\"xs:float\" name=\"percent\"/>\n </xs:sequence>\n </xs:complexType>\n </xs:element>\n </xs:sequence>\n </xs:complexType>\n </xs:element>\n</xs:schema>\"\"\"\n )\n return\n\n def test_create_summary_report(self):\n sys.path.append('config')\n self.newpath = proj_root + 'config'\n self.configFolderCreatedNow = False\n if not os.path.exists(self.newpath):\n self.configFolderCreatedNow = True\n os.makedirs(self.newpath)\n result = redi.create_summary_report(self.test_report_params, self.\n test_report_data, self.test_alert_summary, self.\n specimen_taken_time_summary)\n result_string = etree.tostring(result)\n xmlschema_doc = etree.parse(self.schema_str)\n xml_schema = etree.XMLSchema(xmlschema_doc)\n self.assertEqual(xml_schema.validate(result), True)\n parser = etree.XMLParser(remove_blank_text=True)\n clean_tree = etree.XML(self.expected_xml, parser=parser)\n self.expected_xml = etree.tostring(clean_tree)\n self.assertEqual(self.expected_xml, result_string)\n\n def tearDown(self):\n with open(proj_root + 'config/report.xml'):\n os.remove(proj_root + 
'config/report.xml')\n if self.configFolderCreatedNow:\n os.rmdir(self.newpath)\n return\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass TestCreateSummaryReport(unittest.TestCase):\n\n def setUp(self):\n redi.configure_logging(DEFAULT_DATA_DIRECTORY)\n self.test_report_params = {'project': 'hcvtarget-uf',\n 'report_file_path': proj_root + 'config/report.xml',\n 'redcap_uri': 'https://hostname.org'}\n self.test_report_data = {'total_subjects': 5, 'form_details': {\n 'Total_chemistry_Forms': 22, 'Total_cbc_Forms': 53},\n 'subject_details': {'60': {'cbc_Forms': 1, 'chemistry_Forms': 1\n }, '61': {'cbc_Forms': 2, 'chemistry_Forms': 1}, '63': {\n 'cbc_Forms': 11, 'chemistry_Forms': 4}, '59': {'cbc_Forms': 39,\n 'chemistry_Forms': 16}}, 'errors': []}\n self.specimen_taken_time_summary = {'total': 15, 'blank': 3}\n self.test_alert_summary = {'multiple_values_alert': [\n 'This is multiple values alert 1',\n 'This is multiple values alert 2',\n 'This is multiple values alert 3'], 'max_event_alert': [\n 'This is max event alert 1', 'This is max event alert 2',\n 'This is max event alert 3']}\n self.expected_xml = \"\"\"\n<report>\n <header>\n <project>hcvtarget-uf</project>\n <date>\"\"\" + time.strftime('%m/%d/%Y') + \"\"\"</date>\n <redcapServerAddress>https://hostname.org</redcapServerAddress>\n </header>\n <summary>\n <subjectCount>5</subjectCount>\n <forms>\n <form>\n <form_name>Total_cbc_Forms</form_name>\n <form_count>53</form_count>\n </form>\n <form>\n <form_name>Total_chemistry_Forms</form_name>\n <form_count>22</form_count>\n </form>\n </forms>\n </summary>\n <alerts>\n <tooManyForms>\n <eventAlert>\n <message>This is max event alert 1</message>\n </eventAlert>\n <eventAlert>\n <message>This is max event alert 2</message>\n </eventAlert>\n <eventAlert>\n <message>This is max event alert 3</message>\n </eventAlert>\n </tooManyForms>\n <tooManyValues>\n <valuesAlert>\n <message>This is multiple values alert 1</message>\n </valuesAlert>\n <valuesAlert>\n <message>This is multiple values alert 2</message>\n </valuesAlert>\n <valuesAlert><message>This is multiple values alert 3</message>\n </valuesAlert></tooManyValues>\n </alerts>\n <subjectsDetails>\n <Subject><ID>59</ID>\n <forms>\n <form>\n <form_name>cbc_Forms</form_name>\n <form_count>39</form_count>\n </form>\n <form>\n <form_name>chemistry_Forms</form_name>\n <form_count>16</form_count>\n </form>\n </forms>\n </Subject>\n <Subject>\n <ID>60</ID>\n <forms>\n <form>\n <form_name>cbc_Forms</form_name>\n <form_count>1</form_count></form>\n <form>\n <form_name>chemistry_Forms</form_name>\n <form_count>1</form_count>\n </form>\n </forms>\n </Subject>\n <Subject><ID>61</ID>\n <forms>\n <form>\n <form_name>cbc_Forms</form_name>\n <form_count>2</form_count>\n </form>\n <form>\n <form_name>chemistry_Forms</form_name>\n <form_count>1</form_count>\n </form>\n </forms>\n </Subject>\n <Subject>\n <ID>63</ID>\n <forms>\n <form>\n <form_name>cbc_Forms</form_name>\n <form_count>11</form_count>\n </form>\n <form>\n <form_name>chemistry_Forms</form_name>\n <form_count>4</form_count>\n </form>\n </forms>\n </Subject>\n </subjectsDetails>\n <errors/>\n <summaryOfSpecimenTakenTimes>\n <total>15</total>\n <blank>3</blank>\n <percent>20.0</percent>\n </summaryOfSpecimenTakenTimes>\n</report>\"\"\"\n self.schema_str = StringIO(\n \"\"\" <xs:schema attributeFormDefault=\"unqualified\" elementFormDefault=\"qualified\" xmlns:xs=\"http://www.w3.org/2001/XMLSchema\">\n <xs:element name=\"report\">\n <xs:complexType>\n <xs:sequence>\n <xs:element name=\"header\">\n <xs:complexType>\n <xs:sequence>\n <xs:element type=\"xs:string\" name=\"project\"/>\n <xs:element 
type=\"xs:string\" name=\"date\"/>\n <xs:element type=\"xs:string\" name=\"redcapServerAddress\"/>\n </xs:sequence>\n </xs:complexType>\n </xs:element>\n <xs:element name=\"summary\">\n <xs:complexType>\n <xs:sequence>\n <xs:element type=\"xs:byte\" name=\"subjectCount\"/>\n <xs:element name=\"forms\">\n <xs:complexType>\n <xs:sequence>\n <xs:element name=\"form\" maxOccurs=\"unbounded\" minOccurs=\"0\">\n <xs:complexType>\n <xs:sequence>\n <xs:element type=\"xs:string\" name=\"form_name\"/>\n <xs:element type=\"xs:byte\" name=\"form_count\"/>\n </xs:sequence>\n </xs:complexType>\n </xs:element>\n </xs:sequence>\n </xs:complexType>\n </xs:element>\n </xs:sequence>\n </xs:complexType>\n </xs:element>\n <xs:element name=\"alerts\">\n <xs:complexType>\n\n <xs:sequence>\n <xs:element name=\"tooManyForms\">\n <xs:complexType>\n <xs:sequence>\n <xs:element name=\"eventAlert\" maxOccurs=\"unbounded\" minOccurs=\"0\">\n <xs:complexType>\n <xs:sequence>\n <xs:element type=\"xs:string\" name=\"message\"/>\n </xs:sequence>\n </xs:complexType>\n </xs:element>\n </xs:sequence>\n </xs:complexType>\n </xs:element>\n\n <xs:element name=\"tooManyValues\">\n <xs:complexType>\n <xs:sequence>\n <xs:element name=\"valuesAlert\" maxOccurs=\"unbounded\" minOccurs=\"0\">\n <xs:complexType>\n <xs:sequence>\n <xs:element type=\"xs:string\" name=\"message\"/>\n </xs:sequence>\n </xs:complexType>\n </xs:element>\n </xs:sequence>\n </xs:complexType>\n </xs:element>\n </xs:sequence>\n </xs:complexType>\n </xs:element>\n <xs:element name=\"subjectsDetails\">\n <xs:complexType>\n <xs:sequence>\n <xs:element name=\"Subject\" maxOccurs=\"unbounded\" minOccurs=\"0\">\n <xs:complexType>\n <xs:sequence>\n <xs:element type=\"xs:byte\" name=\"ID\"/>\n <xs:element name=\"forms\">\n <xs:complexType>\n <xs:sequence>\n <xs:element name=\"form\" maxOccurs=\"unbounded\" minOccurs=\"0\">\n <xs:complexType>\n <xs:sequence>\n <xs:element type=\"xs:string\" name=\"form_name\"/>\n <xs:element type=\"xs:byte\" name=\"form_count\"/>\n </xs:sequence>\n </xs:complexType>\n </xs:element>\n </xs:sequence>\n </xs:complexType>\n </xs:element>\n </xs:sequence>\n </xs:complexType>\n </xs:element>\n </xs:sequence>\n </xs:complexType>\n </xs:element>\n <xs:element name=\"errors\">\n </xs:element>\n <xs:element name=\"summaryOfSpecimenTakenTimes\">\n <xs:complexType>\n <xs:sequence>\n <xs:element type=\"xs:byte\" name=\"total\"/>\n <xs:element type=\"xs:byte\" name=\"blank\"/>\n <xs:element type=\"xs:float\" name=\"percent\"/>\n </xs:sequence>\n </xs:complexType>\n </xs:element>\n </xs:sequence>\n </xs:complexType>\n </xs:element>\n</xs:schema>\"\"\"\n )\n return\n\n def test_create_summary_report(self):\n sys.path.append('config')\n self.newpath = proj_root + 'config'\n self.configFolderCreatedNow = False\n if not os.path.exists(self.newpath):\n self.configFolderCreatedNow = True\n os.makedirs(self.newpath)\n result = redi.create_summary_report(self.test_report_params, self.\n test_report_data, self.test_alert_summary, self.\n specimen_taken_time_summary)\n result_string = etree.tostring(result)\n xmlschema_doc = etree.parse(self.schema_str)\n xml_schema = etree.XMLSchema(xmlschema_doc)\n self.assertEqual(xml_schema.validate(result), True)\n parser = etree.XMLParser(remove_blank_text=True)\n clean_tree = etree.XML(self.expected_xml, parser=parser)\n self.expected_xml = etree.tostring(clean_tree)\n self.assertEqual(self.expected_xml, result_string)\n\n def tearDown(self):\n with open(proj_root + 'config/report.xml'):\n os.remove(proj_root + 
'config/report.xml')\n if self.configFolderCreatedNow:\n os.rmdir(self.newpath)\n return\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"step-3": "<mask token>\nfile_dir = os.path.dirname(os.path.realpath(__file__))\ngoal_dir = os.path.join(file_dir, '../')\nproj_root = os.path.abspath(goal_dir) + '/'\nDEFAULT_DATA_DIRECTORY = os.getcwd()\n\n\nclass TestCreateSummaryReport(unittest.TestCase):\n\n def setUp(self):\n redi.configure_logging(DEFAULT_DATA_DIRECTORY)\n self.test_report_params = {'project': 'hcvtarget-uf',\n 'report_file_path': proj_root + 'config/report.xml',\n 'redcap_uri': 'https://hostname.org'}\n self.test_report_data = {'total_subjects': 5, 'form_details': {\n 'Total_chemistry_Forms': 22, 'Total_cbc_Forms': 53},\n 'subject_details': {'60': {'cbc_Forms': 1, 'chemistry_Forms': 1\n }, '61': {'cbc_Forms': 2, 'chemistry_Forms': 1}, '63': {\n 'cbc_Forms': 11, 'chemistry_Forms': 4}, '59': {'cbc_Forms': 39,\n 'chemistry_Forms': 16}}, 'errors': []}\n self.specimen_taken_time_summary = {'total': 15, 'blank': 3}\n self.test_alert_summary = {'multiple_values_alert': [\n 'This is multiple values alert 1',\n 'This is multiple values alert 2',\n 'This is multiple values alert 3'], 'max_event_alert': [\n 'This is max event alert 1', 'This is max event alert 2',\n 'This is max event alert 3']}\n self.expected_xml = \"\"\"\n<report>\n <header>\n <project>hcvtarget-uf</project>\n <date>\"\"\" + time.strftime('%m/%d/%Y') + \"\"\"</date>\n <redcapServerAddress>https://hostname.org</redcapServerAddress>\n </header>\n <summary>\n <subjectCount>5</subjectCount>\n <forms>\n <form>\n <form_name>Total_cbc_Forms</form_name>\n <form_count>53</form_count>\n </form>\n <form>\n <form_name>Total_chemistry_Forms</form_name>\n <form_count>22</form_count>\n </form>\n </forms>\n </summary>\n <alerts>\n <tooManyForms>\n <eventAlert>\n <message>This is max event alert 1</message>\n </eventAlert>\n <eventAlert>\n <message>This is max event alert 2</message>\n </eventAlert>\n <eventAlert>\n <message>This is max event alert 3</message>\n </eventAlert>\n </tooManyForms>\n <tooManyValues>\n <valuesAlert>\n <message>This is multiple values alert 1</message>\n </valuesAlert>\n <valuesAlert>\n <message>This is multiple values alert 2</message>\n </valuesAlert>\n <valuesAlert><message>This is multiple values alert 3</message>\n </valuesAlert></tooManyValues>\n </alerts>\n <subjectsDetails>\n <Subject><ID>59</ID>\n <forms>\n <form>\n <form_name>cbc_Forms</form_name>\n <form_count>39</form_count>\n </form>\n <form>\n <form_name>chemistry_Forms</form_name>\n <form_count>16</form_count>\n </form>\n </forms>\n </Subject>\n <Subject>\n <ID>60</ID>\n <forms>\n <form>\n <form_name>cbc_Forms</form_name>\n <form_count>1</form_count></form>\n <form>\n <form_name>chemistry_Forms</form_name>\n <form_count>1</form_count>\n </form>\n </forms>\n </Subject>\n <Subject><ID>61</ID>\n <forms>\n <form>\n <form_name>cbc_Forms</form_name>\n <form_count>2</form_count>\n </form>\n <form>\n <form_name>chemistry_Forms</form_name>\n <form_count>1</form_count>\n </form>\n </forms>\n </Subject>\n <Subject>\n <ID>63</ID>\n <forms>\n <form>\n <form_name>cbc_Forms</form_name>\n <form_count>11</form_count>\n </form>\n <form>\n <form_name>chemistry_Forms</form_name>\n <form_count>4</form_count>\n </form>\n </forms>\n </Subject>\n </subjectsDetails>\n <errors/>\n <summaryOfSpecimenTakenTimes>\n <total>15</total>\n <blank>3</blank>\n <percent>20.0</percent>\n </summaryOfSpecimenTakenTimes>\n</report>\"\"\"\n self.schema_str = StringIO(\n \"\"\" <xs:schema attributeFormDefault=\"unqualified\" elementFormDefault=\"qualified\" xmlns:xs=\"http://www.w3.org/2001/XMLSchema\">\n <xs:element 
name=\"report\">\n <xs:complexType>\n <xs:sequence>\n <xs:element name=\"header\">\n <xs:complexType>\n <xs:sequence>\n <xs:element type=\"xs:string\" name=\"project\"/>\n <xs:element type=\"xs:string\" name=\"date\"/>\n <xs:element type=\"xs:string\" name=\"redcapServerAddress\"/>\n </xs:sequence>\n </xs:complexType>\n </xs:element>\n <xs:element name=\"summary\">\n <xs:complexType>\n <xs:sequence>\n <xs:element type=\"xs:byte\" name=\"subjectCount\"/>\n <xs:element name=\"forms\">\n <xs:complexType>\n <xs:sequence>\n <xs:element name=\"form\" maxOccurs=\"unbounded\" minOccurs=\"0\">\n <xs:complexType>\n <xs:sequence>\n <xs:element type=\"xs:string\" name=\"form_name\"/>\n <xs:element type=\"xs:byte\" name=\"form_count\"/>\n </xs:sequence>\n </xs:complexType>\n </xs:element>\n </xs:sequence>\n </xs:complexType>\n </xs:element>\n </xs:sequence>\n </xs:complexType>\n </xs:element>\n <xs:element name=\"alerts\">\n <xs:complexType>\n\n <xs:sequence>\n <xs:element name=\"tooManyForms\">\n <xs:complexType>\n <xs:sequence>\n <xs:element name=\"eventAlert\" maxOccurs=\"unbounded\" minOccurs=\"0\">\n <xs:complexType>\n <xs:sequence>\n <xs:element type=\"xs:string\" name=\"message\"/>\n </xs:sequence>\n </xs:complexType>\n </xs:element>\n </xs:sequence>\n </xs:complexType>\n </xs:element>\n\n <xs:element name=\"tooManyValues\">\n <xs:complexType>\n <xs:sequence>\n <xs:element name=\"valuesAlert\" maxOccurs=\"unbounded\" minOccurs=\"0\">\n <xs:complexType>\n <xs:sequence>\n <xs:element type=\"xs:string\" name=\"message\"/>\n </xs:sequence>\n </xs:complexType>\n </xs:element>\n </xs:sequence>\n </xs:complexType>\n </xs:element>\n </xs:sequence>\n </xs:complexType>\n </xs:element>\n <xs:element name=\"subjectsDetails\">\n <xs:complexType>\n <xs:sequence>\n <xs:element name=\"Subject\" maxOccurs=\"unbounded\" minOccurs=\"0\">\n <xs:complexType>\n <xs:sequence>\n <xs:element type=\"xs:byte\" name=\"ID\"/>\n <xs:element name=\"forms\">\n <xs:complexType>\n <xs:sequence>\n <xs:element name=\"form\" maxOccurs=\"unbounded\" minOccurs=\"0\">\n <xs:complexType>\n <xs:sequence>\n <xs:element type=\"xs:string\" name=\"form_name\"/>\n <xs:element type=\"xs:byte\" name=\"form_count\"/>\n </xs:sequence>\n </xs:complexType>\n </xs:element>\n </xs:sequence>\n </xs:complexType>\n </xs:element>\n </xs:sequence>\n </xs:complexType>\n </xs:element>\n </xs:sequence>\n </xs:complexType>\n </xs:element>\n <xs:element name=\"errors\">\n </xs:element>\n <xs:element name=\"summaryOfSpecimenTakenTimes\">\n <xs:complexType>\n <xs:sequence>\n <xs:element type=\"xs:byte\" name=\"total\"/>\n <xs:element type=\"xs:byte\" name=\"blank\"/>\n <xs:element type=\"xs:float\" name=\"percent\"/>\n </xs:sequence>\n </xs:complexType>\n </xs:element>\n </xs:sequence>\n </xs:complexType>\n </xs:element>\n</xs:schema>\"\"\"\n )\n return\n\n def test_create_summary_report(self):\n sys.path.append('config')\n self.newpath = proj_root + 'config'\n self.configFolderCreatedNow = False\n if not os.path.exists(self.newpath):\n self.configFolderCreatedNow = True\n os.makedirs(self.newpath)\n result = redi.create_summary_report(self.test_report_params, self.\n test_report_data, self.test_alert_summary, self.\n specimen_taken_time_summary)\n result_string = etree.tostring(result)\n xmlschema_doc = etree.parse(self.schema_str)\n xml_schema = etree.XMLSchema(xmlschema_doc)\n self.assertEqual(xml_schema.validate(result), True)\n parser = etree.XMLParser(remove_blank_text=True)\n clean_tree = etree.XML(self.expected_xml, parser=parser)\n self.expected_xml = 
etree.tostring(clean_tree)\n self.assertEqual(self.expected_xml, result_string)\n\n def tearDown(self):\n with open(proj_root + 'config/report.xml'):\n os.remove(proj_root + 'config/report.xml')\n if self.configFolderCreatedNow:\n os.rmdir(self.newpath)\n return\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"step-4": "<mask token>\nimport unittest\nimport os\nimport sys\nfrom lxml import etree\nfrom StringIO import StringIO\nimport time\nimport redi\nfile_dir = os.path.dirname(os.path.realpath(__file__))\ngoal_dir = os.path.join(file_dir, '../')\nproj_root = os.path.abspath(goal_dir) + '/'\nDEFAULT_DATA_DIRECTORY = os.getcwd()\n\n\nclass TestCreateSummaryReport(unittest.TestCase):\n\n def setUp(self):\n redi.configure_logging(DEFAULT_DATA_DIRECTORY)\n self.test_report_params = {'project': 'hcvtarget-uf',\n 'report_file_path': proj_root + 'config/report.xml',\n 'redcap_uri': 'https://hostname.org'}\n self.test_report_data = {'total_subjects': 5, 'form_details': {\n 'Total_chemistry_Forms': 22, 'Total_cbc_Forms': 53},\n 'subject_details': {'60': {'cbc_Forms': 1, 'chemistry_Forms': 1\n }, '61': {'cbc_Forms': 2, 'chemistry_Forms': 1}, '63': {\n 'cbc_Forms': 11, 'chemistry_Forms': 4}, '59': {'cbc_Forms': 39,\n 'chemistry_Forms': 16}}, 'errors': []}\n self.specimen_taken_time_summary = {'total': 15, 'blank': 3}\n self.test_alert_summary = {'multiple_values_alert': [\n 'This is multiple values alert 1',\n 'This is multiple values alert 2',\n 'This is multiple values alert 3'], 'max_event_alert': [\n 'This is max event alert 1', 'This is max event alert 2',\n 'This is max event alert 3']}\n self.expected_xml = \"\"\"\n<report>\n <header>\n <project>hcvtarget-uf</project>\n <date>\"\"\" + time.strftime('%m/%d/%Y') + \"\"\"</date>\n <redcapServerAddress>https://hostname.org</redcapServerAddress>\n </header>\n <summary>\n <subjectCount>5</subjectCount>\n <forms>\n <form>\n <form_name>Total_cbc_Forms</form_name>\n <form_count>53</form_count>\n </form>\n <form>\n <form_name>Total_chemistry_Forms</form_name>\n <form_count>22</form_count>\n </form>\n </forms>\n </summary>\n <alerts>\n <tooManyForms>\n <eventAlert>\n <message>This is max event alert 1</message>\n </eventAlert>\n <eventAlert>\n <message>This is max event alert 2</message>\n </eventAlert>\n <eventAlert>\n <message>This is max event alert 3</message>\n </eventAlert>\n </tooManyForms>\n <tooManyValues>\n <valuesAlert>\n <message>This is multiple values alert 1</message>\n </valuesAlert>\n <valuesAlert>\n <message>This is multiple values alert 2</message>\n </valuesAlert>\n <valuesAlert><message>This is multiple values alert 3</message>\n </valuesAlert></tooManyValues>\n </alerts>\n <subjectsDetails>\n <Subject><ID>59</ID>\n <forms>\n <form>\n <form_name>cbc_Forms</form_name>\n <form_count>39</form_count>\n </form>\n <form>\n <form_name>chemistry_Forms</form_name>\n <form_count>16</form_count>\n </form>\n </forms>\n </Subject>\n <Subject>\n <ID>60</ID>\n <forms>\n <form>\n <form_name>cbc_Forms</form_name>\n <form_count>1</form_count></form>\n <form>\n <form_name>chemistry_Forms</form_name>\n <form_count>1</form_count>\n </form>\n </forms>\n </Subject>\n <Subject><ID>61</ID>\n <forms>\n <form>\n <form_name>cbc_Forms</form_name>\n <form_count>2</form_count>\n </form>\n <form>\n <form_name>chemistry_Forms</form_name>\n <form_count>1</form_count>\n </form>\n </forms>\n </Subject>\n <Subject>\n <ID>63</ID>\n <forms>\n <form>\n <form_name>cbc_Forms</form_name>\n <form_count>11</form_count>\n </form>\n <form>\n <form_name>chemistry_Forms</form_name>\n <form_count>4</form_count>\n </form>\n </forms>\n </Subject>\n </subjectsDetails>\n <errors/>\n <summaryOfSpecimenTakenTimes>\n <total>15</total>\n <blank>3</blank>\n <percent>20.0</percent>\n </summaryOfSpecimenTakenTimes>\n</report>\"\"\"\n self.schema_str = StringIO(\n \"\"\" <xs:schema 
attributeFormDefault=\"unqualified\" elementFormDefault=\"qualified\" xmlns:xs=\"http://www.w3.org/2001/XMLSchema\">\n <xs:element name=\"report\">\n <xs:complexType>\n <xs:sequence>\n <xs:element name=\"header\">\n <xs:complexType>\n <xs:sequence>\n <xs:element type=\"xs:string\" name=\"project\"/>\n <xs:element type=\"xs:string\" name=\"date\"/>\n <xs:element type=\"xs:string\" name=\"redcapServerAddress\"/>\n </xs:sequence>\n </xs:complexType>\n </xs:element>\n <xs:element name=\"summary\">\n <xs:complexType>\n <xs:sequence>\n <xs:element type=\"xs:byte\" name=\"subjectCount\"/>\n <xs:element name=\"forms\">\n <xs:complexType>\n <xs:sequence>\n <xs:element name=\"form\" maxOccurs=\"unbounded\" minOccurs=\"0\">\n <xs:complexType>\n <xs:sequence>\n <xs:element type=\"xs:string\" name=\"form_name\"/>\n <xs:element type=\"xs:byte\" name=\"form_count\"/>\n </xs:sequence>\n </xs:complexType>\n </xs:element>\n </xs:sequence>\n </xs:complexType>\n </xs:element>\n </xs:sequence>\n </xs:complexType>\n </xs:element>\n <xs:element name=\"alerts\">\n <xs:complexType>\n\n <xs:sequence>\n <xs:element name=\"tooManyForms\">\n <xs:complexType>\n <xs:sequence>\n <xs:element name=\"eventAlert\" maxOccurs=\"unbounded\" minOccurs=\"0\">\n <xs:complexType>\n <xs:sequence>\n <xs:element type=\"xs:string\" name=\"message\"/>\n </xs:sequence>\n </xs:complexType>\n </xs:element>\n </xs:sequence>\n </xs:complexType>\n </xs:element>\n\n <xs:element name=\"tooManyValues\">\n <xs:complexType>\n <xs:sequence>\n <xs:element name=\"valuesAlert\" maxOccurs=\"unbounded\" minOccurs=\"0\">\n <xs:complexType>\n <xs:sequence>\n <xs:element type=\"xs:string\" name=\"message\"/>\n </xs:sequence>\n </xs:complexType>\n </xs:element>\n </xs:sequence>\n </xs:complexType>\n </xs:element>\n </xs:sequence>\n </xs:complexType>\n </xs:element>\n <xs:element name=\"subjectsDetails\">\n <xs:complexType>\n <xs:sequence>\n <xs:element name=\"Subject\" maxOccurs=\"unbounded\" minOccurs=\"0\">\n <xs:complexType>\n <xs:sequence>\n <xs:element type=\"xs:byte\" name=\"ID\"/>\n <xs:element name=\"forms\">\n <xs:complexType>\n <xs:sequence>\n <xs:element name=\"form\" maxOccurs=\"unbounded\" minOccurs=\"0\">\n <xs:complexType>\n <xs:sequence>\n <xs:element type=\"xs:string\" name=\"form_name\"/>\n <xs:element type=\"xs:byte\" name=\"form_count\"/>\n </xs:sequence>\n </xs:complexType>\n </xs:element>\n </xs:sequence>\n </xs:complexType>\n </xs:element>\n </xs:sequence>\n </xs:complexType>\n </xs:element>\n </xs:sequence>\n </xs:complexType>\n </xs:element>\n <xs:element name=\"errors\">\n </xs:element>\n <xs:element name=\"summaryOfSpecimenTakenTimes\">\n <xs:complexType>\n <xs:sequence>\n <xs:element type=\"xs:byte\" name=\"total\"/>\n <xs:element type=\"xs:byte\" name=\"blank\"/>\n <xs:element type=\"xs:float\" name=\"percent\"/>\n </xs:sequence>\n </xs:complexType>\n </xs:element>\n </xs:sequence>\n </xs:complexType>\n </xs:element>\n</xs:schema>\"\"\"\n )\n return\n\n def test_create_summary_report(self):\n sys.path.append('config')\n self.newpath = proj_root + 'config'\n self.configFolderCreatedNow = False\n if not os.path.exists(self.newpath):\n self.configFolderCreatedNow = True\n os.makedirs(self.newpath)\n result = redi.create_summary_report(self.test_report_params, self.\n test_report_data, self.test_alert_summary, self.\n specimen_taken_time_summary)\n result_string = etree.tostring(result)\n xmlschema_doc = etree.parse(self.schema_str)\n xml_schema = etree.XMLSchema(xmlschema_doc)\n self.assertEqual(xml_schema.validate(result), True)\n 
parser = etree.XMLParser(remove_blank_text=True)\n clean_tree = etree.XML(self.expected_xml, parser=parser)\n self.expected_xml = etree.tostring(clean_tree)\n self.assertEqual(self.expected_xml, result_string)\n\n def tearDown(self):\n with open(proj_root + 'config/report.xml'):\n os.remove(proj_root + 'config/report.xml')\n if self.configFolderCreatedNow:\n os.rmdir(self.newpath)\n return\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"step-5": "'''\nUnit test for `redi.create_summary_report()`\n'''\nimport unittest\nimport os\nimport sys\nfrom lxml import etree\nfrom StringIO import StringIO\nimport time\nimport redi\n\nfile_dir = os.path.dirname(os.path.realpath(__file__))\ngoal_dir = os.path.join(file_dir, \"../\")\nproj_root = os.path.abspath(goal_dir)+'/'\n\nDEFAULT_DATA_DIRECTORY = os.getcwd()\n\nclass TestCreateSummaryReport(unittest.TestCase):\n\n def setUp(self):\n redi.configure_logging(DEFAULT_DATA_DIRECTORY)\n self.test_report_params = {\n 'project': 'hcvtarget-uf',\n 'report_file_path': proj_root + 'config/report.xml',\n 'redcap_uri': 'https://hostname.org'}\n\n self.test_report_data = {\n 'total_subjects': 5,\n 'form_details': {\n 'Total_chemistry_Forms': 22,\n 'Total_cbc_Forms': 53\n },\n 'subject_details': {\n '60': {'cbc_Forms': 1, 'chemistry_Forms': 1},\n '61': {'cbc_Forms': 2, 'chemistry_Forms': 1},\n '63': {'cbc_Forms': 11, 'chemistry_Forms': 4},\n '59': {'cbc_Forms': 39, 'chemistry_Forms': 16}\n },\n 'errors' : [],\n }\n self.specimen_taken_time_summary = {'total': 15, 'blank': 3}\n self.test_alert_summary = {\n 'multiple_values_alert': [\n 'This is multiple values alert 1',\n 'This is multiple values alert 2',\n 'This is multiple values alert 3'],\n 'max_event_alert': [\n 'This is max event alert 1',\n 'This is max event alert 2',\n 'This is max event alert 3']\n }\n self.expected_xml = '''\n<report>\n <header>\n <project>hcvtarget-uf</project>\n <date>'''+time.strftime(\"%m/%d/%Y\")+'''</date>\n <redcapServerAddress>https://hostname.org</redcapServerAddress>\n </header>\n <summary>\n <subjectCount>5</subjectCount>\n <forms>\n <form>\n <form_name>Total_cbc_Forms</form_name>\n <form_count>53</form_count>\n </form>\n <form>\n <form_name>Total_chemistry_Forms</form_name>\n <form_count>22</form_count>\n </form>\n </forms>\n </summary>\n <alerts>\n <tooManyForms>\n <eventAlert>\n <message>This is max event alert 1</message>\n </eventAlert>\n <eventAlert>\n <message>This is max event alert 2</message>\n </eventAlert>\n <eventAlert>\n <message>This is max event alert 3</message>\n </eventAlert>\n </tooManyForms>\n <tooManyValues>\n <valuesAlert>\n <message>This is multiple values alert 1</message>\n </valuesAlert>\n <valuesAlert>\n <message>This is multiple values alert 2</message>\n </valuesAlert>\n <valuesAlert><message>This is multiple values alert 3</message>\n </valuesAlert></tooManyValues>\n </alerts>\n <subjectsDetails>\n <Subject><ID>59</ID>\n <forms>\n <form>\n <form_name>cbc_Forms</form_name>\n <form_count>39</form_count>\n </form>\n <form>\n <form_name>chemistry_Forms</form_name>\n <form_count>16</form_count>\n </form>\n </forms>\n </Subject>\n <Subject>\n <ID>60</ID>\n <forms>\n <form>\n <form_name>cbc_Forms</form_name>\n <form_count>1</form_count></form>\n <form>\n <form_name>chemistry_Forms</form_name>\n <form_count>1</form_count>\n </form>\n </forms>\n </Subject>\n <Subject><ID>61</ID>\n <forms>\n <form>\n <form_name>cbc_Forms</form_name>\n <form_count>2</form_count>\n </form>\n <form>\n <form_name>chemistry_Forms</form_name>\n <form_count>1</form_count>\n </form>\n </forms>\n </Subject>\n <Subject>\n <ID>63</ID>\n <forms>\n <form>\n <form_name>cbc_Forms</form_name>\n <form_count>11</form_count>\n </form>\n <form>\n <form_name>chemistry_Forms</form_name>\n <form_count>4</form_count>\n </form>\n </forms>\n </Subject>\n </subjectsDetails>\n <errors/>\n <summaryOfSpecimenTakenTimes>\n <total>15</total>\n <blank>3</blank>\n <percent>20.0</percent>\n 
</summaryOfSpecimenTakenTimes>\n</report>'''\n\n self.schema_str = StringIO('''\\\n <xs:schema attributeFormDefault=\"unqualified\" elementFormDefault=\"qualified\" xmlns:xs=\"http://www.w3.org/2001/XMLSchema\">\n <xs:element name=\"report\">\n <xs:complexType>\n <xs:sequence>\n <xs:element name=\"header\">\n <xs:complexType>\n <xs:sequence>\n <xs:element type=\"xs:string\" name=\"project\"/>\n <xs:element type=\"xs:string\" name=\"date\"/>\n <xs:element type=\"xs:string\" name=\"redcapServerAddress\"/>\n </xs:sequence>\n </xs:complexType>\n </xs:element>\n <xs:element name=\"summary\">\n <xs:complexType>\n <xs:sequence>\n <xs:element type=\"xs:byte\" name=\"subjectCount\"/>\n <xs:element name=\"forms\">\n <xs:complexType>\n <xs:sequence>\n <xs:element name=\"form\" maxOccurs=\"unbounded\" minOccurs=\"0\">\n <xs:complexType>\n <xs:sequence>\n <xs:element type=\"xs:string\" name=\"form_name\"/>\n <xs:element type=\"xs:byte\" name=\"form_count\"/>\n </xs:sequence>\n </xs:complexType>\n </xs:element>\n </xs:sequence>\n </xs:complexType>\n </xs:element>\n </xs:sequence>\n </xs:complexType>\n </xs:element>\n <xs:element name=\"alerts\">\n <xs:complexType>\n\n <xs:sequence>\n <xs:element name=\"tooManyForms\">\n <xs:complexType>\n <xs:sequence>\n <xs:element name=\"eventAlert\" maxOccurs=\"unbounded\" minOccurs=\"0\">\n <xs:complexType>\n <xs:sequence>\n <xs:element type=\"xs:string\" name=\"message\"/>\n </xs:sequence>\n </xs:complexType>\n </xs:element>\n </xs:sequence>\n </xs:complexType>\n </xs:element>\n\n <xs:element name=\"tooManyValues\">\n <xs:complexType>\n <xs:sequence>\n <xs:element name=\"valuesAlert\" maxOccurs=\"unbounded\" minOccurs=\"0\">\n <xs:complexType>\n <xs:sequence>\n <xs:element type=\"xs:string\" name=\"message\"/>\n </xs:sequence>\n </xs:complexType>\n </xs:element>\n </xs:sequence>\n </xs:complexType>\n </xs:element>\n </xs:sequence>\n </xs:complexType>\n </xs:element>\n <xs:element name=\"subjectsDetails\">\n <xs:complexType>\n <xs:sequence>\n <xs:element name=\"Subject\" maxOccurs=\"unbounded\" minOccurs=\"0\">\n <xs:complexType>\n <xs:sequence>\n <xs:element type=\"xs:byte\" name=\"ID\"/>\n <xs:element name=\"forms\">\n <xs:complexType>\n <xs:sequence>\n <xs:element name=\"form\" maxOccurs=\"unbounded\" minOccurs=\"0\">\n <xs:complexType>\n <xs:sequence>\n <xs:element type=\"xs:string\" name=\"form_name\"/>\n <xs:element type=\"xs:byte\" name=\"form_count\"/>\n </xs:sequence>\n </xs:complexType>\n </xs:element>\n </xs:sequence>\n </xs:complexType>\n </xs:element>\n </xs:sequence>\n </xs:complexType>\n </xs:element>\n </xs:sequence>\n </xs:complexType>\n </xs:element>\n <xs:element name=\"errors\">\n </xs:element>\n <xs:element name=\"summaryOfSpecimenTakenTimes\">\n <xs:complexType>\n <xs:sequence>\n <xs:element type=\"xs:byte\" name=\"total\"/>\n <xs:element type=\"xs:byte\" name=\"blank\"/>\n <xs:element type=\"xs:float\" name=\"percent\"/>\n </xs:sequence>\n </xs:complexType>\n </xs:element>\n </xs:sequence>\n </xs:complexType>\n </xs:element>\n</xs:schema>''')\n return\n\n def test_create_summary_report(self):\n\n sys.path.append('config')\n self.newpath = proj_root+'config'\n self.configFolderCreatedNow = False\n if not os.path.exists(self.newpath):\n self.configFolderCreatedNow = True\n os.makedirs(self.newpath)\n\n result = redi.create_summary_report(\\\n self.test_report_params, \\\n self.test_report_data, \\\n self.test_alert_summary, \\\n self.specimen_taken_time_summary)\n result_string = etree.tostring(result)\n #print result_string\n xmlschema_doc = 
etree.parse(self.schema_str)\n xml_schema = etree.XMLSchema(xmlschema_doc)\n # validate the xml against the xsd schema\n self.assertEqual(xml_schema.validate(result), True)\n # validate the actual data in xml but strip the white space first\n parser = etree.XMLParser(remove_blank_text=True)\n clean_tree = etree.XML(self.expected_xml, parser=parser)\n self.expected_xml = etree.tostring(clean_tree)\n\n self.assertEqual(self.expected_xml, result_string)\n\n def tearDown(self):\n # delete the created xml file\n with open(proj_root + 'config/report.xml'):\n os.remove(proj_root + 'config/report.xml')\n\n if self.configFolderCreatedNow:\n os.rmdir(self.newpath)\n return\n\nif __name__ == '__main__':\n unittest.main()\n",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
# OSINT By FajarTheGGman For Google Code-in 2019©
import urllib3 as url
class GCI:
    def banner():
        print("[---- OSINT By FajarTheGGman ----]\n")
    def main():
        user = str(input("[!] Input Name Victim ? "))
        init = url.PoolManager()
        a = init.request("GET", "https://facebook.com/" + user)
        b = init.request("GET", "https://instagram.com/" + user)
        c = init.request("GET", "https://twitter.com/" + user)
        if a.status == 200:
            print("[+] " + user + " => Found In Facebook")
        else:
            print("[-] " + user + " => NotFound in Facebook")
        if b.status == 200:
            print("[+] " + user + " => Found In Instagram")
        else:
            print("[-] " + user + " => NotFound in Instagram")
        if c.status == 200:
            print("[+] " + user + " => Found In Twitter")
        else:
            print("[-] " + user + " => NotFound in Twitter")
x = GCI
x.banner()
x.main()
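# Hedged sketch (not part of the original script): the three near-identical
# status checks above could be driven by a mapping of platform base URLs.
# "PLATFORMS" and "check_profiles" are illustrative names introduced here.
PLATFORMS = {
    "Facebook": "https://facebook.com/",
    "Instagram": "https://instagram.com/",
    "Twitter": "https://twitter.com/",
}

def check_profiles(user):
    http = url.PoolManager()
    for name, base in PLATFORMS.items():
        found = http.request("GET", base + user).status == 200
        marker = "+" if found else "-"
        verdict = "Found In" if found else "NotFound in"
        print("[" + marker + "] " + user + " => " + verdict + " " + name)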
|
normal
|
{
"blob_id": "6c8180d24110045348d9c2041c0cca26fa9ea2d2",
"index": 4318,
"step-1": "<mask token>\n\n\nclass GCI:\n\n def banner():\n print('[---- OSINT By FajarTheGGman ----]\\n')\n <mask token>\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass GCI:\n\n def banner():\n print('[---- OSINT By FajarTheGGman ----]\\n')\n\n def main():\n user = str(input('[!] Input Name Victim ? '))\n init = url.PoolManager()\n a = init.request('GET', 'https://facebook.com/' + user)\n b = init.request('GET', 'https://instagram.com/' + user)\n c = init.request('GET', 'https://twitter.com/' + user)\n if a.status == 200:\n print('[+] ' + user + ' => Found In Facebook')\n else:\n print('[-] ' + user + ' => NotFound in Facebook')\n if b.status == 200:\n print('[+] ' + user + ' => Found In Instagram')\n else:\n print('[-] ' + user + ' => NotFound in Instagram')\n if b.status == 200:\n print('[+] ' + user + ' => Found In Twitter')\n else:\n print('[-] ' + user + ' => NotFound in Twitter')\n\n\n<mask token>\nx.banner()\nx.main()\n",
"step-3": "<mask token>\n\n\nclass GCI:\n\n def banner():\n print('[---- OSINT By FajarTheGGman ----]\\n')\n\n def main():\n user = str(input('[!] Input Name Victim ? '))\n init = url.PoolManager()\n a = init.request('GET', 'https://facebook.com/' + user)\n b = init.request('GET', 'https://instagram.com/' + user)\n c = init.request('GET', 'https://twitter.com/' + user)\n if a.status == 200:\n print('[+] ' + user + ' => Found In Facebook')\n else:\n print('[-] ' + user + ' => NotFound in Facebook')\n if b.status == 200:\n print('[+] ' + user + ' => Found In Instagram')\n else:\n print('[-] ' + user + ' => NotFound in Instagram')\n if b.status == 200:\n print('[+] ' + user + ' => Found In Twitter')\n else:\n print('[-] ' + user + ' => NotFound in Twitter')\n\n\nx = GCI\nx.banner()\nx.main()\n",
"step-4": "import urllib3 as url\n\n\nclass GCI:\n\n def banner():\n print('[---- OSINT By FajarTheGGman ----]\\n')\n\n def main():\n user = str(input('[!] Input Name Victim ? '))\n init = url.PoolManager()\n a = init.request('GET', 'https://facebook.com/' + user)\n b = init.request('GET', 'https://instagram.com/' + user)\n c = init.request('GET', 'https://twitter.com/' + user)\n if a.status == 200:\n print('[+] ' + user + ' => Found In Facebook')\n else:\n print('[-] ' + user + ' => NotFound in Facebook')\n if b.status == 200:\n print('[+] ' + user + ' => Found In Instagram')\n else:\n print('[-] ' + user + ' => NotFound in Instagram')\n if b.status == 200:\n print('[+] ' + user + ' => Found In Twitter')\n else:\n print('[-] ' + user + ' => NotFound in Twitter')\n\n\nx = GCI\nx.banner()\nx.main()\n",
"step-5": "# OSINT By FajarTheGGman For Google Code-in 2019©\r\n\r\nimport urllib3 as url\r\n\r\nclass GCI:\r\n\tdef banner():\r\n\t\tprint(\"[---- OSINT By FajarTheGGman ----]\\n\")\r\n\r\n\tdef main():\r\n\t\tuser = str(input(\"[!] Input Name Victim ? \"))\r\n\t\tinit = url.PoolManager()\r\n\t\ta = init.request(\"GET\", \"https://facebook.com/\" + user)\r\n\t\tb = init.request(\"GET\", \"https://instagram.com/\" + user)\r\n\t\tc = init.request(\"GET\", \"https://twitter.com/\" + user)\r\n\t\tif a.status == 200:\r\n\t\t\tprint(\"[+] \" + user + \" => Found In Facebook\")\r\n\t\telse:\r\n\t\t\tprint(\"[-] \" + user + \" => NotFound in Facebook\")\r\n\r\n\t\tif b.status == 200:\r\n\t\t\tprint(\"[+] \" + user + \" => Found In Instagram\")\r\n\t\telse:\r\n\t\t\tprint(\"[-] \" + user + \" => NotFound in Instagram\")\r\n\r\n\t\tif b.status == 200:\r\n\t\t\tprint(\"[+] \" + user + \" => Found In Twitter\")\r\n\t\telse:\r\n\t\t\tprint(\"[-] \" + user + \" => NotFound in Twitter\")\r\n\r\nx = GCI\r\nx.banner()\r\nx.main()",
"step-ids": [
2,
4,
5,
6,
7
]
}
|
[
2,
4,
5,
6,
7
] |
# Copyright The Cloud Custodian Authors.
# SPDX-License-Identifier: Apache-2.0
from c7n_azure.provider import resources
from c7n_azure.resources.arm import ArmResourceManager
from c7n.utils import type_schema
from c7n.filters.core import ValueFilter
@resources.register('mysql-flexibleserver')
class MySQLFlexibleServer(ArmResourceManager):
class resource_type(ArmResourceManager.resource_type):
doc_groups = ['Databases']
service = 'azure.mgmt.rdbms.mysql_flexibleservers'
client = 'MySQLManagementClient'
enum_spec = ('servers', 'list', None)
default_report_fields = (
'name',
'location',
'resourceGroup'
)
resource_type = 'Microsoft.DBForMySQL/flexibleservers/configurations'
@MySQLFlexibleServer.filter_registry.register('server-parameter')
class ServerParametersFilter(ValueFilter):
"""Filter by configuration parameter for mysql flexible server
:example:
Example JSON document showing the data format provided to the filter
.. code-block:: json
        {
            "value": "TLSv1.2",
            "description": "Which protocols the server permits for encrypted
            connections. By default, TLS 1.2 is enforced",
            "defaultValue": "TLSv1.2",
            "dataType": "Set",
            "allowedValues": "TLSv1,TLSv1.1,TLSv1.2",
            "source": "system-default",
            "isReadOnly": "False",
            "isConfigPendingRestart": "False",
            "isDynamicConfig": "False"
        }
:example:
    Find MySQL Flexible servers with tls_version set to TLSv1.2
.. code-block:: yaml
policies:
- name: mysql-flexible-server-tls-version
resource: azure.mysql-flexibleserver
filters:
- type: server-parameter
name: tls_version
key: value
op: eq
value: 'TLSv1.2'
"""
schema = type_schema(
'server-parameter',
required=['type', 'name'],
rinherit=ValueFilter.schema,
name={
'type': 'string',
'allowed_value': ['TLSv1.2']
},
)
def __call__(self, resource):
key = f'c7n:config-params:{self.data["name"]}'
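        # Fetch the server parameter from the API only on the first evaluation of
        # this resource; later evaluations reuse the value cached under this key.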
if key not in resource['properties']:
client = self.manager.get_client()
query = client.configurations.get(
resource['resourceGroup'],
resource['name'],
self.data["name"]
)
resource['properties'][key] = query.serialize(True).get('properties')
return super().__call__(resource['properties'].get(key))
|
normal
|
{
"blob_id": "b9bc6a9dbb3dbe51fbae45078bd499fb97fa003f",
"index": 3950,
"step-1": "<mask token>\n\n\[email protected]_registry.register('server-parameter')\nclass ServerParametersFilter(ValueFilter):\n <mask token>\n schema = type_schema('server-parameter', required=['type', 'name'],\n rinherit=ValueFilter.schema, name={'type': 'string',\n 'allowed_value': ['TLSv1.2']})\n\n def __call__(self, resource):\n key = f\"c7n:config-params:{self.data['name']}\"\n if key not in resource['properties']:\n client = self.manager.get_client()\n query = client.configurations.get(resource['resourceGroup'],\n resource['name'], self.data['name'])\n resource['properties'][key] = query.serialize(True).get(\n 'properties')\n return super().__call__(resource['properties'].get(key))\n",
"step-2": "<mask token>\n\n\[email protected]_registry.register('server-parameter')\nclass ServerParametersFilter(ValueFilter):\n \"\"\"Filter by configuration parameter for mysql flexible server\n\n :example:\n\n Example JSON document showing the data format provided to the filter\n\n .. code-block:: json\n\n {\n \"value\": \"TLSv1.2\"\n \"description\": \"Which protocols the server permits for encrypted\n connections. By default, TLS 1.2 is enforced\",\n \"defaultValue\": \"TLSv1.2\",\n \"dataType\": \"Set\",\n \"allowedValues\": \"TLSv1,TLSv1.1,TLSv1.2\",\n \"source\": \"system-default\",\n \"isReadOnly\": \"False\",\n \"isConfigPendingRestart\": \"False\",\n \"isDynamicConfig\": \"False\",\n }\n\n :example:\n\n Find Mysql Flexible servers with tls_version not set to TLSV1.2\n\n .. code-block:: yaml\n\n policies:\n - name: mysql-flexible-server-tls-version\n resource: azure.mysql-flexibleserver\n filters:\n - type: server-parameter\n name: tls_version\n key: value\n op: eq\n value: 'TLSv1.2'\n\n \"\"\"\n schema = type_schema('server-parameter', required=['type', 'name'],\n rinherit=ValueFilter.schema, name={'type': 'string',\n 'allowed_value': ['TLSv1.2']})\n\n def __call__(self, resource):\n key = f\"c7n:config-params:{self.data['name']}\"\n if key not in resource['properties']:\n client = self.manager.get_client()\n query = client.configurations.get(resource['resourceGroup'],\n resource['name'], self.data['name'])\n resource['properties'][key] = query.serialize(True).get(\n 'properties')\n return super().__call__(resource['properties'].get(key))\n",
"step-3": "<mask token>\n\n\[email protected]('mysql-flexibleserver')\nclass MySQLFlexibleServer(ArmResourceManager):\n\n\n class resource_type(ArmResourceManager.resource_type):\n doc_groups = ['Databases']\n service = 'azure.mgmt.rdbms.mysql_flexibleservers'\n client = 'MySQLManagementClient'\n enum_spec = 'servers', 'list', None\n default_report_fields = 'name', 'location', 'resourceGroup'\n resource_type = 'Microsoft.DBForMySQL/flexibleservers/configurations'\n\n\[email protected]_registry.register('server-parameter')\nclass ServerParametersFilter(ValueFilter):\n \"\"\"Filter by configuration parameter for mysql flexible server\n\n :example:\n\n Example JSON document showing the data format provided to the filter\n\n .. code-block:: json\n\n {\n \"value\": \"TLSv1.2\"\n \"description\": \"Which protocols the server permits for encrypted\n connections. By default, TLS 1.2 is enforced\",\n \"defaultValue\": \"TLSv1.2\",\n \"dataType\": \"Set\",\n \"allowedValues\": \"TLSv1,TLSv1.1,TLSv1.2\",\n \"source\": \"system-default\",\n \"isReadOnly\": \"False\",\n \"isConfigPendingRestart\": \"False\",\n \"isDynamicConfig\": \"False\",\n }\n\n :example:\n\n Find Mysql Flexible servers with tls_version not set to TLSV1.2\n\n .. code-block:: yaml\n\n policies:\n - name: mysql-flexible-server-tls-version\n resource: azure.mysql-flexibleserver\n filters:\n - type: server-parameter\n name: tls_version\n key: value\n op: eq\n value: 'TLSv1.2'\n\n \"\"\"\n schema = type_schema('server-parameter', required=['type', 'name'],\n rinherit=ValueFilter.schema, name={'type': 'string',\n 'allowed_value': ['TLSv1.2']})\n\n def __call__(self, resource):\n key = f\"c7n:config-params:{self.data['name']}\"\n if key not in resource['properties']:\n client = self.manager.get_client()\n query = client.configurations.get(resource['resourceGroup'],\n resource['name'], self.data['name'])\n resource['properties'][key] = query.serialize(True).get(\n 'properties')\n return super().__call__(resource['properties'].get(key))\n",
"step-4": "from c7n_azure.provider import resources\nfrom c7n_azure.resources.arm import ArmResourceManager\nfrom c7n.utils import type_schema\nfrom c7n.filters.core import ValueFilter\n\n\[email protected]('mysql-flexibleserver')\nclass MySQLFlexibleServer(ArmResourceManager):\n\n\n class resource_type(ArmResourceManager.resource_type):\n doc_groups = ['Databases']\n service = 'azure.mgmt.rdbms.mysql_flexibleservers'\n client = 'MySQLManagementClient'\n enum_spec = 'servers', 'list', None\n default_report_fields = 'name', 'location', 'resourceGroup'\n resource_type = 'Microsoft.DBForMySQL/flexibleservers/configurations'\n\n\[email protected]_registry.register('server-parameter')\nclass ServerParametersFilter(ValueFilter):\n \"\"\"Filter by configuration parameter for mysql flexible server\n\n :example:\n\n Example JSON document showing the data format provided to the filter\n\n .. code-block:: json\n\n {\n \"value\": \"TLSv1.2\"\n \"description\": \"Which protocols the server permits for encrypted\n connections. By default, TLS 1.2 is enforced\",\n \"defaultValue\": \"TLSv1.2\",\n \"dataType\": \"Set\",\n \"allowedValues\": \"TLSv1,TLSv1.1,TLSv1.2\",\n \"source\": \"system-default\",\n \"isReadOnly\": \"False\",\n \"isConfigPendingRestart\": \"False\",\n \"isDynamicConfig\": \"False\",\n }\n\n :example:\n\n Find Mysql Flexible servers with tls_version not set to TLSV1.2\n\n .. code-block:: yaml\n\n policies:\n - name: mysql-flexible-server-tls-version\n resource: azure.mysql-flexibleserver\n filters:\n - type: server-parameter\n name: tls_version\n key: value\n op: eq\n value: 'TLSv1.2'\n\n \"\"\"\n schema = type_schema('server-parameter', required=['type', 'name'],\n rinherit=ValueFilter.schema, name={'type': 'string',\n 'allowed_value': ['TLSv1.2']})\n\n def __call__(self, resource):\n key = f\"c7n:config-params:{self.data['name']}\"\n if key not in resource['properties']:\n client = self.manager.get_client()\n query = client.configurations.get(resource['resourceGroup'],\n resource['name'], self.data['name'])\n resource['properties'][key] = query.serialize(True).get(\n 'properties')\n return super().__call__(resource['properties'].get(key))\n",
"step-5": "# Copyright The Cloud Custodian Authors.\n# SPDX-License-Identifier: Apache-2.0\n\nfrom c7n_azure.provider import resources\nfrom c7n_azure.resources.arm import ArmResourceManager\nfrom c7n.utils import type_schema\nfrom c7n.filters.core import ValueFilter\n\n\[email protected]('mysql-flexibleserver')\nclass MySQLFlexibleServer(ArmResourceManager):\n\n class resource_type(ArmResourceManager.resource_type):\n doc_groups = ['Databases']\n\n service = 'azure.mgmt.rdbms.mysql_flexibleservers'\n client = 'MySQLManagementClient'\n enum_spec = ('servers', 'list', None)\n default_report_fields = (\n 'name',\n 'location',\n 'resourceGroup'\n )\n resource_type = 'Microsoft.DBForMySQL/flexibleservers/configurations'\n\n\[email protected]_registry.register('server-parameter')\nclass ServerParametersFilter(ValueFilter):\n \"\"\"Filter by configuration parameter for mysql flexible server\n\n :example:\n\n Example JSON document showing the data format provided to the filter\n\n .. code-block:: json\n\n {\n \"value\": \"TLSv1.2\"\n \"description\": \"Which protocols the server permits for encrypted\n connections. By default, TLS 1.2 is enforced\",\n \"defaultValue\": \"TLSv1.2\",\n \"dataType\": \"Set\",\n \"allowedValues\": \"TLSv1,TLSv1.1,TLSv1.2\",\n \"source\": \"system-default\",\n \"isReadOnly\": \"False\",\n \"isConfigPendingRestart\": \"False\",\n \"isDynamicConfig\": \"False\",\n }\n\n :example:\n\n Find Mysql Flexible servers with tls_version not set to TLSV1.2\n\n .. code-block:: yaml\n\n policies:\n - name: mysql-flexible-server-tls-version\n resource: azure.mysql-flexibleserver\n filters:\n - type: server-parameter\n name: tls_version\n key: value\n op: eq\n value: 'TLSv1.2'\n\n \"\"\"\n\n schema = type_schema(\n 'server-parameter',\n required=['type', 'name'],\n rinherit=ValueFilter.schema,\n name={\n 'type': 'string',\n 'allowed_value': ['TLSv1.2']\n },\n )\n\n def __call__(self, resource):\n key = f'c7n:config-params:{self.data[\"name\"]}'\n if key not in resource['properties']:\n client = self.manager.get_client()\n query = client.configurations.get(\n resource['resourceGroup'],\n resource['name'],\n self.data[\"name\"]\n )\n\n resource['properties'][key] = query.serialize(True).get('properties')\n\n return super().__call__(resource['properties'].get(key))\n\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
"""
Password Requirements
"""
# Write a Python program called "pw_validator" to validate a password based on the security requirements outlined below.
# VALIDATION REQUIREMENTS:
## At least 1 lowercase letter [a-z]
## At least 1 uppercase letter [A-Z].
## At least 1 number [0-9].
## At least 1 special character [~!@#$%&*].
## Min length 6 characters.
## Max length 16 characters.
def pw_validator(pw):
pw = list(pw)
if len(pw) < 6 or len(pw) > 16:
return 'Please enter a valid password.'
num_count = 0
lower_count = 0
upper_count = 0
spec_count = 0
for i in pw:
# check numbers
if i in '0123456789':
idx = pw.index(i)
pw[idx] = int(i)
num_count += 1
# check lowercase letters
if i in 'abcdefghijklmnopqrstuvwxyz':
idx = pw.index(i)
lower_count += 1
# check uppercase letters
if i in 'ABCDEFGHIJKLMNOPQRSTUVWXYZ':
idx = pw.index(i)
upper_count += 1
# check special char
if i in '~!@#$%&*':
idx = pw.index(i)
spec_count += 1
if num_count == 0 or lower_count == 0 or upper_count == 0 or spec_count == 0:
return 'Please enter a valid password.'
else:
return 'Success!'
# < 6 char
a = pw_validator('abc')
print(f'abc: {a}')
# > 16 char
b = pw_validator('1234567890abcdefg')
print(f'1234567890abcdefg: {b}')
# no numbers
c = pw_validator('@bcdEFGh!j')
print(f'@bcdEFGh!j: {c}')
# no lowercase letters
d = pw_validator('@BCD3EFGH!J')
print(f'@BCD3EFGH!J: {d}')
# no uppercase letters
e = pw_validator('@bcd3efgh!j')
print(f'@bcd3efgh!j: {e}')
# no special characters
f = pw_validator('Abcd3FGhIj112')
print(f'Abcd3FGhIj112: {f}')
# valid pw
g = pw_validator('P$kj35S&7')
print(f'P$kj35S&7: {g}')
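# Hedged alternative (illustrative only, not part of the original exercise): the
# same requirements can be expressed as regular-expression lookaheads.
# "pw_validator_re" is an assumed name introduced for this sketch.
import re

def pw_validator_re(pw):
    # one lookahead per required character class, plus the 6-16 length bound
    pattern = r'^(?=.*[a-z])(?=.*[A-Z])(?=.*[0-9])(?=.*[~!@#$%&*]).{6,16}$'
    return 'Success!' if re.match(pattern, pw) else 'Please enter a valid password.'

h = pw_validator_re('P$kj35S&7')
print(f'regex P$kj35S&7: {h}')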
|
normal
|
{
"blob_id": "d72f9d521613accfd93e6de25a71d188626a0952",
"index": 4807,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef pw_validator(pw):\n pw = list(pw)\n if len(pw) < 6 or len(pw) > 16:\n return 'Please enter a valid password.'\n num_count = 0\n lower_count = 0\n upper_count = 0\n spec_count = 0\n for i in pw:\n if i in '0123456789':\n idx = pw.index(i)\n pw[idx] = int(i)\n num_count += 1\n if i in 'abcdefghijklmnopqrstuvwxyz':\n idx = pw.index(i)\n lower_count += 1\n if i in 'ABCDEFGHIJKLMNOPQRSTUVWXYZ':\n idx = pw.index(i)\n upper_count += 1\n if i in '~!@#$%&*':\n idx = pw.index(i)\n spec_count += 1\n if (num_count == 0 or lower_count == 0 or upper_count == 0 or \n spec_count == 0):\n return 'Please enter a valid password.'\n else:\n return 'Success!'\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef pw_validator(pw):\n pw = list(pw)\n if len(pw) < 6 or len(pw) > 16:\n return 'Please enter a valid password.'\n num_count = 0\n lower_count = 0\n upper_count = 0\n spec_count = 0\n for i in pw:\n if i in '0123456789':\n idx = pw.index(i)\n pw[idx] = int(i)\n num_count += 1\n if i in 'abcdefghijklmnopqrstuvwxyz':\n idx = pw.index(i)\n lower_count += 1\n if i in 'ABCDEFGHIJKLMNOPQRSTUVWXYZ':\n idx = pw.index(i)\n upper_count += 1\n if i in '~!@#$%&*':\n idx = pw.index(i)\n spec_count += 1\n if (num_count == 0 or lower_count == 0 or upper_count == 0 or \n spec_count == 0):\n return 'Please enter a valid password.'\n else:\n return 'Success!'\n\n\n<mask token>\nprint(f'abc: {a}')\n<mask token>\nprint(f'1234567890abcdefg: {b}')\n<mask token>\nprint(f'@bcdEFGh!j: {c}')\n<mask token>\nprint(f'@BCD3EFGH!J: {d}')\n<mask token>\nprint(f'@bcd3efgh!j: {e}')\n<mask token>\nprint(f'Abcd3FGhIj112: {f}')\n<mask token>\nprint(f'P$kj35S&7: {g}')\n",
"step-4": "<mask token>\n\n\ndef pw_validator(pw):\n pw = list(pw)\n if len(pw) < 6 or len(pw) > 16:\n return 'Please enter a valid password.'\n num_count = 0\n lower_count = 0\n upper_count = 0\n spec_count = 0\n for i in pw:\n if i in '0123456789':\n idx = pw.index(i)\n pw[idx] = int(i)\n num_count += 1\n if i in 'abcdefghijklmnopqrstuvwxyz':\n idx = pw.index(i)\n lower_count += 1\n if i in 'ABCDEFGHIJKLMNOPQRSTUVWXYZ':\n idx = pw.index(i)\n upper_count += 1\n if i in '~!@#$%&*':\n idx = pw.index(i)\n spec_count += 1\n if (num_count == 0 or lower_count == 0 or upper_count == 0 or \n spec_count == 0):\n return 'Please enter a valid password.'\n else:\n return 'Success!'\n\n\na = pw_validator('abc')\nprint(f'abc: {a}')\nb = pw_validator('1234567890abcdefg')\nprint(f'1234567890abcdefg: {b}')\nc = pw_validator('@bcdEFGh!j')\nprint(f'@bcdEFGh!j: {c}')\nd = pw_validator('@BCD3EFGH!J')\nprint(f'@BCD3EFGH!J: {d}')\ne = pw_validator('@bcd3efgh!j')\nprint(f'@bcd3efgh!j: {e}')\nf = pw_validator('Abcd3FGhIj112')\nprint(f'Abcd3FGhIj112: {f}')\ng = pw_validator('P$kj35S&7')\nprint(f'P$kj35S&7: {g}')\n",
"step-5": "\"\"\"\nPassword Requirements\n\"\"\"\n\n# Write a Python program called \"pw_validator\" to validate a password based on the security requirements outlined below.\n\n# VALIDATION REQUIREMENTS:\n## At least 1 lowercase letter [a-z]\n## At least 1 uppercase letter [A-Z].\n## At least 1 number [0-9].\n## At least 1 special character [~!@#$%&*].\n## Min length 6 characters.\n## Max length 16 characters.\n\ndef pw_validator(pw):\n pw = list(pw)\n\n if len(pw) < 6 or len(pw) > 16:\n return 'Please enter a valid password.'\n\n num_count = 0\n lower_count = 0\n upper_count = 0\n spec_count = 0\n\n for i in pw:\n # check numbers\n if i in '0123456789':\n idx = pw.index(i)\n pw[idx] = int(i)\n num_count += 1\n # check lowercase letters\n if i in 'abcdefghijklmnopqrstuvwxyz':\n idx = pw.index(i)\n lower_count += 1\n # check uppercase letters\n if i in 'ABCDEFGHIJKLMNOPQRSTUVWXYZ':\n idx = pw.index(i)\n upper_count += 1\n # check special char\n if i in '~!@#$%&*':\n idx = pw.index(i)\n spec_count += 1\n\n if num_count == 0 or lower_count == 0 or upper_count == 0 or spec_count == 0:\n return 'Please enter a valid password.'\n else: \n return 'Success!'\n\n# < 6 char\na = pw_validator('abc')\nprint(f'abc: {a}')\n\n# > 16 char\nb = pw_validator('1234567890abcdefg')\nprint(f'1234567890abcdefg: {b}')\n\n# no numbers\nc = pw_validator('@bcdEFGh!j')\nprint(f'@bcdEFGh!j: {c}')\n\n# no lowercase letters\nd = pw_validator('@BCD3EFGH!J')\nprint(f'@BCD3EFGH!J: {d}')\n\n# no uppercase letters\ne = pw_validator('@bcd3efgh!j')\nprint(f'@bcd3efgh!j: {e}')\n\n# no special characters\nf = pw_validator('Abcd3FGhIj112')\nprint(f'Abcd3FGhIj112: {f}')\n\n# valid pw\ng = pw_validator('P$kj35S&7')\nprint(f'P$kj35S&7: {g}')",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from django.contrib.auth.mixins import LoginRequiredMixin
from django.views.generic import View
from django.http import HttpResponse
from django.utils.decorators import method_decorator
from django.contrib.auth.decorators import login_required
from django.core.exceptions import PermissionDenied
class View1(LoginRequiredMixin, View):
def dispatch(self, request, *args, **kwargs):
if not request.user.has_perm('cbv.do_something'):
raise PermissionDenied
return super().dispatch(request, *args, **kwargs)
def get(self, request, *args, **kwargs):
return HttpResponse("Contenu view1")
class View2(LoginRequiredMixin, View):
def dispatch(self, request, *args, **kwargs):
response = super().dispatch(request, *args, **kwargs)
if not request.user.has_perm('cbv.do_something'):
raise PermissionDenied
return response
def get(self, request, *args, **kwargs):
return HttpResponse("Contenu view2")
@method_decorator(login_required, name='dispatch')
class View3(View):
def dispatch(self, request, *args, **kwargs):
if not request.user.has_perm('cbv.do_something'):
raise PermissionDenied
return super().dispatch(request, *args, **kwargs)
def get(self, request, *args, **kwargs):
return HttpResponse("Contenu view2")
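# Hedged note (not part of the original module): Django also ships a declarative
# near-equivalent, PermissionRequiredMixin, which avoids the custom dispatch()
# override. "View4" is an illustrative name introduced for this sketch.
from django.contrib.auth.mixins import PermissionRequiredMixin

class View4(PermissionRequiredMixin, View):
    permission_required = 'cbv.do_something'
    raise_exception = True

    def get(self, request, *args, **kwargs):
        return HttpResponse("Contenu view4")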
|
normal
|
{
"blob_id": "826abb18b11afd7a010e2bfc5a29ba068218c23a",
"index": 7550,
"step-1": "<mask token>\n\n\nclass View1(LoginRequiredMixin, View):\n <mask token>\n <mask token>\n\n\nclass View2(LoginRequiredMixin, View):\n\n def dispatch(self, request, *args, **kwargs):\n response = super().dispatch(request, *args, **kwargs)\n if not request.user.has_perm('cbv.do_something'):\n raise PermissionDenied\n return response\n\n def get(self, request, *args, **kwargs):\n return HttpResponse('Contenu view2')\n\n\n@method_decorator(login_required, name='dispatch')\nclass View3(View):\n\n def dispatch(self, request, *args, **kwargs):\n if not request.user.has_perm('cbv.do_something'):\n raise PermissionDenied\n return super().dispatch(request, *args, **kwargs)\n\n def get(self, request, *args, **kwargs):\n return HttpResponse('Contenu view2')\n",
"step-2": "<mask token>\n\n\nclass View1(LoginRequiredMixin, View):\n\n def dispatch(self, request, *args, **kwargs):\n if not request.user.has_perm('cbv.do_something'):\n raise PermissionDenied\n return super().dispatch(request, *args, **kwargs)\n <mask token>\n\n\nclass View2(LoginRequiredMixin, View):\n\n def dispatch(self, request, *args, **kwargs):\n response = super().dispatch(request, *args, **kwargs)\n if not request.user.has_perm('cbv.do_something'):\n raise PermissionDenied\n return response\n\n def get(self, request, *args, **kwargs):\n return HttpResponse('Contenu view2')\n\n\n@method_decorator(login_required, name='dispatch')\nclass View3(View):\n\n def dispatch(self, request, *args, **kwargs):\n if not request.user.has_perm('cbv.do_something'):\n raise PermissionDenied\n return super().dispatch(request, *args, **kwargs)\n\n def get(self, request, *args, **kwargs):\n return HttpResponse('Contenu view2')\n",
"step-3": "<mask token>\n\n\nclass View1(LoginRequiredMixin, View):\n\n def dispatch(self, request, *args, **kwargs):\n if not request.user.has_perm('cbv.do_something'):\n raise PermissionDenied\n return super().dispatch(request, *args, **kwargs)\n\n def get(self, request, *args, **kwargs):\n return HttpResponse('Contenu view1')\n\n\nclass View2(LoginRequiredMixin, View):\n\n def dispatch(self, request, *args, **kwargs):\n response = super().dispatch(request, *args, **kwargs)\n if not request.user.has_perm('cbv.do_something'):\n raise PermissionDenied\n return response\n\n def get(self, request, *args, **kwargs):\n return HttpResponse('Contenu view2')\n\n\n@method_decorator(login_required, name='dispatch')\nclass View3(View):\n\n def dispatch(self, request, *args, **kwargs):\n if not request.user.has_perm('cbv.do_something'):\n raise PermissionDenied\n return super().dispatch(request, *args, **kwargs)\n\n def get(self, request, *args, **kwargs):\n return HttpResponse('Contenu view2')\n",
"step-4": "from django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.views.generic import View\nfrom django.http import HttpResponse\nfrom django.utils.decorators import method_decorator\nfrom django.contrib.auth.decorators import login_required\nfrom django.core.exceptions import PermissionDenied\n\n\nclass View1(LoginRequiredMixin, View):\n\n def dispatch(self, request, *args, **kwargs):\n if not request.user.has_perm('cbv.do_something'):\n raise PermissionDenied\n return super().dispatch(request, *args, **kwargs)\n\n def get(self, request, *args, **kwargs):\n return HttpResponse('Contenu view1')\n\n\nclass View2(LoginRequiredMixin, View):\n\n def dispatch(self, request, *args, **kwargs):\n response = super().dispatch(request, *args, **kwargs)\n if not request.user.has_perm('cbv.do_something'):\n raise PermissionDenied\n return response\n\n def get(self, request, *args, **kwargs):\n return HttpResponse('Contenu view2')\n\n\n@method_decorator(login_required, name='dispatch')\nclass View3(View):\n\n def dispatch(self, request, *args, **kwargs):\n if not request.user.has_perm('cbv.do_something'):\n raise PermissionDenied\n return super().dispatch(request, *args, **kwargs)\n\n def get(self, request, *args, **kwargs):\n return HttpResponse('Contenu view2')\n",
"step-5": "from django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.views.generic import View\nfrom django.http import HttpResponse\nfrom django.utils.decorators import method_decorator\nfrom django.contrib.auth.decorators import login_required\nfrom django.core.exceptions import PermissionDenied\n\nclass View1(LoginRequiredMixin, View):\n def dispatch(self, request, *args, **kwargs):\n if not request.user.has_perm('cbv.do_something'):\n raise PermissionDenied\n return super().dispatch(request, *args, **kwargs)\n\n def get(self, request, *args, **kwargs):\n return HttpResponse(\"Contenu view1\")\n\nclass View2(LoginRequiredMixin, View):\n def dispatch(self, request, *args, **kwargs):\n response = super().dispatch(request, *args, **kwargs)\n if not request.user.has_perm('cbv.do_something'):\n raise PermissionDenied\n return response\n\n def get(self, request, *args, **kwargs):\n return HttpResponse(\"Contenu view2\")\n\n@method_decorator(login_required, name='dispatch')\nclass View3(View):\n def dispatch(self, request, *args, **kwargs):\n if not request.user.has_perm('cbv.do_something'):\n raise PermissionDenied\n return super().dispatch(request, *args, **kwargs)\n\n def get(self, request, *args, **kwargs):\n return HttpResponse(\"Contenu view2\")\n\n",
"step-ids": [
7,
8,
9,
10,
11
]
}
|
[
7,
8,
9,
10,
11
] |
import torch
import torch.nn as nn
import torchvision
import torchvision.transforms as transforms
import numpy as np
import matplotlib.pyplot as plt
def weights_init(m):
if type(m) == nn.Linear:
m.weight.data.normal_(0.0, 1e-3)
m.bias.data.fill_(0.)
def update_lr(optimizer, lr):
for param_group in optimizer.param_groups:
param_group['lr'] = lr
#--------------------------------
# Device configuration
#--------------------------------
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
print('Using device: %s'%device)
#--------------------------------
# Hyper-parameters
#--------------------------------
input_size = 3
num_classes = 10
hidden_size = [128, 512, 512, 512, 512]
num_epochs = 20
batch_size = 200
learning_rate = 2e-3
learning_rate_decay = 0.95
reg=0.001
num_training= 49000
num_validation =1000
norm_layer = None #norm_layer="BN"
print(hidden_size)
dropout_p = 0 #probability of dropout
#-------------------------------------------------
# Load the CIFAR-10 dataset
#-------------------------------------------------
#################################################################################
# TODO: Q3.a Choose the right data augmentation transforms with the right #
# hyper-parameters and put them in the data_aug_transforms variable #
#################################################################################
data_aug_transforms = []
# *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
data_aug_transforms += [transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.RandomVerticalFlip(),
transforms.RandomRotation(2),
transforms.RandomGrayscale(),
transforms.ColorJitter(brightness=0.1, contrast=0.05, saturation=0.5, hue=0.05),
transforms.RandomAffine(0, translate=[0.2,0.2], scale=None, shear=0, resample=False, fillcolor=0),
]
# *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
norm_transform = transforms.Compose(data_aug_transforms+[transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
])
test_transform = transforms.Compose([transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
])
cifar_dataset = torchvision.datasets.CIFAR10(root='datasets/',
train=True,
transform=norm_transform,
download=True)
test_dataset = torchvision.datasets.CIFAR10(root='datasets/',
train=False,
transform=test_transform
)
#-------------------------------------------------
# Prepare the training and validation splits
#-------------------------------------------------
mask = list(range(num_training))
train_dataset = torch.utils.data.Subset(cifar_dataset, mask)
mask = list(range(num_training, num_training + num_validation))
val_dataset = torch.utils.data.Subset(cifar_dataset, mask)
#-------------------------------------------------
# Data loader
#-------------------------------------------------
train_loader = torch.utils.data.DataLoader(dataset=train_dataset,
batch_size=batch_size,
shuffle=True)
val_loader = torch.utils.data.DataLoader(dataset=val_dataset,
batch_size=batch_size,
shuffle=False)
test_loader = torch.utils.data.DataLoader(dataset=test_dataset,
batch_size=batch_size,
shuffle=False)
#-------------------------------------------------
# Convolutional neural network (Q1.a and Q2.a)
# Set norm_layer for different networks whether using batch normalization
#-------------------------------------------------
class ConvNet(nn.Module):
def __init__(self, input_size, hidden_layers, num_classes, norm_layer=None):
super(ConvNet, self).__init__()
#################################################################################
# TODO: Initialize the modules required to implement the convolutional layer #
# described in the exercise. #
# For Q1.a make use of conv2d and relu layers from the torch.nn module. #
# For Q2.a make use of BatchNorm2d layer from the torch.nn module. #
# For Q3.b Use Dropout layer from the torch.nn module. #
#################################################################################
layers = []
# *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
# First ConvBlock with input size (i.e. C=3) and first hidden layer(i.e. 128)
layers.append(nn.Conv2d(input_size, hidden_layers[0], kernel_size=3, stride=1, padding=1))
layers.append(nn.Dropout(dropout_p))
if norm_layer=="BN":
layers.append(nn.BatchNorm2d(hidden_layers[0], eps=1e-05, momentum=0.1,
affine=True, track_running_stats=True))
layers.append(nn.ReLU())
layers.append(nn.MaxPool2d(kernel_size=2, stride=2))
# Adding the other blocks
for Din, Dout in zip(hidden_layers[:-1], hidden_layers[1:]):
layers.append(nn.Conv2d(Din, Dout, kernel_size=3, stride=1, padding=1))
layers.append(nn.Dropout(dropout_p))
if norm_layer=="BN":
layers.append(nn.BatchNorm2d(Dout, eps=1e-05, momentum=0.1,
affine=True, track_running_stats=True))
layers.append(nn.ReLU())
layers.append(nn.MaxPool2d(kernel_size=2, stride=2))
# stacking convolutional blocks
self.ConvBlocks = nn.Sequential(*layers)
self.Dout = hidden_layers[-1]
# Fully connected layer
self.Dense = nn.Linear(hidden_layers[-1], num_classes)
# *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
def forward(self, x):
#################################################################################
# TODO: Implement the forward pass computations #
#################################################################################
# *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
out = self.ConvBlocks(x)
out = out.view(-1, 512)
out = self.Dense(out)
# *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
return out
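    # Shape note (illustrative, not part of the original skeleton): each conv block
    # ends in MaxPool2d(kernel_size=2, stride=2), so a 32x32 CIFAR-10 image is
    # halved once per block; with the five hidden layers configured above,
    # 32 -> 16 -> 8 -> 4 -> 2 -> 1, leaving hidden_layers[-1] x 1 x 1 = 512 values
    # per sample, which is why forward() flattens with out.view(-1, 512).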
#-------------------------------------------------
# Calculate the model size (Q1.b)
# if disp is true, print the number of parameters, otherwise, only return the number of parameters.
#-------------------------------------------------
def PrintModelSize(model, disp=True):
#################################################################################
# TODO: Implement the function to count the number of trainable parameters in #
# the input model. This is useful to track the capacity of the model you are   #
# training #
#################################################################################
# *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
model_sz = 0
for parameter in model.parameters():
model_sz += parameter.nelement()
if disp == True:
print("\nNumber of parameters: ", model_sz)
print("\n")
# *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
return model_sz
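    # Note (illustrative, not part of the original skeleton): an equivalent
    # one-liner that counts only the trainable parameters would be
    # sum(p.numel() for p in model.parameters() if p.requires_grad).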
#-------------------------------------------------
# Visualize the convolution filters (Q1.c)
# visualize the convolution filters of the first convolution layer of the input model
#-------------------------------------------------
def VisualizeFilter(model):
#################################################################################
# TODO: Implement the function to visualize the weights in the first conv layer#
# in the model. Visualize them as a single image of stacked filters.           #
# You can use matplotlib.imshow to visualize an image in python                #
#################################################################################
# *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
kernel_map = np.zeros((7*4 + 3, 15*4 + 3, 3))
kernels = list(model.parameters())[0]
kernels = kernels.to("cpu")
kernels = kernels.data.numpy()
kernels = (kernels - kernels.min()) / (kernels.max() - kernels.min())
cnt = 0
for i in range(0, 8*4,4):
for j in range(0, 16*4, 4):
kernel_map[i:i+3, j:j+3, :] = kernels[cnt]
cnt = cnt + 1
plt.figure(figsize=(20, 10))
plt.imshow(kernel_map)
plt.show()
pass
# *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
#======================================================================================
# Q1.a: Implementing convolutional neural net in PyTorch
#======================================================================================
# In this question we will implement a convolutional neural network using the PyTorch
# library. Please complete the code for the ConvNet class and evaluate the model
#--------------------------------------------------------------------------------------
model = ConvNet(input_size, hidden_size, num_classes, norm_layer=norm_layer).to(device)
# Q2.a - Initialize the model with correct batch norm layer
model.apply(weights_init)
# Print the model
print(model)
for i, (images, labels) in enumerate(train_loader):
images = images.to(device)
break
# Print model size
#======================================================================================
# Q1.b: Implementing the function to count the number of trainable parameters in the model
#======================================================================================
PrintModelSize(model)
#======================================================================================
# Q1.c: Implementing the function to visualize the filters in the first conv layers.
# Visualize the filters before training
#======================================================================================
#VisualizeFilter(model)
# Loss and optimizer
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate, weight_decay=reg)
# Train the model
lr = learning_rate
total_step = len(train_loader)
loss_train = []
loss_val = []
best_accuracy = 0
accuracy_val = []
best_model = type(model)(input_size, hidden_size, num_classes, norm_layer=norm_layer) # get a new instance
#best_model = ConvNet(input_size, hidden_size, num_classes, norm_layer=norm_layer)
for epoch in range(num_epochs):
model.train()
loss_iter = 0
for i, (images, labels) in enumerate(train_loader):
# Move tensors to the configured device
images = images.to(device)
labels = labels.to(device)
# Forward pass
outputs = model(images)
loss = criterion(outputs, labels)
# Backward and optimize
optimizer.zero_grad()
loss.backward()
optimizer.step()
loss_iter += loss.item()
if (i+1) % 100 == 0:
print ('Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}'
.format(epoch+1, num_epochs, i+1, total_step, loss.item()))
loss_train.append(loss_iter/(len(train_loader)*batch_size))
# Code to update the lr
lr *= learning_rate_decay
update_lr(optimizer, lr)
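    # Illustrative note: together with learning_rate_decay = 0.95 this applies an
    # exponential schedule, i.e. the lr used in the next epoch is
    # learning_rate * learning_rate_decay ** (epoch + 1).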
model.eval()
with torch.no_grad():
correct = 0
total = 0
loss_iter = 0
for images, labels in val_loader:
images = images.to(device)
labels = labels.to(device)
outputs = model(images)
_, predicted = torch.max(outputs.data, 1)
total += labels.size(0)
correct += (predicted == labels).sum().item()
loss = criterion(outputs, labels)
loss_iter += loss.item()
loss_val.append(loss_iter/(len(val_loader)*batch_size))
accuracy = 100 * correct / total
accuracy_val.append(accuracy)
print('Validation accuracy is: {} %'.format(accuracy))
#################################################################################
    # TODO: Q2.b Implement the early stopping mechanism to save the model which has #
    # the best validation accuracy so-far (use best_model).                         #
#################################################################################
# *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
if accuracy > best_accuracy:
best_model.load_state_dict(model.state_dict())
best_accuracy=accuracy
# *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
# Test the model
# In test phase, we don't need to compute gradients (for memory efficiency)
model.eval()
plt.figure(2)
plt.plot(loss_train, 'r', label='Train loss')
plt.plot(loss_val, 'g', label='Val loss')
plt.legend()
plt.show()
plt.figure(3)
plt.plot(accuracy_val, 'r', label='Val accuracy')
plt.legend()
plt.show()
#################################################################################
# TODO: Q2.b Implement the early stopping mechanism to load the weights from the#
# best model so far and perform testing with this model. #
#################################################################################
# *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
model.load_state_dict(best_model.state_dict())
# *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
#Compute accuracy on the test set
with torch.no_grad():
correct = 0
total = 0
for images, labels in test_loader:
images = images.to(device)
labels = labels.to(device)
outputs = model(images)
_, predicted = torch.max(outputs.data, 1)
total += labels.size(0)
correct += (predicted == labels).sum().item()
if total == 1000:
break
print('Accuracy of the network on the {} test images: {} %'.format(total, 100 * correct / total))
# Q1.c: Implementing the function to visualize the filters in the first conv layers.
# Visualize the filters after training
VisualizeFilter(model)
# Save the model checkpoint
torch.save(model.state_dict(), 'model.ckpt')
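# Hedged usage sketch (not part of the original script): the checkpoint saved
# above can be restored into a freshly constructed network for later inference.
# "restored" is an assumed variable name introduced for this sketch.
restored = ConvNet(input_size, hidden_size, num_classes, norm_layer=norm_layer).to(device)
restored.load_state_dict(torch.load('model.ckpt', map_location=device))
restored.eval()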
|
normal
|
{
"blob_id": "0553bd4c7261197a1a80c5551305a16e7bfdc761",
"index": 2398,
"step-1": "<mask token>\n\n\ndef weights_init(m):\n if type(m) == nn.Linear:\n m.weight.data.normal_(0.0, 0.001)\n m.bias.data.fill_(0.0)\n\n\ndef update_lr(optimizer, lr):\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr\n\n\n<mask token>\n\n\nclass ConvNet(nn.Module):\n\n def __init__(self, input_size, hidden_layers, num_classes, norm_layer=None\n ):\n super(ConvNet, self).__init__()\n layers = []\n layers.append(nn.Conv2d(input_size, hidden_layers[0], kernel_size=3,\n stride=1, padding=1))\n layers.append(nn.Dropout(dropout_p))\n if norm_layer == 'BN':\n layers.append(nn.BatchNorm2d(hidden_layers[0], eps=1e-05,\n momentum=0.1, affine=True, track_running_stats=True))\n layers.append(nn.ReLU())\n layers.append(nn.MaxPool2d(kernel_size=2, stride=2))\n for Din, Dout in zip(hidden_layers[:-1], hidden_layers[1:]):\n layers.append(nn.Conv2d(Din, Dout, kernel_size=3, stride=1,\n padding=1))\n layers.append(nn.Dropout(dropout_p))\n if norm_layer == 'BN':\n layers.append(nn.BatchNorm2d(Dout, eps=1e-05, momentum=0.1,\n affine=True, track_running_stats=True))\n layers.append(nn.ReLU())\n layers.append(nn.MaxPool2d(kernel_size=2, stride=2))\n self.ConvBlocks = nn.Sequential(*layers)\n self.Dout = hidden_layers[-1]\n self.Dense = nn.Linear(hidden_layers[-1], num_classes)\n\n def forward(self, x):\n out = self.ConvBlocks(x)\n out = out.view(-1, 512)\n out = self.Dense(out)\n return out\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef weights_init(m):\n if type(m) == nn.Linear:\n m.weight.data.normal_(0.0, 0.001)\n m.bias.data.fill_(0.0)\n\n\ndef update_lr(optimizer, lr):\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr\n\n\n<mask token>\n\n\nclass ConvNet(nn.Module):\n\n def __init__(self, input_size, hidden_layers, num_classes, norm_layer=None\n ):\n super(ConvNet, self).__init__()\n layers = []\n layers.append(nn.Conv2d(input_size, hidden_layers[0], kernel_size=3,\n stride=1, padding=1))\n layers.append(nn.Dropout(dropout_p))\n if norm_layer == 'BN':\n layers.append(nn.BatchNorm2d(hidden_layers[0], eps=1e-05,\n momentum=0.1, affine=True, track_running_stats=True))\n layers.append(nn.ReLU())\n layers.append(nn.MaxPool2d(kernel_size=2, stride=2))\n for Din, Dout in zip(hidden_layers[:-1], hidden_layers[1:]):\n layers.append(nn.Conv2d(Din, Dout, kernel_size=3, stride=1,\n padding=1))\n layers.append(nn.Dropout(dropout_p))\n if norm_layer == 'BN':\n layers.append(nn.BatchNorm2d(Dout, eps=1e-05, momentum=0.1,\n affine=True, track_running_stats=True))\n layers.append(nn.ReLU())\n layers.append(nn.MaxPool2d(kernel_size=2, stride=2))\n self.ConvBlocks = nn.Sequential(*layers)\n self.Dout = hidden_layers[-1]\n self.Dense = nn.Linear(hidden_layers[-1], num_classes)\n\n def forward(self, x):\n out = self.ConvBlocks(x)\n out = out.view(-1, 512)\n out = self.Dense(out)\n return out\n\n\n<mask token>\n\n\ndef VisualizeFilter(model):\n kernel_map = np.zeros((7 * 4 + 3, 15 * 4 + 3, 3))\n kernels = list(model.parameters())[0]\n kernels = kernels.to('cpu')\n kernels = kernels.data.numpy()\n kernels = (kernels - kernels.min()) / (kernels.max() - kernels.min())\n cnt = 0\n for i in range(0, 8 * 4, 4):\n for j in range(0, 16 * 4, 4):\n kernel_map[i:i + 3, j:j + 3, :] = kernels[cnt]\n cnt = cnt + 1\n plt.figure(figsize=(20, 10))\n plt.imshow(kernel_map)\n plt.show()\n pass\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef weights_init(m):\n if type(m) == nn.Linear:\n m.weight.data.normal_(0.0, 0.001)\n m.bias.data.fill_(0.0)\n\n\ndef update_lr(optimizer, lr):\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr\n\n\n<mask token>\nprint('Using device: %s' % device)\n<mask token>\nprint(hidden_size)\n<mask token>\ndata_aug_transforms += [transforms.RandomCrop(32, padding=4), transforms.\n RandomHorizontalFlip(), transforms.RandomVerticalFlip(), transforms.\n RandomRotation(2), transforms.RandomGrayscale(), transforms.ColorJitter\n (brightness=0.1, contrast=0.05, saturation=0.5, hue=0.05), transforms.\n RandomAffine(0, translate=[0.2, 0.2], scale=None, shear=0, resample=\n False, fillcolor=0)]\n<mask token>\n\n\nclass ConvNet(nn.Module):\n\n def __init__(self, input_size, hidden_layers, num_classes, norm_layer=None\n ):\n super(ConvNet, self).__init__()\n layers = []\n layers.append(nn.Conv2d(input_size, hidden_layers[0], kernel_size=3,\n stride=1, padding=1))\n layers.append(nn.Dropout(dropout_p))\n if norm_layer == 'BN':\n layers.append(nn.BatchNorm2d(hidden_layers[0], eps=1e-05,\n momentum=0.1, affine=True, track_running_stats=True))\n layers.append(nn.ReLU())\n layers.append(nn.MaxPool2d(kernel_size=2, stride=2))\n for Din, Dout in zip(hidden_layers[:-1], hidden_layers[1:]):\n layers.append(nn.Conv2d(Din, Dout, kernel_size=3, stride=1,\n padding=1))\n layers.append(nn.Dropout(dropout_p))\n if norm_layer == 'BN':\n layers.append(nn.BatchNorm2d(Dout, eps=1e-05, momentum=0.1,\n affine=True, track_running_stats=True))\n layers.append(nn.ReLU())\n layers.append(nn.MaxPool2d(kernel_size=2, stride=2))\n self.ConvBlocks = nn.Sequential(*layers)\n self.Dout = hidden_layers[-1]\n self.Dense = nn.Linear(hidden_layers[-1], num_classes)\n\n def forward(self, x):\n out = self.ConvBlocks(x)\n out = out.view(-1, 512)\n out = self.Dense(out)\n return out\n\n\ndef PrintModelSize(model, disp=True):\n model_sz = 0\n for parameter in model.parameters():\n model_sz += parameter.nelement()\n if disp == True:\n print('\\nNumber of parameters: ', model_sz)\n print('\\n')\n return model_sz\n\n\ndef VisualizeFilter(model):\n kernel_map = np.zeros((7 * 4 + 3, 15 * 4 + 3, 3))\n kernels = list(model.parameters())[0]\n kernels = kernels.to('cpu')\n kernels = kernels.data.numpy()\n kernels = (kernels - kernels.min()) / (kernels.max() - kernels.min())\n cnt = 0\n for i in range(0, 8 * 4, 4):\n for j in range(0, 16 * 4, 4):\n kernel_map[i:i + 3, j:j + 3, :] = kernels[cnt]\n cnt = cnt + 1\n plt.figure(figsize=(20, 10))\n plt.imshow(kernel_map)\n plt.show()\n pass\n\n\n<mask token>\nmodel.apply(weights_init)\nprint(model)\nfor i, (images, labels) in enumerate(train_loader):\n images = images.to(device)\n break\nPrintModelSize(model)\n<mask token>\nfor epoch in range(num_epochs):\n model.train()\n loss_iter = 0\n for i, (images, labels) in enumerate(train_loader):\n images = images.to(device)\n labels = labels.to(device)\n outputs = model(images)\n loss = criterion(outputs, labels)\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n loss_iter += loss.item()\n if (i + 1) % 100 == 0:\n print('Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}'.format(epoch +\n 1, num_epochs, i + 1, total_step, loss.item()))\n loss_train.append(loss_iter / (len(train_loader) * batch_size))\n lr *= learning_rate_decay\n update_lr(optimizer, lr)\n model.eval()\n with torch.no_grad():\n correct = 0\n total = 0\n loss_iter = 0\n for images, labels in val_loader:\n images = images.to(device)\n labels = 
labels.to(device)\n outputs = model(images)\n _, predicted = torch.max(outputs.data, 1)\n total += labels.size(0)\n correct += (predicted == labels).sum().item()\n loss = criterion(outputs, labels)\n loss_iter += loss.item()\n loss_val.append(loss_iter / (len(val_loader) * batch_size))\n accuracy = 100 * correct / total\n accuracy_val.append(accuracy)\n print('Validation accuracy is: {} %'.format(accuracy))\n if accuracy > best_accuracy:\n best_model.load_state_dict(model.state_dict())\n best_accuracy = accuracy\nmodel.eval()\nplt.figure(2)\nplt.plot(loss_train, 'r', label='Train loss')\nplt.plot(loss_val, 'g', label='Val loss')\nplt.legend()\nplt.show()\nplt.figure(3)\nplt.plot(accuracy_val, 'r', label='Val accuracy')\nplt.legend()\nplt.show()\nmodel.load_state_dict(best_model.state_dict())\nwith torch.no_grad():\n correct = 0\n total = 0\n for images, labels in test_loader:\n images = images.to(device)\n labels = labels.to(device)\n outputs = model(images)\n _, predicted = torch.max(outputs.data, 1)\n total += labels.size(0)\n correct += (predicted == labels).sum().item()\n if total == 1000:\n break\n print('Accuracy of the network on the {} test images: {} %'.format(\n total, 100 * correct / total))\nVisualizeFilter(model)\ntorch.save(model.state_dict(), 'model.ckpt')\n",
"step-4": "<mask token>\n\n\ndef weights_init(m):\n if type(m) == nn.Linear:\n m.weight.data.normal_(0.0, 0.001)\n m.bias.data.fill_(0.0)\n\n\ndef update_lr(optimizer, lr):\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr\n\n\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\nprint('Using device: %s' % device)\ninput_size = 3\nnum_classes = 10\nhidden_size = [128, 512, 512, 512, 512]\nnum_epochs = 20\nbatch_size = 200\nlearning_rate = 0.002\nlearning_rate_decay = 0.95\nreg = 0.001\nnum_training = 49000\nnum_validation = 1000\nnorm_layer = None\nprint(hidden_size)\ndropout_p = 0\ndata_aug_transforms = []\ndata_aug_transforms += [transforms.RandomCrop(32, padding=4), transforms.\n RandomHorizontalFlip(), transforms.RandomVerticalFlip(), transforms.\n RandomRotation(2), transforms.RandomGrayscale(), transforms.ColorJitter\n (brightness=0.1, contrast=0.05, saturation=0.5, hue=0.05), transforms.\n RandomAffine(0, translate=[0.2, 0.2], scale=None, shear=0, resample=\n False, fillcolor=0)]\nnorm_transform = transforms.Compose(data_aug_transforms + [transforms.\n ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])\ntest_transform = transforms.Compose([transforms.ToTensor(), transforms.\n Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])\ncifar_dataset = torchvision.datasets.CIFAR10(root='datasets/', train=True,\n transform=norm_transform, download=True)\ntest_dataset = torchvision.datasets.CIFAR10(root='datasets/', train=False,\n transform=test_transform)\nmask = list(range(num_training))\ntrain_dataset = torch.utils.data.Subset(cifar_dataset, mask)\nmask = list(range(num_training, num_training + num_validation))\nval_dataset = torch.utils.data.Subset(cifar_dataset, mask)\ntrain_loader = torch.utils.data.DataLoader(dataset=train_dataset,\n batch_size=batch_size, shuffle=True)\nval_loader = torch.utils.data.DataLoader(dataset=val_dataset, batch_size=\n batch_size, shuffle=False)\ntest_loader = torch.utils.data.DataLoader(dataset=test_dataset, batch_size=\n batch_size, shuffle=False)\n\n\nclass ConvNet(nn.Module):\n\n def __init__(self, input_size, hidden_layers, num_classes, norm_layer=None\n ):\n super(ConvNet, self).__init__()\n layers = []\n layers.append(nn.Conv2d(input_size, hidden_layers[0], kernel_size=3,\n stride=1, padding=1))\n layers.append(nn.Dropout(dropout_p))\n if norm_layer == 'BN':\n layers.append(nn.BatchNorm2d(hidden_layers[0], eps=1e-05,\n momentum=0.1, affine=True, track_running_stats=True))\n layers.append(nn.ReLU())\n layers.append(nn.MaxPool2d(kernel_size=2, stride=2))\n for Din, Dout in zip(hidden_layers[:-1], hidden_layers[1:]):\n layers.append(nn.Conv2d(Din, Dout, kernel_size=3, stride=1,\n padding=1))\n layers.append(nn.Dropout(dropout_p))\n if norm_layer == 'BN':\n layers.append(nn.BatchNorm2d(Dout, eps=1e-05, momentum=0.1,\n affine=True, track_running_stats=True))\n layers.append(nn.ReLU())\n layers.append(nn.MaxPool2d(kernel_size=2, stride=2))\n self.ConvBlocks = nn.Sequential(*layers)\n self.Dout = hidden_layers[-1]\n self.Dense = nn.Linear(hidden_layers[-1], num_classes)\n\n def forward(self, x):\n out = self.ConvBlocks(x)\n out = out.view(-1, 512)\n out = self.Dense(out)\n return out\n\n\ndef PrintModelSize(model, disp=True):\n model_sz = 0\n for parameter in model.parameters():\n model_sz += parameter.nelement()\n if disp == True:\n print('\\nNumber of parameters: ', model_sz)\n print('\\n')\n return model_sz\n\n\ndef VisualizeFilter(model):\n kernel_map = np.zeros((7 * 4 + 3, 15 * 4 + 3, 3))\n kernels = 
list(model.parameters())[0]\n kernels = kernels.to('cpu')\n kernels = kernels.data.numpy()\n kernels = (kernels - kernels.min()) / (kernels.max() - kernels.min())\n cnt = 0\n for i in range(0, 8 * 4, 4):\n for j in range(0, 16 * 4, 4):\n kernel_map[i:i + 3, j:j + 3, :] = kernels[cnt]\n cnt = cnt + 1\n plt.figure(figsize=(20, 10))\n plt.imshow(kernel_map)\n plt.show()\n pass\n\n\nmodel = ConvNet(input_size, hidden_size, num_classes, norm_layer=norm_layer\n ).to(device)\nmodel.apply(weights_init)\nprint(model)\nfor i, (images, labels) in enumerate(train_loader):\n images = images.to(device)\n break\nPrintModelSize(model)\ncriterion = nn.CrossEntropyLoss()\noptimizer = torch.optim.Adam(model.parameters(), lr=learning_rate,\n weight_decay=reg)\nlr = learning_rate\ntotal_step = len(train_loader)\nloss_train = []\nloss_val = []\nbest_accuracy = 0\naccuracy_val = []\nbest_model = type(model)(input_size, hidden_size, num_classes, norm_layer=\n norm_layer)\nfor epoch in range(num_epochs):\n model.train()\n loss_iter = 0\n for i, (images, labels) in enumerate(train_loader):\n images = images.to(device)\n labels = labels.to(device)\n outputs = model(images)\n loss = criterion(outputs, labels)\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n loss_iter += loss.item()\n if (i + 1) % 100 == 0:\n print('Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}'.format(epoch +\n 1, num_epochs, i + 1, total_step, loss.item()))\n loss_train.append(loss_iter / (len(train_loader) * batch_size))\n lr *= learning_rate_decay\n update_lr(optimizer, lr)\n model.eval()\n with torch.no_grad():\n correct = 0\n total = 0\n loss_iter = 0\n for images, labels in val_loader:\n images = images.to(device)\n labels = labels.to(device)\n outputs = model(images)\n _, predicted = torch.max(outputs.data, 1)\n total += labels.size(0)\n correct += (predicted == labels).sum().item()\n loss = criterion(outputs, labels)\n loss_iter += loss.item()\n loss_val.append(loss_iter / (len(val_loader) * batch_size))\n accuracy = 100 * correct / total\n accuracy_val.append(accuracy)\n print('Validation accuracy is: {} %'.format(accuracy))\n if accuracy > best_accuracy:\n best_model.load_state_dict(model.state_dict())\n best_accuracy = accuracy\nmodel.eval()\nplt.figure(2)\nplt.plot(loss_train, 'r', label='Train loss')\nplt.plot(loss_val, 'g', label='Val loss')\nplt.legend()\nplt.show()\nplt.figure(3)\nplt.plot(accuracy_val, 'r', label='Val accuracy')\nplt.legend()\nplt.show()\nmodel.load_state_dict(best_model.state_dict())\nwith torch.no_grad():\n correct = 0\n total = 0\n for images, labels in test_loader:\n images = images.to(device)\n labels = labels.to(device)\n outputs = model(images)\n _, predicted = torch.max(outputs.data, 1)\n total += labels.size(0)\n correct += (predicted == labels).sum().item()\n if total == 1000:\n break\n print('Accuracy of the network on the {} test images: {} %'.format(\n total, 100 * correct / total))\nVisualizeFilter(model)\ntorch.save(model.state_dict(), 'model.ckpt')\n",
"step-5": "import torch\nimport torch.nn as nn\nimport torchvision\nimport torchvision.transforms as transforms\nimport numpy as np\n\nimport matplotlib.pyplot as plt\n\n\ndef weights_init(m):\n if type(m) == nn.Linear:\n m.weight.data.normal_(0.0, 1e-3)\n m.bias.data.fill_(0.)\n\ndef update_lr(optimizer, lr):\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr\n\n\n\n#--------------------------------\n# Device configuration\n#--------------------------------\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\nprint('Using device: %s'%device)\n\n#--------------------------------\n# Hyper-parameters\n#--------------------------------\ninput_size = 3\nnum_classes = 10\nhidden_size = [128, 512, 512, 512, 512]\nnum_epochs = 20\nbatch_size = 200\nlearning_rate = 2e-3\nlearning_rate_decay = 0.95\nreg=0.001\nnum_training= 49000\nnum_validation =1000\nnorm_layer = None #norm_layer=\"BN\"\nprint(hidden_size)\n\ndropout_p = 0 #probability of dropout\n\n\n\n#-------------------------------------------------\n# Load the CIFAR-10 dataset\n#-------------------------------------------------\n#################################################################################\n# TODO: Q3.a Choose the right data augmentation transforms with the right #\n# hyper-parameters and put them in the data_aug_transforms variable #\n#################################################################################\ndata_aug_transforms = []\n# *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n\ndata_aug_transforms += [transforms.RandomCrop(32, padding=4), \n transforms.RandomHorizontalFlip(), \n transforms.RandomVerticalFlip(), \n transforms.RandomRotation(2),\n transforms.RandomGrayscale(),\n transforms.ColorJitter(brightness=0.1, contrast=0.05, saturation=0.5, hue=0.05),\n transforms.RandomAffine(0, translate=[0.2,0.2], scale=None, shear=0, resample=False, fillcolor=0),\n ]\n\n# *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\nnorm_transform = transforms.Compose(data_aug_transforms+[transforms.ToTensor(),\n transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))\n ])\ntest_transform = transforms.Compose([transforms.ToTensor(),\n transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))\n ])\ncifar_dataset = torchvision.datasets.CIFAR10(root='datasets/',\n train=True,\n transform=norm_transform,\n download=True)\n\ntest_dataset = torchvision.datasets.CIFAR10(root='datasets/',\n train=False,\n transform=test_transform\n )\n\n#-------------------------------------------------\n# Prepare the training and validation splits\n#-------------------------------------------------\nmask = list(range(num_training))\ntrain_dataset = torch.utils.data.Subset(cifar_dataset, mask)\nmask = list(range(num_training, num_training + num_validation))\nval_dataset = torch.utils.data.Subset(cifar_dataset, mask)\n\n#-------------------------------------------------\n# Data loader\n#-------------------------------------------------\ntrain_loader = torch.utils.data.DataLoader(dataset=train_dataset,\n batch_size=batch_size,\n shuffle=True)\n\nval_loader = torch.utils.data.DataLoader(dataset=val_dataset,\n batch_size=batch_size,\n shuffle=False)\n\ntest_loader = torch.utils.data.DataLoader(dataset=test_dataset,\n batch_size=batch_size,\n shuffle=False)\n\n\n#-------------------------------------------------\n# Convolutional neural network (Q1.a and Q2.a)\n# Set norm_layer for different networks whether using batch normalization\n#-------------------------------------------------\nclass 
ConvNet(nn.Module):\n def __init__(self, input_size, hidden_layers, num_classes, norm_layer=None):\n super(ConvNet, self).__init__()\n #################################################################################\n # TODO: Initialize the modules required to implement the convolutional layer #\n # described in the exercise. #\n # For Q1.a make use of conv2d and relu layers from the torch.nn module. #\n # For Q2.a make use of BatchNorm2d layer from the torch.nn module. #\n # For Q3.b Use Dropout layer from the torch.nn module. #\n #################################################################################\n layers = []\n # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n\n # First ConvBlock with input size (i.e. C=3) and first hidden layer(i.e. 128)\n layers.append(nn.Conv2d(input_size, hidden_layers[0], kernel_size=3, stride=1, padding=1))\n layers.append(nn.Dropout(dropout_p))\n if norm_layer==\"BN\":\n layers.append(nn.BatchNorm2d(hidden_layers[0], eps=1e-05, momentum=0.1, \n affine=True, track_running_stats=True))\n layers.append(nn.ReLU())\n layers.append(nn.MaxPool2d(kernel_size=2, stride=2))\n\n # Adding the other blocks\n for Din, Dout in zip(hidden_layers[:-1], hidden_layers[1:]):\n \n layers.append(nn.Conv2d(Din, Dout, kernel_size=3, stride=1, padding=1))\n layers.append(nn.Dropout(dropout_p))\n if norm_layer==\"BN\":\n layers.append(nn.BatchNorm2d(Dout, eps=1e-05, momentum=0.1, \n affine=True, track_running_stats=True))\n layers.append(nn.ReLU())\n layers.append(nn.MaxPool2d(kernel_size=2, stride=2))\n\t\t\n # stacking convolutional blocks\n self.ConvBlocks = nn.Sequential(*layers)\n self.Dout = hidden_layers[-1]\n\n # Fully connected layer\n self.Dense = nn.Linear(hidden_layers[-1], num_classes)\n\n # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n\n def forward(self, x):\n #################################################################################\n # TODO: Implement the forward pass computations #\n #################################################################################\n # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n\n out = self.ConvBlocks(x)\n out = out.view(-1, 512)\n out = self.Dense(out)\n\n # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n return out\n\n\n\n#-------------------------------------------------\n# Calculate the model size (Q1.b)\n# if disp is true, print the model parameters, otherwise, only return the number of parameters.\n#-------------------------------------------------\ndef PrintModelSize(model, disp=True):\n #################################################################################\n # TODO: Implement the function to count the number of trainable parameters in #\n # the input model. 
This useful to track the capacity of the model you are #\n # training #\n #################################################################################\n # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n model_sz = 0\n for parameter in model.parameters():\n model_sz += parameter.nelement()\n if disp == True:\n print(\"\\nNumber of parameters: \", model_sz)\n print(\"\\n\")\n # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n return model_sz\n\n\n\n#-------------------------------------------------\n# Calculate the model size (Q1.c)\n# visualize the convolution filters of the first convolution layer of the input model\n#-------------------------------------------------\ndef VisualizeFilter(model):\n #################################################################################\n # TODO: Implement the functiont to visualize the weights in the first conv layer#\n # in the model. Visualize them as a single image of stacked filters. #\n # You can use matlplotlib.imshow to visualize an image in python #\n #################################################################################\n # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n kernel_map = np.zeros((7*4 + 3, 15*4 + 3, 3))\n\n kernels = list(model.parameters())[0]\n kernels = kernels.to(\"cpu\")\n kernels = kernels.data.numpy()\n\n kernels = (kernels - kernels.min()) / (kernels.max() - kernels.min())\n\n cnt = 0\n for i in range(0, 8*4,4):\n for j in range(0, 16*4, 4):\n kernel_map[i:i+3, j:j+3, :] = kernels[cnt]\n cnt = cnt + 1\n\n plt.figure(figsize=(20, 10))\n plt.imshow(kernel_map)\n plt.show()\n\n pass\n # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n\n\n\n#======================================================================================\n# Q1.a: Implementing convolutional neural net in PyTorch\n#======================================================================================\n# In this question we will implement a convolutional neural networks using the PyTorch\n# library. 
Please complete the code for the ConvNet class evaluating the model\n#--------------------------------------------------------------------------------------\n\nmodel = ConvNet(input_size, hidden_size, num_classes, norm_layer=norm_layer).to(device)\n# Q2.a - Initialize the model with correct batch norm layer\n\nmodel.apply(weights_init)\n# Print the model\nprint(model)\n\nfor i, (images, labels) in enumerate(train_loader):\n\timages = images.to(device)\n\n\tbreak\n\n# Print model size\n#======================================================================================\n# Q1.b: Implementing the function to count the number of trainable parameters in the model\n#======================================================================================\nPrintModelSize(model)\n#======================================================================================\n# Q1.a: Implementing the function to visualize the filters in the first conv layers.\n# Visualize the filters before training\n#======================================================================================\n#VisualizeFilter(model)\n\n\n\n# Loss and optimizer\ncriterion = nn.CrossEntropyLoss()\noptimizer = torch.optim.Adam(model.parameters(), lr=learning_rate, weight_decay=reg)\n\n# Train the model\nlr = learning_rate\ntotal_step = len(train_loader)\nloss_train = []\nloss_val = []\nbest_accuracy = 0\naccuracy_val = []\nbest_model = type(model)(input_size, hidden_size, num_classes, norm_layer=norm_layer) # get a new instance\n#best_model = ConvNet(input_size, hidden_size, num_classes, norm_layer=norm_layer)\nfor epoch in range(num_epochs):\n\n model.train()\n\n loss_iter = 0\n for i, (images, labels) in enumerate(train_loader):\n # Move tensors to the configured device\n images = images.to(device)\n labels = labels.to(device)\n\n # Forward pass\n outputs = model(images)\n loss = criterion(outputs, labels)\n\n # Backward and optimize\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n loss_iter += loss.item()\n \n if (i+1) % 100 == 0:\n print ('Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}'\n .format(epoch+1, num_epochs, i+1, total_step, loss.item()))\n \n loss_train.append(loss_iter/(len(train_loader)*batch_size))\n\n \n # Code to update the lr\n lr *= learning_rate_decay\n update_lr(optimizer, lr)\n \n \n model.eval()\n with torch.no_grad():\n correct = 0\n total = 0\n loss_iter = 0\n for images, labels in val_loader:\n images = images.to(device)\n labels = labels.to(device)\n \n outputs = model(images)\n _, predicted = torch.max(outputs.data, 1)\n \n total += labels.size(0)\n correct += (predicted == labels).sum().item()\n \n loss = criterion(outputs, labels)\n loss_iter += loss.item()\n \n loss_val.append(loss_iter/(len(val_loader)*batch_size))\n\n accuracy = 100 * correct / total\n accuracy_val.append(accuracy)\n print('Validation accuracy is: {} %'.format(accuracy))\n #################################################################################\n # TODO: Q2.b Implement the early stopping mechanism to save the model which has #\n # the model with the best validation accuracy so-far (use best_model). 
#\n #################################################################################\n\n # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n if accuracy > best_accuracy:\n best_model.load_state_dict(model.state_dict())\n best_accuracy=accuracy\n \n\n # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n\n \n\n# Test the model\n# In test phase, we don't need to compute gradients (for memory efficiency)\nmodel.eval()\n\n\n\nplt.figure(2)\nplt.plot(loss_train, 'r', label='Train loss')\nplt.plot(loss_val, 'g', label='Val loss')\nplt.legend()\nplt.show()\n\nplt.figure(3)\nplt.plot(accuracy_val, 'r', label='Val accuracy')\nplt.legend()\nplt.show()\n\n\n\n#################################################################################\n# TODO: Q2.b Implement the early stopping mechanism to load the weights from the#\n# best model so far and perform testing with this model. #\n#################################################################################\n# *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\nmodel.load_state_dict(best_model.state_dict())\n# *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n\n#Compute accuracy on the test set\nwith torch.no_grad():\n correct = 0\n total = 0\n for images, labels in test_loader:\n images = images.to(device)\n labels = labels.to(device)\n outputs = model(images)\n _, predicted = torch.max(outputs.data, 1)\n total += labels.size(0)\n correct += (predicted == labels).sum().item()\n if total == 1000:\n break\n\n print('Accuracy of the network on the {} test images: {} %'.format(total, 100 * correct / total))\n\n\n\n# Q1.c: Implementing the function to visualize the filters in the first conv layers.\n# Visualize the filters before training\nVisualizeFilter(model)\n\n\n\n# Save the model checkpoint\ntorch.save(model.state_dict(), 'model.ckpt')\n\n",
"step-ids": [
5,
6,
8,
9,
11
]
}
|
[
5,
6,
8,
9,
11
] |
import openpyxl # for .xlsx files
'''
The plain-text file student.txt holds the student records; its contents (including the braces) are as follows:
{
"1":["张三",150,120,100],
"2":["李四",90,99,95],
"3":["王五",60,66,68]
}
Write the above content into the student.xls file.
'''
def read_file():
words = []
with open('15.txt', 'r') as file:
content = file.read()
# print(content)
# print(type(content))
word = eval(content)
# print(word)
# print(word.keys())
# for each in word.keys():
# print(each)
# print(word[each])
# print(word.values())
# print(type(word))
for i, j in zip(word.keys(), word.values()):
# print(i, j)
words.append([i, j])
print(words)
return words
def write_list(list): # write the list into an Excel file
wb = openpyxl.Workbook()
sheet = wb.active
sheet.title = 'test'
value = list
for i in range(0, len(value)):
for j in range(0, len(value[i])):
sheet.cell(row=i + 1, column=j + 1, value=str(value[i][j]))
wb.save('city.xlsx')
print("写入数据成功!")
if __name__ == '__main__':
# read_file()
write_list(read_file())
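# Hedged sketch (editor addition): a quick read-back check of the workbook that
# write_list() produces; 'city.xlsx' is the file name used above, nothing else is assumed.
def verify_output(path='city.xlsx'):
    wb = openpyxl.load_workbook(path)
    sheet = wb.active
    for row in sheet.iter_rows(values_only=True):
        print(row)  # each row is a tuple of cell values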
|
normal
|
{
"blob_id": "f75e0ddf42cc9797cdf1c4a4477e3d16441af740",
"index": 5478,
"step-1": "<mask token>\n\n\ndef write_list(list):\n wb = openpyxl.Workbook()\n sheet = wb.active\n sheet.title = 'test'\n value = list\n for i in range(0, len(value)):\n for j in range(0, len(value[i])):\n sheet.cell(row=i + 1, column=j + 1, value=str(value[i][j]))\n wb.save('city.xlsx')\n print('写入数据成功!')\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef read_file():\n words = []\n with open('15.txt', 'r') as file:\n content = file.read()\n word = eval(content)\n for i, j in zip(word.keys(), word.values()):\n words.append([i, j])\n print(words)\n return words\n\n\ndef write_list(list):\n wb = openpyxl.Workbook()\n sheet = wb.active\n sheet.title = 'test'\n value = list\n for i in range(0, len(value)):\n for j in range(0, len(value[i])):\n sheet.cell(row=i + 1, column=j + 1, value=str(value[i][j]))\n wb.save('city.xlsx')\n print('写入数据成功!')\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef read_file():\n words = []\n with open('15.txt', 'r') as file:\n content = file.read()\n word = eval(content)\n for i, j in zip(word.keys(), word.values()):\n words.append([i, j])\n print(words)\n return words\n\n\ndef write_list(list):\n wb = openpyxl.Workbook()\n sheet = wb.active\n sheet.title = 'test'\n value = list\n for i in range(0, len(value)):\n for j in range(0, len(value[i])):\n sheet.cell(row=i + 1, column=j + 1, value=str(value[i][j]))\n wb.save('city.xlsx')\n print('写入数据成功!')\n\n\nif __name__ == '__main__':\n write_list(read_file())\n",
"step-4": "import openpyxl\n<mask token>\n\n\ndef read_file():\n words = []\n with open('15.txt', 'r') as file:\n content = file.read()\n word = eval(content)\n for i, j in zip(word.keys(), word.values()):\n words.append([i, j])\n print(words)\n return words\n\n\ndef write_list(list):\n wb = openpyxl.Workbook()\n sheet = wb.active\n sheet.title = 'test'\n value = list\n for i in range(0, len(value)):\n for j in range(0, len(value[i])):\n sheet.cell(row=i + 1, column=j + 1, value=str(value[i][j]))\n wb.save('city.xlsx')\n print('写入数据成功!')\n\n\nif __name__ == '__main__':\n write_list(read_file())\n",
"step-5": "import openpyxl # 适用于xlsx文件\n'''\n纯文本文件 student.txt为学生信息, 里面的内容(包括花括号)如下所示:\n\n{\n\t\"1\":[\"张三\",150,120,100],\n\t\"2\":[\"李四\",90,99,95],\n\t\"3\":[\"王五\",60,66,68]\n}\n请将上述内容写到 student.xls 文件中\n'''\n\n\ndef read_file():\n words = []\n with open('15.txt', 'r') as file:\n content = file.read()\n # print(content)\n # print(type(content))\n\n word = eval(content)\n # print(word)\n # print(word.keys())\n # for each in word.keys():\n # print(each)\n # print(word[each])\n # print(word.values())\n # print(type(word))\n for i, j in zip(word.keys(), word.values()):\n # print(i, j)\n words.append([i, j])\n print(words)\n return words\n\n\ndef write_list(list): # 写入excel文件\n wb = openpyxl.Workbook()\n sheet = wb.active\n sheet.title = 'test'\n value = list\n for i in range(0, len(value)):\n for j in range(0, len(value[i])):\n sheet.cell(row=i + 1, column=j + 1, value=str(value[i][j]))\n wb.save('city.xlsx')\n print(\"写入数据成功!\")\n\n\nif __name__ == '__main__':\n # read_file()\n write_list(read_file())\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
from base64 import b64encode
from configparser import ConfigParser
import functools
from flask import (
Blueprint, flash, redirect, render_template, request, session, url_for, app
)
from requests.exceptions import SSLError
import spotipy
from spotipy import oauth2
bp = Blueprint('auth', __name__, url_prefix='/auth')
config = ConfigParser()
config.read('spotify.cfg')
CLIENT_ID = config.get('SPOTIFY', 'CLIENT_ID').strip("'")
CLIENT_SECRET = config.get('SPOTIFY', 'CLIENT_SECRET').strip("'")
REDIRECT_URI = config.get('SPOTIFY', 'REDIRECT_URI').strip("'")
SCOPE = 'user-read-currently-playing user-library-read playlist-read-private'
SP_OAUTH = oauth2.SpotifyOAuth(CLIENT_ID, CLIENT_SECRET, REDIRECT_URI, scope=SCOPE)
@bp.route('/login')
def login():
'''
: Create session and login user
: PARAMS None
: RETURN <view>
'''
try:
session.clear()
return redirect(SP_OAUTH.get_authorize_url())
except ConnectionError as e:
flash("Connection error")
@bp.route('/callback/')
def callback():
'''
: Redirect user after login
: PARAMS None
: RETURN <view>
'''
code = request.args.get('code')
token = SP_OAUTH.get_access_token(code)
if token:
session['token'] = token['access_token']
session['refresh'] = token['refresh_token']
sp = spotipy.Spotify(auth=session['token'])
try:
cu = sp.current_user()
session['display_name'] = cu['display_name']
except SSLError as e:
# flash("Connection error")
return redirect(url_for('home'))
else:
flash("Cannot get access token")
return redirect(url_for('home'))
@bp.route('/logout')
def logout():
'''
: Clear session and log user out
: PARAMS None
: RETURN <view>
'''
session.clear()
return redirect(url_for('home'))
def login_required(view):
@functools.wraps(view)
def wrapped_view(**kwargs):
if 'refresh' in session:
refresh = SP_OAUTH.refresh_access_token(session['refresh'])
session['token'] = refresh['access_token']
session['refresh'] = refresh['refresh_token']
sp = spotipy.Spotify(auth=session['token'])
try:
cu = sp.current_user()
session['display_name'] = cu['display_name']
except SSLError:
# flash("Connection error - please try again.")
return redirect(url_for('home'))
return view(**kwargs)
else:
return redirect(url_for('home'))
return wrapped_view
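# Hedged usage sketch (editor addition): how a view elsewhere in the app could be
# guarded with login_required; the '/whoami' path and function name are illustrative
# assumptions, not part of this blueprint.
def _register_example_protected_view(blueprint):
    @blueprint.route('/whoami')
    @login_required
    def whoami():
        return session.get('display_name', 'anonymous')
    return whoami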
|
normal
|
{
"blob_id": "8f7ecbe03e9a7a1d9df8cbe4596456e21b84653b",
"index": 9114,
"step-1": "<mask token>\n\n\[email protected]('/login')\ndef login():\n \"\"\"\n : Create session and login user\n : PARAMS None\n : RETURN <view>\n \"\"\"\n try:\n session.clear()\n return redirect(SP_OAUTH.get_authorize_url())\n except ConnectionError as e:\n flash('Connection error')\n\n\[email protected]('/callback/')\ndef callback():\n \"\"\"\n : Redirect user after login\n : PARAMS None\n : RETURN <view>\n \"\"\"\n code = request.args.get('code')\n token = SP_OAUTH.get_access_token(code)\n if token:\n session['token'] = token['access_token']\n session['refresh'] = token['refresh_token']\n sp = spotipy.Spotify(auth=session['token'])\n try:\n cu = sp.current_user()\n session['display_name'] = cu['display_name']\n except SSLError as e:\n return redirect(url_for('home'))\n else:\n flash('Cannot get access token')\n return redirect(url_for('home'))\n\n\[email protected]('/logout')\ndef logout():\n \"\"\"\n : Clear session and log user out\n : PARAMS None\n : RETURN <view>\n \"\"\"\n session.clear()\n return redirect(url_for('home'))\n\n\ndef login_required(view):\n\n @functools.wraps(view)\n def wrapped_view(**kwargs):\n if 'refresh' in session:\n refresh = SP_OAUTH.refresh_access_token(session['refresh'])\n session['token'] = refresh['access_token']\n session['refresh'] = refresh['refresh_token']\n sp = spotipy.Spotify(auth=session['token'])\n try:\n cu = sp.current_user()\n session['display_name'] = cu['display_name']\n except SSLError:\n return redirect(url_for('home'))\n return view(**kwargs)\n else:\n return redirect(url_for('home'))\n return wrapped_view\n",
"step-2": "<mask token>\nconfig.read('spotify.cfg')\n<mask token>\n\n\[email protected]('/login')\ndef login():\n \"\"\"\n : Create session and login user\n : PARAMS None\n : RETURN <view>\n \"\"\"\n try:\n session.clear()\n return redirect(SP_OAUTH.get_authorize_url())\n except ConnectionError as e:\n flash('Connection error')\n\n\[email protected]('/callback/')\ndef callback():\n \"\"\"\n : Redirect user after login\n : PARAMS None\n : RETURN <view>\n \"\"\"\n code = request.args.get('code')\n token = SP_OAUTH.get_access_token(code)\n if token:\n session['token'] = token['access_token']\n session['refresh'] = token['refresh_token']\n sp = spotipy.Spotify(auth=session['token'])\n try:\n cu = sp.current_user()\n session['display_name'] = cu['display_name']\n except SSLError as e:\n return redirect(url_for('home'))\n else:\n flash('Cannot get access token')\n return redirect(url_for('home'))\n\n\[email protected]('/logout')\ndef logout():\n \"\"\"\n : Clear session and log user out\n : PARAMS None\n : RETURN <view>\n \"\"\"\n session.clear()\n return redirect(url_for('home'))\n\n\ndef login_required(view):\n\n @functools.wraps(view)\n def wrapped_view(**kwargs):\n if 'refresh' in session:\n refresh = SP_OAUTH.refresh_access_token(session['refresh'])\n session['token'] = refresh['access_token']\n session['refresh'] = refresh['refresh_token']\n sp = spotipy.Spotify(auth=session['token'])\n try:\n cu = sp.current_user()\n session['display_name'] = cu['display_name']\n except SSLError:\n return redirect(url_for('home'))\n return view(**kwargs)\n else:\n return redirect(url_for('home'))\n return wrapped_view\n",
"step-3": "<mask token>\nbp = Blueprint('auth', __name__, url_prefix='/auth')\nconfig = ConfigParser()\nconfig.read('spotify.cfg')\nCLIENT_ID = config.get('SPOTIFY', 'CLIENT_ID').strip(\"'\")\nCLIENT_SECRET = config.get('SPOTIFY', 'CLIENT_SECRET').strip(\"'\")\nREDIRECT_URI = config.get('SPOTIFY', 'REDIRECT_URI').strip(\"'\")\nSCOPE = 'user-read-currently-playing user-library-read playlist-read-private'\nSP_OAUTH = oauth2.SpotifyOAuth(CLIENT_ID, CLIENT_SECRET, REDIRECT_URI,\n scope=SCOPE)\n\n\[email protected]('/login')\ndef login():\n \"\"\"\n : Create session and login user\n : PARAMS None\n : RETURN <view>\n \"\"\"\n try:\n session.clear()\n return redirect(SP_OAUTH.get_authorize_url())\n except ConnectionError as e:\n flash('Connection error')\n\n\[email protected]('/callback/')\ndef callback():\n \"\"\"\n : Redirect user after login\n : PARAMS None\n : RETURN <view>\n \"\"\"\n code = request.args.get('code')\n token = SP_OAUTH.get_access_token(code)\n if token:\n session['token'] = token['access_token']\n session['refresh'] = token['refresh_token']\n sp = spotipy.Spotify(auth=session['token'])\n try:\n cu = sp.current_user()\n session['display_name'] = cu['display_name']\n except SSLError as e:\n return redirect(url_for('home'))\n else:\n flash('Cannot get access token')\n return redirect(url_for('home'))\n\n\[email protected]('/logout')\ndef logout():\n \"\"\"\n : Clear session and log user out\n : PARAMS None\n : RETURN <view>\n \"\"\"\n session.clear()\n return redirect(url_for('home'))\n\n\ndef login_required(view):\n\n @functools.wraps(view)\n def wrapped_view(**kwargs):\n if 'refresh' in session:\n refresh = SP_OAUTH.refresh_access_token(session['refresh'])\n session['token'] = refresh['access_token']\n session['refresh'] = refresh['refresh_token']\n sp = spotipy.Spotify(auth=session['token'])\n try:\n cu = sp.current_user()\n session['display_name'] = cu['display_name']\n except SSLError:\n return redirect(url_for('home'))\n return view(**kwargs)\n else:\n return redirect(url_for('home'))\n return wrapped_view\n",
"step-4": "from base64 import b64encode\nfrom configparser import ConfigParser\nimport functools\nfrom flask import Blueprint, flash, redirect, render_template, request, session, url_for, app\nfrom requests.exceptions import SSLError\nimport spotipy\nfrom spotipy import oauth2\nbp = Blueprint('auth', __name__, url_prefix='/auth')\nconfig = ConfigParser()\nconfig.read('spotify.cfg')\nCLIENT_ID = config.get('SPOTIFY', 'CLIENT_ID').strip(\"'\")\nCLIENT_SECRET = config.get('SPOTIFY', 'CLIENT_SECRET').strip(\"'\")\nREDIRECT_URI = config.get('SPOTIFY', 'REDIRECT_URI').strip(\"'\")\nSCOPE = 'user-read-currently-playing user-library-read playlist-read-private'\nSP_OAUTH = oauth2.SpotifyOAuth(CLIENT_ID, CLIENT_SECRET, REDIRECT_URI,\n scope=SCOPE)\n\n\[email protected]('/login')\ndef login():\n \"\"\"\n : Create session and login user\n : PARAMS None\n : RETURN <view>\n \"\"\"\n try:\n session.clear()\n return redirect(SP_OAUTH.get_authorize_url())\n except ConnectionError as e:\n flash('Connection error')\n\n\[email protected]('/callback/')\ndef callback():\n \"\"\"\n : Redirect user after login\n : PARAMS None\n : RETURN <view>\n \"\"\"\n code = request.args.get('code')\n token = SP_OAUTH.get_access_token(code)\n if token:\n session['token'] = token['access_token']\n session['refresh'] = token['refresh_token']\n sp = spotipy.Spotify(auth=session['token'])\n try:\n cu = sp.current_user()\n session['display_name'] = cu['display_name']\n except SSLError as e:\n return redirect(url_for('home'))\n else:\n flash('Cannot get access token')\n return redirect(url_for('home'))\n\n\[email protected]('/logout')\ndef logout():\n \"\"\"\n : Clear session and log user out\n : PARAMS None\n : RETURN <view>\n \"\"\"\n session.clear()\n return redirect(url_for('home'))\n\n\ndef login_required(view):\n\n @functools.wraps(view)\n def wrapped_view(**kwargs):\n if 'refresh' in session:\n refresh = SP_OAUTH.refresh_access_token(session['refresh'])\n session['token'] = refresh['access_token']\n session['refresh'] = refresh['refresh_token']\n sp = spotipy.Spotify(auth=session['token'])\n try:\n cu = sp.current_user()\n session['display_name'] = cu['display_name']\n except SSLError:\n return redirect(url_for('home'))\n return view(**kwargs)\n else:\n return redirect(url_for('home'))\n return wrapped_view\n",
"step-5": "from base64 import b64encode\nfrom configparser import ConfigParser\nimport functools\nfrom flask import (\n Blueprint, flash, redirect, render_template, request, session, url_for, app\n)\nfrom requests.exceptions import SSLError\nimport spotipy\nfrom spotipy import oauth2\n\nbp = Blueprint('auth', __name__, url_prefix='/auth')\nconfig = ConfigParser()\nconfig.read('spotify.cfg')\nCLIENT_ID = config.get('SPOTIFY', 'CLIENT_ID').strip(\"'\")\nCLIENT_SECRET = config.get('SPOTIFY', 'CLIENT_SECRET').strip(\"'\")\nREDIRECT_URI = config.get('SPOTIFY', 'REDIRECT_URI').strip(\"'\")\nSCOPE = 'user-read-currently-playing user-library-read playlist-read-private'\nSP_OAUTH = oauth2.SpotifyOAuth(CLIENT_ID, CLIENT_SECRET, REDIRECT_URI, scope=SCOPE)\n\n\[email protected]('/login')\ndef login():\n '''\n : Create session and login user\n : PARAMS None\n : RETURN <view>\n '''\n try:\n session.clear()\n return redirect(SP_OAUTH.get_authorize_url())\n except ConnectionError as e:\n flash(\"Connection error\")\n\n\[email protected]('/callback/')\ndef callback():\n '''\n : Redirect user after login\n : PARAMS None\n : RETURN <view>\n '''\n code = request.args.get('code')\n token = SP_OAUTH.get_access_token(code)\n if token:\n session['token'] = token['access_token']\n session['refresh'] = token['refresh_token']\n sp = spotipy.Spotify(auth=session['token'])\n try:\n cu = sp.current_user()\n session['display_name'] = cu['display_name']\n except SSLError as e:\n # flash(\"Connection error\")\n return redirect(url_for('home'))\n else:\n flash(\"Cannot get access token\")\n return redirect(url_for('home'))\n\[email protected]('/logout')\ndef logout():\n '''\n : Clear session and log user out\n : PARAMS None\n : RETURN <view>\n '''\n session.clear()\n return redirect(url_for('home'))\n\ndef login_required(view):\n @functools.wraps(view)\n def wrapped_view(**kwargs):\n\n if 'refresh' in session:\n refresh = SP_OAUTH.refresh_access_token(session['refresh'])\n session['token'] = refresh['access_token']\n session['refresh'] = refresh['refresh_token']\n sp = spotipy.Spotify(auth=session['token'])\n try:\n cu = sp.current_user()\n session['display_name'] = cu['display_name']\n except SSLError:\n # flash(\"Connection error - please try again.\")\n return redirect(url_for('home'))\n return view(**kwargs)\n else:\n return redirect(url_for('home'))\n\n return wrapped_view\n",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
# Generated by Django 3.1 on 2020-09-09 15:58
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('orders', '0001_initial'),
]
operations = [
migrations.RenameField(
model_name='orderproduct',
old_name='products',
new_name='product',
),
]
|
normal
|
{
"blob_id": "0e73153d004137d374637abf70faffabf0bab1fb",
"index": 9762,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('orders', '0001_initial')]\n operations = [migrations.RenameField(model_name='orderproduct',\n old_name='products', new_name='product')]\n",
"step-4": "from django.db import migrations\n\n\nclass Migration(migrations.Migration):\n dependencies = [('orders', '0001_initial')]\n operations = [migrations.RenameField(model_name='orderproduct',\n old_name='products', new_name='product')]\n",
"step-5": "# Generated by Django 3.1 on 2020-09-09 15:58\n\nfrom django.db import migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('orders', '0001_initial'),\n ]\n\n operations = [\n migrations.RenameField(\n model_name='orderproduct',\n old_name='products',\n new_name='product',\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from urllib.request import urlopen
from bs4 import BeautifulSoup
import json
def get_webcasts(year):
url = "https://www.sans.org/webcasts/archive/" + str(year)
page = urlopen(url)
soup = BeautifulSoup(page, 'html.parser')
table = soup.find('table', {"class": "table table-bordered table-striped"})
webcasts = []
for row in table.find_all('tr'):
title_content = row.find('td', {"class": "table_data table_data_title"})
if title_content is None:
continue
title_anchor = title_content.find('a')
title_link = title_anchor.get("href")
title = title_anchor.string
date = row.find('td', {"class": "table_data table_data_date"})
sponsor = row.find('td', {"class": "table_data table_data_sponsor"})
speaker = row.find('td', {"class": "table_data table_data_speaker"})
webcast = {"title": title, "date": date.string, "sponsor": sponsor.string,
"speaker": speaker.string}
webcasts.append(webcast)
return webcasts
result = {}
for year in range(2013, 2019):
webcasts = get_webcasts(year)
result[str(year)] = webcasts
print(json.dumps(result))
|
normal
|
{
"blob_id": "14971842092c7aa41477f28cec87628a73a8ffd6",
"index": 8407,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef get_webcasts(year):\n url = 'https://www.sans.org/webcasts/archive/' + str(year)\n page = urlopen(url)\n soup = BeautifulSoup(page, 'html.parser')\n table = soup.find('table', {'class': 'table table-bordered table-striped'})\n webcasts = []\n for row in table.find_all('tr'):\n title_content = row.find('td', {'class': 'table_data table_data_title'}\n )\n if title_content is None:\n continue\n title_anchor = title_content.find('a')\n title_link = title_anchor.get('href')\n title = title_anchor.string\n date = row.find('td', {'class': 'table_data table_data_date'})\n sponsor = row.find('td', {'class': 'table_data table_data_sponsor'})\n speaker = row.find('td', {'class': 'table_data table_data_speaker'})\n webcast = {'title': title, 'date': date.string, 'sponsor': sponsor.\n string, 'speaker': speaker.string}\n webcasts.append(webcast)\n return webcasts\n\n\n<mask token>\nfor year in range(2013, 2019):\n webcasts = get_webcasts(year)\n result[str(year)] = webcasts\nprint(json.dumps(result))\n",
"step-3": "<mask token>\n\n\ndef get_webcasts(year):\n url = 'https://www.sans.org/webcasts/archive/' + str(year)\n page = urlopen(url)\n soup = BeautifulSoup(page, 'html.parser')\n table = soup.find('table', {'class': 'table table-bordered table-striped'})\n webcasts = []\n for row in table.find_all('tr'):\n title_content = row.find('td', {'class': 'table_data table_data_title'}\n )\n if title_content is None:\n continue\n title_anchor = title_content.find('a')\n title_link = title_anchor.get('href')\n title = title_anchor.string\n date = row.find('td', {'class': 'table_data table_data_date'})\n sponsor = row.find('td', {'class': 'table_data table_data_sponsor'})\n speaker = row.find('td', {'class': 'table_data table_data_speaker'})\n webcast = {'title': title, 'date': date.string, 'sponsor': sponsor.\n string, 'speaker': speaker.string}\n webcasts.append(webcast)\n return webcasts\n\n\nresult = {}\nfor year in range(2013, 2019):\n webcasts = get_webcasts(year)\n result[str(year)] = webcasts\nprint(json.dumps(result))\n",
"step-4": "from urllib.request import urlopen\nfrom bs4 import BeautifulSoup\nimport json\n\n\ndef get_webcasts(year):\n url = 'https://www.sans.org/webcasts/archive/' + str(year)\n page = urlopen(url)\n soup = BeautifulSoup(page, 'html.parser')\n table = soup.find('table', {'class': 'table table-bordered table-striped'})\n webcasts = []\n for row in table.find_all('tr'):\n title_content = row.find('td', {'class': 'table_data table_data_title'}\n )\n if title_content is None:\n continue\n title_anchor = title_content.find('a')\n title_link = title_anchor.get('href')\n title = title_anchor.string\n date = row.find('td', {'class': 'table_data table_data_date'})\n sponsor = row.find('td', {'class': 'table_data table_data_sponsor'})\n speaker = row.find('td', {'class': 'table_data table_data_speaker'})\n webcast = {'title': title, 'date': date.string, 'sponsor': sponsor.\n string, 'speaker': speaker.string}\n webcasts.append(webcast)\n return webcasts\n\n\nresult = {}\nfor year in range(2013, 2019):\n webcasts = get_webcasts(year)\n result[str(year)] = webcasts\nprint(json.dumps(result))\n",
"step-5": "from urllib.request import urlopen\nfrom bs4 import BeautifulSoup\nimport json\n\ndef get_webcasts(year):\n url = \"https://www.sans.org/webcasts/archive/\" + str(year)\n page = urlopen(url)\n soup = BeautifulSoup(page, 'html.parser')\n table = soup.find('table', {\"class\": \"table table-bordered table-striped\"})\n\n webcasts = []\n for row in table.find_all('tr'):\n title_content = row.find('td', {\"class\": \"table_data table_data_title\"})\n\n if title_content is None:\n continue\n\n title_anchor = title_content.find('a')\n title_link = title_anchor.get(\"href\")\n title = title_anchor.string\n\n date = row.find('td', {\"class\": \"table_data table_data_date\"})\n sponsor = row.find('td', {\"class\": \"table_data table_data_sponsor\"})\n speaker = row.find('td', {\"class\": \"table_data table_data_speaker\"})\n\n webcast = {\"title\": title, \"date\": date.string, \"sponsor\": sponsor.string,\n \"speaker\": speaker.string}\n webcasts.append(webcast)\n\n return webcasts\n\nresult = {}\nfor year in range(2013, 2019):\n webcasts = get_webcasts(year)\n result[str(year)] = webcasts\n\nprint(json.dumps(result))\n",
"step-ids": [
0,
2,
3,
4,
5
]
}
|
[
0,
2,
3,
4,
5
] |
#!/usr/bin/python
# Copyright 2012 Google Inc. All Rights Reserved.
"""Antirollback clock user space support.
This daemon serves several purposes:
1. Maintain a file containing the minimum time, and periodically
update its value.
2. At startup, write the minimum time to /proc/ar_clock.
The kernel will not allow the time to be set substantially
earlier than this value (there is a small amount of wiggle
room).
"""
__author__ = '[email protected] (Denton Gentry)'
import os
import pwd
import sys
import tempfile
import time
import options
optspec = """
antirollback [options...]
--
i,interval= seconds between updates [28800]
p,persist= path to persistent file [/fiber/config/ar_clock]
u,user= setuid to this user to run
"""
# Unit tests can override these.
BIRTHDAY = 1349064000.0 # 10/1/2012
BUILD_FILENAME = '/etc/softwaredate'
PROC_AR = '/proc/ar_clock'
PROC_UPTIME = '/proc/uptime'
SLEEP = time.sleep
TIMENOW = time.time
def GetPersistTime(ar_filename):
"""Return time stored in ar_filename, or 0.0 if it does not exist."""
try:
with open(ar_filename) as f:
return float(f.read())
except (IOError, ValueError):
return 0.0
def GetBuildDate(build_filename):
"""Return build_date in floating point seconds since epoch."""
try:
with open(build_filename) as f:
return float(f.readline())
except (IOError, ValueError):
return 0.0
def GetMonotime():
"""Return a monotonically increasing count of seconds."""
return float(open(PROC_UPTIME).read().split()[0])
def GetAntirollbackTime(ar_filename):
"""Return the appropriate antirollback time to use at startup."""
now = max(TIMENOW(), GetPersistTime(ar_filename),
GetBuildDate(BUILD_FILENAME), BIRTHDAY)
return now
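# Hedged test sketch (editor addition): a unit test could monkeypatch the
# module-level hooks noted above (TIMENOW, SLEEP) before calling
# GetAntirollbackTime; the fake timestamp and missing persist path are assumptions.
def _example_test_override():
  global TIMENOW
  saved = TIMENOW
  try:
    TIMENOW = lambda: BIRTHDAY + 60.0
    return GetAntirollbackTime('/nonexistent/ar_clock')  # >= BIRTHDAY + 60.0
  finally:
    TIMENOW = saved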
def StoreAntirollback(now, ar_filename, kern_f):
"""Write time to /proc/ar_clock and the persistent file."""
print 'antirollback time now ' + str(now)
sys.stdout.flush()
kern_f.write(str(now))
kern_f.flush()
tmpdir = os.path.dirname(ar_filename)
with tempfile.NamedTemporaryFile(mode='w', dir=tmpdir, delete=False) as f:
f.write(str(now) + '\n')
f.flush()
os.fsync(f.fileno())
os.rename(f.name, ar_filename)
def LoopIterate(uptime, now, sleeptime, ar_filename, kern_f):
SLEEP(sleeptime)
new_uptime = GetMonotime()
now += (new_uptime - uptime)
uptime = new_uptime
now = max(now, TIMENOW())
StoreAntirollback(now=now, ar_filename=ar_filename, kern_f=kern_f)
return (uptime, now)
def main():
o = options.Options(optspec)
(opt, _, _) = o.parse(sys.argv[1:])
kern_f = open(PROC_AR, 'w')
# Drop privileges
if opt.user:
pd = pwd.getpwnam(opt.user)
os.setuid(pd.pw_uid)
uptime = GetMonotime()
now = GetAntirollbackTime(opt.persist)
StoreAntirollback(now=now, ar_filename=opt.persist, kern_f=kern_f)
while True:
(uptime, now) = LoopIterate(uptime=uptime, now=now,
sleeptime=opt.interval,
ar_filename=opt.persist,
kern_f=kern_f)
if __name__ == '__main__':
main()
|
normal
|
{
"blob_id": "92e7a7825b3f49424ec69196b69aee00bc84da68",
"index": 8879,
"step-1": "#!/usr/bin/python\n# Copyright 2012 Google Inc. All Rights Reserved.\n\n\"\"\"Antirollback clock user space support.\n\nThis daemon serves several purposes:\n 1. Maintain a file containing the minimum time, and periodically\n update its value.\n 2. At startup, write the minimum time to /proc/ar_clock.\n The kernel will not allow the time to be set substantially\n earlier than this value (there is a small amount of wiggle\n room).\n\"\"\"\n\n__author__ = '[email protected] (Denton Gentry)'\n\nimport os\nimport pwd\nimport sys\nimport tempfile\nimport time\nimport options\n\n\noptspec = \"\"\"\nantirollback [options...]\n--\ni,interval= seconds between updates [28800]\np,persist= path to persistent file [/fiber/config/ar_clock]\nu,user= setuid to this user to run\n\"\"\"\n\n\n# Unit tests can override these.\nBIRTHDAY = 1349064000.0 # 10/1/2012\nBUILD_FILENAME = '/etc/softwaredate'\nPROC_AR = '/proc/ar_clock'\nPROC_UPTIME = '/proc/uptime'\nSLEEP = time.sleep\nTIMENOW = time.time\n\n\ndef GetPersistTime(ar_filename):\n \"\"\"Return time stored in ar_filename, or 0.0 if it does not exist.\"\"\"\n try:\n with open(ar_filename) as f:\n return float(f.read())\n except (IOError, ValueError):\n return 0.0\n\n\ndef GetBuildDate(build_filename):\n \"\"\"Return build_date in floating point seconds since epoch.\"\"\"\n try:\n with open(build_filename) as f:\n return float(f.readline())\n except (IOError, ValueError):\n return 0.0\n\n\ndef GetMonotime():\n \"\"\"Return a monotonically increasing count of seconds.\"\"\"\n return float(open(PROC_UPTIME).read().split()[0])\n\n\ndef GetAntirollbackTime(ar_filename):\n \"\"\"Return the appropriate antirollback time to use at startup.\"\"\"\n now = max(TIMENOW(), GetPersistTime(ar_filename),\n GetBuildDate(BUILD_FILENAME), BIRTHDAY)\n return now\n\n\ndef StoreAntirollback(now, ar_filename, kern_f):\n \"\"\"Write time to /proc/ar_clock and the persistent file.\"\"\"\n print 'antirollback time now ' + str(now)\n sys.stdout.flush()\n kern_f.write(str(now))\n kern_f.flush()\n tmpdir = os.path.dirname(ar_filename)\n with tempfile.NamedTemporaryFile(mode='w', dir=tmpdir, delete=False) as f:\n f.write(str(now) + '\\n')\n f.flush()\n os.fsync(f.fileno())\n os.rename(f.name, ar_filename)\n\n\ndef LoopIterate(uptime, now, sleeptime, ar_filename, kern_f):\n SLEEP(sleeptime)\n new_uptime = GetMonotime()\n now += (new_uptime - uptime)\n uptime = new_uptime\n now = max(now, TIMENOW())\n StoreAntirollback(now=now, ar_filename=ar_filename, kern_f=kern_f)\n return (uptime, now)\n\n\ndef main():\n o = options.Options(optspec)\n (opt, _, _) = o.parse(sys.argv[1:])\n\n kern_f = open(PROC_AR, 'w')\n\n # Drop privileges\n if opt.user:\n pd = pwd.getpwnam(opt.user)\n os.setuid(pd.pw_uid)\n\n uptime = GetMonotime()\n now = GetAntirollbackTime(opt.persist)\n\n StoreAntirollback(now=now, ar_filename=opt.persist, kern_f=kern_f)\n\n while True:\n (uptime, now) = LoopIterate(uptime=uptime, now=now,\n sleeptime=opt.interval,\n ar_filename=opt.persist,\n kern_f=kern_f)\n\n\nif __name__ == '__main__':\n main()\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
def geo_avg(x,lat,dim=2):
'''
    geo_avg: calculate a cos(latitude)-weighted average
    input: 
        x: variable 
        lat: corresponding latitude
        dim: the position of the lat dimension; two cases: 2 for [time,lev,lat,*lon], or 1 for [time or lev, lat, *lon]
output:
result: 1d or 2d average result
'''
import numpy as np
s = x.shape
if ((len(s)==4) & (dim==2)) or ((len(s)==3) & (dim==1)):
x = np.nanmean(x,axis=-1)
coslat = np.cos(lat/180*np.pi)
s = x.shape
if len(s)==3:
result = np.nanmean(x*coslat[np.newaxis,np.newaxis,:],axis=-1)/np.nanmean(coslat)
if len(s)==2:
result = np.nanmean(x*coslat[np.newaxis,:],axis=-1)/np.nanmean(coslat)
return result
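# Hedged usage sketch (editor addition): synthetic [time, lev, lat, lon] data;
# the shapes are assumptions, not tied to any real dataset.
def _demo_geo_avg():
    import numpy as np
    lat = np.linspace(-88.75, 88.75, 72)
    x = np.random.rand(24, 10, 72, 144)
    return geo_avg(x, lat, dim=2)  # -> shape (24, 10), cos(lat)-weighted mean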
def cal_anomaly(x):
'''
calculate anomaly of a numpy array
input: x: 1-d,2-d,3-d or 4d numpy array, !!! the first dimension must be month
output: x with seasonal cycle removed
'''
import numpy as np
s = x.shape
n_time = s[0]
monthly_mean = np.nanmean(x.reshape([n_time//12,12,*s[1:]]),axis=0).\
reshape([1,12,*s[1:]]).repeat(len(x)//12,axis=0).reshape(s)
return x-monthly_mean
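# Hedged usage sketch (editor addition): 20 years of synthetic monthly fields;
# the first axis must be month, as the docstring requires.
def _demo_cal_anomaly():
    import numpy as np
    x = np.random.rand(240, 45, 90)  # [month, lat, lon]
    return cal_anomaly(x)  # same shape, with the mean seasonal cycle removed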
def select_month(x,target_mon):
'''
select month or season from a monthly time series
input:
x: array, 1,2,3,4 dimension
target_mon:
1. number of month, from 1-12
2. name of month, e.g. Jan, Feb
        3. season name: DJF: 1,2,12; JJA: 6,7,8; SON: 9,10,11; MAM: 3,4,5
4. phase name: dry: 1,2,3,12; wet: 6,7,8,9
output:
array with month selected or seasonal mean
'''
s = x.shape
n_mon = s[0]
if type(target_mon) != str:
i_mon = [i for i in range(n_mon) if i%12 == target_mon-1]
return x[i_mon]
else:
import numpy as np
from datetime import datetime,timedelta
mon_name_list = [(datetime(2000,1,1)+timedelta(days=31*i)).strftime("%b") for i in range(12)]
mon_dict = {mon_name_list[i]:i for i in range(12)}
season_dict = {'DJF':[0,1,11],'JJA':[5,6,7],'SON':[8,9,10],'MAM':[2,3,4]}
phase_dict = {'dry':[0,1,2,11],'wet':[5,6,7,8]}
if target_mon in mon_dict:
i_mon = [i for i in range(n_mon) if i%12 == mon_dict[target_mon]]
return x[i_mon]
elif target_mon in season_dict:
i_mon = [i for i in range(n_mon) if i%12 in season_dict[target_mon]]
x_mon = x[i_mon]
if target_mon == 'DJF':
x_mon = np.append(np.nan,x_mon[:-1])
return np.nanmean(x_mon.reshape([s[0]//12,3,*s[1:]]),axis=1)
else:
i_mon = [i for i in range(n_mon) if i%12 in phase_dict[target_mon]]
x_mon = x[i_mon]
if target_mon == 'dry':
x_mon = np.append(np.nan,x_mon[:-1])
return np.nanmean(x_mon.reshape([s[0]//12,4,*s[1:]]),axis=1)
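# Hedged usage sketch (editor addition): a fake 10-year monthly series; the month
# number and season name follow the conventions in the docstring above.
def _demo_select_month():
    import numpy as np
    ts = np.arange(120, dtype=float)     # 10 years of monthly values
    jan = select_month(ts, 1)            # every January          -> shape (10,)
    jja = select_month(ts, 'JJA')        # June-July-August means -> shape (10,)
    return jan, jja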
def normalize(x):
'''
function to normalize data
'''
import numpy as np
return (x-np.nanmean(x))/np.nanstd(x)
def find_index(arr,target,method='nearest'):
'''
find an index of target value from amonotonous 1-d array arr
'''
import numpy as np
if method == 'nearest':
return (np.abs(arr - target)).argmin()
else:
if arr[1]<arr[0]: ## if x is a decreasing array, reverse
arr = arr[::-1]
if method == 'higher':
return np.where(arr>=target)[0][0]
if method == 'lower':
return np.where(arr<=target)[0][-1]
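# Hedged usage sketch (editor addition): the pressure levels below are illustrative.
def _demo_find_index():
    import numpy as np
    lev = np.array([1000., 850., 700., 500., 300., 100.])
    return find_index(lev, 600.)  # -> 2 (700 is the nearest level; ties go to the first match)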
def moving_average(arr,n,method = 'nan'):
'''
calculate moving average values of 1-d array, and return an array with the same length
input:
arr: 1-d array
n: moving window length
method:
            nan: pad the ends with NaN
            avg: pad the ends with expanding-window averages (0-1, 0-2, 0-3, ...)
            diff: only use this when computing a 13-month annual running mean (n = 13)
'''
import numpy as np
def moving_average_center(a, n) :
ret = np.cumsum(a, dtype=float)
ret[n:] = ret[n:] - ret[:-n]
return ret[n - 1:] / n
l1 = n//2-1
l2 = n-l1
l = len(arr)
arr_new = np.zeros(l)
if method == 'nan':
arr_new[:l1] = np.nan
arr_new[l1:l-l2+1] = moving_average_center(arr, n)
arr_new[l-l2+1:] = np.nan
if method == 'avg':
for i in range(l1):
arr_new[i] = np.nanmean(arr[:i+1])
for i in range(l2):
arr_new[-i-1] = np.nanmean(arr[-i-1:])
arr_new[l1:l-l2+1] = moving_average_center(arr, n)
if method == 'diff' and n==13:
a2 = moving_average_center(arr, n)
diff = (arr[l1:l-l2+1]-a2).reshape([(len(arr)-n+1)//12,12]).mean(axis=0) # monthly mean difference between arr and running mean
a1 = arr[:6] - diff[6:]
a12 = np.append(a1,a2)
a3 = arr[-6:] - diff[:6]
arr_new = np.append(a12,a3)
return arr_new
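# Hedged usage sketch (editor addition): a 13-point running mean of a synthetic
# monthly series; the ends stay NaN with method='nan'.
def _demo_moving_average():
    import numpy as np
    series = np.sin(np.arange(120) * 2 * np.pi / 12) + 0.01 * np.arange(120)
    return moving_average(series, 13, method='nan')  # same length, NaN-padded ends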
def convert_cftime_to_int(t):
'''
convert cftime to integer
    input:
        t: a cftime datetime object
    output:
        an integer date in YYYYMMDD form, e.g. 20000131
'''
from datetime import datetime
return int(datetime.strftime(datetime.strptime(t.isoformat(),'%Y-%m-%dT%H:%M:%S'),
'%Y%m%d'))
def get_lat_lim(lat,lat_min,lat_max):
'''
calculate a range of latitude, in both hemispheres
'''
import numpy as np
i_lat_n = np.where((lat>=lat_min) & (lat<=lat_max))[0]
i_lat_s = np.where((lat<=-lat_min) & (lat>=-lat_max))[0]
i_lats = [i_lat_s,i_lat_n]
return i_lats
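# Hedged usage sketch (editor addition): pick out the 20-40 degree band in both
# hemispheres from an illustrative 2.5-degree latitude grid.
def _demo_get_lat_lim():
    import numpy as np
    lat = np.arange(-90, 91, 2.5)
    i_south, i_north = get_lat_lim(lat, 20, 40)
    return lat[i_south], lat[i_north]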
|
normal
|
{
"blob_id": "a2871585ce36888cf89c4dc5a6a7de6b212412bb",
"index": 1153,
"step-1": "def geo_avg(x, lat, dim=2):\n \"\"\"\n geo_avg: to calculate weighting average according to latitude\n input: \n x: variable \n lat: corresponding latittude\n dim: the order of the lat dimension, two cases: 2:[time,lev,lat,*lon],or 1:[time or lev, lat, *lon]\n output:\n result: 1d or 2d average result \n \"\"\"\n import numpy as np\n s = x.shape\n if (len(s) == 4) & (dim == 2) or (len(s) == 3) & (dim == 1):\n x = np.nanmean(x, axis=-1)\n coslat = np.cos(lat / 180 * np.pi)\n s = x.shape\n if len(s) == 3:\n result = np.nanmean(x * coslat[np.newaxis, np.newaxis, :], axis=-1\n ) / np.nanmean(coslat)\n if len(s) == 2:\n result = np.nanmean(x * coslat[np.newaxis, :], axis=-1) / np.nanmean(\n coslat)\n return result\n\n\n<mask token>\n\n\ndef select_month(x, target_mon):\n \"\"\"\n select month or season from a monthly time series\n input: \n x: array, 1,2,3,4 dimension\n target_mon: \n 1. number of month, from 1-12 \n 2. name of month, e.g. Jan, Feb\n 3. season name: DJF: 1,2,12; JJA: 6,7,8 SON: 9,10,11, MAM: 3,4,5\n 4. phase name: dry: 1,2,3,12; wet: 6,7,8,9\n output: \n array with month selected or seasonal mean \n \"\"\"\n s = x.shape\n n_mon = s[0]\n if type(target_mon) != str:\n i_mon = [i for i in range(n_mon) if i % 12 == target_mon - 1]\n return x[i_mon]\n else:\n import numpy as np\n from datetime import datetime, timedelta\n mon_name_list = [(datetime(2000, 1, 1) + timedelta(days=31 * i)).\n strftime('%b') for i in range(12)]\n mon_dict = {mon_name_list[i]: i for i in range(12)}\n season_dict = {'DJF': [0, 1, 11], 'JJA': [5, 6, 7], 'SON': [8, 9, \n 10], 'MAM': [2, 3, 4]}\n phase_dict = {'dry': [0, 1, 2, 11], 'wet': [5, 6, 7, 8]}\n if target_mon in mon_dict:\n i_mon = [i for i in range(n_mon) if i % 12 == mon_dict[target_mon]]\n return x[i_mon]\n elif target_mon in season_dict:\n i_mon = [i for i in range(n_mon) if i % 12 in season_dict[\n target_mon]]\n x_mon = x[i_mon]\n if target_mon == 'DJF':\n x_mon = np.append(np.nan, x_mon[:-1])\n return np.nanmean(x_mon.reshape([s[0] // 12, 3, *s[1:]]), axis=1)\n else:\n i_mon = [i for i in range(n_mon) if i % 12 in phase_dict[\n target_mon]]\n x_mon = x[i_mon]\n if target_mon == 'dry':\n x_mon = np.append(np.nan, x_mon[:-1])\n return np.nanmean(x_mon.reshape([s[0] // 12, 4, *s[1:]]), axis=1)\n\n\ndef normalize(x):\n \"\"\"\n function to normalize data \n \"\"\"\n import numpy as np\n return (x - np.nanmean(x)) / np.nanstd(x)\n\n\n<mask token>\n\n\ndef moving_average(arr, n, method='nan'):\n \"\"\"\n calculate moving average values of 1-d array, and return an array with the same length \n input:\n arr: 1-d array \n n: moving window length \n method:\n nan: fill in nan \n avg: average from 0-1, 0-2, 0-3 ...\n diff: only use this when calculate annual mean, n = 13\n \"\"\"\n import numpy as np\n\n def moving_average_center(a, n):\n ret = np.cumsum(a, dtype=float)\n ret[n:] = ret[n:] - ret[:-n]\n return ret[n - 1:] / n\n l1 = n // 2 - 1\n l2 = n - l1\n l = len(arr)\n arr_new = np.zeros(l)\n if method == 'nan':\n arr_new[:l1] = np.nan\n arr_new[l1:l - l2 + 1] = moving_average_center(arr, n)\n arr_new[l - l2 + 1:] = np.nan\n if method == 'avg':\n for i in range(l1):\n arr_new[i] = np.nanmean(arr[:i + 1])\n for i in range(l2):\n arr_new[-i - 1] = np.nanmean(arr[-i - 1:])\n arr_new[l1:l - l2 + 1] = moving_average_center(arr, n)\n if method == 'diff' and n == 13:\n a2 = moving_average_center(arr, n)\n diff = (arr[l1:l - l2 + 1] - a2).reshape([(len(arr) - n + 1) // 12, 12]\n ).mean(axis=0)\n a1 = arr[:6] - diff[6:]\n a12 = np.append(a1, a2)\n 
a3 = arr[-6:] - diff[:6]\n arr_new = np.append(a12, a3)\n return arr_new\n\n\ndef convert_cftime_to_int(t):\n \"\"\"\n convert cftime to integer \n input:\n arr: 1-d array \n n: moving window length \n method:\n nan: fill in nan \n avg: average from 0-1, 0-2, 0-3 ...\n diff: only use this when calculate annual mean, n = 13\n \"\"\"\n from datetime import datetime\n return int(datetime.strftime(datetime.strptime(t.isoformat(),\n '%Y-%m-%dT%H:%M:%S'), '%Y%m%d'))\n\n\n<mask token>\n",
"step-2": "def geo_avg(x, lat, dim=2):\n \"\"\"\n geo_avg: to calculate weighting average according to latitude\n input: \n x: variable \n lat: corresponding latittude\n dim: the order of the lat dimension, two cases: 2:[time,lev,lat,*lon],or 1:[time or lev, lat, *lon]\n output:\n result: 1d or 2d average result \n \"\"\"\n import numpy as np\n s = x.shape\n if (len(s) == 4) & (dim == 2) or (len(s) == 3) & (dim == 1):\n x = np.nanmean(x, axis=-1)\n coslat = np.cos(lat / 180 * np.pi)\n s = x.shape\n if len(s) == 3:\n result = np.nanmean(x * coslat[np.newaxis, np.newaxis, :], axis=-1\n ) / np.nanmean(coslat)\n if len(s) == 2:\n result = np.nanmean(x * coslat[np.newaxis, :], axis=-1) / np.nanmean(\n coslat)\n return result\n\n\n<mask token>\n\n\ndef select_month(x, target_mon):\n \"\"\"\n select month or season from a monthly time series\n input: \n x: array, 1,2,3,4 dimension\n target_mon: \n 1. number of month, from 1-12 \n 2. name of month, e.g. Jan, Feb\n 3. season name: DJF: 1,2,12; JJA: 6,7,8 SON: 9,10,11, MAM: 3,4,5\n 4. phase name: dry: 1,2,3,12; wet: 6,7,8,9\n output: \n array with month selected or seasonal mean \n \"\"\"\n s = x.shape\n n_mon = s[0]\n if type(target_mon) != str:\n i_mon = [i for i in range(n_mon) if i % 12 == target_mon - 1]\n return x[i_mon]\n else:\n import numpy as np\n from datetime import datetime, timedelta\n mon_name_list = [(datetime(2000, 1, 1) + timedelta(days=31 * i)).\n strftime('%b') for i in range(12)]\n mon_dict = {mon_name_list[i]: i for i in range(12)}\n season_dict = {'DJF': [0, 1, 11], 'JJA': [5, 6, 7], 'SON': [8, 9, \n 10], 'MAM': [2, 3, 4]}\n phase_dict = {'dry': [0, 1, 2, 11], 'wet': [5, 6, 7, 8]}\n if target_mon in mon_dict:\n i_mon = [i for i in range(n_mon) if i % 12 == mon_dict[target_mon]]\n return x[i_mon]\n elif target_mon in season_dict:\n i_mon = [i for i in range(n_mon) if i % 12 in season_dict[\n target_mon]]\n x_mon = x[i_mon]\n if target_mon == 'DJF':\n x_mon = np.append(np.nan, x_mon[:-1])\n return np.nanmean(x_mon.reshape([s[0] // 12, 3, *s[1:]]), axis=1)\n else:\n i_mon = [i for i in range(n_mon) if i % 12 in phase_dict[\n target_mon]]\n x_mon = x[i_mon]\n if target_mon == 'dry':\n x_mon = np.append(np.nan, x_mon[:-1])\n return np.nanmean(x_mon.reshape([s[0] // 12, 4, *s[1:]]), axis=1)\n\n\ndef normalize(x):\n \"\"\"\n function to normalize data \n \"\"\"\n import numpy as np\n return (x - np.nanmean(x)) / np.nanstd(x)\n\n\n<mask token>\n\n\ndef moving_average(arr, n, method='nan'):\n \"\"\"\n calculate moving average values of 1-d array, and return an array with the same length \n input:\n arr: 1-d array \n n: moving window length \n method:\n nan: fill in nan \n avg: average from 0-1, 0-2, 0-3 ...\n diff: only use this when calculate annual mean, n = 13\n \"\"\"\n import numpy as np\n\n def moving_average_center(a, n):\n ret = np.cumsum(a, dtype=float)\n ret[n:] = ret[n:] - ret[:-n]\n return ret[n - 1:] / n\n l1 = n // 2 - 1\n l2 = n - l1\n l = len(arr)\n arr_new = np.zeros(l)\n if method == 'nan':\n arr_new[:l1] = np.nan\n arr_new[l1:l - l2 + 1] = moving_average_center(arr, n)\n arr_new[l - l2 + 1:] = np.nan\n if method == 'avg':\n for i in range(l1):\n arr_new[i] = np.nanmean(arr[:i + 1])\n for i in range(l2):\n arr_new[-i - 1] = np.nanmean(arr[-i - 1:])\n arr_new[l1:l - l2 + 1] = moving_average_center(arr, n)\n if method == 'diff' and n == 13:\n a2 = moving_average_center(arr, n)\n diff = (arr[l1:l - l2 + 1] - a2).reshape([(len(arr) - n + 1) // 12, 12]\n ).mean(axis=0)\n a1 = arr[:6] - diff[6:]\n a12 = np.append(a1, a2)\n 
a3 = arr[-6:] - diff[:6]\n arr_new = np.append(a12, a3)\n return arr_new\n\n\ndef convert_cftime_to_int(t):\n \"\"\"\n convert cftime to integer \n input:\n arr: 1-d array \n n: moving window length \n method:\n nan: fill in nan \n avg: average from 0-1, 0-2, 0-3 ...\n diff: only use this when calculate annual mean, n = 13\n \"\"\"\n from datetime import datetime\n return int(datetime.strftime(datetime.strptime(t.isoformat(),\n '%Y-%m-%dT%H:%M:%S'), '%Y%m%d'))\n\n\ndef get_lat_lim(lat, lat_min, lat_max):\n \"\"\"\n calculate a range of latitude, in both hemispheres\n \"\"\"\n import numpy as np\n i_lat_n = np.where((lat >= lat_min) & (lat <= lat_max))[0]\n i_lat_s = np.where((lat <= -lat_min) & (lat >= -lat_max))[0]\n i_lats = [i_lat_s, i_lat_n]\n return i_lats\n",
"step-3": "def geo_avg(x, lat, dim=2):\n \"\"\"\n geo_avg: to calculate weighting average according to latitude\n input: \n x: variable \n lat: corresponding latittude\n dim: the order of the lat dimension, two cases: 2:[time,lev,lat,*lon],or 1:[time or lev, lat, *lon]\n output:\n result: 1d or 2d average result \n \"\"\"\n import numpy as np\n s = x.shape\n if (len(s) == 4) & (dim == 2) or (len(s) == 3) & (dim == 1):\n x = np.nanmean(x, axis=-1)\n coslat = np.cos(lat / 180 * np.pi)\n s = x.shape\n if len(s) == 3:\n result = np.nanmean(x * coslat[np.newaxis, np.newaxis, :], axis=-1\n ) / np.nanmean(coslat)\n if len(s) == 2:\n result = np.nanmean(x * coslat[np.newaxis, :], axis=-1) / np.nanmean(\n coslat)\n return result\n\n\ndef cal_anomaly(x):\n \"\"\"\n calculate anomaly of a numpy array \n input: x: 1-d,2-d,3-d or 4d numpy array, !!! the first dimension must be month \n output: x with seasonal cycle removed \n \"\"\"\n import numpy as np\n s = x.shape\n n_time = s[0]\n monthly_mean = np.nanmean(x.reshape([n_time // 12, 12, *s[1:]]), axis=0\n ).reshape([1, 12, *s[1:]]).repeat(len(x) // 12, axis=0).reshape(s)\n return x - monthly_mean\n\n\ndef select_month(x, target_mon):\n \"\"\"\n select month or season from a monthly time series\n input: \n x: array, 1,2,3,4 dimension\n target_mon: \n 1. number of month, from 1-12 \n 2. name of month, e.g. Jan, Feb\n 3. season name: DJF: 1,2,12; JJA: 6,7,8 SON: 9,10,11, MAM: 3,4,5\n 4. phase name: dry: 1,2,3,12; wet: 6,7,8,9\n output: \n array with month selected or seasonal mean \n \"\"\"\n s = x.shape\n n_mon = s[0]\n if type(target_mon) != str:\n i_mon = [i for i in range(n_mon) if i % 12 == target_mon - 1]\n return x[i_mon]\n else:\n import numpy as np\n from datetime import datetime, timedelta\n mon_name_list = [(datetime(2000, 1, 1) + timedelta(days=31 * i)).\n strftime('%b') for i in range(12)]\n mon_dict = {mon_name_list[i]: i for i in range(12)}\n season_dict = {'DJF': [0, 1, 11], 'JJA': [5, 6, 7], 'SON': [8, 9, \n 10], 'MAM': [2, 3, 4]}\n phase_dict = {'dry': [0, 1, 2, 11], 'wet': [5, 6, 7, 8]}\n if target_mon in mon_dict:\n i_mon = [i for i in range(n_mon) if i % 12 == mon_dict[target_mon]]\n return x[i_mon]\n elif target_mon in season_dict:\n i_mon = [i for i in range(n_mon) if i % 12 in season_dict[\n target_mon]]\n x_mon = x[i_mon]\n if target_mon == 'DJF':\n x_mon = np.append(np.nan, x_mon[:-1])\n return np.nanmean(x_mon.reshape([s[0] // 12, 3, *s[1:]]), axis=1)\n else:\n i_mon = [i for i in range(n_mon) if i % 12 in phase_dict[\n target_mon]]\n x_mon = x[i_mon]\n if target_mon == 'dry':\n x_mon = np.append(np.nan, x_mon[:-1])\n return np.nanmean(x_mon.reshape([s[0] // 12, 4, *s[1:]]), axis=1)\n\n\ndef normalize(x):\n \"\"\"\n function to normalize data \n \"\"\"\n import numpy as np\n return (x - np.nanmean(x)) / np.nanstd(x)\n\n\n<mask token>\n\n\ndef moving_average(arr, n, method='nan'):\n \"\"\"\n calculate moving average values of 1-d array, and return an array with the same length \n input:\n arr: 1-d array \n n: moving window length \n method:\n nan: fill in nan \n avg: average from 0-1, 0-2, 0-3 ...\n diff: only use this when calculate annual mean, n = 13\n \"\"\"\n import numpy as np\n\n def moving_average_center(a, n):\n ret = np.cumsum(a, dtype=float)\n ret[n:] = ret[n:] - ret[:-n]\n return ret[n - 1:] / n\n l1 = n // 2 - 1\n l2 = n - l1\n l = len(arr)\n arr_new = np.zeros(l)\n if method == 'nan':\n arr_new[:l1] = np.nan\n arr_new[l1:l - l2 + 1] = moving_average_center(arr, n)\n arr_new[l - l2 + 1:] = np.nan\n if method == 
'avg':\n for i in range(l1):\n arr_new[i] = np.nanmean(arr[:i + 1])\n for i in range(l2):\n arr_new[-i - 1] = np.nanmean(arr[-i - 1:])\n arr_new[l1:l - l2 + 1] = moving_average_center(arr, n)\n if method == 'diff' and n == 13:\n a2 = moving_average_center(arr, n)\n diff = (arr[l1:l - l2 + 1] - a2).reshape([(len(arr) - n + 1) // 12, 12]\n ).mean(axis=0)\n a1 = arr[:6] - diff[6:]\n a12 = np.append(a1, a2)\n a3 = arr[-6:] - diff[:6]\n arr_new = np.append(a12, a3)\n return arr_new\n\n\ndef convert_cftime_to_int(t):\n \"\"\"\n convert cftime to integer \n input:\n arr: 1-d array \n n: moving window length \n method:\n nan: fill in nan \n avg: average from 0-1, 0-2, 0-3 ...\n diff: only use this when calculate annual mean, n = 13\n \"\"\"\n from datetime import datetime\n return int(datetime.strftime(datetime.strptime(t.isoformat(),\n '%Y-%m-%dT%H:%M:%S'), '%Y%m%d'))\n\n\ndef get_lat_lim(lat, lat_min, lat_max):\n \"\"\"\n calculate a range of latitude, in both hemispheres\n \"\"\"\n import numpy as np\n i_lat_n = np.where((lat >= lat_min) & (lat <= lat_max))[0]\n i_lat_s = np.where((lat <= -lat_min) & (lat >= -lat_max))[0]\n i_lats = [i_lat_s, i_lat_n]\n return i_lats\n",
"step-4": "def geo_avg(x, lat, dim=2):\n \"\"\"\n geo_avg: to calculate weighting average according to latitude\n input: \n x: variable \n lat: corresponding latittude\n dim: the order of the lat dimension, two cases: 2:[time,lev,lat,*lon],or 1:[time or lev, lat, *lon]\n output:\n result: 1d or 2d average result \n \"\"\"\n import numpy as np\n s = x.shape\n if (len(s) == 4) & (dim == 2) or (len(s) == 3) & (dim == 1):\n x = np.nanmean(x, axis=-1)\n coslat = np.cos(lat / 180 * np.pi)\n s = x.shape\n if len(s) == 3:\n result = np.nanmean(x * coslat[np.newaxis, np.newaxis, :], axis=-1\n ) / np.nanmean(coslat)\n if len(s) == 2:\n result = np.nanmean(x * coslat[np.newaxis, :], axis=-1) / np.nanmean(\n coslat)\n return result\n\n\ndef cal_anomaly(x):\n \"\"\"\n calculate anomaly of a numpy array \n input: x: 1-d,2-d,3-d or 4d numpy array, !!! the first dimension must be month \n output: x with seasonal cycle removed \n \"\"\"\n import numpy as np\n s = x.shape\n n_time = s[0]\n monthly_mean = np.nanmean(x.reshape([n_time // 12, 12, *s[1:]]), axis=0\n ).reshape([1, 12, *s[1:]]).repeat(len(x) // 12, axis=0).reshape(s)\n return x - monthly_mean\n\n\ndef select_month(x, target_mon):\n \"\"\"\n select month or season from a monthly time series\n input: \n x: array, 1,2,3,4 dimension\n target_mon: \n 1. number of month, from 1-12 \n 2. name of month, e.g. Jan, Feb\n 3. season name: DJF: 1,2,12; JJA: 6,7,8 SON: 9,10,11, MAM: 3,4,5\n 4. phase name: dry: 1,2,3,12; wet: 6,7,8,9\n output: \n array with month selected or seasonal mean \n \"\"\"\n s = x.shape\n n_mon = s[0]\n if type(target_mon) != str:\n i_mon = [i for i in range(n_mon) if i % 12 == target_mon - 1]\n return x[i_mon]\n else:\n import numpy as np\n from datetime import datetime, timedelta\n mon_name_list = [(datetime(2000, 1, 1) + timedelta(days=31 * i)).\n strftime('%b') for i in range(12)]\n mon_dict = {mon_name_list[i]: i for i in range(12)}\n season_dict = {'DJF': [0, 1, 11], 'JJA': [5, 6, 7], 'SON': [8, 9, \n 10], 'MAM': [2, 3, 4]}\n phase_dict = {'dry': [0, 1, 2, 11], 'wet': [5, 6, 7, 8]}\n if target_mon in mon_dict:\n i_mon = [i for i in range(n_mon) if i % 12 == mon_dict[target_mon]]\n return x[i_mon]\n elif target_mon in season_dict:\n i_mon = [i for i in range(n_mon) if i % 12 in season_dict[\n target_mon]]\n x_mon = x[i_mon]\n if target_mon == 'DJF':\n x_mon = np.append(np.nan, x_mon[:-1])\n return np.nanmean(x_mon.reshape([s[0] // 12, 3, *s[1:]]), axis=1)\n else:\n i_mon = [i for i in range(n_mon) if i % 12 in phase_dict[\n target_mon]]\n x_mon = x[i_mon]\n if target_mon == 'dry':\n x_mon = np.append(np.nan, x_mon[:-1])\n return np.nanmean(x_mon.reshape([s[0] // 12, 4, *s[1:]]), axis=1)\n\n\ndef normalize(x):\n \"\"\"\n function to normalize data \n \"\"\"\n import numpy as np\n return (x - np.nanmean(x)) / np.nanstd(x)\n\n\ndef find_index(arr, target, method='nearest'):\n \"\"\"\n find an index of target value from amonotonous 1-d array arr\n \"\"\"\n import numpy as np\n if method == 'nearest':\n return np.abs(arr - target).argmin()\n else:\n if arr[1] < arr[0]:\n arr = arr[::-1]\n if method == 'higher':\n return np.where(arr >= target)[0][0]\n if method == 'lower':\n return np.where(arr <= target)[0][-1]\n\n\ndef moving_average(arr, n, method='nan'):\n \"\"\"\n calculate moving average values of 1-d array, and return an array with the same length \n input:\n arr: 1-d array \n n: moving window length \n method:\n nan: fill in nan \n avg: average from 0-1, 0-2, 0-3 ...\n diff: only use this when calculate annual mean, n = 13\n 
\"\"\"\n import numpy as np\n\n def moving_average_center(a, n):\n ret = np.cumsum(a, dtype=float)\n ret[n:] = ret[n:] - ret[:-n]\n return ret[n - 1:] / n\n l1 = n // 2 - 1\n l2 = n - l1\n l = len(arr)\n arr_new = np.zeros(l)\n if method == 'nan':\n arr_new[:l1] = np.nan\n arr_new[l1:l - l2 + 1] = moving_average_center(arr, n)\n arr_new[l - l2 + 1:] = np.nan\n if method == 'avg':\n for i in range(l1):\n arr_new[i] = np.nanmean(arr[:i + 1])\n for i in range(l2):\n arr_new[-i - 1] = np.nanmean(arr[-i - 1:])\n arr_new[l1:l - l2 + 1] = moving_average_center(arr, n)\n if method == 'diff' and n == 13:\n a2 = moving_average_center(arr, n)\n diff = (arr[l1:l - l2 + 1] - a2).reshape([(len(arr) - n + 1) // 12, 12]\n ).mean(axis=0)\n a1 = arr[:6] - diff[6:]\n a12 = np.append(a1, a2)\n a3 = arr[-6:] - diff[:6]\n arr_new = np.append(a12, a3)\n return arr_new\n\n\ndef convert_cftime_to_int(t):\n \"\"\"\n convert cftime to integer \n input:\n arr: 1-d array \n n: moving window length \n method:\n nan: fill in nan \n avg: average from 0-1, 0-2, 0-3 ...\n diff: only use this when calculate annual mean, n = 13\n \"\"\"\n from datetime import datetime\n return int(datetime.strftime(datetime.strptime(t.isoformat(),\n '%Y-%m-%dT%H:%M:%S'), '%Y%m%d'))\n\n\ndef get_lat_lim(lat, lat_min, lat_max):\n \"\"\"\n calculate a range of latitude, in both hemispheres\n \"\"\"\n import numpy as np\n i_lat_n = np.where((lat >= lat_min) & (lat <= lat_max))[0]\n i_lat_s = np.where((lat <= -lat_min) & (lat >= -lat_max))[0]\n i_lats = [i_lat_s, i_lat_n]\n return i_lats\n",
"step-5": "def geo_avg(x,lat,dim=2):\n '''\n geo_avg: to calculate weighting average according to latitude\n input: \n x: variable \n lat: corresponding latittude\n dim: the order of the lat dimension, two cases: 2:[time,lev,lat,*lon],or 1:[time or lev, lat, *lon]\n output:\n result: 1d or 2d average result \n '''\n import numpy as np\n s = x.shape\n if ((len(s)==4) & (dim==2)) or ((len(s)==3) & (dim==1)):\n x = np.nanmean(x,axis=-1)\n coslat = np.cos(lat/180*np.pi)\n s = x.shape\n if len(s)==3:\n result = np.nanmean(x*coslat[np.newaxis,np.newaxis,:],axis=-1)/np.nanmean(coslat)\n if len(s)==2:\n result = np.nanmean(x*coslat[np.newaxis,:],axis=-1)/np.nanmean(coslat)\n return result\n\ndef cal_anomaly(x):\n '''\n calculate anomaly of a numpy array \n input: x: 1-d,2-d,3-d or 4d numpy array, !!! the first dimension must be month \n output: x with seasonal cycle removed \n '''\n import numpy as np\n s = x.shape\n n_time = s[0]\n monthly_mean = np.nanmean(x.reshape([n_time//12,12,*s[1:]]),axis=0).\\\n reshape([1,12,*s[1:]]).repeat(len(x)//12,axis=0).reshape(s)\n return x-monthly_mean\n\ndef select_month(x,target_mon):\n '''\n select month or season from a monthly time series\n input: \n x: array, 1,2,3,4 dimension\n target_mon: \n 1. number of month, from 1-12 \n 2. name of month, e.g. Jan, Feb\n 3. season name: DJF: 1,2,12; JJA: 6,7,8 SON: 9,10,11, MAM: 3,4,5\n 4. phase name: dry: 1,2,3,12; wet: 6,7,8,9\n output: \n array with month selected or seasonal mean \n '''\n s = x.shape\n n_mon = s[0]\n if type(target_mon) != str:\n i_mon = [i for i in range(n_mon) if i%12 == target_mon-1]\n return x[i_mon]\n else:\n import numpy as np\n from datetime import datetime,timedelta\n mon_name_list = [(datetime(2000,1,1)+timedelta(days=31*i)).strftime(\"%b\") for i in range(12)]\n mon_dict = {mon_name_list[i]:i for i in range(12)}\n season_dict = {'DJF':[0,1,11],'JJA':[5,6,7],'SON':[8,9,10],'MAM':[2,3,4]}\n phase_dict = {'dry':[0,1,2,11],'wet':[5,6,7,8]}\n \n if target_mon in mon_dict:\n i_mon = [i for i in range(n_mon) if i%12 == mon_dict[target_mon]]\n return x[i_mon]\n elif target_mon in season_dict:\n i_mon = [i for i in range(n_mon) if i%12 in season_dict[target_mon]]\n x_mon = x[i_mon]\n if target_mon == 'DJF':\n x_mon = np.append(np.nan,x_mon[:-1])\n return np.nanmean(x_mon.reshape([s[0]//12,3,*s[1:]]),axis=1)\n else:\n i_mon = [i for i in range(n_mon) if i%12 in phase_dict[target_mon]]\n x_mon = x[i_mon]\n if target_mon == 'dry':\n x_mon = np.append(np.nan,x_mon[:-1])\n return np.nanmean(x_mon.reshape([s[0]//12,4,*s[1:]]),axis=1)\n\ndef normalize(x):\n '''\n function to normalize data \n '''\n import numpy as np\n return (x-np.nanmean(x))/np.nanstd(x)\n\ndef find_index(arr,target,method='nearest'):\n '''\n find an index of target value from amonotonous 1-d array arr\n '''\n import numpy as np\n if method == 'nearest':\n return (np.abs(arr - target)).argmin()\n else:\n if arr[1]<arr[0]: ## if x is a decreasing array, reverse \n arr = arr[::-1] \n if method == 'higher':\n return np.where(arr>=target)[0][0]\n if method == 'lower':\n return np.where(arr<=target)[0][-1]\n \n \ndef moving_average(arr,n,method = 'nan'):\n '''\n calculate moving average values of 1-d array, and return an array with the same length \n input:\n arr: 1-d array \n n: moving window length \n method:\n nan: fill in nan \n avg: average from 0-1, 0-2, 0-3 ...\n diff: only use this when calculate annual mean, n = 13\n '''\n import numpy as np\n def moving_average_center(a, n) :\n ret = np.cumsum(a, dtype=float)\n ret[n:] = ret[n:] - 
ret[:-n]\n return ret[n - 1:] / n\n l1 = n//2-1\n l2 = n-l1\n l = len(arr)\n arr_new = np.zeros(l)\n if method == 'nan':\n arr_new[:l1] = np.nan\n arr_new[l1:l-l2+1] = moving_average_center(arr, n)\n arr_new[l-l2+1:] = np.nan\n if method == 'avg':\n for i in range(l1):\n arr_new[i] = np.nanmean(arr[:i+1])\n for i in range(l2):\n arr_new[-i-1] = np.nanmean(arr[-i-1:])\n arr_new[l1:l-l2+1] = moving_average_center(arr, n)\n if method == 'diff' and n==13:\n a2 = moving_average_center(arr, n)\n diff = (arr[l1:l-l2+1]-a2).reshape([(len(arr)-n+1)//12,12]).mean(axis=0) # monthly mean difference between arr and running mean\n a1 = arr[:6] - diff[6:]\n a12 = np.append(a1,a2)\n a3 = arr[-6:] - diff[:6]\n arr_new = np.append(a12,a3)\n return arr_new\n\ndef convert_cftime_to_int(t):\n '''\n convert cftime to integer \n input:\n arr: 1-d array \n n: moving window length \n method:\n nan: fill in nan \n avg: average from 0-1, 0-2, 0-3 ...\n diff: only use this when calculate annual mean, n = 13\n '''\n from datetime import datetime\n return int(datetime.strftime(datetime.strptime(t.isoformat(),'%Y-%m-%dT%H:%M:%S'),\n '%Y%m%d'))\n\ndef get_lat_lim(lat,lat_min,lat_max):\n '''\n calculate a range of latitude, in both hemispheres\n '''\n import numpy as np\n i_lat_n = np.where((lat>=lat_min) & (lat<=lat_max))[0]\n i_lat_s = np.where((lat<=-lat_min) & (lat>=-lat_max))[0]\n i_lats = [i_lat_s,i_lat_n]\n return i_lats\n",
"step-ids": [
5,
6,
7,
8,
9
]
}
|
[
5,
6,
7,
8,
9
] |
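As a quick illustration of the moving_average helper defined in the record above, a minimal sketch (assumes the function is already in scope; the monthly series below is made up):

import numpy as np

# hypothetical 10 years of monthly values: an annual cycle plus a weak trend
x = np.sin(np.arange(120) * 2 * np.pi / 12) + 0.01 * np.arange(120)

smooth_nan  = moving_average(x, 13, method='nan')   # ends padded with NaN
smooth_avg  = moving_average(x, 13, method='avg')   # ends use shrinking windows
smooth_diff = moving_average(x, 13, method='diff')  # ends filled using the mean annual cycle (n=13 only)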
#!/usr/bin/env python
#coding:utf-8
import os
def listDir(path):
allFile = []
    subFile = os.listdir(path) # list the entries (files and directories) under the current path
    for fileName in subFile:
        fullFile = os.path.join(path, fileName) # join path and file name into a full path, same effect as string + "/" + string
        if os.path.isdir(fullFile): # check whether it is a directory; there is also an isfile() method
            listDir(fullFile) # recurse into the subdirectory
        allFile.append(fullFile.decode('gbk').encode('utf-8')) # re-encode Chinese file names from gbk to utf-8
print fullFile.decode('gbk').encode('utf-8')
return allFile
#get files and directories recursively
#test of the recursive method
#listDir("C:/Users/13160/Desktop")
#the standard library also provides os.walk(path) for traversing a directory tree; it yields 3-tuples (current path, list of directories, list of files)
for path, dir, file in os.walk("C:/Users/13160/Desktop"):
for f in file:
print os.path.join(path, f).decode('gbk').encode('utf-8')
for d in dir:
print os.path.join(path, d).decode('gbk').encode('utf-8')
|
normal
|
{
"blob_id": "a4f446d6fd2a34c0ef591d7cbda59dccc0a36611",
"index": 2069,
"step-1": "#!/usr/bin/env python\n#coding:utf-8\n\nimport os\n\ndef listDir(path):\n allFile = []\n subFile = os.listdir(path) #列出当前路径下的目录或者文件,返回列表\n for fileName in subFile:\n fullFile = os.path.join(path, fileName) #os提供方法连接路径与文件名形成完整路径名,作用同:字符串+“/”+字符串\n if os.path.isdir(fullFile): #判断是否为目录或者文件,有isfile()方法\n listDir(fullFile) #递归\n allFile.append(fullFile.decode('gbk').encode('utf-8')) #对于中文的编码\n print fullFile.decode('gbk').encode('utf-8')\n return allFile\n#递归方式获取文件目录\n#递归方法的测试\n#listDir(\"C:/Users/13160/Desktop\")\n\n#系统提供遍历目录的方法os.walk(path),返回3元元组(遍历路径名,目录列表,文件列表)\nfor path, dir, file in os.walk(\"C:/Users/13160/Desktop\"):\n for f in file:\n print os.path.join(path, f).decode('gbk').encode('utf-8')\n for d in dir:\n print os.path.join(path, d).decode('gbk').encode('utf-8')",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
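For comparison with the Python 2 listing above, a Python 3 sketch of the same recursive directory listing using pathlib (no manual gbk/utf-8 re-encoding needed; the path is the same example used above):

from pathlib import Path

def list_dir(path):
    # Path.rglob('*') walks the tree recursively, yielding both files and directories
    return [str(p) for p in Path(path).rglob('*')]

# example call, mirroring the commented-out test above:
# print(list_dir("C:/Users/13160/Desktop"))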
#14681
#Write a program that reads the coordinates of a point and reports which quadrant it lies in. Assume x and y are always strictly positive or negative (never zero).
x = int(input())
y = int(input())
if(x>0 and y>0):
print("1")
elif(x>0 and y<0):
print("4")
elif(x<0 and y>0):
print("2")
else:
print("3")
|
normal
|
{
"blob_id": "e9908e32204da8973f06d98430fc660c90b5e303",
"index": 3987,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif x > 0 and y > 0:\n print('1')\nelif x > 0 and y < 0:\n print('4')\nelif x < 0 and y > 0:\n print('2')\nelse:\n print('3')\n",
"step-3": "x = int(input())\ny = int(input())\nif x > 0 and y > 0:\n print('1')\nelif x > 0 and y < 0:\n print('4')\nelif x < 0 and y > 0:\n print('2')\nelse:\n print('3')\n",
"step-4": "#14681\n#점의 좌표를 입력받아 그 점이 어느 사분면에 속하는지 알아내는 프로그램을 작성하시오. 단, x좌표와 y좌표는 모두 양수나 음수라고 가정한다.\n\nx = int(input())\ny = int(input())\n\nif(x>0 and y>0):\n print(\"1\")\nelif(x>0 and y<0):\n print(\"4\")\nelif(x<0 and y>0):\n print(\"2\")\nelse:\n print(\"3\")\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from django.db import models
# Create your models here.
class Products(models.Model):
title = models.CharField(max_length=255)
year = models.IntegerField(default=0)
feature = models.CharField(max_length=30)
usage_status = models.CharField(max_length=25)
kms_driven = models.CharField(max_length=10)
price = models.CharField(max_length=10)
|
normal
|
{
"blob_id": "5b0252dd862fe1e46c0c1df41935db16ae691dff",
"index": 7277,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Products(models.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Products(models.Model):\n title = models.CharField(max_length=255)\n year = models.IntegerField(default=0)\n feature = models.CharField(max_length=30)\n usage_status = models.CharField(max_length=25)\n kms_driven = models.CharField(max_length=10)\n price = models.CharField(max_length=10)\n",
"step-4": "from django.db import models\n\n\nclass Products(models.Model):\n title = models.CharField(max_length=255)\n year = models.IntegerField(default=0)\n feature = models.CharField(max_length=30)\n usage_status = models.CharField(max_length=25)\n kms_driven = models.CharField(max_length=10)\n price = models.CharField(max_length=10)\n",
"step-5": "from django.db import models\n\n\n# Create your models here.\nclass Products(models.Model):\n title = models.CharField(max_length=255)\n year = models.IntegerField(default=0)\n feature = models.CharField(max_length=30)\n usage_status = models.CharField(max_length=25)\n kms_driven = models.CharField(max_length=10)\n price = models.CharField(max_length=10)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
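A hedged sketch of how the Products model above might be exercised from a Django shell; the app import path and all field values below are assumptions:

# run inside `python manage.py shell` after the app is installed and migrated
from products.models import Products  # assumed app/module path; adjust to the real one

car = Products.objects.create(
    title='Sample hatchback',
    year=2018,
    feature='petrol',
    usage_status='used',
    kms_driven='42000',
    price='350000',
)
recent = Products.objects.filter(year__gte=2015).order_by('-year')
print(recent.count(), recent.first().title)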
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# looping.py
#
# Copyright 2012 Jelle Smet <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
#
#
#import uvent
#uvent.install()
from gevent import spawn, sleep
from sys import stdout
def looper0():
while True:
for _ in range(100):
stdout.write ("0")
        sleep(0)
def looper1():
while True:
for _ in range(100):
stdout.write ("1")
sleep(0)
def main():
spawn(looper0)
spawn(looper1)
sleep(5)
if __name__ == '__main__':
main()
|
normal
|
{
"blob_id": "8c86c0969c47a59db5bd147d3e051a29118d6bf2",
"index": 9855,
"step-1": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# looping.py\n# \n# Copyright 2012 Jelle Smet <[email protected]>\n# \n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either version 3 of the License, or\n# (at your option) any later version.\n# \n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n# \n# You should have received a copy of the GNU General Public License\n# along with this program; if not, write to the Free Software\n# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,\n# MA 02110-1301, USA.\n# \n# \n\n#import uvent\n#uvent.install()\nfrom gevent import spawn, sleep\nfrom sys import stdout\n\n\ndef looper0():\n while True:\n for _ in range(100):\n stdout.write (\"0\")\n\tsleep(0)\n\ndef looper1():\n while True:\n for _ in range(100):\n stdout.write (\"1\")\n sleep(0)\n\ndef main():\n spawn(looper0)\n spawn(looper1)\n sleep(5)\n \n\nif __name__ == '__main__':\n\tmain()\n\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
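As a small variation on the sleep(5) used in main() above, gevent's joinall can wait on the spawned greenlets directly; a sketch assuming the two looper functions are in scope:

from gevent import spawn, joinall

def run_for(seconds=5):
    # the loopers never return, so the timeout is what ends the run
    workers = [spawn(looper0), spawn(looper1)]
    joinall(workers, timeout=seconds)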
#!/usr/bin/python
from cagd.polyline import polyline
from cagd.spline import spline, knots
from cagd.vec import vec2
import cagd.scene_2d as scene_2d
from math import sin,cos,pi, sqrt
#returns a list of num_samples points that are uniformly distributed on the unit circle
def unit_circle_points(num_samples):
a = 2*pi/num_samples
return [vec2(cos(a*i), sin(a*i)) for i in range(num_samples)]
#calculates the deviation between the given spline and a unit circle
#the deviation is the mean Euclidean (radial) distance of the control points from the origin, minus the ideal radius 1
def calculate_circle_deviation(spline):
ideal_d = 1.0
center_x = 0.0
center_y = 0.0
deviation = 0.0
for p in spline.control_points:
deviation += sqrt((p.x - center_x)**2 + (p.y - center_y)**2)
deviation /= len(spline.control_points)
deviation -= ideal_d
return deviation
#interpolate 6 points with a periodic spline to create the number "8"
pts = [vec2( 0, 2.5), vec2(-1, 1), vec2( 1,-1), vec2( 0,-2.5), vec2(-1,-1), vec2(1,1)]
s = spline.interpolate_cubic_periodic(pts)
p = s.get_polyline_from_control_points()
p.set_color("blue")
sc = scene_2d.scene()
sc.set_resolution(900)
sc.add_element(s)
sc.add_element(p)
#generate a spline that approximates the unit circle
n = 100
circle_pts = unit_circle_points(n)
circle = spline.interpolate_cubic_periodic(circle_pts)
p_circle = circle.get_polyline_from_control_points()
#sc.add_element(circle)
#sc.add_element(p_circle)
p_circle.set_color("blue")
error = calculate_circle_deviation(circle)
print("The error is: " + str(error))
sc.write_image()
sc.show()
|
normal
|
{
"blob_id": "35e61add90b5c12f94d5f8071f00d98316461dd6",
"index": 8497,
"step-1": "<mask token>\n\n\ndef unit_circle_points(num_samples):\n a = 2 * pi / num_samples\n return [vec2(cos(a * i), sin(a * i)) for i in range(num_samples)]\n\n\ndef calculate_circle_deviation(spline):\n ideal_d = 1.0\n center_x = 0.0\n center_y = 0.0\n deviation = 0.0\n for p in spline.control_points:\n deviation += sqrt((p.x - center_x) ** 2 + (p.y - center_y) ** 2)\n deviation /= len(spline.control_points)\n deviation -= ideal_d\n return deviation\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef unit_circle_points(num_samples):\n a = 2 * pi / num_samples\n return [vec2(cos(a * i), sin(a * i)) for i in range(num_samples)]\n\n\ndef calculate_circle_deviation(spline):\n ideal_d = 1.0\n center_x = 0.0\n center_y = 0.0\n deviation = 0.0\n for p in spline.control_points:\n deviation += sqrt((p.x - center_x) ** 2 + (p.y - center_y) ** 2)\n deviation /= len(spline.control_points)\n deviation -= ideal_d\n return deviation\n\n\n<mask token>\np.set_color('blue')\n<mask token>\nsc.set_resolution(900)\nsc.add_element(s)\nsc.add_element(p)\n<mask token>\np_circle.set_color('blue')\n<mask token>\nprint('The error is: ' + str(error))\nsc.write_image()\nsc.show()\n",
"step-3": "<mask token>\n\n\ndef unit_circle_points(num_samples):\n a = 2 * pi / num_samples\n return [vec2(cos(a * i), sin(a * i)) for i in range(num_samples)]\n\n\ndef calculate_circle_deviation(spline):\n ideal_d = 1.0\n center_x = 0.0\n center_y = 0.0\n deviation = 0.0\n for p in spline.control_points:\n deviation += sqrt((p.x - center_x) ** 2 + (p.y - center_y) ** 2)\n deviation /= len(spline.control_points)\n deviation -= ideal_d\n return deviation\n\n\npts = [vec2(0, 2.5), vec2(-1, 1), vec2(1, -1), vec2(0, -2.5), vec2(-1, -1),\n vec2(1, 1)]\ns = spline.interpolate_cubic_periodic(pts)\np = s.get_polyline_from_control_points()\np.set_color('blue')\nsc = scene_2d.scene()\nsc.set_resolution(900)\nsc.add_element(s)\nsc.add_element(p)\nn = 100\ncircle_pts = unit_circle_points(n)\ncircle = spline.interpolate_cubic_periodic(circle_pts)\np_circle = circle.get_polyline_from_control_points()\np_circle.set_color('blue')\nerror = calculate_circle_deviation(circle)\nprint('The error is: ' + str(error))\nsc.write_image()\nsc.show()\n",
"step-4": "from cagd.polyline import polyline\nfrom cagd.spline import spline, knots\nfrom cagd.vec import vec2\nimport cagd.scene_2d as scene_2d\nfrom math import sin, cos, pi, sqrt\n\n\ndef unit_circle_points(num_samples):\n a = 2 * pi / num_samples\n return [vec2(cos(a * i), sin(a * i)) for i in range(num_samples)]\n\n\ndef calculate_circle_deviation(spline):\n ideal_d = 1.0\n center_x = 0.0\n center_y = 0.0\n deviation = 0.0\n for p in spline.control_points:\n deviation += sqrt((p.x - center_x) ** 2 + (p.y - center_y) ** 2)\n deviation /= len(spline.control_points)\n deviation -= ideal_d\n return deviation\n\n\npts = [vec2(0, 2.5), vec2(-1, 1), vec2(1, -1), vec2(0, -2.5), vec2(-1, -1),\n vec2(1, 1)]\ns = spline.interpolate_cubic_periodic(pts)\np = s.get_polyline_from_control_points()\np.set_color('blue')\nsc = scene_2d.scene()\nsc.set_resolution(900)\nsc.add_element(s)\nsc.add_element(p)\nn = 100\ncircle_pts = unit_circle_points(n)\ncircle = spline.interpolate_cubic_periodic(circle_pts)\np_circle = circle.get_polyline_from_control_points()\np_circle.set_color('blue')\nerror = calculate_circle_deviation(circle)\nprint('The error is: ' + str(error))\nsc.write_image()\nsc.show()\n",
"step-5": "#!/usr/bin/python\n\nfrom cagd.polyline import polyline\nfrom cagd.spline import spline, knots\nfrom cagd.vec import vec2\nimport cagd.scene_2d as scene_2d\nfrom math import sin,cos,pi, sqrt\n\n#returns a list of num_samples points that are uniformly distributed on the unit circle\ndef unit_circle_points(num_samples):\n a = 2*pi/num_samples\n return [vec2(cos(a*i), sin(a*i)) for i in range(num_samples)]\n\n#calculates the deviation between the given spline and a unit circle\n#the Manhattan Metrics is chosen\ndef calculate_circle_deviation(spline):\n ideal_d = 1.0\n center_x = 0.0\n center_y = 0.0\n deviation = 0.0\n for p in spline.control_points:\n deviation += sqrt((p.x - center_x)**2 + (p.y - center_y)**2)\n deviation /= len(spline.control_points)\n deviation -= ideal_d\n return deviation\n\n\n#interpolate 6 points with a periodic spline to create the number \"8\"\npts = [vec2( 0, 2.5), vec2(-1, 1), vec2( 1,-1), vec2( 0,-2.5), vec2(-1,-1), vec2(1,1)]\ns = spline.interpolate_cubic_periodic(pts)\np = s.get_polyline_from_control_points()\np.set_color(\"blue\")\nsc = scene_2d.scene()\nsc.set_resolution(900)\nsc.add_element(s)\nsc.add_element(p)\n\n#generate a spline that approximates the unit circle\nn = 100\ncircle_pts = unit_circle_points(n)\ncircle = spline.interpolate_cubic_periodic(circle_pts)\np_circle = circle.get_polyline_from_control_points()\n#sc.add_element(circle)\n#sc.add_element(p_circle)\np_circle.set_color(\"blue\")\nerror = calculate_circle_deviation(circle)\nprint(\"The error is: \" + str(error))\n\nsc.write_image()\nsc.show()\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
import json
import random
from time import sleep
url = "data/data.json"
def loop(run_state):
error = 1
simulations = 1
while run:
error_margin = str((error/simulations) * 100) + "%"
prediction = get_prediction()
print("Prediction: %s" % prediction)
print("Error Margin: %s" % error_margin)
print("Flip the coin and insert your result:\nh = head\nt = tail")
answer = input()
comparator = ""
        if answer == "h" or answer == "t":
if answer == "t":
write_data(False)
comparator = "tail"
elif answer == "h":
write_data(True)
comparator = "head"
simulations += 1
if comparator != prediction:
error += 1
else:
print("Invalid answer\n")
def get_prediction():
file = read_file()
data = file["coin-result"]
true = 0
for i in data:
if i is True:
true += 1
head = true/len(data)
tail = 1-head
if head + tail == 1:
rand = random.uniform(0.0, 1.0)
if head == 1:
return "head"
elif tail == 1:
return "tail"
elif head > tail:
if rand > head:
return "head"
else:
return "tail"
elif head < tail:
if rand > tail:
return "tail"
else:
return "head"
elif head == tail:
rand = random.randint(0, 1)
if rand == 0:
return "tail"
else:
return "head"
def read_file():
file = open(url, "r")
data = json.loads(file.read())
file.close()
return data
def write_data(value):
data = read_file()
file = open(url, "w")
data["coin-result"].append(value)
json.dump(data, file)
file.close()
def get_answer(answer):
if answer == "c":
return "head"
elif answer == "t":
return "tail"
else:
print("Invalid answer")
# OnRun
run = True
print("Welcome to CoinPredictor\n")
loop(run)
'''
file = open("data/data.json", "w")
data['coin-result'].append(False)
data = json.dump(data, file)
print(data)
file.close()'''
|
normal
|
{
"blob_id": "25ff54a969651d365de33f2420c662518dd63738",
"index": 864,
"step-1": "<mask token>\n\n\ndef loop(run_state):\n error = 1\n simulations = 1\n while run:\n error_margin = str(error / simulations * 100) + '%'\n prediction = get_prediction()\n print('Prediction: %s' % prediction)\n print('Error Margin: %s' % error_margin)\n print('Flip the coin and insert your result:\\nh = head\\nt = tail')\n answer = input()\n comparator = ''\n if answer is 'h' or answer is 't':\n if answer == 't':\n write_data(False)\n comparator = 'tail'\n elif answer == 'h':\n write_data(True)\n comparator = 'head'\n simulations += 1\n if comparator != prediction:\n error += 1\n else:\n print('Invalid answer\\n')\n\n\ndef get_prediction():\n file = read_file()\n data = file['coin-result']\n true = 0\n for i in data:\n if i is True:\n true += 1\n head = true / len(data)\n tail = 1 - head\n if head + tail == 1:\n rand = random.uniform(0.0, 1.0)\n if head == 1:\n return 'head'\n elif tail == 1:\n return 'tail'\n elif head > tail:\n if rand > head:\n return 'head'\n else:\n return 'tail'\n elif head < tail:\n if rand > tail:\n return 'tail'\n else:\n return 'head'\n elif head == tail:\n rand = random.randint(0, 1)\n if rand == 0:\n return 'tail'\n else:\n return 'head'\n\n\ndef read_file():\n file = open(url, 'r')\n data = json.loads(file.read())\n file.close()\n return data\n\n\ndef write_data(value):\n data = read_file()\n file = open(url, 'w')\n data['coin-result'].append(value)\n json.dump(data, file)\n file.close()\n\n\ndef get_answer(answer):\n if answer == 'c':\n return 'head'\n elif answer == 't':\n return 'tail'\n else:\n print('Invalid answer')\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef loop(run_state):\n error = 1\n simulations = 1\n while run:\n error_margin = str(error / simulations * 100) + '%'\n prediction = get_prediction()\n print('Prediction: %s' % prediction)\n print('Error Margin: %s' % error_margin)\n print('Flip the coin and insert your result:\\nh = head\\nt = tail')\n answer = input()\n comparator = ''\n if answer is 'h' or answer is 't':\n if answer == 't':\n write_data(False)\n comparator = 'tail'\n elif answer == 'h':\n write_data(True)\n comparator = 'head'\n simulations += 1\n if comparator != prediction:\n error += 1\n else:\n print('Invalid answer\\n')\n\n\ndef get_prediction():\n file = read_file()\n data = file['coin-result']\n true = 0\n for i in data:\n if i is True:\n true += 1\n head = true / len(data)\n tail = 1 - head\n if head + tail == 1:\n rand = random.uniform(0.0, 1.0)\n if head == 1:\n return 'head'\n elif tail == 1:\n return 'tail'\n elif head > tail:\n if rand > head:\n return 'head'\n else:\n return 'tail'\n elif head < tail:\n if rand > tail:\n return 'tail'\n else:\n return 'head'\n elif head == tail:\n rand = random.randint(0, 1)\n if rand == 0:\n return 'tail'\n else:\n return 'head'\n\n\ndef read_file():\n file = open(url, 'r')\n data = json.loads(file.read())\n file.close()\n return data\n\n\ndef write_data(value):\n data = read_file()\n file = open(url, 'w')\n data['coin-result'].append(value)\n json.dump(data, file)\n file.close()\n\n\ndef get_answer(answer):\n if answer == 'c':\n return 'head'\n elif answer == 't':\n return 'tail'\n else:\n print('Invalid answer')\n\n\n<mask token>\nprint('Welcome to CoinPredictor\\n')\nloop(run)\n<mask token>\n",
"step-3": "<mask token>\nurl = 'data/data.json'\n\n\ndef loop(run_state):\n error = 1\n simulations = 1\n while run:\n error_margin = str(error / simulations * 100) + '%'\n prediction = get_prediction()\n print('Prediction: %s' % prediction)\n print('Error Margin: %s' % error_margin)\n print('Flip the coin and insert your result:\\nh = head\\nt = tail')\n answer = input()\n comparator = ''\n if answer is 'h' or answer is 't':\n if answer == 't':\n write_data(False)\n comparator = 'tail'\n elif answer == 'h':\n write_data(True)\n comparator = 'head'\n simulations += 1\n if comparator != prediction:\n error += 1\n else:\n print('Invalid answer\\n')\n\n\ndef get_prediction():\n file = read_file()\n data = file['coin-result']\n true = 0\n for i in data:\n if i is True:\n true += 1\n head = true / len(data)\n tail = 1 - head\n if head + tail == 1:\n rand = random.uniform(0.0, 1.0)\n if head == 1:\n return 'head'\n elif tail == 1:\n return 'tail'\n elif head > tail:\n if rand > head:\n return 'head'\n else:\n return 'tail'\n elif head < tail:\n if rand > tail:\n return 'tail'\n else:\n return 'head'\n elif head == tail:\n rand = random.randint(0, 1)\n if rand == 0:\n return 'tail'\n else:\n return 'head'\n\n\ndef read_file():\n file = open(url, 'r')\n data = json.loads(file.read())\n file.close()\n return data\n\n\ndef write_data(value):\n data = read_file()\n file = open(url, 'w')\n data['coin-result'].append(value)\n json.dump(data, file)\n file.close()\n\n\ndef get_answer(answer):\n if answer == 'c':\n return 'head'\n elif answer == 't':\n return 'tail'\n else:\n print('Invalid answer')\n\n\nrun = True\nprint('Welcome to CoinPredictor\\n')\nloop(run)\n<mask token>\n",
"step-4": "import json\nimport random\nfrom time import sleep\nurl = 'data/data.json'\n\n\ndef loop(run_state):\n error = 1\n simulations = 1\n while run:\n error_margin = str(error / simulations * 100) + '%'\n prediction = get_prediction()\n print('Prediction: %s' % prediction)\n print('Error Margin: %s' % error_margin)\n print('Flip the coin and insert your result:\\nh = head\\nt = tail')\n answer = input()\n comparator = ''\n if answer is 'h' or answer is 't':\n if answer == 't':\n write_data(False)\n comparator = 'tail'\n elif answer == 'h':\n write_data(True)\n comparator = 'head'\n simulations += 1\n if comparator != prediction:\n error += 1\n else:\n print('Invalid answer\\n')\n\n\ndef get_prediction():\n file = read_file()\n data = file['coin-result']\n true = 0\n for i in data:\n if i is True:\n true += 1\n head = true / len(data)\n tail = 1 - head\n if head + tail == 1:\n rand = random.uniform(0.0, 1.0)\n if head == 1:\n return 'head'\n elif tail == 1:\n return 'tail'\n elif head > tail:\n if rand > head:\n return 'head'\n else:\n return 'tail'\n elif head < tail:\n if rand > tail:\n return 'tail'\n else:\n return 'head'\n elif head == tail:\n rand = random.randint(0, 1)\n if rand == 0:\n return 'tail'\n else:\n return 'head'\n\n\ndef read_file():\n file = open(url, 'r')\n data = json.loads(file.read())\n file.close()\n return data\n\n\ndef write_data(value):\n data = read_file()\n file = open(url, 'w')\n data['coin-result'].append(value)\n json.dump(data, file)\n file.close()\n\n\ndef get_answer(answer):\n if answer == 'c':\n return 'head'\n elif answer == 't':\n return 'tail'\n else:\n print('Invalid answer')\n\n\nrun = True\nprint('Welcome to CoinPredictor\\n')\nloop(run)\n<mask token>\n",
"step-5": "import json\nimport random\nfrom time import sleep\n\nurl = \"data/data.json\"\n\n\ndef loop(run_state):\n error = 1\n simulations = 1\n\n while run:\n\n error_margin = str((error/simulations) * 100) + \"%\"\n prediction = get_prediction()\n print(\"Prediction: %s\" % prediction)\n print(\"Error Margin: %s\" % error_margin)\n print(\"Flip the coin and insert your result:\\nh = head\\nt = tail\")\n answer = input()\n comparator = \"\"\n\n if answer is \"h\" or answer is \"t\":\n if answer == \"t\":\n write_data(False)\n comparator = \"tail\"\n\n elif answer == \"h\":\n write_data(True)\n comparator = \"head\"\n\n simulations += 1\n\n if comparator != prediction:\n error += 1\n\n else:\n print(\"Invalid answer\\n\")\n\n\ndef get_prediction():\n file = read_file()\n data = file[\"coin-result\"]\n true = 0\n\n for i in data:\n if i is True:\n true += 1\n\n head = true/len(data)\n tail = 1-head\n\n if head + tail == 1:\n rand = random.uniform(0.0, 1.0)\n\n if head == 1:\n return \"head\"\n\n elif tail == 1:\n return \"tail\"\n\n elif head > tail:\n if rand > head:\n return \"head\"\n else:\n return \"tail\"\n\n elif head < tail:\n if rand > tail:\n return \"tail\"\n else:\n return \"head\"\n\n elif head == tail:\n rand = random.randint(0, 1)\n if rand == 0:\n return \"tail\"\n else:\n return \"head\"\n\n\ndef read_file():\n file = open(url, \"r\")\n data = json.loads(file.read())\n file.close()\n return data\n\n\ndef write_data(value):\n data = read_file()\n file = open(url, \"w\")\n data[\"coin-result\"].append(value)\n json.dump(data, file)\n file.close()\n\n\ndef get_answer(answer):\n if answer == \"c\":\n return \"head\"\n elif answer == \"t\":\n return \"tail\"\n else:\n print(\"Invalid answer\")\n\n\n# OnRun\nrun = True\nprint(\"Welcome to CoinPredictor\\n\")\nloop(run)\n\n\n'''\n\nfile = open(\"data/data.json\", \"w\")\ndata['coin-result'].append(False)\ndata = json.dump(data, file)\nprint(data)\nfile.close()'''\n",
"step-ids": [
5,
6,
7,
8,
9
]
}
|
[
5,
6,
7,
8,
9
] |
"""
Listing 1.36
Python extends the basic grouping syntax to add named groups. Using
names to refer to groups makes it easier to modify the pattern over
time, without having to also modify the code using the match results.
To set the name of a group, use the syntax (?P<name>pattern)
Use groupdict() to retrieve the dictionary mapping group names to
substrings from the match. Named patterns are included in the
ordered sequence returned by groups() as well.
"""
import re
def main():
text = "This is some text -- with punctuation."
print(text)
print()
patterns = [
r"^(?P<first_word>\w+)",
r"(?P<last_word>\w+)\S*$",
r"(?P<t_word>\bt\w+)\W+(?P<other_word>\w+)",
r"(?P<ends_with_t>\w+t)\b"
]
for pattern in patterns:
regex = re.compile(pattern)
match = regex.search(text)
print(f"'{pattern}'")
print(f" ", match.groups())
print(f" ", match.groupdict())
print()
if __name__ == "__main__":
main()
|
normal
|
{
"blob_id": "be6a2e45f735fe578392b03c3030890b6cd5b4bc",
"index": 2865,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef main():\n text = 'This is some text -- with punctuation.'\n print(text)\n print()\n patterns = ['^(?P<first_word>\\\\w+)', '(?P<last_word>\\\\w+)\\\\S*$',\n '(?P<t_word>\\\\bt\\\\w+)\\\\W+(?P<other_word>\\\\w+)',\n '(?P<ends_with_t>\\\\w+t)\\\\b']\n for pattern in patterns:\n regex = re.compile(pattern)\n match = regex.search(text)\n print(f\"'{pattern}'\")\n print(f' ', match.groups())\n print(f' ', match.groupdict())\n print()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef main():\n text = 'This is some text -- with punctuation.'\n print(text)\n print()\n patterns = ['^(?P<first_word>\\\\w+)', '(?P<last_word>\\\\w+)\\\\S*$',\n '(?P<t_word>\\\\bt\\\\w+)\\\\W+(?P<other_word>\\\\w+)',\n '(?P<ends_with_t>\\\\w+t)\\\\b']\n for pattern in patterns:\n regex = re.compile(pattern)\n match = regex.search(text)\n print(f\"'{pattern}'\")\n print(f' ', match.groups())\n print(f' ', match.groupdict())\n print()\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "<mask token>\nimport re\n\n\ndef main():\n text = 'This is some text -- with punctuation.'\n print(text)\n print()\n patterns = ['^(?P<first_word>\\\\w+)', '(?P<last_word>\\\\w+)\\\\S*$',\n '(?P<t_word>\\\\bt\\\\w+)\\\\W+(?P<other_word>\\\\w+)',\n '(?P<ends_with_t>\\\\w+t)\\\\b']\n for pattern in patterns:\n regex = re.compile(pattern)\n match = regex.search(text)\n print(f\"'{pattern}'\")\n print(f' ', match.groups())\n print(f' ', match.groupdict())\n print()\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "\"\"\"\nListing 1.36\n\nPython extends the basic grouping syntax to add named groups. Using\nnames to refer to groups makes it easier to modify the pattern over\ntime, without having to also modify the code using the match results.\n\nTo set the name of a group, use the syntax (?P<name>pattern)\n\nUse groupdict() to retrieve the dictionary mapping group names to\nsubstrings from the match. Named patterns are included in the\nordered sequence returned by groups() as well.\n\"\"\"\nimport re\n\n\ndef main():\n text = \"This is some text -- with punctuation.\"\n print(text)\n print()\n\n patterns = [\n r\"^(?P<first_word>\\w+)\",\n r\"(?P<last_word>\\w+)\\S*$\",\n r\"(?P<t_word>\\bt\\w+)\\W+(?P<other_word>\\w+)\",\n r\"(?P<ends_with_t>\\w+t)\\b\"\n ]\n\n for pattern in patterns:\n regex = re.compile(pattern)\n match = regex.search(text)\n print(f\"'{pattern}'\")\n print(f\" \", match.groups())\n print(f\" \", match.groupdict())\n print()\n\n\nif __name__ == \"__main__\":\n main()\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
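In addition to groups() and groupdict() shown above, a named group can also be read directly with match.group('name'); a small self-contained illustration using the first pattern from the listing:

import re

m = re.search(r"^(?P<first_word>\w+)", "This is some text -- with punctuation.")
print(m.group('first_word'))  # 'This'
print(m.group(1))             # positional access still works: 'This'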
import argparse
import debug.debug as dbg
import helper.helper as hlp
import prep.preprocessor as pre
import sample.sample as s
def main(dir_train, C, gamma, number_partitions, do_subsampling, write_labels):
hlp.setup_logging()
# Files as folds?
if number_partitions is None or number_partitions == 0: # Yes
do_concat = False
partitions_from_files = True
early_subsampling = False
late_subsampling = True
else: # No
do_concat = True
partitions_from_files = False
early_subsampling = True
late_subsampling = False
if not do_subsampling:
early_subsampling = late_subsampling = False
X, y = pre.get_multiple_data_and_targets(dir_filepath=dir_train, do_subsampling=early_subsampling,
do_concat=do_concat)
clf = s.get_svclassifier(C=C, gamma=gamma)
scores, y_pred = s.get_crossval_scores_prediction(X, y, n_folds=number_partitions, clf=clf,
files_as_folds=partitions_from_files, do_subsampling=late_subsampling)
evaluation = s.get_eval_report(scores)
hlp.log(scores)
hlp.log(evaluation)
if write_labels:
dbg.write_list_to_dir(dir_train, y_pred, "y_pred.txt")
if do_concat:
dbg.write_list_to_dir(dir_train, y, "y_true.txt")
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Print evaluation metrics for cross validating an HSV classifier.")
parser.add_argument("dir_train",
help="Directory containing all feature XMLs and label CSVs for cross validating the "
"classifier. CSVs need to have the same file name as their corresponding XML.")
parser.add_argument("-c", "--C_value", help="Omit the grid search and directly specify a C value.", type=float)
parser.add_argument("-g", "--gamma_value", help="Omit the grid search and directly specify a gamma value.",
type=float)
parser.add_argument("-p", "--number_partitions",
help="Set the number of partitions for cross validation. If omitted, take each file "
"as a partition.", type=int)
parser.add_argument("-s", "--subsampling", help="Subsample majority class", action="store_true")
parser.add_argument("-wl", "--write_labels",
help="Write both true and predicted labels of the eval file(s) to TXT files.",
action="store_true")
args = parser.parse_args()
main(args.dir_train, args.C_value, args.gamma_value, args.number_partitions, args.subsampling, args.write_labels)
|
normal
|
{
"blob_id": "4a63431aa71ca3f4b75fcd89a50bf599e7717645",
"index": 2442,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef main(dir_train, C, gamma, number_partitions, do_subsampling, write_labels):\n hlp.setup_logging()\n if number_partitions is None or number_partitions == 0:\n do_concat = False\n partitions_from_files = True\n early_subsampling = False\n late_subsampling = True\n else:\n do_concat = True\n partitions_from_files = False\n early_subsampling = True\n late_subsampling = False\n if not do_subsampling:\n early_subsampling = late_subsampling = False\n X, y = pre.get_multiple_data_and_targets(dir_filepath=dir_train,\n do_subsampling=early_subsampling, do_concat=do_concat)\n clf = s.get_svclassifier(C=C, gamma=gamma)\n scores, y_pred = s.get_crossval_scores_prediction(X, y, n_folds=\n number_partitions, clf=clf, files_as_folds=partitions_from_files,\n do_subsampling=late_subsampling)\n evaluation = s.get_eval_report(scores)\n hlp.log(scores)\n hlp.log(evaluation)\n if write_labels:\n dbg.write_list_to_dir(dir_train, y_pred, 'y_pred.txt')\n if do_concat:\n dbg.write_list_to_dir(dir_train, y, 'y_true.txt')\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef main(dir_train, C, gamma, number_partitions, do_subsampling, write_labels):\n hlp.setup_logging()\n if number_partitions is None or number_partitions == 0:\n do_concat = False\n partitions_from_files = True\n early_subsampling = False\n late_subsampling = True\n else:\n do_concat = True\n partitions_from_files = False\n early_subsampling = True\n late_subsampling = False\n if not do_subsampling:\n early_subsampling = late_subsampling = False\n X, y = pre.get_multiple_data_and_targets(dir_filepath=dir_train,\n do_subsampling=early_subsampling, do_concat=do_concat)\n clf = s.get_svclassifier(C=C, gamma=gamma)\n scores, y_pred = s.get_crossval_scores_prediction(X, y, n_folds=\n number_partitions, clf=clf, files_as_folds=partitions_from_files,\n do_subsampling=late_subsampling)\n evaluation = s.get_eval_report(scores)\n hlp.log(scores)\n hlp.log(evaluation)\n if write_labels:\n dbg.write_list_to_dir(dir_train, y_pred, 'y_pred.txt')\n if do_concat:\n dbg.write_list_to_dir(dir_train, y, 'y_true.txt')\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description=\n 'Print evaluation metrics for cross validating an HSV classifier.')\n parser.add_argument('dir_train', help=\n 'Directory containing all feature XMLs and label CSVs for cross validating the classifier. CSVs need to have the same file name as their corresponding XML.'\n )\n parser.add_argument('-c', '--C_value', help=\n 'Omit the grid search and directly specify a C value.', type=float)\n parser.add_argument('-g', '--gamma_value', help=\n 'Omit the grid search and directly specify a gamma value.', type=float)\n parser.add_argument('-p', '--number_partitions', help=\n 'Set the number of partitions for cross validation. If omitted, take each file as a partition.'\n , type=int)\n parser.add_argument('-s', '--subsampling', help=\n 'Subsample majority class', action='store_true')\n parser.add_argument('-wl', '--write_labels', help=\n 'Write both true and predicted labels of the eval file(s) to TXT files.'\n , action='store_true')\n args = parser.parse_args()\n main(args.dir_train, args.C_value, args.gamma_value, args.\n number_partitions, args.subsampling, args.write_labels)\n",
"step-4": "import argparse\nimport debug.debug as dbg\nimport helper.helper as hlp\nimport prep.preprocessor as pre\nimport sample.sample as s\n\n\ndef main(dir_train, C, gamma, number_partitions, do_subsampling, write_labels):\n hlp.setup_logging()\n if number_partitions is None or number_partitions == 0:\n do_concat = False\n partitions_from_files = True\n early_subsampling = False\n late_subsampling = True\n else:\n do_concat = True\n partitions_from_files = False\n early_subsampling = True\n late_subsampling = False\n if not do_subsampling:\n early_subsampling = late_subsampling = False\n X, y = pre.get_multiple_data_and_targets(dir_filepath=dir_train,\n do_subsampling=early_subsampling, do_concat=do_concat)\n clf = s.get_svclassifier(C=C, gamma=gamma)\n scores, y_pred = s.get_crossval_scores_prediction(X, y, n_folds=\n number_partitions, clf=clf, files_as_folds=partitions_from_files,\n do_subsampling=late_subsampling)\n evaluation = s.get_eval_report(scores)\n hlp.log(scores)\n hlp.log(evaluation)\n if write_labels:\n dbg.write_list_to_dir(dir_train, y_pred, 'y_pred.txt')\n if do_concat:\n dbg.write_list_to_dir(dir_train, y, 'y_true.txt')\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description=\n 'Print evaluation metrics for cross validating an HSV classifier.')\n parser.add_argument('dir_train', help=\n 'Directory containing all feature XMLs and label CSVs for cross validating the classifier. CSVs need to have the same file name as their corresponding XML.'\n )\n parser.add_argument('-c', '--C_value', help=\n 'Omit the grid search and directly specify a C value.', type=float)\n parser.add_argument('-g', '--gamma_value', help=\n 'Omit the grid search and directly specify a gamma value.', type=float)\n parser.add_argument('-p', '--number_partitions', help=\n 'Set the number of partitions for cross validation. If omitted, take each file as a partition.'\n , type=int)\n parser.add_argument('-s', '--subsampling', help=\n 'Subsample majority class', action='store_true')\n parser.add_argument('-wl', '--write_labels', help=\n 'Write both true and predicted labels of the eval file(s) to TXT files.'\n , action='store_true')\n args = parser.parse_args()\n main(args.dir_train, args.C_value, args.gamma_value, args.\n number_partitions, args.subsampling, args.write_labels)\n",
"step-5": "import argparse\n\nimport debug.debug as dbg\nimport helper.helper as hlp\nimport prep.preprocessor as pre\nimport sample.sample as s\n\n\ndef main(dir_train, C, gamma, number_partitions, do_subsampling, write_labels):\n hlp.setup_logging()\n\n # Files as folds?\n if number_partitions is None or number_partitions == 0: # Yes\n do_concat = False\n partitions_from_files = True\n early_subsampling = False\n late_subsampling = True\n else: # No\n do_concat = True\n partitions_from_files = False\n early_subsampling = True\n late_subsampling = False\n\n if not do_subsampling:\n early_subsampling = late_subsampling = False\n\n X, y = pre.get_multiple_data_and_targets(dir_filepath=dir_train, do_subsampling=early_subsampling,\n do_concat=do_concat)\n clf = s.get_svclassifier(C=C, gamma=gamma)\n scores, y_pred = s.get_crossval_scores_prediction(X, y, n_folds=number_partitions, clf=clf,\n files_as_folds=partitions_from_files, do_subsampling=late_subsampling)\n evaluation = s.get_eval_report(scores)\n hlp.log(scores)\n hlp.log(evaluation)\n\n if write_labels:\n dbg.write_list_to_dir(dir_train, y_pred, \"y_pred.txt\")\n if do_concat:\n dbg.write_list_to_dir(dir_train, y, \"y_true.txt\")\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description=\"Print evaluation metrics for cross validating an HSV classifier.\")\n parser.add_argument(\"dir_train\",\n help=\"Directory containing all feature XMLs and label CSVs for cross validating the \"\n \"classifier. CSVs need to have the same file name as their corresponding XML.\")\n parser.add_argument(\"-c\", \"--C_value\", help=\"Omit the grid search and directly specify a C value.\", type=float)\n parser.add_argument(\"-g\", \"--gamma_value\", help=\"Omit the grid search and directly specify a gamma value.\",\n type=float)\n parser.add_argument(\"-p\", \"--number_partitions\",\n help=\"Set the number of partitions for cross validation. If omitted, take each file \"\n \"as a partition.\", type=int)\n parser.add_argument(\"-s\", \"--subsampling\", help=\"Subsample majority class\", action=\"store_true\")\n parser.add_argument(\"-wl\", \"--write_labels\",\n help=\"Write both true and predicted labels of the eval file(s) to TXT files.\",\n action=\"store_true\")\n args = parser.parse_args()\n main(args.dir_train, args.C_value, args.gamma_value, args.number_partitions, args.subsampling, args.write_labels)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from packages import data as DATA
from packages import plot as PLOT
from packages import universal as UNIVERSAL
from packages import currency_pair as CP
import matplotlib.pyplot as plt
import mpl_finance as mpf
from packages import db as DB
import CONSTANTS
import datetime
from matplotlib.pylab import date2num
from matplotlib.widgets import Cursor
pgmanager=DB.PGManager(**CONSTANTS.DB_CONNECT_ARGS_LOCAL)
tablename='klines_full_vol_50'
rows=pgmanager.select('select * from '+tablename + ' where timestamp>1577808000+86400*5 order by timestamp limit 300')
a=1
alist = []
vols_bid = []
vols_ask = []
diff_bid_2_ask = []
diff_bid_2_ask_in_past_2_epochs = []
diff_bid_2_ask_in_past_3_epochs = []
diff_bid_2_ask_in_past_5_epochs = []
diff_bid_2_ask_in_past_10_epochs = []
diff_bid_2_ask_in_past_20_epochs = []
avg_buys=[]
avg_sells=[]
avg_buy_diff_sell=[]
avg_amounts=[]
dates = []
cnt = 0
date = date2num(datetime.datetime.fromtimestamp(rows[0][1]))
for cnt in range(20, len(rows)):
row_previous2=rows[cnt-2]
row_previous1 = rows[cnt - 1]
row = rows[cnt]
open=row[2]
high=row[3]
low=row[4]
close=row[5]
vol=row[6]
vol_buy,vol_sell=row[7:9]
avg_buy, avg_sell, avg_amount_per_trade=row[-3:]
date = date + 1
data = (date, open, high, low, close)
alist.append(data)
vols_bid.append(-vol_buy)
vols_ask.append(vol_sell)
diff_bid_2_ask.append(vol_buy-vol_sell)
diff_bid_2_ask_in_past_2_epochs.append(
vol_buy + row_previous1[7] - vol_sell-row_previous1[8])
diff_bid_2_ask_in_past_3_epochs.append(
vol_buy + row_previous1[7] +row_previous2[7] - vol_sell-row_previous1[8]-row_previous2[8])
avg_buy_diff_sell.append(avg_buy-avg_sell)
avg_amounts.append(avg_amount_per_trade*100)
dates.append(date)
# fig, ax = plt.subplots(figsize=(32, 18))
# fig.subplots_adjust(bottom=0.5)
# mpf.candlestick_ohlc(ax, alist, width=0.5, colorup='g', colordown='r', alpha=1.0)
# plt.grid(True)
# # rotate the date tick labels
# plt.xticks(rotation=30)
# plt.title('wanda yuanxian 17')
# plt.xlabel('Date')
# plt.ylabel('Price')
# # use dates for the x-axis ticks
# ax.xaxis_date()
fig, axes = plt.subplots(3, sharex=True, figsize=(64, 30))
mpf.candlestick_ohlc(axes[0], alist, width=0.5, colorup='g', colordown='r')
axes[0].set_title('BTC')
axes[0].set_ylabel('Price')
axes[0].grid(True)
axes[0].xaxis_date()
# axes[1].plot(dates, avg_buy_diff_sell,c='red',linewidth=0.5)
# axes[1].plot(dates, avg_amounts,c='green', linewidth=0.5)
# axes[1].grid(True)
axes[1].plot(dates, avg_buy_diff_sell, c='orange')
axes[1].plot(dates, avg_amounts, c='blue')
axes[1].set_ylabel('Volume')
axes[1].grid(True)
axes[2].plot(dates, diff_bid_2_ask, c='green')
axes[2].plot(dates, diff_bid_2_ask_in_past_2_epochs, c='orange')
axes[2].plot(dates, diff_bid_2_ask_in_past_3_epochs, c='blue')
axes[2].set_ylabel('Volume')
axes[2].grid(True)
axes[2].set_ylabel('Average buy/sell price')
axes[2].grid(True)
plt.show()
|
normal
|
{
"blob_id": "9aaaa744780dbd32b14e09a34976a2a0a3ce34f7",
"index": 7864,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor cnt in range(20, len(rows)):\n row_previous2 = rows[cnt - 2]\n row_previous1 = rows[cnt - 1]\n row = rows[cnt]\n open = row[2]\n high = row[3]\n low = row[4]\n close = row[5]\n vol = row[6]\n vol_buy, vol_sell = row[7:9]\n avg_buy, avg_sell, avg_amount_per_trade = row[-3:]\n date = date + 1\n data = date, open, high, low, close\n alist.append(data)\n vols_bid.append(-vol_buy)\n vols_ask.append(vol_sell)\n diff_bid_2_ask.append(vol_buy - vol_sell)\n diff_bid_2_ask_in_past_2_epochs.append(vol_buy + row_previous1[7] -\n vol_sell - row_previous1[8])\n diff_bid_2_ask_in_past_3_epochs.append(vol_buy + row_previous1[7] +\n row_previous2[7] - vol_sell - row_previous1[8] - row_previous2[8])\n avg_buy_diff_sell.append(avg_buy - avg_sell)\n avg_amounts.append(avg_amount_per_trade * 100)\n dates.append(date)\n<mask token>\nmpf.candlestick_ohlc(axes[0], alist, width=0.5, colorup='g', colordown='r')\naxes[0].set_title('BTC')\naxes[0].set_ylabel('价格')\naxes[0].grid(True)\naxes[0].xaxis_date()\naxes[1].plot(dates, avg_buy_diff_sell, c='orange')\naxes[1].plot(dates, avg_amounts, c='blue')\naxes[1].set_ylabel('成交量')\naxes[1].grid(True)\naxes[2].plot(dates, diff_bid_2_ask, c='green')\naxes[2].plot(dates, diff_bid_2_ask_in_past_2_epochs, c='orange')\naxes[2].plot(dates, diff_bid_2_ask_in_past_3_epochs, c='blue')\naxes[2].set_ylabel('成交量')\naxes[2].grid(True)\naxes[2].set_ylabel('买卖均价')\naxes[2].grid(True)\nplt.show()\n",
"step-3": "<mask token>\npgmanager = DB.PGManager(**CONSTANTS.DB_CONNECT_ARGS_LOCAL)\ntablename = 'klines_full_vol_50'\nrows = pgmanager.select('select * from ' + tablename +\n ' where timestamp>1577808000+86400*5 order by timestamp limit 300')\na = 1\nalist = []\nvols_bid = []\nvols_ask = []\ndiff_bid_2_ask = []\ndiff_bid_2_ask_in_past_2_epochs = []\ndiff_bid_2_ask_in_past_3_epochs = []\ndiff_bid_2_ask_in_past_5_epochs = []\ndiff_bid_2_ask_in_past_10_epochs = []\ndiff_bid_2_ask_in_past_20_epochs = []\navg_buys = []\navg_sells = []\navg_buy_diff_sell = []\navg_amounts = []\ndates = []\ncnt = 0\ndate = date2num(datetime.datetime.fromtimestamp(rows[0][1]))\nfor cnt in range(20, len(rows)):\n row_previous2 = rows[cnt - 2]\n row_previous1 = rows[cnt - 1]\n row = rows[cnt]\n open = row[2]\n high = row[3]\n low = row[4]\n close = row[5]\n vol = row[6]\n vol_buy, vol_sell = row[7:9]\n avg_buy, avg_sell, avg_amount_per_trade = row[-3:]\n date = date + 1\n data = date, open, high, low, close\n alist.append(data)\n vols_bid.append(-vol_buy)\n vols_ask.append(vol_sell)\n diff_bid_2_ask.append(vol_buy - vol_sell)\n diff_bid_2_ask_in_past_2_epochs.append(vol_buy + row_previous1[7] -\n vol_sell - row_previous1[8])\n diff_bid_2_ask_in_past_3_epochs.append(vol_buy + row_previous1[7] +\n row_previous2[7] - vol_sell - row_previous1[8] - row_previous2[8])\n avg_buy_diff_sell.append(avg_buy - avg_sell)\n avg_amounts.append(avg_amount_per_trade * 100)\n dates.append(date)\nfig, axes = plt.subplots(3, sharex=True, figsize=(64, 30))\nmpf.candlestick_ohlc(axes[0], alist, width=0.5, colorup='g', colordown='r')\naxes[0].set_title('BTC')\naxes[0].set_ylabel('价格')\naxes[0].grid(True)\naxes[0].xaxis_date()\naxes[1].plot(dates, avg_buy_diff_sell, c='orange')\naxes[1].plot(dates, avg_amounts, c='blue')\naxes[1].set_ylabel('成交量')\naxes[1].grid(True)\naxes[2].plot(dates, diff_bid_2_ask, c='green')\naxes[2].plot(dates, diff_bid_2_ask_in_past_2_epochs, c='orange')\naxes[2].plot(dates, diff_bid_2_ask_in_past_3_epochs, c='blue')\naxes[2].set_ylabel('成交量')\naxes[2].grid(True)\naxes[2].set_ylabel('买卖均价')\naxes[2].grid(True)\nplt.show()\n",
"step-4": "from packages import data as DATA\nfrom packages import plot as PLOT\nfrom packages import universal as UNIVERSAL\nfrom packages import currency_pair as CP\nimport matplotlib.pyplot as plt\nimport mpl_finance as mpf\nfrom packages import db as DB\nimport CONSTANTS\nimport datetime\nfrom matplotlib.pylab import date2num\nfrom matplotlib.widgets import Cursor\npgmanager = DB.PGManager(**CONSTANTS.DB_CONNECT_ARGS_LOCAL)\ntablename = 'klines_full_vol_50'\nrows = pgmanager.select('select * from ' + tablename +\n ' where timestamp>1577808000+86400*5 order by timestamp limit 300')\na = 1\nalist = []\nvols_bid = []\nvols_ask = []\ndiff_bid_2_ask = []\ndiff_bid_2_ask_in_past_2_epochs = []\ndiff_bid_2_ask_in_past_3_epochs = []\ndiff_bid_2_ask_in_past_5_epochs = []\ndiff_bid_2_ask_in_past_10_epochs = []\ndiff_bid_2_ask_in_past_20_epochs = []\navg_buys = []\navg_sells = []\navg_buy_diff_sell = []\navg_amounts = []\ndates = []\ncnt = 0\ndate = date2num(datetime.datetime.fromtimestamp(rows[0][1]))\nfor cnt in range(20, len(rows)):\n row_previous2 = rows[cnt - 2]\n row_previous1 = rows[cnt - 1]\n row = rows[cnt]\n open = row[2]\n high = row[3]\n low = row[4]\n close = row[5]\n vol = row[6]\n vol_buy, vol_sell = row[7:9]\n avg_buy, avg_sell, avg_amount_per_trade = row[-3:]\n date = date + 1\n data = date, open, high, low, close\n alist.append(data)\n vols_bid.append(-vol_buy)\n vols_ask.append(vol_sell)\n diff_bid_2_ask.append(vol_buy - vol_sell)\n diff_bid_2_ask_in_past_2_epochs.append(vol_buy + row_previous1[7] -\n vol_sell - row_previous1[8])\n diff_bid_2_ask_in_past_3_epochs.append(vol_buy + row_previous1[7] +\n row_previous2[7] - vol_sell - row_previous1[8] - row_previous2[8])\n avg_buy_diff_sell.append(avg_buy - avg_sell)\n avg_amounts.append(avg_amount_per_trade * 100)\n dates.append(date)\nfig, axes = plt.subplots(3, sharex=True, figsize=(64, 30))\nmpf.candlestick_ohlc(axes[0], alist, width=0.5, colorup='g', colordown='r')\naxes[0].set_title('BTC')\naxes[0].set_ylabel('价格')\naxes[0].grid(True)\naxes[0].xaxis_date()\naxes[1].plot(dates, avg_buy_diff_sell, c='orange')\naxes[1].plot(dates, avg_amounts, c='blue')\naxes[1].set_ylabel('成交量')\naxes[1].grid(True)\naxes[2].plot(dates, diff_bid_2_ask, c='green')\naxes[2].plot(dates, diff_bid_2_ask_in_past_2_epochs, c='orange')\naxes[2].plot(dates, diff_bid_2_ask_in_past_3_epochs, c='blue')\naxes[2].set_ylabel('成交量')\naxes[2].grid(True)\naxes[2].set_ylabel('买卖均价')\naxes[2].grid(True)\nplt.show()\n",
"step-5": "from packages import data as DATA\nfrom packages import plot as PLOT\nfrom packages import universal as UNIVERSAL\nfrom packages import currency_pair as CP\nimport matplotlib.pyplot as plt\nimport mpl_finance as mpf\nfrom packages import db as DB\nimport CONSTANTS\nimport datetime\nfrom matplotlib.pylab import date2num\nfrom matplotlib.widgets import Cursor\n\npgmanager=DB.PGManager(**CONSTANTS.DB_CONNECT_ARGS_LOCAL)\ntablename='klines_full_vol_50'\n\nrows=pgmanager.select('select * from '+tablename + ' where timestamp>1577808000+86400*5 order by timestamp limit 300')\na=1\n\nalist = []\nvols_bid = []\nvols_ask = []\ndiff_bid_2_ask = []\ndiff_bid_2_ask_in_past_2_epochs = []\ndiff_bid_2_ask_in_past_3_epochs = []\ndiff_bid_2_ask_in_past_5_epochs = []\ndiff_bid_2_ask_in_past_10_epochs = []\ndiff_bid_2_ask_in_past_20_epochs = []\navg_buys=[]\navg_sells=[]\navg_buy_diff_sell=[]\navg_amounts=[]\ndates = []\ncnt = 0\ndate = date2num(datetime.datetime.fromtimestamp(rows[0][1]))\n\nfor cnt in range(20, len(rows)):\n row_previous2=rows[cnt-2]\n row_previous1 = rows[cnt - 1]\n row = rows[cnt]\n open=row[2]\n high=row[3]\n low=row[4]\n close=row[5]\n vol=row[6]\n vol_buy,vol_sell=row[7:9]\n avg_buy, avg_sell, avg_amount_per_trade=row[-3:]\n date = date + 1\n data = (date, open, high, low, close)\n alist.append(data)\n vols_bid.append(-vol_buy)\n vols_ask.append(vol_sell)\n diff_bid_2_ask.append(vol_buy-vol_sell)\n diff_bid_2_ask_in_past_2_epochs.append(\n vol_buy + row_previous1[7] - vol_sell-row_previous1[8])\n diff_bid_2_ask_in_past_3_epochs.append(\n vol_buy + row_previous1[7] +row_previous2[7] - vol_sell-row_previous1[8]-row_previous2[8])\n avg_buy_diff_sell.append(avg_buy-avg_sell)\n avg_amounts.append(avg_amount_per_trade*100)\n dates.append(date)\n\n# fig, ax = plt.subplots(figsize=(32, 18))\n# fig.subplots_adjust(bottom=0.5)\n# mpf.candlestick_ohlc(ax, alist, width=0.5, colorup='g', colordown='r', alpha=1.0)\n# plt.grid(True)\n# # 设置日期刻度旋转的角度\n# plt.xticks(rotation=30)\n# plt.title('wanda yuanxian 17')\n# plt.xlabel('Date')\n# plt.ylabel('Price')\n# # x轴的刻度为日期\n# ax.xaxis_date()\n\nfig, axes = plt.subplots(3, sharex=True, figsize=(64, 30))\nmpf.candlestick_ohlc(axes[0], alist, width=0.5, colorup='g', colordown='r')\n\naxes[0].set_title('BTC')\naxes[0].set_ylabel('价格')\naxes[0].grid(True)\naxes[0].xaxis_date()\n\n# axes[1].plot(dates, avg_buy_diff_sell,c='red',linewidth=0.5)\n# axes[1].plot(dates, avg_amounts,c='green', linewidth=0.5)\n# axes[1].grid(True)\naxes[1].plot(dates, avg_buy_diff_sell, c='orange')\naxes[1].plot(dates, avg_amounts, c='blue')\naxes[1].set_ylabel('成交量')\naxes[1].grid(True)\n\naxes[2].plot(dates, diff_bid_2_ask, c='green')\naxes[2].plot(dates, diff_bid_2_ask_in_past_2_epochs, c='orange')\naxes[2].plot(dates, diff_bid_2_ask_in_past_3_epochs, c='blue')\naxes[2].set_ylabel('成交量')\naxes[2].grid(True)\n\naxes[2].set_ylabel('买卖均价')\naxes[2].grid(True)\n\nplt.show()",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri May 24 18:46:26 2019
@author: kiran
"""
import matplotlib.pylab as plt
import pandas as pd
import numpy as np
import statsmodels as sm
from statsmodels.graphics.tsaplots import plot_acf, plot_pacf
from statsmodels.tsa.stattools import acf, pacf
from statsmodels.tsa.arima_model import ARIMA
from matplotlib.pylab import rcParams
rcParams['figure.figsize'] = 15,6
#importing library and preparing dataset
mylynx_df = pd.read_csv('LYNXdata.csv', header = 0, names = ['year','trappings'], index_col=0)
# the start date must be a string; an unquoted 31/12/1821 would be evaluated as division, not a date
mylynxts = pd.Series(mylynx_df['trappings'].values,
                     index = pd.DatetimeIndex(data=tuple(pd.date_range('31/12/1821', periods = 114, freq = 'A-DEC')),
                                              freq= 'A-DEC'))
#Dickey-Fuller test
def stationarity_test(mylynxts):
from statsmodels.tsa.stattools import adfuller
print('Results of Dickey-Fuller Test:')
df_test = adfuller(mylynxts, autolag='AIC')
    df_output = pd.Series(df_test[0:4], index=['Test Statistic','p-value','#lags_used','Number of Observations Used'])
print(df_output)
stationarity_test(mylynxts)
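# a small p-value (< 0.05) rejects the ADF unit-root null, i.e. the series can be treated as stationary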
#Arima Model
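# order=(3,0,0): three autoregressive lags, no differencing, no moving-average terms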
model = ARIMA(mylynxts, order=(3,0,0))
results_AR = model.fit()
plt.plot(mylynxts)
plt.plot(results_AR.fittedvalues, color='red')
'''
information criteria and residuals need to be checked.
'''
#information summary
results_AR.summary()
#residual plot
fig = plt.figure(figsize=(12,8))
ax1 = fig.add_subplot(211)
fig = plot_acf(results_AR.resid, lags=20, ax = ax1)
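# for an adequate fit the residual ACF should show no significant spikes beyond lag 0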
#importing function for normal distribution
from scipy.stats import norm
plt.figure(figsize=(10,6))
plt.hist(results_AR.resid, bins='auto', density=True, rwidth=0.85, label='residuals') # density=True so the fitted normal pdf can be overlaid on the histogram
mu,std = norm.fit(results_AR.resid)
xmin,xmax = plt.xlim()
x = np.linspace(xmin,xmax,100)
p = norm.pdf(x,mu,std)
plt.plot(x,p,'m',linewidth=2)
plt.grid(axis='y',alpha=0.2)
plt.xlabel('Residuals')
plt.ylabel('Density')
plt.title('Residuals 3,0,0 vs Normal Distribution - Mean ='+ str(round(mu,2))+', std ='+str(round(std,2)))
plt.show()
|
normal
|
{
"blob_id": "8e28135da60f8e11459697c4ae9c63e60c437d7a",
"index": 9501,
"step-1": "<mask token>\n\n\ndef stationarity_test(mylynxts):\n from statsmodels.tsa.stattools import adfuller\n print('Results of Dickey-Fuller Test:')\n df_test = adfuller(mylynxts, autolag='AIC')\n df_output = pd.Series(df_test[0:4], index=['Test Statistic', 'p-value',\n '#lags_used', 'Number of Observation Used'])\n print(df_output)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef stationarity_test(mylynxts):\n from statsmodels.tsa.stattools import adfuller\n print('Results of Dickey-Fuller Test:')\n df_test = adfuller(mylynxts, autolag='AIC')\n df_output = pd.Series(df_test[0:4], index=['Test Statistic', 'p-value',\n '#lags_used', 'Number of Observation Used'])\n print(df_output)\n\n\nstationarity_test(mylynxts)\n<mask token>\nplt.plot(mylynxts)\nplt.plot(results_AR.fittedvalues, color='red')\n<mask token>\nresults_AR.summary()\n<mask token>\nplt.figure(figsize=(10, 6))\nplt.hist(results_AR.resid, bins='auto', density=True, rwidth=0.85, label=\n 'residuals')\n<mask token>\nplt.plot(x, p, 'm', linewidth=2)\nplt.grid(axis='y', alpha=0.2)\nplt.xlabel('Residuals')\nplt.ylabel('Density')\nplt.title('Residuals 2,0,0 vs Normal Distribution - Mean =' + str(round(mu,\n 2)) + ', std =' + str(round(std, 2)))\nplt.show()\n",
"step-3": "<mask token>\nrcParams['figure.figsize'] = 15, 6\nmylynx_df = pd.read_csv('LYNXdata.csv', header=0, names=['year',\n 'trappings'], index_col=0)\nmylynxts = pd.Series(mylynx_df['trappings'].values, index=pd.DatetimeIndex(\n data=tuple(pd.date_range(31 / 12 / 1821, periods=114, freq='A-DEC')),\n freq='A-DEC'))\n\n\ndef stationarity_test(mylynxts):\n from statsmodels.tsa.stattools import adfuller\n print('Results of Dickey-Fuller Test:')\n df_test = adfuller(mylynxts, autolag='AIC')\n df_output = pd.Series(df_test[0:4], index=['Test Statistic', 'p-value',\n '#lags_used', 'Number of Observation Used'])\n print(df_output)\n\n\nstationarity_test(mylynxts)\nmodel = ARIMA(mylynxts, order=(3, 0, 0))\nresults_AR = model.fit()\nplt.plot(mylynxts)\nplt.plot(results_AR.fittedvalues, color='red')\n<mask token>\nresults_AR.summary()\nfig = plt.figure(figsize=(12, 8))\nax1 = fig.add_subplot(211)\nfig = plot_acf(results_AR.resid, lags=20, ax=ax1)\n<mask token>\nplt.figure(figsize=(10, 6))\nplt.hist(results_AR.resid, bins='auto', density=True, rwidth=0.85, label=\n 'residuals')\nmu, std = norm.fit(results_AR.resid)\nxmin, xmax = plt.xlim()\nx = np.linspace(xmin, xmax, 100)\np = norm.pdf(x, mu, std)\nplt.plot(x, p, 'm', linewidth=2)\nplt.grid(axis='y', alpha=0.2)\nplt.xlabel('Residuals')\nplt.ylabel('Density')\nplt.title('Residuals 2,0,0 vs Normal Distribution - Mean =' + str(round(mu,\n 2)) + ', std =' + str(round(std, 2)))\nplt.show()\n",
"step-4": "<mask token>\nimport matplotlib.pylab as plt\nimport pandas as pd\nimport numpy as np\nimport statsmodels as sm\nfrom statsmodels.graphics.tsaplots import plot_acf, plot_pacf\nfrom statsmodels.tsa.stattools import acf, pacf\nfrom statsmodels.tsa.arima_model import ARIMA\nfrom matplotlib.pylab import rcParams\nrcParams['figure.figsize'] = 15, 6\nmylynx_df = pd.read_csv('LYNXdata.csv', header=0, names=['year',\n 'trappings'], index_col=0)\nmylynxts = pd.Series(mylynx_df['trappings'].values, index=pd.DatetimeIndex(\n data=tuple(pd.date_range(31 / 12 / 1821, periods=114, freq='A-DEC')),\n freq='A-DEC'))\n\n\ndef stationarity_test(mylynxts):\n from statsmodels.tsa.stattools import adfuller\n print('Results of Dickey-Fuller Test:')\n df_test = adfuller(mylynxts, autolag='AIC')\n df_output = pd.Series(df_test[0:4], index=['Test Statistic', 'p-value',\n '#lags_used', 'Number of Observation Used'])\n print(df_output)\n\n\nstationarity_test(mylynxts)\nmodel = ARIMA(mylynxts, order=(3, 0, 0))\nresults_AR = model.fit()\nplt.plot(mylynxts)\nplt.plot(results_AR.fittedvalues, color='red')\n<mask token>\nresults_AR.summary()\nfig = plt.figure(figsize=(12, 8))\nax1 = fig.add_subplot(211)\nfig = plot_acf(results_AR.resid, lags=20, ax=ax1)\nfrom scipy.stats import norm\nplt.figure(figsize=(10, 6))\nplt.hist(results_AR.resid, bins='auto', density=True, rwidth=0.85, label=\n 'residuals')\nmu, std = norm.fit(results_AR.resid)\nxmin, xmax = plt.xlim()\nx = np.linspace(xmin, xmax, 100)\np = norm.pdf(x, mu, std)\nplt.plot(x, p, 'm', linewidth=2)\nplt.grid(axis='y', alpha=0.2)\nplt.xlabel('Residuals')\nplt.ylabel('Density')\nplt.title('Residuals 2,0,0 vs Normal Distribution - Mean =' + str(round(mu,\n 2)) + ', std =' + str(round(std, 2)))\nplt.show()\n",
"step-5": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri May 24 18:46:26 2019\n@author: kiran\n\"\"\"\nimport matplotlib.pylab as plt\nimport pandas as pd\nimport numpy as np\nimport statsmodels as sm\nfrom statsmodels.graphics.tsaplots import plot_acf, plot_pacf\nfrom statsmodels.tsa.stattools import acf, pacf\nfrom statsmodels.tsa.arima_model import ARIMA\nfrom matplotlib.pylab import rcParams\nrcParams['figure.figsize'] = 15,6\n\n#importing library and preparing dataset\nmylynx_df = pd.read_csv('LYNXdata.csv', header = 0, names = ['year','trappings'], index_col=0)\nmylynxts = pd.Series(mylynx_df['trappings'].values, index = pd.DatetimeIndex(data=(tuple(pd.date_range(31/12/1821, periods = 114, freq = 'A-DEC'))), freq= 'A-DEC'))\n\n#Dickey-fuller test\ndef stationarity_test(mylynxts):\n from statsmodels.tsa.stattools import adfuller\n print('Results of Dickey-Fuller Test:')\n df_test = adfuller(mylynxts, autolag='AIC')\n df_output = pd.Series(df_test[0:4], index=['Test Statistic','p-value','#lags_used','Number of Observation Used'])\n print(df_output)\nstationarity_test(mylynxts)\n\n#Arima Model\nmodel = ARIMA(mylynxts, order=(3,0,0))\nresults_AR = model.fit()\nplt.plot(mylynxts)\nplt.plot(results_AR.fittedvalues, color='red')\n\n'''\ninformation criteria and resdiuals need to be checked.\n'''\n#information summary\nresults_AR.summary()\n\n\n#residual plot\nfig = plt.figure(figsize=(12,8))\nax1 = fig.add_subplot(211)\nfig = plot_acf(results_AR.resid, lags=20, ax = ax1)\n\n#importing function for nomral distribution\nfrom scipy.stats import norm\nplt.figure(figsize=(10,6))\nplt.hist(results_AR.resid, bins='auto', density=True, rwidth=0.85, label='residuals') #density true - norm.dist line curve\nmu,std = norm.fit(results_AR.resid)\nxmin,xmax = plt.xlim()\nx = np.linspace(xmin,xmax,100)\np = norm.pdf(x,mu,std)\nplt.plot(x,p,'m',linewidth=2)\nplt.grid(axis='y',alpha=0.2)\nplt.xlabel('Residuals')\nplt.ylabel('Density')\nplt.title('Residuals 2,0,0 vs Normal Distribution - Mean ='+ str(round(mu,2))+', std ='+str(round(std,2)))\nplt.show()\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
from typing import List
class Solution:
def getDescentPeriods(self, prices: List[int]) -> int:
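        # dp = number of smooth descent periods ending at index i
        # (the length of the current run where each price is exactly 1 below the previous one)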
ans = 1 # prices[0]
dp = 1
for i in range(1, len(prices)):
if prices[i] == prices[i - 1] - 1:
dp += 1
else:
dp = 1
ans += dp
return ans
|
normal
|
{
"blob_id": "d10468d2d0aefa19a7d225bfffad03ec6cb6e082",
"index": 4079,
"step-1": "<mask token>\n",
"step-2": "class Solution:\n <mask token>\n",
"step-3": "class Solution:\n\n def getDescentPeriods(self, prices: List[int]) ->int:\n ans = 1\n dp = 1\n for i in range(1, len(prices)):\n if prices[i] == prices[i - 1] - 1:\n dp += 1\n else:\n dp = 1\n ans += dp\n return ans\n",
"step-4": "class Solution:\n def getDescentPeriods(self, prices: List[int]) -> int:\n ans = 1 # prices[0]\n dp = 1\n\n for i in range(1, len(prices)):\n if prices[i] == prices[i - 1] - 1:\n dp += 1\n else:\n dp = 1\n ans += dp\n\n return ans\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import dash
import dash_core_components as dcc
import dash_html_components as html
app = dash.Dash()
app.layout = html.Div(
children=[
html.Label('Dropdowm'),
dcc.Dropdown(
id='my-dropdown',
options=[
                {'label': 'İstanbul', 'value': 34}, # options are passed as a list of dicts
{'label': 'Ankara', 'value': 6},
{'label': 'Erzurum', 'value': 25},
],
            multi=True,
            value=[34],  # with multi=True the value should be a list
searchable=True,
),
html.Label('Radio'),
dcc.RadioItems(
id='my-radio',
options=[
{'label': 'İstanbul', 'value': 34},
{'label': 'Ankara', 'value': 6},
{'label': 'Erzurum', 'value': 25},
],
value=34,
),
html.Label('Slider'),
dcc.Slider(
id='my-slider',
min=0,
max=20,
step=0.5,
value=10,
            marks={i: str(i) for i in range(0, 21)}  # mark labels as strings
),
]
)
if __name__ == '__main__':
app.run_server()
|
normal
|
{
"blob_id": "443bf59bc3c5ed2114f0c276aa7134ff5bf7fb64",
"index": 7264,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif __name__ == '__main__':\n app.run_server()\n",
"step-3": "<mask token>\napp = dash.Dash()\napp.layout = html.Div(children=[html.Label('Dropdowm'), dcc.Dropdown(id=\n 'my-dropdown', options=[{'label': 'İstanbul', 'value': 34}, {'label':\n 'Ankara', 'value': 6}, {'label': 'Erzurum', 'value': 25}], multi=True,\n value=34, searchable=True), html.Label('Radio'), dcc.RadioItems(id=\n 'my-radio', options=[{'label': 'İstanbul', 'value': 34}, {'label':\n 'Ankara', 'value': 6}, {'label': 'Erzurum', 'value': 25}], value=34),\n html.Label('Slider'), dcc.Slider(id='my-slider', min=0, max=20, step=\n 0.5, value=10, marks={i: i for i in range(0, 21)})])\nif __name__ == '__main__':\n app.run_server()\n",
"step-4": "import dash\nimport dash_core_components as dcc\nimport dash_html_components as html\napp = dash.Dash()\napp.layout = html.Div(children=[html.Label('Dropdowm'), dcc.Dropdown(id=\n 'my-dropdown', options=[{'label': 'İstanbul', 'value': 34}, {'label':\n 'Ankara', 'value': 6}, {'label': 'Erzurum', 'value': 25}], multi=True,\n value=34, searchable=True), html.Label('Radio'), dcc.RadioItems(id=\n 'my-radio', options=[{'label': 'İstanbul', 'value': 34}, {'label':\n 'Ankara', 'value': 6}, {'label': 'Erzurum', 'value': 25}], value=34),\n html.Label('Slider'), dcc.Slider(id='my-slider', min=0, max=20, step=\n 0.5, value=10, marks={i: i for i in range(0, 21)})])\nif __name__ == '__main__':\n app.run_server()\n",
"step-5": "import dash\nimport dash_core_components as dcc\nimport dash_html_components as html\n\napp = dash.Dash()\n\napp.layout = html.Div(\n children=[\n html.Label('Dropdowm'),\n dcc.Dropdown(\n id='my-dropdown',\n options=[\n {'label': 'İstanbul', 'value': 34}, # seçeneleri dict tutan liste olarak veririz\n {'label': 'Ankara', 'value': 6},\n {'label': 'Erzurum', 'value': 25},\n ],\n multi=True,\n value=34,\n searchable=True,\n ),\n html.Label('Radio'),\n dcc.RadioItems(\n id='my-radio',\n options=[\n {'label': 'İstanbul', 'value': 34},\n {'label': 'Ankara', 'value': 6},\n {'label': 'Erzurum', 'value': 25},\n ],\n value=34,\n ),\n html.Label('Slider'),\n dcc.Slider(\n id='my-slider',\n min=0,\n max=20,\n step=0.5,\n value=10,\n marks={i: i for i in range(0, 21)}\n ),\n ]\n)\n\nif __name__ == '__main__':\n app.run_server()\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
#
# Copyright 2016 The BigDL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from abc import ABC, abstractmethod
from bigdl.dllib.utils.common import DOUBLEMAX
from bigdl.orca.learn.optimizers.schedule import Scheduler
from bigdl.dllib.utils.log4Error import invalidInputError
from typing import (Any, Optional, Dict, TYPE_CHECKING)
if TYPE_CHECKING:
from bigdl.dllib.optim import optimizer
import numpy as np
class Optimizer(ABC):
@abstractmethod
def get_optimizer(self):
pass
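# Each concrete optimizer below is a thin wrapper around the corresponding bigdl.dllib optimizer;
# the wrapped dllib object is exposed through get_optimizer().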
class SGD(Optimizer):
"""
A plain implementation of SGD
:param learningrate learning rate
:param learningrate_decay learning rate decay
:param weightdecay weight decay
:param momentum momentum
:param dampening dampening for momentum
:param nesterov enables Nesterov momentum
:param learningrates 1D tensor of individual learning rates
:param weightdecays 1D tensor of individual weight decays
>>> sgd = SGD()
creating: createDefault
creating: createSGD
"""
def __init__(self,
learningrate: float = 1e-3,
learningrate_decay: float = 0.0,
weightdecay: float = 0.0,
momentum: float = 0.0,
dampening: float = DOUBLEMAX,
nesterov: bool = False,
learningrate_schedule: Optional["Scheduler"] = None,
learningrates: Optional["np.ndarray"] = None,
weightdecays: Optional["np.ndarray"] = None) -> None:
from bigdl.dllib.optim.optimizer import SGD as BSGD
invalidInputError(isinstance(learningrate_schedule, Scheduler),
"learningrate_schedule should be an "
"bigdl.orca.learn.optimizers.schedule.Scheduler,"
" but got {learningrate_schedule}")
self.optimizer = BSGD(learningrate,
learningrate_decay,
weightdecay,
momentum,
dampening,
nesterov,
learningrate_schedule.get_scheduler(), # type: ignore
learningrates,
weightdecays,
bigdl_type="float")
def get_optimizer(self) -> "optimizer.SGD":
return self.optimizer
class Adagrad(Optimizer):
"""
An implementation of Adagrad. See the original paper:
http://jmlr.org/papers/volume12/duchi11a/duchi11a.pdf
:param learningrate learning rate
:param learningrate_decay learning rate decay
:param weightdecay weight decay
>>> adagrad = Adagrad()
creating: createAdagrad
"""
def __init__(self,
learningrate: float = 1e-3,
learningrate_decay: float = 0.0,
weightdecay: float = 0.0) -> None:
from bigdl.dllib.optim.optimizer import Adagrad as BAdagrad
self.optimizer = BAdagrad(learningrate, learningrate_decay,
weightdecay, bigdl_type="float")
def get_optimizer(self) -> "optimizer.Adagrad":
return self.optimizer
class LBFGS(Optimizer):
"""
This implementation of L-BFGS relies on a user-provided line
search function (state.lineSearch). If this function is not
provided, then a simple learningRate is used to produce fixed
size steps. Fixed size steps are much less costly than line
searches, and can be useful for stochastic problems.
The learning rate is used even when a line search is provided.
This is also useful for large-scale stochastic problems, where
opfunc is a noisy approximation of f(x). In that case, the learning
rate allows a reduction of confidence in the step size.
:param max_iter Maximum number of iterations allowed
:param max_eval Maximum number of function evaluations
:param tolfun Termination tolerance on the first-order optimality
:param tolx Termination tol on progress in terms of func/param changes
:param ncorrection
:param learningrate
:param verbose
:param linesearch A line search function
:param linesearch_options If no line search provided, then a fixed step size is used
>>> lbfgs = LBFGS()
creating: createLBFGS
"""
def __init__(self,
max_iter: int = 20,
max_eval: float = DOUBLEMAX,
tolfun: float = 1e-5,
tolx: float = 1e-9,
ncorrection: int = 100,
learningrate: float = 1.0,
verbose: bool = False,
linesearch: Any = None,
linesearch_options: Optional[Dict[Any, Any]]=None) -> None:
from bigdl.dllib.optim.optimizer import LBFGS as BLBFGS
self.optimizer = BLBFGS(
max_iter,
max_eval,
tolfun,
tolx,
ncorrection,
learningrate,
verbose,
linesearch,
linesearch_options,
bigdl_type="float"
)
def get_optimizer(self) -> "optimizer.LBFGS":
return self.optimizer
class Adadelta(Optimizer):
"""
Adadelta implementation for SGD: http://arxiv.org/abs/1212.5701
:param decayrate interpolation parameter rho
:param epsilon for numerical stability
>>> adagrad = Adadelta()
creating: createAdadelta
"""
def __init__(self,
decayrate: float = 0.9,
epsilon: float = 1e-10) -> None:
from bigdl.dllib.optim.optimizer import Adadelta as BAdadelta
self.optimizer = BAdadelta(decayrate,
epsilon,
bigdl_type="float")
def get_optimizer(self) -> "optimizer.Adadelta":
return self.optimizer
class Adam(Optimizer):
"""
An implementation of Adam http://arxiv.org/pdf/1412.6980.pdf
:param learningrate learning rate
:param learningrate_decay learning rate decay
:param beta1 first moment coefficient
:param beta2 second moment coefficient
:param epsilon for numerical stability
>>> adam = Adam()
creating: createAdam
"""
def __init__(self,
learningrate: float = 1e-3,
learningrate_decay: float = 0.0,
beta1: float = 0.9,
beta2: float = 0.999,
epsilon: float = 1e-8) -> None:
from bigdl.dllib.optim.optimizer import Adam as BAdam
self.optimizer = BAdam(learningrate,
learningrate_decay,
beta1,
beta2,
epsilon,
bigdl_type="float")
def get_optimizer(self) -> "optimizer.Adam":
return self.optimizer
class ParallelAdam(Optimizer):
"""
An implementation of Adam http://arxiv.org/pdf/1412.6980.pdf
:param learningrate learning rate
:param learningrate_decay learning rate decay
:param beta1 first moment coefficient
:param beta2 second moment coefficient
:param epsilon for numerical stability
>>> pAdam = ParallelAdam()
creating: createParallelAdam
"""
def __init__(self,
learningrate: float = 1e-3,
learningrate_decay: float = 0.0,
beta1: float = 0.9,
beta2: float = 0.999,
epsilon: float = 1e-8,
parallel_num: int = -1) -> None:
from bigdl.dllib.optim.optimizer import ParallelAdam as BParallelAdam
self.optimizer = BParallelAdam(learningrate,
learningrate_decay,
beta1,
beta2,
epsilon,
parallel_num,
bigdl_type="float")
def get_optimizer(self) -> "optimizer.ParallelAdam":
return self.optimizer
class Ftrl(Optimizer):
"""
An implementation of Ftrl https://www.eecs.tufts.edu/~dsculley/papers/ad-click-prediction.pdf.
Support L1 penalty, L2 penalty and shrinkage-type L2 penalty.
:param learningrate learning rate
:param learningrate_power double, must be less or equal to zero. Default is -0.5.
:param initial_accumulator_value double, the starting value for accumulators,
require zero or positive values.
:param l1_regularization_strength double, must be greater or equal to zero. Default is zero.
:param l2_regularization_strength double, must be greater or equal to zero. Default is zero.
:param l2_shrinkage_regularization_strength double, must be greater or equal to zero.
Default is zero. This differs from l2RegularizationStrength above. L2 above is a
stabilization penalty, whereas this one is a magnitude penalty.
>>> ftrl = Ftrl()
creating: createFtrl
>>> ftrl2 = Ftrl(1e-2, -0.1, 0.2, 0.3, 0.4, 0.5)
creating: createFtrl
"""
def __init__(self,
learningrate: float = 1e-3,
learningrate_power: float = -0.5,
initial_accumulator_value: float = 0.1,
l1_regularization_strength: float = 0.0,
l2_regularization_strength: float = 0.0,
l2_shrinkage_regularization_strength: float = 0.0) -> None:
from bigdl.dllib.optim.optimizer import Ftrl as BFtrl
self.optimizer = BFtrl(learningrate,
learningrate_power,
initial_accumulator_value,
l1_regularization_strength,
l2_regularization_strength,
l2_shrinkage_regularization_strength,
bigdl_type="float")
def get_optimizer(self) -> "optimizer.Ftrl":
return self.optimizer
class Adamax(Optimizer):
"""
An implementation of Adamax http://arxiv.org/pdf/1412.6980.pdf
:param learningrate learning rate
:param beta1 first moment coefficient
:param beta2 second moment coefficient
:param epsilon for numerical stability
>>> adagrad = Adamax()
creating: createAdamax
"""
def __init__(self,
learningrate: float = 0.002,
beta1: float = 0.9,
beta2: float = 0.999,
epsilon: float = 1e-38) -> None:
from bigdl.dllib.optim.optimizer import Adamax as BAdamax
self.optimizer = BAdamax(learningrate,
beta1,
beta2,
epsilon,
bigdl_type="float")
def get_optimizer(self) -> "optimizer.Adamax":
return self.optimizer
class RMSprop(Optimizer):
"""
An implementation of RMSprop
:param learningrate learning rate
:param learningrate_decay learning rate decay
:param decayrate decay rate, also called rho
:param epsilon for numerical stability
>>> adagrad = RMSprop()
creating: createRMSprop
"""
def __init__(self,
learningrate: float = 1e-2,
learningrate_decay: float = 0.0,
decayrate: float = 0.99,
epsilon: float = 1e-8) -> None:
from bigdl.dllib.optim.optimizer import RMSprop as BRMSprop
self.optimizer = BRMSprop(learningrate,
learningrate_decay,
decayrate,
epsilon,
bigdl_type="float")
def get_optimizer(self) -> "optimizer.RMSprop":
return self.optimizer
|
normal
|
{
"blob_id": "ce69f7b7cf8c38845bfe589c83fdd6e43ab50912",
"index": 3708,
"step-1": "<mask token>\n\n\nclass Adam(Optimizer):\n <mask token>\n\n def __init__(self, learningrate: float=0.001, learningrate_decay: float\n =0.0, beta1: float=0.9, beta2: float=0.999, epsilon: float=1e-08\n ) ->None:\n from bigdl.dllib.optim.optimizer import Adam as BAdam\n self.optimizer = BAdam(learningrate, learningrate_decay, beta1,\n beta2, epsilon, bigdl_type='float')\n\n def get_optimizer(self) ->'optimizer.Adam':\n return self.optimizer\n\n\nclass ParallelAdam(Optimizer):\n \"\"\"\n An implementation of Adam http://arxiv.org/pdf/1412.6980.pdf\n :param learningrate learning rate\n :param learningrate_decay learning rate decay\n :param beta1 first moment coefficient\n :param beta2 second moment coefficient\n :param epsilon for numerical stability\n >>> pAdam = ParallelAdam()\n creating: createParallelAdam\n \"\"\"\n\n def __init__(self, learningrate: float=0.001, learningrate_decay: float\n =0.0, beta1: float=0.9, beta2: float=0.999, epsilon: float=1e-08,\n parallel_num: int=-1) ->None:\n from bigdl.dllib.optim.optimizer import ParallelAdam as BParallelAdam\n self.optimizer = BParallelAdam(learningrate, learningrate_decay,\n beta1, beta2, epsilon, parallel_num, bigdl_type='float')\n\n def get_optimizer(self) ->'optimizer.ParallelAdam':\n return self.optimizer\n\n\nclass Ftrl(Optimizer):\n \"\"\"\n An implementation of Ftrl https://www.eecs.tufts.edu/~dsculley/papers/ad-click-prediction.pdf.\n Support L1 penalty, L2 penalty and shrinkage-type L2 penalty.\n\n :param learningrate learning rate\n :param learningrate_power double, must be less or equal to zero. Default is -0.5.\n :param initial_accumulator_value double, the starting value for accumulators,\n require zero or positive values.\n :param l1_regularization_strength double, must be greater or equal to zero. Default is zero.\n :param l2_regularization_strength double, must be greater or equal to zero. Default is zero.\n :param l2_shrinkage_regularization_strength double, must be greater or equal to zero.\n Default is zero. This differs from l2RegularizationStrength above. 
L2 above is a\n stabilization penalty, whereas this one is a magnitude penalty.\n >>> ftrl = Ftrl()\n creating: createFtrl\n >>> ftrl2 = Ftrl(1e-2, -0.1, 0.2, 0.3, 0.4, 0.5)\n creating: createFtrl\n \"\"\"\n\n def __init__(self, learningrate: float=0.001, learningrate_power: float\n =-0.5, initial_accumulator_value: float=0.1,\n l1_regularization_strength: float=0.0, l2_regularization_strength:\n float=0.0, l2_shrinkage_regularization_strength: float=0.0) ->None:\n from bigdl.dllib.optim.optimizer import Ftrl as BFtrl\n self.optimizer = BFtrl(learningrate, learningrate_power,\n initial_accumulator_value, l1_regularization_strength,\n l2_regularization_strength,\n l2_shrinkage_regularization_strength, bigdl_type='float')\n\n def get_optimizer(self) ->'optimizer.Ftrl':\n return self.optimizer\n\n\nclass Adamax(Optimizer):\n \"\"\"\n An implementation of Adamax http://arxiv.org/pdf/1412.6980.pdf\n :param learningrate learning rate\n :param beta1 first moment coefficient\n :param beta2 second moment coefficient\n :param epsilon for numerical stability\n >>> adagrad = Adamax()\n creating: createAdamax\n \"\"\"\n\n def __init__(self, learningrate: float=0.002, beta1: float=0.9, beta2:\n float=0.999, epsilon: float=1e-38) ->None:\n from bigdl.dllib.optim.optimizer import Adamax as BAdamax\n self.optimizer = BAdamax(learningrate, beta1, beta2, epsilon,\n bigdl_type='float')\n\n def get_optimizer(self) ->'optimizer.Adamax':\n return self.optimizer\n\n\nclass RMSprop(Optimizer):\n \"\"\"\n An implementation of RMSprop\n :param learningrate learning rate\n :param learningrate_decay learning rate decay\n :param decayrate decay rate, also called rho\n :param epsilon for numerical stability\n >>> adagrad = RMSprop()\n creating: createRMSprop\n \"\"\"\n\n def __init__(self, learningrate: float=0.01, learningrate_decay: float=\n 0.0, decayrate: float=0.99, epsilon: float=1e-08) ->None:\n from bigdl.dllib.optim.optimizer import RMSprop as BRMSprop\n self.optimizer = BRMSprop(learningrate, learningrate_decay,\n decayrate, epsilon, bigdl_type='float')\n\n def get_optimizer(self) ->'optimizer.RMSprop':\n return self.optimizer\n",
"step-2": "<mask token>\n\n\nclass Adagrad(Optimizer):\n <mask token>\n <mask token>\n <mask token>\n\n\nclass LBFGS(Optimizer):\n \"\"\"\n This implementation of L-BFGS relies on a user-provided line\n search function (state.lineSearch). If this function is not\n provided, then a simple learningRate is used to produce fixed\n size steps. Fixed size steps are much less costly than line\n searches, and can be useful for stochastic problems.\n The learning rate is used even when a line search is provided.\n This is also useful for large-scale stochastic problems, where\n opfunc is a noisy approximation of f(x). In that case, the learning\n rate allows a reduction of confidence in the step size.\n\n :param max_iter Maximum number of iterations allowed\n :param max_eval Maximum number of function evaluations\n :param tolfun Termination tolerance on the first-order optimality\n :param tolx Termination tol on progress in terms of func/param changes\n :param ncorrection\n :param learningrate\n :param verbose\n :param linesearch A line search function\n :param linesearch_options If no line search provided, then a fixed step size is used\n >>> lbfgs = LBFGS()\n creating: createLBFGS\n \"\"\"\n\n def __init__(self, max_iter: int=20, max_eval: float=DOUBLEMAX, tolfun:\n float=1e-05, tolx: float=1e-09, ncorrection: int=100, learningrate:\n float=1.0, verbose: bool=False, linesearch: Any=None,\n linesearch_options: Optional[Dict[Any, Any]]=None) ->None:\n from bigdl.dllib.optim.optimizer import LBFGS as BLBFGS\n self.optimizer = BLBFGS(max_iter, max_eval, tolfun, tolx,\n ncorrection, learningrate, verbose, linesearch,\n linesearch_options, bigdl_type='float')\n\n def get_optimizer(self) ->'optimizer.LBFGS':\n return self.optimizer\n\n\nclass Adadelta(Optimizer):\n \"\"\"\n Adadelta implementation for SGD: http://arxiv.org/abs/1212.5701\n\n :param decayrate interpolation parameter rho\n :param epsilon for numerical stability\n >>> adagrad = Adadelta()\n creating: createAdadelta\n \"\"\"\n\n def __init__(self, decayrate: float=0.9, epsilon: float=1e-10) ->None:\n from bigdl.dllib.optim.optimizer import Adadelta as BAdadelta\n self.optimizer = BAdadelta(decayrate, epsilon, bigdl_type='float')\n\n def get_optimizer(self) ->'optimizer.Adadelta':\n return self.optimizer\n\n\nclass Adam(Optimizer):\n \"\"\"\n An implementation of Adam http://arxiv.org/pdf/1412.6980.pdf\n :param learningrate learning rate\n :param learningrate_decay learning rate decay\n :param beta1 first moment coefficient\n :param beta2 second moment coefficient\n :param epsilon for numerical stability\n >>> adam = Adam()\n creating: createAdam\n \"\"\"\n\n def __init__(self, learningrate: float=0.001, learningrate_decay: float\n =0.0, beta1: float=0.9, beta2: float=0.999, epsilon: float=1e-08\n ) ->None:\n from bigdl.dllib.optim.optimizer import Adam as BAdam\n self.optimizer = BAdam(learningrate, learningrate_decay, beta1,\n beta2, epsilon, bigdl_type='float')\n\n def get_optimizer(self) ->'optimizer.Adam':\n return self.optimizer\n\n\nclass ParallelAdam(Optimizer):\n \"\"\"\n An implementation of Adam http://arxiv.org/pdf/1412.6980.pdf\n :param learningrate learning rate\n :param learningrate_decay learning rate decay\n :param beta1 first moment coefficient\n :param beta2 second moment coefficient\n :param epsilon for numerical stability\n >>> pAdam = ParallelAdam()\n creating: createParallelAdam\n \"\"\"\n\n def __init__(self, learningrate: float=0.001, learningrate_decay: float\n =0.0, beta1: float=0.9, beta2: float=0.999, epsilon: 
float=1e-08,\n parallel_num: int=-1) ->None:\n from bigdl.dllib.optim.optimizer import ParallelAdam as BParallelAdam\n self.optimizer = BParallelAdam(learningrate, learningrate_decay,\n beta1, beta2, epsilon, parallel_num, bigdl_type='float')\n\n def get_optimizer(self) ->'optimizer.ParallelAdam':\n return self.optimizer\n\n\nclass Ftrl(Optimizer):\n \"\"\"\n An implementation of Ftrl https://www.eecs.tufts.edu/~dsculley/papers/ad-click-prediction.pdf.\n Support L1 penalty, L2 penalty and shrinkage-type L2 penalty.\n\n :param learningrate learning rate\n :param learningrate_power double, must be less or equal to zero. Default is -0.5.\n :param initial_accumulator_value double, the starting value for accumulators,\n require zero or positive values.\n :param l1_regularization_strength double, must be greater or equal to zero. Default is zero.\n :param l2_regularization_strength double, must be greater or equal to zero. Default is zero.\n :param l2_shrinkage_regularization_strength double, must be greater or equal to zero.\n Default is zero. This differs from l2RegularizationStrength above. L2 above is a\n stabilization penalty, whereas this one is a magnitude penalty.\n >>> ftrl = Ftrl()\n creating: createFtrl\n >>> ftrl2 = Ftrl(1e-2, -0.1, 0.2, 0.3, 0.4, 0.5)\n creating: createFtrl\n \"\"\"\n\n def __init__(self, learningrate: float=0.001, learningrate_power: float\n =-0.5, initial_accumulator_value: float=0.1,\n l1_regularization_strength: float=0.0, l2_regularization_strength:\n float=0.0, l2_shrinkage_regularization_strength: float=0.0) ->None:\n from bigdl.dllib.optim.optimizer import Ftrl as BFtrl\n self.optimizer = BFtrl(learningrate, learningrate_power,\n initial_accumulator_value, l1_regularization_strength,\n l2_regularization_strength,\n l2_shrinkage_regularization_strength, bigdl_type='float')\n\n def get_optimizer(self) ->'optimizer.Ftrl':\n return self.optimizer\n\n\nclass Adamax(Optimizer):\n \"\"\"\n An implementation of Adamax http://arxiv.org/pdf/1412.6980.pdf\n :param learningrate learning rate\n :param beta1 first moment coefficient\n :param beta2 second moment coefficient\n :param epsilon for numerical stability\n >>> adagrad = Adamax()\n creating: createAdamax\n \"\"\"\n\n def __init__(self, learningrate: float=0.002, beta1: float=0.9, beta2:\n float=0.999, epsilon: float=1e-38) ->None:\n from bigdl.dllib.optim.optimizer import Adamax as BAdamax\n self.optimizer = BAdamax(learningrate, beta1, beta2, epsilon,\n bigdl_type='float')\n\n def get_optimizer(self) ->'optimizer.Adamax':\n return self.optimizer\n\n\nclass RMSprop(Optimizer):\n \"\"\"\n An implementation of RMSprop\n :param learningrate learning rate\n :param learningrate_decay learning rate decay\n :param decayrate decay rate, also called rho\n :param epsilon for numerical stability\n >>> adagrad = RMSprop()\n creating: createRMSprop\n \"\"\"\n\n def __init__(self, learningrate: float=0.01, learningrate_decay: float=\n 0.0, decayrate: float=0.99, epsilon: float=1e-08) ->None:\n from bigdl.dllib.optim.optimizer import RMSprop as BRMSprop\n self.optimizer = BRMSprop(learningrate, learningrate_decay,\n decayrate, epsilon, bigdl_type='float')\n\n def get_optimizer(self) ->'optimizer.RMSprop':\n return self.optimizer\n",
"step-3": "<mask token>\n\n\nclass SGD(Optimizer):\n <mask token>\n <mask token>\n <mask token>\n\n\nclass Adagrad(Optimizer):\n \"\"\"\n An implementation of Adagrad. See the original paper:\n http://jmlr.org/papers/volume12/duchi11a/duchi11a.pdf\n\n :param learningrate learning rate\n :param learningrate_decay learning rate decay\n :param weightdecay weight decay\n >>> adagrad = Adagrad()\n creating: createAdagrad\n \"\"\"\n\n def __init__(self, learningrate: float=0.001, learningrate_decay: float\n =0.0, weightdecay: float=0.0) ->None:\n from bigdl.dllib.optim.optimizer import Adagrad as BAdagrad\n self.optimizer = BAdagrad(learningrate, learningrate_decay,\n weightdecay, bigdl_type='float')\n\n def get_optimizer(self) ->'optimizer.Adagrad':\n return self.optimizer\n\n\nclass LBFGS(Optimizer):\n \"\"\"\n This implementation of L-BFGS relies on a user-provided line\n search function (state.lineSearch). If this function is not\n provided, then a simple learningRate is used to produce fixed\n size steps. Fixed size steps are much less costly than line\n searches, and can be useful for stochastic problems.\n The learning rate is used even when a line search is provided.\n This is also useful for large-scale stochastic problems, where\n opfunc is a noisy approximation of f(x). In that case, the learning\n rate allows a reduction of confidence in the step size.\n\n :param max_iter Maximum number of iterations allowed\n :param max_eval Maximum number of function evaluations\n :param tolfun Termination tolerance on the first-order optimality\n :param tolx Termination tol on progress in terms of func/param changes\n :param ncorrection\n :param learningrate\n :param verbose\n :param linesearch A line search function\n :param linesearch_options If no line search provided, then a fixed step size is used\n >>> lbfgs = LBFGS()\n creating: createLBFGS\n \"\"\"\n\n def __init__(self, max_iter: int=20, max_eval: float=DOUBLEMAX, tolfun:\n float=1e-05, tolx: float=1e-09, ncorrection: int=100, learningrate:\n float=1.0, verbose: bool=False, linesearch: Any=None,\n linesearch_options: Optional[Dict[Any, Any]]=None) ->None:\n from bigdl.dllib.optim.optimizer import LBFGS as BLBFGS\n self.optimizer = BLBFGS(max_iter, max_eval, tolfun, tolx,\n ncorrection, learningrate, verbose, linesearch,\n linesearch_options, bigdl_type='float')\n\n def get_optimizer(self) ->'optimizer.LBFGS':\n return self.optimizer\n\n\nclass Adadelta(Optimizer):\n \"\"\"\n Adadelta implementation for SGD: http://arxiv.org/abs/1212.5701\n\n :param decayrate interpolation parameter rho\n :param epsilon for numerical stability\n >>> adagrad = Adadelta()\n creating: createAdadelta\n \"\"\"\n\n def __init__(self, decayrate: float=0.9, epsilon: float=1e-10) ->None:\n from bigdl.dllib.optim.optimizer import Adadelta as BAdadelta\n self.optimizer = BAdadelta(decayrate, epsilon, bigdl_type='float')\n\n def get_optimizer(self) ->'optimizer.Adadelta':\n return self.optimizer\n\n\nclass Adam(Optimizer):\n \"\"\"\n An implementation of Adam http://arxiv.org/pdf/1412.6980.pdf\n :param learningrate learning rate\n :param learningrate_decay learning rate decay\n :param beta1 first moment coefficient\n :param beta2 second moment coefficient\n :param epsilon for numerical stability\n >>> adam = Adam()\n creating: createAdam\n \"\"\"\n\n def __init__(self, learningrate: float=0.001, learningrate_decay: float\n =0.0, beta1: float=0.9, beta2: float=0.999, epsilon: float=1e-08\n ) ->None:\n from bigdl.dllib.optim.optimizer import Adam as BAdam\n 
self.optimizer = BAdam(learningrate, learningrate_decay, beta1,\n beta2, epsilon, bigdl_type='float')\n\n def get_optimizer(self) ->'optimizer.Adam':\n return self.optimizer\n\n\nclass ParallelAdam(Optimizer):\n \"\"\"\n An implementation of Adam http://arxiv.org/pdf/1412.6980.pdf\n :param learningrate learning rate\n :param learningrate_decay learning rate decay\n :param beta1 first moment coefficient\n :param beta2 second moment coefficient\n :param epsilon for numerical stability\n >>> pAdam = ParallelAdam()\n creating: createParallelAdam\n \"\"\"\n\n def __init__(self, learningrate: float=0.001, learningrate_decay: float\n =0.0, beta1: float=0.9, beta2: float=0.999, epsilon: float=1e-08,\n parallel_num: int=-1) ->None:\n from bigdl.dllib.optim.optimizer import ParallelAdam as BParallelAdam\n self.optimizer = BParallelAdam(learningrate, learningrate_decay,\n beta1, beta2, epsilon, parallel_num, bigdl_type='float')\n\n def get_optimizer(self) ->'optimizer.ParallelAdam':\n return self.optimizer\n\n\nclass Ftrl(Optimizer):\n \"\"\"\n An implementation of Ftrl https://www.eecs.tufts.edu/~dsculley/papers/ad-click-prediction.pdf.\n Support L1 penalty, L2 penalty and shrinkage-type L2 penalty.\n\n :param learningrate learning rate\n :param learningrate_power double, must be less or equal to zero. Default is -0.5.\n :param initial_accumulator_value double, the starting value for accumulators,\n require zero or positive values.\n :param l1_regularization_strength double, must be greater or equal to zero. Default is zero.\n :param l2_regularization_strength double, must be greater or equal to zero. Default is zero.\n :param l2_shrinkage_regularization_strength double, must be greater or equal to zero.\n Default is zero. This differs from l2RegularizationStrength above. 
L2 above is a\n stabilization penalty, whereas this one is a magnitude penalty.\n >>> ftrl = Ftrl()\n creating: createFtrl\n >>> ftrl2 = Ftrl(1e-2, -0.1, 0.2, 0.3, 0.4, 0.5)\n creating: createFtrl\n \"\"\"\n\n def __init__(self, learningrate: float=0.001, learningrate_power: float\n =-0.5, initial_accumulator_value: float=0.1,\n l1_regularization_strength: float=0.0, l2_regularization_strength:\n float=0.0, l2_shrinkage_regularization_strength: float=0.0) ->None:\n from bigdl.dllib.optim.optimizer import Ftrl as BFtrl\n self.optimizer = BFtrl(learningrate, learningrate_power,\n initial_accumulator_value, l1_regularization_strength,\n l2_regularization_strength,\n l2_shrinkage_regularization_strength, bigdl_type='float')\n\n def get_optimizer(self) ->'optimizer.Ftrl':\n return self.optimizer\n\n\nclass Adamax(Optimizer):\n \"\"\"\n An implementation of Adamax http://arxiv.org/pdf/1412.6980.pdf\n :param learningrate learning rate\n :param beta1 first moment coefficient\n :param beta2 second moment coefficient\n :param epsilon for numerical stability\n >>> adagrad = Adamax()\n creating: createAdamax\n \"\"\"\n\n def __init__(self, learningrate: float=0.002, beta1: float=0.9, beta2:\n float=0.999, epsilon: float=1e-38) ->None:\n from bigdl.dllib.optim.optimizer import Adamax as BAdamax\n self.optimizer = BAdamax(learningrate, beta1, beta2, epsilon,\n bigdl_type='float')\n\n def get_optimizer(self) ->'optimizer.Adamax':\n return self.optimizer\n\n\nclass RMSprop(Optimizer):\n \"\"\"\n An implementation of RMSprop\n :param learningrate learning rate\n :param learningrate_decay learning rate decay\n :param decayrate decay rate, also called rho\n :param epsilon for numerical stability\n >>> adagrad = RMSprop()\n creating: createRMSprop\n \"\"\"\n\n def __init__(self, learningrate: float=0.01, learningrate_decay: float=\n 0.0, decayrate: float=0.99, epsilon: float=1e-08) ->None:\n from bigdl.dllib.optim.optimizer import RMSprop as BRMSprop\n self.optimizer = BRMSprop(learningrate, learningrate_decay,\n decayrate, epsilon, bigdl_type='float')\n\n def get_optimizer(self) ->'optimizer.RMSprop':\n return self.optimizer\n",
"step-4": "<mask token>\n\n\nclass SGD(Optimizer):\n <mask token>\n\n def __init__(self, learningrate: float=0.001, learningrate_decay: float\n =0.0, weightdecay: float=0.0, momentum: float=0.0, dampening: float\n =DOUBLEMAX, nesterov: bool=False, learningrate_schedule: Optional[\n 'Scheduler']=None, learningrates: Optional['np.ndarray']=None,\n weightdecays: Optional['np.ndarray']=None) ->None:\n from bigdl.dllib.optim.optimizer import SGD as BSGD\n invalidInputError(isinstance(learningrate_schedule, Scheduler),\n 'learningrate_schedule should be an bigdl.orca.learn.optimizers.schedule.Scheduler, but got {learningrate_schedule}'\n )\n self.optimizer = BSGD(learningrate, learningrate_decay, weightdecay,\n momentum, dampening, nesterov, learningrate_schedule.\n get_scheduler(), learningrates, weightdecays, bigdl_type='float')\n\n def get_optimizer(self) ->'optimizer.SGD':\n return self.optimizer\n\n\nclass Adagrad(Optimizer):\n \"\"\"\n An implementation of Adagrad. See the original paper:\n http://jmlr.org/papers/volume12/duchi11a/duchi11a.pdf\n\n :param learningrate learning rate\n :param learningrate_decay learning rate decay\n :param weightdecay weight decay\n >>> adagrad = Adagrad()\n creating: createAdagrad\n \"\"\"\n\n def __init__(self, learningrate: float=0.001, learningrate_decay: float\n =0.0, weightdecay: float=0.0) ->None:\n from bigdl.dllib.optim.optimizer import Adagrad as BAdagrad\n self.optimizer = BAdagrad(learningrate, learningrate_decay,\n weightdecay, bigdl_type='float')\n\n def get_optimizer(self) ->'optimizer.Adagrad':\n return self.optimizer\n\n\nclass LBFGS(Optimizer):\n \"\"\"\n This implementation of L-BFGS relies on a user-provided line\n search function (state.lineSearch). If this function is not\n provided, then a simple learningRate is used to produce fixed\n size steps. Fixed size steps are much less costly than line\n searches, and can be useful for stochastic problems.\n The learning rate is used even when a line search is provided.\n This is also useful for large-scale stochastic problems, where\n opfunc is a noisy approximation of f(x). 
In that case, the learning\n rate allows a reduction of confidence in the step size.\n\n :param max_iter Maximum number of iterations allowed\n :param max_eval Maximum number of function evaluations\n :param tolfun Termination tolerance on the first-order optimality\n :param tolx Termination tol on progress in terms of func/param changes\n :param ncorrection\n :param learningrate\n :param verbose\n :param linesearch A line search function\n :param linesearch_options If no line search provided, then a fixed step size is used\n >>> lbfgs = LBFGS()\n creating: createLBFGS\n \"\"\"\n\n def __init__(self, max_iter: int=20, max_eval: float=DOUBLEMAX, tolfun:\n float=1e-05, tolx: float=1e-09, ncorrection: int=100, learningrate:\n float=1.0, verbose: bool=False, linesearch: Any=None,\n linesearch_options: Optional[Dict[Any, Any]]=None) ->None:\n from bigdl.dllib.optim.optimizer import LBFGS as BLBFGS\n self.optimizer = BLBFGS(max_iter, max_eval, tolfun, tolx,\n ncorrection, learningrate, verbose, linesearch,\n linesearch_options, bigdl_type='float')\n\n def get_optimizer(self) ->'optimizer.LBFGS':\n return self.optimizer\n\n\nclass Adadelta(Optimizer):\n \"\"\"\n Adadelta implementation for SGD: http://arxiv.org/abs/1212.5701\n\n :param decayrate interpolation parameter rho\n :param epsilon for numerical stability\n >>> adagrad = Adadelta()\n creating: createAdadelta\n \"\"\"\n\n def __init__(self, decayrate: float=0.9, epsilon: float=1e-10) ->None:\n from bigdl.dllib.optim.optimizer import Adadelta as BAdadelta\n self.optimizer = BAdadelta(decayrate, epsilon, bigdl_type='float')\n\n def get_optimizer(self) ->'optimizer.Adadelta':\n return self.optimizer\n\n\nclass Adam(Optimizer):\n \"\"\"\n An implementation of Adam http://arxiv.org/pdf/1412.6980.pdf\n :param learningrate learning rate\n :param learningrate_decay learning rate decay\n :param beta1 first moment coefficient\n :param beta2 second moment coefficient\n :param epsilon for numerical stability\n >>> adam = Adam()\n creating: createAdam\n \"\"\"\n\n def __init__(self, learningrate: float=0.001, learningrate_decay: float\n =0.0, beta1: float=0.9, beta2: float=0.999, epsilon: float=1e-08\n ) ->None:\n from bigdl.dllib.optim.optimizer import Adam as BAdam\n self.optimizer = BAdam(learningrate, learningrate_decay, beta1,\n beta2, epsilon, bigdl_type='float')\n\n def get_optimizer(self) ->'optimizer.Adam':\n return self.optimizer\n\n\nclass ParallelAdam(Optimizer):\n \"\"\"\n An implementation of Adam http://arxiv.org/pdf/1412.6980.pdf\n :param learningrate learning rate\n :param learningrate_decay learning rate decay\n :param beta1 first moment coefficient\n :param beta2 second moment coefficient\n :param epsilon for numerical stability\n >>> pAdam = ParallelAdam()\n creating: createParallelAdam\n \"\"\"\n\n def __init__(self, learningrate: float=0.001, learningrate_decay: float\n =0.0, beta1: float=0.9, beta2: float=0.999, epsilon: float=1e-08,\n parallel_num: int=-1) ->None:\n from bigdl.dllib.optim.optimizer import ParallelAdam as BParallelAdam\n self.optimizer = BParallelAdam(learningrate, learningrate_decay,\n beta1, beta2, epsilon, parallel_num, bigdl_type='float')\n\n def get_optimizer(self) ->'optimizer.ParallelAdam':\n return self.optimizer\n\n\nclass Ftrl(Optimizer):\n \"\"\"\n An implementation of Ftrl https://www.eecs.tufts.edu/~dsculley/papers/ad-click-prediction.pdf.\n Support L1 penalty, L2 penalty and shrinkage-type L2 penalty.\n\n :param learningrate learning rate\n :param learningrate_power double, must be less or equal to 
zero. Default is -0.5.\n :param initial_accumulator_value double, the starting value for accumulators,\n require zero or positive values.\n :param l1_regularization_strength double, must be greater or equal to zero. Default is zero.\n :param l2_regularization_strength double, must be greater or equal to zero. Default is zero.\n :param l2_shrinkage_regularization_strength double, must be greater or equal to zero.\n Default is zero. This differs from l2RegularizationStrength above. L2 above is a\n stabilization penalty, whereas this one is a magnitude penalty.\n >>> ftrl = Ftrl()\n creating: createFtrl\n >>> ftrl2 = Ftrl(1e-2, -0.1, 0.2, 0.3, 0.4, 0.5)\n creating: createFtrl\n \"\"\"\n\n def __init__(self, learningrate: float=0.001, learningrate_power: float\n =-0.5, initial_accumulator_value: float=0.1,\n l1_regularization_strength: float=0.0, l2_regularization_strength:\n float=0.0, l2_shrinkage_regularization_strength: float=0.0) ->None:\n from bigdl.dllib.optim.optimizer import Ftrl as BFtrl\n self.optimizer = BFtrl(learningrate, learningrate_power,\n initial_accumulator_value, l1_regularization_strength,\n l2_regularization_strength,\n l2_shrinkage_regularization_strength, bigdl_type='float')\n\n def get_optimizer(self) ->'optimizer.Ftrl':\n return self.optimizer\n\n\nclass Adamax(Optimizer):\n \"\"\"\n An implementation of Adamax http://arxiv.org/pdf/1412.6980.pdf\n :param learningrate learning rate\n :param beta1 first moment coefficient\n :param beta2 second moment coefficient\n :param epsilon for numerical stability\n >>> adagrad = Adamax()\n creating: createAdamax\n \"\"\"\n\n def __init__(self, learningrate: float=0.002, beta1: float=0.9, beta2:\n float=0.999, epsilon: float=1e-38) ->None:\n from bigdl.dllib.optim.optimizer import Adamax as BAdamax\n self.optimizer = BAdamax(learningrate, beta1, beta2, epsilon,\n bigdl_type='float')\n\n def get_optimizer(self) ->'optimizer.Adamax':\n return self.optimizer\n\n\nclass RMSprop(Optimizer):\n \"\"\"\n An implementation of RMSprop\n :param learningrate learning rate\n :param learningrate_decay learning rate decay\n :param decayrate decay rate, also called rho\n :param epsilon for numerical stability\n >>> adagrad = RMSprop()\n creating: createRMSprop\n \"\"\"\n\n def __init__(self, learningrate: float=0.01, learningrate_decay: float=\n 0.0, decayrate: float=0.99, epsilon: float=1e-08) ->None:\n from bigdl.dllib.optim.optimizer import RMSprop as BRMSprop\n self.optimizer = BRMSprop(learningrate, learningrate_decay,\n decayrate, epsilon, bigdl_type='float')\n\n def get_optimizer(self) ->'optimizer.RMSprop':\n return self.optimizer\n",
"step-5": "#\n# Copyright 2016 The BigDL Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nfrom abc import ABC, abstractmethod\n\nfrom bigdl.dllib.utils.common import DOUBLEMAX\nfrom bigdl.orca.learn.optimizers.schedule import Scheduler\nfrom bigdl.dllib.utils.log4Error import invalidInputError\n\nfrom typing import (Any, Optional, Dict, TYPE_CHECKING)\n\nif TYPE_CHECKING:\n from bigdl.dllib.optim import optimizer\n import numpy as np\n\n\nclass Optimizer(ABC):\n\n @abstractmethod\n def get_optimizer(self):\n pass\n\n\nclass SGD(Optimizer):\n \"\"\"\n A plain implementation of SGD\n\n :param learningrate learning rate\n :param learningrate_decay learning rate decay\n :param weightdecay weight decay\n :param momentum momentum\n :param dampening dampening for momentum\n :param nesterov enables Nesterov momentum\n :param learningrates 1D tensor of individual learning rates\n :param weightdecays 1D tensor of individual weight decays\n >>> sgd = SGD()\n creating: createDefault\n creating: createSGD\n \"\"\"\n\n def __init__(self,\n learningrate: float = 1e-3,\n learningrate_decay: float = 0.0,\n weightdecay: float = 0.0,\n momentum: float = 0.0,\n dampening: float = DOUBLEMAX,\n nesterov: bool = False,\n learningrate_schedule: Optional[\"Scheduler\"] = None,\n learningrates: Optional[\"np.ndarray\"] = None,\n weightdecays: Optional[\"np.ndarray\"] = None) -> None:\n from bigdl.dllib.optim.optimizer import SGD as BSGD\n invalidInputError(isinstance(learningrate_schedule, Scheduler),\n \"learningrate_schedule should be an \"\n \"bigdl.orca.learn.optimizers.schedule.Scheduler,\"\n \" but got {learningrate_schedule}\")\n self.optimizer = BSGD(learningrate,\n learningrate_decay,\n weightdecay,\n momentum,\n dampening,\n nesterov,\n learningrate_schedule.get_scheduler(), # type: ignore\n learningrates,\n weightdecays,\n bigdl_type=\"float\")\n\n def get_optimizer(self) -> \"optimizer.SGD\":\n return self.optimizer\n\n\nclass Adagrad(Optimizer):\n \"\"\"\n An implementation of Adagrad. See the original paper:\n http://jmlr.org/papers/volume12/duchi11a/duchi11a.pdf\n\n :param learningrate learning rate\n :param learningrate_decay learning rate decay\n :param weightdecay weight decay\n >>> adagrad = Adagrad()\n creating: createAdagrad\n \"\"\"\n\n def __init__(self,\n learningrate: float = 1e-3,\n learningrate_decay: float = 0.0,\n weightdecay: float = 0.0) -> None:\n from bigdl.dllib.optim.optimizer import Adagrad as BAdagrad\n self.optimizer = BAdagrad(learningrate, learningrate_decay,\n weightdecay, bigdl_type=\"float\")\n\n def get_optimizer(self) -> \"optimizer.Adagrad\":\n return self.optimizer\n\n\nclass LBFGS(Optimizer):\n \"\"\"\n This implementation of L-BFGS relies on a user-provided line\n search function (state.lineSearch). If this function is not\n provided, then a simple learningRate is used to produce fixed\n size steps. 
Fixed size steps are much less costly than line\n searches, and can be useful for stochastic problems.\n The learning rate is used even when a line search is provided.\n This is also useful for large-scale stochastic problems, where\n opfunc is a noisy approximation of f(x). In that case, the learning\n rate allows a reduction of confidence in the step size.\n\n :param max_iter Maximum number of iterations allowed\n :param max_eval Maximum number of function evaluations\n :param tolfun Termination tolerance on the first-order optimality\n :param tolx Termination tol on progress in terms of func/param changes\n :param ncorrection\n :param learningrate\n :param verbose\n :param linesearch A line search function\n :param linesearch_options If no line search provided, then a fixed step size is used\n >>> lbfgs = LBFGS()\n creating: createLBFGS\n \"\"\"\n\n def __init__(self,\n max_iter: int = 20,\n max_eval: float = DOUBLEMAX,\n tolfun: float = 1e-5,\n tolx: float = 1e-9,\n ncorrection: int = 100,\n learningrate: float = 1.0,\n verbose: bool = False,\n linesearch: Any = None,\n linesearch_options: Optional[Dict[Any, Any]]=None) -> None:\n from bigdl.dllib.optim.optimizer import LBFGS as BLBFGS\n self.optimizer = BLBFGS(\n max_iter,\n max_eval,\n tolfun,\n tolx,\n ncorrection,\n learningrate,\n verbose,\n linesearch,\n linesearch_options,\n bigdl_type=\"float\"\n )\n\n def get_optimizer(self) -> \"optimizer.LBFGS\":\n return self.optimizer\n\n\nclass Adadelta(Optimizer):\n \"\"\"\n Adadelta implementation for SGD: http://arxiv.org/abs/1212.5701\n\n :param decayrate interpolation parameter rho\n :param epsilon for numerical stability\n >>> adagrad = Adadelta()\n creating: createAdadelta\n \"\"\"\n\n def __init__(self,\n decayrate: float = 0.9,\n epsilon: float = 1e-10) -> None:\n from bigdl.dllib.optim.optimizer import Adadelta as BAdadelta\n self.optimizer = BAdadelta(decayrate,\n epsilon,\n bigdl_type=\"float\")\n\n def get_optimizer(self) -> \"optimizer.Adadelta\":\n return self.optimizer\n\n\nclass Adam(Optimizer):\n \"\"\"\n An implementation of Adam http://arxiv.org/pdf/1412.6980.pdf\n :param learningrate learning rate\n :param learningrate_decay learning rate decay\n :param beta1 first moment coefficient\n :param beta2 second moment coefficient\n :param epsilon for numerical stability\n >>> adam = Adam()\n creating: createAdam\n \"\"\"\n\n def __init__(self,\n learningrate: float = 1e-3,\n learningrate_decay: float = 0.0,\n beta1: float = 0.9,\n beta2: float = 0.999,\n epsilon: float = 1e-8) -> None:\n from bigdl.dllib.optim.optimizer import Adam as BAdam\n self.optimizer = BAdam(learningrate,\n learningrate_decay,\n beta1,\n beta2,\n epsilon,\n bigdl_type=\"float\")\n\n def get_optimizer(self) -> \"optimizer.Adam\":\n return self.optimizer\n\n\nclass ParallelAdam(Optimizer):\n \"\"\"\n An implementation of Adam http://arxiv.org/pdf/1412.6980.pdf\n :param learningrate learning rate\n :param learningrate_decay learning rate decay\n :param beta1 first moment coefficient\n :param beta2 second moment coefficient\n :param epsilon for numerical stability\n >>> pAdam = ParallelAdam()\n creating: createParallelAdam\n \"\"\"\n\n def __init__(self,\n learningrate: float = 1e-3,\n learningrate_decay: float = 0.0,\n beta1: float = 0.9,\n beta2: float = 0.999,\n epsilon: float = 1e-8,\n parallel_num: int = -1) -> None:\n from bigdl.dllib.optim.optimizer import ParallelAdam as BParallelAdam\n self.optimizer = BParallelAdam(learningrate,\n learningrate_decay,\n beta1,\n beta2,\n epsilon,\n 
parallel_num,\n bigdl_type=\"float\")\n\n def get_optimizer(self) -> \"optimizer.ParallelAdam\":\n return self.optimizer\n\n\nclass Ftrl(Optimizer):\n \"\"\"\n An implementation of Ftrl https://www.eecs.tufts.edu/~dsculley/papers/ad-click-prediction.pdf.\n Support L1 penalty, L2 penalty and shrinkage-type L2 penalty.\n\n :param learningrate learning rate\n :param learningrate_power double, must be less or equal to zero. Default is -0.5.\n :param initial_accumulator_value double, the starting value for accumulators,\n require zero or positive values.\n :param l1_regularization_strength double, must be greater or equal to zero. Default is zero.\n :param l2_regularization_strength double, must be greater or equal to zero. Default is zero.\n :param l2_shrinkage_regularization_strength double, must be greater or equal to zero.\n Default is zero. This differs from l2RegularizationStrength above. L2 above is a\n stabilization penalty, whereas this one is a magnitude penalty.\n >>> ftrl = Ftrl()\n creating: createFtrl\n >>> ftrl2 = Ftrl(1e-2, -0.1, 0.2, 0.3, 0.4, 0.5)\n creating: createFtrl\n \"\"\"\n\n def __init__(self,\n learningrate: float = 1e-3,\n learningrate_power: float = -0.5,\n initial_accumulator_value: float = 0.1,\n l1_regularization_strength: float = 0.0,\n l2_regularization_strength: float = 0.0,\n l2_shrinkage_regularization_strength: float = 0.0) -> None:\n from bigdl.dllib.optim.optimizer import Ftrl as BFtrl\n self.optimizer = BFtrl(learningrate,\n learningrate_power,\n initial_accumulator_value,\n l1_regularization_strength,\n l2_regularization_strength,\n l2_shrinkage_regularization_strength,\n bigdl_type=\"float\")\n\n def get_optimizer(self) -> \"optimizer.Ftrl\":\n return self.optimizer\n\n\nclass Adamax(Optimizer):\n \"\"\"\n An implementation of Adamax http://arxiv.org/pdf/1412.6980.pdf\n :param learningrate learning rate\n :param beta1 first moment coefficient\n :param beta2 second moment coefficient\n :param epsilon for numerical stability\n >>> adagrad = Adamax()\n creating: createAdamax\n \"\"\"\n\n def __init__(self,\n learningrate: float = 0.002,\n beta1: float = 0.9,\n beta2: float = 0.999,\n epsilon: float = 1e-38) -> None:\n from bigdl.dllib.optim.optimizer import Adamax as BAdamax\n self.optimizer = BAdamax(learningrate,\n beta1,\n beta2,\n epsilon,\n bigdl_type=\"float\")\n\n def get_optimizer(self) -> \"optimizer.Adamax\":\n return self.optimizer\n\n\nclass RMSprop(Optimizer):\n \"\"\"\n An implementation of RMSprop\n :param learningrate learning rate\n :param learningrate_decay learning rate decay\n :param decayrate decay rate, also called rho\n :param epsilon for numerical stability\n >>> adagrad = RMSprop()\n creating: createRMSprop\n \"\"\"\n\n def __init__(self,\n learningrate: float = 1e-2,\n learningrate_decay: float = 0.0,\n decayrate: float = 0.99,\n epsilon: float = 1e-8) -> None:\n from bigdl.dllib.optim.optimizer import RMSprop as BRMSprop\n self.optimizer = BRMSprop(learningrate,\n learningrate_decay,\n decayrate,\n epsilon,\n bigdl_type=\"float\")\n\n def get_optimizer(self) -> \"optimizer.RMSprop\":\n return self.optimizer\n",
"step-ids": [
19,
29,
33,
35,
41
]
}
|
[
19,
29,
33,
35,
41
] |
# Bayesian-network inference by stochastic simulation: logic (rejection) sampling and likelihood weighting.
import numpy as np
from functools import reduce
class ProbabilityNetwork:
def __init__(self,n,edges,probs):
self.nodes=list(range(n))
self.edges=edges
self.probs=probs
def parents(self, node):
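        # Return the parents of `node`: the source of every edge that ends at `node`.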
        return [a for a, b in self.edges if b == node]  # use self.edges, not the module-level global
def ancestralOrder(self):
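        # Build a topological (ancestral) order: a node is appended only once all of its parents are already in the order.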
order=[]
while len(order)<len(self.nodes):
for node in self.nodes:
if node in order:
continue
if not any((edge[0] not in order) and (edge[1]==node) for edge in self.edges):
order.append(node)
return order
def logicSampling(self, evidences, targetNode, niters=10000000):
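        # Logic (rejection) sampling: draw complete samples in ancestral order, discard any sample that
        # contradicts the evidence, and estimate P(target | evidence) from the surviving samples.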
evidenceNodes=evidences.keys()
ancestralOrder = self.ancestralOrder()
hits=0
total=0
for it in range(niters):
fail=False
values=dict([ [i,None] for i in self.nodes]) #True: present. False: not present
for node in ancestralOrder:
pNode=self.probs(node, values)
nodeValue=np.random.random()<pNode
values[node]=nodeValue
if node in evidences and evidences[node]!=values[node]:
fail=True
break
if fail: continue
#print(values)
total+=1
if values[targetNode]:
hits+=1
return hits/total
def weightedLikelihood(self, evidences, targetNode, niters=10000000):
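        # Likelihood weighting: clamp the evidence nodes, sample only the remaining nodes in ancestral order,
        # and weight each sample by the probability of the evidence values given their parents.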
evidenceNodes=evidences.keys()
ancestralOrder = [node for node in self.ancestralOrder() if node not in evidenceNodes]
cumsumHit=0
cumsumTotal=0
hits=0
for it in range(niters):
values=dict([ [i,None] for i in ancestralOrder]) #True: present. False: not present
for evNode in evidenceNodes:
values[evNode]=evidences[evNode]
for node in ancestralOrder:
pNode=self.probs(node, values)
nodeValue=np.random.random()<pNode
values[node]=nodeValue
currProb=reduce(lambda x,y:x*y, [self.probs(i,values) if values[i] else 1-self.probs(i,values) for i in evidenceNodes ])
if values[targetNode]:
cumsumHit+=currProb
cumsumTotal+=currProb
return cumsumHit/cumsumTotal
edges=[(0,1),(0,2),(1,3),(1,4),(2,4),(2,5)]
def probs(node,evidences):
if node==0: return 0.3
elif node==1:
if evidences[0]: return 0.9
else: return 0.2
elif node==2:
if evidences[0]: return 0.75
else: return 0.25
elif node==3:
if evidences[1]: return 0.6
else: return 0.1
elif node==4:
if evidences[1] and evidences[2]: return 0.8
elif evidences[1] and not evidences[2]: return 0.6
elif not evidences[1] and evidences[2]: return 0.5
else: return 0
elif node==5:
if evidences[2]: return 0.4
else: return 0.1
pn=ProbabilityNetwork(6, edges, probs)
evidences=dict([[3,True],[4,True],[5,False]])
print(pn.logicSampling(evidences, 0))
print(pn.weightedLikelihood(evidences,0))
|
normal
|
{
"blob_id": "24fa41f916b54345e4647354f972bd22e130decf",
"index": 4016,
"step-1": "<mask token>\n\n\nclass ProbabilityNetwork:\n\n def __init__(self, n, edges, probs):\n self.nodes = list(range(n))\n self.edges = edges\n self.probs = probs\n\n def parents(self, node):\n return [a for a, b in edges if b == node]\n\n def ancestralOrder(self):\n order = []\n while len(order) < len(self.nodes):\n for node in self.nodes:\n if node in order:\n continue\n if not any(edge[0] not in order and edge[1] == node for\n edge in self.edges):\n order.append(node)\n return order\n\n def logicSampling(self, evidences, targetNode, niters=10000000):\n evidenceNodes = evidences.keys()\n ancestralOrder = self.ancestralOrder()\n hits = 0\n total = 0\n for it in range(niters):\n fail = False\n values = dict([[i, None] for i in self.nodes])\n for node in ancestralOrder:\n pNode = self.probs(node, values)\n nodeValue = np.random.random() < pNode\n values[node] = nodeValue\n if node in evidences and evidences[node] != values[node]:\n fail = True\n break\n if fail:\n continue\n total += 1\n if values[targetNode]:\n hits += 1\n return hits / total\n\n def weightedLikelihood(self, evidences, targetNode, niters=10000000):\n evidenceNodes = evidences.keys()\n ancestralOrder = [node for node in self.ancestralOrder() if node not in\n evidenceNodes]\n cumsumHit = 0\n cumsumTotal = 0\n hits = 0\n for it in range(niters):\n values = dict([[i, None] for i in ancestralOrder])\n for evNode in evidenceNodes:\n values[evNode] = evidences[evNode]\n for node in ancestralOrder:\n pNode = self.probs(node, values)\n nodeValue = np.random.random() < pNode\n values[node] = nodeValue\n currProb = reduce(lambda x, y: x * y, [(self.probs(i, values) if\n values[i] else 1 - self.probs(i, values)) for i in\n evidenceNodes])\n if values[targetNode]:\n cumsumHit += currProb\n cumsumTotal += currProb\n return cumsumHit / cumsumTotal\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass ProbabilityNetwork:\n\n def __init__(self, n, edges, probs):\n self.nodes = list(range(n))\n self.edges = edges\n self.probs = probs\n\n def parents(self, node):\n return [a for a, b in edges if b == node]\n\n def ancestralOrder(self):\n order = []\n while len(order) < len(self.nodes):\n for node in self.nodes:\n if node in order:\n continue\n if not any(edge[0] not in order and edge[1] == node for\n edge in self.edges):\n order.append(node)\n return order\n\n def logicSampling(self, evidences, targetNode, niters=10000000):\n evidenceNodes = evidences.keys()\n ancestralOrder = self.ancestralOrder()\n hits = 0\n total = 0\n for it in range(niters):\n fail = False\n values = dict([[i, None] for i in self.nodes])\n for node in ancestralOrder:\n pNode = self.probs(node, values)\n nodeValue = np.random.random() < pNode\n values[node] = nodeValue\n if node in evidences and evidences[node] != values[node]:\n fail = True\n break\n if fail:\n continue\n total += 1\n if values[targetNode]:\n hits += 1\n return hits / total\n\n def weightedLikelihood(self, evidences, targetNode, niters=10000000):\n evidenceNodes = evidences.keys()\n ancestralOrder = [node for node in self.ancestralOrder() if node not in\n evidenceNodes]\n cumsumHit = 0\n cumsumTotal = 0\n hits = 0\n for it in range(niters):\n values = dict([[i, None] for i in ancestralOrder])\n for evNode in evidenceNodes:\n values[evNode] = evidences[evNode]\n for node in ancestralOrder:\n pNode = self.probs(node, values)\n nodeValue = np.random.random() < pNode\n values[node] = nodeValue\n currProb = reduce(lambda x, y: x * y, [(self.probs(i, values) if\n values[i] else 1 - self.probs(i, values)) for i in\n evidenceNodes])\n if values[targetNode]:\n cumsumHit += currProb\n cumsumTotal += currProb\n return cumsumHit / cumsumTotal\n\n\n<mask token>\n\n\ndef probs(node, evidences):\n if node == 0:\n return 0.3\n elif node == 1:\n if evidences[0]:\n return 0.9\n else:\n return 0.2\n elif node == 2:\n if evidences[0]:\n return 0.75\n else:\n return 0.25\n elif node == 3:\n if evidences[1]:\n return 0.6\n else:\n return 0.1\n elif node == 4:\n if evidences[1] and evidences[2]:\n return 0.8\n elif evidences[1] and not evidences[2]:\n return 0.6\n elif not evidences[1] and evidences[2]:\n return 0.5\n else:\n return 0\n elif node == 5:\n if evidences[2]:\n return 0.4\n else:\n return 0.1\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass ProbabilityNetwork:\n\n def __init__(self, n, edges, probs):\n self.nodes = list(range(n))\n self.edges = edges\n self.probs = probs\n\n def parents(self, node):\n return [a for a, b in edges if b == node]\n\n def ancestralOrder(self):\n order = []\n while len(order) < len(self.nodes):\n for node in self.nodes:\n if node in order:\n continue\n if not any(edge[0] not in order and edge[1] == node for\n edge in self.edges):\n order.append(node)\n return order\n\n def logicSampling(self, evidences, targetNode, niters=10000000):\n evidenceNodes = evidences.keys()\n ancestralOrder = self.ancestralOrder()\n hits = 0\n total = 0\n for it in range(niters):\n fail = False\n values = dict([[i, None] for i in self.nodes])\n for node in ancestralOrder:\n pNode = self.probs(node, values)\n nodeValue = np.random.random() < pNode\n values[node] = nodeValue\n if node in evidences and evidences[node] != values[node]:\n fail = True\n break\n if fail:\n continue\n total += 1\n if values[targetNode]:\n hits += 1\n return hits / total\n\n def weightedLikelihood(self, evidences, targetNode, niters=10000000):\n evidenceNodes = evidences.keys()\n ancestralOrder = [node for node in self.ancestralOrder() if node not in\n evidenceNodes]\n cumsumHit = 0\n cumsumTotal = 0\n hits = 0\n for it in range(niters):\n values = dict([[i, None] for i in ancestralOrder])\n for evNode in evidenceNodes:\n values[evNode] = evidences[evNode]\n for node in ancestralOrder:\n pNode = self.probs(node, values)\n nodeValue = np.random.random() < pNode\n values[node] = nodeValue\n currProb = reduce(lambda x, y: x * y, [(self.probs(i, values) if\n values[i] else 1 - self.probs(i, values)) for i in\n evidenceNodes])\n if values[targetNode]:\n cumsumHit += currProb\n cumsumTotal += currProb\n return cumsumHit / cumsumTotal\n\n\nedges = [(0, 1), (0, 2), (1, 3), (1, 4), (2, 4), (2, 5)]\n\n\ndef probs(node, evidences):\n if node == 0:\n return 0.3\n elif node == 1:\n if evidences[0]:\n return 0.9\n else:\n return 0.2\n elif node == 2:\n if evidences[0]:\n return 0.75\n else:\n return 0.25\n elif node == 3:\n if evidences[1]:\n return 0.6\n else:\n return 0.1\n elif node == 4:\n if evidences[1] and evidences[2]:\n return 0.8\n elif evidences[1] and not evidences[2]:\n return 0.6\n elif not evidences[1] and evidences[2]:\n return 0.5\n else:\n return 0\n elif node == 5:\n if evidences[2]:\n return 0.4\n else:\n return 0.1\n\n\npn = ProbabilityNetwork(6, edges, probs)\nevidences = dict([[3, True], [4, True], [5, False]])\nprint(pn.logicSampling(evidences, 0))\nprint(pn.weightedLikelihood(evidences, 0))\n",
"step-4": "import numpy as np\nfrom functools import reduce\n\n\nclass ProbabilityNetwork:\n\n def __init__(self, n, edges, probs):\n self.nodes = list(range(n))\n self.edges = edges\n self.probs = probs\n\n def parents(self, node):\n return [a for a, b in edges if b == node]\n\n def ancestralOrder(self):\n order = []\n while len(order) < len(self.nodes):\n for node in self.nodes:\n if node in order:\n continue\n if not any(edge[0] not in order and edge[1] == node for\n edge in self.edges):\n order.append(node)\n return order\n\n def logicSampling(self, evidences, targetNode, niters=10000000):\n evidenceNodes = evidences.keys()\n ancestralOrder = self.ancestralOrder()\n hits = 0\n total = 0\n for it in range(niters):\n fail = False\n values = dict([[i, None] for i in self.nodes])\n for node in ancestralOrder:\n pNode = self.probs(node, values)\n nodeValue = np.random.random() < pNode\n values[node] = nodeValue\n if node in evidences and evidences[node] != values[node]:\n fail = True\n break\n if fail:\n continue\n total += 1\n if values[targetNode]:\n hits += 1\n return hits / total\n\n def weightedLikelihood(self, evidences, targetNode, niters=10000000):\n evidenceNodes = evidences.keys()\n ancestralOrder = [node for node in self.ancestralOrder() if node not in\n evidenceNodes]\n cumsumHit = 0\n cumsumTotal = 0\n hits = 0\n for it in range(niters):\n values = dict([[i, None] for i in ancestralOrder])\n for evNode in evidenceNodes:\n values[evNode] = evidences[evNode]\n for node in ancestralOrder:\n pNode = self.probs(node, values)\n nodeValue = np.random.random() < pNode\n values[node] = nodeValue\n currProb = reduce(lambda x, y: x * y, [(self.probs(i, values) if\n values[i] else 1 - self.probs(i, values)) for i in\n evidenceNodes])\n if values[targetNode]:\n cumsumHit += currProb\n cumsumTotal += currProb\n return cumsumHit / cumsumTotal\n\n\nedges = [(0, 1), (0, 2), (1, 3), (1, 4), (2, 4), (2, 5)]\n\n\ndef probs(node, evidences):\n if node == 0:\n return 0.3\n elif node == 1:\n if evidences[0]:\n return 0.9\n else:\n return 0.2\n elif node == 2:\n if evidences[0]:\n return 0.75\n else:\n return 0.25\n elif node == 3:\n if evidences[1]:\n return 0.6\n else:\n return 0.1\n elif node == 4:\n if evidences[1] and evidences[2]:\n return 0.8\n elif evidences[1] and not evidences[2]:\n return 0.6\n elif not evidences[1] and evidences[2]:\n return 0.5\n else:\n return 0\n elif node == 5:\n if evidences[2]:\n return 0.4\n else:\n return 0.1\n\n\npn = ProbabilityNetwork(6, edges, probs)\nevidences = dict([[3, True], [4, True], [5, False]])\nprint(pn.logicSampling(evidences, 0))\nprint(pn.weightedLikelihood(evidences, 0))\n",
"step-5": "#YET TO COMMENT.\n\nimport numpy as np\nfrom functools import reduce\n\nclass ProbabilityNetwork:\n def __init__(self,n,edges,probs):\n self.nodes=list(range(n))\n self.edges=edges\n self.probs=probs\n\n def parents(self, node):\n return [a for a,b in edges if b==node]\n\n def ancestralOrder(self):\n order=[]\n while len(order)<len(self.nodes):\n for node in self.nodes:\n if node in order:\n continue\n if not any((edge[0] not in order) and (edge[1]==node) for edge in self.edges):\n order.append(node)\n return order\n\n def logicSampling(self, evidences, targetNode, niters=10000000):\n evidenceNodes=evidences.keys()\n ancestralOrder = self.ancestralOrder()\n hits=0\n total=0\n\n for it in range(niters):\n fail=False\n values=dict([ [i,None] for i in self.nodes]) #True: present. False: not present\n for node in ancestralOrder:\n pNode=self.probs(node, values)\n nodeValue=np.random.random()<pNode\n values[node]=nodeValue\n if node in evidences and evidences[node]!=values[node]:\n fail=True\n break\n\n if fail: continue\n\n #print(values)\n total+=1\n if values[targetNode]:\n hits+=1\n\n return hits/total\n\n def weightedLikelihood(self, evidences, targetNode, niters=10000000):\n evidenceNodes=evidences.keys()\n\n ancestralOrder = [node for node in self.ancestralOrder() if node not in evidenceNodes]\n cumsumHit=0\n cumsumTotal=0\n hits=0\n for it in range(niters):\n values=dict([ [i,None] for i in ancestralOrder]) #True: present. False: not present\n for evNode in evidenceNodes:\n values[evNode]=evidences[evNode]\n\n for node in ancestralOrder:\n pNode=self.probs(node, values)\n nodeValue=np.random.random()<pNode\n values[node]=nodeValue\n\n currProb=reduce(lambda x,y:x*y, [self.probs(i,values) if values[i] else 1-self.probs(i,values) for i in evidenceNodes ])\n if values[targetNode]:\n cumsumHit+=currProb\n\n cumsumTotal+=currProb\n\n return cumsumHit/cumsumTotal\n\n\n\nedges=[(0,1),(0,2),(1,3),(1,4),(2,4),(2,5)]\n\ndef probs(node,evidences):\n if node==0: return 0.3\n elif node==1:\n if evidences[0]: return 0.9\n else: return 0.2\n elif node==2:\n if evidences[0]: return 0.75\n else: return 0.25\n elif node==3:\n if evidences[1]: return 0.6\n else: return 0.1\n elif node==4:\n if evidences[1] and evidences[2]: return 0.8\n elif evidences[1] and not evidences[2]: return 0.6\n elif not evidences[1] and evidences[2]: return 0.5\n else: return 0\n elif node==5:\n if evidences[2]: return 0.4\n else: return 0.1\n\npn=ProbabilityNetwork(6, edges, probs)\n\nevidences=dict([[3,True],[4,True],[5,False]])\n\nprint(pn.logicSampling(evidences, 0))\nprint(pn.weightedLikelihood(evidences,0))\n\n\n\n",
"step-ids": [
6,
7,
9,
10,
11
]
}
|
[
6,
7,
9,
10,
11
] |
__author__ = "那位先生Beer"
import matplotlib.pyplot as plt
from matplotlib.font_manager import FontProperties
import xlrd
import numpy as np
print('输入鲈鱼的先验概率例如:70,对应70%')
a=input('输入鲈鱼的先验概率(鲑鱼对应的1减去剩余的):')
font_set = FontProperties(fname=r"c:\windows\fonts\simsun.ttc", size=15)
#Plot the generated data (x-axis: length, y-axis: lightness)
data=xlrd.open_workbook('xqtest.xls')
shxrange=range(data.nsheets)
sh=data.sheet_by_name("1")
L=[]
for i in range(0,(int(a))*50):
rowa_data=sh.row_values(i)
L.append(rowa_data)
L=np.array(L)
L=L[:,0:2]
G=[]
for j in range(5000,5000+(100-int(a))*50):
rowa_data = sh.row_values(j)
G.append(rowa_data)
G=np.array(G)
G=G[:,0:2]
plt.figure(figsize=(8,6))
plt.title("生成的鲈鱼和鲑鱼数据的散点图",fontproperties=font_set)
plt.xlabel("长度",fontproperties=font_set)
plt.ylabel("宽度",fontproperties=font_set)
plt.scatter(L[:,0],L[:,1],marker="o",label="鲈鱼")
plt.scatter(G[:,0],G[:,1],marker="s",label="鲑鱼")
# Classification model (a simple linear decision boundary)
x = np.linspace(0,8)
y = -x+9
plt.plot(x,y, color="red")
plt.legend()
plt.show()
#The simulated sea bass are smaller, so they fall below the line, i.e. x + y <= 9:
#Compute the classification accuracy
count=0
for i in L:
if i[0]+i[1]<=9:
count=count+1
q=(count/((int(a))*50))
print('鲈鱼准确率:%s'%(count/((int(a))*50)))
countG=0
for i in G:
if i[0]+i[1]>=9:
countG=countG+1
p=(countG/((100-int(a))*50))
print('鲑鱼准确率:%s'%(countG/((100-int(a))*50)))
#p(b)=p(b|a)*p(a) + p(b|-a)p(-a)
pb=(int(a)/100)*q + (1-(int(a)/100))*p
print(pb)
#p(ab)=p(b|a)*p(a)
pab=(int(a)/100)*q
print(pab)
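#posterior p(a|b) = p(ab) / p(b)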
print(pab/pb)
|
normal
|
{
"blob_id": "077b6d3d7417bbc26e9f23af6f437ff05e3d5771",
"index": 812,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint('输入鲈鱼的先验概率例如:70,对应70%')\n<mask token>\nfor i in range(0, int(a) * 50):\n rowa_data = sh.row_values(i)\n L.append(rowa_data)\n<mask token>\nfor j in range(5000, 5000 + (100 - int(a)) * 50):\n rowa_data = sh.row_values(j)\n G.append(rowa_data)\n<mask token>\nplt.figure(figsize=(8, 6))\nplt.title('生成的鲈鱼和鲑鱼数据的散点图', fontproperties=font_set)\nplt.xlabel('长度', fontproperties=font_set)\nplt.ylabel('宽度', fontproperties=font_set)\nplt.scatter(L[:, 0], L[:, 1], marker='o', label='鲈鱼')\nplt.scatter(G[:, 0], G[:, 1], marker='s', label='鲑鱼')\n<mask token>\nplt.plot(x, y, color='red')\nplt.legend()\nplt.show()\n<mask token>\nfor i in L:\n if i[0] + i[1] <= 9:\n count = count + 1\n<mask token>\nprint('鲈鱼准确率:%s' % (count / (int(a) * 50)))\n<mask token>\nfor i in G:\n if i[0] + i[1] >= 9:\n countG = countG + 1\n<mask token>\nprint('鲑鱼准确率:%s' % (countG / ((100 - int(a)) * 50)))\n<mask token>\nprint(pb)\n<mask token>\nprint(pab)\nprint(pab / pb)\n",
"step-3": "__author__ = '那位先生Beer'\n<mask token>\nprint('输入鲈鱼的先验概率例如:70,对应70%')\na = input('输入鲈鱼的先验概率(鲑鱼对应的1减去剩余的):')\nfont_set = FontProperties(fname='c:\\\\windows\\\\fonts\\\\simsun.ttc', size=15)\ndata = xlrd.open_workbook('xqtest.xls')\nshxrange = range(data.nsheets)\nsh = data.sheet_by_name('1')\nL = []\nfor i in range(0, int(a) * 50):\n rowa_data = sh.row_values(i)\n L.append(rowa_data)\nL = np.array(L)\nL = L[:, 0:2]\nG = []\nfor j in range(5000, 5000 + (100 - int(a)) * 50):\n rowa_data = sh.row_values(j)\n G.append(rowa_data)\nG = np.array(G)\nG = G[:, 0:2]\nplt.figure(figsize=(8, 6))\nplt.title('生成的鲈鱼和鲑鱼数据的散点图', fontproperties=font_set)\nplt.xlabel('长度', fontproperties=font_set)\nplt.ylabel('宽度', fontproperties=font_set)\nplt.scatter(L[:, 0], L[:, 1], marker='o', label='鲈鱼')\nplt.scatter(G[:, 0], G[:, 1], marker='s', label='鲑鱼')\nx = np.linspace(0, 8)\ny = -x + 9\nplt.plot(x, y, color='red')\nplt.legend()\nplt.show()\ncount = 0\nfor i in L:\n if i[0] + i[1] <= 9:\n count = count + 1\nq = count / (int(a) * 50)\nprint('鲈鱼准确率:%s' % (count / (int(a) * 50)))\ncountG = 0\nfor i in G:\n if i[0] + i[1] >= 9:\n countG = countG + 1\np = countG / ((100 - int(a)) * 50)\nprint('鲑鱼准确率:%s' % (countG / ((100 - int(a)) * 50)))\npb = int(a) / 100 * q + (1 - int(a) / 100) * p\nprint(pb)\npab = int(a) / 100 * q\nprint(pab)\nprint(pab / pb)\n",
"step-4": "__author__ = '那位先生Beer'\nimport matplotlib.pyplot as plt\nfrom matplotlib.font_manager import FontProperties\nimport xlrd\nimport numpy as np\nprint('输入鲈鱼的先验概率例如:70,对应70%')\na = input('输入鲈鱼的先验概率(鲑鱼对应的1减去剩余的):')\nfont_set = FontProperties(fname='c:\\\\windows\\\\fonts\\\\simsun.ttc', size=15)\ndata = xlrd.open_workbook('xqtest.xls')\nshxrange = range(data.nsheets)\nsh = data.sheet_by_name('1')\nL = []\nfor i in range(0, int(a) * 50):\n rowa_data = sh.row_values(i)\n L.append(rowa_data)\nL = np.array(L)\nL = L[:, 0:2]\nG = []\nfor j in range(5000, 5000 + (100 - int(a)) * 50):\n rowa_data = sh.row_values(j)\n G.append(rowa_data)\nG = np.array(G)\nG = G[:, 0:2]\nplt.figure(figsize=(8, 6))\nplt.title('生成的鲈鱼和鲑鱼数据的散点图', fontproperties=font_set)\nplt.xlabel('长度', fontproperties=font_set)\nplt.ylabel('宽度', fontproperties=font_set)\nplt.scatter(L[:, 0], L[:, 1], marker='o', label='鲈鱼')\nplt.scatter(G[:, 0], G[:, 1], marker='s', label='鲑鱼')\nx = np.linspace(0, 8)\ny = -x + 9\nplt.plot(x, y, color='red')\nplt.legend()\nplt.show()\ncount = 0\nfor i in L:\n if i[0] + i[1] <= 9:\n count = count + 1\nq = count / (int(a) * 50)\nprint('鲈鱼准确率:%s' % (count / (int(a) * 50)))\ncountG = 0\nfor i in G:\n if i[0] + i[1] >= 9:\n countG = countG + 1\np = countG / ((100 - int(a)) * 50)\nprint('鲑鱼准确率:%s' % (countG / ((100 - int(a)) * 50)))\npb = int(a) / 100 * q + (1 - int(a) / 100) * p\nprint(pb)\npab = int(a) / 100 * q\nprint(pab)\nprint(pab / pb)\n",
"step-5": "__author__ = \"那位先生Beer\"\nimport matplotlib.pyplot as plt\nfrom matplotlib.font_manager import FontProperties\nimport xlrd\nimport numpy as np\nprint('输入鲈鱼的先验概率例如:70,对应70%')\na=input('输入鲈鱼的先验概率(鲑鱼对应的1减去剩余的):')\nfont_set = FontProperties(fname=r\"c:\\windows\\fonts\\simsun.ttc\", size=15)\n#根据生成的数据画出图像(横坐标为长度,纵坐标为亮度)\ndata=xlrd.open_workbook('xqtest.xls')\nshxrange=range(data.nsheets)\nsh=data.sheet_by_name(\"1\")\nL=[]\nfor i in range(0,(int(a))*50):\n rowa_data=sh.row_values(i)\n L.append(rowa_data)\nL=np.array(L)\nL=L[:,0:2]\n\nG=[]\nfor j in range(5000,5000+(100-int(a))*50):\n rowa_data = sh.row_values(j)\n G.append(rowa_data)\nG=np.array(G)\nG=G[:,0:2]\nplt.figure(figsize=(8,6))\nplt.title(\"生成的鲈鱼和鲑鱼数据的散点图\",fontproperties=font_set)\nplt.xlabel(\"长度\",fontproperties=font_set)\nplt.ylabel(\"宽度\",fontproperties=font_set)\nplt.scatter(L[:,0],L[:,1],marker=\"o\",label=\"鲈鱼\")\nplt.scatter(G[:,0],G[:,1],marker=\"s\",label=\"鲑鱼\")\n# 分类模型\nx = np.linspace(0,8)\ny = -x+9\nplt.plot(x,y, color=\"red\")\nplt.legend()\nplt.show()\n\n\n#模拟的数据鲈鱼比较小,可得出其在直线下面,即y+x<=9:\n#计算准确率\ncount=0\nfor i in L:\n if i[0]+i[1]<=9:\n count=count+1\nq=(count/((int(a))*50))\nprint('鲈鱼准确率:%s'%(count/((int(a))*50)))\ncountG=0\nfor i in G:\n if i[0]+i[1]>=9:\n countG=countG+1\np=(countG/((100-int(a))*50))\nprint('鲑鱼准确率:%s'%(countG/((100-int(a))*50)))\n\n#p(b)=p(b|a)*p(a) + p(b|-a)p(-a)\npb=(int(a)/100)*q + (1-(int(a)/100))*p\nprint(pb)\n#p(ab)=p(b|a)*p(a)\npab=(int(a)/100)*q\nprint(pab)\nprint(pab/pb)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
#!/usr/bin/env python
from postimg import postimg
import argparse
import pyperclip
import json
def main(args):
if not args.quiet:
print("Uploading.....")
resp = postimg.Imgur(args.img_path).upload()
if not resp['success']:
if not args.quiet:
print(json.dumps(resp, sort_keys=True, indent=4, separators=(',', ': ')))
print("Unable to upload !!!")
return None
link = resp['data']['link']
if args.github:
        link = '' % link  # assumed: GitHub/Markdown image syntax; the original format string here was empty and would raise a TypeError
elif args.reddit:
link = '[Reddit](%s)'%link
elif args.html:
link = '<img src="%s" alt="snap">'%link
pyperclip.copy(link)
print(link)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Post/upload image on imgur.com', epilog='link will automatically copied to clipboard')
parser.add_argument('img_path', type=str, help='image path of file')
parser.add_argument('--github', action='store_true', help='Github markdown code of imgur url')
parser.add_argument('--html', action='store_true', help='html <img> code of imgur url')
parser.add_argument('--reddit', action='store_true', help='reddit markdown code of imgur url')
parser.add_argument('-q','--quiet', action='store_true', help='print only img url without verbose output')
args = parser.parse_args()
try:
main(args)
except KeyboardInterrupt:
print("Error: Interrupted by user!!")
|
normal
|
{
"blob_id": "705755340eef72470fc982ebd0004456469d23e4",
"index": 4859,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef main(args):\n if not args.quiet:\n print('Uploading.....')\n resp = postimg.Imgur(args.img_path).upload()\n if not resp['success']:\n if not args.quiet:\n print(json.dumps(resp, sort_keys=True, indent=4, separators=(\n ',', ': ')))\n print('Unable to upload !!!')\n return None\n link = resp['data']['link']\n if args.github:\n link = '' % link\n elif args.reddit:\n link = '[Reddit](%s)' % link\n elif args.html:\n link = '<img src=\"%s\" alt=\"snap\">' % link\n pyperclip.copy(link)\n print(link)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef main(args):\n if not args.quiet:\n print('Uploading.....')\n resp = postimg.Imgur(args.img_path).upload()\n if not resp['success']:\n if not args.quiet:\n print(json.dumps(resp, sort_keys=True, indent=4, separators=(\n ',', ': ')))\n print('Unable to upload !!!')\n return None\n link = resp['data']['link']\n if args.github:\n link = '' % link\n elif args.reddit:\n link = '[Reddit](%s)' % link\n elif args.html:\n link = '<img src=\"%s\" alt=\"snap\">' % link\n pyperclip.copy(link)\n print(link)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description=\n 'Post/upload image on imgur.com', epilog=\n 'link will automatically copied to clipboard')\n parser.add_argument('img_path', type=str, help='image path of file')\n parser.add_argument('--github', action='store_true', help=\n 'Github markdown code of imgur url')\n parser.add_argument('--html', action='store_true', help=\n 'html <img> code of imgur url')\n parser.add_argument('--reddit', action='store_true', help=\n 'reddit markdown code of imgur url')\n parser.add_argument('-q', '--quiet', action='store_true', help=\n 'print only img url without verbose output')\n args = parser.parse_args()\n try:\n main(args)\n except KeyboardInterrupt:\n print('Error: Interrupted by user!!')\n",
"step-4": "from postimg import postimg\nimport argparse\nimport pyperclip\nimport json\n\n\ndef main(args):\n if not args.quiet:\n print('Uploading.....')\n resp = postimg.Imgur(args.img_path).upload()\n if not resp['success']:\n if not args.quiet:\n print(json.dumps(resp, sort_keys=True, indent=4, separators=(\n ',', ': ')))\n print('Unable to upload !!!')\n return None\n link = resp['data']['link']\n if args.github:\n link = '' % link\n elif args.reddit:\n link = '[Reddit](%s)' % link\n elif args.html:\n link = '<img src=\"%s\" alt=\"snap\">' % link\n pyperclip.copy(link)\n print(link)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description=\n 'Post/upload image on imgur.com', epilog=\n 'link will automatically copied to clipboard')\n parser.add_argument('img_path', type=str, help='image path of file')\n parser.add_argument('--github', action='store_true', help=\n 'Github markdown code of imgur url')\n parser.add_argument('--html', action='store_true', help=\n 'html <img> code of imgur url')\n parser.add_argument('--reddit', action='store_true', help=\n 'reddit markdown code of imgur url')\n parser.add_argument('-q', '--quiet', action='store_true', help=\n 'print only img url without verbose output')\n args = parser.parse_args()\n try:\n main(args)\n except KeyboardInterrupt:\n print('Error: Interrupted by user!!')\n",
"step-5": "#!/usr/bin/env python\nfrom postimg import postimg\nimport argparse\nimport pyperclip\nimport json\ndef main(args):\n if not args.quiet:\n print(\"Uploading.....\")\n resp = postimg.Imgur(args.img_path).upload()\n if not resp['success']:\n if not args.quiet:\n print(json.dumps(resp, sort_keys=True, indent=4, separators=(',', ': ')))\n print(\"Unable to upload !!!\")\n return None\n link = resp['data']['link']\n if args.github:\n link = ''%link\n elif args.reddit:\n link = '[Reddit](%s)'%link\n elif args.html:\n link = '<img src=\"%s\" alt=\"snap\">'%link\n pyperclip.copy(link)\n print(link)\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='Post/upload image on imgur.com', epilog='link will automatically copied to clipboard')\n parser.add_argument('img_path', type=str, help='image path of file')\n parser.add_argument('--github', action='store_true', help='Github markdown code of imgur url')\n parser.add_argument('--html', action='store_true', help='html <img> code of imgur url')\n parser.add_argument('--reddit', action='store_true', help='reddit markdown code of imgur url')\n parser.add_argument('-q','--quiet', action='store_true', help='print only img url without verbose output')\n args = parser.parse_args()\n try:\n main(args)\n except KeyboardInterrupt:\n print(\"Error: Interrupted by user!!\")\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
""" Python Package Support """
# Not applicable
""" Django Package Support """
# Not applicable
""" Internal Package Support """
from Data_Base.models import School, Person, Child
"""
Data_Base/Data/Imports/child_import.py
Author: Matthew J Swann;
Yong Kin;
Bradon Atkins; and
Adam Carter
Version: 1.0
Last Update: 2013-04-07
Update By: Matthew J Swann
Importing data to the person table.
"""
class ChildImport(object):
def __init__(self, scriptName=None):
# 1
x = Child.objects.create(
first_name = 'Timmy',
last_name = 'Thompson',
school = School.objects.get(pk=1),
)
x.family.add(Person.objects.get(pk=1))
x.family.add(Person.objects.get(pk=2))
x.save()
# 2
x = Child.objects.create(
first_name = 'Jimmy',
last_name = 'Johnson',
school = School.objects.get(pk=2),
)
x.family.add(Person.objects.get(pk=2))
x.family.add(Person.objects.get(pk=1))
x.save()
# 3
x = Child.objects.create(
first_name = 'Bart',
last_name = 'Simpson',
school = School.objects.get(pk=3),
)
x.family.add(Person.objects.get(pk=3))
x.family.add(Person.objects.get(pk=4))
x.save()
# 4
x = Child.objects.create(
first_name = 'Lisa',
last_name = 'Simpson',
school = School.objects.get(pk=4),
)
x.family.add(Person.objects.get(pk=4))
x.family.add(Person.objects.get(pk=3))
x.save()
# 5
x = Child.objects.create(
first_name = 'Andrew',
last_name = 'Becker',
school = School.objects.get(pk=5),
)
x.family.add(Person.objects.get(pk=5))
x.family.add(Person.objects.get(pk=6))
x.save()
# 6
x = Child.objects.create(
first_name = 'Jasmine',
last_name = 'Goulette',
school = School.objects.get(pk=6),
)
x.family.add(Person.objects.get(pk=6))
x.family.add(Person.objects.get(pk=5))
x.save()
# 7
x = Child.objects.create(
first_name = 'Kristina',
last_name = 'Murry',
school = School.objects.get(pk=7),
)
x.family.add(Person.objects.get(pk=7))
x.family.add(Person.objects.get(pk=8))
x.save()
# 8
x = Child.objects.create(
first_name = 'Andrew',
last_name = 'Scheonster',
school = School.objects.get(pk=8),
)
x.family.add(Person.objects.get(pk=8))
x.family.add(Person.objects.get(pk=7))
x.save()
|
normal
|
{
"blob_id": "d0287b057530883a50ad9c1e5e74dce10cd825b6",
"index": 7961,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass ChildImport(object):\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass ChildImport(object):\n\n def __init__(self, scriptName=None):\n x = Child.objects.create(first_name='Timmy', last_name='Thompson',\n school=School.objects.get(pk=1))\n x.family.add(Person.objects.get(pk=1))\n x.family.add(Person.objects.get(pk=2))\n x.save()\n x = Child.objects.create(first_name='Jimmy', last_name='Johnson',\n school=School.objects.get(pk=2))\n x.family.add(Person.objects.get(pk=2))\n x.family.add(Person.objects.get(pk=1))\n x.save()\n x = Child.objects.create(first_name='Bart', last_name='Simpson',\n school=School.objects.get(pk=3))\n x.family.add(Person.objects.get(pk=3))\n x.family.add(Person.objects.get(pk=4))\n x.save()\n x = Child.objects.create(first_name='Lisa', last_name='Simpson',\n school=School.objects.get(pk=4))\n x.family.add(Person.objects.get(pk=4))\n x.family.add(Person.objects.get(pk=3))\n x.save()\n x = Child.objects.create(first_name='Andrew', last_name='Becker',\n school=School.objects.get(pk=5))\n x.family.add(Person.objects.get(pk=5))\n x.family.add(Person.objects.get(pk=6))\n x.save()\n x = Child.objects.create(first_name='Jasmine', last_name='Goulette',\n school=School.objects.get(pk=6))\n x.family.add(Person.objects.get(pk=6))\n x.family.add(Person.objects.get(pk=5))\n x.save()\n x = Child.objects.create(first_name='Kristina', last_name='Murry',\n school=School.objects.get(pk=7))\n x.family.add(Person.objects.get(pk=7))\n x.family.add(Person.objects.get(pk=8))\n x.save()\n x = Child.objects.create(first_name='Andrew', last_name=\n 'Scheonster', school=School.objects.get(pk=8))\n x.family.add(Person.objects.get(pk=8))\n x.family.add(Person.objects.get(pk=7))\n x.save()\n",
"step-4": "<mask token>\nfrom Data_Base.models import School, Person, Child\n<mask token>\n\n\nclass ChildImport(object):\n\n def __init__(self, scriptName=None):\n x = Child.objects.create(first_name='Timmy', last_name='Thompson',\n school=School.objects.get(pk=1))\n x.family.add(Person.objects.get(pk=1))\n x.family.add(Person.objects.get(pk=2))\n x.save()\n x = Child.objects.create(first_name='Jimmy', last_name='Johnson',\n school=School.objects.get(pk=2))\n x.family.add(Person.objects.get(pk=2))\n x.family.add(Person.objects.get(pk=1))\n x.save()\n x = Child.objects.create(first_name='Bart', last_name='Simpson',\n school=School.objects.get(pk=3))\n x.family.add(Person.objects.get(pk=3))\n x.family.add(Person.objects.get(pk=4))\n x.save()\n x = Child.objects.create(first_name='Lisa', last_name='Simpson',\n school=School.objects.get(pk=4))\n x.family.add(Person.objects.get(pk=4))\n x.family.add(Person.objects.get(pk=3))\n x.save()\n x = Child.objects.create(first_name='Andrew', last_name='Becker',\n school=School.objects.get(pk=5))\n x.family.add(Person.objects.get(pk=5))\n x.family.add(Person.objects.get(pk=6))\n x.save()\n x = Child.objects.create(first_name='Jasmine', last_name='Goulette',\n school=School.objects.get(pk=6))\n x.family.add(Person.objects.get(pk=6))\n x.family.add(Person.objects.get(pk=5))\n x.save()\n x = Child.objects.create(first_name='Kristina', last_name='Murry',\n school=School.objects.get(pk=7))\n x.family.add(Person.objects.get(pk=7))\n x.family.add(Person.objects.get(pk=8))\n x.save()\n x = Child.objects.create(first_name='Andrew', last_name=\n 'Scheonster', school=School.objects.get(pk=8))\n x.family.add(Person.objects.get(pk=8))\n x.family.add(Person.objects.get(pk=7))\n x.save()\n",
"step-5": "\"\"\" Python Package Support \"\"\"\n# Not applicable\n\n\"\"\" Django Package Support \"\"\"\n# Not applicable\n\n\"\"\" Internal Package Support \"\"\"\nfrom Data_Base.models import School, Person, Child\n\n\"\"\"\n Data_Base/Data/Imports/child_import.py\n \n Author: Matthew J Swann; \n Yong Kin; \n Bradon Atkins; and \n Adam Carter\n \n Version: 1.0\n Last Update: 2013-04-07\n Update By: Matthew J Swann\n \n Importing data to the person table.\n\n \"\"\"\n \nclass ChildImport(object):\n \n def __init__(self, scriptName=None):\n \n # 1\n x = Child.objects.create(\n first_name = 'Timmy',\n last_name = 'Thompson',\n school = School.objects.get(pk=1), \n )\n x.family.add(Person.objects.get(pk=1))\n x.family.add(Person.objects.get(pk=2))\n x.save()\n\n # 2\n x = Child.objects.create(\n first_name = 'Jimmy',\n last_name = 'Johnson',\n school = School.objects.get(pk=2), \n )\n x.family.add(Person.objects.get(pk=2))\n x.family.add(Person.objects.get(pk=1))\n x.save()\n \n # 3\n x = Child.objects.create(\n first_name = 'Bart',\n last_name = 'Simpson',\n school = School.objects.get(pk=3), \n )\n x.family.add(Person.objects.get(pk=3))\n x.family.add(Person.objects.get(pk=4))\n x.save()\n \n # 4\n x = Child.objects.create(\n first_name = 'Lisa',\n last_name = 'Simpson',\n school = School.objects.get(pk=4), \n )\n x.family.add(Person.objects.get(pk=4))\n x.family.add(Person.objects.get(pk=3))\n x.save()\n \n # 5\n x = Child.objects.create(\n first_name = 'Andrew',\n last_name = 'Becker',\n school = School.objects.get(pk=5), \n )\n x.family.add(Person.objects.get(pk=5))\n x.family.add(Person.objects.get(pk=6))\n x.save()\n \n # 6\n x = Child.objects.create(\n first_name = 'Jasmine',\n last_name = 'Goulette',\n school = School.objects.get(pk=6), \n )\n x.family.add(Person.objects.get(pk=6))\n x.family.add(Person.objects.get(pk=5))\n x.save()\n \n # 7\n x = Child.objects.create(\n first_name = 'Kristina',\n last_name = 'Murry',\n school = School.objects.get(pk=7), \n )\n x.family.add(Person.objects.get(pk=7))\n x.family.add(Person.objects.get(pk=8))\n x.save()\n\n # 8\n x = Child.objects.create(\n first_name = 'Andrew',\n last_name = 'Scheonster',\n school = School.objects.get(pk=8), \n )\n x.family.add(Person.objects.get(pk=8))\n x.family.add(Person.objects.get(pk=7))\n x.save()\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from django.db import models
# Create your models here.
class Advertisement(models.Model):
title = models.CharField(max_length=1500, db_index=True, verbose_name='Заголовок')
description = models.TextField(blank=True)
created_at = models.DateTimeField(auto_now_add=True)
update_at = models.DateTimeField(auto_now=True)
price = models.FloatField(verbose_name='цена', default=0)
views_count = models.IntegerField(verbose_name='количество просмотров', default=0)
status = models.ForeignKey('AdvertisementStatus', default=None,
null=True, on_delete=models.CASCADE,
related_name='advertisements', verbose_name='Статус')
def __str__(self):
return self.title
class Meta:
db_table = 'advertisements'
ordering = ['title']
class AdvertisementStatus(models.Model):
name = models.CharField(max_length=100)
def __str__(self):
return self.name
class Authors(models.Model):
name = models.CharField(max_length=20, db_index=True, verbose_name='ФИО')
email = models.EmailField()
phone = models.CharField(max_length=20, verbose_name='Телефон')
def __str__(self):
return self.name
|
normal
|
{
"blob_id": "c5bdbcc8ba38b02e5e5cf8b53362e87ba761443d",
"index": 8654,
"step-1": "<mask token>\n\n\nclass AdvertisementStatus(models.Model):\n name = models.CharField(max_length=100)\n\n def __str__(self):\n return self.name\n\n\nclass Authors(models.Model):\n name = models.CharField(max_length=20, db_index=True, verbose_name='ФИО')\n email = models.EmailField()\n phone = models.CharField(max_length=20, verbose_name='Телефон')\n\n def __str__(self):\n return self.name\n",
"step-2": "<mask token>\n\n\nclass Advertisement(models.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\n class Meta:\n db_table = 'advertisements'\n ordering = ['title']\n\n\nclass AdvertisementStatus(models.Model):\n name = models.CharField(max_length=100)\n\n def __str__(self):\n return self.name\n\n\nclass Authors(models.Model):\n name = models.CharField(max_length=20, db_index=True, verbose_name='ФИО')\n email = models.EmailField()\n phone = models.CharField(max_length=20, verbose_name='Телефон')\n\n def __str__(self):\n return self.name\n",
"step-3": "<mask token>\n\n\nclass Advertisement(models.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def __str__(self):\n return self.title\n\n\n class Meta:\n db_table = 'advertisements'\n ordering = ['title']\n\n\nclass AdvertisementStatus(models.Model):\n name = models.CharField(max_length=100)\n\n def __str__(self):\n return self.name\n\n\nclass Authors(models.Model):\n name = models.CharField(max_length=20, db_index=True, verbose_name='ФИО')\n email = models.EmailField()\n phone = models.CharField(max_length=20, verbose_name='Телефон')\n\n def __str__(self):\n return self.name\n",
"step-4": "from django.db import models\n\n\nclass Advertisement(models.Model):\n title = models.CharField(max_length=1500, db_index=True, verbose_name=\n 'Заголовок')\n description = models.TextField(blank=True)\n created_at = models.DateTimeField(auto_now_add=True)\n update_at = models.DateTimeField(auto_now=True)\n price = models.FloatField(verbose_name='цена', default=0)\n views_count = models.IntegerField(verbose_name='количество просмотров',\n default=0)\n status = models.ForeignKey('AdvertisementStatus', default=None, null=\n True, on_delete=models.CASCADE, related_name='advertisements',\n verbose_name='Статус')\n\n def __str__(self):\n return self.title\n\n\n class Meta:\n db_table = 'advertisements'\n ordering = ['title']\n\n\nclass AdvertisementStatus(models.Model):\n name = models.CharField(max_length=100)\n\n def __str__(self):\n return self.name\n\n\nclass Authors(models.Model):\n name = models.CharField(max_length=20, db_index=True, verbose_name='ФИО')\n email = models.EmailField()\n phone = models.CharField(max_length=20, verbose_name='Телефон')\n\n def __str__(self):\n return self.name\n",
"step-5": "from django.db import models\n\n# Create your models here.\n\n\nclass Advertisement(models.Model):\n title = models.CharField(max_length=1500, db_index=True, verbose_name='Заголовок')\n description = models.TextField(blank=True)\n created_at = models.DateTimeField(auto_now_add=True)\n update_at = models.DateTimeField(auto_now=True)\n price = models.FloatField(verbose_name='цена', default=0)\n views_count = models.IntegerField(verbose_name='количество просмотров', default=0)\n status = models.ForeignKey('AdvertisementStatus', default=None,\n null=True, on_delete=models.CASCADE,\n related_name='advertisements', verbose_name='Статус')\n\n def __str__(self):\n return self.title\n\n class Meta:\n db_table = 'advertisements'\n ordering = ['title']\n\nclass AdvertisementStatus(models.Model):\n name = models.CharField(max_length=100)\n\n def __str__(self):\n return self.name\n\n\nclass Authors(models.Model):\n name = models.CharField(max_length=20, db_index=True, verbose_name='ФИО')\n email = models.EmailField()\n phone = models.CharField(max_length=20, verbose_name='Телефон')\n\n def __str__(self):\n return self.name\n\n\n",
"step-ids": [
6,
7,
8,
10,
11
]
}
|
[
6,
7,
8,
10,
11
] |
from sklearn.datasets import make_regression
from sklearn.model_selection import train_test_split
import random
def sim_data():
# Parameters
n_samples = random.randint(500, 5000)
n_features = random.randint(5, 25)
n_informative = random.randint(5, n_features)
noise = random.uniform(0.5, 2)
# Simulate data
X, y = make_regression(n_samples=n_samples,
n_features=n_features,
n_informative=n_informative,
noise=noise)
# Train test split
X_train, X_test, y_train, y_test = train_test_split(X, y)
# Param dict
params = {"n_samples": n_samples,
"n_features": n_features,
"n_informative": n_informative,
"noise": noise}
# Return
return X_train, y_train, X_test, y_test, params
|
normal
|
{
"blob_id": "c4aa5869d5f916f13aa924c19dc9792337619b31",
"index": 4011,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef sim_data():\n n_samples = random.randint(500, 5000)\n n_features = random.randint(5, 25)\n n_informative = random.randint(5, n_features)\n noise = random.uniform(0.5, 2)\n X, y = make_regression(n_samples=n_samples, n_features=n_features,\n n_informative=n_informative, noise=noise)\n X_train, X_test, y_train, y_test = train_test_split(X, y)\n params = {'n_samples': n_samples, 'n_features': n_features,\n 'n_informative': n_informative, 'noise': noise}\n return X_train, y_train, X_test, y_test, params\n",
"step-3": "from sklearn.datasets import make_regression\nfrom sklearn.model_selection import train_test_split\nimport random\n\n\ndef sim_data():\n n_samples = random.randint(500, 5000)\n n_features = random.randint(5, 25)\n n_informative = random.randint(5, n_features)\n noise = random.uniform(0.5, 2)\n X, y = make_regression(n_samples=n_samples, n_features=n_features,\n n_informative=n_informative, noise=noise)\n X_train, X_test, y_train, y_test = train_test_split(X, y)\n params = {'n_samples': n_samples, 'n_features': n_features,\n 'n_informative': n_informative, 'noise': noise}\n return X_train, y_train, X_test, y_test, params\n",
"step-4": "from sklearn.datasets import make_regression\nfrom sklearn.model_selection import train_test_split\nimport random\n\ndef sim_data():\n\n # Parameters\n n_samples = random.randint(500, 5000)\n n_features = random.randint(5, 25)\n n_informative = random.randint(5, n_features)\n noise = random.uniform(0.5, 2)\n\n # Simulate data\n X, y = make_regression(n_samples=n_samples,\n n_features=n_features,\n n_informative=n_informative,\n noise=noise)\n\n # Train test split\n X_train, X_test, y_train, y_test = train_test_split(X, y)\n\n # Param dict\n params = {\"n_samples\": n_samples,\n \"n_features\": n_features,\n \"n_informative\": n_informative,\n \"noise\": noise}\n\n # Return\n return X_train, y_train, X_test, y_test, params\n\n\n\n\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from modules.core.logging.logging_service import LoggingService
from modules.core.logging.models import LogLevel, LogEntry
import pytest
from .setup import register_test_db, register_test_injections, teardown,\
drop_all_collections
@pytest.fixture(autouse=True)
def setup():
register_test_db()
register_test_injections()
def test_mongo_logging_client_persists_log():
"""
Test to see if the mongodb client logger
can persist a log entry to the database
"""
error_message = "This is a test message."
logger = LoggingService(console_output=True)
result = logger.log(LogEntry(LogLevel.ERROR, __name__, error_message))
logger.log(LogEntry(LogLevel.WARN, __name__, error_message))
logger.log(LogEntry(LogLevel.INFO, __name__, error_message))
logger.log(LogEntry(LogLevel.DEBUG, __name__, error_message))
assert result.message == error_message
def tests_teardown():
drop_all_collections()
teardown()
|
normal
|
{
"blob_id": "a29cf9e7006d52cea8f5ccdcbc2087983ffa3ef3",
"index": 2973,
"step-1": "<mask token>\n\n\ndef test_mongo_logging_client_persists_log():\n \"\"\"\n Test to see if the mongodb client logger\n can persist a log entry to the database\n \"\"\"\n error_message = 'This is a test message.'\n logger = LoggingService(console_output=True)\n result = logger.log(LogEntry(LogLevel.ERROR, __name__, error_message))\n logger.log(LogEntry(LogLevel.WARN, __name__, error_message))\n logger.log(LogEntry(LogLevel.INFO, __name__, error_message))\n logger.log(LogEntry(LogLevel.DEBUG, __name__, error_message))\n assert result.message == error_message\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef test_mongo_logging_client_persists_log():\n \"\"\"\n Test to see if the mongodb client logger\n can persist a log entry to the database\n \"\"\"\n error_message = 'This is a test message.'\n logger = LoggingService(console_output=True)\n result = logger.log(LogEntry(LogLevel.ERROR, __name__, error_message))\n logger.log(LogEntry(LogLevel.WARN, __name__, error_message))\n logger.log(LogEntry(LogLevel.INFO, __name__, error_message))\n logger.log(LogEntry(LogLevel.DEBUG, __name__, error_message))\n assert result.message == error_message\n\n\ndef tests_teardown():\n drop_all_collections()\n teardown()\n",
"step-3": "<mask token>\n\n\[email protected](autouse=True)\ndef setup():\n register_test_db()\n register_test_injections()\n\n\ndef test_mongo_logging_client_persists_log():\n \"\"\"\n Test to see if the mongodb client logger\n can persist a log entry to the database\n \"\"\"\n error_message = 'This is a test message.'\n logger = LoggingService(console_output=True)\n result = logger.log(LogEntry(LogLevel.ERROR, __name__, error_message))\n logger.log(LogEntry(LogLevel.WARN, __name__, error_message))\n logger.log(LogEntry(LogLevel.INFO, __name__, error_message))\n logger.log(LogEntry(LogLevel.DEBUG, __name__, error_message))\n assert result.message == error_message\n\n\ndef tests_teardown():\n drop_all_collections()\n teardown()\n",
"step-4": "from modules.core.logging.logging_service import LoggingService\nfrom modules.core.logging.models import LogLevel, LogEntry\nimport pytest\nfrom .setup import register_test_db, register_test_injections, teardown, drop_all_collections\n\n\[email protected](autouse=True)\ndef setup():\n register_test_db()\n register_test_injections()\n\n\ndef test_mongo_logging_client_persists_log():\n \"\"\"\n Test to see if the mongodb client logger\n can persist a log entry to the database\n \"\"\"\n error_message = 'This is a test message.'\n logger = LoggingService(console_output=True)\n result = logger.log(LogEntry(LogLevel.ERROR, __name__, error_message))\n logger.log(LogEntry(LogLevel.WARN, __name__, error_message))\n logger.log(LogEntry(LogLevel.INFO, __name__, error_message))\n logger.log(LogEntry(LogLevel.DEBUG, __name__, error_message))\n assert result.message == error_message\n\n\ndef tests_teardown():\n drop_all_collections()\n teardown()\n",
"step-5": "from modules.core.logging.logging_service import LoggingService\nfrom modules.core.logging.models import LogLevel, LogEntry\nimport pytest\nfrom .setup import register_test_db, register_test_injections, teardown,\\\n drop_all_collections\n\n\[email protected](autouse=True)\ndef setup():\n register_test_db()\n register_test_injections()\n\n\ndef test_mongo_logging_client_persists_log():\n \"\"\"\n Test to see if the mongodb client logger\n can persist a log entry to the database\n \"\"\"\n\n error_message = \"This is a test message.\"\n logger = LoggingService(console_output=True)\n\n result = logger.log(LogEntry(LogLevel.ERROR, __name__, error_message))\n logger.log(LogEntry(LogLevel.WARN, __name__, error_message))\n logger.log(LogEntry(LogLevel.INFO, __name__, error_message))\n logger.log(LogEntry(LogLevel.DEBUG, __name__, error_message))\n\n assert result.message == error_message\n\n\ndef tests_teardown():\n drop_all_collections()\n teardown()\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
# Generated by Django 3.2.6 on 2021-08-19 16:17
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('crm', '0040_auto_20210819_1913'),
]
operations = [
migrations.RemoveField(
model_name='customer',
name='full_name',
),
migrations.RemoveField(
model_name='managercrm',
name='full_name',
),
]
|
normal
|
{
"blob_id": "42f021c728a88f34d09f94ea96d91abded8a29fb",
"index": 9553,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('crm', '0040_auto_20210819_1913')]\n operations = [migrations.RemoveField(model_name='customer', name=\n 'full_name'), migrations.RemoveField(model_name='managercrm', name=\n 'full_name')]\n",
"step-4": "from django.db import migrations\n\n\nclass Migration(migrations.Migration):\n dependencies = [('crm', '0040_auto_20210819_1913')]\n operations = [migrations.RemoveField(model_name='customer', name=\n 'full_name'), migrations.RemoveField(model_name='managercrm', name=\n 'full_name')]\n",
"step-5": "# Generated by Django 3.2.6 on 2021-08-19 16:17\n\nfrom django.db import migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('crm', '0040_auto_20210819_1913'),\n ]\n\n operations = [\n migrations.RemoveField(\n model_name='customer',\n name='full_name',\n ),\n migrations.RemoveField(\n model_name='managercrm',\n name='full_name',\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import sys, math
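# Python 2 script: reads a range bound N and a count M, then M numbers (assumed sorted ascending);
# it appears to print, in ascending order, those numbers merged with every value in 1..N that was not given.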
nums = sys.stdin.readline().split(" ")
my_set = set()
my_list = []
for i in xrange(int(nums[1])):
inpt = int(sys.stdin.readline())
my_set.add(inpt)
my_list.append(inpt)
x = 0
for i in xrange(1, int(nums[0]) + 1):
if (i in my_set):
continue
while (x < len(my_list) and my_list[x] < i):
print my_list[x]
x += 1
print i
while (x < len(my_list)):
print my_list[x]
x += 1
|
normal
|
{
"blob_id": "3efa5eb97af116929a7426ed3bfb5e4a170cfacd",
"index": 3014,
"step-1": "import sys, math\n\nnums = sys.stdin.readline().split(\" \")\nmy_set = set()\nmy_list = []\nfor i in xrange(int(nums[1])):\n inpt = int(sys.stdin.readline())\n my_set.add(inpt)\n my_list.append(inpt)\n\nx = 0\nfor i in xrange(1, int(nums[0]) + 1):\n if (i in my_set): \n continue\n while (x < len(my_list) and my_list[x] < i):\n print my_list[x]\n x += 1\n print i\n\nwhile (x < len(my_list)):\n print my_list[x]\n x += 1\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2011-Today Serpent Consulting Services Pvt.Ltd. (<http://www.serpentcs.com>).
# Copyright (C) 2004 OpenERP SA (<http://www.openerp.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
##############################################################################
from odoo import api, models
import time
class location_accommodation(models.AbstractModel):
_name = 'report.sg_accommodation.view_location_report'
@api.model
def get_companies(self):
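        # Collect the company codes of all tenant companies (sorted); td_list just holds one index per code for the report template.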
company_list=[]
self.td_list = []
comp_ids=self.env['res.company'].search([('tenant', '=', True)])
for comp in comp_ids:
company_list.append(comp.company_code)
if company_list:
company_list.sort()
no_of_td=company_list
for td in range(0,len(no_of_td)):
self.td_list.append(td)
return company_list
@api.multi
def render_html(self, docids, data=None):
report = self.env['report']._get_report_from_name('sg_accommodation.view_location_report')
records = self.env['accommodation.accommodation'].browse(self.ids)
docargs = {'doc_ids' : self.ids,
'doc_model' : report.model,
'data' : data,
'docs' : records,
'time' : time,
'get_companies' : self.get_companies}
return self.env['report'].render('sg_accommodation.view_location_report', docargs)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
normal
|
{
"blob_id": "ac99c19294661657d383b036c9ab83e7b610cb7d",
"index": 6896,
"step-1": "<mask token>\n\n\nclass location_accommodation(models.AbstractModel):\n <mask token>\n <mask token>\n\n @api.multi\n def render_html(self, docids, data=None):\n report = self.env['report']._get_report_from_name(\n 'sg_accommodation.view_location_report')\n records = self.env['accommodation.accommodation'].browse(self.ids)\n docargs = {'doc_ids': self.ids, 'doc_model': report.model, 'data':\n data, 'docs': records, 'time': time, 'get_companies': self.\n get_companies}\n return self.env['report'].render(\n 'sg_accommodation.view_location_report', docargs)\n",
"step-2": "<mask token>\n\n\nclass location_accommodation(models.AbstractModel):\n <mask token>\n\n @api.model\n def get_companies(self):\n company_list = []\n self.td_list = []\n comp_ids = self.env['res.company'].search([('tenant', '=', True)])\n for comp in comp_ids:\n company_list.append(comp.company_code)\n if company_list:\n company_list.sort()\n no_of_td = company_list\n for td in range(0, len(no_of_td)):\n self.td_list.append(td)\n return company_list\n\n @api.multi\n def render_html(self, docids, data=None):\n report = self.env['report']._get_report_from_name(\n 'sg_accommodation.view_location_report')\n records = self.env['accommodation.accommodation'].browse(self.ids)\n docargs = {'doc_ids': self.ids, 'doc_model': report.model, 'data':\n data, 'docs': records, 'time': time, 'get_companies': self.\n get_companies}\n return self.env['report'].render(\n 'sg_accommodation.view_location_report', docargs)\n",
"step-3": "<mask token>\n\n\nclass location_accommodation(models.AbstractModel):\n _name = 'report.sg_accommodation.view_location_report'\n\n @api.model\n def get_companies(self):\n company_list = []\n self.td_list = []\n comp_ids = self.env['res.company'].search([('tenant', '=', True)])\n for comp in comp_ids:\n company_list.append(comp.company_code)\n if company_list:\n company_list.sort()\n no_of_td = company_list\n for td in range(0, len(no_of_td)):\n self.td_list.append(td)\n return company_list\n\n @api.multi\n def render_html(self, docids, data=None):\n report = self.env['report']._get_report_from_name(\n 'sg_accommodation.view_location_report')\n records = self.env['accommodation.accommodation'].browse(self.ids)\n docargs = {'doc_ids': self.ids, 'doc_model': report.model, 'data':\n data, 'docs': records, 'time': time, 'get_companies': self.\n get_companies}\n return self.env['report'].render(\n 'sg_accommodation.view_location_report', docargs)\n",
"step-4": "from odoo import api, models\nimport time\n\n\nclass location_accommodation(models.AbstractModel):\n _name = 'report.sg_accommodation.view_location_report'\n\n @api.model\n def get_companies(self):\n company_list = []\n self.td_list = []\n comp_ids = self.env['res.company'].search([('tenant', '=', True)])\n for comp in comp_ids:\n company_list.append(comp.company_code)\n if company_list:\n company_list.sort()\n no_of_td = company_list\n for td in range(0, len(no_of_td)):\n self.td_list.append(td)\n return company_list\n\n @api.multi\n def render_html(self, docids, data=None):\n report = self.env['report']._get_report_from_name(\n 'sg_accommodation.view_location_report')\n records = self.env['accommodation.accommodation'].browse(self.ids)\n docargs = {'doc_ids': self.ids, 'doc_model': report.model, 'data':\n data, 'docs': records, 'time': time, 'get_companies': self.\n get_companies}\n return self.env['report'].render(\n 'sg_accommodation.view_location_report', docargs)\n",
"step-5": "# -*- coding: utf-8 -*-\n##############################################################################\n#\n# OpenERP, Open Source Management Solution\n# Copyright (C) 2011-Today Serpent Consulting Services Pvt.Ltd. (<http://www.serpentcs.com>).\n# Copyright (C) 2004 OpenERP SA (<http://www.openerp.com>)\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program. If not, see <http://www.gnu.org/licenses/>\n#\n##############################################################################\nfrom odoo import api, models\nimport time\n\n\nclass location_accommodation(models.AbstractModel):\n _name = 'report.sg_accommodation.view_location_report'\n\n @api.model\n def get_companies(self):\n company_list=[]\n self.td_list = []\n comp_ids=self.env['res.company'].search([('tenant', '=', True)])\n for comp in comp_ids:\n company_list.append(comp.company_code)\n if company_list:\n company_list.sort()\n no_of_td=company_list\n for td in range(0,len(no_of_td)):\n self.td_list.append(td)\n return company_list\n\n @api.multi\n def render_html(self, docids, data=None):\n report = self.env['report']._get_report_from_name('sg_accommodation.view_location_report')\n records = self.env['accommodation.accommodation'].browse(self.ids)\n docargs = {'doc_ids' : self.ids,\n 'doc_model' : report.model,\n 'data' : data,\n 'docs' : records,\n 'time' : time,\n 'get_companies' : self.get_companies}\n return self.env['report'].render('sg_accommodation.view_location_report', docargs)\n \n# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
import numpy as np
#!pip install pygame
import pygame
#from copy import deepcopy
pygame.init()
#-----------
# Modifications (Matthieu, 15/04):
# Changed the representation of the game board: it is now stored as a single list.
# A single identifier per cup is simpler to handle than a (player, number) pair.
# The list indices corresponding to each cup are, for example:
# [11] [10] [9] [8] [7] [6] computer's row (player 1)
# [0] [1] [2] [3] [4] [5] human player's row (player 0)
# Reworked some of the rule-checking functions to avoid deepcopy calls.
# Simplified the tree structure (a dictionary holding the children of each node).
# The tree is only built down to a given depth profondeurArbre (1 by default), or not at all.
# Alpha-beta algorithm.
# Open problems:
# A function that would detect situations where the game can loop forever.
# Being able to benchmark the AI, for example over a few hundred games, what percentage
# is won by the AI against an algorithm that plays randomly.
# Improve the evaluation function, which is very basic for now.
##-------------
# The board is an array of two rows (the two camps) and nCoupes columns (the cups),
# each initially holding n seeds. The first row is the player's camp, the second the computer's.
# Within each camp the cups are numbered 1 to nCoupes.
# On each turn the player must pick a cup number.
# The seeds it contains are then sown into the following cups, and so on.
#
# Changes of 17.03 by Léo:
# -removed scoreGagnant, which is not part of the basic rules of Awalé
# -to make the code easier to handle and understand, we now speak of
# player 0 and player 1 (instead of 1 and 2) and the cups are numbered 0 to nCoupes-1.
#Class basics:
#https://openclassrooms.com/fr/courses/235344-apprenez-a-programmer-en-python/232721-apprehendez-les-classes
#Explanation of the general minimax algorithm (page 52):
#http://stephane.ayache.perso.luminy.univ-amu.fr/zoom/cours/Cours/IA_Jeux/IAEtJeux2.pdf
#Code by Léo and Paul
#Issue: the game can loop forever at the end of a match (often when 2 seeds remain in symmetric positions)
# -> look into the "partieFinie" function and maybe try to fold this check into the recursive minimax algorithm..
#Issue: tree structure too complicated: (*)
#the tree is built from a list according to the following rule:
#the nCoupes children of the element at index k are at indices k*nCoupes + l, with l ranging from 1 to nCoupes
#One can then check (with a drawing, for instance) that there is a natural bijection between the tree structure and a list (or array) of the right size
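# Illustration (added note, not in the original comments): with nCoupes = 6 that
# list-based tree puts the root at index 0, its children at indices 1..6, the
# children of index 1 at 1*6 + 1 .. 1*6 + 6 = 7..12, those of index 2 at 13..18,
# and so on; this is the bijection mentioned above.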
class terrainDeJeu:
    # [11] [10] [9] [8] [7] [6]// computer's row (player 1)
    # [0] [1] [2] [3] [4] [5]// human player's row (player 0)
def __init__(self,nCoupes,profondeur,nGrainesParCoupelle=4) : #Constructeur
self.plateau = np.full(2*nCoupes,nGrainesParCoupelle)
self.nGrainesParCoupelleInit = nGrainesParCoupelle
self.nCoupes = nCoupes
self.scores = [0,0] # scores[0] = score du joueur 0...
self.tour = 0
self.finie = False
self.profondeurMinimax = profondeur
self.arbreFils = {}
#clone le terrain de jeu pour pouvoir simuler un coup par la suite
def clone(self):
clone = terrainDeJeu(self.nCoupes,self.profondeurMinimax,self.nGrainesParCoupelleInit)
clone.plateau= self.plateau.copy()
clone.scores = self.scores.copy()
clone.tour = self.tour
clone.finie = self.finie
return clone
#retourne l'id de la coupe suivant idCoupe sur le plateau (suivant = sens trigo)
def coupeSuivante(self,idCoupe):
return (idCoupe + 1)%(2*self.nCoupes)
#retourne l'id de la coupe précédant idCoupe sur le plateau (précédant = sens horaire)
def coupePrecedente(self,idCoupe):
return (idCoupe - 1)%(2*self.nCoupes)
#retourne le joueur (0 ou 1) à qui appartient la coupe idCoupe
def joueurCoupe(self,idCoupe):
return 0 if idCoupe < self.nCoupes else 1
#retourne si idCoupe peut être prise (contient 2 ou 3 graines)
def coupePrenable(self,idCoupe):
return (self.plateau[idCoupe]==2 or self.plateau[idCoupe]==3)
def deplacer(self,joueur,idCoupe):
coupeInitiale = idCoupe #id de la coupelle choisie
nGraines = self.plateau[idCoupe]
self.plateau[idCoupe] = 0
while (nGraines != 0): #On redistribue les graines de la coupelle initiale
idCoupe = self.coupeSuivante(idCoupe)
if (idCoupe != coupeInitiale): #On ne redistribue pas dans la coupelle initiale
self.plateau[idCoupe] += 1
nGraines -= 1
coupeFinale = idCoupe
joueurCoupeFinale = self.joueurCoupe(coupeFinale)
if (joueur != joueurCoupeFinale):
#on vérifie si on va affamer l'adversaire
#si non, on prend les graines normalement
if (self.nourrirAdversaire(joueur,coupeFinale)):
while (self.joueurCoupe(idCoupe)==joueurCoupeFinale and self.coupePrenable(idCoupe)):
self.scores[joueur]+=self.plateau[idCoupe]
self.plateau[idCoupe]=0
idCoupe = self.coupePrecedente(idCoupe)
#si on va affamer l'adversaire :
# on ne prend aucune graine donc on ne fait rien
self.tour=(self.tour+1)%2
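    # Worked example (comment added for clarity, not in the original code): on the
    # initial board (4 seeds everywhere), deplacer(0, 2) empties cup 2 and sows one
    # seed into cups 3, 4, 5 and 6. The last seed lands in cup 6, which belongs to
    # player 1 but now holds 5 seeds, so coupePrenable is False, nothing is captured,
    # and only self.tour flips to 1.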
#On compte le nombre de graines restantes sur le plateau
def grainesRestantes(self):
return np.sum(self.plateau)
#on compte le nombre de graines restantes sur le plateau pour les coupes de joueur
def grainesRestantesJoueur(self,joueur):
if joueur==0:
return np.sum(self.plateau[0:self.nCoupes])
else:
return np.sum(self.plateau[self.nCoupes:len(self.plateau)])
#détermine si, dans le cas où joueur finit son coup sur la coupe coupeFinale,
#Yson adversaire sera affamé ou pas
#on regarde donc si il restera au moins une graine sur le terrain de l'adversaire
def nourrirAdversaire(self,joueur,coupeFinale):
adversaire = (joueur+1)%2
#on commence la vérification à la coupe la plus éloignée de adversaire (dans le sens horaire)
admissible = False
idCoupe = (self.nCoupes*(adversaire+1))-1
while (self.joueurCoupe(idCoupe)==adversaire):
#si idCoupe est après coupeFinale et qu'il reste des graines dedans le coup est admissible
if (idCoupe>coupeFinale and self.plateau[idCoupe]!=0):
admissible=True
#si joueur peut pas prendre la coupe idCoupe le coup est admissible
elif (not self.coupePrenable(idCoupe)):
admissible=True
idCoupe=self.coupePrecedente(idCoupe)
#True si le coup est admissible pour la règle "nourrir"
return admissible
#coupes admissibles que peut jouer joueur pour nourrir son adversaire
def coupesAdmissiblesNourrir(self,joueur):
coupesAdmissibles = []
#on commence par la coupe la plus proche de l'adversaire (dans le sens trigo)
idCoupe = (self.nCoupes*(joueur+1))-1
distance = 1
while (self.joueurCoupe(idCoupe)==joueur):
#s'il y a plus de graines dans idCoupe que la distance qui la sépare aux coupes de l'adversaire
#le coup est admissible, au moins une graine nourrira l'adversaire
if self.plateau[idCoupe]>=distance:
coupesAdmissibles.append(idCoupe)
idCoupe = self.coupePrecedente(idCoupe)
distance +=1
return coupesAdmissibles
def coupesAdmissibles(self,joueur):
adversaire = (joueur+1)%2
if self.grainesRestantesJoueur(adversaire) == 0:
coupesAdmissibles = self.coupesAdmissiblesNourrir(joueur)
#si aucun coup ne peut être joué pour nourrir l'adversaire
if len(coupesAdmissibles)==0:
self.scores[joueur] += self.grainesRestantes()
self.plateau = np.zeros(2*self.nCoupes,dtype=int)
self.finie = True
#partie terminée
#sinon toutes les coupes non vides sont admissibles
else :
coupesAdmissibles = [(k+joueur*self.nCoupes) for k in range(self.nCoupes) if self.plateau[(k+joueur*self.nCoupes)]>0]
return coupesAdmissibles
def tourDuJoueur(self):
joueur = 0
#si l'adversaire n'a plus de graines, il faut obligatoirement le nourrir
coupesAdmissibles = self.coupesAdmissibles(joueur)
print("C'est au tour du joueur 1. Entrez le numéro de la coupelle à jouer:")
nCoupe = int(input())
#print("coupesAdmissibles",coupesAdmissibles)
while nCoupe<0 or nCoupe>self.nCoupes-1 or (not (nCoupe in coupesAdmissibles)):
#cas où la coupelle n'existe pas, ou correspond à un coup non admissible
print("Coupelle incorrecte. Entrez le numéro de la coupelle à jouer.")
nCoupe = int(input())
self.deplacer(joueur,nCoupe)
self.jouer()
def tourOrdi(self):
joueur = 1
self.profondeur = 0
self.value = self.alphabeta(joueur,-np.inf,np.inf)
for idCoupe in self.arbreFils.keys():
print("coupe = ",idCoupe," : valeur = ",self.arbreFils[idCoupe].value)
for idCoupe in self.arbreFils.keys():
if self.value==self.arbreFils[idCoupe].value:
self.deplacer(joueur,idCoupe)
break
self.jouer()
def partieFinie(self):
#True si le plateau ne contient plus aucune graine
limiteGagne = self.nCoupes*self.nGrainesParCoupelleInit
self.finie = (self.grainesRestantes()==0 or self.scores[0]> limiteGagne or self.scores[1]> limiteGagne)
return self.finie
def afficherPlateau(self):
print(np.array([self.plateau[self.nCoupes:len(self.plateau)][::-1],self.plateau[0:self.nCoupes]])) # [::-1] permet d'inverse la liste
def afficherScores(self):
print("score J1........."+str(self.scores[0]))
print("score MinMax....."+str(self.scores[1]))
def evaluation(self,joueur):
adversaire = (joueur+1)%2
return self.scores[joueur]-self.scores[adversaire]
#Fonction principale
def jouer(self):
if (not self.partieFinie()) :
self.afficherPlateau()
self.afficherScores()
if (self.tour==0):
self.tourDuJoueur()
else:
self.tourOrdi()
print("\n")
else:
self.afficherPlateau()
self.afficherScores()
print("Partie Finie !")
#plus vraiment utile, le code du minimax est repris dans celui de la fonction alphabeta
def minimax(self, joueurMaximisant, profondeurArbre=1): #joueurMaximisant = joueur pour lequel on veut maximiser le score (0 ou 1)
#On simule ici des situations fictives de jeu de manière récursive (l'I.A. lit en quelque sorte l'avenir pour n=profondeur tours en avance)
self.arbreFils = {}
#on détermine les coups possibles
#si aucun coup n'est possible cette fonction arrête aussi la partie
coupesPossibles = self.coupesAdmissibles(self.tour)
if (self.profondeur == self.profondeurMinimax or self.finie): #cas de base
self.value = self.evaluation(joueurMaximisant)
return self.value
if self.tour==joueurMaximisant:
fctComparaison = max
self.value = - np.inf
else:
fctComparaison = min
self.value = np.inf
#on parcourt tous les coups possibles
for idCoupe in coupesPossibles:
fils=self.clone()
fils.profondeur=self.profondeur+1
fils.deplacer(fils.tour,idCoupe)
fils.value = fils.minimax(joueurMaximisant)
#on ne remplit effectivement l'arbre (attribut arbreFils)
#que pour une profondeur < à profondeurArbre
#on pourrait même ne pas le remplir du tout mais profondeurArbre = 1
#permet d'afficher les valeurs associées à chaque coup...
if (self.profondeur < profondeurArbre):
self.arbreFils[idCoupe]=fils
self.value = fctComparaison(self.value, fils.value)
return self.value
def alphabeta(self, joueurMaximisant, alpha, beta, profondeurArbre=1): #joueurMaximisant = joueur pour lequel on veut maximiser le score (0 ou 1)
#On simule ici des situations fictives de jeu de manière récursive (l'I.A. lit en quelque sorte l'avenir pour n=profondeur tours en avance)
self.arbreFils = {}
#on détermine les coups possibles
#si aucun coup n'est possible cette fonction arrête aussi la partie
coupesPossibles = self.coupesAdmissibles(self.tour)
if (self.profondeur == self.profondeurMinimax or self.finie): #cas de base
self.value = self.evaluation(joueurMaximisant)
return self.value
if self.tour==joueurMaximisant:
fctComparaison = max
self.value = - np.inf
else:
fctComparaison = min
self.value = np.inf
#on parcourt tous les coups possibles
for idCoupe in coupesPossibles:
fils=self.clone()
fils.profondeur=self.profondeur+1
fils.deplacer(fils.tour,idCoupe)
fils.value = fils.alphabeta(joueurMaximisant,alpha,beta)
#on ne remplit effectivement l'arbre (attribut arbreFils)
#que pour une profondeur < à profondeurArbre
#on pourrait même ne pas le remplir du tout mais profondeurArbre = 1
#permet d'afficher les valeurs associées à chaque coup...
if (self.profondeur < profondeurArbre):
self.arbreFils[idCoupe]=fils
self.value = fctComparaison(self.value, fils.value)
#coupures alpha et beta si on est sûrs d'avoir le meilleur résultat possible
if self.tour==joueurMaximisant:
if self.value >= beta:
return self.value
alpha = fctComparaison(alpha,self.value)
else:
if alpha >= self.value:
return self.value
beta = fctComparaison(beta,self.value)
return self.value
t = terrainDeJeu(nCoupes=6,nGrainesParCoupelle=4,profondeur=8)
t.jouer()
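
# Added sketch (not part of the original program): a rough way to measure the AI
# against a random opponent, as suggested in the notes at the top of the file. It
# only relies on methods already defined in terrainDeJeu (partieFinie,
# coupesAdmissibles, deplacer, alphabeta); profondeur and arbreFils are used exactly
# as in tourOrdi. Not called automatically; run e.g. print(partie_ia_contre_aleatoire()).
def partie_ia_contre_aleatoire(profondeur=4, max_coups=500):
    import random
    t = terrainDeJeu(nCoupes=6, nGrainesParCoupelle=4, profondeur=profondeur)
    for _ in range(max_coups):  # hard cap, since the notes mention games that can loop
        if t.partieFinie():
            break
        coups = t.coupesAdmissibles(t.tour)
        if t.finie or not coups:
            break
        if t.tour == 0:
            # player 0 plays a uniformly random admissible cup
            t.deplacer(0, random.choice(coups))
        else:
            # player 1 plays the alpha-beta move, mirroring tourOrdi without the recursion
            t.profondeur = 0
            valeur = t.alphabeta(1, -np.inf, np.inf)
            choix = next((c for c, f in t.arbreFils.items() if f.value == valeur), coups[0])
            t.deplacer(1, choix)
    return t.scores  # [random player's score, AI's score]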
|
normal
|
{
"blob_id": "576d6bec4a91ba6f0597b76a5da5ad3ef6562b19",
"index": 9592,
"step-1": "<mask token>\n\n\nclass terrainDeJeu:\n\n def __init__(self, nCoupes, profondeur, nGrainesParCoupelle=4):\n self.plateau = np.full(2 * nCoupes, nGrainesParCoupelle)\n self.nGrainesParCoupelleInit = nGrainesParCoupelle\n self.nCoupes = nCoupes\n self.scores = [0, 0]\n self.tour = 0\n self.finie = False\n self.profondeurMinimax = profondeur\n self.arbreFils = {}\n <mask token>\n\n def coupeSuivante(self, idCoupe):\n return (idCoupe + 1) % (2 * self.nCoupes)\n <mask token>\n\n def joueurCoupe(self, idCoupe):\n return 0 if idCoupe < self.nCoupes else 1\n\n def coupePrenable(self, idCoupe):\n return self.plateau[idCoupe] == 2 or self.plateau[idCoupe] == 3\n\n def deplacer(self, joueur, idCoupe):\n coupeInitiale = idCoupe\n nGraines = self.plateau[idCoupe]\n self.plateau[idCoupe] = 0\n while nGraines != 0:\n idCoupe = self.coupeSuivante(idCoupe)\n if idCoupe != coupeInitiale:\n self.plateau[idCoupe] += 1\n nGraines -= 1\n coupeFinale = idCoupe\n joueurCoupeFinale = self.joueurCoupe(coupeFinale)\n if joueur != joueurCoupeFinale:\n if self.nourrirAdversaire(joueur, coupeFinale):\n while self.joueurCoupe(idCoupe\n ) == joueurCoupeFinale and self.coupePrenable(idCoupe):\n self.scores[joueur] += self.plateau[idCoupe]\n self.plateau[idCoupe] = 0\n idCoupe = self.coupePrecedente(idCoupe)\n self.tour = (self.tour + 1) % 2\n\n def grainesRestantes(self):\n return np.sum(self.plateau)\n <mask token>\n\n def nourrirAdversaire(self, joueur, coupeFinale):\n adversaire = (joueur + 1) % 2\n admissible = False\n idCoupe = self.nCoupes * (adversaire + 1) - 1\n while self.joueurCoupe(idCoupe) == adversaire:\n if idCoupe > coupeFinale and self.plateau[idCoupe] != 0:\n admissible = True\n elif not self.coupePrenable(idCoupe):\n admissible = True\n idCoupe = self.coupePrecedente(idCoupe)\n return admissible\n\n def coupesAdmissiblesNourrir(self, joueur):\n coupesAdmissibles = []\n idCoupe = self.nCoupes * (joueur + 1) - 1\n distance = 1\n while self.joueurCoupe(idCoupe) == joueur:\n if self.plateau[idCoupe] >= distance:\n coupesAdmissibles.append(idCoupe)\n idCoupe = self.coupePrecedente(idCoupe)\n distance += 1\n return coupesAdmissibles\n\n def coupesAdmissibles(self, joueur):\n adversaire = (joueur + 1) % 2\n if self.grainesRestantesJoueur(adversaire) == 0:\n coupesAdmissibles = self.coupesAdmissiblesNourrir(joueur)\n if len(coupesAdmissibles) == 0:\n self.scores[joueur] += self.grainesRestantes()\n self.plateau = np.zeros(2 * self.nCoupes, dtype=int)\n self.finie = True\n else:\n coupesAdmissibles = [(k + joueur * self.nCoupes) for k in range\n (self.nCoupes) if self.plateau[k + joueur * self.nCoupes] > 0]\n return coupesAdmissibles\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def afficherScores(self):\n print('score J1.........' + str(self.scores[0]))\n print('score MinMax.....' 
+ str(self.scores[1]))\n <mask token>\n\n def jouer(self):\n if not self.partieFinie():\n self.afficherPlateau()\n self.afficherScores()\n if self.tour == 0:\n self.tourDuJoueur()\n else:\n self.tourOrdi()\n print('\\n')\n else:\n self.afficherPlateau()\n self.afficherScores()\n print('Partie Finie !')\n\n def minimax(self, joueurMaximisant, profondeurArbre=1):\n self.arbreFils = {}\n coupesPossibles = self.coupesAdmissibles(self.tour)\n if self.profondeur == self.profondeurMinimax or self.finie:\n self.value = self.evaluation(joueurMaximisant)\n return self.value\n if self.tour == joueurMaximisant:\n fctComparaison = max\n self.value = -np.inf\n else:\n fctComparaison = min\n self.value = np.inf\n for idCoupe in coupesPossibles:\n fils = self.clone()\n fils.profondeur = self.profondeur + 1\n fils.deplacer(fils.tour, idCoupe)\n fils.value = fils.minimax(joueurMaximisant)\n if self.profondeur < profondeurArbre:\n self.arbreFils[idCoupe] = fils\n self.value = fctComparaison(self.value, fils.value)\n return self.value\n\n def alphabeta(self, joueurMaximisant, alpha, beta, profondeurArbre=1):\n self.arbreFils = {}\n coupesPossibles = self.coupesAdmissibles(self.tour)\n if self.profondeur == self.profondeurMinimax or self.finie:\n self.value = self.evaluation(joueurMaximisant)\n return self.value\n if self.tour == joueurMaximisant:\n fctComparaison = max\n self.value = -np.inf\n else:\n fctComparaison = min\n self.value = np.inf\n for idCoupe in coupesPossibles:\n fils = self.clone()\n fils.profondeur = self.profondeur + 1\n fils.deplacer(fils.tour, idCoupe)\n fils.value = fils.alphabeta(joueurMaximisant, alpha, beta)\n if self.profondeur < profondeurArbre:\n self.arbreFils[idCoupe] = fils\n self.value = fctComparaison(self.value, fils.value)\n if self.tour == joueurMaximisant:\n if self.value >= beta:\n return self.value\n alpha = fctComparaison(alpha, self.value)\n else:\n if alpha >= self.value:\n return self.value\n beta = fctComparaison(beta, self.value)\n return self.value\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass terrainDeJeu:\n\n def __init__(self, nCoupes, profondeur, nGrainesParCoupelle=4):\n self.plateau = np.full(2 * nCoupes, nGrainesParCoupelle)\n self.nGrainesParCoupelleInit = nGrainesParCoupelle\n self.nCoupes = nCoupes\n self.scores = [0, 0]\n self.tour = 0\n self.finie = False\n self.profondeurMinimax = profondeur\n self.arbreFils = {}\n\n def clone(self):\n clone = terrainDeJeu(self.nCoupes, self.profondeurMinimax, self.\n nGrainesParCoupelleInit)\n clone.plateau = self.plateau.copy()\n clone.scores = self.scores.copy()\n clone.tour = self.tour\n clone.finie = self.finie\n return clone\n\n def coupeSuivante(self, idCoupe):\n return (idCoupe + 1) % (2 * self.nCoupes)\n\n def coupePrecedente(self, idCoupe):\n return (idCoupe - 1) % (2 * self.nCoupes)\n\n def joueurCoupe(self, idCoupe):\n return 0 if idCoupe < self.nCoupes else 1\n\n def coupePrenable(self, idCoupe):\n return self.plateau[idCoupe] == 2 or self.plateau[idCoupe] == 3\n\n def deplacer(self, joueur, idCoupe):\n coupeInitiale = idCoupe\n nGraines = self.plateau[idCoupe]\n self.plateau[idCoupe] = 0\n while nGraines != 0:\n idCoupe = self.coupeSuivante(idCoupe)\n if idCoupe != coupeInitiale:\n self.plateau[idCoupe] += 1\n nGraines -= 1\n coupeFinale = idCoupe\n joueurCoupeFinale = self.joueurCoupe(coupeFinale)\n if joueur != joueurCoupeFinale:\n if self.nourrirAdversaire(joueur, coupeFinale):\n while self.joueurCoupe(idCoupe\n ) == joueurCoupeFinale and self.coupePrenable(idCoupe):\n self.scores[joueur] += self.plateau[idCoupe]\n self.plateau[idCoupe] = 0\n idCoupe = self.coupePrecedente(idCoupe)\n self.tour = (self.tour + 1) % 2\n\n def grainesRestantes(self):\n return np.sum(self.plateau)\n\n def grainesRestantesJoueur(self, joueur):\n if joueur == 0:\n return np.sum(self.plateau[0:self.nCoupes])\n else:\n return np.sum(self.plateau[self.nCoupes:len(self.plateau)])\n\n def nourrirAdversaire(self, joueur, coupeFinale):\n adversaire = (joueur + 1) % 2\n admissible = False\n idCoupe = self.nCoupes * (adversaire + 1) - 1\n while self.joueurCoupe(idCoupe) == adversaire:\n if idCoupe > coupeFinale and self.plateau[idCoupe] != 0:\n admissible = True\n elif not self.coupePrenable(idCoupe):\n admissible = True\n idCoupe = self.coupePrecedente(idCoupe)\n return admissible\n\n def coupesAdmissiblesNourrir(self, joueur):\n coupesAdmissibles = []\n idCoupe = self.nCoupes * (joueur + 1) - 1\n distance = 1\n while self.joueurCoupe(idCoupe) == joueur:\n if self.plateau[idCoupe] >= distance:\n coupesAdmissibles.append(idCoupe)\n idCoupe = self.coupePrecedente(idCoupe)\n distance += 1\n return coupesAdmissibles\n\n def coupesAdmissibles(self, joueur):\n adversaire = (joueur + 1) % 2\n if self.grainesRestantesJoueur(adversaire) == 0:\n coupesAdmissibles = self.coupesAdmissiblesNourrir(joueur)\n if len(coupesAdmissibles) == 0:\n self.scores[joueur] += self.grainesRestantes()\n self.plateau = np.zeros(2 * self.nCoupes, dtype=int)\n self.finie = True\n else:\n coupesAdmissibles = [(k + joueur * self.nCoupes) for k in range\n (self.nCoupes) if self.plateau[k + joueur * self.nCoupes] > 0]\n return coupesAdmissibles\n <mask token>\n <mask token>\n\n def partieFinie(self):\n limiteGagne = self.nCoupes * self.nGrainesParCoupelleInit\n self.finie = self.grainesRestantes() == 0 or self.scores[0\n ] > limiteGagne or self.scores[1] > limiteGagne\n return self.finie\n\n def afficherPlateau(self):\n print(np.array([self.plateau[self.nCoupes:len(self.plateau)][::-1],\n self.plateau[0:self.nCoupes]]))\n\n def 
afficherScores(self):\n print('score J1.........' + str(self.scores[0]))\n print('score MinMax.....' + str(self.scores[1]))\n\n def evaluation(self, joueur):\n adversaire = (joueur + 1) % 2\n return self.scores[joueur] - self.scores[adversaire]\n\n def jouer(self):\n if not self.partieFinie():\n self.afficherPlateau()\n self.afficherScores()\n if self.tour == 0:\n self.tourDuJoueur()\n else:\n self.tourOrdi()\n print('\\n')\n else:\n self.afficherPlateau()\n self.afficherScores()\n print('Partie Finie !')\n\n def minimax(self, joueurMaximisant, profondeurArbre=1):\n self.arbreFils = {}\n coupesPossibles = self.coupesAdmissibles(self.tour)\n if self.profondeur == self.profondeurMinimax or self.finie:\n self.value = self.evaluation(joueurMaximisant)\n return self.value\n if self.tour == joueurMaximisant:\n fctComparaison = max\n self.value = -np.inf\n else:\n fctComparaison = min\n self.value = np.inf\n for idCoupe in coupesPossibles:\n fils = self.clone()\n fils.profondeur = self.profondeur + 1\n fils.deplacer(fils.tour, idCoupe)\n fils.value = fils.minimax(joueurMaximisant)\n if self.profondeur < profondeurArbre:\n self.arbreFils[idCoupe] = fils\n self.value = fctComparaison(self.value, fils.value)\n return self.value\n\n def alphabeta(self, joueurMaximisant, alpha, beta, profondeurArbre=1):\n self.arbreFils = {}\n coupesPossibles = self.coupesAdmissibles(self.tour)\n if self.profondeur == self.profondeurMinimax or self.finie:\n self.value = self.evaluation(joueurMaximisant)\n return self.value\n if self.tour == joueurMaximisant:\n fctComparaison = max\n self.value = -np.inf\n else:\n fctComparaison = min\n self.value = np.inf\n for idCoupe in coupesPossibles:\n fils = self.clone()\n fils.profondeur = self.profondeur + 1\n fils.deplacer(fils.tour, idCoupe)\n fils.value = fils.alphabeta(joueurMaximisant, alpha, beta)\n if self.profondeur < profondeurArbre:\n self.arbreFils[idCoupe] = fils\n self.value = fctComparaison(self.value, fils.value)\n if self.tour == joueurMaximisant:\n if self.value >= beta:\n return self.value\n alpha = fctComparaison(alpha, self.value)\n else:\n if alpha >= self.value:\n return self.value\n beta = fctComparaison(beta, self.value)\n return self.value\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass terrainDeJeu:\n\n def __init__(self, nCoupes, profondeur, nGrainesParCoupelle=4):\n self.plateau = np.full(2 * nCoupes, nGrainesParCoupelle)\n self.nGrainesParCoupelleInit = nGrainesParCoupelle\n self.nCoupes = nCoupes\n self.scores = [0, 0]\n self.tour = 0\n self.finie = False\n self.profondeurMinimax = profondeur\n self.arbreFils = {}\n\n def clone(self):\n clone = terrainDeJeu(self.nCoupes, self.profondeurMinimax, self.\n nGrainesParCoupelleInit)\n clone.plateau = self.plateau.copy()\n clone.scores = self.scores.copy()\n clone.tour = self.tour\n clone.finie = self.finie\n return clone\n\n def coupeSuivante(self, idCoupe):\n return (idCoupe + 1) % (2 * self.nCoupes)\n\n def coupePrecedente(self, idCoupe):\n return (idCoupe - 1) % (2 * self.nCoupes)\n\n def joueurCoupe(self, idCoupe):\n return 0 if idCoupe < self.nCoupes else 1\n\n def coupePrenable(self, idCoupe):\n return self.plateau[idCoupe] == 2 or self.plateau[idCoupe] == 3\n\n def deplacer(self, joueur, idCoupe):\n coupeInitiale = idCoupe\n nGraines = self.plateau[idCoupe]\n self.plateau[idCoupe] = 0\n while nGraines != 0:\n idCoupe = self.coupeSuivante(idCoupe)\n if idCoupe != coupeInitiale:\n self.plateau[idCoupe] += 1\n nGraines -= 1\n coupeFinale = idCoupe\n joueurCoupeFinale = self.joueurCoupe(coupeFinale)\n if joueur != joueurCoupeFinale:\n if self.nourrirAdversaire(joueur, coupeFinale):\n while self.joueurCoupe(idCoupe\n ) == joueurCoupeFinale and self.coupePrenable(idCoupe):\n self.scores[joueur] += self.plateau[idCoupe]\n self.plateau[idCoupe] = 0\n idCoupe = self.coupePrecedente(idCoupe)\n self.tour = (self.tour + 1) % 2\n\n def grainesRestantes(self):\n return np.sum(self.plateau)\n\n def grainesRestantesJoueur(self, joueur):\n if joueur == 0:\n return np.sum(self.plateau[0:self.nCoupes])\n else:\n return np.sum(self.plateau[self.nCoupes:len(self.plateau)])\n\n def nourrirAdversaire(self, joueur, coupeFinale):\n adversaire = (joueur + 1) % 2\n admissible = False\n idCoupe = self.nCoupes * (adversaire + 1) - 1\n while self.joueurCoupe(idCoupe) == adversaire:\n if idCoupe > coupeFinale and self.plateau[idCoupe] != 0:\n admissible = True\n elif not self.coupePrenable(idCoupe):\n admissible = True\n idCoupe = self.coupePrecedente(idCoupe)\n return admissible\n\n def coupesAdmissiblesNourrir(self, joueur):\n coupesAdmissibles = []\n idCoupe = self.nCoupes * (joueur + 1) - 1\n distance = 1\n while self.joueurCoupe(idCoupe) == joueur:\n if self.plateau[idCoupe] >= distance:\n coupesAdmissibles.append(idCoupe)\n idCoupe = self.coupePrecedente(idCoupe)\n distance += 1\n return coupesAdmissibles\n\n def coupesAdmissibles(self, joueur):\n adversaire = (joueur + 1) % 2\n if self.grainesRestantesJoueur(adversaire) == 0:\n coupesAdmissibles = self.coupesAdmissiblesNourrir(joueur)\n if len(coupesAdmissibles) == 0:\n self.scores[joueur] += self.grainesRestantes()\n self.plateau = np.zeros(2 * self.nCoupes, dtype=int)\n self.finie = True\n else:\n coupesAdmissibles = [(k + joueur * self.nCoupes) for k in range\n (self.nCoupes) if self.plateau[k + joueur * self.nCoupes] > 0]\n return coupesAdmissibles\n <mask token>\n\n def tourOrdi(self):\n joueur = 1\n self.profondeur = 0\n self.value = self.alphabeta(joueur, -np.inf, np.inf)\n for idCoupe in self.arbreFils.keys():\n print('coupe = ', idCoupe, ' : valeur = ', self.arbreFils[\n idCoupe].value)\n for idCoupe in self.arbreFils.keys():\n if self.value == self.arbreFils[idCoupe].value:\n self.deplacer(joueur, idCoupe)\n break\n self.jouer()\n\n def 
partieFinie(self):\n limiteGagne = self.nCoupes * self.nGrainesParCoupelleInit\n self.finie = self.grainesRestantes() == 0 or self.scores[0\n ] > limiteGagne or self.scores[1] > limiteGagne\n return self.finie\n\n def afficherPlateau(self):\n print(np.array([self.plateau[self.nCoupes:len(self.plateau)][::-1],\n self.plateau[0:self.nCoupes]]))\n\n def afficherScores(self):\n print('score J1.........' + str(self.scores[0]))\n print('score MinMax.....' + str(self.scores[1]))\n\n def evaluation(self, joueur):\n adversaire = (joueur + 1) % 2\n return self.scores[joueur] - self.scores[adversaire]\n\n def jouer(self):\n if not self.partieFinie():\n self.afficherPlateau()\n self.afficherScores()\n if self.tour == 0:\n self.tourDuJoueur()\n else:\n self.tourOrdi()\n print('\\n')\n else:\n self.afficherPlateau()\n self.afficherScores()\n print('Partie Finie !')\n\n def minimax(self, joueurMaximisant, profondeurArbre=1):\n self.arbreFils = {}\n coupesPossibles = self.coupesAdmissibles(self.tour)\n if self.profondeur == self.profondeurMinimax or self.finie:\n self.value = self.evaluation(joueurMaximisant)\n return self.value\n if self.tour == joueurMaximisant:\n fctComparaison = max\n self.value = -np.inf\n else:\n fctComparaison = min\n self.value = np.inf\n for idCoupe in coupesPossibles:\n fils = self.clone()\n fils.profondeur = self.profondeur + 1\n fils.deplacer(fils.tour, idCoupe)\n fils.value = fils.minimax(joueurMaximisant)\n if self.profondeur < profondeurArbre:\n self.arbreFils[idCoupe] = fils\n self.value = fctComparaison(self.value, fils.value)\n return self.value\n\n def alphabeta(self, joueurMaximisant, alpha, beta, profondeurArbre=1):\n self.arbreFils = {}\n coupesPossibles = self.coupesAdmissibles(self.tour)\n if self.profondeur == self.profondeurMinimax or self.finie:\n self.value = self.evaluation(joueurMaximisant)\n return self.value\n if self.tour == joueurMaximisant:\n fctComparaison = max\n self.value = -np.inf\n else:\n fctComparaison = min\n self.value = np.inf\n for idCoupe in coupesPossibles:\n fils = self.clone()\n fils.profondeur = self.profondeur + 1\n fils.deplacer(fils.tour, idCoupe)\n fils.value = fils.alphabeta(joueurMaximisant, alpha, beta)\n if self.profondeur < profondeurArbre:\n self.arbreFils[idCoupe] = fils\n self.value = fctComparaison(self.value, fils.value)\n if self.tour == joueurMaximisant:\n if self.value >= beta:\n return self.value\n alpha = fctComparaison(alpha, self.value)\n else:\n if alpha >= self.value:\n return self.value\n beta = fctComparaison(beta, self.value)\n return self.value\n\n\n<mask token>\n",
"step-4": "<mask token>\npygame.init()\n\n\nclass terrainDeJeu:\n\n def __init__(self, nCoupes, profondeur, nGrainesParCoupelle=4):\n self.plateau = np.full(2 * nCoupes, nGrainesParCoupelle)\n self.nGrainesParCoupelleInit = nGrainesParCoupelle\n self.nCoupes = nCoupes\n self.scores = [0, 0]\n self.tour = 0\n self.finie = False\n self.profondeurMinimax = profondeur\n self.arbreFils = {}\n\n def clone(self):\n clone = terrainDeJeu(self.nCoupes, self.profondeurMinimax, self.\n nGrainesParCoupelleInit)\n clone.plateau = self.plateau.copy()\n clone.scores = self.scores.copy()\n clone.tour = self.tour\n clone.finie = self.finie\n return clone\n\n def coupeSuivante(self, idCoupe):\n return (idCoupe + 1) % (2 * self.nCoupes)\n\n def coupePrecedente(self, idCoupe):\n return (idCoupe - 1) % (2 * self.nCoupes)\n\n def joueurCoupe(self, idCoupe):\n return 0 if idCoupe < self.nCoupes else 1\n\n def coupePrenable(self, idCoupe):\n return self.plateau[idCoupe] == 2 or self.plateau[idCoupe] == 3\n\n def deplacer(self, joueur, idCoupe):\n coupeInitiale = idCoupe\n nGraines = self.plateau[idCoupe]\n self.plateau[idCoupe] = 0\n while nGraines != 0:\n idCoupe = self.coupeSuivante(idCoupe)\n if idCoupe != coupeInitiale:\n self.plateau[idCoupe] += 1\n nGraines -= 1\n coupeFinale = idCoupe\n joueurCoupeFinale = self.joueurCoupe(coupeFinale)\n if joueur != joueurCoupeFinale:\n if self.nourrirAdversaire(joueur, coupeFinale):\n while self.joueurCoupe(idCoupe\n ) == joueurCoupeFinale and self.coupePrenable(idCoupe):\n self.scores[joueur] += self.plateau[idCoupe]\n self.plateau[idCoupe] = 0\n idCoupe = self.coupePrecedente(idCoupe)\n self.tour = (self.tour + 1) % 2\n\n def grainesRestantes(self):\n return np.sum(self.plateau)\n\n def grainesRestantesJoueur(self, joueur):\n if joueur == 0:\n return np.sum(self.plateau[0:self.nCoupes])\n else:\n return np.sum(self.plateau[self.nCoupes:len(self.plateau)])\n\n def nourrirAdversaire(self, joueur, coupeFinale):\n adversaire = (joueur + 1) % 2\n admissible = False\n idCoupe = self.nCoupes * (adversaire + 1) - 1\n while self.joueurCoupe(idCoupe) == adversaire:\n if idCoupe > coupeFinale and self.plateau[idCoupe] != 0:\n admissible = True\n elif not self.coupePrenable(idCoupe):\n admissible = True\n idCoupe = self.coupePrecedente(idCoupe)\n return admissible\n\n def coupesAdmissiblesNourrir(self, joueur):\n coupesAdmissibles = []\n idCoupe = self.nCoupes * (joueur + 1) - 1\n distance = 1\n while self.joueurCoupe(idCoupe) == joueur:\n if self.plateau[idCoupe] >= distance:\n coupesAdmissibles.append(idCoupe)\n idCoupe = self.coupePrecedente(idCoupe)\n distance += 1\n return coupesAdmissibles\n\n def coupesAdmissibles(self, joueur):\n adversaire = (joueur + 1) % 2\n if self.grainesRestantesJoueur(adversaire) == 0:\n coupesAdmissibles = self.coupesAdmissiblesNourrir(joueur)\n if len(coupesAdmissibles) == 0:\n self.scores[joueur] += self.grainesRestantes()\n self.plateau = np.zeros(2 * self.nCoupes, dtype=int)\n self.finie = True\n else:\n coupesAdmissibles = [(k + joueur * self.nCoupes) for k in range\n (self.nCoupes) if self.plateau[k + joueur * self.nCoupes] > 0]\n return coupesAdmissibles\n\n def tourDuJoueur(self):\n joueur = 0\n coupesAdmissibles = self.coupesAdmissibles(joueur)\n print(\n \"C'est au tour du joueur 1. Entrez le numéro de la coupelle à jouer:\"\n )\n nCoupe = int(input())\n while (nCoupe < 0 or nCoupe > self.nCoupes - 1 or not nCoupe in\n coupesAdmissibles):\n print(\n 'Coupelle incorrecte. 
Entrez le numéro de la coupelle à jouer.'\n )\n nCoupe = int(input())\n self.deplacer(joueur, nCoupe)\n self.jouer()\n\n def tourOrdi(self):\n joueur = 1\n self.profondeur = 0\n self.value = self.alphabeta(joueur, -np.inf, np.inf)\n for idCoupe in self.arbreFils.keys():\n print('coupe = ', idCoupe, ' : valeur = ', self.arbreFils[\n idCoupe].value)\n for idCoupe in self.arbreFils.keys():\n if self.value == self.arbreFils[idCoupe].value:\n self.deplacer(joueur, idCoupe)\n break\n self.jouer()\n\n def partieFinie(self):\n limiteGagne = self.nCoupes * self.nGrainesParCoupelleInit\n self.finie = self.grainesRestantes() == 0 or self.scores[0\n ] > limiteGagne or self.scores[1] > limiteGagne\n return self.finie\n\n def afficherPlateau(self):\n print(np.array([self.plateau[self.nCoupes:len(self.plateau)][::-1],\n self.plateau[0:self.nCoupes]]))\n\n def afficherScores(self):\n print('score J1.........' + str(self.scores[0]))\n print('score MinMax.....' + str(self.scores[1]))\n\n def evaluation(self, joueur):\n adversaire = (joueur + 1) % 2\n return self.scores[joueur] - self.scores[adversaire]\n\n def jouer(self):\n if not self.partieFinie():\n self.afficherPlateau()\n self.afficherScores()\n if self.tour == 0:\n self.tourDuJoueur()\n else:\n self.tourOrdi()\n print('\\n')\n else:\n self.afficherPlateau()\n self.afficherScores()\n print('Partie Finie !')\n\n def minimax(self, joueurMaximisant, profondeurArbre=1):\n self.arbreFils = {}\n coupesPossibles = self.coupesAdmissibles(self.tour)\n if self.profondeur == self.profondeurMinimax or self.finie:\n self.value = self.evaluation(joueurMaximisant)\n return self.value\n if self.tour == joueurMaximisant:\n fctComparaison = max\n self.value = -np.inf\n else:\n fctComparaison = min\n self.value = np.inf\n for idCoupe in coupesPossibles:\n fils = self.clone()\n fils.profondeur = self.profondeur + 1\n fils.deplacer(fils.tour, idCoupe)\n fils.value = fils.minimax(joueurMaximisant)\n if self.profondeur < profondeurArbre:\n self.arbreFils[idCoupe] = fils\n self.value = fctComparaison(self.value, fils.value)\n return self.value\n\n def alphabeta(self, joueurMaximisant, alpha, beta, profondeurArbre=1):\n self.arbreFils = {}\n coupesPossibles = self.coupesAdmissibles(self.tour)\n if self.profondeur == self.profondeurMinimax or self.finie:\n self.value = self.evaluation(joueurMaximisant)\n return self.value\n if self.tour == joueurMaximisant:\n fctComparaison = max\n self.value = -np.inf\n else:\n fctComparaison = min\n self.value = np.inf\n for idCoupe in coupesPossibles:\n fils = self.clone()\n fils.profondeur = self.profondeur + 1\n fils.deplacer(fils.tour, idCoupe)\n fils.value = fils.alphabeta(joueurMaximisant, alpha, beta)\n if self.profondeur < profondeurArbre:\n self.arbreFils[idCoupe] = fils\n self.value = fctComparaison(self.value, fils.value)\n if self.tour == joueurMaximisant:\n if self.value >= beta:\n return self.value\n alpha = fctComparaison(alpha, self.value)\n else:\n if alpha >= self.value:\n return self.value\n beta = fctComparaison(beta, self.value)\n return self.value\n\n\nt = terrainDeJeu(nCoupes=6, nGrainesParCoupelle=4, profondeur=8)\nt.jouer()\n",
"step-5": "import numpy as np\r\n#!pip install pygame\r\nimport pygame\r\n#from copy import deepcopy\r\npygame.init()\r\n#-----------\r\n# Modifications (Matthieu, 15/04):\r\n# Modification de la représentation du terrain du jeu. Il est maintenant représenté par une seule liste.\r\n# un seul identifiant par coupe semble plus simple à gérer qu'un couple (joueur,numero)\r\n# Les indices de la liste correspondant à chaque coupe sont par exemple :\r\n# [11] [10] [9] [8] [7] [6] ligne de l'ordi (joueur 1)\r\n# [0] [1] [2] [3] [4] [5] ligne du joueur (joueur 0)\r\n# Modifications de certaines fonctions de vérification des règles pour éviter les deepcopy\r\n# Simplification de la structure de l'arbre (structure de dictionnaire contenant les fils de chaque noeud)\r\n# On ne le construit que pour une profondeur donnée profondeurArbre (1 par défaut), ou même pas du tout\r\n# Algo alpha beta\r\n# Pbs : \r\n# Fonction qui permettrait de détecter les situations ou le jeu peut boucler à l'infini\r\n# Pouvoir tester les performances de l'ia, par exemple sur quelques centaines de parties, combien de % \r\n# sont gagnées par l'ia contre un algo qui joue aléatoirement\r\n# Améliorer la fonction d'évaluation qui est pour l'instant très basique\r\n##-------------\r\n# Le terrain de jeu est un tableau de deux lignes (les deux camps) et de nCoupes colonnes (les coupelles),\r\n# contenant initialement n graines. La première constitue le camp du joueur, la seconde, celle de l'ordinateur.\r\n# Dans chaque camp, les coupelles sont numérotées de 1 à nCoupes.\r\n# A chaque tour, le joueur doit choisir un numéro de coupelle.\r\n# Les graines de celle-ci sont alors transférées dans les coupes suivantes etc.\r\n#\r\n# modifs du 17.03 par Léo:\r\n# -suppression de scoreGagnant, qui n'apparait pas dans les règles de base de l'Awalé\r\n# -Pour faciliter les manipulations du code et sa compréhension, on parle maintenant\r\n# du joueur 0 et du joueur 1 (au lieu de 1 et 2) et les coupelles sont numérotées de 0 à nCoupes-1.\r\n#Notions de classe:\r\n#https://openclassrooms.com/fr/courses/235344-apprenez-a-programmer-en-python/232721-apprehendez-les-classes\r\n#Explication de l'algorithme minimax général (page 52) :\r\n#http://stephane.ayache.perso.luminy.univ-amu.fr/zoom/cours/Cours/IA_Jeux/IAEtJeux2.pdf\r\n#Code par Léo et Paul\r\n#Pb: le jeu peut boucler à l'infini à la fin d'une partie (souvent lorsqu'il reste 2 graines disposées symétriquement)\r\n# -> se pencher sur la fonction \"partieFinie\" et peut-être essayer d'intégrer cette fonction dans l'algo récursif minimax..\r\n#Pb: structure d'arbre trop compliquée: (*)\r\n#l'arbre est construit à partir d'une liste selon le principe suivant:\r\n#les nCoupes fils de l'élément d'indice k sont d'indices k*nCoupes + l, avec l variant entre 1 et nCoupes\r\n#On vérifie alors (à l'aide d'un dessin par exemple) qu'il y a une bijection naturelle entre la structure d'arbre et la liste (ou tableau) de taille voulue\r\nclass terrainDeJeu:\r\n # [11] [10] [9] [8] [7] [6]// ligne de l'ordi (joueur 1)\r\n # [0] [1] [2] [3] [4] [5]// ligne du joueur (joueur 0)\r\n def __init__(self,nCoupes,profondeur,nGrainesParCoupelle=4) : #Constructeur\r\n self.plateau = np.full(2*nCoupes,nGrainesParCoupelle)\r\n self.nGrainesParCoupelleInit = nGrainesParCoupelle\r\n self.nCoupes = nCoupes\r\n self.scores = [0,0] # scores[0] = score du joueur 0...\r\n self.tour = 0\r\n self.finie = False\r\n self.profondeurMinimax = profondeur\r\n self.arbreFils = {}\r\n \r\n \r\n #clone le terrain de jeu pour pouvoir 
simuler un coup par la suite\r\n def clone(self):\r\n clone = terrainDeJeu(self.nCoupes,self.profondeurMinimax,self.nGrainesParCoupelleInit)\r\n clone.plateau= self.plateau.copy()\r\n clone.scores = self.scores.copy()\r\n clone.tour = self.tour\r\n clone.finie = self.finie\r\n return clone\r\n \r\n #retourne l'id de la coupe suivant idCoupe sur le plateau (suivant = sens trigo)\r\n def coupeSuivante(self,idCoupe):\r\n return (idCoupe + 1)%(2*self.nCoupes)\r\n #retourne l'id de la coupe précédant idCoupe sur le plateau (précédant = sens horaire)\r\n def coupePrecedente(self,idCoupe):\r\n return (idCoupe - 1)%(2*self.nCoupes)\r\n #retourne le joueur (0 ou 1) à qui appartient la coupe idCoupe\r\n def joueurCoupe(self,idCoupe):\r\n return 0 if idCoupe < self.nCoupes else 1\r\n #retourne si idCoupe peut être prise (contient 2 ou 3 graines)\r\n def coupePrenable(self,idCoupe):\r\n return (self.plateau[idCoupe]==2 or self.plateau[idCoupe]==3)\r\n def deplacer(self,joueur,idCoupe):\r\n coupeInitiale = idCoupe #id de la coupelle choisie\r\n nGraines = self.plateau[idCoupe]\r\n self.plateau[idCoupe] = 0\r\n while (nGraines != 0): #On redistribue les graines de la coupelle initiale\r\n idCoupe = self.coupeSuivante(idCoupe)\r\n if (idCoupe != coupeInitiale): #On ne redistribue pas dans la coupelle initiale\r\n self.plateau[idCoupe] += 1\r\n nGraines -= 1\r\n coupeFinale = idCoupe\r\n joueurCoupeFinale = self.joueurCoupe(coupeFinale)\r\n if (joueur != joueurCoupeFinale): \r\n #on vérifie si on va affamer l'adversaire\r\n #si non, on prend les graines normalement\r\n if (self.nourrirAdversaire(joueur,coupeFinale)):\r\n while (self.joueurCoupe(idCoupe)==joueurCoupeFinale and self.coupePrenable(idCoupe)):\r\n self.scores[joueur]+=self.plateau[idCoupe]\r\n self.plateau[idCoupe]=0\r\n idCoupe = self.coupePrecedente(idCoupe)\r\n #si on va affamer l'adversaire :\r\n # on ne prend aucune graine donc on ne fait rien\r\n self.tour=(self.tour+1)%2\r\n \r\n #On compte le nombre de graines restantes sur le plateau\r\n def grainesRestantes(self): \r\n return np.sum(self.plateau)\r\n #on compte le nombre de graines restantes sur le plateau pour les coupes de joueur\r\n def grainesRestantesJoueur(self,joueur):\r\n if joueur==0:\r\n return np.sum(self.plateau[0:self.nCoupes])\r\n else:\r\n return np.sum(self.plateau[self.nCoupes:len(self.plateau)])\r\n #détermine si, dans le cas où joueur finit son coup sur la coupe coupeFinale,\r\n #Yson adversaire sera affamé ou pas \r\n #on regarde donc si il restera au moins une graine sur le terrain de l'adversaire\r\n def nourrirAdversaire(self,joueur,coupeFinale): \r\n adversaire = (joueur+1)%2 \r\n #on commence la vérification à la coupe la plus éloignée de adversaire (dans le sens horaire)\r\n admissible = False\r\n idCoupe = (self.nCoupes*(adversaire+1))-1\r\n while (self.joueurCoupe(idCoupe)==adversaire):\r\n #si idCoupe est après coupeFinale et qu'il reste des graines dedans le coup est admissible\r\n if (idCoupe>coupeFinale and self.plateau[idCoupe]!=0):\r\n admissible=True\r\n #si joueur peut pas prendre la coupe idCoupe le coup est admissible\r\n elif (not self.coupePrenable(idCoupe)):\r\n admissible=True\r\n idCoupe=self.coupePrecedente(idCoupe)\r\n #True si le coup est admissible pour la règle \"nourrir\"\r\n return admissible \r\n #coupes admissibles que peut jouer joueur pour nourrir son adversaire\r\n def coupesAdmissiblesNourrir(self,joueur):\r\n coupesAdmissibles = []\r\n #on commence par la coupe la plus proche de l'adversaire (dans le sens trigo)\r\n idCoupe = 
(self.nCoupes*(joueur+1))-1\r\n distance = 1\r\n while (self.joueurCoupe(idCoupe)==joueur):\r\n #s'il y a plus de graines dans idCoupe que la distance qui la sépare aux coupes de l'adversaire\r\n #le coup est admissible, au moins une graine nourrira l'adversaire\r\n if self.plateau[idCoupe]>=distance:\r\n coupesAdmissibles.append(idCoupe)\r\n idCoupe = self.coupePrecedente(idCoupe)\r\n distance +=1\r\n return coupesAdmissibles\r\n def coupesAdmissibles(self,joueur):\r\n adversaire = (joueur+1)%2\r\n if self.grainesRestantesJoueur(adversaire) == 0:\r\n coupesAdmissibles = self.coupesAdmissiblesNourrir(joueur)\r\n #si aucun coup ne peut être joué pour nourrir l'adversaire\r\n if len(coupesAdmissibles)==0:\r\n self.scores[joueur] += self.grainesRestantes()\r\n self.plateau = np.zeros(2*self.nCoupes,dtype=int)\r\n self.finie = True\r\n #partie terminée\r\n \r\n #sinon toutes les coupes non vides sont admissibles\r\n else :\r\n coupesAdmissibles = [(k+joueur*self.nCoupes) for k in range(self.nCoupes) if self.plateau[(k+joueur*self.nCoupes)]>0]\r\n \r\n return coupesAdmissibles\r\n \r\n def tourDuJoueur(self):\r\n joueur = 0\r\n #si l'adversaire n'a plus de graines, il faut obligatoirement le nourrir\r\n coupesAdmissibles = self.coupesAdmissibles(joueur)\r\n print(\"C'est au tour du joueur 1. Entrez le numéro de la coupelle à jouer:\")\r\n nCoupe = int(input())\r\n #print(\"coupesAdmissibles\",coupesAdmissibles)\r\n while nCoupe<0 or nCoupe>self.nCoupes-1 or (not (nCoupe in coupesAdmissibles)):\r\n #cas où la coupelle n'existe pas, ou correspond à un coup non admissible\r\n print(\"Coupelle incorrecte. Entrez le numéro de la coupelle à jouer.\")\r\n nCoupe = int(input())\r\n self.deplacer(joueur,nCoupe)\r\n self.jouer()\r\n \r\n def tourOrdi(self):\r\n joueur = 1\r\n self.profondeur = 0\r\n self.value = self.alphabeta(joueur,-np.inf,np.inf)\r\n for idCoupe in self.arbreFils.keys():\r\n print(\"coupe = \",idCoupe,\" : valeur = \",self.arbreFils[idCoupe].value)\r\n for idCoupe in self.arbreFils.keys():\r\n if self.value==self.arbreFils[idCoupe].value:\r\n self.deplacer(joueur,idCoupe)\r\n break\r\n \r\n \r\n self.jouer()\r\n \r\n def partieFinie(self):\r\n #True si le plateau ne contient plus aucune graine\r\n limiteGagne = self.nCoupes*self.nGrainesParCoupelleInit\r\n self.finie = (self.grainesRestantes()==0 or self.scores[0]> limiteGagne or self.scores[1]> limiteGagne)\r\n return self.finie\r\n\r\n def afficherPlateau(self):\r\n print(np.array([self.plateau[self.nCoupes:len(self.plateau)][::-1],self.plateau[0:self.nCoupes]])) # [::-1] permet d'inverse la liste\r\n\r\n def afficherScores(self):\r\n print(\"score J1.........\"+str(self.scores[0]))\r\n print(\"score MinMax.....\"+str(self.scores[1]))\r\n\r\n def evaluation(self,joueur):\r\n adversaire = (joueur+1)%2\r\n return self.scores[joueur]-self.scores[adversaire]\r\n \r\n \r\n #Fonction principale\r\n def jouer(self):\r\n \r\n if (not self.partieFinie()) :\r\n self.afficherPlateau()\r\n self.afficherScores()\r\n if (self.tour==0):\r\n self.tourDuJoueur()\r\n else:\r\n self.tourOrdi()\r\n print(\"\\n\")\r\n else:\r\n self.afficherPlateau()\r\n self.afficherScores()\r\n print(\"Partie Finie !\")\r\n\r\n #plus vraiment utile, le code du minimax est repris dans celui de la fonction alphabeta\r\n def minimax(self, joueurMaximisant, profondeurArbre=1): #joueurMaximisant = joueur pour lequel on veut maximiser le score (0 ou 1)\r\n #On simule ici des situations fictives de jeu de manière récursive (l'I.A. 
lit en quelque sorte l'avenir pour n=profondeur tours en avance)\r\n self.arbreFils = {}\r\n \r\n #on détermine les coups possibles\r\n #si aucun coup n'est possible cette fonction arrête aussi la partie\r\n coupesPossibles = self.coupesAdmissibles(self.tour) \r\n \r\n if (self.profondeur == self.profondeurMinimax or self.finie): #cas de base\r\n self.value = self.evaluation(joueurMaximisant)\r\n return self.value\r\n \r\n if self.tour==joueurMaximisant:\r\n fctComparaison = max\r\n self.value = - np.inf\r\n else:\r\n fctComparaison = min\r\n self.value = np.inf\r\n \r\n #on parcourt tous les coups possibles\r\n for idCoupe in coupesPossibles:\r\n fils=self.clone()\r\n fils.profondeur=self.profondeur+1\r\n fils.deplacer(fils.tour,idCoupe)\r\n fils.value = fils.minimax(joueurMaximisant)\r\n \r\n #on ne remplit effectivement l'arbre (attribut arbreFils)\r\n #que pour une profondeur < à profondeurArbre\r\n #on pourrait même ne pas le remplir du tout mais profondeurArbre = 1\r\n #permet d'afficher les valeurs associées à chaque coup...\r\n if (self.profondeur < profondeurArbre):\r\n self.arbreFils[idCoupe]=fils\r\n self.value = fctComparaison(self.value, fils.value)\r\n \r\n return self.value\r\n \r\n def alphabeta(self, joueurMaximisant, alpha, beta, profondeurArbre=1): #joueurMaximisant = joueur pour lequel on veut maximiser le score (0 ou 1)\r\n #On simule ici des situations fictives de jeu de manière récursive (l'I.A. lit en quelque sorte l'avenir pour n=profondeur tours en avance)\r\n self.arbreFils = {}\r\n \r\n #on détermine les coups possibles\r\n #si aucun coup n'est possible cette fonction arrête aussi la partie\r\n coupesPossibles = self.coupesAdmissibles(self.tour) \r\n \r\n if (self.profondeur == self.profondeurMinimax or self.finie): #cas de base\r\n self.value = self.evaluation(joueurMaximisant)\r\n return self.value\r\n \r\n if self.tour==joueurMaximisant:\r\n fctComparaison = max\r\n self.value = - np.inf\r\n else:\r\n fctComparaison = min\r\n self.value = np.inf\r\n \r\n #on parcourt tous les coups possibles\r\n for idCoupe in coupesPossibles:\r\n fils=self.clone()\r\n fils.profondeur=self.profondeur+1\r\n fils.deplacer(fils.tour,idCoupe)\r\n fils.value = fils.alphabeta(joueurMaximisant,alpha,beta)\r\n \r\n #on ne remplit effectivement l'arbre (attribut arbreFils)\r\n #que pour une profondeur < à profondeurArbre\r\n #on pourrait même ne pas le remplir du tout mais profondeurArbre = 1\r\n #permet d'afficher les valeurs associées à chaque coup...\r\n if (self.profondeur < profondeurArbre):\r\n self.arbreFils[idCoupe]=fils\r\n \r\n self.value = fctComparaison(self.value, fils.value)\r\n \r\n #coupures alpha et beta si on est sûrs d'avoir le meilleur résultat possible\r\n if self.tour==joueurMaximisant:\r\n if self.value >= beta:\r\n return self.value\r\n alpha = fctComparaison(alpha,self.value)\r\n else:\r\n if alpha >= self.value:\r\n return self.value\r\n beta = fctComparaison(beta,self.value)\r\n \r\n return self.value\r\n \r\n \r\n\r\nt = terrainDeJeu(nCoupes=6,nGrainesParCoupelle=4,profondeur=8)\r\nt.jouer()",
"step-ids": [
14,
20,
21,
24,
26
]
}
|
[
14,
20,
21,
24,
26
] |
alien_color = 'green'
if alien_color == 'green':
print('you earned 5 points')
alien_color2 = 'yellow'
if alien_color2 == 'green':
    print('you earned 5 points')
if alien_color2 == 'yellow':
print('Right answer')
# 5.4
alien_color = 'green'
if alien_color == 'green':
print('you earned 5 points')
else:
    print('you earned 10 points')
# 5.5
alien_color = 'green'
if alien_color == 'green':
print('you earned 5 points')
elif alien_color == 'yellow':
    print('you earned 10 points')
else:
    print('you earned 15 points')
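# Added note (not part of the exercise): changing alien_color to 'red' makes the 5.5
# chain above fall through to the final else branch and print 'you earned 15 points';
# with 'yellow' it would print 'you earned 10 points' instead.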
|
normal
|
{
"blob_id": "30e4c4c5ef944b0cd2d36b2fe5f7eee39dff1d16",
"index": 6511,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif alien_color == 'green':\n print('you earned 5 points')\n<mask token>\nif alien_color2 == 'green':\n print('your earned 5 points')\nif alien_color2 == 'yellow':\n print('Right answer')\n<mask token>\nif alien_color == 'green':\n print('you earned 5 points')\nelse:\n print('your earned 10 points')\n<mask token>\nif alien_color == 'green':\n print('you earned 5 points')\nelif alien_color == 'yellow':\n return 'your earned 10 points'\nelse:\n print('your earned 15 points')\n",
"step-3": "alien_color = 'green'\nif alien_color == 'green':\n print('you earned 5 points')\nalien_color2 = 'yellow'\nif alien_color2 == 'green':\n print('your earned 5 points')\nif alien_color2 == 'yellow':\n print('Right answer')\nalien_color = 'green'\nif alien_color == 'green':\n print('you earned 5 points')\nelse:\n print('your earned 10 points')\nalien_color = 'green'\nif alien_color == 'green':\n print('you earned 5 points')\nelif alien_color == 'yellow':\n return 'your earned 10 points'\nelse:\n print('your earned 15 points')\n",
"step-4": "alien_color = 'green'\r\nif alien_color == 'green':\r\n print('you earned 5 points')\r\n\r\nalien_color2 = 'yellow'\r\nif alien_color2 == 'green':\r\n print ('your earned 5 points')\r\nif alien_color2 == 'yellow':\r\n print('Right answer')\r\n\r\n# 5.4\r\nalien_color = 'green'\r\nif alien_color == 'green':\r\n print('you earned 5 points')\r\nelse:\r\n print('your earned 10 points')\r\n\r\n\r\n# 5.5\r\nalien_color = 'green'\r\nif alien_color == 'green':\r\n print('you earned 5 points')\r\nelif alien_color == 'yellow':\r\n return ('your earned 10 points')\r\nelse:\r\n print('your earned 15 points')\r\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import numpy
from nn_functor import functions
class Linear(functions.Learn):
def implement(self, a, p):
x = a[0]
w, b0 = p
return w.dot(x) + b0
def update(self, a, b, p):
i = self.implement(a, p)
x = a[0]
w, b0 = p
u_w = w - self.eps * (i - b)[:, None].dot(x[None, :])
u_b0 = b0 - self.eps * (i - b)
return u_w, u_b0
def request(self, a, b, p):
i = self.implement(a, p)
x = a[0]
w, b0 = p
return x - w.T.dot(i - b),
class LinearNode(functions.Node):
def __init__(self, in_size, out_size, eps):
super().__init__(Linear(eps))
self.param_name = [
"w", "b"
]
self.w = numpy.random.randn(out_size, in_size)
self.b = numpy.random.randn(out_size)
|
normal
|
{
"blob_id": "ec9a152e39a0c51319e4db58eea4496cff5b2fd6",
"index": 3427,
"step-1": "<mask token>\n\n\nclass Linear(functions.Learn):\n <mask token>\n <mask token>\n <mask token>\n\n\nclass LinearNode(functions.Node):\n\n def __init__(self, in_size, out_size, eps):\n super().__init__(Linear(eps))\n self.param_name = ['w', 'b']\n self.w = numpy.random.randn(out_size, in_size)\n self.b = numpy.random.randn(out_size)\n",
"step-2": "<mask token>\n\n\nclass Linear(functions.Learn):\n\n def implement(self, a, p):\n x = a[0]\n w, b0 = p\n return w.dot(x) + b0\n <mask token>\n\n def request(self, a, b, p):\n i = self.implement(a, p)\n x = a[0]\n w, b0 = p\n return x - w.T.dot(i - b),\n\n\nclass LinearNode(functions.Node):\n\n def __init__(self, in_size, out_size, eps):\n super().__init__(Linear(eps))\n self.param_name = ['w', 'b']\n self.w = numpy.random.randn(out_size, in_size)\n self.b = numpy.random.randn(out_size)\n",
"step-3": "<mask token>\n\n\nclass Linear(functions.Learn):\n\n def implement(self, a, p):\n x = a[0]\n w, b0 = p\n return w.dot(x) + b0\n\n def update(self, a, b, p):\n i = self.implement(a, p)\n x = a[0]\n w, b0 = p\n u_w = w - self.eps * (i - b)[:, None].dot(x[None, :])\n u_b0 = b0 - self.eps * (i - b)\n return u_w, u_b0\n\n def request(self, a, b, p):\n i = self.implement(a, p)\n x = a[0]\n w, b0 = p\n return x - w.T.dot(i - b),\n\n\nclass LinearNode(functions.Node):\n\n def __init__(self, in_size, out_size, eps):\n super().__init__(Linear(eps))\n self.param_name = ['w', 'b']\n self.w = numpy.random.randn(out_size, in_size)\n self.b = numpy.random.randn(out_size)\n",
"step-4": "import numpy\nfrom nn_functor import functions\n\n\nclass Linear(functions.Learn):\n\n def implement(self, a, p):\n x = a[0]\n w, b0 = p\n return w.dot(x) + b0\n\n def update(self, a, b, p):\n i = self.implement(a, p)\n x = a[0]\n w, b0 = p\n u_w = w - self.eps * (i - b)[:, None].dot(x[None, :])\n u_b0 = b0 - self.eps * (i - b)\n return u_w, u_b0\n\n def request(self, a, b, p):\n i = self.implement(a, p)\n x = a[0]\n w, b0 = p\n return x - w.T.dot(i - b),\n\n\nclass LinearNode(functions.Node):\n\n def __init__(self, in_size, out_size, eps):\n super().__init__(Linear(eps))\n self.param_name = ['w', 'b']\n self.w = numpy.random.randn(out_size, in_size)\n self.b = numpy.random.randn(out_size)\n",
"step-5": "import numpy\n\nfrom nn_functor import functions\n\n\nclass Linear(functions.Learn):\n\n def implement(self, a, p):\n x = a[0]\n w, b0 = p\n\n return w.dot(x) + b0\n\n def update(self, a, b, p):\n i = self.implement(a, p)\n\n x = a[0]\n w, b0 = p\n\n u_w = w - self.eps * (i - b)[:, None].dot(x[None, :])\n u_b0 = b0 - self.eps * (i - b)\n return u_w, u_b0\n\n def request(self, a, b, p):\n i = self.implement(a, p)\n\n x = a[0]\n w, b0 = p\n\n return x - w.T.dot(i - b),\n\n\nclass LinearNode(functions.Node):\n\n def __init__(self, in_size, out_size, eps):\n super().__init__(Linear(eps))\n\n self.param_name = [\n \"w\", \"b\"\n ]\n\n self.w = numpy.random.randn(out_size, in_size)\n self.b = numpy.random.randn(out_size)\n",
"step-ids": [
3,
5,
6,
7,
8
]
}
|
[
3,
5,
6,
7,
8
] |
# The purpose of this bot is to cick the first black pixel.
# Testing a change here done by Git.
# changes through branches
import pyautogui
import keyboard
import win32api
import win32con
import time
# click function, with a 0.01 pause inorder to properly run the script
def click(x, y):
win32api.SetCursorPos((x, y))
win32api.mouse_event(win32con.MOUSEEVENTF_LEFTDOWN, 0, 0)
time.sleep(0.01)
win32api.mouse_event(win32con.MOUSEEVENTF_LEFTUP, 0, 0)
# pressing 's' to stop the function
while keyboard.is_pressed('s') == False:
# If the pixel is black (0), click on that pixel
if pyautogui.pixel(xPosition, yPosition)[0] == 0:
click(xPosition, yPosition)
|
normal
|
{
"blob_id": "9f831b8c90dd428879319b63712bd03fcc01b631",
"index": 8212,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef click(x, y):\n win32api.SetCursorPos((x, y))\n win32api.mouse_event(win32con.MOUSEEVENTF_LEFTDOWN, 0, 0)\n time.sleep(0.01)\n win32api.mouse_event(win32con.MOUSEEVENTF_LEFTUP, 0, 0)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef click(x, y):\n win32api.SetCursorPos((x, y))\n win32api.mouse_event(win32con.MOUSEEVENTF_LEFTDOWN, 0, 0)\n time.sleep(0.01)\n win32api.mouse_event(win32con.MOUSEEVENTF_LEFTUP, 0, 0)\n\n\nwhile keyboard.is_pressed('s') == False:\n if pyautogui.pixel(xPosition, yPosition)[0] == 0:\n click(xPosition, yPosition)\n",
"step-4": "import pyautogui\nimport keyboard\nimport win32api\nimport win32con\nimport time\n\n\ndef click(x, y):\n win32api.SetCursorPos((x, y))\n win32api.mouse_event(win32con.MOUSEEVENTF_LEFTDOWN, 0, 0)\n time.sleep(0.01)\n win32api.mouse_event(win32con.MOUSEEVENTF_LEFTUP, 0, 0)\n\n\nwhile keyboard.is_pressed('s') == False:\n if pyautogui.pixel(xPosition, yPosition)[0] == 0:\n click(xPosition, yPosition)\n",
"step-5": "# The purpose of this bot is to cick the first black pixel.\r\n# Testing a change here done by Git. \r\n# changes through branches\r\n\r\nimport pyautogui\r\nimport keyboard\r\nimport win32api\r\nimport win32con\r\nimport time\r\n\r\n# click function, with a 0.01 pause inorder to properly run the script\r\n\r\n\r\ndef click(x, y):\r\n win32api.SetCursorPos((x, y))\r\n win32api.mouse_event(win32con.MOUSEEVENTF_LEFTDOWN, 0, 0)\r\n time.sleep(0.01)\r\n win32api.mouse_event(win32con.MOUSEEVENTF_LEFTUP, 0, 0)\r\n\r\n\r\n# pressing 's' to stop the function\r\n\r\nwhile keyboard.is_pressed('s') == False:\r\n\r\n # If the pixel is black (0), click on that pixel\r\n\r\n if pyautogui.pixel(xPosition, yPosition)[0] == 0:\r\n click(xPosition, yPosition)\r\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2017/7/14 下午6:06
# @Author : Huang HUi
# @Site :
# @File : Longest Common Prefix.py
# @Software: PyCharm
class Solution(object):
def longestCommonPrefix(self, strs):
"""
:type strs: List[str]
:rtype: str
"""
if len(strs)==0:
return ''
if len(strs)==1 :
return strs
res=[]
min_=strs[0]
for i in range(len(strs)):
if min_>strs[i]:
min_=strs[i]
for i in range(len(min_)):
count=0
for j in range(len(strs)):
if min_[i] in strs[j][i]:
count+=1
if count==len(strs):
res.append(min_[i])
else:
break
return ''.join(res)
if __name__ == '__main__':
a=["abc","abcc","asc","abcd"]
b=["c","c"]
print(Solution().longestCommonPrefix(b))
|
normal
|
{
"blob_id": "1aed8e92a31ee42a3a609123af927f7074598ec1",
"index": 1820,
"step-1": "<mask token>\n",
"step-2": "class Solution(object):\n <mask token>\n\n\n<mask token>\n",
"step-3": "class Solution(object):\n\n def longestCommonPrefix(self, strs):\n \"\"\"\n :type strs: List[str]\n :rtype: str\n \"\"\"\n if len(strs) == 0:\n return ''\n if len(strs) == 1:\n return strs\n res = []\n min_ = strs[0]\n for i in range(len(strs)):\n if min_ > strs[i]:\n min_ = strs[i]\n for i in range(len(min_)):\n count = 0\n for j in range(len(strs)):\n if min_[i] in strs[j][i]:\n count += 1\n if count == len(strs):\n res.append(min_[i])\n else:\n break\n return ''.join(res)\n\n\n<mask token>\n",
"step-4": "class Solution(object):\n\n def longestCommonPrefix(self, strs):\n \"\"\"\n :type strs: List[str]\n :rtype: str\n \"\"\"\n if len(strs) == 0:\n return ''\n if len(strs) == 1:\n return strs\n res = []\n min_ = strs[0]\n for i in range(len(strs)):\n if min_ > strs[i]:\n min_ = strs[i]\n for i in range(len(min_)):\n count = 0\n for j in range(len(strs)):\n if min_[i] in strs[j][i]:\n count += 1\n if count == len(strs):\n res.append(min_[i])\n else:\n break\n return ''.join(res)\n\n\nif __name__ == '__main__':\n a = ['abc', 'abcc', 'asc', 'abcd']\n b = ['c', 'c']\n print(Solution().longestCommonPrefix(b))\n",
"step-5": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time : 2017/7/14 下午6:06\n# @Author : Huang HUi\n# @Site : \n# @File : Longest Common Prefix.py\n# @Software: PyCharm\n\nclass Solution(object):\n def longestCommonPrefix(self, strs):\n \"\"\"\n :type strs: List[str]\n :rtype: str\n \"\"\"\n if len(strs)==0:\n return ''\n if len(strs)==1 :\n return strs\n res=[]\n min_=strs[0]\n for i in range(len(strs)):\n if min_>strs[i]:\n min_=strs[i]\n for i in range(len(min_)):\n count=0\n for j in range(len(strs)):\n if min_[i] in strs[j][i]:\n count+=1\n if count==len(strs):\n res.append(min_[i])\n else:\n break\n return ''.join(res)\n\n\n\n\n\n\n\nif __name__ == '__main__':\n a=[\"abc\",\"abcc\",\"asc\",\"abcd\"]\n b=[\"c\",\"c\"]\n print(Solution().longestCommonPrefix(b))\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
"""
Every block element test will be automatically
wrapped inside `<p></p>\n`. Thats why every block
test should include this wrapper tag.
"""
from io import BytesIO
from unittest import TestCase
from unittest.mock import patch, Mock
import pytest
from django.core.files import File
from django_dynamic_fixture import G
from magplan.models import Attachment
from magplan.xmd.renderer import XMDRenderer
from magplan.xmd.mappers import plan_internal_mapper
@pytest.mark.django_db
class TestImage(TestCase):
MOCK_SRC = 'dummy.jpg'
MOCK_TITLE = 'title'
MOCK_ALT_TEXT = 'alt_text'
def setUp(self):
file1 = File(name='file1.jpg', file=BytesIO(b'abcdef'))
attachment1 = G(Attachment, original_filename='user_friendly_filename1.jpg', file=file1)
self.mock_image_mapper = Mock()
self.renderer = XMDRenderer(image_mapper=self.mock_image_mapper, attachments=[attachment1])
self.expected_html = (
'<figure>'
'<img conf="dummy.jpg" alt="alt_text" /><figcaption>alt_text</figcaption>'
'</figure>'
)
def test_render_image(self):
self.mock_image_mapper.return_value = self.MOCK_SRC
html = self.renderer.image(self.MOCK_SRC, self.MOCK_TITLE, self.MOCK_ALT_TEXT)
self.mock_image_mapper.assert_called_with(self.MOCK_SRC, self.renderer.attachments)
assert html == self.expected_html
|
normal
|
{
"blob_id": "e5bf57e7a171f7e42928b78d09dda7593a231cf9",
"index": 9841,
"step-1": "<mask token>\n\n\[email protected]_db\nclass TestImage(TestCase):\n <mask token>\n <mask token>\n <mask token>\n\n def setUp(self):\n file1 = File(name='file1.jpg', file=BytesIO(b'abcdef'))\n attachment1 = G(Attachment, original_filename=\n 'user_friendly_filename1.jpg', file=file1)\n self.mock_image_mapper = Mock()\n self.renderer = XMDRenderer(image_mapper=self.mock_image_mapper,\n attachments=[attachment1])\n self.expected_html = (\n '<figure><img conf=\"dummy.jpg\" alt=\"alt_text\" /><figcaption>alt_text</figcaption></figure>'\n )\n <mask token>\n",
"step-2": "<mask token>\n\n\[email protected]_db\nclass TestImage(TestCase):\n <mask token>\n <mask token>\n <mask token>\n\n def setUp(self):\n file1 = File(name='file1.jpg', file=BytesIO(b'abcdef'))\n attachment1 = G(Attachment, original_filename=\n 'user_friendly_filename1.jpg', file=file1)\n self.mock_image_mapper = Mock()\n self.renderer = XMDRenderer(image_mapper=self.mock_image_mapper,\n attachments=[attachment1])\n self.expected_html = (\n '<figure><img conf=\"dummy.jpg\" alt=\"alt_text\" /><figcaption>alt_text</figcaption></figure>'\n )\n\n def test_render_image(self):\n self.mock_image_mapper.return_value = self.MOCK_SRC\n html = self.renderer.image(self.MOCK_SRC, self.MOCK_TITLE, self.\n MOCK_ALT_TEXT)\n self.mock_image_mapper.assert_called_with(self.MOCK_SRC, self.\n renderer.attachments)\n assert html == self.expected_html\n",
"step-3": "<mask token>\n\n\[email protected]_db\nclass TestImage(TestCase):\n MOCK_SRC = 'dummy.jpg'\n MOCK_TITLE = 'title'\n MOCK_ALT_TEXT = 'alt_text'\n\n def setUp(self):\n file1 = File(name='file1.jpg', file=BytesIO(b'abcdef'))\n attachment1 = G(Attachment, original_filename=\n 'user_friendly_filename1.jpg', file=file1)\n self.mock_image_mapper = Mock()\n self.renderer = XMDRenderer(image_mapper=self.mock_image_mapper,\n attachments=[attachment1])\n self.expected_html = (\n '<figure><img conf=\"dummy.jpg\" alt=\"alt_text\" /><figcaption>alt_text</figcaption></figure>'\n )\n\n def test_render_image(self):\n self.mock_image_mapper.return_value = self.MOCK_SRC\n html = self.renderer.image(self.MOCK_SRC, self.MOCK_TITLE, self.\n MOCK_ALT_TEXT)\n self.mock_image_mapper.assert_called_with(self.MOCK_SRC, self.\n renderer.attachments)\n assert html == self.expected_html\n",
"step-4": "<mask token>\nfrom io import BytesIO\nfrom unittest import TestCase\nfrom unittest.mock import patch, Mock\nimport pytest\nfrom django.core.files import File\nfrom django_dynamic_fixture import G\nfrom magplan.models import Attachment\nfrom magplan.xmd.renderer import XMDRenderer\nfrom magplan.xmd.mappers import plan_internal_mapper\n\n\[email protected]_db\nclass TestImage(TestCase):\n MOCK_SRC = 'dummy.jpg'\n MOCK_TITLE = 'title'\n MOCK_ALT_TEXT = 'alt_text'\n\n def setUp(self):\n file1 = File(name='file1.jpg', file=BytesIO(b'abcdef'))\n attachment1 = G(Attachment, original_filename=\n 'user_friendly_filename1.jpg', file=file1)\n self.mock_image_mapper = Mock()\n self.renderer = XMDRenderer(image_mapper=self.mock_image_mapper,\n attachments=[attachment1])\n self.expected_html = (\n '<figure><img conf=\"dummy.jpg\" alt=\"alt_text\" /><figcaption>alt_text</figcaption></figure>'\n )\n\n def test_render_image(self):\n self.mock_image_mapper.return_value = self.MOCK_SRC\n html = self.renderer.image(self.MOCK_SRC, self.MOCK_TITLE, self.\n MOCK_ALT_TEXT)\n self.mock_image_mapper.assert_called_with(self.MOCK_SRC, self.\n renderer.attachments)\n assert html == self.expected_html\n",
"step-5": "\"\"\"\nEvery block element test will be automatically\nwrapped inside `<p></p>\\n`. Thats why every block\n\ntest should include this wrapper tag.\n\"\"\"\nfrom io import BytesIO\nfrom unittest import TestCase\nfrom unittest.mock import patch, Mock\n\nimport pytest\nfrom django.core.files import File\nfrom django_dynamic_fixture import G\n\nfrom magplan.models import Attachment\nfrom magplan.xmd.renderer import XMDRenderer\nfrom magplan.xmd.mappers import plan_internal_mapper\n\n\[email protected]_db\nclass TestImage(TestCase):\n MOCK_SRC = 'dummy.jpg'\n MOCK_TITLE = 'title'\n MOCK_ALT_TEXT = 'alt_text'\n\n def setUp(self):\n file1 = File(name='file1.jpg', file=BytesIO(b'abcdef'))\n attachment1 = G(Attachment, original_filename='user_friendly_filename1.jpg', file=file1)\n\n self.mock_image_mapper = Mock()\n\n self.renderer = XMDRenderer(image_mapper=self.mock_image_mapper, attachments=[attachment1])\n\n self.expected_html = (\n '<figure>'\n '<img conf=\"dummy.jpg\" alt=\"alt_text\" /><figcaption>alt_text</figcaption>'\n '</figure>'\n )\n\n def test_render_image(self):\n self.mock_image_mapper.return_value = self.MOCK_SRC\n\n html = self.renderer.image(self.MOCK_SRC, self.MOCK_TITLE, self.MOCK_ALT_TEXT)\n\n self.mock_image_mapper.assert_called_with(self.MOCK_SRC, self.renderer.attachments)\n assert html == self.expected_html\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
'''
quick and dirty remote shell using sockets and file descriptors
'''
import socket
import os
s=socket.socket(socket.AF_INET,socket.SOCK_STREAM)
s.bind(('',8082))
s.listen(1)
conn,__=s.accept()
os.dup2(conn.fileno(),0)
os.dup2(conn.fileno(),1)
#print("asdf")
os.system('/bin/bash')
conn.close()
|
normal
|
{
"blob_id": "38a2113c0531648a90cf70c4b18d640d5ebb3f47",
"index": 5637,
"step-1": "<mask token>\n",
"step-2": "<mask token>\ns.bind(('', 8082))\ns.listen(1)\n<mask token>\nos.dup2(conn.fileno(), 0)\nos.dup2(conn.fileno(), 1)\nos.system('/bin/bash')\nconn.close()\n",
"step-3": "<mask token>\ns = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\ns.bind(('', 8082))\ns.listen(1)\nconn, __ = s.accept()\nos.dup2(conn.fileno(), 0)\nos.dup2(conn.fileno(), 1)\nos.system('/bin/bash')\nconn.close()\n",
"step-4": "<mask token>\nimport socket\nimport os\ns = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\ns.bind(('', 8082))\ns.listen(1)\nconn, __ = s.accept()\nos.dup2(conn.fileno(), 0)\nos.dup2(conn.fileno(), 1)\nos.system('/bin/bash')\nconn.close()\n",
"step-5": "'''\nquick and dirty remote shell using sockets and file descriptors\n'''\nimport socket\nimport os\n\ns=socket.socket(socket.AF_INET,socket.SOCK_STREAM)\ns.bind(('',8082))\n\ns.listen(1)\n\nconn,__=s.accept()\n\nos.dup2(conn.fileno(),0)\nos.dup2(conn.fileno(),1)\n\n#print(\"asdf\")\nos.system('/bin/bash')\n\t\nconn.close()\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
__author__ = 'gaa8664'
import pymssql
class Connection:
def __init__(self):
self.connection = pymssql.connect(server = 'gditsn033\SQLPROD', database='ProdigiousDB', user='sa', password='sgrh@2016')
def __enter__(self):
self.cursor = self.connection.cursor()
return self.cursor
def __exit__(self, exc_type, exc_val, exc_tb):
self.cursor.close()
self.connection.close()
|
normal
|
{
"blob_id": "12dc248a95a84603065e23ce8fd33163bfcd2d3e",
"index": 9295,
"step-1": "<mask token>\n\n\nclass Connection:\n\n def __init__(self):\n self.connection = pymssql.connect(server='gditsn033\\\\SQLPROD',\n database='ProdigiousDB', user='sa', password='sgrh@2016')\n\n def __enter__(self):\n self.cursor = self.connection.cursor()\n return self.cursor\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass Connection:\n\n def __init__(self):\n self.connection = pymssql.connect(server='gditsn033\\\\SQLPROD',\n database='ProdigiousDB', user='sa', password='sgrh@2016')\n\n def __enter__(self):\n self.cursor = self.connection.cursor()\n return self.cursor\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n self.cursor.close()\n self.connection.close()\n",
"step-3": "__author__ = 'gaa8664'\n<mask token>\n\n\nclass Connection:\n\n def __init__(self):\n self.connection = pymssql.connect(server='gditsn033\\\\SQLPROD',\n database='ProdigiousDB', user='sa', password='sgrh@2016')\n\n def __enter__(self):\n self.cursor = self.connection.cursor()\n return self.cursor\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n self.cursor.close()\n self.connection.close()\n",
"step-4": "__author__ = 'gaa8664'\nimport pymssql\n\n\nclass Connection:\n\n def __init__(self):\n self.connection = pymssql.connect(server='gditsn033\\\\SQLPROD',\n database='ProdigiousDB', user='sa', password='sgrh@2016')\n\n def __enter__(self):\n self.cursor = self.connection.cursor()\n return self.cursor\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n self.cursor.close()\n self.connection.close()\n",
"step-5": "__author__ = 'gaa8664'\nimport pymssql\n\n\nclass Connection:\n\n def __init__(self):\n self.connection = pymssql.connect(server = 'gditsn033\\SQLPROD', database='ProdigiousDB', user='sa', password='sgrh@2016')\n\n def __enter__(self):\n self.cursor = self.connection.cursor()\n return self.cursor\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n self.cursor.close()\n self.connection.close()",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |