'''
runSPP.py - wrap spp peak caller
========================================
:Tags: Python
Purpose
-------
Runs the spp peak caller.
The workflow follows the tutorial at:
http://compbio.med.harvard.edu/Supplements/ChIP-seq/tutorial.html
Usage
-----
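
Example (illustrative only; file names are placeholders)::

   python runSPP.py --control-filename=input.bam --threads=4 chip.bam output

The script writes ``output.crosscorrelation.pdf``, ``output.broadpeak.txt``,
``output.summit.txt`` and ``output.narrowpeak.txt``, and reports the
estimated fragment shift and peak count on stdout.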
Documentation
-------------
Requirements:
* spp >= ?
* snow >= 0.3.13
* bedtools >= 2.21.0
Code
----
'''
import os
import sys
import subprocess
import collections
from cgatcore import experiment as E
from rpy2.robjects import r as R
def bamToBed(infile, outfile):
'''convert bam to bed with bedtools.'''
statement = "bamToBed -i %(infile)s > %(outfile)s" % locals()
E.debug("executing statement '%s'" % statement)
retcode = subprocess.call(statement,
cwd=os.getcwd(),
shell=True)
if retcode < 0:
raise OSError("Child was terminated by signal %i: \n%s\n" %
(-retcode, statement))
return outfile
SPPPeak = collections.namedtuple(
"SPPPeak",
"contig unrefined_start unrefined_end strand "
"posterior summit height refined_start refined_end median fdr")
def iteratePeaks(infile):
    '''iterate over spp peaks in infile.'''
for line in infile:
if line.startswith("#"):
continue
if line.startswith("PEAKID\tChrom"):
continue
# skip empty lines
if line.startswith("\n"):
continue
data = line[:-1].split("\t")
if len(data) != 12:
raise ValueError("could not parse line %s" % line)
# I assume these are 1-based coordinates
data[2] = max(int(data[2]) - 1, 0)
# end
data[3] = int(data[3])
# posterior
data[5] = float(data[5])
# summit
data[6] = max(int(data[6]) - 1, 0)
# height
data[7] = int(data[7])
# refined_start
data[8] = max(int(data[8]) - 1, 0)
# end
data[9] = int(data[9])
# median
data[10] = int(data[10])
        # fdr
data[11] = float(data[11])
yield SPPPeak._make(data[1:])
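

# A minimal consumption sketch for iteratePeaks (the file name below is
# hypothetical; any 12-column tab-separated spp peak table will do):
#
#     with open("peaks.txt") as peaks:
#         for peak in iteratePeaks(peaks):
#             print(peak.contig, peak.refined_start, peak.refined_end, peak.fdr)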
def main(argv=None):
"""script main.
parses command line options in sys.argv, unless *argv* is given.
"""
if not argv:
argv = sys.argv
# setup command line parser
parser = E.OptionParser(version="%prog version: $Id$",
usage=globals()["__doc__"])
parser.add_option("-f", "--input-format", dest="input_format",
type="choice",
choices=("bam",),
help="input file format [default=%default].")
parser.add_option("-w", "--window-size", dest="window_size", type="int",
help="window size [default=%default].")
parser.add_option("-c", "--control-filename",
dest="control_filename",
type="string",
help="filename of input/control data in "
"bed format [default=%default].")
parser.add_option("-t", "--threads", dest="threads", type="int",
help="number of threads to use [default=%default].")
parser.add_option("-q", "--fdr-threshold",
dest="fdr_threshold", type="float",
help="fdr threshold [default=%default].")
parser.add_option("-z", "--spp-z-threshold", dest="z_threshold", type="float",
help="z threshold [default=%default].")
parser.add_option("--bin", dest="bin", type="int",
help="bin tags within the specified number "
" of basepairs to speed up calculation;"
" increasing bin size decreases the accuracy "
"of the determined parameters [default=%default]")
parser.add_option("--spp-srange-min", dest="srange_min", type="float",
help="srange gives the possible range for the "
" size of the protected region;"
" srange should be higher than tag length; "
" making the upper boundary too high"
" will increase calculation time [%default]")
parser.add_option("--spp-srange-max", dest="srange_max", type="float",
help="srange gives the possible range for the "
" size of the protected region;"
" srange should be higher than tag length; "
" making the upper boundary too high"
" will increase calculation time [%default]")
parser.set_defaults(
input_format="bam",
threads=1,
fdr_threshold=0.05,
window_size=1000,
offset=125,
srange_min=50,
srange_max=500,
bin=5,
z_threshold=3,
)
# add common options (-h/--help, ...) and parse command line
(options, args) = E.start(parser, argv=argv)
if len(args) != 2:
raise ValueError(
"please specify a filename with sample data and an output file")
filename_sample, filename_output = args[0], args[1]
filename_control = options.control_filename
    # load the spp and snow R libraries
R.library('spp')
R.library('snow')
# read data
E.info("reading data")
R('''chip.data <- read.bam.tags('%s')''' % filename_sample)
R('''input.data <- read.bam.tags('%s')''' % filename_control)
R('''cluster = makeCluster( %i )''' % (options.threads))
E.info("computing binding characteristics")
# get binding info from cross-correlation profile
# srange gives the possible range for the size of the protected region;
# srange should be higher than tag length; making the upper boundary too
# high will increase calculation time
# bin - bin tags within the specified number of basepairs to speed
# up calculation; increasing bin size decreases the accuracy of
# the determined parameters
srange_min, srange_max = options.srange_min, options.srange_max
bin = options.bin
R('''binding.characteristics <- get.binding.characteristics(chip.data,
srange=c(%(srange_min)i,%(srange_max)i),
bin=%(bin)s,
cluster=cluster);''' % locals())
# print out binding peak separation distance
options.stdout.write(
"shift\t%i\n" % R('''binding.characteristics$peak$x''')[0])
##################################################
##################################################
##################################################
E.info("plot cross correlation profile")
# plot cross-correlation profile
R('''pdf(file="%s.crosscorrelation.pdf",width=5,height=5)''' %
filename_output)
R('''par(mar = c(3.5,3.5,1.0,0.5), mgp = c(2,0.65,0), cex = 0.8);''')
R('''plot(binding.characteristics$cross.correlation,
type='l',
xlab="strand shift",
ylab="cross-correlation");''')
R('''abline(v=binding.characteristics$peak$x,lty=2,col=2)''')
R('''dev.off();''')
E.info("selecting informative tags based on the binding characteristics")
# select informative tags based on the binding characteristics
R('''chip.data <- select.informative.tags(
chip.data,binding.characteristics);''')
R('''input.data <- select.informative.tags(
input.data,binding.characteristics);''')
E.info("outputting broad peaks")
window_size, z_threshold = options.window_size, options.z_threshold
R('''broad.clusters <- get.broad.enrichment.clusters(chip.data,input.data,
window.size=%(window_size)i,
z.thr=%(z_threshold)f,
tag.shift=round(binding.characteristics$peak$x/2))''' % locals())
# write out in broadPeak format
R('''write.broadpeak.info(broad.clusters,"%s.broadpeak.txt")''' %
filename_output)
    # binding detection parameters: desired FDR (tutorial default: 1%).
    # Alternatively, an E-value can be supplied to the method calls below
    # instead of the fdr parameter. binding.characteristics contains the
    # optimized half-size for the binding detection window.
R('''detection.window.halfsize <- binding.characteristics$whs;''')
# determine binding positions using wtd method
E.info("determining binding positions using wtd method")
fdr = options.fdr_threshold
R('''bp <- find.binding.positions(
signal.data=chip.data,control.data=input.data,
fdr=%(fdr)f,whs=detection.window.halfsize,cluster=cluster)''' % locals())
options.stdout.write("detected_peaks\t%i\n" % R(
'''sum(unlist(lapply(bp$npl,function(d) length(d$x))))''')[0])
# output detected binding positions
R('''output.binding.results(bp,"%s.summit.txt");''' % filename_output)
R('''bp <- add.broad.peak.regions(chip.data,input.data,bp,
window.size=%(window_size)i,z.thr=%(z_threshold)f)''' % locals())
# output using narrowPeak format
R('''write.narrowpeak.binding(bp,"%s.narrowpeak.txt")''' %
filename_output)
# write footer and output benchmark information.
E.stop()
if __name__ == "__main__":
sys.exit(main(sys.argv))
import os
import pandas as pd
from sklearn.decomposition import PCA
import matplotlib.pyplot as plt
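
# Apply PCA to the stop-search training data: read the signal and background
# CSVs, drop the bookkeeping columns (0, 1, 2, 15), fit PCA on the combined
# feature columns, overwrite those columns with their PCA projections, and
# write the transformed signal/background splits back out as separate CSVs.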
name="/home/t3cms/thessel/Workflow1.5/stop_data/stop_train_sig_wc.csv"
name_bkg="/home/t3cms/thessel/Workflow1.5/stop_data/stop_train_bkg_wc.csv"
drop_cols=[0,1,2,15]
names = [i for i in range(16)]
#columns=[] #list of columns we want to take
file_df_sig=pd.read_csv(name, sep=",",names=names)
tmp_df_sig = file_df_sig.drop(drop_cols, axis=1)
file_df_bkg = pd.read_csv(name_bkg, sep=",",names=names)
tmp_df_bkg = file_df_bkg.drop(drop_cols, axis=1)
tmp_df = pd.concat([tmp_df_sig , tmp_df_bkg] , ignore_index=True)
#fig , ax = plt.subplots()
#tmp_df.hist(bins=10,ax=ax)
#fig.savefig("before_pca.pdf")
pca = PCA(n_components=len(tmp_df.columns)).fit_transform(tmp_df)
pca_df = pd.DataFrame(data=pca, columns=tmp_df.columns)
#fig , ax = plt.subplots()
#df.hist(bins=10,ax=ax)
#fig.savefig("after_pca.pdf")
final_df = pd.concat([file_df_sig, file_df_bkg], ignore_index=True)
print("Before PCA", final_df)
for i in pca_df.columns:
    final_df[i] = pca_df[i]
print("After PCA", final_df)
cut = len(file_df_sig.index)
final_df.iloc[:cut].to_csv("pca_stop_train_sig_wc.csv", header=False, index=False)
final_df.iloc[cut:].to_csv("pca_stop_train_bkg_wc.csv", header=False, index=False)

M, N = map(int, input().split())


def is_prime(num):
    """Trial division up to sqrt(num)."""
    if num <= 1:
        return False
    i = 2
    while i * i <= num:
        if num % i == 0:
            return False
        i += 1
    return True


if __name__ == "__main__":
    for i in range(M, N + 1):
        if is_prime(i):
            print(i)
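
# Example run (illustrative file name): `echo "10 30" | python primes.py`
# prints the primes between 10 and 30 inclusive.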

import weakref

from soma.controller import Controller
from soma.functiontools import SomaPartial
from traits.api import File, Undefined, Instance


class MatlabConfig(Controller):
    executable = File(Undefined, output=False,
                      desc='Full path of the matlab executable')


def load_module(capsul_engine, module_name):
    capsul_engine.add_trait('matlab', Instance(MatlabConfig))
    capsul_engine.matlab = MatlabConfig()
    capsul_engine.matlab.on_trait_change(
        SomaPartial(update_execution_context, weakref.proxy(capsul_engine)))


def init_module(capsul_engine, module_name, loaded_module):
    pass


def update_execution_context(capsul_engine):
    if capsul_engine.matlab.executable is not Undefined:
        capsul_engine.execution_context.environ['MATLAB_EXECUTABLE'] \
            = capsul_engine.matlab.executable
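
# A minimal usage sketch (hypothetical `engine` object; assumes something
# CapsulEngine-like that exposes add_trait() and an execution_context whose
# `environ` behaves like a dict):
#
#     load_module(engine, 'matlab')
#     engine.matlab.executable = '/usr/local/bin/matlab'  # path is an example
#     # the trait-change hook then calls update_execution_context(engine),
#     # exporting MATLAB_EXECUTABLE into the execution environment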

import mlcd
import pygame
import random
import time

PLAYER_CHAR = '>'
OBSTACLE_CHAR = '|'

# two LCD rows of 12 playfield cells; the remaining 4 of the 16 columns
# show the score
screenbuff = [[' '] * 12 for _ in range(2)]
player = {'position': 0, 'line': 0, 'score': 0}
game = {'speed': 4.05, 'level': 2.5, 'obstacle': 0}
keys = {'space': False, 'quit': False, 'next': False}


def keypress():
    """Poll pygame events and refresh the global key-state flags."""
    global keys
    keys['space'] = keys['quit'] = keys['next'] = False
    for event in pygame.event.get():
        if event.type == pygame.KEYDOWN and event.key == pygame.K_SPACE:
            keys['space'] = True
        elif event.type == pygame.KEYUP and event.key == pygame.K_ESCAPE:
            keys['quit'] = True


done = False
mlcd.init(16, 2)
lasttime = time.time()
curtime = 0.0
while not done:
    curtime = time.time()
    if curtime - lasttime > 1 / game['speed']:
        lasttime = curtime
        # an obstacle that has scrolled into the player's column was dodged:
        # score it, retire it, and speed the game up
        if (screenbuff[0][player['position']] == OBSTACLE_CHAR or
                screenbuff[1][player['position']] == OBSTACLE_CHAR):
            player['score'] += 1
            game['obstacle'] -= 1
            game['level'] += 0.5
            game['speed'] += 0.05
        # scroll both rows one cell to the left and clear the last column
        for lindex, lin in enumerate(screenbuff, start=0):
            for index, pos in enumerate(lin, start=0):
                if index > 0:
                    screenbuff[lindex][index - 1] = pos
        screenbuff[0][-1] = ' '
        screenbuff[1][-1] = ' '
        # maybe spawn a new obstacle in the rightmost column; an obstacle
        # next to an existing one may only go in the same row, so the
        # player always has a dodgeable gap
        if screenbuff[0][-2] != OBSTACLE_CHAR and screenbuff[1][-2] != OBSTACLE_CHAR:
            if game['obstacle'] < int(game['level']) and random.choice([0, 1]):
                lin_temp = random.choice([0, 1])
                screenbuff[lin_temp][-1] = OBSTACLE_CHAR
                game['obstacle'] += 1
        elif screenbuff[0][-2] != OBSTACLE_CHAR:
            if game['obstacle'] < int(game['level']) and random.choice([0, 1]):
                lin_temp = random.choice([0, 1])
                if lin_temp == 1:
                    screenbuff[lin_temp][-1] = OBSTACLE_CHAR
                    game['obstacle'] += 1
        elif screenbuff[1][-2] != OBSTACLE_CHAR:
            if game['obstacle'] < int(game['level']) and random.choice([0, 1]):
                lin_temp = random.choice([0, 1])
                if lin_temp == 0:
                    screenbuff[lin_temp][-1] = OBSTACLE_CHAR
                    game['obstacle'] += 1
        if screenbuff[player['line']][player['position']] == OBSTACLE_CHAR:
            done = True
    # draw the frame: playfield plus the score on the right
    screenbuff[player['line']][player['position']] = PLAYER_CHAR
    lines = [''.join(screenbuff[0]) + '|scr',
             ''.join(screenbuff[1]) + '|' + str(player['score'])]
    mlcd.draw(lines)
    screenbuff[player['line']][player['position']] = ' '
    keypress()
    if keys['space']:
        # flip the player between the two rows
        player['line'] = 1 - player['line']
    if keys['quit']:
        print('game quit')
        done = True
pygame.quit()
str(player['score'])]
mlcd.draw(lines)
screenbuff[player['line']][player['position']] = ' '
keypress()
if keys['space']:
if player['line'] == 0:
player['line'] = 1
else:
player['line'] = 0
if keys['quit']:
print('game quit')
done = True
pygame.quit()
<|reserved_special_token_1|>
import mlcd,pygame,time,random
PLAYER_CHAR=">"
OBSTACLE_CHAR="|"
screenbuff=[[" "," "," "," "," "," "," "," "," "," "," "," "],
[" "," "," "," "," "," "," "," "," "," "," "," "]]
player={"position":0,"line":0,"score":000}
game={"speed":4.05,"level":2.5,"obstacle":0}
keys={"space":False,"quit":False,"next":False}
def keypress(): #get keypresses
global keys
keys["space"]=keys["quit"]=keys["next"]=False #reset all keys
#check keys
for event in pygame.event.get():
if event.type == pygame.KEYDOWN and event.key == pygame.K_SPACE:
keys["space"] = True
elif event.type == pygame.KEYUP and event.key == pygame.K_ESCAPE:
keys["quit"] = True
done=False
#initialize mlcd as 16x2 character lcd
mlcd.init(16,2)
lasttime=time.time()
curtime=0.0
while not done:
curtime=time.time()
if (curtime-lasttime>1/game["speed"]):
lasttime=curtime
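        #one game tick every 1/speed seconds; "speed" grows by 0.05 per
        #dodged obstacle, so ticks come faster as the score rises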
#increment score and count obstacle
#up the level and increase the speed
if screenbuff[0][player["position"]]==OBSTACLE_CHAR or screenbuff[1][player["position"]]==OBSTACLE_CHAR:
player["score"]+=1
game["obstacle"]-=1
game["level"]+=0.5
game["speed"]+=0.05
#if((game["level"]+2)%game["posmovthres"]==0 and player["position"]<12 and screenbuff[player["line"]][player["position"]+1]!=OBSTACLE_CHAR and screenbuff[player["line"]][player["position"]+2]!=OBSTACLE_CHAR):
# player["position"]+=1
#move everything one place to the left
for lindex,lin in enumerate(screenbuff,start=0):
for index,pos in enumerate(lin, start=0):
if index>0:
screenbuff[lindex][index-1]=pos
#add new chars at end of buff , obstacles if there is a gap
screenbuff[0][-1]=" "
screenbuff[1][-1]=" "
if screenbuff[0][-2] != OBSTACLE_CHAR and screenbuff[1][-2]!=OBSTACLE_CHAR:
if game["obstacle"]<int(game["level"]) and random.choice([0,1]):
lin_temp=random.choice([0,1])
screenbuff[lin_temp][-1]=OBSTACLE_CHAR
game["obstacle"]+=1
elif screenbuff[0][-2] != OBSTACLE_CHAR:
if game["obstacle"]<int(game["level"]) and random.choice([0,1]):
lin_temp=random.choice([0,1])
if(lin_temp==1):
screenbuff[lin_temp][-1]=OBSTACLE_CHAR
game["obstacle"]+=1
elif screenbuff[1][-2] != OBSTACLE_CHAR:
if game["obstacle"]<int(game["level"]) and random.choice([0,1]):
lin_temp=random.choice([0,1])
if(lin_temp==0):
screenbuff[lin_temp][-1]=OBSTACLE_CHAR
game["obstacle"]+=1
#check for collision
if screenbuff[player["line"]][player["position"]]==OBSTACLE_CHAR:
done=True #player lost
#add player to the buffer
screenbuff[player["line"]][player["position"]]=PLAYER_CHAR
#ready the lines for drawing on lcd
lines=[''.join(screenbuff[0]) + "|scr",
''.join(screenbuff[1]) + "|"+str(player["score"])]
mlcd.draw(lines)
#remove player from buffer
screenbuff[player["line"]][player["position"]]=" "
#get keypresses
keypress()
#modify player line (move the player) if space is pressed
if keys["space"]:
if player["line"]==0:
player["line"]=1
else:
player["line"]=0
#quit
if keys["quit"]:
print("game quit")
done=True
pygame.quit()
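
# The game above imports an external "mlcd" module that is not included in
# this file. A minimal console stand-in, assuming only the init(cols, rows)
# and draw(lines) signatures used above (hypothetical, saved as mlcd.py
# alongside this script):
#
#     import pygame
#
#     def init(cols, rows):
#         # open a small window so pygame can receive keyboard events
#         pygame.init()
#         pygame.display.set_mode((10 * cols, 24 * rows))
#
#     def draw(lines):
#         # print the LCD rows to the console instead of real hardware
#         print("\n".join(lines))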
|
flexible
|
{
"blob_id": "aeaab602cbb9fa73992eb5259e8603ecb11ba333",
"index": 4863,
"step-1": "<mask token>\n\n\ndef keypress():\n global keys\n keys['space'] = keys['quit'] = keys['next'] = False\n for event in pygame.event.get():\n if event.type == pygame.KEYDOWN and event.key == pygame.K_SPACE:\n keys['space'] = True\n elif event.type == pygame.KEYUP and event.key == pygame.K_ESCAPE:\n keys['quit'] = True\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef keypress():\n global keys\n keys['space'] = keys['quit'] = keys['next'] = False\n for event in pygame.event.get():\n if event.type == pygame.KEYDOWN and event.key == pygame.K_SPACE:\n keys['space'] = True\n elif event.type == pygame.KEYUP and event.key == pygame.K_ESCAPE:\n keys['quit'] = True\n\n\n<mask token>\nmlcd.init(16, 2)\n<mask token>\nwhile not done:\n curtime = time.time()\n if curtime - lasttime > 1 / game['speed']:\n lasttime = curtime\n if screenbuff[0][player['position']] == OBSTACLE_CHAR or screenbuff[1][\n player['position']] == OBSTACLE_CHAR:\n player['score'] += 1\n game['obstacle'] -= 1\n game['level'] += 0.5\n game['speed'] += 0.05\n for lindex, lin in enumerate(screenbuff, start=0):\n for index, pos in enumerate(lin, start=0):\n if index > 0:\n screenbuff[lindex][index - 1] = pos\n screenbuff[0][-1] = ' '\n screenbuff[1][-1] = ' '\n if screenbuff[0][-2] != OBSTACLE_CHAR and screenbuff[1][-2\n ] != OBSTACLE_CHAR:\n if game['obstacle'] < int(game['level']) and random.choice([0, 1]):\n lin_temp = random.choice([0, 1])\n screenbuff[lin_temp][-1] = OBSTACLE_CHAR\n game['obstacle'] += 1\n elif screenbuff[0][-2] != OBSTACLE_CHAR:\n if game['obstacle'] < int(game['level']) and random.choice([0, 1]):\n lin_temp = random.choice([0, 1])\n if lin_temp == 1:\n screenbuff[lin_temp][-1] = OBSTACLE_CHAR\n game['obstacle'] += 1\n elif screenbuff[1][-2] != OBSTACLE_CHAR:\n if game['obstacle'] < int(game['level']) and random.choice([0, 1]):\n lin_temp = random.choice([0, 1])\n if lin_temp == 0:\n screenbuff[lin_temp][-1] = OBSTACLE_CHAR\n game['obstacle'] += 1\n if screenbuff[player['line']][player['position']] == OBSTACLE_CHAR:\n done = True\n screenbuff[player['line']][player['position']] = PLAYER_CHAR\n lines = [''.join(screenbuff[0]) + '|scr', ''.join(screenbuff[1]) + '|' +\n str(player['score'])]\n mlcd.draw(lines)\n screenbuff[player['line']][player['position']] = ' '\n keypress()\n if keys['space']:\n if player['line'] == 0:\n player['line'] = 1\n else:\n player['line'] = 0\n if keys['quit']:\n print('game quit')\n done = True\npygame.quit()\n",
"step-3": "<mask token>\nPLAYER_CHAR = '>'\nOBSTACLE_CHAR = '|'\nscreenbuff = [[' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' '],\n [' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ']]\nplayer = {'position': 0, 'line': 0, 'score': 0}\ngame = {'speed': 4.05, 'level': 2.5, 'obstacle': 0}\nkeys = {'space': False, 'quit': False, 'next': False}\n\n\ndef keypress():\n global keys\n keys['space'] = keys['quit'] = keys['next'] = False\n for event in pygame.event.get():\n if event.type == pygame.KEYDOWN and event.key == pygame.K_SPACE:\n keys['space'] = True\n elif event.type == pygame.KEYUP and event.key == pygame.K_ESCAPE:\n keys['quit'] = True\n\n\ndone = False\nmlcd.init(16, 2)\nlasttime = time.time()\ncurtime = 0.0\nwhile not done:\n curtime = time.time()\n if curtime - lasttime > 1 / game['speed']:\n lasttime = curtime\n if screenbuff[0][player['position']] == OBSTACLE_CHAR or screenbuff[1][\n player['position']] == OBSTACLE_CHAR:\n player['score'] += 1\n game['obstacle'] -= 1\n game['level'] += 0.5\n game['speed'] += 0.05\n for lindex, lin in enumerate(screenbuff, start=0):\n for index, pos in enumerate(lin, start=0):\n if index > 0:\n screenbuff[lindex][index - 1] = pos\n screenbuff[0][-1] = ' '\n screenbuff[1][-1] = ' '\n if screenbuff[0][-2] != OBSTACLE_CHAR and screenbuff[1][-2\n ] != OBSTACLE_CHAR:\n if game['obstacle'] < int(game['level']) and random.choice([0, 1]):\n lin_temp = random.choice([0, 1])\n screenbuff[lin_temp][-1] = OBSTACLE_CHAR\n game['obstacle'] += 1\n elif screenbuff[0][-2] != OBSTACLE_CHAR:\n if game['obstacle'] < int(game['level']) and random.choice([0, 1]):\n lin_temp = random.choice([0, 1])\n if lin_temp == 1:\n screenbuff[lin_temp][-1] = OBSTACLE_CHAR\n game['obstacle'] += 1\n elif screenbuff[1][-2] != OBSTACLE_CHAR:\n if game['obstacle'] < int(game['level']) and random.choice([0, 1]):\n lin_temp = random.choice([0, 1])\n if lin_temp == 0:\n screenbuff[lin_temp][-1] = OBSTACLE_CHAR\n game['obstacle'] += 1\n if screenbuff[player['line']][player['position']] == OBSTACLE_CHAR:\n done = True\n screenbuff[player['line']][player['position']] = PLAYER_CHAR\n lines = [''.join(screenbuff[0]) + '|scr', ''.join(screenbuff[1]) + '|' +\n str(player['score'])]\n mlcd.draw(lines)\n screenbuff[player['line']][player['position']] = ' '\n keypress()\n if keys['space']:\n if player['line'] == 0:\n player['line'] = 1\n else:\n player['line'] = 0\n if keys['quit']:\n print('game quit')\n done = True\npygame.quit()\n",
"step-4": "import mlcd, pygame, time, random\nPLAYER_CHAR = '>'\nOBSTACLE_CHAR = '|'\nscreenbuff = [[' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' '],\n [' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ']]\nplayer = {'position': 0, 'line': 0, 'score': 0}\ngame = {'speed': 4.05, 'level': 2.5, 'obstacle': 0}\nkeys = {'space': False, 'quit': False, 'next': False}\n\n\ndef keypress():\n global keys\n keys['space'] = keys['quit'] = keys['next'] = False\n for event in pygame.event.get():\n if event.type == pygame.KEYDOWN and event.key == pygame.K_SPACE:\n keys['space'] = True\n elif event.type == pygame.KEYUP and event.key == pygame.K_ESCAPE:\n keys['quit'] = True\n\n\ndone = False\nmlcd.init(16, 2)\nlasttime = time.time()\ncurtime = 0.0\nwhile not done:\n curtime = time.time()\n if curtime - lasttime > 1 / game['speed']:\n lasttime = curtime\n if screenbuff[0][player['position']] == OBSTACLE_CHAR or screenbuff[1][\n player['position']] == OBSTACLE_CHAR:\n player['score'] += 1\n game['obstacle'] -= 1\n game['level'] += 0.5\n game['speed'] += 0.05\n for lindex, lin in enumerate(screenbuff, start=0):\n for index, pos in enumerate(lin, start=0):\n if index > 0:\n screenbuff[lindex][index - 1] = pos\n screenbuff[0][-1] = ' '\n screenbuff[1][-1] = ' '\n if screenbuff[0][-2] != OBSTACLE_CHAR and screenbuff[1][-2\n ] != OBSTACLE_CHAR:\n if game['obstacle'] < int(game['level']) and random.choice([0, 1]):\n lin_temp = random.choice([0, 1])\n screenbuff[lin_temp][-1] = OBSTACLE_CHAR\n game['obstacle'] += 1\n elif screenbuff[0][-2] != OBSTACLE_CHAR:\n if game['obstacle'] < int(game['level']) and random.choice([0, 1]):\n lin_temp = random.choice([0, 1])\n if lin_temp == 1:\n screenbuff[lin_temp][-1] = OBSTACLE_CHAR\n game['obstacle'] += 1\n elif screenbuff[1][-2] != OBSTACLE_CHAR:\n if game['obstacle'] < int(game['level']) and random.choice([0, 1]):\n lin_temp = random.choice([0, 1])\n if lin_temp == 0:\n screenbuff[lin_temp][-1] = OBSTACLE_CHAR\n game['obstacle'] += 1\n if screenbuff[player['line']][player['position']] == OBSTACLE_CHAR:\n done = True\n screenbuff[player['line']][player['position']] = PLAYER_CHAR\n lines = [''.join(screenbuff[0]) + '|scr', ''.join(screenbuff[1]) + '|' +\n str(player['score'])]\n mlcd.draw(lines)\n screenbuff[player['line']][player['position']] = ' '\n keypress()\n if keys['space']:\n if player['line'] == 0:\n player['line'] = 1\n else:\n player['line'] = 0\n if keys['quit']:\n print('game quit')\n done = True\npygame.quit()\n",
"step-5": "import mlcd,pygame,time,random\n\nPLAYER_CHAR=\">\"\nOBSTACLE_CHAR=\"|\"\n\nscreenbuff=[[\" \",\" \",\" \",\" \",\" \",\" \",\" \",\" \",\" \",\" \",\" \",\" \"],\n [\" \",\" \",\" \",\" \",\" \",\" \",\" \",\" \",\" \",\" \",\" \",\" \"]]\n\nplayer={\"position\":0,\"line\":0,\"score\":000}\ngame={\"speed\":4.05,\"level\":2.5,\"obstacle\":0} \nkeys={\"space\":False,\"quit\":False,\"next\":False}\n\ndef keypress(): #get keypresses\n global keys\n keys[\"space\"]=keys[\"quit\"]=keys[\"next\"]=False #reset all keys\n #check keys\n for event in pygame.event.get():\n if event.type == pygame.KEYDOWN and event.key == pygame.K_SPACE:\n keys[\"space\"] = True\n elif event.type == pygame.KEYUP and event.key == pygame.K_ESCAPE:\n keys[\"quit\"] = True\n\n \n \n\ndone=False\n#initialize mlcd as 16x2 character lcd\nmlcd.init(16,2)\nlasttime=time.time()\ncurtime=0.0\n\nwhile not done:\n curtime=time.time()\n if (curtime-lasttime>1/game[\"speed\"]):\n lasttime=curtime\n\n\n #increment score and count obstacle\n #up the level and increase the speed\n if screenbuff[0][player[\"position\"]]==OBSTACLE_CHAR or screenbuff[1][player[\"position\"]]==OBSTACLE_CHAR:\n player[\"score\"]+=1\n game[\"obstacle\"]-=1\n game[\"level\"]+=0.5\n game[\"speed\"]+=0.05\n #if((game[\"level\"]+2)%game[\"posmovthres\"]==0 and player[\"position\"]<12 and screenbuff[player[\"line\"]][player[\"position\"]+1]!=OBSTACLE_CHAR and screenbuff[player[\"line\"]][player[\"position\"]+2]!=OBSTACLE_CHAR):\n # player[\"position\"]+=1\n\n #move everything one place to the left\n for lindex,lin in enumerate(screenbuff,start=0):\n for index,pos in enumerate(lin, start=0):\n if index>0:\n screenbuff[lindex][index-1]=pos\n \n #add new chars at end of buff , obstacles if there is a gap\n screenbuff[0][-1]=\" \"\n screenbuff[1][-1]=\" \"\n if screenbuff[0][-2] != OBSTACLE_CHAR and screenbuff[1][-2]!=OBSTACLE_CHAR:\n if game[\"obstacle\"]<int(game[\"level\"]) and random.choice([0,1]):\n lin_temp=random.choice([0,1])\n screenbuff[lin_temp][-1]=OBSTACLE_CHAR\n game[\"obstacle\"]+=1\n elif screenbuff[0][-2] != OBSTACLE_CHAR:\n if game[\"obstacle\"]<int(game[\"level\"]) and random.choice([0,1]):\n lin_temp=random.choice([0,1])\n if(lin_temp==1):\n screenbuff[lin_temp][-1]=OBSTACLE_CHAR\n game[\"obstacle\"]+=1\n elif screenbuff[1][-2] != OBSTACLE_CHAR:\n if game[\"obstacle\"]<int(game[\"level\"]) and random.choice([0,1]):\n lin_temp=random.choice([0,1])\n if(lin_temp==0):\n screenbuff[lin_temp][-1]=OBSTACLE_CHAR\n game[\"obstacle\"]+=1\n \n\n #check for collision\n if screenbuff[player[\"line\"]][player[\"position\"]]==OBSTACLE_CHAR:\n done=True #player lost\n #add player to the buffer\n screenbuff[player[\"line\"]][player[\"position\"]]=PLAYER_CHAR\n #ready the lines for drawing on lcd\n lines=[''.join(screenbuff[0]) + \"|scr\",\n ''.join(screenbuff[1]) + \"|\"+str(player[\"score\"])]\n mlcd.draw(lines)\n \n #remove player from buffer\n screenbuff[player[\"line\"]][player[\"position\"]]=\" \"\n #get keypresses\n keypress()\n #modify player line (move the player) if space is pressed\n if keys[\"space\"]:\n if player[\"line\"]==0:\n player[\"line\"]=1\n else:\n player[\"line\"]=0\n #quit\n if keys[\"quit\"]:\n print(\"game quit\")\n done=True\npygame.quit()\n \n \n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
__author__ = 'Administrator'
import socket, os, hashlib

server = socket.socket()
server.bind(("localhost", 9999))
server.listen()

while True:
    conn, addr = server.accept()
    while True:
        data = conn.recv(1024)
        if not data:
            break
        # requests arrive as "get <filename>"
        cmd, filename = data.decode().split()
        if os.path.isfile(filename):
            f = open(filename, "rb")
            m = hashlib.md5()
            file_size = os.stat(filename).st_size
            # announce the size, then wait for the client's ack before streaming
            conn.send(str(file_size).encode())
            conn.recv(1024)
            for line in f:
                m.update(line)
                conn.send(line)
            print("file_md5", m.hexdigest())
            f.close()
server.close()
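
# A possible client for the protocol above (a sketch inferred from the server
# code, not part of the original): send "get <filename>", read the announced
# size, ack, then stream the file while computing the md5 locally.
#
#     import socket, hashlib
#
#     client = socket.socket()
#     client.connect(("localhost", 9999))
#     client.send(b"get testfile.txt")
#     file_size = int(client.recv(1024).decode())
#     client.send(b"ready")                 # ack so the server starts streaming
#     m = hashlib.md5()
#     received = 0
#     with open("testfile.txt.download", "wb") as out:
#         while received < file_size:
#             chunk = client.recv(1024)
#             if not chunk:
#                 break
#             out.write(chunk)
#             m.update(chunk)
#             received += len(chunk)
#     print("client_md5", m.hexdigest())
#     client.close()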
|
normal
|
{
"blob_id": "0a19efea0c8d7e5e248ca3265ffcb55604dc500c",
"index": 7576,
"step-1": "__author__ = 'Administrator'\n\nimport socket,os,time\n\nserver = socket.socket()\n\nserver.bind((\"localhost\",9999))\n\nserver.listen()\n\nwhile True:\n conn,addr = server.accept()\n\n while True:\n data = conn.recv(1024)\n if not data:\n break\n\n cmd,filename = data.decode().split()\n\n if os.path.isfile(filename)\n f = open(filename,\"rb\")\n m = hashlib.md5()\n file_size = os.stat(filename).st_size\n conn.send(str(file_size).encode())\n conn.recv(1024)\n for line in f:\n m.update(line)\n conn.send(line)\n print(\"file_md5\",m.hexdigest())\n f.close()\nserver.close()\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
<|reserved_special_token_0|>
class _spectra:
def __init__(self, x, y):
self.x = x
self.y = y
<|reserved_special_token_0|>
def y(self):
return intensities
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class _spectra:
def __init__(self, x, y):
self.x = x
self.y = y
def x(self):
return waveNumbers
def y(self):
return intensities
<|reserved_special_token_0|>
def getPeaks(waveNumbers, intensities):
data = _spectra(waveNumbers, intensities)
waveletCoeff = signal.cwt(intensities, signal.ricker, np.linspace(
lowerBound, upperBound, steps))
waveletCoeff = np.flipud(waveletCoeff)
ridgeLines = _filterRidgeLines(_findMaxima1D(waveletCoeff),
columnWindow, rowWindow)
peakInfo = getPeakInfo(ridgeLines, data, waveletCoeff)
return peakInfo
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class _spectra:
def __init__(self, x, y):
self.x = x
self.y = y
def x(self):
return waveNumbers
def y(self):
return intensities
<|reserved_special_token_0|>
def _filterRidgeLines(maximaArray, rowMax, colMax):
def checkValues(value, ridgeLines):
for lines in ridgeLines:
for points in lines:
if value in points:
return True
return False
ridgeLines = []
for i, row in enumerate(maximaArray):
ridge = []
colPos = row[1]
rowPos = row[0]
if checkValues(colPos, ridgeLines):
continue
for j, nextRows in enumerate(maximaArray[i:, :]):
if nextRows[0] == rowPos:
continue
if np.abs(colPos - nextRows[1]) <= colMax and np.abs(rowPos -
nextRows[0]) <= rowMax:
ridge.append((rowPos, colPos, nextRows[2]))
rowPos = nextRows[0]
colPos = nextRows[1]
if len(ridge) != 0:
if ridge[-1][0] <= 2:
ridgeLines.append(ridge)
return ridgeLines
<|reserved_special_token_0|>
def getPeaks(waveNumbers, intensities):
data = _spectra(waveNumbers, intensities)
waveletCoeff = signal.cwt(intensities, signal.ricker, np.linspace(
lowerBound, upperBound, steps))
waveletCoeff = np.flipud(waveletCoeff)
ridgeLines = _filterRidgeLines(_findMaxima1D(waveletCoeff),
columnWindow, rowWindow)
peakInfo = getPeakInfo(ridgeLines, data, waveletCoeff)
return peakInfo
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class _spectra:
def __init__(self, x, y):
self.x = x
self.y = y
def x(self):
return waveNumbers
def y(self):
return intensities
<|reserved_special_token_0|>
def _findMaxima1D(CWTArray):
maximas = np.zeros(CWTArray.size, dtype=(float, 3))
count = 0
for j, row in enumerate(CWTArray):
for i, element in enumerate(row):
try:
if element > row[i - 1] and element > row[i + 1]:
maximas[count] = steps - j, i, element
count += 1
except IndexError:
pass
return np.vstack(maximas[:count])
<|reserved_special_token_0|>
def _filterRidgeLines(maximaArray, rowMax, colMax):
def checkValues(value, ridgeLines):
for lines in ridgeLines:
for points in lines:
if value in points:
return True
return False
ridgeLines = []
for i, row in enumerate(maximaArray):
ridge = []
colPos = row[1]
rowPos = row[0]
if checkValues(colPos, ridgeLines):
continue
for j, nextRows in enumerate(maximaArray[i:, :]):
if nextRows[0] == rowPos:
continue
if np.abs(colPos - nextRows[1]) <= colMax and np.abs(rowPos -
nextRows[0]) <= rowMax:
ridge.append((rowPos, colPos, nextRows[2]))
rowPos = nextRows[0]
colPos = nextRows[1]
if len(ridge) != 0:
if ridge[-1][0] <= 2:
ridgeLines.append(ridge)
return ridgeLines
<|reserved_special_token_0|>
def getPeakInfo(ridgeLines, data, waveletCoeff):
peakInfo = np.zeros(len(ridgeLines), dtype=[('position', 'int32'), (
'scale', 'int32'), ('cwtCoeff', 'f'), ('SNR', 'f'), ('length',
'uint8'), ('intensity', 'f'), ('wavenumber', 'f')])
for i, lines in enumerate(ridgeLines):
maximum = np.argmax(zip(*lines)[2])
peakInfo[i] = lines[maximum][1], lines[maximum][0], lines[maximum][2
], 0, len(lines), data.x[lines[maximum][1]], data.y[lines[
maximum][1]]
for i, peaks in enumerate(peakInfo):
SNR = np.abs(waveletCoeff[-1, peaks[0] - 15:peaks[0] + 15])
if len(SNR) == 0:
peakInfo['SNR'][i] = 0
else:
SNR = stats.scoreatpercentile(SNR, 95)
peakInfo['SNR'][i] = SNR
return peakInfo
<|reserved_special_token_0|>
def getPeaks(waveNumbers, intensities):
data = _spectra(waveNumbers, intensities)
waveletCoeff = signal.cwt(intensities, signal.ricker, np.linspace(
lowerBound, upperBound, steps))
waveletCoeff = np.flipud(waveletCoeff)
ridgeLines = _filterRidgeLines(_findMaxima1D(waveletCoeff),
columnWindow, rowWindow)
peakInfo = getPeakInfo(ridgeLines, data, waveletCoeff)
return peakInfo
<|reserved_special_token_1|>
import numpy as np
from scipy import stats
from scipy import signal
"""
A continuous wavelet transform based peak finder. Tested exclusively on Raman spectra, however,
it should work for most datasets.
Parameters
----------
lowerBound: The lowest value of the scale factor to use in the wavelet transform
upperBound: The highest value of the scale factor to use in the wavelet transform
steps: The number of scale factors we want between the highest and lowest bounds
rowWindow: The maximum number of rows that a ridge line can be discontinuous before it is
terminated. I.e. the maximum number of scale factors it can deviate.
colWindow: The maximum number of columns that a ridge line can wander before it is terminated.
I.e. the maximum number of wavenumbers (or a similar X value) that the ridge line can deviate.
"""
# CWT Transform parameters
lowerBound = 1
upperBound = 70
steps = 90
# Ridge line filtering parameters
rowWindow = 2
columnWindow = 5
class _spectra:
    # simple container for one spectrum; x (wavenumbers) and y (intensities)
    # are stored directly as instance attributes
    def __init__(self, x, y):
        self.x = x
        self.y = y
"""
Simple helper function for finding all of the maxima in the 2D array returned by the wavelet
transform. Works on the basis of a simple comparison between neighbouring elements. These
values form the initial basis for the ridge lines.
"""
def _findMaxima1D(CWTArray):
maximas = np.zeros(CWTArray.size,dtype=(float,3))
# Populate the maxima array with a tuple of the coordinates and the values of the maxima
count = 0
for j,row in enumerate(CWTArray):
for i,element in enumerate(row):
try:
if element > row[i-1] and element > row[i+1]:
maximas[count]= ((steps-j,i,element))
count += 1
except IndexError:
pass
return np.vstack(maximas[:count])
"""
Filter the ridge lines found from the maxima of the CWT coefficient array based on a set
parameters, namely the maximum deviations in wavenumber and scale space. Any lines which are
found from this criteria are considered to be peaks and further evaluated in the following
steps.
"""
def _filterRidgeLines(maximaArray,rowMax,colMax):
# Helper to prevent duplicating ridge lines
def checkValues(value, ridgeLines):
for lines in ridgeLines:
for points in lines:
if value in points:
return True
return False
ridgeLines = []
# Maxima array is a n row, 1 column array containing tuples of (scaleFactor, column)
for i,row in enumerate(maximaArray):
ridge = [] # For each maxima start a ridge line
colPos = row[1] # Get the column position of the current maxima
rowPos = row[0] # Get the row position of the current maxima
# If this value is already part of another ridge line, move to the next value
if checkValues(colPos, ridgeLines):
continue
for j, nextRows in enumerate(maximaArray[i:,:]): # Look through the subsequent maxima
if nextRows[0] == rowPos: # If the scale factors are the same, skip
continue
if np.abs(colPos - nextRows[1]) <= colMax and \
np.abs(rowPos - nextRows[0]) <= rowMax:
ridge.append((rowPos,colPos,nextRows[2]))
rowPos = nextRows[0]
colPos = nextRows[1]
# If the ridge lines run all the way to the lowest scale factors, add them to the list
if len(ridge) != 0:
if ridge[-1][0] <= 2:
ridgeLines.append(ridge)
return ridgeLines
"""
For each of the ridge lines found from the filtered CWT array, determine the other
characteristics of the peaks.
The position of the peak is determined from the position of the maxima in the ridge
line.
"""
def getPeakInfo(ridgeLines,data,waveletCoeff):
# For each of the ridge lines we have found, locate the positions of the maxima. These
# correspond to the peak centers.
peakInfo = np.zeros(len(ridgeLines),dtype=[('position','int32'),('scale','int32'),\
('cwtCoeff','f'),('SNR','f'),('length','uint8'),\
('intensity','f'),('wavenumber','f')])
# For each of the ridge lines, add the position of the peak center and the length of the
# line. These are useful for filtering peaks later.
for i,lines in enumerate(ridgeLines):
# Find the index of the maximum CWT coefficient. This is the peak center.
        # zip() returns an iterator in Python 3, so collect the coefficients first
        maximum = int(np.argmax([point[2] for point in lines]))
        pos = int(lines[maximum][1])          # column index of the peak centre
        peakInfo[i] = pos, lines[maximum][0], lines[maximum][2], 0, len(lines),\
                      data.y[pos], data.x[pos]
# Calculate the local SNR of each peak within a window of 30 pixels of the peak. The SNR is
# defined as the 95th quantile of the absolute values of the lowest scale factor coefficients.
for i, peaks in enumerate(peakInfo):
SNR = np.abs(waveletCoeff[-1,peaks[0]-15:peaks[0]+15])
if len(SNR) == 0:
peakInfo['SNR'][i] = 0
else:
SNR = stats.scoreatpercentile(SNR, 95)
peakInfo['SNR'][i] = SNR
return peakInfo
"""
Processes spectral data and returns a structured array of peak information. Peaks can then be
filtered based on ridge line length, signal to noise ratio and scale values.
"""
def getPeaks(waveNumbers,intensities):
data = _spectra(waveNumbers,intensities)
# Take the CWT of the spectra. Trim the result to remove padding.
waveletCoeff = signal.cwt(intensities, signal.ricker, \
np.linspace(lowerBound,upperBound,steps))
# Flip the matrix so the highest wavelet coefficient is the top row
waveletCoeff = np.flipud(waveletCoeff)
# Find the ridge lines connecting the maxima in the wavelet coefficient array. Filter ridge lines
# takes a (scaleFactor,3) array of positions and values of maxima.
    ridgeLines = _filterRidgeLines(_findMaxima1D(waveletCoeff), rowWindow, columnWindow)
# Populate a structured array with peak information
peakInfo = getPeakInfo(ridgeLines,data,waveletCoeff)
return peakInfo
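
# A small self-test sketch (not part of the original module): synthesise a
# spectrum with two Gaussian peaks on a noisy baseline and run the finder.
if __name__ == "__main__":
    x = np.linspace(400, 1800, 1400)                      # wavenumber axis
    y = (np.exp(-((x - 800.0) / 8.0) ** 2)
         + 0.6 * np.exp(-((x - 1200.0) / 12.0) ** 2)
         + np.random.normal(0, 0.02, x.size))             # noise floor
    for p in getPeaks(x, y):
        print("peak near %.1f (SNR %.1f, ridge length %d)"
              % (p['wavenumber'], p['SNR'], p['length']))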
|
flexible
|
{
"blob_id": "8f5d9918260e2f50fb229a7067f820a186101b99",
"index": 1080,
"step-1": "<mask token>\n\n\nclass _spectra:\n\n def __init__(self, x, y):\n self.x = x\n self.y = y\n <mask token>\n\n def y(self):\n return intensities\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass _spectra:\n\n def __init__(self, x, y):\n self.x = x\n self.y = y\n\n def x(self):\n return waveNumbers\n\n def y(self):\n return intensities\n\n\n<mask token>\n\n\ndef getPeaks(waveNumbers, intensities):\n data = _spectra(waveNumbers, intensities)\n waveletCoeff = signal.cwt(intensities, signal.ricker, np.linspace(\n lowerBound, upperBound, steps))\n waveletCoeff = np.flipud(waveletCoeff)\n ridgeLines = _filterRidgeLines(_findMaxima1D(waveletCoeff),\n columnWindow, rowWindow)\n peakInfo = getPeakInfo(ridgeLines, data, waveletCoeff)\n return peakInfo\n",
"step-3": "<mask token>\n\n\nclass _spectra:\n\n def __init__(self, x, y):\n self.x = x\n self.y = y\n\n def x(self):\n return waveNumbers\n\n def y(self):\n return intensities\n\n\n<mask token>\n\n\ndef _filterRidgeLines(maximaArray, rowMax, colMax):\n\n def checkValues(value, ridgeLines):\n for lines in ridgeLines:\n for points in lines:\n if value in points:\n return True\n return False\n ridgeLines = []\n for i, row in enumerate(maximaArray):\n ridge = []\n colPos = row[1]\n rowPos = row[0]\n if checkValues(colPos, ridgeLines):\n continue\n for j, nextRows in enumerate(maximaArray[i:, :]):\n if nextRows[0] == rowPos:\n continue\n if np.abs(colPos - nextRows[1]) <= colMax and np.abs(rowPos -\n nextRows[0]) <= rowMax:\n ridge.append((rowPos, colPos, nextRows[2]))\n rowPos = nextRows[0]\n colPos = nextRows[1]\n if len(ridge) != 0:\n if ridge[-1][0] <= 2:\n ridgeLines.append(ridge)\n return ridgeLines\n\n\n<mask token>\n\n\ndef getPeaks(waveNumbers, intensities):\n data = _spectra(waveNumbers, intensities)\n waveletCoeff = signal.cwt(intensities, signal.ricker, np.linspace(\n lowerBound, upperBound, steps))\n waveletCoeff = np.flipud(waveletCoeff)\n ridgeLines = _filterRidgeLines(_findMaxima1D(waveletCoeff),\n columnWindow, rowWindow)\n peakInfo = getPeakInfo(ridgeLines, data, waveletCoeff)\n return peakInfo\n",
"step-4": "<mask token>\n\n\nclass _spectra:\n\n def __init__(self, x, y):\n self.x = x\n self.y = y\n\n def x(self):\n return waveNumbers\n\n def y(self):\n return intensities\n\n\n<mask token>\n\n\ndef _findMaxima1D(CWTArray):\n maximas = np.zeros(CWTArray.size, dtype=(float, 3))\n count = 0\n for j, row in enumerate(CWTArray):\n for i, element in enumerate(row):\n try:\n if element > row[i - 1] and element > row[i + 1]:\n maximas[count] = steps - j, i, element\n count += 1\n except IndexError:\n pass\n return np.vstack(maximas[:count])\n\n\n<mask token>\n\n\ndef _filterRidgeLines(maximaArray, rowMax, colMax):\n\n def checkValues(value, ridgeLines):\n for lines in ridgeLines:\n for points in lines:\n if value in points:\n return True\n return False\n ridgeLines = []\n for i, row in enumerate(maximaArray):\n ridge = []\n colPos = row[1]\n rowPos = row[0]\n if checkValues(colPos, ridgeLines):\n continue\n for j, nextRows in enumerate(maximaArray[i:, :]):\n if nextRows[0] == rowPos:\n continue\n if np.abs(colPos - nextRows[1]) <= colMax and np.abs(rowPos -\n nextRows[0]) <= rowMax:\n ridge.append((rowPos, colPos, nextRows[2]))\n rowPos = nextRows[0]\n colPos = nextRows[1]\n if len(ridge) != 0:\n if ridge[-1][0] <= 2:\n ridgeLines.append(ridge)\n return ridgeLines\n\n\n<mask token>\n\n\ndef getPeakInfo(ridgeLines, data, waveletCoeff):\n peakInfo = np.zeros(len(ridgeLines), dtype=[('position', 'int32'), (\n 'scale', 'int32'), ('cwtCoeff', 'f'), ('SNR', 'f'), ('length',\n 'uint8'), ('intensity', 'f'), ('wavenumber', 'f')])\n for i, lines in enumerate(ridgeLines):\n maximum = np.argmax(zip(*lines)[2])\n peakInfo[i] = lines[maximum][1], lines[maximum][0], lines[maximum][2\n ], 0, len(lines), data.x[lines[maximum][1]], data.y[lines[\n maximum][1]]\n for i, peaks in enumerate(peakInfo):\n SNR = np.abs(waveletCoeff[-1, peaks[0] - 15:peaks[0] + 15])\n if len(SNR) == 0:\n peakInfo['SNR'][i] = 0\n else:\n SNR = stats.scoreatpercentile(SNR, 95)\n peakInfo['SNR'][i] = SNR\n return peakInfo\n\n\n<mask token>\n\n\ndef getPeaks(waveNumbers, intensities):\n data = _spectra(waveNumbers, intensities)\n waveletCoeff = signal.cwt(intensities, signal.ricker, np.linspace(\n lowerBound, upperBound, steps))\n waveletCoeff = np.flipud(waveletCoeff)\n ridgeLines = _filterRidgeLines(_findMaxima1D(waveletCoeff),\n columnWindow, rowWindow)\n peakInfo = getPeakInfo(ridgeLines, data, waveletCoeff)\n return peakInfo\n",
"step-5": "import numpy as np\nfrom scipy import stats\nfrom scipy import interpolate\nfrom math import factorial\nfrom scipy import signal\n\n\"\"\"\n\nA continuous wavelet transform based peak finder. Tested exclusively on Raman spectra, however,\nit should work for most datasets.\n\nParameters\n----------\n\nlowerBound: The lowest value of the scale factor to use in the wavelet transform\nupperBound: The highest value of the scale factor to use in the wavelet transform\nsteps: The number of scale factors we want between the highest and lowest bounds\n\nrowWindow: The maximum number of rows that a ridge line can be discontinuous before it is\nterminated. I.e. the maximum number of scale factors it can deviate.\n\ncolWindow: The maximum number of columns that a ridge line can wander before it is terminated.\nI.e. the maximum number of wavenumbers (or a similar X value) that the ridge line can deviate.\n\n\"\"\"\n\n# CWT Transform parameters\nlowerBound = 1\nupperBound = 70\nsteps = 90\n\n# Ridge line filtering parameters\nrowWindow = 2\ncolumnWindow = 5\n\nclass _spectra:\n def __init__(self,x,y):\n self.x = x\n self.y = y\n\n def x(self):\n return waveNumbers\n\n def y(self):\n return intensities\n\n\"\"\"\n\nSimple helper function for finding all of the maxima in the 2D array returned by the wavelet\ntransform. Works on the basis of a simple comparison between neighbouring elements. These\nvalues form the initial basis for the ridge lines.\n\n\"\"\"\ndef _findMaxima1D(CWTArray):\n\n maximas = np.zeros(CWTArray.size,dtype=(float,3))\n\n # Populate the maxima array with a tuple of the coordinates and the values of the maxima\n count = 0\n for j,row in enumerate(CWTArray):\n for i,element in enumerate(row):\n try:\n if element > row[i-1] and element > row[i+1]:\n maximas[count]= ((steps-j,i,element))\n count += 1\n except IndexError:\n pass\n\n return np.vstack(maximas[:count])\n\n\"\"\"\n\nFilter the ridge lines found from the maxima of the CWT coefficient array based on a set\nparameters, namely the maximum deviations in wavenumber and scale space. 
Any lines which are\nfound from this criteria are considered to be peaks and further evaluated in the following\nsteps.\n\n\"\"\"\ndef _filterRidgeLines(maximaArray,rowMax,colMax):\n\n # Helper to prevent duplicating ridge lines\n def checkValues(value, ridgeLines):\n\n for lines in ridgeLines:\n for points in lines:\n if value in points:\n return True\n return False\n\n ridgeLines = []\n\n # Maxima array is a n row, 1 column array containing tuples of (scaleFactor, column)\n for i,row in enumerate(maximaArray):\n ridge = [] # For each maxima start a ridge line\n colPos = row[1] # Get the column position of the current maxima\n rowPos = row[0] # Get the row position of the current maxima\n # If this value is already part of another ridge line, move to the next value\n if checkValues(colPos, ridgeLines):\n continue\n for j, nextRows in enumerate(maximaArray[i:,:]): # Look through the subsequent maxima\n if nextRows[0] == rowPos: # If the scale factors are the same, skip\n continue\n if np.abs(colPos - nextRows[1]) <= colMax and \\\n np.abs(rowPos - nextRows[0]) <= rowMax:\n ridge.append((rowPos,colPos,nextRows[2]))\n rowPos = nextRows[0]\n colPos = nextRows[1]\n\n # If the ridge lines run all the way to the lowest scale factors, add them to the list\n if len(ridge) != 0:\n if ridge[-1][0] <= 2:\n ridgeLines.append(ridge)\n\n return ridgeLines\n\n\"\"\"\n\nFor each of the ridge lines found from the filtered CWT array, determine the other\ncharacteristics of the peaks.\n\nThe position of the peak is determined from the position of the maxima in the ridge\nline.\n\n\"\"\"\ndef getPeakInfo(ridgeLines,data,waveletCoeff):\n\n # For each of the ridge lines we have found, locate the positions of the maxima. These\n # correspond to the peak centers.\n peakInfo = np.zeros(len(ridgeLines),dtype=[('position','int32'),('scale','int32'),\\\n ('cwtCoeff','f'),('SNR','f'),('length','uint8'),\\\n ('intensity','f'),('wavenumber','f')])\n\n # For each of the ridge lines, add the position of the peak center and the length of the\n # line. These are useful for filtering peaks later.\n for i,lines in enumerate(ridgeLines):\n # Find the index of the maximum CWT coefficient. This is the peak center.\n maximum = np.argmax(zip(*lines)[2])\n peakInfo[i] = lines[maximum][1],lines[maximum][0],lines[maximum][2],0,len(lines),\\\n data.x[lines[maximum][1]],data.y[lines[maximum][1]]\n\n # Calculate the local SNR of each peak within a window of 30 pixels of the peak. The SNR is\n # defined as the 95th quantile of the absolute values of the lowest scale factor coefficients.\n for i, peaks in enumerate(peakInfo):\n SNR = np.abs(waveletCoeff[-1,peaks[0]-15:peaks[0]+15])\n if len(SNR) == 0:\n peakInfo['SNR'][i] = 0\n else:\n SNR = stats.scoreatpercentile(SNR, 95)\n peakInfo['SNR'][i] = SNR\n\n return peakInfo\n\n\"\"\"\n\nProcesses spectral data and returns a structured array of peak information. Peak can then be\nfiltered based on ridge line length, signal to noise ratio and scale values.\n\n\"\"\"\ndef getPeaks(waveNumbers,intensities):\n\n data = _spectra(waveNumbers,intensities)\n\n # Take the CWT of the spectra. Trim the result to remove padding.\n waveletCoeff = signal.cwt(intensities, signal.ricker, \\\n np.linspace(lowerBound,upperBound,steps))\n\n # Flip the matrix so the highest wavelet coefficient is the top row\n waveletCoeff = np.flipud(waveletCoeff)\n\n # Find the ridge lines connecting the maxima in the wavelet coefficient array. 
Filter ridge lines\n # takes a (scaleFactor,3) array of positions and values of maxima.\n ridgeLines = _filterRidgeLines(_findMaxima1D(waveletCoeff),columnWindow,rowWindow)\n\n # Populate a structured array with peak information\n peakInfo = getPeakInfo(ridgeLines,data,waveletCoeff)\n\n return peakInfo\n",
"step-ids": [
3,
5,
6,
8,
11
]
}
|
[
3,
5,
6,
8,
11
] |
from datetime import datetime
import struct
BEACON_LENGTH = 84
EPS_LENGTH = 20
COM_LENGTH = 10
# reverse engineered
ADCS1_LENGTH = 7
ADCS2_LENGTH = 6
AIS_LENGTH = 20
class EPS(object):
def __init__(self, eps_data):
if len(eps_data) != EPS_LENGTH:
            raise ValueError("EPS data length %d != %d" % (len(eps_data), EPS_LENGTH))
self.boot_count, self.uptime, self.rt_clock, self.ping_status, self.subsystem_status,\
self.battery_voltage, self.cell_diff, self.battery_current, self.solar_power,\
self.temp, self.pa_temp, self.main_voltage = struct.unpack(">HIIBHBbbBbbb", eps_data)
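        # ">HIIBHBbbBbbb" (big-endian): u16 boot count, u32 uptime, u32 clock,
        # u8 ping, u16 subsystem status, u8 voltage, i8 cell diff, i8 current,
        # u8 solar, i8 temp, i8 PA temp, i8 main voltage -- 20 bytes in total;
        # the raw values are scaled to physical units below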
self.battery_voltage *= 40
self.cell_diff *= 4
self.battery_current *= 10
self.solar_power *= 20
def __str__(self):
eps_str = ("""EPS:
Boot count:\t\t{0}
Up time:\t\t{1} seconds
Real time clock:\t{2}
Battery voltage:\t{3} mV
Cell difference:\t{4:.1f} mV
Battery current:\t{5} mA
Solar power:\t\t{6}
Temperature:\t\t{7} C
PA temperature:\t\t{8} C""".format(
self.boot_count, self.uptime, datetime.fromtimestamp(self.rt_clock),
self.battery_voltage, self.cell_diff, self.battery_current, self.solar_power,
self.temp, self.pa_temp))
return eps_str
class COM(object):
def __init__(self, com_data):
self.boot_count, self.packets_received, self.packets_send, self.latest_rssi,\
self.latest_bit_correction, self.latest_byte_correction = \
struct.unpack(">HHHhBB", com_data)
self.boot_count &= 0x1fff
def __str__(self):
com_str = ("""COM:
Boot count:\t\t{0}
Packets received:\t{1}
Packets send:\t\t{2}
Latest rssi:\t\t{3}
Latest bit corrections:\t{4}
Latest byte corrections:{5}""".format(
self.boot_count, self.packets_received, self.packets_send,
self.latest_rssi, self.latest_bit_correction, self.latest_byte_correction))
return com_str
# Reverse engineered classes
class ADCS1(object):
def __init__(self, adcs1_data):
data = struct.unpack(">hhhB", adcs1_data)
self.bdot = tuple(data[0:3])
self.state = data[3]
def __str__(self):
adcs1_str = ("""ADCS1:
State:\t{}
Bdot:\t{}""".format(self.state, self.bdot))
return adcs1_str
class ADCS2(object):
def __init__(self, adcs2_data):
self.gyro = tuple(struct.unpack(">hhh", adcs2_data))
def __str__(self):
adcs2_str = ("""ADCS2:
Gyro:\t{}""".format(self.gyro))
return adcs2_str
class AIS(object):
def __init__(self, ais_data):
# there are some fields which apparently are 0 all the time
# this fields can't be identified by reverse engineering
self.boot_count, _, _, self.unique_mssi, _ = struct.unpack(">HhhH12s", ais_data)
def __str__(self):
ais_str = ("""AIS:
Boot count:\t{}
Unique MSSI:\t{}""".format(self.boot_count, self.unique_mssi))
return ais_str
## Beacon
# The beacon class takes a string of bytes as input, and parses it to generate
# a representation of the beacon format used by AASUAT4
# The beacon format is as follows:
# [ 1 byte | 20 bytes | 10 bytes | 7 bytes | 6 bytes | 20 bytes | 20 bytes ]
# [ Valid  |   EPS    |   COM    |  ADCS1  |  ADCS2  |   AIS1   |   AIS2   ]
# (the lengths match the *_LENGTH constants above and sum to the 84-byte
# BEACON_LENGTH)
#
# For each subsystem, which are valid, are the corresponding data bytes passed to another
# class which parses the information.
#
# The __str__ method returns a human readable string with key information from the beacon
class Beacon(object):
def __init__(self, raw_data):
if len(raw_data) != BEACON_LENGTH:
raise ValueError("Malformed beacon (incorrect length)")
self.subsystems = {}
valid, eps_raw, com_raw, adcs1_raw, adcs2_raw, ais1_raw, ais2_raw = \
struct.unpack(("B"+"{}s"*6).format(EPS_LENGTH, COM_LENGTH, ADCS1_LENGTH, ADCS2_LENGTH, AIS_LENGTH, AIS_LENGTH), raw_data)
# reverse engineered valid bits
# EPS and COM are known from university team code
# valid byte is usually 0x27
# in DK3WN's blog we see that EPS, COM, AIS2 and ADCS1 are valid
eps_valid = valid & (1 << 0)
com_valid = valid & (1 << 1)
adcs1_valid = valid & (1 << 2)
adcs2_valid = valid & (1 << 3)
ais1_valid = valid & (1 << 4)
ais2_valid = valid & (1 << 5)
if eps_valid:
self.subsystems['EPS'] = EPS(eps_raw)
if com_valid:
self.subsystems['COM'] = COM(com_raw)
if adcs1_valid:
self.subsystems['ADCS1'] = ADCS1(adcs1_raw)
if adcs2_valid:
self.subsystems['ADCS2'] = ADCS2(adcs2_raw)
if ais1_valid:
self.subsystems['AIS1'] = AIS(ais1_raw)
if ais2_valid:
self.subsystems['AIS2'] = AIS(ais2_raw)
def __str__(self):
beacon_str = ""
for k,v in self.subsystems.items():
beacon_str += str(v) + "\n"
return beacon_str
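
# A quick decoding sketch (not part of the original): build a dummy 84-byte
# frame with only the EPS and COM valid bits set and pretty-print it
# (Python 3 bytes literals assumed).
if __name__ == "__main__":
    dummy = (bytes([0b00000011])                  # valid byte: EPS | COM
             + bytes(EPS_LENGTH) + bytes(COM_LENGTH)
             + bytes(ADCS1_LENGTH) + bytes(ADCS2_LENGTH)
             + bytes(AIS_LENGTH) * 2)             # zeroed payloads
    print(Beacon(dummy))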
|
normal
|
{
"blob_id": "505689803c8f4490619ab1a7579fde1e2c18c538",
"index": 5532,
"step-1": "<mask token>\n\n\nclass ADCS2(object):\n\n def __init__(self, adcs2_data):\n self.gyro = tuple(struct.unpack('>hhh', adcs2_data))\n <mask token>\n\n\nclass AIS(object):\n\n def __init__(self, ais_data):\n self.boot_count, _, _, self.unique_mssi, _ = struct.unpack('>HhhH12s',\n ais_data)\n\n def __str__(self):\n ais_str = (\"\"\"AIS:\n Boot count:\t{}\n Unique MSSI:\t{}\"\"\"\n .format(self.boot_count, self.unique_mssi))\n return ais_str\n\n\nclass Beacon(object):\n\n def __init__(self, raw_data):\n if len(raw_data) != BEACON_LENGTH:\n raise ValueError('Malformed beacon (incorrect length)')\n self.subsystems = {}\n (valid, eps_raw, com_raw, adcs1_raw, adcs2_raw, ais1_raw, ais2_raw) = (\n struct.unpack(('B' + '{}s' * 6).format(EPS_LENGTH, COM_LENGTH,\n ADCS1_LENGTH, ADCS2_LENGTH, AIS_LENGTH, AIS_LENGTH), raw_data))\n eps_valid = valid & 1 << 0\n com_valid = valid & 1 << 1\n adcs1_valid = valid & 1 << 2\n adcs2_valid = valid & 1 << 3\n ais1_valid = valid & 1 << 4\n ais2_valid = valid & 1 << 5\n if eps_valid:\n self.subsystems['EPS'] = EPS(eps_raw)\n if com_valid:\n self.subsystems['COM'] = COM(com_raw)\n if adcs1_valid:\n self.subsystems['ADCS1'] = ADCS1(adcs1_raw)\n if adcs2_valid:\n self.subsystems['ADCS2'] = ADCS2(adcs2_raw)\n if ais1_valid:\n self.subsystems['AIS1'] = AIS(ais1_raw)\n if ais2_valid:\n self.subsystems['AIS2'] = AIS(ais2_raw)\n\n def __str__(self):\n beacon_str = ''\n for k, v in self.subsystems.items():\n beacon_str += str(v) + '\\n'\n return beacon_str\n",
"step-2": "<mask token>\n\n\nclass ADCS1(object):\n <mask token>\n <mask token>\n\n\nclass ADCS2(object):\n\n def __init__(self, adcs2_data):\n self.gyro = tuple(struct.unpack('>hhh', adcs2_data))\n\n def __str__(self):\n adcs2_str = \"\"\"ADCS2:\n Gyro:\t{}\"\"\".format(self.gyro)\n return adcs2_str\n\n\nclass AIS(object):\n\n def __init__(self, ais_data):\n self.boot_count, _, _, self.unique_mssi, _ = struct.unpack('>HhhH12s',\n ais_data)\n\n def __str__(self):\n ais_str = (\"\"\"AIS:\n Boot count:\t{}\n Unique MSSI:\t{}\"\"\"\n .format(self.boot_count, self.unique_mssi))\n return ais_str\n\n\nclass Beacon(object):\n\n def __init__(self, raw_data):\n if len(raw_data) != BEACON_LENGTH:\n raise ValueError('Malformed beacon (incorrect length)')\n self.subsystems = {}\n (valid, eps_raw, com_raw, adcs1_raw, adcs2_raw, ais1_raw, ais2_raw) = (\n struct.unpack(('B' + '{}s' * 6).format(EPS_LENGTH, COM_LENGTH,\n ADCS1_LENGTH, ADCS2_LENGTH, AIS_LENGTH, AIS_LENGTH), raw_data))\n eps_valid = valid & 1 << 0\n com_valid = valid & 1 << 1\n adcs1_valid = valid & 1 << 2\n adcs2_valid = valid & 1 << 3\n ais1_valid = valid & 1 << 4\n ais2_valid = valid & 1 << 5\n if eps_valid:\n self.subsystems['EPS'] = EPS(eps_raw)\n if com_valid:\n self.subsystems['COM'] = COM(com_raw)\n if adcs1_valid:\n self.subsystems['ADCS1'] = ADCS1(adcs1_raw)\n if adcs2_valid:\n self.subsystems['ADCS2'] = ADCS2(adcs2_raw)\n if ais1_valid:\n self.subsystems['AIS1'] = AIS(ais1_raw)\n if ais2_valid:\n self.subsystems['AIS2'] = AIS(ais2_raw)\n\n def __str__(self):\n beacon_str = ''\n for k, v in self.subsystems.items():\n beacon_str += str(v) + '\\n'\n return beacon_str\n",
"step-3": "<mask token>\nBEACON_LENGTH = 84\nEPS_LENGTH = 20\nCOM_LENGTH = 10\nADCS1_LENGTH = 7\nADCS2_LENGTH = 6\nAIS_LENGTH = 20\n\n\nclass EPS(object):\n\n def __init__(self, eps_data):\n if len(eps_data) != EPS_LENGTH:\n raise InputException(len(eps_data), EPS_LENGTH)\n (self.boot_count, self.uptime, self.rt_clock, self.ping_status,\n self.subsystem_status, self.battery_voltage, self.cell_diff,\n self.battery_current, self.solar_power, self.temp, self.pa_temp,\n self.main_voltage) = struct.unpack('>HIIBHBbbBbbb', eps_data)\n self.battery_voltage *= 40\n self.cell_diff *= 4\n self.battery_current *= 10\n self.solar_power *= 20\n\n def __str__(self):\n eps_str = (\n \"\"\"EPS:\n Boot count:\t\t{0}\n Up time:\t\t{1} seconds\n Real time clock:\t{2}\n Battery voltage:\t{3} mV\n Cell difference:\t{4:.1f} mV\n Battery current:\t{5} mA\n Solar power:\t\t{6}\n Temperature:\t\t{7} C\n PA temperature:\t\t{8} C\"\"\"\n .format(self.boot_count, self.uptime, datetime.fromtimestamp(\n self.rt_clock), self.battery_voltage, self.cell_diff, self.\n battery_current, self.solar_power, self.temp, self.pa_temp))\n return eps_str\n\n\nclass COM(object):\n\n def __init__(self, com_data):\n (self.boot_count, self.packets_received, self.packets_send, self.\n latest_rssi, self.latest_bit_correction, self.\n latest_byte_correction) = struct.unpack('>HHHhBB', com_data)\n self.boot_count &= 8191\n\n def __str__(self):\n com_str = (\n \"\"\"COM:\n Boot count:\t\t{0}\n Packets received:\t{1}\n Packets send:\t\t{2}\n Latest rssi:\t\t{3}\n Latest bit corrections:\t{4}\n Latest byte corrections:{5}\"\"\"\n .format(self.boot_count, self.packets_received, self.\n packets_send, self.latest_rssi, self.latest_bit_correction,\n self.latest_byte_correction))\n return com_str\n\n\nclass ADCS1(object):\n\n def __init__(self, adcs1_data):\n data = struct.unpack('>hhhB', adcs1_data)\n self.bdot = tuple(data[0:3])\n self.state = data[3]\n\n def __str__(self):\n adcs1_str = \"\"\"ADCS1:\n State:\t{}\n Bdot:\t{}\"\"\".format(self\n .state, self.bdot)\n return adcs1_str\n\n\nclass ADCS2(object):\n\n def __init__(self, adcs2_data):\n self.gyro = tuple(struct.unpack('>hhh', adcs2_data))\n\n def __str__(self):\n adcs2_str = \"\"\"ADCS2:\n Gyro:\t{}\"\"\".format(self.gyro)\n return adcs2_str\n\n\nclass AIS(object):\n\n def __init__(self, ais_data):\n self.boot_count, _, _, self.unique_mssi, _ = struct.unpack('>HhhH12s',\n ais_data)\n\n def __str__(self):\n ais_str = (\"\"\"AIS:\n Boot count:\t{}\n Unique MSSI:\t{}\"\"\"\n .format(self.boot_count, self.unique_mssi))\n return ais_str\n\n\nclass Beacon(object):\n\n def __init__(self, raw_data):\n if len(raw_data) != BEACON_LENGTH:\n raise ValueError('Malformed beacon (incorrect length)')\n self.subsystems = {}\n (valid, eps_raw, com_raw, adcs1_raw, adcs2_raw, ais1_raw, ais2_raw) = (\n struct.unpack(('B' + '{}s' * 6).format(EPS_LENGTH, COM_LENGTH,\n ADCS1_LENGTH, ADCS2_LENGTH, AIS_LENGTH, AIS_LENGTH), raw_data))\n eps_valid = valid & 1 << 0\n com_valid = valid & 1 << 1\n adcs1_valid = valid & 1 << 2\n adcs2_valid = valid & 1 << 3\n ais1_valid = valid & 1 << 4\n ais2_valid = valid & 1 << 5\n if eps_valid:\n self.subsystems['EPS'] = EPS(eps_raw)\n if com_valid:\n self.subsystems['COM'] = COM(com_raw)\n if adcs1_valid:\n self.subsystems['ADCS1'] = ADCS1(adcs1_raw)\n if adcs2_valid:\n self.subsystems['ADCS2'] = ADCS2(adcs2_raw)\n if ais1_valid:\n self.subsystems['AIS1'] = AIS(ais1_raw)\n if ais2_valid:\n self.subsystems['AIS2'] = AIS(ais2_raw)\n\n def __str__(self):\n beacon_str = ''\n for k, v 
in self.subsystems.items():\n beacon_str += str(v) + '\\n'\n return beacon_str\n",
"step-4": "from datetime import datetime\nimport struct\nBEACON_LENGTH = 84\nEPS_LENGTH = 20\nCOM_LENGTH = 10\nADCS1_LENGTH = 7\nADCS2_LENGTH = 6\nAIS_LENGTH = 20\n\n\nclass EPS(object):\n\n def __init__(self, eps_data):\n if len(eps_data) != EPS_LENGTH:\n raise InputException(len(eps_data), EPS_LENGTH)\n (self.boot_count, self.uptime, self.rt_clock, self.ping_status,\n self.subsystem_status, self.battery_voltage, self.cell_diff,\n self.battery_current, self.solar_power, self.temp, self.pa_temp,\n self.main_voltage) = struct.unpack('>HIIBHBbbBbbb', eps_data)\n self.battery_voltage *= 40\n self.cell_diff *= 4\n self.battery_current *= 10\n self.solar_power *= 20\n\n def __str__(self):\n eps_str = (\n \"\"\"EPS:\n Boot count:\t\t{0}\n Up time:\t\t{1} seconds\n Real time clock:\t{2}\n Battery voltage:\t{3} mV\n Cell difference:\t{4:.1f} mV\n Battery current:\t{5} mA\n Solar power:\t\t{6}\n Temperature:\t\t{7} C\n PA temperature:\t\t{8} C\"\"\"\n .format(self.boot_count, self.uptime, datetime.fromtimestamp(\n self.rt_clock), self.battery_voltage, self.cell_diff, self.\n battery_current, self.solar_power, self.temp, self.pa_temp))\n return eps_str\n\n\nclass COM(object):\n\n def __init__(self, com_data):\n (self.boot_count, self.packets_received, self.packets_send, self.\n latest_rssi, self.latest_bit_correction, self.\n latest_byte_correction) = struct.unpack('>HHHhBB', com_data)\n self.boot_count &= 8191\n\n def __str__(self):\n com_str = (\n \"\"\"COM:\n Boot count:\t\t{0}\n Packets received:\t{1}\n Packets send:\t\t{2}\n Latest rssi:\t\t{3}\n Latest bit corrections:\t{4}\n Latest byte corrections:{5}\"\"\"\n .format(self.boot_count, self.packets_received, self.\n packets_send, self.latest_rssi, self.latest_bit_correction,\n self.latest_byte_correction))\n return com_str\n\n\nclass ADCS1(object):\n\n def __init__(self, adcs1_data):\n data = struct.unpack('>hhhB', adcs1_data)\n self.bdot = tuple(data[0:3])\n self.state = data[3]\n\n def __str__(self):\n adcs1_str = \"\"\"ADCS1:\n State:\t{}\n Bdot:\t{}\"\"\".format(self\n .state, self.bdot)\n return adcs1_str\n\n\nclass ADCS2(object):\n\n def __init__(self, adcs2_data):\n self.gyro = tuple(struct.unpack('>hhh', adcs2_data))\n\n def __str__(self):\n adcs2_str = \"\"\"ADCS2:\n Gyro:\t{}\"\"\".format(self.gyro)\n return adcs2_str\n\n\nclass AIS(object):\n\n def __init__(self, ais_data):\n self.boot_count, _, _, self.unique_mssi, _ = struct.unpack('>HhhH12s',\n ais_data)\n\n def __str__(self):\n ais_str = (\"\"\"AIS:\n Boot count:\t{}\n Unique MSSI:\t{}\"\"\"\n .format(self.boot_count, self.unique_mssi))\n return ais_str\n\n\nclass Beacon(object):\n\n def __init__(self, raw_data):\n if len(raw_data) != BEACON_LENGTH:\n raise ValueError('Malformed beacon (incorrect length)')\n self.subsystems = {}\n (valid, eps_raw, com_raw, adcs1_raw, adcs2_raw, ais1_raw, ais2_raw) = (\n struct.unpack(('B' + '{}s' * 6).format(EPS_LENGTH, COM_LENGTH,\n ADCS1_LENGTH, ADCS2_LENGTH, AIS_LENGTH, AIS_LENGTH), raw_data))\n eps_valid = valid & 1 << 0\n com_valid = valid & 1 << 1\n adcs1_valid = valid & 1 << 2\n adcs2_valid = valid & 1 << 3\n ais1_valid = valid & 1 << 4\n ais2_valid = valid & 1 << 5\n if eps_valid:\n self.subsystems['EPS'] = EPS(eps_raw)\n if com_valid:\n self.subsystems['COM'] = COM(com_raw)\n if adcs1_valid:\n self.subsystems['ADCS1'] = ADCS1(adcs1_raw)\n if adcs2_valid:\n self.subsystems['ADCS2'] = ADCS2(adcs2_raw)\n if ais1_valid:\n self.subsystems['AIS1'] = AIS(ais1_raw)\n if ais2_valid:\n self.subsystems['AIS2'] = AIS(ais2_raw)\n\n def 
__str__(self):\n beacon_str = ''\n for k, v in self.subsystems.items():\n beacon_str += str(v) + '\\n'\n return beacon_str\n",
"step-5": "from datetime import datetime\nimport struct\n\nBEACON_LENGTH = 84\nEPS_LENGTH = 20\nCOM_LENGTH = 10\n\n# reverse engineered\nADCS1_LENGTH = 7\nADCS2_LENGTH = 6\nAIS_LENGTH = 20\n\nclass EPS(object):\n def __init__(self, eps_data):\n if len(eps_data) != EPS_LENGTH:\n raise InputException(len(eps_data), EPS_LENGTH)\n\n self.boot_count, self.uptime, self.rt_clock, self.ping_status, self.subsystem_status,\\\n self.battery_voltage, self.cell_diff, self.battery_current, self.solar_power,\\\n self.temp, self.pa_temp, self.main_voltage = struct.unpack(\">HIIBHBbbBbbb\", eps_data)\n\n self.battery_voltage *= 40\n self.cell_diff *= 4\n self.battery_current *= 10\n self.solar_power *= 20\n\n def __str__(self):\n eps_str = (\"\"\"EPS:\n Boot count:\\t\\t{0}\n Up time:\\t\\t{1} seconds\n Real time clock:\\t{2}\n Battery voltage:\\t{3} mV\n Cell difference:\\t{4:.1f} mV\n Battery current:\\t{5} mA\n Solar power:\\t\\t{6}\n Temperature:\\t\\t{7} C\n PA temperature:\\t\\t{8} C\"\"\".format(\n self.boot_count, self.uptime, datetime.fromtimestamp(self.rt_clock),\n self.battery_voltage, self.cell_diff, self.battery_current, self.solar_power,\n self.temp, self.pa_temp))\n\n return eps_str\n\n\nclass COM(object):\n def __init__(self, com_data):\n self.boot_count, self.packets_received, self.packets_send, self.latest_rssi,\\\n self.latest_bit_correction, self.latest_byte_correction = \\\n struct.unpack(\">HHHhBB\", com_data)\n\n self.boot_count &= 0x1fff\n \n def __str__(self):\n com_str = (\"\"\"COM:\n Boot count:\\t\\t{0}\n Packets received:\\t{1}\n Packets send:\\t\\t{2}\n Latest rssi:\\t\\t{3}\n Latest bit corrections:\\t{4}\n Latest byte corrections:{5}\"\"\".format(\n self.boot_count, self.packets_received, self.packets_send,\n self.latest_rssi, self.latest_bit_correction, self.latest_byte_correction))\n\n return com_str\n\n# Reverse engineered classes\nclass ADCS1(object):\n def __init__(self, adcs1_data):\n data = struct.unpack(\">hhhB\", adcs1_data)\n self.bdot = tuple(data[0:3])\n self.state = data[3]\n\n def __str__(self):\n adcs1_str = (\"\"\"ADCS1:\n State:\\t{}\n Bdot:\\t{}\"\"\".format(self.state, self.bdot))\n\n return adcs1_str\n\nclass ADCS2(object):\n def __init__(self, adcs2_data):\n self.gyro = tuple(struct.unpack(\">hhh\", adcs2_data))\n\n def __str__(self):\n adcs2_str = (\"\"\"ADCS2:\n Gyro:\\t{}\"\"\".format(self.gyro))\n\n return adcs2_str\n\nclass AIS(object):\n def __init__(self, ais_data):\n # there are some fields which apparently are 0 all the time\n # this fields can't be identified by reverse engineering\n self.boot_count, _, _, self.unique_mssi, _ = struct.unpack(\">HhhH12s\", ais_data)\n\n def __str__(self):\n ais_str = (\"\"\"AIS:\n Boot count:\\t{}\n Unique MSSI:\\t{}\"\"\".format(self.boot_count, self.unique_mssi))\n\n return ais_str\n\n## Beacon\n# The beacon class takes a string of bytes as input, and parses it to generate\n# a representation of the beacon format used by AASUAT4\n# The beacon format is as follows:\n\n\n# [ 1 byte | 19 bytes | 12 bytes | 7 bytes | 6 bytes | 20 bytes | 20 bytes ]\n# [ Valid | EPS | COM | ADCS1 | ADCS2 | AIS1 | AIS2 ]\n# This is not correct EPS is 20 bytes and COM is 10 bytes\n# The remaining fields seem to have the correct length\n\n#\n# For each subsystem, which are valid, are the corresponding data bytes passed to another\n# class which parses the information.\n#\n# The __str__ method returns a human readable string with key information from the beacon\nclass Beacon(object):\n \n def __init__(self, raw_data):\n if 
len(raw_data) != BEACON_LENGTH:\n raise ValueError(\"Malformed beacon (incorrect length)\")\n\n self.subsystems = {}\n\n valid, eps_raw, com_raw, adcs1_raw, adcs2_raw, ais1_raw, ais2_raw = \\\n struct.unpack((\"B\"+\"{}s\"*6).format(EPS_LENGTH, COM_LENGTH, ADCS1_LENGTH, ADCS2_LENGTH, AIS_LENGTH, AIS_LENGTH), raw_data)\n\n # reverse engineered valid bits\n # EPS and COM are known from university team code\n # valid byte is usually 0x27\n # in DK3WN's blog we see that EPS, COM, AIS2 and ADCS1 are valid\n eps_valid = valid & (1 << 0)\n com_valid = valid & (1 << 1)\n adcs1_valid = valid & (1 << 2)\n adcs2_valid = valid & (1 << 3)\n ais1_valid = valid & (1 << 4)\n ais2_valid = valid & (1 << 5)\n \n if eps_valid:\n self.subsystems['EPS'] = EPS(eps_raw)\n if com_valid:\n self.subsystems['COM'] = COM(com_raw)\n if adcs1_valid:\n self.subsystems['ADCS1'] = ADCS1(adcs1_raw)\n if adcs2_valid:\n self.subsystems['ADCS2'] = ADCS2(adcs2_raw)\n if ais1_valid:\n self.subsystems['AIS1'] = AIS(ais1_raw)\n if ais2_valid:\n self.subsystems['AIS2'] = AIS(ais2_raw)\n \n def __str__(self):\n beacon_str = \"\"\n for k,v in self.subsystems.items():\n beacon_str += str(v) + \"\\n\"\n return beacon_str\n\n",
"step-ids": [
8,
10,
19,
20,
21
]
}
|
[
8,
10,
19,
20,
21
] |
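A note on the record above: the beacon parser's valid byte is reverse engineered, and the comment's example value 0x27 can be checked directly against the bit layout. A minimal sketch (the function and list names are illustrative, not from the original):

# Bit order follows the beacon parser above: bit 0 = EPS ... bit 5 = AIS2.
SUBSYSTEM_BITS = ["EPS", "COM", "ADCS1", "ADCS2", "AIS1", "AIS2"]

def decode_valid_byte(valid):
    """Return the subsystems whose valid bit is set."""
    return [name for bit, name in enumerate(SUBSYSTEM_BITS) if valid & (1 << bit)]

# 0x27 == 0b100111 -> EPS, COM, ADCS1 and AIS2, matching the DK3WN observation.
print(decode_valid_byte(0x27))  # ['EPS', 'COM', 'ADCS1', 'AIS2']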
import requests
import json
l = list()
with open ( "token.txt", "r") as f:
token = f.read()
    # build the request header that carries our API token
headers = {"X-Xapp-Token" : token}
    with open('dataset_24476_4.txt', 'r') as ids_file:
        for line in ids_file:
address = "https://api.artsy.net/api/artists/" + line.strip()
            # issue the GET request with the auth header
r = requests.get(address, headers=headers)
            # parse the server's JSON response
j = json.loads(r.text)
l.append((j['sortable_name'], j['birthday']))
#l.append((('Warhol Bandy', '1928')))
#l.append((('Warhol Aandy', '1928')))
l = sorted(l, key=lambda tup: (tup[1], tup[0]))
for i in l:
print(i[0])
# year = '0000'
# new_l = []
#
# k = []
#
# for i in l:
# if i[1] != year:
# k = []
# k.append(i[0])
# year = i[1]
# else:
# k.append(i[0])
# k.sort()
# print(next(name for name in k))
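The commented-out tail above sketches printing the alphabetically-first artist per birth year. Since l is already sorted by (birthday, sortable_name), an equivalent sketch with itertools.groupby (not part of the original script) is:

from itertools import groupby

# l is sorted by (birthday, name), so groupby yields one run per birth year;
# the first tuple in each run holds the alphabetically-first artist.
for year, artists in groupby(l, key=lambda tup: tup[1]):
    print(year, next(artists)[0])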
|
normal
|
{
"blob_id": "e1ecc08f66e094841647f72b78bcd29ed8d32668",
"index": 5976,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwith open('token.txt', 'r') as f:\n token = f.read()\n headers = {'X-Xapp-Token': token}\n with open('dataset_24476_4.txt', 'r') as id:\n for line in id:\n address = 'https://api.artsy.net/api/artists/' + line.strip()\n r = requests.get(address, headers=headers)\n j = json.loads(r.text)\n l.append((j['sortable_name'], j['birthday']))\n<mask token>\nfor i in l:\n print(i[0])\n",
"step-3": "<mask token>\nl = list()\nwith open('token.txt', 'r') as f:\n token = f.read()\n headers = {'X-Xapp-Token': token}\n with open('dataset_24476_4.txt', 'r') as id:\n for line in id:\n address = 'https://api.artsy.net/api/artists/' + line.strip()\n r = requests.get(address, headers=headers)\n j = json.loads(r.text)\n l.append((j['sortable_name'], j['birthday']))\nl = sorted(l, key=lambda tup: (tup[1], tup[0]))\nfor i in l:\n print(i[0])\n",
"step-4": "import requests\nimport json\nl = list()\nwith open('token.txt', 'r') as f:\n token = f.read()\n headers = {'X-Xapp-Token': token}\n with open('dataset_24476_4.txt', 'r') as id:\n for line in id:\n address = 'https://api.artsy.net/api/artists/' + line.strip()\n r = requests.get(address, headers=headers)\n j = json.loads(r.text)\n l.append((j['sortable_name'], j['birthday']))\nl = sorted(l, key=lambda tup: (tup[1], tup[0]))\nfor i in l:\n print(i[0])\n",
"step-5": "import requests\nimport json\n\nl = list()\n\nwith open ( \"token.txt\", \"r\") as f:\n\n token = f.read()\n\n # создаем заголовок, содержащий наш токен\n headers = {\"X-Xapp-Token\" : token}\n\n with open('dataset_24476_4.txt', 'r') as id:\n\n for line in id:\n address = \"https://api.artsy.net/api/artists/\" + line.strip()\n # инициируем запрос с заголовком\n r = requests.get(address, headers=headers)\n\n # разбираем ответ сервера\n j = json.loads(r.text)\n\n l.append((j['sortable_name'], j['birthday']))\n\n#l.append((('Warhol Bandy', '1928')))\n#l.append((('Warhol Aandy', '1928')))\n\n\nl = sorted(l, key=lambda tup: (tup[1], tup[0]))\nfor i in l:\n print(i[0])\n\n# year = '0000'\n# new_l = []\n#\n# k = []\n#\n# for i in l:\n# if i[1] != year:\n# k = []\n# k.append(i[0])\n# year = i[1]\n# else:\n# k.append(i[0])\n# k.sort()\n# print(next(name for name in k))\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
def getTextWithoutSpaces(text):
withoutLineBreaks = text.replace('\n', '')
withoutSpaces = re.sub(' +', ' ', withoutLineBreaks)
return withoutSpaces
def getSentences(sentences, text):
data = re.findall('\\b[a-zA-Z]+|[.!?]', text)
unique_words = set(data)
sentenceCounter = 0
wordCounter = 0
for i in sentences:
sentenceCounter += 1
i = i.lower()
words = i.split()
wordCounter += len(words)
print('Total sentence in the text : ' + str(sentenceCounter - 1))
print('Total word in the text : ' + str(wordCounter))
print('Unique word number : ' + str(len(unique_words) - 1))
<|reserved_special_token_0|>
def listResults():
print('')
split_into_sentences(getText())
print('')
words, listOfBigrams, unigramCounts, bigramCounts = ngram.createBigram(
getTextWithoutSpaces(getText()))
listOfProbBigram, listOfBigrams, listOfProbUnigram, words = (ngram.
calcBigramProb(words, listOfBigrams, unigramCounts, bigramCounts))
words, flipped = ngram.maxUnigram(listOfProbBigram, listOfBigrams,
listOfProbUnigram, words)
ngram.findLeastValues(words, flipped)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def split_into_sentences(text):
text = text.lower()
sentences = re.split('(?<!\\w\\.\\w.)(?<![A-Z][a-z]\\.)(?<=\\.|\\?)\\s',
text)
getSentences(sentences, text)
return sentences
def getTextWithoutSpaces(text):
withoutLineBreaks = text.replace('\n', '')
withoutSpaces = re.sub(' +', ' ', withoutLineBreaks)
return withoutSpaces
def getSentences(sentences, text):
data = re.findall('\\b[a-zA-Z]+|[.!?]', text)
unique_words = set(data)
sentenceCounter = 0
wordCounter = 0
for i in sentences:
sentenceCounter += 1
i = i.lower()
words = i.split()
wordCounter += len(words)
print('Total sentence in the text : ' + str(sentenceCounter - 1))
print('Total word in the text : ' + str(wordCounter))
print('Unique word number : ' + str(len(unique_words) - 1))
def getText():
file = open('hw01_FireFairies.txt')
data = file.read()
return data
def listResults():
print('')
split_into_sentences(getText())
print('')
words, listOfBigrams, unigramCounts, bigramCounts = ngram.createBigram(
getTextWithoutSpaces(getText()))
listOfProbBigram, listOfBigrams, listOfProbUnigram, words = (ngram.
calcBigramProb(words, listOfBigrams, unigramCounts, bigramCounts))
words, flipped = ngram.maxUnigram(listOfProbBigram, listOfBigrams,
listOfProbUnigram, words)
ngram.findLeastValues(words, flipped)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def split_into_sentences(text):
text = text.lower()
sentences = re.split('(?<!\\w\\.\\w.)(?<![A-Z][a-z]\\.)(?<=\\.|\\?)\\s',
text)
getSentences(sentences, text)
return sentences
def getTextWithoutSpaces(text):
withoutLineBreaks = text.replace('\n', '')
withoutSpaces = re.sub(' +', ' ', withoutLineBreaks)
return withoutSpaces
def getSentences(sentences, text):
data = re.findall('\\b[a-zA-Z]+|[.!?]', text)
unique_words = set(data)
sentenceCounter = 0
wordCounter = 0
for i in sentences:
sentenceCounter += 1
i = i.lower()
words = i.split()
wordCounter += len(words)
print('Total sentence in the text : ' + str(sentenceCounter - 1))
print('Total word in the text : ' + str(wordCounter))
print('Unique word number : ' + str(len(unique_words) - 1))
def getText():
file = open('hw01_FireFairies.txt')
data = file.read()
return data
def listResults():
print('')
split_into_sentences(getText())
print('')
words, listOfBigrams, unigramCounts, bigramCounts = ngram.createBigram(
getTextWithoutSpaces(getText()))
listOfProbBigram, listOfBigrams, listOfProbUnigram, words = (ngram.
calcBigramProb(words, listOfBigrams, unigramCounts, bigramCounts))
words, flipped = ngram.maxUnigram(listOfProbBigram, listOfBigrams,
listOfProbUnigram, words)
ngram.findLeastValues(words, flipped)
if __name__ == '__main__':
listResults()
<|reserved_special_token_1|>
import re
import ngram
import smoothedNgram
def split_into_sentences(text):
text = text.lower()
sentences = re.split('(?<!\\w\\.\\w.)(?<![A-Z][a-z]\\.)(?<=\\.|\\?)\\s',
text)
getSentences(sentences, text)
return sentences
def getTextWithoutSpaces(text):
withoutLineBreaks = text.replace('\n', '')
withoutSpaces = re.sub(' +', ' ', withoutLineBreaks)
return withoutSpaces
def getSentences(sentences, text):
data = re.findall('\\b[a-zA-Z]+|[.!?]', text)
unique_words = set(data)
sentenceCounter = 0
wordCounter = 0
for i in sentences:
sentenceCounter += 1
i = i.lower()
words = i.split()
wordCounter += len(words)
print('Total sentence in the text : ' + str(sentenceCounter - 1))
print('Total word in the text : ' + str(wordCounter))
print('Unique word number : ' + str(len(unique_words) - 1))
def getText():
file = open('hw01_FireFairies.txt')
data = file.read()
return data
def listResults():
print('')
split_into_sentences(getText())
print('')
words, listOfBigrams, unigramCounts, bigramCounts = ngram.createBigram(
getTextWithoutSpaces(getText()))
listOfProbBigram, listOfBigrams, listOfProbUnigram, words = (ngram.
calcBigramProb(words, listOfBigrams, unigramCounts, bigramCounts))
words, flipped = ngram.maxUnigram(listOfProbBigram, listOfBigrams,
listOfProbUnigram, words)
ngram.findLeastValues(words, flipped)
if __name__ == '__main__':
listResults()
<|reserved_special_token_1|>
import re
import ngram
import smoothedNgram
def split_into_sentences(text):
text = text.lower()
sentences = re.split(r'(?<!\w\.\w.)(?<![A-Z][a-z]\.)(?<=\.|\?)\s', text)
getSentences(sentences,text)
return sentences
def getTextWithoutSpaces(text):
withoutLineBreaks = text.replace("\n", "")
withoutSpaces = re.sub(' +', ' ', withoutLineBreaks)
return withoutSpaces
def getSentences(sentences, text):
    data = re.findall(r'\b[a-zA-Z]+|[.!?]', text)
    unique_words = set(data)
    sentenceCounter = 0
    wordCounter = 0
    for i in sentences:
        sentenceCounter += 1
        i = i.lower()
        words = i.split()
        wordCounter += len(words)
    # the split usually yields an empty trailing fragment, hence the -1
    print('Total sentence in the text : ' + str(sentenceCounter - 1))
    print('Total word in the text : ' + str(wordCounter))
    # -1 roughly discounts the punctuation tokens the regex also captures
    print('Unique word number : ' + str(len(unique_words) - 1))
def getText():
    # use a context manager so the file handle is closed after reading
    with open("hw01_FireFairies.txt") as file:
        data = file.read()
    return data
def listResults():
print('')
split_into_sentences(getText())
print('')
words,listOfBigrams, unigramCounts, bigramCounts = ngram.createBigram(getTextWithoutSpaces(getText()))
listOfProbBigram, listOfBigrams, listOfProbUnigram, words = ngram.calcBigramProb(words, listOfBigrams, unigramCounts, bigramCounts)
words, flipped = ngram.maxUnigram(listOfProbBigram, listOfBigrams, listOfProbUnigram, words)
ngram.findLeastValues(words, flipped)
if __name__ == '__main__':
listResults()
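The sentence-splitting regex in split_into_sentences is easiest to see on a small sample (the text below is made up). Note that the function lowercases the input first, which defeats the (?<![A-Z][a-z]\.) abbreviation guard; on the original casing it behaves as intended:

import re

sample = "Dr. Smith went home. It was late. Was it raining? Yes."
# Split on whitespace that follows '.' or '?', except when the negative
# lookbehinds detect an abbreviation ('Dr.') or a dotted form ('e.g.').
print(re.split(r'(?<!\w\.\w.)(?<![A-Z][a-z]\.)(?<=\.|\?)\s', sample))
# ['Dr. Smith went home.', 'It was late.', 'Was it raining?', 'Yes.']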
|
flexible
|
{
"blob_id": "6d7db5b9a64ec25763f5af6ceec1a46d629d549c",
"index": 472,
"step-1": "<mask token>\n\n\ndef getTextWithoutSpaces(text):\n withoutLineBreaks = text.replace('\\n', '')\n withoutSpaces = re.sub(' +', ' ', withoutLineBreaks)\n return withoutSpaces\n\n\ndef getSentences(sentences, text):\n data = re.findall('\\\\b[a-zA-Z]+|[.!?]', text)\n unique_words = set(data)\n sentenceCounter = 0\n wordCounter = 0\n for i in sentences:\n sentenceCounter += 1\n i = i.lower()\n words = i.split()\n wordCounter += len(words)\n print('Total sentence in the text : ' + str(sentenceCounter - 1))\n print('Total word in the text : ' + str(wordCounter))\n print('Unique word number : ' + str(len(unique_words) - 1))\n\n\n<mask token>\n\n\ndef listResults():\n print('')\n split_into_sentences(getText())\n print('')\n words, listOfBigrams, unigramCounts, bigramCounts = ngram.createBigram(\n getTextWithoutSpaces(getText()))\n listOfProbBigram, listOfBigrams, listOfProbUnigram, words = (ngram.\n calcBigramProb(words, listOfBigrams, unigramCounts, bigramCounts))\n words, flipped = ngram.maxUnigram(listOfProbBigram, listOfBigrams,\n listOfProbUnigram, words)\n ngram.findLeastValues(words, flipped)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef split_into_sentences(text):\n text = text.lower()\n sentences = re.split('(?<!\\\\w\\\\.\\\\w.)(?<![A-Z][a-z]\\\\.)(?<=\\\\.|\\\\?)\\\\s',\n text)\n getSentences(sentences, text)\n return sentences\n\n\ndef getTextWithoutSpaces(text):\n withoutLineBreaks = text.replace('\\n', '')\n withoutSpaces = re.sub(' +', ' ', withoutLineBreaks)\n return withoutSpaces\n\n\ndef getSentences(sentences, text):\n data = re.findall('\\\\b[a-zA-Z]+|[.!?]', text)\n unique_words = set(data)\n sentenceCounter = 0\n wordCounter = 0\n for i in sentences:\n sentenceCounter += 1\n i = i.lower()\n words = i.split()\n wordCounter += len(words)\n print('Total sentence in the text : ' + str(sentenceCounter - 1))\n print('Total word in the text : ' + str(wordCounter))\n print('Unique word number : ' + str(len(unique_words) - 1))\n\n\ndef getText():\n file = open('hw01_FireFairies.txt')\n data = file.read()\n return data\n\n\ndef listResults():\n print('')\n split_into_sentences(getText())\n print('')\n words, listOfBigrams, unigramCounts, bigramCounts = ngram.createBigram(\n getTextWithoutSpaces(getText()))\n listOfProbBigram, listOfBigrams, listOfProbUnigram, words = (ngram.\n calcBigramProb(words, listOfBigrams, unigramCounts, bigramCounts))\n words, flipped = ngram.maxUnigram(listOfProbBigram, listOfBigrams,\n listOfProbUnigram, words)\n ngram.findLeastValues(words, flipped)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef split_into_sentences(text):\n text = text.lower()\n sentences = re.split('(?<!\\\\w\\\\.\\\\w.)(?<![A-Z][a-z]\\\\.)(?<=\\\\.|\\\\?)\\\\s',\n text)\n getSentences(sentences, text)\n return sentences\n\n\ndef getTextWithoutSpaces(text):\n withoutLineBreaks = text.replace('\\n', '')\n withoutSpaces = re.sub(' +', ' ', withoutLineBreaks)\n return withoutSpaces\n\n\ndef getSentences(sentences, text):\n data = re.findall('\\\\b[a-zA-Z]+|[.!?]', text)\n unique_words = set(data)\n sentenceCounter = 0\n wordCounter = 0\n for i in sentences:\n sentenceCounter += 1\n i = i.lower()\n words = i.split()\n wordCounter += len(words)\n print('Total sentence in the text : ' + str(sentenceCounter - 1))\n print('Total word in the text : ' + str(wordCounter))\n print('Unique word number : ' + str(len(unique_words) - 1))\n\n\ndef getText():\n file = open('hw01_FireFairies.txt')\n data = file.read()\n return data\n\n\ndef listResults():\n print('')\n split_into_sentences(getText())\n print('')\n words, listOfBigrams, unigramCounts, bigramCounts = ngram.createBigram(\n getTextWithoutSpaces(getText()))\n listOfProbBigram, listOfBigrams, listOfProbUnigram, words = (ngram.\n calcBigramProb(words, listOfBigrams, unigramCounts, bigramCounts))\n words, flipped = ngram.maxUnigram(listOfProbBigram, listOfBigrams,\n listOfProbUnigram, words)\n ngram.findLeastValues(words, flipped)\n\n\nif __name__ == '__main__':\n listResults()\n",
"step-4": "import re\nimport ngram\nimport smoothedNgram\n\n\ndef split_into_sentences(text):\n text = text.lower()\n sentences = re.split('(?<!\\\\w\\\\.\\\\w.)(?<![A-Z][a-z]\\\\.)(?<=\\\\.|\\\\?)\\\\s',\n text)\n getSentences(sentences, text)\n return sentences\n\n\ndef getTextWithoutSpaces(text):\n withoutLineBreaks = text.replace('\\n', '')\n withoutSpaces = re.sub(' +', ' ', withoutLineBreaks)\n return withoutSpaces\n\n\ndef getSentences(sentences, text):\n data = re.findall('\\\\b[a-zA-Z]+|[.!?]', text)\n unique_words = set(data)\n sentenceCounter = 0\n wordCounter = 0\n for i in sentences:\n sentenceCounter += 1\n i = i.lower()\n words = i.split()\n wordCounter += len(words)\n print('Total sentence in the text : ' + str(sentenceCounter - 1))\n print('Total word in the text : ' + str(wordCounter))\n print('Unique word number : ' + str(len(unique_words) - 1))\n\n\ndef getText():\n file = open('hw01_FireFairies.txt')\n data = file.read()\n return data\n\n\ndef listResults():\n print('')\n split_into_sentences(getText())\n print('')\n words, listOfBigrams, unigramCounts, bigramCounts = ngram.createBigram(\n getTextWithoutSpaces(getText()))\n listOfProbBigram, listOfBigrams, listOfProbUnigram, words = (ngram.\n calcBigramProb(words, listOfBigrams, unigramCounts, bigramCounts))\n words, flipped = ngram.maxUnigram(listOfProbBigram, listOfBigrams,\n listOfProbUnigram, words)\n ngram.findLeastValues(words, flipped)\n\n\nif __name__ == '__main__':\n listResults()\n",
"step-5": "import re\nimport ngram\nimport smoothedNgram\n\ndef split_into_sentences(text):\n text = text.lower()\n sentences = re.split(r'(?<!\\w\\.\\w.)(?<![A-Z][a-z]\\.)(?<=\\.|\\?)\\s', text)\n getSentences(sentences,text)\n return sentences\n\ndef getTextWithoutSpaces(text):\n withoutLineBreaks = text.replace(\"\\n\", \"\")\n withoutSpaces = re.sub(' +', ' ', withoutLineBreaks)\n return withoutSpaces\n\ndef getSentences(sentences,text):\n data = re.findall(r'\\b[a-zA-Z]+|[.!?]', text)\n unique_words = set(data)\n sentenceCounter=0\n wordCounter=0\n for i in sentences:\n sentenceCounter += 1\n i = i.lower()\n words = i.split()\n wordCounter += len(words)\n print('Total sentence in the text : ' + str(sentenceCounter-1))\n print('Total word in the text : ' + str(wordCounter))\n print('Unique word number : ' + str(len(unique_words)-1))\n\n\ndef getText():\n file = open(\"hw01_FireFairies.txt\")\n data = file.read()\n return data\n\n\ndef listResults():\n print('')\n split_into_sentences(getText())\n print('')\n words,listOfBigrams, unigramCounts, bigramCounts = ngram.createBigram(getTextWithoutSpaces(getText()))\n listOfProbBigram, listOfBigrams, listOfProbUnigram, words = ngram.calcBigramProb(words, listOfBigrams, unigramCounts, bigramCounts)\n words, flipped = ngram.maxUnigram(listOfProbBigram, listOfBigrams, listOfProbUnigram, words)\n ngram.findLeastValues(words, flipped)\n\n\nif __name__ == '__main__':\n listResults()\n",
"step-ids": [
3,
5,
6,
7,
8
]
}
|
[
3,
5,
6,
7,
8
] |
from eums.test.api.api_test_helpers import create_option
from eums.test.factories.question_factory import MultipleChoiceQuestionFactory
from eums.test.api.authenticated_api_test_case import AuthenticatedAPITestCase
from eums.test.config import BACKEND_URL
from eums.models.question import MultipleChoiceQuestion
ENDPOINT_URL = BACKEND_URL + 'option/'
RECEIVED_OPTIONS_ENDPOINT_URL = BACKEND_URL + 'received-options/'
QUALITY_OPTIONS_ENDPOINT_URL = BACKEND_URL + 'quality-options/'
SATISFIED_OPTIONS_ENDPOINT_URL = BACKEND_URL + 'satisfied-options/'
class OptionsEndPointTest(AuthenticatedAPITestCase):
def test_should_create_item(self):
question = MultipleChoiceQuestionFactory()
option_details = {'text': "Bad", 'question': question.id}
response = self.client.post(ENDPOINT_URL, option_details, format='json')
self.assertEqual(response.status_code, 201)
self.assertDictContainsSubset(option_details, response.data)
def test_should_get_options_sorted_by_text(self):
question = MultipleChoiceQuestionFactory()
option_one_details = {'text': "B Option", 'question': question.id}
option_two_details = {'text': "A Option", 'question': question.id}
create_option(self, option_one_details)
create_option(self, option_two_details)
get_response = self.client.get(ENDPOINT_URL)
self.assertEqual(get_response.status_code, 200)
self.assertDictContainsSubset(option_two_details, get_response.data[0])
self.assertDictContainsSubset(option_one_details, get_response.data[1])
class ReceivedOptionsEndPointTest(AuthenticatedAPITestCase):
def test_should_only_get_received_options(self):
        received_question, _ = MultipleChoiceQuestion.objects.get_or_create(
uuids=['6c1cf97d-59b8-4bd3-815b-783abd3dfad9'],
text='Was product received?', label='productReceived'
)
other_question = MultipleChoiceQuestionFactory()
option_one_details = {'text': "Yes", 'question': received_question.id}
option_two_details = {'text': "No", 'question': received_question.id}
option_three_details = {'text': "Other", 'question': other_question.id}
create_option(self, option_one_details)
create_option(self, option_two_details)
create_option(self, option_three_details)
get_response = self.client.get(RECEIVED_OPTIONS_ENDPOINT_URL)
self.assertEqual(get_response.status_code, 200)
self.assertDictContainsSubset(option_one_details, get_response.data[0])
self.assertDictContainsSubset(option_two_details, get_response.data[2])
self.assertNotIn(option_three_details, get_response.data)
class QualityOptionsEndPointTest(AuthenticatedAPITestCase):
def test_should_only_get_quality_options_sorted_by_text(self):
        quality_question, _ = MultipleChoiceQuestion.objects.get_or_create(
uuids=['6c1cf92d-59b8-4bd3-815b-783abd3dfad9'],
text='What is the quality of the product?', label='qualityOfProduct'
)
other_question = MultipleChoiceQuestionFactory()
option_one_details = {'text': "B Option", 'question': quality_question.id}
option_two_details = {'text': "A Option", 'question': quality_question.id}
option_three_details = {'text': "C Option", 'question': other_question.id}
create_option(self, option_one_details)
create_option(self, option_two_details)
create_option(self, option_three_details)
get_response = self.client.get(QUALITY_OPTIONS_ENDPOINT_URL)
self.assertEqual(get_response.status_code, 200)
self.assertDictContainsSubset(option_two_details, get_response.data[0])
self.assertDictContainsSubset(option_one_details, get_response.data[1])
self.assertNotIn(option_three_details, get_response.data)
class SatisfiedOptionsEndPointTest(AuthenticatedAPITestCase):
def test_should_only_get_satisfied_options(self):
        satisfied_question, _ = MultipleChoiceQuestion.objects.get_or_create(
uuids=['6c1cf27d-59b8-4bd3-815b-783abd3dfad9'],
text='Are you satisfied with the product?', label='satisfiedWithProduct'
)
other_question = MultipleChoiceQuestionFactory()
option_one_details = {'text': "Yes", 'question': satisfied_question.id}
option_two_details = {'text': "No", 'question': satisfied_question.id}
option_three_details = {'text': "Other", 'question': other_question.id}
create_option(self, option_one_details)
create_option(self, option_two_details)
create_option(self, option_three_details)
get_response = self.client.get(SATISFIED_OPTIONS_ENDPOINT_URL)
self.assertEqual(get_response.status_code, 200)
self.assertDictContainsSubset(option_one_details, get_response.data[0])
self.assertDictContainsSubset(option_two_details, get_response.data[2])
self.assertNotIn(option_three_details, get_response.data)
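One maintenance note on these tests: assertDictContainsSubset was deprecated in Python 3.2 and removed in 3.12. A drop-in replacement built on plain assertEqual (a sketch, not part of the original suite) is:

def assert_dict_contains_subset(test_case, subset, dictionary):
    # Merging the subset into the dict is a no-op exactly when every
    # subset item is already present with the same value.
    test_case.assertEqual(dictionary, {**dictionary, **subset})

Call it as assert_dict_contains_subset(self, option_details, response.data) from inside a test method.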
|
normal
|
{
"blob_id": "1152f144e17c11416f9ed56b4408f18615b16dc2",
"index": 5187,
"step-1": "<mask token>\n\n\nclass OptionsEndPointTest(AuthenticatedAPITestCase):\n <mask token>\n <mask token>\n\n\nclass ReceivedOptionsEndPointTest(AuthenticatedAPITestCase):\n\n def test_should_only_get_received_options(self):\n received_question, _ = MultipleChoiceQuestion.objects.get_or_create(\n uuids=['6c1cf97d-59b8-4bd3-815b-783abd3dfad9'], text=\n 'Was product received?', label='productReceived')\n other_question = MultipleChoiceQuestionFactory()\n option_one_details = {'text': 'Yes', 'question': received_question.id}\n option_two_details = {'text': 'No', 'question': received_question.id}\n option_three_details = {'text': 'Other', 'question': other_question.id}\n create_option(self, option_one_details)\n create_option(self, option_two_details)\n create_option(self, option_three_details)\n get_response = self.client.get(RECEIVED_OPTIONS_ENDPOINT_URL)\n self.assertEqual(get_response.status_code, 200)\n self.assertDictContainsSubset(option_one_details, get_response.data[0])\n self.assertDictContainsSubset(option_two_details, get_response.data[2])\n self.assertNotIn(option_three_details, get_response.data)\n\n\nclass QualityOptionsEndPointTest(AuthenticatedAPITestCase):\n\n def test_should_only_get_quality_options_sorted_by_text(self):\n quality_question, _ = MultipleChoiceQuestion.objects.get_or_create(\n uuids=['6c1cf92d-59b8-4bd3-815b-783abd3dfad9'], text=\n 'What is the quality of the product?', label='qualityOfProduct')\n other_question = MultipleChoiceQuestionFactory()\n option_one_details = {'text': 'B Option', 'question':\n quality_question.id}\n option_two_details = {'text': 'A Option', 'question':\n quality_question.id}\n option_three_details = {'text': 'C Option', 'question':\n other_question.id}\n create_option(self, option_one_details)\n create_option(self, option_two_details)\n create_option(self, option_three_details)\n get_response = self.client.get(QUALITY_OPTIONS_ENDPOINT_URL)\n self.assertEqual(get_response.status_code, 200)\n self.assertDictContainsSubset(option_two_details, get_response.data[0])\n self.assertDictContainsSubset(option_one_details, get_response.data[1])\n self.assertNotIn(option_three_details, get_response.data)\n\n\nclass SatisfiedOptionsEndPointTest(AuthenticatedAPITestCase):\n\n def test_should_only_get_satisfied_options(self):\n satisfied_question, _ = MultipleChoiceQuestion.objects.get_or_create(\n uuids=['6c1cf27d-59b8-4bd3-815b-783abd3dfad9'], text=\n 'Are you satisfied with the product?', label='satisfiedWithProduct'\n )\n other_question = MultipleChoiceQuestionFactory()\n option_one_details = {'text': 'Yes', 'question': satisfied_question.id}\n option_two_details = {'text': 'No', 'question': satisfied_question.id}\n option_three_details = {'text': 'Other', 'question': other_question.id}\n create_option(self, option_one_details)\n create_option(self, option_two_details)\n create_option(self, option_three_details)\n get_response = self.client.get(SATISFIED_OPTIONS_ENDPOINT_URL)\n self.assertEqual(get_response.status_code, 200)\n self.assertDictContainsSubset(option_one_details, get_response.data[0])\n self.assertDictContainsSubset(option_two_details, get_response.data[2])\n self.assertNotIn(option_three_details, get_response.data)\n",
"step-2": "<mask token>\n\n\nclass OptionsEndPointTest(AuthenticatedAPITestCase):\n\n def test_should_create_item(self):\n question = MultipleChoiceQuestionFactory()\n option_details = {'text': 'Bad', 'question': question.id}\n response = self.client.post(ENDPOINT_URL, option_details, format='json'\n )\n self.assertEqual(response.status_code, 201)\n self.assertDictContainsSubset(option_details, response.data)\n <mask token>\n\n\nclass ReceivedOptionsEndPointTest(AuthenticatedAPITestCase):\n\n def test_should_only_get_received_options(self):\n received_question, _ = MultipleChoiceQuestion.objects.get_or_create(\n uuids=['6c1cf97d-59b8-4bd3-815b-783abd3dfad9'], text=\n 'Was product received?', label='productReceived')\n other_question = MultipleChoiceQuestionFactory()\n option_one_details = {'text': 'Yes', 'question': received_question.id}\n option_two_details = {'text': 'No', 'question': received_question.id}\n option_three_details = {'text': 'Other', 'question': other_question.id}\n create_option(self, option_one_details)\n create_option(self, option_two_details)\n create_option(self, option_three_details)\n get_response = self.client.get(RECEIVED_OPTIONS_ENDPOINT_URL)\n self.assertEqual(get_response.status_code, 200)\n self.assertDictContainsSubset(option_one_details, get_response.data[0])\n self.assertDictContainsSubset(option_two_details, get_response.data[2])\n self.assertNotIn(option_three_details, get_response.data)\n\n\nclass QualityOptionsEndPointTest(AuthenticatedAPITestCase):\n\n def test_should_only_get_quality_options_sorted_by_text(self):\n quality_question, _ = MultipleChoiceQuestion.objects.get_or_create(\n uuids=['6c1cf92d-59b8-4bd3-815b-783abd3dfad9'], text=\n 'What is the quality of the product?', label='qualityOfProduct')\n other_question = MultipleChoiceQuestionFactory()\n option_one_details = {'text': 'B Option', 'question':\n quality_question.id}\n option_two_details = {'text': 'A Option', 'question':\n quality_question.id}\n option_three_details = {'text': 'C Option', 'question':\n other_question.id}\n create_option(self, option_one_details)\n create_option(self, option_two_details)\n create_option(self, option_three_details)\n get_response = self.client.get(QUALITY_OPTIONS_ENDPOINT_URL)\n self.assertEqual(get_response.status_code, 200)\n self.assertDictContainsSubset(option_two_details, get_response.data[0])\n self.assertDictContainsSubset(option_one_details, get_response.data[1])\n self.assertNotIn(option_three_details, get_response.data)\n\n\nclass SatisfiedOptionsEndPointTest(AuthenticatedAPITestCase):\n\n def test_should_only_get_satisfied_options(self):\n satisfied_question, _ = MultipleChoiceQuestion.objects.get_or_create(\n uuids=['6c1cf27d-59b8-4bd3-815b-783abd3dfad9'], text=\n 'Are you satisfied with the product?', label='satisfiedWithProduct'\n )\n other_question = MultipleChoiceQuestionFactory()\n option_one_details = {'text': 'Yes', 'question': satisfied_question.id}\n option_two_details = {'text': 'No', 'question': satisfied_question.id}\n option_three_details = {'text': 'Other', 'question': other_question.id}\n create_option(self, option_one_details)\n create_option(self, option_two_details)\n create_option(self, option_three_details)\n get_response = self.client.get(SATISFIED_OPTIONS_ENDPOINT_URL)\n self.assertEqual(get_response.status_code, 200)\n self.assertDictContainsSubset(option_one_details, get_response.data[0])\n self.assertDictContainsSubset(option_two_details, get_response.data[2])\n self.assertNotIn(option_three_details, 
get_response.data)\n",
"step-3": "<mask token>\n\n\nclass OptionsEndPointTest(AuthenticatedAPITestCase):\n\n def test_should_create_item(self):\n question = MultipleChoiceQuestionFactory()\n option_details = {'text': 'Bad', 'question': question.id}\n response = self.client.post(ENDPOINT_URL, option_details, format='json'\n )\n self.assertEqual(response.status_code, 201)\n self.assertDictContainsSubset(option_details, response.data)\n\n def test_should_get_options_sorted_by_text(self):\n question = MultipleChoiceQuestionFactory()\n option_one_details = {'text': 'B Option', 'question': question.id}\n option_two_details = {'text': 'A Option', 'question': question.id}\n create_option(self, option_one_details)\n create_option(self, option_two_details)\n get_response = self.client.get(ENDPOINT_URL)\n self.assertEqual(get_response.status_code, 200)\n self.assertDictContainsSubset(option_two_details, get_response.data[0])\n self.assertDictContainsSubset(option_one_details, get_response.data[1])\n\n\nclass ReceivedOptionsEndPointTest(AuthenticatedAPITestCase):\n\n def test_should_only_get_received_options(self):\n received_question, _ = MultipleChoiceQuestion.objects.get_or_create(\n uuids=['6c1cf97d-59b8-4bd3-815b-783abd3dfad9'], text=\n 'Was product received?', label='productReceived')\n other_question = MultipleChoiceQuestionFactory()\n option_one_details = {'text': 'Yes', 'question': received_question.id}\n option_two_details = {'text': 'No', 'question': received_question.id}\n option_three_details = {'text': 'Other', 'question': other_question.id}\n create_option(self, option_one_details)\n create_option(self, option_two_details)\n create_option(self, option_three_details)\n get_response = self.client.get(RECEIVED_OPTIONS_ENDPOINT_URL)\n self.assertEqual(get_response.status_code, 200)\n self.assertDictContainsSubset(option_one_details, get_response.data[0])\n self.assertDictContainsSubset(option_two_details, get_response.data[2])\n self.assertNotIn(option_three_details, get_response.data)\n\n\nclass QualityOptionsEndPointTest(AuthenticatedAPITestCase):\n\n def test_should_only_get_quality_options_sorted_by_text(self):\n quality_question, _ = MultipleChoiceQuestion.objects.get_or_create(\n uuids=['6c1cf92d-59b8-4bd3-815b-783abd3dfad9'], text=\n 'What is the quality of the product?', label='qualityOfProduct')\n other_question = MultipleChoiceQuestionFactory()\n option_one_details = {'text': 'B Option', 'question':\n quality_question.id}\n option_two_details = {'text': 'A Option', 'question':\n quality_question.id}\n option_three_details = {'text': 'C Option', 'question':\n other_question.id}\n create_option(self, option_one_details)\n create_option(self, option_two_details)\n create_option(self, option_three_details)\n get_response = self.client.get(QUALITY_OPTIONS_ENDPOINT_URL)\n self.assertEqual(get_response.status_code, 200)\n self.assertDictContainsSubset(option_two_details, get_response.data[0])\n self.assertDictContainsSubset(option_one_details, get_response.data[1])\n self.assertNotIn(option_three_details, get_response.data)\n\n\nclass SatisfiedOptionsEndPointTest(AuthenticatedAPITestCase):\n\n def test_should_only_get_satisfied_options(self):\n satisfied_question, _ = MultipleChoiceQuestion.objects.get_or_create(\n uuids=['6c1cf27d-59b8-4bd3-815b-783abd3dfad9'], text=\n 'Are you satisfied with the product?', label='satisfiedWithProduct'\n )\n other_question = MultipleChoiceQuestionFactory()\n option_one_details = {'text': 'Yes', 'question': satisfied_question.id}\n option_two_details = {'text': 'No', 
'question': satisfied_question.id}\n option_three_details = {'text': 'Other', 'question': other_question.id}\n create_option(self, option_one_details)\n create_option(self, option_two_details)\n create_option(self, option_three_details)\n get_response = self.client.get(SATISFIED_OPTIONS_ENDPOINT_URL)\n self.assertEqual(get_response.status_code, 200)\n self.assertDictContainsSubset(option_one_details, get_response.data[0])\n self.assertDictContainsSubset(option_two_details, get_response.data[2])\n self.assertNotIn(option_three_details, get_response.data)\n",
"step-4": "<mask token>\nENDPOINT_URL = BACKEND_URL + 'option/'\nRECEIVED_OPTIONS_ENDPOINT_URL = BACKEND_URL + 'received-options/'\nQUALITY_OPTIONS_ENDPOINT_URL = BACKEND_URL + 'quality-options/'\nSATISFIED_OPTIONS_ENDPOINT_URL = BACKEND_URL + 'satisfied-options/'\n\n\nclass OptionsEndPointTest(AuthenticatedAPITestCase):\n\n def test_should_create_item(self):\n question = MultipleChoiceQuestionFactory()\n option_details = {'text': 'Bad', 'question': question.id}\n response = self.client.post(ENDPOINT_URL, option_details, format='json'\n )\n self.assertEqual(response.status_code, 201)\n self.assertDictContainsSubset(option_details, response.data)\n\n def test_should_get_options_sorted_by_text(self):\n question = MultipleChoiceQuestionFactory()\n option_one_details = {'text': 'B Option', 'question': question.id}\n option_two_details = {'text': 'A Option', 'question': question.id}\n create_option(self, option_one_details)\n create_option(self, option_two_details)\n get_response = self.client.get(ENDPOINT_URL)\n self.assertEqual(get_response.status_code, 200)\n self.assertDictContainsSubset(option_two_details, get_response.data[0])\n self.assertDictContainsSubset(option_one_details, get_response.data[1])\n\n\nclass ReceivedOptionsEndPointTest(AuthenticatedAPITestCase):\n\n def test_should_only_get_received_options(self):\n received_question, _ = MultipleChoiceQuestion.objects.get_or_create(\n uuids=['6c1cf97d-59b8-4bd3-815b-783abd3dfad9'], text=\n 'Was product received?', label='productReceived')\n other_question = MultipleChoiceQuestionFactory()\n option_one_details = {'text': 'Yes', 'question': received_question.id}\n option_two_details = {'text': 'No', 'question': received_question.id}\n option_three_details = {'text': 'Other', 'question': other_question.id}\n create_option(self, option_one_details)\n create_option(self, option_two_details)\n create_option(self, option_three_details)\n get_response = self.client.get(RECEIVED_OPTIONS_ENDPOINT_URL)\n self.assertEqual(get_response.status_code, 200)\n self.assertDictContainsSubset(option_one_details, get_response.data[0])\n self.assertDictContainsSubset(option_two_details, get_response.data[2])\n self.assertNotIn(option_three_details, get_response.data)\n\n\nclass QualityOptionsEndPointTest(AuthenticatedAPITestCase):\n\n def test_should_only_get_quality_options_sorted_by_text(self):\n quality_question, _ = MultipleChoiceQuestion.objects.get_or_create(\n uuids=['6c1cf92d-59b8-4bd3-815b-783abd3dfad9'], text=\n 'What is the quality of the product?', label='qualityOfProduct')\n other_question = MultipleChoiceQuestionFactory()\n option_one_details = {'text': 'B Option', 'question':\n quality_question.id}\n option_two_details = {'text': 'A Option', 'question':\n quality_question.id}\n option_three_details = {'text': 'C Option', 'question':\n other_question.id}\n create_option(self, option_one_details)\n create_option(self, option_two_details)\n create_option(self, option_three_details)\n get_response = self.client.get(QUALITY_OPTIONS_ENDPOINT_URL)\n self.assertEqual(get_response.status_code, 200)\n self.assertDictContainsSubset(option_two_details, get_response.data[0])\n self.assertDictContainsSubset(option_one_details, get_response.data[1])\n self.assertNotIn(option_three_details, get_response.data)\n\n\nclass SatisfiedOptionsEndPointTest(AuthenticatedAPITestCase):\n\n def test_should_only_get_satisfied_options(self):\n satisfied_question, _ = MultipleChoiceQuestion.objects.get_or_create(\n uuids=['6c1cf27d-59b8-4bd3-815b-783abd3dfad9'], text=\n 
'Are you satisfied with the product?', label='satisfiedWithProduct'\n )\n other_question = MultipleChoiceQuestionFactory()\n option_one_details = {'text': 'Yes', 'question': satisfied_question.id}\n option_two_details = {'text': 'No', 'question': satisfied_question.id}\n option_three_details = {'text': 'Other', 'question': other_question.id}\n create_option(self, option_one_details)\n create_option(self, option_two_details)\n create_option(self, option_three_details)\n get_response = self.client.get(SATISFIED_OPTIONS_ENDPOINT_URL)\n self.assertEqual(get_response.status_code, 200)\n self.assertDictContainsSubset(option_one_details, get_response.data[0])\n self.assertDictContainsSubset(option_two_details, get_response.data[2])\n self.assertNotIn(option_three_details, get_response.data)\n",
"step-5": "from eums.test.api.api_test_helpers import create_option\nfrom eums.test.factories.question_factory import MultipleChoiceQuestionFactory\nfrom eums.test.api.authenticated_api_test_case import AuthenticatedAPITestCase\nfrom eums.test.config import BACKEND_URL\nfrom eums.models.question import MultipleChoiceQuestion\n\n\nENDPOINT_URL = BACKEND_URL + 'option/'\nRECEIVED_OPTIONS_ENDPOINT_URL = BACKEND_URL + 'received-options/'\nQUALITY_OPTIONS_ENDPOINT_URL = BACKEND_URL + 'quality-options/'\nSATISFIED_OPTIONS_ENDPOINT_URL = BACKEND_URL + 'satisfied-options/'\n\n\nclass OptionsEndPointTest(AuthenticatedAPITestCase):\n def test_should_create_item(self):\n question = MultipleChoiceQuestionFactory()\n option_details = {'text': \"Bad\", 'question': question.id}\n\n response = self.client.post(ENDPOINT_URL, option_details, format='json')\n\n self.assertEqual(response.status_code, 201)\n self.assertDictContainsSubset(option_details, response.data)\n\n def test_should_get_options_sorted_by_text(self):\n question = MultipleChoiceQuestionFactory()\n option_one_details = {'text': \"B Option\", 'question': question.id}\n option_two_details = {'text': \"A Option\", 'question': question.id}\n\n create_option(self, option_one_details)\n create_option(self, option_two_details)\n\n get_response = self.client.get(ENDPOINT_URL)\n\n self.assertEqual(get_response.status_code, 200)\n self.assertDictContainsSubset(option_two_details, get_response.data[0])\n self.assertDictContainsSubset(option_one_details, get_response.data[1])\n\nclass ReceivedOptionsEndPointTest(AuthenticatedAPITestCase):\n def test_should_only_get_received_options(self):\n received_question,_ = MultipleChoiceQuestion.objects.get_or_create(\n uuids=['6c1cf97d-59b8-4bd3-815b-783abd3dfad9'],\n text='Was product received?', label='productReceived'\n )\n other_question = MultipleChoiceQuestionFactory()\n\n option_one_details = {'text': \"Yes\", 'question': received_question.id}\n option_two_details = {'text': \"No\", 'question': received_question.id}\n option_three_details = {'text': \"Other\", 'question': other_question.id}\n\n create_option(self, option_one_details)\n create_option(self, option_two_details)\n create_option(self, option_three_details)\n\n get_response = self.client.get(RECEIVED_OPTIONS_ENDPOINT_URL)\n\n self.assertEqual(get_response.status_code, 200)\n self.assertDictContainsSubset(option_one_details, get_response.data[0])\n self.assertDictContainsSubset(option_two_details, get_response.data[2])\n self.assertNotIn(option_three_details, get_response.data)\n\nclass QualityOptionsEndPointTest(AuthenticatedAPITestCase):\n def test_should_only_get_quality_options_sorted_by_text(self):\n quality_question,_ = MultipleChoiceQuestion.objects.get_or_create(\n uuids=['6c1cf92d-59b8-4bd3-815b-783abd3dfad9'],\n text='What is the quality of the product?', label='qualityOfProduct'\n )\n other_question = MultipleChoiceQuestionFactory()\n\n option_one_details = {'text': \"B Option\", 'question': quality_question.id}\n option_two_details = {'text': \"A Option\", 'question': quality_question.id}\n option_three_details = {'text': \"C Option\", 'question': other_question.id}\n\n create_option(self, option_one_details)\n create_option(self, option_two_details)\n create_option(self, option_three_details)\n\n get_response = self.client.get(QUALITY_OPTIONS_ENDPOINT_URL)\n\n self.assertEqual(get_response.status_code, 200)\n self.assertDictContainsSubset(option_two_details, get_response.data[0])\n self.assertDictContainsSubset(option_one_details, 
get_response.data[1])\n self.assertNotIn(option_three_details, get_response.data)\n\nclass SatisfiedOptionsEndPointTest(AuthenticatedAPITestCase):\n def test_should_only_get_satisfied_options(self):\n satisfied_question,_ = MultipleChoiceQuestion.objects.get_or_create(\n uuids=['6c1cf27d-59b8-4bd3-815b-783abd3dfad9'],\n text='Are you satisfied with the product?', label='satisfiedWithProduct'\n )\n other_question = MultipleChoiceQuestionFactory()\n\n option_one_details = {'text': \"Yes\", 'question': satisfied_question.id}\n option_two_details = {'text': \"No\", 'question': satisfied_question.id}\n option_three_details = {'text': \"Other\", 'question': other_question.id}\n\n create_option(self, option_one_details)\n create_option(self, option_two_details)\n create_option(self, option_three_details)\n\n get_response = self.client.get(SATISFIED_OPTIONS_ENDPOINT_URL)\n\n self.assertEqual(get_response.status_code, 200)\n self.assertDictContainsSubset(option_one_details, get_response.data[0])\n self.assertDictContainsSubset(option_two_details, get_response.data[2])\n self.assertNotIn(option_three_details, get_response.data)",
"step-ids": [
7,
8,
9,
10,
12
]
}
|
[
7,
8,
9,
10,
12
] |
/Users/andreilyskov/anaconda/lib/python3.5/sre_compile.py
|
normal
|
{
"blob_id": "faf4f4d26236ac555594ef6913a0d43c3516f1f2",
"index": 2063,
"step-1": "/Users/andreilyskov/anaconda/lib/python3.5/sre_compile.py",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
#!/bin/python
import sys
import notify2
import subprocess
from time import sleep
def notification(message: str):
"""
Display notification to the desktop
Task:
    1. show() -> it will generate a completely new pop-up
    2. update() -> it will update the payload of the same notification pop-up instead of issuing a new one.
Usage : python <filename.py> typeObj:str value:int objective:str
typeObj: RAM/SWAP/NORMAL
value: current usage of RAM or SWAP (for NORMAL, the value = 0)
objective: show/update
"""
# initialize the notification
notify2.init("notifywhenLOAD")
notifyObj = notify2.Notification("Emergency Alert!", message)
notifyObj.set_timeout(12000)
return notifyObj
def main():
a = notification(f"{sys.argv[1]} exceeds {sys.argv[2]}")
if sys.argv[1] in ["RAM", "SWAP"] and sys.argv[3] == "update":
a.update(f"{sys.argv[1]} Alert!! Warning for death")
# a.update('river')
a.set_urgency(2)
a.show()
elif sys.argv[1] in ["RAM", "SWAP"] and sys.argv[3] == "show":
a.set_timeout(10000)
a.set_urgency(1)
a.show()
elif sys.argv[1] == "NORMAL":
a.update("ChiLLax!!! Nothing to worry about")
a.set_urgency(0)
a.show()
main()
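Per the docstring, the script is driven by three positional arguments. A hypothetical caller that feeds it live RAM figures might look like the sketch below; psutil and the file name notify_load.py are assumptions, not part of the original:

import subprocess
import psutil  # assumed dependency of the caller, not of the script above

ram = psutil.virtual_memory().percent
if ram > 80:
    args = ["RAM", str(int(ram)), "update" if ram > 90 else "show"]
else:
    args = ["NORMAL", "0", "show"]  # per the docstring, NORMAL passes value 0
subprocess.run(["python", "notify_load.py", *args])  # file name is illustrative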
|
normal
|
{
"blob_id": "8a7904881d936a3cb421ed5550856b600894fcee",
"index": 5397,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef notification(message: str):\n \"\"\"\n Display notification to the desktop\n Task:\n 1. show() -> it will generate a complete new pop\n 2. update() -> it will update the payload part of same notification pop-up, not issuing any new one.\n Usage : python <filename.py> typeObj:str value:int objective:str\n typeObj: RAM/SWAP/NORMAL\n value: current usage of RAM or SWAP (for NORMAL, the value = 0)\n objective: show/update \n \"\"\"\n notify2.init('notifywhenLOAD')\n notifyObj = notify2.Notification('Emergency Alert!', message)\n notifyObj.set_timeout(12000)\n return notifyObj\n\n\ndef main():\n a = notification(f'{sys.argv[1]} exceeds {sys.argv[2]}')\n if sys.argv[1] in ['RAM', 'SWAP'] and sys.argv[3] == 'update':\n a.update(f'{sys.argv[1]} Alert!! Warning for death')\n a.set_urgency(2)\n a.show()\n elif sys.argv[1] in ['RAM', 'SWAP'] and sys.argv[3] == 'show':\n a.set_timeout(10000)\n a.set_urgency(1)\n a.show()\n elif sys.argv[1] == 'NORMAL':\n a.update('ChiLLax!!! Nothing to worry about')\n a.set_urgency(0)\n a.show()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef notification(message: str):\n \"\"\"\n Display notification to the desktop\n Task:\n 1. show() -> it will generate a complete new pop\n 2. update() -> it will update the payload part of same notification pop-up, not issuing any new one.\n Usage : python <filename.py> typeObj:str value:int objective:str\n typeObj: RAM/SWAP/NORMAL\n value: current usage of RAM or SWAP (for NORMAL, the value = 0)\n objective: show/update \n \"\"\"\n notify2.init('notifywhenLOAD')\n notifyObj = notify2.Notification('Emergency Alert!', message)\n notifyObj.set_timeout(12000)\n return notifyObj\n\n\ndef main():\n a = notification(f'{sys.argv[1]} exceeds {sys.argv[2]}')\n if sys.argv[1] in ['RAM', 'SWAP'] and sys.argv[3] == 'update':\n a.update(f'{sys.argv[1]} Alert!! Warning for death')\n a.set_urgency(2)\n a.show()\n elif sys.argv[1] in ['RAM', 'SWAP'] and sys.argv[3] == 'show':\n a.set_timeout(10000)\n a.set_urgency(1)\n a.show()\n elif sys.argv[1] == 'NORMAL':\n a.update('ChiLLax!!! Nothing to worry about')\n a.set_urgency(0)\n a.show()\n\n\nmain()\n",
"step-4": "import sys\nimport notify2\nimport subprocess\nfrom time import sleep\n\n\ndef notification(message: str):\n \"\"\"\n Display notification to the desktop\n Task:\n 1. show() -> it will generate a complete new pop\n 2. update() -> it will update the payload part of same notification pop-up, not issuing any new one.\n Usage : python <filename.py> typeObj:str value:int objective:str\n typeObj: RAM/SWAP/NORMAL\n value: current usage of RAM or SWAP (for NORMAL, the value = 0)\n objective: show/update \n \"\"\"\n notify2.init('notifywhenLOAD')\n notifyObj = notify2.Notification('Emergency Alert!', message)\n notifyObj.set_timeout(12000)\n return notifyObj\n\n\ndef main():\n a = notification(f'{sys.argv[1]} exceeds {sys.argv[2]}')\n if sys.argv[1] in ['RAM', 'SWAP'] and sys.argv[3] == 'update':\n a.update(f'{sys.argv[1]} Alert!! Warning for death')\n a.set_urgency(2)\n a.show()\n elif sys.argv[1] in ['RAM', 'SWAP'] and sys.argv[3] == 'show':\n a.set_timeout(10000)\n a.set_urgency(1)\n a.show()\n elif sys.argv[1] == 'NORMAL':\n a.update('ChiLLax!!! Nothing to worry about')\n a.set_urgency(0)\n a.show()\n\n\nmain()\n",
"step-5": "#!/bin/python\nimport sys\nimport notify2\nimport subprocess\nfrom time import sleep\n\n\ndef notification(message: str):\n \"\"\"\n Display notification to the desktop\n Task:\n 1. show() -> it will generate a complete new pop\n 2. update() -> it will update the payload part of same notification pop-up, not issuing any new one.\n Usage : python <filename.py> typeObj:str value:int objective:str\n typeObj: RAM/SWAP/NORMAL\n value: current usage of RAM or SWAP (for NORMAL, the value = 0)\n objective: show/update \n \"\"\"\n # initialize the notification\n notify2.init(\"notifywhenLOAD\")\n notifyObj = notify2.Notification(\"Emergency Alert!\", message)\n notifyObj.set_timeout(12000)\n return notifyObj\n\n\ndef main():\n a = notification(f\"{sys.argv[1]} exceeds {sys.argv[2]}\")\n if sys.argv[1] in [\"RAM\", \"SWAP\"] and sys.argv[3] == \"update\":\n a.update(f\"{sys.argv[1]} Alert!! Warning for death\")\n # a.update('river')\n a.set_urgency(2)\n a.show()\n elif sys.argv[1] in [\"RAM\", \"SWAP\"] and sys.argv[3] == \"show\":\n a.set_timeout(10000)\n a.set_urgency(1)\n a.show()\n elif sys.argv[1] == \"NORMAL\":\n a.update(\"ChiLLax!!! Nothing to worry about\")\n a.set_urgency(0)\n a.show()\n\n\nmain()\n",
"step-ids": [
0,
2,
3,
4,
5
]
}
|
[
0,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
class MonthUnitTests(unittest.TestCase):
def test_header(self):
cal = Month(5, 2012)
result = cal.header()
self.assertEqual(' May 2012', result)
def test_header_different_month(self):
cal = Month(3, 2012)
result = cal.header()
self.assertEqual(' March 2012', result)
def test_zeller(self):
cal = Month(3, 1995)
result = cal.zeller()
self.assertEqual(3, result)
<|reserved_special_token_0|>
def test_zeller_january(self):
cal = Month(1, 2000)
self.assertEqual(6, cal.zeller())
def test_zeller_february(self):
cal = Month(2, 2000)
self.assertEqual(2, cal.zeller())
def test_number_of_days(self):
cal = Month(6, 1900)
self.assertEqual(30, cal.days_number())
def test_number_of_days_february(self):
cal = Month(2, 1995)
self.assertEqual(28, cal.days_number())
def test_number_of_days_leap_year(self):
cal = Month(2, 1996)
self.assertEqual(29, cal.days_number())
def test_number_of_days_leap_century(self):
cal = Month(2, 2000)
self.assertEqual(29, cal.days_number())
def test_number_of_days_non_leap_century(self):
cal = Month(2, 1900)
self.assertEqual(28, cal.days_number())
def test_blank_spaces(self):
cal = Month(2, 1990)
self.assertEqual([' ', ' ', ' ', ' '], cal.spaces())
def test_days(self):
cal = Month(2, 1990)
expected = [' 1', ' 2', ' 3', ' 4', ' 5', ' 6', ' 7', ' 8', ' 9',
'10', '11', '12', '13', '14', '15', '16', '17', '18', '19',
'20', '21', '22', '23', '24', '25', '26', '27', '28']
self.assertEqual(expected, cal.days())
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class MonthUnitTests(unittest.TestCase):
def test_header(self):
cal = Month(5, 2012)
result = cal.header()
self.assertEqual(' May 2012', result)
def test_header_different_month(self):
cal = Month(3, 2012)
result = cal.header()
self.assertEqual(' March 2012', result)
def test_zeller(self):
cal = Month(3, 1995)
result = cal.zeller()
self.assertEqual(3, result)
def test_zeller_again(self):
cal = Month(6, 2999)
self.assertEqual(6, cal.zeller())
def test_zeller_january(self):
cal = Month(1, 2000)
self.assertEqual(6, cal.zeller())
def test_zeller_february(self):
cal = Month(2, 2000)
self.assertEqual(2, cal.zeller())
def test_number_of_days(self):
cal = Month(6, 1900)
self.assertEqual(30, cal.days_number())
def test_number_of_days_february(self):
cal = Month(2, 1995)
self.assertEqual(28, cal.days_number())
def test_number_of_days_leap_year(self):
cal = Month(2, 1996)
self.assertEqual(29, cal.days_number())
def test_number_of_days_leap_century(self):
cal = Month(2, 2000)
self.assertEqual(29, cal.days_number())
def test_number_of_days_non_leap_century(self):
cal = Month(2, 1900)
self.assertEqual(28, cal.days_number())
def test_blank_spaces(self):
cal = Month(2, 1990)
self.assertEqual([' ', ' ', ' ', ' '], cal.spaces())
def test_days(self):
cal = Month(2, 1990)
expected = [' 1', ' 2', ' 3', ' 4', ' 5', ' 6', ' 7', ' 8', ' 9',
'10', '11', '12', '13', '14', '15', '16', '17', '18', '19',
'20', '21', '22', '23', '24', '25', '26', '27', '28']
self.assertEqual(expected, cal.days())
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class MonthUnitTests(unittest.TestCase):
def test_header(self):
cal = Month(5, 2012)
result = cal.header()
self.assertEqual(' May 2012', result)
def test_header_different_month(self):
cal = Month(3, 2012)
result = cal.header()
self.assertEqual(' March 2012', result)
def test_zeller(self):
cal = Month(3, 1995)
result = cal.zeller()
self.assertEqual(3, result)
def test_zeller_again(self):
cal = Month(6, 2999)
self.assertEqual(6, cal.zeller())
def test_zeller_january(self):
cal = Month(1, 2000)
self.assertEqual(6, cal.zeller())
def test_zeller_february(self):
cal = Month(2, 2000)
self.assertEqual(2, cal.zeller())
def test_number_of_days(self):
cal = Month(6, 1900)
self.assertEqual(30, cal.days_number())
def test_number_of_days_february(self):
cal = Month(2, 1995)
self.assertEqual(28, cal.days_number())
def test_number_of_days_leap_year(self):
cal = Month(2, 1996)
self.assertEqual(29, cal.days_number())
def test_number_of_days_leap_century(self):
cal = Month(2, 2000)
self.assertEqual(29, cal.days_number())
def test_number_of_days_non_leap_century(self):
cal = Month(2, 1900)
self.assertEqual(28, cal.days_number())
def test_blank_spaces(self):
cal = Month(2, 1990)
self.assertEqual([' ', ' ', ' ', ' '], cal.spaces())
def test_days(self):
cal = Month(2, 1990)
expected = [' 1', ' 2', ' 3', ' 4', ' 5', ' 6', ' 7', ' 8', ' 9',
'10', '11', '12', '13', '14', '15', '16', '17', '18', '19',
'20', '21', '22', '23', '24', '25', '26', '27', '28']
self.assertEqual(expected, cal.days())
def test_format_days(self):
cal = Month(2, 1990)
expected = [' ', ' ', ' ', ' ', ' 1', ' 2', ' 3', ' 4', ' 5',
' 6', ' 7', ' 8', ' 9', '10', '11', '12', '13', '14', '15',
'16', '17', '18', '19', '20', '21', '22', '23', '24', '25',
'26', '27', '28']
self.assertEqual(expected, cal.format_days())
<|reserved_special_token_1|>
import unittest
from month import Month
class MonthUnitTests(unittest.TestCase):
def test_header(self):
cal = Month(5, 2012)
result = cal.header()
self.assertEqual(' May 2012', result)
def test_header_different_month(self):
cal = Month(3, 2012)
result = cal.header()
self.assertEqual(' March 2012', result)
def test_zeller(self):
cal = Month(3, 1995)
result = cal.zeller()
self.assertEqual(3, result)
def test_zeller_again(self):
cal = Month(6, 2999)
self.assertEqual(6, cal.zeller())
def test_zeller_january(self):
cal = Month(1, 2000)
self.assertEqual(6, cal.zeller())
def test_zeller_february(self):
cal = Month(2, 2000)
self.assertEqual(2, cal.zeller())
def test_number_of_days(self):
cal = Month(6, 1900)
self.assertEqual(30, cal.days_number())
def test_number_of_days_february(self):
cal = Month(2, 1995)
self.assertEqual(28, cal.days_number())
def test_number_of_days_leap_year(self):
cal = Month(2, 1996)
self.assertEqual(29, cal.days_number())
def test_number_of_days_leap_century(self):
cal = Month(2, 2000)
self.assertEqual(29, cal.days_number())
def test_number_of_days_non_leap_century(self):
cal = Month(2, 1900)
self.assertEqual(28, cal.days_number())
def test_blank_spaces(self):
cal = Month(2, 1990)
self.assertEqual([' ', ' ', ' ', ' '], cal.spaces())
def test_days(self):
cal = Month(2, 1990)
expected = [' 1', ' 2', ' 3', ' 4', ' 5', ' 6', ' 7', ' 8', ' 9',
'10', '11', '12', '13', '14', '15', '16', '17', '18', '19',
'20', '21', '22', '23', '24', '25', '26', '27', '28']
self.assertEqual(expected, cal.days())
def test_format_days(self):
cal = Month(2, 1990)
expected = [' ', ' ', ' ', ' ', ' 1', ' 2', ' 3', ' 4', ' 5',
' 6', ' 7', ' 8', ' 9', '10', '11', '12', '13', '14', '15',
'16', '17', '18', '19', '20', '21', '22', '23', '24', '25',
'26', '27', '28']
self.assertEqual(expected, cal.format_days())
<|reserved_special_token_1|>
import unittest
from month import Month
class MonthUnitTests(unittest.TestCase):
def test_header(self):
cal = Month(5, 2012)
result = cal.header()
self.assertEqual(" May 2012", result)
def test_header_different_month(self):
cal = Month(3, 2012)
result = cal.header()
self.assertEqual(" March 2012", result)
def test_zeller(self):
cal = Month(3, 1995)
result = cal.zeller()
self.assertEqual(3, result)
def test_zeller_again(self):
cal = Month(6, 2999)
self.assertEqual(6, cal.zeller())
def test_zeller_january(self):
cal = Month(1, 2000)
self.assertEqual(6, cal.zeller())
def test_zeller_february(self):
cal = Month(2, 2000)
self.assertEqual(2, cal.zeller())
def test_number_of_days(self):
cal = Month(6, 1900)
self.assertEqual(30, cal.days_number())
def test_number_of_days_february(self):
cal = Month(2, 1995)
self.assertEqual(28, cal.days_number())
def test_number_of_days_leap_year(self):
cal = Month(2, 1996)
self.assertEqual(29, cal.days_number())
def test_number_of_days_leap_century(self):
cal = Month(2, 2000)
self.assertEqual(29, cal.days_number())
def test_number_of_days_non_leap_century(self):
cal = Month(2, 1900)
self.assertEqual(28, cal.days_number())
def test_blank_spaces(self):
cal = Month(2, 1990)
self.assertEqual([" "," "," "," "], cal.spaces())
def test_days(self):
cal = Month(2, 1990)
expected = [" 1"," 2"," 3"," 4"," 5"," 6"," 7"," 8"," 9","10","11","12","13","14","15","16","17","18","19","20","21","22","23","24","25","26","27","28"]
self.assertEqual(expected, cal.days())
def test_format_days(self):
cal = Month(2, 1990)
expected = [" "," "," "," "," 1"," 2"," 3"," 4"," 5"," 6"," 7"," 8"," 9","10","11","12","13","14","15","16","17","18","19","20","21","22","23","24","25","26","27","28"]
self.assertEqual(expected, cal.format_days())
|
flexible
|
{
"blob_id": "36c1d75171d772138b820651e11a3a7bc3a6521c",
"index": 8226,
"step-1": "<mask token>\n\n\nclass MonthUnitTests(unittest.TestCase):\n\n def test_header(self):\n cal = Month(5, 2012)\n result = cal.header()\n self.assertEqual(' May 2012', result)\n\n def test_header_different_month(self):\n cal = Month(3, 2012)\n result = cal.header()\n self.assertEqual(' March 2012', result)\n\n def test_zeller(self):\n cal = Month(3, 1995)\n result = cal.zeller()\n self.assertEqual(3, result)\n <mask token>\n\n def test_zeller_january(self):\n cal = Month(1, 2000)\n self.assertEqual(6, cal.zeller())\n\n def test_zeller_february(self):\n cal = Month(2, 2000)\n self.assertEqual(2, cal.zeller())\n\n def test_number_of_days(self):\n cal = Month(6, 1900)\n self.assertEqual(30, cal.days_number())\n\n def test_number_of_days_february(self):\n cal = Month(2, 1995)\n self.assertEqual(28, cal.days_number())\n\n def test_number_of_days_leap_year(self):\n cal = Month(2, 1996)\n self.assertEqual(29, cal.days_number())\n\n def test_number_of_days_leap_century(self):\n cal = Month(2, 2000)\n self.assertEqual(29, cal.days_number())\n\n def test_number_of_days_non_leap_century(self):\n cal = Month(2, 1900)\n self.assertEqual(28, cal.days_number())\n\n def test_blank_spaces(self):\n cal = Month(2, 1990)\n self.assertEqual([' ', ' ', ' ', ' '], cal.spaces())\n\n def test_days(self):\n cal = Month(2, 1990)\n expected = [' 1', ' 2', ' 3', ' 4', ' 5', ' 6', ' 7', ' 8', ' 9',\n '10', '11', '12', '13', '14', '15', '16', '17', '18', '19',\n '20', '21', '22', '23', '24', '25', '26', '27', '28']\n self.assertEqual(expected, cal.days())\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass MonthUnitTests(unittest.TestCase):\n\n def test_header(self):\n cal = Month(5, 2012)\n result = cal.header()\n self.assertEqual(' May 2012', result)\n\n def test_header_different_month(self):\n cal = Month(3, 2012)\n result = cal.header()\n self.assertEqual(' March 2012', result)\n\n def test_zeller(self):\n cal = Month(3, 1995)\n result = cal.zeller()\n self.assertEqual(3, result)\n\n def test_zeller_again(self):\n cal = Month(6, 2999)\n self.assertEqual(6, cal.zeller())\n\n def test_zeller_january(self):\n cal = Month(1, 2000)\n self.assertEqual(6, cal.zeller())\n\n def test_zeller_february(self):\n cal = Month(2, 2000)\n self.assertEqual(2, cal.zeller())\n\n def test_number_of_days(self):\n cal = Month(6, 1900)\n self.assertEqual(30, cal.days_number())\n\n def test_number_of_days_february(self):\n cal = Month(2, 1995)\n self.assertEqual(28, cal.days_number())\n\n def test_number_of_days_leap_year(self):\n cal = Month(2, 1996)\n self.assertEqual(29, cal.days_number())\n\n def test_number_of_days_leap_century(self):\n cal = Month(2, 2000)\n self.assertEqual(29, cal.days_number())\n\n def test_number_of_days_non_leap_century(self):\n cal = Month(2, 1900)\n self.assertEqual(28, cal.days_number())\n\n def test_blank_spaces(self):\n cal = Month(2, 1990)\n self.assertEqual([' ', ' ', ' ', ' '], cal.spaces())\n\n def test_days(self):\n cal = Month(2, 1990)\n expected = [' 1', ' 2', ' 3', ' 4', ' 5', ' 6', ' 7', ' 8', ' 9',\n '10', '11', '12', '13', '14', '15', '16', '17', '18', '19',\n '20', '21', '22', '23', '24', '25', '26', '27', '28']\n self.assertEqual(expected, cal.days())\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass MonthUnitTests(unittest.TestCase):\n\n def test_header(self):\n cal = Month(5, 2012)\n result = cal.header()\n self.assertEqual(' May 2012', result)\n\n def test_header_different_month(self):\n cal = Month(3, 2012)\n result = cal.header()\n self.assertEqual(' March 2012', result)\n\n def test_zeller(self):\n cal = Month(3, 1995)\n result = cal.zeller()\n self.assertEqual(3, result)\n\n def test_zeller_again(self):\n cal = Month(6, 2999)\n self.assertEqual(6, cal.zeller())\n\n def test_zeller_january(self):\n cal = Month(1, 2000)\n self.assertEqual(6, cal.zeller())\n\n def test_zeller_february(self):\n cal = Month(2, 2000)\n self.assertEqual(2, cal.zeller())\n\n def test_number_of_days(self):\n cal = Month(6, 1900)\n self.assertEqual(30, cal.days_number())\n\n def test_number_of_days_february(self):\n cal = Month(2, 1995)\n self.assertEqual(28, cal.days_number())\n\n def test_number_of_days_leap_year(self):\n cal = Month(2, 1996)\n self.assertEqual(29, cal.days_number())\n\n def test_number_of_days_leap_century(self):\n cal = Month(2, 2000)\n self.assertEqual(29, cal.days_number())\n\n def test_number_of_days_non_leap_century(self):\n cal = Month(2, 1900)\n self.assertEqual(28, cal.days_number())\n\n def test_blank_spaces(self):\n cal = Month(2, 1990)\n self.assertEqual([' ', ' ', ' ', ' '], cal.spaces())\n\n def test_days(self):\n cal = Month(2, 1990)\n expected = [' 1', ' 2', ' 3', ' 4', ' 5', ' 6', ' 7', ' 8', ' 9',\n '10', '11', '12', '13', '14', '15', '16', '17', '18', '19',\n '20', '21', '22', '23', '24', '25', '26', '27', '28']\n self.assertEqual(expected, cal.days())\n\n def test_format_days(self):\n cal = Month(2, 1990)\n expected = [' ', ' ', ' ', ' ', ' 1', ' 2', ' 3', ' 4', ' 5',\n ' 6', ' 7', ' 8', ' 9', '10', '11', '12', '13', '14', '15',\n '16', '17', '18', '19', '20', '21', '22', '23', '24', '25',\n '26', '27', '28']\n self.assertEqual(expected, cal.format_days())\n",
"step-4": "import unittest\nfrom month import Month\n\n\nclass MonthUnitTests(unittest.TestCase):\n\n def test_header(self):\n cal = Month(5, 2012)\n result = cal.header()\n self.assertEqual(' May 2012', result)\n\n def test_header_different_month(self):\n cal = Month(3, 2012)\n result = cal.header()\n self.assertEqual(' March 2012', result)\n\n def test_zeller(self):\n cal = Month(3, 1995)\n result = cal.zeller()\n self.assertEqual(3, result)\n\n def test_zeller_again(self):\n cal = Month(6, 2999)\n self.assertEqual(6, cal.zeller())\n\n def test_zeller_january(self):\n cal = Month(1, 2000)\n self.assertEqual(6, cal.zeller())\n\n def test_zeller_february(self):\n cal = Month(2, 2000)\n self.assertEqual(2, cal.zeller())\n\n def test_number_of_days(self):\n cal = Month(6, 1900)\n self.assertEqual(30, cal.days_number())\n\n def test_number_of_days_february(self):\n cal = Month(2, 1995)\n self.assertEqual(28, cal.days_number())\n\n def test_number_of_days_leap_year(self):\n cal = Month(2, 1996)\n self.assertEqual(29, cal.days_number())\n\n def test_number_of_days_leap_century(self):\n cal = Month(2, 2000)\n self.assertEqual(29, cal.days_number())\n\n def test_number_of_days_non_leap_century(self):\n cal = Month(2, 1900)\n self.assertEqual(28, cal.days_number())\n\n def test_blank_spaces(self):\n cal = Month(2, 1990)\n self.assertEqual([' ', ' ', ' ', ' '], cal.spaces())\n\n def test_days(self):\n cal = Month(2, 1990)\n expected = [' 1', ' 2', ' 3', ' 4', ' 5', ' 6', ' 7', ' 8', ' 9',\n '10', '11', '12', '13', '14', '15', '16', '17', '18', '19',\n '20', '21', '22', '23', '24', '25', '26', '27', '28']\n self.assertEqual(expected, cal.days())\n\n def test_format_days(self):\n cal = Month(2, 1990)\n expected = [' ', ' ', ' ', ' ', ' 1', ' 2', ' 3', ' 4', ' 5',\n ' 6', ' 7', ' 8', ' 9', '10', '11', '12', '13', '14', '15',\n '16', '17', '18', '19', '20', '21', '22', '23', '24', '25',\n '26', '27', '28']\n self.assertEqual(expected, cal.format_days())\n",
"step-5": "import unittest\nfrom month import Month\n\nclass MonthUnitTests(unittest.TestCase):\n\n\tdef test_header(self):\n\t\tcal = Month(5, 2012)\n\t\tresult = cal.header()\n\t\tself.assertEqual(\" May 2012\", result)\n\n\tdef test_header_different_month(self):\n\t\tcal = Month(3, 2012)\n\t\tresult = cal.header()\n\t\tself.assertEqual(\" March 2012\", result)\n\n\tdef test_zeller(self):\n\t\tcal = Month(3, 1995)\n\t\tresult = cal.zeller()\n\t\tself.assertEqual(3, result)\n\n\tdef test_zeller_again(self):\n\t\tcal = Month(6, 2999)\n\t\tself.assertEqual(6, cal.zeller())\n\n\tdef test_zeller_january(self):\n\t\tcal = Month(1, 2000)\n\t\tself.assertEqual(6, cal.zeller())\n\n\tdef test_zeller_february(self):\n\t\tcal = Month(2, 2000)\n\t\tself.assertEqual(2, cal.zeller())\n\n\tdef test_number_of_days(self):\n\t\tcal = Month(6, 1900)\n\t\tself.assertEqual(30, cal.days_number())\n\n\tdef test_number_of_days_february(self):\n\t\tcal = Month(2, 1995)\n\t\tself.assertEqual(28, cal.days_number())\n\n\tdef test_number_of_days_leap_year(self):\n\t\tcal = Month(2, 1996)\n\t\tself.assertEqual(29, cal.days_number())\n\n\tdef test_number_of_days_leap_century(self):\n\t\tcal = Month(2, 2000)\n\t\tself.assertEqual(29, cal.days_number())\n\n\tdef test_number_of_days_non_leap_century(self):\n\t\tcal = Month(2, 1900)\n\t\tself.assertEqual(28, cal.days_number())\n\n\tdef test_blank_spaces(self):\n\t\tcal = Month(2, 1990)\n\t\tself.assertEqual([\" \",\" \",\" \",\" \"], cal.spaces())\n\n\tdef test_days(self):\n\t\tcal = Month(2, 1990)\n\t\texpected = [\" 1\",\" 2\",\" 3\",\" 4\",\" 5\",\" 6\",\" 7\",\" 8\",\" 9\",\"10\",\"11\",\"12\",\"13\",\"14\",\"15\",\"16\",\"17\",\"18\",\"19\",\"20\",\"21\",\"22\",\"23\",\"24\",\"25\",\"26\",\"27\",\"28\"]\n\t\tself.assertEqual(expected, cal.days())\n\n\tdef test_format_days(self):\n\t\tcal = Month(2, 1990)\n\t\texpected = [\" \",\" \",\" \",\" \",\" 1\",\" 2\",\" 3\",\" 4\",\" 5\",\" 6\",\" 7\",\" 8\",\" 9\",\"10\",\"11\",\"12\",\"13\",\"14\",\"15\",\"16\",\"17\",\"18\",\"19\",\"20\",\"21\",\"22\",\"23\",\"24\",\"25\",\"26\",\"27\",\"28\"]\n\t\tself.assertEqual(expected, cal.format_days())",
"step-ids": [
13,
14,
15,
16,
17
]
}
|
[
13,
14,
15,
16,
17
] |
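The tests above import a `Month` class from a `month` module that this record does not include. Below is a minimal sketch that would satisfy them, assuming Zeller's congruence shifted to a Sunday-first weekday convention, two-character day cells, and a 20-column centered header; the exact padding of the expected header strings is an assumption, since runs of spaces appear collapsed in this dump.

# month.py -- hypothetical sketch; only the method names and expected values
# come from the tests above.
MONTH_NAMES = ['January', 'February', 'March', 'April', 'May', 'June',
               'July', 'August', 'September', 'October', 'November', 'December']


class Month:

    def __init__(self, month, year):
        self.month = month
        self.year = year

    def header(self):
        # Assumed 20-column centered banner (width chosen to match 'Su Mo ... Sa').
        return '{0} {1}'.format(MONTH_NAMES[self.month - 1], self.year).center(20)

    def zeller(self):
        # Zeller's congruence; January and February count as months 13/14 of the
        # previous year. The result is shifted so 0 = Sunday ... 6 = Saturday,
        # which matches the expected values above (e.g. March 1995 -> 3, a Wednesday).
        m, y = self.month, self.year
        if m < 3:
            m += 12
            y -= 1
        k, j = y % 100, y // 100
        h = (1 + 13 * (m + 1) // 5 + k + k // 4 + j // 4 + 5 * j) % 7  # 0 = Saturday
        return (h + 6) % 7

    def days_number(self):
        if self.month == 2:
            leap = self.year % 4 == 0 and (self.year % 100 != 0 or self.year % 400 == 0)
            return 29 if leap else 28
        return 31 if self.month in (1, 3, 5, 7, 8, 10, 12) else 30

    def spaces(self):
        # One blank two-character cell per weekday before the 1st of the month.
        return ['  '] * self.zeller()

    def days(self):
        return ['{0:2d}'.format(d) for d in range(1, self.days_number() + 1)]

    def format_days(self):
        return self.spaces() + self.days()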
# encoding:UTF-8
# Problem: Fibonacci sequence.
def fib(n):
if n==1 or n==2:
return 1
return fib(n-1)+fib(n-2)
print (fib(10))
|
normal
|
{
"blob_id": "59376f6565cd72e20087609253a41c04c6327a27",
"index": 6324,
"step-1": "<mask token>\n",
"step-2": "def fib(n):\n if n == 1 or n == 2:\n return 1\n return fib(n - 1) + fib(n - 2)\n\n\n<mask token>\n",
"step-3": "def fib(n):\n if n == 1 or n == 2:\n return 1\n return fib(n - 1) + fib(n - 2)\n\n\nprint(fib(10))\n",
"step-4": "# encoding:UTF-8\n# 题目:斐波那契数列。\ndef fib(n):\n\tif n==1 or n==2:\n\t\treturn 1\n\treturn fib(n-1)+fib(n-2)\nprint (fib(10))\n\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
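The recursive fib above does exponential work, since fib(n-1) and fib(n-2) recompute the same subproblems over and over. A memoized variant using only the standard library keeps the same recurrence while computing each value once; this is a sketch, not part of the original record.

from functools import lru_cache


@lru_cache(maxsize=None)
def fib(n):
    # Same base cases and recurrence as above; the cache turns the
    # exponential call tree into a linear number of distinct calls.
    if n == 1 or n == 2:
        return 1
    return fib(n - 1) + fib(n - 2)


print(fib(10))  # 55, same result as the uncached version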
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
db_handle.drop_database(DB_NAME)
<|reserved_special_token_0|>
with open(RELATIVE_CONFIG_PATH + USER_COLLECTION + '.csv', 'r') as user_fh:
for user_row in user_fh:
user_row = user_row.rstrip()
if user_row:
username, email, role = user_row.split(',')
user_data = {'username': username, 'email': email, 'role': role}
user_collection = weather_dbh[USER_COLLECTION]
user_collection.insert_one(user_data)
with open(RELATIVE_CONFIG_PATH + DEVICE_COLLECTION + '.csv', 'r') as device_fh:
for device_row in device_fh:
device_row = device_row.rstrip()
if device_row:
device_id, desc, type, manufacturer = device_row.split(',')
device_data = {'device_id': device_id, 'desc': desc, 'type': type,
'manufacturer': manufacturer}
device_collection = weather_dbh[DEVICE_COLLECTION]
device_collection.insert_one(device_data)
<|reserved_special_token_0|>
with open(RELATIVE_CONFIG_PATH + DEVICE_COLLECTION + '.csv', 'r') as device_fh:
for device_row in device_fh:
device_row = device_row.rstrip()
if device_row:
device_id, _, type, _ = device_row.split(',')
for day in range(1, 6):
day1 = datetime(2020, 12, day)
devdaylist.append((device_id, day1))
for hour in range(0, 24):
timestamp = datetime(2020, 12, day, hour, 30, 0)
if type.lower() == 'temperature':
value = int(random.normalvariate(24, 2.2))
elif type.lower() == 'humidity':
value = int(random.normalvariate(45, 3))
weather_data = {'device_id': device_id, 'value': value,
'timestamp': timestamp}
weather_data_collection = weather_dbh[WEATHER_DATA_COLLECTION]
weather_data_collection.insert_one(weather_data)
<|reserved_special_token_0|>
for ddy in devdaylist:
drm.insert_daily_report_to_daily_report_model(ddy[0], ddy[1], 'admin')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
HOST = '127.0.0.1'
PORT = '27017'
RELATIVE_CONFIG_PATH = '../config/'
DB_NAME = 'weather_db'
USER_COLLECTION = 'users'
DEVICE_COLLECTION = 'devices'
WEATHER_DATA_COLLECTION = 'weather_data'
DAILY_REPORT_MODEL = 'daily_report_model'
db_handle = MongoClient(f'mongodb://{HOST}:{PORT}')
db_handle.drop_database(DB_NAME)
weather_dbh = db_handle[DB_NAME]
with open(RELATIVE_CONFIG_PATH + USER_COLLECTION + '.csv', 'r') as user_fh:
for user_row in user_fh:
user_row = user_row.rstrip()
if user_row:
username, email, role = user_row.split(',')
user_data = {'username': username, 'email': email, 'role': role}
user_collection = weather_dbh[USER_COLLECTION]
user_collection.insert_one(user_data)
with open(RELATIVE_CONFIG_PATH + DEVICE_COLLECTION + '.csv', 'r') as device_fh:
for device_row in device_fh:
device_row = device_row.rstrip()
if device_row:
device_id, desc, type, manufacturer = device_row.split(',')
device_data = {'device_id': device_id, 'desc': desc, 'type': type,
'manufacturer': manufacturer}
device_collection = weather_dbh[DEVICE_COLLECTION]
device_collection.insert_one(device_data)
devdaylist = []
with open(RELATIVE_CONFIG_PATH + DEVICE_COLLECTION + '.csv', 'r') as device_fh:
for device_row in device_fh:
device_row = device_row.rstrip()
if device_row:
device_id, _, type, _ = device_row.split(',')
for day in range(1, 6):
day1 = datetime(2020, 12, day)
devdaylist.append((device_id, day1))
for hour in range(0, 24):
timestamp = datetime(2020, 12, day, hour, 30, 0)
if type.lower() == 'temperature':
value = int(random.normalvariate(24, 2.2))
elif type.lower() == 'humidity':
value = int(random.normalvariate(45, 3))
weather_data = {'device_id': device_id, 'value': value,
'timestamp': timestamp}
weather_data_collection = weather_dbh[WEATHER_DATA_COLLECTION]
weather_data_collection.insert_one(weather_data)
drm = DailyReportModel()
for ddy in devdaylist:
drm.insert_daily_report_to_daily_report_model(ddy[0], ddy[1], 'admin')
<|reserved_special_token_1|>
import random
from pymongo import MongoClient
from datetime import datetime
from model import DailyReportModel
HOST = '127.0.0.1'
PORT = '27017'
RELATIVE_CONFIG_PATH = '../config/'
DB_NAME = 'weather_db'
USER_COLLECTION = 'users'
DEVICE_COLLECTION = 'devices'
WEATHER_DATA_COLLECTION = 'weather_data'
DAILY_REPORT_MODEL = 'daily_report_model'
db_handle = MongoClient(f'mongodb://{HOST}:{PORT}')
db_handle.drop_database(DB_NAME)
weather_dbh = db_handle[DB_NAME]
with open(RELATIVE_CONFIG_PATH + USER_COLLECTION + '.csv', 'r') as user_fh:
for user_row in user_fh:
user_row = user_row.rstrip()
if user_row:
username, email, role = user_row.split(',')
user_data = {'username': username, 'email': email, 'role': role}
user_collection = weather_dbh[USER_COLLECTION]
user_collection.insert_one(user_data)
with open(RELATIVE_CONFIG_PATH + DEVICE_COLLECTION + '.csv', 'r') as device_fh:
for device_row in device_fh:
device_row = device_row.rstrip()
if device_row:
device_id, desc, type, manufacturer = device_row.split(',')
device_data = {'device_id': device_id, 'desc': desc, 'type': type,
'manufacturer': manufacturer}
device_collection = weather_dbh[DEVICE_COLLECTION]
device_collection.insert_one(device_data)
devdaylist = []
with open(RELATIVE_CONFIG_PATH + DEVICE_COLLECTION + '.csv', 'r') as device_fh:
for device_row in device_fh:
device_row = device_row.rstrip()
if device_row:
device_id, _, type, _ = device_row.split(',')
for day in range(1, 6):
day1 = datetime(2020, 12, day)
devdaylist.append((device_id, day1))
for hour in range(0, 24):
timestamp = datetime(2020, 12, day, hour, 30, 0)
if type.lower() == 'temperature':
value = int(random.normalvariate(24, 2.2))
elif type.lower() == 'humidity':
value = int(random.normalvariate(45, 3))
weather_data = {'device_id': device_id, 'value': value,
'timestamp': timestamp}
weather_data_collection = weather_dbh[WEATHER_DATA_COLLECTION]
weather_data_collection.insert_one(weather_data)
drm = DailyReportModel()
for ddy in devdaylist:
drm.insert_daily_report_to_daily_report_model(ddy[0], ddy[1], 'admin')
<|reserved_special_token_1|>
import random
# Imports MongoClient for base level access to the local MongoDB
from pymongo import MongoClient
# Imports datetime class to create timestamp for weather data storage
from datetime import datetime
# Importing DailyReportModel class to use the implemented method to insert data into daily_report_model collection
from model import DailyReportModel
# Database host ip and port information
HOST = '127.0.0.1'
PORT = '27017'
RELATIVE_CONFIG_PATH = '../config/'
DB_NAME = 'weather_db'
USER_COLLECTION = 'users'
DEVICE_COLLECTION = 'devices'
WEATHER_DATA_COLLECTION = 'weather_data'
DAILY_REPORT_MODEL = 'daily_report_model'
# This will initiate connection to the mongodb
db_handle = MongoClient(f'mongodb://{HOST}:{PORT}')
# We drop the existing database including all the collections and data
db_handle.drop_database(DB_NAME)
# We recreate the database with the same name
weather_dbh = db_handle[DB_NAME]
# user data import
# User document contains username (String), email (String), and role (String) fields
# Reads users.csv one line at a time, splits them into the data fields and inserts
with open(RELATIVE_CONFIG_PATH+USER_COLLECTION+'.csv', 'r') as user_fh:
for user_row in user_fh:
user_row = user_row.rstrip()
if user_row:
(username, email, role) = user_row.split(',')
user_data = {'username': username, 'email': email, 'role': role}
# This creates and return a pointer to the users collection
user_collection = weather_dbh[USER_COLLECTION]
# This inserts the data item as a document in the user collection
user_collection.insert_one(user_data)
# device data import
# Device document contains device_id (String), desc (String), type (String - temperature/humidity) and manufacturer (String) fields
# Reads devices.csv one line at a time, splits them into the data fields and inserts
with open(RELATIVE_CONFIG_PATH+DEVICE_COLLECTION+'.csv', 'r') as device_fh:
for device_row in device_fh:
device_row = device_row.rstrip()
if device_row:
(device_id, desc, type, manufacturer) = device_row.split(',')
device_data = {'device_id': device_id, 'desc': desc, 'type': type, 'manufacturer': manufacturer}
# This creates and return a pointer to the devices collection
device_collection = weather_dbh[DEVICE_COLLECTION]
# This inserts the data item as a document in the devices collection
device_collection.insert_one(device_data)
# weather data generation
# Weather data document contains device_id (String), value (Integer), and timestamp (Date) fields
# Reads devices.csv one line at a time to get device id and type. It then loops for five days (2020-12-01 to 2020-12-05)
# For each device and day, it creates random values for each hour (at the 30 minute mark) and stores the data
#Created a list to populate it with device id and timestamp
devdaylist = []
with open(RELATIVE_CONFIG_PATH+DEVICE_COLLECTION+'.csv', 'r') as device_fh:
for device_row in device_fh:
device_row = device_row.rstrip()
if device_row:
# _ can be used to ignore values that are not needed
(device_id, _, type, _) = device_row.split(',')
for day in range(1,6):
#creating and appending data to the list
day1 = datetime(2020, 12, day)
devdaylist.append((device_id, day1))
for hour in range(0,24):
timestamp = datetime(2020, 12, day, hour, 30, 0)
# Generates random data value in appropriate range as per the type of sensor (normal bell-curve distribution)
if (type.lower() == 'temperature'):
value = int(random.normalvariate(24,2.2))
elif (type.lower() == 'humidity'):
value = int(random.normalvariate(45,3))
weather_data = {'device_id': device_id, 'value': value, 'timestamp': timestamp}
weather_data_collection = weather_dbh[WEATHER_DATA_COLLECTION]
# This inserts the data item as a document in the weather_data collection
weather_data_collection.insert_one(weather_data)
#Populating the data to daily_report_model collection on setup
drm = DailyReportModel()
for ddy in devdaylist:
drm.insert_daily_report_to_daily_report_model(ddy[0], ddy[1], 'admin')
|
flexible
|
{
"blob_id": "a8b1b218e6649545000803c91c803580cfdbd4f1",
"index": 459,
"step-1": "<mask token>\n",
"step-2": "<mask token>\ndb_handle.drop_database(DB_NAME)\n<mask token>\nwith open(RELATIVE_CONFIG_PATH + USER_COLLECTION + '.csv', 'r') as user_fh:\n for user_row in user_fh:\n user_row = user_row.rstrip()\n if user_row:\n username, email, role = user_row.split(',')\n user_data = {'username': username, 'email': email, 'role': role}\n user_collection = weather_dbh[USER_COLLECTION]\n user_collection.insert_one(user_data)\nwith open(RELATIVE_CONFIG_PATH + DEVICE_COLLECTION + '.csv', 'r') as device_fh:\n for device_row in device_fh:\n device_row = device_row.rstrip()\n if device_row:\n device_id, desc, type, manufacturer = device_row.split(',')\n device_data = {'device_id': device_id, 'desc': desc, 'type': type,\n 'manufacturer': manufacturer}\n device_collection = weather_dbh[DEVICE_COLLECTION]\n device_collection.insert_one(device_data)\n<mask token>\nwith open(RELATIVE_CONFIG_PATH + DEVICE_COLLECTION + '.csv', 'r') as device_fh:\n for device_row in device_fh:\n device_row = device_row.rstrip()\n if device_row:\n device_id, _, type, _ = device_row.split(',')\n for day in range(1, 6):\n day1 = datetime(2020, 12, day)\n devdaylist.append((device_id, day1))\n for hour in range(0, 24):\n timestamp = datetime(2020, 12, day, hour, 30, 0)\n if type.lower() == 'temperature':\n value = int(random.normalvariate(24, 2.2))\n elif type.lower() == 'humidity':\n value = int(random.normalvariate(45, 3))\n weather_data = {'device_id': device_id, 'value': value,\n 'timestamp': timestamp}\n weather_data_collection = weather_dbh[WEATHER_DATA_COLLECTION]\n weather_data_collection.insert_one(weather_data)\n<mask token>\nfor ddy in devdaylist:\n drm.insert_daily_report_to_daily_report_model(ddy[0], ddy[1], 'admin')\n",
"step-3": "<mask token>\nHOST = '127.0.0.1'\nPORT = '27017'\nRELATIVE_CONFIG_PATH = '../config/'\nDB_NAME = 'weather_db'\nUSER_COLLECTION = 'users'\nDEVICE_COLLECTION = 'devices'\nWEATHER_DATA_COLLECTION = 'weather_data'\nDAILY_REPORT_MODEL = 'daily_report_model'\ndb_handle = MongoClient(f'mongodb://{HOST}:{PORT}')\ndb_handle.drop_database(DB_NAME)\nweather_dbh = db_handle[DB_NAME]\nwith open(RELATIVE_CONFIG_PATH + USER_COLLECTION + '.csv', 'r') as user_fh:\n for user_row in user_fh:\n user_row = user_row.rstrip()\n if user_row:\n username, email, role = user_row.split(',')\n user_data = {'username': username, 'email': email, 'role': role}\n user_collection = weather_dbh[USER_COLLECTION]\n user_collection.insert_one(user_data)\nwith open(RELATIVE_CONFIG_PATH + DEVICE_COLLECTION + '.csv', 'r') as device_fh:\n for device_row in device_fh:\n device_row = device_row.rstrip()\n if device_row:\n device_id, desc, type, manufacturer = device_row.split(',')\n device_data = {'device_id': device_id, 'desc': desc, 'type': type,\n 'manufacturer': manufacturer}\n device_collection = weather_dbh[DEVICE_COLLECTION]\n device_collection.insert_one(device_data)\ndevdaylist = []\nwith open(RELATIVE_CONFIG_PATH + DEVICE_COLLECTION + '.csv', 'r') as device_fh:\n for device_row in device_fh:\n device_row = device_row.rstrip()\n if device_row:\n device_id, _, type, _ = device_row.split(',')\n for day in range(1, 6):\n day1 = datetime(2020, 12, day)\n devdaylist.append((device_id, day1))\n for hour in range(0, 24):\n timestamp = datetime(2020, 12, day, hour, 30, 0)\n if type.lower() == 'temperature':\n value = int(random.normalvariate(24, 2.2))\n elif type.lower() == 'humidity':\n value = int(random.normalvariate(45, 3))\n weather_data = {'device_id': device_id, 'value': value,\n 'timestamp': timestamp}\n weather_data_collection = weather_dbh[WEATHER_DATA_COLLECTION]\n weather_data_collection.insert_one(weather_data)\ndrm = DailyReportModel()\nfor ddy in devdaylist:\n drm.insert_daily_report_to_daily_report_model(ddy[0], ddy[1], 'admin')\n",
"step-4": "import random\nfrom pymongo import MongoClient\nfrom datetime import datetime\nfrom model import DailyReportModel\nHOST = '127.0.0.1'\nPORT = '27017'\nRELATIVE_CONFIG_PATH = '../config/'\nDB_NAME = 'weather_db'\nUSER_COLLECTION = 'users'\nDEVICE_COLLECTION = 'devices'\nWEATHER_DATA_COLLECTION = 'weather_data'\nDAILY_REPORT_MODEL = 'daily_report_model'\ndb_handle = MongoClient(f'mongodb://{HOST}:{PORT}')\ndb_handle.drop_database(DB_NAME)\nweather_dbh = db_handle[DB_NAME]\nwith open(RELATIVE_CONFIG_PATH + USER_COLLECTION + '.csv', 'r') as user_fh:\n for user_row in user_fh:\n user_row = user_row.rstrip()\n if user_row:\n username, email, role = user_row.split(',')\n user_data = {'username': username, 'email': email, 'role': role}\n user_collection = weather_dbh[USER_COLLECTION]\n user_collection.insert_one(user_data)\nwith open(RELATIVE_CONFIG_PATH + DEVICE_COLLECTION + '.csv', 'r') as device_fh:\n for device_row in device_fh:\n device_row = device_row.rstrip()\n if device_row:\n device_id, desc, type, manufacturer = device_row.split(',')\n device_data = {'device_id': device_id, 'desc': desc, 'type': type,\n 'manufacturer': manufacturer}\n device_collection = weather_dbh[DEVICE_COLLECTION]\n device_collection.insert_one(device_data)\ndevdaylist = []\nwith open(RELATIVE_CONFIG_PATH + DEVICE_COLLECTION + '.csv', 'r') as device_fh:\n for device_row in device_fh:\n device_row = device_row.rstrip()\n if device_row:\n device_id, _, type, _ = device_row.split(',')\n for day in range(1, 6):\n day1 = datetime(2020, 12, day)\n devdaylist.append((device_id, day1))\n for hour in range(0, 24):\n timestamp = datetime(2020, 12, day, hour, 30, 0)\n if type.lower() == 'temperature':\n value = int(random.normalvariate(24, 2.2))\n elif type.lower() == 'humidity':\n value = int(random.normalvariate(45, 3))\n weather_data = {'device_id': device_id, 'value': value,\n 'timestamp': timestamp}\n weather_data_collection = weather_dbh[WEATHER_DATA_COLLECTION]\n weather_data_collection.insert_one(weather_data)\ndrm = DailyReportModel()\nfor ddy in devdaylist:\n drm.insert_daily_report_to_daily_report_model(ddy[0], ddy[1], 'admin')\n",
"step-5": "import random\r\n# Imports MongoClient for base level access to the local MongoDB\r\nfrom pymongo import MongoClient\r\n# Imports datetime class to create timestamp for weather data storage\r\nfrom datetime import datetime\r\n# Importing DailyReportModel class to use the implemented method to insert data into daily_report_model collection\r\nfrom model import DailyReportModel\r\n\r\n\r\n# Database host ip and port information\r\nHOST = '127.0.0.1'\r\nPORT = '27017'\r\n\r\nRELATIVE_CONFIG_PATH = '../config/'\r\n\r\nDB_NAME = 'weather_db'\r\nUSER_COLLECTION = 'users'\r\nDEVICE_COLLECTION = 'devices'\r\nWEATHER_DATA_COLLECTION = 'weather_data'\r\nDAILY_REPORT_MODEL = 'daily_report_model'\r\n\r\n# This will initiate connection to the mongodb\r\ndb_handle = MongoClient(f'mongodb://{HOST}:{PORT}')\r\n\r\n# We drop the existing database including all the collections and data\r\ndb_handle.drop_database(DB_NAME)\r\n\r\n# We recreate the database with the same name\r\nweather_dbh = db_handle[DB_NAME]\r\n\r\n\r\n# user data import\r\n# User document contains username (String), email (String), and role (String) fields\r\n# Reads users.csv one line at a time, splits them into the data fields and inserts\r\nwith open(RELATIVE_CONFIG_PATH+USER_COLLECTION+'.csv', 'r') as user_fh:\r\n for user_row in user_fh:\r\n user_row = user_row.rstrip()\r\n if user_row:\r\n (username, email, role) = user_row.split(',')\r\n user_data = {'username': username, 'email': email, 'role': role}\r\n \r\n # This creates and return a pointer to the users collection\r\n user_collection = weather_dbh[USER_COLLECTION]\r\n \r\n # This inserts the data item as a document in the user collection\r\n user_collection.insert_one(user_data)\r\n\r\n\r\n# device data import\r\n# Device document contains device_id (String), desc (String), type (String - temperature/humidity) and manufacturer (String) fields\r\n# Reads devices.csv one line at a time, splits them into the data fields and inserts\r\nwith open(RELATIVE_CONFIG_PATH+DEVICE_COLLECTION+'.csv', 'r') as device_fh:\r\n for device_row in device_fh:\r\n device_row = device_row.rstrip()\r\n if device_row:\r\n (device_id, desc, type, manufacturer) = device_row.split(',')\r\n device_data = {'device_id': device_id, 'desc': desc, 'type': type, 'manufacturer': manufacturer}\r\n \r\n # This creates and return a pointer to the devices collection\r\n device_collection = weather_dbh[DEVICE_COLLECTION]\r\n \r\n # This inserts the data item as a document in the devices collection\r\n device_collection.insert_one(device_data)\r\n\r\n\r\n# weather data generation\r\n# Weather data document contains device_id (String), value (Integer), and timestamp (Date) fields\r\n# Reads devices.csv one line at a time to get device id and type. 
It then loops for five days (2020-12-01 to 2020-12-05\r\n# For each device and day, it creates random values for each hour (at the 30 minute mark) and stores the data\r\n\r\n#Created a list to populate it with device id and timestamp\r\ndevdaylist = []\r\nwith open(RELATIVE_CONFIG_PATH+DEVICE_COLLECTION+'.csv', 'r') as device_fh:\r\n for device_row in device_fh:\r\n device_row = device_row.rstrip()\r\n if device_row:\r\n # _ can be used to ignore values that are not needed\r\n (device_id, _, type, _) = device_row.split(',')\r\n for day in range(1,6):\r\n #creating and appending data to the list\r\n day1 = datetime(2020, 12, day)\r\n devdaylist.append((device_id, day1))\r\n for hour in range(0,24):\r\n timestamp = datetime(2020, 12, day, hour, 30, 0)\r\n # Generates random data value in appropriate range as per the type of sensor (normal bell-curve distribution)\r\n if (type.lower() == 'temperature'):\r\n value = int(random.normalvariate(24,2.2))\r\n elif (type.lower() == 'humidity'):\r\n value = int(random.normalvariate(45,3))\r\n weather_data = {'device_id': device_id, 'value': value, 'timestamp': timestamp}\r\n weather_data_collection = weather_dbh[WEATHER_DATA_COLLECTION]\r\n \r\n # This inserts the data item as a document in the weather_data collection\r\n weather_data_collection.insert_one(weather_data)\r\n \r\n\r\n\r\n#Populating the data to daily_report_model collection on setup\r\ndrm = DailyReportModel()\r\nfor ddy in devdaylist:\r\n drm.insert_daily_report_to_daily_report_model(ddy[0], ddy[1], 'admin')\r\n \r\n ",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
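The setup script ends by calling `DailyReportModel.insert_daily_report_to_daily_report_model` from the `model` module, which this record does not include. Below is a plausible sketch, assuming a daily report stores the min/average/max of one device's readings for one calendar day; everything beyond the class name, method name, and the (device_id, day, username) argument order used above is hypothetical.

# Hypothetical sketch of model.py's DailyReportModel; only the class name,
# method name, and argument order are taken from the setup script above.
from datetime import timedelta

from pymongo import MongoClient


class DailyReportModel:

    def __init__(self, host='127.0.0.1', port='27017', db_name='weather_db'):
        self._db = MongoClient(f'mongodb://{host}:{port}')[db_name]

    def insert_daily_report_to_daily_report_model(self, device_id, day, username):
        # Summarise that device's readings for the 24 hours starting at `day`.
        pipeline = [
            {'$match': {'device_id': device_id,
                        'timestamp': {'$gte': day, '$lt': day + timedelta(days=1)}}},
            {'$group': {'_id': '$device_id',
                        'min': {'$min': '$value'},
                        'avg': {'$avg': '$value'},
                        'max': {'$max': '$value'}}},
        ]
        for row in self._db['weather_data'].aggregate(pipeline):
            self._db['daily_report_model'].insert_one(
                {'device_id': device_id, 'day': day, 'created_by': username,
                 'min': row['min'], 'avg': row['avg'], 'max': row['max']})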
from settings import *
helpMessage = '''
**Voice / Music**
`{0}join`
Will join the voice channel you are in.
`{0}leave`
Will leave the voice channel you are in.
`{0}play [YouTube Url]` *or* `{0}play [music or video to search for]`
Will start playing the audio of the given video / song.
`{0}pause`
Will pause the current audio stream.
`{0}resume`
Will resume the current audio stream.
`{0}stop`
Will stop and end the audio stream.
~~**=========================================**~~
**Administrator**
`{0}invite`
Sends a personal message with the bot's invite link. (Will only work for the bot owner.)
`{0}shutdown`
Will disconnect and shut down the bot. (Will only work for the bot owner.)
`{0}status [status here]`
Will set the bot's playing status. (Will only work for the bot owner.)
~~**=========================================**~~
**Mini-Games**
`{0}joke`
Posts a random Chuck Norris joke.
`{0}8ball`
Ask 8-Ball any question.
`{0}coinflip`
Will flip a coin and display the result.
`{0}roll [# of dice] D[# of sides] Example: !roll 3 D6`
Will roll the specified dice and post the result.
`{0}slots`
Will post a slot machine result.
~~**=========================================**~~
**Random Commands**
`{0}cat`
Will post a random cat picture or gif.
`{0}catfact (CURRENTLY UNAVAILABLE)`
Will post a random cat fact.
`{0}catgif`
Will post a random cat gif.
`{0}dog`
Will post a random dog picture.
`{0}rabbit`
Will post a random rabbit picture.
`{0}face`
Posts a random face from a DB of 270+ faces
~~**=========================================**~~
**Games**
`{0}hots [hotslogs player ID]` - Example: !hots 3141592
Will post the player's MMR for quick match and hero league.
`{0}gwent [Card Name]` - Example: !gwent Geralt
Will post the gwent card's description and image. Has a maximum search length of 10 characters.'''.format(config.COMMANDPREFIX)
|
normal
|
{
"blob_id": "f7283750923e1e430ff1f648878bbb9a0c73d2c4",
"index": 7880,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nhelpMessage = (\n \"\"\"\n**Vocal / Musique**\n\n`{0}join`\nVa rejoindre le salon vocale dans laquelle vous êtes.\n\n`{0}leave`\nVa partir du salon vocale dans laquelle vous êtes.\n\n`{0}play [YouTube Url]` *ou* `{0}play [musique ou video à rechercher]`\nCommencera à jouer l'audio de la vidéo / chanson fournie.\n\n`{0}pause`\nMettra en pause le flux audio actuel.\n\n`{0}resume`\nVa reprendre le flux audio actuel.\n\n`{0}stop`\nArrêter et terminer le flux audio.\n\n~~**=========================================**~~\n\n**Administrateur**\n\n`{0}invite`\nEnvoie un message personnel avec le lien d'invitation du bot. (Ne fonctionnera que pour le propriétaire du bot.)\n\n`{0}shutdown`\nVa faire la déconnexion et l'arrêt du bot. (Ne fonctionnera que pour le propriétaire du bot.)\n\n`{0}status [status here]`\nDéfinira le statut de jeu du bot. Ne fonctionnera que pour le propriétaire du bot. (Ne fonctionnera que pour le propriétaire du bot.)\n\n~~**=========================================**~~\n\n**Mini-Games**\n\n`{0}joke`\nPostes une blague aléatoire Chuck Norris.\n\n`{0}8ball`\nPose n'importe quelle question à 8-Ball.\n\n`{0}coinflip`\nVa retourner une pièce et afficher le résultat.\n\n`{0}roll [# of dice] D[# of sides] Example: !roll 3 D6`\nVa lancer les dés spécifiés et poster le résultat.\n\n`{0}slots`\nVa poster un résultat de machine à sous.\n\n~~**=========================================**~~\n\n**Random Commandes**\n\n`{0}cat`\nVa poster une image de chat aléatoire ou gif.\n\n`{0}catfact (ACTUELLEMENT HORS DE COMMANDE INDISPONIBLE)`\nVa poster un fait de chat au hasard.\n\n`{0}catgif`\nVa poster un gif de chat aléatoire.\n\n`{0}dog`\nVa poster une image de chien aléatoire.\n\n`{0}rabbit`\nVa poster une image de lapin aléatoire.\n\n`{0}face`\nPoste un visage random depuis une DB de +270 visages\n\n~~**=========================================**~~\n\n**Jeux**\n\n`{0}hots [hotslogs player ID]` - Example: !hots 3141592\nPubliera le MMR du joueur pour le match rapide et la ligue des héros.\n\n`{0}gwent [Nom de la Carte]` - Example: !gwent Geralt\nVa poster la description de la carte et l'image de la carte gwent. A une longueur de recherche maximale de 10 caractères.\"\"\"\n .format(config.COMMANDPREFIX))\n",
"step-3": "from settings import *\nhelpMessage = (\n \"\"\"\n**Vocal / Musique**\n\n`{0}join`\nVa rejoindre le salon vocale dans laquelle vous êtes.\n\n`{0}leave`\nVa partir du salon vocale dans laquelle vous êtes.\n\n`{0}play [YouTube Url]` *ou* `{0}play [musique ou video à rechercher]`\nCommencera à jouer l'audio de la vidéo / chanson fournie.\n\n`{0}pause`\nMettra en pause le flux audio actuel.\n\n`{0}resume`\nVa reprendre le flux audio actuel.\n\n`{0}stop`\nArrêter et terminer le flux audio.\n\n~~**=========================================**~~\n\n**Administrateur**\n\n`{0}invite`\nEnvoie un message personnel avec le lien d'invitation du bot. (Ne fonctionnera que pour le propriétaire du bot.)\n\n`{0}shutdown`\nVa faire la déconnexion et l'arrêt du bot. (Ne fonctionnera que pour le propriétaire du bot.)\n\n`{0}status [status here]`\nDéfinira le statut de jeu du bot. Ne fonctionnera que pour le propriétaire du bot. (Ne fonctionnera que pour le propriétaire du bot.)\n\n~~**=========================================**~~\n\n**Mini-Games**\n\n`{0}joke`\nPostes une blague aléatoire Chuck Norris.\n\n`{0}8ball`\nPose n'importe quelle question à 8-Ball.\n\n`{0}coinflip`\nVa retourner une pièce et afficher le résultat.\n\n`{0}roll [# of dice] D[# of sides] Example: !roll 3 D6`\nVa lancer les dés spécifiés et poster le résultat.\n\n`{0}slots`\nVa poster un résultat de machine à sous.\n\n~~**=========================================**~~\n\n**Random Commandes**\n\n`{0}cat`\nVa poster une image de chat aléatoire ou gif.\n\n`{0}catfact (ACTUELLEMENT HORS DE COMMANDE INDISPONIBLE)`\nVa poster un fait de chat au hasard.\n\n`{0}catgif`\nVa poster un gif de chat aléatoire.\n\n`{0}dog`\nVa poster une image de chien aléatoire.\n\n`{0}rabbit`\nVa poster une image de lapin aléatoire.\n\n`{0}face`\nPoste un visage random depuis une DB de +270 visages\n\n~~**=========================================**~~\n\n**Jeux**\n\n`{0}hots [hotslogs player ID]` - Example: !hots 3141592\nPubliera le MMR du joueur pour le match rapide et la ligue des héros.\n\n`{0}gwent [Nom de la Carte]` - Example: !gwent Geralt\nVa poster la description de la carte et l'image de la carte gwent. A une longueur de recherche maximale de 10 caractères.\"\"\"\n .format(config.COMMANDPREFIX))\n",
"step-4": "from settings import *\r\n\r\nhelpMessage = '''\r\n**Vocal / Musique**\r\n\r\n`{0}join`\r\nVa rejoindre le salon vocale dans laquelle vous êtes.\r\n\r\n`{0}leave`\r\nVa partir du salon vocale dans laquelle vous êtes.\r\n\r\n`{0}play [YouTube Url]` *ou* `{0}play [musique ou video à rechercher]`\r\nCommencera à jouer l'audio de la vidéo / chanson fournie.\r\n\r\n`{0}pause`\r\nMettra en pause le flux audio actuel.\r\n\r\n`{0}resume`\r\nVa reprendre le flux audio actuel.\r\n\r\n`{0}stop`\r\nArrêter et terminer le flux audio.\r\n\r\n~~**=========================================**~~\r\n\r\n**Administrateur**\r\n\r\n`{0}invite`\r\nEnvoie un message personnel avec le lien d'invitation du bot. (Ne fonctionnera que pour le propriétaire du bot.)\r\n\r\n`{0}shutdown`\r\nVa faire la déconnexion et l'arrêt du bot. (Ne fonctionnera que pour le propriétaire du bot.)\r\n\r\n`{0}status [status here]`\r\nDéfinira le statut de jeu du bot. Ne fonctionnera que pour le propriétaire du bot. (Ne fonctionnera que pour le propriétaire du bot.)\r\n\r\n~~**=========================================**~~\r\n\r\n**Mini-Games**\r\n\r\n`{0}joke`\r\nPostes une blague aléatoire Chuck Norris.\r\n\r\n`{0}8ball`\r\nPose n'importe quelle question à 8-Ball.\r\n\r\n`{0}coinflip`\r\nVa retourner une pièce et afficher le résultat.\r\n\r\n`{0}roll [# of dice] D[# of sides] Example: !roll 3 D6`\r\nVa lancer les dés spécifiés et poster le résultat.\r\n\r\n`{0}slots`\r\nVa poster un résultat de machine à sous.\r\n\r\n~~**=========================================**~~\r\n\r\n**Random Commandes**\r\n\r\n`{0}cat`\r\nVa poster une image de chat aléatoire ou gif.\r\n\r\n`{0}catfact (ACTUELLEMENT HORS DE COMMANDE INDISPONIBLE)`\r\nVa poster un fait de chat au hasard.\r\n\r\n`{0}catgif`\r\nVa poster un gif de chat aléatoire.\r\n\r\n`{0}dog`\r\nVa poster une image de chien aléatoire.\r\n\r\n`{0}rabbit`\r\nVa poster une image de lapin aléatoire.\r\n\r\n`{0}face`\r\nPoste un visage random depuis une DB de +270 visages\r\n\r\n~~**=========================================**~~\r\n\r\n**Jeux**\r\n\r\n`{0}hots [hotslogs player ID]` - Example: !hots 3141592\r\nPubliera le MMR du joueur pour le match rapide et la ligue des héros.\r\n\r\n`{0}gwent [Nom de la Carte]` - Example: !gwent Geralt\r\nVa poster la description de la carte et l'image de la carte gwent. A une longueur de recherche maximale de 10 caractères.'''.format(config.COMMANDPREFIX)",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import pickle
import select
import socket
import sys
from threading import Thread
from typing import Dict, Tuple
import pygame
from pygame.locals import *
import c
from models import *
class Game:
location: list[int, int] = [c.WIDTH / 2, c.HEIGHT / 2]
velocity: list[int, int] = [0, 0]
current_player: Player = None
other_players: Dict[str, Tuple[Player, Tuple[int, int]]] = {}
connection: socket.socket
font: pygame.font.Font
def __init__(self):
pygame.init()
self.connection = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.screen = pygame.display.set_mode((c.WIDTH, c.HEIGHT))
pygame.display.set_caption('Socket Game')
self.clock = pygame.time.Clock()
self.screen.fill('white')
self.font = pygame.font.SysFont(None, c.FONT_SIZE)
def start(self):
self.connect_to_server()
while True:
self.game_loop()
def connect_to_server(self):
self.connection.connect((c.HOST, c.PORT))
def listen_to_server(self):
ins, outs, ex = select.select([self.connection], [], [], 0)
for inm in ins:
received_data = inm.recv(c.BUFFSIZE)
event: Event = pickle.loads(received_data)
print("<<<", event)
if isinstance(event, CurrentPlayerEvent):
pygame.display.set_caption(f'Socket Game - {event.player.nickname}')
self.current_player = event.player
elif isinstance(event, PlayerDidMoveEvent):
self.update_player(event.player, event.location)
elif isinstance(event, PlayerJoinedEvent):
self.update_player(event.player)
def update_player(self, player: Player, location=(c.WIDTH / 2, c.HEIGHT / 2)):
self.other_players[player.nickname] = (player, location)
def update_server(self):
if self.current_player is not None:
self.connection.send(pickle.dumps(PlayerDidMoveEvent(self.current_player, (
self.location[0], self.location[1],
))))
def game_loop(self):
self.listen_to_server()
self.event_handling()
self.update_location()
self.render()
self.update_server()
self.clock.tick(60)
def update_location(self):
oldx, oldy = self.location
vx, vy = self.velocity
newx, newy = oldx + vx, oldy + vy
if newx > c.WIDTH - c.PLAYER_SIZE:
newx = c.WIDTH - c.PLAYER_SIZE
if newx < 0:
newx = 0
if newy > c.HEIGHT - c.PLAYER_SIZE:
newy = c.HEIGHT - c.PLAYER_SIZE
if newy < 0:
newy = 0
self.location = [newx, newy]
def render_player(self, player: Player, location: Tuple[int, int]):
x, y = location
img = self.font.render(player.nickname, True, player.color)
pygame.draw.rect(self.screen, player.color, (x, y, c.PLAYER_SIZE, c.PLAYER_SIZE))
self.screen.blit(img, (x, y - img.get_height()))
def render(self):
self.screen.fill((255, 255, 255))
if self.current_player is not None:
self.render_player(self.current_player, (self.location[0], self.location[1]))
for nickname, (player, location) in self.other_players.items():
self.render_player(player, location)
pygame.display.flip()
def event_handling(self):
for event in pygame.event.get():
if event.type == QUIT:
pygame.quit()
sys.exit()
if event.type == KEYDOWN:
if event.key == K_LEFT: self.velocity[0] = -c.MOVEMENT_SPEED
if event.key == K_RIGHT: self.velocity[0] = c.MOVEMENT_SPEED
if event.key == K_UP: self.velocity[1] = -c.MOVEMENT_SPEED
if event.key == K_DOWN: self.velocity[1] = c.MOVEMENT_SPEED
if event.type == KEYUP:
if event.key == K_LEFT: self.velocity[0] = 0
if event.key == K_RIGHT: self.velocity[0] = 0
if event.key == K_UP: self.velocity[1] = 0
if event.key == K_DOWN: self.velocity[1] = 0
if __name__ == "__main__":
s = Game()
s.start()
|
normal
|
{
"blob_id": "418798369578e80ecbf82da802b23dc6ca922569",
"index": 7107,
"step-1": "<mask token>\n\n\nclass Game:\n location: list[int, int] = [c.WIDTH / 2, c.HEIGHT / 2]\n velocity: list[int, int] = [0, 0]\n current_player: Player = None\n other_players: Dict[str, Tuple[Player, Tuple[int, int]]] = {}\n connection: socket.socket\n font: pygame.font.Font\n <mask token>\n <mask token>\n\n def connect_to_server(self):\n self.connection.connect((c.HOST, c.PORT))\n\n def listen_to_server(self):\n ins, outs, ex = select.select([self.connection], [], [], 0)\n for inm in ins:\n received_data = inm.recv(c.BUFFSIZE)\n event: Event = pickle.loads(received_data)\n print('<<<', event)\n if isinstance(event, CurrentPlayerEvent):\n pygame.display.set_caption(\n f'Socket Game - {event.player.nickname}')\n self.current_player = event.player\n elif isinstance(event, PlayerDidMoveEvent):\n self.update_player(event.player, event.location)\n elif isinstance(event, PlayerJoinedEvent):\n self.update_player(event.player)\n\n def update_player(self, player: Player, location=(c.WIDTH / 2, c.HEIGHT /\n 2)):\n self.other_players[player.nickname] = player, location\n\n def update_server(self):\n if self.current_player is not None:\n self.connection.send(pickle.dumps(PlayerDidMoveEvent(self.\n current_player, (self.location[0], self.location[1]))))\n\n def game_loop(self):\n self.listen_to_server()\n self.event_handling()\n self.update_location()\n self.render()\n self.update_server()\n self.clock.tick(60)\n\n def update_location(self):\n oldx, oldy = self.location\n vx, vy = self.velocity\n newx, newy = oldx + vx, oldy + vy\n if newx > c.WIDTH - c.PLAYER_SIZE:\n newx = c.WIDTH - c.PLAYER_SIZE\n if newx < 0:\n newx = 0\n if newy > c.HEIGHT - c.PLAYER_SIZE:\n newy = c.HEIGHT - c.PLAYER_SIZE\n if newy < 0:\n newy = 0\n self.location = [newx, newy]\n\n def render_player(self, player: Player, location: Tuple[int, int]):\n x, y = location\n img = self.font.render(player.nickname, True, player.color)\n pygame.draw.rect(self.screen, player.color, (x, y, c.PLAYER_SIZE, c\n .PLAYER_SIZE))\n self.screen.blit(img, (x, y - img.get_height()))\n\n def render(self):\n self.screen.fill((255, 255, 255))\n if self.current_player is not None:\n self.render_player(self.current_player, (self.location[0], self\n .location[1]))\n for nickname, (player, location) in self.other_players.items():\n self.render_player(player, location)\n pygame.display.flip()\n\n def event_handling(self):\n for event in pygame.event.get():\n if event.type == QUIT:\n pygame.quit()\n sys.exit()\n if event.type == KEYDOWN:\n if event.key == K_LEFT:\n self.velocity[0] = -c.MOVEMENT_SPEED\n if event.key == K_RIGHT:\n self.velocity[0] = c.MOVEMENT_SPEED\n if event.key == K_UP:\n self.velocity[1] = -c.MOVEMENT_SPEED\n if event.key == K_DOWN:\n self.velocity[1] = c.MOVEMENT_SPEED\n if event.type == KEYUP:\n if event.key == K_LEFT:\n self.velocity[0] = 0\n if event.key == K_RIGHT:\n self.velocity[0] = 0\n if event.key == K_UP:\n self.velocity[1] = 0\n if event.key == K_DOWN:\n self.velocity[1] = 0\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Game:\n location: list[int, int] = [c.WIDTH / 2, c.HEIGHT / 2]\n velocity: list[int, int] = [0, 0]\n current_player: Player = None\n other_players: Dict[str, Tuple[Player, Tuple[int, int]]] = {}\n connection: socket.socket\n font: pygame.font.Font\n\n def __init__(self):\n pygame.init()\n self.connection = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.screen = pygame.display.set_mode((c.WIDTH, c.HEIGHT))\n pygame.display.set_caption('Socket Game')\n self.clock = pygame.time.Clock()\n self.screen.fill('white')\n self.font = pygame.font.SysFont(None, c.FONT_SIZE)\n <mask token>\n\n def connect_to_server(self):\n self.connection.connect((c.HOST, c.PORT))\n\n def listen_to_server(self):\n ins, outs, ex = select.select([self.connection], [], [], 0)\n for inm in ins:\n received_data = inm.recv(c.BUFFSIZE)\n event: Event = pickle.loads(received_data)\n print('<<<', event)\n if isinstance(event, CurrentPlayerEvent):\n pygame.display.set_caption(\n f'Socket Game - {event.player.nickname}')\n self.current_player = event.player\n elif isinstance(event, PlayerDidMoveEvent):\n self.update_player(event.player, event.location)\n elif isinstance(event, PlayerJoinedEvent):\n self.update_player(event.player)\n\n def update_player(self, player: Player, location=(c.WIDTH / 2, c.HEIGHT /\n 2)):\n self.other_players[player.nickname] = player, location\n\n def update_server(self):\n if self.current_player is not None:\n self.connection.send(pickle.dumps(PlayerDidMoveEvent(self.\n current_player, (self.location[0], self.location[1]))))\n\n def game_loop(self):\n self.listen_to_server()\n self.event_handling()\n self.update_location()\n self.render()\n self.update_server()\n self.clock.tick(60)\n\n def update_location(self):\n oldx, oldy = self.location\n vx, vy = self.velocity\n newx, newy = oldx + vx, oldy + vy\n if newx > c.WIDTH - c.PLAYER_SIZE:\n newx = c.WIDTH - c.PLAYER_SIZE\n if newx < 0:\n newx = 0\n if newy > c.HEIGHT - c.PLAYER_SIZE:\n newy = c.HEIGHT - c.PLAYER_SIZE\n if newy < 0:\n newy = 0\n self.location = [newx, newy]\n\n def render_player(self, player: Player, location: Tuple[int, int]):\n x, y = location\n img = self.font.render(player.nickname, True, player.color)\n pygame.draw.rect(self.screen, player.color, (x, y, c.PLAYER_SIZE, c\n .PLAYER_SIZE))\n self.screen.blit(img, (x, y - img.get_height()))\n\n def render(self):\n self.screen.fill((255, 255, 255))\n if self.current_player is not None:\n self.render_player(self.current_player, (self.location[0], self\n .location[1]))\n for nickname, (player, location) in self.other_players.items():\n self.render_player(player, location)\n pygame.display.flip()\n\n def event_handling(self):\n for event in pygame.event.get():\n if event.type == QUIT:\n pygame.quit()\n sys.exit()\n if event.type == KEYDOWN:\n if event.key == K_LEFT:\n self.velocity[0] = -c.MOVEMENT_SPEED\n if event.key == K_RIGHT:\n self.velocity[0] = c.MOVEMENT_SPEED\n if event.key == K_UP:\n self.velocity[1] = -c.MOVEMENT_SPEED\n if event.key == K_DOWN:\n self.velocity[1] = c.MOVEMENT_SPEED\n if event.type == KEYUP:\n if event.key == K_LEFT:\n self.velocity[0] = 0\n if event.key == K_RIGHT:\n self.velocity[0] = 0\n if event.key == K_UP:\n self.velocity[1] = 0\n if event.key == K_DOWN:\n self.velocity[1] = 0\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Game:\n location: list[int, int] = [c.WIDTH / 2, c.HEIGHT / 2]\n velocity: list[int, int] = [0, 0]\n current_player: Player = None\n other_players: Dict[str, Tuple[Player, Tuple[int, int]]] = {}\n connection: socket.socket\n font: pygame.font.Font\n\n def __init__(self):\n pygame.init()\n self.connection = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.screen = pygame.display.set_mode((c.WIDTH, c.HEIGHT))\n pygame.display.set_caption('Socket Game')\n self.clock = pygame.time.Clock()\n self.screen.fill('white')\n self.font = pygame.font.SysFont(None, c.FONT_SIZE)\n\n def start(self):\n self.connect_to_server()\n while True:\n self.game_loop()\n\n def connect_to_server(self):\n self.connection.connect((c.HOST, c.PORT))\n\n def listen_to_server(self):\n ins, outs, ex = select.select([self.connection], [], [], 0)\n for inm in ins:\n received_data = inm.recv(c.BUFFSIZE)\n event: Event = pickle.loads(received_data)\n print('<<<', event)\n if isinstance(event, CurrentPlayerEvent):\n pygame.display.set_caption(\n f'Socket Game - {event.player.nickname}')\n self.current_player = event.player\n elif isinstance(event, PlayerDidMoveEvent):\n self.update_player(event.player, event.location)\n elif isinstance(event, PlayerJoinedEvent):\n self.update_player(event.player)\n\n def update_player(self, player: Player, location=(c.WIDTH / 2, c.HEIGHT /\n 2)):\n self.other_players[player.nickname] = player, location\n\n def update_server(self):\n if self.current_player is not None:\n self.connection.send(pickle.dumps(PlayerDidMoveEvent(self.\n current_player, (self.location[0], self.location[1]))))\n\n def game_loop(self):\n self.listen_to_server()\n self.event_handling()\n self.update_location()\n self.render()\n self.update_server()\n self.clock.tick(60)\n\n def update_location(self):\n oldx, oldy = self.location\n vx, vy = self.velocity\n newx, newy = oldx + vx, oldy + vy\n if newx > c.WIDTH - c.PLAYER_SIZE:\n newx = c.WIDTH - c.PLAYER_SIZE\n if newx < 0:\n newx = 0\n if newy > c.HEIGHT - c.PLAYER_SIZE:\n newy = c.HEIGHT - c.PLAYER_SIZE\n if newy < 0:\n newy = 0\n self.location = [newx, newy]\n\n def render_player(self, player: Player, location: Tuple[int, int]):\n x, y = location\n img = self.font.render(player.nickname, True, player.color)\n pygame.draw.rect(self.screen, player.color, (x, y, c.PLAYER_SIZE, c\n .PLAYER_SIZE))\n self.screen.blit(img, (x, y - img.get_height()))\n\n def render(self):\n self.screen.fill((255, 255, 255))\n if self.current_player is not None:\n self.render_player(self.current_player, (self.location[0], self\n .location[1]))\n for nickname, (player, location) in self.other_players.items():\n self.render_player(player, location)\n pygame.display.flip()\n\n def event_handling(self):\n for event in pygame.event.get():\n if event.type == QUIT:\n pygame.quit()\n sys.exit()\n if event.type == KEYDOWN:\n if event.key == K_LEFT:\n self.velocity[0] = -c.MOVEMENT_SPEED\n if event.key == K_RIGHT:\n self.velocity[0] = c.MOVEMENT_SPEED\n if event.key == K_UP:\n self.velocity[1] = -c.MOVEMENT_SPEED\n if event.key == K_DOWN:\n self.velocity[1] = c.MOVEMENT_SPEED\n if event.type == KEYUP:\n if event.key == K_LEFT:\n self.velocity[0] = 0\n if event.key == K_RIGHT:\n self.velocity[0] = 0\n if event.key == K_UP:\n self.velocity[1] = 0\n if event.key == K_DOWN:\n self.velocity[1] = 0\n\n\n<mask token>\n",
"step-4": "import pickle\nimport select\nimport socket\nimport sys\nfrom threading import Thread\nfrom typing import Dict, Tuple\nimport pygame\nfrom pygame.locals import *\nimport c\nfrom models import *\n\n\nclass Game:\n location: list[int, int] = [c.WIDTH / 2, c.HEIGHT / 2]\n velocity: list[int, int] = [0, 0]\n current_player: Player = None\n other_players: Dict[str, Tuple[Player, Tuple[int, int]]] = {}\n connection: socket.socket\n font: pygame.font.Font\n\n def __init__(self):\n pygame.init()\n self.connection = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.screen = pygame.display.set_mode((c.WIDTH, c.HEIGHT))\n pygame.display.set_caption('Socket Game')\n self.clock = pygame.time.Clock()\n self.screen.fill('white')\n self.font = pygame.font.SysFont(None, c.FONT_SIZE)\n\n def start(self):\n self.connect_to_server()\n while True:\n self.game_loop()\n\n def connect_to_server(self):\n self.connection.connect((c.HOST, c.PORT))\n\n def listen_to_server(self):\n ins, outs, ex = select.select([self.connection], [], [], 0)\n for inm in ins:\n received_data = inm.recv(c.BUFFSIZE)\n event: Event = pickle.loads(received_data)\n print('<<<', event)\n if isinstance(event, CurrentPlayerEvent):\n pygame.display.set_caption(\n f'Socket Game - {event.player.nickname}')\n self.current_player = event.player\n elif isinstance(event, PlayerDidMoveEvent):\n self.update_player(event.player, event.location)\n elif isinstance(event, PlayerJoinedEvent):\n self.update_player(event.player)\n\n def update_player(self, player: Player, location=(c.WIDTH / 2, c.HEIGHT /\n 2)):\n self.other_players[player.nickname] = player, location\n\n def update_server(self):\n if self.current_player is not None:\n self.connection.send(pickle.dumps(PlayerDidMoveEvent(self.\n current_player, (self.location[0], self.location[1]))))\n\n def game_loop(self):\n self.listen_to_server()\n self.event_handling()\n self.update_location()\n self.render()\n self.update_server()\n self.clock.tick(60)\n\n def update_location(self):\n oldx, oldy = self.location\n vx, vy = self.velocity\n newx, newy = oldx + vx, oldy + vy\n if newx > c.WIDTH - c.PLAYER_SIZE:\n newx = c.WIDTH - c.PLAYER_SIZE\n if newx < 0:\n newx = 0\n if newy > c.HEIGHT - c.PLAYER_SIZE:\n newy = c.HEIGHT - c.PLAYER_SIZE\n if newy < 0:\n newy = 0\n self.location = [newx, newy]\n\n def render_player(self, player: Player, location: Tuple[int, int]):\n x, y = location\n img = self.font.render(player.nickname, True, player.color)\n pygame.draw.rect(self.screen, player.color, (x, y, c.PLAYER_SIZE, c\n .PLAYER_SIZE))\n self.screen.blit(img, (x, y - img.get_height()))\n\n def render(self):\n self.screen.fill((255, 255, 255))\n if self.current_player is not None:\n self.render_player(self.current_player, (self.location[0], self\n .location[1]))\n for nickname, (player, location) in self.other_players.items():\n self.render_player(player, location)\n pygame.display.flip()\n\n def event_handling(self):\n for event in pygame.event.get():\n if event.type == QUIT:\n pygame.quit()\n sys.exit()\n if event.type == KEYDOWN:\n if event.key == K_LEFT:\n self.velocity[0] = -c.MOVEMENT_SPEED\n if event.key == K_RIGHT:\n self.velocity[0] = c.MOVEMENT_SPEED\n if event.key == K_UP:\n self.velocity[1] = -c.MOVEMENT_SPEED\n if event.key == K_DOWN:\n self.velocity[1] = c.MOVEMENT_SPEED\n if event.type == KEYUP:\n if event.key == K_LEFT:\n self.velocity[0] = 0\n if event.key == K_RIGHT:\n self.velocity[0] = 0\n if event.key == K_UP:\n self.velocity[1] = 0\n if event.key == K_DOWN:\n 
self.velocity[1] = 0\n\n\nif __name__ == '__main__':\n s = Game()\n s.start()\n",
"step-5": "import pickle\nimport select\nimport socket\nimport sys\nfrom threading import Thread\nfrom typing import Dict, Tuple\n\nimport pygame\nfrom pygame.locals import *\n\nimport c\nfrom models import *\n\n\nclass Game:\n location: list[int, int] = [c.WIDTH / 2, c.HEIGHT / 2]\n velocity: list[int, int] = [0, 0]\n current_player: Player = None\n other_players: Dict[str, Tuple[Player, Tuple[int, int]]] = {}\n connection: socket.socket\n font: pygame.font.Font\n\n def __init__(self):\n pygame.init()\n self.connection = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.screen = pygame.display.set_mode((c.WIDTH, c.HEIGHT))\n pygame.display.set_caption('Socket Game')\n self.clock = pygame.time.Clock()\n self.screen.fill('white')\n self.font = pygame.font.SysFont(None, c.FONT_SIZE)\n\n def start(self):\n self.connect_to_server()\n while True:\n self.game_loop()\n\n def connect_to_server(self):\n self.connection.connect((c.HOST, c.PORT))\n\n def listen_to_server(self):\n ins, outs, ex = select.select([self.connection], [], [], 0)\n for inm in ins:\n received_data = inm.recv(c.BUFFSIZE)\n event: Event = pickle.loads(received_data)\n print(\"<<<\", event)\n if isinstance(event, CurrentPlayerEvent):\n pygame.display.set_caption(f'Socket Game - {event.player.nickname}')\n self.current_player = event.player\n elif isinstance(event, PlayerDidMoveEvent):\n self.update_player(event.player, event.location)\n elif isinstance(event, PlayerJoinedEvent):\n self.update_player(event.player)\n\n def update_player(self, player: Player, location=(c.WIDTH / 2, c.HEIGHT / 2)):\n self.other_players[player.nickname] = (player, location)\n\n def update_server(self):\n if self.current_player is not None:\n self.connection.send(pickle.dumps(PlayerDidMoveEvent(self.current_player, (\n self.location[0], self.location[1],\n ))))\n\n def game_loop(self):\n self.listen_to_server()\n self.event_handling()\n self.update_location()\n self.render()\n self.update_server()\n self.clock.tick(60)\n\n def update_location(self):\n oldx, oldy = self.location\n vx, vy = self.velocity\n newx, newy = oldx + vx, oldy + vy\n if newx > c.WIDTH - c.PLAYER_SIZE:\n newx = c.WIDTH - c.PLAYER_SIZE\n if newx < 0:\n newx = 0\n\n if newy > c.HEIGHT - c.PLAYER_SIZE:\n newy = c.HEIGHT - c.PLAYER_SIZE\n if newy < 0:\n newy = 0\n\n self.location = [newx, newy]\n\n def render_player(self, player: Player, location: Tuple[int, int]):\n x, y = location\n img = self.font.render(player.nickname, True, player.color)\n pygame.draw.rect(self.screen, player.color, (x, y, c.PLAYER_SIZE, c.PLAYER_SIZE))\n self.screen.blit(img, (x, y - img.get_height()))\n\n def render(self):\n self.screen.fill((255, 255, 255))\n if self.current_player is not None:\n self.render_player(self.current_player, (self.location[0], self.location[1]))\n for nickname, (player, location) in self.other_players.items():\n self.render_player(player, location)\n\n pygame.display.flip()\n\n def event_handling(self):\n for event in pygame.event.get():\n if event.type == QUIT:\n pygame.quit()\n sys.exit()\n if event.type == KEYDOWN:\n if event.key == K_LEFT: self.velocity[0] = -c.MOVEMENT_SPEED\n if event.key == K_RIGHT: self.velocity[0] = c.MOVEMENT_SPEED\n if event.key == K_UP: self.velocity[1] = -c.MOVEMENT_SPEED\n if event.key == K_DOWN: self.velocity[1] = c.MOVEMENT_SPEED\n if event.type == KEYUP:\n if event.key == K_LEFT: self.velocity[0] = 0\n if event.key == K_RIGHT: self.velocity[0] = 0\n if event.key == K_UP: self.velocity[1] = 0\n if event.key == K_DOWN: self.velocity[1] = 
0\n\n\nif __name__ == \"__main__\":\n s = Game()\n s.start()\n",
"step-ids": [
10,
11,
12,
14,
15
]
}
|
[
10,
11,
12,
14,
15
] |
#--------------------------------------------------------------------------------
# G e n e r a l I n f o r m a t i o n
#--------------------------------------------------------------------------------
# Name: Exercise 2.6 - Planetary Orbits
#
# Usage: Calculate information for planetary orbits
#
# Description: Given the perihelion distance and velocity of a body orbiting
# the Sun, calculate the rest of its orbit: the distance and velocity at the
# opposite end of the orbit (aphelion), plus the period and eccentricity.
#
# Inputs: Distance to the Sun at perihelion (L1) and velocity at perihelion (v1).
#
# Outputs: The body's distance to the Sun at aphelion (L2) and its velocity
# there (v2), the orbital period (T) and the orbital eccentricity (e)
#
# Auxiliary Files:
#
# Special Instructions:
#
#--------------------------------------------------------------------------------
# C o d e H i s t o r y
#--------------------------------------------------------------------------------
# Version: 1.0
#
# Author(s): Kole Frazier
#
#--------------------------------------------------------------------------------
#Get user input for Object 1
L1 = float(input('Enter distance to the sun: '))
v1 = float(input('Enter velocity at perihelion: '))
#Constants and necessary values
G = 6.6738*10**-11 #Gravitational Constant
M = 1.9891*10**30 #Mass of the Sun
Pi = 3.14159265358979 #Pi (the original 3.141 introduces a ~0.02% error in T)
#Calculate the velocity (v2) and distance to the Sun (L2) at aphelion.
#Conservation of energy and angular momentum give the quadratic
# v2**2 - (2*G*M/(v1*L1))*v2 - (v1**2 - 2*G*M/L1) = 0,
#whose roots are v1 itself and the aphelion velocity used below.
v2 = (2*G*M)/(v1*L1) - v1
L2 = (L1*v1)/v2
print('v2: {0}\tL2: {1}'.format(v2, L2))
#Calculate T and e using a and b
a = (0.5)*(L1+L2) #Semi-major axis
b = (L1*L2)**(1.0/2.0) #Semi-minor axis
T = (2*Pi*a*b)/(L1*v1) #Orbital period
e = (L2 - L1)/(L2 + L1) #Orbital eccentricity
print('T: {0}\te:{1}'.format(T,e))
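#Sanity check (added; not part of the original submission): with the Earth
#perihelion values L1 = 1.4710e11 m and v1 = 3.0287e4 m/s, the corrected
#formula gives roughly v2 = 2.93e4 m/s, L2 = 1.52e11 m, T = 3.2e7 s (about
#one year) and e = 0.017, which matches Earth's known orbit.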
|
normal
|
{
"blob_id": "83b65b951b06b117c2e85ba348e9b591865c1c2e",
"index": 3145,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint('v2: {0}\\tL2: {1}'.format(v2, L2))\n<mask token>\nprint('T: {0}\\te:{1}'.format(T, e))\n",
"step-3": "L1 = float(input('Enter distance to the sun: '))\nv1 = float(input('Enter velocity at perihelion: '))\nG = 6.6738 * 10 ** -11\nM = 1.9891 * 10 ** 30\nPi = 3.141\nv2 = -v1 ** 2 + 2 * G * M / L1\nL2 = L1 * v1 / v2\nprint('v2: {0}\\tL2: {1}'.format(v2, L2))\na = 0.5 * (L1 + L2)\nb = (L1 * L2) ** (1.0 / 2.0)\nT = 2 * Pi * a * b / (L1 * v1)\ne = (L2 - L1) / (L2 + L1)\nprint('T: {0}\\te:{1}'.format(T, e))\n",
"step-4": "#--------------------------------------------------------------------------------\r\n# G e n e r a l I n f o r m a t i o n\r\n#--------------------------------------------------------------------------------\r\n# Name: Exercise 2.6 - Planetary Orbits\r\n#\r\n# Usage: Calculate information for planetary orbits\r\n#\r\n# Description: Given basic information about an orbiting body, calculate the\r\n# planetary orbit information for said orbiting body and a second object that\r\n# is orbiting around the first body.\r\n#\r\n# Inputs: Distance to the Sun (length) and velocity at perihelion.\r\n#\r\n# Outputs: The second orbiting body's distance to the sun (L2) and velocity (v2)\r\n# of the second body, the Orbital period (T) and the orbital eccentricity (e)\r\n#\r\n# Auxiliary Files:\r\n#\r\n# Special Instructions:\r\n#\r\n#--------------------------------------------------------------------------------\r\n# C o d e H i s t o r y\r\n#--------------------------------------------------------------------------------\r\n# Version: 1.0\r\n#\r\n# Author(s): Kole Frazier\r\n#\r\n#--------------------------------------------------------------------------------\r\n\r\n#Get user input for Object 1\r\nL1 = float(input('Enter distance to the sun: '))\r\nv1 = float(input('Enter velocity at perihelion: '))\r\n\r\n#Constants and necessary values\r\nG = 6.6738*10**-11 #Gravitational Constant\r\nM = 1.9891*10**30 #Mass of the Sun\r\nPi = 3.141 #Pi\r\n\r\n#For Object 2, calculate its velocity (V2) then distance to the Sun (L2)\r\nv2 = -v1**2 + ((2*G*M)/L1) #Pretty sure this isn't right, but I cannot find anything to correct this.\r\nL2 = (L1*v1)/v2\r\nprint('v2: {0}\\tL2: {1}'.format(v2, L2))\r\n\r\n#Calculate T and e using a and b\r\na = (0.5)*(L1+L2) #Semi-major axis\r\nb = (L1*L2)**(1.0/2.0) #Semi-minor axis\r\nT = (2*Pi*a*b)/(L1*v1) #Orbital period\r\ne = (L2 - L1)/(L2 + L1) #Orbital eccentricity\r\n\r\nprint('T: {0}\\te:{1}'.format(T,e))\r\n\r\n\r\n\r\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
def get_timestamp_from_interval(interval_number):
return interval_number * interval_length_minutes * 60
def get_datetime_from_utc_timestamp(utc_timestamp):
return datetime.datetime.utcfromtimestamp(utc_timestamp).replace(tzinfo
=datetime.timezone.utc)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def get_timestamp_from_interval(interval_number):
return interval_number * interval_length_minutes * 60
def get_datetime_from_utc_timestamp(utc_timestamp):
return datetime.datetime.utcfromtimestamp(utc_timestamp).replace(tzinfo
=datetime.timezone.utc)
<|reserved_special_token_0|>
def get_string_from_datetime(date_time):
return date_time.strftime('%Y-%m-%d %H:%M:%S %Z')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def get_timestamp_from_interval(interval_number):
return interval_number * interval_length_minutes * 60
def get_datetime_from_utc_timestamp(utc_timestamp):
return datetime.datetime.utcfromtimestamp(utc_timestamp).replace(tzinfo
=datetime.timezone.utc)
def get_local_datetime(date_time):
return date_time.astimezone(datetime.datetime.utcnow().astimezone().tzinfo)
def get_string_from_datetime(date_time):
return date_time.strftime('%Y-%m-%d %H:%M:%S %Z')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
interval_length_minutes = 10
tek_rolling_period = 144
def get_timestamp_from_interval(interval_number):
return interval_number * interval_length_minutes * 60
def get_datetime_from_utc_timestamp(utc_timestamp):
return datetime.datetime.utcfromtimestamp(utc_timestamp).replace(tzinfo
=datetime.timezone.utc)
def get_local_datetime(date_time):
return date_time.astimezone(datetime.datetime.utcnow().astimezone().tzinfo)
def get_string_from_datetime(date_time):
return date_time.strftime('%Y-%m-%d %H:%M:%S %Z')
<|reserved_special_token_1|>
import datetime
interval_length_minutes = 10 # 10 minutes per interval
tek_rolling_period = 144 # 24*60//10 - 24 hours per day, 60 minutes per hour, 10 minutes per interval
def get_timestamp_from_interval(interval_number):
return interval_number * interval_length_minutes * 60 # 60 seconds per minute
def get_datetime_from_utc_timestamp(utc_timestamp):
return datetime.datetime.utcfromtimestamp(utc_timestamp).replace(tzinfo=datetime.timezone.utc)
def get_local_datetime(date_time):
return date_time.astimezone(datetime.datetime.utcnow().astimezone().tzinfo)
def get_string_from_datetime(date_time):
return date_time.strftime('%Y-%m-%d %H:%M:%S %Z')
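#Illustrative round trip (added; not part of the original module):
#
#   ts = get_timestamp_from_interval(0)        # -> 0
#   dt = get_datetime_from_utc_timestamp(ts)   # -> 1970-01-01 00:00:00+00:00
#   get_string_from_datetime(dt)               # -> '1970-01-01 00:00:00 UTC'
#
#get_local_datetime() converts such an aware datetime into the machine's
#local time zone before formatting.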
|
flexible
|
{
"blob_id": "f3bfa30f51c4a91844457c72fbf2b2b8368d8476",
"index": 1874,
"step-1": "<mask token>\n\n\ndef get_timestamp_from_interval(interval_number):\n return interval_number * interval_length_minutes * 60\n\n\ndef get_datetime_from_utc_timestamp(utc_timestamp):\n return datetime.datetime.utcfromtimestamp(utc_timestamp).replace(tzinfo\n =datetime.timezone.utc)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef get_timestamp_from_interval(interval_number):\n return interval_number * interval_length_minutes * 60\n\n\ndef get_datetime_from_utc_timestamp(utc_timestamp):\n return datetime.datetime.utcfromtimestamp(utc_timestamp).replace(tzinfo\n =datetime.timezone.utc)\n\n\n<mask token>\n\n\ndef get_string_from_datetime(date_time):\n return date_time.strftime('%Y-%m-%d %H:%M:%S %Z')\n",
"step-3": "<mask token>\n\n\ndef get_timestamp_from_interval(interval_number):\n return interval_number * interval_length_minutes * 60\n\n\ndef get_datetime_from_utc_timestamp(utc_timestamp):\n return datetime.datetime.utcfromtimestamp(utc_timestamp).replace(tzinfo\n =datetime.timezone.utc)\n\n\ndef get_local_datetime(date_time):\n return date_time.astimezone(datetime.datetime.utcnow().astimezone().tzinfo)\n\n\ndef get_string_from_datetime(date_time):\n return date_time.strftime('%Y-%m-%d %H:%M:%S %Z')\n",
"step-4": "<mask token>\ninterval_length_minutes = 10\ntek_rolling_period = 144\n\n\ndef get_timestamp_from_interval(interval_number):\n return interval_number * interval_length_minutes * 60\n\n\ndef get_datetime_from_utc_timestamp(utc_timestamp):\n return datetime.datetime.utcfromtimestamp(utc_timestamp).replace(tzinfo\n =datetime.timezone.utc)\n\n\ndef get_local_datetime(date_time):\n return date_time.astimezone(datetime.datetime.utcnow().astimezone().tzinfo)\n\n\ndef get_string_from_datetime(date_time):\n return date_time.strftime('%Y-%m-%d %H:%M:%S %Z')\n",
"step-5": "import datetime\n\ninterval_length_minutes = 10 # 10 minutes per interval\ntek_rolling_period = 144 # 24*60//10 - 24 hours per day, 60 minutes per hour, 10 minutes per interval\n\n\ndef get_timestamp_from_interval(interval_number):\n return interval_number * interval_length_minutes * 60 # 60 seconds per minute\n\n\ndef get_datetime_from_utc_timestamp(utc_timestamp):\n return datetime.datetime.utcfromtimestamp(utc_timestamp).replace(tzinfo=datetime.timezone.utc)\n\n\ndef get_local_datetime(date_time):\n return date_time.astimezone(datetime.datetime.utcnow().astimezone().tzinfo)\n\n\ndef get_string_from_datetime(date_time):\n return date_time.strftime('%Y-%m-%d %H:%M:%S %Z')\n",
"step-ids": [
2,
3,
4,
5,
7
]
}
|
[
2,
3,
4,
5,
7
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
urlpatterns = [url('^porta/list$', porta_list, name='porta_list'), url(
'^porta/detail/(?P<pk>\\d+)$', porta_detail, name='porta_detail'), url(
'^porta/new/$', porta_new, name='porta_new'), url(
'^porta/update/(?P<pk>\\d+)$', porta_update, name='porta_update'), url(
'^porta/delete/(?P<pk>\\d+)$', porta_delete, name='porta_delete'), url(
'^porta/usuarios/(?P<pk>\\d+)$', porta_delete, name='porta_delete'),
url('^grupo/list$', grupo_list, name='grupo_list'), url(
'^grupo/detail/(?P<pk>\\d+)$', grupo_detail, name='grupo_detail'), url(
'^grupo/new/$', grupo_new, name='grupo_new'), url(
'^grupo/update/(?P<pk>\\d+)$', grupo_update, name='grupo_update'), url(
'^grupo/delete/(?P<pk>\\d+)$', grupo_delete, name='grupo_delete'), url(
'^edit/grupo/$', edit_grupo, name='edit_grupo'), url(
'^usuario/acesso/grupo/(?P<pk>\\d+)$', usuario_acesso_grupo, name=
'usuario_acesso_grupo'), url('^usuario/sem_acesso/grupo/(?P<pk>\\d+)$',
usuario_sem_acesso_grupo, name='usuario_sem_acesso_grupo'), url(
'^porta/no_grupo/(?P<pk>\\d+)$', porta_no_grupo, name='porta_no_grupo'),
url('^porta/nao_grupo/(?P<pk>\\d+)$', porta_nao_grupo, name=
'porta_nao_grupo'), url('^portas/$', portas, name='portas'), url(
'^porta/busca/(?P<pk>\\d+)$', busca_porta, name='busca_porta'), url(
'^busca/porta_frequencia/$', busca_porta_frequencia, name=
'busca_frequencia_porta'), url('^frequencia_porta_acesso/$',
frequencia_porta_acesso, name='frequencia_porta_acesso'), url(
'^porta/frequencia_acesso/(?P<pk>\\d+)$', porta_frequencias, name=
'porta_frequencias')]
<|reserved_special_token_1|>
from django.conf.urls import url
from django.contrib.auth.views import login, logout
from appPortas.views import *
urlpatterns = [url('^porta/list$', porta_list, name='porta_list'), url(
'^porta/detail/(?P<pk>\\d+)$', porta_detail, name='porta_detail'), url(
'^porta/new/$', porta_new, name='porta_new'), url(
'^porta/update/(?P<pk>\\d+)$', porta_update, name='porta_update'), url(
'^porta/delete/(?P<pk>\\d+)$', porta_delete, name='porta_delete'), url(
'^porta/usuarios/(?P<pk>\\d+)$', porta_delete, name='porta_delete'),
url('^grupo/list$', grupo_list, name='grupo_list'), url(
'^grupo/detail/(?P<pk>\\d+)$', grupo_detail, name='grupo_detail'), url(
'^grupo/new/$', grupo_new, name='grupo_new'), url(
'^grupo/update/(?P<pk>\\d+)$', grupo_update, name='grupo_update'), url(
'^grupo/delete/(?P<pk>\\d+)$', grupo_delete, name='grupo_delete'), url(
'^edit/grupo/$', edit_grupo, name='edit_grupo'), url(
'^usuario/acesso/grupo/(?P<pk>\\d+)$', usuario_acesso_grupo, name=
'usuario_acesso_grupo'), url('^usuario/sem_acesso/grupo/(?P<pk>\\d+)$',
usuario_sem_acesso_grupo, name='usuario_sem_acesso_grupo'), url(
'^porta/no_grupo/(?P<pk>\\d+)$', porta_no_grupo, name='porta_no_grupo'),
url('^porta/nao_grupo/(?P<pk>\\d+)$', porta_nao_grupo, name=
'porta_nao_grupo'), url('^portas/$', portas, name='portas'), url(
'^porta/busca/(?P<pk>\\d+)$', busca_porta, name='busca_porta'), url(
'^busca/porta_frequencia/$', busca_porta_frequencia, name=
'busca_frequencia_porta'), url('^frequencia_porta_acesso/$',
frequencia_porta_acesso, name='frequencia_porta_acesso'), url(
'^porta/frequencia_acesso/(?P<pk>\\d+)$', porta_frequencias, name=
'porta_frequencias')]
<|reserved_special_token_1|>
from django.conf.urls import url
from django.contrib.auth.views import login,logout
from appPortas.views import *
urlpatterns = [
url(r'^porta/list$', porta_list, name='porta_list'),
url(r'^porta/detail/(?P<pk>\d+)$',porta_detail, name='porta_detail'),
url(r'^porta/new/$', porta_new, name='porta_new'),
url(r'^porta/update/(?P<pk>\d+)$',porta_update, name='porta_update'),
url(r'^porta/delete/(?P<pk>\d+)$',porta_delete, name='porta_delete'),
url(r'^porta/usuarios/(?P<pk>\d+)$', porta_delete, name='porta_delete'),
url(r'^grupo/list$', grupo_list, name='grupo_list'),
url(r'^grupo/detail/(?P<pk>\d+)$',grupo_detail, name='grupo_detail'),
url(r'^grupo/new/$', grupo_new, name='grupo_new'),
url(r'^grupo/update/(?P<pk>\d+)$',grupo_update, name='grupo_update'),
url(r'^grupo/delete/(?P<pk>\d+)$',grupo_delete, name='grupo_delete'),
url(r'^edit/grupo/$', edit_grupo, name='edit_grupo'),
url(r'^usuario/acesso/grupo/(?P<pk>\d+)$', usuario_acesso_grupo, name='usuario_acesso_grupo'),
url(r'^usuario/sem_acesso/grupo/(?P<pk>\d+)$', usuario_sem_acesso_grupo, name='usuario_sem_acesso_grupo'),
url(r'^porta/no_grupo/(?P<pk>\d+)$', porta_no_grupo, name='porta_no_grupo'),
url(r'^porta/nao_grupo/(?P<pk>\d+)$', porta_nao_grupo, name='porta_nao_grupo'),
url(r'^portas/$', portas, name='portas'),
url(r'^porta/busca/(?P<pk>\d+)$', busca_porta, name='busca_porta'),
url(r'^busca/porta_frequencia/$', busca_porta_frequencia, name='busca_frequencia_porta'),
url(r'^frequencia_porta_acesso/$', frequencia_porta_acesso, name='frequencia_porta_acesso'),
url(r'^porta/frequencia_acesso/(?P<pk>\d+)$', porta_frequencias, name='porta_frequencias'),
]
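# Usage note (added; not part of the original urlconf): once these patterns
# are included in the project's root urlconf, the route names resolve with
# django.urls.reverse, e.g. reverse('porta_detail', args=[1]) yields
# '/porta/detail/1' relative to wherever this module is mounted.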
|
flexible
|
{
"blob_id": "5e355732f07029aa644617ac9b5e9ad50ee9397f",
"index": 1161,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nurlpatterns = [url('^porta/list$', porta_list, name='porta_list'), url(\n '^porta/detail/(?P<pk>\\\\d+)$', porta_detail, name='porta_detail'), url(\n '^porta/new/$', porta_new, name='porta_new'), url(\n '^porta/update/(?P<pk>\\\\d+)$', porta_update, name='porta_update'), url(\n '^porta/delete/(?P<pk>\\\\d+)$', porta_delete, name='porta_delete'), url(\n '^porta/usuarios/(?P<pk>\\\\d+)$', porta_delete, name='porta_delete'),\n url('^grupo/list$', grupo_list, name='grupo_list'), url(\n '^grupo/detail/(?P<pk>\\\\d+)$', grupo_detail, name='grupo_detail'), url(\n '^grupo/new/$', grupo_new, name='grupo_new'), url(\n '^grupo/update/(?P<pk>\\\\d+)$', grupo_update, name='grupo_update'), url(\n '^grupo/delete/(?P<pk>\\\\d+)$', grupo_delete, name='grupo_delete'), url(\n '^edit/grupo/$', edit_grupo, name='edit_grupo'), url(\n '^usuario/acesso/grupo/(?P<pk>\\\\d+)$', usuario_acesso_grupo, name=\n 'usuario_acesso_grupo'), url('^usuario/sem_acesso/grupo/(?P<pk>\\\\d+)$',\n usuario_sem_acesso_grupo, name='usuario_sem_acesso_grupo'), url(\n '^porta/no_grupo/(?P<pk>\\\\d+)$', porta_no_grupo, name='porta_no_grupo'),\n url('^porta/nao_grupo/(?P<pk>\\\\d+)$', porta_nao_grupo, name=\n 'porta_nao_grupo'), url('^portas/$', portas, name='portas'), url(\n '^porta/busca/(?P<pk>\\\\d+)$', busca_porta, name='busca_porta'), url(\n '^busca/porta_frequencia/$', busca_porta_frequencia, name=\n 'busca_frequencia_porta'), url('^frequencia_porta_acesso/$',\n frequencia_porta_acesso, name='frequencia_porta_acesso'), url(\n '^porta/frequencia_acesso/(?P<pk>\\\\d+)$', porta_frequencias, name=\n 'porta_frequencias')]\n",
"step-3": "from django.conf.urls import url\nfrom django.contrib.auth.views import login, logout\nfrom appPortas.views import *\nurlpatterns = [url('^porta/list$', porta_list, name='porta_list'), url(\n '^porta/detail/(?P<pk>\\\\d+)$', porta_detail, name='porta_detail'), url(\n '^porta/new/$', porta_new, name='porta_new'), url(\n '^porta/update/(?P<pk>\\\\d+)$', porta_update, name='porta_update'), url(\n '^porta/delete/(?P<pk>\\\\d+)$', porta_delete, name='porta_delete'), url(\n '^porta/usuarios/(?P<pk>\\\\d+)$', porta_delete, name='porta_delete'),\n url('^grupo/list$', grupo_list, name='grupo_list'), url(\n '^grupo/detail/(?P<pk>\\\\d+)$', grupo_detail, name='grupo_detail'), url(\n '^grupo/new/$', grupo_new, name='grupo_new'), url(\n '^grupo/update/(?P<pk>\\\\d+)$', grupo_update, name='grupo_update'), url(\n '^grupo/delete/(?P<pk>\\\\d+)$', grupo_delete, name='grupo_delete'), url(\n '^edit/grupo/$', edit_grupo, name='edit_grupo'), url(\n '^usuario/acesso/grupo/(?P<pk>\\\\d+)$', usuario_acesso_grupo, name=\n 'usuario_acesso_grupo'), url('^usuario/sem_acesso/grupo/(?P<pk>\\\\d+)$',\n usuario_sem_acesso_grupo, name='usuario_sem_acesso_grupo'), url(\n '^porta/no_grupo/(?P<pk>\\\\d+)$', porta_no_grupo, name='porta_no_grupo'),\n url('^porta/nao_grupo/(?P<pk>\\\\d+)$', porta_nao_grupo, name=\n 'porta_nao_grupo'), url('^portas/$', portas, name='portas'), url(\n '^porta/busca/(?P<pk>\\\\d+)$', busca_porta, name='busca_porta'), url(\n '^busca/porta_frequencia/$', busca_porta_frequencia, name=\n 'busca_frequencia_porta'), url('^frequencia_porta_acesso/$',\n frequencia_porta_acesso, name='frequencia_porta_acesso'), url(\n '^porta/frequencia_acesso/(?P<pk>\\\\d+)$', porta_frequencias, name=\n 'porta_frequencias')]\n",
"step-4": "from django.conf.urls import url\nfrom django.contrib.auth.views import login,logout\n\nfrom appPortas.views import *\n\nurlpatterns = [\n url(r'^porta/list$', porta_list, name='porta_list'),\n url(r'^porta/detail/(?P<pk>\\d+)$',porta_detail, name='porta_detail'),\n url(r'^porta/new/$', porta_new, name='porta_new'),\n url(r'^porta/update/(?P<pk>\\d+)$',porta_update, name='porta_update'),\n url(r'^porta/delete/(?P<pk>\\d+)$',porta_delete, name='porta_delete'),\n url(r'^porta/usuarios/(?P<pk>\\d+)$', porta_delete, name='porta_delete'),\n\n url(r'^grupo/list$', grupo_list, name='grupo_list'),\n url(r'^grupo/detail/(?P<pk>\\d+)$',grupo_detail, name='grupo_detail'),\n url(r'^grupo/new/$', grupo_new, name='grupo_new'),\n url(r'^grupo/update/(?P<pk>\\d+)$',grupo_update, name='grupo_update'),\n url(r'^grupo/delete/(?P<pk>\\d+)$',grupo_delete, name='grupo_delete'),\n\n url(r'^edit/grupo/$', edit_grupo, name='edit_grupo'),\n\n url(r'^usuario/acesso/grupo/(?P<pk>\\d+)$', usuario_acesso_grupo, name='usuario_acesso_grupo'),\n url(r'^usuario/sem_acesso/grupo/(?P<pk>\\d+)$', usuario_sem_acesso_grupo, name='usuario_sem_acesso_grupo'),\n\n url(r'^porta/no_grupo/(?P<pk>\\d+)$', porta_no_grupo, name='porta_no_grupo'),\n url(r'^porta/nao_grupo/(?P<pk>\\d+)$', porta_nao_grupo, name='porta_nao_grupo'),\n\n url(r'^portas/$', portas, name='portas'),\n url(r'^porta/busca/(?P<pk>\\d+)$', busca_porta, name='busca_porta'),\n url(r'^busca/porta_frequencia/$', busca_porta_frequencia, name='busca_frequencia_porta'),\n url(r'^frequencia_porta_acesso/$', frequencia_porta_acesso, name='frequencia_porta_acesso'),\n url(r'^porta/frequencia_acesso/(?P<pk>\\d+)$', porta_frequencias, name='porta_frequencias'),\n\n]\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import sys
a = 3
b = 4
c = 5.66
d = 8.0
e = complex(c,d)
f = complex(float(a),float(b))
print("a is type:",type(a))
print("c is type:",type(c))
print("e is type:",type(e))
print(a + b)
print(d / c)
print(b / a)
#floor division of the two ints truncates the result to an int
print(b // a)
print(e)
print(e + f)
print(sys.float_info)
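#Expected output (added for reference; values follow from the fixed inputs):
# a is type: <class 'int'>
# c is type: <class 'float'>
# e is type: <class 'complex'>
# 7
# 1.41342756... (8.0 / 5.66)
# 1.3333333333333333
# 1
# (5.66+8j)
# (8.66+12j)
#plus the platform's sys.float_info record, e.g. max=1.7976931348623157e+308.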
|
normal
|
{
"blob_id": "2876c9f8db0395143b165b855b22e364e3cc8121",
"index": 9008,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint('a is type:', type(a))\nprint('c is type:', type(c))\nprint('e is type:', type(e))\nprint(a + b)\nprint(d / c)\nprint(b / a)\nprint(b // a)\nprint(e)\nprint(e + f)\nprint(sys.float_info)\n",
"step-3": "<mask token>\na = 3\nb = 4\nc = 5.66\nd = 8.0\ne = complex(c, d)\nf = complex(float(a), float(b))\nprint('a is type:', type(a))\nprint('c is type:', type(c))\nprint('e is type:', type(e))\nprint(a + b)\nprint(d / c)\nprint(b / a)\nprint(b // a)\nprint(e)\nprint(e + f)\nprint(sys.float_info)\n",
"step-4": "import sys\na = 3\nb = 4\nc = 5.66\nd = 8.0\ne = complex(c, d)\nf = complex(float(a), float(b))\nprint('a is type:', type(a))\nprint('c is type:', type(c))\nprint('e is type:', type(e))\nprint(a + b)\nprint(d / c)\nprint(b / a)\nprint(b // a)\nprint(e)\nprint(e + f)\nprint(sys.float_info)\n",
"step-5": "import sys\n\na = 3\nb = 4\n\nc = 5.66\nd = 8.0\n\ne = complex(c,d)\nf = complex(float(a),float(b))\n\nprint(\"a is type:\",type(a))\nprint(\"c is type:\",type(c))\nprint(\"e is type:\",type(e))\n\nprint(a + b)\nprint(d / c)\n\nprint(b / a)\n#2个除约成整型\nprint(b // a)\n\nprint(e)\nprint(e + f)\n\nprint(sys.float_info)",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def test_body() ->None:
for func in (weight, shower, food, water):
assert ilen(func()) >= 1
<|reserved_special_token_1|>
from more_itertools import ilen
from my.body import weight, shower, food, water
def test_body() ->None:
for func in (weight, shower, food, water):
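        # ilen() from more_itertools exhausts the iterator and returns the
        # number of items, so this asserts each provider yields at least one.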
assert ilen(func()) >= 1
|
flexible
|
{
"blob_id": "e06b740f27e41b9f120c962fd76a38a29d54af3c",
"index": 973,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef test_body() ->None:\n for func in (weight, shower, food, water):\n assert ilen(func()) >= 1\n",
"step-3": "from more_itertools import ilen\nfrom my.body import weight, shower, food, water\n\n\ndef test_body() ->None:\n for func in (weight, shower, food, water):\n assert ilen(func()) >= 1\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
from models import Cell,Board
import random
from pdb import set_trace as bp
status={'end':-1}
game=None
class Game_Service(object):
def __init__(self,row_num,col_num):
self._row_num=row_num
self._col_num=col_num
mine_percent=0.3
self._mine_num=int(mine_percent*float(self._row_num*self._col_num))
self.shifts=[-1,0,1]
def generate_map(self):
""" generate mine map
"""
global game
game=Board(self._row_num,self._col_num)
s=set([])
        while len(s) < self._mine_num: #'<' so exactly _mine_num mines are placed ('<=' placed one extra)
i=random.randint(0, self._row_num*self._col_num-1)
if i not in s:
self._set_mine(i)
s.add(i)
return {#'board':[game.get_board()[inx].get_neighbor() for inx in range(0,self._row_num*self._col_num)],
#'mines':game.get_mines(),
'row_num':self._row_num,
'col_num':self._col_num}
def _set_mine(self,index):
""" set cell[index] as a mine
and update its neighbor cell's mine number
"""
game.get_cell(index).set_mine() #set current index as mine
game.add_mine(index) #add index to mine_index
# add its neighbor's neighbor_num
        temp_r=index//self._col_num #floor division keeps the row index an int on Python 3
temp_c=index%self._col_num
shift=[[temp_r+dr,temp_c+dc] for dr in self.shifts for dc in self.shifts
if [temp_r+dr,temp_c+dc]!=[temp_r,temp_c]
and temp_r+dr in range(0,self._row_num)
and temp_c+dc in range(0,self._col_num)]
for s in shift:
game.get_cell(s[0]*self._col_num+s[1]).add_neighbor()
def choose_mine(self,index):
""" choose a cell
return game status and cells need to change
"""
cell=game.get_cell(index)
update_stack={'type':'continue'}
if cell.isMine():
self._flipAll(update_stack) #clicked on a mine
else:
self._flip(update_stack,index) #clicked on a safe cell
return update_stack
def _flip(self,update_stack,index):
""" flip the chosen cell and its adjcent cells
"""
cell=game.get_cell(index)
if cell.ifFlipped()==False:
cell.flip()
game.decrease_remain()
if cell.isMine()==False and cell.get_neighbor()>0:
update_stack[str(index)]=cell.get_neighbor()
return
elif cell.isMine()==False and cell.get_neighbor()==0:
update_stack[str(index)]=cell.get_neighbor()
            temp_r=index//self._col_num #floor division, as in _set_mine
temp_c=index%self._col_num
shift=[[temp_r+dr,temp_c+dc] for dr in self.shifts for dc in self.shifts
if [temp_r+dr,temp_c+dc]!=[temp_r,temp_c]
and temp_r+dr in range(0,self._row_num)
and temp_c+dc in range(0,self._col_num)]
for s in shift:
self._flip(update_stack,s[0]*self._col_num+s[1])
def _flipAll(self,update_stack):
""" flip all mines
"""
mines_index=game.get_mines()
for i in mines_index:
update_stack[str(i)]=status['end']
update_stack['row_num']=self._row_num
update_stack['col_num']=self._col_num
update_stack['_mine_num']=len(mines_index)
if len(mines_index)==game.get_remain():
update_stack['type']='win'
else:
update_stack['type']='lose'
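#Minimal usage sketch (added; assumes models.Board and models.Cell implement
#the methods used above):
#
# service = Game_Service(9, 9)
# print(service.generate_map())   # {'row_num': 9, 'col_num': 9}
# result = service.choose_mine(0) # flip the top-left cell
# print(result['type'])           # 'continue', 'win' or 'lose'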
|
normal
|
{
"blob_id": "4af72cab6444922ca66641a08d45bcfe5a689844",
"index": 6763,
"step-1": "<mask token>\n\n\nclass Game_Service(object):\n\n def __init__(self, row_num, col_num):\n self._row_num = row_num\n self._col_num = col_num\n mine_percent = 0.3\n self._mine_num = int(mine_percent * float(self._row_num * self.\n _col_num))\n self.shifts = [-1, 0, 1]\n\n def generate_map(self):\n \"\"\" generate mine map\n \"\"\"\n global game\n game = Board(self._row_num, self._col_num)\n s = set([])\n while len(s) <= self._mine_num:\n i = random.randint(0, self._row_num * self._col_num - 1)\n if i not in s:\n self._set_mine(i)\n s.add(i)\n return {'row_num': self._row_num, 'col_num': self._col_num}\n <mask token>\n <mask token>\n\n def _flip(self, update_stack, index):\n \"\"\" flip the chosen cell and its adjcent cells\n \"\"\"\n cell = game.get_cell(index)\n if cell.ifFlipped() == False:\n cell.flip()\n game.decrease_remain()\n if cell.isMine() == False and cell.get_neighbor() > 0:\n update_stack[str(index)] = cell.get_neighbor()\n return\n elif cell.isMine() == False and cell.get_neighbor() == 0:\n update_stack[str(index)] = cell.get_neighbor()\n temp_r = index / self._col_num\n temp_c = index % self._col_num\n shift = [[temp_r + dr, temp_c + dc] for dr in self.shifts for\n dc in self.shifts if [temp_r + dr, temp_c + dc] != [\n temp_r, temp_c] and temp_r + dr in range(0, self.\n _row_num) and temp_c + dc in range(0, self._col_num)]\n for s in shift:\n self._flip(update_stack, s[0] * self._col_num + s[1])\n\n def _flipAll(self, update_stack):\n \"\"\" flip all mines\n \"\"\"\n mines_index = game.get_mines()\n for i in mines_index:\n update_stack[str(i)] = status['end']\n update_stack['row_num'] = self._row_num\n update_stack['col_num'] = self._col_num\n update_stack['_mine_num'] = len(mines_index)\n if len(mines_index) == game.get_remain():\n update_stack['type'] = 'win'\n else:\n update_stack['type'] = 'lose'\n",
"step-2": "<mask token>\n\n\nclass Game_Service(object):\n\n def __init__(self, row_num, col_num):\n self._row_num = row_num\n self._col_num = col_num\n mine_percent = 0.3\n self._mine_num = int(mine_percent * float(self._row_num * self.\n _col_num))\n self.shifts = [-1, 0, 1]\n\n def generate_map(self):\n \"\"\" generate mine map\n \"\"\"\n global game\n game = Board(self._row_num, self._col_num)\n s = set([])\n while len(s) <= self._mine_num:\n i = random.randint(0, self._row_num * self._col_num - 1)\n if i not in s:\n self._set_mine(i)\n s.add(i)\n return {'row_num': self._row_num, 'col_num': self._col_num}\n\n def _set_mine(self, index):\n \"\"\" set cell[index] as a mine\n and update its neighbor cell's mine number\n \"\"\"\n game.get_cell(index).set_mine()\n game.add_mine(index)\n temp_r = index / self._col_num\n temp_c = index % self._col_num\n shift = [[temp_r + dr, temp_c + dc] for dr in self.shifts for dc in\n self.shifts if [temp_r + dr, temp_c + dc] != [temp_r, temp_c] and\n temp_r + dr in range(0, self._row_num) and temp_c + dc in range\n (0, self._col_num)]\n for s in shift:\n game.get_cell(s[0] * self._col_num + s[1]).add_neighbor()\n <mask token>\n\n def _flip(self, update_stack, index):\n \"\"\" flip the chosen cell and its adjcent cells\n \"\"\"\n cell = game.get_cell(index)\n if cell.ifFlipped() == False:\n cell.flip()\n game.decrease_remain()\n if cell.isMine() == False and cell.get_neighbor() > 0:\n update_stack[str(index)] = cell.get_neighbor()\n return\n elif cell.isMine() == False and cell.get_neighbor() == 0:\n update_stack[str(index)] = cell.get_neighbor()\n temp_r = index / self._col_num\n temp_c = index % self._col_num\n shift = [[temp_r + dr, temp_c + dc] for dr in self.shifts for\n dc in self.shifts if [temp_r + dr, temp_c + dc] != [\n temp_r, temp_c] and temp_r + dr in range(0, self.\n _row_num) and temp_c + dc in range(0, self._col_num)]\n for s in shift:\n self._flip(update_stack, s[0] * self._col_num + s[1])\n\n def _flipAll(self, update_stack):\n \"\"\" flip all mines\n \"\"\"\n mines_index = game.get_mines()\n for i in mines_index:\n update_stack[str(i)] = status['end']\n update_stack['row_num'] = self._row_num\n update_stack['col_num'] = self._col_num\n update_stack['_mine_num'] = len(mines_index)\n if len(mines_index) == game.get_remain():\n update_stack['type'] = 'win'\n else:\n update_stack['type'] = 'lose'\n",
"step-3": "<mask token>\n\n\nclass Game_Service(object):\n\n def __init__(self, row_num, col_num):\n self._row_num = row_num\n self._col_num = col_num\n mine_percent = 0.3\n self._mine_num = int(mine_percent * float(self._row_num * self.\n _col_num))\n self.shifts = [-1, 0, 1]\n\n def generate_map(self):\n \"\"\" generate mine map\n \"\"\"\n global game\n game = Board(self._row_num, self._col_num)\n s = set([])\n while len(s) <= self._mine_num:\n i = random.randint(0, self._row_num * self._col_num - 1)\n if i not in s:\n self._set_mine(i)\n s.add(i)\n return {'row_num': self._row_num, 'col_num': self._col_num}\n\n def _set_mine(self, index):\n \"\"\" set cell[index] as a mine\n and update its neighbor cell's mine number\n \"\"\"\n game.get_cell(index).set_mine()\n game.add_mine(index)\n temp_r = index / self._col_num\n temp_c = index % self._col_num\n shift = [[temp_r + dr, temp_c + dc] for dr in self.shifts for dc in\n self.shifts if [temp_r + dr, temp_c + dc] != [temp_r, temp_c] and\n temp_r + dr in range(0, self._row_num) and temp_c + dc in range\n (0, self._col_num)]\n for s in shift:\n game.get_cell(s[0] * self._col_num + s[1]).add_neighbor()\n\n def choose_mine(self, index):\n \"\"\" choose a cell\n return game status and cells need to change\n \"\"\"\n cell = game.get_cell(index)\n update_stack = {'type': 'continue'}\n if cell.isMine():\n self._flipAll(update_stack)\n else:\n self._flip(update_stack, index)\n return update_stack\n\n def _flip(self, update_stack, index):\n \"\"\" flip the chosen cell and its adjcent cells\n \"\"\"\n cell = game.get_cell(index)\n if cell.ifFlipped() == False:\n cell.flip()\n game.decrease_remain()\n if cell.isMine() == False and cell.get_neighbor() > 0:\n update_stack[str(index)] = cell.get_neighbor()\n return\n elif cell.isMine() == False and cell.get_neighbor() == 0:\n update_stack[str(index)] = cell.get_neighbor()\n temp_r = index / self._col_num\n temp_c = index % self._col_num\n shift = [[temp_r + dr, temp_c + dc] for dr in self.shifts for\n dc in self.shifts if [temp_r + dr, temp_c + dc] != [\n temp_r, temp_c] and temp_r + dr in range(0, self.\n _row_num) and temp_c + dc in range(0, self._col_num)]\n for s in shift:\n self._flip(update_stack, s[0] * self._col_num + s[1])\n\n def _flipAll(self, update_stack):\n \"\"\" flip all mines\n \"\"\"\n mines_index = game.get_mines()\n for i in mines_index:\n update_stack[str(i)] = status['end']\n update_stack['row_num'] = self._row_num\n update_stack['col_num'] = self._col_num\n update_stack['_mine_num'] = len(mines_index)\n if len(mines_index) == game.get_remain():\n update_stack['type'] = 'win'\n else:\n update_stack['type'] = 'lose'\n",
"step-4": "from models import Cell, Board\nimport random\nfrom pdb import set_trace as bp\nstatus = {'end': -1}\ngame = None\n\n\nclass Game_Service(object):\n\n def __init__(self, row_num, col_num):\n self._row_num = row_num\n self._col_num = col_num\n mine_percent = 0.3\n self._mine_num = int(mine_percent * float(self._row_num * self.\n _col_num))\n self.shifts = [-1, 0, 1]\n\n def generate_map(self):\n \"\"\" generate mine map\n \"\"\"\n global game\n game = Board(self._row_num, self._col_num)\n s = set([])\n while len(s) <= self._mine_num:\n i = random.randint(0, self._row_num * self._col_num - 1)\n if i not in s:\n self._set_mine(i)\n s.add(i)\n return {'row_num': self._row_num, 'col_num': self._col_num}\n\n def _set_mine(self, index):\n \"\"\" set cell[index] as a mine\n and update its neighbor cell's mine number\n \"\"\"\n game.get_cell(index).set_mine()\n game.add_mine(index)\n temp_r = index / self._col_num\n temp_c = index % self._col_num\n shift = [[temp_r + dr, temp_c + dc] for dr in self.shifts for dc in\n self.shifts if [temp_r + dr, temp_c + dc] != [temp_r, temp_c] and\n temp_r + dr in range(0, self._row_num) and temp_c + dc in range\n (0, self._col_num)]\n for s in shift:\n game.get_cell(s[0] * self._col_num + s[1]).add_neighbor()\n\n def choose_mine(self, index):\n \"\"\" choose a cell\n return game status and cells need to change\n \"\"\"\n cell = game.get_cell(index)\n update_stack = {'type': 'continue'}\n if cell.isMine():\n self._flipAll(update_stack)\n else:\n self._flip(update_stack, index)\n return update_stack\n\n def _flip(self, update_stack, index):\n \"\"\" flip the chosen cell and its adjcent cells\n \"\"\"\n cell = game.get_cell(index)\n if cell.ifFlipped() == False:\n cell.flip()\n game.decrease_remain()\n if cell.isMine() == False and cell.get_neighbor() > 0:\n update_stack[str(index)] = cell.get_neighbor()\n return\n elif cell.isMine() == False and cell.get_neighbor() == 0:\n update_stack[str(index)] = cell.get_neighbor()\n temp_r = index / self._col_num\n temp_c = index % self._col_num\n shift = [[temp_r + dr, temp_c + dc] for dr in self.shifts for\n dc in self.shifts if [temp_r + dr, temp_c + dc] != [\n temp_r, temp_c] and temp_r + dr in range(0, self.\n _row_num) and temp_c + dc in range(0, self._col_num)]\n for s in shift:\n self._flip(update_stack, s[0] * self._col_num + s[1])\n\n def _flipAll(self, update_stack):\n \"\"\" flip all mines\n \"\"\"\n mines_index = game.get_mines()\n for i in mines_index:\n update_stack[str(i)] = status['end']\n update_stack['row_num'] = self._row_num\n update_stack['col_num'] = self._col_num\n update_stack['_mine_num'] = len(mines_index)\n if len(mines_index) == game.get_remain():\n update_stack['type'] = 'win'\n else:\n update_stack['type'] = 'lose'\n",
"step-5": "\nfrom models import Cell,Board\nimport random\nfrom pdb import set_trace as bp\n\n\nstatus={'end':-1}\ngame=None\n\nclass Game_Service(object):\n\n def __init__(self,row_num,col_num):\n self._row_num=row_num\n self._col_num=col_num\n mine_percent=0.3\n self._mine_num=int(mine_percent*float(self._row_num*self._col_num))\n self.shifts=[-1,0,1]\n \n \n def generate_map(self):\n \"\"\" generate mine map\n \"\"\"\n global game\n game=Board(self._row_num,self._col_num)\n s=set([])\n while len(s)<=self._mine_num:\n i=random.randint(0, self._row_num*self._col_num-1)\n if i not in s:\n self._set_mine(i)\n s.add(i) \n return {#'board':[game.get_board()[inx].get_neighbor() for inx in range(0,self._row_num*self._col_num)],\n #'mines':game.get_mines(),\n 'row_num':self._row_num,\n 'col_num':self._col_num}\n \n\n def _set_mine(self,index):\n \"\"\" set cell[index] as a mine\n and update its neighbor cell's mine number\n \"\"\"\n game.get_cell(index).set_mine() #set current index as mine\n game.add_mine(index) #add index to mine_index\n\n # add its neighbor's neighbor_num \n temp_r=index/self._col_num\n temp_c=index%self._col_num\n shift=[[temp_r+dr,temp_c+dc] for dr in self.shifts for dc in self.shifts\n if [temp_r+dr,temp_c+dc]!=[temp_r,temp_c]\n and temp_r+dr in range(0,self._row_num)\n and temp_c+dc in range(0,self._col_num)]\n for s in shift:\n game.get_cell(s[0]*self._col_num+s[1]).add_neighbor()\n \n\n def choose_mine(self,index):\n \"\"\" choose a cell\n return game status and cells need to change\n \"\"\"\n cell=game.get_cell(index)\n update_stack={'type':'continue'}\n \n if cell.isMine():\n self._flipAll(update_stack) #clicked on a mine\n else:\n self._flip(update_stack,index) #clicked on a safe cell\n\n return update_stack\n \n\n def _flip(self,update_stack,index):\n \"\"\" flip the chosen cell and its adjcent cells\n \"\"\"\n cell=game.get_cell(index)\n if cell.ifFlipped()==False:\n cell.flip()\n game.decrease_remain()\n if cell.isMine()==False and cell.get_neighbor()>0:\n update_stack[str(index)]=cell.get_neighbor()\n return\n elif cell.isMine()==False and cell.get_neighbor()==0:\n update_stack[str(index)]=cell.get_neighbor()\n temp_r=index/self._col_num\n temp_c=index%self._col_num\n shift=[[temp_r+dr,temp_c+dc] for dr in self.shifts for dc in self.shifts\n if [temp_r+dr,temp_c+dc]!=[temp_r,temp_c]\n and temp_r+dr in range(0,self._row_num)\n and temp_c+dc in range(0,self._col_num)]\n for s in shift:\n self._flip(update_stack,s[0]*self._col_num+s[1])\n \n\n def _flipAll(self,update_stack):\n \"\"\" flip all mines\n \"\"\"\n mines_index=game.get_mines()\n for i in mines_index:\n update_stack[str(i)]=status['end']\n\n update_stack['row_num']=self._row_num\n update_stack['col_num']=self._col_num\n update_stack['_mine_num']=len(mines_index) \n if len(mines_index)==game.get_remain(): \n update_stack['type']='win' \n else:\n update_stack['type']='lose'\n \n \n \n \n \n",
"step-ids": [
5,
6,
7,
9,
10
]
}
|
[
5,
6,
7,
9,
10
] |
<|reserved_special_token_0|>
class Place(models.Model):
name = models.CharField(max_length=50)
address = models.CharField(max_length=80)
def __str__(self):
return self.name
class Restaurant(models.Model):
place = models.OneToOneField(Place, on_delete=models.CASCADE,
primary_key=True)
serves_hot_dogs = models.BooleanField(default=False)
serves_pizza = models.BooleanField(default=False)
def __str__(self):
return self.place
class Publication(models.Model):
title = models.CharField(max_length=30)
class Article(models.Model):
headline = models.CharField(max_length=100)
publications = models.ManyToManyField(Publication)
class Meta:
ordering = 'headline',
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Book(models.Model):
CHOISE_GENRE = ('comedy', 'Comedy'), ('tragedy', 'Tragedy'), ('drama',
'Drama')
author = models.ForeignKey(Author, on_delete=models.CASCADE)
title = models.CharField(max_length=50)
text = models.TextField(max_length=1000)
genre = models.CharField(max_length=50, choices=CHOISE_GENRE)
class Place(models.Model):
name = models.CharField(max_length=50)
address = models.CharField(max_length=80)
def __str__(self):
return self.name
class Restaurant(models.Model):
place = models.OneToOneField(Place, on_delete=models.CASCADE,
primary_key=True)
serves_hot_dogs = models.BooleanField(default=False)
serves_pizza = models.BooleanField(default=False)
def __str__(self):
return self.place
class Publication(models.Model):
title = models.CharField(max_length=30)
class Article(models.Model):
headline = models.CharField(max_length=100)
publications = models.ManyToManyField(Publication)
class Meta:
ordering = 'headline',
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Example(models.Model):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class Author(models.Model):
name = models.CharField(max_length=50, verbose_name='Имя', blank=True)
surname = models.CharField(max_length=50, verbose_name='Фамилия')
date_birth = models.DateField(auto_now=False, verbose_name='Дата рождения')
def __str__(self):
return self.name + ' ' + self.surname
class Book(models.Model):
CHOISE_GENRE = ('comedy', 'Comedy'), ('tragedy', 'Tragedy'), ('drama',
'Drama')
author = models.ForeignKey(Author, on_delete=models.CASCADE)
title = models.CharField(max_length=50)
text = models.TextField(max_length=1000)
genre = models.CharField(max_length=50, choices=CHOISE_GENRE)
class Place(models.Model):
name = models.CharField(max_length=50)
address = models.CharField(max_length=80)
def __str__(self):
return self.name
class Restaurant(models.Model):
place = models.OneToOneField(Place, on_delete=models.CASCADE,
primary_key=True)
serves_hot_dogs = models.BooleanField(default=False)
serves_pizza = models.BooleanField(default=False)
def __str__(self):
return self.place
class Publication(models.Model):
title = models.CharField(max_length=30)
class Article(models.Model):
headline = models.CharField(max_length=100)
publications = models.ManyToManyField(Publication)
class Meta:
ordering = 'headline',
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Example(models.Model):
integer_field = models.IntegerField()
positive_field = models.PositiveIntegerField()
positive_small_field = models.PositiveSmallIntegerField()
big_integer_field = models.BigIntegerField()
float_field = models.FloatField()
binary_field = models.BinaryField()
boolean_field = models.BooleanField()
char_field = models.CharField(max_length=5)
text_field = models.TextField(max_length=20)
date_field = models.DateField(auto_now=False, auto_now_add=False)
date_time_field = models.DateTimeField(auto_now_add=False)
decimal_field = models.DecimalField(max_digits=8, decimal_places=2)
email = models.EmailField()
file_field = models.FileField(upload_to='file')
image_field = models.ImageField(upload_to='images')
class Author(models.Model):
name = models.CharField(max_length=50, verbose_name='Имя', blank=True)
surname = models.CharField(max_length=50, verbose_name='Фамилия')
date_birth = models.DateField(auto_now=False, verbose_name='Дата рождения')
def __str__(self):
return self.name + ' ' + self.surname
class Book(models.Model):
CHOISE_GENRE = ('comedy', 'Comedy'), ('tragedy', 'Tragedy'), ('drama',
'Drama')
author = models.ForeignKey(Author, on_delete=models.CASCADE)
title = models.CharField(max_length=50)
text = models.TextField(max_length=1000)
genre = models.CharField(max_length=50, choices=CHOISE_GENRE)
class Place(models.Model):
name = models.CharField(max_length=50)
address = models.CharField(max_length=80)
def __str__(self):
return self.name
class Restaurant(models.Model):
place = models.OneToOneField(Place, on_delete=models.CASCADE,
primary_key=True)
serves_hot_dogs = models.BooleanField(default=False)
serves_pizza = models.BooleanField(default=False)
def __str__(self):
return self.place
class Publication(models.Model):
title = models.CharField(max_length=30)
class Article(models.Model):
headline = models.CharField(max_length=100)
publications = models.ManyToManyField(Publication)
class Meta:
ordering = 'headline',
<|reserved_special_token_1|>
from django.db import models
class TestModel(models.Model):
name = models.CharField(max_length=15)
surname = models.CharField(max_length=10)
age = models.IntegerField()
class Example(models.Model):
integer_field = models.IntegerField()
positive_field = models.PositiveIntegerField()
positive_small_field = models.PositiveSmallIntegerField()
big_integer_field = models.BigIntegerField()
float_field = models.FloatField()
binary_field = models.BinaryField()
boolean_field = models.BooleanField()
char_field = models.CharField(max_length=5)
text_field = models.TextField(max_length=20)
date_field = models.DateField(auto_now=False, auto_now_add=False)
date_time_field = models.DateTimeField(auto_now_add=False)
decimal_field = models.DecimalField(max_digits=8, decimal_places=2) #222222.22
email = models.EmailField()
file_field = models.FileField(upload_to='file')
image_field = models.ImageField(upload_to='images')
class Author(models.Model):
name = models.CharField(max_length=50, verbose_name="Имя", blank=True)
surname = models.CharField(max_length=50, verbose_name="Фамилия")
date_birth = models.DateField(auto_now=False, verbose_name="Дата рождения")
def __str__(self):
return self.name + ' ' + self.surname
class Book(models.Model):
CHOISE_GENRE = (
('comedy', "Comedy"),
('tragedy', "Tragedy"),
('drama', "Drama"),
)
author = models.ForeignKey(Author, on_delete=models.CASCADE)
title = models.CharField(max_length=50)
text = models.TextField(max_length=1000)
genre = models.CharField(max_length=50, choices=CHOISE_GENRE)
class Place(models.Model):
name = models.CharField(max_length=50)
address = models.CharField(max_length=80)
def __str__(self):
return self.name
class Restaurant(models.Model):
place = models.OneToOneField(Place, on_delete=models.CASCADE, primary_key=True)
serves_hot_dogs = models.BooleanField(default=False)
serves_pizza = models.BooleanField(default=False)
def __str__(self):
        return str(self.place)  # __str__ must return a str, not a Place instance
class Publication(models.Model):
title = models.CharField(max_length=30)
# def __str__(self):
# return self.title
#
# class Meta:
# ordering = ('title', )
class Article(models.Model):
headline = models.CharField(max_length=100)
publications = models.ManyToManyField(Publication)
# def __str__(self):
# return self.headline
#
class Meta:
ordering = ('headline', )
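# Usage sketch (added; mirrors Django's standard many-to-many example for the
# Publication/Article pair above):
#
# p = Publication.objects.create(title='Science News')
# a = Article.objects.create(headline='NASA uses Python')
# a.publications.add(p)
# assert p in a.publications.all()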
|
flexible
|
{
"blob_id": "8afce5b47c7c9c67a8be493f7f4de1510352b1c7",
"index": 4559,
"step-1": "<mask token>\n\n\nclass Place(models.Model):\n name = models.CharField(max_length=50)\n address = models.CharField(max_length=80)\n\n def __str__(self):\n return self.name\n\n\nclass Restaurant(models.Model):\n place = models.OneToOneField(Place, on_delete=models.CASCADE,\n primary_key=True)\n serves_hot_dogs = models.BooleanField(default=False)\n serves_pizza = models.BooleanField(default=False)\n\n def __str__(self):\n return self.place\n\n\nclass Publication(models.Model):\n title = models.CharField(max_length=30)\n\n\nclass Article(models.Model):\n headline = models.CharField(max_length=100)\n publications = models.ManyToManyField(Publication)\n\n\n class Meta:\n ordering = 'headline',\n",
"step-2": "<mask token>\n\n\nclass Book(models.Model):\n CHOISE_GENRE = ('comedy', 'Comedy'), ('tragedy', 'Tragedy'), ('drama',\n 'Drama')\n author = models.ForeignKey(Author, on_delete=models.CASCADE)\n title = models.CharField(max_length=50)\n text = models.TextField(max_length=1000)\n genre = models.CharField(max_length=50, choices=CHOISE_GENRE)\n\n\nclass Place(models.Model):\n name = models.CharField(max_length=50)\n address = models.CharField(max_length=80)\n\n def __str__(self):\n return self.name\n\n\nclass Restaurant(models.Model):\n place = models.OneToOneField(Place, on_delete=models.CASCADE,\n primary_key=True)\n serves_hot_dogs = models.BooleanField(default=False)\n serves_pizza = models.BooleanField(default=False)\n\n def __str__(self):\n return self.place\n\n\nclass Publication(models.Model):\n title = models.CharField(max_length=30)\n\n\nclass Article(models.Model):\n headline = models.CharField(max_length=100)\n publications = models.ManyToManyField(Publication)\n\n\n class Meta:\n ordering = 'headline',\n",
"step-3": "<mask token>\n\n\nclass Example(models.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\nclass Author(models.Model):\n name = models.CharField(max_length=50, verbose_name='Имя', blank=True)\n surname = models.CharField(max_length=50, verbose_name='Фамилия')\n date_birth = models.DateField(auto_now=False, verbose_name='Дата рождения')\n\n def __str__(self):\n return self.name + ' ' + self.surname\n\n\nclass Book(models.Model):\n CHOISE_GENRE = ('comedy', 'Comedy'), ('tragedy', 'Tragedy'), ('drama',\n 'Drama')\n author = models.ForeignKey(Author, on_delete=models.CASCADE)\n title = models.CharField(max_length=50)\n text = models.TextField(max_length=1000)\n genre = models.CharField(max_length=50, choices=CHOISE_GENRE)\n\n\nclass Place(models.Model):\n name = models.CharField(max_length=50)\n address = models.CharField(max_length=80)\n\n def __str__(self):\n return self.name\n\n\nclass Restaurant(models.Model):\n place = models.OneToOneField(Place, on_delete=models.CASCADE,\n primary_key=True)\n serves_hot_dogs = models.BooleanField(default=False)\n serves_pizza = models.BooleanField(default=False)\n\n def __str__(self):\n return self.place\n\n\nclass Publication(models.Model):\n title = models.CharField(max_length=30)\n\n\nclass Article(models.Model):\n headline = models.CharField(max_length=100)\n publications = models.ManyToManyField(Publication)\n\n\n class Meta:\n ordering = 'headline',\n",
"step-4": "<mask token>\n\n\nclass Example(models.Model):\n integer_field = models.IntegerField()\n positive_field = models.PositiveIntegerField()\n positive_small_field = models.PositiveSmallIntegerField()\n big_integer_field = models.BigIntegerField()\n float_field = models.FloatField()\n binary_field = models.BinaryField()\n boolean_field = models.BooleanField()\n char_field = models.CharField(max_length=5)\n text_field = models.TextField(max_length=20)\n date_field = models.DateField(auto_now=False, auto_now_add=False)\n date_time_field = models.DateTimeField(auto_now_add=False)\n decimal_field = models.DecimalField(max_digits=8, decimal_places=2)\n email = models.EmailField()\n file_field = models.FileField(upload_to='file')\n image_field = models.ImageField(upload_to='images')\n\n\nclass Author(models.Model):\n name = models.CharField(max_length=50, verbose_name='Имя', blank=True)\n surname = models.CharField(max_length=50, verbose_name='Фамилия')\n date_birth = models.DateField(auto_now=False, verbose_name='Дата рождения')\n\n def __str__(self):\n return self.name + ' ' + self.surname\n\n\nclass Book(models.Model):\n CHOISE_GENRE = ('comedy', 'Comedy'), ('tragedy', 'Tragedy'), ('drama',\n 'Drama')\n author = models.ForeignKey(Author, on_delete=models.CASCADE)\n title = models.CharField(max_length=50)\n text = models.TextField(max_length=1000)\n genre = models.CharField(max_length=50, choices=CHOISE_GENRE)\n\n\nclass Place(models.Model):\n name = models.CharField(max_length=50)\n address = models.CharField(max_length=80)\n\n def __str__(self):\n return self.name\n\n\nclass Restaurant(models.Model):\n place = models.OneToOneField(Place, on_delete=models.CASCADE,\n primary_key=True)\n serves_hot_dogs = models.BooleanField(default=False)\n serves_pizza = models.BooleanField(default=False)\n\n def __str__(self):\n return self.place\n\n\nclass Publication(models.Model):\n title = models.CharField(max_length=30)\n\n\nclass Article(models.Model):\n headline = models.CharField(max_length=100)\n publications = models.ManyToManyField(Publication)\n\n\n class Meta:\n ordering = 'headline',\n",
"step-5": "from django.db import models\n\n\nclass TestModel(models.Model):\n name = models.CharField(max_length=15)\n surname = models.CharField(max_length=10)\n age = models.IntegerField()\n\n\nclass Example(models.Model):\n integer_field = models.IntegerField()\n positive_field = models.PositiveIntegerField()\n positive_small_field = models.PositiveSmallIntegerField()\n big_integer_field = models.BigIntegerField()\n float_field = models.FloatField()\n binary_field = models.BinaryField()\n boolean_field = models.BooleanField()\n char_field = models.CharField(max_length=5)\n text_field = models.TextField(max_length=20)\n date_field = models.DateField(auto_now=False, auto_now_add=False)\n date_time_field = models.DateTimeField(auto_now_add=False)\n decimal_field = models.DecimalField(max_digits=8, decimal_places=2) #222222.22\n email = models.EmailField()\n file_field = models.FileField(upload_to='file')\n image_field = models.ImageField(upload_to='images')\n\n\nclass Author(models.Model):\n name = models.CharField(max_length=50, verbose_name=\"Имя\", blank=True)\n surname = models.CharField(max_length=50, verbose_name=\"Фамилия\")\n date_birth = models.DateField(auto_now=False, verbose_name=\"Дата рождения\")\n\n def __str__(self):\n return self.name + ' ' + self.surname\n\n\nclass Book(models.Model):\n\n CHOISE_GENRE = (\n ('comedy', \"Comedy\"),\n ('tragedy', \"Tragedy\"),\n ('drama', \"Drama\"),\n )\n\n author = models.ForeignKey(Author, on_delete=models.CASCADE)\n title = models.CharField(max_length=50)\n text = models.TextField(max_length=1000)\n genre = models.CharField(max_length=50, choices=CHOISE_GENRE)\n\n\nclass Place(models.Model):\n name = models.CharField(max_length=50)\n address = models.CharField(max_length=80)\n\n def __str__(self):\n return self.name\n\n\nclass Restaurant(models.Model):\n place = models.OneToOneField(Place, on_delete=models.CASCADE, primary_key=True)\n serves_hot_dogs = models.BooleanField(default=False)\n serves_pizza = models.BooleanField(default=False)\n\n def __str__(self):\n return self.place\n\n\nclass Publication(models.Model):\n title = models.CharField(max_length=30)\n\n # def __str__(self):\n # return self.title\n #\n # class Meta:\n # ordering = ('title', )\n\n\nclass Article(models.Model):\n headline = models.CharField(max_length=100)\n publications = models.ManyToManyField(Publication)\n\n # def __str__(self):\n # return self.headline\n #\n class Meta:\n ordering = ('headline', )\n",
"step-ids": [
10,
12,
16,
17,
21
]
}
|
[
10,
12,
16,
17,
21
] |
from keras.models import load_model
from DataManager import *
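

# Load a previously trained Keras model saved as ./Model/<name>.h5.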
def loadModel(name):
model = load_model('./Model/%s.h5' % name)
return model
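

# Pull test data for the tag via the DataManager helpers, derive extra
# features, normalize, build (12*12)-step input windows, and run the
# saved model over them.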
def predict(tag):
test = getPIData(tag, '2019-11-05', '2019-11-06')
test_arg = addFeature(test)
test_norm = normalize(test_arg)
X_test, Y_test = buildTrain(test_norm, 12 * 12, 1)
model = loadModel(tag)
return model.predict(X_test)
print(predict('USG60_eth0_ifInOctets'))
|
normal
|
{
"blob_id": "a6154c5d855dc53d73db08bbb5b5d7437056e156",
"index": 1566,
"step-1": "<mask token>\n\n\ndef loadModel(name):\n model = load_model('./Model/%s.h5' % name)\n return model\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef loadModel(name):\n model = load_model('./Model/%s.h5' % name)\n return model\n\n\ndef predict(tag):\n test = getPIData(tag, '2019-11-05', '2019-11-06')\n test_arg = addFeature(test)\n test_norm = normalize(test_arg)\n X_test, Y_test = buildTrain(test_norm, 12 * 12, 1)\n model = loadModel(tag)\n return model.predict(X_test)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef loadModel(name):\n model = load_model('./Model/%s.h5' % name)\n return model\n\n\ndef predict(tag):\n test = getPIData(tag, '2019-11-05', '2019-11-06')\n test_arg = addFeature(test)\n test_norm = normalize(test_arg)\n X_test, Y_test = buildTrain(test_norm, 12 * 12, 1)\n model = loadModel(tag)\n return model.predict(X_test)\n\n\nprint(predict('USG60_eth0_ifInOctets'))\n",
"step-4": "from keras.models import load_model\nfrom DataManager import *\n\n\ndef loadModel(name):\n model = load_model('./Model/%s.h5' % name)\n return model\n\n\ndef predict(tag):\n test = getPIData(tag, '2019-11-05', '2019-11-06')\n test_arg = addFeature(test)\n test_norm = normalize(test_arg)\n X_test, Y_test = buildTrain(test_norm, 12 * 12, 1)\n model = loadModel(tag)\n return model.predict(X_test)\n\n\nprint(predict('USG60_eth0_ifInOctets'))\n",
"step-5": null,
"step-ids": [
1,
2,
3,
4
]
}
|
[
1,
2,
3,
4
] |
ID = '113'
TITLE = 'Path Sum II'
DIFFICULTY = 'Medium'
URL = 'https://oj.leetcode.com/problems/path-sum-ii/'
BOOK = False
PROBLEM = r"""Given a binary tree and a sum, find all root-to-leaf paths where each path's
sum equals the given sum.
For example:
Given the below binary tree and `sum = 22`,
5
/ \
4 8
/ / \
11 13 4
/ \ / \
7 2 5 1
return
[
[5,4,11,2],
[5,8,4,5]
]
"""
|
normal
|
{
"blob_id": "9a62a57f6d9af7ef09c8ed6e78a100df7978da6e",
"index": 8631,
"step-1": "<mask token>\n",
"step-2": "ID = '113'\nTITLE = 'Path Sum II'\nDIFFICULTY = 'Medium'\nURL = 'https://oj.leetcode.com/problems/path-sum-ii/'\nBOOK = False\nPROBLEM = \"\"\"Given a binary tree and a sum, find all root-to-leaf paths where each path's\nsum equals the given sum.\n\nFor example: \nGiven the below binary tree and `sum = 22`,\n\n \n \n \n 5\n / \\\\\n 4 8\n / / \\\\\n 11 13 4\n / \\\\ / \\\\\n 7 2 5 1\n \n\nreturn \n\n \n \n \n [\n [5,4,11,2],\n [5,8,4,5]\n ]\n \n\n\n\"\"\"\n",
"step-3": "ID = '113'\nTITLE = 'Path Sum II'\nDIFFICULTY = 'Medium'\nURL = 'https://oj.leetcode.com/problems/path-sum-ii/'\nBOOK = False\nPROBLEM = r\"\"\"Given a binary tree and a sum, find all root-to-leaf paths where each path's\nsum equals the given sum.\n\nFor example: \nGiven the below binary tree and `sum = 22`,\n\n \n \n \n 5\n / \\\n 4 8\n / / \\\n 11 13 4\n / \\ / \\\n 7 2 5 1\n \n\nreturn \n\n \n \n \n [\n [5,4,11,2],\n [5,8,4,5]\n ]\n \n\n\n\"\"\"\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
def printPar():
for i in range(len(par)):
print "par[{0:d}] = {1:d}".format(i,par[i])
def printImpar():
for i in range(len(impar)):
print "impar[{0:d}] = {1:d}".format(i,impar[i])
par = []
impar = []
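
# Read 15 integers; collect evens (par) and odds (impar) separately and
# print-and-reset each list once it holds five values (Python 2 script).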
for i in range(15):
n= int(raw_input())
if n%2 == 0:
if len(par)<4:
par.append(n)
elif len(par)==4:
par.append(n)
printPar()
par = []
else:
if len(impar)<4:
impar.append(n)
elif len(impar)==4:
impar.append(n)
printImpar()
impar = []
if len(impar)>0:
printImpar()
if len(par)>0:
printPar()
|
normal
|
{
"blob_id": "7e33475a6ab7ad0d1e9d7d00b8443329e265fe69",
"index": 6793,
"step-1": "def printPar():\n for i in range(len(par)):\n print \"par[{0:d}] = {1:d}\".format(i,par[i])\ndef printImpar():\n for i in range(len(impar)):\n print \"impar[{0:d}] = {1:d}\".format(i,impar[i])\npar = []\nimpar = []\n\nfor i in range(15):\n n= int(raw_input())\n if n%2 == 0:\n if len(par)<4:\n par.append(n)\n elif len(par)==4:\n par.append(n)\n printPar()\n par = []\n else:\n if len(impar)<4:\n impar.append(n)\n elif len(impar)==4:\n impar.append(n)\n printImpar()\n impar = []\n\nif len(impar)>0:\n printImpar()\nif len(par)>0:\n printPar()\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
default_args = {'owner': 'Jaimin', 'depends_on_past': False, 'start_date':
datetime.now(), 'email': ['[email protected]'], 'email_on_failure':
False, 'email_on_retry': False, 'retries': 1, 'retry_delay': timedelta(
minutes=5)}
dag = DAG('hive_create_part_v1', default_args=default_args,
schedule_interval='0 1 * * *', concurrency=1)
task = BashOperator(task_id='hive_create_parition', bash_command=
'bash /data/appdata/airflow/script/hive_create_job.sh mnode2 ', dag=dag)
<|reserved_special_token_1|>
import logging
import airflow
from airflow import DAG
from airflow.operators.python_operator import PythonOperator
from airflow.operators import BashOperator, DummyOperator
from datetime import datetime, timedelta
default_args = {'owner': 'Jaimin', 'depends_on_past': False, 'start_date':
datetime.now(), 'email': ['[email protected]'], 'email_on_failure':
False, 'email_on_retry': False, 'retries': 1, 'retry_delay': timedelta(
minutes=5)}
dag = DAG('hive_create_part_v1', default_args=default_args,
schedule_interval='0 1 * * *', concurrency=1)
task = BashOperator(task_id='hive_create_parition', bash_command=
'bash /data/appdata/airflow/script/hive_create_job.sh mnode2 ', dag=dag)
<|reserved_special_token_1|>
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import logging
import airflow
from airflow import DAG
from airflow.operators.python_operator import PythonOperator
from airflow.operators import BashOperator, DummyOperator
from datetime import datetime, timedelta
# --------------------------------------------------------------------------------
# set default arguments
# --------------------------------------------------------------------------------
default_args = {
'owner': 'Jaimin',
'depends_on_past': False,
'start_date': datetime.now(),
'email': ['[email protected]'],
'email_on_failure': False,
'email_on_retry': False,
'retries': 1,
'retry_delay': timedelta(minutes=5),
# 'queue': 'bash_queue',
# 'pool': 'backfill',
# 'priority_weight': 10,
# 'end_date': datetime(2016, 1, 1),
}
dag = DAG(
'hive_create_part_v1',
default_args=default_args,
schedule_interval="0 1 * * *",
concurrency=1)
# --------------------------------------------------------------------------------
# set tasks
# --------------------------------------------------------------------------------
task = BashOperator(
task_id='hive_create_parition',
bash_command='bash /data/appdata/airflow/script/hive_create_job.sh mnode2 ',
dag=dag)
|
flexible
|
{
"blob_id": "49492ad1a1734be02ebefb77095fd560a7a7efd8",
"index": 7155,
"step-1": "<mask token>\n",
"step-2": "<mask token>\ndefault_args = {'owner': 'Jaimin', 'depends_on_past': False, 'start_date':\n datetime.now(), 'email': ['[email protected]'], 'email_on_failure': \n False, 'email_on_retry': False, 'retries': 1, 'retry_delay': timedelta(\n minutes=5)}\ndag = DAG('hive_create_part_v1', default_args=default_args,\n schedule_interval='0 1 * * *', concurrency=1)\ntask = BashOperator(task_id='hive_create_parition', bash_command=\n 'bash /data/appdata/airflow/script/hive_create_job.sh mnode2 ', dag=dag)\n",
"step-3": "import logging\nimport airflow\nfrom airflow import DAG\nfrom airflow.operators.python_operator import PythonOperator\nfrom airflow.operators import BashOperator, DummyOperator\nfrom datetime import datetime, timedelta\ndefault_args = {'owner': 'Jaimin', 'depends_on_past': False, 'start_date':\n datetime.now(), 'email': ['[email protected]'], 'email_on_failure': \n False, 'email_on_retry': False, 'retries': 1, 'retry_delay': timedelta(\n minutes=5)}\ndag = DAG('hive_create_part_v1', default_args=default_args,\n schedule_interval='0 1 * * *', concurrency=1)\ntask = BashOperator(task_id='hive_create_parition', bash_command=\n 'bash /data/appdata/airflow/script/hive_create_job.sh mnode2 ', dag=dag)\n",
"step-4": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport logging\nimport airflow\n\nfrom airflow import DAG\nfrom airflow.operators.python_operator import PythonOperator\nfrom airflow.operators import BashOperator, DummyOperator\n\nfrom datetime import datetime, timedelta\n\n\n# --------------------------------------------------------------------------------\n# set default arguments\n# --------------------------------------------------------------------------------\n\ndefault_args = {\n 'owner': 'Jaimin',\n 'depends_on_past': False,\n 'start_date': datetime.now(),\n 'email': ['[email protected]'],\n 'email_on_failure': False,\n 'email_on_retry': False,\n 'retries': 1,\n 'retry_delay': timedelta(minutes=5),\n # 'queue': 'bash_queue',\n # 'pool': 'backfill',\n # 'priority_weight': 10,\n # 'end_date': datetime(2016, 1, 1),\n}\n\ndag = DAG(\n 'hive_create_part_v1',\n default_args=default_args,\n schedule_interval=\"0 1 * * *\",\n concurrency=1)\n\n# --------------------------------------------------------------------------------\n# set tasks \n# --------------------------------------------------------------------------------\n\ntask = BashOperator(\n task_id='hive_create_parition',\n bash_command='bash /data/appdata/airflow/script/hive_create_job.sh mnode2 ',\n dag=dag)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
def find_jobs():
html_text = requests.get(
'https://www.timesjobs.com/candidate/job-search.html?searchType=personalizedSearch&from=submit&txtKeywords=python&txtLocation='
).text
soup = BeautifulSoup(html_text, 'lxml')
jobs = soup.find_all('li', class_='clearfix job-bx wht-shd-bx')
for job in jobs:
posted = job.find('span', class_='sim-posted').span.text
if 'few' in posted:
company_name = job.find('h3', class_='joblist-comp-name'
).text.replace(' ', '')
skills = job.find('span', class_='srp-skills').text.replace(' ', ''
)
more_info = job.header.a['href']
if unfamilar_skills not in skills:
print(f'Company Name: {company_name.strip()}')
print(f'Skills: {skills.strip()}')
print(f'More Info: {more_info}')
print('')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print('Put some unfamiliar skills')
<|reserved_special_token_0|>
print(f'Filtering result for {unfamilar_skills}...\n')
def find_jobs():
html_text = requests.get(
'https://www.timesjobs.com/candidate/job-search.html?searchType=personalizedSearch&from=submit&txtKeywords=python&txtLocation='
).text
soup = BeautifulSoup(html_text, 'lxml')
jobs = soup.find_all('li', class_='clearfix job-bx wht-shd-bx')
for job in jobs:
posted = job.find('span', class_='sim-posted').span.text
if 'few' in posted:
company_name = job.find('h3', class_='joblist-comp-name'
).text.replace(' ', '')
skills = job.find('span', class_='srp-skills').text.replace(' ', ''
)
more_info = job.header.a['href']
if unfamilar_skills not in skills:
print(f'Company Name: {company_name.strip()}')
print(f'Skills: {skills.strip()}')
print(f'More Info: {more_info}')
print('')
if __name__ == '__main__':
find_jobs()
while True:
find_jobs()
filter_time = 10
print(f'Waiting for {filter_time} minute')
time.sleep(filter_time * 60)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print('Put some unfamiliar skills')
unfamilar_skills = input('>')
print(f'Filtering result for {unfamilar_skills}...\n')
def find_jobs():
html_text = requests.get(
'https://www.timesjobs.com/candidate/job-search.html?searchType=personalizedSearch&from=submit&txtKeywords=python&txtLocation='
).text
soup = BeautifulSoup(html_text, 'lxml')
jobs = soup.find_all('li', class_='clearfix job-bx wht-shd-bx')
for job in jobs:
posted = job.find('span', class_='sim-posted').span.text
if 'few' in posted:
company_name = job.find('h3', class_='joblist-comp-name'
).text.replace(' ', '')
skills = job.find('span', class_='srp-skills').text.replace(' ', ''
)
more_info = job.header.a['href']
if unfamilar_skills not in skills:
print(f'Company Name: {company_name.strip()}')
print(f'Skills: {skills.strip()}')
print(f'More Info: {more_info}')
print('')
if __name__ == '__main__':
find_jobs()
while True:
find_jobs()
filter_time = 10
print(f'Waiting for {filter_time} minute')
time.sleep(filter_time * 60)
<|reserved_special_token_1|>
import requests
from bs4 import BeautifulSoup
import time
print('Put some unfamiliar skills')
unfamilar_skills = input('>')
print(f'Filtering result for {unfamilar_skills}...\n')
def find_jobs():
html_text = requests.get(
'https://www.timesjobs.com/candidate/job-search.html?searchType=personalizedSearch&from=submit&txtKeywords=python&txtLocation='
).text
soup = BeautifulSoup(html_text, 'lxml')
jobs = soup.find_all('li', class_='clearfix job-bx wht-shd-bx')
for job in jobs:
posted = job.find('span', class_='sim-posted').span.text
if 'few' in posted:
company_name = job.find('h3', class_='joblist-comp-name'
).text.replace(' ', '')
skills = job.find('span', class_='srp-skills').text.replace(' ', ''
)
more_info = job.header.a['href']
if unfamilar_skills not in skills:
print(f'Company Name: {company_name.strip()}')
print(f'Skills: {skills.strip()}')
print(f'More Info: {more_info}')
print('')
if __name__ == '__main__':
find_jobs()
while True:
find_jobs()
filter_time = 10
print(f'Waiting for {filter_time} minute')
time.sleep(filter_time * 60)
<|reserved_special_token_1|>
import requests
from bs4 import BeautifulSoup
import time
print("Put some unfamiliar skills")
unfamilar_skills = input(">")
print(f"Filtering result for {unfamilar_skills}...\n")
def find_jobs():
html_text = requests.get('https://www.timesjobs.com/candidate/job-search.html?searchType=personalizedSearch&from=submit&txtKeywords=python&txtLocation=').text
soup = BeautifulSoup(html_text,'lxml')
jobs = soup.find_all('li',class_='clearfix job-bx wht-shd-bx')
for job in jobs:
posted = job.find('span',class_='sim-posted').span.text
if("few" in posted):
company_name = job.find('h3',class_='joblist-comp-name').text.replace(" ",'')
skills = job.find('span',class_='srp-skills').text.replace(' ','')
more_info = job.header.a['href']
if unfamilar_skills not in skills:
print(f'Company Name: {company_name.strip()}')
print(f'Skills: {skills.strip()}')
print(f"More Info: {more_info}")
print("")
if __name__ == '__main__':
find_jobs()
while True:
find_jobs()
filter_time = 10
print(f"Waiting for {filter_time} minute")
time.sleep(filter_time*60)
|
flexible
|
{
"blob_id": "92b71c67130cd37b2143fbd9ad71fe9a18b3f7e8",
"index": 2622,
"step-1": "<mask token>\n\n\ndef find_jobs():\n html_text = requests.get(\n 'https://www.timesjobs.com/candidate/job-search.html?searchType=personalizedSearch&from=submit&txtKeywords=python&txtLocation='\n ).text\n soup = BeautifulSoup(html_text, 'lxml')\n jobs = soup.find_all('li', class_='clearfix job-bx wht-shd-bx')\n for job in jobs:\n posted = job.find('span', class_='sim-posted').span.text\n if 'few' in posted:\n company_name = job.find('h3', class_='joblist-comp-name'\n ).text.replace(' ', '')\n skills = job.find('span', class_='srp-skills').text.replace(' ', ''\n )\n more_info = job.header.a['href']\n if unfamilar_skills not in skills:\n print(f'Company Name: {company_name.strip()}')\n print(f'Skills: {skills.strip()}')\n print(f'More Info: {more_info}')\n print('')\n\n\n<mask token>\n",
"step-2": "<mask token>\nprint('Put some unfamiliar skills')\n<mask token>\nprint(f'Filtering result for {unfamilar_skills}...\\n')\n\n\ndef find_jobs():\n html_text = requests.get(\n 'https://www.timesjobs.com/candidate/job-search.html?searchType=personalizedSearch&from=submit&txtKeywords=python&txtLocation='\n ).text\n soup = BeautifulSoup(html_text, 'lxml')\n jobs = soup.find_all('li', class_='clearfix job-bx wht-shd-bx')\n for job in jobs:\n posted = job.find('span', class_='sim-posted').span.text\n if 'few' in posted:\n company_name = job.find('h3', class_='joblist-comp-name'\n ).text.replace(' ', '')\n skills = job.find('span', class_='srp-skills').text.replace(' ', ''\n )\n more_info = job.header.a['href']\n if unfamilar_skills not in skills:\n print(f'Company Name: {company_name.strip()}')\n print(f'Skills: {skills.strip()}')\n print(f'More Info: {more_info}')\n print('')\n\n\nif __name__ == '__main__':\n find_jobs()\n while True:\n find_jobs()\n filter_time = 10\n print(f'Waiting for {filter_time} minute')\n time.sleep(filter_time * 60)\n",
"step-3": "<mask token>\nprint('Put some unfamiliar skills')\nunfamilar_skills = input('>')\nprint(f'Filtering result for {unfamilar_skills}...\\n')\n\n\ndef find_jobs():\n html_text = requests.get(\n 'https://www.timesjobs.com/candidate/job-search.html?searchType=personalizedSearch&from=submit&txtKeywords=python&txtLocation='\n ).text\n soup = BeautifulSoup(html_text, 'lxml')\n jobs = soup.find_all('li', class_='clearfix job-bx wht-shd-bx')\n for job in jobs:\n posted = job.find('span', class_='sim-posted').span.text\n if 'few' in posted:\n company_name = job.find('h3', class_='joblist-comp-name'\n ).text.replace(' ', '')\n skills = job.find('span', class_='srp-skills').text.replace(' ', ''\n )\n more_info = job.header.a['href']\n if unfamilar_skills not in skills:\n print(f'Company Name: {company_name.strip()}')\n print(f'Skills: {skills.strip()}')\n print(f'More Info: {more_info}')\n print('')\n\n\nif __name__ == '__main__':\n find_jobs()\n while True:\n find_jobs()\n filter_time = 10\n print(f'Waiting for {filter_time} minute')\n time.sleep(filter_time * 60)\n",
"step-4": "import requests\nfrom bs4 import BeautifulSoup\nimport time\nprint('Put some unfamiliar skills')\nunfamilar_skills = input('>')\nprint(f'Filtering result for {unfamilar_skills}...\\n')\n\n\ndef find_jobs():\n html_text = requests.get(\n 'https://www.timesjobs.com/candidate/job-search.html?searchType=personalizedSearch&from=submit&txtKeywords=python&txtLocation='\n ).text\n soup = BeautifulSoup(html_text, 'lxml')\n jobs = soup.find_all('li', class_='clearfix job-bx wht-shd-bx')\n for job in jobs:\n posted = job.find('span', class_='sim-posted').span.text\n if 'few' in posted:\n company_name = job.find('h3', class_='joblist-comp-name'\n ).text.replace(' ', '')\n skills = job.find('span', class_='srp-skills').text.replace(' ', ''\n )\n more_info = job.header.a['href']\n if unfamilar_skills not in skills:\n print(f'Company Name: {company_name.strip()}')\n print(f'Skills: {skills.strip()}')\n print(f'More Info: {more_info}')\n print('')\n\n\nif __name__ == '__main__':\n find_jobs()\n while True:\n find_jobs()\n filter_time = 10\n print(f'Waiting for {filter_time} minute')\n time.sleep(filter_time * 60)\n",
"step-5": "import requests\nfrom bs4 import BeautifulSoup\nimport time\nprint(\"Put some unfamiliar skills\")\nunfamilar_skills = input(\">\")\nprint(f\"Filtering result for {unfamilar_skills}...\\n\")\n\ndef find_jobs():\n html_text = requests.get('https://www.timesjobs.com/candidate/job-search.html?searchType=personalizedSearch&from=submit&txtKeywords=python&txtLocation=').text\n soup = BeautifulSoup(html_text,'lxml')\n jobs = soup.find_all('li',class_='clearfix job-bx wht-shd-bx')\n\n for job in jobs:\n posted = job.find('span',class_='sim-posted').span.text\n if(\"few\" in posted):\n company_name = job.find('h3',class_='joblist-comp-name').text.replace(\" \",'')\n skills = job.find('span',class_='srp-skills').text.replace(' ','')\n more_info = job.header.a['href']\n if unfamilar_skills not in skills:\n print(f'Company Name: {company_name.strip()}')\n print(f'Skills: {skills.strip()}')\n print(f\"More Info: {more_info}\")\n print(\"\")\n\n\nif __name__ == '__main__':\n find_jobs() \n while True:\n find_jobs()\n filter_time = 10\n print(f\"Waiting for {filter_time} minute\")\n time.sleep(filter_time*60)\n\n \n\n\n\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
import random
import torch
import numpy as np
from torch.autograd import Variable
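

# Holds per-task, per-label support ("prototype") example texts and builds
# class prototype embeddings by encoding them with the model's encoder
# (mnet_model.get_hidden).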
class SupportSetManager(object):
FIXED_FIRST = 0
RANDOM = 1
def __init__(self, datasets, config, sample_per_class):
self.config = config
(TEXT, LABEL, train, dev, test) = datasets[0]
self.TEXT = TEXT
self.sample_per_class = sample_per_class
print('Picking up prototypes')
self.prototype_text_list = []
for taskid, (TEXT, LABEL, train, dev, test) in enumerate(datasets):
prototype_text = []
#print taskid, LABEL.vocab
if not hasattr(LABEL, 'vocab'):
self.prototype_text_list.append(prototype_text)
continue
for lab_id in range(len(LABEL.vocab.itos)):
prototype_text.append([])
for example in train.examples:
lab_id = LABEL.vocab.stoi[example.label]
if prototype_text[lab_id] is not None:
prototype_text[lab_id].append(example.text)
else:
prototype_text[lab_id] = [example.text]
for lab_id in range(len(LABEL.vocab.itos)):
if len(prototype_text[lab_id]) == 0:
prototype_text[lab_id].append(['<pad>'])
if self.sample_per_class >= 1 and self.sample_per_class < len(prototype_text[lab_id]):
prototype_text[lab_id] = prototype_text[lab_id][:self.sample_per_class]
            print('Task %s: picked up %s prototypes' % (taskid, self.sample_per_class))
self.prototype_text_list.append(prototype_text)
def select_support_set(self, taskid, policy):
if policy == self.FIXED_FIRST:
supp_set = self.select_support_set_first(taskid)
elif policy == self.RANDOM:
supp_set = self.select_support_set_random(taskid)
return supp_set
def select_support_set_first(self, taskid):
prototype_text = self.prototype_text_list[taskid]
examples_text = []
for lab_id in range(len(prototype_text)):
examples_text.append(prototype_text[lab_id][0])
prototype_matrix = self.TEXT.numericalize(
self.TEXT.pad(x for x in examples_text),
device=self.config.device)
#if taskid == 0: #TODO test the consistency of the first example
# print examples_text
# print prototype_matrix
return prototype_matrix
    def select_support_set_random(self, taskid):
prototype_text = self.prototype_text_list[taskid]
examples_text = []
for lab_id in range(len(prototype_text)):
rand_idx = random.randint(0, len(prototype_text[lab_id]) - 1)
examples_text.append(prototype_text[lab_id][rand_idx])
prototype_matrix = self.TEXT.numericalize(
self.TEXT.pad(x for x in examples_text),
device=self.config.device)
#if taskid == 0: #TODO test the consistency of the first example
# print examples_text
# print prototype_matrix
return prototype_matrix
def get_average_as_support(self, taskid, mnet_model):
prototype_text = self.prototype_text_list[taskid]
prototype_emb_list = []
for lab_id in range(len(prototype_text)):
prototype_sent = self.TEXT.numericalize(
self.TEXT.pad(x for x in prototype_text[lab_id]),
device=self.config.device)
prototype_matrix = mnet_model.get_hidden(prototype_sent)
prototype_emb_list.append(torch.mean(prototype_matrix, dim=0))
#print prototype_emb_list
#return torch.cat(prototype_emb_list, dim=0) #works for the new pytorch version
return torch.cat(prototype_emb_list, 0)
def get_average_and_std_as_support(self, taskid, mnet_model):
prototype_text = self.prototype_text_list[taskid]
prototype_emb_list = []
prototype_std_list = []
for lab_id in range(len(prototype_text)):
N = len(prototype_text[lab_id])
prototype_sent = self.TEXT.numericalize(
self.TEXT.pad(x for x in prototype_text[lab_id]),
device=self.config.device, train=True)
prototype_matrix = mnet_model.get_hidden(prototype_sent)
mean_vec = torch.mean(prototype_matrix, dim=0)
if N > 1:
#std_val = torch.sqrt((torch.pow(prototype_matrix, 2).sum() - N * torch.pow(mean_vec, 2).sum()) / (N - 1))
std_val = (torch.pow(prototype_matrix, 2).sum() - N * torch.pow(mean_vec, 2).sum()) / (N - 1)
std_val = Variable(std_val.data)
else:
std_val = Variable(torch.from_numpy(np.array([1.0]).astype(np.float32))).cuda()
prototype_emb_list.append(mean_vec)
prototype_std_list.append(std_val)
#print prototype_std_list
return torch.cat(prototype_emb_list, 0), torch.cat(prototype_std_list, 0)
def get_average_as_support_sample(self, taskid, mnet_model, sample_per_class):
prototype_text = self.prototype_text_list[taskid]
prototype_emb_list = []
for lab_id in range(len(prototype_text)):
if sample_per_class > len(prototype_text[lab_id]):
prototype_sent = self.TEXT.numericalize(
self.TEXT.pad(x for x in prototype_text[lab_id]),
device=self.config.device)
else:
                top_ind = list(range(len(prototype_text[lab_id])))
random.shuffle(top_ind)
top_ind = top_ind[:sample_per_class]
prototype_text_sample = [prototype_text[lab_id][i] for i in top_ind]
prototype_sent = self.TEXT.numericalize(
self.TEXT.pad(x for x in prototype_text_sample),
device=self.config.device)
prototype_matrix = mnet_model.get_hidden(prototype_sent)
prototype_emb_list.append(torch.mean(prototype_matrix, dim=0))
return torch.cat(prototype_emb_list, 0)
def get_average_as_support_large(self, taskid, mnet_model, batchsize):
prototype_text = self.prototype_text_list[taskid]
prototype_emb_list = []
for lab_id in range(len(prototype_text)):
            num_batch = len(prototype_text[lab_id]) // batchsize
            if len(prototype_text[lab_id]) % batchsize != 0:
                num_batch += 1
lab_emb_sum = []
for i in range(num_batch):
#print i
#print len(prototype_text[lab_id]), i*batchsize, (i+1) * batchsize
batch_text = prototype_text[lab_id][i * batchsize : min((i+1) * batchsize, len(prototype_text[lab_id]))]
#print batch_text
len_text = len(batch_text)
#print len_text
batch_prototype_sent = self.TEXT.numericalize(
self.TEXT.pad(x for x in batch_text),
device=self.config.device, train=True)
#print batch_prototype_sent
prototype_matrix = mnet_model.get_hidden(batch_prototype_sent)
prototype_matrix = Variable(prototype_matrix.data)
#prototype_emb_list.append(torch.mean(prototype_matrix, dim=0))
#prototype_emb_list.append(torch.sum(prototype_matrix, dim=0) / len_text)
#break
#TODO: the following three lines not equivalent to the two lines below
# lab_emb_sum.append(torch.sum(prototype_matrix, dim=0))
#lab_emb_sum = torch.sum( torch.cat(lab_emb_sum, 0), dim=0 )
#lab_emb_sum /= len(prototype_text[lab_id])
lab_emb_sum.append(torch.mean(prototype_matrix, dim=0))
lab_emb_sum = torch.mean( torch.cat(lab_emb_sum, 0), dim=0 )
prototype_emb_list.append(lab_emb_sum)
return torch.cat(prototype_emb_list, 0)
|
normal
|
{
"blob_id": "13a2814e8744c6c09906d790185ed44fc2b3f23e",
"index": 3642,
"step-1": "<mask token>\n\n\nclass SupportSetManager(object):\n <mask token>\n <mask token>\n\n def __init__(self, datasets, config, sample_per_class):\n self.config = config\n TEXT, LABEL, train, dev, test = datasets[0]\n self.TEXT = TEXT\n self.sample_per_class = sample_per_class\n print('Picking up prototypes')\n self.prototype_text_list = []\n for taskid, (TEXT, LABEL, train, dev, test) in enumerate(datasets):\n prototype_text = []\n if not hasattr(LABEL, 'vocab'):\n self.prototype_text_list.append(prototype_text)\n continue\n for lab_id in range(len(LABEL.vocab.itos)):\n prototype_text.append([])\n for example in train.examples:\n lab_id = LABEL.vocab.stoi[example.label]\n if prototype_text[lab_id] is not None:\n prototype_text[lab_id].append(example.text)\n else:\n prototype_text[lab_id] = [example.text]\n for lab_id in range(len(LABEL.vocab.itos)):\n if len(prototype_text[lab_id]) == 0:\n prototype_text[lab_id].append(['<pad>'])\n if self.sample_per_class >= 1 and self.sample_per_class < len(\n prototype_text[lab_id]):\n prototype_text[lab_id] = prototype_text[lab_id][:self.\n sample_per_class]\n print('Task %s: picked up %s prototypes', (taskid, self.\n sample_per_class))\n self.prototype_text_list.append(prototype_text)\n\n def select_support_set(self, taskid, policy):\n if policy == self.FIXED_FIRST:\n supp_set = self.select_support_set_first(taskid)\n elif policy == self.RANDOM:\n supp_set = self.select_support_set_random(taskid)\n return supp_set\n\n def select_support_set_first(self, taskid):\n prototype_text = self.prototype_text_list[taskid]\n examples_text = []\n for lab_id in range(len(prototype_text)):\n examples_text.append(prototype_text[lab_id][0])\n prototype_matrix = self.TEXT.numericalize(self.TEXT.pad(x for x in\n examples_text), device=self.config.device)\n return prototype_matrix\n <mask token>\n\n def get_average_as_support(self, taskid, mnet_model):\n prototype_text = self.prototype_text_list[taskid]\n prototype_emb_list = []\n for lab_id in range(len(prototype_text)):\n prototype_sent = self.TEXT.numericalize(self.TEXT.pad(x for x in\n prototype_text[lab_id]), device=self.config.device)\n prototype_matrix = mnet_model.get_hidden(prototype_sent)\n prototype_emb_list.append(torch.mean(prototype_matrix, dim=0))\n return torch.cat(prototype_emb_list, 0)\n\n def get_average_and_std_as_support(self, taskid, mnet_model):\n prototype_text = self.prototype_text_list[taskid]\n prototype_emb_list = []\n prototype_std_list = []\n for lab_id in range(len(prototype_text)):\n N = len(prototype_text[lab_id])\n prototype_sent = self.TEXT.numericalize(self.TEXT.pad(x for x in\n prototype_text[lab_id]), device=self.config.device, train=True)\n prototype_matrix = mnet_model.get_hidden(prototype_sent)\n mean_vec = torch.mean(prototype_matrix, dim=0)\n if N > 1:\n std_val = (torch.pow(prototype_matrix, 2).sum() - N * torch\n .pow(mean_vec, 2).sum()) / (N - 1)\n std_val = Variable(std_val.data)\n else:\n std_val = Variable(torch.from_numpy(np.array([1.0]).astype(\n np.float32))).cuda()\n prototype_emb_list.append(mean_vec)\n prototype_std_list.append(std_val)\n return torch.cat(prototype_emb_list, 0), torch.cat(prototype_std_list,\n 0)\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass SupportSetManager(object):\n <mask token>\n <mask token>\n\n def __init__(self, datasets, config, sample_per_class):\n self.config = config\n TEXT, LABEL, train, dev, test = datasets[0]\n self.TEXT = TEXT\n self.sample_per_class = sample_per_class\n print('Picking up prototypes')\n self.prototype_text_list = []\n for taskid, (TEXT, LABEL, train, dev, test) in enumerate(datasets):\n prototype_text = []\n if not hasattr(LABEL, 'vocab'):\n self.prototype_text_list.append(prototype_text)\n continue\n for lab_id in range(len(LABEL.vocab.itos)):\n prototype_text.append([])\n for example in train.examples:\n lab_id = LABEL.vocab.stoi[example.label]\n if prototype_text[lab_id] is not None:\n prototype_text[lab_id].append(example.text)\n else:\n prototype_text[lab_id] = [example.text]\n for lab_id in range(len(LABEL.vocab.itos)):\n if len(prototype_text[lab_id]) == 0:\n prototype_text[lab_id].append(['<pad>'])\n if self.sample_per_class >= 1 and self.sample_per_class < len(\n prototype_text[lab_id]):\n prototype_text[lab_id] = prototype_text[lab_id][:self.\n sample_per_class]\n print('Task %s: picked up %s prototypes', (taskid, self.\n sample_per_class))\n self.prototype_text_list.append(prototype_text)\n\n def select_support_set(self, taskid, policy):\n if policy == self.FIXED_FIRST:\n supp_set = self.select_support_set_first(taskid)\n elif policy == self.RANDOM:\n supp_set = self.select_support_set_random(taskid)\n return supp_set\n\n def select_support_set_first(self, taskid):\n prototype_text = self.prototype_text_list[taskid]\n examples_text = []\n for lab_id in range(len(prototype_text)):\n examples_text.append(prototype_text[lab_id][0])\n prototype_matrix = self.TEXT.numericalize(self.TEXT.pad(x for x in\n examples_text), device=self.config.device)\n return prototype_matrix\n <mask token>\n\n def get_average_as_support(self, taskid, mnet_model):\n prototype_text = self.prototype_text_list[taskid]\n prototype_emb_list = []\n for lab_id in range(len(prototype_text)):\n prototype_sent = self.TEXT.numericalize(self.TEXT.pad(x for x in\n prototype_text[lab_id]), device=self.config.device)\n prototype_matrix = mnet_model.get_hidden(prototype_sent)\n prototype_emb_list.append(torch.mean(prototype_matrix, dim=0))\n return torch.cat(prototype_emb_list, 0)\n\n def get_average_and_std_as_support(self, taskid, mnet_model):\n prototype_text = self.prototype_text_list[taskid]\n prototype_emb_list = []\n prototype_std_list = []\n for lab_id in range(len(prototype_text)):\n N = len(prototype_text[lab_id])\n prototype_sent = self.TEXT.numericalize(self.TEXT.pad(x for x in\n prototype_text[lab_id]), device=self.config.device, train=True)\n prototype_matrix = mnet_model.get_hidden(prototype_sent)\n mean_vec = torch.mean(prototype_matrix, dim=0)\n if N > 1:\n std_val = (torch.pow(prototype_matrix, 2).sum() - N * torch\n .pow(mean_vec, 2).sum()) / (N - 1)\n std_val = Variable(std_val.data)\n else:\n std_val = Variable(torch.from_numpy(np.array([1.0]).astype(\n np.float32))).cuda()\n prototype_emb_list.append(mean_vec)\n prototype_std_list.append(std_val)\n return torch.cat(prototype_emb_list, 0), torch.cat(prototype_std_list,\n 0)\n\n def get_average_as_support_sample(self, taskid, mnet_model,\n sample_per_class):\n prototype_text = self.prototype_text_list[taskid]\n prototype_emb_list = []\n for lab_id in range(len(prototype_text)):\n if sample_per_class > len(prototype_text[lab_id]):\n prototype_sent = self.TEXT.numericalize(self.TEXT.pad(x for\n x in prototype_text[lab_id]), 
device=self.config.device)\n else:\n top_ind = range(len(prototype_text[lab_id]))\n random.shuffle(top_ind)\n top_ind = top_ind[:sample_per_class]\n prototype_text_sample = [prototype_text[lab_id][i] for i in\n top_ind]\n prototype_sent = self.TEXT.numericalize(self.TEXT.pad(x for\n x in prototype_text_sample), device=self.config.device)\n prototype_matrix = mnet_model.get_hidden(prototype_sent)\n prototype_emb_list.append(torch.mean(prototype_matrix, dim=0))\n return torch.cat(prototype_emb_list, 0)\n\n def get_average_as_support_large(self, taskid, mnet_model, batchsize):\n prototype_text = self.prototype_text_list[taskid]\n prototype_emb_list = []\n for lab_id in range(len(prototype_text)):\n num_batch = len(prototype_text[lab_id]) / batchsize\n if len(prototype_text[lab_id]) % batchsize != 0 and num_batch == 0:\n num_batch += 1\n lab_emb_sum = []\n for i in range(num_batch):\n batch_text = prototype_text[lab_id][i * batchsize:min((i + \n 1) * batchsize, len(prototype_text[lab_id]))]\n len_text = len(batch_text)\n batch_prototype_sent = self.TEXT.numericalize(self.TEXT.pad\n (x for x in batch_text), device=self.config.device,\n train=True)\n prototype_matrix = mnet_model.get_hidden(batch_prototype_sent)\n prototype_matrix = Variable(prototype_matrix.data)\n lab_emb_sum.append(torch.mean(prototype_matrix, dim=0))\n lab_emb_sum = torch.mean(torch.cat(lab_emb_sum, 0), dim=0)\n prototype_emb_list.append(lab_emb_sum)\n return torch.cat(prototype_emb_list, 0)\n",
"step-3": "<mask token>\n\n\nclass SupportSetManager(object):\n <mask token>\n <mask token>\n\n def __init__(self, datasets, config, sample_per_class):\n self.config = config\n TEXT, LABEL, train, dev, test = datasets[0]\n self.TEXT = TEXT\n self.sample_per_class = sample_per_class\n print('Picking up prototypes')\n self.prototype_text_list = []\n for taskid, (TEXT, LABEL, train, dev, test) in enumerate(datasets):\n prototype_text = []\n if not hasattr(LABEL, 'vocab'):\n self.prototype_text_list.append(prototype_text)\n continue\n for lab_id in range(len(LABEL.vocab.itos)):\n prototype_text.append([])\n for example in train.examples:\n lab_id = LABEL.vocab.stoi[example.label]\n if prototype_text[lab_id] is not None:\n prototype_text[lab_id].append(example.text)\n else:\n prototype_text[lab_id] = [example.text]\n for lab_id in range(len(LABEL.vocab.itos)):\n if len(prototype_text[lab_id]) == 0:\n prototype_text[lab_id].append(['<pad>'])\n if self.sample_per_class >= 1 and self.sample_per_class < len(\n prototype_text[lab_id]):\n prototype_text[lab_id] = prototype_text[lab_id][:self.\n sample_per_class]\n print('Task %s: picked up %s prototypes', (taskid, self.\n sample_per_class))\n self.prototype_text_list.append(prototype_text)\n\n def select_support_set(self, taskid, policy):\n if policy == self.FIXED_FIRST:\n supp_set = self.select_support_set_first(taskid)\n elif policy == self.RANDOM:\n supp_set = self.select_support_set_random(taskid)\n return supp_set\n\n def select_support_set_first(self, taskid):\n prototype_text = self.prototype_text_list[taskid]\n examples_text = []\n for lab_id in range(len(prototype_text)):\n examples_text.append(prototype_text[lab_id][0])\n prototype_matrix = self.TEXT.numericalize(self.TEXT.pad(x for x in\n examples_text), device=self.config.device)\n return prototype_matrix\n\n def select_support_set_random(self, taskid):\n prototype_text = self.prototype_text_list[taskid]\n examples_text = []\n for lab_id in range(len(prototype_text)):\n rand_idx = random.randint(0, len(prototype_text[lab_id]) - 1)\n examples_text.append(prototype_text[lab_id][rand_idx])\n prototype_matrix = self.TEXT.numericalize(self.TEXT.pad(x for x in\n examples_text), device=self.config.device)\n return prototype_matrix\n\n def get_average_as_support(self, taskid, mnet_model):\n prototype_text = self.prototype_text_list[taskid]\n prototype_emb_list = []\n for lab_id in range(len(prototype_text)):\n prototype_sent = self.TEXT.numericalize(self.TEXT.pad(x for x in\n prototype_text[lab_id]), device=self.config.device)\n prototype_matrix = mnet_model.get_hidden(prototype_sent)\n prototype_emb_list.append(torch.mean(prototype_matrix, dim=0))\n return torch.cat(prototype_emb_list, 0)\n\n def get_average_and_std_as_support(self, taskid, mnet_model):\n prototype_text = self.prototype_text_list[taskid]\n prototype_emb_list = []\n prototype_std_list = []\n for lab_id in range(len(prototype_text)):\n N = len(prototype_text[lab_id])\n prototype_sent = self.TEXT.numericalize(self.TEXT.pad(x for x in\n prototype_text[lab_id]), device=self.config.device, train=True)\n prototype_matrix = mnet_model.get_hidden(prototype_sent)\n mean_vec = torch.mean(prototype_matrix, dim=0)\n if N > 1:\n std_val = (torch.pow(prototype_matrix, 2).sum() - N * torch\n .pow(mean_vec, 2).sum()) / (N - 1)\n std_val = Variable(std_val.data)\n else:\n std_val = Variable(torch.from_numpy(np.array([1.0]).astype(\n np.float32))).cuda()\n prototype_emb_list.append(mean_vec)\n prototype_std_list.append(std_val)\n return 
torch.cat(prototype_emb_list, 0), torch.cat(prototype_std_list,\n 0)\n\n def get_average_as_support_sample(self, taskid, mnet_model,\n sample_per_class):\n prototype_text = self.prototype_text_list[taskid]\n prototype_emb_list = []\n for lab_id in range(len(prototype_text)):\n if sample_per_class > len(prototype_text[lab_id]):\n prototype_sent = self.TEXT.numericalize(self.TEXT.pad(x for\n x in prototype_text[lab_id]), device=self.config.device)\n else:\n top_ind = range(len(prototype_text[lab_id]))\n random.shuffle(top_ind)\n top_ind = top_ind[:sample_per_class]\n prototype_text_sample = [prototype_text[lab_id][i] for i in\n top_ind]\n prototype_sent = self.TEXT.numericalize(self.TEXT.pad(x for\n x in prototype_text_sample), device=self.config.device)\n prototype_matrix = mnet_model.get_hidden(prototype_sent)\n prototype_emb_list.append(torch.mean(prototype_matrix, dim=0))\n return torch.cat(prototype_emb_list, 0)\n\n def get_average_as_support_large(self, taskid, mnet_model, batchsize):\n prototype_text = self.prototype_text_list[taskid]\n prototype_emb_list = []\n for lab_id in range(len(prototype_text)):\n num_batch = len(prototype_text[lab_id]) / batchsize\n if len(prototype_text[lab_id]) % batchsize != 0 and num_batch == 0:\n num_batch += 1\n lab_emb_sum = []\n for i in range(num_batch):\n batch_text = prototype_text[lab_id][i * batchsize:min((i + \n 1) * batchsize, len(prototype_text[lab_id]))]\n len_text = len(batch_text)\n batch_prototype_sent = self.TEXT.numericalize(self.TEXT.pad\n (x for x in batch_text), device=self.config.device,\n train=True)\n prototype_matrix = mnet_model.get_hidden(batch_prototype_sent)\n prototype_matrix = Variable(prototype_matrix.data)\n lab_emb_sum.append(torch.mean(prototype_matrix, dim=0))\n lab_emb_sum = torch.mean(torch.cat(lab_emb_sum, 0), dim=0)\n prototype_emb_list.append(lab_emb_sum)\n return torch.cat(prototype_emb_list, 0)\n",
"step-4": "<mask token>\n\n\nclass SupportSetManager(object):\n FIXED_FIRST = 0\n RANDOM = 1\n\n def __init__(self, datasets, config, sample_per_class):\n self.config = config\n TEXT, LABEL, train, dev, test = datasets[0]\n self.TEXT = TEXT\n self.sample_per_class = sample_per_class\n print('Picking up prototypes')\n self.prototype_text_list = []\n for taskid, (TEXT, LABEL, train, dev, test) in enumerate(datasets):\n prototype_text = []\n if not hasattr(LABEL, 'vocab'):\n self.prototype_text_list.append(prototype_text)\n continue\n for lab_id in range(len(LABEL.vocab.itos)):\n prototype_text.append([])\n for example in train.examples:\n lab_id = LABEL.vocab.stoi[example.label]\n if prototype_text[lab_id] is not None:\n prototype_text[lab_id].append(example.text)\n else:\n prototype_text[lab_id] = [example.text]\n for lab_id in range(len(LABEL.vocab.itos)):\n if len(prototype_text[lab_id]) == 0:\n prototype_text[lab_id].append(['<pad>'])\n if self.sample_per_class >= 1 and self.sample_per_class < len(\n prototype_text[lab_id]):\n prototype_text[lab_id] = prototype_text[lab_id][:self.\n sample_per_class]\n print('Task %s: picked up %s prototypes', (taskid, self.\n sample_per_class))\n self.prototype_text_list.append(prototype_text)\n\n def select_support_set(self, taskid, policy):\n if policy == self.FIXED_FIRST:\n supp_set = self.select_support_set_first(taskid)\n elif policy == self.RANDOM:\n supp_set = self.select_support_set_random(taskid)\n return supp_set\n\n def select_support_set_first(self, taskid):\n prototype_text = self.prototype_text_list[taskid]\n examples_text = []\n for lab_id in range(len(prototype_text)):\n examples_text.append(prototype_text[lab_id][0])\n prototype_matrix = self.TEXT.numericalize(self.TEXT.pad(x for x in\n examples_text), device=self.config.device)\n return prototype_matrix\n\n def select_support_set_random(self, taskid):\n prototype_text = self.prototype_text_list[taskid]\n examples_text = []\n for lab_id in range(len(prototype_text)):\n rand_idx = random.randint(0, len(prototype_text[lab_id]) - 1)\n examples_text.append(prototype_text[lab_id][rand_idx])\n prototype_matrix = self.TEXT.numericalize(self.TEXT.pad(x for x in\n examples_text), device=self.config.device)\n return prototype_matrix\n\n def get_average_as_support(self, taskid, mnet_model):\n prototype_text = self.prototype_text_list[taskid]\n prototype_emb_list = []\n for lab_id in range(len(prototype_text)):\n prototype_sent = self.TEXT.numericalize(self.TEXT.pad(x for x in\n prototype_text[lab_id]), device=self.config.device)\n prototype_matrix = mnet_model.get_hidden(prototype_sent)\n prototype_emb_list.append(torch.mean(prototype_matrix, dim=0))\n return torch.cat(prototype_emb_list, 0)\n\n def get_average_and_std_as_support(self, taskid, mnet_model):\n prototype_text = self.prototype_text_list[taskid]\n prototype_emb_list = []\n prototype_std_list = []\n for lab_id in range(len(prototype_text)):\n N = len(prototype_text[lab_id])\n prototype_sent = self.TEXT.numericalize(self.TEXT.pad(x for x in\n prototype_text[lab_id]), device=self.config.device, train=True)\n prototype_matrix = mnet_model.get_hidden(prototype_sent)\n mean_vec = torch.mean(prototype_matrix, dim=0)\n if N > 1:\n std_val = (torch.pow(prototype_matrix, 2).sum() - N * torch\n .pow(mean_vec, 2).sum()) / (N - 1)\n std_val = Variable(std_val.data)\n else:\n std_val = Variable(torch.from_numpy(np.array([1.0]).astype(\n np.float32))).cuda()\n prototype_emb_list.append(mean_vec)\n prototype_std_list.append(std_val)\n return 
torch.cat(prototype_emb_list, 0), torch.cat(prototype_std_list,\n 0)\n\n def get_average_as_support_sample(self, taskid, mnet_model,\n sample_per_class):\n prototype_text = self.prototype_text_list[taskid]\n prototype_emb_list = []\n for lab_id in range(len(prototype_text)):\n if sample_per_class > len(prototype_text[lab_id]):\n prototype_sent = self.TEXT.numericalize(self.TEXT.pad(x for\n x in prototype_text[lab_id]), device=self.config.device)\n else:\n top_ind = range(len(prototype_text[lab_id]))\n random.shuffle(top_ind)\n top_ind = top_ind[:sample_per_class]\n prototype_text_sample = [prototype_text[lab_id][i] for i in\n top_ind]\n prototype_sent = self.TEXT.numericalize(self.TEXT.pad(x for\n x in prototype_text_sample), device=self.config.device)\n prototype_matrix = mnet_model.get_hidden(prototype_sent)\n prototype_emb_list.append(torch.mean(prototype_matrix, dim=0))\n return torch.cat(prototype_emb_list, 0)\n\n def get_average_as_support_large(self, taskid, mnet_model, batchsize):\n prototype_text = self.prototype_text_list[taskid]\n prototype_emb_list = []\n for lab_id in range(len(prototype_text)):\n num_batch = len(prototype_text[lab_id]) / batchsize\n if len(prototype_text[lab_id]) % batchsize != 0 and num_batch == 0:\n num_batch += 1\n lab_emb_sum = []\n for i in range(num_batch):\n batch_text = prototype_text[lab_id][i * batchsize:min((i + \n 1) * batchsize, len(prototype_text[lab_id]))]\n len_text = len(batch_text)\n batch_prototype_sent = self.TEXT.numericalize(self.TEXT.pad\n (x for x in batch_text), device=self.config.device,\n train=True)\n prototype_matrix = mnet_model.get_hidden(batch_prototype_sent)\n prototype_matrix = Variable(prototype_matrix.data)\n lab_emb_sum.append(torch.mean(prototype_matrix, dim=0))\n lab_emb_sum = torch.mean(torch.cat(lab_emb_sum, 0), dim=0)\n prototype_emb_list.append(lab_emb_sum)\n return torch.cat(prototype_emb_list, 0)\n",
"step-5": "import random\nimport torch\nimport numpy as np\nfrom torch.autograd import Variable\n\nclass SupportSetManager(object):\n FIXED_FIRST = 0\n RANDOM = 1\n def __init__(self, datasets, config, sample_per_class):\n self.config = config\n (TEXT, LABEL, train, dev, test) = datasets[0]\n self.TEXT = TEXT\n self.sample_per_class = sample_per_class\n\n print('Picking up prototypes')\n self.prototype_text_list = []\n\n for taskid, (TEXT, LABEL, train, dev, test) in enumerate(datasets):\n prototype_text = []\n #print taskid, LABEL.vocab\n if not hasattr(LABEL, 'vocab'):\n self.prototype_text_list.append(prototype_text)\n continue\n for lab_id in range(len(LABEL.vocab.itos)):\n prototype_text.append([])\n for example in train.examples:\n lab_id = LABEL.vocab.stoi[example.label]\n if prototype_text[lab_id] is not None:\n prototype_text[lab_id].append(example.text)\n else:\n prototype_text[lab_id] = [example.text]\n\n for lab_id in range(len(LABEL.vocab.itos)):\n if len(prototype_text[lab_id]) == 0:\n prototype_text[lab_id].append(['<pad>'])\n\n if self.sample_per_class >= 1 and self.sample_per_class < len(prototype_text[lab_id]):\n prototype_text[lab_id] = prototype_text[lab_id][:self.sample_per_class]\n\n print('Task %s: picked up %s prototypes', (taskid, self.sample_per_class))\n self.prototype_text_list.append(prototype_text)\n\n def select_support_set(self, taskid, policy):\n if policy == self.FIXED_FIRST:\n supp_set = self.select_support_set_first(taskid)\n elif policy == self.RANDOM:\n supp_set = self.select_support_set_random(taskid)\n return supp_set\n\n def select_support_set_first(self, taskid):\n prototype_text = self.prototype_text_list[taskid]\n\n examples_text = []\n for lab_id in range(len(prototype_text)):\n examples_text.append(prototype_text[lab_id][0])\n\n prototype_matrix = self.TEXT.numericalize(\n self.TEXT.pad(x for x in examples_text),\n device=self.config.device)\n #if taskid == 0: #TODO test the consistency of the first example\n # print examples_text\n # print prototype_matrix\n\n return prototype_matrix\n\n def select_support_set_random(self, taskid, ):\n prototype_text = self.prototype_text_list[taskid]\n\n examples_text = []\n for lab_id in range(len(prototype_text)):\n rand_idx = random.randint(0, len(prototype_text[lab_id]) - 1)\n examples_text.append(prototype_text[lab_id][rand_idx])\n\n prototype_matrix = self.TEXT.numericalize(\n self.TEXT.pad(x for x in examples_text),\n device=self.config.device)\n #if taskid == 0: #TODO test the consistency of the first example\n # print examples_text\n # print prototype_matrix\n\n return prototype_matrix\n\n def get_average_as_support(self, taskid, mnet_model):\n prototype_text = self.prototype_text_list[taskid]\n\n prototype_emb_list = []\n for lab_id in range(len(prototype_text)):\n prototype_sent = self.TEXT.numericalize(\n self.TEXT.pad(x for x in prototype_text[lab_id]),\n device=self.config.device)\n\n prototype_matrix = mnet_model.get_hidden(prototype_sent)\n prototype_emb_list.append(torch.mean(prototype_matrix, dim=0))\n #print prototype_emb_list\n #return torch.cat(prototype_emb_list, dim=0) #works for the new pytorch version\n return torch.cat(prototype_emb_list, 0)\n\n def get_average_and_std_as_support(self, taskid, mnet_model):\n prototype_text = self.prototype_text_list[taskid]\n\n prototype_emb_list = []\n prototype_std_list = []\n for lab_id in range(len(prototype_text)):\n N = len(prototype_text[lab_id])\n prototype_sent = self.TEXT.numericalize(\n self.TEXT.pad(x for x in prototype_text[lab_id]),\n 
device=self.config.device, train=True)\n\n prototype_matrix = mnet_model.get_hidden(prototype_sent)\n mean_vec = torch.mean(prototype_matrix, dim=0)\n if N > 1:\n #std_val = torch.sqrt((torch.pow(prototype_matrix, 2).sum() - N * torch.pow(mean_vec, 2).sum()) / (N - 1))\n std_val = (torch.pow(prototype_matrix, 2).sum() - N * torch.pow(mean_vec, 2).sum()) / (N - 1)\n std_val = Variable(std_val.data)\n else:\n std_val = Variable(torch.from_numpy(np.array([1.0]).astype(np.float32))).cuda()\n prototype_emb_list.append(mean_vec)\n prototype_std_list.append(std_val)\n #print prototype_std_list\n return torch.cat(prototype_emb_list, 0), torch.cat(prototype_std_list, 0)\n\n def get_average_as_support_sample(self, taskid, mnet_model, sample_per_class):\n prototype_text = self.prototype_text_list[taskid]\n\n prototype_emb_list = []\n for lab_id in range(len(prototype_text)):\n if sample_per_class > len(prototype_text[lab_id]):\n prototype_sent = self.TEXT.numericalize(\n self.TEXT.pad(x for x in prototype_text[lab_id]),\n device=self.config.device)\n else:\n top_ind = range(len(prototype_text[lab_id]))\n random.shuffle(top_ind)\n top_ind = top_ind[:sample_per_class]\n prototype_text_sample = [prototype_text[lab_id][i] for i in top_ind]\n prototype_sent = self.TEXT.numericalize(\n self.TEXT.pad(x for x in prototype_text_sample),\n device=self.config.device)\n\n prototype_matrix = mnet_model.get_hidden(prototype_sent)\n prototype_emb_list.append(torch.mean(prototype_matrix, dim=0))\n return torch.cat(prototype_emb_list, 0)\n\n def get_average_as_support_large(self, taskid, mnet_model, batchsize):\n prototype_text = self.prototype_text_list[taskid]\n\n prototype_emb_list = []\n for lab_id in range(len(prototype_text)):\n num_batch = len(prototype_text[lab_id]) / batchsize\n if len(prototype_text[lab_id]) % batchsize != 0 and num_batch == 0:\n num_batch += 1\n lab_emb_sum = []\n for i in range(num_batch):\n #print i\n #print len(prototype_text[lab_id]), i*batchsize, (i+1) * batchsize\n batch_text = prototype_text[lab_id][i * batchsize : min((i+1) * batchsize, len(prototype_text[lab_id]))]\n #print batch_text\n len_text = len(batch_text)\n #print len_text\n batch_prototype_sent = self.TEXT.numericalize(\n self.TEXT.pad(x for x in batch_text),\n device=self.config.device, train=True)\n #print batch_prototype_sent\n prototype_matrix = mnet_model.get_hidden(batch_prototype_sent)\n prototype_matrix = Variable(prototype_matrix.data)\n\n #prototype_emb_list.append(torch.mean(prototype_matrix, dim=0))\n #prototype_emb_list.append(torch.sum(prototype_matrix, dim=0) / len_text)\n #break\n #TODO: the following three lines not equivalent to the two lines below\n # lab_emb_sum.append(torch.sum(prototype_matrix, dim=0))\n #lab_emb_sum = torch.sum( torch.cat(lab_emb_sum, 0), dim=0 )\n #lab_emb_sum /= len(prototype_text[lab_id])\n lab_emb_sum.append(torch.mean(prototype_matrix, dim=0))\n lab_emb_sum = torch.mean( torch.cat(lab_emb_sum, 0), dim=0 )\n prototype_emb_list.append(lab_emb_sum)\n return torch.cat(prototype_emb_list, 0)\n\n\n",
"step-ids": [
6,
8,
9,
10,
12
]
}
|
[
6,
8,
9,
10,
12
] |
# -*- coding: utf-8 -*-
# START HERE BELOW
p1 = float(input('digite o p1:'))
c1 = float(input('digite o c1:'))
p2 = float(input('digite o p2:'))
c2 = float(input('digite o c2:'))

if p1 * c1 == p2 * c2:
    print('0')
elif p1 * c1 > p2 * c2:
    print('-1')
else:
    print('1')
|
normal
|
{
"blob_id": "210fcb497334ad8bf5433b917fc199c3e22f0f6e",
"index": 6978,
"step-1": "# -*- coding: utf-8 -*-\n#COMECE AQUI ABAIXO\n\np1=float(input('digite o p1:'))\nc1=float(input('digite o c1:'))\np2=float(input('digite o p2:'))\nc2=float(input('digite o c2:'))\n\nif p1*c1=p2*c2:\n print('O')\n\nif pi*c1>p2*c2:\n print('-1')\n\nelse:\n print('1')",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
bind = '0.0.0.0:' + str(os.environ.get('MAESTRO_PORT', 5005))
workers = os.environ.get('MAESTRO_GWORKERS', 2)
<|reserved_special_token_1|>
import os
bind = '0.0.0.0:' + str(os.environ.get('MAESTRO_PORT', 5005))
workers = os.environ.get('MAESTRO_GWORKERS', 2)
<|reserved_special_token_1|>
import os
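
# Gunicorn settings: bind on all interfaces at MAESTRO_PORT (default 5005)
# and size the worker pool from MAESTRO_GWORKERS (default 2).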
bind = "0.0.0.0:" + str(os.environ.get("MAESTRO_PORT", 5005))
workers = os.environ.get("MAESTRO_GWORKERS", 2)
|
flexible
|
{
"blob_id": "818e6842d4a1f8978ec14bca06981ec933c00376",
"index": 6280,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nbind = '0.0.0.0:' + str(os.environ.get('MAESTRO_PORT', 5005))\nworkers = os.environ.get('MAESTRO_GWORKERS', 2)\n",
"step-3": "import os\nbind = '0.0.0.0:' + str(os.environ.get('MAESTRO_PORT', 5005))\nworkers = os.environ.get('MAESTRO_GWORKERS', 2)\n",
"step-4": "import os\n\nbind = \"0.0.0.0:\" + str(os.environ.get(\"MAESTRO_PORT\", 5005))\nworkers = os.environ.get(\"MAESTRO_GWORKERS\", 2)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from django import forms
from django.forms import ModelForm, fields, widgets
from .models import NewsStory
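

# ModelForm for NewsStory; pub_date renders as an HTML5 date input styled
# with Bootstrap's form-control class.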
class StoryForm(ModelForm):
class Meta:
model = NewsStory
fields = ['title' , 'pub_date' , 'content']
widgets = {
'pub_date': forms.DateInput(format=('%m/%d/%Y'), attrs={'class':'form-control', 'placeholder':'select a date', 'type':'date'}),
}
|
normal
|
{
"blob_id": "47a5ddcea2f6d8ce80793192d26c98ccc0e0340d",
"index": 1771,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass StoryForm(ModelForm):\n\n\n class Meta:\n model = NewsStory\n fields = ['title', 'pub_date', 'content']\n widgets = {'pub_date': forms.DateInput(format='%m/%d/%Y', attrs={\n 'class': 'form-control', 'placeholder': 'select a date', 'type':\n 'date'})}\n",
"step-3": "from django import forms\nfrom django.forms import ModelForm, fields, widgets\nfrom .models import NewsStory\n\n\nclass StoryForm(ModelForm):\n\n\n class Meta:\n model = NewsStory\n fields = ['title', 'pub_date', 'content']\n widgets = {'pub_date': forms.DateInput(format='%m/%d/%Y', attrs={\n 'class': 'form-control', 'placeholder': 'select a date', 'type':\n 'date'})}\n",
"step-4": "from django import forms\nfrom django.forms import ModelForm, fields, widgets\nfrom .models import NewsStory\n\nclass StoryForm(ModelForm):\n class Meta:\n model = NewsStory\n fields = ['title' , 'pub_date' , 'content']\n widgets = {\n 'pub_date': forms.DateInput(format=('%m/%d/%Y'), attrs={'class':'form-control', 'placeholder':'select a date', 'type':'date'}),\n\n }\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import requests
import json, csv
import pandas as pd
API_KEY = 'AIzaSyALrKc3-W0u_Ku-J2OpyjnqFhV5wKlwKGs'
list_video_id = ['7cmvABXyUC0', '9eH-7x7swEM', 'JndzGxbwvG0', 'l0P5_E6J_g0']
fieldnames = ['videoid', 'viewCount', 'likeCount', 'dislikeCount', 'favoriteCount', 'commentCount']
rows = []
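
# Query the YouTube Data API v3 "statistics" part for each video id and
# collect one flat row per video for the CSV export.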
for video_id in list_video_id:
url = "https://www.googleapis.com/youtube/v3/videos?id=" + video_id + "&part=statistics&key=" + API_KEY
response = requests.get(url).json()
for i in response['items']:
rows.append({"videoid": i['id'],
"viewCount": i['statistics']['viewCount'],
"likeCount": i['statistics']['likeCount'],
"dislikeCount": i['statistics']['dislikeCount'],
"favoriteCount": i['statistics']['favoriteCount'],
"commentCount": i['statistics']['commentCount']})
print(rows)
with open(r'get_api_youtube.csv', 'w', encoding='UTF8', newline='') as f:
writer = csv.DictWriter(f, fieldnames=fieldnames)
writer.writeheader()
for j in rows:
writer.writerow(j)
#!/usr/bin/env python3
import sys
sys.path.insert(0, '../../common/python/')
from primality import prime_factors
"""
Euler's product formula (the original comment wrote "sum"; these are products
over the distinct primes p dividing n):
phi(n) = n * prod_{p|n} (1 - 1/p)
n/phi(n) = prod_{p|n} p/(p - 1)
so n/phi(n) depends only on which primes divide n, not on their exponents.
"""

def n_over_phi(n):
    top = 1
    bot = 1
    pfactors = prime_factors(n)
    for p in pfactors:  # iterate distinct primes; exponents do not matter here
        top *= p
        bot *= (p - 1)
    return top / bot

def maximise_n_over_phi(upto):
    # Scan all n <= upto and keep the n with the largest n/phi(n).
    max_value = 0
    max_n = 0
    for n in range(2, upto + 1):
        n_over_phi_ = n_over_phi(n)
        if n_over_phi_ > max_value:
            max_value = n_over_phi_
            max_n = n
    return max_n

def main():
    print(maximise_n_over_phi(1000000))

if __name__ == '__main__':
    main()
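A minimal brute-force check of the identity, and of why primorials maximise n/phi(n); this sketch is self-contained and does not need the primality module:

from math import gcd

def phi_bruteforce(n):
    # phi(n) counts the integers in [1, n] that are coprime to n.
    return sum(1 for k in range(1, n + 1) if gcd(n, k) == 1)

# n/phi(n) grows exactly when a new prime enters the factorisation, so it is
# maximised by primorials: 2*3 = 6, 2*3*5 = 30, 2*3*5*7 = 210, ...
for n in (6, 30, 210):
    print(n, n / phi_bruteforce(n))  # -> 3.0, 3.75, 4.375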
# Data source: Kaggle
# Data overview: 511 records; distinguish the type of glass from various attributes (chemical elements)
# Prediction model: binary class
# Machine-learning model applied: deep multilayer perceptron network
# Training dataset: 160 records
# Validation dataset: (count left blank in the original)
# Test dataset: not considered, since no test set could be secured from the collected data
# Input data: 10 fields
# Hidden layers: 2
# Activation functions used
# - Hidden layer 1: ReLU
# - Hidden layer 2: ReLU
# - Output layer: Softmax
# Loss function used: categorical_crossentropy
# Optimizer used: rmsprop (the code below actually compiles with adam)
# TensorFlow version: 2.0.0
# Python version: 3.7.4
import pandas as pd
from datetime import datetime
from sklearn.model_selection import train_test_split
import numpy as np
from keras.models import Sequential
from keras.layers import Dense
from keras.utils import to_categorical
np.random.seed(5)
match_dic={}
zoo_class = pd.read_csv('zoo.csv',sep=',',header=0)
zoo_class.columns = zoo_class.columns.str.replace(' ','_')
# Identify all independent variables (feature columns)
input_data_header = list(zoo_class.columns.difference(["animal_name","class_type"]))
input_data_number = len(input_data_header)
label = zoo_class["class_type"]
start_time = datetime.now()
train_data, test_data, train_label, test_label = train_test_split(zoo_class[input_data_header],label)
train_label = to_categorical(train_label, num_classes=7)
test_label = to_categorical(test_label, num_classes=7)
# Build the model
model = Sequential()
model.add(Dense(64, input_dim=input_data_number, activation='relu'))
model.add(Dense(64, activation='relu'))
# model.add(Dense(6, activation='sigmoid'))
model.add(Dense(7, activation='softmax'))
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
# model.compile(optimizer='rmsprop', loss='binary_crossentropy', metrics=['accuracy'])
# 4. Train the model
hist = model.fit(train_data, train_label, epochs=20000, batch_size=64, validation_data=(test_data, test_label))
# hist = model.fit(train_data, train_label, epochs=1000, batch_size=64)
end_time = datetime.now()
# 5. Inspect the training history
import matplotlib.pyplot as plt
fig, loss_ax = plt.subplots()
acc_ax = loss_ax.twinx()
loss_ax.plot(hist.history['loss'], 'y', label='train loss')
loss_ax.plot(hist.history['val_loss'], 'r', label='val loss')
# acc_ax.plot(hist.history['acc'], 'b', label='train acc')
acc_ax.plot(hist.history['accuracy'], 'b', label='train acc')
# acc_ax.plot(hist.history['val_acc'], 'g', label='val acc')
acc_ax.plot(hist.history['val_accuracy'],'g', label='val acc')
loss_ax.set_xlabel('epoch')
loss_ax.set_ylabel('loss')
acc_ax.set_ylabel('accuracy')
loss_ax.legend(loc='upper left')
acc_ax.legend(loc='lower left')
plt.show()
# 6. Evaluate the model
loss_and_metrics = model.evaluate(test_data, test_label, batch_size=32)
print('loss_and_metrics : ' + str(loss_and_metrics))
scores = model.evaluate(test_data, test_label)
print("%s: %.2f%%"%(model.metrics_names[1],scores[1]*100))
#import getCanditatemap() from E_18_hacksub
import collections

# Order taken from https://mdickens.me/typing/theory-of-letter-frequency.html,
# with space added at the start, 69 characters overall.
ETAOIN = """ etaoinsrhldcumgyfpwb.,vk0-'x)(1j2:q"/5!?z346879%[]*=+|_;\>$#^&@<~{}`"""
length = 128

def getCanditatemap():
    # One zeroed counter per character in the 7-bit ASCII range.
    # https://stackoverflow.com/questions/2241891/how-to-initialize-a-dict-with-keys-from-a-list-and-empty-value-in-python/2241904
    return dict.fromkeys((chr(i) for i in range(length)), 0)

def getLettercount(mess):
    charcount = getCanditatemap()
    for char in mess:
        if char in charcount:
            charcount[char] += 1
    return charcount

def getFreqOrder(mess):
    # First, get a dictionary of each character and its frequency count.
    lettertofreq = getLettercount(mess)
    # Second, map each frequency count to the character(s) with that count.
    freqtochar = {}
    for i in range(length):
        i = chr(i)
        if lettertofreq[i] not in freqtochar:  # frequency not seen yet
            freqtochar[lettertofreq[i]] = [i]  # start a bucket, else append
        else:
            freqtochar[lettertofreq[i]].append(i)
    # Within each bucket, sort the characters in reverse ETAOIN order.
    for freq in freqtochar:
        freqtochar[freq].sort(key=ETAOIN.find, reverse=True)
        freqtochar[freq] = ''.join(freqtochar[freq])  # convert to string
    # Sort the buckets by descending frequency and join their characters.
    freqpairs = collections.OrderedDict(sorted(freqtochar.items(), reverse=True))
    return ''.join(freqpairs.values())

def englishFreqMatch(message):
    # Lower-case first: only the frequency ranking matters, not letter case.
    freqOrder = getFreqOrder(message.lower())
    # The original tested `ETAOIN[:16] or ETAOIN[-16:]`, but `or` between two
    # non-empty strings simply returns the first operand, so the least-common
    # end of the ranking was never checked; score both ends separately.
    matchscore = 0
    for commletter in ETAOIN[:16]:
        if commletter in freqOrder[:16]:
            matchscore += 1
    for rareletter in ETAOIN[-16:]:
        if rareletter in freqOrder[-16:]:
            matchscore += 1
    return matchscore
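A minimal usage sketch for the scorer above; the sample string is made up for illustration, and a higher score means the character-frequency ranking looks more like typical English.

sample = 'The quick brown fox jumps over the lazy dog, said the plain English text.'
print(englishFreqMatch(sample))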
class Solution(object):

    def oddCells(self, m, n, indices):
        """
        :type m: int
        :type n: int
        :type indices: List[List[int]]
        :rtype: int
        """
        # Count how many times each row and each column is incremented.
        indice_x_dict = {}
        indice_y_dict = {}
        for x, y in indices:
            indice_x_dict[x] = indice_x_dict.get(x, 0) + 1
            indice_y_dict[y] = indice_y_dict.get(y, 0) + 1
        # A cell is odd iff exactly one of its row and column was flipped an
        # odd number of times.
        x_num = 0
        y_num = 0
        for key, item in indice_x_dict.items():
            if item % 2 == 1:
                x_num += 1
        for key, item in indice_y_dict.items():
            if item % 2 == 1:
                y_num += 1
        # Odd rows contribute n cells each and odd columns m cells each; cells
        # where an odd row meets an odd column become even again and were
        # counted twice, so subtract them twice.
        return x_num * n + y_num * m - x_num * y_num * 2
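A quick check with 2 rows, 3 columns, and increments at [0, 1] and [1, 1]: both rows are flipped once (odd) and column 1 twice (even), so all six cells end up odd.

print(Solution().oddCells(2, 3, [[0, 1], [1, 1]]))  # -> 6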
import tkinter.ttk
import tkinter as tk


def update_info(info_t, data):
    # Placeholder: refreshing the tree rows in place is not implemented yet.
    pass


def path_to_string(s):
    # Join (x, y) waypoints into a "x1, y1 > x2, y2 > ..." string. The original
    # appended s[i] twice per step and dropped the last point; a join fixes both.
    return ' > '.join('%s, %s' % (p[0], p[1]) for p in s)


def read_file_formatting(self, patrol, target):
    # Build one row per patrol ship: x, y, current detections, total
    # detections, one detection-time slot per target, then knot, detection
    # range, and path string. (Kept as in the original: `self` is expected to
    # provide path_to_string.)
    data = []
    for i in range(len(patrol)):
        data.append([])
        for j in range(len(target) + 7):
            data[i].append(0)
    for i in range(len(patrol)):
        data[i][0] = patrol[i].get_x()
        data[i][1] = patrol[i].get_y()
        data[i][2] = 0  # current detections
        data[i][3] = 0  # total detections
        data[i][-1] = self.path_to_string(patrol[i].get_path())  # path
        data[i][-2] = patrol[i].get_detection_dist()  # detection range
        data[i][-3] = patrol[i].get_knot()  # knot
        for j in range(len(target)):
            data[i][4 + j] = 0  # detection time per target
    return data


def init_info(frame_i, init_data):
    # Build the ship-info panel: a label plus a Treeview with one column per
    # ship (the column headings "함정 N" mean "Ship N").
    info_lbl = tk.Label(frame_i, text="Ship Info")
    info_lbl.pack()
    info_tree = tk.ttk.Treeview(frame_i, columns=["함정 1", "함정 2", "함정 3"],
                                displaycolumns=["함정 1", "함정 2", "함정 3"])
    info_tree.pack()

    # Column #0 holds the attribute name ("속성" means "attribute");
    # columns #1..#3 hold one ship each.
    info_tree.column("#0", width=30, anchor="center")
    info_tree.heading("#0", text="속성")
    for col in range(1, 4):
        info_tree.column("#%d" % col, width=70, anchor="w")
        info_tree.heading("#%d" % col, text="함정 %d" % col, anchor="center")

    # One tree row per attribute; each row's values are taken column-wise from
    # init_data. (The original repeated this block ten times and appended a
    # stray leftover loop index to most iids; stable iids are used instead.)
    attributes = [("X", "x"), ("Y", "y"), ("Now_d", "nd"), ("Total_d", "td"),
                  ("T1", "t1"), ("T2", "t2"), ("T3", "t3"), ("Knot", "knot"),
                  ("D_range", "dr"), ("Path", "path")]
    for row, (text, iid) in enumerate(attributes):
        values = [init_data[i][row] for i in range(len(init_data))]
        info_tree.insert('', "end", text=text, values=values, iid=iid)

    return info_tree
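A minimal smoke test for the panel, assuming a display is available; the three dummy rows and their values are made up for illustration.

if __name__ == '__main__':
    root = tk.Tk()
    frame = tk.Frame(root)
    frame.pack()
    # Three hypothetical ships: x, y, now_d, total_d, T1..T3, knot, range, path.
    dummy = [[i, i * 2, 0, 0, 0, 0, 0, 12, 5, '0, 0 > 1, 1'] for i in range(3)]
    init_info(frame, dummy)
    root.mainloop()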
# Convert a weight entered in kilograms to pounds (1 lb is about 0.45 kg).
quilogramas = float(input("Insira o peso em Kg:"))  # "Enter the weight in kg:"
# The original wrote "0 , 45" (a comma decimal), which built a tuple and
# divided by zero; the divisor must be the float 0.45.
libras = quilogramas / 0.45
print(libras)
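For reference, 1 kg is about 2.2046 lb, so dividing by 0.45 is a rounded approximation: 70 kg comes out as roughly 155.6 lb instead of the more precise 154.3.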
<|reserved_special_token_0|>
def workspaceLogon(account, password):
    logger.info('logon: starting simulated workspace login')
postUrl = '%sworkspace/logon' % serverUrl
postData = {'sso_username': account, 'sso_password': password}
try:
responseRes = s.post(postUrl, data=postData)
except Exception as e:
logger.error(e)
        raise RuntimeError('Login failed: network error, please check the server URL configuration')
    logger.info('Login response: ' + responseRes.text)
sso_token = responseRes.text.split('[')[2].split(']')[0]
assertertoken = responseRes.text.split('[')[6].split(']')[0]
assertertoken_ = {'ora_epm_ctg': assertertoken}
updateHeaders(assertertoken_)
token = {'_sso_token': sso_token}
updateHeaders(token)
CSRF = responseRes.headers.get('X-ORACLE-BPMUI-CSRF')
csrf_ = {'X-ORACLE-BPMUI-CSRF': CSRF}
updateHeaders(csrf_)
ECID = responseRes.headers.get('X-ORACLE-DMS-ECID')
h = {'X-ORACLE-DMS-ECID': ECID}
updateHeaders(h)
def updateHeaders(h):
    logger.info(f'updating request headers: {h}')
s.headers.update(h)
def request_dyn():
    logger.info('dyn: starting test request')
postUrl = '%s/raframework/browse/dyn' % serverUrl
postData = {'page': '/conf/CDSConfig.jsp', 'amp': '', 'action':
'returnXML', 'LOCALE_LANGUAGE': 'en_US', 'rightToLeft': 'false',
'accessibilityMode': 'false', 'themeSelection': 'Skyros',
'sso_token': s.headers.get('_sso_token')}
responseRes = s.post(postUrl, data=postData)
    logger.info(f'dyn: response headers: {responseRes.headers}')
def request_planning_session(plan_name):
"""
"accessibilityMode": "false",
"bpm.contentheight": "621",
"bpm.contentwidth": "1314",
"bpm.objectpaletteheight": "648",
"bpm.objectpalettewidth": "207",
"cluster": "PLANNING_LWA",
"instance": "7",
"LOCALE_LANGUAGE": "zh_CN",
"mru_id": "PLANNING_LWA_JSTI1:application",
"repository_token": "59d9b714b22a35fb616dd3c05c5850d56b12522a9561499e9ea22afd918b6d36ea703f19668538504f86305c84f95441a1daf4cac09725703738d5073524871af0489411df16d2bb8f5d4726acdcc389b45e9e6ff00482249c53c1886ca68bfc090fcfbd365243996436f5bbe3affd9c87f6d5e8b7626e59adaeebcc4a89a66ef9725d8d4218c8b0c15912455a2690fcd2391a71806767f05fe66b395dda3e74b75ffa16e80c7814c47657dbc5d652da1044edc74ff20d6e604bdd733542457c3befca52c0700d758445b00ad519d0e8dee43e40cb78e070caca6b7c7a56008b2cbad75e83c7c8454f93177992f9166721331db1e11e48a113a51b3ebc2a79f1d74199127183d7708c47a3ff71663d9d",
"rightToLeft": "false",
"sourceApp": "JSTI1",
"sso_token": "sJIsuVcoOMtHD5CgUaVLmuo4SfCatQy4dowOxaTF0cj1CDqPrPW8YYKvn4nU5rsBYG1yLUChBU/ndO+3pDhwFcRbHJZmaiUOnyFhEh97A5xDXatOpkhIPx4CW+ygHNQlmKrbgUZEmJBgwNT4lcBuDPCZiodPZBo3zCkrSMLQcq0R8qoX6nHvfSVW3ep86WHDyJ859v9OCxcbo4FD4tSv4fTdHGdkGtQaRpdMtuSGtvY2hB+Z7MPEHqkhkIVAt0WWVplND5rUdF5yrLVsywYLWq7I2GH3/UVnwSgsmQy/psjChbnHkzqAcxNg837XRFI1EZBVxaGvdJw6U2mu3qlD29oYi2C/UqwODIjXGtj/st29j6fvd3lJHpsneutkVoG0E/mohFU+JzQaCnopeA+L3A8pORvkfwSyqhURqiLLHS0=",
"themeSelection": "Skyros",
"""
    logger.info('planning_session: starting test request')
postUrl = ('%s/HyperionPlanning/modules/com/hyperion/planning/Adf.jsp' %
serverUrl)
postData = {'accessibilityMode': 'false', 'bpm.contentheight': '621',
'bpm.contentwidth': '1314', 'bpm.objectpaletteheight': '648',
'bpm.objectpalettewidth': '207', 'cluster': 'PLANNING_LWA',
'instance': '7', 'LOCALE_LANGUAGE': 'zh_CN', 'mru_id':
f'PLANNING_LWA_{plan_name}:application', 'repository_token': s.
cookies.get('ORA_EPMWS_session'), 'rightToLeft': 'false',
'sourceApp': plan_name, 'sso_token': s.headers.get('_sso_token'),
'themeSelection': 'Skyros'}
responseRes = s.post(postUrl, data=postData)
    logger.info(f'planning_session: response cookies: {responseRes.cookies}')
s.cookies.set('ORA_HP_MRUApplication', plan_name, path=
'/HyperionPlanning/', domain=host)
s.cookies.set('ORA_HP_MRUUsername', s.cookies.get('ORA_EPMWS_User'),
path='/HyperionPlanning/', domain=host)
    logger.info('current headers: ' + str(s.headers))
    logger.info('current cookies: ' + str(s.cookies))
<|reserved_special_token_0|>
def request_planning_table():
s.headers['Adf-Ads-Page-Id'] = '2'
s.headers['Adf-Rich-Message'] = 'true'
url = (serverUrl +
'/HyperionPlanning/faces/PlanningCentral?_adf.ctrl-state=9gxaes0ha_55?_adf.ctrl-state='
)
response = s.post(url + '14hssan6gi_4', data=
'p:r:0:pc1:searchName=&org.apache.myfaces.trinidad.faces.FORM=f1&javax.faces.ViewState=!-9xx6pbfv3&oracle.adf.view.rich.DELTAS=%7Bp%3Ar%3A0%3Apc1%3AformTbl%3D%7BviewportSize%3D27%2Crows%3D33%2CscrollTopRowKey%7Cp%3D0%7D%2Cp%3AformTbl%3D%7BselectedRowKeys%3D0%7D%7D&event=p%3AloadBtn&event.p:loadBtn=%3Cm+xmlns%3D%22http%3A%2F%2Foracle.com%2FrichClient%2Fcomm%22%3E%3Ck+v%3D%22_custom%22%3E%3Cb%3E1%3C%2Fb%3E%3C%2Fk%3E%3Ck+v%3D%22module%22%3E%3Cs%3Eenterdata%3C%2Fs%3E%3C%2Fk%3E%3Ck+v%3D%22folderId%22%3E%3Cs%3E%3C%2Fs%3E%3C%2Fk%3E%3Ck+v%3D%22formId%22%3E%3Cn%3E55532%3C%2Fn%3E%3C%2Fk%3E%3Ck+v%3D%22searchFormName%22%3E%3Cs%3EA0000%E4%B8%BB%E8%A6%81%E6%8C%87%E6%A0%87%E8%A1%A8%3C%2Fs%3E%3C%2Fk%3E%3Ck+v%3D%22adhocSessionIdForForm%22%3E%3Cs%3E%3C%2Fs%3E%3C%2Fk%3E%3Ck+v%3D%22artifactType%22%3E%3Cs%3E%3C%2Fs%3E%3C%2Fk%3E%3Ck+v%3D%22tlArtifactType%22%2F%3E%3Ck+v%3D%22tlArtifactId%22%2F%3E%3Ck+v%3D%22immediate%22%3E%3Cb%3E1%3C%2Fb%3E%3C%2Fk%3E%3Ck+v%3D%22type%22%3E%3Cs%3EloadModule%3C%2Fs%3E%3C%2Fk%3E%3C%2Fm%3E&oracle.adf.view.rich.PROCESS=p%3AloadBtn'
)
logger.info(response.content)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
logger.setLevel(logging.INFO)
<|reserved_special_token_0|>
console.setLevel(logging.INFO)
console.setFormatter(formatter)
logger.addHandler(console)
<|reserved_special_token_0|>
s.headers.update({'User-Agent': userAgent})
s.headers.update({'Referer': serverUrl})
s.headers.update({'Host': host})
def workspaceLogon(account, password):
    logger.info('logon: starting simulated workspace login')
postUrl = '%sworkspace/logon' % serverUrl
postData = {'sso_username': account, 'sso_password': password}
try:
responseRes = s.post(postUrl, data=postData)
except Exception as e:
logger.error(e)
        raise RuntimeError('Login failed: network error, please check the server URL configuration')
    logger.info('Login response: ' + responseRes.text)
sso_token = responseRes.text.split('[')[2].split(']')[0]
assertertoken = responseRes.text.split('[')[6].split(']')[0]
assertertoken_ = {'ora_epm_ctg': assertertoken}
updateHeaders(assertertoken_)
token = {'_sso_token': sso_token}
updateHeaders(token)
CSRF = responseRes.headers.get('X-ORACLE-BPMUI-CSRF')
csrf_ = {'X-ORACLE-BPMUI-CSRF': CSRF}
updateHeaders(csrf_)
ECID = responseRes.headers.get('X-ORACLE-DMS-ECID')
h = {'X-ORACLE-DMS-ECID': ECID}
updateHeaders(h)
def updateHeaders(h):
    logger.info(f'updating request headers: {h}')
s.headers.update(h)
def request_dyn():
    logger.info('dyn: starting test request')
postUrl = '%s/raframework/browse/dyn' % serverUrl
postData = {'page': '/conf/CDSConfig.jsp', 'amp': '', 'action':
'returnXML', 'LOCALE_LANGUAGE': 'en_US', 'rightToLeft': 'false',
'accessibilityMode': 'false', 'themeSelection': 'Skyros',
'sso_token': s.headers.get('_sso_token')}
responseRes = s.post(postUrl, data=postData)
    logger.info(f'dyn: response headers: {responseRes.headers}')
def request_planning_session(plan_name):
"""
"accessibilityMode": "false",
"bpm.contentheight": "621",
"bpm.contentwidth": "1314",
"bpm.objectpaletteheight": "648",
"bpm.objectpalettewidth": "207",
"cluster": "PLANNING_LWA",
"instance": "7",
"LOCALE_LANGUAGE": "zh_CN",
"mru_id": "PLANNING_LWA_JSTI1:application",
"repository_token": "59d9b714b22a35fb616dd3c05c5850d56b12522a9561499e9ea22afd918b6d36ea703f19668538504f86305c84f95441a1daf4cac09725703738d5073524871af0489411df16d2bb8f5d4726acdcc389b45e9e6ff00482249c53c1886ca68bfc090fcfbd365243996436f5bbe3affd9c87f6d5e8b7626e59adaeebcc4a89a66ef9725d8d4218c8b0c15912455a2690fcd2391a71806767f05fe66b395dda3e74b75ffa16e80c7814c47657dbc5d652da1044edc74ff20d6e604bdd733542457c3befca52c0700d758445b00ad519d0e8dee43e40cb78e070caca6b7c7a56008b2cbad75e83c7c8454f93177992f9166721331db1e11e48a113a51b3ebc2a79f1d74199127183d7708c47a3ff71663d9d",
"rightToLeft": "false",
"sourceApp": "JSTI1",
"sso_token": "sJIsuVcoOMtHD5CgUaVLmuo4SfCatQy4dowOxaTF0cj1CDqPrPW8YYKvn4nU5rsBYG1yLUChBU/ndO+3pDhwFcRbHJZmaiUOnyFhEh97A5xDXatOpkhIPx4CW+ygHNQlmKrbgUZEmJBgwNT4lcBuDPCZiodPZBo3zCkrSMLQcq0R8qoX6nHvfSVW3ep86WHDyJ859v9OCxcbo4FD4tSv4fTdHGdkGtQaRpdMtuSGtvY2hB+Z7MPEHqkhkIVAt0WWVplND5rUdF5yrLVsywYLWq7I2GH3/UVnwSgsmQy/psjChbnHkzqAcxNg837XRFI1EZBVxaGvdJw6U2mu3qlD29oYi2C/UqwODIjXGtj/st29j6fvd3lJHpsneutkVoG0E/mohFU+JzQaCnopeA+L3A8pORvkfwSyqhURqiLLHS0=",
"themeSelection": "Skyros",
"""
    logger.info('planning_session: starting test request')
postUrl = ('%s/HyperionPlanning/modules/com/hyperion/planning/Adf.jsp' %
serverUrl)
postData = {'accessibilityMode': 'false', 'bpm.contentheight': '621',
'bpm.contentwidth': '1314', 'bpm.objectpaletteheight': '648',
'bpm.objectpalettewidth': '207', 'cluster': 'PLANNING_LWA',
'instance': '7', 'LOCALE_LANGUAGE': 'zh_CN', 'mru_id':
f'PLANNING_LWA_{plan_name}:application', 'repository_token': s.
cookies.get('ORA_EPMWS_session'), 'rightToLeft': 'false',
'sourceApp': plan_name, 'sso_token': s.headers.get('_sso_token'),
'themeSelection': 'Skyros'}
responseRes = s.post(postUrl, data=postData)
    logger.info(f'planning_session: response cookies: {responseRes.cookies}')
s.cookies.set('ORA_HP_MRUApplication', plan_name, path=
'/HyperionPlanning/', domain=host)
s.cookies.set('ORA_HP_MRUUsername', s.cookies.get('ORA_EPMWS_User'),
path='/HyperionPlanning/', domain=host)
    logger.info('current headers: ' + str(s.headers))
    logger.info('current cookies: ' + str(s.cookies))
<|reserved_special_token_0|>
def request_planning_table():
s.headers['Adf-Ads-Page-Id'] = '2'
s.headers['Adf-Rich-Message'] = 'true'
url = (serverUrl +
'/HyperionPlanning/faces/PlanningCentral?_adf.ctrl-state=9gxaes0ha_55?_adf.ctrl-state='
)
response = s.post(url + '14hssan6gi_4', data=
'p:r:0:pc1:searchName=&org.apache.myfaces.trinidad.faces.FORM=f1&javax.faces.ViewState=!-9xx6pbfv3&oracle.adf.view.rich.DELTAS=%7Bp%3Ar%3A0%3Apc1%3AformTbl%3D%7BviewportSize%3D27%2Crows%3D33%2CscrollTopRowKey%7Cp%3D0%7D%2Cp%3AformTbl%3D%7BselectedRowKeys%3D0%7D%7D&event=p%3AloadBtn&event.p:loadBtn=%3Cm+xmlns%3D%22http%3A%2F%2Foracle.com%2FrichClient%2Fcomm%22%3E%3Ck+v%3D%22_custom%22%3E%3Cb%3E1%3C%2Fb%3E%3C%2Fk%3E%3Ck+v%3D%22module%22%3E%3Cs%3Eenterdata%3C%2Fs%3E%3C%2Fk%3E%3Ck+v%3D%22folderId%22%3E%3Cs%3E%3C%2Fs%3E%3C%2Fk%3E%3Ck+v%3D%22formId%22%3E%3Cn%3E55532%3C%2Fn%3E%3C%2Fk%3E%3Ck+v%3D%22searchFormName%22%3E%3Cs%3EA0000%E4%B8%BB%E8%A6%81%E6%8C%87%E6%A0%87%E8%A1%A8%3C%2Fs%3E%3C%2Fk%3E%3Ck+v%3D%22adhocSessionIdForForm%22%3E%3Cs%3E%3C%2Fs%3E%3C%2Fk%3E%3Ck+v%3D%22artifactType%22%3E%3Cs%3E%3C%2Fs%3E%3C%2Fk%3E%3Ck+v%3D%22tlArtifactType%22%2F%3E%3Ck+v%3D%22tlArtifactId%22%2F%3E%3Ck+v%3D%22immediate%22%3E%3Cb%3E1%3C%2Fb%3E%3C%2Fk%3E%3Ck+v%3D%22type%22%3E%3Cs%3EloadModule%3C%2Fs%3E%3C%2Fk%3E%3C%2Fm%3E&oracle.adf.view.rich.PROCESS=p%3AloadBtn'
)
logger.info(response.content)
if __name__ == '__main__':
workspaceLogon('admin', 'welcome1')
request_dyn()
request_planning_session('JSTI')
request_planning_table()
<|reserved_special_token_1|>
import requests
from urllib.parse import quote
import logging
from urllib.parse import urlparse
logger = logging.getLogger(__name__)
logger = logging.getLogger()
logger.setLevel(logging.INFO)
formatter = logging.Formatter(fmt=
'%(asctime)s %(filename)s[line:%(lineno)d]%(levelname)s - %(message)s',
datefmt='%m/%d/%Y %I:%M:%S %p')
console = logging.StreamHandler()
console.setLevel(logging.INFO)
console.setFormatter(formatter)
logger.addHandler(console)
userAgent = (
'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36'
)
serverUrl = 'http://epm.huaxinglu72hao.com/'
host = 'epm.huaxinglu72hao.com'
s = requests.Session()
s.headers.update({'User-Agent': userAgent})
s.headers.update({'Referer': serverUrl})
s.headers.update({'Host': host})
def workspaceLogon(account, password):
    logger.info('logon: starting simulated workspace login')
postUrl = '%sworkspace/logon' % serverUrl
postData = {'sso_username': account, 'sso_password': password}
try:
responseRes = s.post(postUrl, data=postData)
except Exception as e:
logger.error(e)
        raise RuntimeError('Login failed: network error, please check the server URL configuration')
    logger.info('Login response: ' + responseRes.text)
sso_token = responseRes.text.split('[')[2].split(']')[0]
assertertoken = responseRes.text.split('[')[6].split(']')[0]
assertertoken_ = {'ora_epm_ctg': assertertoken}
updateHeaders(assertertoken_)
token = {'_sso_token': sso_token}
updateHeaders(token)
CSRF = responseRes.headers.get('X-ORACLE-BPMUI-CSRF')
csrf_ = {'X-ORACLE-BPMUI-CSRF': CSRF}
updateHeaders(csrf_)
ECID = responseRes.headers.get('X-ORACLE-DMS-ECID')
h = {'X-ORACLE-DMS-ECID': ECID}
updateHeaders(h)
def updateHeaders(h):
    logger.info(f'updating request headers: {h}')
s.headers.update(h)
def request_dyn():
    logger.info('dyn: starting test request')
postUrl = '%s/raframework/browse/dyn' % serverUrl
postData = {'page': '/conf/CDSConfig.jsp', 'amp': '', 'action':
'returnXML', 'LOCALE_LANGUAGE': 'en_US', 'rightToLeft': 'false',
'accessibilityMode': 'false', 'themeSelection': 'Skyros',
'sso_token': s.headers.get('_sso_token')}
responseRes = s.post(postUrl, data=postData)
    logger.info(f'dyn: response headers: {responseRes.headers}')
def request_planning_session(plan_name):
"""
"accessibilityMode": "false",
"bpm.contentheight": "621",
"bpm.contentwidth": "1314",
"bpm.objectpaletteheight": "648",
"bpm.objectpalettewidth": "207",
"cluster": "PLANNING_LWA",
"instance": "7",
"LOCALE_LANGUAGE": "zh_CN",
"mru_id": "PLANNING_LWA_JSTI1:application",
"repository_token": "59d9b714b22a35fb616dd3c05c5850d56b12522a9561499e9ea22afd918b6d36ea703f19668538504f86305c84f95441a1daf4cac09725703738d5073524871af0489411df16d2bb8f5d4726acdcc389b45e9e6ff00482249c53c1886ca68bfc090fcfbd365243996436f5bbe3affd9c87f6d5e8b7626e59adaeebcc4a89a66ef9725d8d4218c8b0c15912455a2690fcd2391a71806767f05fe66b395dda3e74b75ffa16e80c7814c47657dbc5d652da1044edc74ff20d6e604bdd733542457c3befca52c0700d758445b00ad519d0e8dee43e40cb78e070caca6b7c7a56008b2cbad75e83c7c8454f93177992f9166721331db1e11e48a113a51b3ebc2a79f1d74199127183d7708c47a3ff71663d9d",
"rightToLeft": "false",
"sourceApp": "JSTI1",
"sso_token": "sJIsuVcoOMtHD5CgUaVLmuo4SfCatQy4dowOxaTF0cj1CDqPrPW8YYKvn4nU5rsBYG1yLUChBU/ndO+3pDhwFcRbHJZmaiUOnyFhEh97A5xDXatOpkhIPx4CW+ygHNQlmKrbgUZEmJBgwNT4lcBuDPCZiodPZBo3zCkrSMLQcq0R8qoX6nHvfSVW3ep86WHDyJ859v9OCxcbo4FD4tSv4fTdHGdkGtQaRpdMtuSGtvY2hB+Z7MPEHqkhkIVAt0WWVplND5rUdF5yrLVsywYLWq7I2GH3/UVnwSgsmQy/psjChbnHkzqAcxNg837XRFI1EZBVxaGvdJw6U2mu3qlD29oYi2C/UqwODIjXGtj/st29j6fvd3lJHpsneutkVoG0E/mohFU+JzQaCnopeA+L3A8pORvkfwSyqhURqiLLHS0=",
"themeSelection": "Skyros",
"""
    logger.info('planning_session: starting test request')
postUrl = ('%s/HyperionPlanning/modules/com/hyperion/planning/Adf.jsp' %
serverUrl)
postData = {'accessibilityMode': 'false', 'bpm.contentheight': '621',
'bpm.contentwidth': '1314', 'bpm.objectpaletteheight': '648',
'bpm.objectpalettewidth': '207', 'cluster': 'PLANNING_LWA',
'instance': '7', 'LOCALE_LANGUAGE': 'zh_CN', 'mru_id':
f'PLANNING_LWA_{plan_name}:application', 'repository_token': s.
cookies.get('ORA_EPMWS_session'), 'rightToLeft': 'false',
'sourceApp': plan_name, 'sso_token': s.headers.get('_sso_token'),
'themeSelection': 'Skyros'}
responseRes = s.post(postUrl, data=postData)
    logger.info(f'planning_session: response cookies: {responseRes.cookies}')
s.cookies.set('ORA_HP_MRUApplication', plan_name, path=
'/HyperionPlanning/', domain=host)
s.cookies.set('ORA_HP_MRUUsername', s.cookies.get('ORA_EPMWS_User'),
path='/HyperionPlanning/', domain=host)
    logger.info('current headers: ' + str(s.headers))
    logger.info('current cookies: ' + str(s.cookies))
import re
def request_planning_table():
s.headers['Adf-Ads-Page-Id'] = '2'
s.headers['Adf-Rich-Message'] = 'true'
url = (serverUrl +
'/HyperionPlanning/faces/PlanningCentral?_adf.ctrl-state=9gxaes0ha_55?_adf.ctrl-state='
)
response = s.post(url + '14hssan6gi_4', data=
'p:r:0:pc1:searchName=&org.apache.myfaces.trinidad.faces.FORM=f1&javax.faces.ViewState=!-9xx6pbfv3&oracle.adf.view.rich.DELTAS=%7Bp%3Ar%3A0%3Apc1%3AformTbl%3D%7BviewportSize%3D27%2Crows%3D33%2CscrollTopRowKey%7Cp%3D0%7D%2Cp%3AformTbl%3D%7BselectedRowKeys%3D0%7D%7D&event=p%3AloadBtn&event.p:loadBtn=%3Cm+xmlns%3D%22http%3A%2F%2Foracle.com%2FrichClient%2Fcomm%22%3E%3Ck+v%3D%22_custom%22%3E%3Cb%3E1%3C%2Fb%3E%3C%2Fk%3E%3Ck+v%3D%22module%22%3E%3Cs%3Eenterdata%3C%2Fs%3E%3C%2Fk%3E%3Ck+v%3D%22folderId%22%3E%3Cs%3E%3C%2Fs%3E%3C%2Fk%3E%3Ck+v%3D%22formId%22%3E%3Cn%3E55532%3C%2Fn%3E%3C%2Fk%3E%3Ck+v%3D%22searchFormName%22%3E%3Cs%3EA0000%E4%B8%BB%E8%A6%81%E6%8C%87%E6%A0%87%E8%A1%A8%3C%2Fs%3E%3C%2Fk%3E%3Ck+v%3D%22adhocSessionIdForForm%22%3E%3Cs%3E%3C%2Fs%3E%3C%2Fk%3E%3Ck+v%3D%22artifactType%22%3E%3Cs%3E%3C%2Fs%3E%3C%2Fk%3E%3Ck+v%3D%22tlArtifactType%22%2F%3E%3Ck+v%3D%22tlArtifactId%22%2F%3E%3Ck+v%3D%22immediate%22%3E%3Cb%3E1%3C%2Fb%3E%3C%2Fk%3E%3Ck+v%3D%22type%22%3E%3Cs%3EloadModule%3C%2Fs%3E%3C%2Fk%3E%3C%2Fm%3E&oracle.adf.view.rich.PROCESS=p%3AloadBtn'
)
logger.info(response.content)
if __name__ == '__main__':
workspaceLogon('admin', 'welcome1')
request_dyn()
request_planning_session('JSTI')
request_planning_table()
<|reserved_special_token_1|>
#! py -3
# -*- coding: utf-8 -*-
import requests
from urllib.parse import quote
import logging
from urllib.parse import urlparse
logger = logging.getLogger(__name__)
logger = logging.getLogger()
# Configure the log level. Without explicit configuration it defaults to WARNING, so every level
# below WARNING is dropped and attached handlers only receive WARNING and above; think of it as the master switch.
logger.setLevel(logging.INFO)
formatter = logging.Formatter(fmt="%(asctime)s %(filename)s[line:%(lineno)d]%(levelname)s - %(message)s",
datefmt="%m/%d/%Y %I:%M:%S %p") # 创建一个格式化对象
console = logging.StreamHandler()  # send log output to the console
console.setLevel(logging.INFO)  # minimum level for console output
console.setFormatter(formatter)  # apply the format
logger.addHandler(console)
# These settings will eventually move to a separate configuration file.
userAgent = "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36"
# serverUrl = "http://192.168.60.125:19000/"
serverUrl = "http://epm.huaxinglu72hao.com/"
host = "epm.huaxinglu72hao.com"
# Create the current session.
# The session shares a single set of headers and cookies;
# once the code below updates them, all subsequent requests use the updated values,
# which is why the functions below return nothing.
s = requests.Session()
s.headers.update({"User-Agent": userAgent})
s.headers.update({"Referer": serverUrl})
s.headers.update({"Host": host})
def workspaceLogon(account, password):
    # log in
    logger.info("logon: starting simulated workspace login")
postUrl = "%sworkspace/logon" % serverUrl
postData = {
"sso_username": account,
"sso_password": password,
}
try:
        responseRes = s.post(postUrl, data=postData)
except Exception as e:
logger.error(e)
raise RuntimeError("登录失败: 网络异常, 请检查服务器地址配置")
logger.info("登录返回: " + responseRes.text)
# 无论是否登录成功,状态码一般都是 statusCode = 200
sso_token = responseRes.text.split('[')[2].split(']')[0]
assertertoken = responseRes.text.split('[')[6].split(']')[0]
assertertoken_ = {"ora_epm_ctg": assertertoken}
updateHeaders(assertertoken_)
token = {"_sso_token": sso_token}
updateHeaders(token)
CSRF = responseRes.headers.get("X-ORACLE-BPMUI-CSRF")
csrf_ = {"X-ORACLE-BPMUI-CSRF": CSRF}
updateHeaders(csrf_)
ECID = responseRes.headers.get("X-ORACLE-DMS-ECID")
h = {"X-ORACLE-DMS-ECID": ECID}
updateHeaders(h)
def updateHeaders(h):
logger.info(f"更新请求头: {h}")
s.headers.update(h)
def request_dyn():
logger.info ("dyn: 开始测试请求")
postUrl = "%s/raframework/browse/dyn" % serverUrl
postData={
"page": "/conf/CDSConfig.jsp",
"amp":"",
"action": "returnXML",
"LOCALE_LANGUAGE": "en_US",
"rightToLeft": "false",
"accessibilityMode": "false",
"themeSelection": "Skyros",
"sso_token": s.headers.get("_sso_token")
}
responseRes = s.post(postUrl, data=postData)
# logger.info(f"dyn: 响应text:{responseRes.text}")
logger.info(f"dyn: 响应header:{responseRes.headers}")
def request_planning_session(plan_name):
"""
"accessibilityMode": "false",
"bpm.contentheight": "621",
"bpm.contentwidth": "1314",
"bpm.objectpaletteheight": "648",
"bpm.objectpalettewidth": "207",
"cluster": "PLANNING_LWA",
"instance": "7",
"LOCALE_LANGUAGE": "zh_CN",
"mru_id": "PLANNING_LWA_JSTI1:application",
"repository_token": "59d9b714b22a35fb616dd3c05c5850d56b12522a9561499e9ea22afd918b6d36ea703f19668538504f86305c84f95441a1daf4cac09725703738d5073524871af0489411df16d2bb8f5d4726acdcc389b45e9e6ff00482249c53c1886ca68bfc090fcfbd365243996436f5bbe3affd9c87f6d5e8b7626e59adaeebcc4a89a66ef9725d8d4218c8b0c15912455a2690fcd2391a71806767f05fe66b395dda3e74b75ffa16e80c7814c47657dbc5d652da1044edc74ff20d6e604bdd733542457c3befca52c0700d758445b00ad519d0e8dee43e40cb78e070caca6b7c7a56008b2cbad75e83c7c8454f93177992f9166721331db1e11e48a113a51b3ebc2a79f1d74199127183d7708c47a3ff71663d9d",
"rightToLeft": "false",
"sourceApp": "JSTI1",
"sso_token": "sJIsuVcoOMtHD5CgUaVLmuo4SfCatQy4dowOxaTF0cj1CDqPrPW8YYKvn4nU5rsBYG1yLUChBU/ndO+3pDhwFcRbHJZmaiUOnyFhEh97A5xDXatOpkhIPx4CW+ygHNQlmKrbgUZEmJBgwNT4lcBuDPCZiodPZBo3zCkrSMLQcq0R8qoX6nHvfSVW3ep86WHDyJ859v9OCxcbo4FD4tSv4fTdHGdkGtQaRpdMtuSGtvY2hB+Z7MPEHqkhkIVAt0WWVplND5rUdF5yrLVsywYLWq7I2GH3/UVnwSgsmQy/psjChbnHkzqAcxNg837XRFI1EZBVxaGvdJw6U2mu3qlD29oYi2C/UqwODIjXGtj/st29j6fvd3lJHpsneutkVoG0E/mohFU+JzQaCnopeA+L3A8pORvkfwSyqhURqiLLHS0=",
"themeSelection": "Skyros",
"""
logger.info ("planning_session: 开始测试请求")
postUrl = "%s/HyperionPlanning/modules/com/hyperion/planning/Adf.jsp" % serverUrl
postData={
"accessibilityMode": "false",
"bpm.contentheight": "621",
"bpm.contentwidth": "1314",
"bpm.objectpaletteheight": "648",
"bpm.objectpalettewidth": "207",
"cluster": "PLANNING_LWA",
"instance": "7",
"LOCALE_LANGUAGE": "zh_CN",
"mru_id": f"PLANNING_LWA_{plan_name}:application",
"repository_token": s.cookies.get("ORA_EPMWS_session"),
"rightToLeft": "false",
"sourceApp": plan_name,
"sso_token": s.headers.get("_sso_token"),
"themeSelection": "Skyros",
}
responseRes = s.post(postUrl, data=postData)
# logger.info(f"dyn: 响应text:{responseRes.text}")
logger.info(f"planning_session: 响应cookie:{responseRes.cookies}")
# 手动添加两个cookie
s.cookies.set("ORA_HP_MRUApplication", plan_name, path="/HyperionPlanning/", domain=host)
s.cookies.set("ORA_HP_MRUUsername", s.cookies.get("ORA_EPMWS_User"), path="/HyperionPlanning/", domain=host)
logger.info("当前的header为: " + str(s.headers))
logger.info("当前的cookie为: " + str(s.cookies))
# logger.info(f"planning_session: 响应:{responseRes.text}")
# 访问一个具体的表单
import re
def request_planning_table():
    # The data below is hard-coded and only applies to JSTI -> the A0000 main-indicators form.
s.headers["Adf-Ads-Page-Id"] = "2"
s.headers["Adf-Rich-Message"] = "true"
url = serverUrl + "/HyperionPlanning/faces/PlanningCentral?_adf.ctrl-state=9gxaes0ha_55?_adf.ctrl-state="
response = s.post(url + "14hssan6gi_4",
data="p:r:0:pc1:searchName=&org.apache.myfaces.trinidad.faces.FORM=f1&javax.faces.ViewState=!-9xx6pbfv3&oracle.adf.view.rich.DELTAS=%7Bp%3Ar%3A0%3Apc1%3AformTbl%3D%7BviewportSize%3D27%2Crows%3D33%2CscrollTopRowKey%7Cp%3D0%7D%2Cp%3AformTbl%3D%7BselectedRowKeys%3D0%7D%7D&event=p%3AloadBtn&event.p:loadBtn=%3Cm+xmlns%3D%22http%3A%2F%2Foracle.com%2FrichClient%2Fcomm%22%3E%3Ck+v%3D%22_custom%22%3E%3Cb%3E1%3C%2Fb%3E%3C%2Fk%3E%3Ck+v%3D%22module%22%3E%3Cs%3Eenterdata%3C%2Fs%3E%3C%2Fk%3E%3Ck+v%3D%22folderId%22%3E%3Cs%3E%3C%2Fs%3E%3C%2Fk%3E%3Ck+v%3D%22formId%22%3E%3Cn%3E55532%3C%2Fn%3E%3C%2Fk%3E%3Ck+v%3D%22searchFormName%22%3E%3Cs%3EA0000%E4%B8%BB%E8%A6%81%E6%8C%87%E6%A0%87%E8%A1%A8%3C%2Fs%3E%3C%2Fk%3E%3Ck+v%3D%22adhocSessionIdForForm%22%3E%3Cs%3E%3C%2Fs%3E%3C%2Fk%3E%3Ck+v%3D%22artifactType%22%3E%3Cs%3E%3C%2Fs%3E%3C%2Fk%3E%3Ck+v%3D%22tlArtifactType%22%2F%3E%3Ck+v%3D%22tlArtifactId%22%2F%3E%3Ck+v%3D%22immediate%22%3E%3Cb%3E1%3C%2Fb%3E%3C%2Fk%3E%3Ck+v%3D%22type%22%3E%3Cs%3EloadModule%3C%2Fs%3E%3C%2Fk%3E%3C%2Fm%3E&oracle.adf.view.rich.PROCESS=p%3AloadBtn")
    # Judging from the output, the request was redirected.
logger.info(response.content)
# m = re.search(r"_adf\.ctrl-state=.+?&", response.text)
# current = m.group(0).split("=")[1].replace("&", "")
#
# response = s.post(url + current,
# data="p:r:0:pc1:searchName=&org.apache.myfaces.trinidad.faces.FORM=f1&javax.faces.ViewState=!-9xx6pbfv3&oracle.adf.view.rich.DELTAS=%7Bp%3Ar%3A0%3Apc1%3AformTbl%3D%7BviewportSize%3D27%2Crows%3D33%2CscrollTopRowKey%7Cp%3D0%7D%2Cp%3AformTbl%3D%7BselectedRowKeys%3D0%7D%7D&event=p%3AloadBtn&event.p:loadBtn=%3Cm+xmlns%3D%22http%3A%2F%2Foracle.com%2FrichClient%2Fcomm%22%3E%3Ck+v%3D%22_custom%22%3E%3Cb%3E1%3C%2Fb%3E%3C%2Fk%3E%3Ck+v%3D%22module%22%3E%3Cs%3Eenterdata%3C%2Fs%3E%3C%2Fk%3E%3Ck+v%3D%22folderId%22%3E%3Cs%3E%3C%2Fs%3E%3C%2Fk%3E%3Ck+v%3D%22formId%22%3E%3Cn%3E55532%3C%2Fn%3E%3C%2Fk%3E%3Ck+v%3D%22searchFormName%22%3E%3Cs%3EA0000%E4%B8%BB%E8%A6%81%E6%8C%87%E6%A0%87%E8%A1%A8%3C%2Fs%3E%3C%2Fk%3E%3Ck+v%3D%22adhocSessionIdForForm%22%3E%3Cs%3E%3C%2Fs%3E%3C%2Fk%3E%3Ck+v%3D%22artifactType%22%3E%3Cs%3E%3C%2Fs%3E%3C%2Fk%3E%3Ck+v%3D%22tlArtifactType%22%2F%3E%3Ck+v%3D%22tlArtifactId%22%2F%3E%3Ck+v%3D%22immediate%22%3E%3Cb%3E1%3C%2Fb%3E%3C%2Fk%3E%3Ck+v%3D%22type%22%3E%3Cs%3EloadModule%3C%2Fs%3E%3C%2Fk%3E%3C%2Fm%3E&oracle.adf.view.rich.PROCESS=p%3AloadBtn")
#
# logger.info(response.content)
if __name__ == "__main__":
    # Judging from the response, the login succeeded.
workspaceLogon("admin", "welcome1")
request_dyn()
request_planning_session("JSTI")
request_planning_table()
# logger.info("sso_token = %s" % sso_token)
# logger.info("sso_token = %s" % sso_token)
# logger.info("assertertoken = %s" % assertertoken)
# request_dyn(sso_token,assertertoken)
# requestHSS("tYy6FOvH4ZhJR1CUTy83Q9ZJxiNnYbnAt8fjWcMBII4rEmQlYjth+/M4MLIXVuXp7Hi3xQS4+QRySoxvNuFibcGbxbIYRVLFVKogwyhtIAcvtIXMvfhxd8svcLZgIXyTklurCsTarP9KtRgc26B3XRWlDG/QAzVLWyGH26ROffQpUj+bW6yRrj7A0udq1PbqGFXjDZ9iNW0ALbg0Z5NC7g3pBgjtetBohXRmpV32DCw4tI1Y7j7tLnHtSFk/NtdNri5AAFCTqTPd6HYdBzbCDqfP7ZEdfeXJFsfatRE5Pcgqm36hV1U7HeDENhTvNBtZiiQ9OfMdopyHQQvPnBQsyfKzSKTq1O5bSHH9HzQfCJdvq/nkSbalctY2SxIb0vtefJ9fUZ2y4bMAm/g95EZLiKZ5aouVrzOKjt8sl1zVctk+Ivg141wUPqtTULOYdBoi")
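# The token extraction in workspaceLogon (chained split('[') / split(']')) is brittle.
# Below is a hedged, regex-based alternative sketch; it assumes the login response
# really does wrap each token in square brackets, as the split indices suggest.
import re

def extract_bracketed(text, index):
    """Return the index-th [...]-wrapped value in text, or None if absent."""
    matches = re.findall(r'\[([^\]]*)\]', text)
    return matches[index] if index < len(matches) else None

# Usage mirroring the original indexing:
#   sso_token = extract_bracketed(responseRes.text, 1)      # was split('[')[2].split(']')[0]
#   assertertoken = extract_bracketed(responseRes.text, 5)  # was split('[')[6].split(']')[0]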
|
flexible
|
{
"blob_id": "c5d92ec592250d5bc896d32941364b92ff1d21e9",
"index": 3793,
"step-1": "<mask token>\n\n\ndef request_dyn():\n logger.info('dyn: 开始测试请求')\n postUrl = '%s/raframework/browse/dyn' % serverUrl\n postData = {'page': '/conf/CDSConfig.jsp', 'amp': '', 'action':\n 'returnXML', 'LOCALE_LANGUAGE': 'en_US', 'rightToLeft': 'false',\n 'accessibilityMode': 'false', 'themeSelection': 'Skyros',\n 'sso_token': s.headers.get('_sso_token')}\n responseRes = s.post(postUrl, data=postData)\n logger.info(f'dyn: 响应header:{responseRes.headers}')\n\n\ndef request_planning_session(plan_name):\n \"\"\"\n \"accessibilityMode\":\t\"false\",\n \"bpm.contentheight\":\t\"621\",\n \"bpm.contentwidth\":\t\"1314\",\n \"bpm.objectpaletteheight\":\t\"648\",\n \"bpm.objectpalettewidth\":\t\"207\",\n \"cluster\":\t\"PLANNING_LWA\",\n \"instance\":\t\"7\",\n \"LOCALE_LANGUAGE\":\t\"zh_CN\",\n \"mru_id\":\t\"PLANNING_LWA_JSTI1:application\",\n \"repository_token\":\t\"59d9b714b22a35fb616dd3c05c5850d56b12522a9561499e9ea22afd918b6d36ea703f19668538504f86305c84f95441a1daf4cac09725703738d5073524871af0489411df16d2bb8f5d4726acdcc389b45e9e6ff00482249c53c1886ca68bfc090fcfbd365243996436f5bbe3affd9c87f6d5e8b7626e59adaeebcc4a89a66ef9725d8d4218c8b0c15912455a2690fcd2391a71806767f05fe66b395dda3e74b75ffa16e80c7814c47657dbc5d652da1044edc74ff20d6e604bdd733542457c3befca52c0700d758445b00ad519d0e8dee43e40cb78e070caca6b7c7a56008b2cbad75e83c7c8454f93177992f9166721331db1e11e48a113a51b3ebc2a79f1d74199127183d7708c47a3ff71663d9d\",\n \"rightToLeft\":\t\"false\",\n \"sourceApp\":\t\"JSTI1\",\n \"sso_token\":\t\"sJIsuVcoOMtHD5CgUaVLmuo4SfCatQy4dowOxaTF0cj1CDqPrPW8YYKvn4nU5rsBYG1yLUChBU/ndO+3pDhwFcRbHJZmaiUOnyFhEh97A5xDXatOpkhIPx4CW+ygHNQlmKrbgUZEmJBgwNT4lcBuDPCZiodPZBo3zCkrSMLQcq0R8qoX6nHvfSVW3ep86WHDyJ859v9OCxcbo4FD4tSv4fTdHGdkGtQaRpdMtuSGtvY2hB+Z7MPEHqkhkIVAt0WWVplND5rUdF5yrLVsywYLWq7I2GH3/UVnwSgsmQy/psjChbnHkzqAcxNg837XRFI1EZBVxaGvdJw6U2mu3qlD29oYi2C/UqwODIjXGtj/st29j6fvd3lJHpsneutkVoG0E/mohFU+JzQaCnopeA+L3A8pORvkfwSyqhURqiLLHS0=\",\n \"themeSelection\":\t\"Skyros\",\n\n\n \"\"\"\n logger.info('planning_session: 开始测试请求')\n postUrl = ('%s/HyperionPlanning/modules/com/hyperion/planning/Adf.jsp' %\n serverUrl)\n postData = {'accessibilityMode': 'false', 'bpm.contentheight': '621',\n 'bpm.contentwidth': '1314', 'bpm.objectpaletteheight': '648',\n 'bpm.objectpalettewidth': '207', 'cluster': 'PLANNING_LWA',\n 'instance': '7', 'LOCALE_LANGUAGE': 'zh_CN', 'mru_id':\n f'PLANNING_LWA_{plan_name}:application', 'repository_token': s.\n cookies.get('ORA_EPMWS_session'), 'rightToLeft': 'false',\n 'sourceApp': plan_name, 'sso_token': s.headers.get('_sso_token'),\n 'themeSelection': 'Skyros'}\n responseRes = s.post(postUrl, data=postData)\n logger.info(f'planning_session: 响应cookie:{responseRes.cookies}')\n s.cookies.set('ORA_HP_MRUApplication', plan_name, path=\n '/HyperionPlanning/', domain=host)\n s.cookies.set('ORA_HP_MRUUsername', s.cookies.get('ORA_EPMWS_User'),\n path='/HyperionPlanning/', domain=host)\n logger.info('当前的header为: ' + str(s.headers))\n logger.info('当前的cookie为: ' + str(s.cookies))\n\n\n<mask token>\n\n\ndef request_planning_table():\n s.headers['Adf-Ads-Page-Id'] = '2'\n s.headers['Adf-Rich-Message'] = 'true'\n url = (serverUrl +\n '/HyperionPlanning/faces/PlanningCentral?_adf.ctrl-state=9gxaes0ha_55?_adf.ctrl-state='\n )\n response = s.post(url + '14hssan6gi_4', data=\n 
'p:r:0:pc1:searchName=&org.apache.myfaces.trinidad.faces.FORM=f1&javax.faces.ViewState=!-9xx6pbfv3&oracle.adf.view.rich.DELTAS=%7Bp%3Ar%3A0%3Apc1%3AformTbl%3D%7BviewportSize%3D27%2Crows%3D33%2CscrollTopRowKey%7Cp%3D0%7D%2Cp%3AformTbl%3D%7BselectedRowKeys%3D0%7D%7D&event=p%3AloadBtn&event.p:loadBtn=%3Cm+xmlns%3D%22http%3A%2F%2Foracle.com%2FrichClient%2Fcomm%22%3E%3Ck+v%3D%22_custom%22%3E%3Cb%3E1%3C%2Fb%3E%3C%2Fk%3E%3Ck+v%3D%22module%22%3E%3Cs%3Eenterdata%3C%2Fs%3E%3C%2Fk%3E%3Ck+v%3D%22folderId%22%3E%3Cs%3E%3C%2Fs%3E%3C%2Fk%3E%3Ck+v%3D%22formId%22%3E%3Cn%3E55532%3C%2Fn%3E%3C%2Fk%3E%3Ck+v%3D%22searchFormName%22%3E%3Cs%3EA0000%E4%B8%BB%E8%A6%81%E6%8C%87%E6%A0%87%E8%A1%A8%3C%2Fs%3E%3C%2Fk%3E%3Ck+v%3D%22adhocSessionIdForForm%22%3E%3Cs%3E%3C%2Fs%3E%3C%2Fk%3E%3Ck+v%3D%22artifactType%22%3E%3Cs%3E%3C%2Fs%3E%3C%2Fk%3E%3Ck+v%3D%22tlArtifactType%22%2F%3E%3Ck+v%3D%22tlArtifactId%22%2F%3E%3Ck+v%3D%22immediate%22%3E%3Cb%3E1%3C%2Fb%3E%3C%2Fk%3E%3Ck+v%3D%22type%22%3E%3Cs%3EloadModule%3C%2Fs%3E%3C%2Fk%3E%3C%2Fm%3E&oracle.adf.view.rich.PROCESS=p%3AloadBtn'\n )\n logger.info(response.content)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef workspaceLogon(account, password):\n logger.info('logon: 开始模拟登录workspace')\n postUrl = '%sworkspace/logon' % serverUrl\n postData = {'sso_username': account, 'sso_password': password}\n try:\n responseRes = s.post(postUrl, data=postData)\n except Exception as e:\n logger.error(e)\n raise RuntimeError('登录失败: 网络异常, 请检查服务器地址配置')\n logger.info('登录返回: ' + responseRes.text)\n sso_token = responseRes.text.split('[')[2].split(']')[0]\n assertertoken = responseRes.text.split('[')[6].split(']')[0]\n assertertoken_ = {'ora_epm_ctg': assertertoken}\n updateHeaders(assertertoken_)\n token = {'_sso_token': sso_token}\n updateHeaders(token)\n CSRF = responseRes.headers.get('X-ORACLE-BPMUI-CSRF')\n csrf_ = {'X-ORACLE-BPMUI-CSRF': CSRF}\n updateHeaders(csrf_)\n ECID = responseRes.headers.get('X-ORACLE-DMS-ECID')\n h = {'X-ORACLE-DMS-ECID': ECID}\n updateHeaders(h)\n\n\ndef updateHeaders(h):\n logger.info(f'更新请求头: {h}')\n s.headers.update(h)\n\n\ndef request_dyn():\n logger.info('dyn: 开始测试请求')\n postUrl = '%s/raframework/browse/dyn' % serverUrl\n postData = {'page': '/conf/CDSConfig.jsp', 'amp': '', 'action':\n 'returnXML', 'LOCALE_LANGUAGE': 'en_US', 'rightToLeft': 'false',\n 'accessibilityMode': 'false', 'themeSelection': 'Skyros',\n 'sso_token': s.headers.get('_sso_token')}\n responseRes = s.post(postUrl, data=postData)\n logger.info(f'dyn: 响应header:{responseRes.headers}')\n\n\ndef request_planning_session(plan_name):\n \"\"\"\n \"accessibilityMode\":\t\"false\",\n \"bpm.contentheight\":\t\"621\",\n \"bpm.contentwidth\":\t\"1314\",\n \"bpm.objectpaletteheight\":\t\"648\",\n \"bpm.objectpalettewidth\":\t\"207\",\n \"cluster\":\t\"PLANNING_LWA\",\n \"instance\":\t\"7\",\n \"LOCALE_LANGUAGE\":\t\"zh_CN\",\n \"mru_id\":\t\"PLANNING_LWA_JSTI1:application\",\n \"repository_token\":\t\"59d9b714b22a35fb616dd3c05c5850d56b12522a9561499e9ea22afd918b6d36ea703f19668538504f86305c84f95441a1daf4cac09725703738d5073524871af0489411df16d2bb8f5d4726acdcc389b45e9e6ff00482249c53c1886ca68bfc090fcfbd365243996436f5bbe3affd9c87f6d5e8b7626e59adaeebcc4a89a66ef9725d8d4218c8b0c15912455a2690fcd2391a71806767f05fe66b395dda3e74b75ffa16e80c7814c47657dbc5d652da1044edc74ff20d6e604bdd733542457c3befca52c0700d758445b00ad519d0e8dee43e40cb78e070caca6b7c7a56008b2cbad75e83c7c8454f93177992f9166721331db1e11e48a113a51b3ebc2a79f1d74199127183d7708c47a3ff71663d9d\",\n \"rightToLeft\":\t\"false\",\n \"sourceApp\":\t\"JSTI1\",\n \"sso_token\":\t\"sJIsuVcoOMtHD5CgUaVLmuo4SfCatQy4dowOxaTF0cj1CDqPrPW8YYKvn4nU5rsBYG1yLUChBU/ndO+3pDhwFcRbHJZmaiUOnyFhEh97A5xDXatOpkhIPx4CW+ygHNQlmKrbgUZEmJBgwNT4lcBuDPCZiodPZBo3zCkrSMLQcq0R8qoX6nHvfSVW3ep86WHDyJ859v9OCxcbo4FD4tSv4fTdHGdkGtQaRpdMtuSGtvY2hB+Z7MPEHqkhkIVAt0WWVplND5rUdF5yrLVsywYLWq7I2GH3/UVnwSgsmQy/psjChbnHkzqAcxNg837XRFI1EZBVxaGvdJw6U2mu3qlD29oYi2C/UqwODIjXGtj/st29j6fvd3lJHpsneutkVoG0E/mohFU+JzQaCnopeA+L3A8pORvkfwSyqhURqiLLHS0=\",\n \"themeSelection\":\t\"Skyros\",\n\n\n \"\"\"\n logger.info('planning_session: 开始测试请求')\n postUrl = ('%s/HyperionPlanning/modules/com/hyperion/planning/Adf.jsp' %\n serverUrl)\n postData = {'accessibilityMode': 'false', 'bpm.contentheight': '621',\n 'bpm.contentwidth': '1314', 'bpm.objectpaletteheight': '648',\n 'bpm.objectpalettewidth': '207', 'cluster': 'PLANNING_LWA',\n 'instance': '7', 'LOCALE_LANGUAGE': 'zh_CN', 'mru_id':\n f'PLANNING_LWA_{plan_name}:application', 'repository_token': s.\n cookies.get('ORA_EPMWS_session'), 'rightToLeft': 'false',\n 'sourceApp': plan_name, 'sso_token': s.headers.get('_sso_token'),\n 'themeSelection': 'Skyros'}\n 
responseRes = s.post(postUrl, data=postData)\n logger.info(f'planning_session: 响应cookie:{responseRes.cookies}')\n s.cookies.set('ORA_HP_MRUApplication', plan_name, path=\n '/HyperionPlanning/', domain=host)\n s.cookies.set('ORA_HP_MRUUsername', s.cookies.get('ORA_EPMWS_User'),\n path='/HyperionPlanning/', domain=host)\n logger.info('当前的header为: ' + str(s.headers))\n logger.info('当前的cookie为: ' + str(s.cookies))\n\n\n<mask token>\n\n\ndef request_planning_table():\n s.headers['Adf-Ads-Page-Id'] = '2'\n s.headers['Adf-Rich-Message'] = 'true'\n url = (serverUrl +\n '/HyperionPlanning/faces/PlanningCentral?_adf.ctrl-state=9gxaes0ha_55?_adf.ctrl-state='\n )\n response = s.post(url + '14hssan6gi_4', data=\n 'p:r:0:pc1:searchName=&org.apache.myfaces.trinidad.faces.FORM=f1&javax.faces.ViewState=!-9xx6pbfv3&oracle.adf.view.rich.DELTAS=%7Bp%3Ar%3A0%3Apc1%3AformTbl%3D%7BviewportSize%3D27%2Crows%3D33%2CscrollTopRowKey%7Cp%3D0%7D%2Cp%3AformTbl%3D%7BselectedRowKeys%3D0%7D%7D&event=p%3AloadBtn&event.p:loadBtn=%3Cm+xmlns%3D%22http%3A%2F%2Foracle.com%2FrichClient%2Fcomm%22%3E%3Ck+v%3D%22_custom%22%3E%3Cb%3E1%3C%2Fb%3E%3C%2Fk%3E%3Ck+v%3D%22module%22%3E%3Cs%3Eenterdata%3C%2Fs%3E%3C%2Fk%3E%3Ck+v%3D%22folderId%22%3E%3Cs%3E%3C%2Fs%3E%3C%2Fk%3E%3Ck+v%3D%22formId%22%3E%3Cn%3E55532%3C%2Fn%3E%3C%2Fk%3E%3Ck+v%3D%22searchFormName%22%3E%3Cs%3EA0000%E4%B8%BB%E8%A6%81%E6%8C%87%E6%A0%87%E8%A1%A8%3C%2Fs%3E%3C%2Fk%3E%3Ck+v%3D%22adhocSessionIdForForm%22%3E%3Cs%3E%3C%2Fs%3E%3C%2Fk%3E%3Ck+v%3D%22artifactType%22%3E%3Cs%3E%3C%2Fs%3E%3C%2Fk%3E%3Ck+v%3D%22tlArtifactType%22%2F%3E%3Ck+v%3D%22tlArtifactId%22%2F%3E%3Ck+v%3D%22immediate%22%3E%3Cb%3E1%3C%2Fb%3E%3C%2Fk%3E%3Ck+v%3D%22type%22%3E%3Cs%3EloadModule%3C%2Fs%3E%3C%2Fk%3E%3C%2Fm%3E&oracle.adf.view.rich.PROCESS=p%3AloadBtn'\n )\n logger.info(response.content)\n\n\n<mask token>\n",
"step-3": "<mask token>\nlogger.setLevel(logging.INFO)\n<mask token>\nconsole.setLevel(logging.INFO)\nconsole.setFormatter(formatter)\nlogger.addHandler(console)\n<mask token>\ns.headers.update({'User-Agent': userAgent})\ns.headers.update({'Referer': serverUrl})\ns.headers.update({'Host': host})\n\n\ndef workspaceLogon(account, password):\n logger.info('logon: 开始模拟登录workspace')\n postUrl = '%sworkspace/logon' % serverUrl\n postData = {'sso_username': account, 'sso_password': password}\n try:\n responseRes = s.post(postUrl, data=postData)\n except Exception as e:\n logger.error(e)\n raise RuntimeError('登录失败: 网络异常, 请检查服务器地址配置')\n logger.info('登录返回: ' + responseRes.text)\n sso_token = responseRes.text.split('[')[2].split(']')[0]\n assertertoken = responseRes.text.split('[')[6].split(']')[0]\n assertertoken_ = {'ora_epm_ctg': assertertoken}\n updateHeaders(assertertoken_)\n token = {'_sso_token': sso_token}\n updateHeaders(token)\n CSRF = responseRes.headers.get('X-ORACLE-BPMUI-CSRF')\n csrf_ = {'X-ORACLE-BPMUI-CSRF': CSRF}\n updateHeaders(csrf_)\n ECID = responseRes.headers.get('X-ORACLE-DMS-ECID')\n h = {'X-ORACLE-DMS-ECID': ECID}\n updateHeaders(h)\n\n\ndef updateHeaders(h):\n logger.info(f'更新请求头: {h}')\n s.headers.update(h)\n\n\ndef request_dyn():\n logger.info('dyn: 开始测试请求')\n postUrl = '%s/raframework/browse/dyn' % serverUrl\n postData = {'page': '/conf/CDSConfig.jsp', 'amp': '', 'action':\n 'returnXML', 'LOCALE_LANGUAGE': 'en_US', 'rightToLeft': 'false',\n 'accessibilityMode': 'false', 'themeSelection': 'Skyros',\n 'sso_token': s.headers.get('_sso_token')}\n responseRes = s.post(postUrl, data=postData)\n logger.info(f'dyn: 响应header:{responseRes.headers}')\n\n\ndef request_planning_session(plan_name):\n \"\"\"\n \"accessibilityMode\":\t\"false\",\n \"bpm.contentheight\":\t\"621\",\n \"bpm.contentwidth\":\t\"1314\",\n \"bpm.objectpaletteheight\":\t\"648\",\n \"bpm.objectpalettewidth\":\t\"207\",\n \"cluster\":\t\"PLANNING_LWA\",\n \"instance\":\t\"7\",\n \"LOCALE_LANGUAGE\":\t\"zh_CN\",\n \"mru_id\":\t\"PLANNING_LWA_JSTI1:application\",\n \"repository_token\":\t\"59d9b714b22a35fb616dd3c05c5850d56b12522a9561499e9ea22afd918b6d36ea703f19668538504f86305c84f95441a1daf4cac09725703738d5073524871af0489411df16d2bb8f5d4726acdcc389b45e9e6ff00482249c53c1886ca68bfc090fcfbd365243996436f5bbe3affd9c87f6d5e8b7626e59adaeebcc4a89a66ef9725d8d4218c8b0c15912455a2690fcd2391a71806767f05fe66b395dda3e74b75ffa16e80c7814c47657dbc5d652da1044edc74ff20d6e604bdd733542457c3befca52c0700d758445b00ad519d0e8dee43e40cb78e070caca6b7c7a56008b2cbad75e83c7c8454f93177992f9166721331db1e11e48a113a51b3ebc2a79f1d74199127183d7708c47a3ff71663d9d\",\n \"rightToLeft\":\t\"false\",\n \"sourceApp\":\t\"JSTI1\",\n \"sso_token\":\t\"sJIsuVcoOMtHD5CgUaVLmuo4SfCatQy4dowOxaTF0cj1CDqPrPW8YYKvn4nU5rsBYG1yLUChBU/ndO+3pDhwFcRbHJZmaiUOnyFhEh97A5xDXatOpkhIPx4CW+ygHNQlmKrbgUZEmJBgwNT4lcBuDPCZiodPZBo3zCkrSMLQcq0R8qoX6nHvfSVW3ep86WHDyJ859v9OCxcbo4FD4tSv4fTdHGdkGtQaRpdMtuSGtvY2hB+Z7MPEHqkhkIVAt0WWVplND5rUdF5yrLVsywYLWq7I2GH3/UVnwSgsmQy/psjChbnHkzqAcxNg837XRFI1EZBVxaGvdJw6U2mu3qlD29oYi2C/UqwODIjXGtj/st29j6fvd3lJHpsneutkVoG0E/mohFU+JzQaCnopeA+L3A8pORvkfwSyqhURqiLLHS0=\",\n \"themeSelection\":\t\"Skyros\",\n\n\n \"\"\"\n logger.info('planning_session: 开始测试请求')\n postUrl = ('%s/HyperionPlanning/modules/com/hyperion/planning/Adf.jsp' %\n serverUrl)\n postData = {'accessibilityMode': 'false', 'bpm.contentheight': '621',\n 'bpm.contentwidth': '1314', 'bpm.objectpaletteheight': '648',\n 'bpm.objectpalettewidth': '207', 'cluster': 'PLANNING_LWA',\n 'instance': 
'7', 'LOCALE_LANGUAGE': 'zh_CN', 'mru_id':\n f'PLANNING_LWA_{plan_name}:application', 'repository_token': s.\n cookies.get('ORA_EPMWS_session'), 'rightToLeft': 'false',\n 'sourceApp': plan_name, 'sso_token': s.headers.get('_sso_token'),\n 'themeSelection': 'Skyros'}\n responseRes = s.post(postUrl, data=postData)\n logger.info(f'planning_session: 响应cookie:{responseRes.cookies}')\n s.cookies.set('ORA_HP_MRUApplication', plan_name, path=\n '/HyperionPlanning/', domain=host)\n s.cookies.set('ORA_HP_MRUUsername', s.cookies.get('ORA_EPMWS_User'),\n path='/HyperionPlanning/', domain=host)\n logger.info('当前的header为: ' + str(s.headers))\n logger.info('当前的cookie为: ' + str(s.cookies))\n\n\n<mask token>\n\n\ndef request_planning_table():\n s.headers['Adf-Ads-Page-Id'] = '2'\n s.headers['Adf-Rich-Message'] = 'true'\n url = (serverUrl +\n '/HyperionPlanning/faces/PlanningCentral?_adf.ctrl-state=9gxaes0ha_55?_adf.ctrl-state='\n )\n response = s.post(url + '14hssan6gi_4', data=\n 'p:r:0:pc1:searchName=&org.apache.myfaces.trinidad.faces.FORM=f1&javax.faces.ViewState=!-9xx6pbfv3&oracle.adf.view.rich.DELTAS=%7Bp%3Ar%3A0%3Apc1%3AformTbl%3D%7BviewportSize%3D27%2Crows%3D33%2CscrollTopRowKey%7Cp%3D0%7D%2Cp%3AformTbl%3D%7BselectedRowKeys%3D0%7D%7D&event=p%3AloadBtn&event.p:loadBtn=%3Cm+xmlns%3D%22http%3A%2F%2Foracle.com%2FrichClient%2Fcomm%22%3E%3Ck+v%3D%22_custom%22%3E%3Cb%3E1%3C%2Fb%3E%3C%2Fk%3E%3Ck+v%3D%22module%22%3E%3Cs%3Eenterdata%3C%2Fs%3E%3C%2Fk%3E%3Ck+v%3D%22folderId%22%3E%3Cs%3E%3C%2Fs%3E%3C%2Fk%3E%3Ck+v%3D%22formId%22%3E%3Cn%3E55532%3C%2Fn%3E%3C%2Fk%3E%3Ck+v%3D%22searchFormName%22%3E%3Cs%3EA0000%E4%B8%BB%E8%A6%81%E6%8C%87%E6%A0%87%E8%A1%A8%3C%2Fs%3E%3C%2Fk%3E%3Ck+v%3D%22adhocSessionIdForForm%22%3E%3Cs%3E%3C%2Fs%3E%3C%2Fk%3E%3Ck+v%3D%22artifactType%22%3E%3Cs%3E%3C%2Fs%3E%3C%2Fk%3E%3Ck+v%3D%22tlArtifactType%22%2F%3E%3Ck+v%3D%22tlArtifactId%22%2F%3E%3Ck+v%3D%22immediate%22%3E%3Cb%3E1%3C%2Fb%3E%3C%2Fk%3E%3Ck+v%3D%22type%22%3E%3Cs%3EloadModule%3C%2Fs%3E%3C%2Fk%3E%3C%2Fm%3E&oracle.adf.view.rich.PROCESS=p%3AloadBtn'\n )\n logger.info(response.content)\n\n\nif __name__ == '__main__':\n workspaceLogon('admin', 'welcome1')\n request_dyn()\n request_planning_session('JSTI')\n request_planning_table()\n",
"step-4": "import requests\nfrom urllib.parse import quote\nimport logging\nfrom urllib.parse import urlparse\nlogger = logging.getLogger(__name__)\nlogger = logging.getLogger()\nlogger.setLevel(logging.INFO)\nformatter = logging.Formatter(fmt=\n '%(asctime)s %(filename)s[line:%(lineno)d]%(levelname)s - %(message)s',\n datefmt='%m/%d/%Y %I:%M:%S %p')\nconsole = logging.StreamHandler()\nconsole.setLevel(logging.INFO)\nconsole.setFormatter(formatter)\nlogger.addHandler(console)\nuserAgent = (\n 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36'\n )\nserverUrl = 'http://epm.huaxinglu72hao.com/'\nhost = 'epm.huaxinglu72hao.com'\ns = requests.Session()\ns.headers.update({'User-Agent': userAgent})\ns.headers.update({'Referer': serverUrl})\ns.headers.update({'Host': host})\n\n\ndef workspaceLogon(account, password):\n logger.info('logon: 开始模拟登录workspace')\n postUrl = '%sworkspace/logon' % serverUrl\n postData = {'sso_username': account, 'sso_password': password}\n try:\n responseRes = s.post(postUrl, data=postData)\n except Exception as e:\n logger.error(e)\n raise RuntimeError('登录失败: 网络异常, 请检查服务器地址配置')\n logger.info('登录返回: ' + responseRes.text)\n sso_token = responseRes.text.split('[')[2].split(']')[0]\n assertertoken = responseRes.text.split('[')[6].split(']')[0]\n assertertoken_ = {'ora_epm_ctg': assertertoken}\n updateHeaders(assertertoken_)\n token = {'_sso_token': sso_token}\n updateHeaders(token)\n CSRF = responseRes.headers.get('X-ORACLE-BPMUI-CSRF')\n csrf_ = {'X-ORACLE-BPMUI-CSRF': CSRF}\n updateHeaders(csrf_)\n ECID = responseRes.headers.get('X-ORACLE-DMS-ECID')\n h = {'X-ORACLE-DMS-ECID': ECID}\n updateHeaders(h)\n\n\ndef updateHeaders(h):\n logger.info(f'更新请求头: {h}')\n s.headers.update(h)\n\n\ndef request_dyn():\n logger.info('dyn: 开始测试请求')\n postUrl = '%s/raframework/browse/dyn' % serverUrl\n postData = {'page': '/conf/CDSConfig.jsp', 'amp': '', 'action':\n 'returnXML', 'LOCALE_LANGUAGE': 'en_US', 'rightToLeft': 'false',\n 'accessibilityMode': 'false', 'themeSelection': 'Skyros',\n 'sso_token': s.headers.get('_sso_token')}\n responseRes = s.post(postUrl, data=postData)\n logger.info(f'dyn: 响应header:{responseRes.headers}')\n\n\ndef request_planning_session(plan_name):\n \"\"\"\n \"accessibilityMode\":\t\"false\",\n \"bpm.contentheight\":\t\"621\",\n \"bpm.contentwidth\":\t\"1314\",\n \"bpm.objectpaletteheight\":\t\"648\",\n \"bpm.objectpalettewidth\":\t\"207\",\n \"cluster\":\t\"PLANNING_LWA\",\n \"instance\":\t\"7\",\n \"LOCALE_LANGUAGE\":\t\"zh_CN\",\n \"mru_id\":\t\"PLANNING_LWA_JSTI1:application\",\n \"repository_token\":\t\"59d9b714b22a35fb616dd3c05c5850d56b12522a9561499e9ea22afd918b6d36ea703f19668538504f86305c84f95441a1daf4cac09725703738d5073524871af0489411df16d2bb8f5d4726acdcc389b45e9e6ff00482249c53c1886ca68bfc090fcfbd365243996436f5bbe3affd9c87f6d5e8b7626e59adaeebcc4a89a66ef9725d8d4218c8b0c15912455a2690fcd2391a71806767f05fe66b395dda3e74b75ffa16e80c7814c47657dbc5d652da1044edc74ff20d6e604bdd733542457c3befca52c0700d758445b00ad519d0e8dee43e40cb78e070caca6b7c7a56008b2cbad75e83c7c8454f93177992f9166721331db1e11e48a113a51b3ebc2a79f1d74199127183d7708c47a3ff71663d9d\",\n \"rightToLeft\":\t\"false\",\n \"sourceApp\":\t\"JSTI1\",\n 
\"sso_token\":\t\"sJIsuVcoOMtHD5CgUaVLmuo4SfCatQy4dowOxaTF0cj1CDqPrPW8YYKvn4nU5rsBYG1yLUChBU/ndO+3pDhwFcRbHJZmaiUOnyFhEh97A5xDXatOpkhIPx4CW+ygHNQlmKrbgUZEmJBgwNT4lcBuDPCZiodPZBo3zCkrSMLQcq0R8qoX6nHvfSVW3ep86WHDyJ859v9OCxcbo4FD4tSv4fTdHGdkGtQaRpdMtuSGtvY2hB+Z7MPEHqkhkIVAt0WWVplND5rUdF5yrLVsywYLWq7I2GH3/UVnwSgsmQy/psjChbnHkzqAcxNg837XRFI1EZBVxaGvdJw6U2mu3qlD29oYi2C/UqwODIjXGtj/st29j6fvd3lJHpsneutkVoG0E/mohFU+JzQaCnopeA+L3A8pORvkfwSyqhURqiLLHS0=\",\n \"themeSelection\":\t\"Skyros\",\n\n\n \"\"\"\n logger.info('planning_session: 开始测试请求')\n postUrl = ('%s/HyperionPlanning/modules/com/hyperion/planning/Adf.jsp' %\n serverUrl)\n postData = {'accessibilityMode': 'false', 'bpm.contentheight': '621',\n 'bpm.contentwidth': '1314', 'bpm.objectpaletteheight': '648',\n 'bpm.objectpalettewidth': '207', 'cluster': 'PLANNING_LWA',\n 'instance': '7', 'LOCALE_LANGUAGE': 'zh_CN', 'mru_id':\n f'PLANNING_LWA_{plan_name}:application', 'repository_token': s.\n cookies.get('ORA_EPMWS_session'), 'rightToLeft': 'false',\n 'sourceApp': plan_name, 'sso_token': s.headers.get('_sso_token'),\n 'themeSelection': 'Skyros'}\n responseRes = s.post(postUrl, data=postData)\n logger.info(f'planning_session: 响应cookie:{responseRes.cookies}')\n s.cookies.set('ORA_HP_MRUApplication', plan_name, path=\n '/HyperionPlanning/', domain=host)\n s.cookies.set('ORA_HP_MRUUsername', s.cookies.get('ORA_EPMWS_User'),\n path='/HyperionPlanning/', domain=host)\n logger.info('当前的header为: ' + str(s.headers))\n logger.info('当前的cookie为: ' + str(s.cookies))\n\n\nimport re\n\n\ndef request_planning_table():\n s.headers['Adf-Ads-Page-Id'] = '2'\n s.headers['Adf-Rich-Message'] = 'true'\n url = (serverUrl +\n '/HyperionPlanning/faces/PlanningCentral?_adf.ctrl-state=9gxaes0ha_55?_adf.ctrl-state='\n )\n response = s.post(url + '14hssan6gi_4', data=\n 'p:r:0:pc1:searchName=&org.apache.myfaces.trinidad.faces.FORM=f1&javax.faces.ViewState=!-9xx6pbfv3&oracle.adf.view.rich.DELTAS=%7Bp%3Ar%3A0%3Apc1%3AformTbl%3D%7BviewportSize%3D27%2Crows%3D33%2CscrollTopRowKey%7Cp%3D0%7D%2Cp%3AformTbl%3D%7BselectedRowKeys%3D0%7D%7D&event=p%3AloadBtn&event.p:loadBtn=%3Cm+xmlns%3D%22http%3A%2F%2Foracle.com%2FrichClient%2Fcomm%22%3E%3Ck+v%3D%22_custom%22%3E%3Cb%3E1%3C%2Fb%3E%3C%2Fk%3E%3Ck+v%3D%22module%22%3E%3Cs%3Eenterdata%3C%2Fs%3E%3C%2Fk%3E%3Ck+v%3D%22folderId%22%3E%3Cs%3E%3C%2Fs%3E%3C%2Fk%3E%3Ck+v%3D%22formId%22%3E%3Cn%3E55532%3C%2Fn%3E%3C%2Fk%3E%3Ck+v%3D%22searchFormName%22%3E%3Cs%3EA0000%E4%B8%BB%E8%A6%81%E6%8C%87%E6%A0%87%E8%A1%A8%3C%2Fs%3E%3C%2Fk%3E%3Ck+v%3D%22adhocSessionIdForForm%22%3E%3Cs%3E%3C%2Fs%3E%3C%2Fk%3E%3Ck+v%3D%22artifactType%22%3E%3Cs%3E%3C%2Fs%3E%3C%2Fk%3E%3Ck+v%3D%22tlArtifactType%22%2F%3E%3Ck+v%3D%22tlArtifactId%22%2F%3E%3Ck+v%3D%22immediate%22%3E%3Cb%3E1%3C%2Fb%3E%3C%2Fk%3E%3Ck+v%3D%22type%22%3E%3Cs%3EloadModule%3C%2Fs%3E%3C%2Fk%3E%3C%2Fm%3E&oracle.adf.view.rich.PROCESS=p%3AloadBtn'\n )\n logger.info(response.content)\n\n\nif __name__ == '__main__':\n workspaceLogon('admin', 'welcome1')\n request_dyn()\n request_planning_session('JSTI')\n request_planning_table()\n",
"step-5": "#! py -3\n# -*- coding: utf-8 -*-\n\nimport requests\nfrom urllib.parse import quote\nimport logging\nfrom urllib.parse import urlparse\n\nlogger = logging.getLogger(__name__)\n\nlogger = logging.getLogger()\n# 配置日志级别,如果不显示配置,默认为Warning,表示所有warning级别已下的其他level直接被省略,\n# 内部绑定的handler对象也只能接收到warning级别以上的level,你可以理解为总开关\nlogger.setLevel(logging.INFO)\n\nformatter = logging.Formatter(fmt=\"%(asctime)s %(filename)s[line:%(lineno)d]%(levelname)s - %(message)s\",\n datefmt=\"%m/%d/%Y %I:%M:%S %p\") # 创建一个格式化对象\n\nconsole = logging.StreamHandler() # 配置日志输出到控制台\nconsole.setLevel(logging.INFO) # 设置输出到控制台的最低日志级别\nconsole.setFormatter(formatter) # 设置格式\nlogger.addHandler(console)\n\n\n# 后续这些配置项都会移动到一个单独的配置文件\nuserAgent = \"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36\"\n# serverUrl = \"http://192.168.60.125:19000/\"\nserverUrl = \"http://epm.huaxinglu72hao.com/\"\nhost = \"epm.huaxinglu72hao.com\"\n# 定义当前会话\n# 会话使用统一的header 和 cookie\n# 下面代码更新header 和 cookie 的使用后续的所有请求都会使用更新后的header 和 cookie\n# 所以下面的函数没有返回值\ns = requests.Session()\ns.headers.update({\"User-Agent\": userAgent})\ns.headers.update({\"Referer\": serverUrl})\ns.headers.update({\"Host\": host})\n\n\n\n\n\n\ndef workspaceLogon(account, password):\n # 登录\n logger.info(\"logon: 开始模拟登录workspace\")\n\n postUrl = \"%sworkspace/logon\" % serverUrl\n postData = {\n \"sso_username\": account,\n \"sso_password\": password,\n }\n try:\n responseRes = s.post(postUrl, data = postData)\n except Exception as e:\n logger.error(e)\n raise RuntimeError(\"登录失败: 网络异常, 请检查服务器地址配置\")\n\n\n\n logger.info(\"登录返回: \" + responseRes.text)\n\n # 无论是否登录成功,状态码一般都是 statusCode = 200\n sso_token = responseRes.text.split('[')[2].split(']')[0]\n assertertoken = responseRes.text.split('[')[6].split(']')[0]\n\n assertertoken_ = {\"ora_epm_ctg\": assertertoken}\n\n updateHeaders(assertertoken_)\n\n token = {\"_sso_token\": sso_token}\n updateHeaders(token)\n\n\n CSRF = responseRes.headers.get(\"X-ORACLE-BPMUI-CSRF\")\n csrf_ = {\"X-ORACLE-BPMUI-CSRF\": CSRF}\n updateHeaders(csrf_)\n\n ECID = responseRes.headers.get(\"X-ORACLE-DMS-ECID\")\n h = {\"X-ORACLE-DMS-ECID\": ECID}\n\n updateHeaders(h)\n\n\ndef updateHeaders(h):\n logger.info(f\"更新请求头: {h}\")\n s.headers.update(h)\n\n\ndef request_dyn():\n\n logger.info (\"dyn: 开始测试请求\")\n postUrl = \"%s/raframework/browse/dyn\" % serverUrl\n postData={\n \"page\": \"/conf/CDSConfig.jsp\",\n \"amp\":\"\",\n \"action\": \"returnXML\",\n \"LOCALE_LANGUAGE\": \"en_US\",\n \"rightToLeft\": \"false\",\n \"accessibilityMode\": \"false\",\n \"themeSelection\": \"Skyros\",\n \"sso_token\": s.headers.get(\"_sso_token\")\n }\n responseRes = s.post(postUrl, data=postData)\n # logger.info(f\"dyn: 响应text:{responseRes.text}\")\n logger.info(f\"dyn: 响应header:{responseRes.headers}\")\n\ndef request_planning_session(plan_name):\n \"\"\"\n \"accessibilityMode\":\t\"false\",\n \"bpm.contentheight\":\t\"621\",\n \"bpm.contentwidth\":\t\"1314\",\n \"bpm.objectpaletteheight\":\t\"648\",\n \"bpm.objectpalettewidth\":\t\"207\",\n \"cluster\":\t\"PLANNING_LWA\",\n \"instance\":\t\"7\",\n \"LOCALE_LANGUAGE\":\t\"zh_CN\",\n \"mru_id\":\t\"PLANNING_LWA_JSTI1:application\",\n 
\"repository_token\":\t\"59d9b714b22a35fb616dd3c05c5850d56b12522a9561499e9ea22afd918b6d36ea703f19668538504f86305c84f95441a1daf4cac09725703738d5073524871af0489411df16d2bb8f5d4726acdcc389b45e9e6ff00482249c53c1886ca68bfc090fcfbd365243996436f5bbe3affd9c87f6d5e8b7626e59adaeebcc4a89a66ef9725d8d4218c8b0c15912455a2690fcd2391a71806767f05fe66b395dda3e74b75ffa16e80c7814c47657dbc5d652da1044edc74ff20d6e604bdd733542457c3befca52c0700d758445b00ad519d0e8dee43e40cb78e070caca6b7c7a56008b2cbad75e83c7c8454f93177992f9166721331db1e11e48a113a51b3ebc2a79f1d74199127183d7708c47a3ff71663d9d\",\n \"rightToLeft\":\t\"false\",\n \"sourceApp\":\t\"JSTI1\",\n \"sso_token\":\t\"sJIsuVcoOMtHD5CgUaVLmuo4SfCatQy4dowOxaTF0cj1CDqPrPW8YYKvn4nU5rsBYG1yLUChBU/ndO+3pDhwFcRbHJZmaiUOnyFhEh97A5xDXatOpkhIPx4CW+ygHNQlmKrbgUZEmJBgwNT4lcBuDPCZiodPZBo3zCkrSMLQcq0R8qoX6nHvfSVW3ep86WHDyJ859v9OCxcbo4FD4tSv4fTdHGdkGtQaRpdMtuSGtvY2hB+Z7MPEHqkhkIVAt0WWVplND5rUdF5yrLVsywYLWq7I2GH3/UVnwSgsmQy/psjChbnHkzqAcxNg837XRFI1EZBVxaGvdJw6U2mu3qlD29oYi2C/UqwODIjXGtj/st29j6fvd3lJHpsneutkVoG0E/mohFU+JzQaCnopeA+L3A8pORvkfwSyqhURqiLLHS0=\",\n \"themeSelection\":\t\"Skyros\",\n\n\n \"\"\"\n\n\n\n\n logger.info (\"planning_session: 开始测试请求\")\n postUrl = \"%s/HyperionPlanning/modules/com/hyperion/planning/Adf.jsp\" % serverUrl\n postData={\n \"accessibilityMode\": \"false\",\n \"bpm.contentheight\": \"621\",\n \"bpm.contentwidth\": \"1314\",\n \"bpm.objectpaletteheight\": \"648\",\n \"bpm.objectpalettewidth\": \"207\",\n \"cluster\": \"PLANNING_LWA\",\n \"instance\": \"7\",\n \"LOCALE_LANGUAGE\": \"zh_CN\",\n \"mru_id\": f\"PLANNING_LWA_{plan_name}:application\",\n \"repository_token\": s.cookies.get(\"ORA_EPMWS_session\"),\n \"rightToLeft\": \"false\",\n \"sourceApp\": plan_name,\n \"sso_token\": s.headers.get(\"_sso_token\"),\n \"themeSelection\": \"Skyros\",\n }\n responseRes = s.post(postUrl, data=postData)\n # logger.info(f\"dyn: 响应text:{responseRes.text}\")\n logger.info(f\"planning_session: 响应cookie:{responseRes.cookies}\")\n\n # 手动添加两个cookie\n s.cookies.set(\"ORA_HP_MRUApplication\", plan_name, path=\"/HyperionPlanning/\", domain=host)\n s.cookies.set(\"ORA_HP_MRUUsername\", s.cookies.get(\"ORA_EPMWS_User\"), path=\"/HyperionPlanning/\", domain=host)\n\n logger.info(\"当前的header为: \" + str(s.headers))\n logger.info(\"当前的cookie为: \" + str(s.cookies))\n # logger.info(f\"planning_session: 响应:{responseRes.text}\")\n\n# 访问一个具体的表单\nimport re\ndef request_planning_table():\n # 下面的数据都是写死的, 只适用于JSTI->A0000主要指标表\n s.headers[\"Adf-Ads-Page-Id\"] = \"2\"\n s.headers[\"Adf-Rich-Message\"] = \"true\"\n url = serverUrl + \"/HyperionPlanning/faces/PlanningCentral?_adf.ctrl-state=9gxaes0ha_55?_adf.ctrl-state=\"\n response = s.post(url + \"14hssan6gi_4\",\n 
data=\"p:r:0:pc1:searchName=&org.apache.myfaces.trinidad.faces.FORM=f1&javax.faces.ViewState=!-9xx6pbfv3&oracle.adf.view.rich.DELTAS=%7Bp%3Ar%3A0%3Apc1%3AformTbl%3D%7BviewportSize%3D27%2Crows%3D33%2CscrollTopRowKey%7Cp%3D0%7D%2Cp%3AformTbl%3D%7BselectedRowKeys%3D0%7D%7D&event=p%3AloadBtn&event.p:loadBtn=%3Cm+xmlns%3D%22http%3A%2F%2Foracle.com%2FrichClient%2Fcomm%22%3E%3Ck+v%3D%22_custom%22%3E%3Cb%3E1%3C%2Fb%3E%3C%2Fk%3E%3Ck+v%3D%22module%22%3E%3Cs%3Eenterdata%3C%2Fs%3E%3C%2Fk%3E%3Ck+v%3D%22folderId%22%3E%3Cs%3E%3C%2Fs%3E%3C%2Fk%3E%3Ck+v%3D%22formId%22%3E%3Cn%3E55532%3C%2Fn%3E%3C%2Fk%3E%3Ck+v%3D%22searchFormName%22%3E%3Cs%3EA0000%E4%B8%BB%E8%A6%81%E6%8C%87%E6%A0%87%E8%A1%A8%3C%2Fs%3E%3C%2Fk%3E%3Ck+v%3D%22adhocSessionIdForForm%22%3E%3Cs%3E%3C%2Fs%3E%3C%2Fk%3E%3Ck+v%3D%22artifactType%22%3E%3Cs%3E%3C%2Fs%3E%3C%2Fk%3E%3Ck+v%3D%22tlArtifactType%22%2F%3E%3Ck+v%3D%22tlArtifactId%22%2F%3E%3Ck+v%3D%22immediate%22%3E%3Cb%3E1%3C%2Fb%3E%3C%2Fk%3E%3Ck+v%3D%22type%22%3E%3Cs%3EloadModule%3C%2Fs%3E%3C%2Fk%3E%3C%2Fm%3E&oracle.adf.view.rich.PROCESS=p%3AloadBtn\")\n\n\n # 从输出的结果看被重定向了\n logger.info(response.content)\n\n # m = re.search(r\"_adf\\.ctrl-state=.+?&\", response.text)\n # current = m.group(0).split(\"=\")[1].replace(\"&\", \"\")\n #\n # response = s.post(url + current,\n # data=\"p:r:0:pc1:searchName=&org.apache.myfaces.trinidad.faces.FORM=f1&javax.faces.ViewState=!-9xx6pbfv3&oracle.adf.view.rich.DELTAS=%7Bp%3Ar%3A0%3Apc1%3AformTbl%3D%7BviewportSize%3D27%2Crows%3D33%2CscrollTopRowKey%7Cp%3D0%7D%2Cp%3AformTbl%3D%7BselectedRowKeys%3D0%7D%7D&event=p%3AloadBtn&event.p:loadBtn=%3Cm+xmlns%3D%22http%3A%2F%2Foracle.com%2FrichClient%2Fcomm%22%3E%3Ck+v%3D%22_custom%22%3E%3Cb%3E1%3C%2Fb%3E%3C%2Fk%3E%3Ck+v%3D%22module%22%3E%3Cs%3Eenterdata%3C%2Fs%3E%3C%2Fk%3E%3Ck+v%3D%22folderId%22%3E%3Cs%3E%3C%2Fs%3E%3C%2Fk%3E%3Ck+v%3D%22formId%22%3E%3Cn%3E55532%3C%2Fn%3E%3C%2Fk%3E%3Ck+v%3D%22searchFormName%22%3E%3Cs%3EA0000%E4%B8%BB%E8%A6%81%E6%8C%87%E6%A0%87%E8%A1%A8%3C%2Fs%3E%3C%2Fk%3E%3Ck+v%3D%22adhocSessionIdForForm%22%3E%3Cs%3E%3C%2Fs%3E%3C%2Fk%3E%3Ck+v%3D%22artifactType%22%3E%3Cs%3E%3C%2Fs%3E%3C%2Fk%3E%3Ck+v%3D%22tlArtifactType%22%2F%3E%3Ck+v%3D%22tlArtifactId%22%2F%3E%3Ck+v%3D%22immediate%22%3E%3Cb%3E1%3C%2Fb%3E%3C%2Fk%3E%3Ck+v%3D%22type%22%3E%3Cs%3EloadModule%3C%2Fs%3E%3C%2Fk%3E%3C%2Fm%3E&oracle.adf.view.rich.PROCESS=p%3AloadBtn\")\n #\n # logger.info(response.content)\nif __name__ == \"__main__\":\n # 从返回结果来看,有登录成功\n workspaceLogon(\"admin\", \"welcome1\")\n request_dyn()\n request_planning_session(\"JSTI\")\n request_planning_table()\n # logger.info(\"sso_token = %s\" % sso_token)\n # logger.info(\"sso_token = %s\" % sso_token)\n # logger.info(\"assertertoken = %s\" % assertertoken)\n # request_dyn(sso_token,assertertoken)\n # requestHSS(\"tYy6FOvH4ZhJR1CUTy83Q9ZJxiNnYbnAt8fjWcMBII4rEmQlYjth+/M4MLIXVuXp7Hi3xQS4+QRySoxvNuFibcGbxbIYRVLFVKogwyhtIAcvtIXMvfhxd8svcLZgIXyTklurCsTarP9KtRgc26B3XRWlDG/QAzVLWyGH26ROffQpUj+bW6yRrj7A0udq1PbqGFXjDZ9iNW0ALbg0Z5NC7g3pBgjtetBohXRmpV32DCw4tI1Y7j7tLnHtSFk/NtdNri5AAFCTqTPd6HYdBzbCDqfP7ZEdfeXJFsfatRE5Pcgqm36hV1U7HeDENhTvNBtZiiQ9OfMdopyHQQvPnBQsyfKzSKTq1O5bSHH9HzQfCJdvq/nkSbalctY2SxIb0vtefJ9fUZ2y4bMAm/g95EZLiKZ5aouVrzOKjt8sl1zVctk+Ivg141wUPqtTULOYdBoi\")\n\n\n",
"step-ids": [
3,
5,
6,
8,
9
]
}
|
[
3,
5,
6,
8,
9
] |
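The truncated record above drives an Oracle Hyperion Planning login over HTTP. For orientation, here is a minimal sketch of the session plumbing it relies on: one shared requests.Session whose cookies and headers carry the SSO state from call to call. The host, URLs, and token names are placeholders lifted from the snippet, not a verified Hyperion API.

import logging
import requests

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger("hyperion")

serverUrl = "http://epm.example.com:19000"  # placeholder host
s = requests.Session()  # one session so cookies persist across requests

def planning_session(plan_name):
    """Replay the tokens that the earlier workspace login left on the session."""
    postUrl = "%s/HyperionPlanning/modules/com/hyperion/planning/Adf.jsp" % serverUrl
    postData = {
        "cluster": "PLANNING_LWA",
        "mru_id": "PLANNING_LWA_%s:application" % plan_name,
        # Both tokens were captured by earlier requests on this same session.
        "repository_token": s.cookies.get("ORA_EPMWS_session"),
        "sso_token": s.headers.get("_sso_token"),
        "sourceApp": plan_name,
    }
    response = s.post(postUrl, data=postData)
    logger.info("planning_session cookies: %s", response.cookies)
    # Planning expects these MRU cookies on later requests; set them by hand,
    # exactly as the record above does.
    s.cookies.set("ORA_HP_MRUApplication", plan_name, path="/HyperionPlanning/")
    s.cookies.set("ORA_HP_MRUUsername", s.cookies.get("ORA_EPMWS_User"),
                  path="/HyperionPlanning/")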
from external.odds.betclic.api import get_odds
# FDJ parsing is broken: their UI has been refactored onto a JS framework with a
# protected async JSON API (it requires custom headers), which makes match odds
# harder to isolate and group; hence the move to another betting website, which
# is still fully HTML-rendered.
|
normal
|
{
"blob_id": "8b583ee55df409020a605b467479236e610a2efe",
"index": 3646,
"step-1": "<mask token>\n",
"step-2": "from external.odds.betclic.api import get_odds\n",
"step-3": "from external.odds.betclic.api import get_odds\n\n# FDJ parsing is broken - their UI has been refactored with JS framework &\n# protected async JSON API usage (requires HEADERS) and more complex to isolate & group match odds\n# hence move to another betting website - which is still full html rendered\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
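The comment in this record motivates the switch of data source: a fully server-rendered page can be scraped with plain requests plus BeautifulSoup, whereas a JS front end behind a header-protected JSON API cannot be parsed from the raw HTML. A minimal sketch of the HTML route follows; the URL and CSS selectors are invented for illustration.

import requests
from bs4 import BeautifulSoup

def scrape_odds(url="https://bookmaker.example/football"):
    """Scrape match odds from a fully HTML-rendered page (hypothetical selectors)."""
    html = requests.get(url, timeout=10).text
    soup = BeautifulSoup(html, "html.parser")
    odds = []
    for match in soup.select("div.match"):  # one block per match
        teams = [t.get_text(strip=True) for t in match.select(".team-name")]
        prices = [float(o.get_text(strip=True)) for o in match.select(".odd")]
        odds.append({"teams": teams, "odds": prices})
    return odds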
# -*- encoding: utf-8 -*-
from django.conf.urls import patterns, url
urlpatterns = patterns('apps.profiles.views',
url(r'^$', 'index', name='profiles'),
# Show a specific profile.
url(r'^view/(?P<username>[a-zA-Z0-9_-]+)/$', 'view_profile', name='profiles_view'),
url(r'^edit/$', 'edit_profile', name='profile_edit'),
url(r'^privacy/$', 'privacy', name='profile_privacy'),
url(r'^connected_apps/$', 'connected_apps', name='profile_connected_apps'),
url(r'^password/$', 'password', name='profile_password'),
url(r'^position/$', 'position', name='profile_position'),
url(r'^email/$', 'add_email', name='profile_add_email'),
# Ajax views
url(r'^deleteposition/$', 'delete_position', name='profile_delete_position'),
url(r'^email/delete_email/$', 'delete_email', name='profile_delete_email'),
url(r'^email/set_primary/$', 'set_primary', name='profile_set_primary'),
url(r'^email/verify_email/$', 'verify_email', name='profile_verify_email'),
url(r'^email/toggle_infomail/$', 'toggle_infomail', name='profile_toggle_infomail'),
url(r'^email/toggle_jobmail/$', 'toggle_jobmail', name='profile_toggle_jobmail'),
url(r'^marks/update_mark_rules/$', 'update_mark_rules', name='profile_update_mark_rules'),
# Endpoint that exposes a json lump of all users but only id and name.
url(r'^api_plain_user_search/$', 'api_plain_user_search', name='profiles_api_plain_user_search'),
# Endpoint that exposes a json lump of all users which have set their profile to public.
url(r'^api_user_search/$', 'api_user_search', name='profiles_api_user_search'),
url(r'^user_search/$', 'user_search', name='profiles_user_search'),
# Profile index with active tab.
url(r'^(?P<active_tab>\w+)/$', 'index', name='profiles_active'),
)
|
normal
|
{
"blob_id": "5707e24596dfe2d85e9a7caa93aa3e253a41ae40",
"index": 6620,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nurlpatterns = patterns('apps.profiles.views', url('^$', 'index', name=\n 'profiles'), url('^view/(?P<username>[a-zA-Z0-9_-]+)/$', 'view_profile',\n name='profiles_view'), url('^edit/$', 'edit_profile', name=\n 'profile_edit'), url('^privacy/$', 'privacy', name='profile_privacy'),\n url('^connected_apps/$', 'connected_apps', name=\n 'profile_connected_apps'), url('^password/$', 'password', name=\n 'profile_password'), url('^position/$', 'position', name=\n 'profile_position'), url('^email/$', 'add_email', name=\n 'profile_add_email'), url('^deleteposition/$', 'delete_position', name=\n 'profile_delete_position'), url('^email/delete_email/$', 'delete_email',\n name='profile_delete_email'), url('^email/set_primary/$', 'set_primary',\n name='profile_set_primary'), url('^email/verify_email/$',\n 'verify_email', name='profile_verify_email'), url(\n '^email/toggle_infomail/$', 'toggle_infomail', name=\n 'profile_toggle_infomail'), url('^email/toggle_jobmail/$',\n 'toggle_jobmail', name='profile_toggle_jobmail'), url(\n '^marks/update_mark_rules/$', 'update_mark_rules', name=\n 'profile_update_mark_rules'), url('^api_plain_user_search/$',\n 'api_plain_user_search', name='profiles_api_plain_user_search'), url(\n '^api_user_search/$', 'api_user_search', name=\n 'profiles_api_user_search'), url('^user_search/$', 'user_search', name=\n 'profiles_user_search'), url('^(?P<active_tab>\\\\w+)/$', 'index', name=\n 'profiles_active'))\n",
"step-3": "from django.conf.urls import patterns, url\nurlpatterns = patterns('apps.profiles.views', url('^$', 'index', name=\n 'profiles'), url('^view/(?P<username>[a-zA-Z0-9_-]+)/$', 'view_profile',\n name='profiles_view'), url('^edit/$', 'edit_profile', name=\n 'profile_edit'), url('^privacy/$', 'privacy', name='profile_privacy'),\n url('^connected_apps/$', 'connected_apps', name=\n 'profile_connected_apps'), url('^password/$', 'password', name=\n 'profile_password'), url('^position/$', 'position', name=\n 'profile_position'), url('^email/$', 'add_email', name=\n 'profile_add_email'), url('^deleteposition/$', 'delete_position', name=\n 'profile_delete_position'), url('^email/delete_email/$', 'delete_email',\n name='profile_delete_email'), url('^email/set_primary/$', 'set_primary',\n name='profile_set_primary'), url('^email/verify_email/$',\n 'verify_email', name='profile_verify_email'), url(\n '^email/toggle_infomail/$', 'toggle_infomail', name=\n 'profile_toggle_infomail'), url('^email/toggle_jobmail/$',\n 'toggle_jobmail', name='profile_toggle_jobmail'), url(\n '^marks/update_mark_rules/$', 'update_mark_rules', name=\n 'profile_update_mark_rules'), url('^api_plain_user_search/$',\n 'api_plain_user_search', name='profiles_api_plain_user_search'), url(\n '^api_user_search/$', 'api_user_search', name=\n 'profiles_api_user_search'), url('^user_search/$', 'user_search', name=\n 'profiles_user_search'), url('^(?P<active_tab>\\\\w+)/$', 'index', name=\n 'profiles_active'))\n",
"step-4": "# -*- encoding: utf-8 -*-\n\nfrom django.conf.urls import patterns, url\n\nurlpatterns = patterns('apps.profiles.views',\n url(r'^$', 'index', name='profiles'),\n\n # Show a specific profile.\n url(r'^view/(?P<username>[a-zA-Z0-9_-]+)/$', 'view_profile', name='profiles_view'),\n\n url(r'^edit/$', 'edit_profile', name='profile_edit'),\n url(r'^privacy/$', 'privacy', name='profile_privacy'),\n url(r'^connected_apps/$', 'connected_apps', name='profile_connected_apps'),\n url(r'^password/$', 'password', name='profile_password'),\n url(r'^position/$', 'position', name='profile_position'),\n url(r'^email/$', 'add_email', name='profile_add_email'),\n\n # Ajax views\n url(r'^deleteposition/$', 'delete_position', name='profile_delete_position'),\n url(r'^email/delete_email/$', 'delete_email', name='profile_delete_email'),\n url(r'^email/set_primary/$', 'set_primary', name='profile_set_primary'),\n url(r'^email/verify_email/$', 'verify_email', name='profile_verify_email'),\n url(r'^email/toggle_infomail/$', 'toggle_infomail', name='profile_toggle_infomail'),\n url(r'^email/toggle_jobmail/$', 'toggle_jobmail', name='profile_toggle_jobmail'),\n url(r'^marks/update_mark_rules/$', 'update_mark_rules', name='profile_update_mark_rules'),\n \n # Endpoint that exposes a json lump of all users but only id and name. \n url(r'^api_plain_user_search/$', 'api_plain_user_search', name='profiles_api_plain_user_search'),\n\n # Endpoint that exposes a json lump of all users which have set their profile to public.\n url(r'^api_user_search/$', 'api_user_search', name='profiles_api_user_search'),\n url(r'^user_search/$', 'user_search', name='profiles_user_search'),\n\n # Profile index with active tab.\n url(r'^(?P<active_tab>\\w+)/$', 'index', name='profiles_active'),\n)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
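For context: django.conf.urls.patterns() with string view names, as used in this record, was deprecated in Django 1.8 and removed in 1.10. The same routes today would import the view callables and list them directly; a sketch of the modern equivalent for a few of the routes:

from django.urls import path, re_path
from apps.profiles import views

urlpatterns = [
    path("", views.index, name="profiles"),
    re_path(r"^view/(?P<username>[a-zA-Z0-9_-]+)/$", views.view_profile,
            name="profiles_view"),
    path("edit/", views.edit_profile, name="profile_edit"),
    re_path(r"^(?P<active_tab>\w+)/$", views.index, name="profiles_active"),
]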
# 4, [[1,0],[2,0],[3,1],[3,2]]
# 3->1->0
# \ ^
# \ |
# \> 2
# 1,0,2,3
# stack 3
#
# 0 1 2 3
# 1,0
# stack 1
# 0
#
# def findOrder(numCourses, prerequisites):
# if len(prerequisites) == 0:
# order = []
# for i in range(0, numCourses):
# order.append(i)
# return order
#
# edges = {}
# for prerequisite in prerequisites:
# if prerequisite[0] not in edges:
# edges[prerequisite[0]] = [prerequisite[1]]
# else:
# v = edges[prerequisite[0]]
# v.append(prerequisite[1])
# edges[prerequisite[0]] = v
#
# visited = {}
# stack = []
# order = []
# while len(edges) != 0:
# edge_u = list(edges.keys())[0]
# if len(stack) == 0:
# if edge_u not in visited:
# stack.append(edge_u)
# visited[edge_u] = 1
# else:
# u = stack[-1]
# flag = True
# if u in edges:
# for v in edges[u]:
# if v not in visited:
# visited[v] = 1
# stack.append(v)
# flag = False
# else:
# if v in edges and u in edges[v]:
# return []
# if flag:
# order.append(u)
# stack.pop()
# if u in edges:
# del edges[u]
#
# for i in range(0, numCourses):
# if i not in order:
# order.append(i)
# return order
def findOrder(numCourses, prerequisites):
    # No constraints at all: any order works, e.g. 0..numCourses-1.
    if len(prerequisites) == 0:
        order = []
        for i in range(0, numCourses):
            order.append(i)
        return order
    # Adjacency map: course -> list of its direct prerequisites.
    edges = {}
    for prerequisite in prerequisites:
        # A course that lists itself as a prerequisite can never be taken.
        if prerequisite[0] == prerequisite[1]:
            return []
        if prerequisite[0] not in edges:
            edges[prerequisite[0]] = [prerequisite[1]]
        else:
            v = edges[prerequisite[0]]
            v.append(prerequisite[1])
            edges[prerequisite[0]] = v
    # Iterative DFS. A vertex is re-pushed before its neighbours and emitted to
    # `order` on its second visit, once all of its neighbours have been seen.
    visited = {}
    stack = []
    order = []
    for vertex in edges.keys():
        if vertex not in visited:
            stack.append(vertex)
            visited[vertex] = 1
    while len(stack) != 0:
        v = stack.pop()
        if v not in edges:
            # No outgoing edges: v has no prerequisites of its own.
            order.append(v)
        else:
            flag = True
            stack.append(v)
            for u in edges[v]:
                if u in visited:
                    # Cycle check; note it only catches two-node cycles
                    # (u -> v and v -> u), not longer ones.
                    if u in edges and v in edges[u]:
                        return []
                else:
                    visited[u] = 1
                    stack.append(u)
                    flag = False
            if flag:
                stack.pop()
                order.append(v)
    # Courses that appear in no prerequisite pair are appended at the end.
    for v in range(0, numCourses):
        if v not in order:
            order.append(v)
    return order[::-1]
print(findOrder(2, [[1, 0]]))
# print(findOrder(4, [[1, 0], [2, 0], [3, 1], [3, 2]]))
|
normal
|
{
"blob_id": "56892e125934d5de937b92a08bd7707c12c70928",
"index": 689,
"step-1": "<mask token>\n",
"step-2": "def findOrder(numCourses, prerequisites):\n if len(prerequisites) == 0:\n order = []\n for i in range(0, numCourses):\n order.append(i)\n return order\n edges = {}\n for prerequisite in prerequisites:\n if prerequisite[0] == prerequisite[1]:\n return []\n if prerequisite[0] not in edges:\n edges[prerequisite[0]] = [prerequisite[1]]\n else:\n v = edges[prerequisite[0]]\n v.append(prerequisite[1])\n edges[prerequisite[0]] = v\n visited = {}\n stack = []\n order = []\n for vertex in edges.keys():\n if vertex not in visited:\n stack.append(vertex)\n visited[vertex] = 1\n while len(stack) != 0:\n v = stack.pop()\n if v not in edges:\n order.append(v)\n else:\n flag = True\n stack.append(v)\n for u in edges[v]:\n if u in visited:\n if u in edges and v in edges[u]:\n return []\n else:\n visited[u] = 1\n stack.append(u)\n flag = False\n if flag:\n stack.pop()\n order.append(v)\n for v in range(0, numCourses):\n if v not in order:\n order.append(v)\n return order[::-1]\n\n\n<mask token>\n",
"step-3": "def findOrder(numCourses, prerequisites):\n if len(prerequisites) == 0:\n order = []\n for i in range(0, numCourses):\n order.append(i)\n return order\n edges = {}\n for prerequisite in prerequisites:\n if prerequisite[0] == prerequisite[1]:\n return []\n if prerequisite[0] not in edges:\n edges[prerequisite[0]] = [prerequisite[1]]\n else:\n v = edges[prerequisite[0]]\n v.append(prerequisite[1])\n edges[prerequisite[0]] = v\n visited = {}\n stack = []\n order = []\n for vertex in edges.keys():\n if vertex not in visited:\n stack.append(vertex)\n visited[vertex] = 1\n while len(stack) != 0:\n v = stack.pop()\n if v not in edges:\n order.append(v)\n else:\n flag = True\n stack.append(v)\n for u in edges[v]:\n if u in visited:\n if u in edges and v in edges[u]:\n return []\n else:\n visited[u] = 1\n stack.append(u)\n flag = False\n if flag:\n stack.pop()\n order.append(v)\n for v in range(0, numCourses):\n if v not in order:\n order.append(v)\n return order[::-1]\n\n\nprint(findOrder(2, [[1, 0]]))\n",
"step-4": "# 4, [[1,0],[2,0],[3,1],[3,2]]\n\n# 3->1->0\n# \\ ^\n# \\ |\n# \\> 2\n\n# 1,0,2,3\n# stack 3\n#\n# 0 1 2 3\n\n# 1,0\n# stack 1\n# 0\n#\n\n# def findOrder(numCourses, prerequisites):\n# if len(prerequisites) == 0:\n# order = []\n# for i in range(0, numCourses):\n# order.append(i)\n# return order\n#\n# edges = {}\n# for prerequisite in prerequisites:\n# if prerequisite[0] not in edges:\n# edges[prerequisite[0]] = [prerequisite[1]]\n# else:\n# v = edges[prerequisite[0]]\n# v.append(prerequisite[1])\n# edges[prerequisite[0]] = v\n#\n# visited = {}\n# stack = []\n# order = []\n# while len(edges) != 0:\n# edge_u = list(edges.keys())[0]\n# if len(stack) == 0:\n# if edge_u not in visited:\n# stack.append(edge_u)\n# visited[edge_u] = 1\n# else:\n# u = stack[-1]\n# flag = True\n# if u in edges:\n# for v in edges[u]:\n# if v not in visited:\n# visited[v] = 1\n# stack.append(v)\n# flag = False\n# else:\n# if v in edges and u in edges[v]:\n# return []\n# if flag:\n# order.append(u)\n# stack.pop()\n# if u in edges:\n# del edges[u]\n#\n# for i in range(0, numCourses):\n# if i not in order:\n# order.append(i)\n# return order\n\ndef findOrder(numCourses, prerequisites):\n if len(prerequisites) == 0:\n order = []\n for i in range(0, numCourses):\n order.append(i)\n return order\n\n edges = {}\n for prerequisite in prerequisites:\n if prerequisite[0] == prerequisite[1]:\n return []\n if prerequisite[0] not in edges:\n edges[prerequisite[0]] = [prerequisite[1]]\n else:\n v = edges[prerequisite[0]]\n v.append(prerequisite[1])\n edges[prerequisite[0]] = v\n\n visited = {}\n stack = []\n order = []\n for vertex in edges.keys():\n if vertex not in visited:\n stack.append(vertex)\n visited[vertex] = 1\n while len(stack) != 0:\n v = stack.pop()\n if v not in edges:\n order.append(v)\n else:\n flag = True\n stack.append(v)\n for u in edges[v]:\n if u in visited:\n if u in edges and v in edges[u]:\n return []\n else:\n visited[u] = 1\n stack.append(u)\n flag = False\n if flag:\n stack.pop()\n order.append(v)\n for v in range(0, numCourses):\n if v not in order:\n order.append(v)\n\n return order[::-1]\n\n\nprint(findOrder(2, [[1, 0]]))\n# print(findOrder(4, [[1, 0], [2, 0], [3, 1], [3, 2]]))\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
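Note that the DFS above only rejects two-node cycles (the `u in edges and v in edges[u]` check), so longer cycles can slip through undetected. The standard alternative is Kahn's algorithm: repeatedly emit courses with no remaining prerequisites, and report a cycle if anything is left over. A compact sketch:

from collections import deque

def find_order_kahn(num_courses, prerequisites):
    # graph[b] lists the courses unlocked by taking b; indegree counts prereqs.
    graph = [[] for _ in range(num_courses)]
    indegree = [0] * num_courses
    for course, prereq in prerequisites:
        graph[prereq].append(course)
        indegree[course] += 1
    queue = deque(i for i in range(num_courses) if indegree[i] == 0)
    order = []
    while queue:
        course = queue.popleft()
        order.append(course)
        for nxt in graph[course]:
            indegree[nxt] -= 1
            if indegree[nxt] == 0:
                queue.append(nxt)
    # Fewer than num_courses emitted means a cycle blocked the rest.
    return order if len(order) == num_courses else []

assert find_order_kahn(2, [[1, 0]]) == [0, 1]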
# Generated by Django 2.1.7 on 2020-01-09 08:19
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('goods', '0004_auto_20200109_0713'),
]
operations = [
migrations.AlterField(
model_name='banner',
name='show_type',
field=models.IntegerField(choices=[(1, '首页轮播'), (2, '最新活动')], default=1, verbose_name='展示控制'),
),
]
|
normal
|
{
"blob_id": "b7687240413441e1d3ed0085e5953f8089cbf4c9",
"index": 9303,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('goods', '0004_auto_20200109_0713')]\n operations = [migrations.AlterField(model_name='banner', name=\n 'show_type', field=models.IntegerField(choices=[(1, '首页轮播'), (2,\n '最新活动')], default=1, verbose_name='展示控制'))]\n",
"step-4": "from django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n dependencies = [('goods', '0004_auto_20200109_0713')]\n operations = [migrations.AlterField(model_name='banner', name=\n 'show_type', field=models.IntegerField(choices=[(1, '首页轮播'), (2,\n '最新活动')], default=1, verbose_name='展示控制'))]\n",
"step-5": "# Generated by Django 2.1.7 on 2020-01-09 08:19\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('goods', '0004_auto_20200109_0713'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='banner',\n name='show_type',\n field=models.IntegerField(choices=[(1, '首页轮播'), (2, '最新活动')], default=1, verbose_name='展示控制'),\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
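Migrations like this one are generated from a model edit; the AlterField above corresponds to a model declaration along these lines (model and field names are taken from the migration itself, the surrounding class is an assumption):

from django.db import models

class Banner(models.Model):
    SHOW_TYPE_CHOICES = (
        (1, '首页轮播'),  # home-page carousel
        (2, '最新活动'),  # latest promotions
    )
    show_type = models.IntegerField(choices=SHOW_TYPE_CHOICES, default=1,
                                    verbose_name='展示控制')  # display control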
# -*- coding: utf-8 -*-
class FizzBuzz:
def convert(self, number):
        if number % 3 == 0 and number % 5 != 0:
            return "Fizz"
        elif number % 3 != 0 and number % 5 == 0:
            return "Buzz"
        elif number % 3 == 0 and number % 5 == 0:
            return "FizzBuzz"
        else:
            return str(number)
|
normal
|
{
"blob_id": "fb9d639bca59ecb081e7d9f30f97bdcd35627d34",
"index": 6124,
"step-1": "<mask token>\n",
"step-2": "class FizzBuzz:\n <mask token>\n",
"step-3": "class FizzBuzz:\n\n def convert(self, number):\n if number % 3 == 0 and number % 5 != 0:\n return 'Fizz'\n elif number % 3 != 0 and number % 5 == 0:\n return 'Buzz'\n elif number % 3 == 0 and number % 5 == 0:\n return 'FizzBuzz'\n else:\n return str(number)\n",
"step-4": "# -*- coding: utf-8 -*-\n\nclass FizzBuzz:\n\n def convert(self, number):\n # raise NotImplementedError\n # for number in range(1, 101):\n if number%3 == 0 and number%5 != 0:\n return (\"Fizz\")\n elif number%3 != 0 and number%5 == 0:\n return(\"Buzz\")\n elif number%3 == 0 and number%5 == 0:\n return(\"FizzBuzz\")\n else:\n return str(number)",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
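A quick sanity check for the class above: multiples of 3 map to Fizz, multiples of 5 to Buzz, multiples of both to FizzBuzz, and everything else is returned as a string.

fizzbuzz = FizzBuzz()
assert fizzbuzz.convert(3) == "Fizz"
assert fizzbuzz.convert(5) == "Buzz"
assert fizzbuzz.convert(15) == "FizzBuzz"
assert fizzbuzz.convert(7) == "7"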
# Author: Charse
# Using Python lists
import copy
name = ["111", "222", "333", "444", "555"]
# Getting elements out of a list
print(name[0], name[2])  # 111 333
print(name[1:3])  # slicing: ['222', '333']
print(name[:3])  # ['111', '222', '333'], same as starting the slice at index 0
print(name[0:3])  # ['111', '222', '333']
print(name[-2:])  # ['444', '555'], the last two elements
# Adding elements to a list
name.append("666")  # append directly at the end
name.insert(1, "999")  # insert at a position: "999" goes to index 1, later elements shift right
print(name)
# Modifying an element
name[0] = "000"
print(name)
# Removing elements
name.pop()  # with no argument, removes the last element
print(name)
name.pop(2)
print(name)
# Getting the index of a given element
print(name.index("999"))
# Reversing modifies the list's elements in place
name.reverse()
print(name)
# Sort order: special characters, digits, uppercase, then lowercase; in place
name.sort()
print(name)
# name.clear() removes all items
# Copying a list
name2 = name.copy()  # shallow copy: if the list contains a list, changes to that inner list show up in the copy too
print(name2)
name[1] = "xxx"  # name2 is not modified by this
names = ["1", [1, 2], "2"]
names[1][0] = 9
print(names)
names1 = copy.copy(names)  # shallow copy, same as list.copy(): only references are copied
names3 = name[:]
print("name3:", names3)
# Deep copy
names2 = copy.deepcopy(names)
# Modifying the nested element changes names and names1 alike,
# but the nested list inside names2 is not affected
names[1][1] = 3
print(names)
print(names1)
print(names2)
# Iterating over a list
for i in names2:
    print(i)
# Strided printing: from index 0 to the end, step 2
print(name[0:-1:2])
# the 0 and -1 can be omitted
print(name[::2])
'''
Shallow vs deep copy
'''
|
normal
|
{
"blob_id": "d517c1e2eb4d37a2584f1603c704efce6834df92",
"index": 7443,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(name[0], name[2])\nprint(name[1:3])\nprint(name[:3])\nprint(name[0:3])\nprint(name[-2:])\nname.append('666')\nname.insert(1, '999')\nprint(name)\n<mask token>\nprint(name)\nname.pop()\nprint(name)\nname.pop(2)\nprint(name)\nprint(name.index('999'))\nname.reverse()\nprint(name)\nname.sort()\nprint(name)\n<mask token>\nprint(name2)\n<mask token>\nprint(names)\n<mask token>\nprint('name3:', names3)\n<mask token>\nprint(names)\nprint(names1)\nprint(names2)\nfor i in names2:\n print(i)\nprint(name[0:-1:2])\nprint(name[::2])\n<mask token>\n",
"step-3": "<mask token>\nname = ['111', '222', '333', '444', '555']\nprint(name[0], name[2])\nprint(name[1:3])\nprint(name[:3])\nprint(name[0:3])\nprint(name[-2:])\nname.append('666')\nname.insert(1, '999')\nprint(name)\nname[0] = '000'\nprint(name)\nname.pop()\nprint(name)\nname.pop(2)\nprint(name)\nprint(name.index('999'))\nname.reverse()\nprint(name)\nname.sort()\nprint(name)\nname2 = name.copy()\nprint(name2)\nname[1] = 'xxx'\nnames = ['1', [1, 2], '2']\nnames[1][0] = 9\nprint(names)\nnames1 = copy.copy(names)\nnames3 = name[:]\nprint('name3:', names3)\nnames2 = copy.deepcopy(names)\nnames[1][1] = 3\nprint(names)\nprint(names1)\nprint(names2)\nfor i in names2:\n print(i)\nprint(name[0:-1:2])\nprint(name[::2])\n<mask token>\n",
"step-4": "import copy\nname = ['111', '222', '333', '444', '555']\nprint(name[0], name[2])\nprint(name[1:3])\nprint(name[:3])\nprint(name[0:3])\nprint(name[-2:])\nname.append('666')\nname.insert(1, '999')\nprint(name)\nname[0] = '000'\nprint(name)\nname.pop()\nprint(name)\nname.pop(2)\nprint(name)\nprint(name.index('999'))\nname.reverse()\nprint(name)\nname.sort()\nprint(name)\nname2 = name.copy()\nprint(name2)\nname[1] = 'xxx'\nnames = ['1', [1, 2], '2']\nnames[1][0] = 9\nprint(names)\nnames1 = copy.copy(names)\nnames3 = name[:]\nprint('name3:', names3)\nnames2 = copy.deepcopy(names)\nnames[1][1] = 3\nprint(names)\nprint(names1)\nprint(names2)\nfor i in names2:\n print(i)\nprint(name[0:-1:2])\nprint(name[::2])\n<mask token>\n",
"step-5": "# Author: Charse\n# py 列表的使用\n\nimport copy\n\n\nname = [\"111\", \"222\", \"333\", \"444\", \"555\"]\n\n# 从列表中取得元素\nprint(name[0], name[2]) # 111 333\nprint(name[1:3]) # 切片 ['222', '333']\nprint(name[:3]) # ['111', '222', '333'] 与下标从0开始是一样的\nprint(name[0:3]) # ['111', '222', '333']\nprint(name[-2:]) # ['444', '555'] 与name\n\n# 往列表中添加元素\nname.append(\"666\") # 直接在末尾添加\nname.insert(1, \"999\") # 在指定位置插入 : 将999插入到下标为1的位置, 原来位置中元素就直接往后顺延\nprint(name)\n\n# 修改列表中元素\nname[0] = \"000\"\nprint(name)\n\n# 删除元素\nname.pop() # 默认是删除最后一个下标\nprint(name)\nname.pop(2)\nprint(name)\n\n# 取出指定元素的下标\nprint(name.index(\"999\"))\n\n# 反转 改变的是分组里面的元素\nname.reverse()\nprint(name)\n\n# 特殊字符, 数字, 大写字母, 小写字母排序. 改变的是数组中的元素\nname.sort()\nprint(name)\n\n# name.clear() remove all items 删除所有的元素\n\n# 复制列表\nname2 = name.copy() # 这个是浅copy,如果列表中还有列表,列表的中元素修改了,新的中也同样是修改了\nprint(name2)\nname[1] = \"xxx\" # name2中是不会进行修改的\n\nnames = [\"1\", [1, 2], \"2\"]\n\nnames[1][0] = 9\nprint(names)\n\nnames1 = copy.copy(names) # 这个是浅copy,与列表的copy是一样的.只是一个引用的copy\n\nnames3 = name[:]\n\nprint(\"name3:\", names3)\n\n\n# 进行深copy\nnames2 = copy.deepcopy(names)\n\n# 对列表的元素进行修改,两者是同样的被修改\n# names2 元素内的列表是不会被修改的\nnames[1][1] = 3\n\nprint(names)\nprint(names1)\nprint(names2)\n\n# 遍历列表\nfor i in names2:\n print(i)\n\n\n\n# 跳跃打印: 从0 开始打印, 到末尾, 步长为2\nprint(name[0:-1:2])\n# 0, -1可以进行省略\nprint(name[::2])\n\n\n'''\n深浅copy\n\n\n'''\n\n\n\n\n\n\n\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
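The tutorial's shallow-versus-deep point condenses to a few lines: a shallow copy shares any nested objects with the original, while a deep copy duplicates them recursively.

import copy

outer = ["a", [1, 2]]
shallow = copy.copy(outer)   # the inner list is the same object
deep = copy.deepcopy(outer)  # the inner list is duplicated

outer[1][0] = 99
print(shallow[1])  # [99, 2] -- shared inner list changed with the original
print(deep[1])     # [1, 2]  -- deep copy unaffected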
from django import forms
from .models import Recipe, Ingredient, Category, Tag
from blog.widgets import CustomClearableFileInput
class NewCategoriesForm(forms.ModelForm):
friendly_name = forms.CharField(label='... or add your own category',
required=False)
class Meta():
model = Category
fields = ('friendly_name',)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
placeholders = {
'friendly_name': 'One single word only'
}
for field in self.fields:
placeholder = placeholders[field]
self.fields[field].widget.attrs['placeholder'] = placeholder
class NewTagsForm(forms.ModelForm):
tagname = forms.CharField(label='... or add your own tag', required=False)
class Meta():
model = Tag
fields = '__all__'
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
placeholders = {
'tagname': 'One single word only'
}
for field in self.fields:
placeholder = placeholders[field]
self.fields[field].widget.attrs['placeholder'] = placeholder
class IngredientForm(forms.ModelForm):
class Meta:
model = Ingredient
exclude = ('recipe', )
labels = {
'quantity': 'Qty',
}
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
placeholders = {
'quantity': 'eg: 0.1',
'unit': 'eg: ml',
'preparation': 'eg: chopped',
'name': 'eg: tomatoes'
}
for field in self.fields:
placeholder = placeholders[field]
self.fields[field].widget.attrs['placeholder'] = placeholder
self.fields['quantity'].widget.attrs['min'] = 0.01
IngredientFormSet = forms.inlineformset_factory(Recipe, Ingredient,
form=IngredientForm,
extra=25,
min_num=1,
validate_min=True)
class RecipeForm(forms.ModelForm):
# Replace image field
image = forms.ImageField(label='Image',
required=False,
widget=CustomClearableFileInput)
# Change rendering of form to user-friendly checkboxes
# Credit:
# https://medium.com/swlh/django-forms-for-many-to-many-fields-d977dec4b024
category = forms.ModelMultipleChoiceField(
queryset=Category.objects.all(),
label='Choose some categories from the list',
required=False,
widget=forms.CheckboxSelectMultiple
)
# Change rendering of form to user-friendly checkboxes
# Credit:
# https://medium.com/swlh/django-forms-for-many-to-many-fields-d977dec4b024
tag = forms.ModelMultipleChoiceField(
queryset=Tag.objects.all(),
label='Choose some tags from the list',
required=False,
widget=forms.CheckboxSelectMultiple
)
class Meta:
model = Recipe
exclude = ('author', 'date',
'date_posted', 'date_edited',
'vote_count', 'votes', 'recipe_box',
'mail_sent', 'discount_code',)
labels = {
'intro': 'Brief Description',
}
def clean_servings(self):
value = self.cleaned_data.get('servings')
if value < 1:
            raise forms.ValidationError(
                'The number of servings must be greater than zero')
return value
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
categories = Category.objects.all().order_by('friendly_name')
friendly_name = [(c.id, c.get_friendly_name()) for c in categories]
placeholders = {
'title': 'eg: Carrot Cake',
'intro': 'eg: A deliciously sweet dessert',
'prep_time': 'eg: 1hr 20mins',
'cook_time': 'eg: 1hr 20mins',
'total_time': 'eg: 1hr 20mins',
'directions': 'Describe the steps to make this recipe',
'image': '',
'image_credit': 'Who took the photo?',
'servings': 'No. of servings',
'tag': '',
'category': '',
}
for field in self.fields:
placeholder = placeholders[field]
self.fields[field].widget.attrs['placeholder'] = placeholder
self.fields['category'].choices = friendly_name
self.fields['title'].widget.attrs['autofocus'] = True
self.fields['directions'].required = True
|
normal
|
{
"blob_id": "7484bd9012bc9952b679073ae036de4554d362be",
"index": 5175,
"step-1": "<mask token>\n\n\nclass IngredientForm(forms.ModelForm):\n\n\n class Meta:\n model = Ingredient\n exclude = 'recipe',\n labels = {'quantity': 'Qty'}\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n placeholders = {'quantity': 'eg: 0.1', 'unit': 'eg: ml',\n 'preparation': 'eg: chopped', 'name': 'eg: tomatoes'}\n for field in self.fields:\n placeholder = placeholders[field]\n self.fields[field].widget.attrs['placeholder'] = placeholder\n self.fields['quantity'].widget.attrs['min'] = 0.01\n\n\n<mask token>\n\n\nclass RecipeForm(forms.ModelForm):\n image = forms.ImageField(label='Image', required=False, widget=\n CustomClearableFileInput)\n category = forms.ModelMultipleChoiceField(queryset=Category.objects.all\n (), label='Choose some categories from the list', required=False,\n widget=forms.CheckboxSelectMultiple)\n tag = forms.ModelMultipleChoiceField(queryset=Tag.objects.all(), label=\n 'Choose some tags from the list', required=False, widget=forms.\n CheckboxSelectMultiple)\n\n\n class Meta:\n model = Recipe\n exclude = ('author', 'date', 'date_posted', 'date_edited',\n 'vote_count', 'votes', 'recipe_box', 'mail_sent', 'discount_code')\n labels = {'intro': 'Brief Description'}\n\n def clean_servings(self):\n value = self.cleaned_data.get('servings')\n if value < 1:\n raise forms.ValidationError(\n 'The number of servings must be greater than zero'\n )\n return value\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n categories = Category.objects.all().order_by('friendly_name')\n friendly_name = [(c.id, c.get_friendly_name()) for c in categories]\n placeholders = {'title': 'eg: Carrot Cake', 'intro':\n 'eg: A deliciously sweet dessert', 'prep_time':\n 'eg: 1hr 20mins', 'cook_time': 'eg: 1hr 20mins', 'total_time':\n 'eg: 1hr 20mins', 'directions':\n 'Describe the steps to make this recipe', 'image': '',\n 'image_credit': 'Who took the photo?', 'servings':\n 'No. of servings', 'tag': '', 'category': ''}\n for field in self.fields:\n placeholder = placeholders[field]\n self.fields[field].widget.attrs['placeholder'] = placeholder\n self.fields['category'].choices = friendly_name\n self.fields['title'].widget.attrs['autofocus'] = True\n self.fields['directions'].required = True\n",
"step-2": "<mask token>\n\n\nclass NewTagsForm(forms.ModelForm):\n <mask token>\n\n\n class Meta:\n model = Tag\n fields = '__all__'\n <mask token>\n\n\nclass IngredientForm(forms.ModelForm):\n\n\n class Meta:\n model = Ingredient\n exclude = 'recipe',\n labels = {'quantity': 'Qty'}\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n placeholders = {'quantity': 'eg: 0.1', 'unit': 'eg: ml',\n 'preparation': 'eg: chopped', 'name': 'eg: tomatoes'}\n for field in self.fields:\n placeholder = placeholders[field]\n self.fields[field].widget.attrs['placeholder'] = placeholder\n self.fields['quantity'].widget.attrs['min'] = 0.01\n\n\n<mask token>\n\n\nclass RecipeForm(forms.ModelForm):\n image = forms.ImageField(label='Image', required=False, widget=\n CustomClearableFileInput)\n category = forms.ModelMultipleChoiceField(queryset=Category.objects.all\n (), label='Choose some categories from the list', required=False,\n widget=forms.CheckboxSelectMultiple)\n tag = forms.ModelMultipleChoiceField(queryset=Tag.objects.all(), label=\n 'Choose some tags from the list', required=False, widget=forms.\n CheckboxSelectMultiple)\n\n\n class Meta:\n model = Recipe\n exclude = ('author', 'date', 'date_posted', 'date_edited',\n 'vote_count', 'votes', 'recipe_box', 'mail_sent', 'discount_code')\n labels = {'intro': 'Brief Description'}\n\n def clean_servings(self):\n value = self.cleaned_data.get('servings')\n if value < 1:\n raise forms.ValidationError(\n 'The number of servings must be greater than zero'\n )\n return value\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n categories = Category.objects.all().order_by('friendly_name')\n friendly_name = [(c.id, c.get_friendly_name()) for c in categories]\n placeholders = {'title': 'eg: Carrot Cake', 'intro':\n 'eg: A deliciously sweet dessert', 'prep_time':\n 'eg: 1hr 20mins', 'cook_time': 'eg: 1hr 20mins', 'total_time':\n 'eg: 1hr 20mins', 'directions':\n 'Describe the steps to make this recipe', 'image': '',\n 'image_credit': 'Who took the photo?', 'servings':\n 'No. of servings', 'tag': '', 'category': ''}\n for field in self.fields:\n placeholder = placeholders[field]\n self.fields[field].widget.attrs['placeholder'] = placeholder\n self.fields['category'].choices = friendly_name\n self.fields['title'].widget.attrs['autofocus'] = True\n self.fields['directions'].required = True\n",
"step-3": "<mask token>\n\n\nclass NewTagsForm(forms.ModelForm):\n tagname = forms.CharField(label='... or add your own tag', required=False)\n\n\n class Meta:\n model = Tag\n fields = '__all__'\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n placeholders = {'tagname': 'One single word only'}\n for field in self.fields:\n placeholder = placeholders[field]\n self.fields[field].widget.attrs['placeholder'] = placeholder\n\n\nclass IngredientForm(forms.ModelForm):\n\n\n class Meta:\n model = Ingredient\n exclude = 'recipe',\n labels = {'quantity': 'Qty'}\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n placeholders = {'quantity': 'eg: 0.1', 'unit': 'eg: ml',\n 'preparation': 'eg: chopped', 'name': 'eg: tomatoes'}\n for field in self.fields:\n placeholder = placeholders[field]\n self.fields[field].widget.attrs['placeholder'] = placeholder\n self.fields['quantity'].widget.attrs['min'] = 0.01\n\n\n<mask token>\n\n\nclass RecipeForm(forms.ModelForm):\n image = forms.ImageField(label='Image', required=False, widget=\n CustomClearableFileInput)\n category = forms.ModelMultipleChoiceField(queryset=Category.objects.all\n (), label='Choose some categories from the list', required=False,\n widget=forms.CheckboxSelectMultiple)\n tag = forms.ModelMultipleChoiceField(queryset=Tag.objects.all(), label=\n 'Choose some tags from the list', required=False, widget=forms.\n CheckboxSelectMultiple)\n\n\n class Meta:\n model = Recipe\n exclude = ('author', 'date', 'date_posted', 'date_edited',\n 'vote_count', 'votes', 'recipe_box', 'mail_sent', 'discount_code')\n labels = {'intro': 'Brief Description'}\n\n def clean_servings(self):\n value = self.cleaned_data.get('servings')\n if value < 1:\n raise forms.ValidationError(\n 'The number of servings must be greater than zero'\n )\n return value\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n categories = Category.objects.all().order_by('friendly_name')\n friendly_name = [(c.id, c.get_friendly_name()) for c in categories]\n placeholders = {'title': 'eg: Carrot Cake', 'intro':\n 'eg: A deliciously sweet dessert', 'prep_time':\n 'eg: 1hr 20mins', 'cook_time': 'eg: 1hr 20mins', 'total_time':\n 'eg: 1hr 20mins', 'directions':\n 'Describe the steps to make this recipe', 'image': '',\n 'image_credit': 'Who took the photo?', 'servings':\n 'No. of servings', 'tag': '', 'category': ''}\n for field in self.fields:\n placeholder = placeholders[field]\n self.fields[field].widget.attrs['placeholder'] = placeholder\n self.fields['category'].choices = friendly_name\n self.fields['title'].widget.attrs['autofocus'] = True\n self.fields['directions'].required = True\n",
"step-4": "<mask token>\n\n\nclass NewCategoriesForm(forms.ModelForm):\n friendly_name = forms.CharField(label='... or add your own category',\n required=False)\n\n\n class Meta:\n model = Category\n fields = 'friendly_name',\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n placeholders = {'friendly_name': 'One single word only'}\n for field in self.fields:\n placeholder = placeholders[field]\n self.fields[field].widget.attrs['placeholder'] = placeholder\n\n\nclass NewTagsForm(forms.ModelForm):\n tagname = forms.CharField(label='... or add your own tag', required=False)\n\n\n class Meta:\n model = Tag\n fields = '__all__'\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n placeholders = {'tagname': 'One single word only'}\n for field in self.fields:\n placeholder = placeholders[field]\n self.fields[field].widget.attrs['placeholder'] = placeholder\n\n\nclass IngredientForm(forms.ModelForm):\n\n\n class Meta:\n model = Ingredient\n exclude = 'recipe',\n labels = {'quantity': 'Qty'}\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n placeholders = {'quantity': 'eg: 0.1', 'unit': 'eg: ml',\n 'preparation': 'eg: chopped', 'name': 'eg: tomatoes'}\n for field in self.fields:\n placeholder = placeholders[field]\n self.fields[field].widget.attrs['placeholder'] = placeholder\n self.fields['quantity'].widget.attrs['min'] = 0.01\n\n\n<mask token>\n\n\nclass RecipeForm(forms.ModelForm):\n image = forms.ImageField(label='Image', required=False, widget=\n CustomClearableFileInput)\n category = forms.ModelMultipleChoiceField(queryset=Category.objects.all\n (), label='Choose some categories from the list', required=False,\n widget=forms.CheckboxSelectMultiple)\n tag = forms.ModelMultipleChoiceField(queryset=Tag.objects.all(), label=\n 'Choose some tags from the list', required=False, widget=forms.\n CheckboxSelectMultiple)\n\n\n class Meta:\n model = Recipe\n exclude = ('author', 'date', 'date_posted', 'date_edited',\n 'vote_count', 'votes', 'recipe_box', 'mail_sent', 'discount_code')\n labels = {'intro': 'Brief Description'}\n\n def clean_servings(self):\n value = self.cleaned_data.get('servings')\n if value < 1:\n raise forms.ValidationError(\n 'The number of servings must be greater than zero'\n )\n return value\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n categories = Category.objects.all().order_by('friendly_name')\n friendly_name = [(c.id, c.get_friendly_name()) for c in categories]\n placeholders = {'title': 'eg: Carrot Cake', 'intro':\n 'eg: A deliciously sweet dessert', 'prep_time':\n 'eg: 1hr 20mins', 'cook_time': 'eg: 1hr 20mins', 'total_time':\n 'eg: 1hr 20mins', 'directions':\n 'Describe the steps to make this recipe', 'image': '',\n 'image_credit': 'Who took the photo?', 'servings':\n 'No. of servings', 'tag': '', 'category': ''}\n for field in self.fields:\n placeholder = placeholders[field]\n self.fields[field].widget.attrs['placeholder'] = placeholder\n self.fields['category'].choices = friendly_name\n self.fields['title'].widget.attrs['autofocus'] = True\n self.fields['directions'].required = True\n",
"step-5": "from django import forms\nfrom .models import Recipe, Ingredient, Category, Tag\nfrom blog.widgets import CustomClearableFileInput\n\n\nclass NewCategoriesForm(forms.ModelForm):\n\n friendly_name = forms.CharField(label='... or add your own category',\n required=False)\n\n class Meta():\n model = Category\n fields = ('friendly_name',)\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n placeholders = {\n 'friendly_name': 'One single word only'\n }\n\n for field in self.fields:\n placeholder = placeholders[field]\n self.fields[field].widget.attrs['placeholder'] = placeholder\n\n\nclass NewTagsForm(forms.ModelForm):\n\n tagname = forms.CharField(label='... or add your own tag', required=False)\n\n class Meta():\n model = Tag\n fields = '__all__'\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n placeholders = {\n 'tagname': 'One single word only'\n }\n\n for field in self.fields:\n placeholder = placeholders[field]\n self.fields[field].widget.attrs['placeholder'] = placeholder\n\n\nclass IngredientForm(forms.ModelForm):\n class Meta:\n model = Ingredient\n exclude = ('recipe', )\n\n labels = {\n 'quantity': 'Qty',\n }\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n placeholders = {\n 'quantity': 'eg: 0.1',\n 'unit': 'eg: ml',\n 'preparation': 'eg: chopped',\n 'name': 'eg: tomatoes'\n }\n\n for field in self.fields:\n placeholder = placeholders[field]\n self.fields[field].widget.attrs['placeholder'] = placeholder\n\n self.fields['quantity'].widget.attrs['min'] = 0.01\n\n\nIngredientFormSet = forms.inlineformset_factory(Recipe, Ingredient,\n form=IngredientForm,\n extra=25,\n min_num=1,\n validate_min=True)\n\n\nclass RecipeForm(forms.ModelForm):\n\n # Replace image field\n image = forms.ImageField(label='Image',\n required=False,\n widget=CustomClearableFileInput)\n\n # Change rendering of form to user-friendly checkboxes\n # Credit:\n # https://medium.com/swlh/django-forms-for-many-to-many-fields-d977dec4b024\n category = forms.ModelMultipleChoiceField(\n queryset=Category.objects.all(),\n label='Choose some categories from the list',\n required=False,\n widget=forms.CheckboxSelectMultiple\n )\n\n # Change rendering of form to user-friendly checkboxes\n # Credit:\n # https://medium.com/swlh/django-forms-for-many-to-many-fields-d977dec4b024\n tag = forms.ModelMultipleChoiceField(\n queryset=Tag.objects.all(),\n label='Choose some tags from the list',\n required=False,\n widget=forms.CheckboxSelectMultiple\n )\n\n class Meta:\n model = Recipe\n exclude = ('author', 'date',\n 'date_posted', 'date_edited',\n 'vote_count', 'votes', 'recipe_box',\n 'mail_sent', 'discount_code',)\n\n labels = {\n 'intro': 'Brief Description',\n }\n\n def clean_servings(self):\n value = self.cleaned_data.get('servings')\n if value < 1:\n raise forms.ValidationError('The number of servings must be \\\n greater than zero')\n return value\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n categories = Category.objects.all().order_by('friendly_name')\n friendly_name = [(c.id, c.get_friendly_name()) for c in categories]\n\n placeholders = {\n 'title': 'eg: Carrot Cake',\n 'intro': 'eg: A deliciously sweet dessert',\n 'prep_time': 'eg: 1hr 20mins',\n 'cook_time': 'eg: 1hr 20mins',\n 'total_time': 'eg: 1hr 20mins',\n 'directions': 'Describe the steps to make this recipe',\n 'image': '',\n 'image_credit': 'Who took the photo?',\n 'servings': 'No. 
of servings',\n 'tag': '',\n 'category': '',\n }\n\n for field in self.fields:\n placeholder = placeholders[field]\n self.fields[field].widget.attrs['placeholder'] = placeholder\n self.fields['category'].choices = friendly_name\n self.fields['title'].widget.attrs['autofocus'] = True\n self.fields['directions'].required = True\n",
"step-ids": [
6,
7,
9,
12,
15
]
}
|
[
6,
7,
9,
12,
15
] |
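A hedged sketch of how RecipeForm and IngredientFormSet above are typically wired together in a view; the view name, template path, and redirect target are assumptions, not part of the record:

from django.shortcuts import redirect, render

def add_recipe(request):
    form = RecipeForm(request.POST or None, request.FILES or None)
    formset = IngredientFormSet(request.POST or None)
    if request.method == 'POST' and form.is_valid() and formset.is_valid():
        recipe = form.save(commit=False)
        recipe.author = request.user  # 'author' is excluded from the form
        recipe.save()
        form.save_m2m()               # saves the category/tag m2m selections
        formset.instance = recipe     # attach the inline ingredients
        formset.save()
        return redirect('recipe_detail', pk=recipe.pk)
    return render(request, 'recipes/add_recipe.html',
                  {'form': form, 'formset': formset})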
from .slinklist import SingleLinkedList
|
flexible
|
{
"blob_id": "2a5d498a386190bdd2c05bc2b14db0fecd707162",
"index": 1128,
"step-1": "<mask token>\n",
"step-2": "from .slinklist import SingleLinkedList\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
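This one-line record is a package __init__.py re-export: it lets callers import the class from the package itself instead of reaching into the module. Assuming a layout like the following:

# datastructures/__init__.py (the package name here is hypothetical)
from .slinklist import SingleLinkedList

# caller code:
# from datastructures import SingleLinkedList  # instead of datastructures.slinklist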
# coding=utf-8
# Copyright 2019 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Common methods shared by MNIST and ImageNet experiments."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import errno
import getpass
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
# mkdir -p in Python >2.5
def mkdir_p(path):
try:
os.makedirs(path, mode=0o755)
except OSError as exc: # Python >2.5
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
# Returns path to postfix under user's Unix home directory.
def make_experiment_dir(postfix):
home = os.path.expanduser('~')
exp_dir = os.path.join(home, postfix)
mkdir_p(exp_dir)
return exp_dir
# appends .png to file name
def save_fig(folder, filename):
if folder is None:
return
filename_out = os.path.join(folder, filename + '.png')
print('saving {}'.format(filename_out))
    with open(filename_out, 'wb') as out_file:  # 'wb': savefig writes PNG bytes
plt.savefig(out_file)
# appends .txt to file name
def save_array(x, folder, filename, formatting):
if folder is None:
return
filename_out = os.path.join(folder, filename + '.txt')
print('saving {}'.format(filename_out))
with open(filename_out, 'w') as out_file:
np.savetxt(out_file, x, fmt=formatting)
def load_array(filename):
with open(filename, 'r') as f:
return np.loadtxt(f)
# count parameters for svd truncation
def count_parameters_list(k_values, nrows, ncols):
new_list = []
for k in k_values:
new_k = count_parameters(k, nrows, ncols)
new_list.append(new_k)
return new_list
# number of parameters when nrows-by-ncols matrix is approximated
# with product of nrows-by-rank and rank-by-ncolds matrix.
def count_parameters(rank, nrows, ncols):
return (nrows + ncols) * rank
# Return one random rademacher matrix
def fully_random_rademacher_matrix(nrows, ncols):
plus_minus_one = np.array([-1, 1], dtype=np.float32)
return np.random.choice(plus_minus_one, (nrows, ncols))
# Return a rank-1 Rademacher matrix
def rank1_rademacher(nrows, ncols):
plus_minus_one = np.array([-1, 1], dtype=np.float32)
column_vector = np.random.choice(plus_minus_one, (nrows, 1))
row_vector = np.random.choice(plus_minus_one, (1, ncols))
# Plain * is quicker than equivalent np.dot(column_vector, row_vector)
return column_vector * row_vector
# Sketch matrix A
def sketch_matrix(A, sketch_type, k):
tf.logging.info('sketch_matrix %s %d', sketch_type, k)
h1 = A.shape[0]
h2 = A.shape[1]
# Numpy defaults to int64 or float64 (double precision).
# Computing with float32 (single precision) is quicker.
A_hat = np.zeros((h1, h2), dtype=np.float32)
for i in range(0, k):
tf.logging.log_every_n(tf.logging.INFO, 'sketch_matrix %s iter %d/%d', 1000,
sketch_type, i, k)
# generate random matrix
if sketch_type == 'arora':
mat = fully_random_rademacher_matrix(h1, h2)
elif sketch_type == 'our_sketch':
mat = rank1_rademacher(h1, h2)
else:
print('wrong sketch_type variable')
return -1
# get coefficient
coefficient = np.dot(np.ravel(A), np.ravel(mat))
# add coefficient*matrix to A_hat
A_hat += coefficient * mat
tf.logging.info('Done sketch_matrix %s %d', sketch_type, k)
return (1.0 / k) * A_hat
# Return truncated svd of A, where only the top k components are used.
# Adding --copt=-mavx --copt=-mavx2 --copt=-mfma compiler flags
# speeds up svd by almost 2x. However it makes sketching, which is dominant,
# a tiny bit slower and hence it's not worth it.
def truncated_svd(A, k):
tf.logging.info('Computing SVD ...')
u, s, v = np.linalg.svd(A, full_matrices=False)
u_trunc = u[:, 0:k]
s_trunc = s[0:k]
v_trunc = v[0:k, :]
A_hat = np.dot(u_trunc, np.dot(np.diag(s_trunc), v_trunc))
tf.logging.info('Done computing SVD ...')
return A_hat
# num_params is rank for SVD, number of coefficients for sketches.
def compress(A, compression_type, num_params):
if compression_type == 'svd':
A_hat = truncated_svd(A, num_params)
elif compression_type == 'our_sketch' or compression_type == 'arora':
A_hat = sketch_matrix(A, compression_type, num_params)
else:
print('Error: wrong compression type. Must be svd, our_sketch, or arora.')
return A_hat
# return singular values of A sorted in descending order
def singular_values(A):
u, s, v = np.linalg.svd(A)
sing = sorted(s, reverse=True)
return sing
def plot_and_save_singular_values(s, folder, fn, nrows, ncols):
x = range(1, len(s) + 1)
y = sorted(s, reverse=True)
title = 'Singular values\ndim = (' + str(nrows) + 'x' + str(ncols) + ')'
plt.plot(x, y)
plt.title(title)
plt.tight_layout()
save_fig(folder, fn)
save_array(np.array(s), folder, fn + '_vals', '%.18e')
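# A small usage sketch for compress() above (a sketch, not part of the original
# file): compares the relative reconstruction error of the three modes on a
# random matrix. Sizes are tiny so the Rademacher sketches finish quickly, and
# the parameter budgets are arbitrary.
if __name__ == '__main__':
    A = np.random.randn(30, 20).astype(np.float32)
    for method, budget in [('svd', 5), ('our_sketch', 100), ('arora', 100)]:
        A_hat = compress(A, method, budget)
        err = np.linalg.norm(A - A_hat) / np.linalg.norm(A)
        print('{:>10s}: relative error {:.3f}'.format(method, err))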
|
normal
|
{
"blob_id": "f253816d08407950caad28f1ce630ac2b099aa70",
"index": 3241,
"step-1": "<mask token>\n\n\ndef make_experiment_dir(postfix):\n home = os.path.expanduser('~')\n exp_dir = os.path.join(home, postfix)\n mkdir_p(exp_dir)\n return exp_dir\n\n\ndef save_fig(folder, filename):\n if folder is None:\n return\n filename_out = os.path.join(folder, filename + '.png')\n print('saving {}'.format(filename_out))\n with open(filename_out, 'w') as out_file:\n plt.savefig(out_file)\n\n\n<mask token>\n\n\ndef count_parameters_list(k_values, nrows, ncols):\n new_list = []\n for k in k_values:\n new_k = count_parameters(k, nrows, ncols)\n new_list.append(new_k)\n return new_list\n\n\ndef count_parameters(rank, nrows, ncols):\n return (nrows + ncols) * rank\n\n\ndef fully_random_rademacher_matrix(nrows, ncols):\n plus_minus_one = np.array([-1, 1], dtype=np.float32)\n return np.random.choice(plus_minus_one, (nrows, ncols))\n\n\n<mask token>\n\n\ndef sketch_matrix(A, sketch_type, k):\n tf.logging.info('sketch_matrix %s %d', sketch_type, k)\n h1 = A.shape[0]\n h2 = A.shape[1]\n A_hat = np.zeros((h1, h2), dtype=np.float32)\n for i in range(0, k):\n tf.logging.log_every_n(tf.logging.INFO,\n 'sketch_matrix %s iter %d/%d', 1000, sketch_type, i, k)\n if sketch_type == 'arora':\n mat = fully_random_rademacher_matrix(h1, h2)\n elif sketch_type == 'our_sketch':\n mat = rank1_rademacher(h1, h2)\n else:\n print('wrong sketch_type variable')\n return -1\n coefficient = np.dot(np.ravel(A), np.ravel(mat))\n A_hat += coefficient * mat\n tf.logging.info('Done sketch_matrix %s %d', sketch_type, k)\n return 1.0 / k * A_hat\n\n\ndef truncated_svd(A, k):\n tf.logging.info('Computing SVD ...')\n u, s, v = np.linalg.svd(A, full_matrices=False)\n u_trunc = u[:, 0:k]\n s_trunc = s[0:k]\n v_trunc = v[0:k, :]\n A_hat = np.dot(u_trunc, np.dot(np.diag(s_trunc), v_trunc))\n tf.logging.info('Done computing SVD ...')\n return A_hat\n\n\ndef compress(A, compression_type, num_params):\n if compression_type == 'svd':\n A_hat = truncated_svd(A, num_params)\n elif compression_type == 'our_sketch' or compression_type == 'arora':\n A_hat = sketch_matrix(A, compression_type, num_params)\n else:\n print(\n 'Error: wrong compression type. Must be svd, our_sketch, or arora.'\n )\n return A_hat\n\n\ndef singular_values(A):\n u, s, v = np.linalg.svd(A)\n sing = sorted(s, reverse=True)\n return sing\n\n\ndef plot_and_save_singular_values(s, folder, fn, nrows, ncols):\n x = range(1, len(s) + 1)\n y = sorted(s, reverse=True)\n title = 'Singular values\\ndim = (' + str(nrows) + 'x' + str(ncols) + ')'\n plt.plot(x, y)\n plt.title(title)\n plt.tight_layout()\n save_fig(folder, fn)\n save_array(np.array(s), folder, fn + '_vals', '%.18e')\n",
"step-2": "<mask token>\n\n\ndef make_experiment_dir(postfix):\n home = os.path.expanduser('~')\n exp_dir = os.path.join(home, postfix)\n mkdir_p(exp_dir)\n return exp_dir\n\n\ndef save_fig(folder, filename):\n if folder is None:\n return\n filename_out = os.path.join(folder, filename + '.png')\n print('saving {}'.format(filename_out))\n with open(filename_out, 'w') as out_file:\n plt.savefig(out_file)\n\n\ndef save_array(x, folder, filename, formatting):\n if folder is None:\n return\n filename_out = os.path.join(folder, filename + '.txt')\n print('saving {}'.format(filename_out))\n with open(filename_out, 'w') as out_file:\n np.savetxt(out_file, x, fmt=formatting)\n\n\ndef load_array(filename):\n with open(filename, 'r') as f:\n return np.loadtxt(f)\n\n\ndef count_parameters_list(k_values, nrows, ncols):\n new_list = []\n for k in k_values:\n new_k = count_parameters(k, nrows, ncols)\n new_list.append(new_k)\n return new_list\n\n\ndef count_parameters(rank, nrows, ncols):\n return (nrows + ncols) * rank\n\n\ndef fully_random_rademacher_matrix(nrows, ncols):\n plus_minus_one = np.array([-1, 1], dtype=np.float32)\n return np.random.choice(plus_minus_one, (nrows, ncols))\n\n\n<mask token>\n\n\ndef sketch_matrix(A, sketch_type, k):\n tf.logging.info('sketch_matrix %s %d', sketch_type, k)\n h1 = A.shape[0]\n h2 = A.shape[1]\n A_hat = np.zeros((h1, h2), dtype=np.float32)\n for i in range(0, k):\n tf.logging.log_every_n(tf.logging.INFO,\n 'sketch_matrix %s iter %d/%d', 1000, sketch_type, i, k)\n if sketch_type == 'arora':\n mat = fully_random_rademacher_matrix(h1, h2)\n elif sketch_type == 'our_sketch':\n mat = rank1_rademacher(h1, h2)\n else:\n print('wrong sketch_type variable')\n return -1\n coefficient = np.dot(np.ravel(A), np.ravel(mat))\n A_hat += coefficient * mat\n tf.logging.info('Done sketch_matrix %s %d', sketch_type, k)\n return 1.0 / k * A_hat\n\n\ndef truncated_svd(A, k):\n tf.logging.info('Computing SVD ...')\n u, s, v = np.linalg.svd(A, full_matrices=False)\n u_trunc = u[:, 0:k]\n s_trunc = s[0:k]\n v_trunc = v[0:k, :]\n A_hat = np.dot(u_trunc, np.dot(np.diag(s_trunc), v_trunc))\n tf.logging.info('Done computing SVD ...')\n return A_hat\n\n\ndef compress(A, compression_type, num_params):\n if compression_type == 'svd':\n A_hat = truncated_svd(A, num_params)\n elif compression_type == 'our_sketch' or compression_type == 'arora':\n A_hat = sketch_matrix(A, compression_type, num_params)\n else:\n print(\n 'Error: wrong compression type. Must be svd, our_sketch, or arora.'\n )\n return A_hat\n\n\ndef singular_values(A):\n u, s, v = np.linalg.svd(A)\n sing = sorted(s, reverse=True)\n return sing\n\n\ndef plot_and_save_singular_values(s, folder, fn, nrows, ncols):\n x = range(1, len(s) + 1)\n y = sorted(s, reverse=True)\n title = 'Singular values\\ndim = (' + str(nrows) + 'x' + str(ncols) + ')'\n plt.plot(x, y)\n plt.title(title)\n plt.tight_layout()\n save_fig(folder, fn)\n save_array(np.array(s), folder, fn + '_vals', '%.18e')\n",
"step-3": "<mask token>\n\n\ndef make_experiment_dir(postfix):\n home = os.path.expanduser('~')\n exp_dir = os.path.join(home, postfix)\n mkdir_p(exp_dir)\n return exp_dir\n\n\ndef save_fig(folder, filename):\n if folder is None:\n return\n filename_out = os.path.join(folder, filename + '.png')\n print('saving {}'.format(filename_out))\n with open(filename_out, 'w') as out_file:\n plt.savefig(out_file)\n\n\ndef save_array(x, folder, filename, formatting):\n if folder is None:\n return\n filename_out = os.path.join(folder, filename + '.txt')\n print('saving {}'.format(filename_out))\n with open(filename_out, 'w') as out_file:\n np.savetxt(out_file, x, fmt=formatting)\n\n\ndef load_array(filename):\n with open(filename, 'r') as f:\n return np.loadtxt(f)\n\n\ndef count_parameters_list(k_values, nrows, ncols):\n new_list = []\n for k in k_values:\n new_k = count_parameters(k, nrows, ncols)\n new_list.append(new_k)\n return new_list\n\n\ndef count_parameters(rank, nrows, ncols):\n return (nrows + ncols) * rank\n\n\ndef fully_random_rademacher_matrix(nrows, ncols):\n plus_minus_one = np.array([-1, 1], dtype=np.float32)\n return np.random.choice(plus_minus_one, (nrows, ncols))\n\n\ndef rank1_rademacher(nrows, ncols):\n plus_minus_one = np.array([-1, 1], dtype=np.float32)\n column_vector = np.random.choice(plus_minus_one, (nrows, 1))\n row_vector = np.random.choice(plus_minus_one, (1, ncols))\n return column_vector * row_vector\n\n\ndef sketch_matrix(A, sketch_type, k):\n tf.logging.info('sketch_matrix %s %d', sketch_type, k)\n h1 = A.shape[0]\n h2 = A.shape[1]\n A_hat = np.zeros((h1, h2), dtype=np.float32)\n for i in range(0, k):\n tf.logging.log_every_n(tf.logging.INFO,\n 'sketch_matrix %s iter %d/%d', 1000, sketch_type, i, k)\n if sketch_type == 'arora':\n mat = fully_random_rademacher_matrix(h1, h2)\n elif sketch_type == 'our_sketch':\n mat = rank1_rademacher(h1, h2)\n else:\n print('wrong sketch_type variable')\n return -1\n coefficient = np.dot(np.ravel(A), np.ravel(mat))\n A_hat += coefficient * mat\n tf.logging.info('Done sketch_matrix %s %d', sketch_type, k)\n return 1.0 / k * A_hat\n\n\ndef truncated_svd(A, k):\n tf.logging.info('Computing SVD ...')\n u, s, v = np.linalg.svd(A, full_matrices=False)\n u_trunc = u[:, 0:k]\n s_trunc = s[0:k]\n v_trunc = v[0:k, :]\n A_hat = np.dot(u_trunc, np.dot(np.diag(s_trunc), v_trunc))\n tf.logging.info('Done computing SVD ...')\n return A_hat\n\n\ndef compress(A, compression_type, num_params):\n if compression_type == 'svd':\n A_hat = truncated_svd(A, num_params)\n elif compression_type == 'our_sketch' or compression_type == 'arora':\n A_hat = sketch_matrix(A, compression_type, num_params)\n else:\n print(\n 'Error: wrong compression type. Must be svd, our_sketch, or arora.'\n )\n return A_hat\n\n\ndef singular_values(A):\n u, s, v = np.linalg.svd(A)\n sing = sorted(s, reverse=True)\n return sing\n\n\ndef plot_and_save_singular_values(s, folder, fn, nrows, ncols):\n x = range(1, len(s) + 1)\n y = sorted(s, reverse=True)\n title = 'Singular values\\ndim = (' + str(nrows) + 'x' + str(ncols) + ')'\n plt.plot(x, y)\n plt.title(title)\n plt.tight_layout()\n save_fig(folder, fn)\n save_array(np.array(s), folder, fn + '_vals', '%.18e')\n",
"step-4": "<mask token>\n\n\ndef mkdir_p(path):\n try:\n os.makedirs(path, mode=493)\n except OSError as exc:\n if exc.errno == errno.EEXIST and os.path.isdir(path):\n pass\n else:\n raise\n\n\ndef make_experiment_dir(postfix):\n home = os.path.expanduser('~')\n exp_dir = os.path.join(home, postfix)\n mkdir_p(exp_dir)\n return exp_dir\n\n\ndef save_fig(folder, filename):\n if folder is None:\n return\n filename_out = os.path.join(folder, filename + '.png')\n print('saving {}'.format(filename_out))\n with open(filename_out, 'w') as out_file:\n plt.savefig(out_file)\n\n\ndef save_array(x, folder, filename, formatting):\n if folder is None:\n return\n filename_out = os.path.join(folder, filename + '.txt')\n print('saving {}'.format(filename_out))\n with open(filename_out, 'w') as out_file:\n np.savetxt(out_file, x, fmt=formatting)\n\n\ndef load_array(filename):\n with open(filename, 'r') as f:\n return np.loadtxt(f)\n\n\ndef count_parameters_list(k_values, nrows, ncols):\n new_list = []\n for k in k_values:\n new_k = count_parameters(k, nrows, ncols)\n new_list.append(new_k)\n return new_list\n\n\ndef count_parameters(rank, nrows, ncols):\n return (nrows + ncols) * rank\n\n\ndef fully_random_rademacher_matrix(nrows, ncols):\n plus_minus_one = np.array([-1, 1], dtype=np.float32)\n return np.random.choice(plus_minus_one, (nrows, ncols))\n\n\ndef rank1_rademacher(nrows, ncols):\n plus_minus_one = np.array([-1, 1], dtype=np.float32)\n column_vector = np.random.choice(plus_minus_one, (nrows, 1))\n row_vector = np.random.choice(plus_minus_one, (1, ncols))\n return column_vector * row_vector\n\n\ndef sketch_matrix(A, sketch_type, k):\n tf.logging.info('sketch_matrix %s %d', sketch_type, k)\n h1 = A.shape[0]\n h2 = A.shape[1]\n A_hat = np.zeros((h1, h2), dtype=np.float32)\n for i in range(0, k):\n tf.logging.log_every_n(tf.logging.INFO,\n 'sketch_matrix %s iter %d/%d', 1000, sketch_type, i, k)\n if sketch_type == 'arora':\n mat = fully_random_rademacher_matrix(h1, h2)\n elif sketch_type == 'our_sketch':\n mat = rank1_rademacher(h1, h2)\n else:\n print('wrong sketch_type variable')\n return -1\n coefficient = np.dot(np.ravel(A), np.ravel(mat))\n A_hat += coefficient * mat\n tf.logging.info('Done sketch_matrix %s %d', sketch_type, k)\n return 1.0 / k * A_hat\n\n\ndef truncated_svd(A, k):\n tf.logging.info('Computing SVD ...')\n u, s, v = np.linalg.svd(A, full_matrices=False)\n u_trunc = u[:, 0:k]\n s_trunc = s[0:k]\n v_trunc = v[0:k, :]\n A_hat = np.dot(u_trunc, np.dot(np.diag(s_trunc), v_trunc))\n tf.logging.info('Done computing SVD ...')\n return A_hat\n\n\ndef compress(A, compression_type, num_params):\n if compression_type == 'svd':\n A_hat = truncated_svd(A, num_params)\n elif compression_type == 'our_sketch' or compression_type == 'arora':\n A_hat = sketch_matrix(A, compression_type, num_params)\n else:\n print(\n 'Error: wrong compression type. Must be svd, our_sketch, or arora.'\n )\n return A_hat\n\n\ndef singular_values(A):\n u, s, v = np.linalg.svd(A)\n sing = sorted(s, reverse=True)\n return sing\n\n\ndef plot_and_save_singular_values(s, folder, fn, nrows, ncols):\n x = range(1, len(s) + 1)\n y = sorted(s, reverse=True)\n title = 'Singular values\\ndim = (' + str(nrows) + 'x' + str(ncols) + ')'\n plt.plot(x, y)\n plt.title(title)\n plt.tight_layout()\n save_fig(folder, fn)\n save_array(np.array(s), folder, fn + '_vals', '%.18e')\n",
"step-5": "# coding=utf-8\n# Copyright 2019 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\" Common methods shared by MNIST and ImageNet experiments.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport errno\nimport getpass\n\nimport numpy as np\nimport tensorflow as tf\nimport matplotlib.pyplot as plt\n\n\n# mkdir -p in Python >2.5\ndef mkdir_p(path):\n try:\n os.makedirs(path, mode=0o755)\n except OSError as exc: # Python >2.5\n if exc.errno == errno.EEXIST and os.path.isdir(path):\n pass\n else:\n raise\n\n\n# Returns path to postfix under user's Unix home directory.\ndef make_experiment_dir(postfix):\n home = os.path.expanduser('~')\n exp_dir = os.path.join(home, postfix)\n mkdir_p(exp_dir)\n return exp_dir\n\n\n# appends .png to file name\ndef save_fig(folder, filename):\n if folder is None:\n return\n filename_out = os.path.join(folder, filename + '.png')\n print('saving {}'.format(filename_out))\n with open(filename_out, 'w') as out_file:\n plt.savefig(out_file)\n\n\n# appends .txt to file name\ndef save_array(x, folder, filename, formatting):\n if folder is None:\n return\n filename_out = os.path.join(folder, filename + '.txt')\n print('saving {}'.format(filename_out))\n with open(filename_out, 'w') as out_file:\n np.savetxt(out_file, x, fmt=formatting)\n\n\ndef load_array(filename):\n with open(filename, 'r') as f:\n return np.loadtxt(f)\n\n\n# count parameters for svd truncation\ndef count_parameters_list(k_values, nrows, ncols):\n new_list = []\n for k in k_values:\n new_k = count_parameters(k, nrows, ncols)\n new_list.append(new_k)\n return new_list\n\n\n# number of parameters when nrows-by-ncols matrix is approximated\n# with product of nrows-by-rank and rank-by-ncolds matrix.\ndef count_parameters(rank, nrows, ncols):\n return (nrows + ncols) * rank\n\n\n# Return one random rademacher matrix\ndef fully_random_rademacher_matrix(nrows, ncols):\n plus_minus_one = np.array([-1, 1], dtype=np.float32)\n return np.random.choice(plus_minus_one, (nrows, ncols))\n\n\n# Return a rank-1 Rademacher matrix\ndef rank1_rademacher(nrows, ncols):\n plus_minus_one = np.array([-1, 1], dtype=np.float32)\n column_vector = np.random.choice(plus_minus_one, (nrows, 1))\n row_vector = np.random.choice(plus_minus_one, (1, ncols))\n # Plain * is quicker than equivalent np.dot(column_vector, row_vector)\n return column_vector * row_vector\n\n# Sketch matrix A\ndef sketch_matrix(A, sketch_type, k):\n tf.logging.info('sketch_matrix %s %d', sketch_type, k)\n h1 = A.shape[0]\n h2 = A.shape[1]\n # Numpy defaults to int64 or float64 (double precision).\n # Computing with float32 (single precision) is quicker.\n A_hat = np.zeros((h1, h2), dtype=np.float32)\n for i in range(0, k):\n tf.logging.log_every_n(tf.logging.INFO, 'sketch_matrix %s iter %d/%d', 1000,\n sketch_type, i, k)\n # generate random matrix\n if sketch_type == 'arora':\n mat = fully_random_rademacher_matrix(h1, h2)\n elif 
sketch_type == 'our_sketch':\n mat = rank1_rademacher(h1, h2)\n else:\n print('wrong sketch_type variable')\n return -1\n # get coefficient\n coefficient = np.dot(np.ravel(A), np.ravel(mat))\n # add coefficient*matrix to A_hat\n A_hat += coefficient * mat\n tf.logging.info('Done sketch_matrix %s %d', sketch_type, k)\n return (1.0 / k) * A_hat\n\n\n# Return truncated svd of A, where only the top k components are used.\n# Adding --copt=-mavx --copt=-mavx2 --copt=-mfma compiler flags\n# speeds up svd by almost 2x. However it makes sketching, which is dominant,\n# a tiny bit slower and hence it's not worth it.\ndef truncated_svd(A, k):\n tf.logging.info('Computing SVD ...')\n u, s, v = np.linalg.svd(A, full_matrices=False)\n u_trunc = u[:, 0:k]\n s_trunc = s[0:k]\n v_trunc = v[0:k, :]\n A_hat = np.dot(u_trunc, np.dot(np.diag(s_trunc), v_trunc))\n tf.logging.info('Done computing SVD ...')\n return A_hat\n\n# num_params is rank for SVD, number of coefficients for sketches.\ndef compress(A, compression_type, num_params):\n if compression_type == 'svd':\n A_hat = truncated_svd(A, num_params)\n elif compression_type == 'our_sketch' or compression_type == 'arora':\n A_hat = sketch_matrix(A, compression_type, num_params)\n else:\n print('Error: wrong compression type. Must be svd, our_sketch, or arora.')\n return A_hat\n\n\n# return singular values of A sorted in descending order\ndef singular_values(A):\n u, s, v = np.linalg.svd(A)\n sing = sorted(s, reverse=True)\n return sing\n\ndef plot_and_save_singular_values(s, folder, fn, nrows, ncols):\n x = range(1, len(s) + 1)\n y = sorted(s, reverse=True)\n title = 'Singular values\\ndim = (' + str(nrows) + 'x' + str(ncols) + ')'\n plt.plot(x, y)\n plt.title(title)\n plt.tight_layout()\n save_fig(folder, fn)\n save_array(np.array(s), folder, fn + '_vals', '%.18e')\n",
"step-ids": [
10,
12,
13,
14,
16
]
}
|
[
10,
12,
13,
14,
16
] |
def fibonacci(num):
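    # Iteratively advance the pair (f_1, f_2) num-1 times to reach the num-th Fibonacci number.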
f_1 = 0
f_2 = 1
answer = 0
for i in range(num-1):
answer = f_1 + f_2
f_1 = f_2
f_2 = answer
return answer
# The code below is just for testing by printing the output.
print(fibonacci(3))
|
normal
|
{
"blob_id": "c3d0a9bdbfd5b6f2b960ee2c1f11ec4acf508310",
"index": 8458,
"step-1": "<mask token>\n",
"step-2": "def fibonacci(num):\n f_1 = 0\n f_2 = 1\n answer = 0\n for i in range(num - 1):\n answer = f_1 + f_2\n f_1 = f_2\n f_2 = answer\n return answer\n\n\n<mask token>\n",
"step-3": "def fibonacci(num):\n f_1 = 0\n f_2 = 1\n answer = 0\n for i in range(num - 1):\n answer = f_1 + f_2\n f_1 = f_2\n f_2 = answer\n return answer\n\n\nprint(fibonacci(3))\n",
"step-4": "def fibonacci(num):\n f_1 = 0\n f_2 = 1\n answer = 0\n for i in range(num-1):\n answer = f_1 + f_2\n f_1 = f_2\n f_2 = answer\n return answer\n\n# 아래는 테스트로 출력해 보기 위한 코드입니다.\nprint(fibonacci(3))\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
function handler(event, context, callback){
var
AWS = require("aws-sdk"),
DDB = new AWS.DynamoDB({
apiVersion: "2012-08-10",
region: "us-east-1"
}),
city_str = event.city_str.toUpperCase(),
data = {
city_str: city_str,
temp_int_str: 72
},
response = {},
params = {
TableName: "weather",
KeyConditionExpression: "sc = :v1",
ExpressionAttributeValues: {
":v1":{
S: city_str
}
}
};
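    // Query the "weather" DynamoDB table for the requested city and shape the result for the client.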
DDB.query(params, function(err, data){
var
item = {},
response = {
statusCode: 200,
headers: {},
body: null
};
if(err){
response.statusCode = 500;
console.log(err);
response.body = err;
}else{
// console.log(data.Items[0]);
var data = data.Items[0];
if(data && data.t){
console.log(data.sc.S + " and " + data.t.N);
item = {
temp_int:Number(data.t.N),
city_str: data.sc.S
};
}else{
item = {
city_str: event.city_str
//when we don't return a temp, the client can say city not found
};
}
}
response = item;
// console.log(response);
callback(null, response);
});
}
exports.handler = handler;
|
normal
|
{
"blob_id": "7bac3b224586f8c42a104123432a7321a1251369",
"index": 7115,
"step-1": "function handler(event, context, callback){\r\n var \r\n AWS = require(\"aws-sdk\"),\r\n DDB = new AWS.DynamoDB({\r\n apiVersion: \"2012-08-10\",\r\n region: \"us-east-1\"\r\n }),\r\n \r\n city_str = event.city_str.toUpperCase(),\r\n data = {\r\n city_str: city_str,\r\n temp_int_str: 72\r\n },\r\n response = {},\r\n params = {\r\n TableName: \"weather\",\r\n KeyConditionExpression: \"sc = :v1\",\r\n ExpressionAttributeValues: {\r\n \":v1\":{\r\n S: city_str\r\n }\r\n }\r\n };\r\n \r\n \tDDB.query(params, function(err, data){\r\n var\r\n \t\titem = {},\r\n \tresponse = {\r\n \tstatusCode: 200,\r\n \theaders: {},\r\n \tbody: null\r\n \t};\r\n if(err){\r\n response.statusCode = 500;\r\n console.log(err);\r\n response.body = err;\r\n }else{\r\n // console.log(data.Items[0]);\r\n var data = data.Items[0];\r\n if(data && data.t){\r\n console.log(data.sc.S + \" and \" + data.t.N);\r\n \titem = {\r\n temp_int:Number(data.t.N),\r\n city_str: data.sc.S\r\n \t};\r\n }else{\r\n item = {\r\n \tcity_str: event.city_str\r\n //when we don't return a temp, the client can say city not found\r\n \t};\r\n }\r\n }\r\n response = item;\r\n // console.log(response);\r\n callback(null, response);\r\n });\r\n}\r\nexports.handler = handler;",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
try:
img = cv.imread(imgpath)
newimg1 = jarvis_judice_ninke_1(img) * 255
newimg2 = jarvis_judice_ninke_2(img) * 255
cv.imshow('Imagem original', img)
cv.imshow('Jarvis, Judice e Ninke metodo 1', newimg1)
cv.imshow('Jarvis, Judice e Ninke metodo 2', newimg2)
print('')
cv.imwrite('resultados/jarvis_judice_ninke/jarvis_judice_ninke_1-' +
imgname, newimg1)
cv.imwrite('resultados/jarvis_judice_ninke/jarvis_judice_ninke_2-' +
imgname, newimg2)
print('Resultados salvos em:')
print('resultados/jarvis_judice_ninke/jarvis_judice_ninke_1-' + imgname)
print('resultados/jarvis_judice_ninke/jarvis_judice_ninke_2-' + imgname)
cv.waitKey(0)
cv.destroyAllWindows()
except:
print('Erro')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
imgname = sys.argv[1]
imgpath = 'img/' + imgname
try:
img = cv.imread(imgpath)
newimg1 = jarvis_judice_ninke_1(img) * 255
newimg2 = jarvis_judice_ninke_2(img) * 255
cv.imshow('Imagem original', img)
cv.imshow('Jarvis, Judice e Ninke metodo 1', newimg1)
cv.imshow('Jarvis, Judice e Ninke metodo 2', newimg2)
print('')
cv.imwrite('resultados/jarvis_judice_ninke/jarvis_judice_ninke_1-' +
imgname, newimg1)
cv.imwrite('resultados/jarvis_judice_ninke/jarvis_judice_ninke_2-' +
imgname, newimg2)
print('Resultados salvos em:')
print('resultados/jarvis_judice_ninke/jarvis_judice_ninke_1-' + imgname)
print('resultados/jarvis_judice_ninke/jarvis_judice_ninke_2-' + imgname)
cv.waitKey(0)
cv.destroyAllWindows()
except:
print('Erro')
<|reserved_special_token_1|>
import cv2 as cv
import numpy as np
import sys
from meio_tom_lib import *
imgname = sys.argv[1]
imgpath = 'img/' + imgname
try:
img = cv.imread(imgpath)
newimg1 = jarvis_judice_ninke_1(img) * 255
newimg2 = jarvis_judice_ninke_2(img) * 255
cv.imshow('Imagem original', img)
cv.imshow('Jarvis, Judice e Ninke metodo 1', newimg1)
cv.imshow('Jarvis, Judice e Ninke metodo 2', newimg2)
print('')
cv.imwrite('resultados/jarvis_judice_ninke/jarvis_judice_ninke_1-' +
imgname, newimg1)
cv.imwrite('resultados/jarvis_judice_ninke/jarvis_judice_ninke_2-' +
imgname, newimg2)
print('Resultados salvos em:')
print('resultados/jarvis_judice_ninke/jarvis_judice_ninke_1-' + imgname)
print('resultados/jarvis_judice_ninke/jarvis_judice_ninke_2-' + imgname)
cv.waitKey(0)
cv.destroyAllWindows()
except:
print('Erro')
<|reserved_special_token_1|>
import cv2 as cv
import numpy as np
import sys
from meio_tom_lib import *
imgname = sys.argv[1]
imgpath = "img/" + imgname
try:
img = cv.imread(imgpath)
newimg1 = jarvis_judice_ninke_1(img)*255
newimg2 = jarvis_judice_ninke_2(img)*255
cv.imshow("Imagem original",img)
cv.imshow("Jarvis, Judice e Ninke metodo 1",newimg1)
cv.imshow("Jarvis, Judice e Ninke metodo 2",newimg2)
print("")
cv.imwrite('resultados/jarvis_judice_ninke/jarvis_judice_ninke_1-'+imgname,newimg1)
cv.imwrite('resultados/jarvis_judice_ninke/jarvis_judice_ninke_2-'+imgname,newimg2)
print("Resultados salvos em:")
print('resultados/jarvis_judice_ninke/jarvis_judice_ninke_1-'+imgname)
print('resultados/jarvis_judice_ninke/jarvis_judice_ninke_2-'+imgname)
cv.waitKey(0)
cv.destroyAllWindows()
except:
print("Erro")
|
flexible
|
{
"blob_id": "bf764457e6af25d2d9406b18af51f63b36ab823a",
"index": 8564,
"step-1": "<mask token>\n",
"step-2": "<mask token>\ntry:\n img = cv.imread(imgpath)\n newimg1 = jarvis_judice_ninke_1(img) * 255\n newimg2 = jarvis_judice_ninke_2(img) * 255\n cv.imshow('Imagem original', img)\n cv.imshow('Jarvis, Judice e Ninke metodo 1', newimg1)\n cv.imshow('Jarvis, Judice e Ninke metodo 2', newimg2)\n print('')\n cv.imwrite('resultados/jarvis_judice_ninke/jarvis_judice_ninke_1-' +\n imgname, newimg1)\n cv.imwrite('resultados/jarvis_judice_ninke/jarvis_judice_ninke_2-' +\n imgname, newimg2)\n print('Resultados salvos em:')\n print('resultados/jarvis_judice_ninke/jarvis_judice_ninke_1-' + imgname)\n print('resultados/jarvis_judice_ninke/jarvis_judice_ninke_2-' + imgname)\n cv.waitKey(0)\n cv.destroyAllWindows()\nexcept:\n print('Erro')\n",
"step-3": "<mask token>\nimgname = sys.argv[1]\nimgpath = 'img/' + imgname\ntry:\n img = cv.imread(imgpath)\n newimg1 = jarvis_judice_ninke_1(img) * 255\n newimg2 = jarvis_judice_ninke_2(img) * 255\n cv.imshow('Imagem original', img)\n cv.imshow('Jarvis, Judice e Ninke metodo 1', newimg1)\n cv.imshow('Jarvis, Judice e Ninke metodo 2', newimg2)\n print('')\n cv.imwrite('resultados/jarvis_judice_ninke/jarvis_judice_ninke_1-' +\n imgname, newimg1)\n cv.imwrite('resultados/jarvis_judice_ninke/jarvis_judice_ninke_2-' +\n imgname, newimg2)\n print('Resultados salvos em:')\n print('resultados/jarvis_judice_ninke/jarvis_judice_ninke_1-' + imgname)\n print('resultados/jarvis_judice_ninke/jarvis_judice_ninke_2-' + imgname)\n cv.waitKey(0)\n cv.destroyAllWindows()\nexcept:\n print('Erro')\n",
"step-4": "import cv2 as cv\nimport numpy as np\nimport sys\nfrom meio_tom_lib import *\nimgname = sys.argv[1]\nimgpath = 'img/' + imgname\ntry:\n img = cv.imread(imgpath)\n newimg1 = jarvis_judice_ninke_1(img) * 255\n newimg2 = jarvis_judice_ninke_2(img) * 255\n cv.imshow('Imagem original', img)\n cv.imshow('Jarvis, Judice e Ninke metodo 1', newimg1)\n cv.imshow('Jarvis, Judice e Ninke metodo 2', newimg2)\n print('')\n cv.imwrite('resultados/jarvis_judice_ninke/jarvis_judice_ninke_1-' +\n imgname, newimg1)\n cv.imwrite('resultados/jarvis_judice_ninke/jarvis_judice_ninke_2-' +\n imgname, newimg2)\n print('Resultados salvos em:')\n print('resultados/jarvis_judice_ninke/jarvis_judice_ninke_1-' + imgname)\n print('resultados/jarvis_judice_ninke/jarvis_judice_ninke_2-' + imgname)\n cv.waitKey(0)\n cv.destroyAllWindows()\nexcept:\n print('Erro')\n",
"step-5": "import cv2 as cv\nimport numpy as np\nimport sys\nfrom meio_tom_lib import *\n\nimgname = sys.argv[1]\nimgpath = \"img/\" + imgname\n\n\ntry:\n img = cv.imread(imgpath)\n\n newimg1 = jarvis_judice_ninke_1(img)*255\n newimg2 = jarvis_judice_ninke_2(img)*255\n\n cv.imshow(\"Imagem original\",img)\n cv.imshow(\"Jarvis, Judice e Ninke metodo 1\",newimg1)\n cv.imshow(\"Jarvis, Judice e Ninke metodo 2\",newimg2)\n\n print(\"\")\n\n cv.imwrite('resultados/jarvis_judice_ninke/jarvis_judice_ninke_1-'+imgname,newimg1)\n cv.imwrite('resultados/jarvis_judice_ninke/jarvis_judice_ninke_2-'+imgname,newimg2)\n\n print(\"Resultados salvos em:\")\n print('resultados/jarvis_judice_ninke/jarvis_judice_ninke_1-'+imgname)\n print('resultados/jarvis_judice_ninke/jarvis_judice_ninke_2-'+imgname)\n\n cv.waitKey(0)\n cv.destroyAllWindows()\n \nexcept:\n print(\"Erro\")",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from django.forms import ModelForm, ChoiceField, Form, FileField, ModelChoiceField, HiddenInput, ValidationError
from market.models import *
class OrderForm(ModelForm):
"""Order form used in trader view."""
# from http://stackoverflow.com/questions/1697702/how-to-pass-initial-parameter-to-djangos-modelform-instance/1697770#1697770
# price from http://stackoverflow.com/questions/6473895/how-to-restrict-values-in-a-django-decimalfield
    # restricts prices to 0.01 through 2.00
PRICE_CHOICES = [(i*.01, str(i*.01)) for i in range(1,201)]
price = ChoiceField(choices=PRICE_CHOICES)
trader = ModelChoiceField(label='', queryset=Trader.objects.all(), widget=HiddenInput())
market = ModelChoiceField(label='', queryset=Market.objects.all(), widget=HiddenInput())
def clean(self):
"""Validates the data. Ensures the trader has enough cash or shares
to complete the requested order."""
cleaned_data = self.cleaned_data
if cleaned_data.get('order') and cleaned_data.get('stock') \
and cleaned_data.get('volume') and cleaned_data.get('price'):
t = cleaned_data['trader']
if cleaned_data['order'] == 'B': # buy order
open_orders = Order.objects.filter(trader=t,
order='B', completed=False)
open_order_value = float(sum([o.volume * o.price for o in open_orders]))
open_order_value += int(cleaned_data['volume']) * float(cleaned_data['price'])
if open_order_value > t.cash:
raise ValidationError("You don't have enough cash!")
elif cleaned_data['order'] == 'S': # sell order!
open_orders = sum(Order.objects.filter(trader=t, order='S',
stock=cleaned_data['stock'],
completed=False).values_list('volume', flat=True))
open_orders += cleaned_data['volume']
if open_orders > t.holding_set.get(stock=cleaned_data['stock']).shares:
raise ValidationError("You don't have enough shares!")
return cleaned_data
class Meta:
model = Order
fields = ('stock', 'order', 'volume', 'price', 'trader', 'market')
class UploadFileForm(Form):
file = FileField()
|
normal
|
{
"blob_id": "044e3479c32357e22ca3165d8601d8bd2a439fcb",
"index": 2329,
"step-1": "<mask token>\n\n\nclass OrderForm(ModelForm):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\n class Meta:\n model = Order\n fields = 'stock', 'order', 'volume', 'price', 'trader', 'market'\n\n\nclass UploadFileForm(Form):\n file = FileField()\n",
"step-2": "<mask token>\n\n\nclass OrderForm(ModelForm):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def clean(self):\n \"\"\"Validates the data. Ensures the trader has enough cash or shares\n to complete the requested order.\"\"\"\n cleaned_data = self.cleaned_data\n if cleaned_data.get('order') and cleaned_data.get('stock'\n ) and cleaned_data.get('volume') and cleaned_data.get('price'):\n t = cleaned_data['trader']\n if cleaned_data['order'] == 'B':\n open_orders = Order.objects.filter(trader=t, order='B',\n completed=False)\n open_order_value = float(sum([(o.volume * o.price) for o in\n open_orders]))\n open_order_value += int(cleaned_data['volume']) * float(\n cleaned_data['price'])\n if open_order_value > t.cash:\n raise ValidationError(\"You don't have enough cash!\")\n elif cleaned_data['order'] == 'S':\n open_orders = sum(Order.objects.filter(trader=t, order='S',\n stock=cleaned_data['stock'], completed=False).\n values_list('volume', flat=True))\n open_orders += cleaned_data['volume']\n if open_orders > t.holding_set.get(stock=cleaned_data['stock']\n ).shares:\n raise ValidationError(\"You don't have enough shares!\")\n return cleaned_data\n\n\n class Meta:\n model = Order\n fields = 'stock', 'order', 'volume', 'price', 'trader', 'market'\n\n\nclass UploadFileForm(Form):\n file = FileField()\n",
"step-3": "<mask token>\n\n\nclass OrderForm(ModelForm):\n <mask token>\n PRICE_CHOICES = [(i * 0.01, str(i * 0.01)) for i in range(1, 201)]\n price = ChoiceField(choices=PRICE_CHOICES)\n trader = ModelChoiceField(label='', queryset=Trader.objects.all(),\n widget=HiddenInput())\n market = ModelChoiceField(label='', queryset=Market.objects.all(),\n widget=HiddenInput())\n\n def clean(self):\n \"\"\"Validates the data. Ensures the trader has enough cash or shares\n to complete the requested order.\"\"\"\n cleaned_data = self.cleaned_data\n if cleaned_data.get('order') and cleaned_data.get('stock'\n ) and cleaned_data.get('volume') and cleaned_data.get('price'):\n t = cleaned_data['trader']\n if cleaned_data['order'] == 'B':\n open_orders = Order.objects.filter(trader=t, order='B',\n completed=False)\n open_order_value = float(sum([(o.volume * o.price) for o in\n open_orders]))\n open_order_value += int(cleaned_data['volume']) * float(\n cleaned_data['price'])\n if open_order_value > t.cash:\n raise ValidationError(\"You don't have enough cash!\")\n elif cleaned_data['order'] == 'S':\n open_orders = sum(Order.objects.filter(trader=t, order='S',\n stock=cleaned_data['stock'], completed=False).\n values_list('volume', flat=True))\n open_orders += cleaned_data['volume']\n if open_orders > t.holding_set.get(stock=cleaned_data['stock']\n ).shares:\n raise ValidationError(\"You don't have enough shares!\")\n return cleaned_data\n\n\n class Meta:\n model = Order\n fields = 'stock', 'order', 'volume', 'price', 'trader', 'market'\n\n\nclass UploadFileForm(Form):\n file = FileField()\n",
"step-4": "<mask token>\n\n\nclass OrderForm(ModelForm):\n \"\"\"Order form used in trader view.\"\"\"\n PRICE_CHOICES = [(i * 0.01, str(i * 0.01)) for i in range(1, 201)]\n price = ChoiceField(choices=PRICE_CHOICES)\n trader = ModelChoiceField(label='', queryset=Trader.objects.all(),\n widget=HiddenInput())\n market = ModelChoiceField(label='', queryset=Market.objects.all(),\n widget=HiddenInput())\n\n def clean(self):\n \"\"\"Validates the data. Ensures the trader has enough cash or shares\n to complete the requested order.\"\"\"\n cleaned_data = self.cleaned_data\n if cleaned_data.get('order') and cleaned_data.get('stock'\n ) and cleaned_data.get('volume') and cleaned_data.get('price'):\n t = cleaned_data['trader']\n if cleaned_data['order'] == 'B':\n open_orders = Order.objects.filter(trader=t, order='B',\n completed=False)\n open_order_value = float(sum([(o.volume * o.price) for o in\n open_orders]))\n open_order_value += int(cleaned_data['volume']) * float(\n cleaned_data['price'])\n if open_order_value > t.cash:\n raise ValidationError(\"You don't have enough cash!\")\n elif cleaned_data['order'] == 'S':\n open_orders = sum(Order.objects.filter(trader=t, order='S',\n stock=cleaned_data['stock'], completed=False).\n values_list('volume', flat=True))\n open_orders += cleaned_data['volume']\n if open_orders > t.holding_set.get(stock=cleaned_data['stock']\n ).shares:\n raise ValidationError(\"You don't have enough shares!\")\n return cleaned_data\n\n\n class Meta:\n model = Order\n fields = 'stock', 'order', 'volume', 'price', 'trader', 'market'\n\n\nclass UploadFileForm(Form):\n file = FileField()\n",
"step-5": "from django.forms import ModelForm, ChoiceField, Form, FileField, ModelChoiceField, HiddenInput, ValidationError\nfrom market.models import *\n\nclass OrderForm(ModelForm):\n \"\"\"Order form used in trader view.\"\"\"\n # from http://stackoverflow.com/questions/1697702/how-to-pass-initial-parameter-to-djangos-modelform-instance/1697770#1697770\n # price from http://stackoverflow.com/questions/6473895/how-to-restrict-values-in-a-django-decimalfield\n\n # restricts prices to 0.0 through 2.0\n PRICE_CHOICES = [(i*.01, str(i*.01)) for i in range(1,201)]\n price = ChoiceField(choices=PRICE_CHOICES)\n trader = ModelChoiceField(label='', queryset=Trader.objects.all(), widget=HiddenInput())\n market = ModelChoiceField(label='', queryset=Market.objects.all(), widget=HiddenInput())\n\n def clean(self):\n \"\"\"Validates the data. Ensures the trader has enough cash or shares\n to complete the requested order.\"\"\"\n\n cleaned_data = self.cleaned_data\n if cleaned_data.get('order') and cleaned_data.get('stock') \\\n and cleaned_data.get('volume') and cleaned_data.get('price'):\n t = cleaned_data['trader']\n if cleaned_data['order'] == 'B': # buy order\n open_orders = Order.objects.filter(trader=t,\n order='B', completed=False)\n open_order_value = float(sum([o.volume * o.price for o in open_orders]))\n open_order_value += int(cleaned_data['volume']) * float(cleaned_data['price'])\n\n if open_order_value > t.cash:\n raise ValidationError(\"You don't have enough cash!\")\n\n elif cleaned_data['order'] == 'S': # sell order!\n open_orders = sum(Order.objects.filter(trader=t, order='S',\n stock=cleaned_data['stock'],\n completed=False).values_list('volume', flat=True))\n open_orders += cleaned_data['volume']\n\n if open_orders > t.holding_set.get(stock=cleaned_data['stock']).shares:\n raise ValidationError(\"You don't have enough shares!\")\n return cleaned_data\n\n class Meta:\n model = Order\n fields = ('stock', 'order', 'volume', 'price', 'trader', 'market')\n\nclass UploadFileForm(Form):\n file = FileField()\n",
"step-ids": [
3,
4,
5,
6,
8
]
}
|
[
3,
4,
5,
6,
8
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@admin.register(User)
class AuthorizationUserAdmin(admin.ModelAdmin):
<|reserved_special_token_0|>
pass
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@admin.register(User)
class AuthorizationUserAdmin(admin.ModelAdmin):
exclude = ['open_id']
pass
<|reserved_special_token_1|>
from django.contrib import admin
from .models import User
@admin.register(User)
class AuthorizationUserAdmin(admin.ModelAdmin):
exclude = ['open_id']
pass
<|reserved_special_token_1|>
from django.contrib import admin
from .models import User
# Register your models here.
@admin.register(User)
class AuthorizationUserAdmin(admin.ModelAdmin):
exclude = ['open_id']
pass
|
flexible
|
{
"blob_id": "d3585e7b761fa7b2eeaacf09f84bb6a4abc1cf02",
"index": 6806,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\[email protected](User)\nclass AuthorizationUserAdmin(admin.ModelAdmin):\n <mask token>\n pass\n",
"step-3": "<mask token>\n\n\[email protected](User)\nclass AuthorizationUserAdmin(admin.ModelAdmin):\n exclude = ['open_id']\n pass\n",
"step-4": "from django.contrib import admin\nfrom .models import User\n\n\[email protected](User)\nclass AuthorizationUserAdmin(admin.ModelAdmin):\n exclude = ['open_id']\n pass\n",
"step-5": "from django.contrib import admin\r\nfrom .models import User\r\n\r\n\r\n# Register your models here.\r\n\r\[email protected](User)\r\nclass AuthorizationUserAdmin(admin.ModelAdmin):\r\n exclude = ['open_id']\r\n pass\r\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import math as m
import functions_by_alexandra as fba
import funs
from functions_by_alexandra import User, a
from pkg import bps, geom
print(type(funs))
print(type(funs.add ))
#
# print(add(2,3))
print("Result: ", funs.add(10, 20))
print("Result: ", fba.add(10,20))
print(type(fba ))
print(a )
print(m.pi)
p = User()
print(p)
#print(functions_by_alexandra.add(10,20))
print(bps.happy(10,20))
|
normal
|
{
"blob_id": "b53b0e6ff14750bbba3c2e5e2ea2fc5bb1abccec",
"index": 3135,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(type(funs))\nprint(type(funs.add))\nprint('Result: ', funs.add(10, 20))\nprint('Result: ', fba.add(10, 20))\nprint(type(fba))\nprint(a)\nprint(m.pi)\n<mask token>\nprint(p)\nprint(bps.happy(10, 20))\n",
"step-3": "<mask token>\nprint(type(funs))\nprint(type(funs.add))\nprint('Result: ', funs.add(10, 20))\nprint('Result: ', fba.add(10, 20))\nprint(type(fba))\nprint(a)\nprint(m.pi)\np = User()\nprint(p)\nprint(bps.happy(10, 20))\n",
"step-4": "import math as m\nimport functions_by_alexandra as fba\nimport funs\nfrom functions_by_alexandra import User, a\nfrom pkg import bps, geom\nprint(type(funs))\nprint(type(funs.add))\nprint('Result: ', funs.add(10, 20))\nprint('Result: ', fba.add(10, 20))\nprint(type(fba))\nprint(a)\nprint(m.pi)\np = User()\nprint(p)\nprint(bps.happy(10, 20))\n",
"step-5": "import math as m\r\n\r\nimport functions_by_alexandra as fba\r\nimport funs\r\nfrom functions_by_alexandra import User, a\r\nfrom pkg import bps, geom\r\n\r\nprint(type(funs))\r\nprint(type(funs.add ))\r\n#\r\n# print(add(2,3))\r\nprint(\"Result: \", funs.add(10, 20))\r\nprint(\"Result: \", fba.add(10,20))\r\nprint(type(fba ))\r\nprint(a )\r\n\r\nprint(m.pi)\r\n\r\n\r\np = User()\r\nprint(p)\r\n\r\n#print(functions_by_alexandra.add(10,20))\r\n\r\n\r\n\r\nprint(bps.happy(10,20))\r\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
#!/usr/bin/env python
#-*- coding : utf-8 -*-
import string
import keyword
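# a valid identifier starts with a letter or underscore; subsequent symbols may also be digits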
alphas = string.letters + '_'
nums = string.digits
keywords = keyword.kwlist
checklst = alphas + nums
print 'Welcome to the Identifier Checker v1.0'
myInput = raw_input('Identifier to test? ')
if myInput in keywords:
print 'Okay as a keyword'
elif len(myInput) == 1:
if myInput in alphas:
print 'Okay as an Identifier'
else:
		print 'invalid: single symbol must be alphabetic'
elif len(myInput) > 1:
if myInput[0] not in alphas:
print '''invalid: first symbol must be alphabetic'''
else:
for otherChar in myInput[1:]:
if otherChar not in checklst:
				print '''invalid: remaining symbols must be alphanumeric'''
break
else:
print '''okay as an identifier'''
|
normal
|
{
"blob_id": "2420c835ff91c1269cb16fca2e60e191e1e8ce13",
"index": 6457,
"step-1": "#!/usr/bin/env python\n#-*- coding : utf-8 -*-\n\nimport string\nimport keyword\n\nalphas = string.letters + '_'\nnums = string.digits\nkeywords = keyword.kwlist\nchecklst = alphas + nums\n\nprint 'Welcome to the Identifier Checker v1.0'\nmyInput = raw_input('Identifier to test? ')\n\nif myInput in keywords:\n\tprint 'Okay as a keyword'\n\nelif len(myInput) == 1:\n\tif myInput in alphas:\n\t\tprint 'Okay as an Identifier'\n\telse:\n\t\tprint 'invaild: one symbols must be alphanumeric '\nelif len(myInput) > 1:\n\tif myInput[0] not in alphas:\n\t\tprint '''invalid: first symbol must be alphabetic'''\n\telse:\n\t\tfor otherChar in myInput[1:]:\n\n\t\t\tif otherChar not in checklst:\n\t\t\t\tprint '''invalid:remaining symbols must be alphanumeric'''\n\t\t\t\tbreak\n\t\telse:\n\t\t\tprint '''okay as an identifier'''\n\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
import os
import sys
import time
import json
import socket
from urllib import request, parse
from concurrent.futures import ThreadPoolExecutor
from multiprocessing import Process
import psutil
from daemon import DaemonBase
from host_performence import *
class MyDaemon(DaemonBase):
"""Real Daemon class"""
def __init__(self,
api_url,
monitor_port,
pidfile,
stdin='/dev/null',
stdout='/dev/null',
stderr='/dev/null'):
self.api_url = api_url
self.monitor_port = monitor_port
super().__init__(pidfile, stdin, stdout, stderr)
@staticmethod
def get_host_addrs(family):
for nic, snics in psutil.net_if_addrs().items():
for snic in snics:
if snic.family == family:
yield (nic, snic.address)
def do_post(self, params):
data = json.dumps(params)
# Json Post
# headers = {'Content-Type': 'application/json'}
# req = request.Request(self.api_url, data=data.encode('utf-8'), headers=headers)
# Form Post eg. ?data=params&code=1
data = parse.urlencode({'data': data})
req = request.Request(self.api_url, data=data.encode('utf-8'))
try:
with request.urlopen(req, timeout=3) as resp:
# print(resp.read().decode('utf-8'))
return resp.status
except Exception as e:
with open('/tmp/test_daemon.err', 'a') as f:
print('%s at: %s' % (e, time.ctime()), file=f)
def tasks(self):
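        # Every 60 seconds, collect host metrics (CPU, memory, disk, network) and POST them to the API.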
pnic_before = get_net_io_counters()
while 1:
time.sleep(60)
pnic_after = get_net_io_counters()
send_datas = {
'type': 8,
'ip_addr': ''.join([
n[1] for n in self.get_host_addrs(socket.AF_INET)
if n[0] == self.monitor_port
]),
'cpu_perf': get_cpu_percent(),
'mem_perf': get_mem_usage(),
'disk_perf': get_disk_usage(),
'disk_speed': get_disk_speed(),
'net_perf': get_network_traffic(pnic_before, pnic_after)
}
self.do_post(send_datas)
pnic_before = get_net_io_counters()
def run(self):
sys.stdout.write('Daemon started with pid %s\n' % os.getpid())
_p = Process(target=self.tasks, daemon=True)
_p.start()
p = psutil.Process(_p.pid)
while 1:
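            # Watchdog: restart the metrics subprocess whenever it exceeds 1% CPU or memory usage.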
current_cpu = p.cpu_percent()
current_mem = p.memory_percent()
# print(current_cpu, current_mem, time.ctime(), p.pid, p.ppid())
if p.is_running() and (current_mem > 1 or current_cpu > 1):
p.terminate()
p.wait()
with open('/tmp/test_daemon.log', 'a') as f:
f.write('CPU: %s - MEM: %s - at: %s\n' %
(current_cpu, current_mem, time.ctime()))
_p = Process(target=self.tasks, daemon=True)
_p.start()
sys.stdout.write('The subprocess restart pid %s\n' % _p.pid)
p = psutil.Process(_p.pid)
time.sleep(60)
|
normal
|
{
"blob_id": "6e253747182716f84aa6326aafe15ff82be17378",
"index": 1351,
"step-1": "<mask token>\n\n\nclass MyDaemon(DaemonBase):\n <mask token>\n\n def __init__(self, api_url, monitor_port, pidfile, stdin='/dev/null',\n stdout='/dev/null', stderr='/dev/null'):\n self.api_url = api_url\n self.monitor_port = monitor_port\n super().__init__(pidfile, stdin, stdout, stderr)\n\n @staticmethod\n def get_host_addrs(family):\n for nic, snics in psutil.net_if_addrs().items():\n for snic in snics:\n if snic.family == family:\n yield nic, snic.address\n <mask token>\n\n def tasks(self):\n pnic_before = get_net_io_counters()\n while 1:\n time.sleep(60)\n pnic_after = get_net_io_counters()\n send_datas = {'type': 8, 'ip_addr': ''.join([n[1] for n in self\n .get_host_addrs(socket.AF_INET) if n[0] == self.\n monitor_port]), 'cpu_perf': get_cpu_percent(), 'mem_perf':\n get_mem_usage(), 'disk_perf': get_disk_usage(),\n 'disk_speed': get_disk_speed(), 'net_perf':\n get_network_traffic(pnic_before, pnic_after)}\n self.do_post(send_datas)\n pnic_before = get_net_io_counters()\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass MyDaemon(DaemonBase):\n <mask token>\n\n def __init__(self, api_url, monitor_port, pidfile, stdin='/dev/null',\n stdout='/dev/null', stderr='/dev/null'):\n self.api_url = api_url\n self.monitor_port = monitor_port\n super().__init__(pidfile, stdin, stdout, stderr)\n\n @staticmethod\n def get_host_addrs(family):\n for nic, snics in psutil.net_if_addrs().items():\n for snic in snics:\n if snic.family == family:\n yield nic, snic.address\n <mask token>\n\n def tasks(self):\n pnic_before = get_net_io_counters()\n while 1:\n time.sleep(60)\n pnic_after = get_net_io_counters()\n send_datas = {'type': 8, 'ip_addr': ''.join([n[1] for n in self\n .get_host_addrs(socket.AF_INET) if n[0] == self.\n monitor_port]), 'cpu_perf': get_cpu_percent(), 'mem_perf':\n get_mem_usage(), 'disk_perf': get_disk_usage(),\n 'disk_speed': get_disk_speed(), 'net_perf':\n get_network_traffic(pnic_before, pnic_after)}\n self.do_post(send_datas)\n pnic_before = get_net_io_counters()\n\n def run(self):\n sys.stdout.write('Daemon started with pid %s\\n' % os.getpid())\n _p = Process(target=self.tasks, daemon=True)\n _p.start()\n p = psutil.Process(_p.pid)\n while 1:\n current_cpu = p.cpu_percent()\n current_mem = p.memory_percent()\n if p.is_running() and (current_mem > 1 or current_cpu > 1):\n p.terminate()\n p.wait()\n with open('/tmp/test_daemon.log', 'a') as f:\n f.write('CPU: %s - MEM: %s - at: %s\\n' % (current_cpu,\n current_mem, time.ctime()))\n _p = Process(target=self.tasks, daemon=True)\n _p.start()\n sys.stdout.write('The subprocess restart pid %s\\n' % _p.pid)\n p = psutil.Process(_p.pid)\n time.sleep(60)\n",
"step-3": "<mask token>\n\n\nclass MyDaemon(DaemonBase):\n \"\"\"Real Daemon class\"\"\"\n\n def __init__(self, api_url, monitor_port, pidfile, stdin='/dev/null',\n stdout='/dev/null', stderr='/dev/null'):\n self.api_url = api_url\n self.monitor_port = monitor_port\n super().__init__(pidfile, stdin, stdout, stderr)\n\n @staticmethod\n def get_host_addrs(family):\n for nic, snics in psutil.net_if_addrs().items():\n for snic in snics:\n if snic.family == family:\n yield nic, snic.address\n\n def do_post(self, params):\n data = json.dumps(params)\n data = parse.urlencode({'data': data})\n req = request.Request(self.api_url, data=data.encode('utf-8'))\n try:\n with request.urlopen(req, timeout=3) as resp:\n return resp.status\n except Exception as e:\n with open('/tmp/test_daemon.err', 'a') as f:\n print('%s at: %s' % (e, time.ctime()), file=f)\n\n def tasks(self):\n pnic_before = get_net_io_counters()\n while 1:\n time.sleep(60)\n pnic_after = get_net_io_counters()\n send_datas = {'type': 8, 'ip_addr': ''.join([n[1] for n in self\n .get_host_addrs(socket.AF_INET) if n[0] == self.\n monitor_port]), 'cpu_perf': get_cpu_percent(), 'mem_perf':\n get_mem_usage(), 'disk_perf': get_disk_usage(),\n 'disk_speed': get_disk_speed(), 'net_perf':\n get_network_traffic(pnic_before, pnic_after)}\n self.do_post(send_datas)\n pnic_before = get_net_io_counters()\n\n def run(self):\n sys.stdout.write('Daemon started with pid %s\\n' % os.getpid())\n _p = Process(target=self.tasks, daemon=True)\n _p.start()\n p = psutil.Process(_p.pid)\n while 1:\n current_cpu = p.cpu_percent()\n current_mem = p.memory_percent()\n if p.is_running() and (current_mem > 1 or current_cpu > 1):\n p.terminate()\n p.wait()\n with open('/tmp/test_daemon.log', 'a') as f:\n f.write('CPU: %s - MEM: %s - at: %s\\n' % (current_cpu,\n current_mem, time.ctime()))\n _p = Process(target=self.tasks, daemon=True)\n _p.start()\n sys.stdout.write('The subprocess restart pid %s\\n' % _p.pid)\n p = psutil.Process(_p.pid)\n time.sleep(60)\n",
"step-4": "import os\nimport sys\nimport time\nimport json\nimport socket\nfrom urllib import request, parse\nfrom concurrent.futures import ThreadPoolExecutor\nfrom multiprocessing import Process\nimport psutil\nfrom daemon import DaemonBase\nfrom host_performence import *\n\n\nclass MyDaemon(DaemonBase):\n \"\"\"Real Daemon class\"\"\"\n\n def __init__(self, api_url, monitor_port, pidfile, stdin='/dev/null',\n stdout='/dev/null', stderr='/dev/null'):\n self.api_url = api_url\n self.monitor_port = monitor_port\n super().__init__(pidfile, stdin, stdout, stderr)\n\n @staticmethod\n def get_host_addrs(family):\n for nic, snics in psutil.net_if_addrs().items():\n for snic in snics:\n if snic.family == family:\n yield nic, snic.address\n\n def do_post(self, params):\n data = json.dumps(params)\n data = parse.urlencode({'data': data})\n req = request.Request(self.api_url, data=data.encode('utf-8'))\n try:\n with request.urlopen(req, timeout=3) as resp:\n return resp.status\n except Exception as e:\n with open('/tmp/test_daemon.err', 'a') as f:\n print('%s at: %s' % (e, time.ctime()), file=f)\n\n def tasks(self):\n pnic_before = get_net_io_counters()\n while 1:\n time.sleep(60)\n pnic_after = get_net_io_counters()\n send_datas = {'type': 8, 'ip_addr': ''.join([n[1] for n in self\n .get_host_addrs(socket.AF_INET) if n[0] == self.\n monitor_port]), 'cpu_perf': get_cpu_percent(), 'mem_perf':\n get_mem_usage(), 'disk_perf': get_disk_usage(),\n 'disk_speed': get_disk_speed(), 'net_perf':\n get_network_traffic(pnic_before, pnic_after)}\n self.do_post(send_datas)\n pnic_before = get_net_io_counters()\n\n def run(self):\n sys.stdout.write('Daemon started with pid %s\\n' % os.getpid())\n _p = Process(target=self.tasks, daemon=True)\n _p.start()\n p = psutil.Process(_p.pid)\n while 1:\n current_cpu = p.cpu_percent()\n current_mem = p.memory_percent()\n if p.is_running() and (current_mem > 1 or current_cpu > 1):\n p.terminate()\n p.wait()\n with open('/tmp/test_daemon.log', 'a') as f:\n f.write('CPU: %s - MEM: %s - at: %s\\n' % (current_cpu,\n current_mem, time.ctime()))\n _p = Process(target=self.tasks, daemon=True)\n _p.start()\n sys.stdout.write('The subprocess restart pid %s\\n' % _p.pid)\n p = psutil.Process(_p.pid)\n time.sleep(60)\n",
"step-5": "import os\nimport sys\nimport time\nimport json\nimport socket\nfrom urllib import request, parse\nfrom concurrent.futures import ThreadPoolExecutor\nfrom multiprocessing import Process\n\nimport psutil\n\nfrom daemon import DaemonBase\nfrom host_performence import *\n\n\nclass MyDaemon(DaemonBase):\n \"\"\"Real Daemon class\"\"\"\n\n def __init__(self,\n api_url,\n monitor_port,\n pidfile,\n stdin='/dev/null',\n stdout='/dev/null',\n stderr='/dev/null'):\n self.api_url = api_url\n self.monitor_port = monitor_port\n super().__init__(pidfile, stdin, stdout, stderr)\n\n @staticmethod\n def get_host_addrs(family):\n for nic, snics in psutil.net_if_addrs().items():\n for snic in snics:\n if snic.family == family:\n yield (nic, snic.address)\n\n def do_post(self, params):\n data = json.dumps(params)\n # Json Post\n # headers = {'Content-Type': 'application/json'}\n # req = request.Request(self.api_url, data=data.encode('utf-8'), headers=headers) \n # Form Post eg. ?data=params&code=1\n data = parse.urlencode({'data': data})\n req = request.Request(self.api_url, data=data.encode('utf-8'))\n try:\n with request.urlopen(req, timeout=3) as resp:\n # print(resp.read().decode('utf-8'))\n return resp.status\n except Exception as e:\n with open('/tmp/test_daemon.err', 'a') as f:\n print('%s at: %s' % (e, time.ctime()), file=f)\n\n def tasks(self):\n pnic_before = get_net_io_counters()\n while 1:\n time.sleep(60)\n pnic_after = get_net_io_counters()\n send_datas = {\n 'type': 8,\n 'ip_addr': ''.join([\n n[1] for n in self.get_host_addrs(socket.AF_INET)\n if n[0] == self.monitor_port\n ]),\n 'cpu_perf': get_cpu_percent(),\n 'mem_perf': get_mem_usage(),\n 'disk_perf': get_disk_usage(),\n 'disk_speed': get_disk_speed(),\n 'net_perf': get_network_traffic(pnic_before, pnic_after)\n }\n self.do_post(send_datas)\n pnic_before = get_net_io_counters()\n\n def run(self):\n sys.stdout.write('Daemon started with pid %s\\n' % os.getpid())\n _p = Process(target=self.tasks, daemon=True)\n _p.start()\n p = psutil.Process(_p.pid)\n while 1:\n current_cpu = p.cpu_percent()\n current_mem = p.memory_percent()\n # print(current_cpu, current_mem, time.ctime(), p.pid, p.ppid())\n if p.is_running() and (current_mem > 1 or current_cpu > 1):\n p.terminate()\n p.wait()\n with open('/tmp/test_daemon.log', 'a') as f:\n f.write('CPU: %s - MEM: %s - at: %s\\n' %\n (current_cpu, current_mem, time.ctime()))\n _p = Process(target=self.tasks, daemon=True)\n _p.start()\n sys.stdout.write('The subprocess restart pid %s\\n' % _p.pid)\n p = psutil.Process(_p.pid)\n time.sleep(60)",
"step-ids": [
4,
5,
7,
8,
9
]
}
|
[
4,
5,
7,
8,
9
] |
<|reserved_special_token_0|>
class WindowFeatureExtractor(object):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def transform(self, X, y=None):
return self.vectorizer.transform(X, y)
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class WindowFeatureExtractor(object):
"""
A simple wrapper class that takes a number of window based feature extractor
functions and applies them to a dataset of windows, and then vectorizes with
the sklearn DictVectorizer class
"""
def __init__(self, feature_extractors, min_feat_frequency, sparse=True,
feature_val=1):
"""
feature_extractors : list of fns
feature extraction fns
min_feat_frequency : int
minimum frequency of features to retain
sparse : boolean
return a sparse numpy matrix or not
"""
self.feature_extractors = feature_extractors
self.min_feat_frequency = min_feat_frequency
self.vectorizer = DictVectorizer(sparse=sparse)
self.feature_val = feature_val
def fit(self, X, y=None):
"""
X : list of list of str
list of word windows
y : ignored
returns : numpy array (sparse is sparse = True)
"""
feats = self.__extract_features_(X)
return self.vectorizer.fit(feats)
def transform(self, X, y=None):
return self.vectorizer.transform(X, y)
def fit_transform(self, X, y=None):
feats = self.__extract_features_(X)
return self.vectorizer.fit_transform(feats)
def __extract_features_(self, X):
if len(X) == 0:
raise Exception('Empty list passed to WindowFeatureExtractor.fit')
mid_ix = compute_middle_index(X[0])
all_feats = []
keys = []
for window in X:
d = {}
for fn in self.feature_extractors:
fts = fn(window, mid_ix, self.feature_val)
d.update(fts)
keys.extend(d.keys())
all_feats.append(d)
if self.min_feat_frequency <= 1:
return all_feats
""" Filter to at or above minimum feature frequency """
keyCnt = Counter(keys)
frequent = set([k for k, v in keyCnt.items() if v >= self.
min_feat_frequency])
freq_feats = []
for d in all_feats:
freq_d = dict([(k, v) for k, v in d.items() if k in frequent])
freq_feats.append(freq_d)
return freq_feats
<|reserved_special_token_1|>
__author__ = 'simon.hughes'
<|reserved_special_token_0|>
class WindowFeatureExtractor(object):
"""
A simple wrapper class that takes a number of window based feature extractor
functions and applies them to a dataset of windows, and then vectorizes with
the sklearn DictVectorizer class
"""
def __init__(self, feature_extractors, min_feat_frequency, sparse=True,
feature_val=1):
"""
feature_extractors : list of fns
feature extraction fns
min_feat_frequency : int
minimum frequency of features to retain
sparse : boolean
return a sparse numpy matrix or not
"""
self.feature_extractors = feature_extractors
self.min_feat_frequency = min_feat_frequency
self.vectorizer = DictVectorizer(sparse=sparse)
self.feature_val = feature_val
def fit(self, X, y=None):
"""
X : list of list of str
list of word windows
y : ignored
returns : numpy array (sparse is sparse = True)
"""
feats = self.__extract_features_(X)
return self.vectorizer.fit(feats)
def transform(self, X, y=None):
return self.vectorizer.transform(X, y)
def fit_transform(self, X, y=None):
feats = self.__extract_features_(X)
return self.vectorizer.fit_transform(feats)
def __extract_features_(self, X):
if len(X) == 0:
raise Exception('Empty list passed to WindowFeatureExtractor.fit')
mid_ix = compute_middle_index(X[0])
all_feats = []
keys = []
for window in X:
d = {}
for fn in self.feature_extractors:
fts = fn(window, mid_ix, self.feature_val)
d.update(fts)
keys.extend(d.keys())
all_feats.append(d)
if self.min_feat_frequency <= 1:
return all_feats
""" Filter to at or above minimum feature frequency """
keyCnt = Counter(keys)
frequent = set([k for k, v in keyCnt.items() if v >= self.
min_feat_frequency])
freq_feats = []
for d in all_feats:
freq_d = dict([(k, v) for k, v in d.items() if k in frequent])
freq_feats.append(freq_d)
return freq_feats
<|reserved_special_token_1|>
__author__ = 'simon.hughes'
from sklearn.feature_extraction import DictVectorizer
from WindowFeatures import compute_middle_index
from collections import Counter
class WindowFeatureExtractor(object):
"""
A simple wrapper class that takes a number of window based feature extractor
functions and applies them to a dataset of windows, and then vectorizes with
the sklearn DictVectorizer class
"""
def __init__(self, feature_extractors, min_feat_frequency, sparse=True,
feature_val=1):
"""
feature_extractors : list of fns
feature extraction fns
min_feat_frequency : int
minimum frequency of features to retain
sparse : boolean
return a sparse numpy matrix or not
"""
self.feature_extractors = feature_extractors
self.min_feat_frequency = min_feat_frequency
self.vectorizer = DictVectorizer(sparse=sparse)
self.feature_val = feature_val
def fit(self, X, y=None):
"""
X : list of list of str
list of word windows
y : ignored
returns : numpy array (sparse is sparse = True)
"""
feats = self.__extract_features_(X)
return self.vectorizer.fit(feats)
def transform(self, X, y=None):
return self.vectorizer.transform(X, y)
def fit_transform(self, X, y=None):
feats = self.__extract_features_(X)
return self.vectorizer.fit_transform(feats)
def __extract_features_(self, X):
if len(X) == 0:
raise Exception('Empty list passed to WindowFeatureExtractor.fit')
mid_ix = compute_middle_index(X[0])
all_feats = []
keys = []
for window in X:
d = {}
for fn in self.feature_extractors:
fts = fn(window, mid_ix, self.feature_val)
d.update(fts)
keys.extend(d.keys())
all_feats.append(d)
if self.min_feat_frequency <= 1:
return all_feats
""" Filter to at or above minimum feature frequency """
keyCnt = Counter(keys)
frequent = set([k for k, v in keyCnt.items() if v >= self.
min_feat_frequency])
freq_feats = []
for d in all_feats:
freq_d = dict([(k, v) for k, v in d.items() if k in frequent])
freq_feats.append(freq_d)
return freq_feats
<|reserved_special_token_1|>
__author__ = 'simon.hughes'
from sklearn.feature_extraction import DictVectorizer
from WindowFeatures import compute_middle_index
from collections import Counter
class WindowFeatureExtractor(object):
"""
A simple wrapper class that takes a number of window based feature extractor
functions and applies them to a dataset of windows, and then vectorizes with
the sklearn DictVectorizer class
"""
def __init__(self, feature_extractors, min_feat_frequency, sparse=True, feature_val=1):
"""
feature_extractors : list of fns
feature extraction fns
min_feat_frequency : int
minimum frequency of features to retain
sparse : boolean
return a sparse numpy matrix or not
"""
self.feature_extractors = feature_extractors
self.min_feat_frequency = min_feat_frequency
self.vectorizer = DictVectorizer(sparse=sparse)
self.feature_val = feature_val
def fit(self, X, y=None):
"""
X : list of list of str
list of word windows
y : ignored
returns : numpy array (sparse is sparse = True)
"""
feats = self.__extract_features_(X)
return self.vectorizer.fit(feats)
def transform(self, X, y=None):
return self.vectorizer.transform(X, y)
def fit_transform(self, X,y=None):
feats = self.__extract_features_(X)
return self.vectorizer.fit_transform(feats)
def __extract_features_(self, X):
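        # Run every feature extractor over each window, merging the results into one feature dict per window.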
if len(X) == 0:
raise Exception("Empty list passed to WindowFeatureExtractor.fit")
mid_ix = compute_middle_index(X[0])
all_feats = []
keys = []
for window in X:
d = {}
for fn in self.feature_extractors:
fts = fn(window, mid_ix, self.feature_val)
d.update(fts)
keys.extend(d.keys())
all_feats.append(d)
if self.min_feat_frequency <= 1:
return all_feats
""" Filter to at or above minimum feature frequency """
keyCnt = Counter(keys)
frequent = set([k for k,v in keyCnt.items() if v >= self.min_feat_frequency])
freq_feats = []
for d in all_feats:
freq_d = dict([(k,v) for k,v in d.items() if k in frequent])
freq_feats.append(freq_d)
return freq_feats
|
flexible
|
{
"blob_id": "48677d73f6489ce789884a9dff5d50c23f47d8b3",
"index": 260,
"step-1": "<mask token>\n\n\nclass WindowFeatureExtractor(object):\n <mask token>\n <mask token>\n <mask token>\n\n def transform(self, X, y=None):\n return self.vectorizer.transform(X, y)\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass WindowFeatureExtractor(object):\n \"\"\"\n A simple wrapper class that takes a number of window based feature extractor\n functions and applies them to a dataset of windows, and then vectorizes with\n the sklearn DictVectorizer class\n \"\"\"\n\n def __init__(self, feature_extractors, min_feat_frequency, sparse=True,\n feature_val=1):\n \"\"\"\n feature_extractors : list of fns\n feature extraction fns\n min_feat_frequency : int\n minimum frequency of features to retain\n sparse : boolean\n return a sparse numpy matrix or not\n \"\"\"\n self.feature_extractors = feature_extractors\n self.min_feat_frequency = min_feat_frequency\n self.vectorizer = DictVectorizer(sparse=sparse)\n self.feature_val = feature_val\n\n def fit(self, X, y=None):\n \"\"\"\n X : list of list of str\n list of word windows\n y : ignored\n\n returns : numpy array (sparse is sparse = True)\n \"\"\"\n feats = self.__extract_features_(X)\n return self.vectorizer.fit(feats)\n\n def transform(self, X, y=None):\n return self.vectorizer.transform(X, y)\n\n def fit_transform(self, X, y=None):\n feats = self.__extract_features_(X)\n return self.vectorizer.fit_transform(feats)\n\n def __extract_features_(self, X):\n if len(X) == 0:\n raise Exception('Empty list passed to WindowFeatureExtractor.fit')\n mid_ix = compute_middle_index(X[0])\n all_feats = []\n keys = []\n for window in X:\n d = {}\n for fn in self.feature_extractors:\n fts = fn(window, mid_ix, self.feature_val)\n d.update(fts)\n keys.extend(d.keys())\n all_feats.append(d)\n if self.min_feat_frequency <= 1:\n return all_feats\n \"\"\" Filter to at or above minimum feature frequency \"\"\"\n keyCnt = Counter(keys)\n frequent = set([k for k, v in keyCnt.items() if v >= self.\n min_feat_frequency])\n freq_feats = []\n for d in all_feats:\n freq_d = dict([(k, v) for k, v in d.items() if k in frequent])\n freq_feats.append(freq_d)\n return freq_feats\n",
"step-3": "__author__ = 'simon.hughes'\n<mask token>\n\n\nclass WindowFeatureExtractor(object):\n \"\"\"\n A simple wrapper class that takes a number of window based feature extractor\n functions and applies them to a dataset of windows, and then vectorizes with\n the sklearn DictVectorizer class\n \"\"\"\n\n def __init__(self, feature_extractors, min_feat_frequency, sparse=True,\n feature_val=1):\n \"\"\"\n feature_extractors : list of fns\n feature extraction fns\n min_feat_frequency : int\n minimum frequency of features to retain\n sparse : boolean\n return a sparse numpy matrix or not\n \"\"\"\n self.feature_extractors = feature_extractors\n self.min_feat_frequency = min_feat_frequency\n self.vectorizer = DictVectorizer(sparse=sparse)\n self.feature_val = feature_val\n\n def fit(self, X, y=None):\n \"\"\"\n X : list of list of str\n list of word windows\n y : ignored\n\n returns : numpy array (sparse is sparse = True)\n \"\"\"\n feats = self.__extract_features_(X)\n return self.vectorizer.fit(feats)\n\n def transform(self, X, y=None):\n return self.vectorizer.transform(X, y)\n\n def fit_transform(self, X, y=None):\n feats = self.__extract_features_(X)\n return self.vectorizer.fit_transform(feats)\n\n def __extract_features_(self, X):\n if len(X) == 0:\n raise Exception('Empty list passed to WindowFeatureExtractor.fit')\n mid_ix = compute_middle_index(X[0])\n all_feats = []\n keys = []\n for window in X:\n d = {}\n for fn in self.feature_extractors:\n fts = fn(window, mid_ix, self.feature_val)\n d.update(fts)\n keys.extend(d.keys())\n all_feats.append(d)\n if self.min_feat_frequency <= 1:\n return all_feats\n \"\"\" Filter to at or above minimum feature frequency \"\"\"\n keyCnt = Counter(keys)\n frequent = set([k for k, v in keyCnt.items() if v >= self.\n min_feat_frequency])\n freq_feats = []\n for d in all_feats:\n freq_d = dict([(k, v) for k, v in d.items() if k in frequent])\n freq_feats.append(freq_d)\n return freq_feats\n",
"step-4": "__author__ = 'simon.hughes'\nfrom sklearn.feature_extraction import DictVectorizer\nfrom WindowFeatures import compute_middle_index\nfrom collections import Counter\n\n\nclass WindowFeatureExtractor(object):\n \"\"\"\n A simple wrapper class that takes a number of window based feature extractor\n functions and applies them to a dataset of windows, and then vectorizes with\n the sklearn DictVectorizer class\n \"\"\"\n\n def __init__(self, feature_extractors, min_feat_frequency, sparse=True,\n feature_val=1):\n \"\"\"\n feature_extractors : list of fns\n feature extraction fns\n min_feat_frequency : int\n minimum frequency of features to retain\n sparse : boolean\n return a sparse numpy matrix or not\n \"\"\"\n self.feature_extractors = feature_extractors\n self.min_feat_frequency = min_feat_frequency\n self.vectorizer = DictVectorizer(sparse=sparse)\n self.feature_val = feature_val\n\n def fit(self, X, y=None):\n \"\"\"\n X : list of list of str\n list of word windows\n y : ignored\n\n returns : numpy array (sparse is sparse = True)\n \"\"\"\n feats = self.__extract_features_(X)\n return self.vectorizer.fit(feats)\n\n def transform(self, X, y=None):\n return self.vectorizer.transform(X, y)\n\n def fit_transform(self, X, y=None):\n feats = self.__extract_features_(X)\n return self.vectorizer.fit_transform(feats)\n\n def __extract_features_(self, X):\n if len(X) == 0:\n raise Exception('Empty list passed to WindowFeatureExtractor.fit')\n mid_ix = compute_middle_index(X[0])\n all_feats = []\n keys = []\n for window in X:\n d = {}\n for fn in self.feature_extractors:\n fts = fn(window, mid_ix, self.feature_val)\n d.update(fts)\n keys.extend(d.keys())\n all_feats.append(d)\n if self.min_feat_frequency <= 1:\n return all_feats\n \"\"\" Filter to at or above minimum feature frequency \"\"\"\n keyCnt = Counter(keys)\n frequent = set([k for k, v in keyCnt.items() if v >= self.\n min_feat_frequency])\n freq_feats = []\n for d in all_feats:\n freq_d = dict([(k, v) for k, v in d.items() if k in frequent])\n freq_feats.append(freq_d)\n return freq_feats\n",
"step-5": "__author__ = 'simon.hughes'\n\nfrom sklearn.feature_extraction import DictVectorizer\nfrom WindowFeatures import compute_middle_index\nfrom collections import Counter\n\nclass WindowFeatureExtractor(object):\n \"\"\"\n A simple wrapper class that takes a number of window based feature extractor\n functions and applies them to a dataset of windows, and then vectorizes with\n the sklearn DictVectorizer class\n \"\"\"\n\n def __init__(self, feature_extractors, min_feat_frequency, sparse=True, feature_val=1):\n \"\"\"\n feature_extractors : list of fns\n feature extraction fns\n min_feat_frequency : int\n minimum frequency of features to retain\n sparse : boolean\n return a sparse numpy matrix or not\n \"\"\"\n self.feature_extractors = feature_extractors\n self.min_feat_frequency = min_feat_frequency\n self.vectorizer = DictVectorizer(sparse=sparse)\n self.feature_val = feature_val\n\n def fit(self, X, y=None):\n \"\"\"\n X : list of list of str\n list of word windows\n y : ignored\n\n returns : numpy array (sparse is sparse = True)\n \"\"\"\n feats = self.__extract_features_(X)\n return self.vectorizer.fit(feats)\n\n def transform(self, X, y=None):\n return self.vectorizer.transform(X, y)\n\n def fit_transform(self, X,y=None):\n feats = self.__extract_features_(X)\n return self.vectorizer.fit_transform(feats)\n\n def __extract_features_(self, X):\n if len(X) == 0:\n raise Exception(\"Empty list passed to WindowFeatureExtractor.fit\")\n mid_ix = compute_middle_index(X[0])\n all_feats = []\n\n keys = []\n for window in X:\n d = {}\n for fn in self.feature_extractors:\n fts = fn(window, mid_ix, self.feature_val)\n d.update(fts)\n keys.extend(d.keys())\n all_feats.append(d)\n\n if self.min_feat_frequency <= 1:\n return all_feats\n\n \"\"\" Filter to at or above minimum feature frequency \"\"\"\n keyCnt = Counter(keys)\n frequent = set([k for k,v in keyCnt.items() if v >= self.min_feat_frequency])\n\n freq_feats = []\n for d in all_feats:\n freq_d = dict([(k,v) for k,v in d.items() if k in frequent])\n freq_feats.append(freq_d)\n return freq_feats",
"step-ids": [
2,
7,
8,
9,
10
]
}
|
[
2,
7,
8,
9,
10
] |
""" Soil and water decomposition rates """
import math
from water_balance import WaterBalance
from utilities import float_eq, float_lt, float_le, float_gt, float_ge, clip
__author__ = "Martin De Kauwe"
__version__ = "1.0 (25.02.2011)"
__email__ = "[email protected]"
class DecompFactors(object):
""" Calculate C and N litter production rates """
def __init__(self, control, params, state, fluxes, met_data):
"""
Parameters
----------
control : integers, structure
model control flags
params: floats, structure
model parameters
state: floats, structure
model state
fluxes : floats, structure
model fluxes
met_data : floats, dictionary
meteorological forcing data
"""
self.params = params
self.fluxes = fluxes
self.control = control
self.state = state
self.met_data = met_data
self.wb = WaterBalance(self.control, self.params, self.state,
self.fluxes, self.met_data)
def decay_rates(self, project_day):
""" Model decay rates - temperature dependency (i.e. increase with temp)
[See section A8 in Comins and McMurtrie 1993].
Parameters:
-----------
project_day : int
current simulation day (index)
"""
# temperature and water factors for decomposition
tempact = self.soil_temp_factor(project_day)
wtfac = self.wb.calculate_soil_water_fac(topsoil=True)
# decay rate of surface structural pool
self.params.decayrate[0] = (self.params.kdec1 *
math.exp(-3. * self.params.ligshoot) *
tempact * wtfac)
# decay rate of surface metabolic pool
self.params.decayrate[1] = self.params.kdec2 * tempact * wtfac
# decay rate of soil structural pool
self.params.decayrate[2] = (self.params.kdec3 *
math.exp(-3. * self.params.ligroot) *
tempact * wtfac)
# decay rate of soil metabolic pool
self.params.decayrate[3] = self.params.kdec4 * tempact * wtfac
# decay rate of active pool
self.params.decayrate[4] = (self.params.kdec5 *
(1.0 - 0.75 * self.params.finesoil) *
tempact * wtfac)
# decay rate of slow pool
self.params.decayrate[5] = self.params.kdec6 * tempact * wtfac
# decay rate of passive pool
self.params.decayrate[6] = self.params.kdec7 * tempact * wtfac
def soil_temp_factor(self, project_day):
"""Soil-temperature activity factor (A9).
Parameters:
-----------
project_day : int
current simulation day (index)
Returns:
--------
tfac : float
            soil temperature factor (dimensionless)
"""
tsoil = self.met_data['tsoil'][project_day]
if float_gt(tsoil, 0.0):
tfac = (0.0326 + 0.00351 * tsoil**1.652 - (tsoil / 41.748)**7.19)
if float_lt(tfac, 0.0):
tfac = 0.0
else:
# negative number cannot be raised to a fractional power
# number would need to be complex
tfac = 0.0
return tfac
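# --- Illustration only (not part of the original module): the A9 temperature
# response evaluated stand-alone, so the shape of the curve is easy to check.
# The example values below are approximate.
def _demo_soil_temp_factor(tsoil):
    if tsoil > 0.0:
        return max(0.0326 + 0.00351 * tsoil**1.652 - (tsoil / 41.748)**7.19, 0.0)
    return 0.0
# _demo_soil_temp_factor(5.0)  -> ~0.083
# _demo_soil_temp_factor(25.0) -> ~0.72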
|
normal
|
{
"blob_id": "74f3b4001a0520a25a314ff537719b679ba0fca4",
"index": 2578,
"step-1": "<mask token>\n\n\nclass DecompFactors(object):\n <mask token>\n\n def __init__(self, control, params, state, fluxes, met_data):\n \"\"\"\n Parameters\n ----------\n control : integers, structure\n model control flags\n params: floats, structure\n model parameters\n state: floats, structure\n model state\n fluxes : floats, structure\n model fluxes\n met_data : floats, dictionary\n meteorological forcing data\n\n \"\"\"\n self.params = params\n self.fluxes = fluxes\n self.control = control\n self.state = state\n self.met_data = met_data\n self.wb = WaterBalance(self.control, self.params, self.state, self.\n fluxes, self.met_data)\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass DecompFactors(object):\n \"\"\" Calculate C and N litter production rates \"\"\"\n\n def __init__(self, control, params, state, fluxes, met_data):\n \"\"\"\n Parameters\n ----------\n control : integers, structure\n model control flags\n params: floats, structure\n model parameters\n state: floats, structure\n model state\n fluxes : floats, structure\n model fluxes\n met_data : floats, dictionary\n meteorological forcing data\n\n \"\"\"\n self.params = params\n self.fluxes = fluxes\n self.control = control\n self.state = state\n self.met_data = met_data\n self.wb = WaterBalance(self.control, self.params, self.state, self.\n fluxes, self.met_data)\n\n def decay_rates(self, project_day):\n \"\"\" Model decay rates - temperature dependency (i.e. increase with temp)\n [See section A8 in Comins and McMurtrie 1993].\n\n Parameters:\n -----------\n project_day : int\n current simulation day (index)\n\n \"\"\"\n tempact = self.soil_temp_factor(project_day)\n wtfac = self.wb.calculate_soil_water_fac(topsoil=True)\n self.params.decayrate[0] = self.params.kdec1 * math.exp(-3.0 * self\n .params.ligshoot) * tempact * wtfac\n self.params.decayrate[1] = self.params.kdec2 * tempact * wtfac\n self.params.decayrate[2] = self.params.kdec3 * math.exp(-3.0 * self\n .params.ligroot) * tempact * wtfac\n self.params.decayrate[3] = self.params.kdec4 * tempact * wtfac\n self.params.decayrate[4] = self.params.kdec5 * (1.0 - 0.75 * self.\n params.finesoil) * tempact * wtfac\n self.params.decayrate[5] = self.params.kdec6 * tempact * wtfac\n self.params.decayrate[6] = self.params.kdec7 * tempact * wtfac\n\n def soil_temp_factor(self, project_day):\n \"\"\"Soil-temperature activity factor (A9).\n\n Parameters:\n -----------\n project_day : int\n current simulation day (index)\n\n Returns:\n --------\n tfac : float\n soil temperature factor [degC]\n\n \"\"\"\n tsoil = self.met_data['tsoil'][project_day]\n if float_gt(tsoil, 0.0):\n tfac = 0.0326 + 0.00351 * tsoil ** 1.652 - (tsoil / 41.748) ** 7.19\n if float_lt(tfac, 0.0):\n tfac = 0.0\n else:\n tfac = 0.0\n return tfac\n",
"step-3": "<mask token>\n__author__ = 'Martin De Kauwe'\n__version__ = '1.0 (25.02.2011)'\n__email__ = '[email protected]'\n\n\nclass DecompFactors(object):\n \"\"\" Calculate C and N litter production rates \"\"\"\n\n def __init__(self, control, params, state, fluxes, met_data):\n \"\"\"\n Parameters\n ----------\n control : integers, structure\n model control flags\n params: floats, structure\n model parameters\n state: floats, structure\n model state\n fluxes : floats, structure\n model fluxes\n met_data : floats, dictionary\n meteorological forcing data\n\n \"\"\"\n self.params = params\n self.fluxes = fluxes\n self.control = control\n self.state = state\n self.met_data = met_data\n self.wb = WaterBalance(self.control, self.params, self.state, self.\n fluxes, self.met_data)\n\n def decay_rates(self, project_day):\n \"\"\" Model decay rates - temperature dependency (i.e. increase with temp)\n [See section A8 in Comins and McMurtrie 1993].\n\n Parameters:\n -----------\n project_day : int\n current simulation day (index)\n\n \"\"\"\n tempact = self.soil_temp_factor(project_day)\n wtfac = self.wb.calculate_soil_water_fac(topsoil=True)\n self.params.decayrate[0] = self.params.kdec1 * math.exp(-3.0 * self\n .params.ligshoot) * tempact * wtfac\n self.params.decayrate[1] = self.params.kdec2 * tempact * wtfac\n self.params.decayrate[2] = self.params.kdec3 * math.exp(-3.0 * self\n .params.ligroot) * tempact * wtfac\n self.params.decayrate[3] = self.params.kdec4 * tempact * wtfac\n self.params.decayrate[4] = self.params.kdec5 * (1.0 - 0.75 * self.\n params.finesoil) * tempact * wtfac\n self.params.decayrate[5] = self.params.kdec6 * tempact * wtfac\n self.params.decayrate[6] = self.params.kdec7 * tempact * wtfac\n\n def soil_temp_factor(self, project_day):\n \"\"\"Soil-temperature activity factor (A9).\n\n Parameters:\n -----------\n project_day : int\n current simulation day (index)\n\n Returns:\n --------\n tfac : float\n soil temperature factor [degC]\n\n \"\"\"\n tsoil = self.met_data['tsoil'][project_day]\n if float_gt(tsoil, 0.0):\n tfac = 0.0326 + 0.00351 * tsoil ** 1.652 - (tsoil / 41.748) ** 7.19\n if float_lt(tfac, 0.0):\n tfac = 0.0\n else:\n tfac = 0.0\n return tfac\n",
"step-4": "<mask token>\nimport math\nfrom water_balance import WaterBalance\nfrom utilities import float_eq, float_lt, float_le, float_gt, float_ge, clip\n__author__ = 'Martin De Kauwe'\n__version__ = '1.0 (25.02.2011)'\n__email__ = '[email protected]'\n\n\nclass DecompFactors(object):\n \"\"\" Calculate C and N litter production rates \"\"\"\n\n def __init__(self, control, params, state, fluxes, met_data):\n \"\"\"\n Parameters\n ----------\n control : integers, structure\n model control flags\n params: floats, structure\n model parameters\n state: floats, structure\n model state\n fluxes : floats, structure\n model fluxes\n met_data : floats, dictionary\n meteorological forcing data\n\n \"\"\"\n self.params = params\n self.fluxes = fluxes\n self.control = control\n self.state = state\n self.met_data = met_data\n self.wb = WaterBalance(self.control, self.params, self.state, self.\n fluxes, self.met_data)\n\n def decay_rates(self, project_day):\n \"\"\" Model decay rates - temperature dependency (i.e. increase with temp)\n [See section A8 in Comins and McMurtrie 1993].\n\n Parameters:\n -----------\n project_day : int\n current simulation day (index)\n\n \"\"\"\n tempact = self.soil_temp_factor(project_day)\n wtfac = self.wb.calculate_soil_water_fac(topsoil=True)\n self.params.decayrate[0] = self.params.kdec1 * math.exp(-3.0 * self\n .params.ligshoot) * tempact * wtfac\n self.params.decayrate[1] = self.params.kdec2 * tempact * wtfac\n self.params.decayrate[2] = self.params.kdec3 * math.exp(-3.0 * self\n .params.ligroot) * tempact * wtfac\n self.params.decayrate[3] = self.params.kdec4 * tempact * wtfac\n self.params.decayrate[4] = self.params.kdec5 * (1.0 - 0.75 * self.\n params.finesoil) * tempact * wtfac\n self.params.decayrate[5] = self.params.kdec6 * tempact * wtfac\n self.params.decayrate[6] = self.params.kdec7 * tempact * wtfac\n\n def soil_temp_factor(self, project_day):\n \"\"\"Soil-temperature activity factor (A9).\n\n Parameters:\n -----------\n project_day : int\n current simulation day (index)\n\n Returns:\n --------\n tfac : float\n soil temperature factor [degC]\n\n \"\"\"\n tsoil = self.met_data['tsoil'][project_day]\n if float_gt(tsoil, 0.0):\n tfac = 0.0326 + 0.00351 * tsoil ** 1.652 - (tsoil / 41.748) ** 7.19\n if float_lt(tfac, 0.0):\n tfac = 0.0\n else:\n tfac = 0.0\n return tfac\n",
"step-5": "\"\"\" Soil and water decomposition rates \"\"\"\n\nimport math\n\nfrom water_balance import WaterBalance\nfrom utilities import float_eq, float_lt, float_le, float_gt, float_ge, clip\n\n__author__ = \"Martin De Kauwe\"\n__version__ = \"1.0 (25.02.2011)\"\n__email__ = \"[email protected]\"\n\n\nclass DecompFactors(object):\n \"\"\" Calculate C and N litter production rates \"\"\"\n def __init__(self, control, params, state, fluxes, met_data):\n \"\"\"\n Parameters\n ----------\n control : integers, structure\n model control flags\n params: floats, structure\n model parameters\n state: floats, structure\n model state\n fluxes : floats, structure\n model fluxes\n met_data : floats, dictionary\n meteorological forcing data\n\n \"\"\"\n self.params = params\n self.fluxes = fluxes\n self.control = control\n self.state = state\n self.met_data = met_data\n\n self.wb = WaterBalance(self.control, self.params, self.state,\n self.fluxes, self.met_data)\n\n def decay_rates(self, project_day):\n \"\"\" Model decay rates - temperature dependency (i.e. increase with temp)\n [See section A8 in Comins and McMurtrie 1993].\n\n Parameters:\n -----------\n project_day : int\n current simulation day (index)\n\n \"\"\"\n # temperature and water factors for decomposition\n tempact = self.soil_temp_factor(project_day)\n wtfac = self.wb.calculate_soil_water_fac(topsoil=True)\n\n # decay rate of surface structural pool\n self.params.decayrate[0] = (self.params.kdec1 *\n math.exp(-3. * self.params.ligshoot) *\n tempact * wtfac)\n\n # decay rate of surface metabolic pool\n self.params.decayrate[1] = self.params.kdec2 * tempact * wtfac\n\n\n # decay rate of soil structural pool\n self.params.decayrate[2] = (self.params.kdec3 *\n math.exp(-3. * self.params.ligroot) *\n tempact * wtfac)\n\n # decay rate of soil metabolic pool\n self.params.decayrate[3] = self.params.kdec4 * tempact * wtfac\n\n\n # decay rate of active pool\n self.params.decayrate[4] = (self.params.kdec5 *\n (1.0 - 0.75 * self.params.finesoil) *\n tempact * wtfac)\n\n # decay rate of slow pool\n self.params.decayrate[5] = self.params.kdec6 * tempact * wtfac\n\n # decay rate of passive pool\n self.params.decayrate[6] = self.params.kdec7 * tempact * wtfac\n\n def soil_temp_factor(self, project_day):\n \"\"\"Soil-temperature activity factor (A9).\n\n Parameters:\n -----------\n project_day : int\n current simulation day (index)\n\n Returns:\n --------\n tfac : float\n soil temperature factor [degC]\n\n \"\"\"\n tsoil = self.met_data['tsoil'][project_day]\n\n if float_gt(tsoil, 0.0):\n tfac = (0.0326 + 0.00351 * tsoil**1.652 - (tsoil / 41.748)**7.19)\n if float_lt(tfac, 0.0):\n tfac = 0.0\n else:\n # negative number cannot be raised to a fractional power\n # number would need to be complex\n tfac = 0.0\n\n return tfac\n",
"step-ids": [
2,
5,
6,
7,
8
]
}
|
[
2,
5,
6,
7,
8
] |
from django.db import models
from django.contrib.auth.models import User, Group
from userena.models import UserenaBaseProfile
from django.db.models.signals import post_save
from tastypie.models import create_api_key
class UserProfile(UserenaBaseProfile):
# user reference
user = models.OneToOneField(User)
facebook_id = models.CharField(max_length = 128, blank = True, null = True)
class Meta:
permissions = (
('change_profile', 'Change profile'),
('view_profile', 'View profile'),
('delete_profile', 'Delete profile'),
)
def create_user_profile(sender, instance, created, **kwargs):
"""
    Create user profile and set the permissions
"""
if created and instance.pk >= 0:
UserProfile.objects.create(user=instance)
# get default group, but not for anonymous
try:
default_group = Group.objects.get(name = "default_users")
instance.groups.add(default_group)
        except Group.DoesNotExist:
pass
post_save.connect(create_user_profile, sender=User)
# generate api key for the user when the user is created
post_save.connect(create_api_key, sender=User)
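# Hypothetical usage sketch (assumes a configured Django project; the
# attribute names below follow userena/tastypie conventions and are not
# defined in this file):
#
#   user = User.objects.create_user('alice', 'alice@example.com', 's3cret')
#   UserProfile.objects.get(user=user)  # created by create_user_profile
#   user.api_key.key                    # created by tastypie's create_api_key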
|
normal
|
{
"blob_id": "6e6f153857879da625f57f0382f1997fcae4f6c8",
"index": 6041,
"step-1": "<mask token>\n\n\nclass UserProfile(UserenaBaseProfile):\n user = models.OneToOneField(User)\n facebook_id = models.CharField(max_length=128, blank=True, null=True)\n\n\n class Meta:\n permissions = ('change_profile', 'Change profile'), ('view_profile',\n 'View profile'), ('delete_profile', 'Delete profile')\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass UserProfile(UserenaBaseProfile):\n user = models.OneToOneField(User)\n facebook_id = models.CharField(max_length=128, blank=True, null=True)\n\n\n class Meta:\n permissions = ('change_profile', 'Change profile'), ('view_profile',\n 'View profile'), ('delete_profile', 'Delete profile')\n\n\ndef create_user_profile(sender, instance, created, **kwargs):\n \"\"\"\n Create user profie and set the permissions\n \"\"\"\n if created and instance.pk >= 0:\n UserProfile.objects.create(user=instance)\n try:\n default_group = Group.objects.get(name='default_users')\n instance.groups.add(default_group)\n except:\n pass\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass UserProfile(UserenaBaseProfile):\n user = models.OneToOneField(User)\n facebook_id = models.CharField(max_length=128, blank=True, null=True)\n\n\n class Meta:\n permissions = ('change_profile', 'Change profile'), ('view_profile',\n 'View profile'), ('delete_profile', 'Delete profile')\n\n\ndef create_user_profile(sender, instance, created, **kwargs):\n \"\"\"\n Create user profie and set the permissions\n \"\"\"\n if created and instance.pk >= 0:\n UserProfile.objects.create(user=instance)\n try:\n default_group = Group.objects.get(name='default_users')\n instance.groups.add(default_group)\n except:\n pass\n\n\npost_save.connect(create_user_profile, sender=User)\npost_save.connect(create_api_key, sender=User)\n",
"step-4": "from django.db import models\nfrom django.contrib.auth.models import User, Group\nfrom userena.models import UserenaBaseProfile\nfrom django.db.models.signals import post_save\nfrom tastypie.models import create_api_key\n\n\nclass UserProfile(UserenaBaseProfile):\n user = models.OneToOneField(User)\n facebook_id = models.CharField(max_length=128, blank=True, null=True)\n\n\n class Meta:\n permissions = ('change_profile', 'Change profile'), ('view_profile',\n 'View profile'), ('delete_profile', 'Delete profile')\n\n\ndef create_user_profile(sender, instance, created, **kwargs):\n \"\"\"\n Create user profie and set the permissions\n \"\"\"\n if created and instance.pk >= 0:\n UserProfile.objects.create(user=instance)\n try:\n default_group = Group.objects.get(name='default_users')\n instance.groups.add(default_group)\n except:\n pass\n\n\npost_save.connect(create_user_profile, sender=User)\npost_save.connect(create_api_key, sender=User)\n",
"step-5": "from django.db import models\nfrom django.contrib.auth.models import User, Group\nfrom userena.models import UserenaBaseProfile\nfrom django.db.models.signals import post_save\nfrom tastypie.models import create_api_key\n\nclass UserProfile(UserenaBaseProfile):\n # user reference\n user = models.OneToOneField(User)\n \n facebook_id = models.CharField(max_length = 128, blank = True, null = True)\n \n class Meta:\n permissions = (\n ('change_profile', 'Change profile'),\n ('view_profile', 'View profile'),\n ('delete_profile', 'Delete profile'),\n )\n \ndef create_user_profile(sender, instance, created, **kwargs):\n \"\"\"\n Create user profie and set the permissions\n \"\"\"\n if created and instance.pk >= 0:\n UserProfile.objects.create(user=instance)\n \n # get default group, but not for anonymous\n try:\n default_group = Group.objects.get(name = \"default_users\")\n instance.groups.add(default_group)\n except:\n pass\n \npost_save.connect(create_user_profile, sender=User)\n\n# generate api key for the user when the user is created\npost_save.connect(create_api_key, sender=User)",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
def connect(hostip=hostip, hostport=hostport):
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
IP_address = hostip
Port = hostport
server.connect((IP_address, Port))
return server
def terminal_mode():
server = connect()
server.send(bytes('Connected via Terminal. Hello!\n', 'UTF-8'))
while True:
sockets_list = [sys.stdin, server]
""" There are two possible input situations. Either the
user wants to give manual input to send to other people,
or the server is sending a message to be printed on the
screen. Select returns from sockets_list, the stream that
is reader for input. So for example, if the server wants
to send a message, then the if condition will hold true
below.If the user wants to send a message, the else
condition will evaluate as true"""
read_sockets, write_socket, error_socket = select.select(sockets_list,
[], [])
for socks in read_sockets:
if socks == server:
message = socks.recv(2048)
sys.stdout.write('[Server]: ' + message.decode('UTF-8'))
sys.stdout.write('\n\n[You]: ')
sys.stdout.flush()
else:
message = sys.stdin.readline()
                if message.strip() == 'exit':
return
else:
server.send(bytes(message, 'UTF-8'))
print('Connection Closed.')
server.close()
def send_command(message):
clientsocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
clientsocket.connect((hostip, hostport))
clientsocket.recv(2048)
clientsocket.send(bytes(message, 'UTF-8'))
response = clientsocket.recv(2048)
clientsocket.close()
return response.decode('UTF-8')
def WIN_read_socket(server):
sockets_list = [server]
read_sockets, write_socket, error_socket = select.select(sockets_list,
[], [])
while True:
for socks in read_sockets:
if socks == server:
message = socks.recv(2048)
sys.stdout.write('[Server]: ' + message.decode('UTF-8'))
sys.stdout.write('\n\n[You]: ')
sys.stdout.flush()
print('Connection Closed.')
server.close()
def WIN_write_socket(server):
while True:
message = sys.stdin.readline()
        if message.strip() == 'exit':
return
else:
server.send(bytes(message, 'UTF-8'))
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def connect(hostip=hostip, hostport=hostport):
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
IP_address = hostip
Port = hostport
server.connect((IP_address, Port))
return server
def terminal_mode():
server = connect()
server.send(bytes('Connected via Terminal. Hello!\n', 'UTF-8'))
while True:
sockets_list = [sys.stdin, server]
""" There are two possible input situations. Either the
user wants to give manual input to send to other people,
or the server is sending a message to be printed on the
screen. Select returns from sockets_list, the stream that
is reader for input. So for example, if the server wants
to send a message, then the if condition will hold true
below.If the user wants to send a message, the else
condition will evaluate as true"""
read_sockets, write_socket, error_socket = select.select(sockets_list,
[], [])
for socks in read_sockets:
if socks == server:
message = socks.recv(2048)
sys.stdout.write('[Server]: ' + message.decode('UTF-8'))
sys.stdout.write('\n\n[You]: ')
sys.stdout.flush()
else:
message = sys.stdin.readline()
                if message.strip() == 'exit':
return
else:
server.send(bytes(message, 'UTF-8'))
print('Connection Closed.')
server.close()
def send_command(message):
clientsocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
clientsocket.connect((hostip, hostport))
clientsocket.recv(2048)
clientsocket.send(bytes(message, 'UTF-8'))
response = clientsocket.recv(2048)
clientsocket.close()
return response.decode('UTF-8')
def WIN_read_socket(server):
sockets_list = [server]
read_sockets, write_socket, error_socket = select.select(sockets_list,
[], [])
while True:
for socks in read_sockets:
if socks == server:
message = socks.recv(2048)
sys.stdout.write('[Server]: ' + message.decode('UTF-8'))
sys.stdout.write('\n\n[You]: ')
sys.stdout.flush()
print('Connection Closed.')
server.close()
def WIN_write_socket(server):
while True:
message = sys.stdin.readline()
        if message.strip() == 'exit':
return
else:
server.send(bytes(message, 'UTF-8'))
try:
if str(sys.argv[1]) == 'terminal':
if str(sys.argv[2]) == 'windows':
from threading import *
server = connect()
server.send(bytes('Connected via Terminal. Hello!\n', 'UTF-8'))
Thread(target=WIN_read_socket, args=(server,)).start()
Thread(target=WIN_write_socket, args=(server,)).start()
else:
terminal_mode()
except:
pass
<|reserved_special_token_1|>
<|reserved_special_token_0|>
hostip = 'localhost'
hostport = 8089
def connect(hostip=hostip, hostport=hostport):
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
IP_address = hostip
Port = hostport
server.connect((IP_address, Port))
return server
def terminal_mode():
server = connect()
server.send(bytes('Connected via Terminal. Hello!\n', 'UTF-8'))
while True:
sockets_list = [sys.stdin, server]
""" There are two possible input situations. Either the
user wants to give manual input to send to other people,
or the server is sending a message to be printed on the
screen. Select returns from sockets_list, the stream that
is reader for input. So for example, if the server wants
to send a message, then the if condition will hold true
below.If the user wants to send a message, the else
condition will evaluate as true"""
read_sockets, write_socket, error_socket = select.select(sockets_list,
[], [])
for socks in read_sockets:
if socks == server:
message = socks.recv(2048)
sys.stdout.write('[Server]: ' + message.decode('UTF-8'))
sys.stdout.write('\n\n[You]: ')
sys.stdout.flush()
else:
message = sys.stdin.readline()
                if message.strip() == 'exit':
return
else:
server.send(bytes(message, 'UTF-8'))
print('Connection Closed.')
server.close()
def send_command(message):
clientsocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
clientsocket.connect((hostip, hostport))
clientsocket.recv(2048)
clientsocket.send(bytes(message, 'UTF-8'))
response = clientsocket.recv(2048)
clientsocket.close()
return response.decode('UTF-8')
def WIN_read_socket(server):
sockets_list = [server]
read_sockets, write_socket, error_socket = select.select(sockets_list,
[], [])
while True:
for socks in read_sockets:
if socks == server:
message = socks.recv(2048)
sys.stdout.write('[Server]: ' + message.decode('UTF-8'))
sys.stdout.write('\n\n[You]: ')
sys.stdout.flush()
print('Connection Closed.')
server.close()
def WIN_write_socket(server):
while True:
message = sys.stdin.readline()
        if message.strip() == 'exit':
return
else:
server.send(bytes(message, 'UTF-8'))
try:
if str(sys.argv[1]) == 'terminal':
if str(sys.argv[2]) == 'windows':
from threading import *
server = connect()
server.send(bytes('Connected via Terminal. Hello!\n', 'UTF-8'))
Thread(target=WIN_read_socket, args=(server,)).start()
Thread(target=WIN_write_socket, args=(server,)).start()
else:
terminal_mode()
except:
pass
<|reserved_special_token_1|>
import socket
import select
import sys, os
from contextlib import contextmanager
hostip = 'localhost'
hostport = 8089
def connect(hostip=hostip, hostport=hostport):
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
IP_address = hostip
Port = hostport
server.connect((IP_address, Port))
return server
def terminal_mode():
server = connect()
server.send(bytes('Connected via Terminal. Hello!\n', 'UTF-8'))
while True:
sockets_list = [sys.stdin, server]
""" There are two possible input situations. Either the
user wants to give manual input to send to other people,
or the server is sending a message to be printed on the
screen. Select returns from sockets_list, the stream that
is reader for input. So for example, if the server wants
to send a message, then the if condition will hold true
below.If the user wants to send a message, the else
condition will evaluate as true"""
read_sockets, write_socket, error_socket = select.select(sockets_list,
[], [])
for socks in read_sockets:
if socks == server:
message = socks.recv(2048)
sys.stdout.write('[Server]: ' + message.decode('UTF-8'))
sys.stdout.write('\n\n[You]: ')
sys.stdout.flush()
else:
message = sys.stdin.readline()
                if message.strip() == 'exit':
return
else:
server.send(bytes(message, 'UTF-8'))
print('Connection Closed.')
server.close()
def send_command(message):
clientsocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
clientsocket.connect((hostip, hostport))
clientsocket.recv(2048)
clientsocket.send(bytes(message, 'UTF-8'))
response = clientsocket.recv(2048)
clientsocket.close()
return response.decode('UTF-8')
def WIN_read_socket(server):
sockets_list = [server]
read_sockets, write_socket, error_socket = select.select(sockets_list,
[], [])
while True:
for socks in read_sockets:
if socks == server:
message = socks.recv(2048)
sys.stdout.write('[Server]: ' + message.decode('UTF-8'))
sys.stdout.write('\n\n[You]: ')
sys.stdout.flush()
print('Connection Closed.')
server.close()
def WIN_write_socket(server):
while True:
message = sys.stdin.readline()
        if message.strip() == 'exit':
return
else:
server.send(bytes(message, 'UTF-8'))
try:
if str(sys.argv[1]) == 'terminal':
if str(sys.argv[2]) == 'windows':
from threading import *
server = connect()
server.send(bytes('Connected via Terminal. Hello!\n', 'UTF-8'))
Thread(target=WIN_read_socket, args=(server,)).start()
Thread(target=WIN_write_socket, args=(server,)).start()
else:
terminal_mode()
except:
pass
<|reserved_special_token_1|>
##
#Author: Stephen
##
import socket
import select
import sys, os
from contextlib import contextmanager
hostip = 'localhost'
hostport = 8089
def connect(hostip=hostip,hostport=hostport):
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
IP_address = hostip
Port = hostport
server.connect((IP_address, Port))
return server
def terminal_mode():
server = connect()
server.send(bytes('Connected via Terminal. Hello!\n','UTF-8'))
while True:
# maintains a list of possible input streams
sockets_list = [sys.stdin, server]
""" There are two possible input situations. Either the
user wants to give manual input to send to other people,
or the server is sending a message to be printed on the
screen. Select returns from sockets_list, the stream that
is reader for input. So for example, if the server wants
to send a message, then the if condition will hold true
below.If the user wants to send a message, the else
condition will evaluate as true"""
        read_sockets, write_socket, error_socket = select.select(sockets_list, [], [])
for socks in read_sockets:
if socks == server:
message = socks.recv(2048)
sys.stdout.write("[Server]: "+message.decode("UTF-8"))
sys.stdout.write("\n\n[You]: ")
sys.stdout.flush()
else:
message = sys.stdin.readline()
                if message.strip() == 'exit':
return
else:
server.send(bytes(message, 'UTF-8'))
print('Connection Closed.')
server.close()
def send_command(message):
clientsocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
clientsocket.connect((hostip, hostport))
    clientsocket.recv(2048)  # suppress the welcome message
clientsocket.send(bytes(message, 'UTF-8'))
response = clientsocket.recv(2048)
clientsocket.close()
#print(response.decode("UTF-8"))
return response.decode("UTF-8")
def WIN_read_socket(server):
    sockets_list = [server]  # only the socket list differs from terminal_mode
    read_sockets, write_socket, error_socket = select.select(sockets_list, [], [])
while True:
for socks in read_sockets:
if socks == server:
                # read whatever the server sent
message = socks.recv(2048)
sys.stdout.write("[Server]: "+message.decode("UTF-8"))
sys.stdout.write("\n\n[You]: ")
sys.stdout.flush()
print('Connection Closed.')
server.close()
def WIN_write_socket(server):
while True:
message = sys.stdin.readline()
        if message.strip() == 'exit':
return
else:
server.send(bytes(message, 'UTF-8'))
try:
if str(sys.argv[1]) == 'terminal':
if str(sys.argv[2]) == 'windows':
from threading import *
server = connect()
server.send(bytes('Connected via Terminal. Hello!\n','UTF-8'))
Thread(target=WIN_read_socket, args=(server,)).start()
Thread(target=WIN_write_socket, args=(server,)).start()
else:
terminal_mode()
except:
pass
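# --- Illustration only (not part of the original script): a self-contained
# sketch of the select() pattern the terminal_mode docstring describes,
# using a local socketpair in place of a live chat server so it runs anywhere.
def _select_demo():
    a, b = socket.socketpair()
    b.send(b'hello')
    readable, _, _ = select.select([a], [], [], 1.0)
    for sock in readable:        # 'a' is ready, so recv() will not block
        print(sock.recv(2048))   # b'hello'
    a.close()
    b.close()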
|
flexible
|
{
"blob_id": "5cdf8cd4bfebb9aab2e8f421047fc1ba3190d566",
"index": 3451,
"step-1": "<mask token>\n\n\ndef connect(hostip=hostip, hostport=hostport):\n server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n IP_address = hostip\n Port = hostport\n server.connect((IP_address, Port))\n return server\n\n\ndef terminal_mode():\n server = connect()\n server.send(bytes('Connected via Terminal. Hello!\\n', 'UTF-8'))\n while True:\n sockets_list = [sys.stdin, server]\n \"\"\" There are two possible input situations. Either the\n user wants to give manual input to send to other people,\n or the server is sending a message to be printed on the\n screen. Select returns from sockets_list, the stream that\n is reader for input. So for example, if the server wants\n to send a message, then the if condition will hold true\n below.If the user wants to send a message, the else\n condition will evaluate as true\"\"\"\n read_sockets, write_socket, error_socket = select.select(sockets_list,\n [], [])\n for socks in read_sockets:\n if socks == server:\n message = socks.recv(2048)\n sys.stdout.write('[Server]: ' + message.decode('UTF-8'))\n sys.stdout.write('\\n\\n[You]: ')\n sys.stdout.flush()\n else:\n message = sys.stdin.readline()\n if message == 'exit':\n return\n else:\n server.send(bytes(message, 'UTF-8'))\n print('Connection Closed.')\n server.close()\n\n\ndef send_command(message):\n clientsocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n clientsocket.connect((hostip, hostport))\n clientsocket.recv(2048)\n clientsocket.send(bytes(message, 'UTF-8'))\n response = clientsocket.recv(2048)\n clientsocket.close()\n return response.decode('UTF-8')\n\n\ndef WIN_read_socket(server):\n sockets_list = [server]\n read_sockets, write_socket, error_socket = select.select(sockets_list,\n [], [])\n while True:\n for socks in read_sockets:\n if socks == server:\n message = socks.recv(2048)\n sys.stdout.write('[Server]: ' + message.decode('UTF-8'))\n sys.stdout.write('\\n\\n[You]: ')\n sys.stdout.flush()\n print('Connection Closed.')\n server.close()\n\n\ndef WIN_write_socket(server):\n while True:\n message = sys.stdin.readline()\n if message == 'exit':\n return\n else:\n server.send(bytes(message, 'UTF-8'))\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef connect(hostip=hostip, hostport=hostport):\n server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n IP_address = hostip\n Port = hostport\n server.connect((IP_address, Port))\n return server\n\n\ndef terminal_mode():\n server = connect()\n server.send(bytes('Connected via Terminal. Hello!\\n', 'UTF-8'))\n while True:\n sockets_list = [sys.stdin, server]\n \"\"\" There are two possible input situations. Either the\n user wants to give manual input to send to other people,\n or the server is sending a message to be printed on the\n screen. Select returns from sockets_list, the stream that\n is reader for input. So for example, if the server wants\n to send a message, then the if condition will hold true\n below.If the user wants to send a message, the else\n condition will evaluate as true\"\"\"\n read_sockets, write_socket, error_socket = select.select(sockets_list,\n [], [])\n for socks in read_sockets:\n if socks == server:\n message = socks.recv(2048)\n sys.stdout.write('[Server]: ' + message.decode('UTF-8'))\n sys.stdout.write('\\n\\n[You]: ')\n sys.stdout.flush()\n else:\n message = sys.stdin.readline()\n if message == 'exit':\n return\n else:\n server.send(bytes(message, 'UTF-8'))\n print('Connection Closed.')\n server.close()\n\n\ndef send_command(message):\n clientsocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n clientsocket.connect((hostip, hostport))\n clientsocket.recv(2048)\n clientsocket.send(bytes(message, 'UTF-8'))\n response = clientsocket.recv(2048)\n clientsocket.close()\n return response.decode('UTF-8')\n\n\ndef WIN_read_socket(server):\n sockets_list = [server]\n read_sockets, write_socket, error_socket = select.select(sockets_list,\n [], [])\n while True:\n for socks in read_sockets:\n if socks == server:\n message = socks.recv(2048)\n sys.stdout.write('[Server]: ' + message.decode('UTF-8'))\n sys.stdout.write('\\n\\n[You]: ')\n sys.stdout.flush()\n print('Connection Closed.')\n server.close()\n\n\ndef WIN_write_socket(server):\n while True:\n message = sys.stdin.readline()\n if message == 'exit':\n return\n else:\n server.send(bytes(message, 'UTF-8'))\n\n\ntry:\n if str(sys.argv[1]) == 'terminal':\n if str(sys.argv[2]) == 'windows':\n from threading import *\n server = connect()\n server.send(bytes('Connected via Terminal. Hello!\\n', 'UTF-8'))\n Thread(target=WIN_read_socket, args=(server,)).start()\n Thread(target=WIN_write_socket, args=(server,)).start()\n else:\n terminal_mode()\nexcept:\n pass\n",
"step-3": "<mask token>\nhostip = 'localhost'\nhostport = 8089\n\n\ndef connect(hostip=hostip, hostport=hostport):\n server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n IP_address = hostip\n Port = hostport\n server.connect((IP_address, Port))\n return server\n\n\ndef terminal_mode():\n server = connect()\n server.send(bytes('Connected via Terminal. Hello!\\n', 'UTF-8'))\n while True:\n sockets_list = [sys.stdin, server]\n \"\"\" There are two possible input situations. Either the\n user wants to give manual input to send to other people,\n or the server is sending a message to be printed on the\n screen. Select returns from sockets_list, the stream that\n is reader for input. So for example, if the server wants\n to send a message, then the if condition will hold true\n below.If the user wants to send a message, the else\n condition will evaluate as true\"\"\"\n read_sockets, write_socket, error_socket = select.select(sockets_list,\n [], [])\n for socks in read_sockets:\n if socks == server:\n message = socks.recv(2048)\n sys.stdout.write('[Server]: ' + message.decode('UTF-8'))\n sys.stdout.write('\\n\\n[You]: ')\n sys.stdout.flush()\n else:\n message = sys.stdin.readline()\n if message == 'exit':\n return\n else:\n server.send(bytes(message, 'UTF-8'))\n print('Connection Closed.')\n server.close()\n\n\ndef send_command(message):\n clientsocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n clientsocket.connect((hostip, hostport))\n clientsocket.recv(2048)\n clientsocket.send(bytes(message, 'UTF-8'))\n response = clientsocket.recv(2048)\n clientsocket.close()\n return response.decode('UTF-8')\n\n\ndef WIN_read_socket(server):\n sockets_list = [server]\n read_sockets, write_socket, error_socket = select.select(sockets_list,\n [], [])\n while True:\n for socks in read_sockets:\n if socks == server:\n message = socks.recv(2048)\n sys.stdout.write('[Server]: ' + message.decode('UTF-8'))\n sys.stdout.write('\\n\\n[You]: ')\n sys.stdout.flush()\n print('Connection Closed.')\n server.close()\n\n\ndef WIN_write_socket(server):\n while True:\n message = sys.stdin.readline()\n if message == 'exit':\n return\n else:\n server.send(bytes(message, 'UTF-8'))\n\n\ntry:\n if str(sys.argv[1]) == 'terminal':\n if str(sys.argv[2]) == 'windows':\n from threading import *\n server = connect()\n server.send(bytes('Connected via Terminal. Hello!\\n', 'UTF-8'))\n Thread(target=WIN_read_socket, args=(server,)).start()\n Thread(target=WIN_write_socket, args=(server,)).start()\n else:\n terminal_mode()\nexcept:\n pass\n",
"step-4": "import socket\nimport select\nimport sys, os\nfrom contextlib import contextmanager\nhostip = 'localhost'\nhostport = 8089\n\n\ndef connect(hostip=hostip, hostport=hostport):\n server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n IP_address = hostip\n Port = hostport\n server.connect((IP_address, Port))\n return server\n\n\ndef terminal_mode():\n server = connect()\n server.send(bytes('Connected via Terminal. Hello!\\n', 'UTF-8'))\n while True:\n sockets_list = [sys.stdin, server]\n \"\"\" There are two possible input situations. Either the\n user wants to give manual input to send to other people,\n or the server is sending a message to be printed on the\n screen. Select returns from sockets_list, the stream that\n is reader for input. So for example, if the server wants\n to send a message, then the if condition will hold true\n below.If the user wants to send a message, the else\n condition will evaluate as true\"\"\"\n read_sockets, write_socket, error_socket = select.select(sockets_list,\n [], [])\n for socks in read_sockets:\n if socks == server:\n message = socks.recv(2048)\n sys.stdout.write('[Server]: ' + message.decode('UTF-8'))\n sys.stdout.write('\\n\\n[You]: ')\n sys.stdout.flush()\n else:\n message = sys.stdin.readline()\n if message == 'exit':\n return\n else:\n server.send(bytes(message, 'UTF-8'))\n print('Connection Closed.')\n server.close()\n\n\ndef send_command(message):\n clientsocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n clientsocket.connect((hostip, hostport))\n clientsocket.recv(2048)\n clientsocket.send(bytes(message, 'UTF-8'))\n response = clientsocket.recv(2048)\n clientsocket.close()\n return response.decode('UTF-8')\n\n\ndef WIN_read_socket(server):\n sockets_list = [server]\n read_sockets, write_socket, error_socket = select.select(sockets_list,\n [], [])\n while True:\n for socks in read_sockets:\n if socks == server:\n message = socks.recv(2048)\n sys.stdout.write('[Server]: ' + message.decode('UTF-8'))\n sys.stdout.write('\\n\\n[You]: ')\n sys.stdout.flush()\n print('Connection Closed.')\n server.close()\n\n\ndef WIN_write_socket(server):\n while True:\n message = sys.stdin.readline()\n if message == 'exit':\n return\n else:\n server.send(bytes(message, 'UTF-8'))\n\n\ntry:\n if str(sys.argv[1]) == 'terminal':\n if str(sys.argv[2]) == 'windows':\n from threading import *\n server = connect()\n server.send(bytes('Connected via Terminal. Hello!\\n', 'UTF-8'))\n Thread(target=WIN_read_socket, args=(server,)).start()\n Thread(target=WIN_write_socket, args=(server,)).start()\n else:\n terminal_mode()\nexcept:\n pass\n",
"step-5": "##\n#Author: Stephen\n##\nimport socket\nimport select\nimport sys, os\nfrom contextlib import contextmanager\n\nhostip = 'localhost'\nhostport = 8089\n\ndef connect(hostip=hostip,hostport=hostport):\n server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n IP_address = hostip\n Port = hostport\n server.connect((IP_address, Port))\n return server\n\ndef terminal_mode():\n server = connect()\n server.send(bytes('Connected via Terminal. Hello!\\n','UTF-8'))\n\n while True:\n # maintains a list of possible input streams\n sockets_list = [sys.stdin, server]\n \"\"\" There are two possible input situations. Either the\n user wants to give manual input to send to other people,\n or the server is sending a message to be printed on the\n screen. Select returns from sockets_list, the stream that\n is reader for input. So for example, if the server wants\n to send a message, then the if condition will hold true\n below.If the user wants to send a message, the else\n condition will evaluate as true\"\"\"\n read_sockets,write_socket, error_socket = select.select(sockets_list,[],[])\n\n for socks in read_sockets:\n if socks == server:\n message = socks.recv(2048)\n sys.stdout.write(\"[Server]: \"+message.decode(\"UTF-8\"))\n sys.stdout.write(\"\\n\\n[You]: \")\n sys.stdout.flush()\n else:\n message = sys.stdin.readline()\n if message == 'exit':\n return\n else:\n server.send(bytes(message, 'UTF-8'))\n\n print('Connection Closed.')\n server.close()\n\ndef send_command(message):\n clientsocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n clientsocket.connect((hostip, hostport))\n clientsocket.recv(2048)#supress welcome message\n clientsocket.send(bytes(message, 'UTF-8'))\n response = clientsocket.recv(2048)\n clientsocket.close()\n #print(response.decode(\"UTF-8\"))\n return response.decode(\"UTF-8\")\n\ndef WIN_read_socket(server):\n sockets_list = [server] #ONLY THIS IS DIFFERENT\n read_sockets,write_socket, error_socket = select.select(sockets_list,[],[])\n while True:\n for socks in read_sockets:\n if socks == server:\n # maintains a list of possible input streams\n message = socks.recv(2048)\n sys.stdout.write(\"[Server]: \"+message.decode(\"UTF-8\"))\n sys.stdout.write(\"\\n\\n[You]: \")\n sys.stdout.flush()\n print('Connection Closed.')\n server.close()\n \ndef WIN_write_socket(server):\n while True:\n message = sys.stdin.readline()\n if message == 'exit':\n return\n else:\n server.send(bytes(message, 'UTF-8'))\n \ntry:\n if str(sys.argv[1]) == 'terminal':\n if str(sys.argv[2]) == 'windows':\n from threading import *\n server = connect()\n server.send(bytes('Connected via Terminal. Hello!\\n','UTF-8'))\n Thread(target=WIN_read_socket, args=(server,)).start() \n Thread(target=WIN_write_socket, args=(server,)).start() \n else:\n terminal_mode()\nexcept:\n pass\n",
"step-ids": [
5,
6,
7,
8,
9
]
}
|
[
5,
6,
7,
8,
9
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def _insert_grace_notes(song):
for phrase in song.phrases:
for pe in phrase.phrase_elements:
if type(pe) != Segment:
continue
segment = pe
initial_len = len(segment.notes)
new_notes = []
flag = False
for i in range(len(pe.notes)):
if segment.notes[i].grace and not flag:
new_note = Note(pitch=phrase.scale.skip_up(segment.
notes[i].pitch, 1), new=True, duration=1 / 4)
new_notes += [new_note]
segment.notes[i].duration -= 1 / 4
flag = True
new_notes += [dataclasses.replace(segment.notes[i])]
assert len(new_notes) - initial_len <= 1
pe.notes = list(new_notes)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def _peaks(song):
for phrase in song.phrases:
for pe in phrase.phrase_elements:
if type(pe) == Segment:
if pe.direction != SegmentDirection.UPDOWN:
continue
for i in range(1, len(pe.notes)):
if pe.notes[i].pitch < pe.notes[i - 1].pitch:
pe.notes[i - 1].grace = True
print('sup', file=sys.stderr)
break
def _consonant(song):
pass
def _insert_grace_notes(song):
for phrase in song.phrases:
for pe in phrase.phrase_elements:
if type(pe) != Segment:
continue
segment = pe
initial_len = len(segment.notes)
new_notes = []
flag = False
for i in range(len(pe.notes)):
if segment.notes[i].grace and not flag:
new_note = Note(pitch=phrase.scale.skip_up(segment.
notes[i].pitch, 1), new=True, duration=1 / 4)
new_notes += [new_note]
segment.notes[i].duration -= 1 / 4
flag = True
new_notes += [dataclasses.replace(segment.notes[i])]
assert len(new_notes) - initial_len <= 1
pe.notes = list(new_notes)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def _peaks(song):
for phrase in song.phrases:
for pe in phrase.phrase_elements:
if type(pe) == Segment:
if pe.direction != SegmentDirection.UPDOWN:
continue
for i in range(1, len(pe.notes)):
if pe.notes[i].pitch < pe.notes[i - 1].pitch:
pe.notes[i - 1].grace = True
print('sup', file=sys.stderr)
break
def _consonant(song):
pass
def _insert_grace_notes(song):
for phrase in song.phrases:
for pe in phrase.phrase_elements:
if type(pe) != Segment:
continue
segment = pe
initial_len = len(segment.notes)
new_notes = []
flag = False
for i in range(len(pe.notes)):
if segment.notes[i].grace and not flag:
new_note = Note(pitch=phrase.scale.skip_up(segment.
notes[i].pitch, 1), new=True, duration=1 / 4)
new_notes += [new_note]
segment.notes[i].duration -= 1 / 4
flag = True
new_notes += [dataclasses.replace(segment.notes[i])]
assert len(new_notes) - initial_len <= 1
pe.notes = list(new_notes)
def add_grace_notes(song):
_peaks(song)
_insert_grace_notes(song)
<|reserved_special_token_1|>
import sys
from melody_types import *
import dataclasses
"""
Marks notes for grace notes
"""
# Mark grace notes on the peak note of every segment
def _peaks(song):
for phrase in song.phrases:
for pe in phrase.phrase_elements:
if type(pe) == Segment:
if pe.direction != SegmentDirection.UPDOWN:
continue
# Get peak note
for i in range(1, len(pe.notes)):
if pe.notes[i].pitch < pe.notes[i - 1].pitch:
pe.notes[i - 1].grace = True
print('sup', file=sys.stderr)
break
# Adds a grace note to consonant notes in every segment
def _consonant(song):
pass
def _insert_grace_notes(song):
for phrase in song.phrases:
for pe in phrase.phrase_elements:
if type(pe) != Segment:
continue
segment = pe
initial_len = len(segment.notes)
new_notes = []
flag = False
for i in range(len(pe.notes)):
if segment.notes[i].grace and not flag:
new_note = Note(pitch=phrase.scale.skip_up(segment.notes[i].pitch, 1), new=True, duration=1/4)
new_notes += [new_note]
segment.notes[i].duration -= 1/4
flag = True
new_notes += [dataclasses.replace(segment.notes[i])]
assert(len(new_notes) - initial_len <= 1)
pe.notes = list(new_notes)
def add_grace_notes(song):
_peaks(song)
_insert_grace_notes(song)
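# --- Illustration only (hypothetical stand-in types, not melody_types): the
# duration split performed by _insert_grace_notes, factored out so the
# arithmetic is easy to verify. Real pitches come from phrase.scale.skip_up,
# not from pitch + 1.
from dataclasses import dataclass

@dataclass
class _DemoNote:
    pitch: int
    duration: float
    grace: bool = False
    new: bool = False

def _demo_split(note):
    grace = _DemoNote(pitch=note.pitch + 1, duration=1 / 4, new=True)
    return [grace, _DemoNote(note.pitch, note.duration - 1 / 4, note.grace)]

# _demo_split(_DemoNote(pitch=60, duration=1.0, grace=True))
# -> a 1/4-beat grace note one step up, then the original note at 3/4 beat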
|
flexible
|
{
"blob_id": "ac83d7d39319c08c35302abfb312ebee463b75b2",
"index": 5130,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef _insert_grace_notes(song):\n for phrase in song.phrases:\n for pe in phrase.phrase_elements:\n if type(pe) != Segment:\n continue\n segment = pe\n initial_len = len(segment.notes)\n new_notes = []\n flag = False\n for i in range(len(pe.notes)):\n if segment.notes[i].grace and not flag:\n new_note = Note(pitch=phrase.scale.skip_up(segment.\n notes[i].pitch, 1), new=True, duration=1 / 4)\n new_notes += [new_note]\n segment.notes[i].duration -= 1 / 4\n flag = True\n new_notes += [dataclasses.replace(segment.notes[i])]\n assert len(new_notes) - initial_len <= 1\n pe.notes = list(new_notes)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef _peaks(song):\n for phrase in song.phrases:\n for pe in phrase.phrase_elements:\n if type(pe) == Segment:\n if pe.direction != SegmentDirection.UPDOWN:\n continue\n for i in range(1, len(pe.notes)):\n if pe.notes[i].pitch < pe.notes[i - 1].pitch:\n pe.notes[i - 1].grace = True\n print('sup', file=sys.stderr)\n break\n\n\ndef _consonant(song):\n pass\n\n\ndef _insert_grace_notes(song):\n for phrase in song.phrases:\n for pe in phrase.phrase_elements:\n if type(pe) != Segment:\n continue\n segment = pe\n initial_len = len(segment.notes)\n new_notes = []\n flag = False\n for i in range(len(pe.notes)):\n if segment.notes[i].grace and not flag:\n new_note = Note(pitch=phrase.scale.skip_up(segment.\n notes[i].pitch, 1), new=True, duration=1 / 4)\n new_notes += [new_note]\n segment.notes[i].duration -= 1 / 4\n flag = True\n new_notes += [dataclasses.replace(segment.notes[i])]\n assert len(new_notes) - initial_len <= 1\n pe.notes = list(new_notes)\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\ndef _peaks(song):\n for phrase in song.phrases:\n for pe in phrase.phrase_elements:\n if type(pe) == Segment:\n if pe.direction != SegmentDirection.UPDOWN:\n continue\n for i in range(1, len(pe.notes)):\n if pe.notes[i].pitch < pe.notes[i - 1].pitch:\n pe.notes[i - 1].grace = True\n print('sup', file=sys.stderr)\n break\n\n\ndef _consonant(song):\n pass\n\n\ndef _insert_grace_notes(song):\n for phrase in song.phrases:\n for pe in phrase.phrase_elements:\n if type(pe) != Segment:\n continue\n segment = pe\n initial_len = len(segment.notes)\n new_notes = []\n flag = False\n for i in range(len(pe.notes)):\n if segment.notes[i].grace and not flag:\n new_note = Note(pitch=phrase.scale.skip_up(segment.\n notes[i].pitch, 1), new=True, duration=1 / 4)\n new_notes += [new_note]\n segment.notes[i].duration -= 1 / 4\n flag = True\n new_notes += [dataclasses.replace(segment.notes[i])]\n assert len(new_notes) - initial_len <= 1\n pe.notes = list(new_notes)\n\n\ndef add_grace_notes(song):\n _peaks(song)\n _insert_grace_notes(song)\n",
"step-5": "import sys\nfrom melody_types import *\nimport dataclasses\n\"\"\"\nMarks notes for grace notes\n\"\"\"\n\n# Mark grace notes on the peak note of every segment\ndef _peaks(song):\n for phrase in song.phrases:\n for pe in phrase.phrase_elements:\n if type(pe) == Segment:\n if pe.direction != SegmentDirection.UPDOWN:\n continue\n\n # Get peak note\n for i in range(1, len(pe.notes)):\n if pe.notes[i].pitch < pe.notes[i - 1].pitch:\n pe.notes[i - 1].grace = True\n print('sup', file=sys.stderr)\n break\n\n# Adds a grace note to consonant notes in every segment\ndef _consonant(song):\n pass\n\ndef _insert_grace_notes(song):\n for phrase in song.phrases:\n for pe in phrase.phrase_elements:\n if type(pe) != Segment:\n continue\n\n segment = pe\n initial_len = len(segment.notes)\n\n new_notes = []\n flag = False\n for i in range(len(pe.notes)):\n if segment.notes[i].grace and not flag:\n new_note = Note(pitch=phrase.scale.skip_up(segment.notes[i].pitch, 1), new=True, duration=1/4)\n new_notes += [new_note]\n segment.notes[i].duration -= 1/4\n flag = True\n new_notes += [dataclasses.replace(segment.notes[i])]\n\n assert(len(new_notes) - initial_len <= 1)\n pe.notes = list(new_notes)\n\ndef add_grace_notes(song):\n _peaks(song)\n _insert_grace_notes(song)\n",
"step-ids": [
0,
1,
3,
4,
6
]
}
|
[
0,
1,
3,
4,
6
] |
numbers = [1, 1, 1, 1, 1]
new_numbers = [2, 2, 2, 3, 3]
print(numbers + new_numbers)
print(numbers * 5)
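# Expected output:
#   [1, 1, 1, 1, 1, 2, 2, 2, 3, 3]  (concatenation of the two lists)
#   a list of twenty-five 1s         ([1, 1, 1, 1, 1] repeated 5 times)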
|
normal
|
{
"blob_id": "843df062702c9abf34cf14d911d927d786f1d912",
"index": 1573,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(numbers + new_numbers)\nprint(numbers * 5)\n",
"step-3": "numbers = [1, 1, 1, 1, 1]\nnew_numbers = [2, 2, 2, 3, 3]\nprint(numbers + new_numbers)\nprint(numbers * 5)\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
import os, re
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': ':memory:'
}
}
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.admin',
'django.contrib.sessions',
'django.contrib.contenttypes',
'django.contrib.sites',
'maintenancemode',
)
MIDDLEWARE = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'maintenancemode.middleware.MaintenanceModeMiddleware',
)
ROOT_URLCONF = 'maintenancemode.tests'
SITE_ID = 1
MAINTENANCE_MODE = True # or ``False`` and use ``maintenance`` command
MAINTENANCE_IGNORE_URLS = (
re.compile(r'^/ignored.*'),
)
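# e.g. '/ignored' and '/ignored/anything' match the pattern above and are
# exempt from maintenance mode, while '/' and '/admin/' are not.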
|
normal
|
{
"blob_id": "34ecf2bd9bc72a98aba4584880a198dd24899dbe",
"index": 6218,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nDATABASES = {'default': {'ENGINE': 'django.db.backends.sqlite3', 'NAME':\n ':memory:'}}\nINSTALLED_APPS = ('django.contrib.auth', 'django.contrib.admin',\n 'django.contrib.sessions', 'django.contrib.contenttypes',\n 'django.contrib.sites', 'maintenancemode')\nMIDDLEWARE = ('django.contrib.sessions.middleware.SessionMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'maintenancemode.middleware.MaintenanceModeMiddleware')\nROOT_URLCONF = 'maintenancemode.tests'\nSITE_ID = 1\nMAINTENANCE_MODE = True\nMAINTENANCE_IGNORE_URLS = re.compile('^/ignored.*'),\n",
"step-3": "import os, re\nDATABASES = {'default': {'ENGINE': 'django.db.backends.sqlite3', 'NAME':\n ':memory:'}}\nINSTALLED_APPS = ('django.contrib.auth', 'django.contrib.admin',\n 'django.contrib.sessions', 'django.contrib.contenttypes',\n 'django.contrib.sites', 'maintenancemode')\nMIDDLEWARE = ('django.contrib.sessions.middleware.SessionMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'maintenancemode.middleware.MaintenanceModeMiddleware')\nROOT_URLCONF = 'maintenancemode.tests'\nSITE_ID = 1\nMAINTENANCE_MODE = True\nMAINTENANCE_IGNORE_URLS = re.compile('^/ignored.*'),\n",
"step-4": "import os, re\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.sqlite3',\n 'NAME': ':memory:'\n }\n}\n\nINSTALLED_APPS = (\n 'django.contrib.auth',\n 'django.contrib.admin',\n 'django.contrib.sessions',\n 'django.contrib.contenttypes',\n 'django.contrib.sites',\n\n 'maintenancemode',\n)\n\nMIDDLEWARE = (\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n\n 'maintenancemode.middleware.MaintenanceModeMiddleware',\n)\n\nROOT_URLCONF = 'maintenancemode.tests'\n\nSITE_ID = 1\n\nMAINTENANCE_MODE = True # or ``False`` and use ``maintenance`` command\nMAINTENANCE_IGNORE_URLS = (\n re.compile(r'^/ignored.*'),\n)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import requests
import datetime
import time
from tqdm import tqdm
import json
import logging
logging.basicConfig(filename='logo.log', level=logging.DEBUG, filemode='w')
logging.debug('debug message')
logging.info('info message')
# from pprint import pprint
id_vk = input('enter the VK user id: ')
token_vk = input('enter the VK token: ')
url = 'https://api.vk.com/method/photos.get'
params = {'user_id': id_vk, 'access_token': token_vk, 'v': '5.131', 'album_id': 'profile', 'extended': '1', 'photo_sizes': '1'}
res = requests.get(url, params=params)
# pprint(res.json())
token_ya = input('enter the Yandex token: ')
def ya_headers():
return {'Content-type': 'application/json', 'Authorization': 'OAuth {}'.format(token_ya)}
def put_folder(path):
url = 'https://cloud-api.yandex.net/v1/disk/resources/'
headers = ya_headers()
params = {'path': path, 'url': url}
    response = requests.put(url, headers=headers, params=params)
if response.status_code == 201:
        print('folder created')
elif response.status_code == 409:
        print('The folder already exists. Files will be placed into it.')
return path
def post_file(file_url, file_name):
upload_url = 'https://cloud-api.yandex.net/v1/disk/resources/upload'
headers = ya_headers()
params = {'path': f'/{file_name}', 'url': file_url}
response = requests.post(upload_url, headers = headers, params = params)
return response.json()
folder_name = put_folder(input("enter the folder name for uploading photos: "))
name_list = []
data = []
size_list = []
for photos in tqdm(res.json()['response']['items']):
sizes = photos['sizes']
for picture in sizes:
size_list.append(picture['type'])
size_list.sort(reverse=True)
for picture1 in sizes:
data_dict = {}
if picture1['type'] == size_list[0]:
href = picture1['url']
filename = photos['likes']['count']
if filename in name_list:
filename = f"{photos['likes']['count']}+{datetime.datetime.fromtimestamp(photos['date']).isoformat().replace(':', '|')}"
post_file(href, f"{folder_name}/{filename}")
else:
post_file(href, f"{folder_name}/{filename}")
data_dict['file_name'] = filename
data_dict['size'] = picture1['type']
data.append(data_dict)
name_list.append(filename)
size_list.clear()
time.sleep(1)
with open('foto.json', 'w') as f:
json.dump(data, f, ensure_ascii=False, indent=2)
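# --- Illustration only (not part of the original script): the naming rule
# used in the loop above, factored out so the collision handling is easy to
# test. '|' replaces ':' because ':' is not allowed in file names on some
# platforms.
def _demo_name(likes, ts, taken):
    name = likes
    if name in taken:
        name = f"{likes}+{datetime.datetime.fromtimestamp(ts).isoformat().replace(':', '|')}"
    return name

# _demo_name(10, 1600000000, {10}) -> '10+2020-09-13T...' (local time varies)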
|
normal
|
{
"blob_id": "a22bc3bdb5e35060eff7f523b90d605ff2dd3878",
"index": 9581,
"step-1": "<mask token>\n\n\ndef ya_headers():\n return {'Content-type': 'application/json', 'Authorization': 'OAuth {}'\n .format(token_ya)}\n\n\ndef put_folder(path):\n url = 'https://cloud-api.yandex.net/v1/disk/resources/'\n headers = ya_headers()\n params = {'path': path, 'url': url}\n response = requests.put(url, headers=headers, params=params)\n if response.status_code == 201:\n print('папка создана')\n elif response.status_code == 409:\n print('Папка уже существует. Файлы будут помещены в неё.')\n return path\n\n\ndef post_file(file_url, file_name):\n upload_url = 'https://cloud-api.yandex.net/v1/disk/resources/upload'\n headers = ya_headers()\n params = {'path': f'/{file_name}', 'url': file_url}\n response = requests.post(upload_url, headers=headers, params=params)\n return response.json()\n\n\n<mask token>\n",
"step-2": "<mask token>\nlogging.basicConfig(filename='logo.log', level=logging.DEBUG, filemode='w')\nlogging.debug('debug message')\nlogging.info('info message')\n<mask token>\n\n\ndef ya_headers():\n return {'Content-type': 'application/json', 'Authorization': 'OAuth {}'\n .format(token_ya)}\n\n\ndef put_folder(path):\n url = 'https://cloud-api.yandex.net/v1/disk/resources/'\n headers = ya_headers()\n params = {'path': path, 'url': url}\n response = requests.put(url, headers=headers, params=params)\n if response.status_code == 201:\n print('папка создана')\n elif response.status_code == 409:\n print('Папка уже существует. Файлы будут помещены в неё.')\n return path\n\n\ndef post_file(file_url, file_name):\n upload_url = 'https://cloud-api.yandex.net/v1/disk/resources/upload'\n headers = ya_headers()\n params = {'path': f'/{file_name}', 'url': file_url}\n response = requests.post(upload_url, headers=headers, params=params)\n return response.json()\n\n\n<mask token>\nfor photos in tqdm(res.json()['response']['items']):\n sizes = photos['sizes']\n for picture in sizes:\n size_list.append(picture['type'])\n size_list.sort(reverse=True)\n for picture1 in sizes:\n data_dict = {}\n if picture1['type'] == size_list[0]:\n href = picture1['url']\n filename = photos['likes']['count']\n if filename in name_list:\n filename = (\n f\"{photos['likes']['count']}+{datetime.datetime.fromtimestamp(photos['date']).isoformat().replace(':', '|')}\"\n )\n post_file(href, f'{folder_name}/{filename}')\n else:\n post_file(href, f'{folder_name}/{filename}')\n data_dict['file_name'] = filename\n data_dict['size'] = picture1['type']\n data.append(data_dict)\n name_list.append(filename)\n size_list.clear()\n time.sleep(1)\nwith open('foto.json', 'w') as f:\n json.dump(data, f, ensure_ascii=False, indent=2)\n",
"step-3": "<mask token>\nlogging.basicConfig(filename='logo.log', level=logging.DEBUG, filemode='w')\nlogging.debug('debug message')\nlogging.info('info message')\nid_vk = input('введите id пользователя вк: ')\ntoken_vk = input('введите токен вк: ')\nurl = 'https://api.vk.com/method/photos.get'\nparams = {'user_id': id_vk, 'access_token': token_vk, 'v': '5.131',\n 'album_id': 'profile', 'extended': '1', 'photo_sizes': '1'}\nres = requests.get(url, params=params)\ntoken_ya = input('введите токен Yandex: ')\n\n\ndef ya_headers():\n return {'Content-type': 'application/json', 'Authorization': 'OAuth {}'\n .format(token_ya)}\n\n\ndef put_folder(path):\n url = 'https://cloud-api.yandex.net/v1/disk/resources/'\n headers = ya_headers()\n params = {'path': path, 'url': url}\n response = requests.put(url, headers=headers, params=params)\n if response.status_code == 201:\n print('папка создана')\n elif response.status_code == 409:\n print('Папка уже существует. Файлы будут помещены в неё.')\n return path\n\n\ndef post_file(file_url, file_name):\n upload_url = 'https://cloud-api.yandex.net/v1/disk/resources/upload'\n headers = ya_headers()\n params = {'path': f'/{file_name}', 'url': file_url}\n response = requests.post(upload_url, headers=headers, params=params)\n return response.json()\n\n\nfolder_name = put_folder(input('введите имя папки для загрузки фотографий: '))\nname_list = []\ndata = []\nsize_list = []\nfor photos in tqdm(res.json()['response']['items']):\n sizes = photos['sizes']\n for picture in sizes:\n size_list.append(picture['type'])\n size_list.sort(reverse=True)\n for picture1 in sizes:\n data_dict = {}\n if picture1['type'] == size_list[0]:\n href = picture1['url']\n filename = photos['likes']['count']\n if filename in name_list:\n filename = (\n f\"{photos['likes']['count']}+{datetime.datetime.fromtimestamp(photos['date']).isoformat().replace(':', '|')}\"\n )\n post_file(href, f'{folder_name}/{filename}')\n else:\n post_file(href, f'{folder_name}/{filename}')\n data_dict['file_name'] = filename\n data_dict['size'] = picture1['type']\n data.append(data_dict)\n name_list.append(filename)\n size_list.clear()\n time.sleep(1)\nwith open('foto.json', 'w') as f:\n json.dump(data, f, ensure_ascii=False, indent=2)\n",
"step-4": "import requests\nimport datetime\nimport time\nfrom tqdm import tqdm\nimport json\nimport logging\nlogging.basicConfig(filename='logo.log', level=logging.DEBUG, filemode='w')\nlogging.debug('debug message')\nlogging.info('info message')\nid_vk = input('введите id пользователя вк: ')\ntoken_vk = input('введите токен вк: ')\nurl = 'https://api.vk.com/method/photos.get'\nparams = {'user_id': id_vk, 'access_token': token_vk, 'v': '5.131',\n 'album_id': 'profile', 'extended': '1', 'photo_sizes': '1'}\nres = requests.get(url, params=params)\ntoken_ya = input('введите токен Yandex: ')\n\n\ndef ya_headers():\n return {'Content-type': 'application/json', 'Authorization': 'OAuth {}'\n .format(token_ya)}\n\n\ndef put_folder(path):\n url = 'https://cloud-api.yandex.net/v1/disk/resources/'\n headers = ya_headers()\n params = {'path': path, 'url': url}\n response = requests.put(url, headers=headers, params=params)\n if response.status_code == 201:\n print('папка создана')\n elif response.status_code == 409:\n print('Папка уже существует. Файлы будут помещены в неё.')\n return path\n\n\ndef post_file(file_url, file_name):\n upload_url = 'https://cloud-api.yandex.net/v1/disk/resources/upload'\n headers = ya_headers()\n params = {'path': f'/{file_name}', 'url': file_url}\n response = requests.post(upload_url, headers=headers, params=params)\n return response.json()\n\n\nfolder_name = put_folder(input('введите имя папки для загрузки фотографий: '))\nname_list = []\ndata = []\nsize_list = []\nfor photos in tqdm(res.json()['response']['items']):\n sizes = photos['sizes']\n for picture in sizes:\n size_list.append(picture['type'])\n size_list.sort(reverse=True)\n for picture1 in sizes:\n data_dict = {}\n if picture1['type'] == size_list[0]:\n href = picture1['url']\n filename = photos['likes']['count']\n if filename in name_list:\n filename = (\n f\"{photos['likes']['count']}+{datetime.datetime.fromtimestamp(photos['date']).isoformat().replace(':', '|')}\"\n )\n post_file(href, f'{folder_name}/{filename}')\n else:\n post_file(href, f'{folder_name}/{filename}')\n data_dict['file_name'] = filename\n data_dict['size'] = picture1['type']\n data.append(data_dict)\n name_list.append(filename)\n size_list.clear()\n time.sleep(1)\nwith open('foto.json', 'w') as f:\n json.dump(data, f, ensure_ascii=False, indent=2)\n",
"step-5": "import requests\nimport datetime\nimport time\nfrom tqdm import tqdm\nimport json\nimport logging\nlogging.basicConfig(filename='logo.log', level=logging.DEBUG, filemode='w')\nlogging.debug('debug message')\nlogging.info('info message')\n\n# from pprint import pprint\nid_vk = input('введите id пользователя вк: ')\ntoken_vk = input('введите токен вк: ')\nurl = 'https://api.vk.com/method/photos.get'\nparams = {'user_id': id_vk, 'access_token': token_vk, 'v': '5.131','album_id': 'profile', 'extended': '1', 'photo_sizes': '1'}\nres = requests.get(url, params=params)\n# pprint(res.json())\n\ntoken_ya = input('введите токен Yandex: ')\n\ndef ya_headers():\n return {'Content-type': 'application/json', 'Authorization': 'OAuth {}'.format(token_ya)}\n\n\ndef put_folder(path):\n url = 'https://cloud-api.yandex.net/v1/disk/resources/'\n headers = ya_headers()\n params = {'path': path, 'url': url}\n response = requests.put(url,headers = headers, params = params)\n \n if response.status_code == 201:\n print('папка создана')\n elif response.status_code == 409:\n print('Папка уже существует. Файлы будут помещены в неё.') \n\n return path\n\n\ndef post_file(file_url, file_name):\n upload_url = 'https://cloud-api.yandex.net/v1/disk/resources/upload'\n headers = ya_headers()\n params = {'path': f'/{file_name}', 'url': file_url}\n response = requests.post(upload_url, headers = headers, params = params)\n return response.json()\n\n\n \nfolder_name = put_folder(input(\"введите имя папки для загрузки фотографий: \"))\nname_list = []\ndata = []\nsize_list = []\n\nfor photos in tqdm(res.json()['response']['items']):\n \n sizes = photos['sizes']\n\n\n for picture in sizes:\n size_list.append(picture['type'])\n size_list.sort(reverse=True)\n \n for picture1 in sizes:\n data_dict = {} \n if picture1['type'] == size_list[0]:\n href = picture1['url']\n filename = photos['likes']['count'] \n if filename in name_list:\n filename = f\"{photos['likes']['count']}+{datetime.datetime.fromtimestamp(photos['date']).isoformat().replace(':', '|')}\"\n post_file(href, f\"{folder_name}/{filename}\") \n else:\n post_file(href, f\"{folder_name}/{filename}\")\n\n data_dict['file_name'] = filename\n data_dict['size'] = picture1['type']\n data.append(data_dict) \n \n \n name_list.append(filename)\n size_list.clear() \n \n time.sleep(1)\nwith open ('foto.json', 'w') as f:\n json.dump(data, f, ensure_ascii=False, indent=2) ",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
<|reserved_special_token_0|>
@implementer(IMicrosite)
class Microsite(Container):
<|reserved_special_token_0|>
def getLocallyAllowedTypes(self):
"""
        For now we allow all allowed types without constraint.
TODO: fully implement ISelectableConstrainTypes
"""
portal_types = api.portal.get_tool('portal_types')
my_type = portal_types.getTypeInfo(self)
result = portal_types.listTypeInfo()
return [t for t in result if my_type.allowType(t.getId()) and t.
isConstructionAllowed(self)]
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@implementer(IMicrosite)
class Microsite(Container):
<|reserved_special_token_0|>
def getLocallyAllowedTypes(self):
"""
        For now we allow all allowed types without constraint.
TODO: fully implement ISelectableConstrainTypes
"""
portal_types = api.portal.get_tool('portal_types')
my_type = portal_types.getTypeInfo(self)
result = portal_types.listTypeInfo()
return [t for t in result if my_type.allowType(t.getId()) and t.
isConstructionAllowed(self)]
def getImmediatelyAddableTypes(self, context=None):
"""
        For now we allow all allowed types without constraint.
TODO: fully implement ISelectableConstrainTypes
"""
return self.getLocallyAllowedTypes()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@implementer(IMicrosite)
class Microsite(Container):
"""A microsite."""
def getLocallyAllowedTypes(self):
"""
        For now we allow all allowed types without constraint.
TODO: fully implement ISelectableConstrainTypes
"""
portal_types = api.portal.get_tool('portal_types')
my_type = portal_types.getTypeInfo(self)
result = portal_types.listTypeInfo()
return [t for t in result if my_type.allowType(t.getId()) and t.
isConstructionAllowed(self)]
def getImmediatelyAddableTypes(self, context=None):
"""
        For now we allow all allowed types without constraint.
TODO: fully implement ISelectableConstrainTypes
"""
return self.getLocallyAllowedTypes()
<|reserved_special_token_1|>
from plone import api
from plone.dexterity.content import Container
from sc.microsite.interfaces import IMicrosite
from zope.interface import implementer
@implementer(IMicrosite)
class Microsite(Container):
"""A microsite."""
def getLocallyAllowedTypes(self):
"""
        For now we allow all allowed types without constraint.
TODO: fully implement ISelectableConstrainTypes
"""
portal_types = api.portal.get_tool('portal_types')
my_type = portal_types.getTypeInfo(self)
result = portal_types.listTypeInfo()
return [t for t in result if my_type.allowType(t.getId()) and t.
isConstructionAllowed(self)]
def getImmediatelyAddableTypes(self, context=None):
"""
        For now we allow all allowed types without constraint.
TODO: fully implement ISelectableConstrainTypes
"""
return self.getLocallyAllowedTypes()
<|reserved_special_token_1|>
# -*- coding: utf-8 -*-
from plone import api
from plone.dexterity.content import Container
from sc.microsite.interfaces import IMicrosite
from zope.interface import implementer
@implementer(IMicrosite)
class Microsite(Container):
"""A microsite."""
def getLocallyAllowedTypes(self):
"""
        For now we allow all allowed types without constraint.
TODO: fully implement ISelectableConstrainTypes
"""
portal_types = api.portal.get_tool('portal_types')
my_type = portal_types.getTypeInfo(self)
result = portal_types.listTypeInfo()
return [t for t in result if my_type.allowType(t.getId()) and
t.isConstructionAllowed(self)]
def getImmediatelyAddableTypes(self, context=None):
"""
        For now we allow all allowed types without constraint.
TODO: fully implement ISelectableConstrainTypes
"""
return self.getLocallyAllowedTypes()
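A quick way to sanity-check these hooks is from a Plone debug shell; a minimal sketch, assuming a Microsite object already exists at site['my-microsite'] (both names are placeholders):

# bin/instance debug session
microsite = site['my-microsite']
print([t.getId() for t in microsite.getLocallyAllowedTypes()])
print([t.getId() for t in microsite.getImmediatelyAddableTypes()])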
|
flexible
|
{
"blob_id": "3d5d88edca5d746b830363cc9451bda94c1d7aa4",
"index": 2905,
"step-1": "<mask token>\n\n\n@implementer(IMicrosite)\nclass Microsite(Container):\n <mask token>\n\n def getLocallyAllowedTypes(self):\n \"\"\"\n By now we allow all allowed types without constrain.\n TODO: fully implement ISelectableConstrainTypes\n \"\"\"\n portal_types = api.portal.get_tool('portal_types')\n my_type = portal_types.getTypeInfo(self)\n result = portal_types.listTypeInfo()\n return [t for t in result if my_type.allowType(t.getId()) and t.\n isConstructionAllowed(self)]\n <mask token>\n",
"step-2": "<mask token>\n\n\n@implementer(IMicrosite)\nclass Microsite(Container):\n <mask token>\n\n def getLocallyAllowedTypes(self):\n \"\"\"\n By now we allow all allowed types without constrain.\n TODO: fully implement ISelectableConstrainTypes\n \"\"\"\n portal_types = api.portal.get_tool('portal_types')\n my_type = portal_types.getTypeInfo(self)\n result = portal_types.listTypeInfo()\n return [t for t in result if my_type.allowType(t.getId()) and t.\n isConstructionAllowed(self)]\n\n def getImmediatelyAddableTypes(self, context=None):\n \"\"\"\n By now we allow all allowed types without constrain.\n TODO: fully implement ISelectableConstrainTypes\n \"\"\"\n return self.getLocallyAllowedTypes()\n",
"step-3": "<mask token>\n\n\n@implementer(IMicrosite)\nclass Microsite(Container):\n \"\"\"A microsite.\"\"\"\n\n def getLocallyAllowedTypes(self):\n \"\"\"\n By now we allow all allowed types without constrain.\n TODO: fully implement ISelectableConstrainTypes\n \"\"\"\n portal_types = api.portal.get_tool('portal_types')\n my_type = portal_types.getTypeInfo(self)\n result = portal_types.listTypeInfo()\n return [t for t in result if my_type.allowType(t.getId()) and t.\n isConstructionAllowed(self)]\n\n def getImmediatelyAddableTypes(self, context=None):\n \"\"\"\n By now we allow all allowed types without constrain.\n TODO: fully implement ISelectableConstrainTypes\n \"\"\"\n return self.getLocallyAllowedTypes()\n",
"step-4": "from plone import api\nfrom plone.dexterity.content import Container\nfrom sc.microsite.interfaces import IMicrosite\nfrom zope.interface import implementer\n\n\n@implementer(IMicrosite)\nclass Microsite(Container):\n \"\"\"A microsite.\"\"\"\n\n def getLocallyAllowedTypes(self):\n \"\"\"\n By now we allow all allowed types without constrain.\n TODO: fully implement ISelectableConstrainTypes\n \"\"\"\n portal_types = api.portal.get_tool('portal_types')\n my_type = portal_types.getTypeInfo(self)\n result = portal_types.listTypeInfo()\n return [t for t in result if my_type.allowType(t.getId()) and t.\n isConstructionAllowed(self)]\n\n def getImmediatelyAddableTypes(self, context=None):\n \"\"\"\n By now we allow all allowed types without constrain.\n TODO: fully implement ISelectableConstrainTypes\n \"\"\"\n return self.getLocallyAllowedTypes()\n",
"step-5": "# -*- coding: utf-8 -*-\nfrom plone import api\nfrom plone.dexterity.content import Container\nfrom sc.microsite.interfaces import IMicrosite\nfrom zope.interface import implementer\n\n\n@implementer(IMicrosite)\nclass Microsite(Container):\n \"\"\"A microsite.\"\"\"\n\n def getLocallyAllowedTypes(self):\n \"\"\"\n By now we allow all allowed types without constrain.\n TODO: fully implement ISelectableConstrainTypes\n \"\"\"\n portal_types = api.portal.get_tool('portal_types')\n my_type = portal_types.getTypeInfo(self)\n result = portal_types.listTypeInfo()\n return [t for t in result if my_type.allowType(t.getId()) and\n t.isConstructionAllowed(self)]\n\n def getImmediatelyAddableTypes(self, context=None):\n \"\"\"\n By now we allow all allowed types without constrain.\n TODO: fully implement ISelectableConstrainTypes\n \"\"\"\n return self.getLocallyAllowedTypes()\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
#loading data from CSV
training_data_df = pd.read_csv("sales_data_training.csv")
test_data_df = pd.read_csv("sales_data_test.csv")
#scaler
scaler = MinMaxScaler(feature_range=(0,1))
#scale both inputs and outputs
scaled_training = scaler.fit_transform(training_data_df)
scaled_testing = scaler.transform(test_data_df)
# report the constants needed to map scaled predictions back to the
# original units (column index 8 is total_earnings)
print("Note: total_earnings values were scaled by multiplying by {:.10f} and adding {:.6f}".format(scaler.scale_[8], scaler.min_[8]))
#create a new scaled dataframe object
scaled_training_df = pd.DataFrame(scaled_training, columns=training_data_df.columns.values)
scaled_testing_df = pd.DataFrame(scaled_testing, columns=test_data_df.columns.values)
#save the scaled dataframes to new csv files
scaled_training_df.to_csv("sales_data_training_scaled.csv", index=False)
scaled_testing_df.to_csv("sales_data_test_scaled.csv", index=False)
|
normal
|
{
"blob_id": "050e2207ac7331444d39305869c4b25bcbc53907",
"index": 244,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(\n 'Note: total_earnings values were scaled by multiplying by {:.10f} and adding {:.6f}'\n .format(scaler.scale_[8], scaler.min_[8]))\n<mask token>\nscaled_training_df.to_csv('sales_data_training_scaled.csv', index=False)\nscaled_training_df.to_csv('sales_data_test_scaled.csv', index=False)\n",
"step-3": "<mask token>\ntraining_data_df = pd.read_csv('sales_data_training.csv')\ntest_data_df = pd.read_csv('sales_data_test.csv')\nscaler = MinMaxScaler(feature_range=(0, 1))\nscaled_training = scaler.fit_transform(training_data_df)\nscaled_testing = scaler.transform(test_data_df)\nprint(\n 'Note: total_earnings values were scaled by multiplying by {:.10f} and adding {:.6f}'\n .format(scaler.scale_[8], scaler.min_[8]))\nscaled_training_df = pd.DataFrame(scaled_training, columns=training_data_df\n .columns.values)\nscaled_testing_df = pd.DataFrame(scaled_testing, columns=test_data_df.\n columns.values)\nscaled_training_df.to_csv('sales_data_training_scaled.csv', index=False)\nscaled_training_df.to_csv('sales_data_test_scaled.csv', index=False)\n",
"step-4": "import pandas as pd\nfrom sklearn.preprocessing import MinMaxScaler\ntraining_data_df = pd.read_csv('sales_data_training.csv')\ntest_data_df = pd.read_csv('sales_data_test.csv')\nscaler = MinMaxScaler(feature_range=(0, 1))\nscaled_training = scaler.fit_transform(training_data_df)\nscaled_testing = scaler.transform(test_data_df)\nprint(\n 'Note: total_earnings values were scaled by multiplying by {:.10f} and adding {:.6f}'\n .format(scaler.scale_[8], scaler.min_[8]))\nscaled_training_df = pd.DataFrame(scaled_training, columns=training_data_df\n .columns.values)\nscaled_testing_df = pd.DataFrame(scaled_testing, columns=test_data_df.\n columns.values)\nscaled_training_df.to_csv('sales_data_training_scaled.csv', index=False)\nscaled_training_df.to_csv('sales_data_test_scaled.csv', index=False)\n",
"step-5": "import pandas as pd\nfrom sklearn.preprocessing import MinMaxScaler\n\n#loading data from CSV\ntraining_data_df = pd.read_csv(\"sales_data_training.csv\")\ntest_data_df = pd.read_csv(\"sales_data_test.csv\")\n\n#scaler\nscaler = MinMaxScaler(feature_range=(0,1))\n\n#scale both inputs and outputs\nscaled_training = scaler.fit_transform(training_data_df)\nscaled_testing = scaler.transform(test_data_df)\n\n#to bring it back to the original values\nprint(\"Note: total_earnings values were scaled by multiplying by {:.10f} and adding {:.6f}\".format(scaler.scale_[8], scaler.min_[8]))\n\n#create a new scaled dataframe object\nscaled_training_df = pd.DataFrame(scaled_training, columns=training_data_df.columns.values)\nscaled_testing_df = pd.DataFrame(scaled_testing, columns=test_data_df.columns.values)\n\n#save the scaled dataframe to new csv files\nscaled_training_df.to_csv(\"sales_data_training_scaled.csv\", index=False)\nscaled_training_df.to_csv(\"sales_data_test_scaled.csv\", index=False)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
def canonicalize_name(name):
return _canonicalize_regex.sub('-', name).lower()
def page_index(packages):
yield PAGE_FMT
for p in packages:
name = p.name
url = name
yield ENTRY_FMT.format(url=canonicalize_name(p.name), name=name)
def page_package(package):
yield PKG_PAGE_FMT.format(name=package.name)
for name, link in package.links:
yield ENTRY_FMT.format(name=name, url=link)
<|reserved_special_token_0|>
def make_request_handler(index):
"""
Arguments
---------
index: dict-like
- allows key lookups
- has a values() function that returns a list of
package instances.
- supports get
"""
root_paths = {'', '/'}
class PyPiRequestHandler(BaseHTTPRequestHandler):
def get_package(self, package_name):
package = index.get(package_name)
return package
def write_unicode(self, text):
self.wfile.write(bytearray(text, encoding='utf-8'))
def do_GET(self):
print('GET', self.path)
if self.path in root_paths:
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
for line in page_index(index.values()):
self.write_unicode(line)
else:
package_name = self.path.strip('/')
package = self.get_package(package_name)
if not package:
self.send_response(404)
self.send_header('Content-type', 'text/html')
self.end_headers()
self.write_unicode(msg_404(package_name))
return
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
for line in page_package(package):
self.write_unicode(line)
return PyPiRequestHandler
def main(packages, index=None, host='', port=7890):
if index is None:
index = {}
for p in packages:
index[canonicalize_name(p.name)] = p
try:
server = HTTPServer((host, port), make_request_handler(index))
print('Started mpypi on port {}'.format(port))
server.serve_forever()
except KeyboardInterrupt:
print('^C received, shutting down the web server')
server.socket.close()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def canonicalize_name(name):
return _canonicalize_regex.sub('-', name).lower()
def page_index(packages):
yield PAGE_FMT
for p in packages:
name = p.name
url = name
yield ENTRY_FMT.format(url=canonicalize_name(p.name), name=name)
def page_package(package):
yield PKG_PAGE_FMT.format(name=package.name)
for name, link in package.links:
yield ENTRY_FMT.format(name=name, url=link)
def msg_404(pkg_name):
return ('<html><body> Package <b>{}</b> does not exist.</body></html>\n'
        .format(escape(pkg_name)))
def make_request_handler(index):
"""
Arguments
---------
index: dict-like
- allows key lookups
- has a values() function that returns a list of
package instances.
- supports get
"""
root_paths = {'', '/'}
class PyPiRequestHandler(BaseHTTPRequestHandler):
def get_package(self, package_name):
package = index.get(package_name)
return package
def write_unicode(self, text):
self.wfile.write(bytearray(text, encoding='utf-8'))
def do_GET(self):
print('GET', self.path)
if self.path in root_paths:
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
for line in page_index(index.values()):
self.write_unicode(line)
else:
package_name = self.path.strip('/')
package = self.get_package(package_name)
if not package:
self.send_response(404)
self.send_header('Content-type', 'text/html')
self.end_headers()
self.write_unicode(msg_404(package_name))
return
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
for line in page_package(package):
self.write_unicode(line)
return PyPiRequestHandler
def main(packages, index=None, host='', port=7890):
if index is None:
index = {}
for p in packages:
index[canonicalize_name(p.name)] = p
try:
server = HTTPServer((host, port), make_request_handler(index))
print('Started mpypi on port {}'.format(port))
server.serve_forever()
except KeyboardInterrupt:
print('^C received, shutting down the web server')
server.socket.close()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
if PY2:
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
else:
from http.server import BaseHTTPRequestHandler, HTTPServer
<|reserved_special_token_0|>
def canonicalize_name(name):
return _canonicalize_regex.sub('-', name).lower()
def page_index(packages):
yield PAGE_FMT
for p in packages:
name = p.name
url = name
yield ENTRY_FMT.format(url=canonicalize_name(p.name), name=name)
def page_package(package):
yield PKG_PAGE_FMT.format(name=package.name)
for name, link in package.links:
yield ENTRY_FMT.format(name=name, url=link)
def msg_404(pkg_name):
return ('<html><body> Package <b>{}</b> does not exist.</body></html>\n'
        .format(escape(pkg_name)))
def make_request_handler(index):
"""
Arguments
---------
index: dict-like
- allows key lookups
- has a values() function that returns a list of
package instances.
- supports get
"""
root_paths = {'', '/'}
class PyPiRequestHandler(BaseHTTPRequestHandler):
def get_package(self, package_name):
package = index.get(package_name)
return package
def write_unicode(self, text):
self.wfile.write(bytearray(text, encoding='utf-8'))
def do_GET(self):
print('GET', self.path)
if self.path in root_paths:
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
for line in page_index(index.values()):
self.write_unicode(line)
else:
package_name = self.path.strip('/')
package = self.get_package(package_name)
if not package:
self.send_response(404)
self.send_header('Content-type', 'text/html')
self.end_headers()
self.write_unicode(msg_404(package_name))
return
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
for line in page_package(package):
self.write_unicode(line)
return PyPiRequestHandler
def main(packages, index=None, host='', port=7890):
if index is None:
index = {}
for p in packages:
index[canonicalize_name(p.name)] = p
try:
server = HTTPServer((host, port), make_request_handler(index))
print('Started mpypi on port {}'.format(port))
server.serve_forever()
except KeyboardInterrupt:
print('^C received, shutting down the web server')
server.socket.close()
if __name__ == '__main__':
main([])
<|reserved_special_token_1|>
<|reserved_special_token_0|>
if PY2:
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
else:
from http.server import BaseHTTPRequestHandler, HTTPServer
ENTRY_FMT = '<a href="{url}">{name}</a><br/>\n'
PAGE_FMT = """<html><head><title>Simple MPyPi Index</title><meta name="api-version" value="2" /></head><body>
"""
PKG_PAGE_FMT = """<!DOCTYPE html><html><head><title>Links for {name}</title></head><body><h1>Links for {name}</h1>
"""
_canonicalize_regex = re.compile('[-_.]+')
def canonicalize_name(name):
return _canonicalize_regex.sub('-', name).lower()
def page_index(packages):
yield PAGE_FMT
for p in packages:
name = p.name
url = name
yield ENTRY_FMT.format(url=canonicalize_name(p.name), name=name)
def page_package(package):
yield PKG_PAGE_FMT.format(name=package.name)
for name, link in package.links:
yield ENTRY_FMT.format(name=name, url=link)
def msg_404(pkg_name):
return ('<html><body> Package <b>{}</b> does not exist.</body></html>\n'
        .format(escape(pkg_name)))
def make_request_handler(index):
"""
Arguments
---------
index: dict-like
- allows key lookups
- has a values() function that returns a list of
package instances.
- supports get
"""
root_paths = {'', '/'}
class PyPiRequestHandler(BaseHTTPRequestHandler):
def get_package(self, package_name):
package = index.get(package_name)
return package
def write_unicode(self, text):
self.wfile.write(bytearray(text, encoding='utf-8'))
def do_GET(self):
print('GET', self.path)
if self.path in root_paths:
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
for line in page_index(index.values()):
self.write_unicode(line)
else:
package_name = self.path.strip('/')
package = self.get_package(package_name)
if not package:
self.send_response(404)
self.send_header('Content-type', 'text/html')
self.end_headers()
self.write_unicode(msg_404(package_name))
return
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
for line in page_package(package):
self.write_unicode(line)
return PyPiRequestHandler
def main(packages, index=None, host='', port=7890):
if index is None:
index = {}
for p in packages:
index[canonicalize_name(p.name)] = p
try:
server = HTTPServer((host, port), make_request_handler(index))
print('Started mpypi on port {}'.format(port))
server.serve_forever()
except KeyboardInterrupt:
print('^C received, shutting down the web server')
server.socket.close()
if __name__ == '__main__':
main([])
<|reserved_special_token_1|>
#!/usr/bin/python
"""
An extensible private pypi index.
NOTES ON PACKAGE NAMES
----------------------
MPyPi tries the following when it does not find a package
with the given name in the index:
- replaces all _ with - and
- lowercases the package name
"""
from __future__ import print_function
from __future__ import unicode_literals
import re

# cgi.escape was removed in Python 3.8; prefer html.escape and fall back
# to cgi.escape on Python 2
try:
    from html import escape
except ImportError:
    from cgi import escape

from .util import PY2, PY3
if PY2:
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
else:
from http.server import BaseHTTPRequestHandler, HTTPServer
# --- format strings
ENTRY_FMT = """<a href="{url}">{name}</a><br/>\n"""
PAGE_FMT = """<html><head><title>Simple MPyPi Index</title><meta name="api-version" value="2" /></head><body>\n"""
PKG_PAGE_FMT = """<!DOCTYPE html><html><head><title>Links for {name}</title></head><body><h1>Links for {name}</h1>\n"""
# ------------------------------------------------------------------------------
# Snippet from pip._vendor.packaging.core
# ------------------------------------------------------------------------------
_canonicalize_regex = re.compile(r"[-_.]+")
def canonicalize_name(name):
# This is taken from PEP 503.
return _canonicalize_regex.sub("-", name).lower()
# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
# INTERNALLY USED FUNCTIONS
# ------------------------------------------------------------------------------
# --- page formatting functions
def page_index(packages):
yield PAGE_FMT
for p in packages:
name = p.name
url = name
yield ENTRY_FMT.format(url=canonicalize_name(p.name), name=name)
def page_package(package):
yield PKG_PAGE_FMT.format(name=package.name)
for (name, link) in package.links:
yield ENTRY_FMT.format(name=name, url=link)
def msg_404(pkg_name):
    return '<html><body> Package <b>{}</b> does not exist.</body></html>\n'.format(escape(pkg_name))
def make_request_handler(index):
"""
Arguments
---------
index: dict-like
- allows key lookups
- has a values() function that returns a list of
package instances.
- supports get
"""
root_paths = {'', '/'}
class PyPiRequestHandler(BaseHTTPRequestHandler):
def get_package(self, package_name):
package = index.get(package_name)
return package
def write_unicode(self, text):
self.wfile.write(bytearray(text, encoding='utf-8'))
def do_GET(self):
print("GET", self.path)
if self.path in root_paths:
self.send_response(200)
self.send_header('Content-type','text/html')
self.end_headers()
# serve index page
for line in page_index(index.values()):
self.write_unicode(line)
else:
# follow pip standard of using lowercase names
package_name = self.path.strip('/')
package = self.get_package(package_name)
if not package:
self.send_response(404)
self.send_header('Content-type','text/html')
self.end_headers()
self.write_unicode(msg_404(package_name))
return
# serve package page
self.send_response(200)
self.send_header('Content-type','text/html')
self.end_headers()
                # serve package page
for line in page_package(package):
self.write_unicode(line)
return PyPiRequestHandler
def main(packages, index=None, host='', port=7890):
# optionally create an index
if index is None:
index = {}
for p in packages:
index[canonicalize_name(p.name)] = p
try:
server = HTTPServer((host, port), make_request_handler(index))
print('Started mpypi on port {}'.format(port))
server.serve_forever()
except KeyboardInterrupt:
print('^C received, shutting down the web server')
server.socket.close()
if __name__ == '__main__':
main([])
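Feeding the index requires package objects that expose a name attribute and a links iterable of (display name, download URL) pairs, matching page_package() above; a minimal sketch (the Package namedtuple and the example URL are assumptions, not part of mpypi):

from collections import namedtuple

Package = namedtuple('Package', ['name', 'links'])

pkgs = [Package('my-lib', [('my_lib-0.1.tar.gz',
                            'https://example.com/my_lib-0.1.tar.gz')])]
# main(pkgs)  # then e.g.: pip install --index-url http://localhost:7890/ my-lib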
|
flexible
|
{
"blob_id": "bd25b97de78f04510e43f13d356eb6c0025e223d",
"index": 8121,
"step-1": "<mask token>\n\n\ndef canonicalize_name(name):\n return _canonicalize_regex.sub('-', name).lower()\n\n\ndef page_index(packages):\n yield PAGE_FMT\n for p in packages:\n name = p.name\n url = name\n yield ENTRY_FMT.format(url=canonicalize_name(p.name), name=name)\n\n\ndef page_package(package):\n yield PKG_PAGE_FMT.format(name=package.name)\n for name, link in package.links:\n yield ENTRY_FMT.format(name=name, url=link)\n\n\n<mask token>\n\n\ndef make_request_handler(index):\n \"\"\"\n \n Arguments\n ---------\n index: dict-like\n - allows key lookups\n - has a values() function that returns a list of \n package instances.\n - supports get\n \"\"\"\n root_paths = {'', '/'}\n\n\n class PyPiRequestHandler(BaseHTTPRequestHandler):\n\n def get_package(self, package_name):\n package = index.get(package_name)\n return package\n\n def write_unicode(self, text):\n self.wfile.write(bytearray(text, encoding='utf-8'))\n\n def do_GET(self):\n print('GET', self.path)\n if self.path in root_paths:\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n for line in page_index(index.values()):\n self.write_unicode(line)\n else:\n package_name = self.path.strip('/')\n package = self.get_package(package_name)\n if not package:\n self.send_response(404)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n self.write_unicode(msg_404(package_name))\n return\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n for line in page_package(package):\n self.write_unicode(line)\n return PyPiRequestHandler\n\n\ndef main(packages, index=None, host='', port=7890):\n if index is None:\n index = {}\n for p in packages:\n index[canonicalize_name(p.name)] = p\n try:\n server = HTTPServer((host, port), make_request_handler(index))\n print('Started mpypi on port {}'.format(port))\n server.serve_forever()\n except KeyboardInterrupt:\n print('^C received, shutting down the web server')\n server.socket.close()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef canonicalize_name(name):\n return _canonicalize_regex.sub('-', name).lower()\n\n\ndef page_index(packages):\n yield PAGE_FMT\n for p in packages:\n name = p.name\n url = name\n yield ENTRY_FMT.format(url=canonicalize_name(p.name), name=name)\n\n\ndef page_package(package):\n yield PKG_PAGE_FMT.format(name=package.name)\n for name, link in package.links:\n yield ENTRY_FMT.format(name=name, url=link)\n\n\ndef msg_404(pkg_name):\n return ('<html><body> Package <b>{}</b> does not exist.</body></html>\\n'\n .format(cgi.escape(pkg_name)))\n\n\ndef make_request_handler(index):\n \"\"\"\n \n Arguments\n ---------\n index: dict-like\n - allows key lookups\n - has a values() function that returns a list of \n package instances.\n - supports get\n \"\"\"\n root_paths = {'', '/'}\n\n\n class PyPiRequestHandler(BaseHTTPRequestHandler):\n\n def get_package(self, package_name):\n package = index.get(package_name)\n return package\n\n def write_unicode(self, text):\n self.wfile.write(bytearray(text, encoding='utf-8'))\n\n def do_GET(self):\n print('GET', self.path)\n if self.path in root_paths:\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n for line in page_index(index.values()):\n self.write_unicode(line)\n else:\n package_name = self.path.strip('/')\n package = self.get_package(package_name)\n if not package:\n self.send_response(404)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n self.write_unicode(msg_404(package_name))\n return\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n for line in page_package(package):\n self.write_unicode(line)\n return PyPiRequestHandler\n\n\ndef main(packages, index=None, host='', port=7890):\n if index is None:\n index = {}\n for p in packages:\n index[canonicalize_name(p.name)] = p\n try:\n server = HTTPServer((host, port), make_request_handler(index))\n print('Started mpypi on port {}'.format(port))\n server.serve_forever()\n except KeyboardInterrupt:\n print('^C received, shutting down the web server')\n server.socket.close()\n\n\n<mask token>\n",
"step-3": "<mask token>\nif PY2:\n from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer\nelse:\n from http.server import BaseHTTPRequestHandler, HTTPServer\n<mask token>\n\n\ndef canonicalize_name(name):\n return _canonicalize_regex.sub('-', name).lower()\n\n\ndef page_index(packages):\n yield PAGE_FMT\n for p in packages:\n name = p.name\n url = name\n yield ENTRY_FMT.format(url=canonicalize_name(p.name), name=name)\n\n\ndef page_package(package):\n yield PKG_PAGE_FMT.format(name=package.name)\n for name, link in package.links:\n yield ENTRY_FMT.format(name=name, url=link)\n\n\ndef msg_404(pkg_name):\n return ('<html><body> Package <b>{}</b> does not exist.</body></html>\\n'\n .format(cgi.escape(pkg_name)))\n\n\ndef make_request_handler(index):\n \"\"\"\n \n Arguments\n ---------\n index: dict-like\n - allows key lookups\n - has a values() function that returns a list of \n package instances.\n - supports get\n \"\"\"\n root_paths = {'', '/'}\n\n\n class PyPiRequestHandler(BaseHTTPRequestHandler):\n\n def get_package(self, package_name):\n package = index.get(package_name)\n return package\n\n def write_unicode(self, text):\n self.wfile.write(bytearray(text, encoding='utf-8'))\n\n def do_GET(self):\n print('GET', self.path)\n if self.path in root_paths:\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n for line in page_index(index.values()):\n self.write_unicode(line)\n else:\n package_name = self.path.strip('/')\n package = self.get_package(package_name)\n if not package:\n self.send_response(404)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n self.write_unicode(msg_404(package_name))\n return\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n for line in page_package(package):\n self.write_unicode(line)\n return PyPiRequestHandler\n\n\ndef main(packages, index=None, host='', port=7890):\n if index is None:\n index = {}\n for p in packages:\n index[canonicalize_name(p.name)] = p\n try:\n server = HTTPServer((host, port), make_request_handler(index))\n print('Started mpypi on port {}'.format(port))\n server.serve_forever()\n except KeyboardInterrupt:\n print('^C received, shutting down the web server')\n server.socket.close()\n\n\nif __name__ == '__main__':\n main([])\n",
"step-4": "<mask token>\nif PY2:\n from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer\nelse:\n from http.server import BaseHTTPRequestHandler, HTTPServer\nENTRY_FMT = '<a href=\"{url}\">{name}</a><br/>\\n'\nPAGE_FMT = \"\"\"<html><head><title>Simple MPyPi Index</title><meta name=\"api-version\" value=\"2\" /></head><body>\n\"\"\"\nPKG_PAGE_FMT = \"\"\"<!DOCTYPE html><html><head><title>Links for {name}</title></head><body><h1>Links for {name}</h1>\n\"\"\"\n_canonicalize_regex = re.compile('[-_.]+')\n\n\ndef canonicalize_name(name):\n return _canonicalize_regex.sub('-', name).lower()\n\n\ndef page_index(packages):\n yield PAGE_FMT\n for p in packages:\n name = p.name\n url = name\n yield ENTRY_FMT.format(url=canonicalize_name(p.name), name=name)\n\n\ndef page_package(package):\n yield PKG_PAGE_FMT.format(name=package.name)\n for name, link in package.links:\n yield ENTRY_FMT.format(name=name, url=link)\n\n\ndef msg_404(pkg_name):\n return ('<html><body> Package <b>{}</b> does not exist.</body></html>\\n'\n .format(cgi.escape(pkg_name)))\n\n\ndef make_request_handler(index):\n \"\"\"\n \n Arguments\n ---------\n index: dict-like\n - allows key lookups\n - has a values() function that returns a list of \n package instances.\n - supports get\n \"\"\"\n root_paths = {'', '/'}\n\n\n class PyPiRequestHandler(BaseHTTPRequestHandler):\n\n def get_package(self, package_name):\n package = index.get(package_name)\n return package\n\n def write_unicode(self, text):\n self.wfile.write(bytearray(text, encoding='utf-8'))\n\n def do_GET(self):\n print('GET', self.path)\n if self.path in root_paths:\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n for line in page_index(index.values()):\n self.write_unicode(line)\n else:\n package_name = self.path.strip('/')\n package = self.get_package(package_name)\n if not package:\n self.send_response(404)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n self.write_unicode(msg_404(package_name))\n return\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n for line in page_package(package):\n self.write_unicode(line)\n return PyPiRequestHandler\n\n\ndef main(packages, index=None, host='', port=7890):\n if index is None:\n index = {}\n for p in packages:\n index[canonicalize_name(p.name)] = p\n try:\n server = HTTPServer((host, port), make_request_handler(index))\n print('Started mpypi on port {}'.format(port))\n server.serve_forever()\n except KeyboardInterrupt:\n print('^C received, shutting down the web server')\n server.socket.close()\n\n\nif __name__ == '__main__':\n main([])\n",
"step-5": "#!/usr/bin/python\n\"\"\"\nAn extensible private pypi index.\n\nNOTES ON PACKAGE NAMES\n----------------------\nMPyPi tries the following when it does not find a package \nwith the given name in the index:\n - replaces all _ with - and\n - lowercases the package name\n\"\"\"\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\nimport cgi\nimport re\n\nfrom .util import PY2, PY3\n\nif PY2:\n from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer\nelse:\n from http.server import BaseHTTPRequestHandler, HTTPServer\n\n# --- format strings\nENTRY_FMT = \"\"\"<a href=\"{url}\">{name}</a><br/>\\n\"\"\"\nPAGE_FMT = \"\"\"<html><head><title>Simple MPyPi Index</title><meta name=\"api-version\" value=\"2\" /></head><body>\\n\"\"\"\nPKG_PAGE_FMT = \"\"\"<!DOCTYPE html><html><head><title>Links for {name}</title></head><body><h1>Links for {name}</h1>\\n\"\"\"\n\n\n# ------------------------------------------------------------------------------ \n# Snippet from pip._vendor.packaging.core\n# ------------------------------------------------------------------------------ \n_canonicalize_regex = re.compile(r\"[-_.]+\")\n\ndef canonicalize_name(name):\n # This is taken from PEP 503.\n return _canonicalize_regex.sub(\"-\", name).lower()\n# ------------------------------------------------------------------------------ \n\n# ------------------------------------------------------------------------------ \n# INTERNALLY USED FUNCTIONS\n# ------------------------------------------------------------------------------ \n# --- page formatting functions\ndef page_index(packages):\n yield PAGE_FMT\n for p in packages:\n name = p.name\n url = name\n yield ENTRY_FMT.format(url=canonicalize_name(p.name), name=name)\n\ndef page_package(package):\n yield PKG_PAGE_FMT.format(name=package.name)\n for (name, link) in package.links:\n yield ENTRY_FMT.format(name=name, url=link)\n\ndef msg_404(pkg_name):\n return '<html><body> Package <b>{}</b> does not exist.</body></html>\\n'.format(cgi.escape(pkg_name))\n\n\ndef make_request_handler(index):\n \"\"\"\n \n Arguments\n ---------\n index: dict-like\n - allows key lookups\n - has a values() function that returns a list of \n package instances.\n - supports get\n \"\"\"\n root_paths = {'', '/'}\n\n class PyPiRequestHandler(BaseHTTPRequestHandler):\n\n def get_package(self, package_name):\n package = index.get(package_name)\n return package\n\n def write_unicode(self, text):\n self.wfile.write(bytearray(text, encoding='utf-8'))\n\n def do_GET(self):\n print(\"GET\", self.path)\n if self.path in root_paths:\n self.send_response(200)\n self.send_header('Content-type','text/html')\n self.end_headers()\n\n # serve index page\n for line in page_index(index.values()):\n self.write_unicode(line)\n else:\n # follow pip standard of using lowercase names\n package_name = self.path.strip('/')\n package = self.get_package(package_name)\n\n if not package:\n self.send_response(404)\n self.send_header('Content-type','text/html')\n self.end_headers()\n self.write_unicode(msg_404(package_name))\n return\n # serve package page\n self.send_response(200)\n self.send_header('Content-type','text/html')\n self.end_headers()\n\n # serve index page\n for line in page_package(package):\n self.write_unicode(line)\n\n return PyPiRequestHandler \n\ndef main(packages, index=None, host='', port=7890):\n # optionally create an index\n if index is None:\n index = {}\n for p in packages:\n index[canonicalize_name(p.name)] = p\n try:\n server = HTTPServer((host, port), 
make_request_handler(index))\n print('Started mpypi on port {}'.format(port))\n server.serve_forever()\n except KeyboardInterrupt:\n print('^C received, shutting down the web server')\n server.socket.close()\n\nif __name__ == '__main__':\n main([])\n",
"step-ids": [
5,
6,
7,
8,
10
]
}
|
[
5,
6,
7,
8,
10
] |
<|reserved_special_token_0|>
def is_feasible(weights, flow, max_weight):
"""Test whether set of guessed weights is feasible."""
min_weights = [1] + weights
max_weights = [max_weight] + list(reversed(weights))
for i in range(1, len(min_weights)):
min_weights[i] = min_weights[i] if min_weights[i] else min_weights[
i - 1]
max_weights[i] = max_weights[i] if max_weights[i] else max_weights[
i - 1]
min_weights = min_weights[1:]
max_weights = list(reversed(max_weights[1:]))
return sum(min_weights) <= flow and sum(max_weights) >= flow
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def is_feasible(weights, flow, max_weight):
"""Test whether set of guessed weights is feasible."""
min_weights = [1] + weights
max_weights = [max_weight] + list(reversed(weights))
for i in range(1, len(min_weights)):
min_weights[i] = min_weights[i] if min_weights[i] else min_weights[
i - 1]
max_weights[i] = max_weights[i] if max_weights[i] else max_weights[
i - 1]
min_weights = min_weights[1:]
max_weights = list(reversed(max_weights[1:]))
return sum(min_weights) <= flow and sum(max_weights) >= flow
def solve(instance, silent=True, max_weight_lower=1, max_weight_upper=float
('inf'), scoring='sink distance'):
"""Solve the provided instance of path-flow decomposition."""
flow = instance.flow
k = instance.k
if instance.has_bad_bounds():
return set()
if instance.k == max(len(C) for C in instance.edge_cuts):
largest_cut = max(instance.edge_cuts, key=len)
weights = list(sorted(w for _, w in largest_cut))
return solve_dp(instance, silent=True, guessed_weights=weights)
max_weight = instance.max_weight_bounds[1]
feasible_weights = list(filter(lambda w: w <= max_weight, instance.weights)
)
if not silent:
print(instance.weights, feasible_weights)
largest_free = False
smallest_free = False
if instance.max_weight_bounds[0] == instance.max_weight_bounds[1]:
largest_free = True
largest = instance.max_weight_bounds[0]
if min(instance.weights) == 1:
smallest_free = True
smallest = 1
positions = list(range(int(smallest_free), k - int(largest_free)))
for diff in range(k + 1):
if not silent:
print('Diff =', diff)
for rev_indices in itertools.combinations(reversed(positions), k - diff
):
indices = list(reversed(rev_indices))
p = len(indices)
if p == k - 1:
continue
for chosen_weights in itertools.combinations(feasible_weights, p):
weights = [None] * k
for p, w in zip(indices, chosen_weights):
weights[p] = w
if smallest_free:
weights[0] = smallest
if largest_free:
weights[k - 1] = largest
if not is_feasible(weights, flow, max_weight):
continue
if not silent:
print('Trying weights', weights)
sol = solve_dp(instance, silent=True, guessed_weights=weights)
if len(sol) > 0:
if not silent:
try:
for s in sol:
print(s, sum(s.path_weights), flow)
except AttributeError:
                            print('Underdetermined solution')
return sol
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def print_progress(iteration, total, prefix='', suffix='', decimals=1,
bar_length=100):
"""
Call in a loop to create terminal progress bar.
@params:
iteration - Required : current iteration (Int)
total - Required : total iterations (Int)
prefix - Optional : prefix string (Str)
suffix - Optional : suffix string (Str)
decimals - Optional : positive number of decimals in percent
complete (Int)
bar_length - Optional : character length of bar (Int)
"""
str_format = '{0:.' + str(decimals) + 'f}'
percents = str_format.format(100 * (iteration / float(total)))
filled_length = int(round(bar_length * iteration / float(total)))
bar = '█' * filled_length + '-' * (bar_length - filled_length)
sys.stdout.write('\r%s |%s| %s%s %s' % (prefix, bar, percents, '%', suffix)
),
if iteration == total:
sys.stdout.write('\n')
sys.stdout.flush()
def is_feasible(weights, flow, max_weight):
"""Test whether set of guessed weights is feasible."""
min_weights = [1] + weights
max_weights = [max_weight] + list(reversed(weights))
for i in range(1, len(min_weights)):
min_weights[i] = min_weights[i] if min_weights[i] else min_weights[
i - 1]
max_weights[i] = max_weights[i] if max_weights[i] else max_weights[
i - 1]
min_weights = min_weights[1:]
max_weights = list(reversed(max_weights[1:]))
return sum(min_weights) <= flow and sum(max_weights) >= flow
def solve(instance, silent=True, max_weight_lower=1, max_weight_upper=float
('inf'), scoring='sink distance'):
"""Solve the provided instance of path-flow decomposition."""
flow = instance.flow
k = instance.k
if instance.has_bad_bounds():
return set()
if instance.k == max(len(C) for C in instance.edge_cuts):
largest_cut = max(instance.edge_cuts, key=len)
weights = list(sorted(w for _, w in largest_cut))
return solve_dp(instance, silent=True, guessed_weights=weights)
max_weight = instance.max_weight_bounds[1]
feasible_weights = list(filter(lambda w: w <= max_weight, instance.weights)
)
if not silent:
print(instance.weights, feasible_weights)
largest_free = False
smallest_free = False
if instance.max_weight_bounds[0] == instance.max_weight_bounds[1]:
largest_free = True
largest = instance.max_weight_bounds[0]
if min(instance.weights) == 1:
smallest_free = True
smallest = 1
positions = list(range(int(smallest_free), k - int(largest_free)))
for diff in range(k + 1):
if not silent:
print('Diff =', diff)
for rev_indices in itertools.combinations(reversed(positions), k - diff
):
indices = list(reversed(rev_indices))
p = len(indices)
if p == k - 1:
continue
for chosen_weights in itertools.combinations(feasible_weights, p):
weights = [None] * k
for p, w in zip(indices, chosen_weights):
weights[p] = w
if smallest_free:
weights[0] = smallest
if largest_free:
weights[k - 1] = largest
if not is_feasible(weights, flow, max_weight):
continue
if not silent:
print('Trying weights', weights)
sol = solve_dp(instance, silent=True, guessed_weights=weights)
if len(sol) > 0:
if not silent:
try:
for s in sol:
print(s, sum(s.path_weights), flow)
except AttributeError:
                            print('Underdetermined solution')
return sol
<|reserved_special_token_1|>
import sys
import itertools
from toboggan.dp import solve as solve_dp
def print_progress(iteration, total, prefix='', suffix='', decimals=1,
bar_length=100):
"""
Call in a loop to create terminal progress bar.
@params:
iteration - Required : current iteration (Int)
total - Required : total iterations (Int)
prefix - Optional : prefix string (Str)
suffix - Optional : suffix string (Str)
decimals - Optional : positive number of decimals in percent
complete (Int)
bar_length - Optional : character length of bar (Int)
"""
str_format = '{0:.' + str(decimals) + 'f}'
percents = str_format.format(100 * (iteration / float(total)))
filled_length = int(round(bar_length * iteration / float(total)))
bar = '█' * filled_length + '-' * (bar_length - filled_length)
sys.stdout.write('\r%s |%s| %s%s %s' % (prefix, bar, percents, '%', suffix)
),
if iteration == total:
sys.stdout.write('\n')
sys.stdout.flush()
def is_feasible(weights, flow, max_weight):
"""Test whether set of guessed weights is feasible."""
min_weights = [1] + weights
max_weights = [max_weight] + list(reversed(weights))
for i in range(1, len(min_weights)):
min_weights[i] = min_weights[i] if min_weights[i] else min_weights[
i - 1]
max_weights[i] = max_weights[i] if max_weights[i] else max_weights[
i - 1]
min_weights = min_weights[1:]
max_weights = list(reversed(max_weights[1:]))
return sum(min_weights) <= flow and sum(max_weights) >= flow
def solve(instance, silent=True, max_weight_lower=1, max_weight_upper=float
('inf'), scoring='sink distance'):
"""Solve the provided instance of path-flow decomposition."""
flow = instance.flow
k = instance.k
if instance.has_bad_bounds():
return set()
if instance.k == max(len(C) for C in instance.edge_cuts):
largest_cut = max(instance.edge_cuts, key=len)
weights = list(sorted(w for _, w in largest_cut))
return solve_dp(instance, silent=True, guessed_weights=weights)
max_weight = instance.max_weight_bounds[1]
feasible_weights = list(filter(lambda w: w <= max_weight, instance.weights)
)
if not silent:
print(instance.weights, feasible_weights)
largest_free = False
smallest_free = False
if instance.max_weight_bounds[0] == instance.max_weight_bounds[1]:
largest_free = True
largest = instance.max_weight_bounds[0]
if min(instance.weights) == 1:
smallest_free = True
smallest = 1
positions = list(range(int(smallest_free), k - int(largest_free)))
for diff in range(k + 1):
if not silent:
print('Diff =', diff)
for rev_indices in itertools.combinations(reversed(positions), k - diff
):
indices = list(reversed(rev_indices))
p = len(indices)
if p == k - 1:
continue
for chosen_weights in itertools.combinations(feasible_weights, p):
weights = [None] * k
for p, w in zip(indices, chosen_weights):
weights[p] = w
if smallest_free:
weights[0] = smallest
if largest_free:
weights[k - 1] = largest
if not is_feasible(weights, flow, max_weight):
continue
if not silent:
print('Trying weights', weights)
sol = solve_dp(instance, silent=True, guessed_weights=weights)
if len(sol) > 0:
if not silent:
try:
for s in sol:
print(s, sum(s.path_weights), flow)
except AttributeError:
                            print('Underdetermined solution')
return sol
<|reserved_special_token_1|>
#! /usr/bin/env python3
#
# This file is part of Toboggan, https://github.com/TheoryInPractice/Toboggan/,
# and is Copyright (C) North Carolina State University, 2017. It is licensed
# under the three-clause BSD license; see LICENSE.
#
# -*- coding: utf-8 -*-
# python libs
import sys
import itertools
# local imports
from toboggan.dp import solve as solve_dp
# Print iterations progress
def print_progress(iteration, total, prefix='', suffix='', decimals=1,
bar_length=100):
"""
Call in a loop to create terminal progress bar.
@params:
iteration - Required : current iteration (Int)
total - Required : total iterations (Int)
prefix - Optional : prefix string (Str)
suffix - Optional : suffix string (Str)
decimals - Optional : positive number of decimals in percent
complete (Int)
bar_length - Optional : character length of bar (Int)
"""
str_format = "{0:." + str(decimals) + "f}"
percents = str_format.format(100 * (iteration / float(total)))
filled_length = int(round(bar_length * iteration / float(total)))
bar = '█' * filled_length + '-' * (bar_length - filled_length)
sys.stdout.write('\r%s |%s| %s%s %s' % (prefix, bar, percents, '%',
suffix)),
if iteration == total:
sys.stdout.write('\n')
sys.stdout.flush()
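
# Usage sketch for print_progress (it is not called elsewhere in this
# module); the totals and labels below are illustrative values only:
#
#     for i in range(101):
#         print_progress(i, 100, prefix='Progress:', suffix='Complete',
#                        bar_length=50)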
def is_feasible(weights, flow, max_weight):
"""Test whether set of guessed weights is feasible."""
    # In the following, we replace every occurrence of 'None' in the
    # weight array by the minimum/maximum possible value (given by the
    # last/first non-None value next to it).
min_weights = [1] + weights
max_weights = [max_weight] + list(reversed(weights))
for i in range(1, len(min_weights)):
min_weights[i] = min_weights[i] if min_weights[i] else min_weights[i-1]
max_weights[i] = max_weights[i] if max_weights[i] else max_weights[i-1]
min_weights = min_weights[1:]
max_weights = list(reversed(max_weights[1:]))
# If the flow value lies outside of the sum-of-weight estimates,
# the current guessed set of weights is infeasible.
return sum(min_weights) <= flow and sum(max_weights) >= flow
def solve(instance, silent=True, max_weight_lower=1,
max_weight_upper=float('inf'), scoring="sink distance"):
"""Solve the provided instance of path-flow decomposition."""
flow = instance.flow
k = instance.k
# quit right away if the instance has weight bounds that can't be satisfied
if instance.has_bad_bounds():
return set()
# if k equals the size of the largest edge cut, the weights are
# predetermined
if instance.k == max(len(C) for C in instance.edge_cuts):
largest_cut = max(instance.edge_cuts, key=len)
# Important: path weights must be sorted, otherwise our
# subsequent optimizations will remove this constraint.
weights = list(sorted(w for _, w in largest_cut))
return solve_dp(instance, silent=True, guessed_weights=weights)
max_weight = instance.max_weight_bounds[1]
feasible_weights = list(filter(lambda w: w <= max_weight,
instance.weights))
if not silent:
print(instance.weights, feasible_weights)
# figure out whether we get the first or last positions for free
largest_free = False
smallest_free = False
# check largest weight first
if instance.max_weight_bounds[0] == instance.max_weight_bounds[1]:
largest_free = True
largest = instance.max_weight_bounds[0]
if min(instance.weights) == 1:
smallest_free = True
smallest = 1
positions = list(range(int(smallest_free), k-int(largest_free)))
# iterate over the number of unguessed weights
for diff in range(k+1):
if not silent:
print("Diff =", diff)
# iterate over positions of guessed weights. We want them to be
# ordered, but choose the smallest first to be removed
for rev_indices in itertools.combinations(reversed(positions), k-diff):
indices = list(reversed(rev_indices))
p = len(indices)
# when k-1 values are determined, it also determines the kth value
if p == k-1:
continue
# iterate over choices for those guessed weights
for chosen_weights in itertools.combinations(feasible_weights, p):
weights = [None] * k
# assign the chosen weights to the guessed positions
for p, w in zip(indices, chosen_weights):
weights[p] = w
# add in free values
if smallest_free:
weights[0] = smallest
if largest_free:
weights[k-1] = largest
# quit if this didn't work
if not is_feasible(weights, flow, max_weight):
continue
if not silent:
print("Trying weights", weights)
sol = solve_dp(instance, silent=True, guessed_weights=weights)
if len(sol) > 0:
if not silent:
try:
for s in sol:
print(s, sum(s.path_weights), flow)
except AttributeError:
print("Unterdetermined solution")
return sol
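# Illustration (not part of the original module): the expected call pattern.
# Here 'instance' is a hypothetical toboggan instance object carrying the
# attributes used above (flow, k, edge_cuts, weights, max_weight_bounds and
# has_bad_bounds()); solve() returns a possibly empty collection of
# solutions whose path_weights sum to the flow.
#
#   >>> sol = solve(instance, silent=False)
#   >>> for s in sol:
#   ...     print(s, sum(s.path_weights))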
|
flexible
|
{
"blob_id": "1b4c9841fd10d065983974e93fe5dcbe048c1281",
"index": 4180,
"step-1": "<mask token>\n\n\ndef is_feasible(weights, flow, max_weight):\n \"\"\"Test whether set of guessed weights is feasible.\"\"\"\n min_weights = [1] + weights\n max_weights = [max_weight] + list(reversed(weights))\n for i in range(1, len(min_weights)):\n min_weights[i] = min_weights[i] if min_weights[i] else min_weights[\n i - 1]\n max_weights[i] = max_weights[i] if max_weights[i] else max_weights[\n i - 1]\n min_weights = min_weights[1:]\n max_weights = list(reversed(max_weights[1:]))\n return sum(min_weights) <= flow and sum(max_weights) >= flow\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef is_feasible(weights, flow, max_weight):\n \"\"\"Test whether set of guessed weights is feasible.\"\"\"\n min_weights = [1] + weights\n max_weights = [max_weight] + list(reversed(weights))\n for i in range(1, len(min_weights)):\n min_weights[i] = min_weights[i] if min_weights[i] else min_weights[\n i - 1]\n max_weights[i] = max_weights[i] if max_weights[i] else max_weights[\n i - 1]\n min_weights = min_weights[1:]\n max_weights = list(reversed(max_weights[1:]))\n return sum(min_weights) <= flow and sum(max_weights) >= flow\n\n\ndef solve(instance, silent=True, max_weight_lower=1, max_weight_upper=float\n ('inf'), scoring='sink distance'):\n \"\"\"Solve the provided instance of path-flow decomposition.\"\"\"\n flow = instance.flow\n k = instance.k\n if instance.has_bad_bounds():\n return set()\n if instance.k == max(len(C) for C in instance.edge_cuts):\n largest_cut = max(instance.edge_cuts, key=len)\n weights = list(sorted(w for _, w in largest_cut))\n return solve_dp(instance, silent=True, guessed_weights=weights)\n max_weight = instance.max_weight_bounds[1]\n feasible_weights = list(filter(lambda w: w <= max_weight, instance.weights)\n )\n if not silent:\n print(instance.weights, feasible_weights)\n largest_free = False\n smallest_free = False\n if instance.max_weight_bounds[0] == instance.max_weight_bounds[1]:\n largest_free = True\n largest = instance.max_weight_bounds[0]\n if min(instance.weights) == 1:\n smallest_free = True\n smallest = 1\n positions = list(range(int(smallest_free), k - int(largest_free)))\n for diff in range(k + 1):\n if not silent:\n print('Diff =', diff)\n for rev_indices in itertools.combinations(reversed(positions), k - diff\n ):\n indices = list(reversed(rev_indices))\n p = len(indices)\n if p == k - 1:\n continue\n for chosen_weights in itertools.combinations(feasible_weights, p):\n weights = [None] * k\n for p, w in zip(indices, chosen_weights):\n weights[p] = w\n if smallest_free:\n weights[0] = smallest\n if largest_free:\n weights[k - 1] = largest\n if not is_feasible(weights, flow, max_weight):\n continue\n if not silent:\n print('Trying weights', weights)\n sol = solve_dp(instance, silent=True, guessed_weights=weights)\n if len(sol) > 0:\n if not silent:\n try:\n for s in sol:\n print(s, sum(s.path_weights), flow)\n except AttributeError:\n print('Unterdetermined solution')\n return sol\n",
"step-3": "<mask token>\n\n\ndef print_progress(iteration, total, prefix='', suffix='', decimals=1,\n bar_length=100):\n \"\"\"\n Call in a loop to create terminal progress bar.\n\n @params:\n iteration - Required : current iteration (Int)\n total - Required : total iterations (Int)\n prefix - Optional : prefix string (Str)\n suffix - Optional : suffix string (Str)\n decimals - Optional : positive number of decimals in percent\n complete (Int)\n bar_length - Optional : character length of bar (Int)\n \"\"\"\n str_format = '{0:.' + str(decimals) + 'f}'\n percents = str_format.format(100 * (iteration / float(total)))\n filled_length = int(round(bar_length * iteration / float(total)))\n bar = '█' * filled_length + '-' * (bar_length - filled_length)\n sys.stdout.write('\\r%s |%s| %s%s %s' % (prefix, bar, percents, '%', suffix)\n ),\n if iteration == total:\n sys.stdout.write('\\n')\n sys.stdout.flush()\n\n\ndef is_feasible(weights, flow, max_weight):\n \"\"\"Test whether set of guessed weights is feasible.\"\"\"\n min_weights = [1] + weights\n max_weights = [max_weight] + list(reversed(weights))\n for i in range(1, len(min_weights)):\n min_weights[i] = min_weights[i] if min_weights[i] else min_weights[\n i - 1]\n max_weights[i] = max_weights[i] if max_weights[i] else max_weights[\n i - 1]\n min_weights = min_weights[1:]\n max_weights = list(reversed(max_weights[1:]))\n return sum(min_weights) <= flow and sum(max_weights) >= flow\n\n\ndef solve(instance, silent=True, max_weight_lower=1, max_weight_upper=float\n ('inf'), scoring='sink distance'):\n \"\"\"Solve the provided instance of path-flow decomposition.\"\"\"\n flow = instance.flow\n k = instance.k\n if instance.has_bad_bounds():\n return set()\n if instance.k == max(len(C) for C in instance.edge_cuts):\n largest_cut = max(instance.edge_cuts, key=len)\n weights = list(sorted(w for _, w in largest_cut))\n return solve_dp(instance, silent=True, guessed_weights=weights)\n max_weight = instance.max_weight_bounds[1]\n feasible_weights = list(filter(lambda w: w <= max_weight, instance.weights)\n )\n if not silent:\n print(instance.weights, feasible_weights)\n largest_free = False\n smallest_free = False\n if instance.max_weight_bounds[0] == instance.max_weight_bounds[1]:\n largest_free = True\n largest = instance.max_weight_bounds[0]\n if min(instance.weights) == 1:\n smallest_free = True\n smallest = 1\n positions = list(range(int(smallest_free), k - int(largest_free)))\n for diff in range(k + 1):\n if not silent:\n print('Diff =', diff)\n for rev_indices in itertools.combinations(reversed(positions), k - diff\n ):\n indices = list(reversed(rev_indices))\n p = len(indices)\n if p == k - 1:\n continue\n for chosen_weights in itertools.combinations(feasible_weights, p):\n weights = [None] * k\n for p, w in zip(indices, chosen_weights):\n weights[p] = w\n if smallest_free:\n weights[0] = smallest\n if largest_free:\n weights[k - 1] = largest\n if not is_feasible(weights, flow, max_weight):\n continue\n if not silent:\n print('Trying weights', weights)\n sol = solve_dp(instance, silent=True, guessed_weights=weights)\n if len(sol) > 0:\n if not silent:\n try:\n for s in sol:\n print(s, sum(s.path_weights), flow)\n except AttributeError:\n print('Unterdetermined solution')\n return sol\n",
"step-4": "import sys\nimport itertools\nfrom toboggan.dp import solve as solve_dp\n\n\ndef print_progress(iteration, total, prefix='', suffix='', decimals=1,\n bar_length=100):\n \"\"\"\n Call in a loop to create terminal progress bar.\n\n @params:\n iteration - Required : current iteration (Int)\n total - Required : total iterations (Int)\n prefix - Optional : prefix string (Str)\n suffix - Optional : suffix string (Str)\n decimals - Optional : positive number of decimals in percent\n complete (Int)\n bar_length - Optional : character length of bar (Int)\n \"\"\"\n str_format = '{0:.' + str(decimals) + 'f}'\n percents = str_format.format(100 * (iteration / float(total)))\n filled_length = int(round(bar_length * iteration / float(total)))\n bar = '█' * filled_length + '-' * (bar_length - filled_length)\n sys.stdout.write('\\r%s |%s| %s%s %s' % (prefix, bar, percents, '%', suffix)\n ),\n if iteration == total:\n sys.stdout.write('\\n')\n sys.stdout.flush()\n\n\ndef is_feasible(weights, flow, max_weight):\n \"\"\"Test whether set of guessed weights is feasible.\"\"\"\n min_weights = [1] + weights\n max_weights = [max_weight] + list(reversed(weights))\n for i in range(1, len(min_weights)):\n min_weights[i] = min_weights[i] if min_weights[i] else min_weights[\n i - 1]\n max_weights[i] = max_weights[i] if max_weights[i] else max_weights[\n i - 1]\n min_weights = min_weights[1:]\n max_weights = list(reversed(max_weights[1:]))\n return sum(min_weights) <= flow and sum(max_weights) >= flow\n\n\ndef solve(instance, silent=True, max_weight_lower=1, max_weight_upper=float\n ('inf'), scoring='sink distance'):\n \"\"\"Solve the provided instance of path-flow decomposition.\"\"\"\n flow = instance.flow\n k = instance.k\n if instance.has_bad_bounds():\n return set()\n if instance.k == max(len(C) for C in instance.edge_cuts):\n largest_cut = max(instance.edge_cuts, key=len)\n weights = list(sorted(w for _, w in largest_cut))\n return solve_dp(instance, silent=True, guessed_weights=weights)\n max_weight = instance.max_weight_bounds[1]\n feasible_weights = list(filter(lambda w: w <= max_weight, instance.weights)\n )\n if not silent:\n print(instance.weights, feasible_weights)\n largest_free = False\n smallest_free = False\n if instance.max_weight_bounds[0] == instance.max_weight_bounds[1]:\n largest_free = True\n largest = instance.max_weight_bounds[0]\n if min(instance.weights) == 1:\n smallest_free = True\n smallest = 1\n positions = list(range(int(smallest_free), k - int(largest_free)))\n for diff in range(k + 1):\n if not silent:\n print('Diff =', diff)\n for rev_indices in itertools.combinations(reversed(positions), k - diff\n ):\n indices = list(reversed(rev_indices))\n p = len(indices)\n if p == k - 1:\n continue\n for chosen_weights in itertools.combinations(feasible_weights, p):\n weights = [None] * k\n for p, w in zip(indices, chosen_weights):\n weights[p] = w\n if smallest_free:\n weights[0] = smallest\n if largest_free:\n weights[k - 1] = largest\n if not is_feasible(weights, flow, max_weight):\n continue\n if not silent:\n print('Trying weights', weights)\n sol = solve_dp(instance, silent=True, guessed_weights=weights)\n if len(sol) > 0:\n if not silent:\n try:\n for s in sol:\n print(s, sum(s.path_weights), flow)\n except AttributeError:\n print('Unterdetermined solution')\n return sol\n",
"step-5": "#! /usr/bin/env python3\n#\n# This file is part of Toboggan, https://github.com/TheoryInPractice/Toboggan/,\n# and is Copyright (C) North Carolina State University, 2017. It is licensed\n# under the three-clause BSD license; see LICENSE.\n#\n# -*- coding: utf-8 -*-\n# python libs\nimport sys\nimport itertools\n# local imports\nfrom toboggan.dp import solve as solve_dp\n\n\n# Print iterations progress\ndef print_progress(iteration, total, prefix='', suffix='', decimals=1,\n bar_length=100):\n \"\"\"\n Call in a loop to create terminal progress bar.\n\n @params:\n iteration - Required : current iteration (Int)\n total - Required : total iterations (Int)\n prefix - Optional : prefix string (Str)\n suffix - Optional : suffix string (Str)\n decimals - Optional : positive number of decimals in percent\n complete (Int)\n bar_length - Optional : character length of bar (Int)\n \"\"\"\n str_format = \"{0:.\" + str(decimals) + \"f}\"\n percents = str_format.format(100 * (iteration / float(total)))\n filled_length = int(round(bar_length * iteration / float(total)))\n bar = '█' * filled_length + '-' * (bar_length - filled_length)\n\n sys.stdout.write('\\r%s |%s| %s%s %s' % (prefix, bar, percents, '%',\n suffix)),\n\n if iteration == total:\n sys.stdout.write('\\n')\n sys.stdout.flush()\n\n\ndef is_feasible(weights, flow, max_weight):\n \"\"\"Test whether set of guessed weights is feasible.\"\"\"\n # In the following, we replace very occurenve of 'None' in the\n # weight-array by the minimum/maximum possible value (given by the\n # last/the first\n # non-None value next to it).\n min_weights = [1] + weights\n max_weights = [max_weight] + list(reversed(weights))\n for i in range(1, len(min_weights)):\n min_weights[i] = min_weights[i] if min_weights[i] else min_weights[i-1]\n max_weights[i] = max_weights[i] if max_weights[i] else max_weights[i-1]\n min_weights = min_weights[1:]\n max_weights = list(reversed(max_weights[1:]))\n\n # If the flow value lies outside of the sum-of-weight estimates,\n # the current guessed set of weights is infeasible.\n return sum(min_weights) <= flow and sum(max_weights) >= flow\n\n\ndef solve(instance, silent=True, max_weight_lower=1,\n max_weight_upper=float('inf'), scoring=\"sink distance\"):\n \"\"\"Solve the provided instance of path-flow decomposition.\"\"\"\n flow = instance.flow\n k = instance.k\n\n # quit right away if the instance has weight bounds that can't be satisfied\n if instance.has_bad_bounds():\n return set()\n\n # if k equals the size of the largest edge cut, the weights are\n # predetermined\n if instance.k == max(len(C) for C in instance.edge_cuts):\n largest_cut = max(instance.edge_cuts, key=len)\n # Important: path weights must be sorted, otherwise our\n # subsequent optimizations will remove this constraint.\n weights = list(sorted(w for _, w in largest_cut))\n return solve_dp(instance, silent=True, guessed_weights=weights)\n\n max_weight = instance.max_weight_bounds[1]\n feasible_weights = list(filter(lambda w: w <= max_weight,\n instance.weights))\n\n if not silent:\n print(instance.weights, feasible_weights)\n\n # figure out whether we get the first or last positions for free\n largest_free = False\n smallest_free = False\n # check largest weight first\n if instance.max_weight_bounds[0] == instance.max_weight_bounds[1]:\n largest_free = True\n largest = instance.max_weight_bounds[0]\n if min(instance.weights) == 1:\n smallest_free = True\n smallest = 1\n\n positions = list(range(int(smallest_free), k-int(largest_free)))\n\n # iterate 
over the number of unguessed weights\n for diff in range(k+1):\n if not silent:\n print(\"Diff =\", diff)\n # iterate over positions of guessed weights. We want them to be\n # ordered, but choose the smallest first to be removed\n for rev_indices in itertools.combinations(reversed(positions), k-diff):\n indices = list(reversed(rev_indices))\n p = len(indices)\n # when k-1 values are determined, it also determines the kth value\n if p == k-1:\n continue\n # iterate over choices for those guessed weights\n for chosen_weights in itertools.combinations(feasible_weights, p):\n weights = [None] * k\n\n # assign the chosen weights to the guessed positions\n for p, w in zip(indices, chosen_weights):\n weights[p] = w\n\n # add in free values\n if smallest_free:\n weights[0] = smallest\n if largest_free:\n weights[k-1] = largest\n\n # quit if this didn't work\n if not is_feasible(weights, flow, max_weight):\n continue\n\n if not silent:\n print(\"Trying weights\", weights)\n sol = solve_dp(instance, silent=True, guessed_weights=weights)\n if len(sol) > 0:\n if not silent:\n try:\n for s in sol:\n print(s, sum(s.path_weights), flow)\n except AttributeError:\n print(\"Unterdetermined solution\")\n return sol\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
# -*- coding: utf-8 -*-
u"""Hellweg execution template.
:copyright: Copyright (c) 2017 RadiaSoft LLC. All Rights Reserved.
:license: http://www.apache.org/licenses/LICENSE-2.0.html
"""
from __future__ import absolute_import, division, print_function
from pykern import pkcollections
from pykern import pkio
from pykern.pkdebug import pkdc, pkdp
from rslinac import solver
from sirepo import simulation_db
from sirepo.template import template_common, hellweg_dump_reader
import math
import numpy as np
import os.path
import py.path
import re
HELLWEG_DUMP_FILE = 'all-data.bin'
HELLWEG_SUMMARY_FILE = 'output.txt'
HELLWEG_INI_FILE = 'defaults.ini'
HELLWEG_INPUT_FILE = 'input.txt'
#: Simulation type
SIM_TYPE = 'hellweg'
WANT_BROWSER_FRAME_CACHE = True
# lattice element is required so make it very short and wide drift
_DEFAULT_DRIFT_ELEMENT = 'DRIFT 1e-16 1e+16 2' + "\n"
_HELLWEG_PARSED_FILE = 'PARSED.TXT'
_REPORT_STYLE_FIELDS = ['colorMap', 'notes']
_SCHEMA = simulation_db.get_schema(SIM_TYPE)
def background_percent_complete(report, run_dir, is_running):
if is_running:
return {
'percentComplete': 0,
'frameCount': 0,
}
dump_file = _dump_file(run_dir)
if os.path.exists(dump_file):
beam_header = hellweg_dump_reader.beam_header(dump_file)
last_update_time = int(os.path.getmtime(dump_file))
frame_count = beam_header.NPoints
return {
'lastUpdateTime': last_update_time,
'percentComplete': 100,
'frameCount': frame_count,
'summaryData': _summary_text(run_dir),
}
return {
'percentComplete': 100,
'frameCount': 0,
'error': _parse_error_message(run_dir)
}
def extract_beam_histrogram(report, run_dir, frame):
beam_info = hellweg_dump_reader.beam_info(_dump_file(run_dir), frame)
points = hellweg_dump_reader.get_points(beam_info, report.reportType)
hist, edges = np.histogram(points, template_common.histogram_bins(report.histogramBins))
return {
'title': _report_title(report.reportType, 'BeamHistogramReportType', beam_info),
'x_range': [edges[0], edges[-1]],
'y_label': 'Number of Particles',
'x_label': hellweg_dump_reader.get_label(report.reportType),
'points': hist.T.tolist(),
}
def extract_beam_report(report, run_dir, frame):
data = simulation_db.read_json(run_dir.join(template_common.INPUT_BASE_NAME))
model = data.models.beamAnimation
model.update(report)
beam_info = hellweg_dump_reader.beam_info(_dump_file(run_dir), frame)
x, y = report.reportType.split('-')
values = [
hellweg_dump_reader.get_points(beam_info, x),
hellweg_dump_reader.get_points(beam_info, y),
]
model['x'] = x
model['y'] = y
return template_common.heatmap(values, model, {
'x_label': hellweg_dump_reader.get_label(x),
'y_label': hellweg_dump_reader.get_label(y),
'title': _report_title(report.reportType, 'BeamReportType', beam_info),
'z_label': 'Number of Particles',
'summaryData': _summary_text(run_dir),
})
def extract_parameter_report(report, run_dir):
s = solver.BeamSolver(
os.path.join(str(run_dir), HELLWEG_INI_FILE),
os.path.join(str(run_dir), HELLWEG_INPUT_FILE))
s.load_bin(os.path.join(str(run_dir), HELLWEG_DUMP_FILE))
y1_var, y2_var = report.reportType.split('-')
x_field = 'z'
x = s.get_structure_parameters(_parameter_index(x_field))
y1 = s.get_structure_parameters(_parameter_index(y1_var))
y1_extent = [np.min(y1), np.max(y1)]
y2 = s.get_structure_parameters(_parameter_index(y2_var))
y2_extent = [np.min(y2), np.max(y2)]
return {
'title': _enum_text('ParameterReportType', report.reportType),
'x_range': [x[0], x[-1]],
'y_label': hellweg_dump_reader.get_parameter_label(y1_var),
'x_label': hellweg_dump_reader.get_parameter_label(x_field),
'x_points': x,
'points': [
y1,
y2,
],
'y_range': [min(y1_extent[0], y2_extent[0]), max(y1_extent[1], y2_extent[1])],
'y1_title': hellweg_dump_reader.get_parameter_title(y1_var),
'y2_title': hellweg_dump_reader.get_parameter_title(y2_var),
}
def extract_particle_report(report, run_dir):
x_field = 'z0'
particle_info = hellweg_dump_reader.particle_info(_dump_file(run_dir), report.reportType, int(report.renderCount))
x = particle_info['z_values']
return {
'title': _enum_text('ParticleReportType', report.reportType),
'x_range': [np.min(x), np.max(x)],
'y_label': hellweg_dump_reader.get_label(report.reportType),
'x_label': hellweg_dump_reader.get_label(x_field),
'x_points': x,
'points': particle_info['y_values'],
'y_range': particle_info['y_range'],
}
def fixup_old_data(data):
for m in ('beamAnimation', 'beamHistogramAnimation', 'parameterAnimation', 'particleAnimation'):
if m not in data.models:
data.models[m] = pkcollections.Dict({})
template_common.update_model_defaults(data.models[m], m, _SCHEMA)
if 'solenoidFile' not in data['models']['solenoid']:
data['models']['solenoid']['solenoidFile'] = ''
if 'beamDefinition' not in data['models']['beam']:
beam = data['models']['beam']
beam['beamDefinition'] = 'transverse_longitude'
beam['cstCompress'] = '0'
beam['transversalFile2d'] = ''
beam['transversalFile4d'] = ''
beam['longitudinalFile1d'] = ''
beam['longitudinalFile2d'] = ''
beam['cstFile'] = ''
template_common.organize_example(data)
def get_animation_name(data):
return 'animation'
def get_application_data(data):
if data['method'] == 'compute_particle_ranges':
return template_common.compute_field_range(data, _compute_range_across_files)
assert False, 'unknown application data method: {}'.format(data['method'])
def lib_files(data, source_lib):
return template_common.filename_to_path(_simulation_files(data), source_lib)
def get_simulation_frame(run_dir, data, model_data):
frame_index = int(data['frameIndex'])
if data['modelName'] == 'beamAnimation':
args = template_common.parse_animation_args(
data,
{
'1': ['reportType', 'histogramBins', 'startTime'],
'': ['reportType', 'histogramBins', 'plotRangeType', 'horizontalSize', 'horizontalOffset', 'verticalSize', 'verticalOffset', 'isRunning', 'startTime'],
},
)
return extract_beam_report(args, run_dir, frame_index)
elif data['modelName'] == 'beamHistogramAnimation':
args = template_common.parse_animation_args(
data,
{'': ['reportType', 'histogramBins', 'startTime']},
)
return extract_beam_histrogram(args, run_dir, frame_index)
elif data['modelName'] == 'particleAnimation':
args = template_common.parse_animation_args(
data,
{'': ['reportType', 'renderCount', 'startTime']},
)
return extract_particle_report(args, run_dir)
elif data['modelName'] == 'parameterAnimation':
args = template_common.parse_animation_args(
data,
{'': ['reportType', 'startTime']},
)
return extract_parameter_report(args, run_dir)
raise RuntimeError('unknown animation model: {}'.format(data['modelName']))
def models_related_to_report(data):
"""What models are required for this data['report']
Args:
data (dict): simulation
Returns:
list: Named models, model fields or values (dict, list) that affect report
"""
r = data['report']
if r == 'animation':
return []
res = template_common.report_fields(data, r, _REPORT_STYLE_FIELDS) + [
'beam',
'ellipticalDistribution',
'energyPhaseDistribution',
'solenoid',
'sphericalDistribution',
'twissDistribution',
]
for f in template_common.lib_files(data):
res.append(f.mtime())
return res
def python_source_for_model(data, model):
return '''
from rslinac import solver
{}
with open('input.txt', 'w') as f:
f.write(input_file)
with open('defaults.ini', 'w') as f:
f.write(ini_file)
s = solver.BeamSolver('defaults.ini', 'input.txt')
s.solve()
s.save_output('output.txt')
'''.format(_generate_parameters_file(data, is_parallel=len(data.models.beamline)))
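# Illustration (not part of the original module): the returned string is a
# complete standalone script. The '{}' placeholder is filled with the
# generated parameters file, which defines the input_file and ini_file
# variables the script writes out before invoking the solver.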
def remove_last_frame(run_dir):
pass
def validate_delete_file(data, filename, file_type):
"""Returns True if the filename is in use by the simulation data."""
return filename in _simulation_files(data)
def write_parameters(data, run_dir, is_parallel):
"""Write the parameters file
Args:
data (dict): input
run_dir (py.path): where to write
is_parallel (bool): run in background?
"""
pkio.write_text(
run_dir.join(template_common.PARAMETERS_PYTHON_FILE),
_generate_parameters_file(
data,
run_dir,
is_parallel,
),
)
def _compute_range_across_files(run_dir, data):
res = {}
for v in _SCHEMA.enum.BeamReportType:
x, y = v[0].split('-')
res[x] = []
res[y] = []
dump_file = _dump_file(run_dir)
if not os.path.exists(dump_file):
return res
beam_header = hellweg_dump_reader.beam_header(dump_file)
for frame in xrange(beam_header.NPoints):
beam_info = hellweg_dump_reader.beam_info(dump_file, frame)
for field in res:
values = hellweg_dump_reader.get_points(beam_info, field)
if not len(values):
pass
elif len(res[field]):
res[field][0] = min(min(values), res[field][0])
res[field][1] = max(max(values), res[field][1])
else:
res[field] = [min(values), max(values)]
return res
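# Illustration (hypothetical values): the result maps each field appearing in
# a BeamReportType pair, e.g. 'x' from 'x-y', to its [min, max] across all
# dump frames, such as {'x': [-0.5, 0.5], 'y': [-0.4, 0.4]}. Fields with no
# recorded points keep their initial empty list.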
def _dump_file(run_dir):
return os.path.join(str(run_dir), HELLWEG_DUMP_FILE)
def _enum_text(enum_name, v):
enum_values = _SCHEMA['enum'][enum_name]
for e in enum_values:
if e[0] == v:
return e[1]
raise RuntimeError('invalid enum value: {}, {}'.format(enum_values, v))
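# Illustration (hypothetical enum contents): for a schema entry such as
# [['x-y', 'X-Y Phase Space'], ...], _enum_text('BeamReportType', 'x-y')
# returns the display label 'X-Y Phase Space'; unknown values raise
# RuntimeError.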
def _generate_beam(models):
# BEAM SPH2D 0.564 -15 5 NORM2D 0.30 0.0000001 90 180
beam_def = models.beam.beamDefinition
if beam_def == 'transverse_longitude':
return 'BEAM {} {}'.format(_generate_transverse_dist(models), _generate_longitude_dist(models))
if beam_def == 'cst_pit':
return 'BEAM CST_PIT {} {}'.format(
template_common.lib_file_name('beam', 'cstFile', models.beam.cstFile),
'COMPRESS' if models.beam.cstCompress else '',
)
if beam_def == 'cst_pid':
return 'BEAM CST_PID {} {}'.format(
template_common.lib_file_name('beam', 'cstFile', models.beam.cstFile),
_generate_energy_phase_distribution(models.energyPhaseDistribution),
)
raise RuntimeError('invalid beam def: {}'.format(beam_def))
def _generate_cell_params(el):
#TODO(pjm): add an option field to select auto-calculate
if el.attenuation == 0 and el.aperture == 0:
return '{} {} {}'.format(el.phaseAdvance, el.phaseVelocity, el.acceleratingInvariant)
return '{} {} {} {} {}'.format(el.phaseAdvance, el.phaseVelocity, el.acceleratingInvariant, el.attenuation, el.aperture)
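# Illustration (hypothetical element values): the caller prefixes the result
# with CELL or CELLS; with attenuation and aperture both zero they are left
# to be auto-calculated and the short form is used:
#
#   CELL 120 0.999 0.012
#   CELL 120 0.999 0.012 0.5 1.2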
def _generate_charge(models):
if models.beam.spaceCharge == 'none':
return ''
return 'SPCHARGE {} {}'.format(models.beam.spaceCharge.upper(), models.beam.spaceChargeCore)
def _generate_current(models):
return 'CURRENT {} {}'.format(models.beam.current, models.beam.numberOfParticles)
def _generate_energy_phase_distribution(dist):
return '{} {} {}'.format(
dist.meanPhase,
dist.phaseLength,
dist.phaseDeviation if dist.distributionType == 'gaussian' else '',
)
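# Illustration (hypothetical values): a gaussian distribution renders as e.g.
# '90 180 5', while a uniform one omits the deviation, leaving '90 180 '
# (the deviation slot empty).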
def _generate_lattice(models):
res = ''
for el in models.beamline:
if el.type == 'powerElement':
res += 'POWER {} {} {}'.format(el.inputPower, el.frequency, el.phaseShift)
elif el.type == 'cellElement':
res += 'CELL {}'.format(_generate_cell_params(el))
has_cell_or_drift = True
elif el.type == 'cellsElement':
res += 'CELLS {} {}'.format(el.repeat, _generate_cell_params(el))
has_cell_or_drift = True
elif el.type == 'driftElement':
res += 'DRIFT {} {} {}'.format(el.length, el.radius, el.meshPoints)
has_cell_or_drift = True
elif el.type == 'saveElement':
#TODO(pjm): implement this
pass
else:
raise RuntimeError('unknown element type: {}'.format(el.type))
res += "\n"
return res
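# Illustration (hypothetical beamline): a power element followed by a cells
# block and a drift renders one command per line, e.g.
#
#   POWER 2.5 2856 0
#   CELLS 10 120 0.999 0.012
#   DRIFT 0.1 2 20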
def _generate_longitude_dist(models):
dist_type = models.beam.longitudinalDistribution
if dist_type == 'norm2d':
dist = models.energyPhaseDistribution
if dist.distributionType == 'uniform':
return 'NORM2D {} {} {} {}'.format(
dist.meanEnergy, dist.energySpread, dist.meanPhase, dist.phaseLength)
if dist.distributionType == 'gaussian':
return 'NORM2D {} {} {} {} {} {}'.format(
dist.meanEnergy, dist.energySpread, dist.energyDeviation, dist.meanPhase, dist.phaseLength, dist.phaseDeviation)
        raise RuntimeError('unknown longitudinal distribution type: {}'.format(dist.distributionType))
if dist_type == 'file1d':
return 'FILE1D {} {}'.format(
template_common.lib_file_name('beam', 'longitudinalFile1d', models.beam.longitudinalFile1d),
_generate_energy_phase_distribution(models.energyPhaseDistribution),
)
if dist_type == 'file2d':
        return 'FILE2D {}'.format(template_common.lib_file_name('beam', 'longitudinalFile2d', models.beam.longitudinalFile2d))
raise RuntimeError('unknown longitudinal distribution: {}'.format(models.beam.longitudinalDistribution))
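# Illustration (hypothetical values): a uniform norm2d distribution renders
# as 'NORM2D 1.5 0.1 90 180'; the gaussian variant inserts the energy and
# phase deviations, e.g. 'NORM2D 1.5 0.1 0.05 90 180 5'.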
def _generate_options(models):
if models.simulationSettings.allowBackwardWaves == '1':
return 'OPTIONS REVERSE'
return ''
def _generate_parameters_file(data, run_dir=None, is_parallel=False):
template_common.validate_models(data, _SCHEMA)
v = template_common.flatten_data(data['models'], {})
v['optionsCommand'] = _generate_options(data['models'])
v['solenoidCommand'] = _generate_solenoid(data['models'])
v['beamCommand'] = _generate_beam(data['models'])
v['currentCommand'] = _generate_current(data['models'])
v['chargeCommand'] = _generate_charge(data['models'])
if is_parallel:
v['latticeCommands'] = _generate_lattice(data['models'])
else:
v['latticeCommands'] = _DEFAULT_DRIFT_ELEMENT
return template_common.render_jinja(SIM_TYPE, v)
def _generate_solenoid(models):
solenoid = models.solenoid
if solenoid.sourceDefinition == 'none':
return ''
if solenoid.sourceDefinition == 'values':
#TODO(pjm): latest version also has solenoid.fringeRegion
return 'SOLENOID {} {} {}'.format(
solenoid.fieldStrength, solenoid.length, solenoid.z0)
if solenoid.sourceDefinition == 'file':
return 'SOLENOID {}'.format(
template_common.lib_file_name('solenoid', 'solenoidFile', solenoid.solenoidFile))
raise RuntimeError('unknown solenoidDefinition: {}'.format(solenoid.sourceDefinition))
def _generate_transverse_dist(models):
dist_type = models.beam.transversalDistribution
if dist_type == 'twiss4d':
dist = models.twissDistribution
return 'TWISS4D {} {} {} {} {} {}'.format(
dist.horizontalAlpha, dist.horizontalBeta, dist.horizontalEmittance,
dist.verticalAlpha, dist.verticalBeta, dist.verticalEmittance)
if dist_type == 'sph2d':
dist = models.sphericalDistribution
if dist.curvature == 'flat':
dist.curvatureFactor = 0
return 'SPH2D {} {} {}'.format(dist.radialLimit, dist.curvatureFactor, dist.thermalEmittance)
if dist_type == 'ell2d':
dist = models.ellipticalDistribution
return 'ELL2D {} {} {} {}'.format(dist.aX, dist.bY, dist.rotationAngle, dist.rmsDeviationFactor)
beam = models.beam
if dist_type == 'file2d':
return 'FILE2D {}'.format(template_common.lib_file_name('beam', 'transversalFile2d', beam.transversalFile2d))
if dist_type == 'file4d':
return 'FILE4D {}'.format(template_common.lib_file_name('beam', 'transversalFile4d', beam.transversalFile4d))
raise RuntimeError('unknown transverse distribution: {}'.format(dist_type))
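# Illustration: for a spherical distribution this yields e.g.
# 'SPH2D 0.564 -15 5' (radialLimit, curvatureFactor, thermalEmittance),
# matching the sample BEAM line quoted above _generate_beam.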
def _parameter_index(name):
return hellweg_dump_reader.parameter_index(name)
def _parse_error_message(run_dir):
path = os.path.join(str(run_dir), _HELLWEG_PARSED_FILE)
if not os.path.exists(path):
return 'No elements generated'
text = pkio.read_text(str(path))
for line in text.split("\n"):
        match = re.search(r'^ERROR:\s(.*)$', line)
if match:
return match.group(1)
return 'No output generated'
def _report_title(report_type, enum_name, beam_info):
return '{}, z={:.4f} cm'.format(
_enum_text(enum_name, report_type),
100 * hellweg_dump_reader.get_parameter(beam_info, 'z'))
def _simulation_files(data):
res = []
solenoid = data.models.solenoid
if solenoid.sourceDefinition == 'file' and solenoid.solenoidFile:
res.append(template_common.lib_file_name('solenoid', 'solenoidFile', solenoid.solenoidFile))
beam = data.models.beam
if beam.beamDefinition == 'cst_pit' or beam.beamDefinition == 'cst_pid':
res.append(template_common.lib_file_name('beam', 'cstFile', beam.cstFile))
if beam.beamDefinition == 'transverse_longitude':
if beam.transversalDistribution == 'file2d':
res.append(template_common.lib_file_name('beam', 'transversalFile2d', beam.transversalFile2d))
elif beam.transversalDistribution == 'file4d':
res.append(template_common.lib_file_name('beam', 'transversalFile4d', beam.transversalFile4d))
if beam.longitudinalDistribution == 'file1d':
res.append(template_common.lib_file_name('beam', 'longitudinalFile1d', beam.longitudinalFile1d))
if beam.longitudinalDistribution == 'file2d':
res.append(template_common.lib_file_name('beam', 'longitudinalFile2d', beam.longitudinalFile2d))
return res
def _summary_text(run_dir):
return pkio.read_text(os.path.join(str(run_dir), HELLWEG_SUMMARY_FILE))
|
normal
|
{
"blob_id": "9e6fd6620b4ec6a574d7948fb0d14b0a2ad0d24e",
"index": 5240,
"step-1": "<mask token>\n\n\ndef background_percent_complete(report, run_dir, is_running):\n if is_running:\n return {'percentComplete': 0, 'frameCount': 0}\n dump_file = _dump_file(run_dir)\n if os.path.exists(dump_file):\n beam_header = hellweg_dump_reader.beam_header(dump_file)\n last_update_time = int(os.path.getmtime(dump_file))\n frame_count = beam_header.NPoints\n return {'lastUpdateTime': last_update_time, 'percentComplete': 100,\n 'frameCount': frame_count, 'summaryData': _summary_text(run_dir)}\n return {'percentComplete': 100, 'frameCount': 0, 'error':\n _parse_error_message(run_dir)}\n\n\ndef extract_beam_histrogram(report, run_dir, frame):\n beam_info = hellweg_dump_reader.beam_info(_dump_file(run_dir), frame)\n points = hellweg_dump_reader.get_points(beam_info, report.reportType)\n hist, edges = np.histogram(points, template_common.histogram_bins(\n report.histogramBins))\n return {'title': _report_title(report.reportType,\n 'BeamHistogramReportType', beam_info), 'x_range': [edges[0], edges[\n -1]], 'y_label': 'Number of Particles', 'x_label':\n hellweg_dump_reader.get_label(report.reportType), 'points': hist.T.\n tolist()}\n\n\ndef extract_beam_report(report, run_dir, frame):\n data = simulation_db.read_json(run_dir.join(template_common.\n INPUT_BASE_NAME))\n model = data.models.beamAnimation\n model.update(report)\n beam_info = hellweg_dump_reader.beam_info(_dump_file(run_dir), frame)\n x, y = report.reportType.split('-')\n values = [hellweg_dump_reader.get_points(beam_info, x),\n hellweg_dump_reader.get_points(beam_info, y)]\n model['x'] = x\n model['y'] = y\n return template_common.heatmap(values, model, {'x_label':\n hellweg_dump_reader.get_label(x), 'y_label': hellweg_dump_reader.\n get_label(y), 'title': _report_title(report.reportType,\n 'BeamReportType', beam_info), 'z_label': 'Number of Particles',\n 'summaryData': _summary_text(run_dir)})\n\n\ndef extract_parameter_report(report, run_dir):\n s = solver.BeamSolver(os.path.join(str(run_dir), HELLWEG_INI_FILE), os.\n path.join(str(run_dir), HELLWEG_INPUT_FILE))\n s.load_bin(os.path.join(str(run_dir), HELLWEG_DUMP_FILE))\n y1_var, y2_var = report.reportType.split('-')\n x_field = 'z'\n x = s.get_structure_parameters(_parameter_index(x_field))\n y1 = s.get_structure_parameters(_parameter_index(y1_var))\n y1_extent = [np.min(y1), np.max(y1)]\n y2 = s.get_structure_parameters(_parameter_index(y2_var))\n y2_extent = [np.min(y2), np.max(y2)]\n return {'title': _enum_text('ParameterReportType', report.reportType),\n 'x_range': [x[0], x[-1]], 'y_label': hellweg_dump_reader.\n get_parameter_label(y1_var), 'x_label': hellweg_dump_reader.\n get_parameter_label(x_field), 'x_points': x, 'points': [y1, y2],\n 'y_range': [min(y1_extent[0], y2_extent[0]), max(y1_extent[1],\n y2_extent[1])], 'y1_title': hellweg_dump_reader.get_parameter_title\n (y1_var), 'y2_title': hellweg_dump_reader.get_parameter_title(y2_var)}\n\n\ndef extract_particle_report(report, run_dir):\n x_field = 'z0'\n particle_info = hellweg_dump_reader.particle_info(_dump_file(run_dir),\n report.reportType, int(report.renderCount))\n x = particle_info['z_values']\n return {'title': _enum_text('ParticleReportType', report.reportType),\n 'x_range': [np.min(x), np.max(x)], 'y_label': hellweg_dump_reader.\n get_label(report.reportType), 'x_label': hellweg_dump_reader.\n get_label(x_field), 'x_points': x, 'points': particle_info[\n 'y_values'], 'y_range': particle_info['y_range']}\n\n\ndef fixup_old_data(data):\n for m in ('beamAnimation', 'beamHistogramAnimation',\n 
'parameterAnimation', 'particleAnimation'):\n if m not in data.models:\n data.models[m] = pkcollections.Dict({})\n template_common.update_model_defaults(data.models[m], m, _SCHEMA)\n if 'solenoidFile' not in data['models']['solenoid']:\n data['models']['solenoid']['solenoidFile'] = ''\n if 'beamDefinition' not in data['models']['beam']:\n beam = data['models']['beam']\n beam['beamDefinition'] = 'transverse_longitude'\n beam['cstCompress'] = '0'\n beam['transversalFile2d'] = ''\n beam['transversalFile4d'] = ''\n beam['longitudinalFile1d'] = ''\n beam['longitudinalFile2d'] = ''\n beam['cstFile'] = ''\n template_common.organize_example(data)\n\n\n<mask token>\n\n\ndef get_simulation_frame(run_dir, data, model_data):\n frame_index = int(data['frameIndex'])\n if data['modelName'] == 'beamAnimation':\n args = template_common.parse_animation_args(data, {'1': [\n 'reportType', 'histogramBins', 'startTime'], '': ['reportType',\n 'histogramBins', 'plotRangeType', 'horizontalSize',\n 'horizontalOffset', 'verticalSize', 'verticalOffset',\n 'isRunning', 'startTime']})\n return extract_beam_report(args, run_dir, frame_index)\n elif data['modelName'] == 'beamHistogramAnimation':\n args = template_common.parse_animation_args(data, {'': [\n 'reportType', 'histogramBins', 'startTime']})\n return extract_beam_histrogram(args, run_dir, frame_index)\n elif data['modelName'] == 'particleAnimation':\n args = template_common.parse_animation_args(data, {'': [\n 'reportType', 'renderCount', 'startTime']})\n return extract_particle_report(args, run_dir)\n elif data['modelName'] == 'parameterAnimation':\n args = template_common.parse_animation_args(data, {'': [\n 'reportType', 'startTime']})\n return extract_parameter_report(args, run_dir)\n raise RuntimeError('unknown animation model: {}'.format(data['modelName']))\n\n\n<mask token>\n\n\ndef remove_last_frame(run_dir):\n pass\n\n\ndef validate_delete_file(data, filename, file_type):\n \"\"\"Returns True if the filename is in use by the simulation data.\"\"\"\n return filename in _simulation_files(data)\n\n\ndef write_parameters(data, run_dir, is_parallel):\n \"\"\"Write the parameters file\n\n Args:\n data (dict): input\n run_dir (py.path): where to write\n is_parallel (bool): run in background?\n \"\"\"\n pkio.write_text(run_dir.join(template_common.PARAMETERS_PYTHON_FILE),\n _generate_parameters_file(data, run_dir, is_parallel))\n\n\ndef _compute_range_across_files(run_dir, data):\n res = {}\n for v in _SCHEMA.enum.BeamReportType:\n x, y = v[0].split('-')\n res[x] = []\n res[y] = []\n dump_file = _dump_file(run_dir)\n if not os.path.exists(dump_file):\n return res\n beam_header = hellweg_dump_reader.beam_header(dump_file)\n for frame in xrange(beam_header.NPoints):\n beam_info = hellweg_dump_reader.beam_info(dump_file, frame)\n for field in res:\n values = hellweg_dump_reader.get_points(beam_info, field)\n if not len(values):\n pass\n elif len(res[field]):\n res[field][0] = min(min(values), res[field][0])\n res[field][1] = max(max(values), res[field][1])\n else:\n res[field] = [min(values), max(values)]\n return res\n\n\ndef _dump_file(run_dir):\n return os.path.join(str(run_dir), HELLWEG_DUMP_FILE)\n\n\ndef _enum_text(enum_name, v):\n enum_values = _SCHEMA['enum'][enum_name]\n for e in enum_values:\n if e[0] == v:\n return e[1]\n raise RuntimeError('invalid enum value: {}, {}'.format(enum_values, v))\n\n\n<mask token>\n\n\ndef _generate_cell_params(el):\n if el.attenuation == 0 and el.aperture == 0:\n return '{} {} {}'.format(el.phaseAdvance, el.phaseVelocity, 
el.\n acceleratingInvariant)\n return '{} {} {} {} {}'.format(el.phaseAdvance, el.phaseVelocity, el.\n acceleratingInvariant, el.attenuation, el.aperture)\n\n\ndef _generate_charge(models):\n if models.beam.spaceCharge == 'none':\n return ''\n return 'SPCHARGE {} {}'.format(models.beam.spaceCharge.upper(), models.\n beam.spaceChargeCore)\n\n\n<mask token>\n\n\ndef _generate_energy_phase_distribution(dist):\n return '{} {} {}'.format(dist.meanPhase, dist.phaseLength, dist.\n phaseDeviation if dist.distributionType == 'gaussian' else '')\n\n\ndef _generate_lattice(models):\n res = ''\n for el in models.beamline:\n if el.type == 'powerElement':\n res += 'POWER {} {} {}'.format(el.inputPower, el.frequency, el.\n phaseShift)\n elif el.type == 'cellElement':\n res += 'CELL {}'.format(_generate_cell_params(el))\n has_cell_or_drift = True\n elif el.type == 'cellsElement':\n res += 'CELLS {} {}'.format(el.repeat, _generate_cell_params(el))\n has_cell_or_drift = True\n elif el.type == 'driftElement':\n res += 'DRIFT {} {} {}'.format(el.length, el.radius, el.meshPoints)\n has_cell_or_drift = True\n elif el.type == 'saveElement':\n pass\n else:\n raise RuntimeError('unknown element type: {}'.format(el.type))\n res += '\\n'\n return res\n\n\n<mask token>\n\n\ndef _generate_options(models):\n if models.simulationSettings.allowBackwardWaves == '1':\n return 'OPTIONS REVERSE'\n return ''\n\n\n<mask token>\n\n\ndef _generate_transverse_dist(models):\n dist_type = models.beam.transversalDistribution\n if dist_type == 'twiss4d':\n dist = models.twissDistribution\n return 'TWISS4D {} {} {} {} {} {}'.format(dist.horizontalAlpha,\n dist.horizontalBeta, dist.horizontalEmittance, dist.\n verticalAlpha, dist.verticalBeta, dist.verticalEmittance)\n if dist_type == 'sph2d':\n dist = models.sphericalDistribution\n if dist.curvature == 'flat':\n dist.curvatureFactor = 0\n return 'SPH2D {} {} {}'.format(dist.radialLimit, dist.\n curvatureFactor, dist.thermalEmittance)\n if dist_type == 'ell2d':\n dist = models.ellipticalDistribution\n return 'ELL2D {} {} {} {}'.format(dist.aX, dist.bY, dist.\n rotationAngle, dist.rmsDeviationFactor)\n beam = models.beam\n if dist_type == 'file2d':\n return 'FILE2D {}'.format(template_common.lib_file_name('beam',\n 'transversalFile2d', beam.transversalFile2d))\n if dist_type == 'file4d':\n return 'FILE4D {}'.format(template_common.lib_file_name('beam',\n 'transversalFile4d', beam.transversalFile4d))\n raise RuntimeError('unknown transverse distribution: {}'.format(dist_type))\n\n\ndef _parameter_index(name):\n return hellweg_dump_reader.parameter_index(name)\n\n\n<mask token>\n\n\ndef _report_title(report_type, enum_name, beam_info):\n return '{}, z={:.4f} cm'.format(_enum_text(enum_name, report_type), 100 *\n hellweg_dump_reader.get_parameter(beam_info, 'z'))\n\n\n<mask token>\n\n\ndef _summary_text(run_dir):\n return pkio.read_text(os.path.join(str(run_dir), HELLWEG_SUMMARY_FILE))\n",
"step-2": "<mask token>\n\n\ndef background_percent_complete(report, run_dir, is_running):\n if is_running:\n return {'percentComplete': 0, 'frameCount': 0}\n dump_file = _dump_file(run_dir)\n if os.path.exists(dump_file):\n beam_header = hellweg_dump_reader.beam_header(dump_file)\n last_update_time = int(os.path.getmtime(dump_file))\n frame_count = beam_header.NPoints\n return {'lastUpdateTime': last_update_time, 'percentComplete': 100,\n 'frameCount': frame_count, 'summaryData': _summary_text(run_dir)}\n return {'percentComplete': 100, 'frameCount': 0, 'error':\n _parse_error_message(run_dir)}\n\n\ndef extract_beam_histrogram(report, run_dir, frame):\n beam_info = hellweg_dump_reader.beam_info(_dump_file(run_dir), frame)\n points = hellweg_dump_reader.get_points(beam_info, report.reportType)\n hist, edges = np.histogram(points, template_common.histogram_bins(\n report.histogramBins))\n return {'title': _report_title(report.reportType,\n 'BeamHistogramReportType', beam_info), 'x_range': [edges[0], edges[\n -1]], 'y_label': 'Number of Particles', 'x_label':\n hellweg_dump_reader.get_label(report.reportType), 'points': hist.T.\n tolist()}\n\n\ndef extract_beam_report(report, run_dir, frame):\n data = simulation_db.read_json(run_dir.join(template_common.\n INPUT_BASE_NAME))\n model = data.models.beamAnimation\n model.update(report)\n beam_info = hellweg_dump_reader.beam_info(_dump_file(run_dir), frame)\n x, y = report.reportType.split('-')\n values = [hellweg_dump_reader.get_points(beam_info, x),\n hellweg_dump_reader.get_points(beam_info, y)]\n model['x'] = x\n model['y'] = y\n return template_common.heatmap(values, model, {'x_label':\n hellweg_dump_reader.get_label(x), 'y_label': hellweg_dump_reader.\n get_label(y), 'title': _report_title(report.reportType,\n 'BeamReportType', beam_info), 'z_label': 'Number of Particles',\n 'summaryData': _summary_text(run_dir)})\n\n\ndef extract_parameter_report(report, run_dir):\n s = solver.BeamSolver(os.path.join(str(run_dir), HELLWEG_INI_FILE), os.\n path.join(str(run_dir), HELLWEG_INPUT_FILE))\n s.load_bin(os.path.join(str(run_dir), HELLWEG_DUMP_FILE))\n y1_var, y2_var = report.reportType.split('-')\n x_field = 'z'\n x = s.get_structure_parameters(_parameter_index(x_field))\n y1 = s.get_structure_parameters(_parameter_index(y1_var))\n y1_extent = [np.min(y1), np.max(y1)]\n y2 = s.get_structure_parameters(_parameter_index(y2_var))\n y2_extent = [np.min(y2), np.max(y2)]\n return {'title': _enum_text('ParameterReportType', report.reportType),\n 'x_range': [x[0], x[-1]], 'y_label': hellweg_dump_reader.\n get_parameter_label(y1_var), 'x_label': hellweg_dump_reader.\n get_parameter_label(x_field), 'x_points': x, 'points': [y1, y2],\n 'y_range': [min(y1_extent[0], y2_extent[0]), max(y1_extent[1],\n y2_extent[1])], 'y1_title': hellweg_dump_reader.get_parameter_title\n (y1_var), 'y2_title': hellweg_dump_reader.get_parameter_title(y2_var)}\n\n\ndef extract_particle_report(report, run_dir):\n x_field = 'z0'\n particle_info = hellweg_dump_reader.particle_info(_dump_file(run_dir),\n report.reportType, int(report.renderCount))\n x = particle_info['z_values']\n return {'title': _enum_text('ParticleReportType', report.reportType),\n 'x_range': [np.min(x), np.max(x)], 'y_label': hellweg_dump_reader.\n get_label(report.reportType), 'x_label': hellweg_dump_reader.\n get_label(x_field), 'x_points': x, 'points': particle_info[\n 'y_values'], 'y_range': particle_info['y_range']}\n\n\ndef fixup_old_data(data):\n for m in ('beamAnimation', 'beamHistogramAnimation',\n 
'parameterAnimation', 'particleAnimation'):\n if m not in data.models:\n data.models[m] = pkcollections.Dict({})\n template_common.update_model_defaults(data.models[m], m, _SCHEMA)\n if 'solenoidFile' not in data['models']['solenoid']:\n data['models']['solenoid']['solenoidFile'] = ''\n if 'beamDefinition' not in data['models']['beam']:\n beam = data['models']['beam']\n beam['beamDefinition'] = 'transverse_longitude'\n beam['cstCompress'] = '0'\n beam['transversalFile2d'] = ''\n beam['transversalFile4d'] = ''\n beam['longitudinalFile1d'] = ''\n beam['longitudinalFile2d'] = ''\n beam['cstFile'] = ''\n template_common.organize_example(data)\n\n\n<mask token>\n\n\ndef get_application_data(data):\n if data['method'] == 'compute_particle_ranges':\n return template_common.compute_field_range(data,\n _compute_range_across_files)\n assert False, 'unknown application data method: {}'.format(data['method'])\n\n\n<mask token>\n\n\ndef get_simulation_frame(run_dir, data, model_data):\n frame_index = int(data['frameIndex'])\n if data['modelName'] == 'beamAnimation':\n args = template_common.parse_animation_args(data, {'1': [\n 'reportType', 'histogramBins', 'startTime'], '': ['reportType',\n 'histogramBins', 'plotRangeType', 'horizontalSize',\n 'horizontalOffset', 'verticalSize', 'verticalOffset',\n 'isRunning', 'startTime']})\n return extract_beam_report(args, run_dir, frame_index)\n elif data['modelName'] == 'beamHistogramAnimation':\n args = template_common.parse_animation_args(data, {'': [\n 'reportType', 'histogramBins', 'startTime']})\n return extract_beam_histrogram(args, run_dir, frame_index)\n elif data['modelName'] == 'particleAnimation':\n args = template_common.parse_animation_args(data, {'': [\n 'reportType', 'renderCount', 'startTime']})\n return extract_particle_report(args, run_dir)\n elif data['modelName'] == 'parameterAnimation':\n args = template_common.parse_animation_args(data, {'': [\n 'reportType', 'startTime']})\n return extract_parameter_report(args, run_dir)\n raise RuntimeError('unknown animation model: {}'.format(data['modelName']))\n\n\n<mask token>\n\n\ndef remove_last_frame(run_dir):\n pass\n\n\ndef validate_delete_file(data, filename, file_type):\n \"\"\"Returns True if the filename is in use by the simulation data.\"\"\"\n return filename in _simulation_files(data)\n\n\ndef write_parameters(data, run_dir, is_parallel):\n \"\"\"Write the parameters file\n\n Args:\n data (dict): input\n run_dir (py.path): where to write\n is_parallel (bool): run in background?\n \"\"\"\n pkio.write_text(run_dir.join(template_common.PARAMETERS_PYTHON_FILE),\n _generate_parameters_file(data, run_dir, is_parallel))\n\n\ndef _compute_range_across_files(run_dir, data):\n res = {}\n for v in _SCHEMA.enum.BeamReportType:\n x, y = v[0].split('-')\n res[x] = []\n res[y] = []\n dump_file = _dump_file(run_dir)\n if not os.path.exists(dump_file):\n return res\n beam_header = hellweg_dump_reader.beam_header(dump_file)\n for frame in xrange(beam_header.NPoints):\n beam_info = hellweg_dump_reader.beam_info(dump_file, frame)\n for field in res:\n values = hellweg_dump_reader.get_points(beam_info, field)\n if not len(values):\n pass\n elif len(res[field]):\n res[field][0] = min(min(values), res[field][0])\n res[field][1] = max(max(values), res[field][1])\n else:\n res[field] = [min(values), max(values)]\n return res\n\n\ndef _dump_file(run_dir):\n return os.path.join(str(run_dir), HELLWEG_DUMP_FILE)\n\n\ndef _enum_text(enum_name, v):\n enum_values = _SCHEMA['enum'][enum_name]\n for e in enum_values:\n if 
e[0] == v:\n return e[1]\n raise RuntimeError('invalid enum value: {}, {}'.format(enum_values, v))\n\n\ndef _generate_beam(models):\n beam_def = models.beam.beamDefinition\n if beam_def == 'transverse_longitude':\n return 'BEAM {} {}'.format(_generate_transverse_dist(models),\n _generate_longitude_dist(models))\n if beam_def == 'cst_pit':\n return 'BEAM CST_PIT {} {}'.format(template_common.lib_file_name(\n 'beam', 'cstFile', models.beam.cstFile), 'COMPRESS' if models.\n beam.cstCompress else '')\n if beam_def == 'cst_pid':\n return 'BEAM CST_PID {} {}'.format(template_common.lib_file_name(\n 'beam', 'cstFile', models.beam.cstFile),\n _generate_energy_phase_distribution(models.energyPhaseDistribution)\n )\n raise RuntimeError('invalid beam def: {}'.format(beam_def))\n\n\ndef _generate_cell_params(el):\n if el.attenuation == 0 and el.aperture == 0:\n return '{} {} {}'.format(el.phaseAdvance, el.phaseVelocity, el.\n acceleratingInvariant)\n return '{} {} {} {} {}'.format(el.phaseAdvance, el.phaseVelocity, el.\n acceleratingInvariant, el.attenuation, el.aperture)\n\n\ndef _generate_charge(models):\n if models.beam.spaceCharge == 'none':\n return ''\n return 'SPCHARGE {} {}'.format(models.beam.spaceCharge.upper(), models.\n beam.spaceChargeCore)\n\n\n<mask token>\n\n\ndef _generate_energy_phase_distribution(dist):\n return '{} {} {}'.format(dist.meanPhase, dist.phaseLength, dist.\n phaseDeviation if dist.distributionType == 'gaussian' else '')\n\n\ndef _generate_lattice(models):\n res = ''\n for el in models.beamline:\n if el.type == 'powerElement':\n res += 'POWER {} {} {}'.format(el.inputPower, el.frequency, el.\n phaseShift)\n elif el.type == 'cellElement':\n res += 'CELL {}'.format(_generate_cell_params(el))\n has_cell_or_drift = True\n elif el.type == 'cellsElement':\n res += 'CELLS {} {}'.format(el.repeat, _generate_cell_params(el))\n has_cell_or_drift = True\n elif el.type == 'driftElement':\n res += 'DRIFT {} {} {}'.format(el.length, el.radius, el.meshPoints)\n has_cell_or_drift = True\n elif el.type == 'saveElement':\n pass\n else:\n raise RuntimeError('unknown element type: {}'.format(el.type))\n res += '\\n'\n return res\n\n\ndef _generate_longitude_dist(models):\n dist_type = models.beam.longitudinalDistribution\n if dist_type == 'norm2d':\n dist = models.energyPhaseDistribution\n if dist.distributionType == 'uniform':\n return 'NORM2D {} {} {} {}'.format(dist.meanEnergy, dist.\n energySpread, dist.meanPhase, dist.phaseLength)\n if dist.distributionType == 'gaussian':\n return 'NORM2D {} {} {} {} {} {}'.format(dist.meanEnergy, dist.\n energySpread, dist.energyDeviation, dist.meanPhase, dist.\n phaseLength, dist.phaseDeviation)\n raise RuntimeError('unknown longitudinal distribution type: {}'.\n format(models.longitudinalDistribution.distributionType))\n if dist_type == 'file1d':\n return 'FILE1D {} {}'.format(template_common.lib_file_name('beam',\n 'longitudinalFile1d', models.beam.longitudinalFile1d),\n _generate_energy_phase_distribution(models.energyPhaseDistribution)\n )\n if dist_type == 'file2d':\n return 'FILE2D {}'.format(template_common.lib_file_name('beam',\n 'transversalFile2d', beam.transversalFile2d))\n raise RuntimeError('unknown longitudinal distribution: {}'.format(\n models.beam.longitudinalDistribution))\n\n\ndef _generate_options(models):\n if models.simulationSettings.allowBackwardWaves == '1':\n return 'OPTIONS REVERSE'\n return ''\n\n\n<mask token>\n\n\ndef _generate_transverse_dist(models):\n dist_type = models.beam.transversalDistribution\n if dist_type == 
'twiss4d':\n dist = models.twissDistribution\n return 'TWISS4D {} {} {} {} {} {}'.format(dist.horizontalAlpha,\n dist.horizontalBeta, dist.horizontalEmittance, dist.\n verticalAlpha, dist.verticalBeta, dist.verticalEmittance)\n if dist_type == 'sph2d':\n dist = models.sphericalDistribution\n if dist.curvature == 'flat':\n dist.curvatureFactor = 0\n return 'SPH2D {} {} {}'.format(dist.radialLimit, dist.\n curvatureFactor, dist.thermalEmittance)\n if dist_type == 'ell2d':\n dist = models.ellipticalDistribution\n return 'ELL2D {} {} {} {}'.format(dist.aX, dist.bY, dist.\n rotationAngle, dist.rmsDeviationFactor)\n beam = models.beam\n if dist_type == 'file2d':\n return 'FILE2D {}'.format(template_common.lib_file_name('beam',\n 'transversalFile2d', beam.transversalFile2d))\n if dist_type == 'file4d':\n return 'FILE4D {}'.format(template_common.lib_file_name('beam',\n 'transversalFile4d', beam.transversalFile4d))\n raise RuntimeError('unknown transverse distribution: {}'.format(dist_type))\n\n\ndef _parameter_index(name):\n return hellweg_dump_reader.parameter_index(name)\n\n\n<mask token>\n\n\ndef _report_title(report_type, enum_name, beam_info):\n return '{}, z={:.4f} cm'.format(_enum_text(enum_name, report_type), 100 *\n hellweg_dump_reader.get_parameter(beam_info, 'z'))\n\n\ndef _simulation_files(data):\n res = []\n solenoid = data.models.solenoid\n if solenoid.sourceDefinition == 'file' and solenoid.solenoidFile:\n res.append(template_common.lib_file_name('solenoid', 'solenoidFile',\n solenoid.solenoidFile))\n beam = data.models.beam\n if beam.beamDefinition == 'cst_pit' or beam.beamDefinition == 'cst_pid':\n res.append(template_common.lib_file_name('beam', 'cstFile', beam.\n cstFile))\n if beam.beamDefinition == 'transverse_longitude':\n if beam.transversalDistribution == 'file2d':\n res.append(template_common.lib_file_name('beam',\n 'transversalFile2d', beam.transversalFile2d))\n elif beam.transversalDistribution == 'file4d':\n res.append(template_common.lib_file_name('beam',\n 'transversalFile4d', beam.transversalFile4d))\n if beam.longitudinalDistribution == 'file1d':\n res.append(template_common.lib_file_name('beam',\n 'longitudinalFile1d', beam.longitudinalFile1d))\n if beam.longitudinalDistribution == 'file2d':\n res.append(template_common.lib_file_name('beam',\n 'longitudinalFile2d', beam.longitudinalFile2d))\n return res\n\n\ndef _summary_text(run_dir):\n return pkio.read_text(os.path.join(str(run_dir), HELLWEG_SUMMARY_FILE))\n",
"step-3": "<mask token>\n\n\ndef background_percent_complete(report, run_dir, is_running):\n if is_running:\n return {'percentComplete': 0, 'frameCount': 0}\n dump_file = _dump_file(run_dir)\n if os.path.exists(dump_file):\n beam_header = hellweg_dump_reader.beam_header(dump_file)\n last_update_time = int(os.path.getmtime(dump_file))\n frame_count = beam_header.NPoints\n return {'lastUpdateTime': last_update_time, 'percentComplete': 100,\n 'frameCount': frame_count, 'summaryData': _summary_text(run_dir)}\n return {'percentComplete': 100, 'frameCount': 0, 'error':\n _parse_error_message(run_dir)}\n\n\ndef extract_beam_histrogram(report, run_dir, frame):\n beam_info = hellweg_dump_reader.beam_info(_dump_file(run_dir), frame)\n points = hellweg_dump_reader.get_points(beam_info, report.reportType)\n hist, edges = np.histogram(points, template_common.histogram_bins(\n report.histogramBins))\n return {'title': _report_title(report.reportType,\n 'BeamHistogramReportType', beam_info), 'x_range': [edges[0], edges[\n -1]], 'y_label': 'Number of Particles', 'x_label':\n hellweg_dump_reader.get_label(report.reportType), 'points': hist.T.\n tolist()}\n\n\ndef extract_beam_report(report, run_dir, frame):\n data = simulation_db.read_json(run_dir.join(template_common.\n INPUT_BASE_NAME))\n model = data.models.beamAnimation\n model.update(report)\n beam_info = hellweg_dump_reader.beam_info(_dump_file(run_dir), frame)\n x, y = report.reportType.split('-')\n values = [hellweg_dump_reader.get_points(beam_info, x),\n hellweg_dump_reader.get_points(beam_info, y)]\n model['x'] = x\n model['y'] = y\n return template_common.heatmap(values, model, {'x_label':\n hellweg_dump_reader.get_label(x), 'y_label': hellweg_dump_reader.\n get_label(y), 'title': _report_title(report.reportType,\n 'BeamReportType', beam_info), 'z_label': 'Number of Particles',\n 'summaryData': _summary_text(run_dir)})\n\n\ndef extract_parameter_report(report, run_dir):\n s = solver.BeamSolver(os.path.join(str(run_dir), HELLWEG_INI_FILE), os.\n path.join(str(run_dir), HELLWEG_INPUT_FILE))\n s.load_bin(os.path.join(str(run_dir), HELLWEG_DUMP_FILE))\n y1_var, y2_var = report.reportType.split('-')\n x_field = 'z'\n x = s.get_structure_parameters(_parameter_index(x_field))\n y1 = s.get_structure_parameters(_parameter_index(y1_var))\n y1_extent = [np.min(y1), np.max(y1)]\n y2 = s.get_structure_parameters(_parameter_index(y2_var))\n y2_extent = [np.min(y2), np.max(y2)]\n return {'title': _enum_text('ParameterReportType', report.reportType),\n 'x_range': [x[0], x[-1]], 'y_label': hellweg_dump_reader.\n get_parameter_label(y1_var), 'x_label': hellweg_dump_reader.\n get_parameter_label(x_field), 'x_points': x, 'points': [y1, y2],\n 'y_range': [min(y1_extent[0], y2_extent[0]), max(y1_extent[1],\n y2_extent[1])], 'y1_title': hellweg_dump_reader.get_parameter_title\n (y1_var), 'y2_title': hellweg_dump_reader.get_parameter_title(y2_var)}\n\n\ndef extract_particle_report(report, run_dir):\n x_field = 'z0'\n particle_info = hellweg_dump_reader.particle_info(_dump_file(run_dir),\n report.reportType, int(report.renderCount))\n x = particle_info['z_values']\n return {'title': _enum_text('ParticleReportType', report.reportType),\n 'x_range': [np.min(x), np.max(x)], 'y_label': hellweg_dump_reader.\n get_label(report.reportType), 'x_label': hellweg_dump_reader.\n get_label(x_field), 'x_points': x, 'points': particle_info[\n 'y_values'], 'y_range': particle_info['y_range']}\n\n\ndef fixup_old_data(data):\n for m in ('beamAnimation', 'beamHistogramAnimation',\n 
'parameterAnimation', 'particleAnimation'):\n if m not in data.models:\n data.models[m] = pkcollections.Dict({})\n template_common.update_model_defaults(data.models[m], m, _SCHEMA)\n if 'solenoidFile' not in data['models']['solenoid']:\n data['models']['solenoid']['solenoidFile'] = ''\n if 'beamDefinition' not in data['models']['beam']:\n beam = data['models']['beam']\n beam['beamDefinition'] = 'transverse_longitude'\n beam['cstCompress'] = '0'\n beam['transversalFile2d'] = ''\n beam['transversalFile4d'] = ''\n beam['longitudinalFile1d'] = ''\n beam['longitudinalFile2d'] = ''\n beam['cstFile'] = ''\n template_common.organize_example(data)\n\n\ndef get_animation_name(data):\n return 'animation'\n\n\ndef get_application_data(data):\n if data['method'] == 'compute_particle_ranges':\n return template_common.compute_field_range(data,\n _compute_range_across_files)\n assert False, 'unknown application data method: {}'.format(data['method'])\n\n\n<mask token>\n\n\ndef get_simulation_frame(run_dir, data, model_data):\n frame_index = int(data['frameIndex'])\n if data['modelName'] == 'beamAnimation':\n args = template_common.parse_animation_args(data, {'1': [\n 'reportType', 'histogramBins', 'startTime'], '': ['reportType',\n 'histogramBins', 'plotRangeType', 'horizontalSize',\n 'horizontalOffset', 'verticalSize', 'verticalOffset',\n 'isRunning', 'startTime']})\n return extract_beam_report(args, run_dir, frame_index)\n elif data['modelName'] == 'beamHistogramAnimation':\n args = template_common.parse_animation_args(data, {'': [\n 'reportType', 'histogramBins', 'startTime']})\n return extract_beam_histrogram(args, run_dir, frame_index)\n elif data['modelName'] == 'particleAnimation':\n args = template_common.parse_animation_args(data, {'': [\n 'reportType', 'renderCount', 'startTime']})\n return extract_particle_report(args, run_dir)\n elif data['modelName'] == 'parameterAnimation':\n args = template_common.parse_animation_args(data, {'': [\n 'reportType', 'startTime']})\n return extract_parameter_report(args, run_dir)\n raise RuntimeError('unknown animation model: {}'.format(data['modelName']))\n\n\ndef models_related_to_report(data):\n \"\"\"What models are required for this data['report']\n\n Args:\n data (dict): simulation\n Returns:\n list: Named models, model fields or values (dict, list) that affect report\n \"\"\"\n r = data['report']\n if r == 'animation':\n return []\n res = template_common.report_fields(data, r, _REPORT_STYLE_FIELDS) + [\n 'beam', 'ellipticalDistribution', 'energyPhaseDistribution',\n 'solenoid', 'sphericalDistribution', 'twissDistribution']\n for f in template_common.lib_files(data):\n res.append(f.mtime())\n return res\n\n\ndef python_source_for_model(data, model):\n return (\n \"\"\"\nfrom rslinac import solver\n\n{}\n\nwith open('input.txt', 'w') as f:\n f.write(input_file)\n\nwith open('defaults.ini', 'w') as f:\n f.write(ini_file)\n\ns = solver.BeamSolver('defaults.ini', 'input.txt')\ns.solve()\ns.save_output('output.txt')\n \"\"\"\n .format(_generate_parameters_file(data, is_parallel=len(data.models\n .beamline))))\n\n\ndef remove_last_frame(run_dir):\n pass\n\n\ndef validate_delete_file(data, filename, file_type):\n \"\"\"Returns True if the filename is in use by the simulation data.\"\"\"\n return filename in _simulation_files(data)\n\n\ndef write_parameters(data, run_dir, is_parallel):\n \"\"\"Write the parameters file\n\n Args:\n data (dict): input\n run_dir (py.path): where to write\n is_parallel (bool): run in background?\n \"\"\"\n 
pkio.write_text(run_dir.join(template_common.PARAMETERS_PYTHON_FILE),\n _generate_parameters_file(data, run_dir, is_parallel))\n\n\ndef _compute_range_across_files(run_dir, data):\n res = {}\n for v in _SCHEMA.enum.BeamReportType:\n x, y = v[0].split('-')\n res[x] = []\n res[y] = []\n dump_file = _dump_file(run_dir)\n if not os.path.exists(dump_file):\n return res\n beam_header = hellweg_dump_reader.beam_header(dump_file)\n for frame in xrange(beam_header.NPoints):\n beam_info = hellweg_dump_reader.beam_info(dump_file, frame)\n for field in res:\n values = hellweg_dump_reader.get_points(beam_info, field)\n if not len(values):\n pass\n elif len(res[field]):\n res[field][0] = min(min(values), res[field][0])\n res[field][1] = max(max(values), res[field][1])\n else:\n res[field] = [min(values), max(values)]\n return res\n\n\ndef _dump_file(run_dir):\n return os.path.join(str(run_dir), HELLWEG_DUMP_FILE)\n\n\ndef _enum_text(enum_name, v):\n enum_values = _SCHEMA['enum'][enum_name]\n for e in enum_values:\n if e[0] == v:\n return e[1]\n raise RuntimeError('invalid enum value: {}, {}'.format(enum_values, v))\n\n\ndef _generate_beam(models):\n beam_def = models.beam.beamDefinition\n if beam_def == 'transverse_longitude':\n return 'BEAM {} {}'.format(_generate_transverse_dist(models),\n _generate_longitude_dist(models))\n if beam_def == 'cst_pit':\n return 'BEAM CST_PIT {} {}'.format(template_common.lib_file_name(\n 'beam', 'cstFile', models.beam.cstFile), 'COMPRESS' if models.\n beam.cstCompress else '')\n if beam_def == 'cst_pid':\n return 'BEAM CST_PID {} {}'.format(template_common.lib_file_name(\n 'beam', 'cstFile', models.beam.cstFile),\n _generate_energy_phase_distribution(models.energyPhaseDistribution)\n )\n raise RuntimeError('invalid beam def: {}'.format(beam_def))\n\n\ndef _generate_cell_params(el):\n if el.attenuation == 0 and el.aperture == 0:\n return '{} {} {}'.format(el.phaseAdvance, el.phaseVelocity, el.\n acceleratingInvariant)\n return '{} {} {} {} {}'.format(el.phaseAdvance, el.phaseVelocity, el.\n acceleratingInvariant, el.attenuation, el.aperture)\n\n\ndef _generate_charge(models):\n if models.beam.spaceCharge == 'none':\n return ''\n return 'SPCHARGE {} {}'.format(models.beam.spaceCharge.upper(), models.\n beam.spaceChargeCore)\n\n\ndef _generate_current(models):\n return 'CURRENT {} {}'.format(models.beam.current, models.beam.\n numberOfParticles)\n\n\ndef _generate_energy_phase_distribution(dist):\n return '{} {} {}'.format(dist.meanPhase, dist.phaseLength, dist.\n phaseDeviation if dist.distributionType == 'gaussian' else '')\n\n\ndef _generate_lattice(models):\n res = ''\n for el in models.beamline:\n if el.type == 'powerElement':\n res += 'POWER {} {} {}'.format(el.inputPower, el.frequency, el.\n phaseShift)\n elif el.type == 'cellElement':\n res += 'CELL {}'.format(_generate_cell_params(el))\n has_cell_or_drift = True\n elif el.type == 'cellsElement':\n res += 'CELLS {} {}'.format(el.repeat, _generate_cell_params(el))\n has_cell_or_drift = True\n elif el.type == 'driftElement':\n res += 'DRIFT {} {} {}'.format(el.length, el.radius, el.meshPoints)\n has_cell_or_drift = True\n elif el.type == 'saveElement':\n pass\n else:\n raise RuntimeError('unknown element type: {}'.format(el.type))\n res += '\\n'\n return res\n\n\ndef _generate_longitude_dist(models):\n dist_type = models.beam.longitudinalDistribution\n if dist_type == 'norm2d':\n dist = models.energyPhaseDistribution\n if dist.distributionType == 'uniform':\n return 'NORM2D {} {} {} {}'.format(dist.meanEnergy, 
dist.\n energySpread, dist.meanPhase, dist.phaseLength)\n if dist.distributionType == 'gaussian':\n return 'NORM2D {} {} {} {} {} {}'.format(dist.meanEnergy, dist.\n energySpread, dist.energyDeviation, dist.meanPhase, dist.\n phaseLength, dist.phaseDeviation)\n raise RuntimeError('unknown longitudinal distribution type: {}'.\n format(models.longitudinalDistribution.distributionType))\n if dist_type == 'file1d':\n return 'FILE1D {} {}'.format(template_common.lib_file_name('beam',\n 'longitudinalFile1d', models.beam.longitudinalFile1d),\n _generate_energy_phase_distribution(models.energyPhaseDistribution)\n )\n if dist_type == 'file2d':\n return 'FILE2D {}'.format(template_common.lib_file_name('beam',\n 'transversalFile2d', beam.transversalFile2d))\n raise RuntimeError('unknown longitudinal distribution: {}'.format(\n models.beam.longitudinalDistribution))\n\n\ndef _generate_options(models):\n if models.simulationSettings.allowBackwardWaves == '1':\n return 'OPTIONS REVERSE'\n return ''\n\n\ndef _generate_parameters_file(data, run_dir=None, is_parallel=False):\n template_common.validate_models(data, _SCHEMA)\n v = template_common.flatten_data(data['models'], {})\n v['optionsCommand'] = _generate_options(data['models'])\n v['solenoidCommand'] = _generate_solenoid(data['models'])\n v['beamCommand'] = _generate_beam(data['models'])\n v['currentCommand'] = _generate_current(data['models'])\n v['chargeCommand'] = _generate_charge(data['models'])\n if is_parallel:\n v['latticeCommands'] = _generate_lattice(data['models'])\n else:\n v['latticeCommands'] = _DEFAULT_DRIFT_ELEMENT\n return template_common.render_jinja(SIM_TYPE, v)\n\n\ndef _generate_solenoid(models):\n solenoid = models.solenoid\n if solenoid.sourceDefinition == 'none':\n return ''\n if solenoid.sourceDefinition == 'values':\n return 'SOLENOID {} {} {}'.format(solenoid.fieldStrength, solenoid.\n length, solenoid.z0)\n if solenoid.sourceDefinition == 'file':\n return 'SOLENOID {}'.format(template_common.lib_file_name(\n 'solenoid', 'solenoidFile', solenoid.solenoidFile))\n raise RuntimeError('unknown solenoidDefinition: {}'.format(solenoid.\n sourceDefinition))\n\n\ndef _generate_transverse_dist(models):\n dist_type = models.beam.transversalDistribution\n if dist_type == 'twiss4d':\n dist = models.twissDistribution\n return 'TWISS4D {} {} {} {} {} {}'.format(dist.horizontalAlpha,\n dist.horizontalBeta, dist.horizontalEmittance, dist.\n verticalAlpha, dist.verticalBeta, dist.verticalEmittance)\n if dist_type == 'sph2d':\n dist = models.sphericalDistribution\n if dist.curvature == 'flat':\n dist.curvatureFactor = 0\n return 'SPH2D {} {} {}'.format(dist.radialLimit, dist.\n curvatureFactor, dist.thermalEmittance)\n if dist_type == 'ell2d':\n dist = models.ellipticalDistribution\n return 'ELL2D {} {} {} {}'.format(dist.aX, dist.bY, dist.\n rotationAngle, dist.rmsDeviationFactor)\n beam = models.beam\n if dist_type == 'file2d':\n return 'FILE2D {}'.format(template_common.lib_file_name('beam',\n 'transversalFile2d', beam.transversalFile2d))\n if dist_type == 'file4d':\n return 'FILE4D {}'.format(template_common.lib_file_name('beam',\n 'transversalFile4d', beam.transversalFile4d))\n raise RuntimeError('unknown transverse distribution: {}'.format(dist_type))\n\n\ndef _parameter_index(name):\n return hellweg_dump_reader.parameter_index(name)\n\n\n<mask token>\n\n\ndef _report_title(report_type, enum_name, beam_info):\n return '{}, z={:.4f} cm'.format(_enum_text(enum_name, report_type), 100 *\n hellweg_dump_reader.get_parameter(beam_info, 
'z'))\n\n\ndef _simulation_files(data):\n res = []\n solenoid = data.models.solenoid\n if solenoid.sourceDefinition == 'file' and solenoid.solenoidFile:\n res.append(template_common.lib_file_name('solenoid', 'solenoidFile',\n solenoid.solenoidFile))\n beam = data.models.beam\n if beam.beamDefinition == 'cst_pit' or beam.beamDefinition == 'cst_pid':\n res.append(template_common.lib_file_name('beam', 'cstFile', beam.\n cstFile))\n if beam.beamDefinition == 'transverse_longitude':\n if beam.transversalDistribution == 'file2d':\n res.append(template_common.lib_file_name('beam',\n 'transversalFile2d', beam.transversalFile2d))\n elif beam.transversalDistribution == 'file4d':\n res.append(template_common.lib_file_name('beam',\n 'transversalFile4d', beam.transversalFile4d))\n if beam.longitudinalDistribution == 'file1d':\n res.append(template_common.lib_file_name('beam',\n 'longitudinalFile1d', beam.longitudinalFile1d))\n if beam.longitudinalDistribution == 'file2d':\n res.append(template_common.lib_file_name('beam',\n 'longitudinalFile2d', beam.longitudinalFile2d))\n return res\n\n\ndef _summary_text(run_dir):\n return pkio.read_text(os.path.join(str(run_dir), HELLWEG_SUMMARY_FILE))\n",
"step-4": "<mask token>\nfrom __future__ import absolute_import, division, print_function\nfrom pykern import pkcollections\nfrom pykern import pkio\nfrom pykern.pkdebug import pkdc, pkdp\nfrom rslinac import solver\nfrom sirepo import simulation_db\nfrom sirepo.template import template_common, hellweg_dump_reader\nimport math\nimport numpy as np\nimport os.path\nimport py.path\nimport re\nHELLWEG_DUMP_FILE = 'all-data.bin'\nHELLWEG_SUMMARY_FILE = 'output.txt'\nHELLWEG_INI_FILE = 'defaults.ini'\nHELLWEG_INPUT_FILE = 'input.txt'\nSIM_TYPE = 'hellweg'\nWANT_BROWSER_FRAME_CACHE = True\n_DEFAULT_DRIFT_ELEMENT = 'DRIFT 1e-16 1e+16 2' + '\\n'\n_HELLWEG_PARSED_FILE = 'PARSED.TXT'\n_REPORT_STYLE_FIELDS = ['colorMap', 'notes']\n_SCHEMA = simulation_db.get_schema(SIM_TYPE)\n\n\ndef background_percent_complete(report, run_dir, is_running):\n if is_running:\n return {'percentComplete': 0, 'frameCount': 0}\n dump_file = _dump_file(run_dir)\n if os.path.exists(dump_file):\n beam_header = hellweg_dump_reader.beam_header(dump_file)\n last_update_time = int(os.path.getmtime(dump_file))\n frame_count = beam_header.NPoints\n return {'lastUpdateTime': last_update_time, 'percentComplete': 100,\n 'frameCount': frame_count, 'summaryData': _summary_text(run_dir)}\n return {'percentComplete': 100, 'frameCount': 0, 'error':\n _parse_error_message(run_dir)}\n\n\ndef extract_beam_histrogram(report, run_dir, frame):\n beam_info = hellweg_dump_reader.beam_info(_dump_file(run_dir), frame)\n points = hellweg_dump_reader.get_points(beam_info, report.reportType)\n hist, edges = np.histogram(points, template_common.histogram_bins(\n report.histogramBins))\n return {'title': _report_title(report.reportType,\n 'BeamHistogramReportType', beam_info), 'x_range': [edges[0], edges[\n -1]], 'y_label': 'Number of Particles', 'x_label':\n hellweg_dump_reader.get_label(report.reportType), 'points': hist.T.\n tolist()}\n\n\ndef extract_beam_report(report, run_dir, frame):\n data = simulation_db.read_json(run_dir.join(template_common.\n INPUT_BASE_NAME))\n model = data.models.beamAnimation\n model.update(report)\n beam_info = hellweg_dump_reader.beam_info(_dump_file(run_dir), frame)\n x, y = report.reportType.split('-')\n values = [hellweg_dump_reader.get_points(beam_info, x),\n hellweg_dump_reader.get_points(beam_info, y)]\n model['x'] = x\n model['y'] = y\n return template_common.heatmap(values, model, {'x_label':\n hellweg_dump_reader.get_label(x), 'y_label': hellweg_dump_reader.\n get_label(y), 'title': _report_title(report.reportType,\n 'BeamReportType', beam_info), 'z_label': 'Number of Particles',\n 'summaryData': _summary_text(run_dir)})\n\n\ndef extract_parameter_report(report, run_dir):\n s = solver.BeamSolver(os.path.join(str(run_dir), HELLWEG_INI_FILE), os.\n path.join(str(run_dir), HELLWEG_INPUT_FILE))\n s.load_bin(os.path.join(str(run_dir), HELLWEG_DUMP_FILE))\n y1_var, y2_var = report.reportType.split('-')\n x_field = 'z'\n x = s.get_structure_parameters(_parameter_index(x_field))\n y1 = s.get_structure_parameters(_parameter_index(y1_var))\n y1_extent = [np.min(y1), np.max(y1)]\n y2 = s.get_structure_parameters(_parameter_index(y2_var))\n y2_extent = [np.min(y2), np.max(y2)]\n return {'title': _enum_text('ParameterReportType', report.reportType),\n 'x_range': [x[0], x[-1]], 'y_label': hellweg_dump_reader.\n get_parameter_label(y1_var), 'x_label': hellweg_dump_reader.\n get_parameter_label(x_field), 'x_points': x, 'points': [y1, y2],\n 'y_range': [min(y1_extent[0], y2_extent[0]), max(y1_extent[1],\n y2_extent[1])], 
'y1_title': hellweg_dump_reader.get_parameter_title\n (y1_var), 'y2_title': hellweg_dump_reader.get_parameter_title(y2_var)}\n\n\ndef extract_particle_report(report, run_dir):\n x_field = 'z0'\n particle_info = hellweg_dump_reader.particle_info(_dump_file(run_dir),\n report.reportType, int(report.renderCount))\n x = particle_info['z_values']\n return {'title': _enum_text('ParticleReportType', report.reportType),\n 'x_range': [np.min(x), np.max(x)], 'y_label': hellweg_dump_reader.\n get_label(report.reportType), 'x_label': hellweg_dump_reader.\n get_label(x_field), 'x_points': x, 'points': particle_info[\n 'y_values'], 'y_range': particle_info['y_range']}\n\n\ndef fixup_old_data(data):\n for m in ('beamAnimation', 'beamHistogramAnimation',\n 'parameterAnimation', 'particleAnimation'):\n if m not in data.models:\n data.models[m] = pkcollections.Dict({})\n template_common.update_model_defaults(data.models[m], m, _SCHEMA)\n if 'solenoidFile' not in data['models']['solenoid']:\n data['models']['solenoid']['solenoidFile'] = ''\n if 'beamDefinition' not in data['models']['beam']:\n beam = data['models']['beam']\n beam['beamDefinition'] = 'transverse_longitude'\n beam['cstCompress'] = '0'\n beam['transversalFile2d'] = ''\n beam['transversalFile4d'] = ''\n beam['longitudinalFile1d'] = ''\n beam['longitudinalFile2d'] = ''\n beam['cstFile'] = ''\n template_common.organize_example(data)\n\n\ndef get_animation_name(data):\n return 'animation'\n\n\ndef get_application_data(data):\n if data['method'] == 'compute_particle_ranges':\n return template_common.compute_field_range(data,\n _compute_range_across_files)\n assert False, 'unknown application data method: {}'.format(data['method'])\n\n\ndef lib_files(data, source_lib):\n return template_common.filename_to_path(_simulation_files(data), source_lib\n )\n\n\ndef get_simulation_frame(run_dir, data, model_data):\n frame_index = int(data['frameIndex'])\n if data['modelName'] == 'beamAnimation':\n args = template_common.parse_animation_args(data, {'1': [\n 'reportType', 'histogramBins', 'startTime'], '': ['reportType',\n 'histogramBins', 'plotRangeType', 'horizontalSize',\n 'horizontalOffset', 'verticalSize', 'verticalOffset',\n 'isRunning', 'startTime']})\n return extract_beam_report(args, run_dir, frame_index)\n elif data['modelName'] == 'beamHistogramAnimation':\n args = template_common.parse_animation_args(data, {'': [\n 'reportType', 'histogramBins', 'startTime']})\n return extract_beam_histrogram(args, run_dir, frame_index)\n elif data['modelName'] == 'particleAnimation':\n args = template_common.parse_animation_args(data, {'': [\n 'reportType', 'renderCount', 'startTime']})\n return extract_particle_report(args, run_dir)\n elif data['modelName'] == 'parameterAnimation':\n args = template_common.parse_animation_args(data, {'': [\n 'reportType', 'startTime']})\n return extract_parameter_report(args, run_dir)\n raise RuntimeError('unknown animation model: {}'.format(data['modelName']))\n\n\ndef models_related_to_report(data):\n \"\"\"What models are required for this data['report']\n\n Args:\n data (dict): simulation\n Returns:\n list: Named models, model fields or values (dict, list) that affect report\n \"\"\"\n r = data['report']\n if r == 'animation':\n return []\n res = template_common.report_fields(data, r, _REPORT_STYLE_FIELDS) + [\n 'beam', 'ellipticalDistribution', 'energyPhaseDistribution',\n 'solenoid', 'sphericalDistribution', 'twissDistribution']\n for f in template_common.lib_files(data):\n res.append(f.mtime())\n return res\n\n\ndef 
python_source_for_model(data, model):\n return (\n \"\"\"\nfrom rslinac import solver\n\n{}\n\nwith open('input.txt', 'w') as f:\n f.write(input_file)\n\nwith open('defaults.ini', 'w') as f:\n f.write(ini_file)\n\ns = solver.BeamSolver('defaults.ini', 'input.txt')\ns.solve()\ns.save_output('output.txt')\n \"\"\"\n .format(_generate_parameters_file(data, is_parallel=len(data.models\n .beamline))))\n\n\ndef remove_last_frame(run_dir):\n pass\n\n\ndef validate_delete_file(data, filename, file_type):\n \"\"\"Returns True if the filename is in use by the simulation data.\"\"\"\n return filename in _simulation_files(data)\n\n\ndef write_parameters(data, run_dir, is_parallel):\n \"\"\"Write the parameters file\n\n Args:\n data (dict): input\n run_dir (py.path): where to write\n is_parallel (bool): run in background?\n \"\"\"\n pkio.write_text(run_dir.join(template_common.PARAMETERS_PYTHON_FILE),\n _generate_parameters_file(data, run_dir, is_parallel))\n\n\ndef _compute_range_across_files(run_dir, data):\n res = {}\n for v in _SCHEMA.enum.BeamReportType:\n x, y = v[0].split('-')\n res[x] = []\n res[y] = []\n dump_file = _dump_file(run_dir)\n if not os.path.exists(dump_file):\n return res\n beam_header = hellweg_dump_reader.beam_header(dump_file)\n for frame in xrange(beam_header.NPoints):\n beam_info = hellweg_dump_reader.beam_info(dump_file, frame)\n for field in res:\n values = hellweg_dump_reader.get_points(beam_info, field)\n if not len(values):\n pass\n elif len(res[field]):\n res[field][0] = min(min(values), res[field][0])\n res[field][1] = max(max(values), res[field][1])\n else:\n res[field] = [min(values), max(values)]\n return res\n\n\ndef _dump_file(run_dir):\n return os.path.join(str(run_dir), HELLWEG_DUMP_FILE)\n\n\ndef _enum_text(enum_name, v):\n enum_values = _SCHEMA['enum'][enum_name]\n for e in enum_values:\n if e[0] == v:\n return e[1]\n raise RuntimeError('invalid enum value: {}, {}'.format(enum_values, v))\n\n\ndef _generate_beam(models):\n beam_def = models.beam.beamDefinition\n if beam_def == 'transverse_longitude':\n return 'BEAM {} {}'.format(_generate_transverse_dist(models),\n _generate_longitude_dist(models))\n if beam_def == 'cst_pit':\n return 'BEAM CST_PIT {} {}'.format(template_common.lib_file_name(\n 'beam', 'cstFile', models.beam.cstFile), 'COMPRESS' if models.\n beam.cstCompress else '')\n if beam_def == 'cst_pid':\n return 'BEAM CST_PID {} {}'.format(template_common.lib_file_name(\n 'beam', 'cstFile', models.beam.cstFile),\n _generate_energy_phase_distribution(models.energyPhaseDistribution)\n )\n raise RuntimeError('invalid beam def: {}'.format(beam_def))\n\n\ndef _generate_cell_params(el):\n if el.attenuation == 0 and el.aperture == 0:\n return '{} {} {}'.format(el.phaseAdvance, el.phaseVelocity, el.\n acceleratingInvariant)\n return '{} {} {} {} {}'.format(el.phaseAdvance, el.phaseVelocity, el.\n acceleratingInvariant, el.attenuation, el.aperture)\n\n\ndef _generate_charge(models):\n if models.beam.spaceCharge == 'none':\n return ''\n return 'SPCHARGE {} {}'.format(models.beam.spaceCharge.upper(), models.\n beam.spaceChargeCore)\n\n\ndef _generate_current(models):\n return 'CURRENT {} {}'.format(models.beam.current, models.beam.\n numberOfParticles)\n\n\ndef _generate_energy_phase_distribution(dist):\n return '{} {} {}'.format(dist.meanPhase, dist.phaseLength, dist.\n phaseDeviation if dist.distributionType == 'gaussian' else '')\n\n\ndef _generate_lattice(models):\n res = ''\n for el in models.beamline:\n if el.type == 'powerElement':\n res += 'POWER {} {} 
{}'.format(el.inputPower, el.frequency, el.\n phaseShift)\n elif el.type == 'cellElement':\n res += 'CELL {}'.format(_generate_cell_params(el))\n has_cell_or_drift = True\n elif el.type == 'cellsElement':\n res += 'CELLS {} {}'.format(el.repeat, _generate_cell_params(el))\n has_cell_or_drift = True\n elif el.type == 'driftElement':\n res += 'DRIFT {} {} {}'.format(el.length, el.radius, el.meshPoints)\n has_cell_or_drift = True\n elif el.type == 'saveElement':\n pass\n else:\n raise RuntimeError('unknown element type: {}'.format(el.type))\n res += '\\n'\n return res\n\n\ndef _generate_longitude_dist(models):\n dist_type = models.beam.longitudinalDistribution\n if dist_type == 'norm2d':\n dist = models.energyPhaseDistribution\n if dist.distributionType == 'uniform':\n return 'NORM2D {} {} {} {}'.format(dist.meanEnergy, dist.\n energySpread, dist.meanPhase, dist.phaseLength)\n if dist.distributionType == 'gaussian':\n return 'NORM2D {} {} {} {} {} {}'.format(dist.meanEnergy, dist.\n energySpread, dist.energyDeviation, dist.meanPhase, dist.\n phaseLength, dist.phaseDeviation)\n raise RuntimeError('unknown longitudinal distribution type: {}'.\n format(models.longitudinalDistribution.distributionType))\n if dist_type == 'file1d':\n return 'FILE1D {} {}'.format(template_common.lib_file_name('beam',\n 'longitudinalFile1d', models.beam.longitudinalFile1d),\n _generate_energy_phase_distribution(models.energyPhaseDistribution)\n )\n if dist_type == 'file2d':\n return 'FILE2D {}'.format(template_common.lib_file_name('beam',\n 'transversalFile2d', beam.transversalFile2d))\n raise RuntimeError('unknown longitudinal distribution: {}'.format(\n models.beam.longitudinalDistribution))\n\n\ndef _generate_options(models):\n if models.simulationSettings.allowBackwardWaves == '1':\n return 'OPTIONS REVERSE'\n return ''\n\n\ndef _generate_parameters_file(data, run_dir=None, is_parallel=False):\n template_common.validate_models(data, _SCHEMA)\n v = template_common.flatten_data(data['models'], {})\n v['optionsCommand'] = _generate_options(data['models'])\n v['solenoidCommand'] = _generate_solenoid(data['models'])\n v['beamCommand'] = _generate_beam(data['models'])\n v['currentCommand'] = _generate_current(data['models'])\n v['chargeCommand'] = _generate_charge(data['models'])\n if is_parallel:\n v['latticeCommands'] = _generate_lattice(data['models'])\n else:\n v['latticeCommands'] = _DEFAULT_DRIFT_ELEMENT\n return template_common.render_jinja(SIM_TYPE, v)\n\n\ndef _generate_solenoid(models):\n solenoid = models.solenoid\n if solenoid.sourceDefinition == 'none':\n return ''\n if solenoid.sourceDefinition == 'values':\n return 'SOLENOID {} {} {}'.format(solenoid.fieldStrength, solenoid.\n length, solenoid.z0)\n if solenoid.sourceDefinition == 'file':\n return 'SOLENOID {}'.format(template_common.lib_file_name(\n 'solenoid', 'solenoidFile', solenoid.solenoidFile))\n raise RuntimeError('unknown solenoidDefinition: {}'.format(solenoid.\n sourceDefinition))\n\n\ndef _generate_transverse_dist(models):\n dist_type = models.beam.transversalDistribution\n if dist_type == 'twiss4d':\n dist = models.twissDistribution\n return 'TWISS4D {} {} {} {} {} {}'.format(dist.horizontalAlpha,\n dist.horizontalBeta, dist.horizontalEmittance, dist.\n verticalAlpha, dist.verticalBeta, dist.verticalEmittance)\n if dist_type == 'sph2d':\n dist = models.sphericalDistribution\n if dist.curvature == 'flat':\n dist.curvatureFactor = 0\n return 'SPH2D {} {} {}'.format(dist.radialLimit, dist.\n curvatureFactor, dist.thermalEmittance)\n if 
dist_type == 'ell2d':\n dist = models.ellipticalDistribution\n return 'ELL2D {} {} {} {}'.format(dist.aX, dist.bY, dist.\n rotationAngle, dist.rmsDeviationFactor)\n beam = models.beam\n if dist_type == 'file2d':\n return 'FILE2D {}'.format(template_common.lib_file_name('beam',\n 'transversalFile2d', beam.transversalFile2d))\n if dist_type == 'file4d':\n return 'FILE4D {}'.format(template_common.lib_file_name('beam',\n 'transversalFile4d', beam.transversalFile4d))\n raise RuntimeError('unknown transverse distribution: {}'.format(dist_type))\n\n\ndef _parameter_index(name):\n return hellweg_dump_reader.parameter_index(name)\n\n\ndef _parse_error_message(run_dir):\n path = os.path.join(str(run_dir), _HELLWEG_PARSED_FILE)\n if not os.path.exists(path):\n return 'No elements generated'\n text = pkio.read_text(str(path))\n for line in text.split('\\n'):\n match = re.search('^ERROR:\\\\s(.*)$', line)\n if match:\n return match.group(1)\n return 'No output generated'\n\n\ndef _report_title(report_type, enum_name, beam_info):\n return '{}, z={:.4f} cm'.format(_enum_text(enum_name, report_type), 100 *\n hellweg_dump_reader.get_parameter(beam_info, 'z'))\n\n\ndef _simulation_files(data):\n res = []\n solenoid = data.models.solenoid\n if solenoid.sourceDefinition == 'file' and solenoid.solenoidFile:\n res.append(template_common.lib_file_name('solenoid', 'solenoidFile',\n solenoid.solenoidFile))\n beam = data.models.beam\n if beam.beamDefinition == 'cst_pit' or beam.beamDefinition == 'cst_pid':\n res.append(template_common.lib_file_name('beam', 'cstFile', beam.\n cstFile))\n if beam.beamDefinition == 'transverse_longitude':\n if beam.transversalDistribution == 'file2d':\n res.append(template_common.lib_file_name('beam',\n 'transversalFile2d', beam.transversalFile2d))\n elif beam.transversalDistribution == 'file4d':\n res.append(template_common.lib_file_name('beam',\n 'transversalFile4d', beam.transversalFile4d))\n if beam.longitudinalDistribution == 'file1d':\n res.append(template_common.lib_file_name('beam',\n 'longitudinalFile1d', beam.longitudinalFile1d))\n if beam.longitudinalDistribution == 'file2d':\n res.append(template_common.lib_file_name('beam',\n 'longitudinalFile2d', beam.longitudinalFile2d))\n return res\n\n\ndef _summary_text(run_dir):\n return pkio.read_text(os.path.join(str(run_dir), HELLWEG_SUMMARY_FILE))\n",
"step-5": "# -*- coding: utf-8 -*-\nu\"\"\"Hellweg execution template.\n\n:copyright: Copyright (c) 2017 RadiaSoft LLC. All Rights Reserved.\n:license: http://www.apache.org/licenses/LICENSE-2.0.html\n\"\"\"\n\nfrom __future__ import absolute_import, division, print_function\nfrom pykern import pkcollections\nfrom pykern import pkio\nfrom pykern.pkdebug import pkdc, pkdp\nfrom rslinac import solver\nfrom sirepo import simulation_db\nfrom sirepo.template import template_common, hellweg_dump_reader\nimport math\nimport numpy as np\nimport os.path\nimport py.path\nimport re\n\nHELLWEG_DUMP_FILE = 'all-data.bin'\n\nHELLWEG_SUMMARY_FILE = 'output.txt'\n\nHELLWEG_INI_FILE = 'defaults.ini'\n\nHELLWEG_INPUT_FILE = 'input.txt'\n\n#: Simulation type\nSIM_TYPE = 'hellweg'\n\nWANT_BROWSER_FRAME_CACHE = True\n\n# lattice element is required so make it very short and wide drift\n_DEFAULT_DRIFT_ELEMENT = 'DRIFT 1e-16 1e+16 2' + \"\\n\"\n\n_HELLWEG_PARSED_FILE = 'PARSED.TXT'\n\n_REPORT_STYLE_FIELDS = ['colorMap', 'notes']\n\n_SCHEMA = simulation_db.get_schema(SIM_TYPE)\n\ndef background_percent_complete(report, run_dir, is_running):\n if is_running:\n return {\n 'percentComplete': 0,\n 'frameCount': 0,\n }\n dump_file = _dump_file(run_dir)\n if os.path.exists(dump_file):\n beam_header = hellweg_dump_reader.beam_header(dump_file)\n last_update_time = int(os.path.getmtime(dump_file))\n frame_count = beam_header.NPoints\n return {\n 'lastUpdateTime': last_update_time,\n 'percentComplete': 100,\n 'frameCount': frame_count,\n 'summaryData': _summary_text(run_dir),\n }\n return {\n 'percentComplete': 100,\n 'frameCount': 0,\n 'error': _parse_error_message(run_dir)\n }\n\n\ndef extract_beam_histrogram(report, run_dir, frame):\n beam_info = hellweg_dump_reader.beam_info(_dump_file(run_dir), frame)\n points = hellweg_dump_reader.get_points(beam_info, report.reportType)\n hist, edges = np.histogram(points, template_common.histogram_bins(report.histogramBins))\n return {\n 'title': _report_title(report.reportType, 'BeamHistogramReportType', beam_info),\n 'x_range': [edges[0], edges[-1]],\n 'y_label': 'Number of Particles',\n 'x_label': hellweg_dump_reader.get_label(report.reportType),\n 'points': hist.T.tolist(),\n }\n\n\ndef extract_beam_report(report, run_dir, frame):\n data = simulation_db.read_json(run_dir.join(template_common.INPUT_BASE_NAME))\n model = data.models.beamAnimation\n model.update(report)\n beam_info = hellweg_dump_reader.beam_info(_dump_file(run_dir), frame)\n x, y = report.reportType.split('-')\n values = [\n hellweg_dump_reader.get_points(beam_info, x),\n hellweg_dump_reader.get_points(beam_info, y),\n ]\n model['x'] = x\n model['y'] = y\n return template_common.heatmap(values, model, {\n 'x_label': hellweg_dump_reader.get_label(x),\n 'y_label': hellweg_dump_reader.get_label(y),\n 'title': _report_title(report.reportType, 'BeamReportType', beam_info),\n 'z_label': 'Number of Particles',\n 'summaryData': _summary_text(run_dir),\n })\n\n\ndef extract_parameter_report(report, run_dir):\n s = solver.BeamSolver(\n os.path.join(str(run_dir), HELLWEG_INI_FILE),\n os.path.join(str(run_dir), HELLWEG_INPUT_FILE))\n s.load_bin(os.path.join(str(run_dir), HELLWEG_DUMP_FILE))\n y1_var, y2_var = report.reportType.split('-')\n x_field = 'z'\n x = s.get_structure_parameters(_parameter_index(x_field))\n y1 = s.get_structure_parameters(_parameter_index(y1_var))\n y1_extent = [np.min(y1), np.max(y1)]\n y2 = s.get_structure_parameters(_parameter_index(y2_var))\n y2_extent = [np.min(y2), np.max(y2)]\n return {\n 
'title': _enum_text('ParameterReportType', report.reportType),\n 'x_range': [x[0], x[-1]],\n 'y_label': hellweg_dump_reader.get_parameter_label(y1_var),\n 'x_label': hellweg_dump_reader.get_parameter_label(x_field),\n 'x_points': x,\n 'points': [\n y1,\n y2,\n ],\n 'y_range': [min(y1_extent[0], y2_extent[0]), max(y1_extent[1], y2_extent[1])],\n 'y1_title': hellweg_dump_reader.get_parameter_title(y1_var),\n 'y2_title': hellweg_dump_reader.get_parameter_title(y2_var),\n }\n\n\ndef extract_particle_report(report, run_dir):\n x_field = 'z0'\n particle_info = hellweg_dump_reader.particle_info(_dump_file(run_dir), report.reportType, int(report.renderCount))\n x = particle_info['z_values']\n return {\n 'title': _enum_text('ParticleReportType', report.reportType),\n 'x_range': [np.min(x), np.max(x)],\n 'y_label': hellweg_dump_reader.get_label(report.reportType),\n 'x_label': hellweg_dump_reader.get_label(x_field),\n 'x_points': x,\n 'points': particle_info['y_values'],\n 'y_range': particle_info['y_range'],\n }\n\n\ndef fixup_old_data(data):\n for m in ('beamAnimation', 'beamHistogramAnimation', 'parameterAnimation', 'particleAnimation'):\n if m not in data.models:\n data.models[m] = pkcollections.Dict({})\n template_common.update_model_defaults(data.models[m], m, _SCHEMA)\n if 'solenoidFile' not in data['models']['solenoid']:\n data['models']['solenoid']['solenoidFile'] = ''\n if 'beamDefinition' not in data['models']['beam']:\n beam = data['models']['beam']\n beam['beamDefinition'] = 'transverse_longitude'\n beam['cstCompress'] = '0'\n beam['transversalFile2d'] = ''\n beam['transversalFile4d'] = ''\n beam['longitudinalFile1d'] = ''\n beam['longitudinalFile2d'] = ''\n beam['cstFile'] = ''\n template_common.organize_example(data)\n\n\ndef get_animation_name(data):\n return 'animation'\n\n\ndef get_application_data(data):\n if data['method'] == 'compute_particle_ranges':\n return template_common.compute_field_range(data, _compute_range_across_files)\n assert False, 'unknown application data method: {}'.format(data['method'])\n\n\ndef lib_files(data, source_lib):\n return template_common.filename_to_path(_simulation_files(data), source_lib)\n\n\ndef get_simulation_frame(run_dir, data, model_data):\n frame_index = int(data['frameIndex'])\n if data['modelName'] == 'beamAnimation':\n args = template_common.parse_animation_args(\n data,\n {\n '1': ['reportType', 'histogramBins', 'startTime'],\n '': ['reportType', 'histogramBins', 'plotRangeType', 'horizontalSize', 'horizontalOffset', 'verticalSize', 'verticalOffset', 'isRunning', 'startTime'],\n },\n )\n return extract_beam_report(args, run_dir, frame_index)\n elif data['modelName'] == 'beamHistogramAnimation':\n args = template_common.parse_animation_args(\n data,\n {'': ['reportType', 'histogramBins', 'startTime']},\n )\n return extract_beam_histrogram(args, run_dir, frame_index)\n elif data['modelName'] == 'particleAnimation':\n args = template_common.parse_animation_args(\n data,\n {'': ['reportType', 'renderCount', 'startTime']},\n )\n return extract_particle_report(args, run_dir)\n elif data['modelName'] == 'parameterAnimation':\n args = template_common.parse_animation_args(\n data,\n {'': ['reportType', 'startTime']},\n )\n return extract_parameter_report(args, run_dir)\n raise RuntimeError('unknown animation model: {}'.format(data['modelName']))\n\n\ndef models_related_to_report(data):\n \"\"\"What models are required for this data['report']\n\n Args:\n data (dict): simulation\n Returns:\n list: Named models, model fields or values (dict, list) 
that affect report\n \"\"\"\n r = data['report']\n if r == 'animation':\n return []\n res = template_common.report_fields(data, r, _REPORT_STYLE_FIELDS) + [\n 'beam',\n 'ellipticalDistribution',\n 'energyPhaseDistribution',\n 'solenoid',\n 'sphericalDistribution',\n 'twissDistribution',\n ]\n for f in template_common.lib_files(data):\n res.append(f.mtime())\n return res\n\n\ndef python_source_for_model(data, model):\n return '''\nfrom rslinac import solver\n\n{}\n\nwith open('input.txt', 'w') as f:\n f.write(input_file)\n\nwith open('defaults.ini', 'w') as f:\n f.write(ini_file)\n\ns = solver.BeamSolver('defaults.ini', 'input.txt')\ns.solve()\ns.save_output('output.txt')\n '''.format(_generate_parameters_file(data, is_parallel=len(data.models.beamline)))\n\n\ndef remove_last_frame(run_dir):\n pass\n\n\ndef validate_delete_file(data, filename, file_type):\n \"\"\"Returns True if the filename is in use by the simulation data.\"\"\"\n return filename in _simulation_files(data)\n\n\ndef write_parameters(data, run_dir, is_parallel):\n \"\"\"Write the parameters file\n\n Args:\n data (dict): input\n run_dir (py.path): where to write\n is_parallel (bool): run in background?\n \"\"\"\n pkio.write_text(\n run_dir.join(template_common.PARAMETERS_PYTHON_FILE),\n _generate_parameters_file(\n data,\n run_dir,\n is_parallel,\n ),\n )\n\n\ndef _compute_range_across_files(run_dir, data):\n res = {}\n for v in _SCHEMA.enum.BeamReportType:\n x, y = v[0].split('-')\n res[x] = []\n res[y] = []\n dump_file = _dump_file(run_dir)\n if not os.path.exists(dump_file):\n return res\n beam_header = hellweg_dump_reader.beam_header(dump_file)\n for frame in xrange(beam_header.NPoints):\n beam_info = hellweg_dump_reader.beam_info(dump_file, frame)\n for field in res:\n values = hellweg_dump_reader.get_points(beam_info, field)\n if not len(values):\n pass\n elif len(res[field]):\n res[field][0] = min(min(values), res[field][0])\n res[field][1] = max(max(values), res[field][1])\n else:\n res[field] = [min(values), max(values)]\n return res\n\n\ndef _dump_file(run_dir):\n return os.path.join(str(run_dir), HELLWEG_DUMP_FILE)\n\n\ndef _enum_text(enum_name, v):\n enum_values = _SCHEMA['enum'][enum_name]\n for e in enum_values:\n if e[0] == v:\n return e[1]\n raise RuntimeError('invalid enum value: {}, {}'.format(enum_values, v))\n\n\ndef _generate_beam(models):\n # BEAM SPH2D 0.564 -15 5 NORM2D 0.30 0.0000001 90 180\n beam_def = models.beam.beamDefinition\n if beam_def == 'transverse_longitude':\n return 'BEAM {} {}'.format(_generate_transverse_dist(models), _generate_longitude_dist(models))\n if beam_def == 'cst_pit':\n return 'BEAM CST_PIT {} {}'.format(\n template_common.lib_file_name('beam', 'cstFile', models.beam.cstFile),\n 'COMPRESS' if models.beam.cstCompress else '',\n )\n if beam_def == 'cst_pid':\n return 'BEAM CST_PID {} {}'.format(\n template_common.lib_file_name('beam', 'cstFile', models.beam.cstFile),\n _generate_energy_phase_distribution(models.energyPhaseDistribution),\n )\n raise RuntimeError('invalid beam def: {}'.format(beam_def))\n\n\ndef _generate_cell_params(el):\n #TODO(pjm): add an option field to select auto-calculate\n if el.attenuation == 0 and el.aperture == 0:\n return '{} {} {}'.format(el.phaseAdvance, el.phaseVelocity, el.acceleratingInvariant)\n return '{} {} {} {} {}'.format(el.phaseAdvance, el.phaseVelocity, el.acceleratingInvariant, el.attenuation, el.aperture)\n\n\ndef _generate_charge(models):\n if models.beam.spaceCharge == 'none':\n return ''\n return 'SPCHARGE {} 
{}'.format(models.beam.spaceCharge.upper(), models.beam.spaceChargeCore)\n\n\ndef _generate_current(models):\n return 'CURRENT {} {}'.format(models.beam.current, models.beam.numberOfParticles)\n\n\ndef _generate_energy_phase_distribution(dist):\n return '{} {} {}'.format(\n dist.meanPhase,\n dist.phaseLength,\n dist.phaseDeviation if dist.distributionType == 'gaussian' else '',\n )\n\n\ndef _generate_lattice(models):\n res = ''\n for el in models.beamline:\n if el.type == 'powerElement':\n res += 'POWER {} {} {}'.format(el.inputPower, el.frequency, el.phaseShift)\n elif el.type == 'cellElement':\n res += 'CELL {}'.format(_generate_cell_params(el))\n has_cell_or_drift = True\n elif el.type == 'cellsElement':\n res += 'CELLS {} {}'.format(el.repeat, _generate_cell_params(el))\n has_cell_or_drift = True\n elif el.type == 'driftElement':\n res += 'DRIFT {} {} {}'.format(el.length, el.radius, el.meshPoints)\n has_cell_or_drift = True\n elif el.type == 'saveElement':\n #TODO(pjm): implement this\n pass\n else:\n raise RuntimeError('unknown element type: {}'.format(el.type))\n res += \"\\n\"\n return res\n\n\ndef _generate_longitude_dist(models):\n dist_type = models.beam.longitudinalDistribution\n if dist_type == 'norm2d':\n dist = models.energyPhaseDistribution\n if dist.distributionType == 'uniform':\n return 'NORM2D {} {} {} {}'.format(\n dist.meanEnergy, dist.energySpread, dist.meanPhase, dist.phaseLength)\n if dist.distributionType == 'gaussian':\n return 'NORM2D {} {} {} {} {} {}'.format(\n dist.meanEnergy, dist.energySpread, dist.energyDeviation, dist.meanPhase, dist.phaseLength, dist.phaseDeviation)\n raise RuntimeError('unknown longitudinal distribution type: {}'.format(models.longitudinalDistribution.distributionType))\n if dist_type == 'file1d':\n return 'FILE1D {} {}'.format(\n template_common.lib_file_name('beam', 'longitudinalFile1d', models.beam.longitudinalFile1d),\n _generate_energy_phase_distribution(models.energyPhaseDistribution),\n )\n if dist_type == 'file2d':\n return 'FILE2D {}'.format(template_common.lib_file_name('beam', 'transversalFile2d', beam.transversalFile2d))\n\n raise RuntimeError('unknown longitudinal distribution: {}'.format(models.beam.longitudinalDistribution))\n\n\ndef _generate_options(models):\n if models.simulationSettings.allowBackwardWaves == '1':\n return 'OPTIONS REVERSE'\n return ''\n\n\ndef _generate_parameters_file(data, run_dir=None, is_parallel=False):\n template_common.validate_models(data, _SCHEMA)\n v = template_common.flatten_data(data['models'], {})\n v['optionsCommand'] = _generate_options(data['models'])\n v['solenoidCommand'] = _generate_solenoid(data['models'])\n v['beamCommand'] = _generate_beam(data['models'])\n v['currentCommand'] = _generate_current(data['models'])\n v['chargeCommand'] = _generate_charge(data['models'])\n if is_parallel:\n v['latticeCommands'] = _generate_lattice(data['models'])\n else:\n v['latticeCommands'] = _DEFAULT_DRIFT_ELEMENT\n return template_common.render_jinja(SIM_TYPE, v)\n\n\ndef _generate_solenoid(models):\n solenoid = models.solenoid\n if solenoid.sourceDefinition == 'none':\n return ''\n if solenoid.sourceDefinition == 'values':\n #TODO(pjm): latest version also has solenoid.fringeRegion\n return 'SOLENOID {} {} {}'.format(\n solenoid.fieldStrength, solenoid.length, solenoid.z0)\n if solenoid.sourceDefinition == 'file':\n return 'SOLENOID {}'.format(\n template_common.lib_file_name('solenoid', 'solenoidFile', solenoid.solenoidFile))\n raise RuntimeError('unknown solenoidDefinition: 
{}'.format(solenoid.sourceDefinition))\n\n\ndef _generate_transverse_dist(models):\n dist_type = models.beam.transversalDistribution\n if dist_type == 'twiss4d':\n dist = models.twissDistribution\n return 'TWISS4D {} {} {} {} {} {}'.format(\n dist.horizontalAlpha, dist.horizontalBeta, dist.horizontalEmittance,\n dist.verticalAlpha, dist.verticalBeta, dist.verticalEmittance)\n if dist_type == 'sph2d':\n dist = models.sphericalDistribution\n if dist.curvature == 'flat':\n dist.curvatureFactor = 0\n return 'SPH2D {} {} {}'.format(dist.radialLimit, dist.curvatureFactor, dist.thermalEmittance)\n if dist_type == 'ell2d':\n dist = models.ellipticalDistribution\n return 'ELL2D {} {} {} {}'.format(dist.aX, dist.bY, dist.rotationAngle, dist.rmsDeviationFactor)\n beam = models.beam\n if dist_type == 'file2d':\n return 'FILE2D {}'.format(template_common.lib_file_name('beam', 'transversalFile2d', beam.transversalFile2d))\n if dist_type == 'file4d':\n return 'FILE4D {}'.format(template_common.lib_file_name('beam', 'transversalFile4d', beam.transversalFile4d))\n raise RuntimeError('unknown transverse distribution: {}'.format(dist_type))\n\n\ndef _parameter_index(name):\n return hellweg_dump_reader.parameter_index(name)\n\n\ndef _parse_error_message(run_dir):\n path = os.path.join(str(run_dir), _HELLWEG_PARSED_FILE)\n if not os.path.exists(path):\n return 'No elements generated'\n text = pkio.read_text(str(path))\n for line in text.split(\"\\n\"):\n match = re.search('^ERROR:\\s(.*)$', line)\n if match:\n return match.group(1)\n return 'No output generated'\n\n\ndef _report_title(report_type, enum_name, beam_info):\n return '{}, z={:.4f} cm'.format(\n _enum_text(enum_name, report_type),\n 100 * hellweg_dump_reader.get_parameter(beam_info, 'z'))\n\n\ndef _simulation_files(data):\n res = []\n solenoid = data.models.solenoid\n if solenoid.sourceDefinition == 'file' and solenoid.solenoidFile:\n res.append(template_common.lib_file_name('solenoid', 'solenoidFile', solenoid.solenoidFile))\n beam = data.models.beam\n if beam.beamDefinition == 'cst_pit' or beam.beamDefinition == 'cst_pid':\n res.append(template_common.lib_file_name('beam', 'cstFile', beam.cstFile))\n if beam.beamDefinition == 'transverse_longitude':\n if beam.transversalDistribution == 'file2d':\n res.append(template_common.lib_file_name('beam', 'transversalFile2d', beam.transversalFile2d))\n elif beam.transversalDistribution == 'file4d':\n res.append(template_common.lib_file_name('beam', 'transversalFile4d', beam.transversalFile4d))\n if beam.longitudinalDistribution == 'file1d':\n res.append(template_common.lib_file_name('beam', 'longitudinalFile1d', beam.longitudinalFile1d))\n if beam.longitudinalDistribution == 'file2d':\n res.append(template_common.lib_file_name('beam', 'longitudinalFile2d', beam.longitudinalFile2d))\n return res\n\n\ndef _summary_text(run_dir):\n return pkio.read_text(os.path.join(str(run_dir), HELLWEG_SUMMARY_FILE))\n",
"step-ids": [
22,
26,
32,
36,
37
]
}
|
[
22,
26,
32,
36,
37
] |
from rest_framework.serializers import ModelSerializer
# app_calendar defines its own User model, which would shadow the
# django.contrib.auth User import anyway, so only the local one is kept.
from app_calendar.models import Holiday, Country, Event, User


class CountrySerializer(ModelSerializer):
    class Meta:
        model = Country
        fields = '__all__'


class UserSerializer(ModelSerializer):
    class Meta:
        model = User
        fields = '__all__'


class EventSerializer(ModelSerializer):
    class Meta:
        model = Event
        fields = '__all__'


class HolidaySerializerRead(ModelSerializer):
    # Read side: responses embed the full nested country object.
    country = CountrySerializer()

    class Meta:
        model = Holiday
        fields = '__all__'


class HolidaySerializerWrite(ModelSerializer):
    # Write side: country stays the default primary-key field.
    class Meta:
        model = Holiday
        fields = '__all__'
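# --- Hypothetical usage sketch, not part of app_calendar ---
# One common way to wire the read/write split above into a DRF viewset:
# serve the nested read serializer for safe actions and the flat write
# serializer otherwise. HolidayViewSet is an assumed name.
from rest_framework.viewsets import ModelViewSet


class HolidayViewSet(ModelViewSet):
    queryset = Holiday.objects.all()

    def get_serializer_class(self):
        # Reads embed the country object; writes accept a country pk.
        if self.action in ('list', 'retrieve'):
            return HolidaySerializerRead
        return HolidaySerializerWrite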
|
normal
|
{
"blob_id": "5b366b0f6813f686600df9da4a17f190f034a10c",
"index": 2046,
"step-1": "<mask token>\n\n\nclass EventSerializer(ModelSerializer):\n\n\n class Meta:\n model = Event\n fields = '__all__'\n\n\nclass HolidaySerializerRead(ModelSerializer):\n country = CountrySerializer()\n\n\n class Meta:\n model = Holiday\n fields = '__all__'\n\n\nclass HolidaySerializerWrite(ModelSerializer):\n\n\n class Meta:\n model = Holiday\n fields = '__all__'\n",
"step-2": "<mask token>\n\n\nclass UserSerializer(ModelSerializer):\n\n\n class Meta:\n model = User\n fields = '__all__'\n\n\nclass EventSerializer(ModelSerializer):\n\n\n class Meta:\n model = Event\n fields = '__all__'\n\n\nclass HolidaySerializerRead(ModelSerializer):\n country = CountrySerializer()\n\n\n class Meta:\n model = Holiday\n fields = '__all__'\n\n\nclass HolidaySerializerWrite(ModelSerializer):\n\n\n class Meta:\n model = Holiday\n fields = '__all__'\n",
"step-3": "<mask token>\n\n\nclass CountrySerializer(ModelSerializer):\n\n\n class Meta:\n model = Country\n fields = '__all__'\n\n\nclass UserSerializer(ModelSerializer):\n\n\n class Meta:\n model = User\n fields = '__all__'\n\n\nclass EventSerializer(ModelSerializer):\n\n\n class Meta:\n model = Event\n fields = '__all__'\n\n\nclass HolidaySerializerRead(ModelSerializer):\n country = CountrySerializer()\n\n\n class Meta:\n model = Holiday\n fields = '__all__'\n\n\nclass HolidaySerializerWrite(ModelSerializer):\n\n\n class Meta:\n model = Holiday\n fields = '__all__'\n",
"step-4": "from django.contrib.auth.models import User\nfrom rest_framework.serializers import ModelSerializer\nfrom app_calendar.models import Holiday, Country, Event, User\n\n\nclass CountrySerializer(ModelSerializer):\n\n\n class Meta:\n model = Country\n fields = '__all__'\n\n\nclass UserSerializer(ModelSerializer):\n\n\n class Meta:\n model = User\n fields = '__all__'\n\n\nclass EventSerializer(ModelSerializer):\n\n\n class Meta:\n model = Event\n fields = '__all__'\n\n\nclass HolidaySerializerRead(ModelSerializer):\n country = CountrySerializer()\n\n\n class Meta:\n model = Holiday\n fields = '__all__'\n\n\nclass HolidaySerializerWrite(ModelSerializer):\n\n\n class Meta:\n model = Holiday\n fields = '__all__'\n",
"step-5": null,
"step-ids": [
4,
5,
6,
7
]
}
|
[
4,
5,
6,
7
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
dependencies = [('core', '0001_initial')]
    operations = [
        migrations.AlterField(model_name='divida', name='id_cliente',
            field=models.CharField(max_length=10)),
        migrations.AlterField(model_name='divida', name='motivo',
            field=models.CharField(max_length=100)),
    ]
<|reserved_special_token_1|>
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [('core', '0001_initial')]
    operations = [
        migrations.AlterField(model_name='divida', name='id_cliente',
            field=models.CharField(max_length=10)),
        migrations.AlterField(model_name='divida', name='motivo',
            field=models.CharField(max_length=100)),
    ]
<|reserved_special_token_1|>
# Generated by Django 2.2.1 on 2019-05-05 18:41
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='divida',
name='id_cliente',
field=models.CharField(max_length=10),
),
migrations.AlterField(
model_name='divida',
name='motivo',
field=models.CharField(max_length=100),
),
]
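# Hedged usage note: this migration is normally applied with
# `python manage.py migrate core`; from Python, Django's documented
# call_command entry point does the same:
#
#     from django.core.management import call_command
#     call_command('migrate', 'core')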
|
flexible
|
{
"blob_id": "1ce7b292f89fdf3f978c75d4cdf65b6991f71d6f",
"index": 7499,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('core', '0001_initial')]\n operations = [migrations.AlterField(model_name='divida', name=\n 'id_cliente', field=models.CharField(max_length=10)), migrations.\n AlterField(model_name='divida', name='motivo', field=models.\n CharField(max_length=100))]\n",
"step-4": "from django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n dependencies = [('core', '0001_initial')]\n operations = [migrations.AlterField(model_name='divida', name=\n 'id_cliente', field=models.CharField(max_length=10)), migrations.\n AlterField(model_name='divida', name='motivo', field=models.\n CharField(max_length=100))]\n",
"step-5": "# Generated by Django 2.2.1 on 2019-05-05 18:41\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('core', '0001_initial'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='divida',\n name='id_cliente',\n field=models.CharField(max_length=10),\n ),\n migrations.AlterField(\n model_name='divida',\n name='motivo',\n field=models.CharField(max_length=100),\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
def index_page(url):
res = requests.get(url)
res.encoding = res.apparent_encoding
next_page(res.text)
def next_page(html):
data = json.loads(html)
for i in data['data']['list']:
img_url = i['cover']
img_name = i['title']
get_img(img_url, img_name)
def get_img(img_url, img_name):
img = requests.get(img_url)
with open('img/{}.jpg'.format(img_name), 'w+b') as f:
f.write(img.content)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def main():
ex = futures.ThreadPoolExecutor(max_workers=50)
for i in trange(1, 152):
url = (
            'https://api.bilibili.com/pgc/season/index/result?season_version=-1&area=-1&is_finish=-1&copyright=-1&season_status=-1&season_month=-1&year=-1&style_id=-1&order=3&st=1&sort=0&page={}&season_type=1&pagesize=20&type=1'
.format(i))
ex.submit(index_page, url)
def index_page(url):
res = requests.get(url)
res.encoding = res.apparent_encoding
next_page(res.text)
def next_page(html):
data = json.loads(html)
for i in data['data']['list']:
img_url = i['cover']
img_name = i['title']
get_img(img_url, img_name)
def get_img(img_url, img_name):
img = requests.get(img_url)
with open('img/{}.jpg'.format(img_name), 'w+b') as f:
f.write(img.content)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def main():
ex = futures.ThreadPoolExecutor(max_workers=50)
for i in trange(1, 152):
url = (
            'https://api.bilibili.com/pgc/season/index/result?season_version=-1&area=-1&is_finish=-1&copyright=-1&season_status=-1&season_month=-1&year=-1&style_id=-1&order=3&st=1&sort=0&page={}&season_type=1&pagesize=20&type=1'
.format(i))
ex.submit(index_page, url)
def index_page(url):
res = requests.get(url)
res.encoding = res.apparent_encoding
next_page(res.text)
def next_page(html):
data = json.loads(html)
for i in data['data']['list']:
img_url = i['cover']
img_name = i['title']
get_img(img_url, img_name)
def get_img(img_url, img_name):
img = requests.get(img_url)
with open('img/{}.jpg'.format(img_name), 'w+b') as f:
f.write(img.content)
main()
<|reserved_special_token_1|>
import requests
import json
from concurrent import futures
from tqdm import trange
def main():
ex = futures.ThreadPoolExecutor(max_workers=50)
for i in trange(1, 152):
url = (
            'https://api.bilibili.com/pgc/season/index/result?season_version=-1&area=-1&is_finish=-1&copyright=-1&season_status=-1&season_month=-1&year=-1&style_id=-1&order=3&st=1&sort=0&page={}&season_type=1&pagesize=20&type=1'
.format(i))
ex.submit(index_page, url)
def index_page(url):
res = requests.get(url)
res.encoding = res.apparent_encoding
next_page(res.text)
def next_page(html):
data = json.loads(html)
for i in data['data']['list']:
img_url = i['cover']
img_name = i['title']
get_img(img_url, img_name)
def get_img(img_url, img_name):
img = requests.get(img_url)
with open('img/{}.jpg'.format(img_name), 'w+b') as f:
f.write(img.content)
main()
<|reserved_special_token_1|>
import requests
import json
from concurrent import futures
from tqdm import trange
def main():
ex=futures.ThreadPoolExecutor(max_workers=50)
for i in trange(1,152):
url="https://api.bilibili.com/pgc/season/index/result?season_version=-1&" \
"area=-1&is_finish=-1©right=-1&season_status=-1&season_month=-1&year=-1&style_id=-1&order=3&st=1&sort=0&" \
"page={}&" \
"season_type=1&pagesize=20&type=1".format(i)
ex.submit(index_page,url)
def index_page(url):
res=requests.get(url)
res.encoding=res.apparent_encoding
next_page(res.text)
def next_page(html):
data=json.loads(html)
for i in data['data']['list']:
img_url=i['cover']
img_name=i['title']
# print(img_name)
get_img(img_url,img_name)
def get_img(img_url,img_name):
    img=requests.get(img_url)
    # Note: the img/ directory must already exist, and titles containing
    # path separators will break this path (see the hardening sketch below).
    with open('img/{}.jpg'.format(img_name),'w+b') as f:
        f.write(img.content)
main()
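# --- Hedged hardening sketch; names below are illustrative, not from the
# script above. Bilibili titles can contain characters that are invalid in
# file names (e.g. '/'), so open('img/{}.jpg'.format(img_name), ...) can
# raise OSError, and because the submitted futures are never waited on,
# such errors disappear silently. A minimal filename guard:
import re

def safe_name(title):
    # Replace path separators and other awkward characters before writing.
    return re.sub(r'[\\/:*?"<>|]', '_', title)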
|
flexible
|
{
"blob_id": "ff8b6bc607dac889da05b9f7e9b3595151153614",
"index": 7358,
"step-1": "<mask token>\n\n\ndef index_page(url):\n res = requests.get(url)\n res.encoding = res.apparent_encoding\n next_page(res.text)\n\n\ndef next_page(html):\n data = json.loads(html)\n for i in data['data']['list']:\n img_url = i['cover']\n img_name = i['title']\n get_img(img_url, img_name)\n\n\ndef get_img(img_url, img_name):\n img = requests.get(img_url)\n with open('img/{}.jpg'.format(img_name), 'w+b') as f:\n f.write(img.content)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef main():\n ex = futures.ThreadPoolExecutor(max_workers=50)\n for i in trange(1, 152):\n url = (\n 'https://api.bilibili.com/pgc/season/index/result?season_version=-1&area=-1&is_finish=-1©right=-1&season_status=-1&season_month=-1&year=-1&style_id=-1&order=3&st=1&sort=0&page={}&season_type=1&pagesize=20&type=1'\n .format(i))\n ex.submit(index_page, url)\n\n\ndef index_page(url):\n res = requests.get(url)\n res.encoding = res.apparent_encoding\n next_page(res.text)\n\n\ndef next_page(html):\n data = json.loads(html)\n for i in data['data']['list']:\n img_url = i['cover']\n img_name = i['title']\n get_img(img_url, img_name)\n\n\ndef get_img(img_url, img_name):\n img = requests.get(img_url)\n with open('img/{}.jpg'.format(img_name), 'w+b') as f:\n f.write(img.content)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef main():\n ex = futures.ThreadPoolExecutor(max_workers=50)\n for i in trange(1, 152):\n url = (\n 'https://api.bilibili.com/pgc/season/index/result?season_version=-1&area=-1&is_finish=-1©right=-1&season_status=-1&season_month=-1&year=-1&style_id=-1&order=3&st=1&sort=0&page={}&season_type=1&pagesize=20&type=1'\n .format(i))\n ex.submit(index_page, url)\n\n\ndef index_page(url):\n res = requests.get(url)\n res.encoding = res.apparent_encoding\n next_page(res.text)\n\n\ndef next_page(html):\n data = json.loads(html)\n for i in data['data']['list']:\n img_url = i['cover']\n img_name = i['title']\n get_img(img_url, img_name)\n\n\ndef get_img(img_url, img_name):\n img = requests.get(img_url)\n with open('img/{}.jpg'.format(img_name), 'w+b') as f:\n f.write(img.content)\n\n\nmain()\n",
"step-4": "import requests\nimport json\nfrom concurrent import futures\nfrom tqdm import trange\n\n\ndef main():\n ex = futures.ThreadPoolExecutor(max_workers=50)\n for i in trange(1, 152):\n url = (\n 'https://api.bilibili.com/pgc/season/index/result?season_version=-1&area=-1&is_finish=-1©right=-1&season_status=-1&season_month=-1&year=-1&style_id=-1&order=3&st=1&sort=0&page={}&season_type=1&pagesize=20&type=1'\n .format(i))\n ex.submit(index_page, url)\n\n\ndef index_page(url):\n res = requests.get(url)\n res.encoding = res.apparent_encoding\n next_page(res.text)\n\n\ndef next_page(html):\n data = json.loads(html)\n for i in data['data']['list']:\n img_url = i['cover']\n img_name = i['title']\n get_img(img_url, img_name)\n\n\ndef get_img(img_url, img_name):\n img = requests.get(img_url)\n with open('img/{}.jpg'.format(img_name), 'w+b') as f:\n f.write(img.content)\n\n\nmain()\n",
"step-5": "import requests\r\nimport json\r\nfrom concurrent import futures\r\nfrom tqdm import trange\r\n\r\ndef main():\r\n ex=futures.ThreadPoolExecutor(max_workers=50)\r\n for i in trange(1,152):\r\n url=\"https://api.bilibili.com/pgc/season/index/result?season_version=-1&\" \\\r\n \"area=-1&is_finish=-1©right=-1&season_status=-1&season_month=-1&year=-1&style_id=-1&order=3&st=1&sort=0&\" \\\r\n \"page={}&\" \\\r\n \"season_type=1&pagesize=20&type=1\".format(i)\r\n ex.submit(index_page,url)\r\n\r\ndef index_page(url):\r\n res=requests.get(url)\r\n res.encoding=res.apparent_encoding\r\n next_page(res.text)\r\n\r\ndef next_page(html):\r\n data=json.loads(html)\r\n for i in data['data']['list']:\r\n img_url=i['cover']\r\n img_name=i['title']\r\n # print(img_name)\r\n get_img(img_url,img_name)\r\n\r\ndef get_img(img_url,img_name):\r\n img=requests.get(img_url)\r\n with open('img/{}.jpg'.format(img_name),'w+b') as f:\r\n f.write(img.content)\r\nmain()",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
import logging
import argparse
import getpass
import errno
import re
import smtplib
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
import dns.resolver
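# Deliver a message straight to the recipient's mail exchangers (MX hosts),
# bypassing any local relay.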
class Mail(object):
def __init__(self, recipient=None, sender=None, subject=None, body=None):
self.recipient = recipient
self.sender = sender or '{}@example.com'.format(getpass.getuser())
self.subject = subject or 'Sir! My sir!'
self.body = body or 'A message from their majesty.'
self.verbose = False
@property
def domain(self):
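        # Note: this captures only the first two dot-separated labels after the '@',
        # so multi-label domains (e.g. user@mail.example.co.uk) resolve incorrectly.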
m = re.match(r'.+@(\w+\.\w+)', self.recipient)
if m:
return m.group(1)
else:
raise ValueError('Unable to get recipient domain')
@property
def message(self):
m = MIMEMultipart('alternative')
m['Subject'] = self.subject
m['From'] = self.sender
m['To'] = self.recipient
m.attach(MIMEText(self.body, 'plain'))
return m
def send(self):
"""
Sends an email to a single recipient straight to his MTA.
Looks up for the MX DNS records of the recipient SMTP server and attempts the delivery through them.
"""
answers = dns.resolver.query(self.domain, 'MX')
try:
for answer in answers:
ex = answer.exchange.to_text()
server = smtplib.SMTP(ex)
server.set_debuglevel(self.verbose)
                server.sendmail(self.sender, [self.recipient], self.message.as_string())
                server.quit()
                # Stop after the first exchange accepts the message to avoid duplicate deliveries.
                break
except OSError as e:
            if e.errno == errno.ENETUNREACH:
print('Looks like port 25 is blocked')
raise e
class App(object):
def run(self):
mail = Mail()
self.parse(mail)
mail.send()
@classmethod
def parse(cls, mail):
parser = argparse.ArgumentParser(prog='lumpy', description=mail.send.__doc__)
arg = parser.add_argument
arg('--from', '-f', nargs='?', dest='sender')
arg('recipient')
arg('--subject', '-s', nargs='?')
arg('--body', '-b', nargs='?')
arg('--verbose', '-v', action='store_true')
parser.parse_args(namespace=mail)
if __name__ == "__main__":
App().run()
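# Example invocation (hypothetical address; assumes this file is saved as lumpy.py):
#   python lumpy.py someone@example.org --subject "Hi" --body "Direct-to-MX test" -v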
|
normal
|
{
"blob_id": "3a678f9b5274f008a510a23b2358fe2a506c3221",
"index": 4061,
"step-1": "<mask token>\n\n\nclass Mail(object):\n <mask token>\n <mask token>\n\n @property\n def message(self):\n m = MIMEMultipart('alternative')\n m['Subject'] = self.subject\n m['From'] = self.sender\n m['To'] = self.recipient\n m.attach(MIMEText(self.body, 'plain'))\n return m\n <mask token>\n\n\nclass App(object):\n\n def run(self):\n mail = Mail()\n self.parse(mail)\n mail.send()\n\n @classmethod\n def parse(cls, mail):\n parser = argparse.ArgumentParser(prog='lumpy', description=mail.\n send.__doc__)\n arg = parser.add_argument\n arg('--from', '-f', nargs='?', dest='sender')\n arg('recipient')\n arg('--subject', '-s', nargs='?')\n arg('--body', '-b', nargs='?')\n arg('--verbose', '-v', action='store_true')\n parser.parse_args(namespace=mail)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Mail(object):\n <mask token>\n\n @property\n def domain(self):\n m = re.match('.+@(\\\\w+\\\\.\\\\w+)', self.recipient)\n if m:\n return m.group(1)\n else:\n raise ValueError('Unable to get recipient domain')\n\n @property\n def message(self):\n m = MIMEMultipart('alternative')\n m['Subject'] = self.subject\n m['From'] = self.sender\n m['To'] = self.recipient\n m.attach(MIMEText(self.body, 'plain'))\n return m\n <mask token>\n\n\nclass App(object):\n\n def run(self):\n mail = Mail()\n self.parse(mail)\n mail.send()\n\n @classmethod\n def parse(cls, mail):\n parser = argparse.ArgumentParser(prog='lumpy', description=mail.\n send.__doc__)\n arg = parser.add_argument\n arg('--from', '-f', nargs='?', dest='sender')\n arg('recipient')\n arg('--subject', '-s', nargs='?')\n arg('--body', '-b', nargs='?')\n arg('--verbose', '-v', action='store_true')\n parser.parse_args(namespace=mail)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Mail(object):\n <mask token>\n\n @property\n def domain(self):\n m = re.match('.+@(\\\\w+\\\\.\\\\w+)', self.recipient)\n if m:\n return m.group(1)\n else:\n raise ValueError('Unable to get recipient domain')\n\n @property\n def message(self):\n m = MIMEMultipart('alternative')\n m['Subject'] = self.subject\n m['From'] = self.sender\n m['To'] = self.recipient\n m.attach(MIMEText(self.body, 'plain'))\n return m\n\n def send(self):\n \"\"\"\n Sends an email to a single recipient straight to his MTA.\n Looks up for the MX DNS records of the recipient SMTP server and attempts the delivery through them.\n \"\"\"\n answers = dns.resolver.query(self.domain, 'MX')\n try:\n for answer in answers:\n ex = answer.exchange.to_text()\n server = smtplib.SMTP(ex)\n server.set_debuglevel(self.verbose)\n server.sendmail(self.sender, [self.recipient], self.message\n .as_string())\n server.quit()\n except OSError as e:\n if e.errno is errno.ENETUNREACH:\n print('Looks like port 25 is blocked')\n raise e\n\n\nclass App(object):\n\n def run(self):\n mail = Mail()\n self.parse(mail)\n mail.send()\n\n @classmethod\n def parse(cls, mail):\n parser = argparse.ArgumentParser(prog='lumpy', description=mail.\n send.__doc__)\n arg = parser.add_argument\n arg('--from', '-f', nargs='?', dest='sender')\n arg('recipient')\n arg('--subject', '-s', nargs='?')\n arg('--body', '-b', nargs='?')\n arg('--verbose', '-v', action='store_true')\n parser.parse_args(namespace=mail)\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass Mail(object):\n\n def __init__(self, recipient=None, sender=None, subject=None, body=None):\n self.recipient = recipient\n self.sender = sender or '{}@example.com'.format(getpass.getuser())\n self.subject = subject or 'Sir! My sir!'\n self.body = body or 'A message from their majesty.'\n self.verbose = False\n\n @property\n def domain(self):\n m = re.match('.+@(\\\\w+\\\\.\\\\w+)', self.recipient)\n if m:\n return m.group(1)\n else:\n raise ValueError('Unable to get recipient domain')\n\n @property\n def message(self):\n m = MIMEMultipart('alternative')\n m['Subject'] = self.subject\n m['From'] = self.sender\n m['To'] = self.recipient\n m.attach(MIMEText(self.body, 'plain'))\n return m\n\n def send(self):\n \"\"\"\n Sends an email to a single recipient straight to his MTA.\n Looks up for the MX DNS records of the recipient SMTP server and attempts the delivery through them.\n \"\"\"\n answers = dns.resolver.query(self.domain, 'MX')\n try:\n for answer in answers:\n ex = answer.exchange.to_text()\n server = smtplib.SMTP(ex)\n server.set_debuglevel(self.verbose)\n server.sendmail(self.sender, [self.recipient], self.message\n .as_string())\n server.quit()\n except OSError as e:\n if e.errno is errno.ENETUNREACH:\n print('Looks like port 25 is blocked')\n raise e\n\n\nclass App(object):\n\n def run(self):\n mail = Mail()\n self.parse(mail)\n mail.send()\n\n @classmethod\n def parse(cls, mail):\n parser = argparse.ArgumentParser(prog='lumpy', description=mail.\n send.__doc__)\n arg = parser.add_argument\n arg('--from', '-f', nargs='?', dest='sender')\n arg('recipient')\n arg('--subject', '-s', nargs='?')\n arg('--body', '-b', nargs='?')\n arg('--verbose', '-v', action='store_true')\n parser.parse_args(namespace=mail)\n\n\n<mask token>\n",
"step-5": "import logging\nimport argparse\nimport getpass\nimport errno\nimport re\nimport smtplib\nfrom email.mime.multipart import MIMEMultipart\nfrom email.mime.text import MIMEText\n\nimport dns.resolver\n\nclass Mail(object):\n\n def __init__(self, recipient=None, sender=None, subject=None, body=None):\n self.recipient = recipient\n self.sender = sender or '{}@example.com'.format(getpass.getuser())\n self.subject = subject or 'Sir! My sir!'\n self.body = body or 'A message from their majesty.'\n self.verbose = False\n\n @property\n def domain(self):\n m = re.match(r'.+@(\\w+\\.\\w+)', self.recipient)\n if m:\n return m.group(1)\n else:\n raise ValueError('Unable to get recipient domain')\n\n @property\n def message(self):\n m = MIMEMultipart('alternative')\n m['Subject'] = self.subject\n m['From'] = self.sender\n m['To'] = self.recipient\n m.attach(MIMEText(self.body, 'plain'))\n return m\n\n def send(self):\n \"\"\"\n Sends an email to a single recipient straight to his MTA.\n Looks up for the MX DNS records of the recipient SMTP server and attempts the delivery through them.\n \"\"\"\n answers = dns.resolver.query(self.domain, 'MX')\n try:\n for answer in answers:\n ex = answer.exchange.to_text()\n server = smtplib.SMTP(ex)\n server.set_debuglevel(self.verbose)\n server.sendmail(self.sender, [self.recipient], self.message.as_string())\n server.quit()\n except OSError as e:\n if e.errno is errno.ENETUNREACH:\n print('Looks like port 25 is blocked')\n raise e\n\n\nclass App(object):\n\n def run(self):\n mail = Mail()\n self.parse(mail)\n mail.send()\n\n @classmethod\n def parse(cls, mail):\n parser = argparse.ArgumentParser(prog='lumpy', description=mail.send.__doc__)\n arg = parser.add_argument\n\n arg('--from', '-f', nargs='?', dest='sender')\n arg('recipient')\n arg('--subject', '-s', nargs='?')\n arg('--body', '-b', nargs='?')\n arg('--verbose', '-v', action='store_true')\n \n parser.parse_args(namespace=mail)\n\n\nif __name__ == \"__main__\":\n App().run()\n",
"step-ids": [
5,
6,
7,
8,
11
]
}
|
[
5,
6,
7,
8,
11
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
engine.setProperty('rate', 150)
<|reserved_special_token_0|>
print(rate)
<|reserved_special_token_0|>
while i < l:
engine.save_to_file(a[i], 'TTS/trump/{}.mp3'.format(str(i)))
engine.runAndWait()
if i + 3 < l:
time_1 = a[i - 1].split(' --> ')[1].split(':')
time_1_mil = time_1[-1].split(',')
time_1_mil = int(time_1_mil[0]) * 1000 + int(time_1_mil[1]) % 1000
time_1_hour = float(time_1[-2]) * 60000
time_2 = a[i + 3].split(' --> ')[0].split(':')
time_2_hour = float(time_2[-2]) * 60000
time_2_mil = time_2[-1].split(',')
time_2_mil = int(time_2_mil[0]) * 1000 + int(time_2_mil[1]) % 1000
duration = float(time_2_hour + time_2_mil) - float(time_1_hour +
time_1_mil)
one_sec_segment = AudioSegment.silent(duration=int(duration))
print(i, duration, time_2_hour + time_2_mil, time_1_hour + time_1_mil)
one_sec_segment.export('TTS/trump/{}.mp3'.format(str(i + 1)),
format='wav')
i += 4
engine.stop()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
engine = pyttsx3.init()
<|reserved_special_token_0|>
engine.setProperty('rate', 150)
rate = engine.getProperty('rate')
print(rate)
<|reserved_special_token_0|>
a = open('TrumpNewFF.srt').readlines()
i = 2
l = len(a)
while i < l:
engine.save_to_file(a[i], 'TTS/trump/{}.mp3'.format(str(i)))
engine.runAndWait()
if i + 3 < l:
time_1 = a[i - 1].split(' --> ')[1].split(':')
time_1_mil = time_1[-1].split(',')
time_1_mil = int(time_1_mil[0]) * 1000 + int(time_1_mil[1]) % 1000
time_1_hour = float(time_1[-2]) * 60000
time_2 = a[i + 3].split(' --> ')[0].split(':')
time_2_hour = float(time_2[-2]) * 60000
time_2_mil = time_2[-1].split(',')
time_2_mil = int(time_2_mil[0]) * 1000 + int(time_2_mil[1]) % 1000
duration = float(time_2_hour + time_2_mil) - float(time_1_hour +
time_1_mil)
one_sec_segment = AudioSegment.silent(duration=int(duration))
print(i, duration, time_2_hour + time_2_mil, time_1_hour + time_1_mil)
one_sec_segment.export('TTS/trump/{}.mp3'.format(str(i + 1)),
format='wav')
i += 4
engine.stop()
<|reserved_special_token_1|>
import pyttsx3
from pydub import AudioSegment
engine = pyttsx3.init()
<|reserved_special_token_0|>
engine.setProperty('rate', 150)
rate = engine.getProperty('rate')
print(rate)
<|reserved_special_token_0|>
a = open('TrumpNewFF.srt').readlines()
i = 2
l = len(a)
while i < l:
engine.save_to_file(a[i], 'TTS/trump/{}.mp3'.format(str(i)))
engine.runAndWait()
if i + 3 < l:
time_1 = a[i - 1].split(' --> ')[1].split(':')
time_1_mil = time_1[-1].split(',')
time_1_mil = int(time_1_mil[0]) * 1000 + int(time_1_mil[1]) % 1000
time_1_hour = float(time_1[-2]) * 60000
time_2 = a[i + 3].split(' --> ')[0].split(':')
time_2_hour = float(time_2[-2]) * 60000
time_2_mil = time_2[-1].split(',')
time_2_mil = int(time_2_mil[0]) * 1000 + int(time_2_mil[1]) % 1000
duration = float(time_2_hour + time_2_mil) - float(time_1_hour +
time_1_mil)
one_sec_segment = AudioSegment.silent(duration=int(duration))
print(i, duration, time_2_hour + time_2_mil, time_1_hour + time_1_mil)
one_sec_segment.export('TTS/trump/{}.mp3'.format(str(i + 1)),
format='wav')
i += 4
engine.stop()
<|reserved_special_token_1|>
import pyttsx3
from pydub import AudioSegment
engine = pyttsx3.init() # object creation
""" RATE"""
#printing current voice rate
engine.setProperty('rate', 150) # setting up new voice rate
rate = engine.getProperty('rate') # getting details of current speaking rate
print (rate)
"""VOLUME"""
# volume = engine.getProperty('volume') #getting to know current volume level (min=0 and max=1)
# print (volume) #printing current volume level
# engine.setProperty('volume',1.0) # setting up volume level between 0 and 1
# """VOICE"""
# voices = engine.getProperty('voices') #getting details of current voice
# #engine.setProperty('voice', voices[0].id) #changing index, changes voices. o for male
# engine.setProperty('voice', voices[1].id) #changing index, changes voices. 1 for female
# engine.say("Hello World!")
# engine.say('My current speaking rate is ' + str(rate))
# engine.runAndWait()
# engine.stop()
"""Saving Voice to a file"""
# On linux make sure that 'espeak' and 'ffmpeg' are installed
a=open('TrumpNewFF.srt').readlines()
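# Assumes each SRT block is exactly 4 lines (index, timing, text, blank line),
# so subtitle text sits at offsets 2, 6, 10, ...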
i=2
l = len(a)
while i<l:
engine.save_to_file(a[i], 'TTS/trump/{}.mp3'.format(str(i)))
engine.runAndWait()
if i+3<l:
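        # Measure the silent gap between this cue's end and the next cue's start.
        # Only minutes, seconds and milliseconds are parsed; the hour field is
        # ignored, which is fine for clips shorter than an hour.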
time_1 = a[i-1].split(' --> ')[1].split(':')
time_1_mil = time_1[-1].split(',')
time_1_mil = int(time_1_mil[0])*1000+int(time_1_mil[1])%1000
time_1_hour = float(time_1[-2])*60000
time_2 = a[i+3].split(' --> ')[0].split(':')
time_2_hour = float(time_2[-2])*60000
time_2_mil = time_2[-1].split(',')
time_2_mil = int(time_2_mil[0])*1000+int(time_2_mil[1])%1000
duration = float(time_2_hour+time_2_mil)-float(time_1_hour+time_1_mil)
        # create a silent audio segment spanning the computed gap
        one_sec_segment = AudioSegment.silent(duration=int(duration))  # duration in milliseconds
print(i, duration, time_2_hour+time_2_mil, time_1_hour+time_1_mil)
        # Save the silence; note the payload is WAV-encoded despite the .mp3 extension
        one_sec_segment.export('TTS/trump/{}.mp3'.format(str(i+1)), format="wav")
i+=4
engine.stop()
|
flexible
|
{
"blob_id": "32f4f7ad61b99848c907e092c5ed7a839f0b352b",
"index": 6399,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nengine.setProperty('rate', 150)\n<mask token>\nprint(rate)\n<mask token>\nwhile i < l:\n engine.save_to_file(a[i], 'TTS/trump/{}.mp3'.format(str(i)))\n engine.runAndWait()\n if i + 3 < l:\n time_1 = a[i - 1].split(' --> ')[1].split(':')\n time_1_mil = time_1[-1].split(',')\n time_1_mil = int(time_1_mil[0]) * 1000 + int(time_1_mil[1]) % 1000\n time_1_hour = float(time_1[-2]) * 60000\n time_2 = a[i + 3].split(' --> ')[0].split(':')\n time_2_hour = float(time_2[-2]) * 60000\n time_2_mil = time_2[-1].split(',')\n time_2_mil = int(time_2_mil[0]) * 1000 + int(time_2_mil[1]) % 1000\n duration = float(time_2_hour + time_2_mil) - float(time_1_hour +\n time_1_mil)\n one_sec_segment = AudioSegment.silent(duration=int(duration))\n print(i, duration, time_2_hour + time_2_mil, time_1_hour + time_1_mil)\n one_sec_segment.export('TTS/trump/{}.mp3'.format(str(i + 1)),\n format='wav')\n i += 4\nengine.stop()\n",
"step-3": "<mask token>\nengine = pyttsx3.init()\n<mask token>\nengine.setProperty('rate', 150)\nrate = engine.getProperty('rate')\nprint(rate)\n<mask token>\na = open('TrumpNewFF.srt').readlines()\ni = 2\nl = len(a)\nwhile i < l:\n engine.save_to_file(a[i], 'TTS/trump/{}.mp3'.format(str(i)))\n engine.runAndWait()\n if i + 3 < l:\n time_1 = a[i - 1].split(' --> ')[1].split(':')\n time_1_mil = time_1[-1].split(',')\n time_1_mil = int(time_1_mil[0]) * 1000 + int(time_1_mil[1]) % 1000\n time_1_hour = float(time_1[-2]) * 60000\n time_2 = a[i + 3].split(' --> ')[0].split(':')\n time_2_hour = float(time_2[-2]) * 60000\n time_2_mil = time_2[-1].split(',')\n time_2_mil = int(time_2_mil[0]) * 1000 + int(time_2_mil[1]) % 1000\n duration = float(time_2_hour + time_2_mil) - float(time_1_hour +\n time_1_mil)\n one_sec_segment = AudioSegment.silent(duration=int(duration))\n print(i, duration, time_2_hour + time_2_mil, time_1_hour + time_1_mil)\n one_sec_segment.export('TTS/trump/{}.mp3'.format(str(i + 1)),\n format='wav')\n i += 4\nengine.stop()\n",
"step-4": "import pyttsx3\nfrom pydub import AudioSegment\nengine = pyttsx3.init()\n<mask token>\nengine.setProperty('rate', 150)\nrate = engine.getProperty('rate')\nprint(rate)\n<mask token>\na = open('TrumpNewFF.srt').readlines()\ni = 2\nl = len(a)\nwhile i < l:\n engine.save_to_file(a[i], 'TTS/trump/{}.mp3'.format(str(i)))\n engine.runAndWait()\n if i + 3 < l:\n time_1 = a[i - 1].split(' --> ')[1].split(':')\n time_1_mil = time_1[-1].split(',')\n time_1_mil = int(time_1_mil[0]) * 1000 + int(time_1_mil[1]) % 1000\n time_1_hour = float(time_1[-2]) * 60000\n time_2 = a[i + 3].split(' --> ')[0].split(':')\n time_2_hour = float(time_2[-2]) * 60000\n time_2_mil = time_2[-1].split(',')\n time_2_mil = int(time_2_mil[0]) * 1000 + int(time_2_mil[1]) % 1000\n duration = float(time_2_hour + time_2_mil) - float(time_1_hour +\n time_1_mil)\n one_sec_segment = AudioSegment.silent(duration=int(duration))\n print(i, duration, time_2_hour + time_2_mil, time_1_hour + time_1_mil)\n one_sec_segment.export('TTS/trump/{}.mp3'.format(str(i + 1)),\n format='wav')\n i += 4\nengine.stop()\n",
"step-5": "import pyttsx3\r\nfrom pydub import AudioSegment\r\n\r\nengine = pyttsx3.init() # object creation\r\n\r\n\"\"\" RATE\"\"\"\r\n #printing current voice rate\r\nengine.setProperty('rate', 150) # setting up new voice rate\r\nrate = engine.getProperty('rate') # getting details of current speaking rate\r\nprint (rate) \r\n\r\n\"\"\"VOLUME\"\"\"\r\n# volume = engine.getProperty('volume') #getting to know current volume level (min=0 and max=1)\r\n# print (volume) #printing current volume level\r\n# engine.setProperty('volume',1.0) # setting up volume level between 0 and 1\r\n\r\n# \"\"\"VOICE\"\"\"\r\n# voices = engine.getProperty('voices') #getting details of current voice\r\n# #engine.setProperty('voice', voices[0].id) #changing index, changes voices. o for male\r\n# engine.setProperty('voice', voices[1].id) #changing index, changes voices. 1 for female\r\n\r\n# engine.say(\"Hello World!\")\r\n# engine.say('My current speaking rate is ' + str(rate))\r\n# engine.runAndWait()\r\n# engine.stop()\r\n\r\n\"\"\"Saving Voice to a file\"\"\"\r\n# On linux make sure that 'espeak' and 'ffmpeg' are installed\r\na=open('TrumpNewFF.srt').readlines()\r\ni=2\r\nl = len(a)\r\nwhile i<l:\r\n engine.save_to_file(a[i], 'TTS/trump/{}.mp3'.format(str(i)))\r\n engine.runAndWait()\r\n if i+3<l:\r\n time_1 = a[i-1].split(' --> ')[1].split(':')\r\n time_1_mil = time_1[-1].split(',')\r\n time_1_mil = int(time_1_mil[0])*1000+int(time_1_mil[1])%1000\r\n time_1_hour = float(time_1[-2])*60000\r\n \r\n time_2 = a[i+3].split(' --> ')[0].split(':')\r\n time_2_hour = float(time_2[-2])*60000\r\n time_2_mil = time_2[-1].split(',')\r\n time_2_mil = int(time_2_mil[0])*1000+int(time_2_mil[1])%1000\r\n \r\n duration = float(time_2_hour+time_2_mil)-float(time_1_hour+time_1_mil) \r\n # create 1 sec of silence audio segment\r\n one_sec_segment = AudioSegment.silent(duration=int(duration)) #duration in milliseconds\r\n \r\n \r\n print(i, duration, time_2_hour+time_2_mil, time_1_hour+time_1_mil)\r\n #Either save modified audio\r\n one_sec_segment.export('TTS/trump/{}.mp3'.format(str(i+1)), format=\"wav\")\r\n i+=4\r\nengine.stop()",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
class ErrorReport:
<|reserved_special_token_0|>
def startLog(self):
timestamp = str(datetime.datetime.now())
        fileName = 'Log_' + timestamp + '.txt'
self.logFile = open(fileName, 'w')
def endLog(self):
self.logFile.close()
def writeError(self):
traceback.print_exc(file=self.logFile)
self.logFile.write('\n')
self.logFile.flush()
os.fsync(self.logFile)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class ErrorReport:
def __init__(self):
return
def startLog(self):
timestamp = str(datetime.datetime.now())
        fileName = 'Log_' + timestamp + '.txt'
self.logFile = open(fileName, 'w')
def endLog(self):
self.logFile.close()
def writeError(self):
traceback.print_exc(file=self.logFile)
self.logFile.write('\n')
self.logFile.flush()
os.fsync(self.logFile)
def writeMessage(self, message=''):
self.logFile.write(message)
self.logFile.write('\n\n')
self.logFile.flush()
os.fsync(self.logFile)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def getErrorReport():
errorReport = ErrorReport()
return errorReport
class ErrorReport:
def __init__(self):
return
def startLog(self):
timestamp = str(datetime.datetime.now())
        fileName = 'Log_' + timestamp + '.txt'
self.logFile = open(fileName, 'w')
def endLog(self):
self.logFile.close()
def writeError(self):
traceback.print_exc(file=self.logFile)
self.logFile.write('\n')
self.logFile.flush()
os.fsync(self.logFile)
def writeMessage(self, message=''):
self.logFile.write(message)
self.logFile.write('\n\n')
self.logFile.flush()
os.fsync(self.logFile)
<|reserved_special_token_1|>
import datetime
import traceback
import sys
import os
def getErrorReport():
errorReport = ErrorReport()
return errorReport
class ErrorReport:
def __init__(self):
return
def startLog(self):
timestamp = str(datetime.datetime.now())
        fileName = 'Log_' + timestamp + '.txt'
self.logFile = open(fileName, 'w')
def endLog(self):
self.logFile.close()
def writeError(self):
traceback.print_exc(file=self.logFile)
self.logFile.write('\n')
self.logFile.flush()
os.fsync(self.logFile)
def writeMessage(self, message=''):
self.logFile.write(message)
self.logFile.write('\n\n')
self.logFile.flush()
os.fsync(self.logFile)
<|reserved_special_token_1|>
import datetime
import traceback
import sys
import os
def getErrorReport():
errorReport = ErrorReport()
return errorReport
class ErrorReport():
def __init__(self):
return
def startLog(self):
timestamp = str(datetime.datetime.now())
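        # str(datetime.now()) contains spaces and colons; fine on POSIX filesystems,
        # but not a valid Windows filename.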
        fileName = 'Log_'+timestamp+'.txt'
self.logFile = open(fileName,'w')
def endLog(self):
self.logFile.close()
def writeError(self):
traceback.print_exc(file=self.logFile)
self.logFile.write('\n')
self.logFile.flush()
os.fsync(self.logFile)
def writeMessage(self, message=''):
self.logFile.write(message)
self.logFile.write('\n\n')
self.logFile.flush()
os.fsync(self.logFile)
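# Minimal usage sketch (risky_operation is a placeholder):
#   report = getErrorReport()
#   report.startLog()
#   try:
#       risky_operation()
#   except Exception:
#       report.writeError()
#   finally:
#       report.endLog()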
|
flexible
|
{
"blob_id": "6abc8b97117257e16da1f7b730b09ee0f7bd4c6e",
"index": 4715,
"step-1": "<mask token>\n\n\nclass ErrorReport:\n <mask token>\n\n def startLog(self):\n timestamp = str(datetime.datetime.now())\n fileName = 'Log_' + timestamp + '.txt.'\n self.logFile = open(fileName, 'w')\n\n def endLog(self):\n self.logFile.close()\n\n def writeError(self):\n traceback.print_exc(file=self.logFile)\n self.logFile.write('\\n')\n self.logFile.flush()\n os.fsync(self.logFile)\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass ErrorReport:\n\n def __init__(self):\n return\n\n def startLog(self):\n timestamp = str(datetime.datetime.now())\n fileName = 'Log_' + timestamp + '.txt.'\n self.logFile = open(fileName, 'w')\n\n def endLog(self):\n self.logFile.close()\n\n def writeError(self):\n traceback.print_exc(file=self.logFile)\n self.logFile.write('\\n')\n self.logFile.flush()\n os.fsync(self.logFile)\n\n def writeMessage(self, message=''):\n self.logFile.write(message)\n self.logFile.write('\\n\\n')\n self.logFile.flush()\n os.fsync(self.logFile)\n",
"step-3": "<mask token>\n\n\ndef getErrorReport():\n errorReport = ErrorReport()\n return errorReport\n\n\nclass ErrorReport:\n\n def __init__(self):\n return\n\n def startLog(self):\n timestamp = str(datetime.datetime.now())\n fileName = 'Log_' + timestamp + '.txt.'\n self.logFile = open(fileName, 'w')\n\n def endLog(self):\n self.logFile.close()\n\n def writeError(self):\n traceback.print_exc(file=self.logFile)\n self.logFile.write('\\n')\n self.logFile.flush()\n os.fsync(self.logFile)\n\n def writeMessage(self, message=''):\n self.logFile.write(message)\n self.logFile.write('\\n\\n')\n self.logFile.flush()\n os.fsync(self.logFile)\n",
"step-4": "import datetime\nimport traceback\nimport sys\nimport os\n\n\ndef getErrorReport():\n errorReport = ErrorReport()\n return errorReport\n\n\nclass ErrorReport:\n\n def __init__(self):\n return\n\n def startLog(self):\n timestamp = str(datetime.datetime.now())\n fileName = 'Log_' + timestamp + '.txt.'\n self.logFile = open(fileName, 'w')\n\n def endLog(self):\n self.logFile.close()\n\n def writeError(self):\n traceback.print_exc(file=self.logFile)\n self.logFile.write('\\n')\n self.logFile.flush()\n os.fsync(self.logFile)\n\n def writeMessage(self, message=''):\n self.logFile.write(message)\n self.logFile.write('\\n\\n')\n self.logFile.flush()\n os.fsync(self.logFile)\n",
"step-5": "import datetime\nimport traceback\nimport sys\nimport os\n\n\ndef getErrorReport():\n errorReport = ErrorReport()\n return errorReport\n\n\nclass ErrorReport(): \n\n def __init__(self):\n return\n\n def startLog(self):\n timestamp = str(datetime.datetime.now())\n fileName = 'Log_'+timestamp+'.txt.'\n self.logFile = open(fileName,'w')\n\n def endLog(self):\n self.logFile.close()\n\n def writeError(self):\n traceback.print_exc(file=self.logFile)\n self.logFile.write('\\n')\n self.logFile.flush()\n os.fsync(self.logFile)\n\n def writeMessage(self, message=''):\n self.logFile.write(message)\n self.logFile.write('\\n\\n')\n self.logFile.flush()\n os.fsync(self.logFile)\n",
"step-ids": [
4,
6,
7,
8,
9
]
}
|
[
4,
6,
7,
8,
9
] |
import numpy as np
import argparse
import torch
from gridworlds.envs import GridWorldEnv, generate_obs_dict
from gridworlds.constants import possible_objects
import nengo_spa as spa
from collections import OrderedDict
from spatial_semantic_pointers.utils import encode_point, ssp_to_loc, get_heatmap_vectors
import matplotlib.pyplot as plt
import seaborn as sns
seed=13
np.random.seed(seed)
maze_index = 0
ssp_dim = 512
n_sensors = 36
dataset = '/home/ctnuser/ssp-navigation/ssp_navigation/datasets/mixed_style_100mazes_100goals_64res_13size_13seed/maze_dataset.npz'
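# Absolute, machine-specific path; point this at your own copy of the maze dataset.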
params = {
'continuous': True,
'fov': 360,
'n_sensors': n_sensors,
'max_sensor_dist': 10,
'normalize_dist_sensors': False,
'movement_type': 'holonomic',
'seed': seed,
# 'map_style': args.map_style,
'map_size': 10,
'fixed_episode_length': False, # Setting to false so location resets are not automatic
'episode_length': 1000, #200,
'max_lin_vel': 5,
'max_ang_vel': 5,
'dt': 0.1,
'full_map_obs': False,
'pob': 0,
'n_grid_cells': 0,
'heading': 'none',
'location': 'none',
'goal_loc': 'none',
'goal_vec': 'none',
'bc_n_ring': 0,
'hd_n_cells': 0,
'csp_dim': 0,
'goal_csp': False,
'agent_csp': False,
    'goal_distance': 0,  # args.goal_distance; 0 means completely random
}
obs_dict = generate_obs_dict(params)
np.random.seed(params['seed'])
data = np.load(dataset)
# n_mazes by size by size
coarse_mazes = data['coarse_mazes']
coarse_size = coarse_mazes.shape[1]
n_maps = coarse_mazes.shape[0]
# Size of map IDs. Equal to n_maps if using one-hot encoding
id_size = n_maps
map_id = np.zeros((n_maps,))
map_id[maze_index] = 1
map_id = torch.Tensor(map_id).unsqueeze(0)
# n_mazes by res by res
fine_mazes = data['fine_mazes']
xs = data['xs']
ys = data['ys']
res = fine_mazes.shape[1]
coarse_xs = np.linspace(xs[0], xs[-1], coarse_size)
coarse_ys = np.linspace(ys[0], ys[-1], coarse_size)
map_array = coarse_mazes[maze_index, :, :]
x_axis_sp = spa.SemanticPointer(data=data['x_axis_sp'])
y_axis_sp = spa.SemanticPointer(data=data['y_axis_sp'])
heatmap_vectors = get_heatmap_vectors(xs, ys, x_axis_sp, y_axis_sp)
coarse_heatmap_vectors = get_heatmap_vectors(coarse_xs, coarse_ys, x_axis_sp, y_axis_sp)
# fixed random set of locations for the goals
limit_range = xs[-1] - xs[0]
goal_sps = data['goal_sps']
goals = data['goals']
# print(np.min(goals))
# print(np.max(goals))
goals_scaled = ((goals - xs[0]) / limit_range) * coarse_size
# print(np.min(goals_scaled))
# print(np.max(goals_scaled))
n_goals = 0  # 10; TODO: make this a parameter
object_locations = OrderedDict()
vocab = {}
use_dataset_goals = False
for i in range(n_goals):
sp_name = possible_objects[i]
if use_dataset_goals:
object_locations[sp_name] = goals_scaled[maze_index, i] # using goal locations from the dataset
else:
# If set to None, the environment will choose a random free space on init
object_locations[sp_name] = None
# vocab[sp_name] = spa.SemanticPointer(ssp_dim)
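    # Each object is labelled with a random, unit-normalized semantic pointer.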
vocab[sp_name] = spa.SemanticPointer(data=np.random.uniform(-1, 1, size=ssp_dim)).normalized()
env = GridWorldEnv(
map_array=map_array,
object_locations=object_locations, # object locations explicitly chosen so a fixed SSP memory can be given
observations=obs_dict,
movement_type=params['movement_type'],
max_lin_vel=params['max_lin_vel'],
max_ang_vel=params['max_ang_vel'],
continuous=params['continuous'],
max_steps=params['episode_length'],
fixed_episode_length=params['fixed_episode_length'],
dt=params['dt'],
screen_width=300,
screen_height=300,
debug_ghost=True,
)
# obs = env.reset(goal_distance=params['goal_distance'])
# env.set_agent_state(np.array([6, 9, 0]))
env.set_agent_state(np.array([3, 7, 0]))
env.step(np.array([0, 0]))
env.render()
env._render_extras()
sensors = env.get_dist_sensor_readings(
state=env.state,
n_sensors=params['n_sensors'],
fov_rad=params['fov']*np.pi/180.,
max_dist=params['max_sensor_dist'],
normalize=params['normalize_dist_sensors'],
)
fig, ax = plt.subplots(1, 1, figsize=(3, 3), tight_layout=True)
ax.bar(np.arange(len(sensors)), sensors)
ax.set_ylabel('Distance')
ax.set_xlabel('Sensor Index')
sns.despine()
plt.show()
|
normal
|
{
"blob_id": "34a456efc72b303aed5f722bb415d30ff62addab",
"index": 7391,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nnp.random.seed(seed)\n<mask token>\nnp.random.seed(params['seed'])\n<mask token>\nfor i in range(n_goals):\n sp_name = possible_objects[i]\n if use_dataset_goals:\n object_locations[sp_name] = goals_scaled[maze_index, i]\n else:\n object_locations[sp_name] = None\n vocab[sp_name] = spa.SemanticPointer(data=np.random.uniform(-1, 1, size\n =ssp_dim)).normalized()\n<mask token>\nenv.set_agent_state(np.array([3, 7, 0]))\nenv.step(np.array([0, 0]))\nenv.render()\nenv._render_extras()\n<mask token>\nax.bar(np.arange(len(sensors)), sensors)\nax.set_ylabel('Distance')\nax.set_xlabel('Sensor Index')\nsns.despine()\nplt.show()\n",
"step-3": "<mask token>\nseed = 13\nnp.random.seed(seed)\nmaze_index = 0\nssp_dim = 512\nn_sensors = 36\ndataset = (\n '/home/ctnuser/ssp-navigation/ssp_navigation/datasets/mixed_style_100mazes_100goals_64res_13size_13seed/maze_dataset.npz'\n )\nparams = {'continuous': True, 'fov': 360, 'n_sensors': n_sensors,\n 'max_sensor_dist': 10, 'normalize_dist_sensors': False, 'movement_type':\n 'holonomic', 'seed': seed, 'map_size': 10, 'fixed_episode_length': \n False, 'episode_length': 1000, 'max_lin_vel': 5, 'max_ang_vel': 5, 'dt':\n 0.1, 'full_map_obs': False, 'pob': 0, 'n_grid_cells': 0, 'heading':\n 'none', 'location': 'none', 'goal_loc': 'none', 'goal_vec': 'none',\n 'bc_n_ring': 0, 'hd_n_cells': 0, 'csp_dim': 0, 'goal_csp': False,\n 'agent_csp': False, 'goal_distance': 0}\nobs_dict = generate_obs_dict(params)\nnp.random.seed(params['seed'])\ndata = np.load(dataset)\ncoarse_mazes = data['coarse_mazes']\ncoarse_size = coarse_mazes.shape[1]\nn_maps = coarse_mazes.shape[0]\nid_size = n_maps\nmap_id = np.zeros((n_maps,))\nmap_id[maze_index] = 1\nmap_id = torch.Tensor(map_id).unsqueeze(0)\nfine_mazes = data['fine_mazes']\nxs = data['xs']\nys = data['ys']\nres = fine_mazes.shape[1]\ncoarse_xs = np.linspace(xs[0], xs[-1], coarse_size)\ncoarse_ys = np.linspace(ys[0], ys[-1], coarse_size)\nmap_array = coarse_mazes[maze_index, :, :]\nx_axis_sp = spa.SemanticPointer(data=data['x_axis_sp'])\ny_axis_sp = spa.SemanticPointer(data=data['y_axis_sp'])\nheatmap_vectors = get_heatmap_vectors(xs, ys, x_axis_sp, y_axis_sp)\ncoarse_heatmap_vectors = get_heatmap_vectors(coarse_xs, coarse_ys,\n x_axis_sp, y_axis_sp)\nlimit_range = xs[-1] - xs[0]\ngoal_sps = data['goal_sps']\ngoals = data['goals']\ngoals_scaled = (goals - xs[0]) / limit_range * coarse_size\nn_goals = 0\nobject_locations = OrderedDict()\nvocab = {}\nuse_dataset_goals = False\nfor i in range(n_goals):\n sp_name = possible_objects[i]\n if use_dataset_goals:\n object_locations[sp_name] = goals_scaled[maze_index, i]\n else:\n object_locations[sp_name] = None\n vocab[sp_name] = spa.SemanticPointer(data=np.random.uniform(-1, 1, size\n =ssp_dim)).normalized()\nenv = GridWorldEnv(map_array=map_array, object_locations=object_locations,\n observations=obs_dict, movement_type=params['movement_type'],\n max_lin_vel=params['max_lin_vel'], max_ang_vel=params['max_ang_vel'],\n continuous=params['continuous'], max_steps=params['episode_length'],\n fixed_episode_length=params['fixed_episode_length'], dt=params['dt'],\n screen_width=300, screen_height=300, debug_ghost=True)\nenv.set_agent_state(np.array([3, 7, 0]))\nenv.step(np.array([0, 0]))\nenv.render()\nenv._render_extras()\nsensors = env.get_dist_sensor_readings(state=env.state, n_sensors=params[\n 'n_sensors'], fov_rad=params['fov'] * np.pi / 180.0, max_dist=params[\n 'max_sensor_dist'], normalize=params['normalize_dist_sensors'])\nfig, ax = plt.subplots(1, 1, figsize=(3, 3), tight_layout=True)\nax.bar(np.arange(len(sensors)), sensors)\nax.set_ylabel('Distance')\nax.set_xlabel('Sensor Index')\nsns.despine()\nplt.show()\n",
"step-4": "import numpy as np\nimport argparse\nimport torch\nfrom gridworlds.envs import GridWorldEnv, generate_obs_dict\nfrom gridworlds.constants import possible_objects\nimport nengo_spa as spa\nfrom collections import OrderedDict\nfrom spatial_semantic_pointers.utils import encode_point, ssp_to_loc, get_heatmap_vectors\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nseed = 13\nnp.random.seed(seed)\nmaze_index = 0\nssp_dim = 512\nn_sensors = 36\ndataset = (\n '/home/ctnuser/ssp-navigation/ssp_navigation/datasets/mixed_style_100mazes_100goals_64res_13size_13seed/maze_dataset.npz'\n )\nparams = {'continuous': True, 'fov': 360, 'n_sensors': n_sensors,\n 'max_sensor_dist': 10, 'normalize_dist_sensors': False, 'movement_type':\n 'holonomic', 'seed': seed, 'map_size': 10, 'fixed_episode_length': \n False, 'episode_length': 1000, 'max_lin_vel': 5, 'max_ang_vel': 5, 'dt':\n 0.1, 'full_map_obs': False, 'pob': 0, 'n_grid_cells': 0, 'heading':\n 'none', 'location': 'none', 'goal_loc': 'none', 'goal_vec': 'none',\n 'bc_n_ring': 0, 'hd_n_cells': 0, 'csp_dim': 0, 'goal_csp': False,\n 'agent_csp': False, 'goal_distance': 0}\nobs_dict = generate_obs_dict(params)\nnp.random.seed(params['seed'])\ndata = np.load(dataset)\ncoarse_mazes = data['coarse_mazes']\ncoarse_size = coarse_mazes.shape[1]\nn_maps = coarse_mazes.shape[0]\nid_size = n_maps\nmap_id = np.zeros((n_maps,))\nmap_id[maze_index] = 1\nmap_id = torch.Tensor(map_id).unsqueeze(0)\nfine_mazes = data['fine_mazes']\nxs = data['xs']\nys = data['ys']\nres = fine_mazes.shape[1]\ncoarse_xs = np.linspace(xs[0], xs[-1], coarse_size)\ncoarse_ys = np.linspace(ys[0], ys[-1], coarse_size)\nmap_array = coarse_mazes[maze_index, :, :]\nx_axis_sp = spa.SemanticPointer(data=data['x_axis_sp'])\ny_axis_sp = spa.SemanticPointer(data=data['y_axis_sp'])\nheatmap_vectors = get_heatmap_vectors(xs, ys, x_axis_sp, y_axis_sp)\ncoarse_heatmap_vectors = get_heatmap_vectors(coarse_xs, coarse_ys,\n x_axis_sp, y_axis_sp)\nlimit_range = xs[-1] - xs[0]\ngoal_sps = data['goal_sps']\ngoals = data['goals']\ngoals_scaled = (goals - xs[0]) / limit_range * coarse_size\nn_goals = 0\nobject_locations = OrderedDict()\nvocab = {}\nuse_dataset_goals = False\nfor i in range(n_goals):\n sp_name = possible_objects[i]\n if use_dataset_goals:\n object_locations[sp_name] = goals_scaled[maze_index, i]\n else:\n object_locations[sp_name] = None\n vocab[sp_name] = spa.SemanticPointer(data=np.random.uniform(-1, 1, size\n =ssp_dim)).normalized()\nenv = GridWorldEnv(map_array=map_array, object_locations=object_locations,\n observations=obs_dict, movement_type=params['movement_type'],\n max_lin_vel=params['max_lin_vel'], max_ang_vel=params['max_ang_vel'],\n continuous=params['continuous'], max_steps=params['episode_length'],\n fixed_episode_length=params['fixed_episode_length'], dt=params['dt'],\n screen_width=300, screen_height=300, debug_ghost=True)\nenv.set_agent_state(np.array([3, 7, 0]))\nenv.step(np.array([0, 0]))\nenv.render()\nenv._render_extras()\nsensors = env.get_dist_sensor_readings(state=env.state, n_sensors=params[\n 'n_sensors'], fov_rad=params['fov'] * np.pi / 180.0, max_dist=params[\n 'max_sensor_dist'], normalize=params['normalize_dist_sensors'])\nfig, ax = plt.subplots(1, 1, figsize=(3, 3), tight_layout=True)\nax.bar(np.arange(len(sensors)), sensors)\nax.set_ylabel('Distance')\nax.set_xlabel('Sensor Index')\nsns.despine()\nplt.show()\n",
"step-5": "import numpy as np\nimport argparse\nimport torch\nfrom gridworlds.envs import GridWorldEnv, generate_obs_dict\nfrom gridworlds.constants import possible_objects\n\nimport nengo_spa as spa\nfrom collections import OrderedDict\nfrom spatial_semantic_pointers.utils import encode_point, ssp_to_loc, get_heatmap_vectors\n\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\nseed=13\nnp.random.seed(seed)\nmaze_index = 0\n\nssp_dim = 512\nn_sensors = 36\n\ndataset = '/home/ctnuser/ssp-navigation/ssp_navigation/datasets/mixed_style_100mazes_100goals_64res_13size_13seed/maze_dataset.npz'\n\nparams = {\n 'continuous': True,\n 'fov': 360,\n 'n_sensors': n_sensors,\n 'max_sensor_dist': 10,\n 'normalize_dist_sensors': False,\n 'movement_type': 'holonomic',\n 'seed': seed,\n # 'map_style': args.map_style,\n 'map_size': 10,\n 'fixed_episode_length': False, # Setting to false so location resets are not automatic\n 'episode_length': 1000, #200,\n 'max_lin_vel': 5,\n 'max_ang_vel': 5,\n 'dt': 0.1,\n 'full_map_obs': False,\n 'pob': 0,\n 'n_grid_cells': 0,\n 'heading': 'none',\n 'location': 'none',\n 'goal_loc': 'none',\n 'goal_vec': 'none',\n 'bc_n_ring': 0,\n 'hd_n_cells': 0,\n 'csp_dim': 0,\n 'goal_csp': False,\n 'agent_csp': False,\n\n 'goal_distance': 0,#args.goal_distance # 0 means completely random\n}\n\nobs_dict = generate_obs_dict(params)\n\nnp.random.seed(params['seed'])\n\ndata = np.load(dataset)\n\n# n_mazes by size by size\ncoarse_mazes = data['coarse_mazes']\ncoarse_size = coarse_mazes.shape[1]\nn_maps = coarse_mazes.shape[0]\n\n# Size of map IDs. Equal to n_maps if using one-hot encoding\nid_size = n_maps\n\nmap_id = np.zeros((n_maps,))\nmap_id[maze_index] = 1\nmap_id = torch.Tensor(map_id).unsqueeze(0)\n\n# n_mazes by res by res\nfine_mazes = data['fine_mazes']\nxs = data['xs']\nys = data['ys']\nres = fine_mazes.shape[1]\n\ncoarse_xs = np.linspace(xs[0], xs[-1], coarse_size)\ncoarse_ys = np.linspace(ys[0], ys[-1], coarse_size)\n\nmap_array = coarse_mazes[maze_index, :, :]\n\nx_axis_sp = spa.SemanticPointer(data=data['x_axis_sp'])\ny_axis_sp = spa.SemanticPointer(data=data['y_axis_sp'])\nheatmap_vectors = get_heatmap_vectors(xs, ys, x_axis_sp, y_axis_sp)\ncoarse_heatmap_vectors = get_heatmap_vectors(coarse_xs, coarse_ys, x_axis_sp, y_axis_sp)\n\n# fixed random set of locations for the goals\nlimit_range = xs[-1] - xs[0]\n\ngoal_sps = data['goal_sps']\ngoals = data['goals']\n# print(np.min(goals))\n# print(np.max(goals))\ngoals_scaled = ((goals - xs[0]) / limit_range) * coarse_size\n# print(np.min(goals_scaled))\n# print(np.max(goals_scaled))\n\nn_goals = 0#10 # TODO: make this a parameter\nobject_locations = OrderedDict()\nvocab = {}\nuse_dataset_goals = False\nfor i in range(n_goals):\n sp_name = possible_objects[i]\n if use_dataset_goals:\n object_locations[sp_name] = goals_scaled[maze_index, i] # using goal locations from the dataset\n else:\n # If set to None, the environment will choose a random free space on init\n object_locations[sp_name] = None\n # vocab[sp_name] = spa.SemanticPointer(ssp_dim)\n vocab[sp_name] = spa.SemanticPointer(data=np.random.uniform(-1, 1, size=ssp_dim)).normalized()\n\nenv = GridWorldEnv(\n map_array=map_array,\n object_locations=object_locations, # object locations explicitly chosen so a fixed SSP memory can be given\n observations=obs_dict,\n movement_type=params['movement_type'],\n max_lin_vel=params['max_lin_vel'],\n max_ang_vel=params['max_ang_vel'],\n continuous=params['continuous'],\n max_steps=params['episode_length'],\n 
fixed_episode_length=params['fixed_episode_length'],\n dt=params['dt'],\n screen_width=300,\n screen_height=300,\n debug_ghost=True,\n)\n\n# obs = env.reset(goal_distance=params['goal_distance'])\n# env.set_agent_state(np.array([6, 9, 0]))\nenv.set_agent_state(np.array([3, 7, 0]))\nenv.step(np.array([0, 0]))\nenv.render()\nenv._render_extras()\n\nsensors = env.get_dist_sensor_readings(\n state=env.state,\n n_sensors=params['n_sensors'],\n fov_rad=params['fov']*np.pi/180.,\n max_dist=params['max_sensor_dist'],\n normalize=params['normalize_dist_sensors'],\n)\n\nfig, ax = plt.subplots(1, 1, figsize=(3, 3), tight_layout=True)\nax.bar(np.arange(len(sensors)), sensors)\nax.set_ylabel('Distance')\nax.set_xlabel('Sensor Index')\nsns.despine()\nplt.show()\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
def create_meme(word):
return f'this is your meme NEW VERSION {word}'
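# Example: create_meme('cat') returns 'this is your meme NEW VERSION cat'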
|
normal
|
{
"blob_id": "32b3e65add5fb44320898b682e8f94f1460a32e7",
"index": 628,
"step-1": "<mask token>\n",
"step-2": "def create_meme(word):\n return f'this is your meme NEW VERSION {word}'\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
# -*- coding: utf-8 -*-
from celery import shared_task
from djcelery.models import PeriodicTask, CrontabSchedule
import datetime
from django.db.models import Max, Count
from services import *
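# get_sql_data and TaskResult below are presumably provided by this star import.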
# Test task
@shared_task()
def excute_sql(x,y):
print "%d * %d = %d" % (x, y, x * y)
return x * y
# Monitoring task: query the database and raise alerts
@shared_task()
def monitor_sql(*args, **kwargs):
print kwargs["name"]
print kwargs["sql"]
task_name = kwargs["name"]
datasource = kwargs["datasource"]
sql = kwargs["sql"]
operator = kwargs["operator"]
threshold = kwargs["threshold"]
tasks = PeriodicTask.objects.filter(name=task_name)
if tasks.exists():
task = tasks[0]
data = get_sql_data(datasource, sql)
        # -1 means no rows were returned, so abnormality cannot be judged
sql_result = -1
monitor_result = -1
if len(data) > 0:
sql_result = data[0][0]
monitor_result = 0
        # threshold reached
        if operator == ">=":
            if sql_result >= threshold:
                monitor_result = 1  # abnormal
        # below the configured threshold
        elif operator == "<":
            if sql_result < threshold:
                monitor_result = 1  # abnormal
        # query result unchanged since the last run
        elif operator == "==":
            task_results = TaskResult.objects.filter(task_id=task.id)
            if task_results.exists():
                task_result_before = task_results.latest('last_run_time')
                sql_data_before = task_result_before.sql_data
                if sql_result == sql_data_before:
                    monitor_result = 1  # abnormal
        # persist the collected data point
task_result = TaskResult(task_id=task.id, task_name=task.name, last_run_time=datetime.datetime.now(),
operator=operator, threshold=threshold, sql_data=sql_result,
monitor_result=monitor_result)
task_result.save()
return sql_result
|
normal
|
{
"blob_id": "6f259210cbe8969046cba1031ab42d77e913abea",
"index": 6265,
"step-1": "# -*- coding: utf-8 -*-\nfrom celery import shared_task\nfrom djcelery.models import PeriodicTask, CrontabSchedule\nimport datetime\nfrom django.db.models import Max, Count\n\nfrom services import *\n\n\n# 测试任务\n@shared_task()\ndef excute_sql(x,y):\n print \"%d * %d = %d\" % (x, y, x * y)\n return x * y\n\n\n# 监控任务:查询数据库并进行告警\n@shared_task()\ndef monitor_sql(*args, **kwargs):\n print kwargs[\"name\"]\n print kwargs[\"sql\"]\n\n task_name = kwargs[\"name\"]\n datasource = kwargs[\"datasource\"]\n sql = kwargs[\"sql\"]\n operator = kwargs[\"operator\"]\n threshold = kwargs[\"threshold\"]\n tasks = PeriodicTask.objects.filter(name=task_name)\n if tasks.exists():\n task = tasks[0]\n data = get_sql_data(datasource, sql)\n # -1:表示没查到数据,无法判断是否异常\n sql_result = -1\n monitor_result = -1\n if len(data) > 0:\n sql_result = data[0][0]\n\n monitor_result = 0\n # 达到设定阈值\n if operator == \">=\":\n if sql_result >= threshold:\n monitor_result = 1 # 异常\n\n # 小于设定阈值\n elif operator == \"<\":\n if sql_result < threshold:\n monitor_result = 1 # 异常\n\n # 查询记录不变\n elif operator == \"==\":\n task_results = TaskResult.objects.filter(task_id=task.id)\n if task_results.exists():\n task_result_before = task_results.latest('last_run_time')\n sql_data_before = task_result_before.sql_data\n if sql_result == sql_data_before:\n monitor_result = 1 # 异常\n\n # 保存采集数据\n task_result = TaskResult(task_id=task.id, task_name=task.name, last_run_time=datetime.datetime.now(),\n operator=operator, threshold=threshold, sql_data=sql_result,\n monitor_result=monitor_result)\n task_result.save()\n\n return sql_result\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import BaggingClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.neighbors import KNeighborsClassifier
<|reserved_special_token_1|>
###
# This Python module contains commented-out classifiers that I will no longer
# be using
###
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import BaggingClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.neighbors import KNeighborsClassifier
# Using Decision trees
# dt = DecisionTreeClassifier(max_depth=None)
# dt.fit(X_train_cv, y_train)
# print("DT Accuracy = " + str(dt.score(X_dev_cv, y_dev)))
# Using AdaBoost (takes too long)
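# from sklearn.ensemble import AdaBoostClassifier  # required if this block is re-enabled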
# clf = DecisionTreeClassifier()
# ada = AdaBoostClassifier(clf)
# ada.fit(X_train_cv, y_train)
# print("ADA accuracy = " + str(ada.score(X_dev_cv, y_dev)))
# Using Bagging as a classifier with KNN
# clf = KNeighborsClassifier(n_neighbors=10)
# bag = BaggingClassifier(clf, max_features=0.5, max_samples=0.5)
# bag.fit(X_top10_train, y_top10_train)
# print("Bag accuracy = " + str(bag.score(X_top10_dev, y_top10_dev)))
# Using a random forest classifier
# rforest = RandomForestClassifier(max_depth=10000)
# rforest.fit(X_train_cv, y_train)
# print("Random Forest accuracy = " + str(rforest.score(X_dev_cv, y_dev)))
|
flexible
|
{
"blob_id": "5029f3e2000c25d6044f93201c698773e310d452",
"index": 3391,
"step-1": "<mask token>\n",
"step-2": "from sklearn.tree import DecisionTreeClassifier\nfrom sklearn.ensemble import BaggingClassifier\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.neighbors import KNeighborsClassifier\n",
"step-3": "###\n# This Python module contains commented out classifiers that I will no longer\n# be using\n###\n\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.ensemble import BaggingClassifier\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.neighbors import KNeighborsClassifier\n\n\n# Using Decision trees\n# dt = DecisionTreeClassifier(max_depth=None)\n# dt.fit(X_train_cv, y_train)\n# print(\"DT Accuracy = \" + str(dt.score(X_dev_cv, y_dev)))\n\n# Using AdaBoost (takes too long)\n# clf = DecisionTreeClassifier()\n# ada = AdaBoostClassifier(clf)\n# ada.fit(X_train_cv, y_train)\n# print(\"ADA accuracy = \" + str(ada.score(X_dev_cv, y_dev)))\n\n# Using Bagging as a classifier with KNN\n# clf = KNeighborsClassifier(n_neighbors=10)\n# bag = BaggingClassifier(clf, max_features=0.5, max_samples=0.5)\n# bag.fit(X_top10_train, y_top10_train)\n# print(\"Bag accuracy = \" + str(bag.score(X_top10_dev, y_top10_dev)))\n\n# Using a random forest classifier\n# rforest = RandomForestClassifier(max_depth=10000)\n# rforest.fit(X_train_cv, y_train)\n# print(\"Random Forest accuracy = \" + str(rforest.score(X_dev_cv, y_dev)))\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
def squirrel_play(temp, is_summer):
    # Squirrels play between 60 and 90 F; in summer the ceiling rises to 100 F.
    if is_summer:
        return 60 <= temp <= 100
    return 60 <= temp <= 90
|
normal
|
{
"blob_id": "48755cf48c6696259d0c319d382021f33751ac01",
"index": 9497,
"step-1": "<mask token>\n",
"step-2": "def squirrel_play(temp, is_summer):\n if is_summer == True:\n if 60 <= temp <= 100:\n return True\n else:\n return False\n if is_summer == False:\n if 60 <= temp <= 90:\n return True\n else:\n return False\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
#!/usr/bin/env python
from program_class import Program
import tmdata
import os
def main():
""""""
args1 = {"progname" : "whoami",
"command" : "/usr/bin/whoami",
"procnum" : 1,
"autolaunch" : True,
"starttime" : 5,
"restart" : "never",
"retries" : 2,
"stopsig" : "SSIG",
"stoptime" : 10,
"exitcodes" : [0, 2, 4, 5],
"stdout" : "/usr/bin/whoami.stdout",
"stderr" : "/usr/bin/whoami.stderr",
"redout" : False,
"rederr" : False,
"envvars" : {"ENV1" : "VAL1", "ENV2" : "VAL2"},
"workingdir" : "/tmp",
"umask" : "077"}
args2 = {"progname" : "top",
"command" : "/usr/bin/top",
"procnum" : 1,
"autolaunch" : True,
"starttime" : 5,
"restart" : "never",
"retries" : 2,
"stopsig" : "SSIG",
"stoptime" : 10,
"exitcodes" : [0, 2, 4, 5],
"stdout" : "/usr/bin/whois.stdout",
"stderr" : "/usr/bin/whois.stderr",
"redout" : False,
"rederr" : False,
"envvars" : {"ENV1" : "VAL1", "ENV2" : "VAL2"},
"workingdir" : "/tmp",
"umask" : "077"}
# args1 = {"command" : "/C/Downloads/darkradiant-1.8.0-x64",
# "procnum" : 1,
# "autolaunch" : True,
# "starttime" : 5,
# "restart" : "never",
# "retries" : 2,
# "stopsig" : "SSIG",
# "stoptime" : 10,
# "exitcodes" : [0, 2, 4, 5],
# "stdout" : "/C/Downloads/darkradiant-1.8.0-x64.stdout",
# "stderr" : "/C/Downloads/darkradiant-1.8.0-x64.stderr",
# "redir" : "/C/Downloads/darkradiant-1.8.0-x64.redir",
# "envvars" : {"ENV1" : "VAL1", "ENV2" : "VAL2"},
# "workingdir" : "/tmp",
# "umask" : "077"}
#
# args2 = {"command" : "/C/UsbFix/UsbFix.exe",
# "procnum" : 1,
# "autolaunch" : True,
# "starttime" : 5,
# "restart" : "never",
# "retries" : 2,
# "stopsig" : "SSIG",
# "stoptime" : 10,
# "exitcodes" : [0, 2, 4, 5],
# "stdout" : "/C/UsbFix/UsbFix.exe.stdout",
# "stderr" : "/C/UsbFix/UsbFix.exe.stderr",
# "redir" : "/C/UsbFix/UsbFix.exe.redir",
# "envvars" : {"ENV1" : "VAL1", "ENV2" : "VAL2"},
# "workingdir" : "/tmp",
# "umask" : "077"}
prog1 = Program(args1)
prog2 = Program(args2)
tmdata.saveProgram(prog1, "./config.xml", False)
tmdata.saveProgram(prog2, "./config.xml", False)
# tmdata.saveProgram(prog1, "./config.json", False)
# tmdata.saveProgram(prog2, "./config.json", False)
if __name__ == "__main__":
	main()
|
normal
|
{
"blob_id": "c58f40d369388b94778e8583176f1ba8b81d0c5e",
"index": 4083,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef main():\n \"\"\"\"\"\"\n args1 = {'progname': 'whoami', 'command': '/usr/bin/whoami', 'procnum':\n 1, 'autolaunch': True, 'starttime': 5, 'restart': 'never',\n 'retries': 2, 'stopsig': 'SSIG', 'stoptime': 10, 'exitcodes': [0, 2,\n 4, 5], 'stdout': '/usr/bin/whoami.stdout', 'stderr':\n '/usr/bin/whoami.stderr', 'redout': False, 'rederr': False,\n 'envvars': {'ENV1': 'VAL1', 'ENV2': 'VAL2'}, 'workingdir': '/tmp',\n 'umask': '077'}\n args2 = {'progname': 'top', 'command': '/usr/bin/top', 'procnum': 1,\n 'autolaunch': True, 'starttime': 5, 'restart': 'never', 'retries': \n 2, 'stopsig': 'SSIG', 'stoptime': 10, 'exitcodes': [0, 2, 4, 5],\n 'stdout': '/usr/bin/whois.stdout', 'stderr':\n '/usr/bin/whois.stderr', 'redout': False, 'rederr': False,\n 'envvars': {'ENV1': 'VAL1', 'ENV2': 'VAL2'}, 'workingdir': '/tmp',\n 'umask': '077'}\n prog1 = Program(args1)\n prog2 = Program(args2)\n tmdata.saveProgram(prog1, './config.xml', False)\n tmdata.saveProgram(prog2, './config.xml', False)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef main():\n \"\"\"\"\"\"\n args1 = {'progname': 'whoami', 'command': '/usr/bin/whoami', 'procnum':\n 1, 'autolaunch': True, 'starttime': 5, 'restart': 'never',\n 'retries': 2, 'stopsig': 'SSIG', 'stoptime': 10, 'exitcodes': [0, 2,\n 4, 5], 'stdout': '/usr/bin/whoami.stdout', 'stderr':\n '/usr/bin/whoami.stderr', 'redout': False, 'rederr': False,\n 'envvars': {'ENV1': 'VAL1', 'ENV2': 'VAL2'}, 'workingdir': '/tmp',\n 'umask': '077'}\n args2 = {'progname': 'top', 'command': '/usr/bin/top', 'procnum': 1,\n 'autolaunch': True, 'starttime': 5, 'restart': 'never', 'retries': \n 2, 'stopsig': 'SSIG', 'stoptime': 10, 'exitcodes': [0, 2, 4, 5],\n 'stdout': '/usr/bin/whois.stdout', 'stderr':\n '/usr/bin/whois.stderr', 'redout': False, 'rederr': False,\n 'envvars': {'ENV1': 'VAL1', 'ENV2': 'VAL2'}, 'workingdir': '/tmp',\n 'umask': '077'}\n prog1 = Program(args1)\n prog2 = Program(args2)\n tmdata.saveProgram(prog1, './config.xml', False)\n tmdata.saveProgram(prog2, './config.xml', False)\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "from program_class import Program\nimport tmdata\nimport os\n\n\ndef main():\n \"\"\"\"\"\"\n args1 = {'progname': 'whoami', 'command': '/usr/bin/whoami', 'procnum':\n 1, 'autolaunch': True, 'starttime': 5, 'restart': 'never',\n 'retries': 2, 'stopsig': 'SSIG', 'stoptime': 10, 'exitcodes': [0, 2,\n 4, 5], 'stdout': '/usr/bin/whoami.stdout', 'stderr':\n '/usr/bin/whoami.stderr', 'redout': False, 'rederr': False,\n 'envvars': {'ENV1': 'VAL1', 'ENV2': 'VAL2'}, 'workingdir': '/tmp',\n 'umask': '077'}\n args2 = {'progname': 'top', 'command': '/usr/bin/top', 'procnum': 1,\n 'autolaunch': True, 'starttime': 5, 'restart': 'never', 'retries': \n 2, 'stopsig': 'SSIG', 'stoptime': 10, 'exitcodes': [0, 2, 4, 5],\n 'stdout': '/usr/bin/whois.stdout', 'stderr':\n '/usr/bin/whois.stderr', 'redout': False, 'rederr': False,\n 'envvars': {'ENV1': 'VAL1', 'ENV2': 'VAL2'}, 'workingdir': '/tmp',\n 'umask': '077'}\n prog1 = Program(args1)\n prog2 = Program(args2)\n tmdata.saveProgram(prog1, './config.xml', False)\n tmdata.saveProgram(prog2, './config.xml', False)\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "#!/usr/bin/env python\nfrom program_class import Program\nimport tmdata\nimport os\n\ndef main():\n\t\"\"\"\"\"\"\n\targs1 = {\"progname\" : \"whoami\",\n\t\t\t\"command\" : \"/usr/bin/whoami\",\n\t\t\t\"procnum\" : 1,\n\t\t\t\"autolaunch\" : True,\n\t\t\t\"starttime\" : 5,\n\t\t\t\"restart\" : \"never\",\n\t\t\t\"retries\" : 2,\n\t\t\t\"stopsig\" : \"SSIG\",\n\t\t\t\"stoptime\" : 10,\n\t\t\t\"exitcodes\" : [0, 2, 4, 5],\n\t\t\t\"stdout\" : \"/usr/bin/whoami.stdout\",\n\t\t\t\"stderr\" : \"/usr/bin/whoami.stderr\",\n\t\t\t\"redout\" : False,\n\t\t\t\"rederr\" : False,\n\t\t\t\"envvars\" : {\"ENV1\" : \"VAL1\", \"ENV2\" : \"VAL2\"},\n\t\t\t\"workingdir\" : \"/tmp\",\n\t\t\t\"umask\" : \"077\"}\n\n\targs2 = {\"progname\" : \"top\",\n\t\t\t\"command\" : \"/usr/bin/top\",\n\t\t\t\"procnum\" : 1,\n\t\t\t\"autolaunch\" : True,\n\t\t\t\"starttime\" : 5,\n\t\t\t\"restart\" : \"never\",\n\t\t\t\"retries\" : 2,\n\t\t\t\"stopsig\" : \"SSIG\",\n\t\t\t\"stoptime\" : 10,\n\t\t\t\"exitcodes\" : [0, 2, 4, 5],\n\t\t\t\"stdout\" : \"/usr/bin/whois.stdout\",\n\t\t\t\"stderr\" : \"/usr/bin/whois.stderr\",\n\t\t\t\"redout\" : False,\n\t\t\t\"rederr\" : False,\n\t\t\t\"envvars\" : {\"ENV1\" : \"VAL1\", \"ENV2\" : \"VAL2\"},\n\t\t\t\"workingdir\" : \"/tmp\",\n\t\t\t\"umask\" : \"077\"}\n\n\t# args1 = {\"command\" : \"/C/Downloads/darkradiant-1.8.0-x64\",\n\t# \t\t\"procnum\" : 1,\n\t# \t\t\"autolaunch\" : True,\n\t# \t\t\"starttime\" : 5,\n\t# \t\t\"restart\" : \"never\",\n\t# \t\t\"retries\" : 2,\n\t# \t\t\"stopsig\" : \"SSIG\",\n\t# \t\t\"stoptime\" : 10,\n\t# \t\t\"exitcodes\" : [0, 2, 4, 5],\n\t# \t\t\"stdout\" : \"/C/Downloads/darkradiant-1.8.0-x64.stdout\",\n\t# \t\t\"stderr\" : \"/C/Downloads/darkradiant-1.8.0-x64.stderr\",\n\t# \t\t\"redir\" : \"/C/Downloads/darkradiant-1.8.0-x64.redir\",\n\t# \t\t\"envvars\" : {\"ENV1\" : \"VAL1\", \"ENV2\" : \"VAL2\"},\n\t# \t\t\"workingdir\" : \"/tmp\",\n\t# \t\t\"umask\" : \"077\"}\n\t#\n\t# args2 = {\"command\" : \"/C/UsbFix/UsbFix.exe\",\n\t# \t\t\"procnum\" : 1,\n\t# \t\t\"autolaunch\" : True,\n\t# \t\t\"starttime\" : 5,\n\t# \t\t\"restart\" : \"never\",\n\t# \t\t\"retries\" : 2,\n\t# \t\t\"stopsig\" : \"SSIG\",\n\t# \t\t\"stoptime\" : 10,\n\t# \t\t\"exitcodes\" : [0, 2, 4, 5],\n\t# \t\t\"stdout\" : \"/C/UsbFix/UsbFix.exe.stdout\",\n\t# \t\t\"stderr\" : \"/C/UsbFix/UsbFix.exe.stderr\",\n\t# \t\t\"redir\" : \"/C/UsbFix/UsbFix.exe.redir\",\n\t# \t\t\"envvars\" : {\"ENV1\" : \"VAL1\", \"ENV2\" : \"VAL2\"},\n\t# \t\t\"workingdir\" : \"/tmp\",\n\t# \t\t\"umask\" : \"077\"}\n\n\tprog1 = Program(args1)\n\tprog2 = Program(args2)\n\n\ttmdata.saveProgram(prog1, \"./config.xml\", False)\n\ttmdata.saveProgram(prog2, \"./config.xml\", False)\n\t# tmdata.saveProgram(prog1, \"./config.json\", False)\n\t# tmdata.saveProgram(prog2, \"./config.json\", False)\n\nif __name__ == \"__main__\":\n\tmain();\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from sklearn.linear_model import LinearRegression, LogisticRegression
import numpy as np
import pickle
import os
def Run(datasetFile):
# Get file from user
userFile = open(datasetFile, "r")
# Starter list of all instances of the data file
instanceList = []
instanceCount = 0
featureCount = 0
# put all instances in data file line by line into instanceList[]
for instance in userFile:
tempStr = instance
instanceCount += 1
        # Be sure to separate the entries by commas
for entry in tempStr.split(','):
instanceList.append(entry)
featureCount += 1
# Close file
userFile.close()
# Adjust size of feature count
featureCount = int(featureCount / instanceCount)
    # With the data now separated we can make the numpy array
    dataFull = np.asarray(instanceList).reshape(instanceCount, featureCount)
# Get rid of all the '\n' in array
for instance in range(instanceCount):
dataFull[instance][featureCount-1] = dataFull[instance][featureCount-1].rstrip("\n")
features = np.array(dataFull.T[0:featureCount-1]).astype(float).reshape(featureCount-1, instanceCount).T
target = np.array(dataFull.T[featureCount-1]).astype(float)
# Setup Machine Learning
isClassification = False
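    # Heuristic: treat the task as classification only when every target value
    # is 0 or 1; any other value switches the model to regression.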
for i in range(len(target)):
if int(target[i]) == 0 or int(target[i]) == 1:
isClassification = True
else:
isClassification = False
break
mlModel = None
if isClassification:
mlModel = LogisticRegression().fit(features, target)
else:
mlModel = LinearRegression().fit(features, target)
# Make new file for Model data
tmpFileName, file_exe = os.path.splitext(datasetFile)
newFilePath = tmpFileName + "MODEL" + ".sav"
pickle.dump(mlModel, open(newFilePath, 'wb'))
|
normal
|
{
"blob_id": "ee7efea569b685ad8d6922e403421227e9ea6922",
"index": 6277,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef Run(datasetFile):\n userFile = open(datasetFile, 'r')\n instanceList = []\n instanceCount = 0\n featureCount = 0\n for instance in userFile:\n tempStr = instance\n instanceCount += 1\n for entry in tempStr.split(','):\n instanceList.append(entry)\n featureCount += 1\n userFile.close()\n featureCount = int(featureCount / instanceCount)\n dataFull = np.asarray(instanceList).reshape(instanceCount * featureCount\n ).reshape(instanceCount, featureCount)\n for instance in range(instanceCount):\n dataFull[instance][featureCount - 1] = dataFull[instance][\n featureCount - 1].rstrip('\\n')\n features = np.array(dataFull.T[0:featureCount - 1]).astype(float).reshape(\n featureCount - 1, instanceCount).T\n target = np.array(dataFull.T[featureCount - 1]).astype(float)\n isClassification = False\n for i in range(len(target)):\n if int(target[i]) == 0 or int(target[i]) == 1:\n isClassification = True\n else:\n isClassification = False\n break\n mlModel = None\n if isClassification:\n mlModel = LogisticRegression().fit(features, target)\n else:\n mlModel = LinearRegression().fit(features, target)\n tmpFileName, file_exe = os.path.splitext(datasetFile)\n newFilePath = tmpFileName + 'MODEL' + '.sav'\n pickle.dump(mlModel, open(newFilePath, 'wb'))\n",
"step-3": "from sklearn.linear_model import LinearRegression, LogisticRegression\nimport numpy as np\nimport pickle\nimport os\n\n\ndef Run(datasetFile):\n userFile = open(datasetFile, 'r')\n instanceList = []\n instanceCount = 0\n featureCount = 0\n for instance in userFile:\n tempStr = instance\n instanceCount += 1\n for entry in tempStr.split(','):\n instanceList.append(entry)\n featureCount += 1\n userFile.close()\n featureCount = int(featureCount / instanceCount)\n dataFull = np.asarray(instanceList).reshape(instanceCount * featureCount\n ).reshape(instanceCount, featureCount)\n for instance in range(instanceCount):\n dataFull[instance][featureCount - 1] = dataFull[instance][\n featureCount - 1].rstrip('\\n')\n features = np.array(dataFull.T[0:featureCount - 1]).astype(float).reshape(\n featureCount - 1, instanceCount).T\n target = np.array(dataFull.T[featureCount - 1]).astype(float)\n isClassification = False\n for i in range(len(target)):\n if int(target[i]) == 0 or int(target[i]) == 1:\n isClassification = True\n else:\n isClassification = False\n break\n mlModel = None\n if isClassification:\n mlModel = LogisticRegression().fit(features, target)\n else:\n mlModel = LinearRegression().fit(features, target)\n tmpFileName, file_exe = os.path.splitext(datasetFile)\n newFilePath = tmpFileName + 'MODEL' + '.sav'\n pickle.dump(mlModel, open(newFilePath, 'wb'))\n",
"step-4": "from sklearn.linear_model import LinearRegression, LogisticRegression\nimport numpy as np\nimport pickle\nimport os\n\ndef Run(datasetFile):\n \n # Get file from user\n userFile = open(datasetFile, \"r\")\n \n # Starter list of all instances of the data file\n instanceList = []\n instanceCount = 0\n featureCount = 0 \n \n # put all instances in data file line by line into instanceList[] \n for instance in userFile:\n tempStr = instance\n instanceCount += 1\n \n # Be sure to seperate the entries by commas\n for entry in tempStr.split(','):\n instanceList.append(entry)\n featureCount += 1\n \n # Close file\n userFile.close()\n \n # Adjust size of feature count\n featureCount = int(featureCount / instanceCount)\n \n # With data now seperated we can make the numpy array and transpose it \n dataFull = np.asarray(instanceList).reshape(instanceCount * featureCount).reshape(instanceCount, featureCount)\n \n # Get rid of all the '\\n' in array\n for instance in range(instanceCount):\n dataFull[instance][featureCount-1] = dataFull[instance][featureCount-1].rstrip(\"\\n\")\n \n features = np.array(dataFull.T[0:featureCount-1]).astype(float).reshape(featureCount-1, instanceCount).T\n target = np.array(dataFull.T[featureCount-1]).astype(float)\n \n # Setup Machine Learning\n isClassification = False\n for i in range(len(target)):\n if int(target[i]) == 0 or int(target[i]) == 1:\n isClassification = True\n else:\n isClassification = False\n break\n \n mlModel = None\n \n if isClassification:\n mlModel = LogisticRegression().fit(features, target)\n else:\n mlModel = LinearRegression().fit(features, target) \n\n \n # Make new file for Model data\n tmpFileName, file_exe = os.path.splitext(datasetFile)\n newFilePath = tmpFileName + \"MODEL\" + \".sav\"\n pickle.dump(mlModel, open(newFilePath, 'wb'))",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
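For reference, a model pickled this way can be loaded back and used for
prediction. A minimal sketch, assuming Run() was originally given a dataset
named 'iris.txt', so the saved file is 'irisMODEL.sav' (both names are
hypothetical):

    import pickle

    # Load the model written by Run(); the filename here is hypothetical.
    with open('irisMODEL.sav', 'rb') as fh:
        model = pickle.load(fh)

    # The feature count must match the training data (all columns but the last).
    print(model.predict([[5.1, 3.5, 1.4, 0.2]]))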
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-06-07 12:30
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('warhawks', '0012_auto_20180607_1815'),
('notification', '0002_auto_20180607_1759'),
]
operations = [
migrations.CreateModel(
name='N_lostandfound',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('date', models.DateTimeField(auto_now_add=True)),
('message', models.CharField(max_length=100)),
('read', models.BooleanField(default=False)),
('from_user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='from_user_lost', to=settings.AUTH_USER_MODEL)),
('lf', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='warhawks.LostAndFound')),
('post', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='warhawks.LFComment')),
('to_user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='to_user_lost', to=settings.AUTH_USER_MODEL)),
],
),
]
|
normal
|
{
"blob_id": "c6c13ab24e4907eecf1db4fded28d4fc8126c834",
"index": 1170,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [migrations.swappable_dependency(settings.\n AUTH_USER_MODEL), ('warhawks', '0012_auto_20180607_1815'), (\n 'notification', '0002_auto_20180607_1759')]\n operations = [migrations.CreateModel(name='N_lostandfound', fields=[(\n 'id', models.AutoField(auto_created=True, primary_key=True,\n serialize=False, verbose_name='ID')), ('date', models.DateTimeField\n (auto_now_add=True)), ('message', models.CharField(max_length=100)),\n ('read', models.BooleanField(default=False)), ('from_user', models.\n ForeignKey(on_delete=django.db.models.deletion.CASCADE,\n related_name='from_user_lost', to=settings.AUTH_USER_MODEL)), ('lf',\n models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=\n 'warhawks.LostAndFound')), ('post', models.ForeignKey(on_delete=\n django.db.models.deletion.CASCADE, to='warhawks.LFComment')), (\n 'to_user', models.ForeignKey(on_delete=django.db.models.deletion.\n CASCADE, related_name='to_user_lost', to=settings.AUTH_USER_MODEL))])]\n",
"step-4": "from __future__ import unicode_literals\nfrom django.conf import settings\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n dependencies = [migrations.swappable_dependency(settings.\n AUTH_USER_MODEL), ('warhawks', '0012_auto_20180607_1815'), (\n 'notification', '0002_auto_20180607_1759')]\n operations = [migrations.CreateModel(name='N_lostandfound', fields=[(\n 'id', models.AutoField(auto_created=True, primary_key=True,\n serialize=False, verbose_name='ID')), ('date', models.DateTimeField\n (auto_now_add=True)), ('message', models.CharField(max_length=100)),\n ('read', models.BooleanField(default=False)), ('from_user', models.\n ForeignKey(on_delete=django.db.models.deletion.CASCADE,\n related_name='from_user_lost', to=settings.AUTH_USER_MODEL)), ('lf',\n models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=\n 'warhawks.LostAndFound')), ('post', models.ForeignKey(on_delete=\n django.db.models.deletion.CASCADE, to='warhawks.LFComment')), (\n 'to_user', models.ForeignKey(on_delete=django.db.models.deletion.\n CASCADE, related_name='to_user_lost', to=settings.AUTH_USER_MODEL))])]\n",
"step-5": "# -*- coding: utf-8 -*-\n# Generated by Django 1.11 on 2018-06-07 12:30\nfrom __future__ import unicode_literals\n\nfrom django.conf import settings\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ('warhawks', '0012_auto_20180607_1815'),\n ('notification', '0002_auto_20180607_1759'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='N_lostandfound',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('date', models.DateTimeField(auto_now_add=True)),\n ('message', models.CharField(max_length=100)),\n ('read', models.BooleanField(default=False)),\n ('from_user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='from_user_lost', to=settings.AUTH_USER_MODEL)),\n ('lf', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='warhawks.LostAndFound')),\n ('post', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='warhawks.LFComment')),\n ('to_user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='to_user_lost', to=settings.AUTH_USER_MODEL)),\n ],\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
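Assuming a standard Django project with the "notification" app installed, a
migration like this is applied with the usual management command:

    python manage.py migrate notification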
<|reserved_special_token_0|>
def quitScreen():
    messagebox.showinfo('collecting data', 'Click the window to start the analysis')
root.destroy()
root2 = Tk()
root2.destroy()
def getTextInput():
global result, result2
result = text.get(1.0, tk.END + '-1c')
result2 = text2.get(1.0, tk.END + '-1c')
<|reserved_special_token_0|>
def Result_Print():
window = Tk()
    window.title('Analysis Results')
window.geometry('600x900')
frame2 = Frame(window)
frame2.pack(fill='both')
tablayout = Notebook(frame2)
tablayout2 = Notebook(frame2)
ntab1 = Frame(tablayout2)
ntab1.pack(fill='both')
for row in range(len(name_n)):
for column in range(1):
label = Label(ntab1, width=25, height=2, text=name_n[row], bg=
'black', fg='white', padx=1, pady=1)
label.grid(row=row, column=column, sticky='nsew', padx=1, pady=1)
ntab1.grid_columnconfigure(column, weight=1)
for row in range(len(name_n)):
for column in range(1):
label = Label(ntab1, width=5, height=2, text='%s' % rate_n[row],
bg='black', fg='white', padx=1, pady=1)
label.grid(row=row, column=1, sticky='nsew', padx=1, pady=1)
ntab1.grid_columnconfigure(column, weight=1)
for row in range(len(name_n)):
for column in range(1):
            label = Label(ntab1, width=12, height=2, text='% similarity', bg=
'black', fg='white', padx=1, pady=1)
label.grid(row=row, column=2, sticky='nsew', padx=1, pady=1)
ntab1.grid_columnconfigure(column, weight=1)
    tablayout2.add(ntab1, text='Cross-matching results')
ntab2 = Frame(tablayout2)
ntab2.pack(fill='both')
for row in range(len(ncol)):
for column in range(1):
label = Label(ntab2, width=22, height=1, text=ncol[row], bg=
'black', fg='white', padx=1, pady=1)
label.grid(row=row, column=0, sticky='nsew', padx=1, pady=1)
ntab1.grid_columnconfigure(column, weight=1)
for row in range(len(ncol)):
for column in range(1):
label = Label(ntab2, width=22, height=1, text=row_nf3[row], bg=
'black', fg='white', padx=1, pady=1)
label.grid(row=row, column=1, sticky='nsew', padx=1, pady=1)
ntab1.grid_columnconfigure(column, weight=1)
    tablayout2.add(ntab2, text='Formula 1')
ntab3 = Frame(tablayout2)
ntab3.pack(fill='both')
for row in range(len(ncol)):
for column in range(1):
label = Label(ntab3, width=22, height=1, text=ncol[row], bg=
'black', fg='white', padx=1, pady=1)
label.grid(row=row, column=0, sticky='nsew', padx=1, pady=1)
ntab1.grid_columnconfigure(column, weight=1)
for row in range(len(ncol)):
for column in range(1):
label = Label(ntab3, width=22, height=1, text=row_nf32[row], bg
='black', fg='white', padx=1, pady=1)
label.grid(row=row, column=1, sticky='nsew', padx=1, pady=1)
ntab1.grid_columnconfigure(column, weight=1)
    tablayout2.add(ntab3, text='Formula 2')
ntab4 = Frame(tablayout2)
ntab4.pack(fill='both')
for row in range(len(ncol)):
for column in range(1):
label = Label(ntab4, width=22, height=1, text=ncol[row], bg=
'black', fg='white', padx=1, pady=1)
label.grid(row=row, column=0, sticky='nsew', padx=1, pady=1)
ntab1.grid_columnconfigure(column, weight=1)
for row in range(len(ncol)):
for column in range(1):
label = Label(ntab4, width=22, height=1, text=row_nf33[row], bg
='black', fg='white', padx=1, pady=1)
label.grid(row=row, column=1, sticky='nsew', padx=1, pady=1)
ntab1.grid_columnconfigure(column, weight=1)
    tablayout2.add(ntab4, text='Formula 3')
ntab5 = Frame(tablayout2)
ntab5.pack(fill='both')
for row in range(len(ncol)):
for column in range(1):
label = Label(ntab5, width=22, height=1, text=ncol[row], bg=
'black', fg='white', padx=1, pady=1)
label.grid(row=row, column=0, sticky='nsew', padx=1, pady=1)
ntab1.grid_columnconfigure(column, weight=1)
for row in range(len(ncol)):
for column in range(1):
label = Label(ntab5, width=22, height=1, text=row_nf3[row], bg=
'black', fg='white', padx=1, pady=1)
label.grid(row=row, column=1, sticky='nsew', padx=1, pady=1)
ntab1.grid_columnconfigure(column, weight=1)
    tablayout2.add(ntab5, text='Closest formula')
tab1 = Frame(tablayout)
tab1.pack(fill='both')
for row in range(len(name)):
for column in range(1):
label = Label(tab1, width=25, height=2, text=name[row], bg=
'black', fg='white', padx=1, pady=1)
label.grid(row=row, column=column, sticky='nsew', padx=1, pady=1)
tab1.grid_columnconfigure(column, weight=1)
for row in range(len(name)):
for column in range(1):
label = Label(tab1, width=5, height=2, text='%s' % rate[row],
bg='black', fg='white', padx=1, pady=1)
label.grid(row=row, column=1, sticky='nsew', padx=1, pady=1)
tab1.grid_columnconfigure(column, weight=1)
for row in range(len(name)):
for column in range(1):
            label = Label(tab1, width=12, height=2, text='% similarity', bg=
'black', fg='white', padx=1, pady=1)
label.grid(row=row, column=2, sticky='nsew', padx=1, pady=1)
tab1.grid_columnconfigure(column, weight=1)
    tablayout.add(tab1, text='Color classification results')
tab2 = Frame(tablayout)
tab2.pack(fill='both')
for row in range(len(col)):
for column in range(1):
label = Label(tab2, width=22, height=1, text=col[row], bg=
'black', fg='white', padx=1, pady=1)
label.grid(row=row, column=0, sticky='nsew', padx=1, pady=1)
tab1.grid_columnconfigure(column, weight=1)
for row in range(len(col)):
for column in range(1):
label = Label(tab2, width=22, height=1, text=row_df3[row], bg=
'black', fg='white', padx=1, pady=1)
label.grid(row=row, column=1, sticky='nsew', padx=1, pady=1)
tab1.grid_columnconfigure(column, weight=1)
    tablayout.add(tab2, text='Formula 1')
tab3 = Frame(tablayout)
tab3.pack(fill='both')
for row in range(len(col)):
for column in range(1):
label = Label(tab3, width=22, height=1, text=col[row], bg=
'black', fg='white', padx=1, pady=1)
label.grid(row=row, column=0, sticky='nsew', padx=1, pady=1)
tab1.grid_columnconfigure(column, weight=1)
for row in range(len(col)):
for column in range(1):
label = Label(tab3, width=22, height=1, text=row_df32[row], bg=
'black', fg='white', padx=1, pady=1)
label.grid(row=row, column=1, sticky='nsew', padx=1, pady=1)
tab1.grid_columnconfigure(column, weight=1)
    tablayout.add(tab3, text='Formula 2')
tab4 = Frame(tablayout)
tab4.pack(fill='both')
for row in range(len(col)):
for column in range(1):
label = Label(tab4, width=22, height=1, text=col[row], bg=
'black', fg='white', padx=1, pady=1)
label.grid(row=row, column=0, sticky='nsew', padx=1, pady=1)
tab1.grid_columnconfigure(column, weight=1)
for row in range(len(col)):
for column in range(1):
label = Label(tab4, width=22, height=1, text=row_df33[row], bg=
'black', fg='white', padx=1, pady=1)
label.grid(row=row, column=1, sticky='nsew', padx=1, pady=1)
tab1.grid_columnconfigure(column, weight=1)
    tablayout.add(tab4, text='Formula 3')
tab5 = Frame(tablayout)
tab5.pack(fill='both')
for row in range(len(col)):
for column in range(1):
label = Label(tab5, width=22, height=1, text=col[row], bg=
'black', fg='white', padx=1, pady=1)
label.grid(row=row, column=0, sticky='nsew', padx=1, pady=1)
tab1.grid_columnconfigure(column, weight=1)
for row in range(len(col)):
for column in range(1):
label = Label(tab5, width=22, height=1, text=row_text[row], bg=
'black', fg='white', padx=1, pady=1)
label.grid(row=row, column=1, sticky='nsew', padx=1, pady=1)
tab1.grid_columnconfigure(column, weight=1)
    tablayout.add(tab5, text='Closest formula')
tablayout.pack()
tablayout2.pack()
window.mainloop()
def CircleCallback(event, x, y, flags, param):
n = 8
    global refPt, PtBGR, w, h, Serial, r1, r2, r3, r4, rate, rate2, rate3, r6, r7, r8, r9, add, add2, add3, color, b, g, r, df3, name, col, row_text
global row_df3, row_df32, row_df33, row_text2, row_nf3, row_nf32, nf3, row_nf33, name_n, rate_n, ncol
if event == cv2.EVENT_LBUTTONDOWN:
n = 500
for c in range(0, n):
c += 1
ranx = random.randint(0, 499)
rany = random.randint(0, 499)
refPt.append((ranx, rany))
b, g, r = img[ranx, rany]
PtBGR.append((b, g, r))
b = [x[0] for x in PtBGR]
g = [x[1] for x in PtBGR]
r = [x[2] for x in PtBGR]
if len(refPt) == n:
BAvr = round(sum(b[0:n]) / n)
GAvr = round(sum(g[0:n]) / n)
RAvr = round(sum(r[0:n]) / n)
SumRGB = BAvr + GAvr + RAvr
SumAvr = round(SumRGB / 3)
color_def(BAvr, GAvr, RAvr)
color_name.append(color)
AvrRGB = {'R': RAvr, 'G': GAvr, 'B': BAvr, 'Sum': SumRGB,
'Avr': SumAvr, 'color': color_name}
df_test = pd.DataFrame(AvrRGB, index=[0])
dfread = pd.read_csv('.data base\\%s' % result2)
dfread['A'] = round((dfread['R'] + dfread['G'] + dfread['B'
]) / 3)
dfread['S'] = dfread['R'] + dfread['G'] + dfread['B']
nf = pd.DataFrame(list(zip(r, g, b)), columns=['R', 'G', 'B'])
nfread = dfread[['Serial no', 'R', 'G', 'B']]
loan = pd.merge(nf, nfread)
group = loan.groupby('Serial no')
Newnf = group.count()
Newnf['P'] = round(Newnf['R'] / Newnf['R'].sum() * 100)
Newnf = Newnf.sort_values(by=['R'], ascending=False)
Rate = Newnf['P'].tolist()
Newnf.columns = [' '.join(col).strip() for col in Newnf.
columns.values]
nf2 = pd.DataFrame(Newnf.to_records())
nf2 = nf2.head(5)
print(nf2)
if len(nf2['Serial no']) == 0:
i = 0
j = 0
k = 0
elif len(nf2['Serial no']) == 1:
i = nf2.at[0, 'Serial no']
j = 0
k = 0
elif len(nf2['Serial no']) == 2:
i = nf2.at[0, 'Serial no']
j = nf2.at[1, 'Serial no']
k = 0
else:
i = nf2.at[0, 'Serial no']
j = nf2.at[1, 'Serial no']
k = nf2.at[2, 'Serial no']
print(k)
nf3 = dfread.loc[dfread['Serial no'] == i].head(1)
nf4 = dfread.loc[dfread['Serial no'] == j].head(1)
nf5 = dfread.loc[dfread['Serial no'] == k].head(1)
nf3 = nf3.drop(['R', 'G', 'B', 'color', 'A', 'S'], axis=1)
nf4 = nf4.drop(['R', 'G', 'B', 'color', 'A', 'S'], axis=1)
nf5 = nf5.drop(['R', 'G', 'B', 'color', 'A', 'S'], axis=1)
nf = pd.concat([nf3, nf4, nf5])
nf.to_csv('.data base\\test_result2.csv', index=False,
encoding='utf_8_sig')
print(nf)
ncol = list(nf.columns)
if len(nf2['Serial no']) == 0:
root = tk.Tk()
root.withdraw()
                    messagebox.showinfo('Failed', 'No matching data found')
elif len(nf2['Serial no']) == 1:
row_nf3 = nf3.iloc[0].tolist()
row_nf32 = ['x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x',
'x', 'x', 'x', 'x', 'x', 'x', 'x']
row_nf33 = ['x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x',
'x', 'x', 'x', 'x', 'x', 'x', 'x']
elif len(nf2['Serial no']) == 2:
row_nf3 = nf3.iloc[0].tolist()
row_nf32 = nf4.iloc[0].tolist()
row_nf33 = ['x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x',
'x', 'x', 'x', 'x', 'x', 'x', 'x']
else:
row_nf3 = nf3.iloc[0].tolist()
row_nf32 = nf4.iloc[0].tolist()
print(row_nf32)
row_nf33 = nf5.iloc[0].tolist()
name_n = nf['Serial no'].tolist()
rate_n = Rate
"""
newdf1=dfread.loc[(dfread['color']==color)|(dfread['A']==SumAvr)]
newdf2=dfread.loc[(dfread['S']<=(SumRGB+2))&(dfread['S']>=(SumRGB-2))]
newdf=pd.concat([newdf1, newdf2])
"""
"""
newdf=dfread.loc[(dfread['A']==SumAvr)|(dfread['S']==SumRGB)]
newdf=newdf.loc[(newdf['color']==color)]
"""
newdf = dfread.loc[(dfread['color'] == color) | (dfread['A'
] == SumAvr) | (dfread['S'] == SumRGB)]
newdf.insert(1, 'Rdif', newdf[['R']].add(-RAvr))
newdf.insert(2, 'Gdif', newdf[['G']].add(-GAvr))
newdf.insert(3, 'Bdif', newdf[['B']].add(-BAvr))
newdf.insert(4, 'Adif', abs(newdf[['A']].add(-SumAvr)))
newdf.insert(5, 'Sdif', abs(newdf[['S']].add(-SumRGB)))
df = newdf.sort_values(by=['Sdif', 'Adif'], ascending=True
).head(100)
df.insert(1, 'dalta', abs(df['Rdif'] + df['Gdif'] + df['Bdif'])
)
df = df.sort_values(by=['dalta'], ascending=True)
data = df[['Serial no', 'color']]
group = data.groupby('Serial no')
datacount = group.count()
df = df.merge(datacount, left_on='Serial no', right_index=True)
df = df.sort_values(by=['color_y'], ascending=False)
df3 = df.drop_duplicates('Serial no', keep='first', inplace
=False).head()
print(df3)
df3.to_csv('.data base\\test_result.csv', index=False,
encoding='utf_8_sig')
if df3.empty == True:
root = tk.Tk()
root.withdraw()
                    messagebox.showinfo('Failed', 'No matching data found')
elif len(df3) <= 2:
root = tk.Tk()
root.withdraw()
                    messagebox.showinfo('Failed', 'Only a few records found\n Already saved in test_result')
else:
Zero = df3.loc[(df3['Rdif'] == 0) & (df3['Gdif'] == 0) &
(df3['Bdif'] == 0)]
Zero = Zero.head(3)
if Zero.empty == False:
Zero = Zero.drop(['R', 'G', 'B', 'dalta', 'Rdif',
'Gdif', 'Bdif', 'A', 'S', 'Adif', 'Sdif',
'color_x', 'color_y'], axis=1)
name = df3['Serial no'].tolist()
rate = df3['color_y'].tolist()
col = list(Zero.columns)
row_text = Zero.iloc[0].tolist()
df3 = df3.drop(['R', 'G', 'B', 'dalta', 'Rdif',
'Gdif', 'Bdif', 'A', 'S', 'Adif', 'Sdif',
'color_x', 'color_y'], axis=1)
row_df3 = df3.iloc[0].tolist()
row_df32 = df3.iloc[1].tolist()
row_df33 = df3.iloc[2].tolist()
Result_Print()
print('0')
print(Zero)
else:
filtdf = df3.loc[df3['A'] >= SumAvr]
filtdf = filtdf.sort_values(by=['Rdif', 'Gdif', 'Bdif']
).head()
Neg_filtdf = df3.loc[df3['A'] < SumAvr]
Neg_filtdf = Neg_filtdf.sort_values(by=['Rdif',
'Gdif', 'Bdif']).head()
if Neg_filtdf.empty == True and filtdf.empty == True:
root = tk.Tk()
root.withdraw()
                            messagebox.showinfo('Failed', 'No matching data found')
else:
filtdf = filtdf.drop(['R', 'G', 'B', 'dalta',
'Rdif', 'Gdif', 'Bdif', 'A', 'S', 'Adif',
'Sdif', 'color_x', 'color_y'], axis=1)
name = df3['Serial no'].tolist()
rate = df3['color_y'].tolist()
col = list(filtdf.columns)
row_text = filtdf.iloc[0].tolist()
df3 = df3.drop(['R', 'G', 'B', 'dalta', 'Rdif',
'Gdif', 'Bdif', 'A', 'S', 'Adif', 'Sdif',
'color_x', 'color_y'], axis=1)
row_df3 = df3.iloc[0].tolist()
row_df32 = df3.iloc[1].tolist()
row_df33 = df3.iloc[2].tolist()
Result_Print()
                            print('The closest is 1', filtdf.head(1))
<|reserved_special_token_0|>
def main():
while True:
cv2.imshow('mouse_callback', img)
if cv2.waitKey(20) == 27:
break
cv2.destroyAllWindows()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def quitScreen():
    messagebox.showinfo('collecting data', 'Click the window to start the analysis')
root.destroy()
root2 = Tk()
root2.destroy()
def getTextInput():
global result, result2
result = text.get(1.0, tk.END + '-1c')
result2 = text2.get(1.0, tk.END + '-1c')
<|reserved_special_token_0|>
def Result_Print():
window = Tk()
    window.title('Analysis Results')
window.geometry('600x900')
frame2 = Frame(window)
frame2.pack(fill='both')
tablayout = Notebook(frame2)
tablayout2 = Notebook(frame2)
ntab1 = Frame(tablayout2)
ntab1.pack(fill='both')
for row in range(len(name_n)):
for column in range(1):
label = Label(ntab1, width=25, height=2, text=name_n[row], bg=
'black', fg='white', padx=1, pady=1)
label.grid(row=row, column=column, sticky='nsew', padx=1, pady=1)
ntab1.grid_columnconfigure(column, weight=1)
for row in range(len(name_n)):
for column in range(1):
label = Label(ntab1, width=5, height=2, text='%s' % rate_n[row],
bg='black', fg='white', padx=1, pady=1)
label.grid(row=row, column=1, sticky='nsew', padx=1, pady=1)
ntab1.grid_columnconfigure(column, weight=1)
for row in range(len(name_n)):
for column in range(1):
            label = Label(ntab1, width=12, height=2, text='% similarity', bg=
'black', fg='white', padx=1, pady=1)
label.grid(row=row, column=2, sticky='nsew', padx=1, pady=1)
ntab1.grid_columnconfigure(column, weight=1)
    tablayout2.add(ntab1, text='Cross-matching results')
ntab2 = Frame(tablayout2)
ntab2.pack(fill='both')
for row in range(len(ncol)):
for column in range(1):
label = Label(ntab2, width=22, height=1, text=ncol[row], bg=
'black', fg='white', padx=1, pady=1)
label.grid(row=row, column=0, sticky='nsew', padx=1, pady=1)
ntab1.grid_columnconfigure(column, weight=1)
for row in range(len(ncol)):
for column in range(1):
label = Label(ntab2, width=22, height=1, text=row_nf3[row], bg=
'black', fg='white', padx=1, pady=1)
label.grid(row=row, column=1, sticky='nsew', padx=1, pady=1)
ntab1.grid_columnconfigure(column, weight=1)
    tablayout2.add(ntab2, text='Formula 1')
ntab3 = Frame(tablayout2)
ntab3.pack(fill='both')
for row in range(len(ncol)):
for column in range(1):
label = Label(ntab3, width=22, height=1, text=ncol[row], bg=
'black', fg='white', padx=1, pady=1)
label.grid(row=row, column=0, sticky='nsew', padx=1, pady=1)
ntab1.grid_columnconfigure(column, weight=1)
for row in range(len(ncol)):
for column in range(1):
label = Label(ntab3, width=22, height=1, text=row_nf32[row], bg
='black', fg='white', padx=1, pady=1)
label.grid(row=row, column=1, sticky='nsew', padx=1, pady=1)
ntab1.grid_columnconfigure(column, weight=1)
    tablayout2.add(ntab3, text='Formula 2')
ntab4 = Frame(tablayout2)
ntab4.pack(fill='both')
for row in range(len(ncol)):
for column in range(1):
label = Label(ntab4, width=22, height=1, text=ncol[row], bg=
'black', fg='white', padx=1, pady=1)
label.grid(row=row, column=0, sticky='nsew', padx=1, pady=1)
ntab1.grid_columnconfigure(column, weight=1)
for row in range(len(ncol)):
for column in range(1):
label = Label(ntab4, width=22, height=1, text=row_nf33[row], bg
='black', fg='white', padx=1, pady=1)
label.grid(row=row, column=1, sticky='nsew', padx=1, pady=1)
ntab1.grid_columnconfigure(column, weight=1)
    tablayout2.add(ntab4, text='Formula 3')
ntab5 = Frame(tablayout2)
ntab5.pack(fill='both')
for row in range(len(ncol)):
for column in range(1):
label = Label(ntab5, width=22, height=1, text=ncol[row], bg=
'black', fg='white', padx=1, pady=1)
label.grid(row=row, column=0, sticky='nsew', padx=1, pady=1)
ntab1.grid_columnconfigure(column, weight=1)
for row in range(len(ncol)):
for column in range(1):
label = Label(ntab5, width=22, height=1, text=row_nf3[row], bg=
'black', fg='white', padx=1, pady=1)
label.grid(row=row, column=1, sticky='nsew', padx=1, pady=1)
ntab1.grid_columnconfigure(column, weight=1)
    tablayout2.add(ntab5, text='Closest formula')
tab1 = Frame(tablayout)
tab1.pack(fill='both')
for row in range(len(name)):
for column in range(1):
label = Label(tab1, width=25, height=2, text=name[row], bg=
'black', fg='white', padx=1, pady=1)
label.grid(row=row, column=column, sticky='nsew', padx=1, pady=1)
tab1.grid_columnconfigure(column, weight=1)
for row in range(len(name)):
for column in range(1):
label = Label(tab1, width=5, height=2, text='%s' % rate[row],
bg='black', fg='white', padx=1, pady=1)
label.grid(row=row, column=1, sticky='nsew', padx=1, pady=1)
tab1.grid_columnconfigure(column, weight=1)
for row in range(len(name)):
for column in range(1):
            label = Label(tab1, width=12, height=2, text='% similarity', bg=
'black', fg='white', padx=1, pady=1)
label.grid(row=row, column=2, sticky='nsew', padx=1, pady=1)
tab1.grid_columnconfigure(column, weight=1)
    tablayout.add(tab1, text='Color classification results')
tab2 = Frame(tablayout)
tab2.pack(fill='both')
for row in range(len(col)):
for column in range(1):
label = Label(tab2, width=22, height=1, text=col[row], bg=
'black', fg='white', padx=1, pady=1)
label.grid(row=row, column=0, sticky='nsew', padx=1, pady=1)
tab1.grid_columnconfigure(column, weight=1)
for row in range(len(col)):
for column in range(1):
label = Label(tab2, width=22, height=1, text=row_df3[row], bg=
'black', fg='white', padx=1, pady=1)
label.grid(row=row, column=1, sticky='nsew', padx=1, pady=1)
tab1.grid_columnconfigure(column, weight=1)
    tablayout.add(tab2, text='Formula 1')
tab3 = Frame(tablayout)
tab3.pack(fill='both')
for row in range(len(col)):
for column in range(1):
label = Label(tab3, width=22, height=1, text=col[row], bg=
'black', fg='white', padx=1, pady=1)
label.grid(row=row, column=0, sticky='nsew', padx=1, pady=1)
tab1.grid_columnconfigure(column, weight=1)
for row in range(len(col)):
for column in range(1):
label = Label(tab3, width=22, height=1, text=row_df32[row], bg=
'black', fg='white', padx=1, pady=1)
label.grid(row=row, column=1, sticky='nsew', padx=1, pady=1)
tab1.grid_columnconfigure(column, weight=1)
    tablayout.add(tab3, text='Formula 2')
tab4 = Frame(tablayout)
tab4.pack(fill='both')
for row in range(len(col)):
for column in range(1):
label = Label(tab4, width=22, height=1, text=col[row], bg=
'black', fg='white', padx=1, pady=1)
label.grid(row=row, column=0, sticky='nsew', padx=1, pady=1)
tab1.grid_columnconfigure(column, weight=1)
for row in range(len(col)):
for column in range(1):
label = Label(tab4, width=22, height=1, text=row_df33[row], bg=
'black', fg='white', padx=1, pady=1)
label.grid(row=row, column=1, sticky='nsew', padx=1, pady=1)
tab1.grid_columnconfigure(column, weight=1)
    tablayout.add(tab4, text='Formula 3')
tab5 = Frame(tablayout)
tab5.pack(fill='both')
for row in range(len(col)):
for column in range(1):
label = Label(tab5, width=22, height=1, text=col[row], bg=
'black', fg='white', padx=1, pady=1)
label.grid(row=row, column=0, sticky='nsew', padx=1, pady=1)
tab1.grid_columnconfigure(column, weight=1)
for row in range(len(col)):
for column in range(1):
label = Label(tab5, width=22, height=1, text=row_text[row], bg=
'black', fg='white', padx=1, pady=1)
label.grid(row=row, column=1, sticky='nsew', padx=1, pady=1)
tab1.grid_columnconfigure(column, weight=1)
    tablayout.add(tab5, text='Closest formula')
tablayout.pack()
tablayout2.pack()
window.mainloop()
def CircleCallback(event, x, y, flags, param):
n = 8
    global refPt, PtBGR, w, h, Serial, r1, r2, r3, r4, rate, rate2, rate3, r6, r7, r8, r9, add, add2, add3, color, b, g, r, df3, name, col, row_text
global row_df3, row_df32, row_df33, row_text2, row_nf3, row_nf32, nf3, row_nf33, name_n, rate_n, ncol
if event == cv2.EVENT_LBUTTONDOWN:
n = 500
for c in range(0, n):
c += 1
ranx = random.randint(0, 499)
rany = random.randint(0, 499)
refPt.append((ranx, rany))
b, g, r = img[ranx, rany]
PtBGR.append((b, g, r))
b = [x[0] for x in PtBGR]
g = [x[1] for x in PtBGR]
r = [x[2] for x in PtBGR]
if len(refPt) == n:
BAvr = round(sum(b[0:n]) / n)
GAvr = round(sum(g[0:n]) / n)
RAvr = round(sum(r[0:n]) / n)
SumRGB = BAvr + GAvr + RAvr
SumAvr = round(SumRGB / 3)
color_def(BAvr, GAvr, RAvr)
color_name.append(color)
AvrRGB = {'R': RAvr, 'G': GAvr, 'B': BAvr, 'Sum': SumRGB,
'Avr': SumAvr, 'color': color_name}
df_test = pd.DataFrame(AvrRGB, index=[0])
dfread = pd.read_csv('.data base\\%s' % result2)
dfread['A'] = round((dfread['R'] + dfread['G'] + dfread['B'
]) / 3)
dfread['S'] = dfread['R'] + dfread['G'] + dfread['B']
nf = pd.DataFrame(list(zip(r, g, b)), columns=['R', 'G', 'B'])
nfread = dfread[['Serial no', 'R', 'G', 'B']]
loan = pd.merge(nf, nfread)
group = loan.groupby('Serial no')
Newnf = group.count()
Newnf['P'] = round(Newnf['R'] / Newnf['R'].sum() * 100)
Newnf = Newnf.sort_values(by=['R'], ascending=False)
Rate = Newnf['P'].tolist()
Newnf.columns = [' '.join(col).strip() for col in Newnf.
columns.values]
nf2 = pd.DataFrame(Newnf.to_records())
nf2 = nf2.head(5)
print(nf2)
if len(nf2['Serial no']) == 0:
i = 0
j = 0
k = 0
elif len(nf2['Serial no']) == 1:
i = nf2.at[0, 'Serial no']
j = 0
k = 0
elif len(nf2['Serial no']) == 2:
i = nf2.at[0, 'Serial no']
j = nf2.at[1, 'Serial no']
k = 0
else:
i = nf2.at[0, 'Serial no']
j = nf2.at[1, 'Serial no']
k = nf2.at[2, 'Serial no']
print(k)
nf3 = dfread.loc[dfread['Serial no'] == i].head(1)
nf4 = dfread.loc[dfread['Serial no'] == j].head(1)
nf5 = dfread.loc[dfread['Serial no'] == k].head(1)
nf3 = nf3.drop(['R', 'G', 'B', 'color', 'A', 'S'], axis=1)
nf4 = nf4.drop(['R', 'G', 'B', 'color', 'A', 'S'], axis=1)
nf5 = nf5.drop(['R', 'G', 'B', 'color', 'A', 'S'], axis=1)
nf = pd.concat([nf3, nf4, nf5])
nf.to_csv('.data base\\test_result2.csv', index=False,
encoding='utf_8_sig')
print(nf)
ncol = list(nf.columns)
if len(nf2['Serial no']) == 0:
root = tk.Tk()
root.withdraw()
                    messagebox.showinfo('Failed', 'No matching data found')
elif len(nf2['Serial no']) == 1:
row_nf3 = nf3.iloc[0].tolist()
row_nf32 = ['x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x',
'x', 'x', 'x', 'x', 'x', 'x', 'x']
row_nf33 = ['x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x',
'x', 'x', 'x', 'x', 'x', 'x', 'x']
elif len(nf2['Serial no']) == 2:
row_nf3 = nf3.iloc[0].tolist()
row_nf32 = nf4.iloc[0].tolist()
row_nf33 = ['x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x',
'x', 'x', 'x', 'x', 'x', 'x', 'x']
else:
row_nf3 = nf3.iloc[0].tolist()
row_nf32 = nf4.iloc[0].tolist()
print(row_nf32)
row_nf33 = nf5.iloc[0].tolist()
name_n = nf['Serial no'].tolist()
rate_n = Rate
"""
newdf1=dfread.loc[(dfread['color']==color)|(dfread['A']==SumAvr)]
newdf2=dfread.loc[(dfread['S']<=(SumRGB+2))&(dfread['S']>=(SumRGB-2))]
newdf=pd.concat([newdf1, newdf2])
"""
"""
newdf=dfread.loc[(dfread['A']==SumAvr)|(dfread['S']==SumRGB)]
newdf=newdf.loc[(newdf['color']==color)]
"""
newdf = dfread.loc[(dfread['color'] == color) | (dfread['A'
] == SumAvr) | (dfread['S'] == SumRGB)]
newdf.insert(1, 'Rdif', newdf[['R']].add(-RAvr))
newdf.insert(2, 'Gdif', newdf[['G']].add(-GAvr))
newdf.insert(3, 'Bdif', newdf[['B']].add(-BAvr))
newdf.insert(4, 'Adif', abs(newdf[['A']].add(-SumAvr)))
newdf.insert(5, 'Sdif', abs(newdf[['S']].add(-SumRGB)))
df = newdf.sort_values(by=['Sdif', 'Adif'], ascending=True
).head(100)
df.insert(1, 'dalta', abs(df['Rdif'] + df['Gdif'] + df['Bdif'])
)
df = df.sort_values(by=['dalta'], ascending=True)
data = df[['Serial no', 'color']]
group = data.groupby('Serial no')
datacount = group.count()
df = df.merge(datacount, left_on='Serial no', right_index=True)
df = df.sort_values(by=['color_y'], ascending=False)
df3 = df.drop_duplicates('Serial no', keep='first', inplace
=False).head()
print(df3)
df3.to_csv('.data base\\test_result.csv', index=False,
encoding='utf_8_sig')
if df3.empty == True:
root = tk.Tk()
root.withdraw()
                    messagebox.showinfo('Failed', 'No matching data found')
elif len(df3) <= 2:
root = tk.Tk()
root.withdraw()
                    messagebox.showinfo('Failed', 'Only a few records found\n Already saved in test_result')
else:
Zero = df3.loc[(df3['Rdif'] == 0) & (df3['Gdif'] == 0) &
(df3['Bdif'] == 0)]
Zero = Zero.head(3)
if Zero.empty == False:
Zero = Zero.drop(['R', 'G', 'B', 'dalta', 'Rdif',
'Gdif', 'Bdif', 'A', 'S', 'Adif', 'Sdif',
'color_x', 'color_y'], axis=1)
name = df3['Serial no'].tolist()
rate = df3['color_y'].tolist()
col = list(Zero.columns)
row_text = Zero.iloc[0].tolist()
df3 = df3.drop(['R', 'G', 'B', 'dalta', 'Rdif',
'Gdif', 'Bdif', 'A', 'S', 'Adif', 'Sdif',
'color_x', 'color_y'], axis=1)
row_df3 = df3.iloc[0].tolist()
row_df32 = df3.iloc[1].tolist()
row_df33 = df3.iloc[2].tolist()
Result_Print()
print('0')
print(Zero)
else:
filtdf = df3.loc[df3['A'] >= SumAvr]
filtdf = filtdf.sort_values(by=['Rdif', 'Gdif', 'Bdif']
).head()
Neg_filtdf = df3.loc[df3['A'] < SumAvr]
Neg_filtdf = Neg_filtdf.sort_values(by=['Rdif',
'Gdif', 'Bdif']).head()
if Neg_filtdf.empty == True and filtdf.empty == True:
root = tk.Tk()
root.withdraw()
                            messagebox.showinfo('Failed', 'No matching data found')
else:
filtdf = filtdf.drop(['R', 'G', 'B', 'dalta',
'Rdif', 'Gdif', 'Bdif', 'A', 'S', 'Adif',
'Sdif', 'color_x', 'color_y'], axis=1)
name = df3['Serial no'].tolist()
rate = df3['color_y'].tolist()
col = list(filtdf.columns)
row_text = filtdf.iloc[0].tolist()
df3 = df3.drop(['R', 'G', 'B', 'dalta', 'Rdif',
'Gdif', 'Bdif', 'A', 'S', 'Adif', 'Sdif',
'color_x', 'color_y'], axis=1)
row_df3 = df3.iloc[0].tolist()
row_df32 = df3.iloc[1].tolist()
row_df33 = df3.iloc[2].tolist()
Result_Print()
                            print('The closest is 1', filtdf.head(1))
def color_def(BAvr, GAvr, RAvr):
global color
if abs(int(BAvr) - int(GAvr)) <= 1 and abs(int(BAvr) - int(RAvr)) <= 1:
color = 'White'
return color
elif BAvr >= GAvr and BAvr >= RAvr:
if BAvr - GAvr > 3 and BAvr - RAvr >= 3:
color = 'Blue'
return color
elif BAvr - GAvr < 3:
color = 'Cyan'
return color
else:
color = 'Purple'
return color
elif GAvr >= RAvr and GAvr >= BAvr:
if GAvr - RAvr > 3 or GAvr - BAvr > 3:
color = 'Green'
return color
elif GAvr - RAvr < 3:
color = 'Yellow'
return color
else:
color = 'Cyan'
return color
elif RAvr >= GAvr and RAvr >= BAvr:
if RAvr - GAvr >= 3 and RAvr - BAvr >= 3:
color = 'Red'
return color
elif RAvr - GAvr < 3:
color = 'Yellow'
return color
else:
color = 'Purple'
return color
else:
color = 'White'
<|reserved_special_token_0|>
def main():
while True:
cv2.imshow('mouse_callback', img)
if cv2.waitKey(20) == 27:
break
cv2.destroyAllWindows()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
refPt = []
PtBGR = []
r = []
g = []
b = []
Serial = []
r1 = []
r2 = []
r3 = []
r4 = []
rate = []
rate2 = []
rate3 = []
r6 = []
r7 = []
r8 = []
r9 = []
add = []
add2 = []
add3 = []
color_name = []
locate = []
brand = []
boolean = False
root = tk.Tk()
root.geometry('400x200')
root.configure(background='white')
def quitScreen():
    messagebox.showinfo('collecting data', 'Click the window to start the analysis')
root.destroy()
root2 = Tk()
root2.destroy()
def getTextInput():
global result, result2
result = text.get(1.0, tk.END + '-1c')
result2 = text2.get(1.0, tk.END + '-1c')
img = PhotoImage(file='buttons/QJsmall.png')
panel = tk.Label(root, image=img)
panel.grid(row=0, column=0, columnspan=3)
labelmode = tk.Label(root, text="""Please enter the full image name
ex:104432 w7.jpg""", bg='white')
labelmode.configure(font=('微軟正黑體', 10))
labelmode.grid(row=1)
text = tk.Text(root, width=20, height=1)
text.insert('insert', '.jpg')
text.configure(font=('微軟正黑體', 10))
text.grid(row=1, column=2)
labelmode2 = tk.Label(root, text="""Please enter the database file name
ex:PureColorBig.csv""", bg=
    'white')
labelmode2.configure(font=('微軟正黑體', 10))
labelmode2.grid(row=2)
text2 = tk.Text(root, width=20, height=1)
text2.insert('insert', 'PureColorBig.csv')
text2.configure(font=('微軟正黑體', 10))
text2.grid(row=2, column=2)
img_confirm = PhotoImage(file='buttons/confirm.png')
img_start = PhotoImage(file='buttons/start.png')
btnRead = tk.Button(root, image=img_confirm, text=' ', relief='flat',
command=getTextInput)
btnRead.grid(row=5, column=1)
btnRead2 = tk.Button(root, image=img_start, text=' ', relief='flat',
command=quitScreen)
btnRead2.grid(row=5, column=2)
root.mainloop()
def Result_Print():
window = Tk()
    window.title('Analysis Results')
window.geometry('600x900')
frame2 = Frame(window)
frame2.pack(fill='both')
tablayout = Notebook(frame2)
tablayout2 = Notebook(frame2)
ntab1 = Frame(tablayout2)
ntab1.pack(fill='both')
for row in range(len(name_n)):
for column in range(1):
label = Label(ntab1, width=25, height=2, text=name_n[row], bg=
'black', fg='white', padx=1, pady=1)
label.grid(row=row, column=column, sticky='nsew', padx=1, pady=1)
ntab1.grid_columnconfigure(column, weight=1)
for row in range(len(name_n)):
for column in range(1):
label = Label(ntab1, width=5, height=2, text='%s' % rate_n[row],
bg='black', fg='white', padx=1, pady=1)
label.grid(row=row, column=1, sticky='nsew', padx=1, pady=1)
ntab1.grid_columnconfigure(column, weight=1)
for row in range(len(name_n)):
for column in range(1):
            label = Label(ntab1, width=12, height=2, text='% similarity', bg=
'black', fg='white', padx=1, pady=1)
label.grid(row=row, column=2, sticky='nsew', padx=1, pady=1)
ntab1.grid_columnconfigure(column, weight=1)
    tablayout2.add(ntab1, text='Cross-matching results')
ntab2 = Frame(tablayout2)
ntab2.pack(fill='both')
for row in range(len(ncol)):
for column in range(1):
label = Label(ntab2, width=22, height=1, text=ncol[row], bg=
'black', fg='white', padx=1, pady=1)
label.grid(row=row, column=0, sticky='nsew', padx=1, pady=1)
ntab1.grid_columnconfigure(column, weight=1)
for row in range(len(ncol)):
for column in range(1):
label = Label(ntab2, width=22, height=1, text=row_nf3[row], bg=
'black', fg='white', padx=1, pady=1)
label.grid(row=row, column=1, sticky='nsew', padx=1, pady=1)
ntab1.grid_columnconfigure(column, weight=1)
    tablayout2.add(ntab2, text='Formula 1')
ntab3 = Frame(tablayout2)
ntab3.pack(fill='both')
for row in range(len(ncol)):
for column in range(1):
label = Label(ntab3, width=22, height=1, text=ncol[row], bg=
'black', fg='white', padx=1, pady=1)
label.grid(row=row, column=0, sticky='nsew', padx=1, pady=1)
ntab1.grid_columnconfigure(column, weight=1)
for row in range(len(ncol)):
for column in range(1):
label = Label(ntab3, width=22, height=1, text=row_nf32[row], bg
='black', fg='white', padx=1, pady=1)
label.grid(row=row, column=1, sticky='nsew', padx=1, pady=1)
ntab1.grid_columnconfigure(column, weight=1)
    tablayout2.add(ntab3, text='Formula 2')
ntab4 = Frame(tablayout2)
ntab4.pack(fill='both')
for row in range(len(ncol)):
for column in range(1):
label = Label(ntab4, width=22, height=1, text=ncol[row], bg=
'black', fg='white', padx=1, pady=1)
label.grid(row=row, column=0, sticky='nsew', padx=1, pady=1)
ntab1.grid_columnconfigure(column, weight=1)
for row in range(len(ncol)):
for column in range(1):
label = Label(ntab4, width=22, height=1, text=row_nf33[row], bg
='black', fg='white', padx=1, pady=1)
label.grid(row=row, column=1, sticky='nsew', padx=1, pady=1)
ntab1.grid_columnconfigure(column, weight=1)
    tablayout2.add(ntab4, text='Formula 3')
ntab5 = Frame(tablayout2)
ntab5.pack(fill='both')
for row in range(len(ncol)):
for column in range(1):
label = Label(ntab5, width=22, height=1, text=ncol[row], bg=
'black', fg='white', padx=1, pady=1)
label.grid(row=row, column=0, sticky='nsew', padx=1, pady=1)
ntab1.grid_columnconfigure(column, weight=1)
for row in range(len(ncol)):
for column in range(1):
label = Label(ntab5, width=22, height=1, text=row_nf3[row], bg=
'black', fg='white', padx=1, pady=1)
label.grid(row=row, column=1, sticky='nsew', padx=1, pady=1)
ntab1.grid_columnconfigure(column, weight=1)
    tablayout2.add(ntab5, text='Closest formula')
tab1 = Frame(tablayout)
tab1.pack(fill='both')
for row in range(len(name)):
for column in range(1):
label = Label(tab1, width=25, height=2, text=name[row], bg=
'black', fg='white', padx=1, pady=1)
label.grid(row=row, column=column, sticky='nsew', padx=1, pady=1)
tab1.grid_columnconfigure(column, weight=1)
for row in range(len(name)):
for column in range(1):
label = Label(tab1, width=5, height=2, text='%s' % rate[row],
bg='black', fg='white', padx=1, pady=1)
label.grid(row=row, column=1, sticky='nsew', padx=1, pady=1)
tab1.grid_columnconfigure(column, weight=1)
for row in range(len(name)):
for column in range(1):
            label = Label(tab1, width=12, height=2, text='% similarity', bg=
'black', fg='white', padx=1, pady=1)
label.grid(row=row, column=2, sticky='nsew', padx=1, pady=1)
tab1.grid_columnconfigure(column, weight=1)
    tablayout.add(tab1, text='Color classification results')
tab2 = Frame(tablayout)
tab2.pack(fill='both')
for row in range(len(col)):
for column in range(1):
label = Label(tab2, width=22, height=1, text=col[row], bg=
'black', fg='white', padx=1, pady=1)
label.grid(row=row, column=0, sticky='nsew', padx=1, pady=1)
tab1.grid_columnconfigure(column, weight=1)
for row in range(len(col)):
for column in range(1):
label = Label(tab2, width=22, height=1, text=row_df3[row], bg=
'black', fg='white', padx=1, pady=1)
label.grid(row=row, column=1, sticky='nsew', padx=1, pady=1)
tab1.grid_columnconfigure(column, weight=1)
    tablayout.add(tab2, text='Formula 1')
tab3 = Frame(tablayout)
tab3.pack(fill='both')
for row in range(len(col)):
for column in range(1):
label = Label(tab3, width=22, height=1, text=col[row], bg=
'black', fg='white', padx=1, pady=1)
label.grid(row=row, column=0, sticky='nsew', padx=1, pady=1)
tab1.grid_columnconfigure(column, weight=1)
for row in range(len(col)):
for column in range(1):
label = Label(tab3, width=22, height=1, text=row_df32[row], bg=
'black', fg='white', padx=1, pady=1)
label.grid(row=row, column=1, sticky='nsew', padx=1, pady=1)
tab1.grid_columnconfigure(column, weight=1)
    tablayout.add(tab3, text='Formula 2')
tab4 = Frame(tablayout)
tab4.pack(fill='both')
for row in range(len(col)):
for column in range(1):
label = Label(tab4, width=22, height=1, text=col[row], bg=
'black', fg='white', padx=1, pady=1)
label.grid(row=row, column=0, sticky='nsew', padx=1, pady=1)
tab1.grid_columnconfigure(column, weight=1)
for row in range(len(col)):
for column in range(1):
label = Label(tab4, width=22, height=1, text=row_df33[row], bg=
'black', fg='white', padx=1, pady=1)
label.grid(row=row, column=1, sticky='nsew', padx=1, pady=1)
tab1.grid_columnconfigure(column, weight=1)
    tablayout.add(tab4, text='Formula 3')
tab5 = Frame(tablayout)
tab5.pack(fill='both')
for row in range(len(col)):
for column in range(1):
label = Label(tab5, width=22, height=1, text=col[row], bg=
'black', fg='white', padx=1, pady=1)
label.grid(row=row, column=0, sticky='nsew', padx=1, pady=1)
tab1.grid_columnconfigure(column, weight=1)
for row in range(len(col)):
for column in range(1):
label = Label(tab5, width=22, height=1, text=row_text[row], bg=
'black', fg='white', padx=1, pady=1)
label.grid(row=row, column=1, sticky='nsew', padx=1, pady=1)
tab1.grid_columnconfigure(column, weight=1)
    tablayout.add(tab5, text='Closest formula')
tablayout.pack()
tablayout2.pack()
window.mainloop()
def CircleCallback(event, x, y, flags, param):
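    # Mouse callback: a left click samples 500 random pixels from the image,
    # averages their BGR values, and queries the CSV database for the closest
    # color formulas.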
n = 8
    global refPt, PtBGR, w, h, Serial, r1, r2, r3, r4, rate, rate2, rate3, r6, r7, r8, r9, add, add2, add3, color, b, g, r, df3, name, col, row_text
global row_df3, row_df32, row_df33, row_text2, row_nf3, row_nf32, nf3, row_nf33, name_n, rate_n, ncol
if event == cv2.EVENT_LBUTTONDOWN:
n = 500
for c in range(0, n):
c += 1
ranx = random.randint(0, 499)
rany = random.randint(0, 499)
refPt.append((ranx, rany))
b, g, r = img[ranx, rany]
PtBGR.append((b, g, r))
b = [x[0] for x in PtBGR]
g = [x[1] for x in PtBGR]
r = [x[2] for x in PtBGR]
if len(refPt) == n:
BAvr = round(sum(b[0:n]) / n)
GAvr = round(sum(g[0:n]) / n)
RAvr = round(sum(r[0:n]) / n)
SumRGB = BAvr + GAvr + RAvr
SumAvr = round(SumRGB / 3)
color_def(BAvr, GAvr, RAvr)
color_name.append(color)
AvrRGB = {'R': RAvr, 'G': GAvr, 'B': BAvr, 'Sum': SumRGB,
'Avr': SumAvr, 'color': color_name}
df_test = pd.DataFrame(AvrRGB, index=[0])
dfread = pd.read_csv('.data base\\%s' % result2)
dfread['A'] = round((dfread['R'] + dfread['G'] + dfread['B'
]) / 3)
dfread['S'] = dfread['R'] + dfread['G'] + dfread['B']
nf = pd.DataFrame(list(zip(r, g, b)), columns=['R', 'G', 'B'])
nfread = dfread[['Serial no', 'R', 'G', 'B']]
loan = pd.merge(nf, nfread)
group = loan.groupby('Serial no')
Newnf = group.count()
Newnf['P'] = round(Newnf['R'] / Newnf['R'].sum() * 100)
Newnf = Newnf.sort_values(by=['R'], ascending=False)
Rate = Newnf['P'].tolist()
Newnf.columns = [' '.join(col).strip() for col in Newnf.
columns.values]
nf2 = pd.DataFrame(Newnf.to_records())
nf2 = nf2.head(5)
print(nf2)
if len(nf2['Serial no']) == 0:
i = 0
j = 0
k = 0
elif len(nf2['Serial no']) == 1:
i = nf2.at[0, 'Serial no']
j = 0
k = 0
elif len(nf2['Serial no']) == 2:
i = nf2.at[0, 'Serial no']
j = nf2.at[1, 'Serial no']
k = 0
else:
i = nf2.at[0, 'Serial no']
j = nf2.at[1, 'Serial no']
k = nf2.at[2, 'Serial no']
print(k)
nf3 = dfread.loc[dfread['Serial no'] == i].head(1)
nf4 = dfread.loc[dfread['Serial no'] == j].head(1)
nf5 = dfread.loc[dfread['Serial no'] == k].head(1)
nf3 = nf3.drop(['R', 'G', 'B', 'color', 'A', 'S'], axis=1)
nf4 = nf4.drop(['R', 'G', 'B', 'color', 'A', 'S'], axis=1)
nf5 = nf5.drop(['R', 'G', 'B', 'color', 'A', 'S'], axis=1)
nf = pd.concat([nf3, nf4, nf5])
nf.to_csv('.data base\\test_result2.csv', index=False,
encoding='utf_8_sig')
print(nf)
ncol = list(nf.columns)
if len(nf2['Serial no']) == 0:
root = tk.Tk()
root.withdraw()
                    messagebox.showinfo('Failed', 'No matching data found')
elif len(nf2['Serial no']) == 1:
row_nf3 = nf3.iloc[0].tolist()
row_nf32 = ['x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x',
'x', 'x', 'x', 'x', 'x', 'x', 'x']
row_nf33 = ['x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x',
'x', 'x', 'x', 'x', 'x', 'x', 'x']
elif len(nf2['Serial no']) == 2:
row_nf3 = nf3.iloc[0].tolist()
row_nf32 = nf4.iloc[0].tolist()
row_nf33 = ['x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x',
'x', 'x', 'x', 'x', 'x', 'x', 'x']
else:
row_nf3 = nf3.iloc[0].tolist()
row_nf32 = nf4.iloc[0].tolist()
print(row_nf32)
row_nf33 = nf5.iloc[0].tolist()
name_n = nf['Serial no'].tolist()
rate_n = Rate
"""
newdf1=dfread.loc[(dfread['color']==color)|(dfread['A']==SumAvr)]
newdf2=dfread.loc[(dfread['S']<=(SumRGB+2))&(dfread['S']>=(SumRGB-2))]
newdf=pd.concat([newdf1, newdf2])
"""
"""
newdf=dfread.loc[(dfread['A']==SumAvr)|(dfread['S']==SumRGB)]
newdf=newdf.loc[(newdf['color']==color)]
"""
newdf = dfread.loc[(dfread['color'] == color) | (dfread['A'
] == SumAvr) | (dfread['S'] == SumRGB)]
newdf.insert(1, 'Rdif', newdf[['R']].add(-RAvr))
newdf.insert(2, 'Gdif', newdf[['G']].add(-GAvr))
newdf.insert(3, 'Bdif', newdf[['B']].add(-BAvr))
newdf.insert(4, 'Adif', abs(newdf[['A']].add(-SumAvr)))
newdf.insert(5, 'Sdif', abs(newdf[['S']].add(-SumRGB)))
df = newdf.sort_values(by=['Sdif', 'Adif'], ascending=True
).head(100)
df.insert(1, 'dalta', abs(df['Rdif'] + df['Gdif'] + df['Bdif'])
)
df = df.sort_values(by=['dalta'], ascending=True)
data = df[['Serial no', 'color']]
group = data.groupby('Serial no')
datacount = group.count()
df = df.merge(datacount, left_on='Serial no', right_index=True)
df = df.sort_values(by=['color_y'], ascending=False)
df3 = df.drop_duplicates('Serial no', keep='first', inplace
=False).head()
print(df3)
df3.to_csv('.data base\\test_result.csv', index=False,
encoding='utf_8_sig')
if df3.empty == True:
root = tk.Tk()
root.withdraw()
                    messagebox.showinfo('Failed', 'No matching data found')
elif len(df3) <= 2:
root = tk.Tk()
root.withdraw()
                    messagebox.showinfo('Failed', 'Only a few records found\n Already saved in test_result')
else:
Zero = df3.loc[(df3['Rdif'] == 0) & (df3['Gdif'] == 0) &
(df3['Bdif'] == 0)]
Zero = Zero.head(3)
if Zero.empty == False:
Zero = Zero.drop(['R', 'G', 'B', 'dalta', 'Rdif',
'Gdif', 'Bdif', 'A', 'S', 'Adif', 'Sdif',
'color_x', 'color_y'], axis=1)
name = df3['Serial no'].tolist()
rate = df3['color_y'].tolist()
col = list(Zero.columns)
row_text = Zero.iloc[0].tolist()
df3 = df3.drop(['R', 'G', 'B', 'dalta', 'Rdif',
'Gdif', 'Bdif', 'A', 'S', 'Adif', 'Sdif',
'color_x', 'color_y'], axis=1)
row_df3 = df3.iloc[0].tolist()
row_df32 = df3.iloc[1].tolist()
row_df33 = df3.iloc[2].tolist()
Result_Print()
print('0')
print(Zero)
else:
filtdf = df3.loc[df3['A'] >= SumAvr]
filtdf = filtdf.sort_values(by=['Rdif', 'Gdif', 'Bdif']
).head()
Neg_filtdf = df3.loc[df3['A'] < SumAvr]
Neg_filtdf = Neg_filtdf.sort_values(by=['Rdif',
'Gdif', 'Bdif']).head()
if Neg_filtdf.empty == True and filtdf.empty == True:
root = tk.Tk()
root.withdraw()
                            messagebox.showinfo('Failed', 'No matching data found')
else:
filtdf = filtdf.drop(['R', 'G', 'B', 'dalta',
'Rdif', 'Gdif', 'Bdif', 'A', 'S', 'Adif',
'Sdif', 'color_x', 'color_y'], axis=1)
name = df3['Serial no'].tolist()
rate = df3['color_y'].tolist()
col = list(filtdf.columns)
row_text = filtdf.iloc[0].tolist()
df3 = df3.drop(['R', 'G', 'B', 'dalta', 'Rdif',
'Gdif', 'Bdif', 'A', 'S', 'Adif', 'Sdif',
'color_x', 'color_y'], axis=1)
row_df3 = df3.iloc[0].tolist()
row_df32 = df3.iloc[1].tolist()
row_df33 = df3.iloc[2].tolist()
Result_Print()
                            print('The closest is 1', filtdf.head(1))
def color_def(BAvr, GAvr, RAvr):
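    # Map an average (B, G, R) triple onto a coarse color-family name.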
global color
if abs(int(BAvr) - int(GAvr)) <= 1 and abs(int(BAvr) - int(RAvr)) <= 1:
color = 'White'
return color
elif BAvr >= GAvr and BAvr >= RAvr:
if BAvr - GAvr > 3 and BAvr - RAvr >= 3:
color = 'Blue'
return color
elif BAvr - GAvr < 3:
color = 'Cyan'
return color
else:
color = 'Purple'
return color
elif GAvr >= RAvr and GAvr >= BAvr:
if GAvr - RAvr > 3 or GAvr - BAvr > 3:
color = 'Green'
return color
elif GAvr - RAvr < 3:
color = 'Yellow'
return color
else:
color = 'Cyan'
return color
elif RAvr >= GAvr and RAvr >= BAvr:
if RAvr - GAvr >= 3 and RAvr - BAvr >= 3:
color = 'Red'
return color
elif RAvr - GAvr < 3:
color = 'Yellow'
return color
else:
color = 'Purple'
return color
else:
color = 'White'
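# Example: color_def(200, 120, 100) yields 'Blue', since the blue average
# exceeds both the green and the red averages by more than 3.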
img = cv2.imdecode(np.fromfile('.pure\\%s' % result, dtype=np.uint8), -1)
cv2.namedWindow('mouse_callback')
cv2.setMouseCallback('mouse_callback', CircleCallback)
def main():
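    # Show the image and poll for mouse events until ESC (keycode 27) is pressed.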
while True:
cv2.imshow('mouse_callback', img)
if cv2.waitKey(20) == 27:
break
cv2.destroyAllWindows()
if __name__ == '__main__':
main()
<|reserved_special_token_1|>
import cv2
import numpy as np
import pandas as pd
import tkinter as tk
import random
from tkinter import *
from tkinter import ttk
from tkinter import messagebox
from tkinter import Scale, Tk
from tkinter.ttk import Notebook
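# Color-analysis GUI: sample random pixels from a product image, average the
# sampled RGB values, classify the dominant color, look up the closest
# formulas in a CSV database, and present the matches in tabbed Tk tables.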
refPt = []
PtBGR = []
r = []
g = []
b = []
Serial = []
r1 = []
r2 = []
r3 = []
r4 = []
rate = []
rate2 = []
rate3 = []
r6 = []
r7 = []
r8 = []
r9 = []
add = []
add2 = []
add3 = []
color_name = []
locate = []
brand = []
boolean = False
root = tk.Tk()
root.geometry('400x200')
root.configure(background='white')
def quitScreen():
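    # Close the input dialog; the OpenCV analysis window takes over next.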
    messagebox.showinfo('collecting data', 'Click the window to start the analysis')
root.destroy()
root2 = Tk()
root2.destroy()
def getTextInput():
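    # Read the image filename and the database filename from the two text boxes.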
global result, result2
result = text.get(1.0, tk.END + '-1c')
result2 = text2.get(1.0, tk.END + '-1c')
img = PhotoImage(file='buttons/QJsmall.png')
panel = tk.Label(root, image=img)
panel.grid(row=0, column=0, columnspan=3)
labelmode = tk.Label(root, text="""Please enter the full image name
ex:104432 w7.jpg""", bg='white')
labelmode.configure(font=('微軟正黑體', 10))
labelmode.grid(row=1)
text = tk.Text(root, width=20, height=1)
text.insert('insert', '.jpg')
text.configure(font=('微軟正黑體', 10))
text.grid(row=1, column=2)
labelmode2 = tk.Label(root, text="""Please enter the database file name
ex:PureColorBig.csv""", bg=
    'white')
labelmode2.configure(font=('微軟正黑體', 10))
labelmode2.grid(row=2)
text2 = tk.Text(root, width=20, height=1)
text2.insert('insert', 'PureColorBig.csv')
text2.configure(font=('微軟正黑體', 10))
text2.grid(row=2, column=2)
img_confirm = PhotoImage(file='buttons/confirm.png')
img_start = PhotoImage(file='buttons/start.png')
btnRead = tk.Button(root, image=img_confirm, text=' ', relief='flat',
command=getTextInput)
btnRead.grid(row=5, column=1)
btnRead2 = tk.Button(root, image=img_start, text=' ', relief='flat',
command=quitScreen)
btnRead2.grid(row=5, column=2)
root.mainloop()
def Result_Print():
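    # Build two tabbed result tables: cross-matching results (tablayout2) and
    # color-classification results (tablayout), one tab per candidate formula.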
window = Tk()
    window.title('Analysis Results')
window.geometry('600x900')
frame2 = Frame(window)
frame2.pack(fill='both')
tablayout = Notebook(frame2)
tablayout2 = Notebook(frame2)
ntab1 = Frame(tablayout2)
ntab1.pack(fill='both')
for row in range(len(name_n)):
for column in range(1):
label = Label(ntab1, width=25, height=2, text=name_n[row], bg=
'black', fg='white', padx=1, pady=1)
label.grid(row=row, column=column, sticky='nsew', padx=1, pady=1)
ntab1.grid_columnconfigure(column, weight=1)
for row in range(len(name_n)):
for column in range(1):
label = Label(ntab1, width=5, height=2, text='%s' % rate_n[row],
bg='black', fg='white', padx=1, pady=1)
label.grid(row=row, column=1, sticky='nsew', padx=1, pady=1)
ntab1.grid_columnconfigure(column, weight=1)
for row in range(len(name_n)):
for column in range(1):
            label = Label(ntab1, width=12, height=2, text='% similarity', bg=
'black', fg='white', padx=1, pady=1)
label.grid(row=row, column=2, sticky='nsew', padx=1, pady=1)
ntab1.grid_columnconfigure(column, weight=1)
tablayout2.add(ntab1, text='交叉配對結果')
ntab2 = Frame(tablayout2)
ntab2.pack(fill='both')
for row in range(len(ncol)):
for column in range(1):
label = Label(ntab2, width=22, height=1, text=ncol[row], bg=
'black', fg='white', padx=1, pady=1)
label.grid(row=row, column=0, sticky='nsew', padx=1, pady=1)
ntab1.grid_columnconfigure(column, weight=1)
for row in range(len(ncol)):
for column in range(1):
label = Label(ntab2, width=22, height=1, text=row_nf3[row], bg=
'black', fg='white', padx=1, pady=1)
label.grid(row=row, column=1, sticky='nsew', padx=1, pady=1)
ntab1.grid_columnconfigure(column, weight=1)
tablayout2.add(ntab2, text='配方1')
ntab3 = Frame(tablayout2)
ntab3.pack(fill='both')
for row in range(len(ncol)):
for column in range(1):
label = Label(ntab3, width=22, height=1, text=ncol[row], bg=
'black', fg='white', padx=1, pady=1)
label.grid(row=row, column=0, sticky='nsew', padx=1, pady=1)
ntab1.grid_columnconfigure(column, weight=1)
for row in range(len(ncol)):
for column in range(1):
label = Label(ntab3, width=22, height=1, text=row_nf32[row], bg
='black', fg='white', padx=1, pady=1)
label.grid(row=row, column=1, sticky='nsew', padx=1, pady=1)
ntab1.grid_columnconfigure(column, weight=1)
tablayout2.add(ntab3, text='配方2')
ntab4 = Frame(tablayout2)
ntab4.pack(fill='both')
for row in range(len(ncol)):
for column in range(1):
label = Label(ntab4, width=22, height=1, text=ncol[row], bg=
'black', fg='white', padx=1, pady=1)
label.grid(row=row, column=0, sticky='nsew', padx=1, pady=1)
ntab1.grid_columnconfigure(column, weight=1)
for row in range(len(ncol)):
for column in range(1):
label = Label(ntab4, width=22, height=1, text=row_nf33[row], bg
='black', fg='white', padx=1, pady=1)
label.grid(row=row, column=1, sticky='nsew', padx=1, pady=1)
ntab1.grid_columnconfigure(column, weight=1)
tablayout2.add(ntab4, text='配方3')
ntab5 = Frame(tablayout2)
ntab5.pack(fill='both')
for row in range(len(ncol)):
for column in range(1):
label = Label(ntab5, width=22, height=1, text=ncol[row], bg=
'black', fg='white', padx=1, pady=1)
label.grid(row=row, column=0, sticky='nsew', padx=1, pady=1)
ntab1.grid_columnconfigure(column, weight=1)
for row in range(len(ncol)):
for column in range(1):
label = Label(ntab5, width=22, height=1, text=row_nf3[row], bg=
'black', fg='white', padx=1, pady=1)
label.grid(row=row, column=1, sticky='nsew', padx=1, pady=1)
ntab1.grid_columnconfigure(column, weight=1)
tablayout2.add(ntab5, text='最接近配方')
tab1 = Frame(tablayout)
tab1.pack(fill='both')
for row in range(len(name)):
for column in range(1):
label = Label(tab1, width=25, height=2, text=name[row], bg=
'black', fg='white', padx=1, pady=1)
label.grid(row=row, column=column, sticky='nsew', padx=1, pady=1)
tab1.grid_columnconfigure(column, weight=1)
for row in range(len(name)):
for column in range(1):
label = Label(tab1, width=5, height=2, text='%s' % rate[row],
bg='black', fg='white', padx=1, pady=1)
label.grid(row=row, column=1, sticky='nsew', padx=1, pady=1)
tab1.grid_columnconfigure(column, weight=1)
for row in range(len(name)):
for column in range(1):
label = Label(tab1, width=12, height=2, text='% 相似程度', bg=
'black', fg='white', padx=1, pady=1)
label.grid(row=row, column=2, sticky='nsew', padx=1, pady=1)
tab1.grid_columnconfigure(column, weight=1)
tablayout.add(tab1, text='顏色分類結果')
tab2 = Frame(tablayout)
tab2.pack(fill='both')
for row in range(len(col)):
for column in range(1):
label = Label(tab2, width=22, height=1, text=col[row], bg=
'black', fg='white', padx=1, pady=1)
label.grid(row=row, column=0, sticky='nsew', padx=1, pady=1)
tab1.grid_columnconfigure(column, weight=1)
for row in range(len(col)):
for column in range(1):
label = Label(tab2, width=22, height=1, text=row_df3[row], bg=
'black', fg='white', padx=1, pady=1)
label.grid(row=row, column=1, sticky='nsew', padx=1, pady=1)
tab1.grid_columnconfigure(column, weight=1)
tablayout.add(tab2, text='配方1')
tab3 = Frame(tablayout)
tab3.pack(fill='both')
for row in range(len(col)):
for column in range(1):
label = Label(tab3, width=22, height=1, text=col[row], bg=
'black', fg='white', padx=1, pady=1)
label.grid(row=row, column=0, sticky='nsew', padx=1, pady=1)
tab1.grid_columnconfigure(column, weight=1)
for row in range(len(col)):
for column in range(1):
label = Label(tab3, width=22, height=1, text=row_df32[row], bg=
'black', fg='white', padx=1, pady=1)
label.grid(row=row, column=1, sticky='nsew', padx=1, pady=1)
tab1.grid_columnconfigure(column, weight=1)
tablayout.add(tab3, text='配方2')
tab4 = Frame(tablayout)
tab4.pack(fill='both')
for row in range(len(col)):
for column in range(1):
label = Label(tab4, width=22, height=1, text=col[row], bg=
'black', fg='white', padx=1, pady=1)
label.grid(row=row, column=0, sticky='nsew', padx=1, pady=1)
tab1.grid_columnconfigure(column, weight=1)
for row in range(len(col)):
for column in range(1):
label = Label(tab4, width=22, height=1, text=row_df33[row], bg=
'black', fg='white', padx=1, pady=1)
label.grid(row=row, column=1, sticky='nsew', padx=1, pady=1)
tab1.grid_columnconfigure(column, weight=1)
tablayout.add(tab4, text='配方3')
tab5 = Frame(tablayout)
tab5.pack(fill='both')
for row in range(len(col)):
for column in range(1):
label = Label(tab5, width=22, height=1, text=col[row], bg=
'black', fg='white', padx=1, pady=1)
label.grid(row=row, column=0, sticky='nsew', padx=1, pady=1)
tab1.grid_columnconfigure(column, weight=1)
for row in range(len(col)):
for column in range(1):
label = Label(tab5, width=22, height=1, text=row_text[row], bg=
'black', fg='white', padx=1, pady=1)
label.grid(row=row, column=1, sticky='nsew', padx=1, pady=1)
tab1.grid_columnconfigure(column, weight=1)
tablayout.add(tab5, text='最接近配方')
tablayout.pack()
tablayout2.pack()
window.mainloop()
def CircleCallback(event, x, y, flags, param):
n = 8
global refPt, PtBGR, w, h, Serial, r1, r2, r3, r4, rate, rate2, rate3, r6, r7, r8, r9, add, add2, add3, color, b, g, r, df3, name, rate, col, row_text
global row_df3, row_df32, row_df33, row_text2, row_nf3, row_nf32, nf3, row_nf33, name_n, rate_n, ncol
if event == cv2.EVENT_LBUTTONDOWN:
n = 500
for c in range(0, n):
c += 1
ranx = random.randint(0, 499)
rany = random.randint(0, 499)
refPt.append((ranx, rany))
b, g, r = img[ranx, rany]
PtBGR.append((b, g, r))
b = [x[0] for x in PtBGR]
g = [x[1] for x in PtBGR]
r = [x[2] for x in PtBGR]
if len(refPt) == n:
BAvr = round(sum(b[0:n]) / n)
GAvr = round(sum(g[0:n]) / n)
RAvr = round(sum(r[0:n]) / n)
SumRGB = BAvr + GAvr + RAvr
SumAvr = round(SumRGB / 3)
color_def(BAvr, GAvr, RAvr)
color_name.append(color)
AvrRGB = {'R': RAvr, 'G': GAvr, 'B': BAvr, 'Sum': SumRGB,
'Avr': SumAvr, 'color': color_name}
df_test = pd.DataFrame(AvrRGB, index=[0])
dfread = pd.read_csv('.data base\\%s' % result2)
dfread['A'] = round((dfread['R'] + dfread['G'] + dfread['B'
]) / 3)
dfread['S'] = dfread['R'] + dfread['G'] + dfread['B']
nf = pd.DataFrame(list(zip(r, g, b)), columns=['R', 'G', 'B'])
nfread = dfread[['Serial no', 'R', 'G', 'B']]
loan = pd.merge(nf, nfread)
group = loan.groupby('Serial no')
Newnf = group.count()
Newnf['P'] = round(Newnf['R'] / Newnf['R'].sum() * 100)
Newnf = Newnf.sort_values(by=['R'], ascending=False)
Rate = Newnf['P'].tolist()
Newnf.columns = [' '.join(col).strip() for col in Newnf.
columns.values]
nf2 = pd.DataFrame(Newnf.to_records())
nf2 = nf2.head(5)
print(nf2)
if len(nf2['Serial no']) == 0:
i = 0
j = 0
k = 0
elif len(nf2['Serial no']) == 1:
i = nf2.at[0, 'Serial no']
j = 0
k = 0
elif len(nf2['Serial no']) == 2:
i = nf2.at[0, 'Serial no']
j = nf2.at[1, 'Serial no']
k = 0
else:
i = nf2.at[0, 'Serial no']
j = nf2.at[1, 'Serial no']
k = nf2.at[2, 'Serial no']
print(k)
nf3 = dfread.loc[dfread['Serial no'] == i].head(1)
nf4 = dfread.loc[dfread['Serial no'] == j].head(1)
nf5 = dfread.loc[dfread['Serial no'] == k].head(1)
nf3 = nf3.drop(['R', 'G', 'B', 'color', 'A', 'S'], axis=1)
nf4 = nf4.drop(['R', 'G', 'B', 'color', 'A', 'S'], axis=1)
nf5 = nf5.drop(['R', 'G', 'B', 'color', 'A', 'S'], axis=1)
nf = pd.concat([nf3, nf4, nf5])
nf.to_csv('.data base\\test_result2.csv', index=False,
encoding='utf_8_sig')
print(nf)
ncol = list(nf.columns)
if len(nf2['Serial no']) == 0:
root = tk.Tk()
root.withdraw()
messagebox.showinfo('失敗', '未找到符合資料')
elif len(nf2['Serial no']) == 1:
row_nf3 = nf3.iloc[0].tolist()
row_nf32 = ['x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x',
'x', 'x', 'x', 'x', 'x', 'x', 'x']
row_nf33 = ['x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x',
'x', 'x', 'x', 'x', 'x', 'x', 'x']
elif len(nf2['Serial no']) == 2:
row_nf3 = nf3.iloc[0].tolist()
row_nf32 = nf4.iloc[0].tolist()
row_nf33 = ['x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x',
'x', 'x', 'x', 'x', 'x', 'x', 'x']
else:
row_nf3 = nf3.iloc[0].tolist()
row_nf32 = nf4.iloc[0].tolist()
print(row_nf32)
row_nf33 = nf5.iloc[0].tolist()
name_n = nf['Serial no'].tolist()
rate_n = Rate
"""
newdf1=dfread.loc[(dfread['color']==color)|(dfread['A']==SumAvr)]
newdf2=dfread.loc[(dfread['S']<=(SumRGB+2))&(dfread['S']>=(SumRGB-2))]
newdf=pd.concat([newdf1, newdf2])
"""
"""
newdf=dfread.loc[(dfread['A']==SumAvr)|(dfread['S']==SumRGB)]
newdf=newdf.loc[(newdf['color']==color)]
"""
newdf = dfread.loc[(dfread['color'] == color) | (dfread['A'
] == SumAvr) | (dfread['S'] == SumRGB)]
newdf.insert(1, 'Rdif', newdf[['R']].add(-RAvr))
newdf.insert(2, 'Gdif', newdf[['G']].add(-GAvr))
newdf.insert(3, 'Bdif', newdf[['B']].add(-BAvr))
newdf.insert(4, 'Adif', abs(newdf[['A']].add(-SumAvr)))
newdf.insert(5, 'Sdif', abs(newdf[['S']].add(-SumRGB)))
df = newdf.sort_values(by=['Sdif', 'Adif'], ascending=True
).head(100)
df.insert(1, 'dalta', abs(df['Rdif'] + df['Gdif'] + df['Bdif'])
)
df = df.sort_values(by=['dalta'], ascending=True)
data = df[['Serial no', 'color']]
group = data.groupby('Serial no')
datacount = group.count()
df = df.merge(datacount, left_on='Serial no', right_index=True)
df = df.sort_values(by=['color_y'], ascending=False)
df3 = df.drop_duplicates('Serial no', keep='first', inplace
=False).head()
print(df3)
df3.to_csv('.data base\\test_result.csv', index=False,
encoding='utf_8_sig')
if df3.empty == True:
root = tk.Tk()
root.withdraw()
messagebox.showinfo('失敗', '未找到符合資料')
elif len(df3) <= 2:
root = tk.Tk()
root.withdraw()
messagebox.showinfo('失敗', '只找到少數資料\n 已存在test_result')
else:
Zero = df3.loc[(df3['Rdif'] == 0) & (df3['Gdif'] == 0) &
(df3['Bdif'] == 0)]
Zero = Zero.head(3)
if Zero.empty == False:
Zero = Zero.drop(['R', 'G', 'B', 'dalta', 'Rdif',
'Gdif', 'Bdif', 'A', 'S', 'Adif', 'Sdif',
'color_x', 'color_y'], axis=1)
name = df3['Serial no'].tolist()
rate = df3['color_y'].tolist()
col = list(Zero.columns)
row_text = Zero.iloc[0].tolist()
df3 = df3.drop(['R', 'G', 'B', 'dalta', 'Rdif',
'Gdif', 'Bdif', 'A', 'S', 'Adif', 'Sdif',
'color_x', 'color_y'], axis=1)
row_df3 = df3.iloc[0].tolist()
row_df32 = df3.iloc[1].tolist()
row_df33 = df3.iloc[2].tolist()
Result_Print()
print('0')
print(Zero)
else:
filtdf = df3.loc[df3['A'] >= SumAvr]
filtdf = filtdf.sort_values(by=['Rdif', 'Gdif', 'Bdif']
).head()
Neg_filtdf = df3.loc[df3['A'] < SumAvr]
Neg_filtdf = Neg_filtdf.sort_values(by=['Rdif',
'Gdif', 'Bdif']).head()
if Neg_filtdf.empty == True and filtdf.empty == True:
root = tk.Tk()
root.withdraw()
messagebox.showinfo('失敗', '未找到符合資料')
else:
filtdf = filtdf.drop(['R', 'G', 'B', 'dalta',
'Rdif', 'Gdif', 'Bdif', 'A', 'S', 'Adif',
'Sdif', 'color_x', 'color_y'], axis=1)
name = df3['Serial no'].tolist()
rate = df3['color_y'].tolist()
col = list(filtdf.columns)
row_text = filtdf.iloc[0].tolist()
df3 = df3.drop(['R', 'G', 'B', 'dalta', 'Rdif',
'Gdif', 'Bdif', 'A', 'S', 'Adif', 'Sdif',
'color_x', 'color_y'], axis=1)
row_df3 = df3.iloc[0].tolist()
row_df32 = df3.iloc[1].tolist()
row_df33 = df3.iloc[2].tolist()
Result_Print()
print('最接近的為1', filtdf.head(1))
def color_def(BAvr, GAvr, RAvr):
global color
if abs(int(BAvr) - int(GAvr)) <= 1 and abs(int(BAvr) - int(RAvr)) <= 1:
color = 'White'
return color
elif BAvr >= GAvr and BAvr >= RAvr:
if BAvr - GAvr > 3 and BAvr - RAvr >= 3:
color = 'Blue'
return color
elif BAvr - GAvr < 3:
color = 'Cyan'
return color
else:
color = 'Purple'
return color
elif GAvr >= RAvr and GAvr >= BAvr:
if GAvr - RAvr > 3 or GAvr - BAvr > 3:
color = 'Green'
return color
elif GAvr - RAvr < 3:
color = 'Yellow'
return color
else:
color = 'Cyan'
return color
elif RAvr >= GAvr and RAvr >= BAvr:
if RAvr - GAvr >= 3 and RAvr - BAvr >= 3:
color = 'Red'
return color
elif RAvr - GAvr < 3:
color = 'Yellow'
return color
else:
color = 'Purple'
return color
else:
color = 'White'
img = cv2.imdecode(np.fromfile('.pure\\%s' % result, dtype=np.uint8), -1)
cv2.namedWindow('mouse_callback')
cv2.setMouseCallback('mouse_callback', CircleCallback)
def main():
while True:
cv2.imshow('mouse_callback', img)
if cv2.waitKey(20) == 27:
break
cv2.destroyAllWindows()
if __name__ == '__main__':
main()
'black', fg='white', padx=1, pady=1)\n label.grid(row=row, column=0, sticky='nsew', padx=1, pady=1)\n tab1.grid_columnconfigure(column, weight=1)\n for row in range(len(col)):\n for column in range(1):\n label = Label(tab3, width=22, height=1, text=row_df32[row], bg=\n 'black', fg='white', padx=1, pady=1)\n label.grid(row=row, column=1, sticky='nsew', padx=1, pady=1)\n tab1.grid_columnconfigure(column, weight=1)\n tablayout.add(tab3, text='配方2')\n tab4 = Frame(tablayout)\n tab4.pack(fill='both')\n for row in range(len(col)):\n for column in range(1):\n label = Label(tab4, width=22, height=1, text=col[row], bg=\n 'black', fg='white', padx=1, pady=1)\n label.grid(row=row, column=0, sticky='nsew', padx=1, pady=1)\n tab1.grid_columnconfigure(column, weight=1)\n for row in range(len(col)):\n for column in range(1):\n label = Label(tab4, width=22, height=1, text=row_df33[row], bg=\n 'black', fg='white', padx=1, pady=1)\n label.grid(row=row, column=1, sticky='nsew', padx=1, pady=1)\n tab1.grid_columnconfigure(column, weight=1)\n tablayout.add(tab4, text='配方3')\n tab5 = Frame(tablayout)\n tab5.pack(fill='both')\n for row in range(len(col)):\n for column in range(1):\n label = Label(tab5, width=22, height=1, text=col[row], bg=\n 'black', fg='white', padx=1, pady=1)\n label.grid(row=row, column=0, sticky='nsew', padx=1, pady=1)\n tab1.grid_columnconfigure(column, weight=1)\n for row in range(len(col)):\n for column in range(1):\n label = Label(tab5, width=22, height=1, text=row_text[row], bg=\n 'black', fg='white', padx=1, pady=1)\n label.grid(row=row, column=1, sticky='nsew', padx=1, pady=1)\n tab1.grid_columnconfigure(column, weight=1)\n tablayout.add(tab5, text='最接近配方')\n tablayout.pack()\n tablayout2.pack()\n window.mainloop()\n\n\ndef CircleCallback(event, x, y, flags, param):\n n = 8\n global refPt, PtBGR, w, h, Serial, r1, r2, r3, r4, rate, rate2, rate3, r6, r7, r8, r9, add, add2, add3, color, b, g, r, df3, name, rate, col, row_text\n global row_df3, row_df32, row_df33, row_text2, row_nf3, row_nf32, nf3, row_nf33, name_n, rate_n, ncol\n if event == cv2.EVENT_LBUTTONDOWN:\n n = 500\n for c in range(0, n):\n c += 1\n ranx = random.randint(0, 499)\n rany = random.randint(0, 499)\n refPt.append((ranx, rany))\n b, g, r = img[ranx, rany]\n PtBGR.append((b, g, r))\n b = [x[0] for x in PtBGR]\n g = [x[1] for x in PtBGR]\n r = [x[2] for x in PtBGR]\n if len(refPt) == n:\n BAvr = round(sum(b[0:n]) / n)\n GAvr = round(sum(g[0:n]) / n)\n RAvr = round(sum(r[0:n]) / n)\n SumRGB = BAvr + GAvr + RAvr\n SumAvr = round(SumRGB / 3)\n color_def(BAvr, GAvr, RAvr)\n color_name.append(color)\n AvrRGB = {'R': RAvr, 'G': GAvr, 'B': BAvr, 'Sum': SumRGB,\n 'Avr': SumAvr, 'color': color_name}\n df_test = pd.DataFrame(AvrRGB, index=[0])\n dfread = pd.read_csv('.data base\\\\%s' % result2)\n dfread['A'] = round((dfread['R'] + dfread['G'] + dfread['B'\n ]) / 3)\n dfread['S'] = dfread['R'] + dfread['G'] + dfread['B']\n nf = pd.DataFrame(list(zip(r, g, b)), columns=['R', 'G', 'B'])\n nfread = dfread[['Serial no', 'R', 'G', 'B']]\n loan = pd.merge(nf, nfread)\n group = loan.groupby('Serial no')\n Newnf = group.count()\n Newnf['P'] = round(Newnf['R'] / Newnf['R'].sum() * 100)\n Newnf = Newnf.sort_values(by=['R'], ascending=False)\n Rate = Newnf['P'].tolist()\n Newnf.columns = [' '.join(col).strip() for col in Newnf.\n columns.values]\n nf2 = pd.DataFrame(Newnf.to_records())\n nf2 = nf2.head(5)\n print(nf2)\n if len(nf2['Serial no']) == 0:\n i = 0\n j = 0\n k = 0\n elif len(nf2['Serial no']) == 1:\n i = nf2.at[0, 'Serial no']\n 
j = 0\n k = 0\n elif len(nf2['Serial no']) == 2:\n i = nf2.at[0, 'Serial no']\n j = nf2.at[1, 'Serial no']\n k = 0\n else:\n i = nf2.at[0, 'Serial no']\n j = nf2.at[1, 'Serial no']\n k = nf2.at[2, 'Serial no']\n print(k)\n nf3 = dfread.loc[dfread['Serial no'] == i].head(1)\n nf4 = dfread.loc[dfread['Serial no'] == j].head(1)\n nf5 = dfread.loc[dfread['Serial no'] == k].head(1)\n nf3 = nf3.drop(['R', 'G', 'B', 'color', 'A', 'S'], axis=1)\n nf4 = nf4.drop(['R', 'G', 'B', 'color', 'A', 'S'], axis=1)\n nf5 = nf5.drop(['R', 'G', 'B', 'color', 'A', 'S'], axis=1)\n nf = pd.concat([nf3, nf4, nf5])\n nf.to_csv('.data base\\\\test_result2.csv', index=False,\n encoding='utf_8_sig')\n print(nf)\n ncol = list(nf.columns)\n if len(nf2['Serial no']) == 0:\n root = tk.Tk()\n root.withdraw()\n messagebox.showinfo('失敗', '未找到符合資料')\n elif len(nf2['Serial no']) == 1:\n row_nf3 = nf3.iloc[0].tolist()\n row_nf32 = ['x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x',\n 'x', 'x', 'x', 'x', 'x', 'x', 'x']\n row_nf33 = ['x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x',\n 'x', 'x', 'x', 'x', 'x', 'x', 'x']\n elif len(nf2['Serial no']) == 2:\n row_nf3 = nf3.iloc[0].tolist()\n row_nf32 = nf4.iloc[0].tolist()\n row_nf33 = ['x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x',\n 'x', 'x', 'x', 'x', 'x', 'x', 'x']\n else:\n row_nf3 = nf3.iloc[0].tolist()\n row_nf32 = nf4.iloc[0].tolist()\n print(row_nf32)\n row_nf33 = nf5.iloc[0].tolist()\n name_n = nf['Serial no'].tolist()\n rate_n = Rate\n \"\"\"\n newdf1=dfread.loc[(dfread['color']==color)|(dfread['A']==SumAvr)]\n newdf2=dfread.loc[(dfread['S']<=(SumRGB+2))&(dfread['S']>=(SumRGB-2))]\n newdf=pd.concat([newdf1, newdf2])\n \"\"\"\n \"\"\"\n newdf=dfread.loc[(dfread['A']==SumAvr)|(dfread['S']==SumRGB)]\n newdf=newdf.loc[(newdf['color']==color)]\n \"\"\"\n newdf = dfread.loc[(dfread['color'] == color) | (dfread['A'\n ] == SumAvr) | (dfread['S'] == SumRGB)]\n newdf.insert(1, 'Rdif', newdf[['R']].add(-RAvr))\n newdf.insert(2, 'Gdif', newdf[['G']].add(-GAvr))\n newdf.insert(3, 'Bdif', newdf[['B']].add(-BAvr))\n newdf.insert(4, 'Adif', abs(newdf[['A']].add(-SumAvr)))\n newdf.insert(5, 'Sdif', abs(newdf[['S']].add(-SumRGB)))\n df = newdf.sort_values(by=['Sdif', 'Adif'], ascending=True\n ).head(100)\n df.insert(1, 'dalta', abs(df['Rdif'] + df['Gdif'] + df['Bdif'])\n )\n df = df.sort_values(by=['dalta'], ascending=True)\n data = df[['Serial no', 'color']]\n group = data.groupby('Serial no')\n datacount = group.count()\n df = df.merge(datacount, left_on='Serial no', right_index=True)\n df = df.sort_values(by=['color_y'], ascending=False)\n df3 = df.drop_duplicates('Serial no', keep='first', inplace\n =False).head()\n print(df3)\n df3.to_csv('.data base\\\\test_result.csv', index=False,\n encoding='utf_8_sig')\n if df3.empty == True:\n root = tk.Tk()\n root.withdraw()\n messagebox.showinfo('失敗', '未找到符合資料')\n elif len(df3) <= 2:\n root = tk.Tk()\n root.withdraw()\n messagebox.showinfo('失敗', '只找到少數資料\\n 已存在test_result')\n else:\n Zero = df3.loc[(df3['Rdif'] == 0) & (df3['Gdif'] == 0) &\n (df3['Bdif'] == 0)]\n Zero = Zero.head(3)\n if Zero.empty == False:\n Zero = Zero.drop(['R', 'G', 'B', 'dalta', 'Rdif',\n 'Gdif', 'Bdif', 'A', 'S', 'Adif', 'Sdif',\n 'color_x', 'color_y'], axis=1)\n name = df3['Serial no'].tolist()\n rate = df3['color_y'].tolist()\n col = list(Zero.columns)\n row_text = Zero.iloc[0].tolist()\n df3 = df3.drop(['R', 'G', 'B', 'dalta', 'Rdif',\n 'Gdif', 'Bdif', 'A', 'S', 'Adif', 'Sdif',\n 'color_x', 'color_y'], axis=1)\n row_df3 = df3.iloc[0].tolist()\n row_df32 = df3.iloc[1].tolist()\n 
row_df33 = df3.iloc[2].tolist()\n Result_Print()\n print('0')\n print(Zero)\n else:\n filtdf = df3.loc[df3['A'] >= SumAvr]\n filtdf = filtdf.sort_values(by=['Rdif', 'Gdif', 'Bdif']\n ).head()\n Neg_filtdf = df3.loc[df3['A'] < SumAvr]\n Neg_filtdf = Neg_filtdf.sort_values(by=['Rdif',\n 'Gdif', 'Bdif']).head()\n if Neg_filtdf.empty == True and filtdf.empty == True:\n root = tk.Tk()\n root.withdraw()\n messagebox.showinfo('失敗', '未找到符合資料')\n else:\n filtdf = filtdf.drop(['R', 'G', 'B', 'dalta',\n 'Rdif', 'Gdif', 'Bdif', 'A', 'S', 'Adif',\n 'Sdif', 'color_x', 'color_y'], axis=1)\n name = df3['Serial no'].tolist()\n rate = df3['color_y'].tolist()\n col = list(filtdf.columns)\n row_text = filtdf.iloc[0].tolist()\n df3 = df3.drop(['R', 'G', 'B', 'dalta', 'Rdif',\n 'Gdif', 'Bdif', 'A', 'S', 'Adif', 'Sdif',\n 'color_x', 'color_y'], axis=1)\n row_df3 = df3.iloc[0].tolist()\n row_df32 = df3.iloc[1].tolist()\n row_df33 = df3.iloc[2].tolist()\n Result_Print()\n print('最接近的為1', filtdf.head(1))\n\n\ndef color_def(BAvr, GAvr, RAvr):\n global color\n if abs(int(BAvr) - int(GAvr)) <= 1 and abs(int(BAvr) - int(RAvr)) <= 1:\n color = 'White'\n return color\n elif BAvr >= GAvr and BAvr >= RAvr:\n if BAvr - GAvr > 3 and BAvr - RAvr >= 3:\n color = 'Blue'\n return color\n elif BAvr - GAvr < 3:\n color = 'Cyan'\n return color\n else:\n color = 'Purple'\n return color\n elif GAvr >= RAvr and GAvr >= BAvr:\n if GAvr - RAvr > 3 or GAvr - BAvr > 3:\n color = 'Green'\n return color\n elif GAvr - RAvr < 3:\n color = 'Yellow'\n return color\n else:\n color = 'Cyan'\n return color\n elif RAvr >= GAvr and RAvr >= BAvr:\n if RAvr - GAvr >= 3 and RAvr - BAvr >= 3:\n color = 'Red'\n return color\n elif RAvr - GAvr < 3:\n color = 'Yellow'\n return color\n else:\n color = 'Purple'\n return color\n else:\n color = 'White'\n\n\nimg = cv2.imdecode(np.fromfile('.pure\\\\%s' % result, dtype=np.uint8), -1)\ncv2.namedWindow('mouse_callback')\ncv2.setMouseCallback('mouse_callback', CircleCallback)\n\n\ndef main():\n while True:\n cv2.imshow('mouse_callback', img)\n if cv2.waitKey(20) == 27:\n break\n cv2.destroyAllWindows()\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "import cv2\nimport numpy as np\nimport pandas as pd\nimport tkinter as tk\nimport random\nfrom tkinter import *\nfrom tkinter import ttk\nfrom tkinter import messagebox\nfrom tkinter import Scale, Tk\nfrom tkinter.ttk import Notebook\nrefPt = []\nPtBGR = []\nr = []\ng = []\nb = []\nrefPt = []\nSerial = []\nPtBGR = []\nr1 = []\nr2 = []\nr3 = []\nr4 = []\nrate = []\nrate2 = []\nrate3 = []\nr6 = []\nr7 = []\nr8 = []\nr9 = []\nadd = []\nadd2 = []\nadd3 = []\ncolor_name = []\nlocate = []\nbrand = []\nboolean = False\nroot = tk.Tk()\nroot.geometry('400x200')\nroot.configure(background='white')\n\n\ndef quitScreen():\n messagebox.showinfo('collecting data', '點擊視窗開始分析')\n root.destroy()\n root2 = Tk()\n root2.destroy()\n\n\ndef getTextInput():\n global result, result2\n result = text.get(1.0, tk.END + '-1c')\n result2 = text2.get(1.0, tk.END + '-1c')\n\n\nimg = PhotoImage(file='buttons/QJsmall.png')\npanel = tk.Label(root, image=img)\npanel.grid(row=0, column=0, columnspan=3)\nlabelmode = tk.Label(root, text=\"\"\"請輸入圖片完整名稱\n ex:104432 w7.jpg\"\"\", bg='white')\nlabelmode.configure(font=('微軟正黑體', 10))\nlabelmode.grid(row=1)\ntext = tk.Text(root, width=20, height=1)\ntext.insert('insert', '.jpg')\ntext.configure(font=('微軟正黑體', 10))\ntext.grid(row=1, column=2)\nlabelmode2 = tk.Label(root, text=\"\"\"請輸入讀取資料庫名稱\n ex:PureColorBig.csv\"\"\", bg=\n 'white')\nlabelmode2.configure(font=('微軟正黑體', 10))\nlabelmode2.grid(row=2)\ntext2 = tk.Text(root, width=20, height=1)\ntext2.insert('insert', 'PureColorBig.csv')\ntext2.configure(font=('微軟正黑體', 10))\ntext2.grid(row=2, column=2)\nimg_confirm = PhotoImage(file='buttons/confirm.png')\nimg_start = PhotoImage(file='buttons/start.png')\nbtnRead = tk.Button(root, image=img_confirm, text=' ', relief='flat',\n command=getTextInput)\nbtnRead.grid(row=5, column=1)\nbtnRead2 = tk.Button(root, image=img_start, text=' ', relief='flat',\n command=quitScreen)\nbtnRead2.grid(row=5, column=2)\nroot.mainloop()\n\n\ndef Result_Print():\n window = Tk()\n window.title('分析結果')\n window.geometry('600x900')\n frame2 = Frame(window)\n frame2.pack(fill='both')\n tablayout = Notebook(frame2)\n tablayout2 = Notebook(frame2)\n ntab1 = Frame(tablayout2)\n ntab1.pack(fill='both')\n for row in range(len(name_n)):\n for column in range(1):\n label = Label(ntab1, width=25, height=2, text=name_n[row], bg=\n 'black', fg='white', padx=1, pady=1)\n label.grid(row=row, column=column, sticky='nsew', padx=1, pady=1)\n ntab1.grid_columnconfigure(column, weight=1)\n for row in range(len(name_n)):\n for column in range(1):\n label = Label(ntab1, width=5, height=2, text='%s' % rate_n[row],\n bg='black', fg='white', padx=1, pady=1)\n label.grid(row=row, column=1, sticky='nsew', padx=1, pady=1)\n ntab1.grid_columnconfigure(column, weight=1)\n for row in range(len(name_n)):\n for column in range(1):\n label = Label(ntab1, width=12, height=2, text='% 相似程度', bg=\n 'black', fg='white', padx=1, pady=1)\n label.grid(row=row, column=2, sticky='nsew', padx=1, pady=1)\n ntab1.grid_columnconfigure(column, weight=1)\n tablayout2.add(ntab1, text='交叉配對結果')\n ntab2 = Frame(tablayout2)\n ntab2.pack(fill='both')\n for row in range(len(ncol)):\n for column in range(1):\n label = Label(ntab2, width=22, height=1, text=ncol[row], bg=\n 'black', fg='white', padx=1, pady=1)\n label.grid(row=row, column=0, sticky='nsew', padx=1, pady=1)\n ntab1.grid_columnconfigure(column, weight=1)\n for row in range(len(ncol)):\n for column in range(1):\n label = Label(ntab2, width=22, height=1, text=row_nf3[row], bg=\n 'black', 
fg='white', padx=1, pady=1)\n label.grid(row=row, column=1, sticky='nsew', padx=1, pady=1)\n ntab1.grid_columnconfigure(column, weight=1)\n tablayout2.add(ntab2, text='配方1')\n ntab3 = Frame(tablayout2)\n ntab3.pack(fill='both')\n for row in range(len(ncol)):\n for column in range(1):\n label = Label(ntab3, width=22, height=1, text=ncol[row], bg=\n 'black', fg='white', padx=1, pady=1)\n label.grid(row=row, column=0, sticky='nsew', padx=1, pady=1)\n ntab1.grid_columnconfigure(column, weight=1)\n for row in range(len(ncol)):\n for column in range(1):\n label = Label(ntab3, width=22, height=1, text=row_nf32[row], bg\n ='black', fg='white', padx=1, pady=1)\n label.grid(row=row, column=1, sticky='nsew', padx=1, pady=1)\n ntab1.grid_columnconfigure(column, weight=1)\n tablayout2.add(ntab3, text='配方2')\n ntab4 = Frame(tablayout2)\n ntab4.pack(fill='both')\n for row in range(len(ncol)):\n for column in range(1):\n label = Label(ntab4, width=22, height=1, text=ncol[row], bg=\n 'black', fg='white', padx=1, pady=1)\n label.grid(row=row, column=0, sticky='nsew', padx=1, pady=1)\n ntab1.grid_columnconfigure(column, weight=1)\n for row in range(len(ncol)):\n for column in range(1):\n label = Label(ntab4, width=22, height=1, text=row_nf33[row], bg\n ='black', fg='white', padx=1, pady=1)\n label.grid(row=row, column=1, sticky='nsew', padx=1, pady=1)\n ntab1.grid_columnconfigure(column, weight=1)\n tablayout2.add(ntab4, text='配方3')\n ntab5 = Frame(tablayout2)\n ntab5.pack(fill='both')\n for row in range(len(ncol)):\n for column in range(1):\n label = Label(ntab5, width=22, height=1, text=ncol[row], bg=\n 'black', fg='white', padx=1, pady=1)\n label.grid(row=row, column=0, sticky='nsew', padx=1, pady=1)\n ntab1.grid_columnconfigure(column, weight=1)\n for row in range(len(ncol)):\n for column in range(1):\n label = Label(ntab5, width=22, height=1, text=row_nf3[row], bg=\n 'black', fg='white', padx=1, pady=1)\n label.grid(row=row, column=1, sticky='nsew', padx=1, pady=1)\n ntab1.grid_columnconfigure(column, weight=1)\n tablayout2.add(ntab5, text='最接近配方')\n tab1 = Frame(tablayout)\n tab1.pack(fill='both')\n for row in range(len(name)):\n for column in range(1):\n label = Label(tab1, width=25, height=2, text=name[row], bg=\n 'black', fg='white', padx=1, pady=1)\n label.grid(row=row, column=column, sticky='nsew', padx=1, pady=1)\n tab1.grid_columnconfigure(column, weight=1)\n for row in range(len(name)):\n for column in range(1):\n label = Label(tab1, width=5, height=2, text='%s' % rate[row],\n bg='black', fg='white', padx=1, pady=1)\n label.grid(row=row, column=1, sticky='nsew', padx=1, pady=1)\n tab1.grid_columnconfigure(column, weight=1)\n for row in range(len(name)):\n for column in range(1):\n label = Label(tab1, width=12, height=2, text='% 相似程度', bg=\n 'black', fg='white', padx=1, pady=1)\n label.grid(row=row, column=2, sticky='nsew', padx=1, pady=1)\n tab1.grid_columnconfigure(column, weight=1)\n tablayout.add(tab1, text='顏色分類結果')\n tab2 = Frame(tablayout)\n tab2.pack(fill='both')\n for row in range(len(col)):\n for column in range(1):\n label = Label(tab2, width=22, height=1, text=col[row], bg=\n 'black', fg='white', padx=1, pady=1)\n label.grid(row=row, column=0, sticky='nsew', padx=1, pady=1)\n tab1.grid_columnconfigure(column, weight=1)\n for row in range(len(col)):\n for column in range(1):\n label = Label(tab2, width=22, height=1, text=row_df3[row], bg=\n 'black', fg='white', padx=1, pady=1)\n label.grid(row=row, column=1, sticky='nsew', padx=1, pady=1)\n tab1.grid_columnconfigure(column, 
weight=1)\n tablayout.add(tab2, text='配方1')\n tab3 = Frame(tablayout)\n tab3.pack(fill='both')\n for row in range(len(col)):\n for column in range(1):\n label = Label(tab3, width=22, height=1, text=col[row], bg=\n 'black', fg='white', padx=1, pady=1)\n label.grid(row=row, column=0, sticky='nsew', padx=1, pady=1)\n tab1.grid_columnconfigure(column, weight=1)\n for row in range(len(col)):\n for column in range(1):\n label = Label(tab3, width=22, height=1, text=row_df32[row], bg=\n 'black', fg='white', padx=1, pady=1)\n label.grid(row=row, column=1, sticky='nsew', padx=1, pady=1)\n tab1.grid_columnconfigure(column, weight=1)\n tablayout.add(tab3, text='配方2')\n tab4 = Frame(tablayout)\n tab4.pack(fill='both')\n for row in range(len(col)):\n for column in range(1):\n label = Label(tab4, width=22, height=1, text=col[row], bg=\n 'black', fg='white', padx=1, pady=1)\n label.grid(row=row, column=0, sticky='nsew', padx=1, pady=1)\n tab1.grid_columnconfigure(column, weight=1)\n for row in range(len(col)):\n for column in range(1):\n label = Label(tab4, width=22, height=1, text=row_df33[row], bg=\n 'black', fg='white', padx=1, pady=1)\n label.grid(row=row, column=1, sticky='nsew', padx=1, pady=1)\n tab1.grid_columnconfigure(column, weight=1)\n tablayout.add(tab4, text='配方3')\n tab5 = Frame(tablayout)\n tab5.pack(fill='both')\n for row in range(len(col)):\n for column in range(1):\n label = Label(tab5, width=22, height=1, text=col[row], bg=\n 'black', fg='white', padx=1, pady=1)\n label.grid(row=row, column=0, sticky='nsew', padx=1, pady=1)\n tab1.grid_columnconfigure(column, weight=1)\n for row in range(len(col)):\n for column in range(1):\n label = Label(tab5, width=22, height=1, text=row_text[row], bg=\n 'black', fg='white', padx=1, pady=1)\n label.grid(row=row, column=1, sticky='nsew', padx=1, pady=1)\n tab1.grid_columnconfigure(column, weight=1)\n tablayout.add(tab5, text='最接近配方')\n tablayout.pack()\n tablayout2.pack()\n window.mainloop()\n\n\ndef CircleCallback(event, x, y, flags, param):\n n = 8\n global refPt, PtBGR, w, h, Serial, r1, r2, r3, r4, rate, rate2, rate3, r6, r7, r8, r9, add, add2, add3, color, b, g, r, df3, name, rate, col, row_text\n global row_df3, row_df32, row_df33, row_text2, row_nf3, row_nf32, nf3, row_nf33, name_n, rate_n, ncol\n if event == cv2.EVENT_LBUTTONDOWN:\n n = 500\n for c in range(0, n):\n c += 1\n ranx = random.randint(0, 499)\n rany = random.randint(0, 499)\n refPt.append((ranx, rany))\n b, g, r = img[ranx, rany]\n PtBGR.append((b, g, r))\n b = [x[0] for x in PtBGR]\n g = [x[1] for x in PtBGR]\n r = [x[2] for x in PtBGR]\n if len(refPt) == n:\n BAvr = round(sum(b[0:n]) / n)\n GAvr = round(sum(g[0:n]) / n)\n RAvr = round(sum(r[0:n]) / n)\n SumRGB = BAvr + GAvr + RAvr\n SumAvr = round(SumRGB / 3)\n color_def(BAvr, GAvr, RAvr)\n color_name.append(color)\n AvrRGB = {'R': RAvr, 'G': GAvr, 'B': BAvr, 'Sum': SumRGB,\n 'Avr': SumAvr, 'color': color_name}\n df_test = pd.DataFrame(AvrRGB, index=[0])\n dfread = pd.read_csv('.data base\\\\%s' % result2)\n dfread['A'] = round((dfread['R'] + dfread['G'] + dfread['B'\n ]) / 3)\n dfread['S'] = dfread['R'] + dfread['G'] + dfread['B']\n nf = pd.DataFrame(list(zip(r, g, b)), columns=['R', 'G', 'B'])\n nfread = dfread[['Serial no', 'R', 'G', 'B']]\n loan = pd.merge(nf, nfread)\n group = loan.groupby('Serial no')\n Newnf = group.count()\n Newnf['P'] = round(Newnf['R'] / Newnf['R'].sum() * 100)\n Newnf = Newnf.sort_values(by=['R'], ascending=False)\n Rate = Newnf['P'].tolist()\n Newnf.columns = [' '.join(col).strip() for col in 
Newnf.\n columns.values]\n nf2 = pd.DataFrame(Newnf.to_records())\n nf2 = nf2.head(5)\n print(nf2)\n if len(nf2['Serial no']) == 0:\n i = 0\n j = 0\n k = 0\n elif len(nf2['Serial no']) == 1:\n i = nf2.at[0, 'Serial no']\n j = 0\n k = 0\n elif len(nf2['Serial no']) == 2:\n i = nf2.at[0, 'Serial no']\n j = nf2.at[1, 'Serial no']\n k = 0\n else:\n i = nf2.at[0, 'Serial no']\n j = nf2.at[1, 'Serial no']\n k = nf2.at[2, 'Serial no']\n print(k)\n nf3 = dfread.loc[dfread['Serial no'] == i].head(1)\n nf4 = dfread.loc[dfread['Serial no'] == j].head(1)\n nf5 = dfread.loc[dfread['Serial no'] == k].head(1)\n nf3 = nf3.drop(['R', 'G', 'B', 'color', 'A', 'S'], axis=1)\n nf4 = nf4.drop(['R', 'G', 'B', 'color', 'A', 'S'], axis=1)\n nf5 = nf5.drop(['R', 'G', 'B', 'color', 'A', 'S'], axis=1)\n nf = pd.concat([nf3, nf4, nf5])\n nf.to_csv('.data base\\\\test_result2.csv', index=False,\n encoding='utf_8_sig')\n print(nf)\n ncol = list(nf.columns)\n if len(nf2['Serial no']) == 0:\n root = tk.Tk()\n root.withdraw()\n messagebox.showinfo('失敗', '未找到符合資料')\n elif len(nf2['Serial no']) == 1:\n row_nf3 = nf3.iloc[0].tolist()\n row_nf32 = ['x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x',\n 'x', 'x', 'x', 'x', 'x', 'x', 'x']\n row_nf33 = ['x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x',\n 'x', 'x', 'x', 'x', 'x', 'x', 'x']\n elif len(nf2['Serial no']) == 2:\n row_nf3 = nf3.iloc[0].tolist()\n row_nf32 = nf4.iloc[0].tolist()\n row_nf33 = ['x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x',\n 'x', 'x', 'x', 'x', 'x', 'x', 'x']\n else:\n row_nf3 = nf3.iloc[0].tolist()\n row_nf32 = nf4.iloc[0].tolist()\n print(row_nf32)\n row_nf33 = nf5.iloc[0].tolist()\n name_n = nf['Serial no'].tolist()\n rate_n = Rate\n \"\"\"\n newdf1=dfread.loc[(dfread['color']==color)|(dfread['A']==SumAvr)]\n newdf2=dfread.loc[(dfread['S']<=(SumRGB+2))&(dfread['S']>=(SumRGB-2))]\n newdf=pd.concat([newdf1, newdf2])\n \"\"\"\n \"\"\"\n newdf=dfread.loc[(dfread['A']==SumAvr)|(dfread['S']==SumRGB)]\n newdf=newdf.loc[(newdf['color']==color)]\n \"\"\"\n newdf = dfread.loc[(dfread['color'] == color) | (dfread['A'\n ] == SumAvr) | (dfread['S'] == SumRGB)]\n newdf.insert(1, 'Rdif', newdf[['R']].add(-RAvr))\n newdf.insert(2, 'Gdif', newdf[['G']].add(-GAvr))\n newdf.insert(3, 'Bdif', newdf[['B']].add(-BAvr))\n newdf.insert(4, 'Adif', abs(newdf[['A']].add(-SumAvr)))\n newdf.insert(5, 'Sdif', abs(newdf[['S']].add(-SumRGB)))\n df = newdf.sort_values(by=['Sdif', 'Adif'], ascending=True\n ).head(100)\n df.insert(1, 'dalta', abs(df['Rdif'] + df['Gdif'] + df['Bdif'])\n )\n df = df.sort_values(by=['dalta'], ascending=True)\n data = df[['Serial no', 'color']]\n group = data.groupby('Serial no')\n datacount = group.count()\n df = df.merge(datacount, left_on='Serial no', right_index=True)\n df = df.sort_values(by=['color_y'], ascending=False)\n df3 = df.drop_duplicates('Serial no', keep='first', inplace\n =False).head()\n print(df3)\n df3.to_csv('.data base\\\\test_result.csv', index=False,\n encoding='utf_8_sig')\n if df3.empty == True:\n root = tk.Tk()\n root.withdraw()\n messagebox.showinfo('失敗', '未找到符合資料')\n elif len(df3) <= 2:\n root = tk.Tk()\n root.withdraw()\n messagebox.showinfo('失敗', '只找到少數資料\\n 已存在test_result')\n else:\n Zero = df3.loc[(df3['Rdif'] == 0) & (df3['Gdif'] == 0) &\n (df3['Bdif'] == 0)]\n Zero = Zero.head(3)\n if Zero.empty == False:\n Zero = Zero.drop(['R', 'G', 'B', 'dalta', 'Rdif',\n 'Gdif', 'Bdif', 'A', 'S', 'Adif', 'Sdif',\n 'color_x', 'color_y'], axis=1)\n name = df3['Serial no'].tolist()\n rate = df3['color_y'].tolist()\n col = list(Zero.columns)\n row_text 
= Zero.iloc[0].tolist()\n df3 = df3.drop(['R', 'G', 'B', 'dalta', 'Rdif',\n 'Gdif', 'Bdif', 'A', 'S', 'Adif', 'Sdif',\n 'color_x', 'color_y'], axis=1)\n row_df3 = df3.iloc[0].tolist()\n row_df32 = df3.iloc[1].tolist()\n row_df33 = df3.iloc[2].tolist()\n Result_Print()\n print('0')\n print(Zero)\n else:\n filtdf = df3.loc[df3['A'] >= SumAvr]\n filtdf = filtdf.sort_values(by=['Rdif', 'Gdif', 'Bdif']\n ).head()\n Neg_filtdf = df3.loc[df3['A'] < SumAvr]\n Neg_filtdf = Neg_filtdf.sort_values(by=['Rdif',\n 'Gdif', 'Bdif']).head()\n if Neg_filtdf.empty == True and filtdf.empty == True:\n root = tk.Tk()\n root.withdraw()\n messagebox.showinfo('失敗', '未找到符合資料')\n else:\n filtdf = filtdf.drop(['R', 'G', 'B', 'dalta',\n 'Rdif', 'Gdif', 'Bdif', 'A', 'S', 'Adif',\n 'Sdif', 'color_x', 'color_y'], axis=1)\n name = df3['Serial no'].tolist()\n rate = df3['color_y'].tolist()\n col = list(filtdf.columns)\n row_text = filtdf.iloc[0].tolist()\n df3 = df3.drop(['R', 'G', 'B', 'dalta', 'Rdif',\n 'Gdif', 'Bdif', 'A', 'S', 'Adif', 'Sdif',\n 'color_x', 'color_y'], axis=1)\n row_df3 = df3.iloc[0].tolist()\n row_df32 = df3.iloc[1].tolist()\n row_df33 = df3.iloc[2].tolist()\n Result_Print()\n print('最接近的為1', filtdf.head(1))\n\n\ndef color_def(BAvr, GAvr, RAvr):\n global color\n if abs(int(BAvr) - int(GAvr)) <= 1 and abs(int(BAvr) - int(RAvr)) <= 1:\n color = 'White'\n return color\n elif BAvr >= GAvr and BAvr >= RAvr:\n if BAvr - GAvr > 3 and BAvr - RAvr >= 3:\n color = 'Blue'\n return color\n elif BAvr - GAvr < 3:\n color = 'Cyan'\n return color\n else:\n color = 'Purple'\n return color\n elif GAvr >= RAvr and GAvr >= BAvr:\n if GAvr - RAvr > 3 or GAvr - BAvr > 3:\n color = 'Green'\n return color\n elif GAvr - RAvr < 3:\n color = 'Yellow'\n return color\n else:\n color = 'Cyan'\n return color\n elif RAvr >= GAvr and RAvr >= BAvr:\n if RAvr - GAvr >= 3 and RAvr - BAvr >= 3:\n color = 'Red'\n return color\n elif RAvr - GAvr < 3:\n color = 'Yellow'\n return color\n else:\n color = 'Purple'\n return color\n else:\n color = 'White'\n\n\nimg = cv2.imdecode(np.fromfile('.pure\\\\%s' % result, dtype=np.uint8), -1)\ncv2.namedWindow('mouse_callback')\ncv2.setMouseCallback('mouse_callback', CircleCallback)\n\n\ndef main():\n while True:\n cv2.imshow('mouse_callback', img)\n if cv2.waitKey(20) == 27:\n break\n cv2.destroyAllWindows()\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "import cv2\nimport numpy as np\nimport pandas as pd\nimport tkinter as tk\nimport random\nfrom tkinter import *\nfrom tkinter import ttk\nfrom tkinter import messagebox\nfrom tkinter import Scale,Tk\nfrom tkinter.ttk import Notebook\n\nrefPt = []\nPtBGR=[]\nr=[]\ng=[]\nb=[]\nrefPt = []\nSerial=[]\nPtBGR=[]\nr1=[]\nr2=[]\nr3=[]\nr4=[]\nrate=[]\nrate2=[]\nrate3=[]\nr6=[]\nr7=[]\nr8=[]\nr9=[]\nadd=[]\nadd2=[]\nadd3=[]\ncolor_name=[]\nlocate=[]\nbrand=[]\nboolean=False\n\n\nroot = tk.Tk()\nroot.geometry(\"400x200\")\nroot.configure(background='white')\n\ndef quitScreen():\n messagebox.showinfo(\"collecting data\", \"點擊視窗開始分析\")\n root.destroy()\n root2=Tk()\n root2.destroy()\n \ndef getTextInput():\n global result,result2\n result=text.get(1.0, tk.END+\"-1c\")\n result2=text2.get(1.0, tk.END+\"-1c\")\n\nimg = PhotoImage(file=\"buttons/QJsmall.png\")\npanel = tk.Label(root, image = img)\npanel.grid(row=0,column=0,columnspan=3)\n\nlabelmode = tk.Label(root,text = \"請輸入圖片完整名稱\\n ex:104432 w7.jpg\",bg=\"white\")\nlabelmode.configure(font=(\"微軟正黑體\", 10))\nlabelmode.grid(row=1)\ntext=tk.Text(root, width=20,height=1)\ntext.insert(\"insert\",\".jpg\")\ntext.configure(font=(\"微軟正黑體\", 10))\ntext.grid(row=1,column=2)\n\nlabelmode2 = tk.Label(root,text = \"請輸入讀取資料庫名稱\\n ex:PureColorBig.csv\",bg=\"white\")\nlabelmode2.configure(font=(\"微軟正黑體\", 10))\nlabelmode2.grid(row=2)\ntext2=tk.Text(root, width=20,height=1)\ntext2.insert(\"insert\",\"PureColorBig.csv\")\ntext2.configure(font=(\"微軟正黑體\", 10))\ntext2.grid(row=2,column=2)\n\nimg_confirm=PhotoImage(file=\"buttons/confirm.png\")\nimg_start=PhotoImage(file=\"buttons/start.png\")\nbtnRead=tk.Button(root, image=img_confirm,text=\" \",relief='flat', \n command=getTextInput)\n\nbtnRead.grid(row=5,column=1)\n\nbtnRead2=tk.Button(root, image=img_start,text=\" \",relief='flat', \n command=quitScreen)\n\nbtnRead2.grid(row=5,column=2)\n\nroot.mainloop()\n\n\n\n\ndef Result_Print():\n window=Tk()\n window.title(\"分析結果\")\n window.geometry(\"600x900\")\n \n frame2=Frame(window)\n frame2.pack(fill=\"both\")\n\n \n tablayout=Notebook(frame2)\n tablayout2=Notebook(frame2)\n\n\n #交叉配對\n ntab1=Frame(tablayout2)\n ntab1.pack(fill=\"both\")\n for row in range(len(name_n)):\n for column in range(1):\n label=Label(ntab1,width=25,height=2,text=name_n[row],bg=\"black\",fg=\"white\",padx=1,pady=1)\n label.grid(row=row,column=column,sticky=\"nsew\",padx=1,pady=1)\n ntab1.grid_columnconfigure(column,weight=1)\n for row in range(len(name_n)):\n for column in range(1):\n label=Label(ntab1,width=5,height=2,text=\"%s\" %rate_n[row],bg=\"black\",fg=\"white\",padx=1,pady=1)\n label.grid(row=row,column=1,sticky=\"nsew\",padx=1,pady=1)\n ntab1.grid_columnconfigure(column,weight=1)\n\n for row in range(len(name_n)):\n for column in range(1):\n label=Label(ntab1,width=12,height=2,text=\"% 相似程度\",bg=\"black\",fg=\"white\",padx=1,pady=1)\n label.grid(row=row,column=2,sticky=\"nsew\",padx=1,pady=1)\n ntab1.grid_columnconfigure(column,weight=1)\n tablayout2.add(ntab1,text=\"交叉配對結果\")\n\n ntab2=Frame(tablayout2)\n ntab2.pack(fill=\"both\")\n \n for row in range(len(ncol)):\n for column in range(1):\n label=Label(ntab2,width=22,height=1,text=ncol[row],bg=\"black\",fg=\"white\",padx=1,pady=1)\n label.grid(row=row,column=0,sticky=\"nsew\",padx=1,pady=1)\n ntab1.grid_columnconfigure(column,weight=1)\n\n for row in range(len(ncol)):\n for column in range(1):\n label=Label(ntab2,width=22,height=1,text=row_nf3[row],bg=\"black\",fg=\"white\",padx=1,pady=1)\n 
label.grid(row=row,column=1,sticky=\"nsew\",padx=1,pady=1)\n ntab1.grid_columnconfigure(column,weight=1)\n \n tablayout2.add(ntab2,text=\"配方1\")\n\n ntab3=Frame(tablayout2)\n ntab3.pack(fill=\"both\")\n \n for row in range(len(ncol)):\n for column in range(1):\n label=Label(ntab3,width=22,height=1,text=ncol[row],bg=\"black\",fg=\"white\",padx=1,pady=1)\n label.grid(row=row,column=0,sticky=\"nsew\",padx=1,pady=1)\n ntab1.grid_columnconfigure(column,weight=1)\n\n for row in range(len(ncol)):\n for column in range(1):\n label=Label(ntab3,width=22,height=1,text=row_nf32[row],bg=\"black\",fg=\"white\",padx=1,pady=1)\n label.grid(row=row,column=1,sticky=\"nsew\",padx=1,pady=1)\n ntab1.grid_columnconfigure(column,weight=1)\n \n tablayout2.add(ntab3,text=\"配方2\")\n\n ntab4=Frame(tablayout2)\n ntab4.pack(fill=\"both\")\n \n for row in range(len(ncol)):\n for column in range(1):\n label=Label(ntab4,width=22,height=1,text=ncol[row],bg=\"black\",fg=\"white\",padx=1,pady=1)\n label.grid(row=row,column=0,sticky=\"nsew\",padx=1,pady=1)\n ntab1.grid_columnconfigure(column,weight=1)\n\n for row in range(len(ncol)):\n for column in range(1):\n label=Label(ntab4,width=22,height=1,text=row_nf33[row],bg=\"black\",fg=\"white\",padx=1,pady=1)\n label.grid(row=row,column=1,sticky=\"nsew\",padx=1,pady=1)\n ntab1.grid_columnconfigure(column,weight=1)\n \n tablayout2.add(ntab4,text=\"配方3\")\n\n ntab5=Frame(tablayout2)\n ntab5.pack(fill=\"both\")\n \n for row in range(len(ncol)):\n for column in range(1):\n label=Label(ntab5,width=22,height=1,text=ncol[row],bg=\"black\",fg=\"white\",padx=1,pady=1)\n label.grid(row=row,column=0,sticky=\"nsew\",padx=1,pady=1)\n ntab1.grid_columnconfigure(column,weight=1)\n\n for row in range(len(ncol)):\n for column in range(1):\n label=Label(ntab5,width=22,height=1,text=row_nf3[row],bg=\"black\",fg=\"white\",padx=1,pady=1)\n label.grid(row=row,column=1,sticky=\"nsew\",padx=1,pady=1)\n ntab1.grid_columnconfigure(column,weight=1)\n \n tablayout2.add(ntab5,text=\"最接近配方\")\n\n\n\n #顏色分類\n tab1=Frame(tablayout)\n tab1.pack(fill=\"both\")\n for row in range(len(name)):\n for column in range(1):\n label=Label(tab1,width=25,height=2,text=name[row],bg=\"black\",fg=\"white\",padx=1,pady=1)\n label.grid(row=row,column=column,sticky=\"nsew\",padx=1,pady=1)\n tab1.grid_columnconfigure(column,weight=1)\n for row in range(len(name)):\n for column in range(1):\n label=Label(tab1,width=5,height=2,text=\"%s\" %rate[row],bg=\"black\",fg=\"white\",padx=1,pady=1)\n label.grid(row=row,column=1,sticky=\"nsew\",padx=1,pady=1)\n tab1.grid_columnconfigure(column,weight=1)\n\n for row in range(len(name)):\n for column in range(1):\n label=Label(tab1,width=12,height=2,text=\"% 相似程度\",bg=\"black\",fg=\"white\",padx=1,pady=1)\n label.grid(row=row,column=2,sticky=\"nsew\",padx=1,pady=1)\n tab1.grid_columnconfigure(column,weight=1)\n tablayout.add(tab1,text=\"顏色分類結果\")\n \n tab2=Frame(tablayout)\n tab2.pack(fill=\"both\")\n \n for row in range(len(col)):\n for column in range(1):\n label=Label(tab2,width=22,height=1,text=col[row],bg=\"black\",fg=\"white\",padx=1,pady=1)\n label.grid(row=row,column=0,sticky=\"nsew\",padx=1,pady=1)\n tab1.grid_columnconfigure(column,weight=1)\n\n for row in range(len(col)):\n for column in range(1):\n label=Label(tab2,width=22,height=1,text=row_df3[row],bg=\"black\",fg=\"white\",padx=1,pady=1)\n label.grid(row=row,column=1,sticky=\"nsew\",padx=1,pady=1)\n tab1.grid_columnconfigure(column,weight=1)\n \n tablayout.add(tab2,text=\"配方1\")\n\n tab3=Frame(tablayout)\n 
tab3.pack(fill=\"both\")\n \n for row in range(len(col)):\n for column in range(1):\n label=Label(tab3,width=22,height=1,text=col[row],bg=\"black\",fg=\"white\",padx=1,pady=1)\n label.grid(row=row,column=0,sticky=\"nsew\",padx=1,pady=1)\n tab1.grid_columnconfigure(column,weight=1)\n\n for row in range(len(col)):\n for column in range(1):\n label=Label(tab3,width=22,height=1,text=row_df32[row],bg=\"black\",fg=\"white\",padx=1,pady=1)\n label.grid(row=row,column=1,sticky=\"nsew\",padx=1,pady=1)\n tab1.grid_columnconfigure(column,weight=1)\n \n tablayout.add(tab3,text=\"配方2\")\n\n tab4=Frame(tablayout)\n tab4.pack(fill=\"both\")\n \n for row in range(len(col)):\n for column in range(1):\n label=Label(tab4,width=22,height=1,text=col[row],bg=\"black\",fg=\"white\",padx=1,pady=1)\n label.grid(row=row,column=0,sticky=\"nsew\",padx=1,pady=1)\n tab1.grid_columnconfigure(column,weight=1)\n\n for row in range(len(col)):\n for column in range(1):\n label=Label(tab4,width=22,height=1,text=row_df33[row],bg=\"black\",fg=\"white\",padx=1,pady=1)\n label.grid(row=row,column=1,sticky=\"nsew\",padx=1,pady=1)\n tab1.grid_columnconfigure(column,weight=1)\n \n tablayout.add(tab4,text=\"配方3\")\n\n tab5=Frame(tablayout)\n tab5.pack(fill=\"both\")\n \n for row in range(len(col)):\n for column in range(1):\n label=Label(tab5,width=22,height=1,text=col[row],bg=\"black\",fg=\"white\",padx=1,pady=1)\n label.grid(row=row,column=0,sticky=\"nsew\",padx=1,pady=1)\n tab1.grid_columnconfigure(column,weight=1)\n\n for row in range(len(col)):\n for column in range(1):\n label=Label(tab5,width=22,height=1,text=row_text[row],bg=\"black\",fg=\"white\",padx=1,pady=1)\n label.grid(row=row,column=1,sticky=\"nsew\",padx=1,pady=1)\n tab1.grid_columnconfigure(column,weight=1)\n \n tablayout.add(tab5,text=\"最接近配方\")\n\n tablayout.pack()\n tablayout2.pack()\n window.mainloop()\n \n\n\ndef CircleCallback(event,x,y,flags,param):\n n=8\n global refPt,PtBGR,w,h,Serial,r1,r2,r3,r4,rate,rate2,rate3,r6,r7,r8,r9,add,add2,add3,color,b,g,r,df3,name,rate,col,row_text\n global row_df3,row_df32,row_df33,row_text2,row_nf3,row_nf32,nf3,row_nf33,name_n,rate_n,ncol\n if event == cv2.EVENT_LBUTTONDOWN:\n\n #下面n代表取樣點數 若n越大則越精準一般不建議超過1000\n n=500\n for c in range(0,n):\n c+=1\n #若n改變下面499改為n-1\n ranx=(random.randint(0,499))\n rany=(random.randint(0,499))\n refPt.append((ranx,rany))\n b, g, r = img[ranx,rany]\n PtBGR.append((b,g,r)) \n #print(PtBGR[0:n])\n b=[x[0] for x in PtBGR]\n g=[x[1] for x in PtBGR]\n r=[x[2] for x in PtBGR]\n if len(refPt)==n:\n BAvr=(round(sum(b[0:n])/n))\n GAvr=(round(sum(g[0:n])/n))\n RAvr=(round(sum(r[0:n])/n))\n SumRGB=(BAvr+GAvr+RAvr)\n SumAvr=(round(SumRGB/3))\n color_def(BAvr,GAvr,RAvr)\n color_name.append(color)\n AvrRGB={'R':RAvr,'G':GAvr,'B':BAvr,'Sum':SumRGB,'Avr':SumAvr,'color':color_name}\n df_test = pd.DataFrame(AvrRGB,index=[0])\n dfread = pd.read_csv(\".data base\\\\%s\" %(result2))\n dfread['A']= round((dfread['R'] + dfread['G'] + dfread['B'])/3)\n dfread['S'] = dfread['R'] + dfread['G'] + dfread['B']\n\n #交叉比對法\n nf=pd.DataFrame(list(zip(r,g,b)),columns=['R','G','B'])\n nfread=dfread[['Serial no','R','G','B']]\n loan=pd.merge(nf,nfread)\n group=loan.groupby('Serial no')\n Newnf=group.count()\n Newnf['P']=round((Newnf['R']/Newnf['R'].sum())* 100)\n Newnf=Newnf.sort_values(by=['R'],ascending=False)\n Rate=Newnf['P'].tolist()\n Newnf.columns = [' '.join(col).strip() for col in Newnf.columns.values]\n nf2=pd.DataFrame(Newnf.to_records())\n nf2=nf2.head(5)\n \n print(nf2)\n if(len(nf2['Serial no'])==0):\n i=0\n j=0\n 
k=0\n elif(len(nf2['Serial no'])==1):\n i=nf2.at[0,'Serial no']\n j=0\n k=0\n elif(len(nf2['Serial no'])==2):\n i=nf2.at[0,'Serial no']\n j=nf2.at[1,'Serial no']\n k=0\n else:\n i=nf2.at[0,'Serial no']\n j=nf2.at[1,'Serial no']\n k=nf2.at[2,'Serial no']\n print(k)\n nf3=dfread.loc[(dfread['Serial no']==i)].head(1)\n nf4=dfread.loc[(dfread['Serial no']==j)].head(1)\n nf5=dfread.loc[(dfread['Serial no']==k)].head(1)\n nf3=nf3.drop(['R','G','B','color','A','S'],axis=1)\n nf4=nf4.drop(['R','G','B','color','A','S'],axis=1)\n nf5=nf5.drop(['R','G','B','color','A','S'],axis=1)\n nf=pd.concat([nf3, nf4,nf5])\n nf.to_csv(\".data base\\\\test_result2.csv\",index=False,encoding=\"utf_8_sig\")\n print(nf)\n ncol=list(nf.columns)\n if(len(nf2['Serial no'])==0):\n root=tk.Tk()\n root.withdraw()\n messagebox.showinfo(\"失敗\", \"未找到符合資料\")\n elif(len(nf2['Serial no'])==1):\n row_nf3=nf3.iloc[0].tolist()\n row_nf32=['x','x','x','x','x','x','x','x','x','x','x','x','x','x','x','x']\n row_nf33=['x','x','x','x','x','x','x','x','x','x','x','x','x','x','x','x']\n\n elif(len(nf2['Serial no'])==2):\n row_nf3=nf3.iloc[0].tolist()\n row_nf32=nf4.iloc[0].tolist()\n row_nf33=['x','x','x','x','x','x','x','x','x','x','x','x','x','x','x','x']\n \n else:\n row_nf3=nf3.iloc[0].tolist()\n row_nf32=nf4.iloc[0].tolist()\n print(row_nf32)\n row_nf33=nf5.iloc[0].tolist()\n name_n=nf['Serial no'].tolist()\n rate_n=Rate\n \n \n #顏色分類法\n #(可以改)當需要寬鬆一點的比對,刪除下面一段的上下兩個'''\n \n '''\n newdf1=dfread.loc[(dfread['color']==color)|(dfread['A']==SumAvr)]\n newdf2=dfread.loc[(dfread['S']<=(SumRGB+2))&(dfread['S']>=(SumRGB-2))]\n newdf=pd.concat([newdf1, newdf2])\n '''\n\n #(可以改)當需要嚴格一點的比對,刪除下面一段的上下兩個'''\n '''\n newdf=dfread.loc[(dfread['A']==SumAvr)|(dfread['S']==SumRGB)]\n newdf=newdf.loc[(newdf['color']==color)]\n '''\n\n #並在下面一行的開頭加上#\n newdf=dfread.loc[(dfread['color']==color)|(dfread['A']==SumAvr)|(dfread['S']==SumRGB)]\n\n \n newdf.insert(1,'Rdif',newdf[['R']].add(-RAvr))\n newdf.insert(2,'Gdif',newdf[['G']].add(-GAvr))\n newdf.insert(3,'Bdif',newdf[['B']].add(-BAvr))\n newdf.insert(4,'Adif',abs(newdf[['A']].add(-SumAvr)))\n newdf.insert(5,'Sdif',abs(newdf[['S']].add(-SumRGB)))\n df=newdf.sort_values(by=['Sdif', 'Adif'], ascending=True).head(100)\n df.insert(1,'dalta',abs(df['Rdif']+df['Gdif']+df['Bdif']))\n df=df.sort_values(by=['dalta'],ascending=True)\n data=df[['Serial no','color']]\n group=data.groupby('Serial no')\n datacount=group.count()\n df=df.merge(datacount,left_on='Serial no',right_index=True)\n df=df.sort_values(by=['color_y'],ascending=False)\n df3=df.drop_duplicates('Serial no', keep='first', inplace=False).head()\n print(df3)\n df3.to_csv(\".data base\\\\test_result.csv\",index=False,encoding=\"utf_8_sig\")\n if df3.empty ==True:\n root=tk.Tk()\n root.withdraw()\n messagebox.showinfo(\"失敗\", \"未找到符合資料\")\n \n elif len(df3)<=2:\n \n root=tk.Tk()\n root.withdraw()\n messagebox.showinfo(\"失敗\", \"只找到少數資料\\n 已存在test_result\")\n \n else:\n Zero=df3.loc[(df3['Rdif']==0)&(df3['Gdif']==0)&(df3['Bdif']==0)]\n Zero=Zero.head(3)\n if Zero.empty==False:\n Zero=Zero.drop(['R','G','B','dalta','Rdif','Gdif','Bdif','A','S','Adif','Sdif','color_x','color_y'],axis=1)\n name=df3['Serial no'].tolist()\n rate=df3['color_y'].tolist()\n col=list(Zero.columns)\n row_text=Zero.iloc[0].tolist()\n df3=df3.drop(['R','G','B','dalta','Rdif','Gdif','Bdif','A','S','Adif','Sdif','color_x','color_y'],axis=1)\n row_df3=df3.iloc[0].tolist()\n row_df32=df3.iloc[1].tolist()\n row_df33=df3.iloc[2].tolist()\n Result_Print()\n print('0')\n print(Zero)\n \n 
else:\n filtdf=df3.loc[(df3['A']>=SumAvr)]\n filtdf=filtdf.sort_values(by=['Rdif','Gdif','Bdif']).head()\n Neg_filtdf=df3.loc[(df3['A']<SumAvr)]\n Neg_filtdf=Neg_filtdf.sort_values(by=['Rdif','Gdif','Bdif']).head()\n \n if Neg_filtdf.empty==True and filtdf.empty ==True:\n root=tk.Tk()\n root.withdraw()\n messagebox.showinfo(\"失敗\", \"未找到符合資料\")\n else:\n filtdf=filtdf.drop(['R','G','B','dalta','Rdif','Gdif','Bdif','A','S','Adif','Sdif','color_x','color_y'],axis=1)\n name=df3['Serial no'].tolist()\n rate=df3['color_y'].tolist()\n col=list(filtdf.columns)\n row_text=filtdf.iloc[0].tolist()\n df3=df3.drop(['R','G','B','dalta','Rdif','Gdif','Bdif','A','S','Adif','Sdif','color_x','color_y'],axis=1)\n row_df3=df3.iloc[0].tolist()\n row_df32=df3.iloc[1].tolist()\n row_df33=df3.iloc[2].tolist()\n Result_Print()\n print(\"最接近的為1\",filtdf.head(1))\n \n\n \n\ndef color_def(BAvr,GAvr,RAvr):\n \n global color\n if abs(int(BAvr)-int(GAvr))<=1 and abs(int(BAvr)-int(RAvr))<=1:\n color='White'\n return color\n \n elif BAvr>=GAvr and BAvr>=RAvr:\n if BAvr-GAvr>3 and BAvr-RAvr>=3:\n color='Blue'\n return color\n \n elif BAvr-GAvr<3:\n color='Cyan'\n return color\n \n else:\n color='Purple'\n return color\n \n \n elif GAvr>=RAvr and GAvr>=BAvr:\n if GAvr-RAvr>3 or GAvr-BAvr>3:\n color='Green'\n return color\n \n elif GAvr-RAvr<3:\n color='Yellow'\n return color\n \n else:\n color='Cyan'\n return color\n \n \n elif RAvr>=GAvr and RAvr>=BAvr:\n if RAvr-GAvr>=3 and RAvr-BAvr>=3:\n color='Red'\n return color\n\n elif RAvr-GAvr<3:\n color='Yellow'\n return color\n\n else:\n color='Purple'\n return color\n \n\n else:\n color='White'\n\n\n#img=cv2.imdecode(np.fromfile(r\"D:\\桌面\\JA Material\\JA-material\\pure\\%s\" % (result),dtype=np.uint8),-1) \nimg=cv2.imdecode(np.fromfile(r\".pure\\%s\" % (result),dtype=np.uint8),-1)\ncv2.namedWindow('mouse_callback')\n\n# bind the callback function to window\n\ncv2.setMouseCallback('mouse_callback',CircleCallback)\n \ndef main():\n while (True):\n cv2.imshow('mouse_callback',img)\n if cv2.waitKey(20) == 27:\n break\n \n cv2.destroyAllWindows()\n \n \nif __name__ == \"__main__\":\n main()\n",
"step-ids": [
5,
6,
8,
9,
10
]
}
|
[
5,
6,
8,
9,
10
] |
import sys
import os
# Module "sys"
#
# See docs for the sys module: https://docs.python.org/3.7/library/sys.html
# Print out the command line arguments in sys.argv, one per line:
# for arg in sys.argv:
#     print(arg)
# Print out the platform from sys:
# print(sys.platform)
# Print out the Python version from sys:
# print(sys.version)
print("platform: "+sys.platform + "\n" + "maxsize: "+str(sys.maxsize) + "\n" + "argv: "+str(sys.argv))
# # Module "os"
# #
# # See the docs for the OS module: https://docs.python.org/3.7/library/os.html
# # Print the current process ID
print("Process ID: "+ str(os.getpid()) + "\n" + "cwd: " + os.getcwd() + "\n" + "login id: " + os.getlogin())
# # Print the current working directory (cwd):
# print()
# # Print your login name
# print()
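# Caveat worth knowing here (an aside, not from the original exercise):
# os.getlogin() raises OSError when the process has no controlling
# terminal (cron jobs, some IDEs); a common, more robust alternative is:
# import getpass
# print("user: " + getpass.getuser())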
|
normal
|
{
"blob_id": "3fed96e9bedb157a14cf9c441de5aae8b4f6edc8",
"index": 8664,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint('platform: ' + sys.platform + '\\n' + 'maxsize: ' + str(sys.maxsize) +\n '\\n' + 'argv: ' + str(sys.argv))\nprint('Process ID: ' + str(os.getpid()) + '\\n' + 'cwd: ' + os.getcwd() +\n '\\n' + 'login id: ' + os.getlogin())\n",
"step-3": "import sys\nimport os\nprint('platform: ' + sys.platform + '\\n' + 'maxsize: ' + str(sys.maxsize) +\n '\\n' + 'argv: ' + str(sys.argv))\nprint('Process ID: ' + str(os.getpid()) + '\\n' + 'cwd: ' + os.getcwd() +\n '\\n' + 'login id: ' + os.getlogin())\n",
"step-4": "import sys\nimport os\n\n# Module \"sys\"\n#\n# See docs for the sys module: https://docs.python.org/3.7/library/sys.html\n\n# Print out the command line arguments in sys.argv, one per line:\n\n\n# Print out the plaform from sys:\n# for arg in sys.argv:\n# print(arg)\n\n\n# Print out the Python version from sys:print(sys.platform)\n# print(sys, sep=\"\\n\", sys.path)\nprint(\"platform: \"+sys.platform + \"\\n\" + \"maxsize: \"+str(sys.maxsize) + \"\\n\" + \"argv: \"+str(sys.argv))\n\n\n\n\n# # Module \"os\"\n# #\n# # See the docs for the OS module: https://docs.python.org/3.7/library/os.html\n\n# # Print the current process ID\nprint(\"Process ID: \"+ str(os.getpid()) + \"\\n\" + \"cwd: \" + os.getcwd() + \"\\n\" + \"login id: \" + os.getlogin())\n\n# # Print the current working directory (cwd):\n# print()\n\n# # Print your login name\n# print()\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
class showpng(Thread):
def __init__(self, data):
Thread.__init__(self)
self.data = data
def run(self):
img = Image.open(BytesIO(self.data))
img.show()
def islogin(session):
try:
session.cookies.load(ignore_discard=True)
except Exception:
pass
loginurl = session.get('https://api.bilibili.com/x/web-interface/nav',
verify=False, headers=headers).json()
if loginurl['code'] == 0:
print('Cookies值有效,', loginurl['data']['uname'], ',已登录!')
return session, True
else:
print('Cookies值已经失效,请重新扫码登录!')
return session, False
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class showpng(Thread):
def __init__(self, data):
Thread.__init__(self)
self.data = data
def run(self):
img = Image.open(BytesIO(self.data))
img.show()
def islogin(session):
try:
session.cookies.load(ignore_discard=True)
except Exception:
pass
loginurl = session.get('https://api.bilibili.com/x/web-interface/nav',
verify=False, headers=headers).json()
if loginurl['code'] == 0:
print('Cookies值有效,', loginurl['data']['uname'], ',已登录!')
return session, True
else:
print('Cookies值已经失效,请重新扫码登录!')
return session, False
def bzlogin():
if not os.path.exists('bzcookies.txt'):
with open('bzcookies.txt', 'w') as f:
f.write('')
session = requests.session()
session.cookies = cookielib.LWPCookieJar(filename='bzcookies.txt')
session, status = islogin(session)
if not status:
getlogin = session.get(
'https://passport.bilibili.com/qrcode/getLoginUrl', headers=headers
).json()
loginurl = requests.get(getlogin['data']['url'], headers=headers).url
oauthKey = getlogin['data']['oauthKey']
qr = qrcode.QRCode()
qr.add_data(loginurl)
img = qr.make_image()
a = BytesIO()
img.save(a, 'png')
png = a.getvalue()
a.close()
base64_data = base64.b64encode(png)
print(base64_data)
"""
t = showpng(png)
t.start()
tokenurl = 'https://passport.bilibili.com/qrcode/getLoginInfo'
while 1:
qrcodedata = session.post(tokenurl, data={'oauthKey': oauthKey, 'gourl': 'https://www.bilibili.com/'}, headers=headerss).json()
print(qrcodedata)
if '-4' in str(qrcodedata['data']):
print('二维码未失效,请扫码!')
elif '-5' in str(qrcodedata['data']):
print('已扫码,请确认!')
elif '-2' in str(qrcodedata['data']):
print('二维码已失效,请重新运行!')
elif 'True' in str(qrcodedata['status']):
print('已确认,登入成功!')
session.get(qrcodedata['data']['url'], headers=headers)
break
else:
print('其他:', qrcodedata)
time.sleep(2)
session.cookies.save()
return session
"""
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
requests.packages.urllib3.disable_warnings()
ua = UserAgent(path='ua.json')
user_agent = ua.chrome
headers = {'User-Agent': user_agent, 'Referer': 'https://www.bilibili.com/'}
headerss = {'User-Agent': user_agent, 'Host': 'passport.bilibili.com',
'Referer': 'https://passport.bilibili.com/login'}
class showpng(Thread):
def __init__(self, data):
Thread.__init__(self)
self.data = data
def run(self):
img = Image.open(BytesIO(self.data))
img.show()
def islogin(session):
try:
session.cookies.load(ignore_discard=True)
except Exception:
pass
loginurl = session.get('https://api.bilibili.com/x/web-interface/nav',
verify=False, headers=headers).json()
if loginurl['code'] == 0:
print('Cookies值有效,', loginurl['data']['uname'], ',已登录!')
return session, True
else:
print('Cookies值已经失效,请重新扫码登录!')
return session, False
def bzlogin():
if not os.path.exists('bzcookies.txt'):
with open('bzcookies.txt', 'w') as f:
f.write('')
session = requests.session()
session.cookies = cookielib.LWPCookieJar(filename='bzcookies.txt')
session, status = islogin(session)
if not status:
getlogin = session.get(
'https://passport.bilibili.com/qrcode/getLoginUrl', headers=headers
).json()
loginurl = requests.get(getlogin['data']['url'], headers=headers).url
oauthKey = getlogin['data']['oauthKey']
qr = qrcode.QRCode()
qr.add_data(loginurl)
img = qr.make_image()
a = BytesIO()
img.save(a, 'png')
png = a.getvalue()
a.close()
base64_data = base64.b64encode(png)
print(base64_data)
"""
t = showpng(png)
t.start()
tokenurl = 'https://passport.bilibili.com/qrcode/getLoginInfo'
while 1:
qrcodedata = session.post(tokenurl, data={'oauthKey': oauthKey, 'gourl': 'https://www.bilibili.com/'}, headers=headerss).json()
print(qrcodedata)
if '-4' in str(qrcodedata['data']):
print('二维码未失效,请扫码!')
elif '-5' in str(qrcodedata['data']):
print('已扫码,请确认!')
elif '-2' in str(qrcodedata['data']):
print('二维码已失效,请重新运行!')
elif 'True' in str(qrcodedata['status']):
print('已确认,登入成功!')
session.get(qrcodedata['data']['url'], headers=headers)
break
else:
print('其他:', qrcodedata)
time.sleep(2)
session.cookies.save()
return session
"""
if __name__ == '__main__':
bzlogin()
<|reserved_special_token_1|>
import qrcode
from fake_useragent import UserAgent
from threading import Thread
import time, base64
import requests
from io import BytesIO
import http.cookiejar as cookielib
from PIL import Image
import os
requests.packages.urllib3.disable_warnings()
ua = UserAgent(path='ua.json')
user_agent = ua.chrome
headers = {'User-Agent': user_agent, 'Referer': 'https://www.bilibili.com/'}
headerss = {'User-Agent': user_agent, 'Host': 'passport.bilibili.com',
'Referer': 'https://passport.bilibili.com/login'}
class showpng(Thread):
def __init__(self, data):
Thread.__init__(self)
self.data = data
def run(self):
img = Image.open(BytesIO(self.data))
img.show()
def islogin(session):
try:
session.cookies.load(ignore_discard=True)
except Exception:
pass
loginurl = session.get('https://api.bilibili.com/x/web-interface/nav',
verify=False, headers=headers).json()
if loginurl['code'] == 0:
print('Cookies值有效,', loginurl['data']['uname'], ',已登录!')
return session, True
else:
print('Cookies值已经失效,请重新扫码登录!')
return session, False
def bzlogin():
if not os.path.exists('bzcookies.txt'):
with open('bzcookies.txt', 'w') as f:
f.write('')
session = requests.session()
session.cookies = cookielib.LWPCookieJar(filename='bzcookies.txt')
session, status = islogin(session)
if not status:
getlogin = session.get(
'https://passport.bilibili.com/qrcode/getLoginUrl', headers=headers
).json()
loginurl = requests.get(getlogin['data']['url'], headers=headers).url
oauthKey = getlogin['data']['oauthKey']
qr = qrcode.QRCode()
qr.add_data(loginurl)
img = qr.make_image()
a = BytesIO()
img.save(a, 'png')
png = a.getvalue()
a.close()
base64_data = base64.b64encode(png)
print(base64_data)
"""
t = showpng(png)
t.start()
tokenurl = 'https://passport.bilibili.com/qrcode/getLoginInfo'
while 1:
qrcodedata = session.post(tokenurl, data={'oauthKey': oauthKey, 'gourl': 'https://www.bilibili.com/'}, headers=headerss).json()
print(qrcodedata)
if '-4' in str(qrcodedata['data']):
print('二维码未失效,请扫码!')
elif '-5' in str(qrcodedata['data']):
print('已扫码,请确认!')
elif '-2' in str(qrcodedata['data']):
print('二维码已失效,请重新运行!')
elif 'True' in str(qrcodedata['status']):
print('已确认,登入成功!')
session.get(qrcodedata['data']['url'], headers=headers)
break
else:
print('其他:', qrcodedata)
time.sleep(2)
session.cookies.save()
return session
"""
if __name__ == '__main__':
bzlogin()
<|reserved_special_token_1|>
# -*- coding: utf-8 -*-
# Single-file QR-code scan-login script (adapted from elsewhere)
import qrcode
from fake_useragent import UserAgent
from threading import Thread
import time, base64
import requests
from io import BytesIO
import http.cookiejar as cookielib
from PIL import Image
import os
requests.packages.urllib3.disable_warnings()
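# disable_warnings() silences the InsecureRequestWarning that requests
# would otherwise emit for the verify=False call in islogin() below.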
ua = UserAgent(path='ua.json')
user_agent = ua.chrome
headers = {'User-Agent': user_agent, 'Referer': "https://www.bilibili.com/"}
headerss = {'User-Agent': user_agent, 'Host': 'passport.bilibili.com','Referer': "https://passport.bilibili.com/login"}
class showpng(Thread):
def __init__(self, data):
Thread.__init__(self)
self.data = data
def run(self):
img = Image.open(BytesIO(self.data))
img.show()
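# showpng wraps PIL's blocking Image.show() in a Thread so the QR polling
# loop (currently commented out in bzlogin below) could keep running while
# the QR code stays on screen.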
def islogin(session):
try:
session.cookies.load(ignore_discard=True)
except Exception:
pass
loginurl = session.get("https://api.bilibili.com/x/web-interface/nav", verify=False, headers=headers).json()
if loginurl['code'] == 0:
print('Cookies值有效,',loginurl['data']['uname'],',已登录!')
return session, True
else:
print('Cookies值已经失效,请重新扫码登录!')
return session, False
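# islogin() treats a reply of {"code": 0, "data": {"uname": ...}} from the
# nav endpoint as "cookies still valid"; any nonzero code means the saved
# cookies have expired and a fresh QR login is needed.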
def bzlogin():
if not os.path.exists('bzcookies.txt'):
with open("bzcookies.txt", 'w') as f:
f.write("")
session = requests.session()
session.cookies = cookielib.LWPCookieJar(filename='bzcookies.txt')
session, status = islogin(session)
if not status:
getlogin = session.get('https://passport.bilibili.com/qrcode/getLoginUrl', headers=headers).json()
loginurl = requests.get(getlogin['data']['url'], headers=headers).url
oauthKey = getlogin['data']['oauthKey']
qr = qrcode.QRCode()
qr.add_data(loginurl)
img = qr.make_image()
a = BytesIO()
img.save(a, 'png')
png = a.getvalue()
a.close()
        base64_data = base64.b64encode(png)  # base64-encode the PNG bytes (encoding, not encryption)
print(base64_data)
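        # base64_data is bytes; to embed the QR image in a web page it
        # would typically be decoded first, e.g. (illustrative only):
        # 'data:image/png;base64,' + base64_data.decode('ascii')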
        '''
        t = showpng(png)
        t.start()
        tokenurl = 'https://passport.bilibili.com/qrcode/getLoginInfo'
        while 1:
            qrcodedata = session.post(tokenurl, data={'oauthKey': oauthKey, 'gourl': 'https://www.bilibili.com/'}, headers=headerss).json()
            print(qrcodedata)
            if '-4' in str(qrcodedata['data']):
                print('QR code not expired yet, please scan!')
            elif '-5' in str(qrcodedata['data']):
                print('Scanned, please confirm on your phone!')
            elif '-2' in str(qrcodedata['data']):
                print('QR code has expired, please run the script again!')
            elif 'True' in str(qrcodedata['status']):
                print('Confirmed, login successful!')
                session.get(qrcodedata['data']['url'], headers=headers)
                break
            else:
                print('Other response:', qrcodedata)
            time.sleep(2)
        session.cookies.save()
        return session
    '''
if __name__ == '__main__':
bzlogin()
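# --- illustrative only (not in the original file) ---
# The base64 blob that bzlogin() prints can be decoded back into the PNG bytes
# that showpng displays, e.g. when the QR code must be rendered elsewhere:
#
#     import base64
#     from io import BytesIO
#     from PIL import Image
#     Image.open(BytesIO(base64.b64decode(base64_data))).show()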
|
flexible
|
{
"blob_id": "c268c61e47698d07b7c1461970dc47242af55777",
"index": 1637,
"step-1": "<mask token>\n\n\nclass showpng(Thread):\n\n def __init__(self, data):\n Thread.__init__(self)\n self.data = data\n\n def run(self):\n img = Image.open(BytesIO(self.data))\n img.show()\n\n\ndef islogin(session):\n try:\n session.cookies.load(ignore_discard=True)\n except Exception:\n pass\n loginurl = session.get('https://api.bilibili.com/x/web-interface/nav',\n verify=False, headers=headers).json()\n if loginurl['code'] == 0:\n print('Cookies值有效,', loginurl['data']['uname'], ',已登录!')\n return session, True\n else:\n print('Cookies值已经失效,请重新扫码登录!')\n return session, False\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass showpng(Thread):\n\n def __init__(self, data):\n Thread.__init__(self)\n self.data = data\n\n def run(self):\n img = Image.open(BytesIO(self.data))\n img.show()\n\n\ndef islogin(session):\n try:\n session.cookies.load(ignore_discard=True)\n except Exception:\n pass\n loginurl = session.get('https://api.bilibili.com/x/web-interface/nav',\n verify=False, headers=headers).json()\n if loginurl['code'] == 0:\n print('Cookies值有效,', loginurl['data']['uname'], ',已登录!')\n return session, True\n else:\n print('Cookies值已经失效,请重新扫码登录!')\n return session, False\n\n\ndef bzlogin():\n if not os.path.exists('bzcookies.txt'):\n with open('bzcookies.txt', 'w') as f:\n f.write('')\n session = requests.session()\n session.cookies = cookielib.LWPCookieJar(filename='bzcookies.txt')\n session, status = islogin(session)\n if not status:\n getlogin = session.get(\n 'https://passport.bilibili.com/qrcode/getLoginUrl', headers=headers\n ).json()\n loginurl = requests.get(getlogin['data']['url'], headers=headers).url\n oauthKey = getlogin['data']['oauthKey']\n qr = qrcode.QRCode()\n qr.add_data(loginurl)\n img = qr.make_image()\n a = BytesIO()\n img.save(a, 'png')\n png = a.getvalue()\n a.close()\n base64_data = base64.b64encode(png)\n print(base64_data)\n \"\"\"\n t = showpng(png)\n t.start()\n tokenurl = 'https://passport.bilibili.com/qrcode/getLoginInfo'\n while 1:\n qrcodedata = session.post(tokenurl, data={'oauthKey': oauthKey, 'gourl': 'https://www.bilibili.com/'}, headers=headerss).json()\n print(qrcodedata)\n if '-4' in str(qrcodedata['data']):\n print('二维码未失效,请扫码!')\n elif '-5' in str(qrcodedata['data']):\n print('已扫码,请确认!')\n elif '-2' in str(qrcodedata['data']):\n print('二维码已失效,请重新运行!')\n elif 'True' in str(qrcodedata['status']):\n print('已确认,登入成功!')\n session.get(qrcodedata['data']['url'], headers=headers)\n break\n else:\n print('其他:', qrcodedata)\n time.sleep(2)\n session.cookies.save()\n return session\n \"\"\"\n\n\n<mask token>\n",
"step-3": "<mask token>\nrequests.packages.urllib3.disable_warnings()\nua = UserAgent(path='ua.json')\nuser_agent = ua.chrome\nheaders = {'User-Agent': user_agent, 'Referer': 'https://www.bilibili.com/'}\nheaderss = {'User-Agent': user_agent, 'Host': 'passport.bilibili.com',\n 'Referer': 'https://passport.bilibili.com/login'}\n\n\nclass showpng(Thread):\n\n def __init__(self, data):\n Thread.__init__(self)\n self.data = data\n\n def run(self):\n img = Image.open(BytesIO(self.data))\n img.show()\n\n\ndef islogin(session):\n try:\n session.cookies.load(ignore_discard=True)\n except Exception:\n pass\n loginurl = session.get('https://api.bilibili.com/x/web-interface/nav',\n verify=False, headers=headers).json()\n if loginurl['code'] == 0:\n print('Cookies值有效,', loginurl['data']['uname'], ',已登录!')\n return session, True\n else:\n print('Cookies值已经失效,请重新扫码登录!')\n return session, False\n\n\ndef bzlogin():\n if not os.path.exists('bzcookies.txt'):\n with open('bzcookies.txt', 'w') as f:\n f.write('')\n session = requests.session()\n session.cookies = cookielib.LWPCookieJar(filename='bzcookies.txt')\n session, status = islogin(session)\n if not status:\n getlogin = session.get(\n 'https://passport.bilibili.com/qrcode/getLoginUrl', headers=headers\n ).json()\n loginurl = requests.get(getlogin['data']['url'], headers=headers).url\n oauthKey = getlogin['data']['oauthKey']\n qr = qrcode.QRCode()\n qr.add_data(loginurl)\n img = qr.make_image()\n a = BytesIO()\n img.save(a, 'png')\n png = a.getvalue()\n a.close()\n base64_data = base64.b64encode(png)\n print(base64_data)\n \"\"\"\n t = showpng(png)\n t.start()\n tokenurl = 'https://passport.bilibili.com/qrcode/getLoginInfo'\n while 1:\n qrcodedata = session.post(tokenurl, data={'oauthKey': oauthKey, 'gourl': 'https://www.bilibili.com/'}, headers=headerss).json()\n print(qrcodedata)\n if '-4' in str(qrcodedata['data']):\n print('二维码未失效,请扫码!')\n elif '-5' in str(qrcodedata['data']):\n print('已扫码,请确认!')\n elif '-2' in str(qrcodedata['data']):\n print('二维码已失效,请重新运行!')\n elif 'True' in str(qrcodedata['status']):\n print('已确认,登入成功!')\n session.get(qrcodedata['data']['url'], headers=headers)\n break\n else:\n print('其他:', qrcodedata)\n time.sleep(2)\n session.cookies.save()\n return session\n \"\"\"\n\n\nif __name__ == '__main__':\n bzlogin()\n",
"step-4": "import qrcode\nfrom fake_useragent import UserAgent\nfrom threading import Thread\nimport time, base64\nimport requests\nfrom io import BytesIO\nimport http.cookiejar as cookielib\nfrom PIL import Image\nimport os\nrequests.packages.urllib3.disable_warnings()\nua = UserAgent(path='ua.json')\nuser_agent = ua.chrome\nheaders = {'User-Agent': user_agent, 'Referer': 'https://www.bilibili.com/'}\nheaderss = {'User-Agent': user_agent, 'Host': 'passport.bilibili.com',\n 'Referer': 'https://passport.bilibili.com/login'}\n\n\nclass showpng(Thread):\n\n def __init__(self, data):\n Thread.__init__(self)\n self.data = data\n\n def run(self):\n img = Image.open(BytesIO(self.data))\n img.show()\n\n\ndef islogin(session):\n try:\n session.cookies.load(ignore_discard=True)\n except Exception:\n pass\n loginurl = session.get('https://api.bilibili.com/x/web-interface/nav',\n verify=False, headers=headers).json()\n if loginurl['code'] == 0:\n print('Cookies值有效,', loginurl['data']['uname'], ',已登录!')\n return session, True\n else:\n print('Cookies值已经失效,请重新扫码登录!')\n return session, False\n\n\ndef bzlogin():\n if not os.path.exists('bzcookies.txt'):\n with open('bzcookies.txt', 'w') as f:\n f.write('')\n session = requests.session()\n session.cookies = cookielib.LWPCookieJar(filename='bzcookies.txt')\n session, status = islogin(session)\n if not status:\n getlogin = session.get(\n 'https://passport.bilibili.com/qrcode/getLoginUrl', headers=headers\n ).json()\n loginurl = requests.get(getlogin['data']['url'], headers=headers).url\n oauthKey = getlogin['data']['oauthKey']\n qr = qrcode.QRCode()\n qr.add_data(loginurl)\n img = qr.make_image()\n a = BytesIO()\n img.save(a, 'png')\n png = a.getvalue()\n a.close()\n base64_data = base64.b64encode(png)\n print(base64_data)\n \"\"\"\n t = showpng(png)\n t.start()\n tokenurl = 'https://passport.bilibili.com/qrcode/getLoginInfo'\n while 1:\n qrcodedata = session.post(tokenurl, data={'oauthKey': oauthKey, 'gourl': 'https://www.bilibili.com/'}, headers=headerss).json()\n print(qrcodedata)\n if '-4' in str(qrcodedata['data']):\n print('二维码未失效,请扫码!')\n elif '-5' in str(qrcodedata['data']):\n print('已扫码,请确认!')\n elif '-2' in str(qrcodedata['data']):\n print('二维码已失效,请重新运行!')\n elif 'True' in str(qrcodedata['status']):\n print('已确认,登入成功!')\n session.get(qrcodedata['data']['url'], headers=headers)\n break\n else:\n print('其他:', qrcodedata)\n time.sleep(2)\n session.cookies.save()\n return session\n \"\"\"\n\n\nif __name__ == '__main__':\n bzlogin()\n",
"step-5": "# -*- coding: utf-8 -*-\r\n#借鉴的扫码单文件\r\nimport qrcode\r\nfrom fake_useragent import UserAgent\r\nfrom threading import Thread\r\nimport time, base64\r\nimport requests\r\nfrom io import BytesIO\r\nimport http.cookiejar as cookielib\r\nfrom PIL import Image\r\nimport os\r\n\r\nrequests.packages.urllib3.disable_warnings()\r\n\r\n\r\nua = UserAgent(path='ua.json')\r\nuser_agent = ua.chrome\r\n\r\nheaders = {'User-Agent': user_agent, 'Referer': \"https://www.bilibili.com/\"}\r\nheaderss = {'User-Agent': user_agent, 'Host': 'passport.bilibili.com','Referer': \"https://passport.bilibili.com/login\"}\r\n\r\n\r\nclass showpng(Thread):\r\n def __init__(self, data):\r\n Thread.__init__(self)\r\n self.data = data\r\n\r\n def run(self):\r\n img = Image.open(BytesIO(self.data))\r\n img.show()\r\n\r\n\r\ndef islogin(session):\r\n try:\r\n session.cookies.load(ignore_discard=True)\r\n except Exception:\r\n pass\r\n loginurl = session.get(\"https://api.bilibili.com/x/web-interface/nav\", verify=False, headers=headers).json()\r\n if loginurl['code'] == 0:\r\n print('Cookies值有效,',loginurl['data']['uname'],',已登录!')\r\n return session, True\r\n else:\r\n print('Cookies值已经失效,请重新扫码登录!')\r\n return session, False\r\n\r\n\r\ndef bzlogin():\r\n if not os.path.exists('bzcookies.txt'):\r\n with open(\"bzcookies.txt\", 'w') as f:\r\n f.write(\"\")\r\n session = requests.session()\r\n session.cookies = cookielib.LWPCookieJar(filename='bzcookies.txt')\r\n session, status = islogin(session)\r\n if not status:\r\n getlogin = session.get('https://passport.bilibili.com/qrcode/getLoginUrl', headers=headers).json()\r\n loginurl = requests.get(getlogin['data']['url'], headers=headers).url\r\n oauthKey = getlogin['data']['oauthKey']\r\n qr = qrcode.QRCode()\r\n qr.add_data(loginurl)\r\n img = qr.make_image()\r\n a = BytesIO()\r\n img.save(a, 'png')\r\n png = a.getvalue()\r\n a.close()\r\n base64_data = base64.b64encode(png) # 使用base64进行加密\r\n print(base64_data)\r\n '''\r\n t = showpng(png)\r\n t.start()\r\n tokenurl = 'https://passport.bilibili.com/qrcode/getLoginInfo'\r\n while 1:\r\n qrcodedata = session.post(tokenurl, data={'oauthKey': oauthKey, 'gourl': 'https://www.bilibili.com/'}, headers=headerss).json()\r\n print(qrcodedata)\r\n if '-4' in str(qrcodedata['data']):\r\n print('二维码未失效,请扫码!')\r\n elif '-5' in str(qrcodedata['data']):\r\n print('已扫码,请确认!')\r\n elif '-2' in str(qrcodedata['data']):\r\n print('二维码已失效,请重新运行!')\r\n elif 'True' in str(qrcodedata['status']):\r\n print('已确认,登入成功!')\r\n session.get(qrcodedata['data']['url'], headers=headers)\r\n break\r\n else:\r\n print('其他:', qrcodedata)\r\n time.sleep(2)\r\n session.cookies.save()\r\n return session\r\n '''\r\n\r\nif __name__ == '__main__':\r\n bzlogin()\r\n",
"step-ids": [
4,
5,
7,
8,
9
]
}
|
[
4,
5,
7,
8,
9
] |
# Generated by Django 2.2 on 2021-01-31 14:11
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('app', '0004_product_pr_number'),
]
operations = [
migrations.RemoveField(
model_name='payment',
name='PA_id',
),
migrations.AddField(
model_name='payment',
name='buyer',
field=models.CharField(default=0, max_length=32),
preserve_default=False,
),
migrations.AlterField(
model_name='payment',
name='PA_type',
field=models.CharField(default='credit', max_length=32),
),
]
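# --- illustrative only (not generated by Django) ---
# After this migration the affected Payment fields look roughly like:
#
#     class Payment(models.Model):
#         buyer = models.CharField(max_length=32)   # added; existing rows backfilled with the one-off default 0
#         PA_type = models.CharField(default='credit', max_length=32)
#         # PA_id has been removed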
|
normal
|
{
"blob_id": "388772386f25d6c2f9cc8778b7ce1b2ad0920851",
"index": 6986,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('app', '0004_product_pr_number')]\n operations = [migrations.RemoveField(model_name='payment', name='PA_id'\n ), migrations.AddField(model_name='payment', name='buyer', field=\n models.CharField(default=0, max_length=32), preserve_default=False),\n migrations.AlterField(model_name='payment', name='PA_type', field=\n models.CharField(default='credit', max_length=32))]\n",
"step-4": "from django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n dependencies = [('app', '0004_product_pr_number')]\n operations = [migrations.RemoveField(model_name='payment', name='PA_id'\n ), migrations.AddField(model_name='payment', name='buyer', field=\n models.CharField(default=0, max_length=32), preserve_default=False),\n migrations.AlterField(model_name='payment', name='PA_type', field=\n models.CharField(default='credit', max_length=32))]\n",
"step-5": "# Generated by Django 2.2 on 2021-01-31 14:11\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('app', '0004_product_pr_number'),\n ]\n\n operations = [\n migrations.RemoveField(\n model_name='payment',\n name='PA_id',\n ),\n migrations.AddField(\n model_name='payment',\n name='buyer',\n field=models.CharField(default=0, max_length=32),\n preserve_default=False,\n ),\n migrations.AlterField(\n model_name='payment',\n name='PA_type',\n field=models.CharField(default='credit', max_length=32),\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import sys

def caesar(plaintext, key):
    key = int(key)
    if key < 0:
        return
    shifted = []
    for char in plaintext:
        # Shift letters within their own alphabet, wrapping modulo 26;
        # non-letters pass through unchanged.
        if char.isupper():
            shifted.append((ord(char) - ord('A') + key) % 26 + ord('A'))
        elif char.islower():
            shifted.append((ord(char) - ord('a') + key) % 26 + ord('a'))
        else:
            shifted.append(ord(char))
    ciphertext = ''.join(chr(code) for code in shifted)
    print('ciphertext: {}'.format(ciphertext))

if __name__ == '__main__':
    if len(sys.argv) != 3:
        print('Usage: python caesar.py plaintext key')
    else:
        caesar(sys.argv[1], sys.argv[2])
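# Example runs (hypothetical shell session):
#   $ python caesar.py hello 2
#   ciphertext: jgnnq
#   $ python caesar.py Zebra 1
#   ciphertext: Afcsb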
|
flexible
|
{
"blob_id": "9a7c6998e9e486f0497d3684f9c7a422c8e13521",
"index": 7076,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef caesar(plaintext, key):\n if int(key) < 0:\n return\n plaintext_ascii = [(ord(char) + int(key)) for char in plaintext]\n for ascii in plaintext_ascii:\n if ascii < 97 and ascii > 90 or ascii > 122:\n ascii -= 25\n ciphertext = ''.join([chr(ascii) for ascii in plaintext_ascii])\n print('ciphertext: {}'.format(ciphertext))\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef caesar(plaintext, key):\n if int(key) < 0:\n return\n plaintext_ascii = [(ord(char) + int(key)) for char in plaintext]\n for ascii in plaintext_ascii:\n if ascii < 97 and ascii > 90 or ascii > 122:\n ascii -= 25\n ciphertext = ''.join([chr(ascii) for ascii in plaintext_ascii])\n print('ciphertext: {}'.format(ciphertext))\n\n\nif __name__ == '__main__':\n if len(sys.argv) is not 3:\n print('Usage: python caesar.py plaintext key')\n else:\n caesar(sys.argv[1], sys.argv[2])\n",
"step-4": "import sys\n\n\ndef caesar(plaintext, key):\n if int(key) < 0:\n return\n plaintext_ascii = [(ord(char) + int(key)) for char in plaintext]\n for ascii in plaintext_ascii:\n if ascii < 97 and ascii > 90 or ascii > 122:\n ascii -= 25\n ciphertext = ''.join([chr(ascii) for ascii in plaintext_ascii])\n print('ciphertext: {}'.format(ciphertext))\n\n\nif __name__ == '__main__':\n if len(sys.argv) is not 3:\n print('Usage: python caesar.py plaintext key')\n else:\n caesar(sys.argv[1], sys.argv[2])\n",
"step-5": "import sys\n\ndef caesar( plaintext, key ):\n if int( key ) < 0:\n return\n\n plaintext_ascii = [ ( ord( char ) + int( key ) ) for char in plaintext ]\n for ascii in plaintext_ascii:\n if ( ascii < 97 and ascii > 90 ) or ascii > 122:\n ascii -= 25\n\n ciphertext = ''.join( [ chr( ascii ) for ascii in plaintext_ascii ] )\n print( 'ciphertext: {}'.format( ciphertext ) )\n\nif __name__ == '__main__':\n if len( sys.argv ) is not 3:\n print( 'Usage: python caesar.py plaintext key' )\n else:\n caesar( sys.argv[1], sys.argv[2] )",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
#!/usr/bin/env python
import os, glob, argparse
from ROOT import TFile
def main():
parser = argparse.ArgumentParser(description='Program that takes as an argument a pattern of LEAF rootfiles (* wildcards work) enclosed by quotation marks ("<pattern>") and creates a rootfile with the MC b-tagging efficiencies. Assumes the b-tagging MC efficiency histogram folder inside the root files is called "BTaggingMCEfficiencies".')
    parser.add_argument('--input', '-i', required=True, type=str, help='Pattern of input LEAF rootfiles (wildcards allowed; enclose the pattern in quotation marks)')
parser.add_argument('--output', '-o', type=str, help='Name of the output .root file. Default is "BTaggingMCEfficiencies.root"')
args = parser.parse_args()
infilepattern = os.path.abspath(args.input)
outfilename = os.path.abspath(args.output) if args.output is not None else 'BTaggingMCEfficiencies.root'
infilenames = glob.glob(infilepattern)
foldername = 'BTaggingMCEfficiencies'
btag_histo_names = ['b_passing', 'b_total', 'c_passing', 'c_total', 'udsg_passing', 'udsg_total']
merged_histograms = []
for idx, infilename in enumerate(infilenames):
infile = TFile(infilename, 'READ')
for idx_hist, name in enumerate(btag_histo_names):
histname = foldername + '/' + name
if idx == 0:
hist = infile.Get(histname)
merged_histograms.append(hist)
hist.SetDirectory(0)
# if idx_hist == 5: print 'number of new entries:', hist.GetEntries()
else:
merged_histograms[idx_hist].Add(infile.Get(histname))
thishist = infile.Get(histname)
# if idx_hist == 5: print 'number of new entries:', thishist.GetEntries()
# print 'number of entries merged:', merged_histograms[5].GetEntries()
# print merged_histograms
outfile = TFile(outfilename, 'RECREATE')
# outhists = []
for idx, name in enumerate(btag_histo_names):
if 'passing' in name:
num = merged_histograms[idx]
for hist in merged_histograms:
if hist.GetName() == num.GetName().replace('passing', 'total'):
den = hist
            print(num.GetBinContent(4, 1), den.GetBinContent(4, 1))
            num.Divide(den)  # in-place: num becomes passing/total, i.e. the efficiency
            print(num.GetBinContent(4, 1))
num.SetName(num.GetName().replace('passing', 'efficiency'))
num.Write()
outfile.Close()
if __name__ == '__main__':
main()
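# --- readback sketch (illustrative only; not part of the original script) ---
# Divide() above turns each merged '<flavour>_passing' map into passing/total,
# and the renamed result is written out, so the output file holds top-level
# TH2Ds named b_efficiency, c_efficiency and udsg_efficiency:
#
#     from ROOT import TFile
#     f = TFile('BTaggingMCEfficiencies.root', 'READ')
#     print(f.Get('b_efficiency').GetBinContent(4, 1))
#     f.Close()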
|
normal
|
{
"blob_id": "44c04cf79d02823318b06f02af13973960413bea",
"index": 6915,
"step-1": "#!/usr/bin/env python\n\nimport os, glob, sys, math, time, argparse\nimport ROOT\nfrom ROOT import TFile, TTree, TH2D\n\n\n\ndef main():\n parser = argparse.ArgumentParser(description='Program that takes as an argument a pattern of LEAF rootfiles (* wildcards work) enclosed by quotation marks (\"<pattern>\") and creates a rootfile with the MC b-tagging efficiencies. Assumes the b-tagging MC efficiency histogram folder inside the root files is called \"BTaggingMCEfficiencies\".')\n parser.add_argument('--input', '-i', required=True, type=str, help='Name of the json converted to a .txt file')\n parser.add_argument('--output', '-o', type=str, help='Name of the output .root file. Default is \"BTaggingMCEfficiencies.root\"')\n\n args = parser.parse_args()\n infilepattern = os.path.abspath(args.input)\n outfilename = os.path.abspath(args.output) if args.output is not None else 'BTaggingMCEfficiencies.root'\n\n infilenames = glob.glob(infilepattern)\n foldername = 'BTaggingMCEfficiencies'\n\n btag_histo_names = ['b_passing', 'b_total', 'c_passing', 'c_total', 'udsg_passing', 'udsg_total']\n\n merged_histograms = []\n for idx, infilename in enumerate(infilenames):\n infile = TFile(infilename, 'READ')\n for idx_hist, name in enumerate(btag_histo_names):\n histname = foldername + '/' + name\n if idx == 0:\n hist = infile.Get(histname)\n merged_histograms.append(hist)\n hist.SetDirectory(0)\n # if idx_hist == 5: print 'number of new entries:', hist.GetEntries()\n else:\n merged_histograms[idx_hist].Add(infile.Get(histname))\n thishist = infile.Get(histname)\n # if idx_hist == 5: print 'number of new entries:', thishist.GetEntries()\n # print 'number of entries merged:', merged_histograms[5].GetEntries()\n # print merged_histograms\n\n outfile = TFile(outfilename, 'RECREATE')\n # outhists = []\n for idx, name in enumerate(btag_histo_names):\n if 'passing' in name:\n num = merged_histograms[idx]\n for hist in merged_histograms:\n if hist.GetName() == num.GetName().replace('passing', 'total'):\n den = hist\n print num.GetBinContent(4,1), den.GetBinContent(4,1)\n num.Divide(den)\n print num.GetBinContent(4,1)\n num.SetName(num.GetName().replace('passing', 'efficiency'))\n num.Write()\n outfile.Close()\n\n\n\n\n\n\n\n\n\n\n\n\n\nif __name__ == '__main__':\n main()\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
import io
import socket
import ssl
from ..exceptions import ProxySchemeUnsupported
from ..packages import six
SSL_BLOCKSIZE = 16384
class SSLTransport:
"""
The SSLTransport wraps an existing socket and establishes an SSL connection.
Contrary to Python's implementation of SSLSocket, it allows you to chain
multiple TLS connections together. It's particularly useful if you need to
implement TLS within TLS.
The class supports most of the socket API operations.
"""
@staticmethod
def _validate_ssl_context_for_tls_in_tls(ssl_context):
"""
Raises a ProxySchemeUnsupported if the provided ssl_context can't be used
for TLS in TLS.
The only requirement is that the ssl_context provides the 'wrap_bio'
methods.
"""
if not hasattr(ssl_context, "wrap_bio"):
if six.PY2:
raise ProxySchemeUnsupported(
"TLS in TLS requires SSLContext.wrap_bio() which isn't "
"supported on Python 2"
)
else:
raise ProxySchemeUnsupported(
"TLS in TLS requires SSLContext.wrap_bio() which isn't "
"available on non-native SSLContext"
)
def __init__(
self, socket, ssl_context, server_hostname=None, suppress_ragged_eofs=True
):
"""
Create an SSLTransport around socket using the provided ssl_context.
"""
self.incoming = ssl.MemoryBIO()
self.outgoing = ssl.MemoryBIO()
self.suppress_ragged_eofs = suppress_ragged_eofs
self.socket = socket
self.sslobj = ssl_context.wrap_bio(
self.incoming, self.outgoing, server_hostname=server_hostname
)
# Perform initial handshake.
self._ssl_io_loop(self.sslobj.do_handshake)
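    # Note: `self.sslobj` reads and writes only the two MemoryBIOs above;
    # _ssl_io_loop() pumps bytes between those BIOs and the real socket.
    # Because only the socket API is touched, `socket` may itself be another
    # SSLTransport, which is what enables TLS within TLS.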
def __enter__(self):
return self
def __exit__(self, *_):
self.close()
def fileno(self):
return self.socket.fileno()
def read(self, len=1024, buffer=None):
return self._wrap_ssl_read(len, buffer)
def recv(self, len=1024, flags=0):
if flags != 0:
raise ValueError("non-zero flags not allowed in calls to recv")
return self._wrap_ssl_read(len)
def recv_into(self, buffer, nbytes=None, flags=0):
if flags != 0:
raise ValueError("non-zero flags not allowed in calls to recv_into")
if buffer and (nbytes is None):
nbytes = len(buffer)
elif nbytes is None:
nbytes = 1024
return self.read(nbytes, buffer)
def sendall(self, data, flags=0):
if flags != 0:
raise ValueError("non-zero flags not allowed in calls to sendall")
count = 0
with memoryview(data) as view, view.cast("B") as byte_view:
amount = len(byte_view)
while count < amount:
v = self.send(byte_view[count:])
count += v
def send(self, data, flags=0):
if flags != 0:
raise ValueError("non-zero flags not allowed in calls to send")
response = self._ssl_io_loop(self.sslobj.write, data)
return response
def makefile(
self, mode="r", buffering=None, encoding=None, errors=None, newline=None
):
"""
Python's httpclient uses makefile and buffered io when reading HTTP
messages and we need to support it.
This is unfortunately a copy and paste of socket.py makefile with small
changes to point to the socket directly.
"""
if not set(mode) <= {"r", "w", "b"}:
raise ValueError("invalid mode %r (only r, w, b allowed)" % (mode,))
writing = "w" in mode
reading = "r" in mode or not writing
assert reading or writing
binary = "b" in mode
rawmode = ""
if reading:
rawmode += "r"
if writing:
rawmode += "w"
raw = socket.SocketIO(self, rawmode)
self.socket._io_refs += 1
if buffering is None:
buffering = -1
if buffering < 0:
buffering = io.DEFAULT_BUFFER_SIZE
if buffering == 0:
if not binary:
raise ValueError("unbuffered streams must be binary")
return raw
if reading and writing:
buffer = io.BufferedRWPair(raw, raw, buffering)
elif reading:
buffer = io.BufferedReader(raw, buffering)
else:
assert writing
buffer = io.BufferedWriter(raw, buffering)
if binary:
return buffer
text = io.TextIOWrapper(buffer, encoding, errors, newline)
text.mode = mode
return text
def unwrap(self):
self._ssl_io_loop(self.sslobj.unwrap)
def close(self):
self.socket.close()
def getpeercert(self, binary_form=False):
return self.sslobj.getpeercert(binary_form)
def version(self):
return self.sslobj.version()
def cipher(self):
return self.sslobj.cipher()
def selected_alpn_protocol(self):
return self.sslobj.selected_alpn_protocol()
def selected_npn_protocol(self):
return self.sslobj.selected_npn_protocol()
def shared_ciphers(self):
return self.sslobj.shared_ciphers()
def compression(self):
return self.sslobj.compression()
def settimeout(self, value):
self.socket.settimeout(value)
def gettimeout(self):
return self.socket.gettimeout()
def _decref_socketios(self):
self.socket._decref_socketios()
def _wrap_ssl_read(self, len, buffer=None):
try:
return self._ssl_io_loop(self.sslobj.read, len, buffer)
except ssl.SSLError as e:
if e.errno == ssl.SSL_ERROR_EOF and self.suppress_ragged_eofs:
return 0 # eof, return 0.
else:
raise
def _ssl_io_loop(self, func, *args):
"""Performs an I/O loop between incoming/outgoing and the socket."""
should_loop = True
ret = None
while should_loop:
errno = None
try:
ret = func(*args)
except ssl.SSLError as e:
if e.errno not in (ssl.SSL_ERROR_WANT_READ, ssl.SSL_ERROR_WANT_WRITE):
# WANT_READ, and WANT_WRITE are expected, others are not.
raise e
errno = e.errno
buf = self.outgoing.read()
self.socket.sendall(buf)
if errno is None:
should_loop = False
elif errno == ssl.SSL_ERROR_WANT_READ:
buf = self.socket.recv(SSL_BLOCKSIZE)
if buf:
self.incoming.write(buf)
else:
self.incoming.write_eof()
return ret
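# --- usage sketch (illustrative only; hostnames are placeholders and the HTTP
# CONNECT exchange with the proxy is omitted, so this won't run verbatim) ---
#
#     import socket, ssl
#     ctx = ssl.create_default_context()
#     SSLTransport._validate_ssl_context_for_tls_in_tls(ctx)
#     raw = socket.create_connection(('proxy.example', 8443))
#     outer = SSLTransport(raw, ctx, server_hostname='proxy.example')    # TLS to the proxy
#     # ... send HTTP CONNECT over `outer` and read the response here ...
#     inner = SSLTransport(outer, ctx, server_hostname='origin.example')  # TLS inside TLS
#     inner.sendall(b'GET / HTTP/1.1\r\nHost: origin.example\r\n\r\n')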
|
flexible
|
{
"blob_id": "78d59e903fecd211aa975ae4c8dc01b17c8fad44",
"index": 8471,
"step-1": "<mask token>\n\n\nclass SSLTransport:\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def __exit__(self, *_):\n self.close()\n\n def fileno(self):\n return self.socket.fileno()\n\n def read(self, len=1024, buffer=None):\n return self._wrap_ssl_read(len, buffer)\n\n def recv(self, len=1024, flags=0):\n if flags != 0:\n raise ValueError('non-zero flags not allowed in calls to recv')\n return self._wrap_ssl_read(len)\n\n def recv_into(self, buffer, nbytes=None, flags=0):\n if flags != 0:\n raise ValueError('non-zero flags not allowed in calls to recv_into'\n )\n if buffer and nbytes is None:\n nbytes = len(buffer)\n elif nbytes is None:\n nbytes = 1024\n return self.read(nbytes, buffer)\n\n def sendall(self, data, flags=0):\n if flags != 0:\n raise ValueError('non-zero flags not allowed in calls to sendall')\n count = 0\n with memoryview(data) as view, view.cast('B') as byte_view:\n amount = len(byte_view)\n while count < amount:\n v = self.send(byte_view[count:])\n count += v\n\n def send(self, data, flags=0):\n if flags != 0:\n raise ValueError('non-zero flags not allowed in calls to send')\n response = self._ssl_io_loop(self.sslobj.write, data)\n return response\n\n def makefile(self, mode='r', buffering=None, encoding=None, errors=None,\n newline=None):\n \"\"\"\n Python's httpclient uses makefile and buffered io when reading HTTP\n messages and we need to support it.\n\n This is unfortunately a copy and paste of socket.py makefile with small\n changes to point to the socket directly.\n \"\"\"\n if not set(mode) <= {'r', 'w', 'b'}:\n raise ValueError('invalid mode %r (only r, w, b allowed)' % (mode,)\n )\n writing = 'w' in mode\n reading = 'r' in mode or not writing\n assert reading or writing\n binary = 'b' in mode\n rawmode = ''\n if reading:\n rawmode += 'r'\n if writing:\n rawmode += 'w'\n raw = socket.SocketIO(self, rawmode)\n self.socket._io_refs += 1\n if buffering is None:\n buffering = -1\n if buffering < 0:\n buffering = io.DEFAULT_BUFFER_SIZE\n if buffering == 0:\n if not binary:\n raise ValueError('unbuffered streams must be binary')\n return raw\n if reading and writing:\n buffer = io.BufferedRWPair(raw, raw, buffering)\n elif reading:\n buffer = io.BufferedReader(raw, buffering)\n else:\n assert writing\n buffer = io.BufferedWriter(raw, buffering)\n if binary:\n return buffer\n text = io.TextIOWrapper(buffer, encoding, errors, newline)\n text.mode = mode\n return text\n <mask token>\n\n def close(self):\n self.socket.close()\n <mask token>\n\n def version(self):\n return self.sslobj.version()\n\n def cipher(self):\n return self.sslobj.cipher()\n\n def selected_alpn_protocol(self):\n return self.sslobj.selected_alpn_protocol()\n\n def selected_npn_protocol(self):\n return self.sslobj.selected_npn_protocol()\n\n def shared_ciphers(self):\n return self.sslobj.shared_ciphers()\n\n def compression(self):\n return self.sslobj.compression()\n <mask token>\n <mask token>\n\n def _decref_socketios(self):\n self.socket._decref_socketios()\n\n def _wrap_ssl_read(self, len, buffer=None):\n try:\n return self._ssl_io_loop(self.sslobj.read, len, buffer)\n except ssl.SSLError as e:\n if e.errno == ssl.SSL_ERROR_EOF and self.suppress_ragged_eofs:\n return 0\n else:\n raise\n\n def _ssl_io_loop(self, func, *args):\n \"\"\"Performs an I/O loop between incoming/outgoing and the socket.\"\"\"\n should_loop = True\n ret = None\n while should_loop:\n errno = None\n try:\n ret = func(*args)\n except ssl.SSLError as e:\n if e.errno not in (ssl.SSL_ERROR_WANT_READ, ssl.\n 
SSL_ERROR_WANT_WRITE):\n raise e\n errno = e.errno\n buf = self.outgoing.read()\n self.socket.sendall(buf)\n if errno is None:\n should_loop = False\n elif errno == ssl.SSL_ERROR_WANT_READ:\n buf = self.socket.recv(SSL_BLOCKSIZE)\n if buf:\n self.incoming.write(buf)\n else:\n self.incoming.write_eof()\n return ret\n",
"step-2": "<mask token>\n\n\nclass SSLTransport:\n <mask token>\n <mask token>\n\n def __init__(self, socket, ssl_context, server_hostname=None,\n suppress_ragged_eofs=True):\n \"\"\"\n Create an SSLTransport around socket using the provided ssl_context.\n \"\"\"\n self.incoming = ssl.MemoryBIO()\n self.outgoing = ssl.MemoryBIO()\n self.suppress_ragged_eofs = suppress_ragged_eofs\n self.socket = socket\n self.sslobj = ssl_context.wrap_bio(self.incoming, self.outgoing,\n server_hostname=server_hostname)\n self._ssl_io_loop(self.sslobj.do_handshake)\n\n def __enter__(self):\n return self\n\n def __exit__(self, *_):\n self.close()\n\n def fileno(self):\n return self.socket.fileno()\n\n def read(self, len=1024, buffer=None):\n return self._wrap_ssl_read(len, buffer)\n\n def recv(self, len=1024, flags=0):\n if flags != 0:\n raise ValueError('non-zero flags not allowed in calls to recv')\n return self._wrap_ssl_read(len)\n\n def recv_into(self, buffer, nbytes=None, flags=0):\n if flags != 0:\n raise ValueError('non-zero flags not allowed in calls to recv_into'\n )\n if buffer and nbytes is None:\n nbytes = len(buffer)\n elif nbytes is None:\n nbytes = 1024\n return self.read(nbytes, buffer)\n\n def sendall(self, data, flags=0):\n if flags != 0:\n raise ValueError('non-zero flags not allowed in calls to sendall')\n count = 0\n with memoryview(data) as view, view.cast('B') as byte_view:\n amount = len(byte_view)\n while count < amount:\n v = self.send(byte_view[count:])\n count += v\n\n def send(self, data, flags=0):\n if flags != 0:\n raise ValueError('non-zero flags not allowed in calls to send')\n response = self._ssl_io_loop(self.sslobj.write, data)\n return response\n\n def makefile(self, mode='r', buffering=None, encoding=None, errors=None,\n newline=None):\n \"\"\"\n Python's httpclient uses makefile and buffered io when reading HTTP\n messages and we need to support it.\n\n This is unfortunately a copy and paste of socket.py makefile with small\n changes to point to the socket directly.\n \"\"\"\n if not set(mode) <= {'r', 'w', 'b'}:\n raise ValueError('invalid mode %r (only r, w, b allowed)' % (mode,)\n )\n writing = 'w' in mode\n reading = 'r' in mode or not writing\n assert reading or writing\n binary = 'b' in mode\n rawmode = ''\n if reading:\n rawmode += 'r'\n if writing:\n rawmode += 'w'\n raw = socket.SocketIO(self, rawmode)\n self.socket._io_refs += 1\n if buffering is None:\n buffering = -1\n if buffering < 0:\n buffering = io.DEFAULT_BUFFER_SIZE\n if buffering == 0:\n if not binary:\n raise ValueError('unbuffered streams must be binary')\n return raw\n if reading and writing:\n buffer = io.BufferedRWPair(raw, raw, buffering)\n elif reading:\n buffer = io.BufferedReader(raw, buffering)\n else:\n assert writing\n buffer = io.BufferedWriter(raw, buffering)\n if binary:\n return buffer\n text = io.TextIOWrapper(buffer, encoding, errors, newline)\n text.mode = mode\n return text\n <mask token>\n\n def close(self):\n self.socket.close()\n <mask token>\n\n def version(self):\n return self.sslobj.version()\n\n def cipher(self):\n return self.sslobj.cipher()\n\n def selected_alpn_protocol(self):\n return self.sslobj.selected_alpn_protocol()\n\n def selected_npn_protocol(self):\n return self.sslobj.selected_npn_protocol()\n\n def shared_ciphers(self):\n return self.sslobj.shared_ciphers()\n\n def compression(self):\n return self.sslobj.compression()\n\n def settimeout(self, value):\n self.socket.settimeout(value)\n\n def gettimeout(self):\n return self.socket.gettimeout()\n\n def 
_decref_socketios(self):\n self.socket._decref_socketios()\n\n def _wrap_ssl_read(self, len, buffer=None):\n try:\n return self._ssl_io_loop(self.sslobj.read, len, buffer)\n except ssl.SSLError as e:\n if e.errno == ssl.SSL_ERROR_EOF and self.suppress_ragged_eofs:\n return 0\n else:\n raise\n\n def _ssl_io_loop(self, func, *args):\n \"\"\"Performs an I/O loop between incoming/outgoing and the socket.\"\"\"\n should_loop = True\n ret = None\n while should_loop:\n errno = None\n try:\n ret = func(*args)\n except ssl.SSLError as e:\n if e.errno not in (ssl.SSL_ERROR_WANT_READ, ssl.\n SSL_ERROR_WANT_WRITE):\n raise e\n errno = e.errno\n buf = self.outgoing.read()\n self.socket.sendall(buf)\n if errno is None:\n should_loop = False\n elif errno == ssl.SSL_ERROR_WANT_READ:\n buf = self.socket.recv(SSL_BLOCKSIZE)\n if buf:\n self.incoming.write(buf)\n else:\n self.incoming.write_eof()\n return ret\n",
"step-3": "<mask token>\n\n\nclass SSLTransport:\n \"\"\"\n The SSLTransport wraps an existing socket and establishes an SSL connection.\n\n Contrary to Python's implementation of SSLSocket, it allows you to chain\n multiple TLS connections together. It's particularly useful if you need to\n implement TLS within TLS.\n\n The class supports most of the socket API operations.\n \"\"\"\n\n @staticmethod\n def _validate_ssl_context_for_tls_in_tls(ssl_context):\n \"\"\"\n Raises a ProxySchemeUnsupported if the provided ssl_context can't be used\n for TLS in TLS.\n\n The only requirement is that the ssl_context provides the 'wrap_bio'\n methods.\n \"\"\"\n if not hasattr(ssl_context, 'wrap_bio'):\n if six.PY2:\n raise ProxySchemeUnsupported(\n \"TLS in TLS requires SSLContext.wrap_bio() which isn't supported on Python 2\"\n )\n else:\n raise ProxySchemeUnsupported(\n \"TLS in TLS requires SSLContext.wrap_bio() which isn't available on non-native SSLContext\"\n )\n\n def __init__(self, socket, ssl_context, server_hostname=None,\n suppress_ragged_eofs=True):\n \"\"\"\n Create an SSLTransport around socket using the provided ssl_context.\n \"\"\"\n self.incoming = ssl.MemoryBIO()\n self.outgoing = ssl.MemoryBIO()\n self.suppress_ragged_eofs = suppress_ragged_eofs\n self.socket = socket\n self.sslobj = ssl_context.wrap_bio(self.incoming, self.outgoing,\n server_hostname=server_hostname)\n self._ssl_io_loop(self.sslobj.do_handshake)\n\n def __enter__(self):\n return self\n\n def __exit__(self, *_):\n self.close()\n\n def fileno(self):\n return self.socket.fileno()\n\n def read(self, len=1024, buffer=None):\n return self._wrap_ssl_read(len, buffer)\n\n def recv(self, len=1024, flags=0):\n if flags != 0:\n raise ValueError('non-zero flags not allowed in calls to recv')\n return self._wrap_ssl_read(len)\n\n def recv_into(self, buffer, nbytes=None, flags=0):\n if flags != 0:\n raise ValueError('non-zero flags not allowed in calls to recv_into'\n )\n if buffer and nbytes is None:\n nbytes = len(buffer)\n elif nbytes is None:\n nbytes = 1024\n return self.read(nbytes, buffer)\n\n def sendall(self, data, flags=0):\n if flags != 0:\n raise ValueError('non-zero flags not allowed in calls to sendall')\n count = 0\n with memoryview(data) as view, view.cast('B') as byte_view:\n amount = len(byte_view)\n while count < amount:\n v = self.send(byte_view[count:])\n count += v\n\n def send(self, data, flags=0):\n if flags != 0:\n raise ValueError('non-zero flags not allowed in calls to send')\n response = self._ssl_io_loop(self.sslobj.write, data)\n return response\n\n def makefile(self, mode='r', buffering=None, encoding=None, errors=None,\n newline=None):\n \"\"\"\n Python's httpclient uses makefile and buffered io when reading HTTP\n messages and we need to support it.\n\n This is unfortunately a copy and paste of socket.py makefile with small\n changes to point to the socket directly.\n \"\"\"\n if not set(mode) <= {'r', 'w', 'b'}:\n raise ValueError('invalid mode %r (only r, w, b allowed)' % (mode,)\n )\n writing = 'w' in mode\n reading = 'r' in mode or not writing\n assert reading or writing\n binary = 'b' in mode\n rawmode = ''\n if reading:\n rawmode += 'r'\n if writing:\n rawmode += 'w'\n raw = socket.SocketIO(self, rawmode)\n self.socket._io_refs += 1\n if buffering is None:\n buffering = -1\n if buffering < 0:\n buffering = io.DEFAULT_BUFFER_SIZE\n if buffering == 0:\n if not binary:\n raise ValueError('unbuffered streams must be binary')\n return raw\n if reading and writing:\n buffer = 
io.BufferedRWPair(raw, raw, buffering)\n elif reading:\n buffer = io.BufferedReader(raw, buffering)\n else:\n assert writing\n buffer = io.BufferedWriter(raw, buffering)\n if binary:\n return buffer\n text = io.TextIOWrapper(buffer, encoding, errors, newline)\n text.mode = mode\n return text\n\n def unwrap(self):\n self._ssl_io_loop(self.sslobj.unwrap)\n\n def close(self):\n self.socket.close()\n\n def getpeercert(self, binary_form=False):\n return self.sslobj.getpeercert(binary_form)\n\n def version(self):\n return self.sslobj.version()\n\n def cipher(self):\n return self.sslobj.cipher()\n\n def selected_alpn_protocol(self):\n return self.sslobj.selected_alpn_protocol()\n\n def selected_npn_protocol(self):\n return self.sslobj.selected_npn_protocol()\n\n def shared_ciphers(self):\n return self.sslobj.shared_ciphers()\n\n def compression(self):\n return self.sslobj.compression()\n\n def settimeout(self, value):\n self.socket.settimeout(value)\n\n def gettimeout(self):\n return self.socket.gettimeout()\n\n def _decref_socketios(self):\n self.socket._decref_socketios()\n\n def _wrap_ssl_read(self, len, buffer=None):\n try:\n return self._ssl_io_loop(self.sslobj.read, len, buffer)\n except ssl.SSLError as e:\n if e.errno == ssl.SSL_ERROR_EOF and self.suppress_ragged_eofs:\n return 0\n else:\n raise\n\n def _ssl_io_loop(self, func, *args):\n \"\"\"Performs an I/O loop between incoming/outgoing and the socket.\"\"\"\n should_loop = True\n ret = None\n while should_loop:\n errno = None\n try:\n ret = func(*args)\n except ssl.SSLError as e:\n if e.errno not in (ssl.SSL_ERROR_WANT_READ, ssl.\n SSL_ERROR_WANT_WRITE):\n raise e\n errno = e.errno\n buf = self.outgoing.read()\n self.socket.sendall(buf)\n if errno is None:\n should_loop = False\n elif errno == ssl.SSL_ERROR_WANT_READ:\n buf = self.socket.recv(SSL_BLOCKSIZE)\n if buf:\n self.incoming.write(buf)\n else:\n self.incoming.write_eof()\n return ret\n",
"step-4": "<mask token>\nSSL_BLOCKSIZE = 16384\n\n\nclass SSLTransport:\n \"\"\"\n The SSLTransport wraps an existing socket and establishes an SSL connection.\n\n Contrary to Python's implementation of SSLSocket, it allows you to chain\n multiple TLS connections together. It's particularly useful if you need to\n implement TLS within TLS.\n\n The class supports most of the socket API operations.\n \"\"\"\n\n @staticmethod\n def _validate_ssl_context_for_tls_in_tls(ssl_context):\n \"\"\"\n Raises a ProxySchemeUnsupported if the provided ssl_context can't be used\n for TLS in TLS.\n\n The only requirement is that the ssl_context provides the 'wrap_bio'\n methods.\n \"\"\"\n if not hasattr(ssl_context, 'wrap_bio'):\n if six.PY2:\n raise ProxySchemeUnsupported(\n \"TLS in TLS requires SSLContext.wrap_bio() which isn't supported on Python 2\"\n )\n else:\n raise ProxySchemeUnsupported(\n \"TLS in TLS requires SSLContext.wrap_bio() which isn't available on non-native SSLContext\"\n )\n\n def __init__(self, socket, ssl_context, server_hostname=None,\n suppress_ragged_eofs=True):\n \"\"\"\n Create an SSLTransport around socket using the provided ssl_context.\n \"\"\"\n self.incoming = ssl.MemoryBIO()\n self.outgoing = ssl.MemoryBIO()\n self.suppress_ragged_eofs = suppress_ragged_eofs\n self.socket = socket\n self.sslobj = ssl_context.wrap_bio(self.incoming, self.outgoing,\n server_hostname=server_hostname)\n self._ssl_io_loop(self.sslobj.do_handshake)\n\n def __enter__(self):\n return self\n\n def __exit__(self, *_):\n self.close()\n\n def fileno(self):\n return self.socket.fileno()\n\n def read(self, len=1024, buffer=None):\n return self._wrap_ssl_read(len, buffer)\n\n def recv(self, len=1024, flags=0):\n if flags != 0:\n raise ValueError('non-zero flags not allowed in calls to recv')\n return self._wrap_ssl_read(len)\n\n def recv_into(self, buffer, nbytes=None, flags=0):\n if flags != 0:\n raise ValueError('non-zero flags not allowed in calls to recv_into'\n )\n if buffer and nbytes is None:\n nbytes = len(buffer)\n elif nbytes is None:\n nbytes = 1024\n return self.read(nbytes, buffer)\n\n def sendall(self, data, flags=0):\n if flags != 0:\n raise ValueError('non-zero flags not allowed in calls to sendall')\n count = 0\n with memoryview(data) as view, view.cast('B') as byte_view:\n amount = len(byte_view)\n while count < amount:\n v = self.send(byte_view[count:])\n count += v\n\n def send(self, data, flags=0):\n if flags != 0:\n raise ValueError('non-zero flags not allowed in calls to send')\n response = self._ssl_io_loop(self.sslobj.write, data)\n return response\n\n def makefile(self, mode='r', buffering=None, encoding=None, errors=None,\n newline=None):\n \"\"\"\n Python's httpclient uses makefile and buffered io when reading HTTP\n messages and we need to support it.\n\n This is unfortunately a copy and paste of socket.py makefile with small\n changes to point to the socket directly.\n \"\"\"\n if not set(mode) <= {'r', 'w', 'b'}:\n raise ValueError('invalid mode %r (only r, w, b allowed)' % (mode,)\n )\n writing = 'w' in mode\n reading = 'r' in mode or not writing\n assert reading or writing\n binary = 'b' in mode\n rawmode = ''\n if reading:\n rawmode += 'r'\n if writing:\n rawmode += 'w'\n raw = socket.SocketIO(self, rawmode)\n self.socket._io_refs += 1\n if buffering is None:\n buffering = -1\n if buffering < 0:\n buffering = io.DEFAULT_BUFFER_SIZE\n if buffering == 0:\n if not binary:\n raise ValueError('unbuffered streams must be binary')\n return raw\n if reading and writing:\n 
buffer = io.BufferedRWPair(raw, raw, buffering)\n elif reading:\n buffer = io.BufferedReader(raw, buffering)\n else:\n assert writing\n buffer = io.BufferedWriter(raw, buffering)\n if binary:\n return buffer\n text = io.TextIOWrapper(buffer, encoding, errors, newline)\n text.mode = mode\n return text\n\n def unwrap(self):\n self._ssl_io_loop(self.sslobj.unwrap)\n\n def close(self):\n self.socket.close()\n\n def getpeercert(self, binary_form=False):\n return self.sslobj.getpeercert(binary_form)\n\n def version(self):\n return self.sslobj.version()\n\n def cipher(self):\n return self.sslobj.cipher()\n\n def selected_alpn_protocol(self):\n return self.sslobj.selected_alpn_protocol()\n\n def selected_npn_protocol(self):\n return self.sslobj.selected_npn_protocol()\n\n def shared_ciphers(self):\n return self.sslobj.shared_ciphers()\n\n def compression(self):\n return self.sslobj.compression()\n\n def settimeout(self, value):\n self.socket.settimeout(value)\n\n def gettimeout(self):\n return self.socket.gettimeout()\n\n def _decref_socketios(self):\n self.socket._decref_socketios()\n\n def _wrap_ssl_read(self, len, buffer=None):\n try:\n return self._ssl_io_loop(self.sslobj.read, len, buffer)\n except ssl.SSLError as e:\n if e.errno == ssl.SSL_ERROR_EOF and self.suppress_ragged_eofs:\n return 0\n else:\n raise\n\n def _ssl_io_loop(self, func, *args):\n \"\"\"Performs an I/O loop between incoming/outgoing and the socket.\"\"\"\n should_loop = True\n ret = None\n while should_loop:\n errno = None\n try:\n ret = func(*args)\n except ssl.SSLError as e:\n if e.errno not in (ssl.SSL_ERROR_WANT_READ, ssl.\n SSL_ERROR_WANT_WRITE):\n raise e\n errno = e.errno\n buf = self.outgoing.read()\n self.socket.sendall(buf)\n if errno is None:\n should_loop = False\n elif errno == ssl.SSL_ERROR_WANT_READ:\n buf = self.socket.recv(SSL_BLOCKSIZE)\n if buf:\n self.incoming.write(buf)\n else:\n self.incoming.write_eof()\n return ret\n",
"step-5": "import io\nimport socket\nimport ssl\n\nfrom ..exceptions import ProxySchemeUnsupported\nfrom ..packages import six\n\nSSL_BLOCKSIZE = 16384\n\n\nclass SSLTransport:\n \"\"\"\n The SSLTransport wraps an existing socket and establishes an SSL connection.\n\n Contrary to Python's implementation of SSLSocket, it allows you to chain\n multiple TLS connections together. It's particularly useful if you need to\n implement TLS within TLS.\n\n The class supports most of the socket API operations.\n \"\"\"\n\n @staticmethod\n def _validate_ssl_context_for_tls_in_tls(ssl_context):\n \"\"\"\n Raises a ProxySchemeUnsupported if the provided ssl_context can't be used\n for TLS in TLS.\n\n The only requirement is that the ssl_context provides the 'wrap_bio'\n methods.\n \"\"\"\n\n if not hasattr(ssl_context, \"wrap_bio\"):\n if six.PY2:\n raise ProxySchemeUnsupported(\n \"TLS in TLS requires SSLContext.wrap_bio() which isn't \"\n \"supported on Python 2\"\n )\n else:\n raise ProxySchemeUnsupported(\n \"TLS in TLS requires SSLContext.wrap_bio() which isn't \"\n \"available on non-native SSLContext\"\n )\n\n def __init__(\n self, socket, ssl_context, server_hostname=None, suppress_ragged_eofs=True\n ):\n \"\"\"\n Create an SSLTransport around socket using the provided ssl_context.\n \"\"\"\n self.incoming = ssl.MemoryBIO()\n self.outgoing = ssl.MemoryBIO()\n\n self.suppress_ragged_eofs = suppress_ragged_eofs\n self.socket = socket\n\n self.sslobj = ssl_context.wrap_bio(\n self.incoming, self.outgoing, server_hostname=server_hostname\n )\n\n # Perform initial handshake.\n self._ssl_io_loop(self.sslobj.do_handshake)\n\n def __enter__(self):\n return self\n\n def __exit__(self, *_):\n self.close()\n\n def fileno(self):\n return self.socket.fileno()\n\n def read(self, len=1024, buffer=None):\n return self._wrap_ssl_read(len, buffer)\n\n def recv(self, len=1024, flags=0):\n if flags != 0:\n raise ValueError(\"non-zero flags not allowed in calls to recv\")\n return self._wrap_ssl_read(len)\n\n def recv_into(self, buffer, nbytes=None, flags=0):\n if flags != 0:\n raise ValueError(\"non-zero flags not allowed in calls to recv_into\")\n if buffer and (nbytes is None):\n nbytes = len(buffer)\n elif nbytes is None:\n nbytes = 1024\n return self.read(nbytes, buffer)\n\n def sendall(self, data, flags=0):\n if flags != 0:\n raise ValueError(\"non-zero flags not allowed in calls to sendall\")\n count = 0\n with memoryview(data) as view, view.cast(\"B\") as byte_view:\n amount = len(byte_view)\n while count < amount:\n v = self.send(byte_view[count:])\n count += v\n\n def send(self, data, flags=0):\n if flags != 0:\n raise ValueError(\"non-zero flags not allowed in calls to send\")\n response = self._ssl_io_loop(self.sslobj.write, data)\n return response\n\n def makefile(\n self, mode=\"r\", buffering=None, encoding=None, errors=None, newline=None\n ):\n \"\"\"\n Python's httpclient uses makefile and buffered io when reading HTTP\n messages and we need to support it.\n\n This is unfortunately a copy and paste of socket.py makefile with small\n changes to point to the socket directly.\n \"\"\"\n if not set(mode) <= {\"r\", \"w\", \"b\"}:\n raise ValueError(\"invalid mode %r (only r, w, b allowed)\" % (mode,))\n\n writing = \"w\" in mode\n reading = \"r\" in mode or not writing\n assert reading or writing\n binary = \"b\" in mode\n rawmode = \"\"\n if reading:\n rawmode += \"r\"\n if writing:\n rawmode += \"w\"\n raw = socket.SocketIO(self, rawmode)\n self.socket._io_refs += 1\n if buffering is None:\n buffering 
= -1\n if buffering < 0:\n buffering = io.DEFAULT_BUFFER_SIZE\n if buffering == 0:\n if not binary:\n raise ValueError(\"unbuffered streams must be binary\")\n return raw\n if reading and writing:\n buffer = io.BufferedRWPair(raw, raw, buffering)\n elif reading:\n buffer = io.BufferedReader(raw, buffering)\n else:\n assert writing\n buffer = io.BufferedWriter(raw, buffering)\n if binary:\n return buffer\n text = io.TextIOWrapper(buffer, encoding, errors, newline)\n text.mode = mode\n return text\n\n def unwrap(self):\n self._ssl_io_loop(self.sslobj.unwrap)\n\n def close(self):\n self.socket.close()\n\n def getpeercert(self, binary_form=False):\n return self.sslobj.getpeercert(binary_form)\n\n def version(self):\n return self.sslobj.version()\n\n def cipher(self):\n return self.sslobj.cipher()\n\n def selected_alpn_protocol(self):\n return self.sslobj.selected_alpn_protocol()\n\n def selected_npn_protocol(self):\n return self.sslobj.selected_npn_protocol()\n\n def shared_ciphers(self):\n return self.sslobj.shared_ciphers()\n\n def compression(self):\n return self.sslobj.compression()\n\n def settimeout(self, value):\n self.socket.settimeout(value)\n\n def gettimeout(self):\n return self.socket.gettimeout()\n\n def _decref_socketios(self):\n self.socket._decref_socketios()\n\n def _wrap_ssl_read(self, len, buffer=None):\n try:\n return self._ssl_io_loop(self.sslobj.read, len, buffer)\n except ssl.SSLError as e:\n if e.errno == ssl.SSL_ERROR_EOF and self.suppress_ragged_eofs:\n return 0 # eof, return 0.\n else:\n raise\n\n def _ssl_io_loop(self, func, *args):\n \"\"\"Performs an I/O loop between incoming/outgoing and the socket.\"\"\"\n should_loop = True\n ret = None\n\n while should_loop:\n errno = None\n try:\n ret = func(*args)\n except ssl.SSLError as e:\n if e.errno not in (ssl.SSL_ERROR_WANT_READ, ssl.SSL_ERROR_WANT_WRITE):\n # WANT_READ, and WANT_WRITE are expected, others are not.\n raise e\n errno = e.errno\n\n buf = self.outgoing.read()\n self.socket.sendall(buf)\n\n if errno is None:\n should_loop = False\n elif errno == ssl.SSL_ERROR_WANT_READ:\n buf = self.socket.recv(SSL_BLOCKSIZE)\n if buf:\n self.incoming.write(buf)\n else:\n self.incoming.write_eof()\n return ret\n",
"step-ids": [
19,
23,
27,
28,
30
]
}
|
[
19,
23,
27,
28,
30
] |
from manimlib.imports import *
import math
class A_Swerve(Scene):
def construct(self):
chassis = Square(side_length=2, stroke_width=0, fill_color=GRAY, fill_opacity=1).shift(2*RIGHT)
fr = Dot().shift(UP+3*RIGHT)
fl = Dot().shift(UP+RIGHT)
rl = Dot().shift(DOWN+RIGHT)
rr = Dot().shift(DOWN+3*RIGHT)
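        # ValueTrackers stand in for the driver's joystick inputs: the left
        # stick (x, y) commands translation, the right stick commands rotation.
        # y starts just off zero, presumably so the wheel arrows never collapse
        # to zero length.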
x_tracker = ValueTracker(0)
y_tracker = ValueTracker(0.001)
rot_tracker = ValueTracker(0)
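        # The updaters below recompute each wheel's velocity vector from the
        # current tracker values, so the arrows redraw on every frame.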
def updateFRArrow(arrow):
vector = calculateVectors(y_tracker.get_value(), x_tracker.get_value(), rot_tracker.get_value(), 0)[0]
arrow.put_start_and_end_on(UP+3*RIGHT, np.array(UP+3*RIGHT+vector[0]*np.cos(np.radians(vector[1]))*UP+(vector[0]*np.sin(np.radians(vector[1]))*RIGHT)))
def updateFLArrow(arrow):
vector = calculateVectors(y_tracker.get_value(), x_tracker.get_value(), rot_tracker.get_value(), 0)[1]
arrow.put_start_and_end_on(UP+RIGHT, np.array(UP+RIGHT+vector[0]*np.cos(np.radians(vector[1]))*UP+(vector[0]*np.sin(np.radians(vector[1]))*RIGHT)))
def updateRLArrow(arrow):
vector = calculateVectors(y_tracker.get_value(), x_tracker.get_value(), rot_tracker.get_value(), 0)[2]
arrow.put_start_and_end_on(DOWN+RIGHT, np.array(DOWN+RIGHT+vector[0]*np.cos(np.radians(vector[1]))*UP+(vector[0]*np.sin(np.radians(vector[1]))*RIGHT)))
def updateRRArrow(arrow):
vector = calculateVectors(y_tracker.get_value(), x_tracker.get_value(), rot_tracker.get_value(), 0)[3]
arrow.put_start_and_end_on(DOWN+3*RIGHT, np.array(DOWN+3*RIGHT+vector[0]*np.cos(np.radians(vector[1]))*UP+(vector[0]*np.sin(np.radians(vector[1]))*RIGHT)))
fr_vector = Arrow()
fr_vector.add_updater(updateFRArrow)
fl_vector = Arrow()
fl_vector.add_updater(updateFLArrow)
rl_vector = Arrow()
rl_vector.add_updater(updateRLArrow)
rr_vector = Arrow()
rr_vector.add_updater(updateRRArrow)
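        # Two gamepad-style sticks; each knob's position follows the trackers.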
left_pad = Circle(radius=0.5).move_to(3*LEFT)
left_stick = Circle(radius=0.25, fill_color=WHITE, fill_opacity=1).move_to(3*LEFT)
left_stick.add_updater(lambda x: x.move_to(3*LEFT+0.4*x_tracker.get_value()*RIGHT+0.4*y_tracker.get_value()*UP))
right_pad = Circle(radius=0.5).move_to(1*LEFT)
right_stick = Circle(radius=0.25, fill_color=WHITE, fill_opacity=1).move_to(1*LEFT)
right_stick.add_updater(lambda x: x.move_to(1*LEFT+0.4*rot_tracker.get_value()*RIGHT))
self.play(FadeIn(chassis), ShowCreation(fr), ShowCreation(fl), ShowCreation(rl), ShowCreation(rr))
self.play(ShowCreation(left_pad), ShowCreation(left_stick), ShowCreation(right_pad), ShowCreation(right_stick))
self.play(ShowCreation(fr_vector), ShowCreation(fl_vector), ShowCreation(rl_vector), ShowCreation(rr_vector))
self.wait(1)
# Full forward
self.play(ApplyMethod(y_tracker.set_value, 1, run_time=1, rate_func=smooth))
# Semi circle
self.play(ApplyMethod(x_tracker.set_value, -1, run_time=2, rate_func=there_and_back),
ApplyMethod(y_tracker.set_value, -1, run_time=2, rate_func=smooth))
# Semi circle
self.play(ApplyMethod(x_tracker.set_value, 1, run_time=2, rate_func=there_and_back),
ApplyMethod(y_tracker.set_value, 1, run_time=2, rate_func=smooth))
# Neutral
self.play(ApplyMethod(y_tracker.set_value, 0.001, run_time=1, rate_func=smooth))
# Pure rotation
self.play(ApplyMethod(rot_tracker.set_value, -1, run_time=1, rate_func=smooth))
self.play(ApplyMethod(rot_tracker.set_value, 1, run_time=2, rate_func=smooth))
self.play(ApplyMethod(rot_tracker.set_value, 0, run_time=1, rate_func=smooth))
# Full forward plus rotation
self.play(ApplyMethod(y_tracker.set_value, 1, run_time=1, rate_func=smooth))
self.play(ApplyMethod(rot_tracker.set_value, -1, run_time=1, rate_func=smooth))
self.play(ApplyMethod(rot_tracker.set_value, 1, run_time=2, rate_func=smooth))
self.play(ApplyMethod(rot_tracker.set_value, 0, run_time=1, rate_func=smooth))
# Neutral
self.play(ApplyMethod(y_tracker.set_value, 0.001, run_time=1, rate_func=smooth))
# Move FR
self.wait(1)
self.play(ApplyMethod(rot_tracker.set_value, -1, run_time=1, rate_func=smooth))
fr_vector.remove_updater(updateFRArrow)
self.play(ApplyMethod(fr.shift, 0.3*DOWN), ApplyMethod(fr_vector.shift, 0.3*DOWN))
self.play(ApplyMethod(fr.set_color, RED), ApplyMethod(fr_vector.set_color, RED))
self.wait(1)
self.play(ApplyMethod(fr.set_color, WHITE), ApplyMethod(fr_vector.set_color, WHITE))
self.play(ApplyMethod(fr.shift, 0.3*UP), ApplyMethod(fr_vector.shift, 0.3*UP))
fr_vector.add_updater(updateFRArrow)
# Neutral
self.play(ApplyMethod(rot_tracker.set_value, 0, run_time=1, rate_func=smooth))
# Fade out
self.wait(1)
self.play(FadeOut(fr), FadeOut(fl), FadeOut(rl), FadeOut(rr), FadeOut(chassis),
FadeOut(left_pad), FadeOut(left_stick), FadeOut(right_pad), FadeOut(right_stick),
FadeOut(fr_vector), FadeOut(fl_vector), FadeOut(rl_vector), FadeOut(rr_vector))
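# Chassis dimensions fed to the inverse kinematics; since the components are
# built from wheelBase/R and trackWidth/R, only their ratio affects the vectors.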
wheelBase = 10
trackWidth = 10
def calculateVectors(FWD, STR, RCW, gyroAngle):
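    # FWD, STR, RCW are the forward, strafe and rotate commands (roughly
    # -1..1); gyroAngle is the robot heading in radians (0 means robot-centric).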
# Makes the command field-centric.
temp = FWD * math.cos(gyroAngle) + STR * math.sin(gyroAngle)
STR = -FWD * math.sin(gyroAngle) + STR * math.cos(gyroAngle)
FWD = temp
# Uses inverse kinematics to derive wheel speeds and angles.
R = math.hypot(wheelBase, trackWidth)
A = STR - RCW * (wheelBase / R)
B = STR + RCW * (wheelBase / R)
C = FWD - RCW * (trackWidth / R)
D = FWD + RCW * (trackWidth / R)
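    # A/B are the strafe components at the rear/front of the chassis;
    # C/D are the forward components on the right/left side.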
fr_ws = math.hypot(B, C)
fl_ws = math.hypot(B, D)
bl_ws = math.hypot(A, D)
br_ws = math.hypot(A, C)
fr_wa = math.atan2(B, C) * 180 / math.pi
fl_wa = math.atan2(B, D) * 180 / math.pi
bl_wa = math.atan2(A, D) * 180 / math.pi
br_wa = math.atan2(A, C) * 180 / math.pi
    # Normalize wheel speeds so no wheel is commanded past full speed.
    max_ws = max(fr_ws, fl_ws, bl_ws, br_ws)
    if max_ws > 1:
        fr_ws /= max_ws
        fl_ws /= max_ws
        bl_ws /= max_ws
        br_ws /= max_ws
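    # Rows are ordered FR, FL, BL, BR to match the updaters' indexing above.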
return np.array([[fr_ws, fr_wa],
[fl_ws, fl_wa],
[bl_ws, bl_wa],
[br_ws, br_wa]])
|
normal
|
{
"blob_id": "bdde3a3725510d4a83b09421e4b8538a38e29584",
"index": 8196,
"step-1": "<mask token>\n\n\nclass A_Swerve(Scene):\n\n def construct(self):\n chassis = Square(side_length=2, stroke_width=0, fill_color=GRAY,\n fill_opacity=1).shift(2 * RIGHT)\n fr = Dot().shift(UP + 3 * RIGHT)\n fl = Dot().shift(UP + RIGHT)\n rl = Dot().shift(DOWN + RIGHT)\n rr = Dot().shift(DOWN + 3 * RIGHT)\n x_tracker = ValueTracker(0)\n y_tracker = ValueTracker(0.001)\n rot_tracker = ValueTracker(0)\n\n def updateFRArrow(arrow):\n vector = calculateVectors(y_tracker.get_value(), x_tracker.\n get_value(), rot_tracker.get_value(), 0)[0]\n arrow.put_start_and_end_on(UP + 3 * RIGHT, np.array(UP + 3 *\n RIGHT + vector[0] * np.cos(np.radians(vector[1])) * UP + \n vector[0] * np.sin(np.radians(vector[1])) * RIGHT))\n\n def updateFLArrow(arrow):\n vector = calculateVectors(y_tracker.get_value(), x_tracker.\n get_value(), rot_tracker.get_value(), 0)[1]\n arrow.put_start_and_end_on(UP + RIGHT, np.array(UP + RIGHT + \n vector[0] * np.cos(np.radians(vector[1])) * UP + vector[0] *\n np.sin(np.radians(vector[1])) * RIGHT))\n\n def updateRLArrow(arrow):\n vector = calculateVectors(y_tracker.get_value(), x_tracker.\n get_value(), rot_tracker.get_value(), 0)[2]\n arrow.put_start_and_end_on(DOWN + RIGHT, np.array(DOWN + RIGHT +\n vector[0] * np.cos(np.radians(vector[1])) * UP + vector[0] *\n np.sin(np.radians(vector[1])) * RIGHT))\n\n def updateRRArrow(arrow):\n vector = calculateVectors(y_tracker.get_value(), x_tracker.\n get_value(), rot_tracker.get_value(), 0)[3]\n arrow.put_start_and_end_on(DOWN + 3 * RIGHT, np.array(DOWN + 3 *\n RIGHT + vector[0] * np.cos(np.radians(vector[1])) * UP + \n vector[0] * np.sin(np.radians(vector[1])) * RIGHT))\n fr_vector = Arrow()\n fr_vector.add_updater(updateFRArrow)\n fl_vector = Arrow()\n fl_vector.add_updater(updateFLArrow)\n rl_vector = Arrow()\n rl_vector.add_updater(updateRLArrow)\n rr_vector = Arrow()\n rr_vector.add_updater(updateRRArrow)\n left_pad = Circle(radius=0.5).move_to(3 * LEFT)\n left_stick = Circle(radius=0.25, fill_color=WHITE, fill_opacity=1\n ).move_to(3 * LEFT)\n left_stick.add_updater(lambda x: x.move_to(3 * LEFT + 0.4 *\n x_tracker.get_value() * RIGHT + 0.4 * y_tracker.get_value() * UP))\n right_pad = Circle(radius=0.5).move_to(1 * LEFT)\n right_stick = Circle(radius=0.25, fill_color=WHITE, fill_opacity=1\n ).move_to(1 * LEFT)\n right_stick.add_updater(lambda x: x.move_to(1 * LEFT + 0.4 *\n rot_tracker.get_value() * RIGHT))\n self.play(FadeIn(chassis), ShowCreation(fr), ShowCreation(fl),\n ShowCreation(rl), ShowCreation(rr))\n self.play(ShowCreation(left_pad), ShowCreation(left_stick),\n ShowCreation(right_pad), ShowCreation(right_stick))\n self.play(ShowCreation(fr_vector), ShowCreation(fl_vector),\n ShowCreation(rl_vector), ShowCreation(rr_vector))\n self.wait(1)\n self.play(ApplyMethod(y_tracker.set_value, 1, run_time=1, rate_func\n =smooth))\n self.play(ApplyMethod(x_tracker.set_value, -1, run_time=2,\n rate_func=there_and_back), ApplyMethod(y_tracker.set_value, -1,\n run_time=2, rate_func=smooth))\n self.play(ApplyMethod(x_tracker.set_value, 1, run_time=2, rate_func\n =there_and_back), ApplyMethod(y_tracker.set_value, 1, run_time=\n 2, rate_func=smooth))\n self.play(ApplyMethod(y_tracker.set_value, 0.001, run_time=1,\n rate_func=smooth))\n self.play(ApplyMethod(rot_tracker.set_value, -1, run_time=1,\n rate_func=smooth))\n self.play(ApplyMethod(rot_tracker.set_value, 1, run_time=2,\n rate_func=smooth))\n self.play(ApplyMethod(rot_tracker.set_value, 0, run_time=1,\n rate_func=smooth))\n self.play(ApplyMethod(y_tracker.set_value, 1, 
run_time=1, rate_func\n =smooth))\n self.play(ApplyMethod(rot_tracker.set_value, -1, run_time=1,\n rate_func=smooth))\n self.play(ApplyMethod(rot_tracker.set_value, 1, run_time=2,\n rate_func=smooth))\n self.play(ApplyMethod(rot_tracker.set_value, 0, run_time=1,\n rate_func=smooth))\n self.play(ApplyMethod(y_tracker.set_value, 0.001, run_time=1,\n rate_func=smooth))\n self.wait(1)\n self.play(ApplyMethod(rot_tracker.set_value, -1, run_time=1,\n rate_func=smooth))\n fr_vector.remove_updater(updateFRArrow)\n self.play(ApplyMethod(fr.shift, 0.3 * DOWN), ApplyMethod(fr_vector.\n shift, 0.3 * DOWN))\n self.play(ApplyMethod(fr.set_color, RED), ApplyMethod(fr_vector.\n set_color, RED))\n self.wait(1)\n self.play(ApplyMethod(fr.set_color, WHITE), ApplyMethod(fr_vector.\n set_color, WHITE))\n self.play(ApplyMethod(fr.shift, 0.3 * UP), ApplyMethod(fr_vector.\n shift, 0.3 * UP))\n fr_vector.add_updater(updateFRArrow)\n self.play(ApplyMethod(rot_tracker.set_value, 0, run_time=1,\n rate_func=smooth))\n self.wait(1)\n self.play(FadeOut(fr), FadeOut(fl), FadeOut(rl), FadeOut(rr),\n FadeOut(chassis), FadeOut(left_pad), FadeOut(left_stick),\n FadeOut(right_pad), FadeOut(right_stick), FadeOut(fr_vector),\n FadeOut(fl_vector), FadeOut(rl_vector), FadeOut(rr_vector))\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass A_Swerve(Scene):\n\n def construct(self):\n chassis = Square(side_length=2, stroke_width=0, fill_color=GRAY,\n fill_opacity=1).shift(2 * RIGHT)\n fr = Dot().shift(UP + 3 * RIGHT)\n fl = Dot().shift(UP + RIGHT)\n rl = Dot().shift(DOWN + RIGHT)\n rr = Dot().shift(DOWN + 3 * RIGHT)\n x_tracker = ValueTracker(0)\n y_tracker = ValueTracker(0.001)\n rot_tracker = ValueTracker(0)\n\n def updateFRArrow(arrow):\n vector = calculateVectors(y_tracker.get_value(), x_tracker.\n get_value(), rot_tracker.get_value(), 0)[0]\n arrow.put_start_and_end_on(UP + 3 * RIGHT, np.array(UP + 3 *\n RIGHT + vector[0] * np.cos(np.radians(vector[1])) * UP + \n vector[0] * np.sin(np.radians(vector[1])) * RIGHT))\n\n def updateFLArrow(arrow):\n vector = calculateVectors(y_tracker.get_value(), x_tracker.\n get_value(), rot_tracker.get_value(), 0)[1]\n arrow.put_start_and_end_on(UP + RIGHT, np.array(UP + RIGHT + \n vector[0] * np.cos(np.radians(vector[1])) * UP + vector[0] *\n np.sin(np.radians(vector[1])) * RIGHT))\n\n def updateRLArrow(arrow):\n vector = calculateVectors(y_tracker.get_value(), x_tracker.\n get_value(), rot_tracker.get_value(), 0)[2]\n arrow.put_start_and_end_on(DOWN + RIGHT, np.array(DOWN + RIGHT +\n vector[0] * np.cos(np.radians(vector[1])) * UP + vector[0] *\n np.sin(np.radians(vector[1])) * RIGHT))\n\n def updateRRArrow(arrow):\n vector = calculateVectors(y_tracker.get_value(), x_tracker.\n get_value(), rot_tracker.get_value(), 0)[3]\n arrow.put_start_and_end_on(DOWN + 3 * RIGHT, np.array(DOWN + 3 *\n RIGHT + vector[0] * np.cos(np.radians(vector[1])) * UP + \n vector[0] * np.sin(np.radians(vector[1])) * RIGHT))\n fr_vector = Arrow()\n fr_vector.add_updater(updateFRArrow)\n fl_vector = Arrow()\n fl_vector.add_updater(updateFLArrow)\n rl_vector = Arrow()\n rl_vector.add_updater(updateRLArrow)\n rr_vector = Arrow()\n rr_vector.add_updater(updateRRArrow)\n left_pad = Circle(radius=0.5).move_to(3 * LEFT)\n left_stick = Circle(radius=0.25, fill_color=WHITE, fill_opacity=1\n ).move_to(3 * LEFT)\n left_stick.add_updater(lambda x: x.move_to(3 * LEFT + 0.4 *\n x_tracker.get_value() * RIGHT + 0.4 * y_tracker.get_value() * UP))\n right_pad = Circle(radius=0.5).move_to(1 * LEFT)\n right_stick = Circle(radius=0.25, fill_color=WHITE, fill_opacity=1\n ).move_to(1 * LEFT)\n right_stick.add_updater(lambda x: x.move_to(1 * LEFT + 0.4 *\n rot_tracker.get_value() * RIGHT))\n self.play(FadeIn(chassis), ShowCreation(fr), ShowCreation(fl),\n ShowCreation(rl), ShowCreation(rr))\n self.play(ShowCreation(left_pad), ShowCreation(left_stick),\n ShowCreation(right_pad), ShowCreation(right_stick))\n self.play(ShowCreation(fr_vector), ShowCreation(fl_vector),\n ShowCreation(rl_vector), ShowCreation(rr_vector))\n self.wait(1)\n self.play(ApplyMethod(y_tracker.set_value, 1, run_time=1, rate_func\n =smooth))\n self.play(ApplyMethod(x_tracker.set_value, -1, run_time=2,\n rate_func=there_and_back), ApplyMethod(y_tracker.set_value, -1,\n run_time=2, rate_func=smooth))\n self.play(ApplyMethod(x_tracker.set_value, 1, run_time=2, rate_func\n =there_and_back), ApplyMethod(y_tracker.set_value, 1, run_time=\n 2, rate_func=smooth))\n self.play(ApplyMethod(y_tracker.set_value, 0.001, run_time=1,\n rate_func=smooth))\n self.play(ApplyMethod(rot_tracker.set_value, -1, run_time=1,\n rate_func=smooth))\n self.play(ApplyMethod(rot_tracker.set_value, 1, run_time=2,\n rate_func=smooth))\n self.play(ApplyMethod(rot_tracker.set_value, 0, run_time=1,\n rate_func=smooth))\n self.play(ApplyMethod(y_tracker.set_value, 1, 
run_time=1, rate_func\n =smooth))\n self.play(ApplyMethod(rot_tracker.set_value, -1, run_time=1,\n rate_func=smooth))\n self.play(ApplyMethod(rot_tracker.set_value, 1, run_time=2,\n rate_func=smooth))\n self.play(ApplyMethod(rot_tracker.set_value, 0, run_time=1,\n rate_func=smooth))\n self.play(ApplyMethod(y_tracker.set_value, 0.001, run_time=1,\n rate_func=smooth))\n self.wait(1)\n self.play(ApplyMethod(rot_tracker.set_value, -1, run_time=1,\n rate_func=smooth))\n fr_vector.remove_updater(updateFRArrow)\n self.play(ApplyMethod(fr.shift, 0.3 * DOWN), ApplyMethod(fr_vector.\n shift, 0.3 * DOWN))\n self.play(ApplyMethod(fr.set_color, RED), ApplyMethod(fr_vector.\n set_color, RED))\n self.wait(1)\n self.play(ApplyMethod(fr.set_color, WHITE), ApplyMethod(fr_vector.\n set_color, WHITE))\n self.play(ApplyMethod(fr.shift, 0.3 * UP), ApplyMethod(fr_vector.\n shift, 0.3 * UP))\n fr_vector.add_updater(updateFRArrow)\n self.play(ApplyMethod(rot_tracker.set_value, 0, run_time=1,\n rate_func=smooth))\n self.wait(1)\n self.play(FadeOut(fr), FadeOut(fl), FadeOut(rl), FadeOut(rr),\n FadeOut(chassis), FadeOut(left_pad), FadeOut(left_stick),\n FadeOut(right_pad), FadeOut(right_stick), FadeOut(fr_vector),\n FadeOut(fl_vector), FadeOut(rl_vector), FadeOut(rr_vector))\n\n\n<mask token>\n\n\ndef calculateVectors(FWD, STR, RCW, gyroAngle):\n temp = FWD * math.cos(gyroAngle) + STR * math.sin(gyroAngle)\n STR = -FWD * math.sin(gyroAngle) + STR * math.cos(gyroAngle)\n FWD = temp\n R = math.hypot(wheelBase, trackWidth)\n A = STR - RCW * (wheelBase / R)\n B = STR + RCW * (wheelBase / R)\n C = FWD - RCW * (trackWidth / R)\n D = FWD + RCW * (trackWidth / R)\n fr_ws = math.hypot(B, C)\n fl_ws = math.hypot(B, D)\n bl_ws = math.hypot(A, D)\n br_ws = math.hypot(A, C)\n fr_wa = math.atan2(B, C) * 180 / math.pi\n fl_wa = math.atan2(B, D) * 180 / math.pi\n bl_wa = math.atan2(A, D) * 180 / math.pi\n br_wa = math.atan2(A, C) * 180 / math.pi\n max = fr_ws\n if fl_ws > max:\n max = fl_ws\n if bl_ws > max:\n max = bl_ws\n if br_ws > max:\n max = br_ws\n if max > 1:\n fr_ws /= max\n fl_ws /= max\n bl_ws /= max\n br_ws /= max\n return np.array([[fr_ws, fr_wa], [fl_ws, fl_wa], [bl_ws, bl_wa], [br_ws,\n br_wa]])\n",
"step-3": "<mask token>\n\n\nclass A_Swerve(Scene):\n\n def construct(self):\n chassis = Square(side_length=2, stroke_width=0, fill_color=GRAY,\n fill_opacity=1).shift(2 * RIGHT)\n fr = Dot().shift(UP + 3 * RIGHT)\n fl = Dot().shift(UP + RIGHT)\n rl = Dot().shift(DOWN + RIGHT)\n rr = Dot().shift(DOWN + 3 * RIGHT)\n x_tracker = ValueTracker(0)\n y_tracker = ValueTracker(0.001)\n rot_tracker = ValueTracker(0)\n\n def updateFRArrow(arrow):\n vector = calculateVectors(y_tracker.get_value(), x_tracker.\n get_value(), rot_tracker.get_value(), 0)[0]\n arrow.put_start_and_end_on(UP + 3 * RIGHT, np.array(UP + 3 *\n RIGHT + vector[0] * np.cos(np.radians(vector[1])) * UP + \n vector[0] * np.sin(np.radians(vector[1])) * RIGHT))\n\n def updateFLArrow(arrow):\n vector = calculateVectors(y_tracker.get_value(), x_tracker.\n get_value(), rot_tracker.get_value(), 0)[1]\n arrow.put_start_and_end_on(UP + RIGHT, np.array(UP + RIGHT + \n vector[0] * np.cos(np.radians(vector[1])) * UP + vector[0] *\n np.sin(np.radians(vector[1])) * RIGHT))\n\n def updateRLArrow(arrow):\n vector = calculateVectors(y_tracker.get_value(), x_tracker.\n get_value(), rot_tracker.get_value(), 0)[2]\n arrow.put_start_and_end_on(DOWN + RIGHT, np.array(DOWN + RIGHT +\n vector[0] * np.cos(np.radians(vector[1])) * UP + vector[0] *\n np.sin(np.radians(vector[1])) * RIGHT))\n\n def updateRRArrow(arrow):\n vector = calculateVectors(y_tracker.get_value(), x_tracker.\n get_value(), rot_tracker.get_value(), 0)[3]\n arrow.put_start_and_end_on(DOWN + 3 * RIGHT, np.array(DOWN + 3 *\n RIGHT + vector[0] * np.cos(np.radians(vector[1])) * UP + \n vector[0] * np.sin(np.radians(vector[1])) * RIGHT))\n fr_vector = Arrow()\n fr_vector.add_updater(updateFRArrow)\n fl_vector = Arrow()\n fl_vector.add_updater(updateFLArrow)\n rl_vector = Arrow()\n rl_vector.add_updater(updateRLArrow)\n rr_vector = Arrow()\n rr_vector.add_updater(updateRRArrow)\n left_pad = Circle(radius=0.5).move_to(3 * LEFT)\n left_stick = Circle(radius=0.25, fill_color=WHITE, fill_opacity=1\n ).move_to(3 * LEFT)\n left_stick.add_updater(lambda x: x.move_to(3 * LEFT + 0.4 *\n x_tracker.get_value() * RIGHT + 0.4 * y_tracker.get_value() * UP))\n right_pad = Circle(radius=0.5).move_to(1 * LEFT)\n right_stick = Circle(radius=0.25, fill_color=WHITE, fill_opacity=1\n ).move_to(1 * LEFT)\n right_stick.add_updater(lambda x: x.move_to(1 * LEFT + 0.4 *\n rot_tracker.get_value() * RIGHT))\n self.play(FadeIn(chassis), ShowCreation(fr), ShowCreation(fl),\n ShowCreation(rl), ShowCreation(rr))\n self.play(ShowCreation(left_pad), ShowCreation(left_stick),\n ShowCreation(right_pad), ShowCreation(right_stick))\n self.play(ShowCreation(fr_vector), ShowCreation(fl_vector),\n ShowCreation(rl_vector), ShowCreation(rr_vector))\n self.wait(1)\n self.play(ApplyMethod(y_tracker.set_value, 1, run_time=1, rate_func\n =smooth))\n self.play(ApplyMethod(x_tracker.set_value, -1, run_time=2,\n rate_func=there_and_back), ApplyMethod(y_tracker.set_value, -1,\n run_time=2, rate_func=smooth))\n self.play(ApplyMethod(x_tracker.set_value, 1, run_time=2, rate_func\n =there_and_back), ApplyMethod(y_tracker.set_value, 1, run_time=\n 2, rate_func=smooth))\n self.play(ApplyMethod(y_tracker.set_value, 0.001, run_time=1,\n rate_func=smooth))\n self.play(ApplyMethod(rot_tracker.set_value, -1, run_time=1,\n rate_func=smooth))\n self.play(ApplyMethod(rot_tracker.set_value, 1, run_time=2,\n rate_func=smooth))\n self.play(ApplyMethod(rot_tracker.set_value, 0, run_time=1,\n rate_func=smooth))\n self.play(ApplyMethod(y_tracker.set_value, 1, 
run_time=1, rate_func\n =smooth))\n self.play(ApplyMethod(rot_tracker.set_value, -1, run_time=1,\n rate_func=smooth))\n self.play(ApplyMethod(rot_tracker.set_value, 1, run_time=2,\n rate_func=smooth))\n self.play(ApplyMethod(rot_tracker.set_value, 0, run_time=1,\n rate_func=smooth))\n self.play(ApplyMethod(y_tracker.set_value, 0.001, run_time=1,\n rate_func=smooth))\n self.wait(1)\n self.play(ApplyMethod(rot_tracker.set_value, -1, run_time=1,\n rate_func=smooth))\n fr_vector.remove_updater(updateFRArrow)\n self.play(ApplyMethod(fr.shift, 0.3 * DOWN), ApplyMethod(fr_vector.\n shift, 0.3 * DOWN))\n self.play(ApplyMethod(fr.set_color, RED), ApplyMethod(fr_vector.\n set_color, RED))\n self.wait(1)\n self.play(ApplyMethod(fr.set_color, WHITE), ApplyMethod(fr_vector.\n set_color, WHITE))\n self.play(ApplyMethod(fr.shift, 0.3 * UP), ApplyMethod(fr_vector.\n shift, 0.3 * UP))\n fr_vector.add_updater(updateFRArrow)\n self.play(ApplyMethod(rot_tracker.set_value, 0, run_time=1,\n rate_func=smooth))\n self.wait(1)\n self.play(FadeOut(fr), FadeOut(fl), FadeOut(rl), FadeOut(rr),\n FadeOut(chassis), FadeOut(left_pad), FadeOut(left_stick),\n FadeOut(right_pad), FadeOut(right_stick), FadeOut(fr_vector),\n FadeOut(fl_vector), FadeOut(rl_vector), FadeOut(rr_vector))\n\n\nwheelBase = 10\ntrackWidth = 10\n\n\ndef calculateVectors(FWD, STR, RCW, gyroAngle):\n temp = FWD * math.cos(gyroAngle) + STR * math.sin(gyroAngle)\n STR = -FWD * math.sin(gyroAngle) + STR * math.cos(gyroAngle)\n FWD = temp\n R = math.hypot(wheelBase, trackWidth)\n A = STR - RCW * (wheelBase / R)\n B = STR + RCW * (wheelBase / R)\n C = FWD - RCW * (trackWidth / R)\n D = FWD + RCW * (trackWidth / R)\n fr_ws = math.hypot(B, C)\n fl_ws = math.hypot(B, D)\n bl_ws = math.hypot(A, D)\n br_ws = math.hypot(A, C)\n fr_wa = math.atan2(B, C) * 180 / math.pi\n fl_wa = math.atan2(B, D) * 180 / math.pi\n bl_wa = math.atan2(A, D) * 180 / math.pi\n br_wa = math.atan2(A, C) * 180 / math.pi\n max = fr_ws\n if fl_ws > max:\n max = fl_ws\n if bl_ws > max:\n max = bl_ws\n if br_ws > max:\n max = br_ws\n if max > 1:\n fr_ws /= max\n fl_ws /= max\n bl_ws /= max\n br_ws /= max\n return np.array([[fr_ws, fr_wa], [fl_ws, fl_wa], [bl_ws, bl_wa], [br_ws,\n br_wa]])\n",
"step-4": "from manimlib.imports import *\nimport math\n\n\nclass A_Swerve(Scene):\n\n def construct(self):\n chassis = Square(side_length=2, stroke_width=0, fill_color=GRAY,\n fill_opacity=1).shift(2 * RIGHT)\n fr = Dot().shift(UP + 3 * RIGHT)\n fl = Dot().shift(UP + RIGHT)\n rl = Dot().shift(DOWN + RIGHT)\n rr = Dot().shift(DOWN + 3 * RIGHT)\n x_tracker = ValueTracker(0)\n y_tracker = ValueTracker(0.001)\n rot_tracker = ValueTracker(0)\n\n def updateFRArrow(arrow):\n vector = calculateVectors(y_tracker.get_value(), x_tracker.\n get_value(), rot_tracker.get_value(), 0)[0]\n arrow.put_start_and_end_on(UP + 3 * RIGHT, np.array(UP + 3 *\n RIGHT + vector[0] * np.cos(np.radians(vector[1])) * UP + \n vector[0] * np.sin(np.radians(vector[1])) * RIGHT))\n\n def updateFLArrow(arrow):\n vector = calculateVectors(y_tracker.get_value(), x_tracker.\n get_value(), rot_tracker.get_value(), 0)[1]\n arrow.put_start_and_end_on(UP + RIGHT, np.array(UP + RIGHT + \n vector[0] * np.cos(np.radians(vector[1])) * UP + vector[0] *\n np.sin(np.radians(vector[1])) * RIGHT))\n\n def updateRLArrow(arrow):\n vector = calculateVectors(y_tracker.get_value(), x_tracker.\n get_value(), rot_tracker.get_value(), 0)[2]\n arrow.put_start_and_end_on(DOWN + RIGHT, np.array(DOWN + RIGHT +\n vector[0] * np.cos(np.radians(vector[1])) * UP + vector[0] *\n np.sin(np.radians(vector[1])) * RIGHT))\n\n def updateRRArrow(arrow):\n vector = calculateVectors(y_tracker.get_value(), x_tracker.\n get_value(), rot_tracker.get_value(), 0)[3]\n arrow.put_start_and_end_on(DOWN + 3 * RIGHT, np.array(DOWN + 3 *\n RIGHT + vector[0] * np.cos(np.radians(vector[1])) * UP + \n vector[0] * np.sin(np.radians(vector[1])) * RIGHT))\n fr_vector = Arrow()\n fr_vector.add_updater(updateFRArrow)\n fl_vector = Arrow()\n fl_vector.add_updater(updateFLArrow)\n rl_vector = Arrow()\n rl_vector.add_updater(updateRLArrow)\n rr_vector = Arrow()\n rr_vector.add_updater(updateRRArrow)\n left_pad = Circle(radius=0.5).move_to(3 * LEFT)\n left_stick = Circle(radius=0.25, fill_color=WHITE, fill_opacity=1\n ).move_to(3 * LEFT)\n left_stick.add_updater(lambda x: x.move_to(3 * LEFT + 0.4 *\n x_tracker.get_value() * RIGHT + 0.4 * y_tracker.get_value() * UP))\n right_pad = Circle(radius=0.5).move_to(1 * LEFT)\n right_stick = Circle(radius=0.25, fill_color=WHITE, fill_opacity=1\n ).move_to(1 * LEFT)\n right_stick.add_updater(lambda x: x.move_to(1 * LEFT + 0.4 *\n rot_tracker.get_value() * RIGHT))\n self.play(FadeIn(chassis), ShowCreation(fr), ShowCreation(fl),\n ShowCreation(rl), ShowCreation(rr))\n self.play(ShowCreation(left_pad), ShowCreation(left_stick),\n ShowCreation(right_pad), ShowCreation(right_stick))\n self.play(ShowCreation(fr_vector), ShowCreation(fl_vector),\n ShowCreation(rl_vector), ShowCreation(rr_vector))\n self.wait(1)\n self.play(ApplyMethod(y_tracker.set_value, 1, run_time=1, rate_func\n =smooth))\n self.play(ApplyMethod(x_tracker.set_value, -1, run_time=2,\n rate_func=there_and_back), ApplyMethod(y_tracker.set_value, -1,\n run_time=2, rate_func=smooth))\n self.play(ApplyMethod(x_tracker.set_value, 1, run_time=2, rate_func\n =there_and_back), ApplyMethod(y_tracker.set_value, 1, run_time=\n 2, rate_func=smooth))\n self.play(ApplyMethod(y_tracker.set_value, 0.001, run_time=1,\n rate_func=smooth))\n self.play(ApplyMethod(rot_tracker.set_value, -1, run_time=1,\n rate_func=smooth))\n self.play(ApplyMethod(rot_tracker.set_value, 1, run_time=2,\n rate_func=smooth))\n self.play(ApplyMethod(rot_tracker.set_value, 0, run_time=1,\n rate_func=smooth))\n 
self.play(ApplyMethod(y_tracker.set_value, 1, run_time=1, rate_func\n =smooth))\n self.play(ApplyMethod(rot_tracker.set_value, -1, run_time=1,\n rate_func=smooth))\n self.play(ApplyMethod(rot_tracker.set_value, 1, run_time=2,\n rate_func=smooth))\n self.play(ApplyMethod(rot_tracker.set_value, 0, run_time=1,\n rate_func=smooth))\n self.play(ApplyMethod(y_tracker.set_value, 0.001, run_time=1,\n rate_func=smooth))\n self.wait(1)\n self.play(ApplyMethod(rot_tracker.set_value, -1, run_time=1,\n rate_func=smooth))\n fr_vector.remove_updater(updateFRArrow)\n self.play(ApplyMethod(fr.shift, 0.3 * DOWN), ApplyMethod(fr_vector.\n shift, 0.3 * DOWN))\n self.play(ApplyMethod(fr.set_color, RED), ApplyMethod(fr_vector.\n set_color, RED))\n self.wait(1)\n self.play(ApplyMethod(fr.set_color, WHITE), ApplyMethod(fr_vector.\n set_color, WHITE))\n self.play(ApplyMethod(fr.shift, 0.3 * UP), ApplyMethod(fr_vector.\n shift, 0.3 * UP))\n fr_vector.add_updater(updateFRArrow)\n self.play(ApplyMethod(rot_tracker.set_value, 0, run_time=1,\n rate_func=smooth))\n self.wait(1)\n self.play(FadeOut(fr), FadeOut(fl), FadeOut(rl), FadeOut(rr),\n FadeOut(chassis), FadeOut(left_pad), FadeOut(left_stick),\n FadeOut(right_pad), FadeOut(right_stick), FadeOut(fr_vector),\n FadeOut(fl_vector), FadeOut(rl_vector), FadeOut(rr_vector))\n\n\nwheelBase = 10\ntrackWidth = 10\n\n\ndef calculateVectors(FWD, STR, RCW, gyroAngle):\n temp = FWD * math.cos(gyroAngle) + STR * math.sin(gyroAngle)\n STR = -FWD * math.sin(gyroAngle) + STR * math.cos(gyroAngle)\n FWD = temp\n R = math.hypot(wheelBase, trackWidth)\n A = STR - RCW * (wheelBase / R)\n B = STR + RCW * (wheelBase / R)\n C = FWD - RCW * (trackWidth / R)\n D = FWD + RCW * (trackWidth / R)\n fr_ws = math.hypot(B, C)\n fl_ws = math.hypot(B, D)\n bl_ws = math.hypot(A, D)\n br_ws = math.hypot(A, C)\n fr_wa = math.atan2(B, C) * 180 / math.pi\n fl_wa = math.atan2(B, D) * 180 / math.pi\n bl_wa = math.atan2(A, D) * 180 / math.pi\n br_wa = math.atan2(A, C) * 180 / math.pi\n max = fr_ws\n if fl_ws > max:\n max = fl_ws\n if bl_ws > max:\n max = bl_ws\n if br_ws > max:\n max = br_ws\n if max > 1:\n fr_ws /= max\n fl_ws /= max\n bl_ws /= max\n br_ws /= max\n return np.array([[fr_ws, fr_wa], [fl_ws, fl_wa], [bl_ws, bl_wa], [br_ws,\n br_wa]])\n",
"step-5": "from manimlib.imports import *\nimport math\n\nclass A_Swerve(Scene):\n def construct(self):\n chassis = Square(side_length=2, stroke_width=0, fill_color=GRAY, fill_opacity=1).shift(2*RIGHT)\n\n fr = Dot().shift(UP+3*RIGHT)\n fl = Dot().shift(UP+RIGHT)\n rl = Dot().shift(DOWN+RIGHT)\n rr = Dot().shift(DOWN+3*RIGHT)\n\n x_tracker = ValueTracker(0)\n y_tracker = ValueTracker(0.001)\n rot_tracker = ValueTracker(0)\n\n def updateFRArrow(arrow):\n vector = calculateVectors(y_tracker.get_value(), x_tracker.get_value(), rot_tracker.get_value(), 0)[0]\n arrow.put_start_and_end_on(UP+3*RIGHT, np.array(UP+3*RIGHT+vector[0]*np.cos(np.radians(vector[1]))*UP+(vector[0]*np.sin(np.radians(vector[1]))*RIGHT)))\n \n def updateFLArrow(arrow):\n vector = calculateVectors(y_tracker.get_value(), x_tracker.get_value(), rot_tracker.get_value(), 0)[1]\n arrow.put_start_and_end_on(UP+RIGHT, np.array(UP+RIGHT+vector[0]*np.cos(np.radians(vector[1]))*UP+(vector[0]*np.sin(np.radians(vector[1]))*RIGHT)))\n\n def updateRLArrow(arrow):\n vector = calculateVectors(y_tracker.get_value(), x_tracker.get_value(), rot_tracker.get_value(), 0)[2]\n arrow.put_start_and_end_on(DOWN+RIGHT, np.array(DOWN+RIGHT+vector[0]*np.cos(np.radians(vector[1]))*UP+(vector[0]*np.sin(np.radians(vector[1]))*RIGHT)))\n\n def updateRRArrow(arrow):\n vector = calculateVectors(y_tracker.get_value(), x_tracker.get_value(), rot_tracker.get_value(), 0)[3]\n arrow.put_start_and_end_on(DOWN+3*RIGHT, np.array(DOWN+3*RIGHT+vector[0]*np.cos(np.radians(vector[1]))*UP+(vector[0]*np.sin(np.radians(vector[1]))*RIGHT)))\n\n fr_vector = Arrow()\n fr_vector.add_updater(updateFRArrow)\n fl_vector = Arrow()\n fl_vector.add_updater(updateFLArrow)\n rl_vector = Arrow()\n rl_vector.add_updater(updateRLArrow)\n rr_vector = Arrow()\n rr_vector.add_updater(updateRRArrow)\n\n left_pad = Circle(radius=0.5).move_to(3*LEFT)\n left_stick = Circle(radius=0.25, fill_color=WHITE, fill_opacity=1).move_to(3*LEFT)\n left_stick.add_updater(lambda x: x.move_to(3*LEFT+0.4*x_tracker.get_value()*RIGHT+0.4*y_tracker.get_value()*UP))\n\n right_pad = Circle(radius=0.5).move_to(1*LEFT)\n right_stick = Circle(radius=0.25, fill_color=WHITE, fill_opacity=1).move_to(1*LEFT)\n right_stick.add_updater(lambda x: x.move_to(1*LEFT+0.4*rot_tracker.get_value()*RIGHT))\n\n self.play(FadeIn(chassis), ShowCreation(fr), ShowCreation(fl), ShowCreation(rl), ShowCreation(rr))\n self.play(ShowCreation(left_pad), ShowCreation(left_stick), ShowCreation(right_pad), ShowCreation(right_stick))\n self.play(ShowCreation(fr_vector), ShowCreation(fl_vector), ShowCreation(rl_vector), ShowCreation(rr_vector))\n self.wait(1)\n # Full forward\n self.play(ApplyMethod(y_tracker.set_value, 1, run_time=1, rate_func=smooth))\n # Semi circle\n self.play(ApplyMethod(x_tracker.set_value, -1, run_time=2, rate_func=there_and_back), \n ApplyMethod(y_tracker.set_value, -1, run_time=2, rate_func=smooth))\n # Semi circle\n self.play(ApplyMethod(x_tracker.set_value, 1, run_time=2, rate_func=there_and_back), \n ApplyMethod(y_tracker.set_value, 1, run_time=2, rate_func=smooth))\n # Neutral\n self.play(ApplyMethod(y_tracker.set_value, 0.001, run_time=1, rate_func=smooth))\n # Pure rotation\n self.play(ApplyMethod(rot_tracker.set_value, -1, run_time=1, rate_func=smooth))\n self.play(ApplyMethod(rot_tracker.set_value, 1, run_time=2, rate_func=smooth))\n self.play(ApplyMethod(rot_tracker.set_value, 0, run_time=1, rate_func=smooth))\n # Full forward plus rotation\n self.play(ApplyMethod(y_tracker.set_value, 1, run_time=1, 
rate_func=smooth))\n self.play(ApplyMethod(rot_tracker.set_value, -1, run_time=1, rate_func=smooth))\n self.play(ApplyMethod(rot_tracker.set_value, 1, run_time=2, rate_func=smooth))\n self.play(ApplyMethod(rot_tracker.set_value, 0, run_time=1, rate_func=smooth))\n # Neutral\n self.play(ApplyMethod(y_tracker.set_value, 0.001, run_time=1, rate_func=smooth))\n # Move FR\n self.wait(1)\n self.play(ApplyMethod(rot_tracker.set_value, -1, run_time=1, rate_func=smooth))\n fr_vector.remove_updater(updateFRArrow)\n self.play(ApplyMethod(fr.shift, 0.3*DOWN), ApplyMethod(fr_vector.shift, 0.3*DOWN))\n self.play(ApplyMethod(fr.set_color, RED), ApplyMethod(fr_vector.set_color, RED))\n self.wait(1)\n self.play(ApplyMethod(fr.set_color, WHITE), ApplyMethod(fr_vector.set_color, WHITE))\n self.play(ApplyMethod(fr.shift, 0.3*UP), ApplyMethod(fr_vector.shift, 0.3*UP))\n fr_vector.add_updater(updateFRArrow)\n # Neutral\n self.play(ApplyMethod(rot_tracker.set_value, 0, run_time=1, rate_func=smooth))\n # Fade out\n self.wait(1)\n self.play(FadeOut(fr), FadeOut(fl), FadeOut(rl), FadeOut(rr), FadeOut(chassis),\n FadeOut(left_pad), FadeOut(left_stick), FadeOut(right_pad), FadeOut(right_stick),\n FadeOut(fr_vector), FadeOut(fl_vector), FadeOut(rl_vector), FadeOut(rr_vector))\n\nwheelBase = 10\ntrackWidth = 10\n\ndef calculateVectors(FWD, STR, RCW, gyroAngle):\n\n # Makes the command field-centric.\n temp = FWD * math.cos(gyroAngle) + STR * math.sin(gyroAngle)\n STR = -FWD * math.sin(gyroAngle) + STR * math.cos(gyroAngle)\n FWD = temp\n\n # Uses inverse kinematics to derive wheel speeds and angles.\n R = math.hypot(wheelBase, trackWidth)\n\n A = STR - RCW * (wheelBase / R)\n B = STR + RCW * (wheelBase / R)\n C = FWD - RCW * (trackWidth / R)\n D = FWD + RCW * (trackWidth / R)\n\n fr_ws = math.hypot(B, C)\n fl_ws = math.hypot(B, D)\n bl_ws = math.hypot(A, D)\n br_ws = math.hypot(A, C)\n\n fr_wa = math.atan2(B, C) * 180 / math.pi\n fl_wa = math.atan2(B, D) * 180 / math.pi\n bl_wa = math.atan2(A, D) * 180 / math.pi\n br_wa = math.atan2(A, C) * 180 / math.pi\n\n # Normalize wheel speeds.\n max = fr_ws\n if fl_ws > max:\n max = fl_ws\n if bl_ws > max:\n max = bl_ws\n if br_ws > max:\n max = br_ws\n\n if max > 1:\n fr_ws /= max\n fl_ws /= max\n bl_ws /= max\n br_ws /= max\n\n return np.array([[fr_ws, fr_wa], \n [fl_ws, fl_wa], \n [bl_ws, bl_wa], \n [br_ws, br_wa]])\n\n\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
from django.shortcuts import render, HttpResponse, redirect
from .models import Book, Author  # ORM models defined in this app's models.py
# Create your views here.
def main(request):
context = {
"the_books" : Book.objects.all(), #Book Class model.py
}
return render(request, "index.html", context)
def book(request):
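    # Create a Book from the submitted form fields, then return to the index.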
Book.objects.create(title = request.POST['b_title'], desc = request.POST['b_desc'])
return redirect('/')
def author(request):
context = {
"the_auths" : Author.objects.all(), #Author Class model.py
}
return render(request, "author.html", context)
def auth(request):
Author.objects.create(first_name = request.POST['a_first'], last_name = request.POST['a_last'], notes = request.POST['a_notes'])
    # Equivalent two-step form: build the instance, then save it explicitly:
    #   new_author = Author(first_name=...)
    #   new_author.save()
return redirect('/author')
def authInfo(request, authorid):
context = {
'selectedAuthor' : Author.objects.get(id=authorid)
}
return render(request, "author_info.html", context)
def bookInfo(request, bookid):
context = {
'selectedBook' : Book.objects.get(id=bookid),
'allAuthors' : Author.objects.all()
}
return render(request, "book_info.html", context)
def authUpdate(request, bookid):
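    # Attach the chosen author to this book through the many-to-many
    # 'authors' relation, then return to the book's detail page.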
this_book = Book.objects.get(id=bookid)
this_auth = Author.objects.get(id = request.POST['chosenAuth'])
this_book.authors.add(this_auth)
return redirect(f"/bookinfo/{bookid}")
|
normal
|
{
"blob_id": "02bec34b138d53235dc944adeae8ccb8d6b3d340",
"index": 4424,
"step-1": "<mask token>\n\n\ndef book(request):\n Book.objects.create(title=request.POST['b_title'], desc=request.POST[\n 'b_desc'])\n return redirect('/')\n\n\ndef author(request):\n context = {'the_auths': Author.objects.all()}\n return render(request, 'author.html', context)\n\n\ndef auth(request):\n Author.objects.create(first_name=request.POST['a_first'], last_name=\n request.POST['a_last'], notes=request.POST['a_notes'])\n return redirect('/author')\n\n\ndef authInfo(request, authorid):\n context = {'selectedAuthor': Author.objects.get(id=authorid)}\n return render(request, 'author_info.html', context)\n\n\n<mask token>\n\n\ndef authUpdate(request, bookid):\n this_book = Book.objects.get(id=bookid)\n this_auth = Author.objects.get(id=request.POST['chosenAuth'])\n this_book.authors.add(this_auth)\n return redirect(f'/bookinfo/{bookid}')\n",
"step-2": "<mask token>\n\n\ndef main(request):\n context = {'the_books': Book.objects.all()}\n return render(request, 'index.html', context)\n\n\ndef book(request):\n Book.objects.create(title=request.POST['b_title'], desc=request.POST[\n 'b_desc'])\n return redirect('/')\n\n\ndef author(request):\n context = {'the_auths': Author.objects.all()}\n return render(request, 'author.html', context)\n\n\ndef auth(request):\n Author.objects.create(first_name=request.POST['a_first'], last_name=\n request.POST['a_last'], notes=request.POST['a_notes'])\n return redirect('/author')\n\n\ndef authInfo(request, authorid):\n context = {'selectedAuthor': Author.objects.get(id=authorid)}\n return render(request, 'author_info.html', context)\n\n\n<mask token>\n\n\ndef authUpdate(request, bookid):\n this_book = Book.objects.get(id=bookid)\n this_auth = Author.objects.get(id=request.POST['chosenAuth'])\n this_book.authors.add(this_auth)\n return redirect(f'/bookinfo/{bookid}')\n",
"step-3": "<mask token>\n\n\ndef main(request):\n context = {'the_books': Book.objects.all()}\n return render(request, 'index.html', context)\n\n\ndef book(request):\n Book.objects.create(title=request.POST['b_title'], desc=request.POST[\n 'b_desc'])\n return redirect('/')\n\n\ndef author(request):\n context = {'the_auths': Author.objects.all()}\n return render(request, 'author.html', context)\n\n\ndef auth(request):\n Author.objects.create(first_name=request.POST['a_first'], last_name=\n request.POST['a_last'], notes=request.POST['a_notes'])\n return redirect('/author')\n\n\ndef authInfo(request, authorid):\n context = {'selectedAuthor': Author.objects.get(id=authorid)}\n return render(request, 'author_info.html', context)\n\n\ndef bookInfo(request, bookid):\n context = {'selectedBook': Book.objects.get(id=bookid), 'allAuthors':\n Author.objects.all()}\n return render(request, 'book_info.html', context)\n\n\ndef authUpdate(request, bookid):\n this_book = Book.objects.get(id=bookid)\n this_auth = Author.objects.get(id=request.POST['chosenAuth'])\n this_book.authors.add(this_auth)\n return redirect(f'/bookinfo/{bookid}')\n",
"step-4": "from django.shortcuts import render, HttpResponse, redirect\nfrom .models import Book, Author\n\n\ndef main(request):\n context = {'the_books': Book.objects.all()}\n return render(request, 'index.html', context)\n\n\ndef book(request):\n Book.objects.create(title=request.POST['b_title'], desc=request.POST[\n 'b_desc'])\n return redirect('/')\n\n\ndef author(request):\n context = {'the_auths': Author.objects.all()}\n return render(request, 'author.html', context)\n\n\ndef auth(request):\n Author.objects.create(first_name=request.POST['a_first'], last_name=\n request.POST['a_last'], notes=request.POST['a_notes'])\n return redirect('/author')\n\n\ndef authInfo(request, authorid):\n context = {'selectedAuthor': Author.objects.get(id=authorid)}\n return render(request, 'author_info.html', context)\n\n\ndef bookInfo(request, bookid):\n context = {'selectedBook': Book.objects.get(id=bookid), 'allAuthors':\n Author.objects.all()}\n return render(request, 'book_info.html', context)\n\n\ndef authUpdate(request, bookid):\n this_book = Book.objects.get(id=bookid)\n this_auth = Author.objects.get(id=request.POST['chosenAuth'])\n this_book.authors.add(this_auth)\n return redirect(f'/bookinfo/{bookid}')\n",
"step-5": "from django.shortcuts import render, HttpResponse, redirect\nfrom .models import Book, Author # This is the models.py Database\n\n# Create your views here.\n\ndef main(request):\n context = {\n \"the_books\" : Book.objects.all(), #Book Class model.py\n }\n return render(request, \"index.html\", context)\n\ndef book(request):\n Book.objects.create(title = request.POST['b_title'], desc = request.POST['b_desc'])\n return redirect('/')\n\ndef author(request):\n context = {\n \"the_auths\" : Author.objects.all(), #Author Class model.py\n }\n return render(request, \"author.html\", context)\n\ndef auth(request):\n Author.objects.create(first_name = request.POST['a_first'], last_name = request.POST['a_last'], notes = request.POST['a_notes'])\n # newA = Author(first_name= \"jlkj\")\n # newA.save()\n return redirect('/author')\n\ndef authInfo(request, authorid):\n context = {\n 'selectedAuthor' : Author.objects.get(id=authorid)\n }\n return render(request, \"author_info.html\", context)\n\ndef bookInfo(request, bookid):\n context = {\n 'selectedBook' : Book.objects.get(id=bookid),\n 'allAuthors' : Author.objects.all()\n }\n return render(request, \"book_info.html\", context)\n\ndef authUpdate(request, bookid):\n this_book = Book.objects.get(id=bookid)\n this_auth = Author.objects.get(id = request.POST['chosenAuth'])\n this_book.authors.add(this_auth)\n return redirect(f\"/bookinfo/{bookid}\")",
"step-ids": [
5,
6,
7,
8,
9
]
}
|
[
5,
6,
7,
8,
9
] |
#dependencies go here
import numpy as np
import datetime as dt
from datetime import timedelta
import sqlalchemy
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session
from sqlalchemy import create_engine, func
from flask import Flask, jsonify
#Set up the engine to connect to HW8 database
postgresStr = ("postgresql://postgres:password@localhost:5432/HW8-sqlalchemy-vacation")
engine = create_engine(postgresStr)
# reflect existing tables/classes
Base = automap_base()
Base.prepare(engine, reflect=True)
# Save reference to the tables
Measurement = Base.classes.measurements
Station = Base.classes.station
# Flask Setup
app = Flask(__name__)
# Set up flask routes
@app.route("/")
def home():
"""List all available api routes."""
return (
f"Available Routes:<br/>"
f"/api/v1.0/precipitation<br/>"
f"/api/v1.0/stations<br/>"
f"/api/v1.0/tobs<br/>"
f"/api/v1.0/<start><br/>"
f"/api/v1.0/<start>/<end><br/>"
)
@app.route("/api/v1.0/precipitation")
def precip():
#Convert the query results to a Dictionary using `date` as the key and `prcp` as the value.
#Return the JSON representation of your dictionary.
# Create our session (link) from Python to the DB
session = Session(engine)
#query the db, get a list of all precip measurements and dates
results = session.query(Measurement.date, Measurement.prcp).all()
session.close()
# Convert list of tuples into normal list
    precip_data = list(np.ravel(results))
    return jsonify(precip_data)
@app.route("/api/v1.0/stations")
def stations():
#Return a JSON list of stations from the dataset
# Create our session (link) from Python to the DB
session = Session(engine)
#query the db, get a list of the stations and their respective names
results = session.query(Station.station, Station.name).all()
session.close()
# Convert list of tuples into normal list
stationlist = list(np.ravel(results))
return jsonify(stationlist)
#query for the dates and temperature observations from a year from the last data point.
# return a JSON list of Temperature Observations (tobs) for the previous year.
@app.route("/api/v1.0/tobs")
def tobs():
# Create our session (link) from Python to the DB
session = Session(engine)
#find the last date in the dataset, query the prior year's temperature observations
last = session.query(func.max(Measurement.date)).limit(1).all()
q_end = last[0][0].strftime("%Y-%m-%d")
q_start = (last[0][0]-dt.timedelta(days = 365)).strftime("%Y-%m-%d")
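    # Assumes the date column maps to datetime.date, so timedelta/strftime
    # can be applied to it directly.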
tobs_results = session.query(Measurement.date, Measurement.tobs).\
filter(Measurement.date < q_end).\
filter(Measurement.date >= q_start).all()
session.close()
# Convert list of tuples into normal list
tobslist = list(np.ravel(tobs_results))
return jsonify(tobslist)
@app.route("/api/v1.0/<start>")
def startonly(start):
# Create our session (link) from Python to the DB
session = Session(engine)
#find the last date in the dataset to use as an ending point for our temperature calculations
last = session.query(func.max(Measurement.date)).limit(1).all()
q_end = last[0][0].strftime("%Y-%m-%d")
stats = session.query(func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)).\
filter(Measurement.date >= start).\
filter(Measurement.date <= q_end).all()
    session.close()
    statslist = list(np.ravel(stats))
    return jsonify({"StartDate":start,"EndDate":q_end,"TMIN": statslist[0],"TAVG":statslist[1],"TMAX":statslist[2]})
#Return a JSON list of the minimum temperature, the average temperature, and the max temperature for a given start or start-end range.
#When given the start only, calculate `TMIN`, `TAVG`, and `TMAX` for all dates greater than and equal to the start date.
@app.route("/api/v1.0/<start>/<end>")
def daterange(start,end):
# Create our session (link) from Python to the DB
session = Session(engine)
stats2 = session.query(func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)).\
filter(Measurement.date >= start).\
filter(Measurement.date <= end).all()
    session.close()
    statslist = list(np.ravel(stats2))
    return jsonify({"StartDate":start,"EndDate":end,"TMIN": statslist[0],"TAVG":statslist[1],"TMAX":statslist[2]})
#Return a JSON list of the minimum temperature, the average temperature, and the max temperature for a given start or start-end range.
#When given the start and the end date, calculate the `TMIN`, `TAVG`, and `TMAX` for dates between the start and end date inclusive.
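# Note: the comment inside precip() above promises a {date: prcp} dictionary,
# while the route returns a flattened list. A sketch of the dictionary
# version with the same query, under a separate hypothetical route name so
# both can coexist (str() assumes the dates serialize cleanly as keys):
@app.route("/api/v1.0/precipitation-dict")
def precip_dict():
    session = Session(engine)
    results = session.query(Measurement.date, Measurement.prcp).all()
    session.close()
    # build {date: prcp}, as the original comment describes
    return jsonify({str(date): prcp for date, prcp in results})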
if __name__ == '__main__':
app.run(debug=True)
|
normal
|
{
"blob_id": "7ab964352c1d51b70e3a1a7bf0a624f2d96cfd55",
"index": 8168,
"step-1": "<mask token>\n\n\[email protected]('/')\ndef home():\n \"\"\"List all available api routes.\"\"\"\n return (\n f'Available Routes:<br/>/api/v1.0/precipitation<br/>/api/v1.0/stations<br/>/api/v1.0/tobs<br/>/api/v1.0/<start><br/>/api/v1.0/<start>/<end><br/>'\n )\n\n\n<mask token>\n\n\[email protected]('/api/v1.0/tobs')\ndef tobs():\n session = Session(engine)\n last = session.query(func.max(Measurement.date)).limit(1).all()\n q_end = last[0][0].strftime('%Y-%m-%d')\n q_start = (last[0][0] - dt.timedelta(days=365)).strftime('%Y-%m-%d')\n tobs_results = session.query(Measurement.date, Measurement.tobs).filter(\n Measurement.date < q_end).filter(Measurement.date >= q_start).all()\n session.close()\n tobslist = list(np.ravel(tobs_results))\n return jsonify(tobslist)\n\n\[email protected]('/api/v1.0/<start>')\ndef startonly(start):\n session = Session(engine)\n last = session.query(func.max(Measurement.date)).limit(1).all()\n q_end = last[0][0].strftime('%Y-%m-%d')\n stats = session.query(func.min(Measurement.tobs), func.avg(Measurement.\n tobs), func.max(Measurement.tobs)).filter(Measurement.date >= start\n ).filter(Measurement.date <= q_end).all()\n statslist = list(np.ravel(stats))\n return jsonify({'StartDate': start, 'EndDate': q_end, 'TMIN': statslist\n [0], 'TAVG': statslist[1], 'TMAX': statslist[2]})\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\[email protected]('/')\ndef home():\n \"\"\"List all available api routes.\"\"\"\n return (\n f'Available Routes:<br/>/api/v1.0/precipitation<br/>/api/v1.0/stations<br/>/api/v1.0/tobs<br/>/api/v1.0/<start><br/>/api/v1.0/<start>/<end><br/>'\n )\n\n\[email protected]('/api/v1.0/precipitation')\ndef precip():\n session = Session(engine)\n results = session.query(Measurement.date, Measurement.prcp).all()\n session.close()\n precip = list(np.ravel(results))\n return jsonify(precip)\n\n\[email protected]('/api/v1.0/stations')\ndef stations():\n session = Session(engine)\n results = session.query(Station.station, Station.name).all()\n session.close()\n stationlist = list(np.ravel(results))\n return jsonify(stationlist)\n\n\[email protected]('/api/v1.0/tobs')\ndef tobs():\n session = Session(engine)\n last = session.query(func.max(Measurement.date)).limit(1).all()\n q_end = last[0][0].strftime('%Y-%m-%d')\n q_start = (last[0][0] - dt.timedelta(days=365)).strftime('%Y-%m-%d')\n tobs_results = session.query(Measurement.date, Measurement.tobs).filter(\n Measurement.date < q_end).filter(Measurement.date >= q_start).all()\n session.close()\n tobslist = list(np.ravel(tobs_results))\n return jsonify(tobslist)\n\n\[email protected]('/api/v1.0/<start>')\ndef startonly(start):\n session = Session(engine)\n last = session.query(func.max(Measurement.date)).limit(1).all()\n q_end = last[0][0].strftime('%Y-%m-%d')\n stats = session.query(func.min(Measurement.tobs), func.avg(Measurement.\n tobs), func.max(Measurement.tobs)).filter(Measurement.date >= start\n ).filter(Measurement.date <= q_end).all()\n statslist = list(np.ravel(stats))\n return jsonify({'StartDate': start, 'EndDate': q_end, 'TMIN': statslist\n [0], 'TAVG': statslist[1], 'TMAX': statslist[2]})\n\n\[email protected]('/api/v1.0/<start>/<end>')\ndef daterange(start, end):\n session = Session(engine)\n stats2 = session.query(func.min(Measurement.tobs), func.avg(Measurement\n .tobs), func.max(Measurement.tobs)).filter(Measurement.date >= start\n ).filter(Measurement.date <= end).all()\n statslist = list(np.ravel(stats2))\n return jsonify({'StartDate': start, 'EndDate': end, 'TMIN': statslist[0\n ], 'TAVG': statslist[1], 'TMAX': statslist[2]})\n\n\n<mask token>\n",
"step-3": "<mask token>\nBase.prepare(engine, reflect=True)\n<mask token>\n\n\[email protected]('/')\ndef home():\n \"\"\"List all available api routes.\"\"\"\n return (\n f'Available Routes:<br/>/api/v1.0/precipitation<br/>/api/v1.0/stations<br/>/api/v1.0/tobs<br/>/api/v1.0/<start><br/>/api/v1.0/<start>/<end><br/>'\n )\n\n\[email protected]('/api/v1.0/precipitation')\ndef precip():\n session = Session(engine)\n results = session.query(Measurement.date, Measurement.prcp).all()\n session.close()\n precip = list(np.ravel(results))\n return jsonify(precip)\n\n\[email protected]('/api/v1.0/stations')\ndef stations():\n session = Session(engine)\n results = session.query(Station.station, Station.name).all()\n session.close()\n stationlist = list(np.ravel(results))\n return jsonify(stationlist)\n\n\[email protected]('/api/v1.0/tobs')\ndef tobs():\n session = Session(engine)\n last = session.query(func.max(Measurement.date)).limit(1).all()\n q_end = last[0][0].strftime('%Y-%m-%d')\n q_start = (last[0][0] - dt.timedelta(days=365)).strftime('%Y-%m-%d')\n tobs_results = session.query(Measurement.date, Measurement.tobs).filter(\n Measurement.date < q_end).filter(Measurement.date >= q_start).all()\n session.close()\n tobslist = list(np.ravel(tobs_results))\n return jsonify(tobslist)\n\n\[email protected]('/api/v1.0/<start>')\ndef startonly(start):\n session = Session(engine)\n last = session.query(func.max(Measurement.date)).limit(1).all()\n q_end = last[0][0].strftime('%Y-%m-%d')\n stats = session.query(func.min(Measurement.tobs), func.avg(Measurement.\n tobs), func.max(Measurement.tobs)).filter(Measurement.date >= start\n ).filter(Measurement.date <= q_end).all()\n statslist = list(np.ravel(stats))\n return jsonify({'StartDate': start, 'EndDate': q_end, 'TMIN': statslist\n [0], 'TAVG': statslist[1], 'TMAX': statslist[2]})\n\n\[email protected]('/api/v1.0/<start>/<end>')\ndef daterange(start, end):\n session = Session(engine)\n stats2 = session.query(func.min(Measurement.tobs), func.avg(Measurement\n .tobs), func.max(Measurement.tobs)).filter(Measurement.date >= start\n ).filter(Measurement.date <= end).all()\n statslist = list(np.ravel(stats2))\n return jsonify({'StartDate': start, 'EndDate': end, 'TMIN': statslist[0\n ], 'TAVG': statslist[1], 'TMAX': statslist[2]})\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n",
"step-4": "import numpy as np\nimport datetime as dt\nfrom datetime import timedelta\nimport sqlalchemy\nfrom sqlalchemy.ext.automap import automap_base\nfrom sqlalchemy.orm import Session\nfrom sqlalchemy import create_engine, func\nfrom flask import Flask, jsonify\npostgresStr = (\n 'postgresql://postgres:password@localhost:5432/HW8-sqlalchemy-vacation')\nengine = create_engine(postgresStr)\nBase = automap_base()\nBase.prepare(engine, reflect=True)\nMeasurement = Base.classes.measurements\nStation = Base.classes.station\napp = Flask(__name__)\n\n\[email protected]('/')\ndef home():\n \"\"\"List all available api routes.\"\"\"\n return (\n f'Available Routes:<br/>/api/v1.0/precipitation<br/>/api/v1.0/stations<br/>/api/v1.0/tobs<br/>/api/v1.0/<start><br/>/api/v1.0/<start>/<end><br/>'\n )\n\n\[email protected]('/api/v1.0/precipitation')\ndef precip():\n session = Session(engine)\n results = session.query(Measurement.date, Measurement.prcp).all()\n session.close()\n precip = list(np.ravel(results))\n return jsonify(precip)\n\n\[email protected]('/api/v1.0/stations')\ndef stations():\n session = Session(engine)\n results = session.query(Station.station, Station.name).all()\n session.close()\n stationlist = list(np.ravel(results))\n return jsonify(stationlist)\n\n\[email protected]('/api/v1.0/tobs')\ndef tobs():\n session = Session(engine)\n last = session.query(func.max(Measurement.date)).limit(1).all()\n q_end = last[0][0].strftime('%Y-%m-%d')\n q_start = (last[0][0] - dt.timedelta(days=365)).strftime('%Y-%m-%d')\n tobs_results = session.query(Measurement.date, Measurement.tobs).filter(\n Measurement.date < q_end).filter(Measurement.date >= q_start).all()\n session.close()\n tobslist = list(np.ravel(tobs_results))\n return jsonify(tobslist)\n\n\[email protected]('/api/v1.0/<start>')\ndef startonly(start):\n session = Session(engine)\n last = session.query(func.max(Measurement.date)).limit(1).all()\n q_end = last[0][0].strftime('%Y-%m-%d')\n stats = session.query(func.min(Measurement.tobs), func.avg(Measurement.\n tobs), func.max(Measurement.tobs)).filter(Measurement.date >= start\n ).filter(Measurement.date <= q_end).all()\n statslist = list(np.ravel(stats))\n return jsonify({'StartDate': start, 'EndDate': q_end, 'TMIN': statslist\n [0], 'TAVG': statslist[1], 'TMAX': statslist[2]})\n\n\[email protected]('/api/v1.0/<start>/<end>')\ndef daterange(start, end):\n session = Session(engine)\n stats2 = session.query(func.min(Measurement.tobs), func.avg(Measurement\n .tobs), func.max(Measurement.tobs)).filter(Measurement.date >= start\n ).filter(Measurement.date <= end).all()\n statslist = list(np.ravel(stats2))\n return jsonify({'StartDate': start, 'EndDate': end, 'TMIN': statslist[0\n ], 'TAVG': statslist[1], 'TMAX': statslist[2]})\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n",
"step-5": "#dependencies go here\nimport numpy as np\nimport datetime as dt\nfrom datetime import timedelta\n\n\nimport sqlalchemy\nfrom sqlalchemy.ext.automap import automap_base\nfrom sqlalchemy.orm import Session\nfrom sqlalchemy import create_engine, func\n\nfrom flask import Flask, jsonify\n\n\n\n\n#Set up the engine to connect to HW8 database\npostgresStr = (\"postgresql://postgres:password@localhost:5432/HW8-sqlalchemy-vacation\")\nengine = create_engine(postgresStr)\n\n# reflect existing tables/classes\nBase = automap_base()\nBase.prepare(engine, reflect=True)\n\n# Save reference to the tables\nMeasurement = Base.classes.measurements\nStation = Base.classes.station\n\n# Flask Setup\napp = Flask(__name__)\n\n# Set up flask routes\[email protected](\"/\")\ndef home():\n \"\"\"List all available api routes.\"\"\"\n return (\n f\"Available Routes:<br/>\"\n f\"/api/v1.0/precipitation<br/>\"\n f\"/api/v1.0/stations<br/>\"\n f\"/api/v1.0/tobs<br/>\"\n f\"/api/v1.0/<start><br/>\"\n f\"/api/v1.0/<start>/<end><br/>\"\n )\n\n\[email protected](\"/api/v1.0/precipitation\")\ndef precip():\n \n #Convert the query results to a Dictionary using `date` as the key and `prcp` as the value.\n #Return the JSON representation of your dictionary.\n \n # Create our session (link) from Python to the DB\n session = Session(engine)\n\n #query the db, get a list of all precip measurements and dates\n results = session.query(Measurement.date, Measurement.prcp).all()\n\n session.close()\n\n # Convert list of tuples into normal list\n precip = list(np.ravel(results))\n return jsonify(precip)\n\[email protected](\"/api/v1.0/stations\")\ndef stations():\n \n #Return a JSON list of stations from the dataset\n \n # Create our session (link) from Python to the DB\n session = Session(engine)\n\n #query the db, get a list of the stations and their respective names\n results = session.query(Station.station, Station.name).all()\n\n session.close()\n\n # Convert list of tuples into normal list\n stationlist = list(np.ravel(results))\n return jsonify(stationlist)\n\n#query for the dates and temperature observations from a year from the last data point.\n# return a JSON list of Temperature Observations (tobs) for the previous year.\n\[email protected](\"/api/v1.0/tobs\")\ndef tobs():\n \n # Create our session (link) from Python to the DB\n session = Session(engine)\n\n \n #find the last date in the dataset, query the prior year's temperature observations\n last = session.query(func.max(Measurement.date)).limit(1).all()\n q_end = last[0][0].strftime(\"%Y-%m-%d\")\n q_start = (last[0][0]-dt.timedelta(days = 365)).strftime(\"%Y-%m-%d\")\n \n tobs_results = session.query(Measurement.date, Measurement.tobs).\\\n filter(Measurement.date < q_end).\\\n filter(Measurement.date >= q_start).all()\n \n session.close()\n\n # Convert list of tuples into normal list\n tobslist = list(np.ravel(tobs_results))\n \n return jsonify(tobslist)\n\[email protected](\"/api/v1.0/<start>\")\ndef startonly(start):\n \n # Create our session (link) from Python to the DB\n session = Session(engine)\n \n #find the last date in the dataset to use as an ending point for our temperature calculations\n last = session.query(func.max(Measurement.date)).limit(1).all()\n q_end = last[0][0].strftime(\"%Y-%m-%d\")\n \n stats = session.query(func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)).\\\n filter(Measurement.date >= start).\\\n filter(Measurement.date <= q_end).all()\n\n statslist = list(np.ravel(stats))\n \n return 
jsonify({\"StartDate\":start,\"EndDate\":q_end,\"TMIN\": statslist[0],\"TAVG\":statslist[1],\"TMAX\":statslist[2]})\n\n #Return a JSON list of the minimum temperature, the average temperature, and the max temperature for a given start or start-end range.\n #When given the start only, calculate `TMIN`, `TAVG`, and `TMAX` for all dates greater than and equal to the start date.\n\[email protected](\"/api/v1.0/<start>/<end>\")\ndef daterange(start,end):\n \n # Create our session (link) from Python to the DB\n session = Session(engine)\n \n stats2 = session.query(func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)).\\\n filter(Measurement.date >= start).\\\n filter(Measurement.date <= end).all()\n\n statslist = list(np.ravel(stats2))\n \n return jsonify({\"StartDate\":start,\"EndDate\":end,\"TMIN\": statslist[0],\"TAVG\":statslist[1],\"TMAX\":statslist[2]})\n\n #Return a JSON list of the minimum temperature, the average temperature, and the max temperature for a given start or start-end range.\n #When given the start and the end date, calculate the `TMIN`, `TAVG`, and `TMAX` for dates between the start and end date inclusive.\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n",
"step-ids": [
3,
6,
7,
9,
10
]
}
|
[
3,
6,
7,
9,
10
] |
<|reserved_special_token_0|>
class BusInfo:
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
@staticmethod
def init():
apiKey = os.getenv('BUS_TOKEN')
BusInfo.params['acl:consumerKey'] = apiKey
BusInfo.getBusStops()
BusInfo.getBusRoutes()
@staticmethod
def getBusRoutes():
busroute_list = []
req = request.Request('{}?{}'.format(BusInfo.url_routes, parse.
urlencode(BusInfo.params)))
with request.urlopen(req) as res:
json_load = json.load(res)
for v in json_load:
try:
busstop = {'route_id': v['owl:sameAs'], 'route_name': v
['dc:title']}
busroute_list.append(busstop)
except Exception:
pass
BusInfo.bus_routes = pd.DataFrame(busroute_list).set_index('route_id')
@staticmethod
def getBusStops():
busstop_list = []
req = request.Request('{}?{}'.format(BusInfo.url_busstop, parse.
urlencode(BusInfo.params)))
with request.urlopen(req) as res:
json_load = json.load(res)
for v in json_load:
try:
busstop = {'busstop_id': v['owl:sameAs'], 'pole_name':
v['dc:title']}
busstop_list.append(busstop)
except Exception:
pass
BusInfo.bus_stops = pd.DataFrame(busstop_list).set_index('busstop_id')
@staticmethod
def update():
bus_list = []
req = request.Request('{}?{}'.format(BusInfo.url, parse.urlencode(
BusInfo.params)))
with request.urlopen(req) as res:
json_load = json.load(res)
for v in json_load:
try:
if v['odpt:occupancyStatus'
] == 'odpt.OccupancyStatus:Empty':
occupancy = '空いている'
color = 'blue'
elif v['odpt:occupancyStatus'
] == 'odpt.OccupancyStatus:ManySeatsAvailable':
occupancy = '空き座席多数'
color = 'blue'
elif v['odpt:occupancyStatus'
] == 'odpt.OccupancyStatus:FewSeatsAvailable':
occupancy = '座席わすか'
color = 'yellow'
elif v['odpt:occupancyStatus'
] == 'odpt.OccupancyStatus:StandingRoomOnly':
occupancy = '混雑'
color = 'red'
else:
color = 'gray'
bus = {'bus_id': v['odpt:busNumber'], 'lat': v[
'geo:lat'], 'lng': v['geo:long'], 'route_num': v[
'odpt:busroute'][-3:], 'route_id': v[
'odpt:busroutePattern'], 'prevStop': v[
'odpt:fromBusstopPole'], 'nextStop': v[
'odpt:toBusstopPole'], 'occupancy': occupancy,
'color': color, 'azimuth': v['odpt:azimuth'],
'img_url':
'https://mxl00474.github.io/test_static/arrow_' +
color + '.png'}
bus_list.append(bus)
except Exception:
pass
df = pd.DataFrame(bus_list).set_index('bus_id')
df = pd.merge(df, BusInfo.bus_stops, left_on='prevStop',
right_index=True, how='left')
df = pd.merge(df, BusInfo.bus_stops, left_on='nextStop',
right_index=True, how='left')
df = pd.merge(df, BusInfo.bus_routes, left_on='route_id',
right_index=True, how='left')
return df.fillna('-')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class BusInfo:
url = 'https://api-tokyochallenge.odpt.org/api/v4/odpt:Bus'
url_busstop = (
'https://api-tokyochallenge.odpt.org/api/v4/odpt:BusstopPole.json')
url_routes = (
'https://api-tokyochallenge.odpt.org/api/v4/odpt:BusroutePattern.json')
params = {'odpt:operator': 'odpt.Operator:YokohamaMunicipal'}
bus_stops = None
bus_routes = None
@staticmethod
def init():
apiKey = os.getenv('BUS_TOKEN')
BusInfo.params['acl:consumerKey'] = apiKey
BusInfo.getBusStops()
BusInfo.getBusRoutes()
@staticmethod
def getBusRoutes():
busroute_list = []
req = request.Request('{}?{}'.format(BusInfo.url_routes, parse.
urlencode(BusInfo.params)))
with request.urlopen(req) as res:
json_load = json.load(res)
for v in json_load:
try:
busstop = {'route_id': v['owl:sameAs'], 'route_name': v
['dc:title']}
busroute_list.append(busstop)
except Exception:
pass
BusInfo.bus_routes = pd.DataFrame(busroute_list).set_index('route_id')
@staticmethod
def getBusStops():
busstop_list = []
req = request.Request('{}?{}'.format(BusInfo.url_busstop, parse.
urlencode(BusInfo.params)))
with request.urlopen(req) as res:
json_load = json.load(res)
for v in json_load:
try:
busstop = {'busstop_id': v['owl:sameAs'], 'pole_name':
v['dc:title']}
busstop_list.append(busstop)
except Exception:
pass
BusInfo.bus_stops = pd.DataFrame(busstop_list).set_index('busstop_id')
@staticmethod
def update():
bus_list = []
req = request.Request('{}?{}'.format(BusInfo.url, parse.urlencode(
BusInfo.params)))
with request.urlopen(req) as res:
json_load = json.load(res)
for v in json_load:
try:
if v['odpt:occupancyStatus'
] == 'odpt.OccupancyStatus:Empty':
occupancy = '空いている'
color = 'blue'
elif v['odpt:occupancyStatus'
] == 'odpt.OccupancyStatus:ManySeatsAvailable':
occupancy = '空き座席多数'
color = 'blue'
elif v['odpt:occupancyStatus'
] == 'odpt.OccupancyStatus:FewSeatsAvailable':
occupancy = '座席わすか'
color = 'yellow'
elif v['odpt:occupancyStatus'
] == 'odpt.OccupancyStatus:StandingRoomOnly':
occupancy = '混雑'
color = 'red'
else:
color = 'gray'
bus = {'bus_id': v['odpt:busNumber'], 'lat': v[
'geo:lat'], 'lng': v['geo:long'], 'route_num': v[
'odpt:busroute'][-3:], 'route_id': v[
'odpt:busroutePattern'], 'prevStop': v[
'odpt:fromBusstopPole'], 'nextStop': v[
'odpt:toBusstopPole'], 'occupancy': occupancy,
'color': color, 'azimuth': v['odpt:azimuth'],
'img_url':
'https://mxl00474.github.io/test_static/arrow_' +
color + '.png'}
bus_list.append(bus)
except Exception:
pass
df = pd.DataFrame(bus_list).set_index('bus_id')
df = pd.merge(df, BusInfo.bus_stops, left_on='prevStop',
right_index=True, how='left')
df = pd.merge(df, BusInfo.bus_stops, left_on='nextStop',
right_index=True, how='left')
df = pd.merge(df, BusInfo.bus_routes, left_on='route_id',
right_index=True, how='left')
return df.fillna('-')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class BusInfo:
url = 'https://api-tokyochallenge.odpt.org/api/v4/odpt:Bus'
url_busstop = (
'https://api-tokyochallenge.odpt.org/api/v4/odpt:BusstopPole.json')
url_routes = (
'https://api-tokyochallenge.odpt.org/api/v4/odpt:BusroutePattern.json')
params = {'odpt:operator': 'odpt.Operator:YokohamaMunicipal'}
bus_stops = None
bus_routes = None
@staticmethod
def init():
apiKey = os.getenv('BUS_TOKEN')
BusInfo.params['acl:consumerKey'] = apiKey
BusInfo.getBusStops()
BusInfo.getBusRoutes()
@staticmethod
def getBusRoutes():
busroute_list = []
req = request.Request('{}?{}'.format(BusInfo.url_routes, parse.
urlencode(BusInfo.params)))
with request.urlopen(req) as res:
json_load = json.load(res)
for v in json_load:
try:
busstop = {'route_id': v['owl:sameAs'], 'route_name': v
['dc:title']}
busroute_list.append(busstop)
except Exception:
pass
BusInfo.bus_routes = pd.DataFrame(busroute_list).set_index('route_id')
@staticmethod
def getBusStops():
busstop_list = []
req = request.Request('{}?{}'.format(BusInfo.url_busstop, parse.
urlencode(BusInfo.params)))
with request.urlopen(req) as res:
json_load = json.load(res)
for v in json_load:
try:
busstop = {'busstop_id': v['owl:sameAs'], 'pole_name':
v['dc:title']}
busstop_list.append(busstop)
except Exception:
pass
BusInfo.bus_stops = pd.DataFrame(busstop_list).set_index('busstop_id')
@staticmethod
def update():
bus_list = []
req = request.Request('{}?{}'.format(BusInfo.url, parse.urlencode(
BusInfo.params)))
with request.urlopen(req) as res:
json_load = json.load(res)
for v in json_load:
try:
if v['odpt:occupancyStatus'
] == 'odpt.OccupancyStatus:Empty':
occupancy = '空いている'
color = 'blue'
elif v['odpt:occupancyStatus'
] == 'odpt.OccupancyStatus:ManySeatsAvailable':
occupancy = '空き座席多数'
color = 'blue'
elif v['odpt:occupancyStatus'
] == 'odpt.OccupancyStatus:FewSeatsAvailable':
occupancy = '座席わすか'
color = 'yellow'
elif v['odpt:occupancyStatus'
] == 'odpt.OccupancyStatus:StandingRoomOnly':
occupancy = '混雑'
color = 'red'
else:
color = 'gray'
bus = {'bus_id': v['odpt:busNumber'], 'lat': v[
'geo:lat'], 'lng': v['geo:long'], 'route_num': v[
'odpt:busroute'][-3:], 'route_id': v[
'odpt:busroutePattern'], 'prevStop': v[
'odpt:fromBusstopPole'], 'nextStop': v[
'odpt:toBusstopPole'], 'occupancy': occupancy,
'color': color, 'azimuth': v['odpt:azimuth'],
'img_url':
'https://mxl00474.github.io/test_static/arrow_' +
color + '.png'}
bus_list.append(bus)
except Exception:
pass
df = pd.DataFrame(bus_list).set_index('bus_id')
df = pd.merge(df, BusInfo.bus_stops, left_on='prevStop',
right_index=True, how='left')
df = pd.merge(df, BusInfo.bus_stops, left_on='nextStop',
right_index=True, how='left')
df = pd.merge(df, BusInfo.bus_routes, left_on='route_id',
right_index=True, how='left')
return df.fillna('-')
if __name__ == '__main__':
BusInfo.init()
print('=== Get stop info ===')
BusInfo.getBusStops()
print(BusInfo.bus_stops)
print('=== Get route info ===')
BusInfo.getBusRoutes()
print(len(BusInfo.bus_routes))
print('=== Get bus info ===')
bus_list = BusInfo.update()
print(bus_list)
print(bus_list.columns)
<|reserved_special_token_1|>
from urllib import request, parse
import pandas as pd
import json
import os
class BusInfo:
url = 'https://api-tokyochallenge.odpt.org/api/v4/odpt:Bus'
url_busstop = (
'https://api-tokyochallenge.odpt.org/api/v4/odpt:BusstopPole.json')
url_routes = (
'https://api-tokyochallenge.odpt.org/api/v4/odpt:BusroutePattern.json')
params = {'odpt:operator': 'odpt.Operator:YokohamaMunicipal'}
bus_stops = None
bus_routes = None
@staticmethod
def init():
apiKey = os.getenv('BUS_TOKEN')
BusInfo.params['acl:consumerKey'] = apiKey
BusInfo.getBusStops()
BusInfo.getBusRoutes()
@staticmethod
def getBusRoutes():
busroute_list = []
req = request.Request('{}?{}'.format(BusInfo.url_routes, parse.
urlencode(BusInfo.params)))
with request.urlopen(req) as res:
json_load = json.load(res)
for v in json_load:
try:
busstop = {'route_id': v['owl:sameAs'], 'route_name': v
['dc:title']}
busroute_list.append(busstop)
except Exception:
pass
BusInfo.bus_routes = pd.DataFrame(busroute_list).set_index('route_id')
@staticmethod
def getBusStops():
busstop_list = []
req = request.Request('{}?{}'.format(BusInfo.url_busstop, parse.
urlencode(BusInfo.params)))
with request.urlopen(req) as res:
json_load = json.load(res)
for v in json_load:
try:
busstop = {'busstop_id': v['owl:sameAs'], 'pole_name':
v['dc:title']}
busstop_list.append(busstop)
except Exception:
pass
BusInfo.bus_stops = pd.DataFrame(busstop_list).set_index('busstop_id')
@staticmethod
def update():
bus_list = []
req = request.Request('{}?{}'.format(BusInfo.url, parse.urlencode(
BusInfo.params)))
with request.urlopen(req) as res:
json_load = json.load(res)
for v in json_load:
try:
if v['odpt:occupancyStatus'
] == 'odpt.OccupancyStatus:Empty':
occupancy = '空いている'
color = 'blue'
elif v['odpt:occupancyStatus'
] == 'odpt.OccupancyStatus:ManySeatsAvailable':
occupancy = '空き座席多数'
color = 'blue'
elif v['odpt:occupancyStatus'
] == 'odpt.OccupancyStatus:FewSeatsAvailable':
occupancy = '座席わすか'
color = 'yellow'
elif v['odpt:occupancyStatus'
] == 'odpt.OccupancyStatus:StandingRoomOnly':
occupancy = '混雑'
color = 'red'
else:
color = 'gray'
bus = {'bus_id': v['odpt:busNumber'], 'lat': v[
'geo:lat'], 'lng': v['geo:long'], 'route_num': v[
'odpt:busroute'][-3:], 'route_id': v[
'odpt:busroutePattern'], 'prevStop': v[
'odpt:fromBusstopPole'], 'nextStop': v[
'odpt:toBusstopPole'], 'occupancy': occupancy,
'color': color, 'azimuth': v['odpt:azimuth'],
'img_url':
'https://mxl00474.github.io/test_static/arrow_' +
color + '.png'}
bus_list.append(bus)
except Exception:
pass
df = pd.DataFrame(bus_list).set_index('bus_id')
df = pd.merge(df, BusInfo.bus_stops, left_on='prevStop',
right_index=True, how='left')
df = pd.merge(df, BusInfo.bus_stops, left_on='nextStop',
right_index=True, how='left')
df = pd.merge(df, BusInfo.bus_routes, left_on='route_id',
right_index=True, how='left')
return df.fillna('-')
if __name__ == '__main__':
BusInfo.init()
print('=== Get stop info ===')
BusInfo.getBusStops()
print(BusInfo.bus_stops)
print('=== Get route info ===')
BusInfo.getBusRoutes()
print(len(BusInfo.bus_routes))
print('=== Get bus info ===')
bus_list = BusInfo.update()
print(bus_list)
print(bus_list.columns)
<|reserved_special_token_1|>
from urllib import request, parse
import pandas as pd
import json
import os
class BusInfo:
url = 'https://api-tokyochallenge.odpt.org/api/v4/odpt:Bus'
url_busstop = 'https://api-tokyochallenge.odpt.org/api/v4/odpt:BusstopPole.json'
url_routes = 'https://api-tokyochallenge.odpt.org/api/v4/odpt:BusroutePattern.json'
params = {
'odpt:operator': 'odpt.Operator:YokohamaMunicipal',
}
bus_stops = None
bus_routes = None
@staticmethod
def init():
apiKey = os.getenv('BUS_TOKEN')
BusInfo.params['acl:consumerKey'] = apiKey
BusInfo.getBusStops()
BusInfo.getBusRoutes()
@staticmethod
def getBusRoutes():
#BusInfo.bus_routes = pd.DataFrame()
#return
busroute_list=[]
req = request.Request('{}?{}'.format(BusInfo.url_routes, parse.urlencode(BusInfo.params)))
with request.urlopen(req) as res:
json_load = json.load(res)
for v in json_load:
try:
busstop = { 'route_id': v['owl:sameAs'],
'route_name': v['dc:title'],
}
busroute_list.append(busstop)
except Exception:
pass
BusInfo.bus_routes = pd.DataFrame(busroute_list).set_index('route_id')
@staticmethod
def getBusStops():
#BusInfo.bus_stops = pd.DataFrame()
#return
busstop_list=[]
req = request.Request('{}?{}'.format(BusInfo.url_busstop, parse.urlencode(BusInfo.params)))
with request.urlopen(req) as res:
json_load = json.load(res)
for v in json_load:
try:
busstop = { 'busstop_id': v['owl:sameAs'],
'pole_name': v['dc:title'],
}
busstop_list.append(busstop)
except Exception:
pass
BusInfo.bus_stops = pd.DataFrame(busstop_list).set_index('busstop_id')
@staticmethod
def update():
bus_list=[]
req = request.Request('{}?{}'.format(BusInfo.url, parse.urlencode(BusInfo.params)))
with request.urlopen(req) as res:
json_load = json.load(res)
for v in json_load:
try:
if v['odpt:occupancyStatus'] == 'odpt.OccupancyStatus:Empty':
occupancy = '空いている'
color='blue'
elif v['odpt:occupancyStatus'] == 'odpt.OccupancyStatus:ManySeatsAvailable':
occupancy = '空き座席多数'
color='blue'
elif v['odpt:occupancyStatus'] == 'odpt.OccupancyStatus:FewSeatsAvailable':
                        occupancy = '座席わずか'
color='yellow'
elif v['odpt:occupancyStatus'] == 'odpt.OccupancyStatus:StandingRoomOnly':
occupancy = '混雑'
color='red'
                    else:
                        occupancy = '-'  # default for unknown statuses, so the bus dict below can always be built
                        color='gray'
bus = { 'bus_id': v['odpt:busNumber'],
'lat': v['geo:lat'],
'lng': v['geo:long'],
'route_num': v['odpt:busroute'][-3:],
'route_id': v['odpt:busroutePattern'],
'prevStop': v['odpt:fromBusstopPole'],
'nextStop': v['odpt:toBusstopPole'],
'occupancy' : occupancy,
'color' : color,
'azimuth' : v['odpt:azimuth'],
'img_url' : 'https://mxl00474.github.io/test_static/arrow_' + color + '.png'
}
bus_list.append(bus)
except Exception:
pass
df = pd.DataFrame(bus_list).set_index('bus_id')
df = pd.merge(df, BusInfo.bus_stops, left_on='prevStop', right_index=True, how='left')
df = pd.merge(df, BusInfo.bus_stops, left_on='nextStop', right_index=True, how='left')
df = pd.merge(df, BusInfo.bus_routes, left_on='route_id', right_index=True, how='left')
return df.fillna("-")
if __name__ == '__main__':
BusInfo.init()
print('=== Get stop info ===')
BusInfo.getBusStops()
print(BusInfo.bus_stops)
print('=== Get route info ===')
BusInfo.getBusRoutes()
#print(BusInfo.bus_routes)
print(len(BusInfo.bus_routes))
print('=== Get bus info ===')
bus_list = BusInfo.update()
print(bus_list)
print(bus_list.columns)
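
    # A small consumption sketch (illustrative, reusing the bus_list above):
    # the merged DataFrame converts straight to per-bus records, e.g. for a
    # map frontend. Assumes the same BUS_TOKEN / network access as the demo.
    import json
    markers = bus_list.reset_index().to_dict(orient='records')  # one dict per bus_id
    print(json.dumps(markers[:3], ensure_ascii=False, indent=2))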
|
flexible
|
{
"blob_id": "7eefcfdb9682cb09ce2d85d11aafc04977016ba4",
"index": 8332,
"step-1": "<mask token>\n\n\nclass BusInfo:\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n @staticmethod\n def init():\n apiKey = os.getenv('BUS_TOKEN')\n BusInfo.params['acl:consumerKey'] = apiKey\n BusInfo.getBusStops()\n BusInfo.getBusRoutes()\n\n @staticmethod\n def getBusRoutes():\n busroute_list = []\n req = request.Request('{}?{}'.format(BusInfo.url_routes, parse.\n urlencode(BusInfo.params)))\n with request.urlopen(req) as res:\n json_load = json.load(res)\n for v in json_load:\n try:\n busstop = {'route_id': v['owl:sameAs'], 'route_name': v\n ['dc:title']}\n busroute_list.append(busstop)\n except Exception:\n pass\n BusInfo.bus_routes = pd.DataFrame(busroute_list).set_index('route_id')\n\n @staticmethod\n def getBusStops():\n busstop_list = []\n req = request.Request('{}?{}'.format(BusInfo.url_busstop, parse.\n urlencode(BusInfo.params)))\n with request.urlopen(req) as res:\n json_load = json.load(res)\n for v in json_load:\n try:\n busstop = {'busstop_id': v['owl:sameAs'], 'pole_name':\n v['dc:title']}\n busstop_list.append(busstop)\n except Exception:\n pass\n BusInfo.bus_stops = pd.DataFrame(busstop_list).set_index('busstop_id')\n\n @staticmethod\n def update():\n bus_list = []\n req = request.Request('{}?{}'.format(BusInfo.url, parse.urlencode(\n BusInfo.params)))\n with request.urlopen(req) as res:\n json_load = json.load(res)\n for v in json_load:\n try:\n if v['odpt:occupancyStatus'\n ] == 'odpt.OccupancyStatus:Empty':\n occupancy = '空いている'\n color = 'blue'\n elif v['odpt:occupancyStatus'\n ] == 'odpt.OccupancyStatus:ManySeatsAvailable':\n occupancy = '空き座席多数'\n color = 'blue'\n elif v['odpt:occupancyStatus'\n ] == 'odpt.OccupancyStatus:FewSeatsAvailable':\n occupancy = '座席わすか'\n color = 'yellow'\n elif v['odpt:occupancyStatus'\n ] == 'odpt.OccupancyStatus:StandingRoomOnly':\n occupancy = '混雑'\n color = 'red'\n else:\n color = 'gray'\n bus = {'bus_id': v['odpt:busNumber'], 'lat': v[\n 'geo:lat'], 'lng': v['geo:long'], 'route_num': v[\n 'odpt:busroute'][-3:], 'route_id': v[\n 'odpt:busroutePattern'], 'prevStop': v[\n 'odpt:fromBusstopPole'], 'nextStop': v[\n 'odpt:toBusstopPole'], 'occupancy': occupancy,\n 'color': color, 'azimuth': v['odpt:azimuth'],\n 'img_url': \n 'https://mxl00474.github.io/test_static/arrow_' +\n color + '.png'}\n bus_list.append(bus)\n except Exception:\n pass\n df = pd.DataFrame(bus_list).set_index('bus_id')\n df = pd.merge(df, BusInfo.bus_stops, left_on='prevStop',\n right_index=True, how='left')\n df = pd.merge(df, BusInfo.bus_stops, left_on='nextStop',\n right_index=True, how='left')\n df = pd.merge(df, BusInfo.bus_routes, left_on='route_id',\n right_index=True, how='left')\n return df.fillna('-')\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass BusInfo:\n url = 'https://api-tokyochallenge.odpt.org/api/v4/odpt:Bus'\n url_busstop = (\n 'https://api-tokyochallenge.odpt.org/api/v4/odpt:BusstopPole.json')\n url_routes = (\n 'https://api-tokyochallenge.odpt.org/api/v4/odpt:BusroutePattern.json')\n params = {'odpt:operator': 'odpt.Operator:YokohamaMunicipal'}\n bus_stops = None\n bus_routes = None\n\n @staticmethod\n def init():\n apiKey = os.getenv('BUS_TOKEN')\n BusInfo.params['acl:consumerKey'] = apiKey\n BusInfo.getBusStops()\n BusInfo.getBusRoutes()\n\n @staticmethod\n def getBusRoutes():\n busroute_list = []\n req = request.Request('{}?{}'.format(BusInfo.url_routes, parse.\n urlencode(BusInfo.params)))\n with request.urlopen(req) as res:\n json_load = json.load(res)\n for v in json_load:\n try:\n busstop = {'route_id': v['owl:sameAs'], 'route_name': v\n ['dc:title']}\n busroute_list.append(busstop)\n except Exception:\n pass\n BusInfo.bus_routes = pd.DataFrame(busroute_list).set_index('route_id')\n\n @staticmethod\n def getBusStops():\n busstop_list = []\n req = request.Request('{}?{}'.format(BusInfo.url_busstop, parse.\n urlencode(BusInfo.params)))\n with request.urlopen(req) as res:\n json_load = json.load(res)\n for v in json_load:\n try:\n busstop = {'busstop_id': v['owl:sameAs'], 'pole_name':\n v['dc:title']}\n busstop_list.append(busstop)\n except Exception:\n pass\n BusInfo.bus_stops = pd.DataFrame(busstop_list).set_index('busstop_id')\n\n @staticmethod\n def update():\n bus_list = []\n req = request.Request('{}?{}'.format(BusInfo.url, parse.urlencode(\n BusInfo.params)))\n with request.urlopen(req) as res:\n json_load = json.load(res)\n for v in json_load:\n try:\n if v['odpt:occupancyStatus'\n ] == 'odpt.OccupancyStatus:Empty':\n occupancy = '空いている'\n color = 'blue'\n elif v['odpt:occupancyStatus'\n ] == 'odpt.OccupancyStatus:ManySeatsAvailable':\n occupancy = '空き座席多数'\n color = 'blue'\n elif v['odpt:occupancyStatus'\n ] == 'odpt.OccupancyStatus:FewSeatsAvailable':\n occupancy = '座席わすか'\n color = 'yellow'\n elif v['odpt:occupancyStatus'\n ] == 'odpt.OccupancyStatus:StandingRoomOnly':\n occupancy = '混雑'\n color = 'red'\n else:\n color = 'gray'\n bus = {'bus_id': v['odpt:busNumber'], 'lat': v[\n 'geo:lat'], 'lng': v['geo:long'], 'route_num': v[\n 'odpt:busroute'][-3:], 'route_id': v[\n 'odpt:busroutePattern'], 'prevStop': v[\n 'odpt:fromBusstopPole'], 'nextStop': v[\n 'odpt:toBusstopPole'], 'occupancy': occupancy,\n 'color': color, 'azimuth': v['odpt:azimuth'],\n 'img_url': \n 'https://mxl00474.github.io/test_static/arrow_' +\n color + '.png'}\n bus_list.append(bus)\n except Exception:\n pass\n df = pd.DataFrame(bus_list).set_index('bus_id')\n df = pd.merge(df, BusInfo.bus_stops, left_on='prevStop',\n right_index=True, how='left')\n df = pd.merge(df, BusInfo.bus_stops, left_on='nextStop',\n right_index=True, how='left')\n df = pd.merge(df, BusInfo.bus_routes, left_on='route_id',\n right_index=True, how='left')\n return df.fillna('-')\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass BusInfo:\n url = 'https://api-tokyochallenge.odpt.org/api/v4/odpt:Bus'\n url_busstop = (\n 'https://api-tokyochallenge.odpt.org/api/v4/odpt:BusstopPole.json')\n url_routes = (\n 'https://api-tokyochallenge.odpt.org/api/v4/odpt:BusroutePattern.json')\n params = {'odpt:operator': 'odpt.Operator:YokohamaMunicipal'}\n bus_stops = None\n bus_routes = None\n\n @staticmethod\n def init():\n apiKey = os.getenv('BUS_TOKEN')\n BusInfo.params['acl:consumerKey'] = apiKey\n BusInfo.getBusStops()\n BusInfo.getBusRoutes()\n\n @staticmethod\n def getBusRoutes():\n busroute_list = []\n req = request.Request('{}?{}'.format(BusInfo.url_routes, parse.\n urlencode(BusInfo.params)))\n with request.urlopen(req) as res:\n json_load = json.load(res)\n for v in json_load:\n try:\n busstop = {'route_id': v['owl:sameAs'], 'route_name': v\n ['dc:title']}\n busroute_list.append(busstop)\n except Exception:\n pass\n BusInfo.bus_routes = pd.DataFrame(busroute_list).set_index('route_id')\n\n @staticmethod\n def getBusStops():\n busstop_list = []\n req = request.Request('{}?{}'.format(BusInfo.url_busstop, parse.\n urlencode(BusInfo.params)))\n with request.urlopen(req) as res:\n json_load = json.load(res)\n for v in json_load:\n try:\n busstop = {'busstop_id': v['owl:sameAs'], 'pole_name':\n v['dc:title']}\n busstop_list.append(busstop)\n except Exception:\n pass\n BusInfo.bus_stops = pd.DataFrame(busstop_list).set_index('busstop_id')\n\n @staticmethod\n def update():\n bus_list = []\n req = request.Request('{}?{}'.format(BusInfo.url, parse.urlencode(\n BusInfo.params)))\n with request.urlopen(req) as res:\n json_load = json.load(res)\n for v in json_load:\n try:\n if v['odpt:occupancyStatus'\n ] == 'odpt.OccupancyStatus:Empty':\n occupancy = '空いている'\n color = 'blue'\n elif v['odpt:occupancyStatus'\n ] == 'odpt.OccupancyStatus:ManySeatsAvailable':\n occupancy = '空き座席多数'\n color = 'blue'\n elif v['odpt:occupancyStatus'\n ] == 'odpt.OccupancyStatus:FewSeatsAvailable':\n occupancy = '座席わすか'\n color = 'yellow'\n elif v['odpt:occupancyStatus'\n ] == 'odpt.OccupancyStatus:StandingRoomOnly':\n occupancy = '混雑'\n color = 'red'\n else:\n color = 'gray'\n bus = {'bus_id': v['odpt:busNumber'], 'lat': v[\n 'geo:lat'], 'lng': v['geo:long'], 'route_num': v[\n 'odpt:busroute'][-3:], 'route_id': v[\n 'odpt:busroutePattern'], 'prevStop': v[\n 'odpt:fromBusstopPole'], 'nextStop': v[\n 'odpt:toBusstopPole'], 'occupancy': occupancy,\n 'color': color, 'azimuth': v['odpt:azimuth'],\n 'img_url': \n 'https://mxl00474.github.io/test_static/arrow_' +\n color + '.png'}\n bus_list.append(bus)\n except Exception:\n pass\n df = pd.DataFrame(bus_list).set_index('bus_id')\n df = pd.merge(df, BusInfo.bus_stops, left_on='prevStop',\n right_index=True, how='left')\n df = pd.merge(df, BusInfo.bus_stops, left_on='nextStop',\n right_index=True, how='left')\n df = pd.merge(df, BusInfo.bus_routes, left_on='route_id',\n right_index=True, how='left')\n return df.fillna('-')\n\n\nif __name__ == '__main__':\n BusInfo.init()\n print('=== Get stop info ===')\n BusInfo.getBusStops()\n print(BusInfo.bus_stops)\n print('=== Get route info ===')\n BusInfo.getBusRoutes()\n print(len(BusInfo.bus_routes))\n print('=== Get bus info ===')\n bus_list = BusInfo.update()\n print(bus_list)\n print(bus_list.columns)\n",
"step-4": "from urllib import request, parse\nimport pandas as pd\nimport json\nimport os\n\n\nclass BusInfo:\n url = 'https://api-tokyochallenge.odpt.org/api/v4/odpt:Bus'\n url_busstop = (\n 'https://api-tokyochallenge.odpt.org/api/v4/odpt:BusstopPole.json')\n url_routes = (\n 'https://api-tokyochallenge.odpt.org/api/v4/odpt:BusroutePattern.json')\n params = {'odpt:operator': 'odpt.Operator:YokohamaMunicipal'}\n bus_stops = None\n bus_routes = None\n\n @staticmethod\n def init():\n apiKey = os.getenv('BUS_TOKEN')\n BusInfo.params['acl:consumerKey'] = apiKey\n BusInfo.getBusStops()\n BusInfo.getBusRoutes()\n\n @staticmethod\n def getBusRoutes():\n busroute_list = []\n req = request.Request('{}?{}'.format(BusInfo.url_routes, parse.\n urlencode(BusInfo.params)))\n with request.urlopen(req) as res:\n json_load = json.load(res)\n for v in json_load:\n try:\n busstop = {'route_id': v['owl:sameAs'], 'route_name': v\n ['dc:title']}\n busroute_list.append(busstop)\n except Exception:\n pass\n BusInfo.bus_routes = pd.DataFrame(busroute_list).set_index('route_id')\n\n @staticmethod\n def getBusStops():\n busstop_list = []\n req = request.Request('{}?{}'.format(BusInfo.url_busstop, parse.\n urlencode(BusInfo.params)))\n with request.urlopen(req) as res:\n json_load = json.load(res)\n for v in json_load:\n try:\n busstop = {'busstop_id': v['owl:sameAs'], 'pole_name':\n v['dc:title']}\n busstop_list.append(busstop)\n except Exception:\n pass\n BusInfo.bus_stops = pd.DataFrame(busstop_list).set_index('busstop_id')\n\n @staticmethod\n def update():\n bus_list = []\n req = request.Request('{}?{}'.format(BusInfo.url, parse.urlencode(\n BusInfo.params)))\n with request.urlopen(req) as res:\n json_load = json.load(res)\n for v in json_load:\n try:\n if v['odpt:occupancyStatus'\n ] == 'odpt.OccupancyStatus:Empty':\n occupancy = '空いている'\n color = 'blue'\n elif v['odpt:occupancyStatus'\n ] == 'odpt.OccupancyStatus:ManySeatsAvailable':\n occupancy = '空き座席多数'\n color = 'blue'\n elif v['odpt:occupancyStatus'\n ] == 'odpt.OccupancyStatus:FewSeatsAvailable':\n occupancy = '座席わすか'\n color = 'yellow'\n elif v['odpt:occupancyStatus'\n ] == 'odpt.OccupancyStatus:StandingRoomOnly':\n occupancy = '混雑'\n color = 'red'\n else:\n color = 'gray'\n bus = {'bus_id': v['odpt:busNumber'], 'lat': v[\n 'geo:lat'], 'lng': v['geo:long'], 'route_num': v[\n 'odpt:busroute'][-3:], 'route_id': v[\n 'odpt:busroutePattern'], 'prevStop': v[\n 'odpt:fromBusstopPole'], 'nextStop': v[\n 'odpt:toBusstopPole'], 'occupancy': occupancy,\n 'color': color, 'azimuth': v['odpt:azimuth'],\n 'img_url': \n 'https://mxl00474.github.io/test_static/arrow_' +\n color + '.png'}\n bus_list.append(bus)\n except Exception:\n pass\n df = pd.DataFrame(bus_list).set_index('bus_id')\n df = pd.merge(df, BusInfo.bus_stops, left_on='prevStop',\n right_index=True, how='left')\n df = pd.merge(df, BusInfo.bus_stops, left_on='nextStop',\n right_index=True, how='left')\n df = pd.merge(df, BusInfo.bus_routes, left_on='route_id',\n right_index=True, how='left')\n return df.fillna('-')\n\n\nif __name__ == '__main__':\n BusInfo.init()\n print('=== Get stop info ===')\n BusInfo.getBusStops()\n print(BusInfo.bus_stops)\n print('=== Get route info ===')\n BusInfo.getBusRoutes()\n print(len(BusInfo.bus_routes))\n print('=== Get bus info ===')\n bus_list = BusInfo.update()\n print(bus_list)\n print(bus_list.columns)\n",
"step-5": "from urllib import request, parse\nimport pandas as pd\nimport json\nimport os\n\nclass BusInfo:\n\n url = 'https://api-tokyochallenge.odpt.org/api/v4/odpt:Bus'\n url_busstop = 'https://api-tokyochallenge.odpt.org/api/v4/odpt:BusstopPole.json'\n url_routes = 'https://api-tokyochallenge.odpt.org/api/v4/odpt:BusroutePattern.json'\n \n params = {\n 'odpt:operator': 'odpt.Operator:YokohamaMunicipal',\n }\n\n bus_stops = None\n bus_routes = None\n\n @staticmethod\n def init():\n \n apiKey = os.getenv('BUS_TOKEN')\n BusInfo.params['acl:consumerKey'] = apiKey\n\n BusInfo.getBusStops()\n BusInfo.getBusRoutes()\n\n @staticmethod\n def getBusRoutes():\n \n #BusInfo.bus_routes = pd.DataFrame()\n #return\n\n busroute_list=[]\n req = request.Request('{}?{}'.format(BusInfo.url_routes, parse.urlencode(BusInfo.params)))\n with request.urlopen(req) as res:\n json_load = json.load(res) \n for v in json_load:\n try:\n busstop = { 'route_id': v['owl:sameAs'],\n 'route_name': v['dc:title'],\n }\n busroute_list.append(busstop)\n except Exception:\n pass\n BusInfo.bus_routes = pd.DataFrame(busroute_list).set_index('route_id')\n\n @staticmethod\n def getBusStops():\n\n #BusInfo.bus_stops = pd.DataFrame()\n #return\n\n busstop_list=[]\n req = request.Request('{}?{}'.format(BusInfo.url_busstop, parse.urlencode(BusInfo.params)))\n with request.urlopen(req) as res:\n json_load = json.load(res) \n for v in json_load:\n try:\n busstop = { 'busstop_id': v['owl:sameAs'],\n 'pole_name': v['dc:title'],\n }\n busstop_list.append(busstop)\n except Exception:\n pass\n BusInfo.bus_stops = pd.DataFrame(busstop_list).set_index('busstop_id')\n\n @staticmethod\n def update():\n bus_list=[]\n req = request.Request('{}?{}'.format(BusInfo.url, parse.urlencode(BusInfo.params)))\n with request.urlopen(req) as res:\n json_load = json.load(res) \n for v in json_load:\n try:\n\n if v['odpt:occupancyStatus'] == 'odpt.OccupancyStatus:Empty':\n occupancy = '空いている'\n color='blue'\n elif v['odpt:occupancyStatus'] == 'odpt.OccupancyStatus:ManySeatsAvailable':\n occupancy = '空き座席多数'\n color='blue'\n elif v['odpt:occupancyStatus'] == 'odpt.OccupancyStatus:FewSeatsAvailable':\n occupancy = '座席わすか'\n color='yellow'\n elif v['odpt:occupancyStatus'] == 'odpt.OccupancyStatus:StandingRoomOnly':\n occupancy = '混雑'\n color='red'\n else:\n color='gray'\n\n bus = { 'bus_id': v['odpt:busNumber'],\n 'lat': v['geo:lat'],\n 'lng': v['geo:long'],\n 'route_num': v['odpt:busroute'][-3:],\n 'route_id': v['odpt:busroutePattern'],\n 'prevStop': v['odpt:fromBusstopPole'],\n 'nextStop': v['odpt:toBusstopPole'],\n 'occupancy' : occupancy,\n 'color' : color,\n 'azimuth' : v['odpt:azimuth'],\n 'img_url' : 'https://mxl00474.github.io/test_static/arrow_' + color + '.png'\n }\n bus_list.append(bus)\n except Exception:\n pass\n df = pd.DataFrame(bus_list).set_index('bus_id')\n df = pd.merge(df, BusInfo.bus_stops, left_on='prevStop', right_index=True, how='left')\n df = pd.merge(df, BusInfo.bus_stops, left_on='nextStop', right_index=True, how='left')\n df = pd.merge(df, BusInfo.bus_routes, left_on='route_id', right_index=True, how='left')\n return df.fillna(\"-\")\n\nif __name__ == '__main__':\n\n BusInfo.init()\n\n print('=== Get stop info ===')\n BusInfo.getBusStops()\n print(BusInfo.bus_stops)\n\n print('=== Get route info ===')\n BusInfo.getBusRoutes()\n #print(BusInfo.bus_routes)\n print(len(BusInfo.bus_routes))\n\n print('=== Get bus info ===')\n bus_list = BusInfo.update()\n print(bus_list)\n print(bus_list.columns)\n\n",
"step-ids": [
5,
6,
7,
8,
9
]
}
|
[
5,
6,
7,
8,
9
] |
'''
Trolls are attacking your comment section!
A common way to deal with this situation is to remove all of the vowels from the trolls' comments, neutralizing the threat.
Your task is to write a function that takes a string and return a new string with all vowels removed.
For example, the string "This website is for losers LOL!" would become "Ths wbst s fr lsrs LL!".
Note: for this kata y isn't considered a vowel.
'''
#%%
def disemvowel(string):
returnString =""
vowels = ["a","e", "i", "o", "u"]
upperVowels = ["A", "E", "I", "O", "U"]
vowless = [i for i in string if i not in vowels and i not in upperVowels]
for letters in vowless:
returnString += letters
return returnString
string = "hEllo"
dis = disemvowel(string)
dis
#%%
def disemvowel(s):
    # Python 3: str.translate takes a table; build one that deletes the vowels
    return s.translate(str.maketrans("", "", "aeiouAEIOU"))
e = "Hello"
i = disemvowel(e)
i
# %%
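#%%
# A third variant for comparison (not part of the original solutions):
# a single pass with a generator expression.
def disemvowel_oneliner(s):
    # keep every character whose lowercase form is not a vowel
    return "".join(c for c in s if c.lower() not in "aeiou")

assert disemvowel_oneliner("This website is for losers LOL!") == "Ths wbst s fr lsrs LL!"
# %%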
|
normal
|
{
"blob_id": "4dea0967a0ee3e9eb3b46145739dfeb233f3a120",
"index": 5307,
"step-1": "<mask token>\n\n\ndef disemvowel(s):\n return s.translate(None, 'aeiouAEIOU')\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef disemvowel(string):\n returnString = ''\n vowels = ['a', 'e', 'i', 'o', 'u']\n upperVowels = ['A', 'E', 'I', 'O', 'U']\n vowless = [i for i in string if i not in vowels and i not in upperVowels]\n for letters in vowless:\n returnString += letters\n return returnString\n\n\n<mask token>\n\n\ndef disemvowel(s):\n return s.translate(None, 'aeiouAEIOU')\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef disemvowel(string):\n returnString = ''\n vowels = ['a', 'e', 'i', 'o', 'u']\n upperVowels = ['A', 'E', 'I', 'O', 'U']\n vowless = [i for i in string if i not in vowels and i not in upperVowels]\n for letters in vowless:\n returnString += letters\n return returnString\n\n\n<mask token>\ndis\n\n\ndef disemvowel(s):\n return s.translate(None, 'aeiouAEIOU')\n\n\n<mask token>\ni\n",
"step-4": "<mask token>\n\n\ndef disemvowel(string):\n returnString = ''\n vowels = ['a', 'e', 'i', 'o', 'u']\n upperVowels = ['A', 'E', 'I', 'O', 'U']\n vowless = [i for i in string if i not in vowels and i not in upperVowels]\n for letters in vowless:\n returnString += letters\n return returnString\n\n\nstring = 'hEllo'\ndis = disemvowel(string)\ndis\n\n\ndef disemvowel(s):\n return s.translate(None, 'aeiouAEIOU')\n\n\ne = 'Hello'\ni = disemvowel(e)\ni\n",
"step-5": "'''\nTrolls are attacking your comment section!\n\nA common way to deal with this situation is to remove all of the vowels from the trolls' comments, neutralizing the threat.\n\nYour task is to write a function that takes a string and return a new string with all vowels removed.\n\nFor example, the string \"This website is for losers LOL!\" would become \"Ths wbst s fr lsrs LL!\".\n\nNote: for this kata y isn't considered a vowel.\n\n\n'''\n#%%\ndef disemvowel(string):\n returnString =\"\"\n vowels = [\"a\",\"e\", \"i\", \"o\", \"u\"]\n upperVowels = [\"A\", \"E\", \"I\", \"O\", \"U\"]\n vowless = [i for i in string if i not in vowels and i not in upperVowels]\n for letters in vowless:\n returnString += letters\n\n return returnString\n\nstring = \"hEllo\"\ndis = disemvowel(string)\ndis\n\n\n\n#%%\ndef disemvowel(s):\n return s.translate(None, \"aeiouAEIOU\")\n\ne = \"Hello\"\ni = disemvowel(e)\ni\n# %%\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
import sys
from io import BytesIO
import telegram
from flask import Flask, request, send_file
from fsm import TocMachine
API_TOKEN = '375541027:AAFvLkySNkMSGgOl7PtsPIsJgnxophQpllQ'  # bot token; in practice load this from an env var instead of source
WEBHOOK_URL = 'https://a140f4ad.ngrok.io/hook'  # must target the POST handler below; '/show-fsm' only serves the diagram
app = Flask(__name__)
bot = telegram.Bot(token=API_TOKEN)
# Each 'advance' edge below is guarded by an is_going_from_<src>_to_<dest>
# condition, presumably defined as a method on TocMachine in fsm.py.
machine = TocMachine(
states=[
'user',
'state3',
'state4',
'state5',
'state6',
'state7',
'state8',
'state9',
'state10',
'state11',
'state12',
'state13',
'state14',
'state15'
],
transitions=[
{
'trigger': 'advance',
'source': 'user',
'dest': 'state3',
'conditions': 'is_going_from_state0_to_state3'
},
{
'trigger': 'advance',
'source': 'state3',
'dest': 'state4',
'conditions': 'is_going_from_state3_to_state4'
},
{
'trigger': 'advance',
'source': 'state4',
'dest': 'state5',
'conditions': 'is_going_from_state4_to_state5'
},
{
'trigger': 'advance',
'source': 'state5',
'dest': 'state6',
'conditions': 'is_going_from_state5_to_state6'
},
{
'trigger': 'advance',
'source': 'state5',
'dest': 'state7',
'conditions': 'is_going_from_state5_to_state7'
},
{
'trigger': 'advance',
'source': 'state4',
'dest': 'state8',
'conditions': 'is_going_from_state4_to_state8'
},
{
'trigger': 'advance',
'source': 'state8',
'dest': 'state9',
'conditions': 'is_going_from_state8_to_state9'
},
{
'trigger': 'advance',
'source': 'state6',
'dest': 'state8',
'conditions': 'is_going_from_state6_to_state8'
},
{
'trigger': 'advance',
'source': 'state7',
'dest': 'state8',
'conditions': 'is_going_from_state7_to_state8'
},
{
'trigger': 'advance',
'source': 'state9',
'dest': 'state5',
'conditions': 'is_going_from_state9_to_state5'
},
{
'trigger': 'advance',
'source': 'state9',
'dest': 'state10',
'conditions': 'is_going_from_state9_to_state10'
},
{
'trigger': 'advance',
'source': 'state6',
'dest': 'state10',
'conditions': 'is_going_from_state6_to_state10'
},
{
'trigger': 'advance',
'source': 'state7',
'dest': 'state10',
'conditions': 'is_going_from_state7_to_state10'
},
{
'trigger': 'advance',
'source': 'state8',
'dest': 'state11',
'conditions': 'is_going_from_state8_to_state11'
},
{
'trigger': 'advance',
'source': 'state11',
'dest': 'state10',
'conditions': 'is_going_from_state11_to_state10'
},
{
'trigger': 'advance',
'source': 'state11',
'dest': 'state5',
'conditions': 'is_going_from_state11_to_state5'
},
{
'trigger': 'advance',
'source': 'state8',
'dest': 'state12',
'conditions': 'is_going_from_state8_to_state12'
},
{
'trigger': 'advance',
'source': 'state12',
'dest': 'state10',
'conditions': 'is_going_from_state12_to_state10'
},
{
'trigger': 'advance',
'source': 'state12',
'dest': 'state5',
'conditions': 'is_going_from_state12_to_state5'
},
{
'trigger': 'advance',
'source': 'state8',
'dest': 'state13',
'conditions': 'is_going_from_state8_to_state13'
},
{
'trigger': 'advance',
'source': 'state13',
'dest': 'state10',
'conditions': 'is_going_from_state13_to_state10'
},
{
'trigger': 'advance',
'source': 'state13',
'dest': 'state5',
'conditions': 'is_going_from_state13_to_state5'
},
{
'trigger': 'advance',
'source': 'state8',
'dest': 'state14',
'conditions': 'is_going_from_state8_to_state14'
},
{
'trigger': 'advance',
'source': 'state14',
'dest': 'state10',
'conditions': 'is_going_from_state14_to_state10'
},
{
'trigger': 'advance',
'source': 'state14',
'dest': 'state5',
'conditions': 'is_going_from_state14_to_state5'
},
{
'trigger': 'advance',
'source': 'state8',
'dest': 'state15',
'conditions': 'is_going_from_state8_to_state15'
},
{
'trigger': 'advance',
'source': 'state15',
'dest': 'state10',
'conditions': 'is_going_from_state15_to_state10'
},
{
'trigger': 'advance',
'source': 'state15',
'dest': 'state5',
'conditions': 'is_going_from_state15_to_state5'
},
{
'trigger': 'go_back',
'source': [
'state10'
],
'dest': 'user'
}
],
initial='user',
auto_transitions=False,
show_conditions=True,
)
def _set_webhook():
status = bot.set_webhook(WEBHOOK_URL)
if not status:
print('Webhook setup failed')
sys.exit(1)
else:
print('Your webhook URL has been set to "{}"'.format(WEBHOOK_URL))
@app.route('/hook', methods=['POST'])
def webhook_handler():
update = telegram.Update.de_json(request.get_json(force=True), bot)
machine.advance(update)
return 'ok'
@app.route('/show-fsm', methods=['GET'])
def show_fsm():
byte_io = BytesIO()
machine.graph.draw(byte_io, prog='dot', format='png')
byte_io.seek(0)
return send_file(byte_io, attachment_filename='fsm.png', mimetype='image/png')
if __name__ == "__main__":
_set_webhook()
app.run()
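
# The transition table above is highly regular; an illustrative sketch of
# generating it from (source, dest) pairs, mirroring the existing
# is_going_from_<src>_to_<dest> naming, including the 'state0' alias the
# original uses for the user -> state3 condition. EDGES and make_transition
# are hypothetical names, not part of the original script.
def make_transition(src, dest):
    cond_src = 'state0' if src == 'user' else src
    return {
        'trigger': 'advance',
        'source': src,
        'dest': dest,
        'conditions': 'is_going_from_{}_to_{}'.format(cond_src, dest),
    }

# First few pairs shown; the remaining edges follow the same pattern.
EDGES = [
    ('user', 'state3'), ('state3', 'state4'), ('state4', 'state5'),
    ('state5', 'state6'), ('state5', 'state7'), ('state4', 'state8'),
]
generated = [make_transition(s, d) for s, d in EDGES]
generated.append({'trigger': 'go_back', 'source': ['state10'], 'dest': 'user'})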
|
normal
|
{
"blob_id": "984efa858e782777472d84aab85471616a05b0e0",
"index": 2886,
"step-1": "<mask token>\n\n\ndef _set_webhook():\n status = bot.set_webhook(WEBHOOK_URL)\n if not status:\n print('Webhook setup failed')\n sys.exit(1)\n else:\n print('Your webhook URL has been set to \"{}\"'.format(WEBHOOK_URL))\n\n\[email protected]('/hook', methods=['POST'])\ndef webhook_handler():\n update = telegram.Update.de_json(request.get_json(force=True), bot)\n machine.advance(update)\n return 'ok'\n\n\[email protected]('/show-fsm', methods=['GET'])\ndef show_fsm():\n byte_io = BytesIO()\n machine.graph.draw(byte_io, prog='dot', format='png')\n byte_io.seek(0)\n return send_file(byte_io, attachment_filename='fsm.png', mimetype=\n 'image/png')\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef _set_webhook():\n status = bot.set_webhook(WEBHOOK_URL)\n if not status:\n print('Webhook setup failed')\n sys.exit(1)\n else:\n print('Your webhook URL has been set to \"{}\"'.format(WEBHOOK_URL))\n\n\[email protected]('/hook', methods=['POST'])\ndef webhook_handler():\n update = telegram.Update.de_json(request.get_json(force=True), bot)\n machine.advance(update)\n return 'ok'\n\n\[email protected]('/show-fsm', methods=['GET'])\ndef show_fsm():\n byte_io = BytesIO()\n machine.graph.draw(byte_io, prog='dot', format='png')\n byte_io.seek(0)\n return send_file(byte_io, attachment_filename='fsm.png', mimetype=\n 'image/png')\n\n\nif __name__ == '__main__':\n _set_webhook()\n app.run()\n",
"step-3": "<mask token>\nAPI_TOKEN = '375541027:AAFvLkySNkMSGgOl7PtsPIsJgnxophQpllQ'\nWEBHOOK_URL = 'https://a140f4ad.ngrok.io/show-fsm'\napp = Flask(__name__)\nbot = telegram.Bot(token=API_TOKEN)\nmachine = TocMachine(states=['user', 'state3', 'state4', 'state5', 'state6',\n 'state7', 'state8', 'state9', 'state10', 'state11', 'state12',\n 'state13', 'state14', 'state15'], transitions=[{'trigger': 'advance',\n 'source': 'user', 'dest': 'state3', 'conditions':\n 'is_going_from_state0_to_state3'}, {'trigger': 'advance', 'source':\n 'state3', 'dest': 'state4', 'conditions':\n 'is_going_from_state3_to_state4'}, {'trigger': 'advance', 'source':\n 'state4', 'dest': 'state5', 'conditions':\n 'is_going_from_state4_to_state5'}, {'trigger': 'advance', 'source':\n 'state5', 'dest': 'state6', 'conditions':\n 'is_going_from_state5_to_state6'}, {'trigger': 'advance', 'source':\n 'state5', 'dest': 'state7', 'conditions':\n 'is_going_from_state5_to_state7'}, {'trigger': 'advance', 'source':\n 'state4', 'dest': 'state8', 'conditions':\n 'is_going_from_state4_to_state8'}, {'trigger': 'advance', 'source':\n 'state8', 'dest': 'state9', 'conditions':\n 'is_going_from_state8_to_state9'}, {'trigger': 'advance', 'source':\n 'state6', 'dest': 'state8', 'conditions':\n 'is_going_from_state6_to_state8'}, {'trigger': 'advance', 'source':\n 'state7', 'dest': 'state8', 'conditions':\n 'is_going_from_state7_to_state8'}, {'trigger': 'advance', 'source':\n 'state9', 'dest': 'state5', 'conditions':\n 'is_going_from_state9_to_state5'}, {'trigger': 'advance', 'source':\n 'state9', 'dest': 'state10', 'conditions':\n 'is_going_from_state9_to_state10'}, {'trigger': 'advance', 'source':\n 'state6', 'dest': 'state10', 'conditions':\n 'is_going_from_state6_to_state10'}, {'trigger': 'advance', 'source':\n 'state7', 'dest': 'state10', 'conditions':\n 'is_going_from_state7_to_state10'}, {'trigger': 'advance', 'source':\n 'state8', 'dest': 'state11', 'conditions':\n 'is_going_from_state8_to_state11'}, {'trigger': 'advance', 'source':\n 'state11', 'dest': 'state10', 'conditions':\n 'is_going_from_state11_to_state10'}, {'trigger': 'advance', 'source':\n 'state11', 'dest': 'state5', 'conditions':\n 'is_going_from_state11_to_state5'}, {'trigger': 'advance', 'source':\n 'state8', 'dest': 'state12', 'conditions':\n 'is_going_from_state8_to_state12'}, {'trigger': 'advance', 'source':\n 'state12', 'dest': 'state10', 'conditions':\n 'is_going_from_state12_to_state10'}, {'trigger': 'advance', 'source':\n 'state12', 'dest': 'state5', 'conditions':\n 'is_going_from_state12_to_state5'}, {'trigger': 'advance', 'source':\n 'state8', 'dest': 'state13', 'conditions':\n 'is_going_from_state8_to_state13'}, {'trigger': 'advance', 'source':\n 'state13', 'dest': 'state10', 'conditions':\n 'is_going_from_state13_to_state10'}, {'trigger': 'advance', 'source':\n 'state13', 'dest': 'state5', 'conditions':\n 'is_going_from_state13_to_state5'}, {'trigger': 'advance', 'source':\n 'state8', 'dest': 'state14', 'conditions':\n 'is_going_from_state8_to_state14'}, {'trigger': 'advance', 'source':\n 'state14', 'dest': 'state10', 'conditions':\n 'is_going_from_state14_to_state10'}, {'trigger': 'advance', 'source':\n 'state14', 'dest': 'state5', 'conditions':\n 'is_going_from_state14_to_state5'}, {'trigger': 'advance', 'source':\n 'state8', 'dest': 'state15', 'conditions':\n 'is_going_from_state8_to_state15'}, {'trigger': 'advance', 'source':\n 'state15', 'dest': 'state10', 'conditions':\n 'is_going_from_state15_to_state10'}, {'trigger': 'advance', 'source':\n 'state15', 
'dest': 'state5', 'conditions':\n 'is_going_from_state15_to_state5'}, {'trigger': 'go_back', 'source': [\n 'state10'], 'dest': 'user'}], initial='user', auto_transitions=False,\n show_conditions=True)\n\n\ndef _set_webhook():\n status = bot.set_webhook(WEBHOOK_URL)\n if not status:\n print('Webhook setup failed')\n sys.exit(1)\n else:\n print('Your webhook URL has been set to \"{}\"'.format(WEBHOOK_URL))\n\n\[email protected]('/hook', methods=['POST'])\ndef webhook_handler():\n update = telegram.Update.de_json(request.get_json(force=True), bot)\n machine.advance(update)\n return 'ok'\n\n\[email protected]('/show-fsm', methods=['GET'])\ndef show_fsm():\n byte_io = BytesIO()\n machine.graph.draw(byte_io, prog='dot', format='png')\n byte_io.seek(0)\n return send_file(byte_io, attachment_filename='fsm.png', mimetype=\n 'image/png')\n\n\nif __name__ == '__main__':\n _set_webhook()\n app.run()\n",
"step-4": "import sys\nfrom io import BytesIO\nimport telegram\nfrom flask import Flask, request, send_file\nfrom fsm import TocMachine\nAPI_TOKEN = '375541027:AAFvLkySNkMSGgOl7PtsPIsJgnxophQpllQ'\nWEBHOOK_URL = 'https://a140f4ad.ngrok.io/show-fsm'\napp = Flask(__name__)\nbot = telegram.Bot(token=API_TOKEN)\nmachine = TocMachine(states=['user', 'state3', 'state4', 'state5', 'state6',\n 'state7', 'state8', 'state9', 'state10', 'state11', 'state12',\n 'state13', 'state14', 'state15'], transitions=[{'trigger': 'advance',\n 'source': 'user', 'dest': 'state3', 'conditions':\n 'is_going_from_state0_to_state3'}, {'trigger': 'advance', 'source':\n 'state3', 'dest': 'state4', 'conditions':\n 'is_going_from_state3_to_state4'}, {'trigger': 'advance', 'source':\n 'state4', 'dest': 'state5', 'conditions':\n 'is_going_from_state4_to_state5'}, {'trigger': 'advance', 'source':\n 'state5', 'dest': 'state6', 'conditions':\n 'is_going_from_state5_to_state6'}, {'trigger': 'advance', 'source':\n 'state5', 'dest': 'state7', 'conditions':\n 'is_going_from_state5_to_state7'}, {'trigger': 'advance', 'source':\n 'state4', 'dest': 'state8', 'conditions':\n 'is_going_from_state4_to_state8'}, {'trigger': 'advance', 'source':\n 'state8', 'dest': 'state9', 'conditions':\n 'is_going_from_state8_to_state9'}, {'trigger': 'advance', 'source':\n 'state6', 'dest': 'state8', 'conditions':\n 'is_going_from_state6_to_state8'}, {'trigger': 'advance', 'source':\n 'state7', 'dest': 'state8', 'conditions':\n 'is_going_from_state7_to_state8'}, {'trigger': 'advance', 'source':\n 'state9', 'dest': 'state5', 'conditions':\n 'is_going_from_state9_to_state5'}, {'trigger': 'advance', 'source':\n 'state9', 'dest': 'state10', 'conditions':\n 'is_going_from_state9_to_state10'}, {'trigger': 'advance', 'source':\n 'state6', 'dest': 'state10', 'conditions':\n 'is_going_from_state6_to_state10'}, {'trigger': 'advance', 'source':\n 'state7', 'dest': 'state10', 'conditions':\n 'is_going_from_state7_to_state10'}, {'trigger': 'advance', 'source':\n 'state8', 'dest': 'state11', 'conditions':\n 'is_going_from_state8_to_state11'}, {'trigger': 'advance', 'source':\n 'state11', 'dest': 'state10', 'conditions':\n 'is_going_from_state11_to_state10'}, {'trigger': 'advance', 'source':\n 'state11', 'dest': 'state5', 'conditions':\n 'is_going_from_state11_to_state5'}, {'trigger': 'advance', 'source':\n 'state8', 'dest': 'state12', 'conditions':\n 'is_going_from_state8_to_state12'}, {'trigger': 'advance', 'source':\n 'state12', 'dest': 'state10', 'conditions':\n 'is_going_from_state12_to_state10'}, {'trigger': 'advance', 'source':\n 'state12', 'dest': 'state5', 'conditions':\n 'is_going_from_state12_to_state5'}, {'trigger': 'advance', 'source':\n 'state8', 'dest': 'state13', 'conditions':\n 'is_going_from_state8_to_state13'}, {'trigger': 'advance', 'source':\n 'state13', 'dest': 'state10', 'conditions':\n 'is_going_from_state13_to_state10'}, {'trigger': 'advance', 'source':\n 'state13', 'dest': 'state5', 'conditions':\n 'is_going_from_state13_to_state5'}, {'trigger': 'advance', 'source':\n 'state8', 'dest': 'state14', 'conditions':\n 'is_going_from_state8_to_state14'}, {'trigger': 'advance', 'source':\n 'state14', 'dest': 'state10', 'conditions':\n 'is_going_from_state14_to_state10'}, {'trigger': 'advance', 'source':\n 'state14', 'dest': 'state5', 'conditions':\n 'is_going_from_state14_to_state5'}, {'trigger': 'advance', 'source':\n 'state8', 'dest': 'state15', 'conditions':\n 'is_going_from_state8_to_state15'}, {'trigger': 'advance', 'source':\n 'state15', 
'dest': 'state10', 'conditions':\n 'is_going_from_state15_to_state10'}, {'trigger': 'advance', 'source':\n 'state15', 'dest': 'state5', 'conditions':\n 'is_going_from_state15_to_state5'}, {'trigger': 'go_back', 'source': [\n 'state10'], 'dest': 'user'}], initial='user', auto_transitions=False,\n show_conditions=True)\n\n\ndef _set_webhook():\n status = bot.set_webhook(WEBHOOK_URL)\n if not status:\n print('Webhook setup failed')\n sys.exit(1)\n else:\n print('Your webhook URL has been set to \"{}\"'.format(WEBHOOK_URL))\n\n\[email protected]('/hook', methods=['POST'])\ndef webhook_handler():\n update = telegram.Update.de_json(request.get_json(force=True), bot)\n machine.advance(update)\n return 'ok'\n\n\[email protected]('/show-fsm', methods=['GET'])\ndef show_fsm():\n byte_io = BytesIO()\n machine.graph.draw(byte_io, prog='dot', format='png')\n byte_io.seek(0)\n return send_file(byte_io, attachment_filename='fsm.png', mimetype=\n 'image/png')\n\n\nif __name__ == '__main__':\n _set_webhook()\n app.run()\n",
"step-5": "import sys\nfrom io import BytesIO\n\nimport telegram\nfrom flask import Flask, request, send_file\n\nfrom fsm import TocMachine\n\n\nAPI_TOKEN = '375541027:AAFvLkySNkMSGgOl7PtsPIsJgnxophQpllQ'\nWEBHOOK_URL = 'https://a140f4ad.ngrok.io/show-fsm'\n\napp = Flask(__name__)\nbot = telegram.Bot(token=API_TOKEN)\nmachine = TocMachine(\n states=[\n 'user',\n 'state3',\n 'state4',\n 'state5',\n 'state6',\n 'state7',\n 'state8',\n 'state9',\n 'state10',\n 'state11',\n 'state12',\n 'state13',\n 'state14',\n 'state15'\n ],\n transitions=[\n {\n 'trigger': 'advance',\n 'source': 'user',\n 'dest': 'state3',\n 'conditions': 'is_going_from_state0_to_state3'\n },\n {\n 'trigger': 'advance',\n 'source': 'state3',\n 'dest': 'state4',\n 'conditions': 'is_going_from_state3_to_state4'\n },\n {\n 'trigger': 'advance',\n 'source': 'state4',\n 'dest': 'state5',\n 'conditions': 'is_going_from_state4_to_state5'\n },\n {\n 'trigger': 'advance',\n 'source': 'state5',\n 'dest': 'state6',\n 'conditions': 'is_going_from_state5_to_state6'\n },\n {\n 'trigger': 'advance',\n 'source': 'state5',\n 'dest': 'state7',\n 'conditions': 'is_going_from_state5_to_state7'\n },\n {\n 'trigger': 'advance',\n 'source': 'state4',\n 'dest': 'state8',\n 'conditions': 'is_going_from_state4_to_state8'\n },\n {\n 'trigger': 'advance',\n 'source': 'state8',\n 'dest': 'state9',\n 'conditions': 'is_going_from_state8_to_state9'\n },\n {\n 'trigger': 'advance',\n 'source': 'state6',\n 'dest': 'state8',\n 'conditions': 'is_going_from_state6_to_state8'\n },\n {\n 'trigger': 'advance',\n 'source': 'state7',\n 'dest': 'state8',\n 'conditions': 'is_going_from_state7_to_state8'\n },\n {\n 'trigger': 'advance',\n 'source': 'state9',\n 'dest': 'state5',\n 'conditions': 'is_going_from_state9_to_state5'\n },\n {\n 'trigger': 'advance',\n 'source': 'state9',\n 'dest': 'state10',\n 'conditions': 'is_going_from_state9_to_state10'\n },\n {\n 'trigger': 'advance',\n 'source': 'state6',\n 'dest': 'state10',\n 'conditions': 'is_going_from_state6_to_state10'\n },\n {\n 'trigger': 'advance',\n 'source': 'state7',\n 'dest': 'state10',\n 'conditions': 'is_going_from_state7_to_state10'\n },\n {\n 'trigger': 'advance',\n 'source': 'state8',\n 'dest': 'state11',\n 'conditions': 'is_going_from_state8_to_state11'\n },\n {\n 'trigger': 'advance',\n 'source': 'state11',\n 'dest': 'state10',\n 'conditions': 'is_going_from_state11_to_state10'\n },\n {\n 'trigger': 'advance',\n 'source': 'state11',\n 'dest': 'state5',\n 'conditions': 'is_going_from_state11_to_state5'\n },\n {\n 'trigger': 'advance',\n 'source': 'state8',\n 'dest': 'state12',\n 'conditions': 'is_going_from_state8_to_state12'\n },\n {\n 'trigger': 'advance',\n 'source': 'state12',\n 'dest': 'state10',\n 'conditions': 'is_going_from_state12_to_state10'\n },\n {\n 'trigger': 'advance',\n 'source': 'state12',\n 'dest': 'state5',\n 'conditions': 'is_going_from_state12_to_state5'\n },\n {\n 'trigger': 'advance',\n 'source': 'state8',\n 'dest': 'state13',\n 'conditions': 'is_going_from_state8_to_state13'\n },\n {\n 'trigger': 'advance',\n 'source': 'state13',\n 'dest': 'state10',\n 'conditions': 'is_going_from_state13_to_state10'\n },\n {\n 'trigger': 'advance',\n 'source': 'state13',\n 'dest': 'state5',\n 'conditions': 'is_going_from_state13_to_state5'\n },\n {\n 'trigger': 'advance',\n 'source': 'state8',\n 'dest': 'state14',\n 'conditions': 'is_going_from_state8_to_state14'\n },\n {\n 'trigger': 'advance',\n 'source': 'state14',\n 'dest': 'state10',\n 'conditions': 'is_going_from_state14_to_state10'\n 
},\n {\n 'trigger': 'advance',\n 'source': 'state14',\n 'dest': 'state5',\n 'conditions': 'is_going_from_state14_to_state5'\n },\n {\n 'trigger': 'advance',\n 'source': 'state8',\n 'dest': 'state15',\n 'conditions': 'is_going_from_state8_to_state15'\n },\n {\n 'trigger': 'advance',\n 'source': 'state15',\n 'dest': 'state10',\n 'conditions': 'is_going_from_state15_to_state10'\n },\n {\n 'trigger': 'advance',\n 'source': 'state15',\n 'dest': 'state5',\n 'conditions': 'is_going_from_state15_to_state5'\n },\n {\n 'trigger': 'go_back',\n 'source': [\n 'state10'\n ],\n 'dest': 'user'\n }\n ],\n initial='user',\n auto_transitions=False,\n show_conditions=True,\n)\n\n\ndef _set_webhook():\n status = bot.set_webhook(WEBHOOK_URL)\n if not status:\n print('Webhook setup failed')\n sys.exit(1)\n else:\n print('Your webhook URL has been set to \"{}\"'.format(WEBHOOK_URL))\n\n\[email protected]('/hook', methods=['POST'])\ndef webhook_handler():\n update = telegram.Update.de_json(request.get_json(force=True), bot)\n machine.advance(update)\n return 'ok'\n\n\[email protected]('/show-fsm', methods=['GET'])\ndef show_fsm():\n byte_io = BytesIO()\n machine.graph.draw(byte_io, prog='dot', format='png')\n byte_io.seek(0)\n return send_file(byte_io, attachment_filename='fsm.png', mimetype='image/png')\n\n\nif __name__ == \"__main__\":\n _set_webhook()\n app.run()\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
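The FSM record above names a condition method for every `advance` transition, but the `fsm.py` module defining `TocMachine` is not part of this dump. A minimal sketch of what that class could look like, assuming it subclasses `transitions.extensions.GraphMachine` (which would explain both `machine.advance(update)` and `machine.graph.draw(...)` in the app code); the `'go'` keyword and the `on_enter_state3` reply are illustrative assumptions, not from the source:

from transitions.extensions import GraphMachine


class TocMachine(GraphMachine):
    def __init__(self, **machine_configs):
        # model=self lets the string condition names in the transition
        # table resolve to the methods defined on this instance.
        super().__init__(model=self, **machine_configs)

    def is_going_from_state0_to_state3(self, update):
        # Hypothetical guard: only advance when the user sends "go".
        return update.message.text.strip().lower() == 'go'

    def on_enter_state3(self, update):
        # transitions invokes on_enter_<state> hooks automatically;
        # a real handler would reply to the chat here.
        update.message.reply_text('Entered state3')

Each remaining `is_going_from_*` condition would follow the same pattern, gating its transition on the content of the incoming Telegram update.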
def bfs(graph, start):
    # Breadth-first traversal of a graph given as an adjacency dict,
    # e.g. {'A': ['B'], 'B': []}; returns the discovery edges in order.
    queue = [start]
    queued = [start]  # mark start as seen so a cycle cannot re-enqueue it
    path = list()
    while queue:
        print('Queue is: %s' % queue)
        vertex = queue.pop(0)
        print('Processing %s' % vertex)
        for candidate in graph[vertex]:
            if candidate not in queued:
                queued.append(candidate)
                queue.append(candidate)
                path.append(vertex + '>' + candidate)
                print('Adding %s to the queue' % candidate)
    return path
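
A quick usage sketch (the sample graph below is illustrative, not part of the record): every vertex must appear as a key in the adjacency dict, and the function returns the discovery edges in breadth-first order.

if __name__ == '__main__':
    graph = {
        'A': ['B', 'C'],
        'B': ['D'],
        'C': ['D'],
        'D': [],
    }
    print(bfs(graph, 'A'))
    # expected discovery edges: ['A>B', 'A>C', 'B>D']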
|
flexible
|
{
"blob_id": "7bb49712c4ef482c64f3c2a457a766de691ba7c3",
"index": 9427,
"step-1": "<mask token>\n",
"step-2": "def bfs(graph, start):\n queue = [start]\n queued = list()\n path = list()\n while queue:\n print('Queue is: %s' % queue)\n vertex = queue.pop(0)\n print('Processing %s' % vertex)\n for candidate in graph[vertex]:\n if candidate not in queued:\n queued.append(candidate)\n queue.append(candidate)\n path.append(vertex + '>' + candidate)\n print('Adding %s to the queue' % candidate)\n return path\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |